37         _base.assign_indices();
    40         size_t lhsOrder = 1, rhsOrder = 1; 
    42         for (
const Index& idx : _base.indices) {
    44                 if(misc::contains(_lhs.indices, idx)) {
    47                     REQUIRE(misc::contains(_rhs.indices, idx), 
"Every open index of factorisation base must be contained in one of the targets");
    53         _splitPos = lhsOrder-1;
    55         _lhs.assign_indices(lhsOrder);
    56         _rhs.assign_indices(rhsOrder);
    58         std::vector<Index> reorderedBaseIndices;
    59         reorderedBaseIndices.reserve(_base.indices.size());
    61         _lhsPreliminaryIndices.reserve(_lhs.indices.size());
    63         _rhsPreliminaryIndices.reserve(_rhs.indices.size());
    65         std::vector<size_t> reorderedBaseDimensions, lhsDims, rhsDims;
    66         reorderedBaseDimensions.reserve(_base.degree());
    67         lhsDims.reserve(_lhs.indices.size());
    68         rhsDims.reserve(_rhs.indices.size());
    77         for (
const auto &idx : _lhs.indices) {
    79             size_t j, dimOffset = 0;
    80             for(j = 0; j < _base.indices.size() && idx != _base.indices[j]; ++j) {
    81                 dimOffset += _base.indices[j].span;
    84             if(j < _base.indices.size()) {
    85                 _lhsPreliminaryIndices.push_back(_base.indices[j]);
    86                 reorderedBaseIndices.push_back(_base.indices[j]);
    87                 for(
size_t k = 0; k < _base.indices[j].span; ++k) {
    88                     reorderedBaseDimensions.push_back(_base.tensorObjectReadOnly->dimensions.at(dimOffset+k));
    89                     lhsDims.push_back(_base.tensorObjectReadOnly->dimensions[dimOffset+k]);
    90                     _lhsSize *= _base.tensorObjectReadOnly->dimensions[dimOffset+k];
    93                 REQUIRE(!foundCommon, 
"Left part of factorization must have exactly one index that is not contained in base. Here it is more than one.");
    98         _lhsPreliminaryIndices.push_back(auxiliaryIndex);
   102         for (
const auto &idx : _rhs.indices) {
   104             size_t j, dimOffset = 0;
   105             for(j = 0; j < _base.indices.size() && idx != _base.indices[j]; ++j) {
   106                 dimOffset += _base.indices[j].span;
   109             if(j < _base.indices.size()) {
   110                 _rhsPreliminaryIndices.push_back(_base.indices[j]);
   111                 reorderedBaseIndices.push_back(_base.indices[j]);
   112                 for(
size_t k = 0; k < _base.indices[j].span; ++k) {
   113                     reorderedBaseDimensions.push_back(_base.tensorObjectReadOnly->dimensions.at(dimOffset+k));
   114                     rhsDims.push_back(_base.tensorObjectReadOnly->dimensions[dimOffset+k]);
   115                     _rhsSize *= _base.tensorObjectReadOnly->dimensions[dimOffset+k];
   118                 REQUIRE(!foundCommon, 
"Right part of factorization must have exactly one index that is not contained in base. Here it is more than one.");
   120                 auxiliaryIndex = idx;
   123         _rhsPreliminaryIndices.insert(_rhsPreliminaryIndices.begin(), auxiliaryIndex);
   126         evaluate(std::move(reorderedBaseTensor), std::move(_base));
   129         _rank = std::min(_lhsSize, _rhsSize);
   130         lhsDims.push_back(_rank);
   131         rhsDims.insert(rhsDims.begin(), _rank);
   134         _lhs.tensorObject->ensure_own_data_no_copy();
   137         _rhs.tensorObject->ensure_own_data_no_copy();
   139         return std::unique_ptr<Tensor>(reorderedBaseTensor.
tensorObject);
   143         REQUIRE(_output.size() == 3, 
"SVD requires two output tensors, not " << _output.size());
   152         size_t lhsSize, rhsSize, rank, splitPos;
   153         std::vector<Index> lhsPreliminaryIndices, rhsPreliminaryIndices;
   155         std::unique_ptr<Tensor> reorderedBaseTensor = 
prepare_split(lhsSize, rhsSize, rank, splitPos, lhsPreliminaryIndices, rhsPreliminaryIndices, std::move(A), std::move(U), std::move(Vt));
   165             for(
size_t i = 1; i < oldRank; ++i) {
   174             if(rank != oldRank) {
   176                 for(
size_t i = 0; i < rank; ++i) {
   187         std::vector<Index> midPreliminaryIndices({lhsPreliminaryIndices.back(), rhsPreliminaryIndices.front()});
   188         U = (*U.tensorObjectReadOnly)(lhsPreliminaryIndices);
   195         REQUIRE(_output.size() == 2, 
"QR factorisation requires two output tensors, not " << _output.size());
   200         size_t lhsSize, rhsSize, rank, splitPos;
   201         std::vector<Index> lhsPreliminaryIndices, rhsPreliminaryIndices;
   203         std::unique_ptr<Tensor> reorderedBaseTensor = 
prepare_split(lhsSize, rhsSize, rank, splitPos, lhsPreliminaryIndices, rhsPreliminaryIndices, std::move(A), std::move(Q), std::move(R));
   213         REQUIRE(_output.size() == 2, 
"RQ factorisation requires two output tensors, not " << _output.size());
   218         size_t lhsSize, rhsSize, rank, splitPos;
   219         std::vector<Index> lhsPreliminaryIndices, rhsPreliminaryIndices;
   221         std::unique_ptr<Tensor> reorderedBaseTensor = 
prepare_split(lhsSize, rhsSize, rank, splitPos, lhsPreliminaryIndices, rhsPreliminaryIndices, std::move(A), std::move(R), std::move(Q));
   232         REQUIRE(_output.size() == 2, 
"QC factorisation requires two output tensors, not " << _output.size());
   237         size_t lhsSize, rhsSize, rank, splitPos;
   238         std::vector<Index> lhsPreliminaryIndices, rhsPreliminaryIndices;
   240         std::unique_ptr<Tensor> reorderedBaseTensor = 
prepare_split(lhsSize, rhsSize, rank, splitPos, lhsPreliminaryIndices, rhsPreliminaryIndices, std::move(A), std::move(Q), std::move(C));
   250         REQUIRE(_output.size() == 2, 
"CQ factorisation requires two output tensors, not " << _output.size());
   255         size_t lhsSize, rhsSize, rank, splitPos;
   256         std::vector<Index> lhsPreliminaryIndices, rhsPreliminaryIndices;
   258         std::unique_ptr<Tensor> reorderedBaseTensor = 
prepare_split(lhsSize, rhsSize, rank, splitPos, lhsPreliminaryIndices, rhsPreliminaryIndices, std::move(A), std::move(C), std::move(Q));
 const double softThreshold
 
virtual void operator()(const std::vector< internal::IndexedTensor< Tensor > *> &_output) const override
 
Header file for the blas and lapack wrapper functions. 
 
void ensure_own_data()
Ensures that this tensor is the sole owner of its data. If needed, new space is allocated and all entries are copied.
 
Header file for the Index class. 
 
Header file for the standard container support functions. 
 
bool open() const
Checks whether the index is open. 
 
Internal representation of a readable indexed Tensor or TensorNetwork. 
 
internal::IndexedTensorReadOnly< Tensor > * input
 
Header file for the classes defining factorisations of Tensors. 
 
void calculate_rq(Tensor &_R, Tensor &_Q, Tensor _input, const size_t _splitPos)
Low-Level RQ calculation of a given Tensor _input = _R _Q. 
 
void calculate_qr(Tensor &_Q, Tensor &_R, Tensor _input, const size_t _splitPos)
Low-Level QR calculation of a given Tensor _input = _Q _R. 
 
The main namespace of xerus. 
 
Class that handles simple (non-decomposed) tensors in a dense or sparse representation. 
 
virtual void operator()(const std::vector< internal::IndexedTensor< Tensor > *> &_output) const override
 
virtual void operator()(const std::vector< internal::IndexedTensor< Tensor > *> &_output) const override
 
void evaluate(IndexedTensorWritable< Tensor > &&_out, IndexedTensorReadOnly< Tensor > &&_base)
 
Header file for the Tensor class. 
 
Internal representation of a readable and writeable indexed Tensor or TensorNetwork. 
 
void calculate_cq(Tensor &_C, Tensor &_Q, Tensor _input, const size_t _splitPos)
Low-Level CQ calculation of a given Tensor _input = _C _Q. 
 
tensor_type * tensorObject
Non-const pointer to the tensor object. 
 
std::unique_ptr< Tensor > prepare_split(size_t &_lhsSize, size_t &_rhsSize, size_t &_rank, size_t &_splitPos, std::vector< Index > &_lhsPreliminaryIndices, std::vector< Index > &_rhsPreliminaryIndices, internal::IndexedTensorReadOnly< Tensor > &&_base, internal::IndexedTensor< Tensor > &&_lhs, internal::IndexedTensor< Tensor > &&_rhs)
 
virtual void operator()(const std::vector< internal::IndexedTensor< Tensor > *> &_output) const override
 
size_t degree() const
Returns the degree of the associated tensorObject. 
 
Header file for the low level array support function. 
 
Header file for comfort functions and macros that should not be exported in the library. 
 
constexpr const value_t EPSILON
The default epsilon value in xerus. 
 
Class used to represent indices that can be used to write tensor calculations in index notation.
 
void calculate_svd(Tensor &_U, Tensor &_S, Tensor &_Vt, Tensor _input, const size_t _splitPos, const size_t _maxRank, const value_t _eps)
Low-Level SVD calculation of a given Tensor _input = _U _S _Vt. 
 
virtual void operator()(const std::vector< internal::IndexedTensor< Tensor > *> &_output) const override
 
size_t span
The span states how many dimensions are covered by the index. 
 
const tensor_type * tensorObjectReadOnly
Pointer to the associated Tensor/TensorNetwork object. 
 
void calculate_qc(Tensor &_Q, Tensor &_C, Tensor _input, const size_t _splitPos)
Low-Level QC calculation of a given Tensor _input = _Q _C.