indexedTensor_tensor_factorisations.cpp
// Xerus - A General Purpose Tensor Library
// Copyright (C) 2014-2017 Benjamin Huber and Sebastian Wolf.
//
// Xerus is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License,
// or (at your option) any later version.
//
// Xerus is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with Xerus. If not, see <http://www.gnu.org/licenses/>.
//
// For further information on Xerus visit https://libXerus.org
// or contact us at contact@libXerus.org.

#include <xerus/index.h>
#include <xerus/tensor.h>

#include <xerus/misc/internal.h>
namespace xerus {

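    // Helper for all factorisations below: determines which modes of _base belong to _lhs
    // and which to _rhs, creates a reordered copy of _base with the lhs modes in front,
    // and resets _lhs and _rhs to the matching dimensions plus one new shared bond.
    // Returns the reordered copy of the base; the actual factorisation is then performed
    // by the callers below.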
    std::unique_ptr<Tensor> prepare_split(size_t& _lhsSize, size_t& _rhsSize, size_t& _rank, size_t& _splitPos, std::vector<Index>& _lhsPreliminaryIndices, std::vector<Index>& _rhsPreliminaryIndices, internal::IndexedTensorReadOnly<Tensor>&& _base, internal::IndexedTensor<Tensor>&& _lhs, internal::IndexedTensor<Tensor>&& _rhs) {
        _base.assign_indices();

        // Calculate the future order of lhs and rhs.
        size_t lhsOrder = 1, rhsOrder = 1; // Start with 1 because there is a new dimension introduced in the split.

        for (const Index& idx : _base.indices) {
            if (idx.open()) {
                if(misc::contains(_lhs.indices, idx)) {
                    lhsOrder += idx.span;
                } else {
                    REQUIRE(misc::contains(_rhs.indices, idx), "Every open index of the factorisation base must be contained in one of the targets.");
                    rhsOrder += idx.span;
                }
            }
        }

        _splitPos = lhsOrder-1;

        _lhs.assign_indices(lhsOrder);
        _rhs.assign_indices(rhsOrder);

        std::vector<Index> reorderedBaseIndices;
        reorderedBaseIndices.reserve(_base.indices.size());

        _lhsPreliminaryIndices.reserve(_lhs.indices.size());
        _rhsPreliminaryIndices.reserve(_rhs.indices.size());

        std::vector<size_t> reorderedBaseDimensions, lhsDims, rhsDims;
        reorderedBaseDimensions.reserve(_base.degree());
        lhsDims.reserve(_lhs.indices.size());
        rhsDims.reserve(_rhs.indices.size());

        _lhsSize = 1;
        _rhsSize = 1;

        Index auxiliaryIndex;

        // Work through the indices of lhs.
        IF_CHECK(bool foundCommon = false;)
        for (const auto& idx : _lhs.indices) {
            // Find the index in the base and accumulate the dimension offset.
            size_t j, dimOffset = 0;
            for(j = 0; j < _base.indices.size() && idx != _base.indices[j]; ++j) {
                dimOffset += _base.indices[j].span;
            }

            if(j < _base.indices.size()) {
                _lhsPreliminaryIndices.push_back(_base.indices[j]);
                reorderedBaseIndices.push_back(_base.indices[j]);
                for(size_t k = 0; k < _base.indices[j].span; ++k) {
                    reorderedBaseDimensions.push_back(_base.tensorObjectReadOnly->dimensions.at(dimOffset+k));
                    lhsDims.push_back(_base.tensorObjectReadOnly->dimensions[dimOffset+k]);
                    _lhsSize *= _base.tensorObjectReadOnly->dimensions[dimOffset+k];
                }
            } else {
                REQUIRE(!foundCommon, "The left part of the factorisation must have exactly one index that is not contained in the base. Here it is more than one.");
                IF_CHECK(foundCommon = true;)
                auxiliaryIndex = idx;
            }
        }
        _lhsPreliminaryIndices.push_back(auxiliaryIndex);

        // Work through the indices of rhs.
        IF_CHECK(foundCommon = false;)
        for (const auto& idx : _rhs.indices) {
            // Find the index in the base and accumulate the dimension offset.
            size_t j, dimOffset = 0;
            for(j = 0; j < _base.indices.size() && idx != _base.indices[j]; ++j) {
                dimOffset += _base.indices[j].span;
            }

            if(j < _base.indices.size()) {
                _rhsPreliminaryIndices.push_back(_base.indices[j]);
                reorderedBaseIndices.push_back(_base.indices[j]);
                for(size_t k = 0; k < _base.indices[j].span; ++k) {
                    reorderedBaseDimensions.push_back(_base.tensorObjectReadOnly->dimensions.at(dimOffset+k));
                    rhsDims.push_back(_base.tensorObjectReadOnly->dimensions[dimOffset+k]);
                    _rhsSize *= _base.tensorObjectReadOnly->dimensions[dimOffset+k];
                }
            } else {
                REQUIRE(!foundCommon, "The right part of the factorisation must have exactly one index that is not contained in the base. Here it is more than one.");
                IF_CHECK(foundCommon = true;)
                auxiliaryIndex = idx;
            }
        }
        _rhsPreliminaryIndices.insert(_rhsPreliminaryIndices.begin(), auxiliaryIndex);

        internal::IndexedTensor<Tensor> reorderedBaseTensor(new Tensor(std::move(reorderedBaseDimensions), _base.tensorObjectReadOnly->representation, Tensor::Initialisation::None), std::move(reorderedBaseIndices), false);
        evaluate(std::move(reorderedBaseTensor), std::move(_base));
        reorderedBaseTensor.tensorObject->ensure_own_data();

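        // The new bond introduced by the split can have dimension at most min(_lhsSize, _rhsSize),
        // i.e. the maximal possible rank of the matricisation of the base at _splitPos.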
        _rank = std::min(_lhsSize, _rhsSize);
        lhsDims.push_back(_rank);
        rhsDims.insert(rhsDims.begin(), _rank);

        _lhs.tensorObject->reset(std::move(lhsDims), Tensor::Initialisation::None);
        _lhs.tensorObject->ensure_own_data_no_copy();

        _rhs.tensorObject->reset(std::move(rhsDims), Tensor::Initialisation::None);
        _rhs.tensorObject->ensure_own_data_no_copy();

        return std::unique_ptr<Tensor>(reorderedBaseTensor.tensorObject);
    }

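    // Computes the SVD A = U S Vt of the indexed base tensor, truncated to maxRank and
    // relative accuracy epsilon, optionally followed by soft thresholding of the
    // singular values.
    //
    // Usage sketch in index notation (illustrative only; index names and dimensions
    // are arbitrary):
    //     xerus::Tensor A = xerus::Tensor::random({4, 5, 6});
    //     xerus::Tensor U, S, Vt;
    //     xerus::Index i, j, k, r1, r2;
    //     (U(i, r1), S(r1, r2), Vt(r2, j, k)) = xerus::SVD(A(i, j, k));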
    void SVD::operator()(const std::vector<internal::IndexedTensor<Tensor>*>& _output) const {
        REQUIRE(_output.size() == 3, "SVD requires three output tensors, not " << _output.size());
        internal::IndexedTensor<Tensor>& U = *_output[0];
        internal::IndexedTensor<Tensor>& S = *_output[1];
        internal::IndexedTensor<Tensor>& Vt = *_output[2];

        REQUIRE(epsilon < 1, "Epsilon must be smaller than one.");
        REQUIRE(maxRank > 0, "maxRank must be larger than zero.");

        size_t lhsSize, rhsSize, rank, splitPos;
        std::vector<Index> lhsPreliminaryIndices, rhsPreliminaryIndices;

        std::unique_ptr<Tensor> reorderedBaseTensor = prepare_split(lhsSize, rhsSize, rank, splitPos, lhsPreliminaryIndices, rhsPreliminaryIndices, std::move(A), std::move(U), std::move(Vt));

        calculate_svd(*U.tensorObject, *S.tensorObject, *Vt.tensorObject, *reorderedBaseTensor, splitPos, maxRank, epsilon);

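        // Soft thresholding: shrink every singular value by softThreshold and truncate
        // the rank at the first singular value that falls below the threshold. (Applied
        // to the singular values, this is the proximal operator of the nuclear norm.)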
        if(softThreshold > 0.0) {
            const size_t oldRank = S.tensorObjectReadOnly->dimensions[0];
            rank = oldRank;

            (*S.tensorObject)[0] = std::max((*S.tensorObject)[0]-softThreshold, preventZero ? EPSILON*(*S.tensorObject)[0] : 0.0);

            for(size_t i = 1; i < oldRank; ++i) {
                if((*S.tensorObject)[i+i*oldRank] < softThreshold) {
                    rank = i;
                    break;
                } else {
                    (*S.tensorObject)[i+i*oldRank] -= softThreshold;
                }
            }

            if(rank != oldRank) {
                Tensor newS({rank, rank}, Tensor::Representation::Sparse);
                for(size_t i = 0; i < rank; ++i) {
                    newS[i+i*rank] = (*S.tensorObject)[i+i*oldRank];
                }
                *S.tensorObject = newS;

                U.tensorObject->resize_mode(U.degree()-1, rank);
                Vt.tensorObject->resize_mode(0, rank);
            }
        }

        // Post-evaluate the results.
        std::vector<Index> midPreliminaryIndices({lhsPreliminaryIndices.back(), rhsPreliminaryIndices.front()});
        U = (*U.tensorObjectReadOnly)(lhsPreliminaryIndices);
        S = (*S.tensorObjectReadOnly)(midPreliminaryIndices);
        Vt = (*Vt.tensorObjectReadOnly)(rhsPreliminaryIndices);
    }
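    // QR factorisation A = Q R with orthonormal Q, split according to the output indices.
    // Usage sketch (illustrative only):
    //     (Q(i, r), R(r, j)) = xerus::QR(A(i, j));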
    void QR::operator()(const std::vector<internal::IndexedTensor<Tensor>*>& _output) const {
        REQUIRE(_output.size() == 2, "QR factorisation requires two output tensors, not " << _output.size());
        internal::IndexedTensor<Tensor>& Q = *_output[0];
        internal::IndexedTensor<Tensor>& R = *_output[1];

        size_t lhsSize, rhsSize, rank, splitPos;
        std::vector<Index> lhsPreliminaryIndices, rhsPreliminaryIndices;

        std::unique_ptr<Tensor> reorderedBaseTensor = prepare_split(lhsSize, rhsSize, rank, splitPos, lhsPreliminaryIndices, rhsPreliminaryIndices, std::move(A), std::move(Q), std::move(R));

        calculate_qr(*Q.tensorObject, *R.tensorObject, *reorderedBaseTensor, splitPos);

        // Post-evaluate the results.
        Q = (*Q.tensorObjectReadOnly)(lhsPreliminaryIndices);
        R = (*R.tensorObjectReadOnly)(rhsPreliminaryIndices);
    }

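    // RQ factorisation A = R Q, the mirrored analogue of QR with the orthonormal factor
    // on the right. Usage sketch (illustrative only):
    //     (R(i, r), Q(r, j)) = xerus::RQ(A(i, j));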
    void RQ::operator()(const std::vector<internal::IndexedTensor<Tensor>*>& _output) const {
        REQUIRE(_output.size() == 2, "RQ factorisation requires two output tensors, not " << _output.size());
        internal::IndexedTensor<Tensor>& R = *_output[0];
        internal::IndexedTensor<Tensor>& Q = *_output[1];

        size_t lhsSize, rhsSize, rank, splitPos;
        std::vector<Index> lhsPreliminaryIndices, rhsPreliminaryIndices;

        std::unique_ptr<Tensor> reorderedBaseTensor = prepare_split(lhsSize, rhsSize, rank, splitPos, lhsPreliminaryIndices, rhsPreliminaryIndices, std::move(A), std::move(R), std::move(Q));

        calculate_rq(*R.tensorObject, *Q.tensorObject, *reorderedBaseTensor, splitPos);

        // Post-evaluate the results.
        R = (*R.tensorObjectReadOnly)(lhsPreliminaryIndices);
        Q = (*Q.tensorObjectReadOnly)(rhsPreliminaryIndices);
    }

    void QC::operator()(const std::vector<internal::IndexedTensor<Tensor>*>& _output) const {
        REQUIRE(_output.size() == 2, "QC factorisation requires two output tensors, not " << _output.size());
        internal::IndexedTensor<Tensor>& Q = *_output[0];
        internal::IndexedTensor<Tensor>& C = *_output[1];

        size_t lhsSize, rhsSize, rank, splitPos;
        std::vector<Index> lhsPreliminaryIndices, rhsPreliminaryIndices;

        std::unique_ptr<Tensor> reorderedBaseTensor = prepare_split(lhsSize, rhsSize, rank, splitPos, lhsPreliminaryIndices, rhsPreliminaryIndices, std::move(A), std::move(Q), std::move(C));

        calculate_qc(*Q.tensorObject, *C.tensorObject, *reorderedBaseTensor, splitPos);

        // Post-evaluate the results.
        Q = (*Q.tensorObjectReadOnly)(lhsPreliminaryIndices);
        C = (*C.tensorObjectReadOnly)(rhsPreliminaryIndices);
    }

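    // CQ factorisation A = C Q (see calculate_cq), the mirrored analogue of QC.
    // Usage sketch (illustrative only):
    //     (C(i, r), Q(r, j)) = xerus::CQ(A(i, j));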
    void CQ::operator()(const std::vector<internal::IndexedTensor<Tensor>*>& _output) const {
        REQUIRE(_output.size() == 2, "CQ factorisation requires two output tensors, not " << _output.size());
        internal::IndexedTensor<Tensor>& C = *_output[0];
        internal::IndexedTensor<Tensor>& Q = *_output[1];

        size_t lhsSize, rhsSize, rank, splitPos;
        std::vector<Index> lhsPreliminaryIndices, rhsPreliminaryIndices;

        std::unique_ptr<Tensor> reorderedBaseTensor = prepare_split(lhsSize, rhsSize, rank, splitPos, lhsPreliminaryIndices, rhsPreliminaryIndices, std::move(A), std::move(C), std::move(Q));

        calculate_cq(*C.tensorObject, *Q.tensorObject, *reorderedBaseTensor, splitPos);

        // Post-evaluate the results.
        C = (*C.tensorObjectReadOnly)(lhsPreliminaryIndices);
        Q = (*Q.tensorObjectReadOnly)(rhsPreliminaryIndices);
    }
} // namespace xerus