xerus
a general purpose tensor library
ttNetwork.cpp
Go to the documentation of this file.
1 // Xerus - A General Purpose Tensor Library
2 // Copyright (C) 2014-2017 Benjamin Huber and Sebastian Wolf.
3 //
4 // Xerus is free software: you can redistribute it and/or modify
5 // it under the terms of the GNU Affero General Public License as published
6 // by the Free Software Foundation, either version 3 of the License,
7 // or (at your option) any later version.
8 //
9 // Xerus is distributed in the hope that it will be useful,
10 // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 // GNU Affero General Public License for more details.
13 //
14 // You should have received a copy of the GNU Affero General Public License
15 // along with Xerus. If not, see <http://www.gnu.org/licenses/>.
16 //
17 // For further information on Xerus visit https://libXerus.org
18 // or contact us at contact@libXerus.org.
19 
25 #include <algorithm>
26 #include <memory>
27 
28 #include <xerus/ttNetwork.h>
29 
30 #include <xerus/misc/check.h>
31 #include <xerus/misc/math.h>
33 #include <xerus/misc/internal.h>
34 
35 #include <xerus/basic.h>
37 #include <xerus/index.h>
38 #include <xerus/tensor.h>
39 #include <xerus/ttStack.h>
42 
43 namespace xerus {
44  /*- - - - - - - - - - - - - - - - - - - - - - - - - - Constructors - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
	/// @brief Constructs an empty TTNetwork (no dimensions, no components).
	/// The network starts out not canonicalized.
	template<bool isOperator>
	TTNetwork<isOperator>::TTNetwork() : TensorNetwork(), canonicalized(false) {}
47 
48 
	/// @brief Constructs a TTNetwork by decomposing @a _tensor with one uniform maximal rank.
	/// @param _tensor the tensor to decompose.
	/// @param _eps accuracy parameter forwarded to the RankTuple constructor.
	/// @param _maxRank maximal rank used for every internal edge (degree/N-1 of them;
	///        a degree-zero tensor yields an empty rank vector).
	template<bool isOperator>
	TTNetwork<isOperator>::TTNetwork(const Tensor& _tensor, const double _eps, const size_t _maxRank) :
	TTNetwork(_tensor, _eps, std::vector<size_t>(_tensor.degree() == 0 ? 0 : _tensor.degree()/N-1, _maxRank)) {}
52 
53 
	/// @brief Constructs a zero TTNetwork of degree @a _degree with all external dimensions equal to one.
	template<bool isOperator>
	TTNetwork<isOperator>::TTNetwork(const size_t _degree) : TTNetwork(std::vector<size_t>(_degree, 1)) { }
56 
	/// @brief Constructs the zero TTNetwork with the given external dimensions.
	/// @param _dimensions external dimensions; for operators the first half are the
	///        row modes and the second half the column modes (size must be divisible by N).
	/// The resulting network is canonicalized with core position 0. Besides one node
	/// per component, two virtual boundary nodes (holding the scalar 1) are created
	/// at the front and back, so component i lives in nodes[i+1].
	template<bool isOperator>
	TTNetwork<isOperator>::TTNetwork(Tensor::DimensionTuple _dimensions) : TensorNetwork(ZeroNode::None), canonicalized(true), corePosition(0) {
		dimensions = std::move(_dimensions);
		
		REQUIRE(dimensions.size()%N==0, "Illegal degree for TTOperator.");
		REQUIRE(!misc::contains(dimensions, size_t(0)), "Zero is no valid dimension.");
		const size_t numComponents = dimensions.size()/N;
		
		// Degree zero: the whole network is a single scalar node.
		if (numComponents == 0) {
			nodes.emplace_back(std::make_unique<Tensor>());
			return;
		}
		
		// ExternalLinks: the i-th (row) mode attaches to node i+1 at index position 1.
		externalLinks.reserve(dimensions.size());
		for (size_t i = 0; i < numComponents; ++i) {
			externalLinks.emplace_back(i+1, 1, dimensions[i], false);
		}
		
		// For operators, additionally attach the column modes at index position 2.
		if (isOperator) {
			for (size_t i = 0; i < numComponents; ++i) {
				externalLinks.emplace_back(i+1, 2, dimensions[numComponents+i], false);
			}
		}
		
		std::vector<TensorNetwork::Link> neighbors;
		
		// Left virtual boundary node: single dimension-one link to component 0.
		neighbors.emplace_back(1, 0, 1, false);
		
		nodes.emplace_back(std::make_unique<Tensor>(Tensor::ones({1})), std::move(neighbors));
		
		// Component nodes: rank-one dirac tensors wired as
		// (previous node, external mode(s), next node).
		for (size_t i = 0; i < numComponents; ++i) {
			neighbors.clear();
			neighbors.emplace_back(i, i==0 ? 0 : N+1, 1, false);
			neighbors.emplace_back(-1, i, dimensions[i], true);
			if(isOperator) { neighbors.emplace_back(-1, i+numComponents, dimensions[numComponents+i], true); }
			neighbors.emplace_back(i+2, 0, 1, false);
			
			if(!isOperator) {
				nodes.emplace_back( std::make_unique<Tensor>(Tensor::dirac({1, dimensions[i], 1}, 0)), std::move(neighbors) );
			} else {
				nodes.emplace_back( std::make_unique<Tensor>(Tensor::dirac({1, dimensions[i], dimensions[numComponents+i], 1}, 0)), std::move(neighbors) );
			}
		}
		
		// Right virtual boundary node.
		neighbors.clear();
		neighbors.emplace_back(numComponents, N+1, 1, false);
		nodes.emplace_back( std::make_unique<Tensor>(Tensor::ones({1})), std::move(neighbors));
		
		// Make a Zero Tensor (at core): zeroing the single entry of the first
		// component makes the represented tensor the zero tensor.
		(*nodes[1].tensorObject)[0] = 0;
	}
109 
110 
111  template<bool isOperator>
112  TTNetwork<isOperator>::TTNetwork(const Tensor& _tensor, const double _eps, const TensorNetwork::RankTuple& _maxRanks): TTNetwork(_tensor.degree()) {
113  REQUIRE(_tensor.degree()%N==0, "Number of indicis must be even for TTOperator");
114  REQUIRE(_eps >= 0 && _eps < 1, "_eps must be positive and smaller than one. " << _eps << " was given.");
115  REQUIRE(_maxRanks.size() == num_ranks(), "We need " << num_ranks() <<" ranks but " << _maxRanks.size() << " where given");
116  REQUIRE(!misc::contains(_maxRanks, size_t(0)), "Maximal ranks must be strictly positive. Here: " << _maxRanks);
117 
118  const size_t numComponents = degree()/N;
119 
120  if (_tensor.degree() == 0) {
121  *nodes[0].tensorObject = _tensor;
122  return;
123  }
124 
125  dimensions = _tensor.dimensions;
126 
127  Tensor remains;
128  if(isOperator) {
129  std::vector<size_t> shuffle(_tensor.degree());
130  for(size_t i = 0; i < numComponents; ++i) {
131  shuffle[i] = 2*i;
132  shuffle[numComponents + i] = 2*i+1;
133  }
134 
135  xerus::reshuffle(remains, _tensor, shuffle);
136  } else {
137  remains = _tensor;
138  }
139 
140  // Add ghost dimensions used in the nodes
141  std::vector<size_t> extDimensions;
142  extDimensions.reserve(remains.degree()+2);
143  extDimensions.emplace_back(1);
144  extDimensions.insert(extDimensions.end(), remains.dimensions.begin(), remains.dimensions.end());
145  extDimensions.emplace_back(1);
146  remains.reinterpret_dimensions(extDimensions);
147 
148 
149  Tensor singularValues, newNode;
150  for(size_t position = numComponents-1; position > 0; --position) {
151  calculate_svd(remains, singularValues, newNode, remains, 1+position*N, _maxRanks[position-1], _eps);
152 
153  set_component(position, std::move(newNode));
154  newNode.reset();
155  xerus::contract(remains, remains, false, singularValues, false, 1);
156  }
157 
158  set_component(0, remains);
159  assume_core_position(0);
160  }
161 
162 
163 // template<bool isOperator>
164 // TTNetwork<isOperator>::TTNetwork(const TensorNetwork &_network, double _eps) : TTNetwork(Tensor(_network)) {
165 // LOG(warning, "Cast of arbitrary tensor network to TT not yet supported. Casting to Tensor first"); // TODO
166 // }
167 
168 
169  template<bool isOperator>
170  TTNetwork<isOperator> TTNetwork<isOperator>::ones(const std::vector<size_t>& _dimensions) {
171  REQUIRE(_dimensions.size()%N == 0, "Illegal number of dimensions for ttOperator");
172  REQUIRE(!misc::contains(_dimensions, size_t(0)), "Trying to construct a TTTensor with dimension 0 is not possible.");
173 
174  if(_dimensions.empty()) {
175  return TTNetwork(Tensor::ones({}));
176  }
177 
178  TTNetwork result(_dimensions.size());
179  const size_t numNodes = _dimensions.size()/N;
180 
181  std::vector<size_t> dimensions(isOperator ? 4 : 3, 1);
182  for(size_t i = 0; i < numNodes; ++i) {
183  dimensions[1] = _dimensions[i];
184  if (isOperator) {
185  dimensions[2] = _dimensions[i+numNodes];
186  }
187  result.set_component(i, Tensor::ones(dimensions));
188  }
189  result.canonicalize_left();
190  return result;
191  }
192 
193 
194  template<> template<>
195  TTNetwork<true> TTNetwork<true>::identity(const std::vector<size_t>& _dimensions) {
196  REQUIRE(_dimensions.size()%N==0, "Illegal number of dimensions for ttOperator");
197  REQUIRE(!misc::contains(_dimensions, size_t(0)), "Trying to construct a TTTensor with dimension 0 is not possible.");
198 
199  if(_dimensions.empty()) {
200  return TTNetwork(Tensor::ones({}));
201  }
202 
203  const size_t numComponents = _dimensions.size()/N;
204 
205  TTNetwork result(_dimensions.size());
206 
207  std::vector<size_t> constructionVector(4, 1);
208  for (size_t i = 0; i < numComponents; ++i) {
209  constructionVector[1] = _dimensions[i];
210  constructionVector[2] = _dimensions[i+numComponents];
211  result.set_component(i, Tensor(constructionVector, [](const std::vector<size_t> &_idx){
212  if (_idx[1] == _idx[2]) {
213  return 1.0;
214  }
215  return 0.0;
216  }));
217  }
218 
219  result.canonicalize_left();
220  return result;
221  }
222 
	/// @brief Returns a TTNetwork representation of the Kronecker-delta tensor
	/// (entry one where all indices coincide, zero elsewhere).
	/// The internal ranks all equal the smallest external dimension; the result
	/// is left-canonicalized.
	template<bool isOperator>
	TTNetwork<isOperator> TTNetwork<isOperator>::kronecker(const std::vector<size_t>& _dimensions) {
		REQUIRE(_dimensions.size()%N == 0, "Illegal number of dimensions for ttOperator");
		REQUIRE(!misc::contains(_dimensions, size_t(0)), "Trying to construct a TTNetwork with dimension 0 is not possible.");
		
		if(_dimensions.empty()) {
			return TTNetwork(Tensor::kronecker({}));
		}
		
		TTNetwork result(_dimensions.size());
		const size_t numNodes = _dimensions.size()/N;
		
		// All internal ranks equal the minimal external dimension.
		const auto minN = misc::min(_dimensions);
		
		// All nodes are simply kronecker tensors themselves; boundary components get
		// their rank-one ghost dimension reattached via reinterpret_dimensions.
		std::vector<size_t> dimensions;
		for(size_t i = 0; i < numNodes; ++i) {
			dimensions.reserve(4);
			if(i > 0) { dimensions.push_back(minN); }           // left rank (absent for first component)
			dimensions.push_back(_dimensions[i]);               // external (row) mode
			if (isOperator) { dimensions.push_back(_dimensions[i+numNodes]); } // external column mode
			if(i+1 < numNodes) { dimensions.push_back(minN); }  // right rank (absent for last component)
			auto newComp = Tensor::kronecker(dimensions);
			// Boundary components: prepend/append the size-one ghost dimension.
			if(i == 0) { dimensions.insert(dimensions.begin(), 1); }
			if(i+1 == numNodes) { dimensions.insert(dimensions.end(), 1); }
			if(i == 0 || i+1 == numNodes) { newComp.reinterpret_dimensions(std::move(dimensions)); }
			result.set_component(i, std::move(newComp));
			dimensions.clear();
		}
		result.canonicalize_left();
		return result;
	}
255 
256  template<bool isOperator>
257  TTNetwork<isOperator> TTNetwork<isOperator>::dirac(std::vector<size_t> _dimensions, const std::vector<size_t>& _position) {
258  REQUIRE(_dimensions.size()%N==0, "Illegal number of dimensions for ttOperator");
259  REQUIRE(!misc::contains(_dimensions, size_t(0)), "Trying to construct a TTTensor with dimension 0 is not possible.");
260  REQUIRE(_dimensions.size() == _position.size(), "Inconsitend number of entries in _dimensions and _position.");
261 
262  const size_t numComponents = _dimensions.size()/N;
263 
264  if(numComponents <= 1) {
265  return TTNetwork(Tensor::dirac(_dimensions, _position));
266  }
267 
268  TTNetwork result(_dimensions);
269 
270  for (size_t i = 0; i < numComponents; ++i) {
271  if(isOperator) {
272  result.set_component(i, Tensor::dirac({1, result.dimensions[i], result.dimensions[numComponents+i], 1}, _position[i]*result.dimensions[numComponents+i] + _position[numComponents+i]));
273  } else {
274  result.set_component(i, Tensor::dirac({1, result.dimensions[i], 1}, _position[i]));
275  }
276  }
277  return result;
278  }
279 
280  template<bool isOperator>
281  TTNetwork<isOperator> TTNetwork<isOperator>::dirac(std::vector<size_t> _dimensions, const size_t _position) {
282  return dirac(_dimensions, Tensor::position_to_multiIndex(_position, _dimensions));
283  }
284 
285 
286  /*- - - - - - - - - - - - - - - - - - - - - - - - - - Internal helper functions - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
287 
288  #ifndef XERUS_DISABLE_RUNTIME_CHECKS
289  template<bool isOperator>
291  require_valid_network(); // Network must at least be valid.
292 
293  const size_t numComponents = degree()/N;
294  const size_t numNodes = degree() == 0 ? 1 : degree()/N + 2;
295  REQUIRE(nodes.size() == numNodes, "Wrong number of nodes: " << nodes.size() << " expected " << numNodes << ".");
296  REQUIRE(!canonicalized || (degree() == 0 && corePosition == 0) || corePosition < numComponents, "Invalid corePosition: " << corePosition << " there are only " << numComponents << " components.");
297 
298  // Per external link
299  for (size_t n = 0; n < externalLinks.size(); ++n) {
300  const TensorNetwork::Link &l = externalLinks[n];
301  REQUIRE(l.other == (n%numComponents)+1, "The " << n << "-th external link must point the the " << (n%numComponents) << "-th component (i.e. the " << (n%numComponents)+1 << "-th node) but does point to the " << l.other << "-th node.");
302  }
303 
304  // Virtual nodes
305  if(degree() > 0) {
306  REQUIRE(nodes.front().degree() == 1, "The left virtual node must have degree 1, but has size " << nodes.front().degree());
307  REQUIRE(nodes.front().neighbors[0].dimension == 1, "The left virtual node's single dimension must be 1, but is " << nodes.front().neighbors[0].dimension);
308  REQUIRE(nodes.front().neighbors[0].other == 1, "The left virtual node's single link must be to node 1, but is towards node " << nodes.front().neighbors[0].other);
309  REQUIRE(nodes.front().neighbors[0].indexPosition == 0, "The left virtual node's single link must link at indexPosition 0, but link at " << nodes.front().neighbors[0].indexPosition);
310  REQUIRE(misc::hard_equal((*nodes.front().tensorObject)[0], 1.0), "The left virtual node's single entry must be 1.0, but it is " << (*nodes.front().tensorObject)[0]);
311  REQUIRE(!nodes.front().tensorObject->has_factor(), "The left virtual node must no carry a non-trivial factor.");
312 
313  REQUIRE(nodes.back().degree() == 1, "The right virtual node must have degree 1, but has size " << nodes.back().degree());
314  REQUIRE(nodes.back().neighbors[0].dimension == 1, "The right virtual node's single dimension must be 1, but is " << nodes.back().neighbors[0].dimension);
315  REQUIRE(nodes.back().neighbors[0].other == numNodes-2, "The right virtual node's single link must be to node " << numNodes-2 << ", but is towards node " << nodes.back().neighbors[0].other);
316  REQUIRE(nodes.back().neighbors[0].indexPosition == N+1, "The right virtual node's single link must link at indexPosition " << N+1 << ", but link at " << nodes.back().neighbors[0].indexPosition);
317  REQUIRE(misc::hard_equal((*nodes.back().tensorObject)[0], 1.0), "The right virtual node's single entry must be 1.0, but it is " << (*nodes.back().tensorObject)[0]);
318  REQUIRE(!nodes.back().tensorObject->has_factor(), "The right virtual node must no carry a non-trivial factor.");
319  }
320 
321  // Per component
322  for (size_t n = 0; n < numComponents; ++n) {
323  const TensorNode& node = nodes[n+1];
324 
325  REQUIRE(!canonicalized || n == corePosition || !node.tensorObject->has_factor(), "In canonicalized TTNetworks only the core may carry a non-trivial factor. Violated by component " << n << " factor: " << node.tensorObject->factor << " corepos: " << corePosition);
326 
327  REQUIRE(node.degree() == N+2, "Every TT-Component must have degree " << N+2 << ", but component " << n << " has degree " << node.degree());
328  REQUIRE(!node.neighbors[0].external, "The first link of each TT-Component must not be external. Violated by component " << n);
329  REQUIRE(node.neighbors[0].other == n, "The first link of each TT-Component must link to the previous node. Violated by component " << n << ", which instead links to node " << node.neighbors[0].other << " (expected " << n << ").");
330  REQUIRE(node.neighbors[0].indexPosition == (n==0?0:N+1), "The first link of each TT-Component must link to the last last index of the previous node. Violated by component " << n << ", which instead links to index " << node.neighbors[0].indexPosition << " (expected " << (n==0?0:N+1) << ").");
331 
332  REQUIRE(node.neighbors[1].external, "The second link of each TT-Component must be external. Violated by component " << n << ".");
333  REQUIRE(node.neighbors[1].indexPosition == n, "The second link of each TT-Component must link to the external dimension equal to the component position. Violated by component " << n << " which links at " << node.neighbors[1].indexPosition);
334  REQUIRE(!isOperator || node.neighbors[2].external, "The third link of each TTO-Component must be external. Violated by component " << n << ".");
335  REQUIRE(!isOperator || node.neighbors[2].indexPosition == numComponents+n, "The third link of each TTO-Component must link to the external dimension equal to the component position + numComponents. Violated by component " << n << " which links at " << node.neighbors[2].indexPosition << " (expected " << numComponents+n << ").");
336 
337  REQUIRE(!node.neighbors.back().external, "The last link of each TT-Component must not be external. Violated by component " << n);
338  REQUIRE(node.neighbors.back().other == n+2, "The last link of each TT-Component must link to the next node. Violated by component " << n << ", which instead links to node " << node.neighbors.back().other << " (expected " << n+2 << ").");
339  REQUIRE(node.neighbors.back().indexPosition == 0, "The last link of each TT-Component must link to the first index of the next node. Violated by component " << n << ", which instead links to index " << node.neighbors.back().indexPosition << " (expected 0).");
340  }
341  }
342  #else
343  template<bool isOperator>
345  #endif
346 
347 
348  template<bool isOperator>
350  const size_t numComponents = dimensions.size()/N;
351  for (size_t i = 0; i < numComponents; ++i) {
352  const Tensor& comp = get_component(i);
353  const size_t extDim = isOperator ? comp.dimensions[1]*comp.dimensions[2] : comp.dimensions[1];
354  if (comp.dimensions.front() > extDim * comp.dimensions.back() || comp.dimensions.back() > extDim * comp.dimensions.front()) {
355  return true;
356  }
357  }
358  return false;
359  }
360 
361 
362  template<bool isOperator>
364  return degree() == 0 ? 0 : degree()/N-1;
365  }
366 
367  /*- - - - - - - - - - - - - - - - - - - - - - - - - - Miscellaneous - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
368 
369  template<bool isOperator>
370  std::vector<size_t> TTNetwork<isOperator>::reduce_to_maximal_ranks(std::vector<size_t> _ranks, const std::vector<size_t>& _dimensions) {
371  const size_t numComponents = _dimensions.size()/N;
372  REQUIRE(_dimensions.size()%N == 0, "invalid number of dimensions for TTOperator");
373  REQUIRE(numComponents == _ranks.size()+1, "Invalid number of ranks ("<<_ranks.size()<<") or dimensions ("<<_dimensions.size()<<") given.");
374 
375  // Left to right sweep
376  size_t currMax = 1;
377  for (size_t i = 0; i+1 < numComponents; ++i) {
378  currMax *= _dimensions[i];
379  if (isOperator) { currMax *= _dimensions[numComponents+i]; }
380 
381  if (currMax < _ranks[i]) {
382  _ranks[i] = currMax;
383  } else {
384  currMax = _ranks[i];
385  }
386  }
387 
388  // Right to left sweep
389  currMax = 1;
390  for (size_t i = 1; i < numComponents; ++i) {
391  currMax *= _dimensions[numComponents-i];
392  if (isOperator) { currMax *= _dimensions[2*numComponents-i]; }
393 
394  if (currMax < _ranks[numComponents-i-1]) {
395  _ranks[numComponents-i-1] = currMax;
396  } else {
397  currMax = _ranks[numComponents-i-1];
398  }
399  }
400 
401  return _ranks;
402  }
403 
404 
405  template<bool isOperator>
406  size_t TTNetwork<isOperator>::degrees_of_freedom(const std::vector<size_t> &_dimensions, const std::vector<size_t> &_ranks) {
407  if (_dimensions.empty()) { return 1; }
408  const size_t numComponents = _dimensions.size()/N;
409  REQUIRE(_dimensions.size()%N == 0, "invalid number of dimensions for TTOperator");
410  REQUIRE(numComponents == _ranks.size()+1, "Invalid number of ranks ("<<_ranks.size()<<") or dimensions ("<<_dimensions.size()<<") given.");
411  size_t result = 0;
412  for (size_t i=0; i<numComponents; ++i) {
413  size_t component = i==0? 1 : _ranks[i-1];
414  component *= _dimensions[i];
415  if (isOperator) { component *= _dimensions[i+numComponents]; }
416  if (i<_ranks.size()) { component *= _ranks[i]; }
417  result += component;
418  }
419  for (const auto r : _ranks) {
420  result -= misc::sqr(r);
421  }
422  return result;
423  }
424 
425  template<bool isOperator>
427  return degrees_of_freedom(dimensions, ranks());
428  }
429 
430 
	/// @brief Fixes external mode @a _mode to the given slate, delegating to
	/// TensorNetwork::fix_mode. Rejected for TTOperators (the result would no longer
	/// be in TT format); cast to TensorNetwork first if that is intended.
	template<bool isOperator>
	void TTNetwork<isOperator>::fix_mode(const size_t _mode, const size_t _slatePosition) {
		REQUIRE(!isOperator, "fix_mode(), does not work for TTOperators, if applicable cast to TensorNetwork first");
		TensorNetwork::fix_mode(_mode, _slatePosition);
	}
436 
437  template<bool isOperator>
438  void TTNetwork<isOperator>::resize_mode(const size_t _mode, const size_t _newDim, const size_t _cutPos) {
439  TensorNetwork::resize_mode(_mode, _newDim, _cutPos);
440  if(canonicalized && _newDim != corePosition) {
441  const size_t oldCorePosition = corePosition;
442  const size_t numComponents = degree()/N;
443  move_core(_mode%numComponents);
444  move_core(oldCorePosition);
445  }
446  }
447 
448 
449  template<bool isOperator>
451  for (size_t i = 0; i < degree(); ++i) {
452  component(i).use_dense_representation();
453  }
454  }
455 
456  template<bool isOperator>
458  REQUIRE(_idx == 0 || _idx < degree()/N, "Illegal index " << _idx <<" in TTNetwork::component, as there are onyl " << degree()/N << " components.");
459  return *nodes[degree() == 0 ? 0 : _idx+1].tensorObject;
460  }
461 
462 
463  template<bool isOperator>
464  const Tensor& TTNetwork<isOperator>::get_component(const size_t _idx) const {
465  REQUIRE(_idx == 0 || _idx < degree()/N, "Illegal index " << _idx <<" in TTNetwork::get_component.");
466  return *nodes[degree() == 0 ? 0 : _idx+1].tensorObject;
467  }
468 
469 
470  template<bool isOperator>
471  void TTNetwork<isOperator>::set_component(const size_t _idx, Tensor _T) {
472  if(degree() == 0) {
473  REQUIRE(_idx == 0, "Illegal index " << _idx <<" in TTNetwork::set_component");
474  REQUIRE(_T.degree() == 0, "Component of degree zero TTNetwork must have degree zero. Given: " << _T.degree());
475  *nodes[0].tensorObject = std::move(_T);
476  } else {
477  REQUIRE(_idx < degree()/N, "Illegal index " << _idx <<" in TTNetwork::set_component");
478  REQUIRE(_T.degree() == N+2, "Component " << _idx << " must have degree " << N+2 << ". Given: " << _T.degree());
479 
480  TensorNode& currNode = nodes[_idx+1];
481  *currNode.tensorObject = std::move(_T);
482  for (size_t i = 0; i < N+2; ++i) {
483  currNode.neighbors[i].dimension = currNode.tensorObject->dimensions[i];
484  if (currNode.neighbors[i].external) {
485  externalLinks[currNode.neighbors[i].indexPosition].dimension = currNode.tensorObject->dimensions[i];
486  dimensions[currNode.neighbors[i].indexPosition] = currNode.tensorObject->dimensions[i];
487  }
488  }
489  }
490 
491  canonicalized = canonicalized && (corePosition == _idx);
492  }
493 
494 
495  template<bool isOperator>
496  std::vector<std::vector<std::tuple<size_t, size_t, value_t>>> get_grouped_entries(const Tensor& _component) {
497  REQUIRE(_component.is_sparse(), "Not usefull (and not implemented) for dense Tensors.");
498 
499  const size_t externalDim = isOperator ? _component.dimensions[1] * _component.dimensions[2] : _component.dimensions[1];
500 
501  std::vector<std::vector<std::tuple<size_t, size_t, value_t>>> groups(externalDim);
502 
503  for(const auto& entry : _component.get_unsanitized_sparse_data()) {
504  const size_t r2 = entry.first%_component.dimensions.back();
505  const size_t n = (entry.first/_component.dimensions.back())%externalDim;
506  const size_t r1 = (entry.first/_component.dimensions.back())/externalDim;
507  groups[n].emplace_back(r1, r2, _component.factor*entry.second);
508  }
509 
510  return groups;
511  }
512 
513 
514  template<bool isOperator>
515  std::pair<TensorNetwork, TensorNetwork> TTNetwork<isOperator>::chop(const size_t _position) const {
516  require_correct_format();
517 
518  const size_t numComponents = degree()/N;
519  REQUIRE(_position < numComponents, "Can't split a " << numComponents << " component TTNetwork at position " << _position);
520 
521  // Create the resulting TNs
522  TensorNetwork left(ZeroNode::None);
523  TensorNetwork right(ZeroNode::None);
524 
525  left.nodes.push_back(nodes[0]);
526  for (size_t i = 0; i < _position; ++i) {
527  left.dimensions.push_back(dimensions[i]);
528  left.externalLinks.push_back(externalLinks[i]);
529  left.nodes.push_back(nodes[i+1]);
530  }
531  if(isOperator) {
532  for(size_t i = 0; i < _position; ++i) {
533  left.dimensions.push_back(dimensions[i+numComponents]);
534  left.externalLinks.push_back(externalLinks[i+numComponents]);
535  }
536  }
537  left.dimensions.push_back(left.nodes.back().neighbors.back().dimension);
538  left.externalLinks.emplace_back(_position, _position==0?0:N+1, left.nodes.back().neighbors.back().dimension , false);
539  left.nodes.back().neighbors.back().external = true;
540  left.nodes.back().neighbors.back().indexPosition = isOperator ? 2*_position-1 : _position;
541 
542  right.dimensions.push_back(nodes[_position+2].neighbors.front().dimension);
543  right.externalLinks.emplace_back(_position+2, 0, nodes[_position+2].neighbors.front().dimension , false); // NOTE other will be corrected to 0 in the following steps
544 
545  for(size_t i = _position+1; i < numComponents; ++i) {
546  right.dimensions.push_back(dimensions[i]);
547  right.externalLinks.push_back(externalLinks[i]);
548  right.nodes.push_back(nodes[i+1]);
549  }
550  if(isOperator) {
551  for(size_t i = _position+1; i < numComponents+1; ++i) {
552  right.dimensions.push_back(dimensions[i+numComponents]);
553  right.externalLinks.push_back(externalLinks[i+numComponents]);
554  }
555  }
556  // The last node
557  right.nodes.push_back(nodes.back());
558 
559  right.nodes.front().neighbors.front().external = true;
560  right.nodes.front().neighbors.front().indexPosition = _position; // NOTE indexPosition will be corrected to 0 in the following steps
561 
562  // Account for the fact that the first _position+2 nodes do not exist
563  for(TensorNetwork::Link& link : right.externalLinks) {
564  link.other -= _position+2;
565  }
566 
567  for(TensorNode& node : right.nodes) {
568  for(TensorNetwork::Link& link : node.neighbors) {
569  if(link.external) {
570  link.indexPosition -= _position;
571  } else {
572  link.other -= _position+2;
573  }
574  }
575  }
576 
577  return std::pair<TensorNetwork, TensorNetwork>(std::move(left), std::move(right));
578  }
579 
580 
	/// @brief Moves the core (the single non-orthogonalized component) to @a _position.
	/// @param _position target component index.
	/// @param _keepRank if true, ranks are not reduced during the core transfers.
	/// If the network is already canonicalized only the components between the old
	/// and new core position are transferred; otherwise a full sweep is performed.
	template<bool isOperator>
	void TTNetwork<isOperator>::move_core(const size_t _position, const bool _keepRank) {
		const size_t numComponents = degree()/N;
		REQUIRE(_position < numComponents || (_position == 0 && degree() == 0), "Illegal core-position " << _position << " chosen for TTNetwork with " << numComponents << " components");
		require_correct_format();
		
		if(canonicalized) {
			// Already canonicalized: only the span [corePosition, _position] needs work.
			// Move right?
			for (size_t n = corePosition; n < _position; ++n) {
				transfer_core(n+1, n+2, !_keepRank);
			}
			
			// Move left?
			for (size_t n = corePosition; n > _position; --n) {
				transfer_core(n+1, n, !_keepRank);
			}
		} else {
			// Not canonicalized: orthogonalize everything left of _position from the
			// left and everything right of it from the right.
			// Move right?
			for (size_t n = 0; n < _position; ++n) {
				transfer_core(n+1, n+2, !_keepRank);
			}
			
			// Move left?
			for (size_t n = numComponents; n > _position+1; --n) {
				transfer_core(n, n-1, !_keepRank);
			}
		}
		
		// NOTE(review): a single pass can apparently leave ranks above the
		// theoretical maximum; repeat full left-right-left sweeps until admissible.
		while (exceeds_maximal_ranks()) {
			// Move left from given CorePosition
			for (size_t n = _position; n > 0; --n) {
				transfer_core(n+1, n, !_keepRank);
			}
			
			// Move to the most right
			for (size_t n = 0; n+1 < numComponents; ++n) {
				transfer_core(n+1, n+2, !_keepRank);
			}
			
			// Move back left to given CorePosition
			for (size_t n = numComponents; n > _position+1; --n) {
				transfer_core(n, n-1, !_keepRank);
			}
		}
		
		canonicalized = true;
		corePosition = _position;
	}
629 
630 
631  template<bool isOperator>
633  move_core(0);
634  }
635 
636 
637  template<bool isOperator>
639  move_core(degree() == 0 ? 0 : degree()/N-1);
640  }
641 
642 
	/// @brief Rounds the TTNetwork down to the given maximal ranks with relative
	/// accuracy @a _eps per edge.
	/// @param _maxRanks one maximal rank per internal edge (degree/N-1 entries).
	/// @param _eps relative truncation accuracy, must be smaller than one.
	/// The network is right-canonicalized first, then each edge is truncated in a
	/// right-to-left sweep; the original core position is restored afterwards if the
	/// network was canonicalized before.
	template<bool isOperator>
	void TTNetwork<isOperator>::round(const std::vector<size_t>& _maxRanks, const double _eps) {
		require_correct_format();
		const size_t numComponents = degree()/N;
		REQUIRE(_eps < 1, "_eps must be smaller than one. " << _eps << " was given.");
		REQUIRE(_maxRanks.size()+1 == numComponents || (_maxRanks.empty() && numComponents == 0), "There must be exactly degree/N-1 maxRanks. Here " << _maxRanks.size() << " instead of " << numComponents-1 << " are given.");
		REQUIRE(!misc::contains(_maxRanks, size_t(0)), "Trying to round a TTTensor to rank 0 is not possible.");
		
		// Remember the caller's canonicalization so it can be restored at the end.
		const bool initialCanonicalization = canonicalized;
		const size_t initialCorePosition = corePosition;
		
		canonicalize_right();
		
		// Truncate each edge from right to left (core travels with the sweep).
		for(size_t i = 0; i+1 < numComponents; ++i) {
			round_edge(numComponents-i, numComponents-i-1, _maxRanks[numComponents-i-2], _eps, 0.0);
		}
		
		assume_core_position(0);
		
		if(initialCanonicalization) {
			move_core(initialCorePosition);
		}
	}
666 
667 
668  template<bool isOperator>
669  void TTNetwork<isOperator>::round(const size_t _maxRank) {
670  round(std::vector<size_t>(num_ranks(), _maxRank), EPSILON);
671  }
672 
673 
674  template<bool isOperator>
675  void TTNetwork<isOperator>::round(const int _maxRank) {
676  REQUIRE( _maxRank > 0, "MaxRank must be positive");
677  round(size_t(_maxRank));
678  }
679 
680 
681  template<bool isOperator>
683  round(std::vector<size_t>(num_ranks(), std::numeric_limits<size_t>::max()), _eps);
684  }
685 
686 
	/// @brief Applies soft thresholding with threshold @a _taus[i] to the singular
	/// values of each internal edge.
	/// @param _taus one threshold per internal edge (degree/N-1 entries).
	/// The network is right-canonicalized first, thresholded in a right-to-left
	/// sweep, and the original core position is restored if it was canonicalized.
	template<bool isOperator>
	void TTNetwork<isOperator>::soft_threshold(const std::vector<double> &_taus, const bool /*_preventZero*/) {
		const size_t numComponents = degree()/N;
		REQUIRE(_taus.size()+1 == numComponents || (_taus.empty() && numComponents == 0), "There must be exactly degree/N-1 taus. Here " << _taus.size() << " instead of " << numComponents-1 << " are given.");
		require_correct_format();
		
		// Remember the caller's canonicalization so it can be restored at the end.
		const bool initialCanonicalization = canonicalized;
		const size_t initialCorePosition = corePosition;
		
		canonicalize_right();
		
		// No rank cap (max size_t), no epsilon truncation — only the soft threshold.
		for(size_t i = 0; i+1 < numComponents; ++i) {
			round_edge(numComponents-i, numComponents-i-1, std::numeric_limits<size_t>::max(), 0.0, _taus[i]);
		}
		
		assume_core_position(0);
		
		if(initialCanonicalization) {
			move_core(initialCorePosition);
		}
	}
708 
709 
710  template<bool isOperator>
711  void TTNetwork<isOperator>::soft_threshold(const double _tau, const bool _preventZero) {
712  soft_threshold(std::vector<double>(num_ranks(), _tau), _preventZero);
713  }
714 
715 
716  template<bool isOperator>
717  std::vector<size_t> TTNetwork<isOperator>::ranks() const {
718  std::vector<size_t> res;
719  res.reserve(num_ranks());
720  for (size_t n = 1; n+2 < nodes.size(); ++n) {
721  res.push_back(nodes[n].neighbors.back().dimension);
722  }
723  return res;
724  }
725 
726 
727  template<bool isOperator>
728  size_t TTNetwork<isOperator>::rank(const size_t _i) const {
729  REQUIRE(_i+1 < degree()/N, "Requested illegal rank " << _i);
730  return nodes[_i+1].neighbors.back().dimension;
731  }
732 
733 
734  template<bool isOperator>
736  REQUIRE(_pos < degree()/N || (degree() == 0 && _pos == 0), "Invalid core position.");
737  corePosition = _pos;
738  canonicalized = true;
739  }
740 
741 
742  template<bool isOperator>
744  return new TTNetwork(*this);
745  }
746 
747  template<bool isOperator>
// NOTE(review): original source line 748 (the function signature) is missing
// from this extraction. Judging by the body, this routine removes degree-2
// component nodes by contracting them into a neighbor, updating
// corePosition/canonicalized accordingly; confirm the exact name against the
// repository.
749  if(degree() == 0) {
// Degree-0 network: contract every node into one and drop canonicalization.
750  std::set<size_t> all;
751  for(size_t i = 0; i < nodes.size(); ++i) { all.emplace_hint(all.end(), i); }
752  contract(all);
753  canonicalized = false;
754  } else {
755  REQUIRE(nodes.size() > 2, "Invalid TTNetwork");
756  const size_t numComponents = nodes.size()-2;
757 
758  for(size_t i = 0; i+1 < numComponents; ++i) {
759  if(nodes[i+1].degree() == 2) {
760  // If we are the core, everything is fine, we contract ourself to the next node, then get removed and the corePositions stays. If the next Node is the core, we have to change the corePosition to ours, because we will be removed. In all other cases cannonicalization is destroyed.
761  if(corePosition == i+1) { corePosition = i; }
762  else if(corePosition != i) { canonicalized = false; }
763  contract(i+1, i+2);
764  }
765  }
766 
767  // Extra treatment for last component to avoid contraction to the pseudo-node.
768  if(nodes[numComponents].degree() == 2) {
769  if(corePosition == numComponents-1) { corePosition = numComponents-2; }
770  else if(corePosition != numComponents-2) { canonicalized = false; }
771  contract(numComponents-1, numComponents);
772  }
773  }
774 
775  INTERNAL_CHECK(corePosition < degree() || !canonicalized, "Woot");
776 
777  sanitize();
778  }
779 
780 
781  template<bool isOperator>
// NOTE(review): original source line 782 (the signature, presumably
// `value_t TTNetwork<isOperator>::frob_norm() const`) is missing from this
// extraction.
783  require_correct_format();
784  if (canonicalized) {
// In canonical form all non-core components are orthogonal, so the
// Frobenius norm of the whole tensor equals that of the core component.
785  return get_component(corePosition).frob_norm();
786  }
// Fallback: compute sqrt(<x,x>) by a full self-contraction.
787  const Index i;
788  return std::sqrt(value_t((*this)(i&0)*(*this)(i&0)));
789  }
790 
791 
792 
793  /*- - - - - - - - - - - - - - - - - - - - - - - - - - Basic arithmetics - - - - - - - - - - - - - - - - - - - - - - - - - - */
794 
795  // TODO why sparse?
796  template<bool isOperator>
// NOTE(review): original source line 797 (the signature, presumably
// `TTNetwork<isOperator>& TTNetwork<isOperator>::operator+=(const TTNetwork& _other)`)
// is missing from this extraction, as is line 811 (presumably the matching
// XERUS_PA_START profiling macro for the XERUS_PA_END below).
// The sum is formed component-wise via the block-diagonal TT construction;
// ranks add up, and the original canonicalization is restored at the end.
798  REQUIRE(dimensions == _other.dimensions, "The dimensions in TT sum must coincide. Given " << dimensions << " vs " << _other.dimensions);
799  require_correct_format();
800 
801  const size_t numComponents = degree()/N;
802 
// Remember canonicalization so it can be re-established after the ranks grew.
803  const bool initialCanonicalization = canonicalized;
804  const size_t initialCorePosition = corePosition;
805 
// Degree 0/1: a single component holds everything — plain tensor addition.
806  if (numComponents <= 1) {
807  component(0) += _other.get_component(0);
808  return *this;
809  }
810 
812  for(size_t position = 0; position < numComponents; ++position) {
813  // Get current components
814  const Tensor& myComponent = get_component(position);
815  const Tensor& otherComponent = _other.get_component(position);
816 
817  // Structure has to be (for degree 4)
818  // (L1 R1) * ( L2 0 ) * ( L3 0 ) * ( L4 )
819  // ( 0 R2 ) ( 0 R3 ) ( R4 )
820 
821  // Create a Tensor for the result
// Boundary components keep rank 1 on the outside; interior ranks add.
822  std::vector<size_t> nxtDimensions;
823  nxtDimensions.emplace_back(position == 0 ? 1 : myComponent.dimensions.front()+otherComponent.dimensions.front());
824  nxtDimensions.emplace_back(myComponent.dimensions[1]);
825  if (isOperator) { nxtDimensions.emplace_back(myComponent.dimensions[2]); }
826  nxtDimensions.emplace_back(position == numComponents-1 ? 1 : myComponent.dimensions.back()+otherComponent.dimensions.back());
827 
// The result is sparse only if both summands are sparse.
828  const Tensor::Representation newRep = myComponent.is_sparse() && otherComponent.is_sparse() ? Tensor::Representation::Sparse : Tensor::Representation::Dense;
829  std::unique_ptr<Tensor> newComponent(new Tensor(std::move(nxtDimensions), newRep));
830 
// Place my component in the upper-left block, the other in the lower-right.
831  newComponent->offset_add(myComponent, isOperator ? std::vector<size_t>({0,0,0,0}) : std::vector<size_t>({0,0,0}));
832 
833  const size_t leftOffset = position == 0 ? 0 : myComponent.dimensions.front();
834  const size_t rightOffset = position == numComponents-1 ? 0 : myComponent.dimensions.back();
835 
836  newComponent->offset_add(otherComponent, isOperator ? std::vector<size_t>({leftOffset,0,0,rightOffset}) : std::vector<size_t>({leftOffset,0,rightOffset}));
837 
838  set_component(position, std::move(*newComponent));
839  }
840  XERUS_PA_END("ADD/SUB", "TTNetwork ADD/SUB", std::string("Dims:")+misc::to_string(dimensions)+" Ranks: "+misc::to_string(ranks()));
841 
842  if(initialCanonicalization) {
843  move_core(initialCorePosition);
844  }
845 
846  return *this;
847  }
848 
849 
850  template<bool isOperator>
// NOTE(review): original source line 851 (the signature, presumably
// `TTNetwork<isOperator>& TTNetwork<isOperator>::operator-=(const TTNetwork& _other)`)
// is missing from this extraction.
// Subtraction is implemented as -((-*this) + _other), reusing operator+=.
852  operator*=(-1.0);
853  operator+=(_other);
854  operator*=(-1.0);
855  return *this;
856  }
857 
858 
859  template<bool isOperator>
// NOTE(review): original source line 860 (the signature, presumably
// `void TTNetwork<isOperator>::operator*=(const value_t _factor)`) is missing
// from this extraction.
// Scaling is absorbed into a single component: the core when canonical
// (preserving canonicalization), otherwise component 0.
861  REQUIRE(!nodes.empty(), "There must not be a TTNetwork without any node");
862 
863  if(canonicalized) {
864  component(corePosition) *= _factor;
865  } else {
866  component(0) *= _factor;
867  }
868  }
869 
870 
871  template<bool isOperator>
// NOTE(review): original source line 872 (the signature, presumably
// `void TTNetwork<isOperator>::operator/=(const value_t _divisor)`) is missing
// from this extraction. Division delegates to scaling by the reciprocal.
873  operator*=(1/_divisor);
874  }
875 
876 
877 
878  /*- - - - - - - - - - - - - - - - - - - - - - - - - - Operator specializations - - - - - - - - - - - - - - - - - - - - - - - - - - */
879 
880 
881 
882  template<>
// NOTE(review): original source line 883 (the signature of the TTTensor
// specialization of specialized_contraction) is missing from this extraction.
// Returning false hands the contraction to the generic TensorNetwork code.
884  // Only TTOperators construct stacks, so no specialized contractions for TTTensors
885  return false;
886  }
887 
888  template<>
// NOTE(review): original source line 889 (the signature of the TTOperator
// specialization of specialized_contraction) is missing from this extraction.
// This routine attempts to build a lazy TTStack for operator-times-TT
// contractions; it returns false whenever the generic fallback must be used.
890  _me.assign_indices();
891  _other.assign_indices();
892 
// *this must itself be a TTOperator or an operator stack.
893  const TTNetwork* const meTT = dynamic_cast<const TTNetwork*>(_me.tensorObjectReadOnly);
894  const internal::TTStack<true>* const meTTStack = dynamic_cast<const internal::TTStack<true>*>(_me.tensorObjectReadOnly);
895  INTERNAL_CHECK(meTT || meTTStack, "Internal Error.");
896 
// The other operand may be a TTTensor, a tensor stack, a TTOperator or an operator stack.
897  const TTTensor* const otherTT = dynamic_cast<const TTTensor*>(_other.tensorObjectReadOnly);
898  const internal::TTStack<false>* const otherTTStack = dynamic_cast<const internal::TTStack<false>*>(_other.tensorObjectReadOnly);
899  const TTOperator* const otherTTO = dynamic_cast<const TTOperator*>(_other.tensorObjectReadOnly);
900  const internal::TTStack<true>* const otherTTOStack = dynamic_cast<const internal::TTStack<true>*>(_other.tensorObjectReadOnly);
901 
902  if ((otherTT == nullptr) && (otherTTStack == nullptr) && (otherTTO == nullptr) && (otherTTOStack == nullptr)) {
903  return false;
904  }
905 
// Carry the canonicalization state through into the resulting stack.
906  bool cannoAtTheEnd = false;
907  size_t coreAtTheEnd = 0;
908  if (meTT != nullptr) {
909  cannoAtTheEnd = meTT->canonicalized;
910  coreAtTheEnd = meTT->corePosition;
911  } else {
912  cannoAtTheEnd = meTTStack->cannonicalization_required;
913  coreAtTheEnd = meTTStack->futureCorePosition;
914  }
915 
916 
917  // TODO profiler should warn if other->corePosition is not identical to coreAtTheEnd
918 
919  // Determine my first half and second half of indices
920  auto midIndexItr = _me.indices.begin();
921  size_t spanSum = 0;
922  while (spanSum < _me.degree() / 2) {
923  INTERNAL_CHECK(midIndexItr != _me.indices.end(), "Internal Error.");
924  spanSum += midIndexItr->span;
925  ++midIndexItr;
926  }
927  if (spanSum > _me.degree() / 2) {
928  return false; // an index spanned some links of the left and some of the right side
929  }
930 
931  if ((otherTT != nullptr) || (otherTTStack != nullptr)) {
932  // ensure fitting indices
933  if (std::equal(_me.indices.begin(), midIndexItr, _other.indices.begin()) || std::equal(midIndexItr, _me.indices.end(), _other.indices.begin())) {
934  _out.reset(new internal::IndexedTensorMoveable<TensorNetwork>(new internal::TTStack<false>(cannoAtTheEnd, coreAtTheEnd), _me.indices));
935  *_out->tensorObject = *_me.tensorObjectReadOnly;
936  TensorNetwork::add_network_to_network(std::move(*_out), std::move(_other));
937  return true;
938  }
939  return false;
940 
941  } else { // other is operator or operator stack
942  // determine other middle index
943  auto otherMidIndexItr = _other.indices.begin();
944  spanSum = 0;
945  while (spanSum < _other.degree() / 2) {
946  INTERNAL_CHECK(otherMidIndexItr != _other.indices.end(), "Internal Error.");
947  spanSum += otherMidIndexItr->span;
948  ++otherMidIndexItr;
949  }
950  if (spanSum > _other.degree() / 2) {
951  return false; // an index spanned some links of the left and some of the right side
952  }
953  // or indices in fitting order to contract the TTOs
954  if ( std::equal(_me.indices.begin(), midIndexItr, _other.indices.begin())
955  || std::equal(midIndexItr, _me.indices.end(), _other.indices.begin())
956  || std::equal(_me.indices.begin(), midIndexItr, otherMidIndexItr)
957  || std::equal(midIndexItr, _me.indices.end(), otherMidIndexItr))
958  {
959  _out.reset(new internal::IndexedTensorMoveable<TensorNetwork>(new internal::TTStack<true>(cannoAtTheEnd, coreAtTheEnd), _me.indices));
960  *_out->tensorObject = *_me.tensorObjectReadOnly;
961  TensorNetwork::add_network_to_network(std::move(*_out), std::move(_other));
962  return true;
963  }
964  return false;
965 
966  }
967  }
968 
969 
970  template<bool isOperator>
// NOTE(review): original source lines 971, 974 and 977 — the declaration of
// `transpose_if_operator` and the signatures of its two specializations —
// are missing from this extraction. Per the Doxygen cross-references, the
// <false> specialization is a no-op and the <true> specialization calls
// TTOperator::transpose() (body visible below).
972 
973  template<>
975 
976  template<>
978  _ttNetwork.transpose();
979  }
980 
981  template<bool isOperator>
// NOTE(review): several original source lines are missing from this
// extraction: line 982 (the signature, presumably
// `bool TTNetwork<isOperator>::specialized_sum(...)`), lines 1024-1025
// (presumably the declarations of `moveMe` — a dynamic_cast of _me to a
// moveable indexed tensor — and `stackMe`), and line 1041 (presumably the
// analogous `moveOther` declaration). Confirm against the repository.
// The routine performs a TT-aware sum, returning false to request the
// generic fallback when the operands do not match.
983  _me.assign_indices();
984  _other.assign_indices();
985 
986  // If the other is not a TT tensor (or stack) fall back to default summation (i.e. return false)
987  const TTNetwork* otherTT = dynamic_cast<const TTNetwork*>( _other.tensorObjectReadOnly);
988  const internal::TTStack<isOperator>* otherTTStack = dynamic_cast<const internal::TTStack<isOperator>*>( _other.tensorObjectReadOnly);
989  if (!otherTT && !otherTTStack) { return false; }
990 
991  bool transposeRHS;
992  if(_me.indices == _other.indices) { // Everything is easy.
993  REQUIRE(_me.tensorObjectReadOnly->dimensions == _other.tensorObjectReadOnly->dimensions, "TT sum requires both operants to share the same dimensions.");
994  transposeRHS = false;
995  } else if (isOperator) { // Check for transposition
996  // Find index mid-points to compare the halves separately
997  auto myMidIndexItr = _me.indices.begin();
998  size_t spanSum = 0;
999  while (spanSum < _me.degree() / 2) {
1000  spanSum += myMidIndexItr->span;
1001  ++myMidIndexItr;
1002  }
1003 
1004  auto otherMidIndexItr = _other.indices.begin();
1005  spanSum = 0;
1006  while (spanSum < _other.degree() / 2) {
1007  spanSum += otherMidIndexItr->span;
1008  ++otherMidIndexItr;
1009  }
1010 
// RHS is the transpose of LHS iff its halves appear swapped.
1011  if(std::equal(_me.indices.begin(), myMidIndexItr, otherMidIndexItr) && std::equal(myMidIndexItr, _me.indices.end(), _other.indices.begin())) {
1012  transposeRHS = true;
1013  } else {
1014  return false;
1015  }
1016  } else {
1017  return false; // Not Operator and index order differs.
1018  }
1019 
1020  // Check whether we are a TTStack
1021  std::unique_ptr<TTNetwork<isOperator>> meStorage;
1022  const TTNetwork* usedMe;
1023 
1026  if(moveMe && (stackMe = dynamic_cast<internal::TTStack<isOperator>*>(moveMe->tensorObject))) {
// A stack must first be contracted into a proper TTNetwork copy.
1027  meStorage.reset(new TTNetwork());
1028  usedMe = meStorage.get();
1029  *meStorage = TTNetwork(*stackMe);
1030  INTERNAL_CHECK(usedMe->dimensions == stackMe->dimensions, "Ie " << stackMe->dimensions << " vs " << usedMe->dimensions);
1031  } else { // I am normal
1032  INTERNAL_CHECK(dynamic_cast<const TTNetwork<isOperator>*>(_me.tensorObjectReadOnly),"Non-moveable TTStack (or other error) detected.");
1033  usedMe = static_cast<const TTNetwork<isOperator>*>(_me.tensorObjectReadOnly);
1034  }
1035  const TTNetwork& ttMe = *usedMe;
1036 
1037 
1038  // Check whether the other is a TTStack
1039  TTNetwork ttOther;
1040 
1042  internal::TTStack<isOperator>* stackOther;
1043  if(moveOther && (stackOther = dynamic_cast<internal::TTStack<isOperator>*>(moveOther->tensorObject))) {
1044  ttOther = TTNetwork(*stackOther);
1045  INTERNAL_CHECK(ttOther.dimensions == stackOther->dimensions, "Ie");
1046  } else { // Other is normal
1047  INTERNAL_CHECK(dynamic_cast<const TTNetwork<isOperator>*>(_other.tensorObjectReadOnly),"Non-moveable TTStack (or other error) detected.");
1048  ttOther = *static_cast<const TTNetwork<isOperator>*>(_other.tensorObjectReadOnly);
1049 
1050  }
1051 
1052  if(transposeRHS) {
1053  transpose_if_operator(ttOther);
1054  }
1055 
// Build the result as a copy of me and add the (possibly transposed) other.
1056  _out.reset(new internal::IndexedTensorMoveable<TensorNetwork>( new TTNetwork(ttMe), _me.indices));
1057 
1058  *static_cast<TTNetwork*>(_out->tensorObject) += ttOther;
1059  return true;
1060  }
1061 
1062 
1063  template<bool isOperator>
// NOTE(review): original source line 1064 (the signature, presumably
// `void TTNetwork<isOperator>::specialized_evaluation(...)`) and line 1075
// (presumably the declaration of `movOther`, a dynamic_cast of _other to a
// moveable indexed tensor) are missing from this extraction.
// This routine assigns an arbitrary indexed expression to *this, taking
// TT-aware shortcuts where possible and falling back to a dense cast.
1065  INTERNAL_CHECK(_me.tensorObject == this, "Internal Error.");
1066 
1067  _me.assign_indices(_other.degree());
1068  _other.assign_indices();
1069  const size_t numComponents = _other.degree()/N;
1070  TTNetwork& meTTN = static_cast<TTNetwork&>(*_me.tensorObject);
1071 
1072  // First check whether the other is a TTNetwork as well, otherwise we can skip to fallback
1073  const TTNetwork* const otherTTN = dynamic_cast<const TTNetwork*>(_other.tensorObjectReadOnly);
1074  const internal::TTStack<isOperator>* const otherTTStack = dynamic_cast<const internal::TTStack<isOperator>*>(_other.tensorObjectReadOnly);
1076 
1077  if(otherTTN || otherTTStack) {
1078  if (otherTTStack) {
1079  INTERNAL_CHECK(movOther, "Not moveable TTStack encountered...");
1080  internal::TTStack<isOperator>::contract_stack(std::move(*movOther));
1081  }
1082 
1083  // Check whether the index order coincides
1084  if (_me.indices == _other.indices) {
1085  if (otherTTN) {
1086  meTTN = *otherTTN;
1087  } else {
1088  _me.tensorObject->operator=(*_other.tensorObjectReadOnly);
1089  meTTN.canonicalized = false;
1090  if (otherTTStack->cannonicalization_required) {
1091  meTTN.move_core(otherTTStack->futureCorePosition);
1092  }
1093  }
1094  return;
1095  }
1096 
1097  // For TTOperators also check whether the index order is transposed
1098  if (isOperator) {
1099  bool transposed = false;
1100 
1101  auto midIndexItr = _me.indices.begin();
1102  size_t spanSum = 0;
1103  while (spanSum < numComponents) {
1104  INTERNAL_CHECK(midIndexItr != _me.indices.end(), "Internal Error.");
1105  spanSum += midIndexItr->span;
1106  ++midIndexItr;
1107  }
1108  if (spanSum == numComponents) {
1109  // Transposition possible on my end
1110  auto otherMidIndexItr = _other.indices.begin();
1111  spanSum = 0;
1112  while (spanSum < numComponents) {
1113  INTERNAL_CHECK(otherMidIndexItr != _other.indices.end(), "Internal Error.");
1114  spanSum += otherMidIndexItr->span;
1115  ++otherMidIndexItr;
1116  }
1117  if (spanSum == numComponents) {
1118  // Other tensor also transposable
1119  transposed = (std::equal(_me.indices.begin(), midIndexItr, otherMidIndexItr))
1120  && (std::equal(midIndexItr, _me.indices.end(), _other.indices.begin()));
1121  }
1122  }
1123 
1124  if (transposed) {
// Assign, then transpose in place to compensate for the swapped halves.
1125  if (otherTTN) {
1126  meTTN = *otherTTN;
1127  } else {
1128  _me.tensorObject->operator=(*_other.tensorObjectReadOnly);
1129  meTTN.canonicalized = false;
1130  if (otherTTStack->cannonicalization_required) {
1131  meTTN.move_core(otherTTStack->futureCorePosition);
1132  }
1133  }
1134  require_correct_format();
1135  dynamic_cast<TTOperator*>(_me.tensorObject)->transpose(); // NOTE will never be called if !isOperator
1136  return;
1137  }
1138  }
1139  }
1140 
1141  // Use Tensor fallback
1142  if (_other.tensorObjectReadOnly->nodes.size() > 1) {
1143  LOG_ONCE(warning, "Assigning a general tensor network to TTOperator not yet implemented. casting to fullTensor first");
1144  }
1145  Tensor otherFull(*_other.tensorObjectReadOnly);
1146  Tensor otherReordered;
1147  otherReordered(_me.indices) = otherFull(_other.indices);
1148 
1149  // Cast to TTNetwork
1150  *_me.tensorObject = TTNetwork(std::move(otherReordered));
1151  }
1152 
1153 
1154  // Explicit instantiation of the two template parameters that will be implemented in the xerus library
1155  template class TTNetwork<false>;
1156  template class TTNetwork<true>;
1157 
1158 
1159 
1160  template<bool isOperator>
// NOTE(review): the signatures of the five free arithmetic operators
// (original source lines 1161, 1168, 1175, 1182 and 1189 — per the explicit
// instantiations: operator+, operator-, operator*(net, factor),
// operator*(factor, net) and operator/) are missing from this extraction.
// Each takes its TTNetwork argument by value and applies the corresponding
// compound-assignment operator to the copy, which is then returned.
1162  _lhs += _rhs; // NOTE pass-by-value!
1163  return _lhs;
1164  }
1165 
1166 
1167  template<bool isOperator>
1169  _lhs -= _rhs; // NOTE pass-by-value!
1170  return _lhs;
1171  }
1172 
1173 
1174  template<bool isOperator>
1176  _network *= _factor; // NOTE pass-by-value!
1177  return _network;
1178  }
1179 
1180 
1181  template<bool isOperator>
1183  _network *= _factor; // NOTE pass-by-value!
1184  return _network;
1185  }
1186 
1187 
1188  template<bool isOperator>
1190  _network /= _divisor; // NOTE pass-by-value!
1191  return _network;
1192  }
1193 
1194  // Explicit instantiation for both types
1195  template TTNetwork<false> operator+(TTNetwork<false> _lhs, const TTNetwork<false>& _rhs);
1196  template TTNetwork<true> operator+(TTNetwork<true> _lhs, const TTNetwork<true>& _rhs);
1197  template TTNetwork<false> operator-(TTNetwork<false> _lhs, const TTNetwork<false>& _rhs);
1198  template TTNetwork<true> operator-(TTNetwork<true> _lhs, const TTNetwork<true>& _rhs);
1199  template TTNetwork<false> operator*(TTNetwork<false> _network, const value_t _factor);
1200  template TTNetwork<true> operator*(TTNetwork<true> _network, const value_t _factor);
1201  template TTNetwork<false> operator*(const value_t _factor, TTNetwork<false> _network);
1202  template TTNetwork<true> operator*(const value_t _factor, TTNetwork<true> _network);
1203  template TTNetwork<false> operator/(TTNetwork<false> _network, const value_t _divisor);
1204  template TTNetwork<true> operator/(TTNetwork<true> _network, const value_t _divisor);
1205 
1206 
1207 
1208 
1209  template<bool isOperator>
1210  void perform_component_product(Tensor& _newComponent, const Tensor& _componentA, const Tensor& _componentB) {
1211  const size_t externalDim = isOperator ? _componentA.dimensions[1] * _componentA.dimensions[2] : _componentA.dimensions[1];
1212 
1213  if(_componentA.is_dense() && _componentB.is_dense()) {
1214  INTERNAL_CHECK(_newComponent.is_dense(), "IE");
1215  value_t* const newCompData = _newComponent.get_dense_data();
1216  const value_t* const compBData = _componentB.get_unsanitized_dense_data();
1217  for (size_t r1 = 0; r1 < _componentA.dimensions.front(); ++r1) {
1218  for (size_t s1 = 0; s1 < _componentB.dimensions.front(); ++s1) {
1219  for (size_t n = 0; n < externalDim; ++n) {
1220  for (size_t r2 = 0; r2 < _componentA.dimensions.back(); ++r2) {
1221  const size_t offsetA = (r1*externalDim + n)*_componentA.dimensions.back()+r2;
1222  const size_t offsetB = (s1*externalDim + n)*_componentB.dimensions.back();
1223  const size_t offsetResult = (((r1*_componentB.dimensions.front() + s1)*externalDim + n)*_componentA.dimensions.back()+r2)*_componentB.dimensions.back();
1224  misc::copy_scaled(newCompData+offsetResult, _componentB.factor*_componentA[offsetA], compBData+offsetB, _componentB.dimensions.back());
1225  }
1226  }
1227  }
1228  }
1229  } else if(_componentA.is_dense()) { // B sparse
1230  const std::vector<std::vector<std::tuple<size_t, size_t, value_t>>> groupedEntriesB = get_grouped_entries<isOperator>(_componentB);
1231  value_t* const newCompData = _newComponent.get_dense_data();
1232  for (size_t r1 = 0; r1 < _componentA.dimensions.front(); ++r1) {
1233  for (size_t n = 0; n < externalDim; ++n) {
1234  for (size_t r2 = 0; r2 < _componentA.dimensions.back(); ++r2) {
1235  for(const std::tuple<size_t, size_t, value_t>& entryB : groupedEntriesB[n]) {
1236  const size_t offsetA = (r1*externalDim + n)*_componentA.dimensions.back()+r2;
1237  const size_t offsetResult = (((r1*_componentB.dimensions.front() + std::get<0>(entryB))*externalDim + n)*_componentA.dimensions.back()+r2)*_componentB.dimensions.back()+std::get<1>(entryB);
1238  newCompData[offsetResult] = _componentA[offsetA]*std::get<2>(entryB);
1239  }
1240  }
1241  }
1242  }
1243  } else if(_componentB.is_dense()) { // A sparse
1244  LOG(woot, "");
1245  value_t* const newCompData = _newComponent.get_dense_data();
1246  const value_t* const compBData = _componentB.get_unsanitized_dense_data();
1247  for(const auto& entryA : _componentA.get_unsanitized_sparse_data()) {
1248  const size_t r2 = entryA.first%_componentA.dimensions.back();
1249  const size_t n = (entryA.first/_componentA.dimensions.back())%externalDim;
1250  const size_t r1 = (entryA.first/_componentA.dimensions.back())/externalDim;
1251 
1252  for (size_t s1 = 0; s1 < _componentB.dimensions.front(); ++s1) {
1253  const size_t offsetB = (s1*externalDim + n)*_componentB.dimensions.back();
1254  const size_t offsetResult = (((r1*_componentB.dimensions.front() + s1)*externalDim + n)*_componentA.dimensions.back()+r2)*_componentB.dimensions.back();
1255  misc::copy_scaled(newCompData+offsetResult, _componentB.factor*_componentA.factor*entryA.second, compBData+offsetB, _componentB.dimensions.back());
1256  }
1257  }
1258  } else {
1259  const std::vector<std::vector<std::tuple<size_t, size_t, value_t>>> groupedEntriesB = get_grouped_entries<isOperator>(_componentB);
1260  std::map<size_t, value_t>& dataMap = _newComponent.get_sparse_data();
1261  INTERNAL_CHECK(dataMap.empty(), "IE");
1262  for(const auto& entryA : _componentA.get_unsanitized_sparse_data()) {
1263  const size_t r2 = entryA.first%_componentA.dimensions.back();
1264  const size_t n = (entryA.first/_componentA.dimensions.back())%externalDim;
1265  const size_t r1 = (entryA.first/_componentA.dimensions.back())/externalDim;
1266 
1267  for(const std::tuple<size_t, size_t, value_t>& entryB : groupedEntriesB[n]) {
1268  dataMap.emplace((((r1*_componentB.dimensions.front() + std::get<0>(entryB))*externalDim + n)*_componentA.dimensions.back()+r2)*_componentB.dimensions.back()+std::get<1>(entryB), _componentA.factor*entryA.second*std::get<2>(entryB));
1269  }
1270  }
1271  }
1272  }
1273 
1274  template<bool isOperator>
// NOTE(review): original source line 1275 (the signature, per the explicit
// instantiation: `TTNetwork<isOperator> entrywise_product(const TTNetwork<isOperator>& _A,
// const TTNetwork<isOperator>& _B)`) is missing from this extraction.
// Builds the Hadamard product component-wise; ranks multiply.
1276  static constexpr const size_t N = isOperator?2:1;
1277  REQUIRE(_A.dimensions == _B.dimensions, "Entrywise_product ill-defined for different external dimensions.");
1278 
// Degree 0: both operands are scalars.
1279  if(_A.degree() == 0) {
1280  TTNetwork<isOperator> result(_A);
1281  result *= _B[0];
1282  return result;
1283  }
1284 
1285  TTNetwork<isOperator> result(_A.degree());
1286  const size_t numComponents = _A.degree() / N;
1287 
// NOTE(review): `#pragma omp for` without an enclosing `parallel` region is
// an orphaned worksharing construct and runs sequentially unless this
// function is called from inside a parallel region — confirm intent.
1288  #pragma omp for schedule(static)
1289  for (size_t i = 0; i < numComponents; ++i) {
1290  const Tensor& componentA = _A.get_component(i);
1291  const Tensor& componentB = _B.get_component(i);
// Result component is sparse only if both inputs are sparse.
1292  const Tensor::Representation newRep = componentA.is_sparse() && componentB.is_sparse() ? Tensor::Representation::Sparse : Tensor::Representation::Dense;
1293  Tensor newComponent(isOperator ?
1294  Tensor::DimensionTuple({componentA.dimensions.front()*componentB.dimensions.front(), componentA.dimensions[1], componentA.dimensions[2], componentA.dimensions.back()*componentB.dimensions.back()}) :
1295  Tensor::DimensionTuple({componentA.dimensions.front()*componentB.dimensions.front(), componentA.dimensions[1], componentA.dimensions.back()*componentB.dimensions.back()}), newRep);
1296 
1297  perform_component_product<isOperator>(newComponent, componentA, componentB);
1298 
1299  #pragma omp critical
1300  {
1301  result.set_component(i, std::move(newComponent));
1302  }
1303  }
1304 
1305  if (_A.canonicalized && _B.canonicalized) {
1306  result.move_core(_A.corePosition);
1307  }
1308  return result;
1309  }
1310 
1311 
1312  //Explicit instantiation for both types
1314  template TTNetwork<true> entrywise_product(const TTNetwork<true> &_A, const TTNetwork<true> &_B);
1315 
1316 
1317 
1318  template<bool isOperator>
// NOTE(review): original source line 1319 (the signature, per the explicit
// instantiations: `TTNetwork<isOperator> dyadic_product(const TTNetwork<isOperator>& _lhs,
// const TTNetwork<isOperator>& _rhs)`) is missing from this extraction.
// Concatenates the two TT trains into one network of degree
// _lhs.degree()+_rhs.degree(), rewiring node indices and external links.
1320  constexpr size_t N = isOperator?2:1;
1321  _lhs.require_correct_format();
1322  _rhs.require_correct_format();
1323 
// Degree-0 operands act as scalar factors.
1324  if (_lhs.degree() == 0) {
1325  TTNetwork<isOperator> result(_rhs);
1326  result *= _lhs[0];
1327  return result;
1328  }
1329 
1330  TTNetwork<isOperator> result(_lhs);
1331  if (_rhs.degree() == 0) {
1332  result *= _rhs[0];
1333  return result;
1334  }
1335 
1336  const size_t lhsNumComponents = _lhs.degree()/N;
1337  const size_t rhsNumComponents = _rhs.degree()/N;
1338 
1339  // fix external links of lhs nodes
1340  for (size_t i=1; i<result.nodes.size(); ++i) {
1341  for (TensorNetwork::Link &l : result.nodes[i].neighbors) {
1342  if (l.external) {
1343  if (l.indexPosition >= lhsNumComponents) {
1344  l.indexPosition += rhsNumComponents;
1345  }
1346  }
1347  }
1348  }
1349 
1350  // Add all nodes of rhs and fix neighbor relations
// Drop lhs's right boundary pseudo-node; rhs's component nodes follow it.
1351  result.nodes.pop_back();
1352  result.nodes.reserve(_lhs.degree()+_rhs.degree()+2);
1353  for (size_t i = 1; i < _rhs.nodes.size(); ++i) {
1354  result.nodes.emplace_back(_rhs.nodes[i]);
1355  for (TensorNetwork::Link &l : result.nodes.back().neighbors) {
1356  if (l.external) {
1357  if (l.indexPosition < rhsNumComponents) {
1358  l.indexPosition += lhsNumComponents;
1359  } else {
1360  l.indexPosition += 2*lhsNumComponents;
1361  }
1362  } else {
1363  if (l.other==0) {
1364  l.indexPosition = N+1;
1365  }
1366  l.other += lhsNumComponents;
1367  }
1368  }
1369  }
1370 
1371  // Add all external indices of rhs
1372  result.externalLinks.clear(); // NOTE that this is necessary because in the operator case we added indices
1373  result.dimensions.clear(); // in the wrong position when we copied the lhs
1374  result.externalLinks.reserve(_lhs.degree()+_rhs.degree());
1375  result.dimensions.reserve(_lhs.degree()+_rhs.degree());
1376 
1377  for (size_t i = 0; i < lhsNumComponents; ++i) {
1378  const size_t d=_lhs.dimensions[i];
1379  result.externalLinks.emplace_back(i+1, 1, d, false);
1380  result.dimensions.push_back(d);
1381  }
1382 
1383  for (size_t i = 0; i < rhsNumComponents; ++i) {
1384  const size_t d = _rhs.dimensions[i];
1385  result.externalLinks.emplace_back(lhsNumComponents+i+1, 1, d, false);
1386  result.dimensions.push_back(d);
1387  }
1388 
1389  if (isOperator) {
1390  for (size_t i = 0; i < lhsNumComponents; ++i) {
1391  const size_t d = _lhs.dimensions[i];
1392  result.externalLinks.emplace_back(i+1, 2, d, false);
1393  result.dimensions.push_back(d);
1394  }
1395  for (size_t i = 0; i < rhsNumComponents; ++i) {
1396  const size_t d = _rhs.dimensions[i];
1397  result.externalLinks.emplace_back(lhsNumComponents+i+1, 2, d, false);
1398  result.dimensions.push_back(d);
1399  }
1400  }
1401 
// Canonicalization survives only for the two cheap cases (both cores at the
// left end, or both at the right end); otherwise it is dropped.
1402  if (_lhs.canonicalized && _rhs.canonicalized) {
1403  if (_lhs.corePosition == 0 && _rhs.corePosition == 0) {
1404  result.canonicalized = true;
1405  result.corePosition = lhsNumComponents;
1406  // the other core might have carried a factor
1407  if (result.nodes[1].tensorObject->has_factor()) {
1408  (*result.nodes[lhsNumComponents+1].tensorObject) *= result.nodes[1].tensorObject->factor;
1409  result.nodes[1].tensorObject->factor = 1.0;
1410  }
1411  result.move_core(0);
1412  } else if (_lhs.corePosition == lhsNumComponents-1 && _rhs.corePosition == rhsNumComponents-1) {
1413  result.canonicalized = true;
1414  result.corePosition = lhsNumComponents-1;
1415  const size_t lastIdx = lhsNumComponents + rhsNumComponents -1;
1416  // the other core might have carried a factor
1417  if (result.nodes[lastIdx+1].tensorObject->has_factor()) {
1418  (*result.nodes[lhsNumComponents].tensorObject) *= result.nodes[lastIdx+1].tensorObject->factor;
1419  result.nodes[lastIdx+1].tensorObject->factor = 1.0;
1420  }
1421  result.move_core(lastIdx);
1422  }
1423  } else {
1424  result.canonicalized = false;
1425  }
1426 
1427  result.require_correct_format();
1428  return result;
1429  }
1430 
1431  template TTNetwork<true> dyadic_product(const TTNetwork<true> &_lhs, const TTNetwork<true> &_rhs);
1432  template TTNetwork<false> dyadic_product(const TTNetwork<false> &_lhs, const TTNetwork<false> &_rhs);
1433 
1434  template<bool isOperator>
// NOTE(review): original source line 1435 (the signature, per the explicit
// instantiation: `TTNetwork<isOperator> dyadic_product(const std::vector<TTNetwork<isOperator>>& _tensors)`)
// and line 1441 (contents unknown, likely a profiling/testing macro) are
// missing from this extraction.
// Folds the pairwise dyadic_product over the whole vector.
1436  if (_tensors.empty()) { return TTNetwork<isOperator>(); }
1437 
1438  TTNetwork<isOperator> result(_tensors.back());
1439  // construct dyadic products right to left as default cannonicalization is left
1440  for (size_t i = _tensors.size()-1; i > 0; --i) {
1442  result = dyadic_product(_tensors[i-1], result);
1443  }
1444  return result;
1445  }
1446 
1447  template TTNetwork<true> dyadic_product(const std::vector<TTNetwork<true>>& _tensors);
1448  template TTNetwork<false> dyadic_product(const std::vector<TTNetwork<false>>& _tensors);
1449 
1450 
1451 
1452  namespace misc {
1453 
// Serializes a TTNetwork. The write order below (version, canonicalized,
// corePosition, then the base TensorNetwork) defines the on-disk format and
// must mirror stream_reader exactly — do not reorder.
1454  template<bool isOperator>
1455  void stream_writer(std::ostream& _stream, const TTNetwork<isOperator> &_obj, misc::FileFormat _format) {
1456  if(_format == misc::FileFormat::TSV) {
// Enough digits to round-trip a value_t through text without precision loss.
1457  _stream << std::setprecision(std::numeric_limits<value_t>::digits10 + 1);
1458  }
1459  // storage version number
1460  write_to_stream<size_t>(_stream, 1, _format);
1461 
1462  // store TN specific data
1463  write_to_stream<bool>(_stream, _obj.canonicalized, _format);
1464  write_to_stream<size_t>(_stream, _obj.corePosition, _format);
1465 
1466 
1467  // save rest of TN
1468  write_to_stream<TensorNetwork>(_stream, _obj, _format);
1469  }
1470  template void stream_writer(std::ostream& _stream, const TTNetwork<true> &_obj, misc::FileFormat _format);
1471  template void stream_writer(std::ostream& _stream, const TTNetwork<false> &_obj, misc::FileFormat _format);
1472 
1473 
// Deserializes a TTNetwork. Read order must mirror stream_writer exactly:
// version, canonicalized, corePosition, then the base TensorNetwork.
1474  template<bool isOperator>
1475  void stream_reader(std::istream& _stream, TTNetwork<isOperator> &_obj, const misc::FileFormat _format) {
// The version read is only checked in debug builds (IF_CHECK); only
// storage version 1 is supported.
1476  IF_CHECK( size_t ver = ) read_from_stream<size_t>(_stream, _format);
1477  REQUIRE(ver == 1, "Unknown stream version to open (" << ver << ")");
1478 
1479  // load TN specific data
1480  read_from_stream<bool>(_stream, _obj.canonicalized, _format);
1481  read_from_stream<size_t>(_stream, _obj.corePosition, _format);
1482 
1483 
1484  // load rest of TN
1485  read_from_stream<TensorNetwork>(_stream, _obj, _format);
1486 
// Validate the freshly loaded network before handing it to the caller.
1487  _obj.require_correct_format();
1488  }
1489  template void stream_reader(std::istream& _stream, TTNetwork<true> &_obj, const misc::FileFormat _format);
1490  template void stream_reader(std::istream& _stream, TTNetwork<false> &_obj, const misc::FileFormat _format);
1491  } // namespace misc
1492 
1493 } // namespace xerus
Header file for some additional math functions.
Header file for CHECK and REQUIRE macros.
Header file for the performance analysis global objects and analysis function.
template TTNetwork< true > operator-(TTNetwork< true > _lhs, const TTNetwork< true > &_rhs)
Internal representation of an read and write and moveable indexed Tensor or TensorNetwork.
void transpose_if_operator< true >(TTNetwork< true > &_ttNetwork)
Definition: ttNetwork.cpp:977
const bool cannonicalization_required
Definition: ttStack.h:43
value_t factor
Single value representing a constant scaling factor.
Definition: tensor.h:105
size_t degree() const
Returns the degree of the tensor.
Definition: tensor.cpp:206
void reshuffle(Tensor &_out, const Tensor &_base, const std::vector< size_t > &_shuffle)
: Performs a simple reshuffle. Much less powerful than a full evaluate, but more efficient...
Header file for the TTNetwork class (and thus TTTensor and TTOperator).
void transpose()
Transpose the TTOperator.
Definition: ttNetwork.h:443
ZeroNode
Internal indicator to prevent the creation of a degree-zero node in the TensorNetwork constructor...
Header file for the Index class.
#define XERUS_REQUIRE_TEST
Definition: check.h:51
Header file for the IndexedTensorMoveable class.
Header file defining lists of indexed tensors.
#define REQUIRE
Definition: internal.h:33
Very general class used to represent arbitrary tensor networks.
Definition: tensorNetwork.h:42
Internal representation of an readable indexed Tensor or TensorNetwork.
#define LOG
Definition: internal.h:38
Header file for the TTStack class.
DimensionTuple dimensions
Vector containing the individual dimensions of the tensor.
Definition: tensor.h:99
#define IF_CHECK
Definition: internal.h:35
The TensorNode class is used by the class TensorNetwork to store the component tensors defining th...
Definition: tensorNetwork.h:85
template void stream_reader(std::istream &_stream, TTNetwork< false > &_obj, const misc::FileFormat _format)
Specialized TensorNetwork class used to represent TTTensor and TToperators.
void transpose_if_operator< false >(TTNetwork< false > &)
Definition: ttNetwork.cpp:974
The main namespace of xerus.
Definition: basic.h:37
Class that handles simple (non-decomposed) tensors in a dense or sparse representation.
Definition: tensor.h:70
template TTNetwork< true > operator+(TTNetwork< true > _lhs, const TTNetwork< true > &_rhs)
value_t * get_dense_data()
Returns a pointer for direct access to the dense data array in row major order.
Definition: tensor.cpp:401
STL namespace.
std::map< size_t, value_t > & get_unsanitized_sparse_data()
Gives access to the internal sparse map, without any checks.
Definition: tensor.cpp:445
bool canonicalized
Flag indicating whether the TTNetwork is canonicalized.
Definition: ttNetwork.h:51
template void stream_writer(std::ostream &_stream, const TTNetwork< false > &_obj, misc::FileFormat _format)
std::vector< size_t > DimensionTuple
: Represention of the dimensions of a Tensor.
Definition: tensor.h:92
void reinterpret_dimensions(DimensionTuple _newDimensions)
Reinterprets the dimensions of the tensor.
Definition: tensor.cpp:620
size_t degree() const
Gets the degree of the TensorNetwork.
template TTNetwork< true > entrywise_product(const TTNetwork< true > &_A, const TTNetwork< true > &_B)
Header file for the Tensor class.
void move_core(const size_t _position, const bool _keepRank=false)
Move the core to a new position.
Definition: ttNetwork.cpp:582
void contract(Tensor &_result, const Tensor &_lhs, const bool _lhsTrans, const Tensor &_rhs, const bool _rhsTrans, const size_t _numModes)
Low-level contraction between Tensors.
Definition: tensor.cpp:1252
TTNetwork()
Constructs an order zero TTNetwork.
Definition: ttNetwork.cpp:46
tensor_type * tensorObject
Non-const pointer to the tensor object.
FileFormat
possible file formats for tensor storage
Definition: fileIO.h:43
void set_component(const size_t _idx, Tensor _T)
Sets a specific component of the TT decomposition.
Definition: ttNetwork.cpp:471
void copy_scaled(T *const __restrict _x, const T _alpha, const T *const _y, const size_t _n) noexcept
Copies _n entries from _y to _x, simultaneously scaling each entry by the factor _alpha. I.e. x = alpha*y.
int comp(const Tensor &_a, const Tensor &_b)
void reset(DimensionTuple _newDim, const Representation _representation, const Initialisation _init=Initialisation::Zero)
Resets the tensor to the given dimensions and representation.
Definition: tensor.cpp:500
Header file for the low level array support function.
Header file for comfort functions and macros that should not be exported in the library.
size_t degree() const noexcept
Definition: tensorNode.cpp:74
constexpr const value_t EPSILON
The default epsilon value in xerus.
Definition: basic.h:50
const size_t futureCorePosition
Definition: ttStack.h:45
std::vector< Link > externalLinks
The open links of the network in order.
value_t frob_norm(const IndexedTensorReadOnly< tensor_type > &_idxTensor)
Returns the Frobenius norm of the associated tensorObject.
constexpr T sqr(const T &_a) noexcept
: Calculates _a*_a.
Definition: math.h:43
Abstract internal representation of a readable and writeable indexed Tensor or TensorNetwork.
Header file for shorthand notations that are xerus specific but used throughout the library...
double value_t
The type of values to be used by xerus.
Definition: basic.h:43
virtual void require_correct_format() const override
Tests whether the network resembles that of a TTTensor and checks consistency with the underlying ten...
Definition: ttNetwork.cpp:290
template TTNetwork< false > dyadic_product(const std::vector< TTNetwork< false >> &_tensors)
Class used to represent indices that can be used to write tensor calculations in index notation...
Definition: index.h:43
void calculate_svd(Tensor &_U, Tensor &_S, Tensor &_Vt, Tensor _input, const size_t _splitPos, const size_t _maxRank, const value_t _eps)
Low-Level SVD calculation of a given Tensor _input = _U _S _Vt.
Definition: tensor.cpp:1424
#define XERUS_PA_END(group, name, parameter)
bool is_dense() const
Returns whether the current representation is dense.
Definition: tensor.cpp:220
std::vector< size_t > RankTuple
: Representation of the ranks of a TensorNetwork.
Definition: tensorNetwork.h:45
std::map< size_t, value_t > & get_sparse_data()
Returns a reference for direct access to the sparse data map.
Definition: tensor.cpp:437
std::unique_ptr< Tensor > tensorObject
Save slot for the tensorObject associated with this node.
Definition: tensorNetwork.h:88
#define XERUS_PA_START
static XERUS_force_inline std::string to_string(const bool obj)
Definition: stringFromTo.h:41
bool is_sparse() const
Returns whether the current representation is sparse.
Definition: tensor.cpp:226
#define INTERNAL_CHECK
Definition: internal.h:37
value_t * get_unsanitized_dense_data()
Gives access to the internal data pointer, without any checks.
Definition: tensor.cpp:408
template TTNetwork< true > operator*(const value_t _factor, TTNetwork< true > _network)
std::vector< Link > neighbors
Vector of links defining the connection of this node to the network.
Definition: tensorNetwork.h:91
template TTNetwork< true > operator/(TTNetwork< true > _network, const value_t _divisor)
void perform_component_product(Tensor &_newComponent, const Tensor &_componentA, const Tensor &_componentB)
Definition: ttNetwork.cpp:1210
#define LOG_ONCE
Definition: internal.h:40
XERUS_force_inline void transpose(double *const __restrict _out, const double *const __restrict _in, const size_t _leftDim, const size_t _rightDim)
constexpr bool hard_equal(const T _a, const T _b) noexcept
: Checks for hard equality ( == operator) without compiler warnings.
Definition: math.h:105
std::vector< TensorNode > nodes
The nodes constituting the network. The order determines the ids of the nodes.
const Tensor & get_component(const size_t _idx) const
Read access to a specific component of the TT decomposition.
Definition: ttNetwork.cpp:464
size_t corePosition
The position of the core.
Definition: ttNetwork.h:58
void transpose_if_operator(TTNetwork< isOperator > &_ttNetwork)
std::vector< std::vector< std::tuple< size_t, size_t, value_t > > > get_grouped_entries(const Tensor &_component)
Definition: ttNetwork.cpp:496
Internal class used to represent stacks of (possibly multiple) applications of TTOperators to either ...
Definition: ttStack.h:38
Representation
Flags indicating the internal representation of the data of Tensor objects.
Definition: tensor.h:89
std::vector< size_t > dimensions
Dimensions of the external indices, i.e. the dimensions of the tensor represented by the network...