xerus
a general purpose tensor library
tensorNetwork.cpp
Go to the documentation of this file.
1 // Xerus - A General Purpose Tensor Library
2 // Copyright (C) 2014-2017 Benjamin Huber and Sebastian Wolf.
3 //
4 // Xerus is free software: you can redistribute it and/or modify
5 // it under the terms of the GNU Affero General Public License as published
6 // by the Free Software Foundation, either version 3 of the License,
7 // or (at your option) any later version.
8 //
9 // Xerus is distributed in the hope that it will be useful,
10 // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 // GNU Affero General Public License for more details.
13 //
14 // You should have received a copy of the GNU Affero General Public License
15 // along with Xerus. If not, see <http://www.gnu.org/licenses/>.
16 //
17 // For further information on Xerus visit https://libXerus.org
18 // or contact us at contact@libXerus.org.
19 
25 #include <xerus/tensorNetwork.h>
26 
27 #include <fstream>
28 
31 #include <xerus/misc/math.h>
33 #include <xerus/misc/fileIO.h>
34 #include <xerus/misc/internal.h>
35 
36 #include <xerus/basic.h>
37 #include <xerus/index.h>
38 #include <xerus/tensor.h>
43 
44 namespace xerus {
45  /*- - - - - - - - - - - - - - - - - - - - - - - - - - Constructors - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
47  nodes.emplace_back(TensorNode(std::make_unique<Tensor>()));
48  }
49 
50 
	/// @brief Constructs a single-node network holding a copy of @a _other; the network's
	/// external dimensions are taken from the tensor's dimensions.
	TensorNetwork::TensorNetwork(Tensor _other) : dimensions(_other.dimensions) { //NOTE don't use std::move here, because we need _other to be untouched to move it later
		nodes.emplace_back(std::make_unique<Tensor>(std::move(_other)), init_from_dimension_array());
	}
54 
55 
	/// @brief Constructs a single-node network that takes ownership of the given tensor.
	/// The member initializer reads the dimensions before the pointer is moved in the body.
	TensorNetwork::TensorNetwork( std::unique_ptr<Tensor>&& _tensor) : dimensions(_tensor->dimensions) {
		nodes.emplace_back(std::move(_tensor), init_from_dimension_array());
	}
59 
60 
	/// @brief Constructs a network of degree @a _degree with a single node whose
	/// dimensions are all equal to one.
	TensorNetwork::TensorNetwork(size_t _degree) : dimensions(std::vector<size_t>(_degree, 1)) {
		nodes.emplace_back(std::make_unique<Tensor>(std::vector<size_t>(_degree, 1)), init_from_dimension_array());
	}
64 
65 
67  if(_nodeStatus == ZeroNode::Add) {
68  nodes.emplace_back(TensorNode(std::make_unique<Tensor>()));
69  }
70  }
71 
73  return new TensorNetwork(*this);
74  }
75 
76 
77  /*- - - - - - - - - - - - - - - - - - - - - - - - - - Internal Helper functions - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
78 
79  std::vector<TensorNetwork::Link> TensorNetwork::init_from_dimension_array() {
80  std::vector<TensorNetwork::Link> newLinks;
81  for (size_t d = 0; d < dimensions.size(); ++d) {
82  externalLinks.emplace_back(0, d, dimensions[d], false);
83  newLinks.emplace_back(-1, d, dimensions[d], true);
84  }
85  return newLinks;
86  }
87 
88 
	/**
	 * @brief Creates a copy of the network containing only the nodes for which @a _idF
	 *        returns true; links that cross the subnet boundary become new external links.
	 * NOTE(review): this listing appears to omit the declaration of 'cpy' (and possibly
	 * another line) — verify against the original source file.
	 */
	TensorNetwork TensorNetwork::stripped_subnet(const std::function<bool(size_t)>& _idF) const {
		cpy.nodes.resize(nodes.size());
		cpy.dimensions = dimensions;
		for (size_t id = 0; id < nodes.size(); ++id) {
			if (!_idF(id)) { continue; }
			cpy.nodes[id] = nodes[id].strippped_copy();
			for (size_t i = 0; i < cpy.nodes[id].neighbors.size(); ++i) {
				TensorNetwork::Link &l = cpy.nodes[id].neighbors[i];
				if (!l.external) { // Link was not external before
					if (!_idF(l.other)) { // ...but is "external" to this subnet
						l.external = true;
						l.indexPosition = cpy.externalLinks.size();
						cpy.dimensions.emplace_back(l.dimension);
						cpy.externalLinks.emplace_back(id, i, l.dimension, false);
					}
				}
			}
		}

		// Drop external links that point to excluded nodes and shift the index
		// positions of the surviving ones by the number of removals before them.
		size_t correction = 0;
		std::vector<long> toErase;
		for (size_t eid = 0; eid < cpy.externalLinks.size(); ++eid) {
			TensorNetwork::Link &l = cpy.externalLinks[eid];
			if (!_idF(l.other)) {
				toErase.emplace_back(long(eid));
				correction++;
			} else {
				INTERNAL_CHECK(cpy.nodes[l.other].neighbors[l.indexPosition].external, "ie");
				INTERNAL_CHECK(cpy.nodes[l.other].neighbors[l.indexPosition].indexPosition == eid, "ie");
				cpy.nodes[l.other].neighbors[l.indexPosition].indexPosition -= correction;
			}
		}

		// Erase back to front so the recorded positions stay valid.
		for (size_t i = toErase.size(); i > 0; --i) {
			cpy.dimensions.erase(cpy.dimensions.begin()+toErase[i-1]);
			cpy.externalLinks.erase(cpy.externalLinks.begin()+toErase[i-1]);
		}

		cpy.require_valid_network(false);
		return cpy;
	}
132 
133 
136 
137  if(degree() == 0) {
138  std::set<size_t> all;
139  for(size_t i = 0; i < nodes.size(); ++i) { all.emplace_hint(all.end(), i); }
140  contract(all);
141 
142  } else {
143  std::vector<bool> seen(nodes.size(), false);
144  std::vector<size_t> expansionStack;
145  expansionStack.reserve(nodes.size());
146 
147  // Starting at every external link...
148  for (const TensorNetwork::Link& el : externalLinks) {
149  if(!seen[el.other]) {
150  seen[el.other] = true;
151  expansionStack.push_back(el.other);
152  }
153  }
154 
155  // ...traverse the connected nodes in a depth-first manner.
156  while (!expansionStack.empty()) {
157  const size_t curr = expansionStack.back();
158  expansionStack.pop_back();
159 
160  // Add unseen neighbors
161  for (const TensorNetwork::Link& n : nodes[curr].neighbors) {
162  if ( !n.external && !seen[n.other] ) {
163  seen[n.other] = true;
164  expansionStack.push_back(n.other);
165  }
166  }
167  }
168 
169  // Construct set of all unseen nodes...
170  std::set<size_t> toContract;
171  for (size_t i = 0; i < nodes.size(); ++i) {
172  if (!seen[i]) {
173  toContract.emplace_hint(toContract.end(), i);
174  }
175  }
176 
177  // ...and contract them
178  if (!toContract.empty()) {
179  const size_t remaining = contract(toContract);
180 
181  INTERNAL_CHECK(nodes[remaining].degree() == 0, "Internal Error.");
182 
183  // Remove contracted degree-0 tensor
184  nodes[remaining].erased = true;
185  for(size_t i = 0; i < nodes.size(); ++i) {
186  if(!nodes[i].erased) {
187  *nodes[i].tensorObject *= (*nodes[remaining].tensorObject)[0];
188  break;
189  }
190  INTERNAL_CHECK(i < nodes.size()-1, "Internal Error.");
191  }
192  }
193  }
194 
195  sanitize();
196 
197  INTERNAL_CHECK(!nodes.empty(), "Internal error");
198  }
199 
200 
	/**
	 * @brief Finds the edge connecting @a _nodeA and @a _nodeB.
	 * @return pair of (position of the edge among _nodeA's neighbors,
	 *                  position of the edge among _nodeB's neighbors).
	 * In debug builds it is checked that exactly one such edge exists.
	 */
	std::pair< size_t, size_t > TensorNetwork::find_common_edge(const size_t _nodeA, const size_t _nodeB) const {
		size_t posA=~0ul, posB=~0ul;

		// Find common edge in nodeA
		IF_CHECK(bool foundCommon = false;)
		for(size_t i = 0; i < nodes[_nodeA].neighbors.size(); ++i) {
			if(nodes[_nodeA].neighbors[i].other == _nodeB) {
				posA = i;
				REQUIRE(!foundCommon, "TN round/move core does not work if the two nodes share more than one link.");
				IF_CHECK( foundCommon = true; )
				IF_NO_CHECK( break; )
			}
		}
		REQUIRE(foundCommon, "TN round does not work if the two nodes share no link.");

		// The corresponding position at _nodeB is stored inside the link itself.
		posB = nodes[_nodeA].neighbors[posA].indexPosition;

		return std::pair<size_t, size_t>(posA, posB);
	}
220 
221 
	/**
	 * @brief Contracts all self-links (traces) of node @a _nodeId in place.
	 * After each trace the two traced modes are removed from the node, and the index
	 * positions stored in all links that refer to later modes of this node are shifted.
	 */
	void TensorNetwork::perform_traces(const size_t _nodeId) {
		for (size_t i = 0; i < nodes[_nodeId].degree(); ++i) {
			const TensorNetwork::Link &link = nodes[_nodeId].neighbors[i];
			if (link.links(_nodeId)) {
				// Trace out modes i and link.indexPosition of the node's tensor.
				nodes[_nodeId].tensorObject->perform_trace(i, link.indexPosition);

				// Copy the neighbors before mutating them below.
				const std::vector<Link> linkCopy(nodes[_nodeId].neighbors);

				// Links between the two traced modes lose one preceding mode...
				for(size_t j = i+1; j < link.indexPosition; ++j) {
					const Link& otherLink = linkCopy[j];
					if(otherLink.external) {
						externalLinks[otherLink.indexPosition].indexPosition -= 1;
					} else {
						nodes[otherLink.other].neighbors[otherLink.indexPosition].indexPosition -= 1;
					}
				}

				// ...links after the second traced mode lose two.
				for(size_t j = link.indexPosition+1; j < nodes[_nodeId].degree(); ++j) {
					const Link& otherLink = linkCopy[j];
					if(otherLink.external) {
						externalLinks[otherLink.indexPosition].indexPosition -= 2;
					} else {
						nodes[otherLink.other].neighbors[otherLink.indexPosition].indexPosition -= 2;
					}
				}

				// Erase the larger position first so the smaller one stays valid.
				nodes[_nodeId].neighbors.erase(nodes[_nodeId].neighbors.begin() + link.indexPosition);
				nodes[_nodeId].neighbors.erase(nodes[_nodeId].neighbors.begin() + i);

				//Redo this index
				i -= 1;
			}
		}
	}
256 
257 
259  std::vector<size_t> idMap(nodes.size());
260 
261  // Move nodes
262  size_t newId = 0, oldId = 0;
263  for (; oldId < nodes.size(); ++oldId) {
264  if (!nodes[oldId].erased) {
265  idMap[oldId] = newId;
266  if (newId != oldId) { std::swap(nodes[newId], nodes[oldId]); }
267  newId++;
268  }
269  }
270 
271  // Update links
272  nodes.resize(newId);
273  for (TensorNode &n : nodes) {
274  for (TensorNetwork::Link &l : n.neighbors) {
275  if (!l.external) { l.other = idMap[l.other]; }
276  }
277  }
278 
279  // Update external links
281  l.other = idMap[l.other];
282  }
283  }
284 
285 
286  /*- - - - - - - - - - - - - - - - - - - - - - - - - - Conversions - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
	/// @brief Fully contracts a copy of the network into a single Tensor whose modes
	/// are ordered according to the external links.
	TensorNetwork::operator Tensor() const {

		// Contract all nodes into one.
		std::set<size_t> all;
		for(size_t i = 0; i < nodes.size(); ++i) { all.emplace_hint(all.end(), i); }

		TensorNetwork cpy(*this);
		size_t res = cpy.contract(all);

		// The contraction may leave the external modes permuted; restore their order.
		std::vector<size_t> shuffle(degree());
		for(size_t i = 0; i < cpy.nodes[res].neighbors.size(); ++i) {
			INTERNAL_CHECK(cpy.nodes[res].neighbors[i].external, "Internal Error");
			shuffle[i] = cpy.nodes[res].neighbors[i].indexPosition;
		}
		Tensor result;

		reshuffle(result, *cpy.nodes[res].tensorObject, shuffle);

		return result;
	}
307 
308 
309  /*- - - - - - - - - - - - - - - - - - - - - - - - - - Access - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
	/// @brief Read access to a single entry, addressed by its position in the
	/// linearization of the network's dimensions (last mode varying fastest).
	value_t TensorNetwork::operator[](const size_t _position) const {

		if (degree() == 0) {
			REQUIRE(_position == 0, "Tried to access non-existing entry of TN");
			// A degree-zero network evaluates to the product of its scalar nodes.
			value_t value = 1.0;
			for(const TensorNode& node : nodes) { value *= (*node.tensorObject)[0]; }
			return value;
		}

		// Decompose the linear position into a multi-index.
		std::vector<size_t> positions(degree());
		size_t remains = _position;
		for(size_t i = degree(); i > 1; --i) {
			positions[i-1] = remains%dimensions[i-1];
			remains /= dimensions[i-1];
		}
		positions[0] = remains;
		return operator[](positions);
	}
329 
330 
333 
334  TensorNetwork partialCopy;
335  partialCopy.nodes = nodes;
336 
337  // Set all external indices in copy to the fixed values and evaluate the tensorObject accordingly
338  for(TensorNode& node : partialCopy.nodes) {
339  // Fix slates in external links
340  size_t killedDimensions = 0;
341  for(size_t i = 0; i < node.neighbors.size(); ++i) {
342  if(node.neighbors[i].external) {
343  node.tensorObject->fix_mode(i-killedDimensions, _positions[node.neighbors[i].indexPosition]);
344  killedDimensions++;
345  }
346  }
347 
348  // Remove all external links, because they don't exist anymore
349  node.neighbors.erase(std::remove_if(node.neighbors.begin(), node.neighbors.end(), [](const TensorNetwork::Link& _test){return _test.external;}), node.neighbors.end());
350 
351  // Adjust the Links
352  for(size_t i = 0; i < node.neighbors.size(); ++i) {
353  partialCopy.nodes[node.neighbors[i].other].neighbors[node.neighbors[i].indexPosition].indexPosition = i;
354  }
355  }
356 
357  // Contract the complete network (there are not external Links)
358  partialCopy.contract_unconnected_subnetworks();
359 
360  INTERNAL_CHECK(partialCopy.nodes.size() == 1, "Internal Error.");
361 
362  return (*partialCopy.nodes[0].tensorObject)[0];
363  }
364 
365 
366  /*- - - - - - - - - - - - - - - - - - - - - - - - - - Basic arithmetics - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
367 
	/// @brief Scales the represented tensor by @a _factor; the factor is absorbed
	/// into the first node only.
	void TensorNetwork::operator*=(const value_t _factor) {
		REQUIRE(!nodes.empty(), "There must not be a TTNetwork without any node");
		REQUIRE(!nodes[0].erased, "There must not be an erased node.");
		*nodes[0].tensorObject *= _factor;
	}
373 
374 
	/// @brief Divides the represented tensor by @a _divisor; applied to the first node only.
	void TensorNetwork::operator/=(const value_t _divisor) {
		REQUIRE(!nodes.empty(), "There must not be a TTNetwork without any node");
		REQUIRE(!nodes[0].erased, "There must not be an erased node.");
		*nodes[0].tensorObject /= _divisor;
	}
380 
381 
382  /*- - - - - - - - - - - - - - - - - - - - - - - - - - Indexing - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
384  return internal::IndexedTensor<TensorNetwork>(this, _indices, false);
385  }
386 
387 
389  return internal::IndexedTensor<TensorNetwork>(this, std::move(_indices), false);
390  }
391 
392 
394  return internal::IndexedTensorReadOnly<TensorNetwork>(this, _indices);
395  }
396 
397 
399  return internal::IndexedTensorReadOnly<TensorNetwork>(this, std::move(_indices));
400  }
401 
402 
403  /*- - - - - - - - - - - - - - - - - - - - - - - - - - Operator specializations - - - - - - - - - - - - - - - - - - - - - - - - - - */
405  return false; // A general tensor Network can't do anything specialized
406  }
407 
408 
410  return false; // A general tensor Network can't do anything specialized
411  }
412 
413 
415  TensorNetwork& me = *_me.tensorObject;
416  const TensorNetwork& other = *_other.tensorObjectReadOnly;
417 
418  me = other;
419 
420  _other.assign_indices();
421 
422  link_traces_and_fix((me)(_other.indices));
423 
424  _me.assign_indices();
425 
426  // Needed if &me == &other
427  const std::vector<size_t> otherDimensions = other.dimensions;
428  const std::vector<Link> otherExtLinks = other.externalLinks;
429 
430  for (size_t i = 0, dimPosA = 0; i < _me.indices.size(); dimPosA += _me.indices[i].span, ++i) {
431  size_t j = 0, dimPosB = 0;
432  while (_me.indices[i] != _other.indices[j]) {
433  dimPosB += _other.indices[j].span;
434  ++j;
435  REQUIRE( j < _other.indices.size(), "LHS Index " << _me.indices[i] << " not found in RHS " << _other.indices);
436  }
437 
438  REQUIRE(_me.indices[i].span == _other.indices[j].span, "Index spans must coincide");
439 
440  for (size_t s = 0; s < _me.indices[i].span; ++s) {
441  me.dimensions[dimPosA+s] = otherDimensions[dimPosB+s];
442  me.externalLinks[dimPosA+s] = otherExtLinks[dimPosB+s];
443  me.nodes[me.externalLinks[dimPosA+s].other].neighbors[me.externalLinks[dimPosA+s].indexPosition].indexPosition = dimPosA+s;
444  me.nodes[me.externalLinks[dimPosA+s].other].neighbors[me.externalLinks[dimPosA+s].indexPosition].dimension = me.dimensions[dimPosA+s];
445  }
446  }
447  }
448 
449  /*- - - - - - - - - - - - - - - - - - - - - - - - - - Miscellaneous - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
450 
	/// @brief Returns the degree of the network, i.e. the number of external indices.
	size_t TensorNetwork::degree() const {
		return dimensions.size();
	}
454 
455  size_t TensorNetwork::datasize() const {
456  size_t result = 0;
457  for (const TensorNode& node : nodes) {
458  result += node.tensorObject->size;
459  }
460  return result;
461  }
462 
463  void TensorNetwork::reshuffle_nodes(const std::function<size_t(size_t)>& _f) {
464  std::vector<TensorNode> newNodes(nodes.size());
465  size_t newSize = 0;
466  for (size_t i = 0; i < nodes.size(); ++i) {
467  if (nodes[i].erased) { continue; }
468  const size_t newIndex = _f(i);
469  newSize = std::max(newSize, newIndex+1);
470  REQUIRE(newNodes[newIndex].erased, "Tried to shuffle two nodes to the same new position " << newIndex << " i= " << i);
471  newNodes[newIndex] = nodes[i];
472  for (TensorNetwork::Link &l : newNodes[newIndex].neighbors) {
473  if (!l.external) { l.other = _f(l.other); }
474  }
475  }
476  nodes = newNodes;
477  nodes.resize(newSize);
478 
479  for (auto &link : externalLinks) {
480  link.other = _f(link.other);
481  }
482  }
483 
484 #ifndef XERUS_DISABLE_RUNTIME_CHECKS
	/**
	 * @brief Sanity-checks the network invariants: sizes of externalLinks and dimensions
	 *        agree, every external link and its target node link are mutually consistent,
	 *        and every internal link is mirrored by its target node.
	 * @param _check_erased if true, additionally requires that no node is marked erased.
	 * Only compiled when runtime checks are enabled.
	 */
	void TensorNetwork::require_valid_network(const bool _check_erased) const {
		REQUIRE(externalLinks.size() == dimensions.size(), "externalLinks.size() != dimensions.size()");
		REQUIRE(!nodes.empty(), "There must always be at least one node!");

		// Per external link
		for (size_t n = 0; n < externalLinks.size(); ++n) {
			const TensorNetwork::Link& el = externalLinks[n];
			REQUIRE(el.other < nodes.size(), "External link " << n << " is inconsitent. The linked node " << el.other << " does not exist, as there are only " << nodes.size() << " nodes.");
			REQUIRE(el.dimension > 0, "External link " << n << " is corrupted. The link specifies zero as dimension.");
			REQUIRE(el.dimension == dimensions[n], "External link " << n << " is inconsitent. The specified dimension " << el.dimension << " does not match the " << n << "-th dimension of the Network, which is " << dimensions[n] << ".");
			REQUIRE(!el.external, "External link " << n << " is corrupted. It specifies itself to be external, but must link to a node.");

			// The target node's link must point back at this external index.
			const TensorNode& otherNode = nodes[el.other];
			const TensorNetwork::Link& otherLink = otherNode.neighbors[el.indexPosition];
			REQUIRE(otherNode.degree() > el.indexPosition, "External link " << n << " is inconsitent. The link points to node " << el.other << " at IP " << el.indexPosition << ", but the target node only has " << otherNode.degree() << " links.");
			REQUIRE(otherLink.external, "External link " << n << " is inconsitent. The link points to node " << el.other << " at IP " << el.indexPosition << ", but the target link says it is not external.");
			REQUIRE(otherLink.indexPosition == n, "External link " << n << " is inconsitent. The link points to node " << el.other << " at IP " << el.indexPosition << ", but the nodes link points to IP " << otherLink.indexPosition << " instead of " << n << ".");
			REQUIRE(otherLink.dimension == el.dimension, "External link " << n << " is inconsitent. The link points to node " << el.other << " at IP " << el.indexPosition << ". The dimension specified by the external link is " << el.dimension << " but the one of the target link is " << otherLink.dimension << ".");
		}

		// Per node
		for (size_t n = 0; n < nodes.size(); ++n) {
			const TensorNode &currNode = nodes[n];
			REQUIRE(!_check_erased || !currNode.erased, "Node " << n << " is marked erased, although this was not allowed.");
			if (currNode.tensorObject) {
				REQUIRE(currNode.degree() == currNode.tensorObject->degree(), "Node " << n << " has is inconsitent, as its tensorObject has degree " << currNode.tensorObject->degree() << " but there are " << currNode.degree() << " links.");
			}

			// Per neighbor
			for (size_t i = 0; i < currNode.neighbors.size(); ++i) {
				const TensorNetwork::Link &el = currNode.neighbors[i];
				REQUIRE(el.dimension > 0, "n=" << n << " i=" << i);
				if (currNode.tensorObject) {
					REQUIRE(el.dimension == currNode.tensorObject->dimensions[i], "n=" << n << " i=" << i << " " << el.dimension << " vs " << currNode.tensorObject->dimensions[i]);
				}

				if(!el.external) { // externals were already checked
					REQUIRE(el.other < nodes.size(), "Inconsitent Link from node " << n << " to node " << el.other << " from IP " << i << " to IP " << el.indexPosition << ". The target node does not exist, as there are only " << nodes.size() << " nodes.");
					const TensorNode &other = nodes[el.other];
					REQUIRE(other.degree() > el.indexPosition, "Inconsitent Link from node " << n << " to node " << el.other << " from IP " << i << " to IP " << el.indexPosition << ". Link at target does not exist as there are only " << other.degree() << " links.");
					REQUIRE(!other.neighbors[el.indexPosition].external, "Inconsitent Link from node " << n << " to node " << el.other << " from IP " << i << " to IP " << el.indexPosition << ". Link at target says it is external.");
					REQUIRE(other.neighbors[el.indexPosition].other == n, "Inconsitent Link from node " << n << " to node " << el.other << " from IP " << i << " to IP " << el.indexPosition << ". Link at target links to node " << other.neighbors[el.indexPosition].other << " at IP " << other.neighbors[el.indexPosition].indexPosition);
					REQUIRE(other.neighbors[el.indexPosition].indexPosition == i, "Inconsitent Link from node " << n << " to node " << el.other << " from IP " << i << " to IP " << el.indexPosition << ". Link at target links to node " << other.neighbors[el.indexPosition].other << " at IP " << other.neighbors[el.indexPosition].indexPosition);
					REQUIRE(other.neighbors[el.indexPosition].dimension == el.dimension, "Inconsitent Link from node " << n << " to node " << el.other << " from IP " << i << " to IP " << el.indexPosition << ". Dimension of this link is " << el.dimension << " but the target link says the dimension is " << other.neighbors[el.indexPosition].dimension);
				}
			}
		}
	}
533 #else
	/// No-op variant used when runtime checks are disabled.
	void TensorNetwork::require_valid_network(bool _check_erased) const { }
536 #endif
537 
540  }
541 
542 
543  void TensorNetwork::swap_external_links(const size_t _i, const size_t _j) {
544  const TensorNetwork::Link& li = externalLinks[_i];
545  const TensorNetwork::Link& lj = externalLinks[_j];
546  nodes[li.other].neighbors[li.indexPosition].indexPosition = _j;
547  nodes[lj.other].neighbors[lj.indexPosition].indexPosition = _i;
548  std::swap(externalLinks[_i], externalLinks[_j]);
549  std::swap(dimensions[_i], dimensions[_j]);
550  }
551 
552 
554  _base.assign_indices();
555  _toInsert.assign_indices();
556 
557  TensorNetwork &base = *_base.tensorObject;
558  const TensorNetwork &toInsert = *_toInsert.tensorObjectReadOnly;
559 
560  const size_t firstNew = base.nodes.size();
561  const size_t firstNewExternal = base.externalLinks.size();
562 
563  // Insert everything
564  _base.indices.insert(_base.indices.end(), _toInsert.indices.begin(), _toInsert.indices.end());
565  base.dimensions.insert(base.dimensions.end(), toInsert.dimensions.begin(), toInsert.dimensions.end());
566  base.externalLinks.insert(base.externalLinks.end(), toInsert.externalLinks.begin(), toInsert.externalLinks.end());
567  base.nodes.insert(base.nodes.end(), toInsert.nodes.begin(), toInsert.nodes.end());
568 
569  IF_CHECK (
570  for (const Index &idx : _base.indices) {
571  REQUIRE(misc::count(_base.indices, idx) < 3, "Index must not appear three (or more) times.");
572  }
573  )
574 
575  // Sanitize the externalLinks
576  for (size_t i = firstNewExternal; i < base.externalLinks.size(); ++i) {
577  base.externalLinks[i].other += firstNew;
578  }
579 
580  // Sanitize the nodes (treating all external links as new external links)
581  for (size_t i = firstNew; i < base.nodes.size(); ++i) {
582  for(TensorNetwork::Link &l : base.nodes[i].neighbors) {
583  if (!l.external) { // Link inside the added network
584  l.other += firstNew;
585  } else { // External link
586  l.indexPosition += firstNewExternal;
587  }
588  }
589  }
590 
591  // Find traces (former contractions may have become traces due to the joining)
592  link_traces_and_fix(std::move(_base));
593 
594  _base.tensorObject->require_valid_network();
595  }
596 
597 
599  TensorNetwork &base = *_base.tensorObject;
600  _base.assign_indices();
601 
602  base.require_valid_network();
603 
604  IF_CHECK( std::set<Index> contractedIndices; )
605 
606  size_t passedDegree = 0;
607  for(size_t i = 0; i < _base.indices.size(); ) {
608  const Index& idx = _base.indices[i];
609 
610  // Search for a second occurance
611  size_t j = i+1;
612  size_t passedDegreeSecond = passedDegree + idx.span;
613  for( ; j < _base.indices.size(); passedDegreeSecond += _base.indices[j].span, ++j) {
614  if(idx == _base.indices[j]) { break; }
615  }
616 
617  if(j < _base.indices.size()) { // There is a second occurance.
618  REQUIRE(!misc::contains(contractedIndices, idx), "Indices must occur at most twice per contraction");
619  REQUIRE(idx.span == _base.indices[j].span, "Index spans do not coincide " << idx << " vs " << _base.indices[j]);
620  IF_CHECK( contractedIndices.insert(idx); )
621 
622  for (size_t n = 0; n < idx.span; ++n) {
623  const TensorNetwork::Link &link1 = base.externalLinks[passedDegree+n];
624  const TensorNetwork::Link &link2 = base.externalLinks[passedDegreeSecond+n];
625  REQUIRE(link1.dimension == link2.dimension, "Index dimensions do not coincide: ["<<n<<"] " << link1.dimension << " vs " << link2.dimension << " Indices are " << idx << " and " << _base.indices[j] << " from " << _base.indices);
626 
627  base.nodes[link1.other].neighbors[link1.indexPosition] = link2;
628  base.nodes[link2.other].neighbors[link2.indexPosition] = link1;
629  }
630 
631  // Remove external links and dimensions from network
632  base.externalLinks.erase(base.externalLinks.begin()+long(passedDegreeSecond), base.externalLinks.begin()+long(passedDegreeSecond+idx.span)); // note that passedDegreeSecond > passedDegree
633  base.externalLinks.erase(base.externalLinks.begin()+long(passedDegree), base.externalLinks.begin()+long(passedDegree+idx.span));
634  base.dimensions.erase(base.dimensions.begin()+long(passedDegreeSecond), base.dimensions.begin()+long(passedDegreeSecond+idx.span));
635  base.dimensions.erase(base.dimensions.begin()+long(passedDegree), base.dimensions.begin()+long(passedDegree+idx.span));
636 
637  // Sanitize external links
638  for(size_t k = passedDegree; k < passedDegreeSecond-idx.span; ++k) {
639  base.nodes[base.externalLinks[k].other].neighbors[base.externalLinks[k].indexPosition].indexPosition -= idx.span;
640  }
641 
642  for(size_t k = passedDegreeSecond-idx.span; k < base.externalLinks.size(); ++k) {
643  base.nodes[base.externalLinks[k].other].neighbors[base.externalLinks[k].indexPosition].indexPosition -= 2*idx.span;
644  }
645 
646  // Remove indices
647  _base.indices.erase(_base.indices.begin()+j);
648  _base.indices.erase(_base.indices.begin()+i);
649  } else {
650  passedDegree += idx.span;
651  ++i;
652  }
653  }
654 
655  // Apply fixed indices
656  passedDegree = 0;
657  for(size_t i = 0; i < _base.indices.size(); ) {
658  const Index& idx = _base.indices[i];
659 
660  if(idx.fixed()) {
661  // Fix the slates
662  for(size_t k = passedDegree; k < passedDegree+idx.span; ++k) {
663  base.fix_mode(passedDegree, idx.fixed_position());
664  }
665 
666  // Remove index
667  _base.indices.erase(_base.indices.begin()+i);
668  } else {
669  passedDegree += idx.span;
670  ++i;
671  }
672  }
673 
675  }
676 
677 
	/**
	 * @brief Rounds (truncates) the edge between @a _nodeA and @a _nodeB via an SVD
	 *        to rank at most @a _maxRank with relative accuracy @a _eps, additionally
	 *        soft-thresholding the singular values by @a _softThreshold.
	 * Both node tensors are reshuffled (if necessary) so the shared mode is in a
	 * matrix-friendly position, factorized, truncated and reshuffled back.
	 */
	void TensorNetwork::round_edge(const size_t _nodeA, const size_t _nodeB, const size_t _maxRank, const double _eps, const double _softThreshold) {

		size_t fromPos, toPos;
		std::tie(fromPos, toPos) = find_common_edge(_nodeA, _nodeB);

		Tensor& fromTensor = *nodes[_nodeA].tensorObject;
		Tensor& toTensor = *nodes[_nodeB].tensorObject;

		const size_t fromDegree = fromTensor.degree();
		const size_t toDegree = toTensor.degree();

		const size_t currRank = fromTensor.dimensions[fromPos];

		// Reshuffle From if necessary: the shared mode must be the last mode of From
		// (or the first, in which case the tensor is used transposed instead).
		const bool transFrom = (fromPos == 0);
		const bool reshuffleFrom = (!transFrom && fromPos != fromDegree-1);
		std::vector<size_t> forwardShuffleFrom;
		std::vector<size_t> backwardShuffleFrom;
		if(reshuffleFrom) {
			forwardShuffleFrom.resize(fromDegree);
			backwardShuffleFrom.resize(fromDegree);

			for(size_t i = 0; i < fromPos; ++i) {
				forwardShuffleFrom[i] = i;
				backwardShuffleFrom[i] = i;
			}

			for(size_t i = fromPos; i+1 < fromDegree; ++i) {
				forwardShuffleFrom[i+1] = i;
				backwardShuffleFrom[i] = i+1;
			}

			forwardShuffleFrom[fromPos] = fromDegree-1;
			backwardShuffleFrom[fromDegree-1] = fromPos;

			reshuffle(fromTensor, fromTensor, forwardShuffleFrom);
		}

		// Reshuffle To if necessary: the shared mode must be the first mode of To
		// (or the last, in which case the tensor is used transposed instead).
		const bool transTo = (toPos == toDegree-1);
		const bool reshuffleTo = (!transTo && toPos != 0);
		std::vector<size_t> forwardShuffleTo;
		std::vector<size_t> backwardShuffleTo;
		if(reshuffleTo) {
			forwardShuffleTo.resize(toDegree);
			backwardShuffleTo.resize(toDegree);

			for(size_t i = 0; i < toPos; ++i) {
				forwardShuffleTo[i] = i+1;
				backwardShuffleTo[i+1] = i;
			}

			for(size_t i = toPos+1; i < toDegree; ++i) {
				forwardShuffleTo[i] = i;
				backwardShuffleTo[i] = i;
			}

			forwardShuffleTo[toPos] = 0;
			backwardShuffleTo[0] = toPos;

			reshuffle(toTensor, toTensor, forwardShuffleTo);
		}

		Tensor X, S;

		// Check whether prior QR makes sense (cheaper to SVD a small core product).
		if (5*fromTensor.size*toTensor.size >= 6*misc::pow(currRank, 4) ) {
			// Separate the cores ...
			Tensor coreA, coreB;
			if(transFrom) {
				calculate_cq(coreA, fromTensor, fromTensor, 1);
			} else {
				calculate_qc(fromTensor, coreA, fromTensor, fromDegree-1);
			}

			if(transTo) {
				calculate_qc(toTensor, coreB, toTensor, toDegree-1);
			} else {
				calculate_cq(coreB, toTensor, toTensor, 1);
			}

			// ... contract them ...
			xerus::contract(X, coreA, transFrom, coreB, transTo, 1);

			// ... calculate svd ...
			calculate_svd(coreA, S, coreB, X, 1, _maxRank, _eps);

			// Soft-threshold the singular values (clamped at zero).
			S.modify_diagonal_entries([&](value_t& _d){ _d = std::max(0.0, _d - _softThreshold); });

			// ... contract S to the right ...
			xerus::contract(coreB, S, false, coreB, false, 1);

			// ... and contract the cores back to their origins.
			if(transFrom) {
				xerus::contract(fromTensor, coreA, true, fromTensor, false, 1);
			} else {
				xerus::contract(fromTensor, fromTensor, false, coreA, false, 1);
			}

			if(transTo) {
				xerus::contract(toTensor, toTensor, false, coreB, true, 1);
			} else {
				xerus::contract(toTensor, coreB, false, toTensor, false, 1);
			}
		} else {
			// Small enough: contract both tensors and SVD the product directly.
			xerus::contract(X, fromTensor, transFrom, toTensor, transTo, 1);

			calculate_svd(fromTensor, S, toTensor, X, fromDegree-1, _maxRank, _eps);

			S.modify_diagonal_entries([&](value_t& _d){ _d = std::max(0.0, _d - _softThreshold); });

			if(transTo) {
				xerus::contract(toTensor, toTensor, true, S, true, 1);
			} else {
				xerus::contract(toTensor, S, false, toTensor, false, 1);
			}

			// The SVD leaves From with the shared mode last; if it was originally
			// first (transFrom), rotate it back to the front.
			if(transFrom) {
				backwardShuffleFrom.resize(fromDegree);
				for(size_t i = 0; i+1 < fromDegree; ++i) {
					backwardShuffleFrom[i] = i+1;
				}
				backwardShuffleFrom[fromDegree-1] = 0;
				reshuffle(fromTensor, fromTensor, backwardShuffleFrom);
			}
		}


		// Undo the initial reshuffles.
		if(reshuffleFrom) {
			reshuffle(fromTensor, fromTensor, backwardShuffleFrom);
		}

		if(reshuffleTo) {
			reshuffle(toTensor, toTensor, backwardShuffleTo);
		}

		// Set the new dimension in the nodes
		nodes[_nodeA].neighbors[fromPos].dimension = nodes[_nodeA].tensorObject->dimensions[fromPos];
		nodes[_nodeB].neighbors[toPos].dimension = nodes[_nodeB].tensorObject->dimensions[toPos];
	}
819 
820 
	/**
	 * @brief Moves the "core" along the edge between nodes @a _from and @a _to:
	 *        factorizes the From tensor (QR, or rank-revealing QC/CQ if
	 *        @a _allowRankReduction) with respect to the shared mode, keeps the
	 *        orthogonal part at From and contracts the triangular part into To.
	 */
	void TensorNetwork::transfer_core(const size_t _from, const size_t _to, const bool _allowRankReduction) {
		REQUIRE(_from < nodes.size() && _to < nodes.size(), " Illegal node IDs " << _from << "/" << _to << " as there are only " << nodes.size() << " nodes");

		Tensor Q, R;
		size_t posA, posB;
		std::tie(posA, posB) = find_common_edge(_from, _to);

		Tensor& fromTensor = *nodes[_from].tensorObject;
		Tensor& toTensor = *nodes[_to].tensorObject;

		// Factorize From so that the shared mode carries the R factor.
		bool transR = false;
		if(posA == 0) {
			if(_allowRankReduction) {
				calculate_cq(R, Q, fromTensor, 1);
			} else {
				calculate_rq(R, Q, fromTensor, 1);
			}
			fromTensor = Q;
			transR = true;
		} else if(posA == nodes[_from].degree()-1) {
			if(_allowRankReduction) {
				calculate_qc(Q, R, fromTensor, nodes[_from].degree()-1);
			} else {
				calculate_qr(Q, R, fromTensor, nodes[_from].degree()-1);
			}
			fromTensor = Q;
		} else {
			// Shared mode is in the middle: rotate it to the back, factorize, rotate back.
			std::vector<size_t> forwardShuffle(nodes[_from].degree());
			std::vector<size_t> backwardShuffle(nodes[_from].degree());

			for(size_t i = 0; i < posA; ++i) {
				forwardShuffle[i] = i;
				backwardShuffle[i] = i;
			}

			for(size_t i = posA; i+1 < nodes[_from].degree(); ++i) {
				forwardShuffle[i+1] = i;
				backwardShuffle[i] = i+1;
			}

			forwardShuffle[posA] = nodes[_from].degree()-1;
			backwardShuffle[nodes[_from].degree()-1] = posA;

			reshuffle(fromTensor, fromTensor, forwardShuffle);

			if(_allowRankReduction) {
				calculate_qc(Q, R, fromTensor, nodes[_from].degree()-1);
			} else {
				calculate_qr(Q, R, fromTensor, nodes[_from].degree()-1);
			}

			reshuffle(fromTensor, Q, backwardShuffle);
		}

		// Contract R into To at the shared mode.
		if( posB == 0 ) {
			xerus::contract(toTensor, R, transR, toTensor, false, 1);

		} else if( posB == nodes[_to].degree()-1 ) {
			xerus::contract(toTensor, toTensor, false, R, !transR, 1);

		} else {
			// Shared mode is in the middle: rotate it to the front, contract, rotate back.
			std::vector<size_t> forwardShuffle(nodes[_to].degree());
			std::vector<size_t> backwardShuffle(nodes[_to].degree());

			for(size_t i = 0; i < posB; ++i) {
				forwardShuffle[i] = i+1;
				backwardShuffle[i+1] = i;
			}

			for(size_t i = posB+1; i < nodes[_to].degree(); ++i) {
				forwardShuffle[i] = i;
				backwardShuffle[i] = i;
			}

			forwardShuffle[posB] = 0;
			backwardShuffle[0] = posB;

			reshuffle(toTensor, toTensor, forwardShuffle);

			xerus::contract(toTensor, R, posA == 0, toTensor, false, 1);

			reshuffle(toTensor, toTensor, backwardShuffle);
		}

		// Set the new dimension in the nodes
		nodes[_from].neighbors[posA].dimension = fromTensor.dimensions[posA];
		nodes[_to].neighbors[posB].dimension = toTensor.dimensions[posB];
	}
910 
911 
	void TensorNetwork::fix_mode(const size_t _mode, const size_t _slatePosition) {
		// NOTE(review): one statement appears to be lost here in extraction (presumably a
		// sanity-check call) -- confirm against the upstream source.

		REQUIRE(_mode < degree(), "Invalid dimension to remove");
		REQUIRE(_slatePosition < dimensions[_mode], "Invalide _slatePosition to choose");

		// The node and the index position within that node that carry external mode _mode.
		const size_t extNode = externalLinks[_mode].other;
		const size_t extNodeIndexPos = externalLinks[_mode].indexPosition;

		// Correct the nodes external links: every external mode behind _mode moves one position forward.
		for(size_t i = _mode+1; i < dimensions.size(); ++i) {
			REQUIRE(nodes[externalLinks[i].other].neighbors[externalLinks[i].indexPosition].indexPosition > 0, "Woo");
			nodes[externalLinks[i].other].neighbors[externalLinks[i].indexPosition].indexPosition--;
		}

		externalLinks.erase(externalLinks.begin()+_mode);
		dimensions.erase(dimensions.begin()+_mode);

		// Correct the other links of the affected node: all links behind the removed slot
		// are referenced from elsewhere with an indexPosition that is now off by one.
		for(size_t i = extNodeIndexPos+1; i < nodes[extNode].neighbors.size(); ++i) {
			const Link& link = nodes[extNode].neighbors[i];
			if(link.external) {
				externalLinks[link.indexPosition].indexPosition--;
			} else {
				// Check critical self links (i.e. where index position was already modified).
				if(link.other == extNode && link.indexPosition+1 > extNodeIndexPos && link.indexPosition < i) {
					nodes[link.other].neighbors[link.indexPosition+1].indexPosition--;
				} else {
					nodes[link.other].neighbors[link.indexPosition].indexPosition--;
				}
			}
		}

		// Fix the slate in the actual component tensor and drop the corresponding link.
		nodes[extNode].tensorObject->fix_mode(extNodeIndexPos, _slatePosition);
		nodes[extNode].neighbors.erase(nodes[extNode].neighbors.begin() + extNodeIndexPos);

		// NOTE(review): two further statements appear lost here in extraction (likely a
		// contraction of now-disconnected parts and/or a validity check) -- confirm upstream.
	}
952 
953 
	void TensorNetwork::remove_slate(const size_t _mode, const size_t _slatePosition) {
		// NOTE(review): one statement appears to be lost here in extraction (presumably a
		// sanity-check call) -- confirm against the upstream source.

		REQUIRE(_mode < degree(), "invalid dimension to remove a slate from");
		REQUIRE(_slatePosition < dimensions[_mode], "invalide slate position to choose");
		// NOTE(review): the check above already implies dimensions[_mode] >= 1, so this
		// guard can never fire; the message suggests the intended condition is
		// dimensions[_mode] > 1. TODO confirm and fix upstream.
		REQUIRE(dimensions[_mode] > 0, "removing the last possible slate from this index position would result a dimension of size 0");

		// The node and index position carrying external mode _mode.
		const size_t extNode = externalLinks[_mode].other;
		const size_t extNodeIndexPos = externalLinks[_mode].indexPosition;

		// Shrink the dimension bookkeeping of the network...
		externalLinks[_mode].dimension -= 1;
		dimensions[_mode] -= 1;
		// ...and remove the slate from the component tensor, if this node owns one
		// (dataless networks, e.g. stripped subnets, have no tensorObject).
		if (nodes[extNode].tensorObject) {
			nodes[extNode].tensorObject->remove_slate(extNodeIndexPos, _slatePosition);
		}
		nodes[extNode].neighbors[extNodeIndexPos].dimension -= 1;
	}
971 
972 
973 
	void TensorNetwork::resize_mode(const size_t _mode, const size_t _newDim, const size_t _cutPos) {
		REQUIRE(_mode < degree(), "Invalid dimension given for resize_mode");
		// NOTE(review): one statement appears to be lost here in extraction (presumably a
		// sanity-check call) -- confirm against the upstream source.

		// The node and index position carrying external mode _mode.
		const size_t extNode = externalLinks[_mode].other;
		const size_t extNodeIndexPos = externalLinks[_mode].indexPosition;

		// Resize the component tensor and mirror the new dimension in all bookkeeping.
		nodes[extNode].tensorObject->resize_mode(extNodeIndexPos, _newDim, _cutPos);
		nodes[extNode].neighbors[extNodeIndexPos].dimension = _newDim;
		externalLinks[_mode].dimension = _newDim;
		dimensions[_mode] = _newDim;

		// NOTE(review): a trailing statement (likely a validity check) appears lost in extraction.
	}
988 
989 
992 
		// NOTE(review): the function header (per the documentation index:
		// void TensorNetwork::reduce_representation()) is on a line lost in extraction;
		// the statements below form its body.
		// Strategy: on a dataless copy of the network, greedily contract every edge whose
		// squared joint dimension reaches the size of one of its end nodes (i.e. the edge
		// is full rank and merging cannot increase storage), record which node ids got
		// merged, then replay those merges on the real network.
		TensorNetwork strippedNet = stripped_subnet();
		std::vector<std::set<size_t>> contractions(strippedNet.nodes.size());
		for (size_t id1=0; id1 < strippedNet.nodes.size(); ++id1) {
			TensorNode &currNode = strippedNet.nodes[id1];
			if (currNode.erased) {
				continue;
			}
			for (Link &l : currNode.neighbors) {
				if (l.external) { continue; }
				// r = product of all edge dimensions between currNode and node l.other.
				size_t r=1;
				for (Link &l2 : currNode.neighbors) {
					if (l2.other == l.other) {
						r *= l2.dimension;
					}
				}
				if (r*r >= currNode.size() || r*r >= strippedNet.nodes[l.other].size()) {
					// Record the merge in the contraction group of id1, taking over any
					// group already accumulated for the other node.
					if (contractions[id1].empty()) {
						contractions[id1].insert(id1);
					}
					if (contractions[l.other].empty()) {
						contractions[id1].insert(l.other);
					} else {
						contractions[id1].insert(contractions[l.other].begin(), contractions[l.other].end());
						contractions[l.other].clear();
					}
					strippedNet.contract(id1, l.other);
					id1 -= 1; // check the same node again in the next iteration
					break; // for-each iterator is broken, so we have to break
				}
			}
		}

		// perform the collected contractions from above
		for (std::set<size_t> &ids : contractions) {
			if (ids.size() > 1) {
				contract(ids);
			}
		}

		sanitize();
		// NOTE(review): a trailing statement (likely a validity check) appears lost in extraction.
	}
1035 
1036 
	void TensorNetwork::contract(const size_t _nodeId1, const size_t _nodeId2) {
		// Contracts node2 into node1 over ALL edges they share. The result is stored in
		// node1; node2 is marked erased. Works for dataless networks (stripped subnets)
		// by merging only the link structure.
		TensorNode &node1 = nodes[_nodeId1];
		TensorNode &node2 = nodes[_nodeId2];

		REQUIRE(!node1.erased, "It appears node1 = " << _nodeId1 << " was already contracted?");
		REQUIRE(!node2.erased, "It appears node2 = " << _nodeId2 << " was already contracted?");
		INTERNAL_CHECK(externalLinks.size() == degree(), "Internal Error: " << externalLinks.size() << " != " << degree());

		// Links of the merged node: node1's surviving links followed by node2's.
		std::vector<TensorNetwork::Link> newLinks;
		newLinks.reserve(node1.degree() + node2.degree());

		if (!node1.tensorObject) {
			// Dataless network: only merge the link structure, no tensor arithmetic.
			INTERNAL_CHECK(!node2.tensorObject, "Internal Error.");

			// Determine the links of the resulting tensor (first half)
			for ( const Link& l : node1.neighbors ) {
				if (!l.links(_nodeId1) && !l.links(_nodeId2)) {
					newLinks.emplace_back(l);
				}
			}
			// Determine the links of the resulting tensor (second half)
			for ( const Link& l : node2.neighbors ) {
				if (!l.links(_nodeId2) && !l.links(_nodeId1)) {
					newLinks.emplace_back(l);
				}
			}
		} else {
			INTERNAL_CHECK(node2.tensorObject, "Internal Error.");

			size_t contractedDimCount = 0;
			bool separated1;    // true iff node1's common links form a single contiguous block at the front or back
			bool separated2;    // likewise for node2
			bool matchingOrder; // true iff the common links appear in the same order in both nodes

			// first pass of the links of node1 to determine
			// 1. the number of links between the two nodes,
			// 2. determine whether node1 is separated (ownlinks-commonlinks) or transposed separated (commonlinks-ownlinks)
			// 3. determine the links of the resulting tensor (first half)
			if(node1.degree() > 1) {
				uint_fast8_t switches = 0; // number of own<->common transitions; <2 means one contiguous common block
				bool previous = node1.neighbors[0].links(_nodeId2);
				for (const Link& l : node1.neighbors) {
					if (l.links(_nodeId2)) {
						contractedDimCount++;
						if (!previous) {
							switches++;
							previous = true;
						}
					} else {
						newLinks.emplace_back(l);
						if (previous) {
							switches++;
							previous = false;
						}
					}
				}
				separated1 = (switches < 2);
			} else {
				// Degree <= 1: trivially separated.
				if(!node1.neighbors.empty()) {
					if(node1.neighbors[0].links(_nodeId2)) {
						contractedDimCount = 1;
					} else {
						newLinks.emplace_back(node1.neighbors[0]);
					}
				}
				separated1 = true;
			}

			// first pass of the links of node2 to determine
			// 1. whether the order of common links is correct
			// 2. whether any self-links exist
			// 3. whether the second node is separated
			// 4. determine the links of the resulting tensor (second half)
			if(node2.degree() > 1 && contractedDimCount > 0) {
				bool previous = node2.neighbors[0].links(_nodeId1);
				uint_fast8_t switches = 0;
				size_t lastPosOfCommon = 0; // detects order inversions of the common links
				matchingOrder = true;
				for (const Link& l : node2.neighbors) {
					if (l.links(_nodeId1)) {
						if (l.indexPosition < lastPosOfCommon) {
							matchingOrder = false;
						}
						lastPosOfCommon = l.indexPosition;
						if (!previous) {
							switches++;
							previous = true;
						}
					} else {
						newLinks.emplace_back(l);
						if (previous) {
							switches++;
							previous = false;
						}
					}
				}
				separated2 = (switches < 2);
			} else {
				// No common links: node2's links all survive unchanged.
				if(contractedDimCount == 0) {
					newLinks.insert(newLinks.end(), node2.neighbors.begin(), node2.neighbors.end());
				}

				separated2 = true;
				matchingOrder = true;
			}

			// Determine which (if any) node should be reshuffled
			// if order of common links does not match, reshuffle the smaller one
			if (!matchingOrder && separated1 && separated2) {
				if (node1.size() < node2.size()) {
					separated1 = false;
				} else {
					separated2 = false;
				}
			}

			// reshuffle first node: own modes first (in order), then the common modes
			// ordered as they appear in node2.
			if (!separated1) {
				std::vector<size_t> shuffle(node1.degree());
				size_t pos = 0;

				for (size_t d = 0; d < node1.degree(); ++d) {
					if (!node1.neighbors[d].links(_nodeId2)) {
						shuffle[d] = pos++;
					}
				}

				for (const Link& l : node2.neighbors) {
					if (l.links(_nodeId1)) {
						shuffle[l.indexPosition] = pos++;
					}
				}

				INTERNAL_CHECK(pos == node1.degree(), "IE");
				reshuffle(*node1.tensorObject, *node1.tensorObject, shuffle);

				matchingOrder = true;
			}

			// reshuffle second node: common modes first, then own modes (in order).
			if (!separated2) {
				std::vector<size_t> shuffle(node2.degree());
				size_t pos = 0;

				if (matchingOrder) {
					// Add common links in order as they appear in node2 to avoid both nodes changing to the opposite link order
					for (size_t d = 0; d < node2.degree(); ++d) {
						if (node2.neighbors[d].links(_nodeId1)) {
							shuffle[d] = pos++;
						}
					}
				} else {
					for (const Link& l : node1.neighbors) {
						if (l.links(_nodeId2)) {
							shuffle[l.indexPosition] = pos++;
						}
					}
				}

				for (size_t d = 0; d < node2.degree(); ++d) {
					if (!node2.neighbors[d].links(_nodeId1)) {
						shuffle[d] = pos++;
					}
				}

				INTERNAL_CHECK(pos == node2.degree(), "IE");
				reshuffle(*node2.tensorObject, *node2.tensorObject, shuffle);
			}

			// A separated node whose common block sits at the "wrong" end enters the
			// matrix-matrix product transposed instead of being reshuffled.
			const bool trans1 = separated1 && !node1.neighbors.empty() && node1.neighbors[0].links(_nodeId2);
			const bool trans2 = separated2 &&!node2.neighbors.empty() &&!(node2.neighbors[0].links(_nodeId1));

			xerus::contract(*node1.tensorObject, *node1.tensorObject, trans1, *node2.tensorObject, trans2, contractedDimCount);
		}

		// Set Nodes
		nodes[_nodeId1].neighbors = std::move(newLinks);
		nodes[_nodeId2].erase();

		// Fix indices of other nodes // note that the indices that were previously part of node1 might also have changed
		for (size_t d = 0; d < nodes[_nodeId1].neighbors.size(); ++d) {
			const Link& l = nodes[_nodeId1].neighbors[d];
			if (l.external) {
				externalLinks[l.indexPosition].other = _nodeId1;
				externalLinks[l.indexPosition].indexPosition = d;
			} else {
				nodes[l.other].neighbors[l.indexPosition].other = _nodeId1;
				nodes[l.other].neighbors[l.indexPosition].indexPosition = d;
			}
		}

		require_valid_network(false);
	}
1230 
1231 
1232  double TensorNetwork::contraction_cost(const size_t _nodeId1, const size_t _nodeId2) const {
1233  REQUIRE(!nodes[_nodeId1].erased, "It appears node1 = " << _nodeId1 << " was already contracted?");
1234  REQUIRE(!nodes[_nodeId2].erased, "It appears node2 = " << _nodeId2 << " was already contracted?");
1235 
1236  if (_nodeId1 == _nodeId2) {
1237  return static_cast<double>(nodes[_nodeId1].size()); // Costs of a trace
1238  }
1239 
1240  //TODO add correct calculation for sparse matrices
1241 
1242  // Assume cost of mxr * rxn = m*n*r (which is a rough approximation of the actual cost for openBlas/Atlas)
1243  size_t cost = nodes[_nodeId1].size();
1244  for(const Link& neighbor : nodes[_nodeId2].neighbors) {
1245  if(!neighbor.links(_nodeId1)) {
1246  cost *= neighbor.dimension;
1247  }
1248  }
1249  return static_cast<double>(cost);
1250  }
1251 
1252 
1253  size_t TensorNetwork::contract(const std::set<size_t>& _ids) {
1254  // Trace out all single-node traces
1255  for ( const size_t id : _ids ) {
1256  perform_traces(id);
1257  }
1258 
1259  if (_ids.empty()) { return ~0ul; }
1260 
1261  if (_ids.size() == 1) { return *_ids.begin(); }
1262 
1263  if (_ids.size() == 2) {
1264  auto secItr = _ids.begin(); ++secItr;
1265  contract(*_ids.begin(), *secItr);
1266  return *_ids.begin();
1267  }
1268 
1269  if (_ids.size() == 3) {
1270  auto idItr = _ids.begin();
1271  const size_t a = *idItr; TensorNode &na = nodes[a]; ++idItr;
1272  const size_t b = *idItr; TensorNode &nb = nodes[b]; ++idItr;
1273  const size_t c = *idItr; TensorNode &nc = nodes[c];
1274  double sa = 1, sb = 1, sc = 1; // sizes devided by the link dimensions between a,b,c
1275  double sab = 1, sbc = 1, sac = 1; // link dimensions
1276  for (size_t d = 0; d < na.degree(); ++d) {
1277  if (na.neighbors[d].links(b)) {
1278  sab *= static_cast<double>(na.neighbors[d].dimension);
1279  } else if (na.neighbors[d].links(c)) {
1280  sac *= static_cast<double>(na.neighbors[d].dimension);
1281  } else {
1282  sa *= static_cast<double>(na.neighbors[d].dimension);
1283  }
1284  }
1285  for (size_t d = 0; d < nb.degree(); ++d) {
1286  if (nb.neighbors[d].links(c)) {
1287  sbc *= static_cast<double>(nb.neighbors[d].dimension);
1288  } else if (!nb.neighbors[d].links(a)) {
1289  sb *= static_cast<double>(nb.neighbors[d].dimension);
1290  }
1291  }
1292  for (size_t d = 0; d < nc.degree(); ++d) {
1293  if (!nc.neighbors[d].links(a) && !nc.neighbors[d].links(b)) {
1294  sc *= static_cast<double>(nc.neighbors[d].dimension);
1295  }
1296  }
1297  // cost of contraction a-b first etc.
1298  double costAB = sa*sb*sac*sbc*(sab+sc); // (sa*sac)*sab*(sb*sbc) + sa*sb*sac*sbc*sc;
1299  double costAC = sa*sc*sab*sbc*(sac+sb);
1300  double costBC = sb*sc*sab*sac*(sbc+sa);
1301 
1302  if (costAB < costAC && costAB < costBC) {
1303  LOG(TNContract, "contraction of ab first " << sa << " " << sb << " " << sc << " " << sab << " " << sbc << " " << sac);
1304  contract(a, b); contract(a, c);
1305  } else if (costAC < costBC) {
1306  LOG(TNContract, "contraction of ac first " << sa << " " << sb << " " << sc << " " << sab << " " << sbc << " " << sac);
1307  contract(a, c); contract(a, b);
1308  } else {
1309  LOG(TNContract, "contraction of bc first " << sa << " " << sb << " " << sc << " " << sab << " " << sbc << " " << sac);
1310  contract(b, c); contract(a, b);
1311  }
1312  return a;
1313  }
1314 
1315 
1316  TensorNetwork strippedNetwork = stripped_subnet([&](size_t _id){ return misc::contains(_ids, _id); });
1317  double bestCost = std::numeric_limits<double>::max();
1318  std::vector<std::pair<size_t, size_t>> bestOrder;
1319 
1320  // Ask the heuristics
1322  c(bestCost, bestOrder, strippedNetwork);
1323  }
1324 
1325  INTERNAL_CHECK(bestCost < std::numeric_limits<double>::max() && !bestOrder.empty(), "Internal Error.");
1326 
1327  for (const std::pair<size_t,size_t> &c : bestOrder) {
1328  contract(c.first, c.second);
1329  }
1330 
1331  // Note: no sanitization as eg. TTStacks require the indices not to change after calling this function
1332  return bestOrder.back().first;
1333  }
1334 
1335 
1337  const Index i;
1338  Tensor res;
1339  res() = (*this)(i&0) * (*this)(i&0);
1340  return std::sqrt(res[0]);
1341  }
1342 
1343 
	void TensorNetwork::draw(const std::string& _filename) const {
		// Renders the network as an undirected graph in the graphviz dot language and
		// pipes it through `dot -Tsvg`, producing the file "<_filename>.svg".
		std::stringstream graphLayout;

		graphLayout << "graph G {" << std::endl;
		graphLayout << "graph [mclimit=1000, maxiter=1000, overlap = false, splines = true]" << std::endl;

		for(size_t i = 0; i < nodes.size(); ++i) {
			// Create the Nodes
			if(nodes[i].erased) {
				// Erased nodes are drawn as plain circles.
				graphLayout << "\tN"<<i<<" [label=\"N"<<i<<"\", shape=circle, fixedsize=shape, height=0.45];" << std::endl;
			} else {
				// Live nodes become record shapes with one port <ik> per mode, the node
				// name placed (roughly) in the middle field.
				// NOTE(review): a non-erased node of degree 0 would underflow degree()-1
				// below -- presumably such nodes cannot occur here; confirm.
				graphLayout << "\tN"<<i<<" [label=\"";
				for(size_t k=0; k+1 < nodes[i].degree(); ++k) {
					if(nodes[i].degree()/2 == k) {
						if(nodes[i].degree()%2 == 0) {
							graphLayout << "<i"<<k<<"> "<<i<<"| ";
						} else {
							graphLayout << "<i"<<k<<"> N"<<i<<"| ";
						}
					} else if(nodes[i].degree()%2 == 0 && nodes[i].degree()/2 == k+1) {
						graphLayout << "<i"<<k<<"> N| ";
					} else {
						graphLayout << "<i"<<k<<"> | ";
					}
				}
				if(nodes[i].degree() <= 2) {
					graphLayout << "<i"<<nodes[i].degree()-1<<"> N"<<i<<"\", shape=record, fixedsize=shape, height=0.45, style=\"rounded,filled\"];" << std::endl;
				} else {
					graphLayout << "<i"<<nodes[i].degree()-1<<">\", shape=record, fixedsize=shape, height=0.45, style=\"rounded,filled\"];" << std::endl;
				}

				// Add all links to nodes with smaller index and externals
				// (each internal edge is drawn only once, from the node with larger id).
				for(size_t j = 0; j < nodes[i].neighbors.size(); ++j) {
					if(nodes[i].neighbors[j].external) {
						graphLayout << "\t"<<nodes[i].neighbors[j].indexPosition<<" [shape=diamond, fixedsize=shape, height=0.38, width=0.38, style=filled];" << std::endl;
						graphLayout << "\tN"<<i<<":i"<<j<<" -- " << nodes[i].neighbors[j].indexPosition << " [len=1, label=\""<<nodes[i].neighbors[j].dimension<<"\"];" << std::endl;
					} else if(nodes[i].neighbors[j].other < i) {
						graphLayout << "\tN"<<i<<":i"<<j<<" -- " << "N"<<nodes[i].neighbors[j].other << ":i"<< nodes[i].neighbors[j].indexPosition<<" [label=\""<<nodes[i].neighbors[j].dimension<<"\"];" << std::endl;
					}
				}
			}
		}
		graphLayout << "}" << std::endl;
		misc::exec(std::string("dot -Tsvg > ") + _filename+".svg", graphLayout.str());
	}
1389 
1390 
1391 
1393  TensorNetwork res(*_lhs.get_copy());
1394  res *= _factor;
1395  return res;
1396  }
1397 
1399  TensorNetwork res(*_rhs.get_copy());
1400  res *= _factor;
1401  return res;
1402  }
1403 
1405  TensorNetwork res(*_lhs.get_copy());
1406  res *= 1.0/_factor;
1407  return res;
1408  }
1409 
1410 
1411  bool approx_equal(const TensorNetwork& _a, const TensorNetwork& _b, const value_t _eps) {
1412  REQUIRE(_a.dimensions == _b.dimensions, "The dimensions of the compared tensors don't match: " << _a.dimensions <<" vs. " << _b.dimensions);
1413  const Index i; // TODO no indices
1414  return frob_norm(_a(i&0) - _b(i&0)) <= _eps*(_a.frob_norm() + _b.frob_norm())/2.0;
1415  }
1416 
1417 
1418  bool approx_equal(const TensorNetwork& _a, const Tensor& _b, const value_t _eps) {
1419  return approx_equal(Tensor(_a), _b, _eps);
1420  }
1421 
1422 
1423  bool approx_equal(const Tensor& _a, const TensorNetwork& _b, const value_t _eps) {
1424  return approx_equal(_a, Tensor(_b), _eps);
1425  }
1426 
1427  namespace misc {
1428 
1429  void stream_writer(std::ostream &_stream, const TensorNetwork &_obj, const FileFormat _format) {
1430  if(_format == FileFormat::TSV) {
1431  _stream << std::setprecision(std::numeric_limits<value_t>::digits10 + 1);
1432  }
1433  // storage version number
1434  write_to_stream<size_t>(_stream, 1, _format);
1435 
1436  // Save dimensions
1437  write_to_stream(_stream, _obj.dimensions, _format);
1438  if(_format == FileFormat::TSV) { _stream << '\n'; }
1439 
1440  // save external links
1441  for(const TensorNetwork::Link& el : _obj.externalLinks) {
1442  write_to_stream<size_t>(_stream, el.other, _format);
1443  write_to_stream<size_t>(_stream, el.indexPosition, _format);
1444  write_to_stream<size_t>(_stream, el.dimension, _format);
1445  }
1446  if(_format == FileFormat::TSV) { _stream << "\n\n"; }
1447 
1448  // Save nodes with their links
1449  write_to_stream<size_t>(_stream, _obj.nodes.size(), _format);
1450  if(_format == FileFormat::TSV) { _stream << '\n'; }
1451  for(const TensorNetwork::TensorNode& node : _obj.nodes) {
1452  write_to_stream<size_t>(_stream, node.neighbors.size(), _format);
1453  for(const TensorNetwork::Link& link : node.neighbors) {
1454  write_to_stream<bool>(_stream, link.external, _format);
1455  write_to_stream<size_t>(_stream, link.other, _format);
1456  write_to_stream<size_t>(_stream, link.indexPosition, _format);
1457  write_to_stream<size_t>(_stream, link.dimension, _format);
1458  }
1459  }
1460  if(_format == FileFormat::TSV) { _stream << '\n'; }
1461 
1462  // Save tensorObjects
1463  for(const TensorNetwork::TensorNode& node : _obj.nodes) {
1464  write_to_stream(_stream, *node.tensorObject, _format);
1465  if(_format == FileFormat::TSV) { _stream << '\n'; }
1466  }
1467  }
1468 
		void stream_reader(std::istream& _stream, TensorNetwork &_obj, const FileFormat _format) {
			// Restores a TensorNetwork previously serialized by stream_writer.
			// IF_CHECK compiles the "size_t ver =" assignment only in checked builds;
			// the read from the stream itself always happens.
			IF_CHECK( size_t ver = ) read_from_stream<size_t>(_stream, _format);
			REQUIRE(ver == 1, "Unknown stream version to open (" << ver << ")");

			// Load dimensions
			read_from_stream(_stream, _obj.dimensions, _format);

			// load external links (one per dimension; the stored flag is implicit)
			_obj.externalLinks.resize(_obj.dimensions.size());
			for(TensorNetwork::Link& el : _obj.externalLinks) {
				el.external = false;
				el.other = read_from_stream<size_t>(_stream, _format);
				el.indexPosition = read_from_stream<size_t>(_stream, _format);
				el.dimension = read_from_stream<size_t>(_stream, _format);
			}

			// Load nodes with their links
			_obj.nodes.resize(read_from_stream<size_t>(_stream, _format));
			for(TensorNetwork::TensorNode& node : _obj.nodes) {
				node.neighbors.resize(read_from_stream<size_t>(_stream, _format));
				node.erased = false;
				for(TensorNetwork::Link& link : node.neighbors) {
					link.external = read_from_stream<bool>(_stream, _format);
					link.other = read_from_stream<size_t>(_stream, _format);
					link.indexPosition = read_from_stream<size_t>(_stream, _format);
					link.dimension = read_from_stream<size_t>(_stream, _format);
				}
			}

			// load tensorObjects (every node gets a freshly allocated tensor)
			for(TensorNetwork::TensorNode& node : _obj.nodes) {
				node.tensorObject.reset(new Tensor());
				read_from_stream<Tensor>(_stream, *node.tensorObject, _format);
			}

			// Final sanity check of the restored network.
			_obj.require_valid_network();
		}
1506  } // namespace misc
1507 } // namespace xerus
Header file for some additional math functions.
static void add_network_to_network(internal::IndexedTensorWritable< TensorNetwork > &&_base, internal::IndexedTensorReadOnly< TensorNetwork > &&_toInsert)
Inserts all nodes from _toInsert into _base, creating links where demanded by the indices...
void contract(const size_t _nodeId1, const size_t _nodeId2)
Contracts the nodes with indices _nodeId1 and _nodeId2.
TensorNetwork stripped_subnet(const std::function< bool(size_t)> &_idF=[](size_t){ return true;}) const
Creates a dataless copy of a subnet.
void draw(const std::string &_filename) const
Draws a graph representation of the TensorNetwork.
static void link_traces_and_fix(internal::IndexedTensorWritable< TensorNetwork > &&_base)
Finds traces defined by the indices and internally links the corresponding indices. Also applies all fixed indices.
Internal representation of an read and write and moveable indexed Tensor or TensorNetwork.
size_t degree() const
Returns the degree of the tensor.
Definition: tensor.cpp:206
void reshuffle(Tensor &_out, const Tensor &_base, const std::vector< size_t > &_shuffle)
: Performs a simple reshuffle. Much less powerful than a full evaluate, but more efficient...
void sanitize()
Removes all erased nodes from the TensorNetwork.
virtual void remove_slate(const size_t _mode, const size_t _slatePosition)
removes the given _slatePosition from the _mode. this reduces the given dimension by one ...
const std::vector< ContractionHeuristic > contractionHeuristics
ZeroNode
Internal indicator to prevent the creation of a degree-zero node in the TensorNetwork constructor...
virtual TensorNetwork * get_copy() const
Returns a new copy of the network.
Header file for the Index class.
std::pair< size_t, size_t > find_common_edge(const size_t _nodeA, const size_t _nodeB) const
Finds the position of a single common edge between two nodes.
Header file for the IndexedTensorMoveable class.
virtual void contract_unconnected_subnetworks()
Contracts all nodes that are not connected to any external links.
Header file defining lists of indexed tensors.
Header file for the standard container support functions.
#define REQUIRE
Definition: internal.h:33
Very general class used to represent arbitrary tensor networks.
Definition: tensorNetwork.h:42
size_t size
Size of the Tensor – always equal to the product of the dimensions.
Definition: tensor.h:102
Internal representation of an readable indexed Tensor or TensorNetwork.
#define LOG
Definition: internal.h:38
void stream_reader(std::istream &_stream, Tensor &_obj, const FileFormat _format)
tries to restore the tensor from a stream of data.
Definition: tensor.cpp:1807
DimensionTuple dimensions
Vector containing the individual dimensions of the tensor.
Definition: tensor.h:99
Header file for the classes defining factorisations of Tensors.
void calculate_rq(Tensor &_R, Tensor &_Q, Tensor _input, const size_t _splitPos)
Low-Level RQ calculation of a given Tensor _input = _R _Q.
Definition: tensor.cpp:1511
#define IF_CHECK
Definition: internal.h:35
size_t datasize() const
Calculates the storage requirement of the current representation.
The TensorNode class is used by the class TensorNetwork to store the component tensors defining th...
Definition: tensorNetwork.h:85
void calculate_qr(Tensor &_Q, Tensor &_R, Tensor _input, const size_t _splitPos)
Low-Level QR calculation of a given Tensor _input = _Q _R.
Definition: tensor.cpp:1492
The main namespace of xerus.
Definition: basic.h:37
Class that handles simple (non-decomposed) tensors in a dense or sparse representation.
Definition: tensor.h:70
STL namespace.
Header file for templates to store and restore objects from / to files / streams. ...
std::string exec(const std::string &_cmd)
Execute a given command.
Tensor operator*(const value_t _factor, Tensor _tensor)
Calculates the entrywise multiplication of the Tensor _tensor with the constant _factor.
Definition: tensor.cpp:1233
void perform_traces(const size_t _nodeId)
Performs all traces in the given node.
void modify_diagonal_entries(const std::function< void(value_t &)> &_f)
Modifies the diagonal entries according to the given function.
Definition: tensor.cpp:841
XERUS_force_inline void read_from_stream(std::istream &_stream, T &_obj, const FileFormat _format)
Definition: fileIO.h:71
size_t size() const noexcept
Definition: tensorNode.cpp:66
size_t fixed_position() const
: Returns the fixed position of a fixed index.
Definition: index.cpp:129
void reduce_representation()
Contracts all nodes that are joined by a full-rank edge.
size_t degree() const
Gets the degree of the TensorNetwork.
value_t operator[](const size_t _position) const
Read the value at a specific position.
Header file for the Tensor class.
Header file for some helper functions.
Internal representation of an readable and writeable indexed Tensor or TensorNetwork.
Definition: indexedTensor.h:37
void calculate_cq(Tensor &_C, Tensor &_Q, Tensor _input, const size_t _splitPos)
Low-Level CQ calculation of a given Tensor _input = _C _Q.
Definition: tensor.cpp:1548
double contraction_cost(const size_t _nodeId1, const size_t _nodeId2) const
Approximates the cost of contraction two given nodes.
void contract(Tensor &_result, const Tensor &_lhs, const bool _lhsTrans, const Tensor &_rhs, const bool _rhsTrans, const size_t _numModes)
Low-level contraction between Tensors.
Definition: tensor.cpp:1252
virtual bool specialized_contraction(std::unique_ptr< internal::IndexedTensorMoveable< TensorNetwork >> &_out, internal::IndexedTensorReadOnly< TensorNetwork > &&_me, internal::IndexedTensorReadOnly< TensorNetwork > &&_other) const
(Internal) Calculates the contraction between _me and _other and stores the result in _out...
FileFormat
possible file formats for tensor storage
Definition: fileIO.h:43
virtual void require_correct_format() const
Sanity check for the TensorNetwork and if applicable for the specific format.
void reshuffle_nodes(const std::function< size_t(size_t)> &_f)
reshuffled the nodes according to the given function
virtual void resize_mode(const size_t _mode, const size_t _newDim, const size_t _cutPos=~0ul)
Resizes a specific mode of the TensorNetwork.
virtual bool specialized_sum(std::unique_ptr< internal::IndexedTensorMoveable< TensorNetwork >> &_out, internal::IndexedTensorReadOnly< TensorNetwork > &&_me, internal::IndexedTensorReadOnly< TensorNetwork > &&_other) const
(Internal) Calculates the sum between _me and _other and stores the result in _out. Requires that *this is the tensorObjectReadOnly of _me.
TensorNetwork()
Constructs an order zero TensorNetwork.
Header file for comfort functions and macros that should not be exported in the library.
size_t degree() const noexcept
Definition: tensorNode.cpp:74
Header file for some elementary string manipulation routines.
void(* ContractionHeuristic)(double &, std::vector< std::pair< size_t, size_t >> &, TensorNetwork)
std::vector< Link > externalLinks
The open links of the network in order.
Header file for the class managing the contraction heuristics.
#define IF_NO_CHECK
Definition: internal.h:36
void require_valid_network(const bool _check_erased=true) const
Sanity checks the network.
Abstract internal representation of an read and writeable indexed Tensor or TensorNetwork.
Header file for shorthand notations that are xerus specific but used throughout the library...
double value_t
The type of values to be used by xerus.
Definition: basic.h:43
Class used to represent indices that can be used to write tensor calculations in index notation...
Definition: index.h:43
XERUS_force_inline void write_to_stream(std::ostream &_stream, const T &_value, FileFormat _format)
Definition: fileIO.h:47
void calculate_svd(Tensor &_U, Tensor &_S, Tensor &_Vt, Tensor _input, const size_t _splitPos, const size_t _maxRank, const value_t _eps)
Low-Level SVD calculation of a given Tensor _input = _U _S _Vt.
Definition: tensor.cpp:1424
void stream_writer(std::ostream &_stream, const Tensor &_obj, const FileFormat _format)
pipes all information necessary to restore the current tensor into _stream.
Definition: tensor.cpp:1781
bool approx_equal(const xerus::Tensor &_a, const xerus::Tensor &_b, const xerus::value_t _eps=EPSILON)
Checks whether two tensors are approximately equal.
Definition: tensor.cpp:1738
std::unique_ptr< Tensor > tensorObject
Save slot for the tensorObject associated with this node.
Definition: tensorNetwork.h:88
virtual void operator/=(const value_t _divisor)
Performs the entrywise division by a constant _divisor.
virtual value_t frob_norm() const
Calculates the Frobenius norm of the TensorNetwork.
std::vector< Link > init_from_dimension_array()
: Sets the externalLinks and returns an Link vector for a node, assuming that this node is the only n...
virtual void transfer_core(const size_t _from, const size_t _to, const bool _allowRankReduction=true)
Transfers the core from one given node to another.
#define INTERNAL_CHECK
Definition: internal.h:37
size_t span
The span states how many dimensions are covered by the index.
Definition: index.h:65
virtual void fix_mode(const size_t _mode, const size_t _slatePosition)
Fixes a specific mode to a specific value, effectively reducing the order by one. ...
std::vector< Link > neighbors
Vector of links defining the connection of this node to the network.
Definition: tensorNetwork.h:91
virtual void round_edge(const size_t _nodeA, const size_t _nodeB, const size_t _maxRank, const double _eps, const double _softThreshold)
Thresholds the rank between two given nodes.
bool fixed() const
Checks whether the Index represents a fixed number.
Definition: index.cpp:96
Header file for the TensorNetwork class.
void swap_external_links(const size_t _i, const size_t _j)
Swaps the external indices _i and _j, effectively changing those indices for the represented Tensor (...
std::vector< size_t > MultiIndex
: Representation of a MultiIndex, i.e. the tuple of positions for each dimension determining a single p...
Definition: tensor.h:95
internal::IndexedTensor< TensorNetwork > operator()(args... _args)
Indexes the TensorNetwork for read/write use.
virtual void operator*=(const value_t _factor)
Performs the entrywise multiplication with a constant _factor.
constexpr T pow(const T &_base, const uint32 _exp) noexcept
: Calculates _base^_exp by binary exponentiation
Definition: math.h:50
std::vector< TensorNode > nodes
The nodes constituting the network. The order determines the ids of the nodes.
internal::IndexedTensorMoveable< Tensor > operator/(internal::IndexedTensorReadOnly< Tensor > &&_b, internal::IndexedTensorReadOnly< Tensor > &&_A)
virtual void specialized_evaluation(internal::IndexedTensorWritable< TensorNetwork > &&_me, internal::IndexedTensorReadOnly< TensorNetwork > &&_other)
(Internal) Evaluates _other into _me. Requires that *this is the tensorObjectReadOnly of _me...
void calculate_qc(Tensor &_Q, Tensor &_C, Tensor _input, const size_t _splitPos)
Low-Level QC calculation of a given Tensor _input = _Q _C.
Definition: tensor.cpp:1528
std::vector< size_t > dimensions
Dimensions of the external indices, i.e. the dimensions of the tensor represented by the network...