Xerus — a general purpose tensor library
ttStack.cpp
Go to the documentation of this file.
1 // Xerus - A General Purpose Tensor Library
2 // Copyright (C) 2014-2017 Benjamin Huber and Sebastian Wolf.
3 //
4 // Xerus is free software: you can redistribute it and/or modify
5 // it under the terms of the GNU Affero General Public License as published
6 // by the Free Software Foundation, either version 3 of the License,
7 // or (at your option) any later version.
8 //
9 // Xerus is distributed in the hope that it will be useful,
10 // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 // GNU Affero General Public License for more details.
13 //
14 // You should have received a copy of the GNU Affero General Public License
15 // along with Xerus. If not, see <http://www.gnu.org/licenses/>.
16 //
17 // For further information on Xerus visit https://libXerus.org
18 // or contact us at contact@libXerus.org.
19 
25 #include <xerus/ttStack.h>
26 #include <xerus/basic.h>
27 #include <xerus/misc/check.h>
28 #include <xerus/misc/internal.h>
29 
30 #include <xerus/index.h>
31 #include <xerus/tensor.h>
32 #include <xerus/ttNetwork.h>
33 
34 
35 namespace xerus {
36  namespace internal {
37  template<bool isOperator>
38  TTStack<isOperator>::TTStack(const bool _canno, const size_t _corePos) : cannonicalization_required(_canno), futureCorePosition(_corePos) {}
39 
40 
41  template<bool isOperator>
43  return new TTStack(*this);
44  }
45 
46 
// Converts/contracts this TTStack into a proper TTNetwork: every TT position's
// stacked nodes are contracted vertically, the nodes are reordered into TT
// order, and all internal/external links are rebuilt.
// NOTE(review): the original source lines 48-49 — the function signature
// (a conversion returning TTNetwork<isOperator>) and, presumably, a
// require_valid_network() call — were dropped by the doc extraction;
// TODO confirm against the original file.
 47  template<bool isOperator>
 50 
// Degree-0 case: the whole network reduces to a single scalar node.
// NOTE(review): contract() and the return sit INSIDE the insertion loop, so
// toContract only holds node 0 when contract() runs — verify against the
// original source whether the loop was meant to close before contract().
 51  if(degree() == 0) {
 52  std::set<size_t> toContract;
 53  for(size_t i = 0; i < nodes.size(); ++i) {
 54  toContract.insert(i);
 55  contract(toContract);
 56  return TTNetwork<isOperator>(*nodes[0].tensorObject);
 57  }
 58  }
 59 
// numNodes = numComponents component nodes plus two virtual boundary nodes;
// stackSize = number of stacked layers (each contributing numNodes nodes).
 60  const size_t numComponents = degree()/N;
 61  const size_t numNodes = degree()/N+2;
 62  const size_t stackSize = nodes.size()/numNodes;
 63 
 64  INTERNAL_CHECK(nodes.size()%numNodes == 0, "IE");
 65 
 66  // Contract the stack to a TTNetwork node structure.
// For each TT position, contract the corresponding node of every stack layer
// (their ids differ by multiples of numNodes).
 67  std::set<size_t> toContract;
 68  for (size_t currentNode = 0; currentNode < numNodes; ++currentNode) {
 69  toContract.clear();
 70  for (size_t i = 0; i < stackSize; i++) {
 71  toContract.insert(currentNode+i*numNodes);
 72  }
 73  contract(toContract);
 74  }
 75 
 76  // Reshuffle the nodes to be in the correct order after contraction the nodes will have one of the ids: node, node+numNodes, node+2*numNodes,... (as those were part of the contraction) so modulus gives the correct wanted id.
 77  reshuffle_nodes([numNodes](const size_t _i){return _i%(numNodes);});
 78 
 79  INTERNAL_CHECK(nodes.size() == numNodes, "Internal Error.");
 80 
 81  // Reset to new external links
// External index i now attaches to component node i+1 (node 0 is virtual);
// indexPosition 1 is the first external slot, 2 the second (operator) slot.
 82  for(size_t i = 0; i < numComponents; ++i) {
 83  externalLinks[i].other = i+1;
 84  externalLinks[i].indexPosition = 1;
 85  }
 86  if(isOperator) {
 87  for(size_t i = 0; i < numComponents; ++i) {
 88  externalLinks[numComponents+i].other = i+1;
 89  externalLinks[numComponents+i].indexPosition = 2;
 90  }
 91  }
 92 
 93  // Fix the first virtual node
 94  nodes[0].tensorObject->reinterpret_dimensions({1});
 95  nodes[0].neighbors.resize(1);
 96  nodes[0].neighbors.front().other = 1;
 97  nodes[0].neighbors.front().indexPosition = 0;
 98 
 99  // Fix all real components
// shuffle maps each of the N+2*stackSize legs of a contracted node to its
// TT slot: [stackSize left-rank legs | external leg(s) | stackSize right-rank legs].
100  std::vector<size_t> shuffle(N+2*stackSize);
101  for (size_t i = 1; i+1 < numNodes; ++i) {
102  size_t leftCount = 0;
103  size_t leftDim = 1, rightDim = 1;
104  size_t fullDim = 1;
105  for(size_t k = 0; k < N+2*stackSize; ++k) {
106  INTERNAL_CHECK(!nodes[i].erased, "IE");
107  const TensorNetwork::Link& link = nodes[i].neighbors[k];
108  fullDim *= link.dimension;
109  if(link.external) {
// First-half external indices go to slot stackSize; second-half indices
// (operator case only) to slot stackSize+1.
110  if(link.indexPosition < numComponents) {
111  shuffle[k] = stackSize;
112  } else {
113  INTERNAL_CHECK(isOperator, "IE " << link.indexPosition << " vs " << numComponents << " vs " << degree());
114  shuffle[k] = stackSize+1;
115  }
116  } else {
// Links to the left neighbor keep their encounter order at the front.
117  if(link.other == i-1) {
118  shuffle[k] = leftCount++;
119  leftDim *= link.dimension;
120  } else {
121  // We want the order of the next node (so the next one can keep its order)
122  size_t otherPos = 0;
123  for(const TensorNetwork::Link& otherLink : nodes[i+1].neighbors) {
124  if(otherLink.other == i) {
125  if(otherLink.indexPosition == k) {
126  break;
127  } else {
128  otherPos++;
129  }
130  }
131  }
132  shuffle[k] = stackSize+N+otherPos;
133  rightDim *= link.dimension;
134  }
135  }
136  }
137  INTERNAL_CHECK(fullDim == nodes[i].tensorObject->size, "Uhh");
138  INTERNAL_CHECK(leftCount == stackSize, "IE");
139 
// Physically reorder the data, then fuse the stacked rank legs into single
// left/right ranks of dimension leftDim/rightDim.
140  xerus::reshuffle(*nodes[i].tensorObject, *nodes[i].tensorObject, shuffle);
141  if(isOperator) {
142  nodes[i].tensorObject->reinterpret_dimensions({leftDim, dimensions[i-1], dimensions[i-1+numComponents], rightDim});
143  } else {
144  nodes[i].tensorObject->reinterpret_dimensions({leftDim, dimensions[i-1], rightDim});
145  }
146 
// Rebuild neighbor links in canonical TT order: left rank, external index
// (plus second external index for operators), right rank.
147  nodes[i].neighbors.clear();
148  nodes[i].neighbors.emplace_back(i-1, i==1 ? 0 : N+1, leftDim, false);
149  nodes[i].neighbors.emplace_back(0, i-1 , dimensions[i-1], true);
150  if(isOperator) { nodes[i].neighbors.emplace_back(0, numComponents+i-1, dimensions[numComponents+i-1], true); }
151  nodes[i].neighbors.emplace_back(i+1, 0, rightDim, false);
152  }
153 
154  // Fix the second virtual node
155  nodes[numNodes-1].tensorObject->reinterpret_dimensions({1});
156  nodes[numNodes-1].neighbors.resize(1);
157  nodes[numNodes-1].neighbors.front().other = numNodes-2;
158  nodes[numNodes-1].neighbors.front().indexPosition = N+1;
159 
160  // Create actual TTNetwork
161  TTNetwork<isOperator> result;
162  static_cast<TensorNetwork&>(result) = static_cast<TensorNetwork&>(*this);
// NOTE(review): line 163 (presumably `if(cannonicalization_required) {`) was
// dropped by the doc extraction — TODO confirm.
164  result.canonicalized = false;
// NOTE(review): line 165 is missing; per the class docs it likely records the
// futureCorePosition (via corePosition or move_core) — TODO confirm.
166  } else {
167  result.canonicalized = true;
// NOTE(review): line 168 (the else-branch action) is also missing.
169  }
170  result.require_correct_format();
171 
172  return result;
173  }
174 
// Entrywise multiplication with a constant factor: the factor is absorbed
// into exactly one component tensor, which scales the entire network once.
175  template<bool isOperator>
// NOTE(review): line 176 (the signature; per the class docs
// `void operator*=(const value_t _factor)`) was dropped by the doc extraction.
177  INTERNAL_CHECK(!nodes.empty(), "There must not be a TTNetwork without any node");
178 
// NOTE(review): line 179 (the opening conditional, presumably
// `if(cannonicalization_required) {` — TODO confirm) is missing here.
// First branch: absorb the factor into the designated future core component.
180  *nodes[futureCorePosition+1].tensorObject *= _factor;
// Otherwise scale the first real component (node 0 is a virtual boundary node).
181  } else if(degree() > 0) {
182  *nodes[1].tensorObject *= _factor;
// Degree-0 network: only the single scalar node exists.
183  } else {
184  *nodes[0].tensorObject *= _factor;
185  }
186  }
187 
188 
189  template<bool isOperator>
191  operator*=(1/_divisor);
192  }
193 
194 
195  // TODO get rid of this function and use TTN cast instead
// In-place variant of the stack contraction: contracts the TTStack held in
// _me.tensorObject into a proper TT node structure (virtual boundary nodes,
// canonical link order), modifying the network behind _me directly.
// NOTE(review): the doc extraction dropped line 197 — the signature, per the
// class docs `static void contract_stack(IndexedTensorWritable<TensorNetwork>&& _me)`.
196  template<bool isOperator>
198  _me.tensorObject->require_valid_network();
199 
// Degree-0 case: collapse everything into a single scalar node.
// NOTE(review): contract() and the return sit INSIDE the insertion loop, so
// toContract only holds node 0 when contract() runs — verify against the
// original source whether the loop was meant to close before contract().
200  if(_me.tensorObject->degree() == 0) {
201  std::set<size_t> toContract;
202  for(size_t i = 0; i < _me.tensorObject->nodes.size(); ++i) {
203  toContract.insert(i);
204  _me.tensorObject->contract(toContract);
205  _me.tensorObject->reshuffle_nodes([](const size_t _i){return 0;});
206  return;
207  }
208  }
209 
// numNodes = numComponents component nodes plus two virtual boundary nodes;
// stackSize = number of stacked layers (each contributing numNodes nodes).
210  const size_t numComponents = _me.tensorObject->degree()/N;
211  const size_t numNodes = _me.tensorObject->degree()/N+2;
212  const size_t stackSize = _me.tensorObject->nodes.size()/numNodes;
213 
214  INTERNAL_CHECK(_me.tensorObject->nodes.size()%numNodes == 0, "IE");
215 
216  // Contract the stack to a TTNetwork node structure.
// For each TT position, contract the corresponding node of every stack layer
// (their ids differ by multiples of numNodes).
217  std::set<size_t> toContract;
218  for (size_t currentNode = 0; currentNode < numNodes; ++currentNode) {
219  toContract.clear();
220  for (size_t i = 0; i < stackSize; i++) {
221  toContract.insert(currentNode+i*numNodes);
222  }
223  _me.tensorObject->contract(toContract);
224  }
225 
226  // Reshuffle the nodes to be in the correct order after contraction the nodes will have one of the ids: node, node+numNodes, node+2*numNodes,... (as those were part of the contraction) so modulus gives the correct wanted id.
227  _me.tensorObject->reshuffle_nodes([numNodes](const size_t _i){return _i%(numNodes);});
228 
229  INTERNAL_CHECK(_me.tensorObject->nodes.size() == numNodes, "Internal Error.");
230 
231  // Reset to new external links
// External index i now attaches to component node i+1 (node 0 is virtual);
// indexPosition 1 is the first external slot, 2 the second (operator) slot.
232  for(size_t i = 0; i < numComponents; ++i) {
233  _me.tensorObject->externalLinks[i].other = i+1;
234  _me.tensorObject->externalLinks[i].indexPosition = 1;
235  }
236  if(isOperator) {
237  for(size_t i = 0; i < numComponents; ++i) {
238  _me.tensorObject->externalLinks[numComponents+i].other = i+1;
239  _me.tensorObject->externalLinks[numComponents+i].indexPosition = 2;
240  }
241  }
242 
243  // Fix the first virtual node
244  _me.tensorObject->nodes[0].tensorObject->reinterpret_dimensions({1});
245  _me.tensorObject->nodes[0].neighbors.resize(1);
246  _me.tensorObject->nodes[0].neighbors.front().other = 1;
247  _me.tensorObject->nodes[0].neighbors.front().indexPosition = 0;
248 
249  // Fix all real components
// shuffle maps each of the N+2*stackSize legs of a contracted node to its
// TT slot: [stackSize left-rank legs | external leg(s) | stackSize right-rank legs].
250  std::vector<size_t> shuffle(N+2*stackSize);
251  for (size_t i = 1; i+1 < numNodes; ++i) {
252  size_t leftCount = 0;
253  size_t leftDim = 1, rightDim = 1;
254  size_t fullDim = 1;
255  for(size_t k = 0; k < N+2*stackSize; ++k) {
256  INTERNAL_CHECK(!_me.tensorObject->nodes[i].erased, "IE");
257  const TensorNetwork::Link& link = _me.tensorObject->nodes[i].neighbors[k];
258  fullDim *= link.dimension;
259  if(link.external) {
// First-half external indices go to slot stackSize; second-half indices
// (operator case only) to slot stackSize+1.
260  if(link.indexPosition < numComponents) {
261  shuffle[k] = stackSize;
262  } else {
263  INTERNAL_CHECK(isOperator, "IE " << link.indexPosition << " vs " << numComponents << " vs " << _me.tensorObject->degree());
264  shuffle[k] = stackSize+1;
265  }
266  } else {
// Links to the left neighbor keep their encounter order at the front.
267  if(link.other == i-1) {
268  shuffle[k] = leftCount++;
269  leftDim *= link.dimension;
270  } else {
271  // We want the order of the next node (so the next one can keep its order)
272  size_t otherPos = 0;
273  for(const TensorNetwork::Link& otherLink : _me.tensorObject->nodes[i+1].neighbors) {
274  if(otherLink.other == i) {
275  if(otherLink.indexPosition == k) {
276  break;
277  } else {
278  otherPos++;
279  }
280  }
281  }
282  shuffle[k] = stackSize+N+otherPos;
283  rightDim *= link.dimension;
284  }
285  }
286  }
287  INTERNAL_CHECK(fullDim == _me.tensorObject->nodes[i].tensorObject->size, "Uhh");
288  INTERNAL_CHECK(leftCount == stackSize, "IE");
289 
// Physically reorder the data, then fuse the stacked rank legs into single
// left/right ranks of dimension leftDim/rightDim.
290  xerus::reshuffle(*_me.tensorObject->nodes[i].tensorObject, *_me.tensorObject->nodes[i].tensorObject, shuffle);
291  if(isOperator) {
292  _me.tensorObject->nodes[i].tensorObject->reinterpret_dimensions({leftDim, _me.tensorObject->dimensions[i-1], _me.tensorObject->dimensions[i-1+numComponents], rightDim});
293  } else {
294  _me.tensorObject->nodes[i].tensorObject->reinterpret_dimensions({leftDim, _me.tensorObject->dimensions[i-1], rightDim});
295  }
296 
// Rebuild neighbor links in canonical TT order: left rank, external index
// (plus second external index for operators), right rank.
297  _me.tensorObject->nodes[i].neighbors.clear();
298  _me.tensorObject->nodes[i].neighbors.emplace_back(i-1, i==1 ? 0 : N+1, leftDim, false);
299  _me.tensorObject->nodes[i].neighbors.emplace_back(0, i-1 , _me.tensorObject->dimensions[i-1], true);
300  if(isOperator) { _me.tensorObject->nodes[i].neighbors.emplace_back(0, numComponents+i-1, _me.tensorObject->dimensions[numComponents+i-1], true); }
301  _me.tensorObject->nodes[i].neighbors.emplace_back(i+1, 0, rightDim, false);
302  }
303 
304  // Fix the second virtual node
305  _me.tensorObject->nodes[numNodes-1].tensorObject->reinterpret_dimensions({1});
306  _me.tensorObject->nodes[numNodes-1].neighbors.resize(1);
307  _me.tensorObject->nodes[numNodes-1].neighbors.front().other = numNodes-2;
308  _me.tensorObject->nodes[numNodes-1].neighbors.front().indexPosition = N+1;
309  }
310 
311  /*- - - - - - - - - - - - - - - - - - - - - - - - - - Operator specializations - - - - - - - - - - - - - - - - - - - - - - - - - - */
312  template<bool isOperator>
314  LOG(fatal, "TTStack not supported as a storing type");
315  }
316 
317 
318  template<bool isOperator>
320  return TTNetwork<isOperator>::specialized_contraction_f(_out, std::move(_me), std::move(_other));
321  }
322 
323 
324  template<bool isOperator>
326  return TTNetwork<isOperator>::specialized_sum_f(_out, std::move(_me), std::move(_other));
327  }
328 
329 
// Frobenius norm of the stack: the lazy stack is first evaluated into a
// temporary network (via a copy, so *this stays untouched), whose norm is
// then returned.
330  template<bool isOperator>
// NOTE(review): line 331 (the signature; per the class docs
// `value_t frob_norm() const`) was dropped by the doc extraction.
332  const Index i;
// NOTE(review): line 333 is missing; it presumably declares `tmp`, the
// network the stack is evaluated into (likely a TTNetwork<isOperator>) —
// TODO confirm against the original source.
// Evaluate a copy of the stack into tmp; get_copy() transfers ownership of
// the fresh copy into the IndexedTensorMoveable.
334  tmp(i&0) = IndexedTensorMoveable<TensorNetwork>(this->get_copy(), {i&0});
335  return tmp.frob_norm();
336  }
337 
338 
339  // Explicit instantiation of the two template parameters that will be implemented in the xerus library
// TTStack<false> backs TTTensor stacks, TTStack<true> backs TTOperator stacks.
340  template class TTStack<false>;
341  template class TTStack<true>;
342  } // namespace internal
343 } // namespace xerus
Header file for CHECK and REQUIRE macros.
void contract(const size_t _nodeId1, const size_t _nodeId2)
Contracts the nodes with indices _nodeId1 and _nodeId2.
Internal representation of an read and write and moveable indexed Tensor or TensorNetwork.
const bool cannonicalization_required
Definition: ttStack.h:43
static void contract_stack(IndexedTensorWritable< TensorNetwork > &&_me)
Definition: ttStack.cpp:197
static bool specialized_contraction_f(std::unique_ptr< internal::IndexedTensorMoveable< TensorNetwork >> &_out, internal::IndexedTensorReadOnly< TensorNetwork > &&_me, internal::IndexedTensorReadOnly< TensorNetwork > &&_other)
void reshuffle(Tensor &_out, const Tensor &_base, const std::vector< size_t > &_shuffle)
: Performs a simple reshuffle. Much less powerful than a full evaluate, but more efficient...
Header file for the TTNetwork class (and thus TTTensor and TTOperator).
Header file for the Index class.
Very general class used to represent arbitrary tensor networks.
Definition: tensorNetwork.h:42
Internal representation of an readable indexed Tensor or TensorNetwork.
#define LOG
Definition: internal.h:38
Header file for the TTStack class.
static bool specialized_sum_f(std::unique_ptr< internal::IndexedTensorMoveable< TensorNetwork >> &_out, internal::IndexedTensorReadOnly< TensorNetwork > &&_me, internal::IndexedTensorReadOnly< TensorNetwork > &&_other)
Definition: ttNetwork.cpp:982
Specialized TensorNetwork class used to represent TTTensors and TTOperators.
The main namespace of xerus.
Definition: basic.h:37
bool canonicalized
Flag indicating whether the TTNetwork is canonicalized.
Definition: ttNetwork.h:51
virtual bool specialized_sum(std::unique_ptr< IndexedTensorMoveable< TensorNetwork >> &_out, IndexedTensorReadOnly< TensorNetwork > &&_me, IndexedTensorReadOnly< TensorNetwork > &&_other) const override
(Internal) Calculates the sum between _me and _other and stores the result in _out. Requires that *this is the tensorObjectReadOnly of _me.
Definition: ttStack.cpp:325
size_t degree() const
Gets the degree of the TensorNetwork.
Header file for the Tensor class.
void move_core(const size_t _position, const bool _keepRank=false)
Move the core to a new position.
Definition: ttNetwork.cpp:582
void reshuffle_nodes(const std::function< size_t(size_t)> &_f)
reshuffled the nodes according to the given function
virtual void specialized_evaluation(IndexedTensorWritable< TensorNetwork > &&, IndexedTensorReadOnly< TensorNetwork > &&) override
(Internal) Evaluates _other into _me. Requires that *this is the tensorObjectReadOnly of _me...
Definition: ttStack.cpp:313
Header file for comfort functions and macros that should not be exported in the library.
const size_t futureCorePosition
Definition: ttStack.h:45
std::vector< Link > externalLinks
The open links of the network in order.
void require_valid_network(const bool _check_erased=true) const
Sanity checks the network.
Abstract internal representation of an read and writeable indexed Tensor or TensorNetwork.
Header file for shorthand notations that are xerus specific but used throughout the library...
double value_t
The type of values to be used by xerus.
Definition: basic.h:43
virtual value_t frob_norm() const override
Calculates the frobenious norm of the TensorNetwork.
Definition: ttNetwork.cpp:782
virtual void require_correct_format() const override
Tests whether the network resembles that of a TTTensor and checks consistency with the underlying ten...
Definition: ttNetwork.cpp:290
Class used to represent indices that can be used to write tensor calculations in index notation...
Definition: index.h:43
virtual void operator/=(const value_t _divisor) override
Performs the entrywise divison by a constant _divisor.
Definition: ttStack.cpp:190
#define INTERNAL_CHECK
Definition: internal.h:37
virtual void operator*=(const value_t _factor) override
Performs the entrywise multiplication with a constant _factor.
Definition: ttStack.cpp:176
static constexpr const size_t N
The number of external links in each node, i.e. one for TTTensors and two for TTOperators.
Definition: ttStack.h:41
virtual value_t frob_norm() const override
Calculates the frobenious norm of the TensorNetwork.
Definition: ttStack.cpp:331
virtual TensorNetwork * get_copy() const override
Returns a new copy of the network.
Definition: ttStack.cpp:42
virtual bool specialized_contraction(std::unique_ptr< IndexedTensorMoveable< TensorNetwork >> &_out, IndexedTensorReadOnly< TensorNetwork > &&_me, IndexedTensorReadOnly< TensorNetwork > &&_other) const override
(Internal) Calculates the contraction between _me and _other and stores the result in _out...
Definition: ttStack.cpp:319
std::vector< TensorNode > nodes
The nodes constituting the network. The order determines the ids of the nodes.
size_t corePosition
The position of the core.
Definition: ttNetwork.h:58
Internal class used to represent stacks of (possibly multiply) applications of TTOperators to either ...
Definition: ttStack.h:38
TTStack(const bool _canno, const size_t _corePos=0)
Definition: ttStack.cpp:38
std::vector< size_t > dimensions
Dimensions of the external indices, i.e. the dimensions of the tensor represented by the network...