xerus
a general purpose tensor library
indexedTensor_tensor_evaluate.cpp
// Xerus - A General Purpose Tensor Library
// Copyright (C) 2014-2017 Benjamin Huber and Sebastian Wolf.
//
// Xerus is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License,
// or (at your option) any later version.
//
// Xerus is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with Xerus. If not, see <http://www.gnu.org/licenses/>.
//
// For further information on Xerus visit https://libXerus.org
// or contact us at contact@libXerus.org.

#include <xerus/basic.h>
#include <xerus/index.h>
#include <xerus/misc/check.h>
#include <xerus/tensor.h>

#include <memory>

#include <xerus/misc/internal.h>

namespace xerus {

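	// Odometer-style pointer advance: _i is the new value of a row-major counter over _baseDimensions, and the pointer
	// is moved by the corresponding per-index _stepSizes; whenever an index wraps around, the carry is propagated to the
	// next more significant index. Worked example: for _baseDimensions = {2, 3} and _stepSizes = {s0, s1}, the call with
	// _i = 3 performs the carry of the last index back to zero and yields a net move of s0 - 2*s1.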
	void increase_indices(const size_t _i, value_t*& _outPosition, const std::vector<size_t>& _stepSizes, const std::vector<size_t>& _baseDimensions ) {
		size_t index = _stepSizes.size()-1;
		_outPosition += _stepSizes[index];
		size_t multStep = _baseDimensions[index];
		while(_i%multStep == 0) {
			_outPosition -= _baseDimensions[index]*_stepSizes[index]; // "reset" current index to 0
			--index; // Advance to next index
			_outPosition += _stepSizes[index]; // increase next index
			multStep *= _baseDimensions[index]; // next stepSize
		}
	}

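	// Performs a simple reshuffle of a tensor's indices: _shuffle[i] gives the new position of the i-th index of _base,
	// so _out.dimensions[_shuffle[i]] == _base.dimensions[i]. This is much less powerful than a full evaluate(), but
	// more efficient. Illustrative example (not part of the original file): for a matrix A with dimensions {m, n},
	// reshuffle(B, A, {1, 0}) yields the transpose, i.e. B has dimensions {n, m} and B[j*m + i] == A[i*n + j].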
	void reshuffle(Tensor& _out, const Tensor& _base, const std::vector<size_t>& _shuffle) {
		IF_CHECK(
			INTERNAL_CHECK(_shuffle.size() == _base.degree(), "IE");
			for(size_t x = 0; x < _base.degree(); ++x) {
				REQUIRE(_shuffle[x] < _base.degree(), _shuffle[x] << " is not a valid new position!");
				REQUIRE(misc::count(_shuffle, x) == 1, x << " illegally appeared twice.");
			}
		)
		
		// Count how many indices in the back do not change and calculate the blockSize
		long lastShuffled = -1;
		for(size_t i = 0; i < _shuffle.size(); ++i) {
			if(_shuffle[i] != i) { lastShuffled = long(i); }
		}
		
		const size_t numToShuffle = size_t(lastShuffled+1);
		const size_t lastToShuffle = _base.dimensions.size(); // NOTE inlining this into the next line causes an internal segfault in gcc 4.8.1
		const size_t blockSize = misc::product(_base.dimensions, numToShuffle, lastToShuffle);
		
		if(blockSize == _base.size) { // No actual reshuffling
			_out = _base;
			return;
		}
		
		std::vector<size_t> outDimensions(_base.degree());
		for(size_t i = 0; i < _base.degree(); ++i) {
			outDimensions[_shuffle[i]] = _base.dimensions[i];
		}
		
		std::vector<size_t> stepSizes(numToShuffle);
		for(size_t i = 0; i < numToShuffle; ++i) {
			stepSizes[i] = misc::product(outDimensions, _shuffle[i]+1, numToShuffle)*blockSize;
		}
		
		// Rescue _base from the reset in case _out and _base coincide
		std::unique_ptr<Tensor> rescueSlot;
		const Tensor* usedBase;
		if(&_out == &_base) {
			rescueSlot.reset(new Tensor(std::move(_out)));
			usedBase = rescueSlot.get();
		} else {
			usedBase = &_base;
		}
		
		_out.reset(std::move(outDimensions), usedBase->representation, Tensor::Initialisation::None);
		
		if(usedBase->is_dense()) {
			const size_t numBlocks = misc::product(usedBase->dimensions, 0, numToShuffle);
			value_t* outPosition = _out.override_dense_data();
			const value_t* basePosition = usedBase->get_unsanitized_dense_data();
			
			if(blockSize == 1) {
				*outPosition = *basePosition;
				for(size_t b = 1; b < numBlocks; ++b) {
					increase_indices(b, outPosition, stepSizes, usedBase->dimensions);
					*outPosition = *(basePosition + b*blockSize);
				}
			} else {
				misc::copy(outPosition, basePosition, blockSize);
				for(size_t b = 1; b < numBlocks; ++b) {
					increase_indices(b, outPosition, stepSizes, usedBase->dimensions);
					misc::copy(outPosition, basePosition + b*blockSize, blockSize);
				}
			}
			_out.factor = usedBase->factor;
			
		} else {
			const std::map<size_t, value_t>& baseEntries = usedBase->get_unsanitized_sparse_data();
			std::map<size_t, value_t>& outEntries = _out.override_sparse_data();
			
			for(const auto& entry : baseEntries) {
				size_t basePosition = entry.first;
				size_t position = basePosition%blockSize;
				basePosition /= blockSize;
				
				for(size_t i = numToShuffle; i > 0; --i) {
					position += (basePosition%usedBase->dimensions[i-1])*stepSizes[i-1];
					basePosition /= usedBase->dimensions[i-1];
				}
				outEntries.emplace(position, usedBase->factor*entry.second);
			}
		}
	}

	Tensor reshuffle(const Tensor& _base, const std::vector<size_t>& _shuffle) {
		Tensor result;
		reshuffle(result, _base, _shuffle);
		return result;
	}

	namespace internal {

		inline void increase_indices(const size_t _i, const value_t*& _oldPosition, const std::vector<size_t>& _steps, const std::vector<size_t>& _multDimensions) {
			size_t index = _steps.size()-1;
			_oldPosition += _steps[index];
			size_t multStep = _multDimensions[index];
			while(_i%multStep == 0) {
				_oldPosition -= _multDimensions[index]*_steps[index]; // "reset" current index to 0
				--index; // Advance to next index
				_oldPosition += _steps[index]; // increase next index
				multStep *= _multDimensions[index]; // next stepSize
			}
		}

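		// Sums all entries that lie on the diagonal(s) of the trace(s): starting from _oldPosition, the pointer is moved
		// _numSummations-1 times along the combined diagonal steps (_doubleSteps) and the visited values are accumulated
		// into *_newPosition.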
		inline void sum_traces( value_t* const _newPosition,
								const value_t* _oldPosition,
								const std::vector<size_t>& _doubleSteps,
								const std::vector<size_t>& _doubleMultDimensions,
								const size_t _numSummations) {
			*_newPosition = *_oldPosition;
			for(size_t k = 1; k < _numSummations; ++k) {
				increase_indices(k, _oldPosition, _doubleSteps, _doubleMultDimensions);
				*_newPosition += *_oldPosition;
			}
		}

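		// Block variant of sum_traces: instead of single values, contiguous blocks of _orderedIndicesMultDim entries are
		// copied respectively added.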
		inline void sum_traces( value_t* const _newPosition,
								const value_t* _oldPosition,
								const std::vector<size_t>& _doubleSteps,
								const std::vector<size_t>& _doubleMultDimensions,
								const size_t _numSummations,
								const size_t _orderedIndicesMultDim ) {
			misc::copy(_newPosition, _oldPosition, _orderedIndicesMultDim);
			for(size_t k = 1; k < _numSummations; ++k) {
				increase_indices(k, _oldPosition, _doubleSteps, _doubleMultDimensions);
				misc::add(_newPosition, _oldPosition, _orderedIndicesMultDim);
			}
		}


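		// Maps the linear position of a sparse entry in _base to its linear position in _out: each base index value is
		// extracted via its divisor and dimension and then weighted with the corresponding step size in _out (passed in
		// via _attributes).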
		inline size_t get_position(const std::pair<size_t, value_t>& _entry,
								   const size_t* const _baseIndexDim,
								   const size_t* const _divisors,
								   const size_t* const _attributes,
								   const size_t numIndices ) {
			size_t position = 0;
			for(size_t i = 0; i < numIndices; ++i) {
				position += ((_entry.first/_divisors[i])%_baseIndexDim[i])*_attributes[i];
			}
			return position;
		}

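		// Same mapping as get_position, but additionally filters entries: for fixed indices the entry must lie exactly at
		// the fixed position, and for traced indices it must lie on the diagonal; otherwise false is returned and the
		// entry is skipped.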
		inline bool check_position(size_t & _position,
								   const std::pair<size_t, value_t>& _entry,
								   const size_t* const _baseIndexDim,
								   const size_t* const _divisors,
								   const size_t* const _attributes,
								   const bool* const _fixedFlags,
								   const bool* const _traceFlags,
								   const size_t numIndices ) {
			_position = 0;
			// Process each index
			for(size_t i = 0; i < numIndices; ++i) {
				const size_t indexPosition = (_entry.first/_divisors[i])%_baseIndexDim[i];
				
				// The position is only changed if the index is not fixed...
				if(_fixedFlags[i]) {
					if(indexPosition != _attributes[i]) {
						return false; // The indexPosition differs from the fixed position => Do not add this value
					}
				// ... and not part of a trace.
				} else if(_traceFlags[i]) {
					if(indexPosition != (_entry.first/_divisors[_attributes[i]])%_baseIndexDim[_attributes[i]]) {
						return false; // This entry is not on the diagonal of the trace => Do not add this value
					}
				} else {
					_position += indexPosition*_attributes[i];
				}
			}
			return true;
		}
		
		inline std::vector<size_t> get_dimension_vector(const std::vector<Index>& _indices) {
			std::vector<size_t> dimensions;
			dimensions.reserve(_indices.size());
			for(const Index& idx : _indices) {
				dimensions.emplace_back(idx.dimension());
			}
			return dimensions;
		}
		
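		// Row-major step sizes for the given indices: the last index has step one, every other index has the product of
		// all dimensions to its right. Worked example: index dimensions {2, 3, 4} yield step sizes {12, 4, 1}.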
		inline std::vector<size_t> get_step_sizes(const std::vector<Index>& _indices) {
			std::vector<size_t> stepSizes(_indices.size());
			if(!_indices.empty()) {
				stepSizes.back() = 1;
				for(size_t i = stepSizes.size(); i > 1; --i) {
					stepSizes[i-2] = stepSizes[i-1]*_indices[i-1].dimension();
				}
			}
			return stepSizes;
		}


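		// Evaluates an indexed expression with a single tensor operand: the entries of _base are reordered into the index
		// order requested by _out, while fixed indices and (partial) traces of _base are resolved on the fly.
		// Hedged usage sketch (assuming the usual indexed assignment path of the library; names are for illustration only):
		//     Index i, j;
		//     B(j, i) = A(i, j);   // ends up in evaluate(...) and writes the transpose of A into B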
		void evaluate(IndexedTensorWritable<Tensor>&& _out, IndexedTensorReadOnly<Tensor>&& _base) {
			// Assign the indices
			_base.assign_indices();
			_out.assign_indices();
			
			
			// Assign the dimensions
			_base.assign_index_dimensions();
			_out.assign_index_dimensions();
			
			#ifndef XERUS_DISABLE_RUNTIME_CHECKS // Perform a complete check of whether the input is valid
				// Check base indices
				for(size_t i = 0; i < _base.indices.size(); ++i) {
					// If the index is fixed we don't expect to find it anywhere
					if(_base.indices[i].fixed()) {
						REQUIRE(_base.indices[i].span == 1, "Fixed indices must have span one. Indices are: " << _base.indices << ", total should be " << _base.indices.size() << ". The problem is: " << _base.indices[i] << " -- " << _base.indices[i].fixed());
						continue;
					}
					
					// Try to find the index in _out
					size_t j = 0;
					while(j < _out.indices.size() && _base.indices[i] != _out.indices[j]) { ++j; }
					if(j < _out.indices.size()) {
						REQUIRE(_base.indices[i].dimension() == _out.indices[j].dimension(), "The indexDimensions in the target and base of evaluation must coincide. Here " << _base.indices[i].dimension() << "!=" << _out.indices[j].dimension() << ". For index " << _base.indices[i] << " == " << _out.indices[j]);
						REQUIRE(_base.indices[i].span == _out.indices[j].span, "The indexSpans in the target and base of evaluation must coincide.");
						REQUIRE(_base.indices[i].open(), "Indices appearing in the target of evaluation must not be part of a trace nor be fixed. Base: " << _base.indices << " Out: " << _out.indices);
						continue;
					}
					
					// Try to find the index a second time in base
					j = 0;
					while(j < _base.indices.size() && (i == j || _base.indices[i] != _base.indices[j])) { ++j; }
					REQUIRE(j < _base.indices.size(), "All indices of the evaluation base must either be fixed, appear in the target or be part of a trace. Base: " << _base.indices << " Out: " << _out.indices);
					REQUIRE(misc::count(_base.indices, _base.indices[i]) == 2, "Indices must appear at most two times. Base: " << _base.indices << " Out: " << _out.indices);
					REQUIRE(_base.indices[i].dimension() == _base.indices[j].dimension(), "The indexDimensions of two traced indices must coincide.");
					REQUIRE(_base.indices[i].span == 1 && _base.indices[j].span == 1, "The indexSpans of traced indices must be one (it is ambiguous what a trace of span-2 indices is meant to be).");
				}
				
				// Check out indices
				for(size_t i = 0; i < _out.indices.size(); ++i) {
					REQUIRE(_out.indices[i].open(), "Traces and fixed indices are not allowed in the target of evaluation. Base: " << _base.indices << " Out: " << _out.indices);
					REQUIRE(misc::count(_base.indices, _out.indices[i]) == 1, "Every index of the target must appear exactly once in the base of evaluation. Base: " << _base.indices << " Out: " << _out.indices);
				}
			#endif
			
			// If there is no index reshuffling, the evaluation is trivial (NOTE: this must come after index assignment so that span-zero indices are erased; otherwise empty index arrays may occur)
			if(_base.indices == _out.indices) {
				*_out.tensorObject = *_base.tensorObjectReadOnly;
				return; // We are finished here
			}
			
			// Extract base index dimensions and stepSizes
			const std::vector<size_t> baseIndexStepSizes(get_step_sizes(_base.indices));
			const std::vector<size_t> baseIndexDimensions(get_dimension_vector(_base.indices));
			
			//- - - - - - - - - - - - - - - - - - - - - - - - - - - - Full => Full - - - - - - - - - - - - - - - - - - - - - - - - - - - -
			if(_base.tensorObjectReadOnly->is_dense()) {
				// Extract out index dimensions
				const std::vector<size_t> outIndexDimensions(get_dimension_vector(_out.indices));
				
				// Count how many indices in the back are already ordered (we know that base has at least as many indices as out)
				size_t numOrderedIndices;
				for(numOrderedIndices = 0; numOrderedIndices < _out.indices.size() && _base.indices[_base.indices.size()-1-numOrderedIndices] == _out.indices[_out.indices.size()-1-numOrderedIndices]; ++numOrderedIndices) { }
				
				const size_t orderedIndexDim = baseIndexStepSizes[_base.indices.size()-numOrderedIndices-1];
				
				std::vector<size_t> stepSizes(_out.indices.size()-numOrderedIndices); // How much we have to move in base when the i-th index of _out increases
				
				size_t fixedIndexOffset = 0; // Fixed offset in base due to fixed indices
				
				std::vector<size_t> traceStepSizes; // How much we have to move in base if the index of the i-th trace is increased
				std::vector<size_t> traceDimensions; // The dimensions of the traces
				size_t totalTraceDim = 1; // Total number of summations, i.e. the product of all trace dimensions
				
				// Calculate the stepSizes for our tensor. We will march in steps of orderedIndexDim in _out.data
				for(size_t i = 0; i < _base.indices.size()-numOrderedIndices; ++i) {
					// First try to find the index in _out (if it is not contained it must be fixed or part of a trace)
					size_t outPos = 0;
					while(outPos < _out.indices.size() && _base.indices[i] != _out.indices[outPos]) { ++outPos; }
					
					if(outPos < _out.indices.size()) { // If we found it we are basically finished
						stepSizes[outPos] = baseIndexStepSizes[i];
					} else if(_base.indices[i].fixed()) { // One reason for an index not to be in _out is that it is fixed
						fixedIndexOffset += _base.indices[i].fixed_position()*baseIndexStepSizes[i];
					} else { // If the index is not fixed, then it has to be part of a trace
						for(size_t j = i+1; j < _base.indices.size()-numOrderedIndices; ++j) {
							if(_base.indices[i] == _base.indices[j]) {
								traceStepSizes.emplace_back(baseIndexStepSizes[i]+baseIndexStepSizes[j]);
								traceDimensions.emplace_back(_base.indices[i].dimension());
								totalTraceDim *= _base.indices[i].dimension();
								break;
							}
						}
					}
				}
				
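				// At this point stepSizes, fixedIndexOffset and the trace data completely describe how to walk through
				// _base while _out is written linearly: for every step, a block of orderedIndexDim contiguous entries is
				// copied, or, if traces are present, summed over all totalTraceDim diagonal positions.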
				// Start performance analysis for the low level part
				XERUS_PA_START;
				
				// Get pointers to the data and delay the deletion of the base data in case _out and _base coincide
				const value_t* oldPosition = _base.tensorObjectReadOnly->get_unsanitized_dense_data()+fixedIndexOffset;
				std::shared_ptr<value_t> delaySlot;
				if(_base.tensorObjectReadOnly == _out.tensorObjectReadOnly) { delaySlot = _out.tensorObject->get_internal_dense_data(); }
				value_t* const newPosition = _out.tensorObject->override_dense_data();
				
				// Get specific sizes
				const size_t numSteps = _out.tensorObject->size/orderedIndexDim;
				
				if(orderedIndexDim == 1) { // We have to copy/add single entries
					if(totalTraceDim == 1) { // We don't need to sum any traces
						newPosition[0] = *oldPosition;
						for(size_t i = 1; i < numSteps; ++i) {
							increase_indices(i, oldPosition, stepSizes, outIndexDimensions);
							newPosition[i] = *oldPosition;
						}
					} else { // We have to add traces
						sum_traces(newPosition, oldPosition, traceStepSizes, traceDimensions, totalTraceDim);
						for(size_t i = 1; i < numSteps; ++i) {
							increase_indices(i, oldPosition, stepSizes, outIndexDimensions);
							sum_traces(newPosition+i, oldPosition, traceStepSizes, traceDimensions, totalTraceDim);
						}
					}
				} else { // We can copy/add larger blocks
					if(totalTraceDim == 1) { // We don't need to sum any traces
						misc::copy(newPosition, oldPosition, orderedIndexDim);
						for(size_t i = 1; i < numSteps; ++i) {
							increase_indices(i, oldPosition, stepSizes, outIndexDimensions);
							misc::copy(newPosition + i*orderedIndexDim, oldPosition, orderedIndexDim);
						}
					} else { // We have to add traces
						sum_traces(newPosition, oldPosition, traceStepSizes, traceDimensions, totalTraceDim, orderedIndexDim);
						for(size_t i = 1; i < numSteps; ++i) {
							increase_indices(i, oldPosition, stepSizes, outIndexDimensions);
							sum_traces(newPosition + i*orderedIndexDim, oldPosition, traceStepSizes, traceDimensions, totalTraceDim, orderedIndexDim);
						}
					}
				}
				XERUS_PA_END("Evaluation", "Full->Full", misc::to_string(_base.tensorObjectReadOnly->dimensions)+" ==> " + misc::to_string(_out.tensorObject->dimensions));
				
				
				// Propagate the constant factor, since it is not applied during the dense copy
				_out.tensorObject->factor = _base.tensorObjectReadOnly->factor;
			}
			//- - - - - - - - - - - - - - - - - - - - - - - - - - - - Sparse => Sparse - - - - - - - - - - - - - - - - - - - - - - - - - - - -
			else if(_base.tensorObjectReadOnly->is_sparse()) {
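				// Runtime-sized scratch arrays, one entry per base index. The VLA macro below only wraps
				// misc::make_unique_array so that no actual variable length arrays are needed.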
				#define VLA(T, name) auto name##_store = xerus::misc::make_unique_array(new T); const auto & (name) = name##_store.get();
				VLA(bool[_base.indices.size()]  , fixedFlags); // Flag for each index indicating whether the index is fixed
				VLA(bool[_base.indices.size()]  , traceFlags); // Flag for each index indicating whether the index is part of a trace
				VLA(size_t[_base.indices.size()], attributes); // Either the step size in _out, the value of a fixed index or the position of the other part of a trace
				#undef VLA
				bool peacefullIndices = true;
				
				const std::vector<size_t> outIndexStepSizes(get_step_sizes(_out.indices));
				
				// Process base indices
				for(size_t i = 0; i < _base.indices.size(); ++i) {
					// Check whether the index is fixed
					if(_base.indices[i].fixed()) {
						fixedFlags[i] = true;
						traceFlags[i] = false;
						attributes[i] = _base.indices[i].fixed_position();
						peacefullIndices = false;
						continue;
					}
					
					// Try to find the index in out
					size_t outPos = 0;
					while(outPos < _out.indices.size() && _out.indices[outPos] != _base.indices[i]) { ++outPos; }
					if(outPos < _out.indices.size()) {
						fixedFlags[i] = false;
						traceFlags[i] = false;
						attributes[i] = outIndexStepSizes[outPos];
						continue;
					}
					
					// If the index is not in out and not fixed then it has to be part of a trace
					fixedFlags[i] = false;
					traceFlags[i] = true;
					peacefullIndices = false;
					for(attributes[i] = 0; _base.indices[i] != _base.indices[attributes[i]] || attributes[i] == i; ++attributes[i]) { }
				}
				
				// Get direct access to the entries and delay the deletion of the _base data in case _base and _out coincide
				const std::map<size_t, value_t>& baseEntries = _base.tensorObjectReadOnly->get_unsanitized_sparse_data();
				std::shared_ptr<std::map<size_t, value_t>> delaySlot;
				if(_base.tensorObjectReadOnly == _out.tensorObjectReadOnly) { delaySlot = _out.tensorObject->get_internal_sparse_data(); }
				std::map<size_t, value_t>& outEntries = _out.tensorObject->override_sparse_data(); // Takes care that no entries are present
				
				const value_t factor = _base.tensorObjectReadOnly->factor;
				
				// Start performance analysis for the low level part
				XERUS_PA_START;
				
				if(peacefullIndices) {
					for(const auto& entry : baseEntries) {
						outEntries.emplace(get_position(entry, baseIndexDimensions.data(), baseIndexStepSizes.data(), attributes, _base.indices.size()), factor*entry.second);
					}
				} else {
					size_t newPosition;
					for(const auto& entry : baseEntries) {
						if(check_position(newPosition, entry, baseIndexDimensions.data(), baseIndexStepSizes.data(), attributes, fixedFlags, traceFlags, _base.indices.size())) {
							outEntries[newPosition] += factor*entry.second;
						}
					}
				}
				XERUS_PA_END("Evaluation", "Sparse->Sparse", misc::to_string(_base.tensorObjectReadOnly->dimensions)+" ==> " + misc::to_string(_out.tensorObjectReadOnly->dimensions));
			}
		}
	} // namespace internal
} // namespace xerus