// Xerus - A General Purpose Tensor Library
// Copyright (C) 2014-2017 Benjamin Huber and Sebastian Wolf.
//
// Xerus is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License,
// or (at your option) any later version.
//
// Xerus is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with Xerus. If not, see <http://www.gnu.org/licenses/>.
//
// For further information on Xerus visit https://libXerus.org
// or contact us at contact@libXerus.org.

/**
 * @file
 * @brief Implementation of the ADF variants.
 */

#include <xerus/algorithms/adf.h>

#include <xerus/indexedTensorMoveable.h>
#include <xerus/misc/basicArraySupport.h>
#include <xerus/misc/internal.h>

#ifdef _OPENMP
    #include <omp.h>
#endif

namespace xerus {
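    /// @brief Calculates the two-norm of the measured values.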
    template<class MeasurmentSet>
    double ADFVariant::InternalSolver<MeasurmentSet>::calculate_norm_of_measured_values(const MeasurmentSet& _measurments) {
        value_t normMeasuredValues = 0;
        for(const value_t measurement : _measurments.measuredValues) {
            normMeasuredValues += misc::sqr(measurement);
        }
        return std::sqrt(normMeasuredValues);
    }

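    /// @brief Comparator that orders measurement indices lexicographically by their positions, either front-to-back (forward) or back-to-front.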
    template<class MeasurmentSet>
    class MeasurmentComparator {
        const bool forward;
        const size_t degree;
        const MeasurmentSet& measurments;
    public:
        MeasurmentComparator(const MeasurmentSet& _measurments, const bool _forward);

        bool operator()(const size_t _a, const size_t _b) const;
    };

    template<>
    MeasurmentComparator<SinglePointMeasurementSet>::MeasurmentComparator(const SinglePointMeasurementSet& _measurments, const bool _forward) : forward(_forward), degree(_measurments.degree()), measurments(_measurments) { }

    template<>
    bool MeasurmentComparator<SinglePointMeasurementSet>::operator()(const size_t _a, const size_t _b) const {
        if(forward) {
            for (size_t j = 0; j < degree; ++j) {
                if (measurments.positions[_a][j] < measurments.positions[_b][j]) { return true; }
                if (measurments.positions[_a][j] > measurments.positions[_b][j]) { return false; }
            }
        } else {
            for (size_t j = degree; j > 0; --j) {
                if (measurments.positions[_a][j-1] < measurments.positions[_b][j-1]) { return true; }
                if (measurments.positions[_a][j-1] > measurments.positions[_b][j-1]) { return false; }
            }
        }
        LOG(fatal, "Measurements must not appear twice."); // NOTE that the algorithm works fine even if measurements appear twice.
        return false;
    }


    template<>
    MeasurmentComparator<RankOneMeasurementSet>::MeasurmentComparator(const RankOneMeasurementSet& _measurments, const bool _forward) : forward(_forward), degree(_measurments.degree()), measurments(_measurments) { }

    template<>
    bool MeasurmentComparator<RankOneMeasurementSet>::operator()(const size_t _a, const size_t _b) const {
        if(forward) {
            for (size_t j = 0; j < degree; ++j) {
                const int res = internal::comp(measurments.positions[_a][j], measurments.positions[_b][j]);
                if(res == -1) { return true; }
                if(res == 1) { return false; }
            }
        } else {
            for (size_t j = degree; j > 0; --j) {
                const int res = internal::comp(measurments.positions[_a][j-1], measurments.positions[_b][j-1]);
                if(res == -1) { return true; }
                if(res == 1) { return false; }
            }
        }

        LOG(fatal, "Measurements must not appear twice."); // NOTE that the algorithm works fine even if measurements appear twice.
        return false;
    }

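    /// @brief Constructs either the forward or the backward stack, i.e. determines the groups of partially equal measurements.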
    template<class MeasurmentSet>
    void ADFVariant::InternalSolver<MeasurmentSet>::construct_stacks(std::unique_ptr<Tensor[]>& _stackSaveSlot, std::vector<std::vector<size_t>>& _updates, const std::unique_ptr<Tensor*[]>& _stackMem, const bool _forward) {
        using misc::approx_equal;

        // Direct reference to the stack (without the memory offset)
        Tensor** const stack(_stackMem.get()+numMeasurments);

        // Temporary map. For each stack entry (i.e. measurement number + corePosition) it gives the measurement number of a stack entry (at the same corePosition) that shall have an equal value (or its own number otherwise).
        std::vector<size_t> calculationMap(degree*numMeasurments);

        // Count how many Tensors we need for the stacks
        size_t numUniqueStackEntries = 0;

        // Create a reordering map
        perfData << "Start sorting";
        std::vector<size_t> reorderedMeasurments(numMeasurments);
        std::iota(reorderedMeasurments.begin(), reorderedMeasurments.end(), 0);
        std::sort(reorderedMeasurments.begin(), reorderedMeasurments.end(), MeasurmentComparator<MeasurmentSet>(measurments, _forward));
        perfData << "End sorting " << _forward;

        // Create the entries for the first measurement (these are always unique).
        for(size_t corePosition = 0; corePosition < degree; ++corePosition) {
            const size_t realId = reorderedMeasurments[0];
            calculationMap[realId + corePosition*numMeasurments] = realId;
            ++numUniqueStackEntries;
        }

        // Create the calculation map
        for(size_t i = 1; i < numMeasurments; ++i) {
            const size_t realId = reorderedMeasurments[i];
            const size_t realPreviousId = reorderedMeasurments[i-1];

            size_t position = 0;
            size_t corePosition = _forward ? position : degree-1-position;

            for( ;
                position < degree && approx_equal(measurments.positions[realId][corePosition], measurments.positions[realPreviousId][corePosition]);
                ++position, corePosition = _forward ? position : degree-1-position)
            {
                if( realPreviousId < realId ) {
                    calculationMap[realId + corePosition*numMeasurments] = calculationMap[realPreviousId + corePosition*numMeasurments];
                } else if(realPreviousId == calculationMap[realPreviousId + corePosition*numMeasurments]) {
                    calculationMap[realPreviousId + corePosition*numMeasurments] = realId;
                    calculationMap[realId + corePosition*numMeasurments] = realId;
                } else if( realId < calculationMap[realPreviousId + corePosition*numMeasurments]) {
                    const size_t nextOther = calculationMap[realPreviousId + corePosition*numMeasurments];
                    INTERNAL_CHECK(calculationMap[nextOther + corePosition*numMeasurments] == nextOther, "IE");
                    calculationMap[realPreviousId + corePosition*numMeasurments] = realId;
                    calculationMap[nextOther + corePosition*numMeasurments] = realId;
                    calculationMap[realId + corePosition*numMeasurments] = realId;
                } else {
                    calculationMap[realId + corePosition*numMeasurments] = calculationMap[realPreviousId + corePosition*numMeasurments];
                }
            }

            for( ; position < degree; ++position, corePosition = _forward ? position : degree-1-position) {
                calculationMap[realId + corePosition*numMeasurments] = realId;
                ++numUniqueStackEntries;
            }
        }

        // Create the stack
        numUniqueStackEntries++; // +1 for the special positions -1 and degree.
        _stackSaveSlot.reset(new Tensor[numUniqueStackEntries]);
        size_t usedSlots = 0;
        _stackSaveSlot[usedSlots++] = Tensor::ones({1}); // Special slot reserved for the position -1 and degree stacks

        // NOTE that _stackMem contains (degree+2)*numMeasurments entries and has an offset of numMeasurments (to have space for corePosition -1).

        // Set links for the special entries -1 and degree
        for(size_t i = 0; i < numMeasurments; ++i) {
            stack[i - 1*numMeasurments] = &_stackSaveSlot[0];
            stack[i + degree*numMeasurments] = &_stackSaveSlot[0];
        }

        for(size_t corePosition = 0; corePosition < degree; ++corePosition) {
            for(size_t i = 0; i < numMeasurments; ++i) {
                if(calculationMap[i + corePosition*numMeasurments] == i) {
                    _updates[corePosition].emplace_back(i);
                    stack[i + corePosition*numMeasurments] = &_stackSaveSlot[usedSlots];
                    usedSlots++;
                } else {
                    stack[i + corePosition*numMeasurments] = stack[calculationMap[i + corePosition*numMeasurments] + corePosition*numMeasurments];
                }
            }
        }

        INTERNAL_CHECK(usedSlots == numUniqueStackEntries, "Internal Error.");
        perfData << "We have " << numUniqueStackEntries << " unique stack entries. There are " << numMeasurments*degree+1 << " virtual stack entries.";
    }

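    /// @brief Resizes the unique stack tensors to correspond to the current ranks of x.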
    template<class MeasurmentSet>
    void ADFVariant::InternalSolver<MeasurmentSet>::resize_stack_tensors() {
        #pragma omp parallel for schedule(static)
        for(size_t corePosition = 0; corePosition < degree; ++corePosition) {
            for(const size_t i : forwardUpdates[corePosition]) {
                forwardStack[i + corePosition*numMeasurments]->reset({corePosition+1 == degree ? 1 : x.rank(corePosition)}, Tensor::Representation::Dense, Tensor::Initialisation::None);
            }
            for(const size_t i : backwardUpdates[corePosition]) {
                backwardStack[i + corePosition*numMeasurments]->reset({corePosition == 0 ? 1 : x.rank(corePosition - 1)}, Tensor::Representation::Dense, Tensor::Initialisation::None);
            }
        }
    }

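    /// @brief Returns a vector of tensors containing the slices of _component where the second (external) dimension is fixed.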
    template<class MeasurmentSet>
    std::vector<Tensor> ADFVariant::InternalSolver<MeasurmentSet>::get_fixed_components(const Tensor& _component) {
        std::vector<Tensor> fixedComponents(_component.dimensions[1]);

        for(size_t i = 0; i < _component.dimensions[1]; ++i) {
            fixedComponents[i](r1, r2) = _component(r1, i, r2);
        }

        return fixedComponents;
    }

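    /// @brief For each measurement, sets the backwardStack at the given _corePosition to the contraction between the component at _corePosition and the stack entry at _corePosition+1.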
    template<>
    void ADFVariant::InternalSolver<SinglePointMeasurementSet>::update_backward_stack(const size_t _corePosition, const Tensor& _currentComponent) {
        INTERNAL_CHECK(_currentComponent.dimensions[1] == x.dimensions[_corePosition], "IE");

        const size_t numUpdates = backwardUpdates[_corePosition].size();

        std::vector<Tensor> fixedComponents = get_fixed_components(_currentComponent);

        // Update the stack
        #pragma omp parallel for schedule(static)
        for(size_t u = 0; u < numUpdates; ++u) {
            const size_t i = backwardUpdates[_corePosition][u];
            contract(*backwardStack[i + _corePosition*numMeasurments], fixedComponents[measurments.positions[i][_corePosition]], false, *backwardStack[i + (_corePosition+1)*numMeasurments], false, 1);
        }
    }

    template<>
    void ADFVariant::InternalSolver<RankOneMeasurementSet>::update_backward_stack(const size_t _corePosition, const Tensor& _currentComponent) {
        INTERNAL_CHECK(_currentComponent.dimensions[1] == x.dimensions[_corePosition], "IE");

        const size_t numUpdates = backwardUpdates[_corePosition].size();

        Tensor reshuffledComponent;
        reshuffledComponent(i1, r1, r2) = _currentComponent(r1, i1, r2);

        Tensor mixedComponent({reshuffledComponent.dimensions[1], reshuffledComponent.dimensions[2]});

        // Update the stack
        #pragma omp parallel for firstprivate(mixedComponent) schedule(static)
        for(size_t u = 0; u < numUpdates; ++u) {
            const size_t i = backwardUpdates[_corePosition][u];
            contract(mixedComponent, measurments.positions[i][_corePosition], false, reshuffledComponent, false, 1);
            contract(*backwardStack[i + _corePosition*numMeasurments], mixedComponent, false, *backwardStack[i + (_corePosition+1)*numMeasurments], false, 1);
        }
    }

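    /// @brief For each measurement, sets the forwardStack at the given _corePosition to the contraction between the stack entry at _corePosition-1 and the component at _corePosition.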
    template<>
    void ADFVariant::InternalSolver<SinglePointMeasurementSet>::update_forward_stack( const size_t _corePosition, const Tensor& _currentComponent ) {
        INTERNAL_CHECK(_currentComponent.dimensions[1] == x.dimensions[_corePosition], "IE");

        const size_t numUpdates = forwardUpdates[_corePosition].size();

        const std::vector<Tensor> fixedComponents = get_fixed_components(_currentComponent);

        // Update the stack
        #pragma omp parallel for schedule(static)
        for(size_t u = 0; u < numUpdates; ++u) {
            const size_t i = forwardUpdates[_corePosition][u];
            contract(*forwardStack[i + _corePosition*numMeasurments], *forwardStack[i + (_corePosition-1)*numMeasurments], false, fixedComponents[measurments.positions[i][_corePosition]], false, 1);
        }
    }

    template<>
    void ADFVariant::InternalSolver<RankOneMeasurementSet>::update_forward_stack( const size_t _corePosition, const Tensor& _currentComponent ) {
        INTERNAL_CHECK(_currentComponent.dimensions[1] == x.dimensions[_corePosition], "IE");

        const size_t numUpdates = forwardUpdates[_corePosition].size();

        Tensor reshuffledComponent;
        reshuffledComponent(i1, r1, r2) = _currentComponent(r1, i1, r2);

        Tensor mixedComponent({reshuffledComponent.dimensions[1], reshuffledComponent.dimensions[2]});

        // Update the stack
        #pragma omp parallel for firstprivate(mixedComponent) schedule(static)
        for(size_t u = 0; u < numUpdates; ++u) {
            const size_t i = forwardUpdates[_corePosition][u];
            contract(mixedComponent, measurments.positions[i][_corePosition], false, reshuffledComponent, false, 1);
            contract(*forwardStack[i + _corePosition*numMeasurments], *forwardStack[i + (_corePosition-1)*numMeasurments], false, mixedComponent, false, 1);
        }
    }

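    /// @brief (Re-)Calculates the current residual b - Ax for every measurement, using the stacks at the given _corePosition.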
    template<class MeasurmentSet>
    void ADFVariant::InternalSolver<MeasurmentSet>::calculate_residual( const size_t _corePosition ) {
        Tensor currentValue({});

        // Check which side of the stack requires fewer updates
        if(forwardUpdates[_corePosition].size() < backwardUpdates[_corePosition].size()) {
            update_forward_stack(_corePosition, x.get_component(_corePosition));

            #pragma omp parallel for firstprivate(currentValue) schedule(static)
            for(size_t i = 0; i < numMeasurments; ++i) {
                contract(currentValue, *forwardStack[i + _corePosition*numMeasurments], false, *backwardStack[i + (_corePosition+1)*numMeasurments], false, 1);
                residual[i] = (measurments.measuredValues[i]-currentValue[0]);
            }
        } else {
            update_backward_stack(_corePosition, x.get_component(_corePosition));

            #pragma omp parallel for firstprivate(currentValue) schedule(static)
            for(size_t i = 0; i < numMeasurments; ++i) {
                contract(currentValue, *forwardStack[i + (_corePosition-1)*numMeasurments], false, *backwardStack[i + _corePosition*numMeasurments], false, 1);
                residual[i] = (measurments.measuredValues[i]-currentValue[0]);
            }
        }
    }

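    /// @brief Calculates one internal step of calculate_projected_gradient: adds the dyadic product of the left and right stack entries, scaled by the residual, to the gradient component.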
    template<> template<>
    void ADFVariant::InternalSolver<SinglePointMeasurementSet>::perform_dyadic_product( const size_t _localLeftRank,
                                                const size_t _localRightRank,
                                                const value_t* const _leftPtr,
                                                const value_t* const _rightPtr,
                                                value_t* const _deltaPtr,
                                                const value_t _residual,
                                                const size_t& _position,
                                                value_t* const /*unused*/) {
        value_t* const shiftedDeltaPtr = _deltaPtr + _position*_localLeftRank*_localRightRank;

        for(size_t k = 0; k < _localLeftRank; ++k) {
            for(size_t j = 0; j < _localRightRank; ++j) {
                shiftedDeltaPtr[k*_localRightRank+j] += _residual * _leftPtr[k] * _rightPtr[j];
            }
        }
    }

    template<> template<>
    void ADFVariant::InternalSolver<RankOneMeasurementSet>::perform_dyadic_product( const size_t _localLeftRank,
                                                const size_t _localRightRank,
                                                const value_t* const _leftPtr,
                                                const value_t* const _rightPtr,
                                                value_t* const _deltaPtr,
                                                const value_t _residual,
                                                const Tensor& _position,
                                                value_t* const _scratchSpace
                                            ) {
        // Create the dyadic product without factors in the scratch space
        for(size_t k = 0; k < _localLeftRank; ++k) {
            for(size_t j = 0; j < _localRightRank; ++j) {
                _scratchSpace[k*_localRightRank+j] = _leftPtr[k] * _rightPtr[j];
            }
        }

        for(size_t n = 0; n < _position.size; ++n) {
            misc::add_scaled(_deltaPtr + n*_localLeftRank*_localRightRank,
                _position[n]*_residual,
                _scratchSpace,
                _localLeftRank*_localRightRank
            );
        }
    }

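    /// @brief Calculates the component at _corePosition of the projected gradient from the residual, i.e. of E(A^T(b-Ax)).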
    template<class MeasurmentSet>
    void ADFVariant::InternalSolver<MeasurmentSet>::calculate_projected_gradient( const size_t _corePosition ) {
        const size_t localLeftRank = x.get_component(_corePosition).dimensions[0];
        const size_t localRightRank = x.get_component(_corePosition).dimensions[2];

        projectedGradientComponent.reset({x.dimensions[_corePosition], localLeftRank, localRightRank});

        #pragma omp parallel
        {
            Tensor partialProjGradComp({x.dimensions[_corePosition], localLeftRank, localRightRank}, Tensor::Representation::Dense);

            std::unique_ptr<value_t[]> dyadicComponent(std::is_same<MeasurmentSet, RankOneMeasurementSet>::value ? new value_t[localLeftRank*localRightRank] : nullptr);

            #pragma omp for schedule(static)
            for(size_t i = 0; i < numMeasurments; ++i) {
                INTERNAL_CHECK(!forwardStack[i + (_corePosition-1)*numMeasurments]->has_factor() && !backwardStack[i + (_corePosition+1)*numMeasurments]->has_factor(), "IE");

                // Interestingly, writing the dyadic product ourselves turns out to be faster than BLAS...
                perform_dyadic_product( localLeftRank,
                            localRightRank,
                            forwardStack[i + (_corePosition-1)*numMeasurments]->get_dense_data(),
                            backwardStack[i + (_corePosition+1)*numMeasurments]->get_dense_data(),
                            partialProjGradComp.get_unsanitized_dense_data(),
                            residual[i],
                            measurments.positions[i][_corePosition],
                            dyadicComponent.get()
                        );
            }

            // Accumulate the partial components
            #pragma omp critical
            {
                projectedGradientComponent += partialProjGradComp;
            }
        }

        projectedGradientComponent(r1, i1, r2) = projectedGradientComponent(i1, r1, r2);
    }

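    /// @brief Returns the position index of measurement _meas at _corePosition for point measurements, and 0 for rank-one measurements (whose slicewise norms are accumulated in a single entry).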
    template<class MeasurmentSet>
    inline size_t position_or_zero(const MeasurmentSet& _measurments, const size_t _meas, const size_t _corePosition);

    template<>
    inline size_t position_or_zero<SinglePointMeasurementSet>(const SinglePointMeasurementSet& _measurments, const size_t _meas, const size_t _corePosition) {
        return _measurments.positions[_meas][_corePosition];
    }

    template<>
    inline size_t position_or_zero<RankOneMeasurementSet>(const RankOneMeasurementSet& /*_measurments*/, const size_t /*_meas*/, const size_t /*_corePosition*/) {
        return 0;
    }


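    /// @brief Calculates ||P_n(A(E(A^T(b-Ax))))|| = ||P_n(A(E(gradient)))|| for each slice n of the current component.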
    template<class MeasurmentSet>
    std::vector<value_t> ADFVariant::InternalSolver<MeasurmentSet>::calculate_slicewise_norm_A_projGrad( const size_t _corePosition) {
        std::vector<value_t> normAProjGrad(x.dimensions[_corePosition], 0.0);

        Tensor currentValue({});

        // Check which side of the stack requires fewer updates
        if(forwardUpdates[_corePosition].size() < backwardUpdates[_corePosition].size()) {
            update_forward_stack(_corePosition, projectedGradientComponent);

            #pragma omp parallel firstprivate(currentValue)
            {
                std::vector<value_t> partialNormAProjGrad(x.dimensions[_corePosition], 0.0);

                #pragma omp for schedule(static)
                for(size_t i = 0; i < numMeasurments; ++i) {
                    contract(currentValue, *forwardStack[i + _corePosition*numMeasurments], false, *backwardStack[i + (_corePosition+1)*numMeasurments], false, 1);
                    partialNormAProjGrad[position_or_zero(measurments, i, _corePosition)] += misc::sqr(currentValue[0]); // TODO measurmentNorms
                }

                // Accumulate the partial components
                #pragma omp critical
                {
                    for(size_t i = 0; i < normAProjGrad.size(); ++i) {
                        normAProjGrad[i] += partialNormAProjGrad[i];
                    }
                }
            }
        } else {
            update_backward_stack(_corePosition, projectedGradientComponent);

            #pragma omp parallel firstprivate(currentValue)
            {
                std::vector<value_t> partialNormAProjGrad(x.dimensions[_corePosition], 0.0);

                #pragma omp for schedule(static)
                for(size_t i = 0; i < numMeasurments; ++i) {
                    contract(currentValue, *forwardStack[i + (_corePosition-1)*numMeasurments], false, *backwardStack[i + _corePosition*numMeasurments], false, 1);
                    partialNormAProjGrad[position_or_zero(measurments, i, _corePosition)] += misc::sqr(currentValue[0]); // TODO measurmentNorms
                }

                // Accumulate the partial components
                #pragma omp critical
                {
                    for(size_t i = 0; i < normAProjGrad.size(); ++i) {
                        normAProjGrad[i] += partialNormAProjGrad[i];
                    }
                }
            }
        }

        return normAProjGrad;
    }


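    /// @brief Updates the current solution x. For SinglePointMeasurementSets this is done for each slice separately; for RankOneMeasurementSets the whole component is updated at once.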
    template<>
    void ADFVariant::InternalSolver<SinglePointMeasurementSet>::update_x(const std::vector<value_t>& _normAProjGrad, const size_t _corePosition) {
        for(size_t j = 0; j < x.dimensions[_corePosition]; ++j) {
            Tensor localDelta;
            localDelta(r1, r2) = projectedGradientComponent(r1, j, r2);
            const value_t PyR = misc::sqr(frob_norm(localDelta));

            // Update
            x.component(_corePosition)(r1, i1, r2) = x.component(_corePosition)(r1, i1, r2) + (PyR/_normAProjGrad[j])*Tensor::dirac({x.dimensions[_corePosition]}, j)(i1)*localDelta(r1, r2);
        }
    }


    template<>
    void ADFVariant::InternalSolver<RankOneMeasurementSet>::update_x(const std::vector<value_t>& _normAProjGrad, const size_t _corePosition) {
        const value_t PyR = misc::sqr(frob_norm(projectedGradientComponent));

        // Update
        x.component(_corePosition)(r1, i1, r2) = x.component(_corePosition)(r1, i1, r2) + (PyR/misc::sum(_normAProjGrad))*projectedGradientComponent(r1, i1, r2);
    }

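    /// @brief Basically the complete algorithm: tries to reconstruct x using its current ranks.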
    template<class MeasurmentSet>
    void ADFVariant::InternalSolver<MeasurmentSet>::solve_with_current_ranks() {
        double resDec1 = 0.0, resDec2 = 0.0, resDec3 = 0.0;

        for(; maxIterations == 0 || iteration < maxIterations; ++iteration) {

            // Move the core back to position zero
            x.move_core(0, true);

            // Rebuild the backwardStack
            for(size_t corePosition = x.degree()-1; corePosition > 0; --corePosition) {
                update_backward_stack(corePosition, x.get_component(corePosition));
            }

            calculate_residual(0);

            lastResidualNorm = residualNorm;
            double residualNormSqr = 0;

            #pragma omp parallel for schedule(static) reduction(+:residualNormSqr)
            for(size_t i = 0; i < numMeasurments; ++i) {
                residualNormSqr += misc::sqr(residual[i]);
            }
            residualNorm = std::sqrt(residualNormSqr)/normMeasuredValues;

            perfData.add(iteration, residualNorm, x, 0);

            // Check the termination criteria
            double resDec4 = resDec3; resDec3 = resDec2; resDec2 = resDec1;
            resDec1 = residualNorm/lastResidualNorm;
            if(residualNorm < targetResidualNorm || resDec1*resDec2*resDec3*resDec4 > misc::pow(minimalResidualNormDecrease, 4)) { break; }


            // Sweep from the first to the last component
            for(size_t corePosition = 0; corePosition < degree; ++corePosition) {
                if(corePosition > 0) { // For corePosition 0 this calculation is already done in the calculation of the residual.
                    calculate_residual(corePosition);
                }

                calculate_projected_gradient(corePosition);

                const std::vector<value_t> normAProjGrad = calculate_slicewise_norm_A_projGrad(corePosition);

                update_x(normAProjGrad, corePosition);

                // If we have not yet reached the end of the sweep we need to take care of the core and update our stacks
                if(corePosition+1 < degree) {
                    x.move_core(corePosition+1, true);
                    update_forward_stack(corePosition, x.get_component(corePosition));
                }
            }
        }
    }


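    /// @brief Calculates the norm of each measurement operator: 1.0 for point measurements, the product of the Frobenius norms of the rank-one factors otherwise.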
    template<class MeasurmentSet>
    inline void calc_measurment_norm(double* _norms, const MeasurmentSet& _measurments);

    template<>
    inline void calc_measurment_norm<SinglePointMeasurementSet>(double* _norms, const SinglePointMeasurementSet& _measurments) {
        for(size_t i = 0; i < _measurments.size(); ++i) {
            _norms[i] = 1.0;
        }
    }

    template<>
    inline void calc_measurment_norm<RankOneMeasurementSet>(double* _norms, const RankOneMeasurementSet& _measurments) {
        for(size_t i = 0; i < _measurments.size(); ++i) {
            _norms[i] = 1.0;
            for(size_t j = 0; j < _measurments.degree(); ++j) {
                _norms[i] *= _measurments.positions[i][j].frob_norm();
            }
        }
    }


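    /// @brief Tries to solve the reconstruction problem with the current settings, following a rank increasing strategy if requested. Returns the final residual norm.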
    template<class MeasurmentSet>
    double ADFVariant::InternalSolver<MeasurmentSet>::solve() {
        perfData.start();

        #pragma omp parallel sections
        {
            #pragma omp section
            construct_stacks(forwardStackSaveSlots, forwardUpdates, forwardStackMem, true);

            #pragma omp section
            construct_stacks(backwardStackSaveSlots, backwardUpdates, backwardStackMem, false);
        }

        calc_measurment_norm(measurmentNorms.get(), measurments);

        // We need x to be canonicalized, in the sense that no edge carries more than the maximal rank (prior to the stack resize).
        x.canonicalize_left();

        resize_stack_tensors();

        // One initial run
        solve_with_current_ranks();

        // If we follow a rank increasing strategy, increase the ranks until we reach the targetResidualNorm, the maxRanks or the maxIterations.
        while(residualNorm > targetResidualNorm && x.ranks() != maxRanks && (maxIterations == 0 || iteration < maxIterations)) {
            // Increase the ranks
            x.move_core(0, true);
            const auto rndTensor = TTTensor::random(x.dimensions, std::vector<size_t>(x.degree()-1, 1));
            const auto diff = (1e-6*frob_norm(x))*rndTensor/frob_norm(rndTensor);
            x = x+diff;

            x.round(maxRanks);

            resize_stack_tensors();

            solve_with_current_ranks();
        }
        return residualNorm;
    }


    // Explicit instantiation for the two measurement set types implemented in the xerus library
    template class ADFVariant::InternalSolver<SinglePointMeasurementSet>;
    template class ADFVariant::InternalSolver<RankOneMeasurementSet>;

    const ADFVariant ADF(0, 1e-8, 0.999);
} // namespace xerus
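
// Usage sketch (not part of this translation unit): reconstructing a TTTensor from
// point measurements with the default ADF variant. This is a minimal illustration
// assuming the SinglePointMeasurementSet::random factory and the ADFVariant call
// operator behave as their names suggest; dimensions and ranks are placeholders.
//
//     #include <xerus.h>
//
//     xerus::TTTensor x = xerus::TTTensor::random({4, 4, 4, 4}, {2, 2, 2});
//     xerus::SinglePointMeasurementSet measurements =
//         xerus::SinglePointMeasurementSet::random(100, x.dimensions);
//     // ... fill measurements.measuredValues, e.g. by sampling a reference tensor ...
//     const double residual = xerus::ADF(x, measurements);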