xerus
a general purpose tensor library
allocator.cpp
// Xerus - A General Purpose Tensor Library
// Copyright (C) 2014-2017 Benjamin Huber and Sebastian Wolf.
//
// Xerus is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License,
// or (at your option) any later version.
//
// Xerus is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with Xerus. If not, see <http://www.gnu.org/licenses/>.
//
// For further information on Xerus visit https://libXerus.org
// or contact us at contact@libXerus.org.

#ifdef XERUS_REPLACE_ALLOCATOR

	#include <xerus/misc/allocator.h>
	// #include <dlfcn.h>
	#include <cstring>

	using xma = xerus::misc::AllocatorStorage;
	namespace xm = xerus::misc;

	thread_local xerus::misc::AllocatorStorage xm::astore;
	thread_local bool programIsRunning = true;
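	// Both variables are thread_local: every thread owns its own bucket cache, so
	// myalloc/mydelete below need no locking. programIsRunning is flipped to false
	// by the destructor of astore; from then on mydelete stops caching freed blocks,
	// since the bucket vectors may already be gone during thread shutdown.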

	xerus::misc::AllocatorStorage::AllocatorStorage() {
		for (unsigned long i = 0; i < xma::NUM_BUCKETS; ++i) {
			allocCount[i] = 0;
			maxAlloc[i] = 0;
			currAlloc[i] = 0;
		}
	}

	xerus::misc::AllocatorStorage::~AllocatorStorage() { programIsRunning = false; }


	using mallocType = void* (*)(size_t);
	void* (*xerus::misc::r_malloc)(size_t) = &malloc; // (mallocType)dlsym(RTLD_NEXT, "malloc");
	using freeType = void (*)(void*);
	void (*xerus::misc::r_free)(void*) = &free; // (freeType)dlsym(RTLD_NEXT, "free");
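	// r_malloc/r_free are kept as function pointers so the underlying allocator can
	// be swapped out: by default they point at the system malloc/free, while the
	// commented dlsym(RTLD_NEXT, ...) variants would forward to the next malloc/free
	// in the link chain, e.g. for LD_PRELOAD-style interposition.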


	void xerus::misc::AllocatorStorage::create_new_pool() {
		uint8_t* newPool = static_cast<uint8_t*>(xerus::misc::r_malloc(xma::POOL_SIZE)); // TODO use mmap(...)
		uint8_t* startAddr = newPool + xma::BUCKET_SIZE - reinterpret_cast<uintptr_t>(newPool) % xma::ALIGNMENT;
		pools.emplace_back(newPool, startAddr);
	}
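	// Each pool is stored as a pair (base pointer, next free address). startAddr is
	// advanced by one BUCKET_SIZE and rounded down by the base's misalignment, so
	// that, assuming BUCKET_SIZE is a multiple of ALIGNMENT (the constants are
	// defined in allocator.h), every handed-out pointer is ALIGNMENT-aligned and the
	// byte directly in front of it is free for the tag written in myalloc below.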


	void* myalloc(size_t n) {
		if (n >= xma::SMALLEST_NOT_CACHED_SIZE) {
			// Large request: bypass the pools and forward to the raw allocator,
			// over-allocating ALIGNMENT+1 bytes for alignment and metadata.
			void* res = xerus::misc::r_malloc(n + xma::ALIGNMENT + 1);

			// res is increased by at least 2 bytes (as alignmentOffset < xma::ALIGNMENT)
			res = static_cast<void*>(static_cast<uint8_t*>(res) + xma::ALIGNMENT + 1);
			uintptr_t alignmentOffset = reinterpret_cast<uintptr_t>(res) % xma::ALIGNMENT;
			res = static_cast<void*>(static_cast<uint8_t*>(res) - alignmentOffset);

			// indicate non-bucket allocation
			*(static_cast<uint8_t*>(res) - 1) = 0xFF;
			// and store the alignmentOffset to be able to reconstruct the original res pointer
			*(static_cast<uint8_t*>(res) - 2) = static_cast<uint8_t>(alignmentOffset);
			return res;
		} else {
			// Small request: serve it from the per-thread bucket of its size class.
			uint8_t numBucket = uint8_t((n + 1) / xma::BUCKET_SIZE);
			uint8_t* res;
			if (xm::astore.buckets[numBucket].empty()) {
				// No cached block of this size class: carve a fresh one out of the
				// current pool, opening a new pool if the current one is exhausted.
				if (xm::astore.pools.empty() || xm::astore.pools.back().second + (numBucket + 1) * xma::BUCKET_SIZE >= xm::astore.pools.back().first + xma::POOL_SIZE) {
					xm::astore.create_new_pool();
				}
				res = xm::astore.pools.back().second;
				xm::astore.pools.back().second += (numBucket + 1) * xma::BUCKET_SIZE;
				*(res - 1) = numBucket; // tag the block with its size class for mydelete
			} else {
				// Reuse the most recently freed block of this size class.
				res = xm::astore.buckets[numBucket].back();
				xm::astore.buckets[numBucket].pop_back();
			}
			#ifdef XERUS_PERFORMANCE_ANALYSIS
				xm::astore.allocCount[numBucket] += 1;
				xm::astore.currAlloc[numBucket] += 1;
				if (xm::astore.currAlloc[numBucket] > xm::astore.maxAlloc[numBucket]) {
					xm::astore.maxAlloc[numBucket] = xm::astore.currAlloc[numBucket];
				}
			#endif
			return static_cast<void*>(res);
		}
	}
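	// Worked example (with hypothetical constants; the real values live in
	// allocator.h): assuming BUCKET_SIZE == 64, a request of n == 100 bytes yields
	// numBucket == (100+1)/64 == 1, so (1+1)*64 == 128 bytes are reserved in the
	// pool. The tag byte at res[-1] stores numBucket == 1; on deallocation,
	// mydelete reads it and returns the block to buckets[1] for reuse.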


	#ifdef XERUS_REPLACE_ALLOCATOR
		void* operator new(std::size_t n) {
			return myalloc(n);
		}

		void* operator new[](std::size_t s) {
			return myalloc(s);
		}
	#endif
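	// Defining these signatures replaces the global operator new/new[] for the
	// whole program, so every C++ allocation, e.g.
	//     auto v = new std::vector<double>(1000);
	// is routed through myalloc once XERUS_REPLACE_ALLOCATOR is set. Note that a
	// fully conforming replacement would also have to throw std::bad_alloc on
	// failure; myalloc as written simply forwards whatever r_malloc returns.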

	// void* malloc(size_t size) {
	// 	return myalloc(size);
	// }
	//
	// void* calloc(size_t _nmemb, size_t _size) {
	// 	if (~0ul / _size < _nmemb) {
	// 		throw std::bad_alloc();
	// 	}
	// 	size_t s = _nmemb * _size;
	// 	void* t = myalloc(s);
	// 	memset(t, 0, s);
	// 	return t;
	// }
	//
	// void* realloc(void* _ptr, size_t _size) {
	// 	size_t n = 0;
	// 	if (_ptr != nullptr) {
	// 		n = *(static_cast<size_t*>(_ptr)-2);
	// 	}
	// 	if (n >= _size) {
	// 		return _ptr;
	// 	} else {
	// 		void* newstorage = myalloc(_size);
	// 		memcpy(newstorage, _ptr, n);
	// 		::operator delete(_ptr);
	// 		return newstorage;
	// 	}
	// }
	// TODO valloc


	void mydelete(void* ptr) noexcept {
		uint8_t n = *(static_cast<uint8_t*>(ptr) - 1); // read the tag byte written by myalloc
		if (n < 0xFF) {
			// Bucket allocation: return the block to its per-thread free list.
			#ifdef XERUS_PERFORMANCE_ANALYSIS
				xm::astore.currAlloc[n] -= 1;
			#endif
			if (programIsRunning) {
				xm::astore.buckets[n].push_back(static_cast<uint8_t*>(ptr));
			}
		} else {
			// Large allocation: undo the alignment shift and free the original pointer.
			uint8_t offset = *(static_cast<uint8_t*>(ptr) - 2);
			xerus::misc::r_free(static_cast<void*>(static_cast<uint8_t*>(ptr) - xma::ALIGNMENT - 1 + offset));
		}
	}

	void operator delete(void* ptr) noexcept {
		mydelete(ptr);
	}

	void operator delete[](void* ptr) noexcept {
		mydelete(ptr);
	}

#endif
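
When built with XERUS_PERFORMANCE_ANALYSIS defined, the per-bucket counters above (allocCount, currAlloc, maxAlloc) record how the bucket allocator is used. The following is a minimal sketch of how these statistics could be read out; it assumes that xerus::misc::astore and the AllocatorStorage members shown in this file are accessible through xerus/misc/allocator.h, and printAllocatorStats is a hypothetical helper, not part of the library.

#include <cstdio>
#include <xerus/misc/allocator.h>

// Hypothetical helper: print, per size class, the total number of allocations
// served and the peak number of simultaneously live blocks. Since astore is
// thread_local, this reports the statistics of the calling thread only.
void printAllocatorStats() {
	using xma = xerus::misc::AllocatorStorage;
	for (unsigned long i = 0; i < xma::NUM_BUCKETS; ++i) {
		if (xerus::misc::astore.allocCount[i] > 0) {
			std::printf("bucket %3lu: %10lu allocations, peak %8lu live\n", i,
			            static_cast<unsigned long>(xerus::misc::astore.allocCount[i]),
			            static_cast<unsigned long>(xerus::misc::astore.maxAlloc[i]));
		}
	}
}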