Bitcoin ABC 0.26.3
lockedpool.cpp
// Copyright (c) 2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include <support/cleanse.h>
#include <support/lockedpool.h>

#if defined(HAVE_CONFIG_H)
#include <config/bitcoin-config.h>
#endif

#ifdef WIN32
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <windows.h>
#else
#include <climits>        // for PAGESIZE
#include <sys/mman.h>     // for mmap
#include <sys/resource.h> // for getrlimit
#include <unistd.h>       // for sysconf
#endif

#include <algorithm>
#include <limits>
#include <stdexcept>
#include <utility>
#ifdef ARENA_DEBUG
#include <iomanip>
#include <iostream>
#endif

// Definition of the static singleton pointer declared in lockedpool.h.
LockedPoolManager *LockedPoolManager::_instance = nullptr;

/*******************************************************************************/
// Utilities
//
/** Align up to power of 2 */
static inline size_t align_up(size_t x, size_t align) {
    return (x + align - 1) & ~(align - 1);
}
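
// Worked example (illustrative note, not from the original source): with a
// power-of-two `align`, the bit trick rounds up without division or branches:
//   align_up(13, 16) == (13 + 15) & ~15 == 28 & ~15 == 16
//   align_up(32, 16) == (32 + 15) & ~15 == 47 & ~15 == 32
// The result is only correct when `align` is a power of 2, which holds for
// every caller in this file (chunk alignments and page sizes).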

/*******************************************************************************/
// Implementation: Arena

Arena::Arena(void *base_in, size_t size_in, size_t alignment_in)
    : base(static_cast<char *>(base_in)),
      end(static_cast<char *>(base_in) + size_in), alignment(alignment_in) {
    // Start with one free chunk that covers the entire arena
    auto it = size_to_free_chunk.emplace(size_in, base);
    chunks_free.emplace(base, it);
    chunks_free_end.emplace(base + size_in, it);
}

Arena::~Arena() {}

void *Arena::alloc(size_t size) {
    // Round to next multiple of alignment
    size = align_up(size, alignment);

    // Don't handle zero-sized chunks
    if (size == 0) {
        return nullptr;
    }

    // Pick a large enough free-chunk: lower_bound returns an iterator to the
    // first free chunk that is not smaller than the requested size, so this
    // allocation strategy is best-fit. According to "Dynamic Storage
    // Allocation: A Survey and Critical Review", Wilson et al. 1995,
    // http://www.scs.stanford.edu/14wi-cs140/sched/readings/wilson.pdf,
    // best-fit and first-fit policies seem to work well in practice.
    auto size_ptr_it = size_to_free_chunk.lower_bound(size);
    if (size_ptr_it == size_to_free_chunk.end()) {
        return nullptr;
    }

    // Create the used-chunk, taking its space from the end of the free-chunk
    const size_t size_remaining = size_ptr_it->first - size;
    auto alloced =
        chunks_used.emplace(size_ptr_it->second + size_remaining, size).first;
    chunks_free_end.erase(size_ptr_it->second + size_ptr_it->first);
    if (size_ptr_it->first == size) {
        // whole chunk is used up
        chunks_free.erase(size_ptr_it->second);
    } else {
        // still some memory left in the chunk
        auto it_remaining =
            size_to_free_chunk.emplace(size_remaining, size_ptr_it->second);
        chunks_free[size_ptr_it->second] = it_remaining;
        chunks_free_end.emplace(size_ptr_it->second + size_remaining,
                                it_remaining);
    }
    size_to_free_chunk.erase(size_ptr_it);

    return reinterpret_cast<void *>(alloced->first);
}
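
// Illustrative sketch (not part of the build): how best-fit allocation plays
// out on a fresh arena. All names are hypothetical.
//
//   Arena arena(base, 1024, 16); // one free chunk of 1024 bytes
//   void *a = arena.alloc(50);   // rounded up to 64, carved from the end
//   void *b = arena.alloc(200);  // rounded up to 208, from the remaining 960
//   arena.free(a);
//   arena.free(b);               // free chunks coalesce back to 1024 bytes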

void Arena::free(void *ptr) {
    // Freeing a null pointer is OK.
    if (ptr == nullptr) {
        return;
    }

    // Remove chunk from used map
    auto i = chunks_used.find(static_cast<char *>(ptr));
    if (i == chunks_used.end()) {
        throw std::runtime_error("Arena: invalid or double free");
    }
    std::pair<char *, size_t> freed = *i;
    chunks_used.erase(i);

    // Coalesce the freed chunk with the free chunk that ends where it begins,
    // if any
    auto prev = chunks_free_end.find(freed.first);
    if (prev != chunks_free_end.end()) {
        freed.first -= prev->second->first;
        freed.second += prev->second->first;
        size_to_free_chunk.erase(prev->second);
        chunks_free_end.erase(prev);
    }

    // Coalesce the freed chunk with the free chunk that begins where it ends,
    // if any
    auto next = chunks_free.find(freed.first + freed.second);
    if (next != chunks_free.end()) {
        freed.second += next->second->first;
        size_to_free_chunk.erase(next->second);
        chunks_free.erase(next);
    }

    // Add/set space with coalesced free chunk
    auto it = size_to_free_chunk.emplace(freed.second, freed.first);
    chunks_free[freed.first] = it;
    chunks_free_end[freed.first + freed.second] = it;
}
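
// Illustrative note (not from the original source): both neighbor lookups
// matter. If adjacent chunks A|B|C have A and C free and B in use, freeing B
// finds A via chunks_free_end (A ends where B begins) and C via chunks_free
// (C begins where B ends), so all three merge into one free chunk covering
// A+B+C, keeping fragmentation low.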

Arena::Stats Arena::stats() const {
    Arena::Stats r{0, 0, 0, chunks_used.size(), chunks_free.size()};
    for (const auto &chunk : chunks_used) {
        r.used += chunk.second;
    }
    for (const auto &chunk : chunks_free) {
        r.free += chunk.second->first;
    }
    r.total = r.used + r.free;
    return r;
}

#ifdef ARENA_DEBUG
static void printchunk(void *base, size_t sz, bool used) {
    std::cout << "0x" << std::hex << std::setw(16) << std::setfill('0') << base
              << " 0x" << std::hex << std::setw(16) << std::setfill('0') << sz
              << " 0x" << used << std::endl;
}
void Arena::walk() const {
    for (const auto &chunk : chunks_used) {
        printchunk(chunk.first, chunk.second, true);
    }
    std::cout << std::endl;
    for (const auto &chunk : chunks_free) {
        printchunk(chunk.first, chunk.second->first, false);
    }
    std::cout << std::endl;
}
#endif

/*******************************************************************************/
// Implementation: Win32LockedPageAllocator

#ifdef WIN32
/** LockedPageAllocator specialized for Windows.
 */
class Win32LockedPageAllocator : public LockedPageAllocator {
public:
    Win32LockedPageAllocator();
    void *AllocateLocked(size_t len, bool *lockingSuccess) override;
    void FreeLocked(void *addr, size_t len) override;
    size_t GetLimit() override;

private:
    size_t page_size;
};

Win32LockedPageAllocator::Win32LockedPageAllocator() {
    // Determine system page size in bytes
    SYSTEM_INFO sSysInfo;
    GetSystemInfo(&sSysInfo);
    page_size = sSysInfo.dwPageSize;
}
void *Win32LockedPageAllocator::AllocateLocked(size_t len,
                                               bool *lockingSuccess) {
    len = align_up(len, page_size);
    void *addr =
        VirtualAlloc(nullptr, len, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
    if (addr) {
        // VirtualLock is used to attempt to keep keying material out of swap.
        // Note that it does not provide this as a guarantee, but, in practice,
        // memory that has been VirtualLock'd almost never gets written to the
        // pagefile except in rare circumstances where memory is extremely low.
        *lockingSuccess = VirtualLock(const_cast<void *>(addr), len) != 0;
    }
    return addr;
}
void Win32LockedPageAllocator::FreeLocked(void *addr, size_t len) {
    len = align_up(len, page_size);
    memory_cleanse(addr, len);
    VirtualUnlock(const_cast<void *>(addr), len);
}

size_t Win32LockedPageAllocator::GetLimit() {
    // TODO is there a limit on Windows, how to get it?
    return std::numeric_limits<size_t>::max();
}
#endif

/*******************************************************************************/
// Implementation: PosixLockedPageAllocator

#ifndef WIN32
/** LockedPageAllocator specialized for OSes that don't try to be special
 * snowflakes.
 */
class PosixLockedPageAllocator : public LockedPageAllocator {
public:
    PosixLockedPageAllocator();
    void *AllocateLocked(size_t len, bool *lockingSuccess) override;
    void FreeLocked(void *addr, size_t len) override;
    size_t GetLimit() override;

private:
    size_t page_size;
};

PosixLockedPageAllocator::PosixLockedPageAllocator() {
// Determine system page size in bytes
#if defined(PAGESIZE) // defined in climits
    page_size = PAGESIZE;
#else // assume some POSIX OS
    page_size = sysconf(_SC_PAGESIZE);
#endif
}

// Some systems (at least OS X) do not define MAP_ANONYMOUS yet and define
// MAP_ANON which is deprecated
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

void *PosixLockedPageAllocator::AllocateLocked(size_t len,
                                               bool *lockingSuccess) {
    void *addr;
    len = align_up(len, page_size);
    addr = mmap(nullptr, len, PROT_READ | PROT_WRITE,
                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (addr == MAP_FAILED) {
        return nullptr;
    }
    if (addr) {
        *lockingSuccess = mlock(addr, len) == 0;
#if defined(MADV_DONTDUMP) // Linux
        madvise(addr, len, MADV_DONTDUMP);
#elif defined(MADV_NOCORE) // FreeBSD
        madvise(addr, len, MADV_NOCORE);
#endif
    }
    return addr;
}
void PosixLockedPageAllocator::FreeLocked(void *addr, size_t len) {
    len = align_up(len, page_size);
    memory_cleanse(addr, len);
    munlock(addr, len);
    munmap(addr, len);
}
size_t PosixLockedPageAllocator::GetLimit() {
#ifdef RLIMIT_MEMLOCK
    struct rlimit rlim;
    if (getrlimit(RLIMIT_MEMLOCK, &rlim) == 0) {
        if (rlim.rlim_cur != RLIM_INFINITY) {
            return rlim.rlim_cur;
        }
    }
#endif
    return std::numeric_limits<size_t>::max();
}
#endif
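
// Illustrative sketch (not part of the build): this returns the soft
// RLIMIT_MEMLOCK limit, which is what `ulimit -l` reports (in KiB on most
// shells). A hypothetical standalone probe:
//
//   #include <sys/resource.h>
//   #include <cstdio>
//   int main() {
//       struct rlimit rlim;
//       if (getrlimit(RLIMIT_MEMLOCK, &rlim) == 0) {
//           std::printf("lockable bytes: %llu\n",
//                       (unsigned long long)rlim.rlim_cur);
//       }
//   }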

/*******************************************************************************/
// Implementation: LockedPool

LockedPool::LockedPool(std::unique_ptr<LockedPageAllocator> allocator_in,
                       LockingFailed_Callback lf_cb_in)
    : allocator(std::move(allocator_in)), lf_cb(lf_cb_in),
      cumulative_bytes_locked(0) {}

LockedPool::~LockedPool() {}
void *LockedPool::alloc(size_t size) {
    std::lock_guard<std::mutex> lock(mutex);

    // Don't handle impossible sizes
    if (size == 0 || size > ARENA_SIZE) {
        return nullptr;
    }

    // Try allocating from each current arena
    for (auto &arena : arenas) {
        void *addr = arena.alloc(size);
        if (addr) {
            return addr;
        }
    }
    // If that fails, create a new one
    if (new_arena(ARENA_SIZE, ARENA_ALIGN)) {
        return arenas.back().alloc(size);
    }
    return nullptr;
}
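
// Illustrative sketch (not part of the build): typical pool usage, with
// hypothetical names. A single pool amortizes one mmap/mlock-ed arena across
// many small secret allocations:
//
//   LockedPool pool(std::make_unique<PosixLockedPageAllocator>());
//   void *key = pool.alloc(32); // served from a locked arena, if possible
//   /* ... use the 32 bytes for secret material ... */
//   pool.free(key);             // pages are wiped only on arena teardown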

void LockedPool::free(void *ptr) {
    std::lock_guard<std::mutex> lock(mutex);
    // TODO we can do better than this linear search by keeping a map of arena
    // extents to arena, and looking up the address.
    for (auto &arena : arenas) {
        if (arena.addressInArena(ptr)) {
            arena.free(ptr);
            return;
        }
    }
    throw std::runtime_error(
        "LockedPool: invalid address not pointing to any arena");
}

LockedPool::Stats LockedPool::stats() const {
    std::lock_guard<std::mutex> lock(mutex);
    LockedPool::Stats r{0, 0, 0, cumulative_bytes_locked, 0, 0};
    for (const auto &arena : arenas) {
        Arena::Stats i = arena.stats();
        r.used += i.used;
        r.free += i.free;
        r.total += i.total;
        r.chunks_used += i.chunks_used;
        r.chunks_free += i.chunks_free;
    }
    return r;
}

bool LockedPool::new_arena(size_t size, size_t align) {
    bool locked;
    // If this is the first arena, handle it specially: cap the size at the
    // process's locked-memory limit. This makes sure that the first arena
    // will at least be locked. An exception to this is if the process limit
    // is 0: in this case no memory can be locked at all, so we'll skip past
    // this logic.
    if (arenas.empty()) {
        size_t limit = allocator->GetLimit();
        if (limit > 0) {
            size = std::min(size, limit);
        }
    }
    void *addr = allocator->AllocateLocked(size, &locked);
    if (!addr) {
        return false;
    }
    if (locked) {
        cumulative_bytes_locked += size;
    } else if (lf_cb) {
        // Call the locking-failed callback if locking failed
        if (!lf_cb()) {
            // If the callback returns false, free the memory and fail;
            // otherwise consider the user warned and proceed.
            allocator->FreeLocked(addr, size);
            return false;
        }
    }
    arenas.emplace_back(allocator.get(), addr, size, align);
    return true;
}
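
// Worked example (illustrative note): with an ARENA_SIZE of 256 KiB but an
// RLIMIT_MEMLOCK of 64 KiB, the first arena is capped at 64 KiB so it can be
// locked in full; any later arena requests the full 256 KiB and, if locking
// fails, falls back to the locking-failed callback above.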

LockedPool::LockedPageArena::LockedPageArena(LockedPageAllocator *allocator_in,
                                             void *base_in, size_t size_in,
                                             size_t align_in)
    : Arena(base_in, size_in, align_in), base(base_in), size(size_in),
      allocator(allocator_in) {}
LockedPool::LockedPageArena::~LockedPageArena() {
    allocator->FreeLocked(base, size);
}

/*******************************************************************************/
// Implementation: LockedPoolManager
//
LockedPoolManager::LockedPoolManager(
    std::unique_ptr<LockedPageAllocator> allocator_in)
    : LockedPool(std::move(allocator_in), &LockedPoolManager::LockingFailed) {}

bool LockedPoolManager::LockingFailed() {
    // TODO: log something but how? without including util.h
    return true;
}

void LockedPoolManager::CreateInstance() {
    // Using a local static instance guarantees that the object is initialized
    // when it's first needed and also deinitialized after all objects that
    // use it are done with it. I can think of one unlikely scenario where we
    // may have a static deinitialization order/problem, but the check in
    // LockedPoolManagerBase's destructor helps us detect if that ever
    // happens.
#ifdef WIN32
    std::unique_ptr<LockedPageAllocator> allocator(
        new Win32LockedPageAllocator());
#else
    std::unique_ptr<LockedPageAllocator> allocator(
        new PosixLockedPageAllocator());
#endif
    static LockedPoolManager instance(std::move(allocator));
    LockedPoolManager::_instance = &instance;
}
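
// Illustrative sketch (not part of the build): callers reach this singleton
// through LockedPoolManager::Instance() (declared in lockedpool.h), which
// triggers CreateInstance() exactly once:
//
//   LockedPoolManager &pool = LockedPoolManager::Instance();
//   void *p = pool.alloc(32);
//   pool.free(p);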