// Copyright (c) 2016-2022 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include <support/lockedpool.h>
#include <support/cleanse.h>

#if defined(HAVE_CONFIG_H)
#include <config/bitcoin-config.h>
#endif

#ifdef WIN32
#include <windows.h>
#else
#include <sys/mman.h> // for mmap
#include <sys/resource.h> // for getrlimit
#include <limits.h> // for PAGESIZE
#include <unistd.h> // for sysconf
#endif

#include <algorithm>
#include <limits>
#include <stdexcept>
#include <utility>
#ifdef ARENA_DEBUG
#include <iomanip>
#include <iostream>
#endif

LockedPoolManager* LockedPoolManager::_instance = nullptr;

/*******************************************************************************/
// Utilities
//
/** Align up to power of 2 */
static inline size_t align_up(size_t x, size_t align)
{
    return (x + align - 1) & ~(align - 1);
}
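// e.g. align_up(13, 16) == 16 and align_up(32, 16) == 32; the bitmask trick
// assumes `align` is a power of two.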

/*******************************************************************************/
// Implementation: Arena

Arena::Arena(void *base_in, size_t size_in, size_t alignment_in):
    base(base_in), end(static_cast<char*>(base_in) + size_in), alignment(alignment_in)
{
    // Start with one free chunk that covers the entire arena
    auto it = size_to_free_chunk.emplace(size_in, base);
    chunks_free.emplace(base, it);
    chunks_free_end.emplace(static_cast<char*>(base) + size_in, it);
}

Arena::~Arena()
{
}

void* Arena::alloc(size_t size)
{
    // Round to next multiple of alignment
    size = align_up(size, alignment);

    // Don't handle zero-sized chunks
    if (size == 0)
        return nullptr;

    // Pick a large enough free-chunk: lower_bound returns an iterator pointing
    // to the first element that is not less than key. This allocation strategy
    // is best-fit. According to "Dynamic Storage Allocation: A Survey and
    // Critical Review", Wilson et al. 1995,
    // https://www.scs.stanford.edu/14wi-cs140/sched/readings/wilson.pdf,
    // best-fit and first-fit policies seem to work well in practice.
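    // For example, with free chunks of sizes {64, 256, 1024}, a request for
    // 128 selects the 256-byte chunk: the smallest chunk that still fits.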
    auto size_ptr_it = size_to_free_chunk.lower_bound(size);
    if (size_ptr_it == size_to_free_chunk.end())
        return nullptr;

    // Create the used-chunk, taking its space from the end of the free-chunk
    const size_t size_remaining = size_ptr_it->first - size;
    char* const free_chunk = static_cast<char*>(size_ptr_it->second);
    auto allocated = chunks_used.emplace(free_chunk + size_remaining, size).first;
    chunks_free_end.erase(free_chunk + size_ptr_it->first);
    if (size_ptr_it->first == size) {
        // whole chunk is used up
        chunks_free.erase(size_ptr_it->second);
    } else {
        // still some memory left in the chunk
        auto it_remaining = size_to_free_chunk.emplace(size_remaining, size_ptr_it->second);
        chunks_free[size_ptr_it->second] = it_remaining;
        chunks_free_end.emplace(free_chunk + size_remaining, it_remaining);
    }
    size_to_free_chunk.erase(size_ptr_it);

    return allocated->first;
}

void Arena::free(void *ptr)
{
    // Freeing a null pointer is OK.
    if (ptr == nullptr) {
        return;
    }

    // Remove chunk from used map
    auto i = chunks_used.find(ptr);
    if (i == chunks_used.end()) {
        throw std::runtime_error("Arena: invalid or double free");
    }
    auto freed = std::make_pair(static_cast<char*>(i->first), i->second);
    chunks_used.erase(i);

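    // For example, if [0x000,0x100) and [0x200,0x300) are already free and
    // [0x100,0x200) is freed, the two merges below yield one free chunk
    // [0x000,0x300).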
    // coalesce freed with previous chunk
    auto prev = chunks_free_end.find(freed.first);
    if (prev != chunks_free_end.end()) {
        freed.first -= prev->second->first;
        freed.second += prev->second->first;
        size_to_free_chunk.erase(prev->second);
        chunks_free_end.erase(prev);
    }

    // coalesce freed with chunk after freed
    auto next = chunks_free.find(freed.first + freed.second);
    if (next != chunks_free.end()) {
        freed.second += next->second->first;
        size_to_free_chunk.erase(next->second);
        chunks_free.erase(next);
    }

    // Add/set space with coalesced free chunk
    auto it = size_to_free_chunk.emplace(freed.second, freed.first);
    chunks_free[freed.first] = it;
    chunks_free_end[freed.first + freed.second] = it;
}

Arena::Stats Arena::stats() const
{
    Arena::Stats r{ 0, 0, 0, chunks_used.size(), chunks_free.size() };
    for (const auto& chunk: chunks_used)
        r.used += chunk.second;
    for (const auto& chunk: chunks_free)
        r.free += chunk.second->first;
    r.total = r.used + r.free;
    return r;
}

#ifdef ARENA_DEBUG
static void printchunk(void* base, size_t sz, bool used) {
    std::cout <<
        "0x" << std::hex << std::setw(16) << std::setfill('0') << base <<
        " 0x" << std::hex << std::setw(16) << std::setfill('0') << sz <<
        " 0x" << used << std::endl;
}
void Arena::walk() const
{
    for (const auto& chunk: chunks_used)
        printchunk(chunk.first, chunk.second, true);
    std::cout << std::endl;
    for (const auto& chunk: chunks_free)
        printchunk(chunk.first, chunk.second->first, false);
    std::cout << std::endl;
}
#endif

/*******************************************************************************/
// Implementation: Win32LockedPageAllocator

#ifdef WIN32
/** LockedPageAllocator specialized for Windows.
 */
class Win32LockedPageAllocator: public LockedPageAllocator
{
public:
    Win32LockedPageAllocator();
    void* AllocateLocked(size_t len, bool *lockingSuccess) override;
    void FreeLocked(void* addr, size_t len) override;
    size_t GetLimit() override;
private:
    size_t page_size;
};

Win32LockedPageAllocator::Win32LockedPageAllocator()
{
    // Determine system page size in bytes
    SYSTEM_INFO sSysInfo;
    GetSystemInfo(&sSysInfo);
    page_size = sSysInfo.dwPageSize;
}
void *Win32LockedPageAllocator::AllocateLocked(size_t len, bool *lockingSuccess)
{
    len = align_up(len, page_size);
    void *addr = VirtualAlloc(nullptr, len, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
    if (addr) {
        // VirtualLock is used to attempt to keep keying material out of swap. Note
        // that it does not provide this as a guarantee, but, in practice, memory
        // that has been VirtualLock'd almost never gets written to the pagefile
        // except in rare circumstances where memory is extremely low.
        *lockingSuccess = VirtualLock(const_cast<void*>(addr), len) != 0;
    }
    return addr;
}
void Win32LockedPageAllocator::FreeLocked(void* addr, size_t len)
{
    len = align_up(len, page_size);
    memory_cleanse(addr, len);
    VirtualUnlock(const_cast<void*>(addr), len);
}

size_t Win32LockedPageAllocator::GetLimit()
{
    size_t min, max;
    if(GetProcessWorkingSetSize(GetCurrentProcess(), &min, &max) != 0) {
        return min;
    }
    return std::numeric_limits<size_t>::max();
}
#endif

/*******************************************************************************/
// Implementation: PosixLockedPageAllocator

#ifndef WIN32
/** LockedPageAllocator specialized for OSes that don't try to be
 * special snowflakes.
 */
class PosixLockedPageAllocator: public LockedPageAllocator
{
public:
    PosixLockedPageAllocator();
    void* AllocateLocked(size_t len, bool *lockingSuccess) override;
    void FreeLocked(void* addr, size_t len) override;
    size_t GetLimit() override;
private:
    size_t page_size;
};

PosixLockedPageAllocator::PosixLockedPageAllocator()
{
    // Determine system page size in bytes
#if defined(PAGESIZE) // defined in limits.h
    page_size = PAGESIZE;
#else // assume some POSIX OS
    page_size = sysconf(_SC_PAGESIZE);
#endif
}

void *PosixLockedPageAllocator::AllocateLocked(size_t len, bool *lockingSuccess)
{
    void *addr;
    len = align_up(len, page_size);
    addr = mmap(nullptr, len, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    if (addr == MAP_FAILED) {
        return nullptr;
    }
    if (addr) {
        *lockingSuccess = mlock(addr, len) == 0;
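        // The madvise() calls below ask the kernel to exclude these pages from
        // core dumps, so key material is not written to disk if the process
        // crashes.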
#if defined(MADV_DONTDUMP) // Linux
        madvise(addr, len, MADV_DONTDUMP);
#elif defined(MADV_NOCORE) // FreeBSD
        madvise(addr, len, MADV_NOCORE);
#endif
    }
    return addr;
}
void PosixLockedPageAllocator::FreeLocked(void* addr, size_t len)
{
    len = align_up(len, page_size);
    memory_cleanse(addr, len);
    munlock(addr, len);
    munmap(addr, len);
}
size_t PosixLockedPageAllocator::GetLimit()
{
#ifdef RLIMIT_MEMLOCK
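    // RLIMIT_MEMLOCK is the resource limit on how many bytes of memory this
    // process may lock with mlock(); rlim_cur is the soft limit, which is the
    // value actually enforced.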
    struct rlimit rlim;
    if (getrlimit(RLIMIT_MEMLOCK, &rlim) == 0) {
        if (rlim.rlim_cur != RLIM_INFINITY) {
            return rlim.rlim_cur;
        }
    }
#endif
    return std::numeric_limits<size_t>::max();
}
#endif

/*******************************************************************************/
// Implementation: LockedPool

LockedPool::LockedPool(std::unique_ptr<LockedPageAllocator> allocator_in, LockingFailed_Callback lf_cb_in)
    : allocator(std::move(allocator_in)), lf_cb(lf_cb_in)
{
}

LockedPool::~LockedPool() = default;

void* LockedPool::alloc(size_t size)
{
    std::lock_guard<std::mutex> lock(mutex);

    // Don't handle impossible sizes
    if (size == 0 || size > ARENA_SIZE)
        return nullptr;

    // Try allocating from each current arena
    for (auto &arena: arenas) {
        void *addr = arena.alloc(size);
        if (addr) {
            return addr;
        }
    }
    // If that fails, create a new one
    if (new_arena(ARENA_SIZE, ARENA_ALIGN)) {
        return arenas.back().alloc(size);
    }
    return nullptr;
}

void LockedPool::free(void *ptr)
{
    std::lock_guard<std::mutex> lock(mutex);
    // TODO we can do better than this linear search by keeping a map of arena
    // extents to arena, and looking up the address.
    for (auto &arena: arenas) {
        if (arena.addressInArena(ptr)) {
            arena.free(ptr);
            return;
        }
    }
    throw std::runtime_error("LockedPool: invalid address not pointing to any arena");
}

LockedPool::Stats LockedPool::stats() const
{
    std::lock_guard<std::mutex> lock(mutex);
    LockedPool::Stats r{0, 0, 0, cumulative_bytes_locked, 0, 0};
    for (const auto &arena: arenas) {
        Arena::Stats i = arena.stats();
        r.used += i.used;
        r.free += i.free;
        r.total += i.total;
        r.chunks_used += i.chunks_used;
        r.chunks_free += i.chunks_free;
    }
    return r;
}

bool LockedPool::new_arena(size_t size, size_t align)
{
    bool locked;
    // If this is the first arena, handle it specially: cap its size at the
    // process's memlock limit, so that the first arena can at least be locked
    // in its entirety. The exception is a process limit of 0: then no memory
    // can be locked at all, so the cap is skipped.
    if (arenas.empty()) {
        size_t limit = allocator->GetLimit();
        if (limit > 0) {
            size = std::min(size, limit);
        }
    }
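    // For instance, if ARENA_SIZE is 256 KiB but GetLimit() reports a 64 KiB
    // memlock limit, the cap above shrinks the first arena to 64 KiB so it
    // can be locked in its entirety.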
    void *addr = allocator->AllocateLocked(size, &locked);
    if (!addr) {
        return false;
    }
    if (locked) {
        cumulative_bytes_locked += size;
    } else if (lf_cb) { // Call the locking-failed callback if locking failed
        if (!lf_cb()) { // If the callback returns false, free the memory and fail, otherwise consider the user warned and proceed.
            allocator->FreeLocked(addr, size);
            return false;
        }
    }
    arenas.emplace_back(allocator.get(), addr, size, align);
    return true;
}

LockedPool::LockedPageArena::LockedPageArena(LockedPageAllocator *allocator_in, void *base_in, size_t size_in, size_t align_in):
    Arena(base_in, size_in, align_in), base(base_in), size(size_in), allocator(allocator_in)
{
}
LockedPool::LockedPageArena::~LockedPageArena()
{
    allocator->FreeLocked(base, size);
}

/*******************************************************************************/
// Implementation: LockedPoolManager
//
LockedPoolManager::LockedPoolManager(std::unique_ptr<LockedPageAllocator> allocator_in):
    LockedPool(std::move(allocator_in), &LockedPoolManager::LockingFailed)
{
}

bool LockedPoolManager::LockingFailed()
{
    // TODO: log something but how? without including util.h
    return true;
}

void LockedPoolManager::CreateInstance()
{
    // Using a local static instance guarantees that the object is initialized
    // when it's first needed and also deinitialized after all objects that use
    // it are done with it. I can think of one unlikely scenario where we may
    // have a static deinitialization order/problem, but the check in
    // LockedPoolManagerBase's destructor helps us detect if that ever happens.
#ifdef WIN32
    std::unique_ptr<LockedPageAllocator> allocator(new Win32LockedPageAllocator());
#else
    std::unique_ptr<LockedPageAllocator> allocator(new PosixLockedPageAllocator());
#endif
    static LockedPoolManager instance(std::move(allocator));
    LockedPoolManager::_instance = &instance;
}
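
// Typical use goes through LockedPoolManager::Instance() (declared in
// lockedpool.h), e.g. from a secure allocator wrapping key material, rather
// than by constructing LockedPool objects directly.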