#if defined(HAVE_CONFIG_H)
#include <config/bitcoin-config.h>
#endif

#include <sys/resource.h>
return reinterpret_cast<void *>(alloced->first);
if (ptr == nullptr) {
auto i = chunks_used.find(static_cast<char *>(ptr));
throw std::runtime_error("Arena: invalid or double free");
std::pair<char *, size_t> freed = *i;
freed.second += next->second->first;
r.free += chunk.second->first;
r.total = r.used + r.free;
static void printchunk(void *base, size_t sz, bool used) {
    std::cout << "0x" << std::hex << std::setw(16) << std::setfill('0') << base
              << " 0x" << std::hex << std::setw(16) << std::setfill('0') << sz
              << " 0x" << used << std::endl;
}

void Arena::walk() const {
    for (const auto &chunk : chunks_used) {
        printchunk(chunk.first, chunk.second, true);
    }
    std::cout << std::endl;
    for (const auto &chunk : chunks_free) {
        printchunk(chunk.first, chunk.second->first, false);
    }
    std::cout << std::endl;
}
Win32LockedPageAllocator::Win32LockedPageAllocator() {
void *Win32LockedPageAllocator::AllocateLocked(size_t len, bool *lockingSuccess) {
void Win32LockedPageAllocator::FreeLocked(void *addr, size_t len) {
size_t Win32LockedPageAllocator::GetLimit() {
    return std::numeric_limits<size_t>::max();
}
#define MAP_ANONYMOUS MAP_ANON
#if defined(MADV_DONTDUMP) // Linux
    madvise(addr, len, MADV_DONTDUMP);
#elif defined(MADV_NOCORE) // FreeBSD
    madvise(addr, len, MADV_NOCORE);
#endif
return rlim.rlim_cur;
return std::numeric_limits<size_t>::max();
cumulative_bytes_locked(0) {}
std::lock_guard<std::mutex> lock(mutex);
void *addr = arena.alloc(size);
return arenas.back().alloc(size);
std::lock_guard<std::mutex> lock(mutex);
if (arena.addressInArena(ptr)) {
throw std::runtime_error("LockedPool: invalid address not pointing to any arena");
std::lock_guard<std::mutex> lock(mutex);
size = std::min(size, limit);
void *addr = allocator->AllocateLocked(size, &locked);
#ifdef WIN32
    std::unique_ptr<LockedPageAllocator> allocator(new Win32LockedPageAllocator());
#else
    std::unique_ptr<LockedPageAllocator> allocator(new PosixLockedPageAllocator());
#endif
An arena manages a contiguous region of memory by dividing it into chunks.
char * base
Base address of arena.
size_t alignment
Minimum chunk alignment.
ChunkToSizeMap chunks_free_end
Map from end of free chunk to its node in size_to_free_chunk.
void * alloc(size_t size)
Allocate size bytes from this arena.
SizeToChunkSortedMap size_to_free_chunk
Map to enable O(log(n)) best-fit allocation, as it is sorted by size (see the best-fit sketch after this list).
Arena(void *base, size_t size, size_t alignment)
Stats stats() const
Get arena usage statistics.
std::unordered_map< char *, size_t > chunks_used
Map from begin of used chunk to its size.
ChunkToSizeMap chunks_free
Map from begin of free chunk to its node in size_to_free_chunk.
void free(void *ptr)
Free a previously allocated chunk of memory.
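To make the best-fit role of size_to_free_chunk concrete, here is a minimal, self-contained sketch of the idea, assuming a std::multimap keyed by chunk size (best_fit_alloc is a hypothetical helper, not part of the Arena API): lower_bound finds the smallest free chunk that still fits in O(log n), and any unused remainder goes back into the free map.

#include <cstddef>
#include <map>

// Hypothetical stand-alone model of best-fit selection; the real Arena also
// keeps chunks_free / chunks_free_end so neighboring free chunks can be
// coalesced when a chunk is freed.
using SizeToChunkMap = std::multimap<size_t, char *>;

char *best_fit_alloc(SizeToChunkMap &free_by_size, size_t size) {
    auto it = free_by_size.lower_bound(size); // smallest chunk with size >= request
    if (it == free_by_size.end()) {
        return nullptr; // no free chunk is large enough
    }
    char *base = it->second;
    size_t chunk_size = it->first;
    free_by_size.erase(it);
    if (chunk_size > size) {
        // Hand back the unused tail of the chunk as a new, smaller free chunk.
        free_by_size.emplace(chunk_size - size, base + size);
    }
    return base;
}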
OS-dependent allocation and deallocation of locked/pinned memory pages.
virtual void * AllocateLocked(size_t len, bool *lockingSuccess)=0
Allocate and lock memory pages.
virtual void FreeLocked(void *addr, size_t len)=0
Unlock and free memory pages.
virtual size_t GetLimit()=0
Get the total limit on the amount of memory that may be locked by this process, in bytes.
LockedPageArena(LockedPageAllocator *alloc_in, void *base_in, size_t size, size_t align)
Pool for locked memory chunks (a usage sketch follows this list).
void free(void *ptr)
Free a previously allocated chunk of memory.
Stats stats() const
Get pool usage statistics.
std::unique_ptr< LockedPageAllocator > allocator
void * alloc(size_t size)
Allocate size bytes from this pool.
size_t cumulative_bytes_locked
LockedPool(std::unique_ptr< LockedPageAllocator > allocator, LockingFailed_Callback lf_cb_in=nullptr)
Create a new LockedPool.
LockingFailed_Callback lf_cb
std::list< LockedPageArena > arenas
bool new_arena(size_t size, size_t align)
static const size_t ARENA_ALIGN
Chunk alignment.
static const size_t ARENA_SIZE
Size of one arena of locked memory.
std::mutex mutex
Mutex protects access to this pool's data structures, including arenas.
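As a usage illustration only (error handling trimmed), a caller obtains the process-wide pool through LockedPoolManager::Instance() and uses the alloc/free interface listed above; the include path is an assumption based on the Bitcoin source tree layout.

#include <support/lockedpool.h> // assumed header location

void locked_pool_usage_sketch() {
    LockedPool &pool = LockedPoolManager::Instance();
    void *secret = pool.alloc(32); // 32 bytes of locked (non-swappable) memory
    if (secret != nullptr) {
        // ... write key material into the chunk ...
        pool.free(secret); // returns the chunk to its owning arena
    }
}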
Singleton class to keep track of locked (i.e., non-swappable) memory, for use in std::allocator templates.
LockedPoolManager(std::unique_ptr< LockedPageAllocator > allocator)
static bool LockingFailed()
Called when locking fails; warn the user here.
static LockedPoolManager * _instance
static void CreateInstance()
Create a new LockedPoolManager specialized to the OS (see the singleton sketch below).
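A name-swapped sketch of the singleton pattern described above, assuming creation is funneled through std::call_once; ManagerSketch and instance_ are illustrative stand-ins, not the real class.

#include <mutex>

class ManagerSketch {
public:
    static ManagerSketch &Instance() {
        static std::once_flag flag;
        std::call_once(flag, CreateInstance); // thread-safe, at most one creation
        return *instance_;
    }

private:
    ManagerSketch() = default;
    static void CreateInstance() {
        // Function-local static: constructed on first use and destroyed very
        // late at shutdown, so early and late users still see a valid object.
        static ManagerSketch instance;
        instance_ = &instance;
    }
    static ManagerSketch *instance_;
};

ManagerSketch *ManagerSketch::instance_ = nullptr;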
LockedPageAllocator specialized for OSes that don't try to be special snowflakes (a POSIX sketch follows this section).
void * AllocateLocked(size_t len, bool *lockingSuccess) override
Allocate and lock memory pages.
void FreeLocked(void *addr, size_t len) override
Unlock and free memory pages.
size_t GetLimit() override
Get the total limit on the amount of memory that may be locked by this process, in bytes.
PosixLockedPageAllocator()
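Pulling the POSIX fragments above together, a hedged sketch of the allocate/limit strategy: mmap an anonymous region, attempt mlock, exclude the pages from core dumps where an madvise flag exists, and read RLIMIT_MEMLOCK for the lock limit. The function names here are illustrative, not the real member functions.

#include <sys/mman.h>
#include <sys/resource.h>
#include <cstddef>
#include <limits>

void *allocate_locked_sketch(size_t len, bool *lockingSuccess) {
    void *addr = mmap(nullptr, len, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (addr == MAP_FAILED) {
        return nullptr;
    }
    *lockingSuccess = mlock(addr, len) == 0; // may fail under RLIMIT_MEMLOCK
#if defined(MADV_DONTDUMP) // Linux
    madvise(addr, len, MADV_DONTDUMP);
#elif defined(MADV_NOCORE) // FreeBSD
    madvise(addr, len, MADV_NOCORE);
#endif
    return addr;
}

size_t get_limit_sketch() {
    struct rlimit rlim;
    if (getrlimit(RLIMIT_MEMLOCK, &rlim) == 0 && rlim.rlim_cur != RLIM_INFINITY) {
        return rlim.rlim_cur; // soft limit on bytes this process may lock
    }
    return std::numeric_limits<size_t>::max(); // effectively unlimited
}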
void memory_cleanse(void *ptr, size_t len)
Securely overwrite a buffer (possibly containing secret data) with zero-bytes (sketch below).
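The core trick behind memory_cleanse, shown as a GCC/Clang-only sketch (the real implementation is more portable): zero the buffer, then insert a compiler barrier so the optimizer cannot discard the memset as a dead store.

#include <cstddef>
#include <cstring>

void memory_cleanse_sketch(void *ptr, size_t len) {
    std::memset(ptr, 0, len);
    // Empty asm that claims to read memory: the compiler must assume the
    // zeroed bytes are observed, so it cannot elide the memset above.
    __asm__ __volatile__("" : : "r"(ptr) : "memory");
}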
Implement std::hash so RCUPtr can be used as a key for maps or sets.
T GetRand(T nMax=std::numeric_limits< T >::max()) noexcept
Generate a uniform random integer of type T in the range [0..nMax). nMax defaults to std::numeric_limits<T>::max() (sketch below).
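For the half-open range [0..nMax) described above, a generic sketch using the standard library; illustrative only, since the project's GetRand draws from its own RNG. This version assumes nMax > 0 and a standard integer type T.

#include <limits>
#include <random>

template <typename T>
T get_rand_sketch(T nMax = std::numeric_limits<T>::max()) {
    static std::mt19937_64 rng{std::random_device{}()};
    // uniform_int_distribution is inclusive on both ends, so use nMax - 1.
    std::uniform_int_distribution<T> dist(0, nMax - 1);
    return dist(rng);
}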
static size_t align_up(size_t x, size_t align)
Align up to a power-of-2 boundary (sketch below).
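The usual bit trick for this, as a one-function sketch (valid only when align is a power of two):

#include <cstddef>

static size_t align_up_sketch(size_t x, size_t align) {
    // align - 1 is a mask of the low bits; adding it and then clearing
    // those bits rounds x up to the next multiple of align.
    return (x + align - 1) & ~(align - 1);
}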