Excerpt of the key declarations from support/lockedpool.h:

#ifndef BITCOIN_SUPPORT_LOCKEDPOOL_H
#define BITCOIN_SUPPORT_LOCKEDPOOL_H

#include <unordered_map>

// LockedPageAllocator interface:
virtual void* AllocateLocked(size_t len, bool *lockingSuccess) = 0;
virtual void FreeLocked(void* addr, size_t len) = 0;

// Arena:
void* alloc(size_t size);
typedef std::unordered_map<char*, SizeToChunkSortedMap::const_iterator> ChunkToSizeMap;

// LockedPool:
void* alloc(size_t size);
void free(void *ptr);
bool new_arena(size_t size, size_t align);

#endif // BITCOIN_SUPPORT_LOCKEDPOOL_H
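Taken together, these declarations describe a pool that hands out chunks of page-locked memory. A minimal usage sketch, assuming the header is included as support/lockedpool.h and linked against the pool implementation:

#include <support/lockedpool.h>
#include <cstring>

int main()
{
    // The process-wide pool of locked (non-swappable) pages.
    LockedPool &pool = LockedPoolManager::Instance();

    // Allocate 32 bytes of locked memory, e.g. for key material.
    void *key = pool.alloc(32);
    if (key) {
        std::memset(key, 0xAB, 32); // stand-in for writing secret data
        std::memset(key, 0, 32);    // wipe before returning the chunk
        pool.free(key);
    }
    return 0;
}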
static std::once_flag init_flag
Arena & operator=(const Arena &)=delete
size_t alignment
Minimum chunk alignment.
bool(* LockingFailed_Callback)()
Callback when allocation succeeds but locking fails.
std::mutex mutex
Mutex protects access to this pool's data structures, including arenas.
std::multimap< size_t, char * > SizeToChunkSortedMap
std::list< LockedPageArena > arenas
static LockedPoolManager & Instance()
Return the current instance, or create it once.
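Instance() is a classic call-once singleton. A sketch of how init_flag, CreateInstance() and _instance plausibly fit together (the actual body lives in the .cpp, so treat this as an assumption):

// requires <mutex> for std::call_once
LockedPoolManager& LockedPoolManager::Instance()
{
    // std::call_once guarantees CreateInstance runs exactly once,
    // even if several threads race for the first instance.
    std::call_once(LockedPoolManager::init_flag, LockedPoolManager::CreateInstance);
    return *LockedPoolManager::_instance;
}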
static const size_t ARENA_ALIGN
Chunk alignment.
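Alignment like ARENA_ALIGN is typically enforced by rounding sizes and offsets up to a power-of-two boundary; a helper in that spirit (the name align_up is an assumption):

#include <cstddef>

// Round x up to the next multiple of align; align must be a power of two.
static inline std::size_t align_up(std::size_t x, std::size_t align)
{
    return (x + align - 1) & ~(align - 1);
}
// e.g. align_up(13, 16) == 16, align_up(32, 16) == 32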
virtual void * AllocateLocked(size_t len, bool *lockingSuccess)=0
Allocate and lock memory pages.
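On POSIX systems a concrete LockedPageAllocator would plausibly back this with mmap plus mlock; a hedged sketch, not the shipped implementation:

#include <sys/mman.h>
#include <cstddef>

void* AllocateLockedSketch(std::size_t len, bool *lockingSuccess)
{
    *lockingSuccess = false;
    void *addr = mmap(nullptr, len, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (addr == MAP_FAILED)
        return nullptr;
    // mlock pins the pages so they cannot be swapped out. Allocation can
    // succeed while locking fails (e.g. RLIMIT_MEMLOCK), which is why the
    // lock result is reported separately from the pointer.
    *lockingSuccess = mlock(addr, len) == 0;
    return addr;
}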
LockedPool(std::unique_ptr< LockedPageAllocator > allocator, LockingFailed_Callback lf_cb_in=nullptr)
Create a new LockedPool.
ChunkToSizeMap chunks_free
Map from begin of free chunk to its node in size_to_free_chunk.
LockingFailed_Callback lf_cb
SizeToChunkSortedMap size_to_free_chunk
Map to enable O(log(n)) best-fit allocation, as it's sorted by size.
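Keyed by chunk size, the multimap makes best-fit a single lower_bound: the first entry whose size is at least the request is the smallest chunk that fits. A simplified sketch of the lookup:

#include <cstddef>
#include <map>

using SizeToChunkSortedMap = std::multimap<std::size_t, char*>;

// Return the best-fitting free chunk, or end() if nothing is large enough.
SizeToChunkSortedMap::iterator best_fit(SizeToChunkSortedMap &free_chunks,
                                        std::size_t size)
{
    return free_chunks.lower_bound(size); // O(log n)
}

The arena can then split the found chunk, handing out the requested prefix and re-inserting any remainder as a smaller free chunk.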
LockedPageArena(LockedPageAllocator *alloc_in, void *base_in, size_t size, size_t align)
Create an arena from locked pages.
std::unordered_map< char *, size_t > chunks_used
Map from begin of used chunk to its size.
OS-dependent allocation and deallocation of locked/pinned memory pages.
bool addressInArena(void *ptr) const
Return whether a pointer points inside this arena.
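With base and end as below, the membership test is a half-open range check; presumably along the lines of:

bool Arena::addressInArena(void *ptr) const
{
    char *p = static_cast<char*>(ptr);
    // Half-open interval [base, end): 'end' is one past the last byte.
    return p >= base && p < end;
}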
LockedPageAllocator * allocator
Singleton class to keep track of locked (ie, non-swappable) memory, for use in std::allocator templates.
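The std::allocator note refers to adaptors like Bitcoin's secure_allocator; a minimal sketch of such a wrapper (locked_allocator is an illustrative name, not the shipped class):

#include <cstddef>
#include <new>
// plus support/lockedpool.h for LockedPoolManager

template <typename T>
struct locked_allocator {
    using value_type = T;
    locked_allocator() noexcept = default;
    template <typename U>
    locked_allocator(const locked_allocator<U>&) noexcept {}

    T* allocate(std::size_t n)
    {
        // Route the request into the singleton pool of locked pages.
        void *p = LockedPoolManager::Instance().alloc(n * sizeof(T));
        if (!p) throw std::bad_alloc();
        return static_cast<T*>(p);
    }
    void deallocate(T *p, std::size_t) noexcept
    {
        LockedPoolManager::Instance().free(p);
    }
};

The real secure_allocator additionally wipes the memory (memory_cleanse) before handing it back to the pool.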
void * alloc(size_t size)
Allocate size bytes from this arena.
char * end
End address of arena.
Stats stats() const
Get arena usage statistics.
static LockedPoolManager * _instance
void * alloc(size_t size)
Allocate size bytes from this pool.
virtual void FreeLocked(void *addr, size_t len)=0
Unlock and free memory pages.
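The matching teardown on POSIX would unlock before unmapping; again a sketch rather than the shipped code (a hardened version would also wipe the pages first):

#include <sys/mman.h>
#include <cstddef>

void FreeLockedSketch(void *addr, std::size_t len)
{
    // Release the kernel's locked-page accounting, then the mapping itself.
    munlock(addr, len);
    munmap(addr, len);
}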
static const size_t ARENA_SIZE
Size of one arena of locked memory.
std::unordered_map< char *, SizeToChunkSortedMap::const_iterator > ChunkToSizeMap
virtual ~LockedPageAllocator()
static bool LockingFailed()
Called when locking fails, warn the user here.
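The callback lets the application decide whether unlocked (swappable) memory is acceptable; a sketch of a warn-and-continue policy matching the bool(*)() signature:

#include <cstdio>

bool WarnOnLockingFailure()
{
    // Returning true tells the pool to keep the unlocked memory anyway;
    // returning false makes the pool discard it and fail the arena.
    std::fprintf(stderr,
        "Warning: failed to lock memory pages; secrets may be swapped to disk.\n");
    return true;
}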
Pool for locked memory chunks.
void free(void *ptr)
Free a previously allocated chunk of memory.
void free(void *ptr)
Free a previously allocated chunk of memory.
char * base
Base address of arena.
virtual size_t GetLimit()=0
Get the total limit on the amount of memory that may be locked by this process, in bytes. Return size_t max if there is no limit or the limit is unknown; return 0 if no memory can be locked at all.
bool new_arena(size_t size, size_t align)
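new_arena is where the pieces meet: ask the allocator for locked pages, consult the callback if locking failed, then wrap the pages in an arena. A plausible, simplified flow (the shipped version also checks GetLimit() and tracks cumulative_bytes_locked):

bool LockedPool::new_arena(size_t size, size_t align)
{
    bool locked;
    void *addr = allocator->AllocateLocked(size, &locked);
    if (!addr)
        return false;
    if (!locked && lf_cb) {
        // Locking failed: let the callback decide whether to proceed.
        if (!lf_cb()) {
            allocator->FreeLocked(addr, size);
            return false;
        }
    }
    arenas.emplace_back(allocator.get(), addr, size, align);
    return true;
}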
LockedPoolManager(std::unique_ptr< LockedPageAllocator > allocator)
Stats stats() const
Get pool usage statistics.
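stats() makes pool pressure observable; assuming Stats carries byte counters named used, total and locked (the field names are an assumption here), usage could look like:

#include <cstdio>

void print_pool_stats()
{
    LockedPool::Stats s = LockedPoolManager::Instance().stats();
    std::printf("locked pool: %zu/%zu bytes used, %zu bytes locked\n",
                s.used, s.total, s.locked);
}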
static void CreateInstance()
Create a new LockedPoolManager specialized to the OS.
size_t cumulative_bytes_locked
Arena(void *base, size_t size, size_t alignment)
ChunkToSizeMap chunks_free_end
Map from end of free chunk to its node in size_to_free_chunk.
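chunks_free and chunks_free_end together give O(1) neighbour lookup when a chunk is freed: the left neighbour is found by its end address, the right neighbour by its begin address, so adjacent free chunks merge without scanning. A simplified sketch of the idea (a free-standing function, not the member code):

#include <cstddef>
#include <map>
#include <unordered_map>

using SizeToChunkSortedMap = std::multimap<std::size_t, char*>;
using ChunkToSizeMap =
    std::unordered_map<char*, SizeToChunkSortedMap::const_iterator>;

void coalesce(char *&ptr, std::size_t &size,
              SizeToChunkSortedMap &size_to_free_chunk,
              ChunkToSizeMap &chunks_free,
              ChunkToSizeMap &chunks_free_end)
{
    // Left neighbour: a free chunk whose end address equals our begin.
    auto left = chunks_free_end.find(ptr);
    if (left != chunks_free_end.end()) {
        char *lbegin = left->second->second;     // neighbour's begin address
        std::size_t lsize = left->second->first; // neighbour's size
        size_to_free_chunk.erase(left->second);
        chunks_free.erase(lbegin);
        chunks_free_end.erase(left);
        ptr = lbegin;                            // merged chunk starts earlier
        size += lsize;
    }
    // Right neighbour: a free chunk whose begin address equals our end.
    auto right = chunks_free.find(ptr + size);
    if (right != chunks_free.end()) {
        char *rbegin = right->second->second;
        std::size_t rsize = right->second->first;
        size_to_free_chunk.erase(right->second);
        chunks_free_end.erase(rbegin + rsize);
        chunks_free.erase(right);
        size += rsize;
    }
    // The caller re-inserts the merged [ptr, ptr + size) chunk into all maps.
}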
std::unique_ptr< LockedPageAllocator > allocator
LockedPool & operator=(const LockedPool &)=delete