#if defined(HAVE_CONFIG_H)
#include "config/bitcoin-config.h"
#endif

#ifdef WIN32
#define _WIN32_WINNT 0x0501
#define WIN32_LEAN_AND_MEAN 1
#include <windows.h>
#else
#include <sys/mman.h>     // for mmap
#include <sys/resource.h> // for getrlimit
#endif

/** Align x up to the next multiple of align (align must be a power of two). */
static inline size_t align_up(size_t x, size_t align)
{
    return (x + align - 1) & ~(align - 1);
}
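For reference (not part of the source), the rounding behaviour of align_up can be checked with inline constant expressions; the alignment of 16 below is chosen arbitrarily for illustration:

// align_up relies on align being a power of two: adding align - 1 and masking
// with ~(align - 1) clears the low bits, which rounds up to the next multiple.
static_assert(((13 + 15) & ~15ul) == 16, "13 rounds up to 16");
static_assert(((32 + 15) & ~15ul) == 32, "already-aligned values are unchanged");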
// An Arena manages the address range [base, end) with a fixed minimum alignment.
Arena::Arena(void *base_in, size_t size_in, size_t alignment_in):
    base(static_cast<char*>(base_in)),
    end(static_cast<char*>(base_in) + size_in),
    alignment(alignment_in)
// Arena::alloc, after best-fit selection of a free chunk (size_ptr_it):
// carve the used chunk out of the end of the free chunk.
const size_t size_remaining = size_ptr_it->first - size;
auto allocated = chunks_used.emplace(size_ptr_it->second + size_remaining, size).first;
if (size_ptr_it->first == size) {
    // The whole free chunk is used up.
    ...
} else {
    // Some memory is left over; re-register the shrunken free chunk under its new size and end address.
    auto it_remaining = size_to_free_chunk.emplace(size_remaining, size_ptr_it->second);
    chunks_free_end.emplace(size_ptr_it->second + size_remaining, it_remaining);
}
return reinterpret_cast<void*>(allocated->first);
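size_to_free_chunk is ordered by chunk size, so the best-fit free chunk is found with a single lower_bound lookup. The following stand-alone sketch (not from the source) shows the same strategy on a std::multimap; the map name free_chunks and the helper pick_best_fit are illustrative assumptions:

#include <cstddef>
#include <map>

// size -> start address of a free chunk, ordered by size (smallest first).
using SizeToChunkSortedMap = std::multimap<std::size_t, char*>;

// Best-fit: the first entry whose size is not less than the request.
SizeToChunkSortedMap::iterator pick_best_fit(SizeToChunkSortedMap& free_chunks, std::size_t size)
{
    return free_chunks.lower_bound(size);
}

With free chunks of 32, 64 and 256 bytes, a 48-byte request selects the 64-byte chunk in O(log n); Arena::alloc then carves the allocation from the end of that chunk, as shown above.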
void Arena::free(void *ptr)
{
    // Remove the chunk from the used map.
    auto i = chunks_used.find(static_cast<char*>(ptr));
    if (i == chunks_used.end()) {
        throw std::runtime_error("Arena: invalid or double free");
    }
    std::pair<char*, size_t> freed = *i;
    // Coalesce with a free chunk that ends where the freed chunk begins, if any.
    auto prev = chunks_free_end.find(freed.first);
    if (prev != chunks_free_end.end()) {
        freed.first -= prev->second->first;
        freed.second += prev->second->first;
    }
    // Coalesce with a free chunk that begins where the freed chunk ends, if any.
    auto next = chunks_free.find(freed.first + freed.second);
    if (next != chunks_free.end()) {
        freed.second += next->second->first;
    }
    ...
}
// Arena::stats: sum the byte counts of used and free chunks.
for (const auto& chunk: chunks_used)
    r.used += chunk.second;
for (const auto& chunk: chunks_free)
    r.free += chunk.second->first;
r.total = r.used + r.free;
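Not from the source: a small usage sketch of the public Arena interface (alloc, free, stats) declared in the lockedpool header. The backing buffer here is ordinary heap memory rather than locked pages, and the sizes and 16-byte alignment are arbitrary, so this only exercises the chunk bookkeeping:

#include "support/lockedpool.h"
#include <cassert>
#include <vector>

int main()
{
    std::vector<char> backing(4096);                  // arbitrary unlocked backing store
    Arena arena(backing.data(), backing.size(), 16);  // 16-byte minimum chunk alignment

    void* a = arena.alloc(100);   // request is rounded up to the arena alignment
    void* b = arena.alloc(200);
    assert(a && b);

    Arena::Stats s = arena.stats();
    assert(s.used + s.free == s.total); // total is conserved across allocations

    arena.free(a);
    arena.free(b);                // neighbouring free chunks are coalesced
    assert(arena.stats().free == backing.size());
    return 0;
}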
static void printchunk(char* base, size_t sz, bool used) {
    std::cout <<
        "0x" << std::hex << std::setw(16) << std::setfill('0') << static_cast<void*>(base) << // print the address, not a C string
        " 0x" << std::hex << std::setw(16) << std::setfill('0') << sz <<
        " 0x" << used << std::endl;
}
void Arena::walk() const
{
    for (const auto& chunk: chunks_used)
        printchunk(chunk.first, chunk.second, true);
    std::cout << std::endl;
    for (const auto& chunk: chunks_free)
        printchunk(chunk.first, chunk.second->first, false); // chunks_free maps to iterators; dereference for the size
    std::cout << std::endl;
}
/** LockedPageAllocator specialized for Windows. */
class Win32LockedPageAllocator : public LockedPageAllocator
{
public:
    Win32LockedPageAllocator();
    void* AllocateLocked(size_t len, bool *lockingSuccess) override;
    void FreeLocked(void* addr, size_t len) override;
    size_t GetLimit() override;
private:
    size_t page_size;
};
Win32LockedPageAllocator::Win32LockedPageAllocator()
{
    // Determine the system page size in bytes.
    SYSTEM_INFO sSysInfo;
    GetSystemInfo(&sSysInfo);
    page_size = sSysInfo.dwPageSize;
}
void *Win32LockedPageAllocator::AllocateLocked(size_t len, bool *lockingSuccess)
{
    len = align_up(len, page_size);
    void *addr = VirtualAlloc(nullptr, len, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
    if (addr) {
        // VirtualLock keeps the pages resident so keying material is not written to the
        // pagefile; in practice this is best-effort rather than a hard guarantee.
        *lockingSuccess = VirtualLock(const_cast<void*>(addr), len) != 0;
    }
    return addr;
}
void Win32LockedPageAllocator::FreeLocked(void* addr, size_t len)
{
    len = align_up(len, page_size);
    memory_cleanse(addr, len); // wipe the contents before unlocking
    VirtualUnlock(const_cast<void*>(addr), len);
}
size_t Win32LockedPageAllocator::GetLimit()
{
    // No lock limit is queried on Windows; report "no limit".
    return std::numeric_limits<size_t>::max();
}
/** LockedPageAllocator specialized for OSes that don't try to be special snowflakes (declaration excerpt). */
class PosixLockedPageAllocator : public LockedPageAllocator
{
public:
    PosixLockedPageAllocator();
    void* AllocateLocked(size_t len, bool *lockingSuccess) override;
    void FreeLocked(void* addr, size_t len) override;
    size_t GetLimit() override;
    ...
};
PosixLockedPageAllocator::PosixLockedPageAllocator()
{
    // Determine the page size.
#if defined(PAGESIZE) // defined in limits.h
    page_size = PAGESIZE;
#else                 // assume some POSIX OS
    page_size = sysconf(_SC_PAGESIZE);
#endif
}

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON // some systems only provide the deprecated MAP_ANON
#endif

void *PosixLockedPageAllocator::AllocateLocked(size_t len, bool *lockingSuccess)
{
    len = align_up(len, page_size);
    void *addr = mmap(nullptr, len, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    if (addr) {
        *lockingSuccess = mlock(addr, len) == 0;
    }
    return addr;
}
size_t PosixLockedPageAllocator::GetLimit()
{
#ifdef RLIMIT_MEMLOCK
    struct rlimit rlim;
    if (getrlimit(RLIMIT_MEMLOCK, &rlim) == 0) {
        if (rlim.rlim_cur != RLIM_INFINITY) {
            return rlim.rlim_cur;
        }
    }
#endif
    return std::numeric_limits<size_t>::max();
}
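GetLimit() reports the RLIMIT_MEMLOCK soft limit so that the pool can cap its first arena at a size the kernel will actually lock (see new_arena below). A stand-alone sketch of the same query, assuming a POSIX system, printing both the soft and hard limits:

#include <sys/resource.h>
#include <cstdio>

int main()
{
    struct rlimit rlim;
    if (getrlimit(RLIMIT_MEMLOCK, &rlim) != 0) {
        std::perror("getrlimit(RLIMIT_MEMLOCK)");
        return 1;
    }
    if (rlim.rlim_cur == RLIM_INFINITY) {
        std::printf("mlock limit: unlimited\n");
    } else {
        // rlim_cur is the soft limit the allocator respects; rlim_max is the ceiling
        // the process could raise it to with setrlimit (privileges permitting).
        std::printf("mlock soft limit: %llu bytes, hard limit: %llu bytes\n",
                    static_cast<unsigned long long>(rlim.rlim_cur),
                    static_cast<unsigned long long>(rlim.rlim_max));
    }
    return 0;
}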
LockedPool::LockedPool(std::unique_ptr<LockedPageAllocator> allocator_in, LockingFailed_Callback lf_cb_in):
    allocator(std::move(allocator_in)), lf_cb(lf_cb_in), cumulative_bytes_locked(0)
{
}
void* LockedPool::alloc(size_t size)
{
    std::lock_guard<std::mutex> lock(mutex);
    ...
    // Try allocating from each existing arena.
    for (auto &arena: arenas) {
        void *addr = arena.alloc(size);
        if (addr) {
            return addr;
        }
    }
    // If that fails, create a new arena and allocate from it.
    if (new_arena(ARENA_SIZE, ARENA_ALIGN)) {
        return arenas.back().alloc(size);
    }
    return nullptr;
}
void LockedPool::free(void *ptr)
{
    std::lock_guard<std::mutex> lock(mutex);
    // Linear search for the arena that owns this address.
    for (auto &arena: arenas) {
        if (arena.addressInArena(ptr)) {
            arena.free(ptr);
            return;
        }
    }
    throw std::runtime_error("LockedPool: invalid address not pointing to any arena");
}
LockedPool::Stats LockedPool::stats() const
{
    std::lock_guard<std::mutex> lock(mutex);
    // Aggregate the usage statistics of every arena.
    for (const auto &arena: arenas) {
        ...
    }
    ...
}
// LockedPool::new_arena (excerpt): cap the first arena at the process lock limit,
// then request locked pages from the OS-specific allocator.
size_t limit = allocator->GetLimit();
size = std::min(size, limit);
bool locked;
void *addr = allocator->AllocateLocked(size, &locked);
LockedPool::LockedPageArena::LockedPageArena(LockedPageAllocator *allocator_in, void *base_in, size_t size_in, size_t align_in):
    Arena(base_in, size_in, align_in), base(base_in), size(size_in), allocator(allocator_in)
{
}
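LockedPool takes its LockedPageAllocator by unique_ptr, so the OS-specific locking can be swapped out. The sketch below (not from the source) wires in a hypothetical allocator that hands out plain heap memory and reports locking as failed, which is handy for exercising the pool's bookkeeping without privileges; the names FakeLockedPageAllocator and the include path are assumptions for illustration:

#include "support/lockedpool.h"
#include <cstdlib>
#include <memory>

// Hypothetical allocator: ordinary heap pages, never actually locked.
class FakeLockedPageAllocator : public LockedPageAllocator
{
public:
    void* AllocateLocked(size_t len, bool* lockingSuccess) override
    {
        *lockingSuccess = false;           // we never lock, so report failure honestly
        return std::malloc(len);
    }
    void FreeLocked(void* addr, size_t /*len*/) override { std::free(addr); }
    size_t GetLimit() override { return 0; } // 0: no memory can be locked at all
};

int main()
{
    // The callback returns true, meaning "user warned, proceed with unlocked memory".
    LockedPool pool(std::make_unique<FakeLockedPageAllocator>(), []{ return true; });
    void* p = pool.alloc(256);
    pool.free(p);
    return 0;
}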
void LockedPoolManager::CreateInstance()
{
    // Construct the OS-appropriate page allocator and a function-local static
    // instance, so the singleton is initialized on first use.
#ifdef WIN32
    std::unique_ptr<LockedPageAllocator> allocator(new Win32LockedPageAllocator());
#else
    std::unique_ptr<LockedPageAllocator> allocator(new PosixLockedPageAllocator());
#endif
    static LockedPoolManager instance(std::move(allocator));
    LockedPoolManager::_instance = &instance;
}
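CreateInstance() is intended to run exactly once; the init_flag and _instance members listed below exist to support the usual std::call_once singleton access pattern. A generic, self-contained sketch of that pattern follows (the Widget class is hypothetical and only illustrates the shape of the mechanism):

#include <mutex>

class Widget
{
public:
    static Widget& Instance()
    {
        // call_once guarantees CreateInstance runs exactly once, even if several
        // threads race to obtain the singleton for the first time.
        std::call_once(Widget::init_flag, Widget::CreateInstance);
        return *Widget::_instance;
    }

private:
    Widget() = default;

    static void CreateInstance()
    {
        // A function-local static is initialized on first use and destroyed at exit.
        static Widget instance;
        _instance = &instance;
    }

    static Widget* _instance;
    static std::once_flag init_flag;
};

Widget* Widget::_instance = nullptr;
std::once_flag Widget::init_flag;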
Definitions referenced in this file:

LockedPageAllocator
    OS-dependent allocation and deallocation of locked/pinned memory pages.
    virtual void* AllocateLocked(size_t len, bool* lockingSuccess) = 0
        Allocate and lock memory pages.
    virtual void FreeLocked(void* addr, size_t len) = 0
        Unlock and free memory pages.
    virtual size_t GetLimit() = 0
        Get the total limit on the amount of memory that may be locked by this process, in bytes.

Win32LockedPageAllocator / PosixLockedPageAllocator
    PosixLockedPageAllocator: LockedPageAllocator specialized for OSes that don't try to be special snowflakes.
    PosixLockedPageAllocator()
    void* AllocateLocked(size_t len, bool* lockingSuccess) override
        Allocate and lock memory pages.
    void FreeLocked(void* addr, size_t len) override
        Unlock and free memory pages.
    size_t GetLimit() override
        Get the total limit on the amount of memory that may be locked by this process, in bytes.

Arena
    Arena(void* base, size_t size, size_t alignment)
    void* alloc(size_t size)
        Allocate size bytes from this arena.
    void free(void* ptr)
        Free a previously allocated chunk of memory.
    Stats stats() const
        Get arena usage statistics.
    char* base
        Base address of arena.
    size_t alignment
        Minimum chunk alignment.
    std::unordered_map<char*, size_t> chunks_used
        Map from begin of used chunk to its size.
    SizeToChunkSortedMap size_to_free_chunk
        Map to enable O(log(n)) best-fit allocation, as it's sorted by size.
    ChunkToSizeMap chunks_free
        Map from begin of free chunk to its node in size_to_free_chunk.
    ChunkToSizeMap chunks_free_end
        Map from end of free chunk to its node in size_to_free_chunk.

LockedPool
    Pool for locked memory chunks.
    LockedPool(std::unique_ptr<LockedPageAllocator> allocator, LockingFailed_Callback lf_cb_in = nullptr)
        Create a new LockedPool.
    void* alloc(size_t size)
        Allocate size bytes from this arena.
    void free(void* ptr)
        Free a previously allocated chunk of memory.
    Stats stats() const
        Get pool usage statistics.
    bool new_arena(size_t size, size_t align)
    static const size_t ARENA_SIZE
        Size of one arena of locked memory.
    static const size_t ARENA_ALIGN
        Chunk alignment.
    std::list<LockedPageArena> arenas
    std::unique_ptr<LockedPageAllocator> allocator
    LockingFailed_Callback lf_cb
    size_t cumulative_bytes_locked
    std::mutex mutex
        Mutex protects access to this pool's data structures, including arenas.
    LockedPageArena(LockedPageAllocator* alloc_in, void* base_in, size_t size, size_t align)

LockedPoolManager
    Singleton class to keep track of locked (ie, non-swappable) memory, for use in std::allocator templates (see the sketch after this list).
    LockedPoolManager(std::unique_ptr<LockedPageAllocator> allocator)
    static void CreateInstance()
        Create a new LockedPoolManager specialized to the OS.
    static bool LockingFailed()
        Called when locking fails, warn the user here.
    static LockedPoolManager* _instance
    static std::once_flag init_flag

void memory_cleanse(void* ptr, size_t len)
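The LockedPoolManager entry above mentions use in std::allocator templates; here is the promised sketch (not part of this file) of a minimal standard-library-compatible allocator that routes storage through the singleton pool, assuming the manager exposes its usual Instance() accessor. The names locked_pool_allocator and secure_string are illustrative assumptions:

#include "support/lockedpool.h"
#include <cstddef>
#include <new>      // std::bad_alloc
#include <string>

// Hypothetical allocator template backed by the process-wide locked pool.
template <typename T>
struct locked_pool_allocator
{
    using value_type = T;

    locked_pool_allocator() noexcept = default;
    template <typename U>
    locked_pool_allocator(const locked_pool_allocator<U>&) noexcept {}

    T* allocate(std::size_t n)
    {
        void* p = LockedPoolManager::Instance().alloc(n * sizeof(T));
        if (!p) throw std::bad_alloc(); // pool full or request larger than one arena
        return static_cast<T*>(p);
    }
    void deallocate(T* p, std::size_t) noexcept
    {
        if (p) LockedPoolManager::Instance().free(p);
    }
};

template <typename T, typename U>
bool operator==(const locked_pool_allocator<T>&, const locked_pool_allocator<U>&) { return true; }
template <typename T, typename U>
bool operator!=(const locked_pool_allocator<T>&, const locked_pool_allocator<U>&) { return false; }

// Example: a string type whose character buffer lives in locked memory.
using secure_string = std::basic_string<char, std::char_traits<char>, locked_pool_allocator<char>>;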