Bucketed heap allocator. Each allocation size maps to a bucket, which holds a list of pages; pages are split into fixed-size blocks.
#pragma once

// ========================================================

#include <algorithm>
#include <array>
#include <cstddef>
#include <cstdint>
#include <cstring>

#ifndef SIMPLE_HEAP_ASSERT
#include <cassert>
#define SIMPLE_HEAP_ASSERT(expr, message) assert(expr)
#endif // SIMPLE_HEAP_ASSERT

// SIMPLE_HEAP_DEBUG enables memory fill patterns, guard words and sanity checks.
// Default it to off (release) when the build doesn't define it, so the
// `SIMPLE_HEAP_DEBUG ? 16 : 0` expression below always compiles.
#ifndef SIMPLE_HEAP_DEBUG
#define SIMPLE_HEAP_DEBUG 0
#endif // SIMPLE_HEAP_DEBUG

#if SIMPLE_HEAP_DEBUG
#include <cstdio>
#endif // SIMPLE_HEAP_DEBUG

// ========================================================

namespace SimpleHeap
{

#if SIMPLE_HEAP_DEBUG
constexpr std::uint8_t  AllocdMemFillPattern = 0xCD; // Clean memory - new allocation
constexpr std::uint8_t  FreedMemFillPattern  = 0xDD; // Dead memory - freed/deleted
constexpr std::uint8_t  FreshPageFillPattern = 0xFE; // New page - assume nothing allocated from it yet
constexpr std::uint8_t  DeadPageFillPattern  = 0xFD; // Recycled page - set when heap kills an empty page
constexpr std::uint64_t PageHeaderPattern    = UINT64_C(0x1BADF00D2BADF00D); // User block header guard value
constexpr std::uint64_t PageFooterPattern    = UINT64_C(0xDEADBEEFDEADBEEF); // User block footer guard value
#endif // SIMPLE_HEAP_DEBUG

// Most sizes are capped to 16 bits, so we can use uint16s and reduce overhead per page.
constexpr std::uint32_t MaxBuckets        = UINT16_MAX;
constexpr std::uint32_t MaxPageSize       = UINT16_MAX;
constexpr std::uint32_t MaxPagesPerBucket = UINT16_MAX;
constexpr std::uint32_t MaxBlocksPerPage  = UINT16_MAX;
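
// Illustrative sanity math for the 16-bit caps (not part of the build): with the
// largest legal page (MaxPageSize = 65535 bytes) and the smallest block a page
// can hand out (sizeof(void *), i.e. 8 bytes on a 64-bit target - an assumption
// for this example), a page holds at most 65535 / 8 = 8191 blocks, so a uint16
// allocated-count can never overflow:
//
//   constexpr std::uint32_t worstCaseBlocks = 65535u / 8u; // = 8191
//   static_assert(worstCaseBlocks <= MaxBlocksPerPage, "fits in a uint16");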
// ========================================================
// FixedSizePage:
// ========================================================

template
<
    int SizeBytes,
    int Alignment = alignof(std::max_align_t)
>
class FixedSizePage final
{
public:

    FixedSizePage() = default;

    FixedSizePage(const FixedSizePage &) = delete;
    FixedSizePage & operator = (const FixedSizePage &) = delete;

    void init(const std::uint16_t pageBlockSize, const std::uint16_t bucketIdx)
    {
        initDataBlock(pageBlockSize, bucketIdx);
        initFreeList();
    }

    void * allocBlock()
    {
        if (isFull())
        {
            return nullptr;
        }

        SIMPLE_HEAP_ASSERT(m_data.allocatedCount < MaxBlocksPerPage, "Invalid block count!");
        ++m_data.allocatedCount;

        return popFreeList();
    }

    void freeBlock(void * ptr)
    {
        auto * userBlock = static_cast<std::uint8_t *>(ptr);

        #if SIMPLE_HEAP_DEBUG
        userBlock -= 8; // Back to the header guard dword
        #endif // SIMPLE_HEAP_DEBUG

        SIMPLE_HEAP_ASSERT(!isEmpty(), "Freeing pointer from empty page!");
        SIMPLE_HEAP_ASSERT(validatePtr(userBlock), "Pointer not allocated from this page or corrupted guard words!");

        pushFreeList(userBlock);
        --m_data.allocatedCount;
    }
    #if SIMPLE_HEAP_DEBUG

    // Dump the whole page as 32- or 64-bit words (including the header overhead).
    // Slack that doesn't fit in the last word is not printed.
    void dump32(const char * const header = nullptr) const
    {
        if (header)
        {
            std::printf("%s\n", header);
        }

        auto * ptr32 = reinterpret_cast<const std::uint32_t *>(&m_data);
        constexpr std::size_t NumWords = sizeof(Data) / 4;

        for (std::size_t w = 0; w < NumWords; ++w)
        {
            std::printf("0x%08X\n", ptr32[w]);
        }
    }

    void dump64(const char * const header = nullptr) const
    {
        if (header)
        {
            std::printf("%s\n", header);
        }

        auto * ptr64 = reinterpret_cast<const std::uint64_t *>(&m_data);
        constexpr std::size_t NumDWords = sizeof(Data) / 8;

        for (std::size_t dw = 0; dw < NumDWords; ++dw)
        {
std::printf("0x%016llX\n", ptr64[dw]); | |
        }
    }

    // Validate the guard words of each block and freelist connectivity.
    void sanityCheck() const
    {
        auto allFreeBytes = [](const void * ptr, const std::size_t size) -> bool
        {
            auto * bytes = static_cast<const std::uint8_t *>(ptr);
            for (std::size_t b = 0; b < size; ++b)
            {
                if (bytes[b] != FreedMemFillPattern)
                {
                    return false;
                }
            }
            return true;
        };

        const std::size_t pageBlockSize = m_data.blockSize;
        const std::size_t freeBlockSize = pageBlockSize - GuardBytes - 8;

        const FreeBlock * free = m_data.freeListHead;
        const FreeBlock * prev = nullptr;

        while (free != nullptr)
        {
            if (prev != nullptr)
            {
                SIMPLE_HEAP_ASSERT(prev->next == free, "Page freelist connectivity is broken!");
            }

            // Make sure all bytes minus FreeBlock::next match the free mem debug pattern.
            SIMPLE_HEAP_ASSERT(allFreeBytes(free + 1, freeBlockSize), "Page freelist block was written to after it was freed!");

            prev = free;
            free = free->next;
        }

        // First block is at the end of the range.
        const std::uint8_t * cursor = m_data.pageBytes + PageBytes;
        const std::uint8_t * const beginning = m_data.pageBytes;
        for (;;)
        {
            cursor -= pageBlockSize;
            if (cursor < beginning)
            {
                break;
            }
            SIMPLE_HEAP_ASSERT(validatePtr(cursor), "Corrupted heap block - bad guard words!");
        }
    }

    #endif // SIMPLE_HEAP_DEBUG

    bool isFull()  const { return (m_data.freeListHead == nullptr); }
    bool isEmpty() const { return (m_data.allocatedCount == 0); }

    std::uint16_t blockSize() const { return m_data.blockSize; }
    std::uint16_t blockSizeNoDebugOverhead() const { return m_data.blockSize - GuardBytes; }
    std::uint16_t bucketIndex() const { return m_data.bucketIdx; }
    std::uint16_t allocatedCount() const { return m_data.allocatedCount; }

    std::uint16_t listIndex() const { return m_data.bucketListIdx; }
    void setListIndex(const std::uint16_t index) { m_data.bucketListIdx = index; }

    static constexpr int PerPageOverhead = 16; // Number of bytes we need to take from SizeBytes for housekeeping.
    static constexpr int GuardBytes   = SIMPLE_HEAP_DEBUG ? 16 : 0;
    static constexpr int PageBytes    = SizeBytes - PerPageOverhead;
    static constexpr int MaxAllocSize = PageBytes - GuardBytes;
    static constexpr int MinAllocSize = sizeof(void *);
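
    // Worked example of the constants above (illustrative): for FixedSizePage<1024>,
    // PageBytes = 1024 - 16 = 1008. In a debug build each block also carries 16 guard
    // bytes, so MaxAllocSize = 1008 - 16 = 992 and a 32-byte bucket actually stores
    // 48-byte blocks (1008 / 48 = 21 of them, integer division). In a release build
    // MaxAllocSize = 1008 and the same bucket fits 1008 / 32 = 31 blocks.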
private:

    struct FreeBlock
    {
        FreeBlock * next;
    };

    struct Data
    {
        FreeBlock *   freeListHead;
        std::uint16_t blockSize;
        std::uint16_t bucketIdx;
        std::uint16_t bucketListIdx;
        std::uint16_t allocatedCount;
        alignas(Alignment) std::uint8_t pageBytes[PageBytes];
    } m_data;

    static_assert(SizeBytes <= MaxPageSize, "Page size too big!");
    static_assert(sizeof(Data) == SizeBytes, "Unexpected size for FixedSizePage::Data!");

private:

    #if SIMPLE_HEAP_DEBUG

    void initDataBlock(const std::uint16_t pageBlockSize, const std::uint16_t bucketIdx)
    {
        // Plus the header and footer guard dwords
        const std::uint16_t debugBlockSize = pageBlockSize + GuardBytes;

        SIMPLE_HEAP_ASSERT(pageBlockSize >= MinAllocSize, "Block size too small!");
        SIMPLE_HEAP_ASSERT(debugBlockSize <= PageBytes, "Block size too big!");
        SIMPLE_HEAP_ASSERT((PageBytes / debugBlockSize) <= MaxBlocksPerPage, "Max blocks per page exceeded!");

        m_data.freeListHead   = nullptr;
        m_data.blockSize      = debugBlockSize;
        m_data.bucketIdx      = bucketIdx;
        m_data.bucketListIdx  = 0;
        m_data.allocatedCount = 0;

        std::memset(m_data.pageBytes, FreshPageFillPattern, PageBytes);
    }

    void initFreeList()
    {
        // Start at the end so we get the right order as we push into the freelist.
        std::uint8_t * cursor = m_data.pageBytes + PageBytes;
        std::uint8_t * const beginning = m_data.pageBytes;
        const std::size_t pageBlockSize = m_data.blockSize;

        for (;;)
        {
            cursor -= pageBlockSize;
            if (cursor < beginning)
            {
                break;
            }
            pushFreeList(cursor);
            SIMPLE_HEAP_ASSERT(validatePtr(cursor), "FixedSizePage::pushFreeList() is broken!");
        }
    }

    void pushFreeList(void * blockPtr)
    {
        auto * ptr = static_cast<std::uint8_t *>(blockPtr);
        std::memset(ptr, FreedMemFillPattern, m_data.blockSize);

        auto * headerGuardDWord = reinterpret_cast<std::uint64_t *>(ptr); ptr += 8;
        auto * freeLink         = reinterpret_cast<FreeBlock *>(ptr);     ptr += (m_data.blockSize - GuardBytes);
        auto * footerGuardDWord = reinterpret_cast<std::uint64_t *>(ptr); ptr += 8;

        *headerGuardDWord = PageHeaderPattern;

        freeLink->next = m_data.freeListHead;
        m_data.freeListHead = freeLink;

        *footerGuardDWord = PageFooterPattern;
    }

    void * popFreeList()
    {
        SIMPLE_HEAP_ASSERT(!isEmpty(), "Can't pop from empty page free list!");

        auto * ptr = reinterpret_cast<std::uint8_t *>(m_data.freeListHead) - 8; // Back to the header guard dword
        m_data.freeListHead = m_data.freeListHead->next;

        std::memset(ptr, AllocdMemFillPattern, m_data.blockSize);

        auto * headerGuardDWord = reinterpret_cast<std::uint64_t *>(ptr); ptr += 8;
        auto * userBlock        = ptr;                                    ptr += (m_data.blockSize - GuardBytes);
        auto * footerGuardDWord = reinterpret_cast<std::uint64_t *>(ptr); ptr += 8;

        *headerGuardDWord = PageHeaderPattern;
        *footerGuardDWord = PageFooterPattern;

        return userBlock;
    }

    bool validatePtr(const void * const ptr) const
    {
        if (ptr == nullptr)
        {
            return false;
        }

        const std::uint8_t * startPtr = m_data.pageBytes;
        const std::uint8_t * endPtr   = m_data.pageBytes + PageBytes;
        const std::uint8_t * testPtr  = static_cast<const std::uint8_t *>(ptr);

        if (testPtr < startPtr || testPtr >= endPtr)
        {
            return false;
        }

        return validateGuardDWords(testPtr);
    }

    bool validateGuardDWords(const std::uint8_t * ptr) const
    {
        auto * headerGuardDWord = reinterpret_cast<const std::uint64_t *>(ptr); ptr += 8;
        ptr += (m_data.blockSize - GuardBytes);
        auto * footerGuardDWord = reinterpret_cast<const std::uint64_t *>(ptr); ptr += 8;

        if (*headerGuardDWord != PageHeaderPattern || *footerGuardDWord != PageFooterPattern)
        {
            return false;
        }
        return true;
    }

    #else // !SIMPLE_HEAP_DEBUG

    void initDataBlock(const std::uint16_t pageBlockSize, const std::uint16_t bucketIdx)
    {
        SIMPLE_HEAP_ASSERT(pageBlockSize >= MinAllocSize, "Block size too small!");
        SIMPLE_HEAP_ASSERT(pageBlockSize <= PageBytes, "Block size too big!");
        SIMPLE_HEAP_ASSERT((PageBytes / pageBlockSize) <= MaxBlocksPerPage, "Max blocks per page exceeded!");

        m_data.freeListHead   = nullptr;
        m_data.blockSize      = pageBlockSize;
        m_data.bucketIdx      = bucketIdx;
        m_data.bucketListIdx  = 0;
        m_data.allocatedCount = 0;
    }

    void initFreeList()
    {
        // Start at the end so we get the right order as we push into the freelist.
        std::uint8_t * cursor = m_data.pageBytes + PageBytes;
        std::uint8_t * const beginning = m_data.pageBytes;
        const std::size_t pageBlockSize = m_data.blockSize;

        for (;;)
        {
            cursor -= pageBlockSize;
            if (cursor < beginning)
            {
                break;
            }
            pushFreeList(cursor);
        }
    }

    void pushFreeList(void * blockPtr)
    {
        auto * freeLink = static_cast<FreeBlock *>(blockPtr);
        freeLink->next = m_data.freeListHead;
        m_data.freeListHead = freeLink;
    }

    void * popFreeList()
    {
        SIMPLE_HEAP_ASSERT(!isEmpty(), "Can't pop from empty page free list!");

        void * userBlock = m_data.freeListHead;
        m_data.freeListHead = m_data.freeListHead->next;
        return userBlock;
    }

    bool validatePtr(const void * const ptr) const
    {
        if (ptr == nullptr)
        {
            return false;
        }

        const std::uint8_t * startPtr = m_data.pageBytes;
        const std::uint8_t * endPtr   = m_data.pageBytes + PageBytes;
        const std::uint8_t * testPtr  = static_cast<const std::uint8_t *>(ptr);

        if (testPtr < startPtr || testPtr >= endPtr)
        {
            return false;
        }
        return true;
    }

    #endif // SIMPLE_HEAP_DEBUG
};
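
// --------------------------------------------------------
// Minimal standalone usage sketch for FixedSizePage (illustrative only; it
// mirrors the unit test at the bottom of this file). A 128-byte page configured
// for 32-byte blocks yields (128 - 16) / 32 = 3 blocks in release builds, or
// (128 - 16) / (32 + 16) = 2 blocks in debug builds due to the guard words:
//
//   SimpleHeap::FixedSizePage<128> page;
//   page.init(/* pageBlockSize = */ 32, /* bucketIdx = */ 0);
//   void * block = page.allocBlock(); // returns null once the page is full
//   page.freeBlock(block);            // returns the block to the page freelist
// --------------------------------------------------------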
// ========================================================
// PageBucket:
// ========================================================

template
<
    typename PageType,
    int PagesPerBucket
>
class PageBucket final
{
public:

    PageBucket() = default;

    PageBucket(const PageBucket &) = delete;
    PageBucket & operator = (const PageBucket &) = delete;

    void init()
    {
        m_usedPages = 0;
        std::fill(std::begin(m_pages), std::end(m_pages), nullptr);
    }

    bool insert(PageType * newPage)
    {
        SIMPLE_HEAP_ASSERT(!isFull(), "Can't insert new page into full bucket!");

        // Optimize for the common case when we are pushing pages
        // in a stack-like manner into a newly allocated Bucket.
        const int lastIdx = m_usedPages;
        if (m_pages[lastIdx] == nullptr)
        {
            insertPageAt(newPage, lastIdx);
            return true;
        }

        // Try to find an empty slot searching from the beginning:
        for (int i = 0; i < PagesPerBucket; ++i)
        {
            if (m_pages[i] == nullptr)
            {
                insertPageAt(newPage, i);
                return true;
            }
        }

        // No more free slots!
        return false;
    }

    void remove(PageType * page)
    {
        SIMPLE_HEAP_ASSERT(!isEmpty(), "Bucket already empty!");
        SIMPLE_HEAP_ASSERT(page != nullptr, "Can't remove null page!");

        const auto index = page->listIndex();
        SIMPLE_HEAP_ASSERT(index < PagesPerBucket, "Invalid page index in bucket page list!");
        SIMPLE_HEAP_ASSERT(m_pages[index] == page, "Page list index mismatch!");
        SIMPLE_HEAP_ASSERT(m_pages[index]->isEmpty(), "Trying to remove non-empty page!");

        m_pages[index] = nullptr;
        --m_usedPages;
    }

    PageType * tryFindBestPage() const
    {
        // Early-out if empty - common case for a new bucket.
        if (isEmpty())
        {
            return nullptr;
        }
        // Start from the most recently inserted page and
        // work down to the beginning of the list.
        for (int i = m_usedPages - 1; i >= 0; --i)
        {
            PageType * p = m_pages[i];
            if (p != nullptr && !p->isFull())
            {
                return p;
            }
        }

        // We might have something free further up,
        // so do a final pass to make sure.
        for (int i = m_usedPages; i < PagesPerBucket; ++i)
        {
            PageType * p = m_pages[i];
            if (p != nullptr && !p->isFull())
            {
                return p;
            }
        }

        // Will need to allocate a new page.
        return nullptr;
    }

    #if SIMPLE_HEAP_DEBUG
    // Assert that all pages in this bucket are in good shape and the use counts match.
    void sanityCheck() const
    {
        int usedPagesFound = 0;
        for (const PageType * pg : m_pages)
        {
            if (pg != nullptr)
            {
                pg->sanityCheck();
                ++usedPagesFound;
            }
        }
        SIMPLE_HEAP_ASSERT(usedPagesFound == m_usedPages, "Bucket used pages count mismatch!");
    }
    #endif // SIMPLE_HEAP_DEBUG

    bool isEmpty() const { return (m_usedPages == 0); }
    bool isFull()  const { return (m_usedPages == PagesPerBucket); }
    int usedPageCount() const { return m_usedPages; }

private:

    void insertPageAt(PageType * page, const int index)
    {
        page->setListIndex(static_cast<std::uint16_t>(index));
        m_pages[index] = page;
        ++m_usedPages;
    }

    static_assert(PagesPerBucket <= MaxPagesPerBucket, "Max number of pages per bucket exceeded!");

    int m_usedPages;
    std::array<PageType *, PagesPerBucket> m_pages; // Page pointers are not owned by the Bucket.
};
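
// --------------------------------------------------------
// Illustrative note on the PageBucket slot strategy (not part of the build):
// each page remembers its own slot via listIndex(), so removal leaves a hole
// that a later insert() refills via the linear scan, e.g.:
//
//   bucket.insert(&pageA); // slot 0 (stack-like fast path)
//   bucket.insert(&pageB); // slot 1 (fast path again)
//   bucket.remove(&pageA); // slot 0 becomes a hole
//   bucket.insert(&pageC); // fast path misses (slot 1 occupied); scan refills slot 0
// --------------------------------------------------------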
// ========================================================
// Allocator:
// ========================================================

template
<
    int NumBuckets,
    int PagesPerBucket,
    int PageSizeBytes,
    int PageAlignment = alignof(std::max_align_t)
>
class Allocator final
{
    static_assert(NumBuckets <= MaxBuckets, "Max number of allocation buckets exceeded!");
    static_assert(PagesPerBucket <= MaxPagesPerBucket, "Max number of pages per bucket exceeded!");
    static_assert(PageSizeBytes <= MaxPageSize, "Max page size in bytes exceeded!");

public:

    using Page   = FixedSizePage<PageSizeBytes, PageAlignment>;
    using Bucket = PageBucket<Page, PagesPerBucket>;

    static constexpr int MinAllocSize = Page::MinAllocSize;
    static constexpr int MaxAllocSize = Page::MaxAllocSize;

    struct InitParams
    {
        // Range of bucket sizes *must be sorted* (NumBuckets entries expected).
        const std::uint16_t * bucketSizes;

        // >= NumBuckets
        Bucket * bucketsMemChunk;
        int bucketCount;

        // >= PagesPerBucket * NumBuckets
        Page * pagesMemChunk;
        int pageCount;
    };
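
    // Illustrative sizing of the external buffers (mirrors the unit test below;
    // `mySortedSizes` is a hypothetical name). The allocator never owns this memory:
    //
    //   using Heap = SimpleHeap::Allocator<12, 4, 1024>;
    //   Heap::InitParams params;
    //   params.bucketCount     = 12;                    // >= NumBuckets
    //   params.bucketsMemChunk = new Heap::Bucket[12];
    //   params.pageCount       = 4 * 12;                // >= PagesPerBucket * NumBuckets
    //   params.pagesMemChunk   = new Heap::Page[4 * 12];
    //   params.bucketSizes     = mySortedSizes;         // 12 sorted uint16 sizes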
    Allocator() = default;

    Allocator(const Allocator &) = delete;
    Allocator & operator = (const Allocator &) = delete;

    // Initialize the heap prior to any use. This must be called before any allocation can take place.
    // The allocator does not take ownership of the memory buffers provided via InitParams and will not
    // free that memory on shutdown. The buffers must also remain valid for as long as the heap is in use.
    void init(const InitParams & params)
    {
        SIMPLE_HEAP_ASSERT(params.bucketCount >= NumBuckets, "InitParams::bucketCount must be >= NumBuckets template param!");
        SIMPLE_HEAP_ASSERT(params.pageCount >= PagesPerBucket * NumBuckets, "Not enough pages provided!");

        m_usedPages        = 0;
        m_committedBytes   = 0;
        m_pageFreeListHead = nullptr;
        m_pageCount        = params.pageCount;
        m_bucketSizes      = params.bucketSizes;
        m_buckets          = params.bucketsMemChunk;
        m_pages            = params.pagesMemChunk;

        for (int b = 0; b < NumBuckets; ++b)
        {
            m_buckets[b].init();
        }

        initPageFreeList();
    }

    // Shuts down the heap allocator - since memory is not actually owned by the heap,
    // this just sets the internal states back to defaults. After shutdown, you cannot
    // allocate or free from the heap!
    void shutdown()
    {
        m_usedPages        = 0;
        m_pageCount        = 0;
        m_committedBytes   = 0;
        m_pageFreeListHead = nullptr;
        m_bucketSizes      = nullptr;
        m_buckets          = nullptr;
        m_pages            = nullptr;
    }

    // Tries to allocate sizeBytes bytes of memory - might fail with a null pointer.
    void * tryAllocate(const std::size_t sizeBytes)
    {
        if (sizeBytes > MaxAllocSize)
        {
            return nullptr;
        }

        // Find the closest matching bucket for this allocation size:
        std::uint16_t bucketSize;
        const std::uint16_t bucketIndex = calcBucketForSize(sizeBytes, &bucketSize);
        Bucket & bucket = getBucketAt(bucketIndex);

        // Find a suitable page to allocate from:
        Page * page = bucket.tryFindBestPage();
        if (page == nullptr)
        {
            if (bucket.isFull())
            {
                // Can't accommodate new pages for this bucket size!
                return nullptr;
            }

            // All bucket pages are full or this is a new
            // uninitialized bucket, try to insert a new page.
            if ((page = tryAllocPage(bucketSize, bucketIndex)) == nullptr)
            {
                return nullptr;
            }
            bucket.insert(page);
        }

        void * const userBlock = page->allocBlock();
        if (userBlock != nullptr)
        {
            m_committedBytes += page->blockSize();
        }
        return userBlock;
    }

    // Allocates sizeBytes bytes of memory or fails with an assertion.
    void * allocate(const std::size_t sizeBytes)
    {
        void * const ptr = tryAllocate(sizeBytes);
        SIMPLE_HEAP_ASSERT(ptr != nullptr, "SimpleHeap - out of memory!");
        return ptr;
    }

    // Frees a pointer previously allocated from this heap by allocate() or tryAllocate().
    // Freeing a null pointer is a no-op. Freeing a pointer not from the heap will assert.
    void free(void * const ptr)
    {
        if (ptr == nullptr)
        {
            return;
        }

        Page * page = pageForPtr(ptr);
        SIMPLE_HEAP_ASSERT(page != nullptr, "Trying to free a pointer not allocated from this heap!");

        // Put the ptr back into the page free-list:
        page->freeBlock(ptr);
        m_committedBytes -= page->blockSize();

        // If this wasn't the last block in the page, we are done.
        if (!page->isEmpty())
        {
            return;
        }

        // Tidy up empty pages:
        const auto bucketIndex = page->bucketIndex();
        Bucket & bucket = getBucketAt(bucketIndex);
        bucket.remove(page);
        freePage(page);
    }

    // Get the page the pointer was allocated from or null if not a valid pointer from this heap.
    // Available in the public interface for testing only - not used externally.
    Page * pageForPtr(const void * const ptr) const
    {
        auto * testPtr   = static_cast<const std::uint8_t *>(ptr);
        auto * startAddr = reinterpret_cast<const std::uint8_t *>(m_pages);
        auto * endAddr   = reinterpret_cast<const std::uint8_t *>(m_pages + m_pageCount);

        if (testPtr < startAddr || testPtr >= endAddr)
        {
            return nullptr;
        }
        const std::ptrdiff_t offsetFromFirstPage = testPtr - startAddr;
        const std::size_t pageIndex = offsetFromFirstPage / sizeof(Page);
        SIMPLE_HEAP_ASSERT(pageIndex < std::size_t(m_pageCount), "Invalid page index derived from pointer!");
        return &m_pages[pageIndex];
    }
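
    // Illustrative mapping (not part of the build): because pages live in one
    // contiguous array, the owning page is recovered with plain index math.
    // E.g. with 1024-byte pages, a pointer 2500 bytes past the array start
    // maps to pageIndex = 2500 / 1024 = 2, i.e. &m_pages[2].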
    // Rounds a size in bytes up to the nearest allocation bucket size and returns that bucket's index.
    // Available in the public interface for testing only - not used externally.
    std::uint16_t calcBucketForSize(std::size_t sizeBytes, std::uint16_t * outRoundedBucketSize) const
    {
        SIMPLE_HEAP_ASSERT(sizeBytes <= MaxAllocSize, "Invalid allocation size!"); // <= : tryAllocate() allows exactly MaxAllocSize

        if (sizeBytes < MinAllocSize)
        {
            sizeBytes = MinAllocSize;
        }

        const auto siz = static_cast<std::uint16_t>(sizeBytes);
        const auto idx = std::lower_bound(m_bucketSizes, m_bucketSizes + NumBuckets, siz);
        SIMPLE_HEAP_ASSERT(idx != (m_bucketSizes + NumBuckets), "Allocation size not in m_bucketSizes[]");

        *outRoundedBucketSize = *idx;
        return static_cast<std::uint16_t>(idx - m_bucketSizes);
    }
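
    // Example of the rounding behavior (illustrative): with bucket sizes
    // { 8, 16, 24, 32, ... }, a request of 20 bytes lands on the 24-byte bucket
    // (std::lower_bound finds the first size >= 20), and a request of 4 bytes is
    // first clamped to MinAllocSize so a free block can always hold the
    // freelist link pointer.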
    #if SIMPLE_HEAP_DEBUG
    // Validates the freelist of pages, all buckets and all allocated pages.
    void sanityCheck() const
    {
        // Assert that the allocated page count of each bucket matches the advertised:
        int totalPagesInBuckets = 0;
        for (int b = 0; b < NumBuckets; ++b)
        {
            m_buckets[b].sanityCheck();
            totalPagesInBuckets += m_buckets[b].usedPageCount();
        }
        SIMPLE_HEAP_ASSERT(totalPagesInBuckets == m_usedPages,
                           "Number of allocated pages does not match use count in all buckets!");

        // Validate our freelist of whole pages:
        int freePageCount = 0;
        const FreePage * free = m_pageFreeListHead;
        const FreePage * prev = nullptr;

        while (free != nullptr)
        {
            if (prev != nullptr)
            {
                SIMPLE_HEAP_ASSERT(prev->next == free, "Page freelist connectivity is broken!");
            }
            prev = free;
            free = free->next;
            ++freePageCount;
        }

        SIMPLE_HEAP_ASSERT(freePageCount == (m_pageCount - m_usedPages),
                           "Num of pages in the heap free list should match (count - used)!");
    }
    #endif // SIMPLE_HEAP_DEBUG

    //
    // Heap stats queries:
    //
    int usedPageCount() const { return m_usedPages; }
    int pageCount() const { return m_pageCount; }
    const Page * pages() const { return m_pages; }
    int bucketCount() const { return NumBuckets; }
    const Bucket * buckets() const { return m_buckets; }
    const std::uint16_t * bucketSizes() const { return m_bucketSizes; }
    std::size_t committedBytes() const { return m_committedBytes; }

private:

    struct FreePage
    {
        FreePage * next;
    };

    void initPageFreeList()
    {
        #if SIMPLE_HEAP_DEBUG
        // Trash all pages in debug mode.
        std::memset(m_pages, DeadPageFillPattern, sizeof(Page) * m_pageCount);
        #endif // SIMPLE_HEAP_DEBUG

        // Start at the end so we get the right order as we push into the freelist.
        Page * cursor = m_pages + m_pageCount;
        Page * const beginning = m_pages;
        int pagesAdded = 0;

        for (;; ++pagesAdded)
        {
            --cursor;
            if (cursor < beginning)
            {
                break;
            }
            FreePage * fpg = reinterpret_cast<FreePage *>(cursor);
            fpg->next = m_pageFreeListHead;
            m_pageFreeListHead = fpg;
        }
        SIMPLE_HEAP_ASSERT(pagesAdded == m_pageCount, "Something is wrong in initPageFreeList()!");
    }

    Page * tryAllocPage(const std::uint16_t pageBlockSize, const std::uint16_t bucketIndex)
    {
        if (m_pageFreeListHead == nullptr)
        {
            return nullptr; // Heap is depleted!
        }

        Page * pg = reinterpret_cast<Page *>(m_pageFreeListHead);
        m_pageFreeListHead = m_pageFreeListHead->next;

        SIMPLE_HEAP_ASSERT(m_usedPages < m_pageCount, "Invalid used pages count!");
        ++m_usedPages;

        pg->init(pageBlockSize, bucketIndex);
        return pg;
    }

    void freePage(Page * page)
    {
        #if SIMPLE_HEAP_DEBUG
        std::memset(page, DeadPageFillPattern, sizeof(Page));
        #endif // SIMPLE_HEAP_DEBUG

        FreePage * fpg = reinterpret_cast<FreePage *>(page);
        fpg->next = m_pageFreeListHead;
        m_pageFreeListHead = fpg;

        --m_usedPages;
        SIMPLE_HEAP_ASSERT(m_usedPages >= 0, "Negative used pages count!");
    }

    Bucket & getBucketAt(const std::size_t index)
    {
        SIMPLE_HEAP_ASSERT(index < NumBuckets, "Invalid bucket index");
        return m_buckets[index];
    }

private:

    int m_usedPages = 0;
    int m_pageCount = 0;
    std::size_t m_committedBytes = 0;
    FreePage * m_pageFreeListHead = nullptr;

    // Pointers to external buffers provided on initialization
    const std::uint16_t * m_bucketSizes = nullptr;
    Bucket * m_buckets = nullptr;
    Page * m_pages = nullptr;
};
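
// --------------------------------------------------------
// End-to-end usage sketch (illustrative; see SimpleHeapTest below for the real
// exercise). Sizes not in the bucket list are rounded up to the next bucket:
//
//   static const std::uint16_t sizes[4] = { 8, 16, 32, 64 }; // sorted
//   using MyHeap = SimpleHeap::Allocator<4, 4, 1024>;
//   MyHeap heap;
//   MyHeap::InitParams params{ sizes, new MyHeap::Bucket[4], 4, new MyHeap::Page[16], 16 };
//   heap.init(params);
//   void * p = heap.allocate(20); // lands in the 32-byte bucket
//   heap.free(p);
//   heap.shutdown(); // caller still owns and must delete[] the buffers
// --------------------------------------------------------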
// ========================================================

} // namespace SimpleHeap
#ifdef SIMPLE_HEAP_INCLUDE_TESTS

// ========================================================
// SimpleHeap unit tests:
// ========================================================

#ifndef SIMPLE_HEAP_TEST_ASSERT
#include <cassert>
#define SIMPLE_HEAP_TEST_ASSERT(expr) assert(expr)
#endif // SIMPLE_HEAP_TEST_ASSERT

#include <unordered_map>
#include <vector>

struct SimpleHeapTest final
{
    // ----------------------------------------------------

    static constexpr int NumBuckets     = 12;
    static constexpr int PagesPerBucket = 4;
    static constexpr int PageSizeBytes  = 1024;
    static constexpr int PageAlignment  = 16;

    // Allocation sizes - any size is legal, not just powers of two.
    // Note: List must be sorted from smallest to greatest.
    const std::uint16_t bucketSizes[NumBuckets] = {
        1, 4, 8, 16, 24, 32, 48, 64, 88, 102, 128, 256,
    };

    using Heap = SimpleHeap::Allocator<NumBuckets, PagesPerBucket, PageSizeBytes, PageAlignment>;
    Heap heap;

    // ----------------------------------------------------

    void init()
    {
        SIMPLE_HEAP_TEST_ASSERT(std::is_sorted(std::begin(bucketSizes), std::end(bucketSizes)));
        SIMPLE_HEAP_TEST_ASSERT(alignof(Heap::Page) == PageAlignment);
        SIMPLE_HEAP_TEST_ASSERT(sizeof(Heap::Page) == PageSizeBytes);

        Heap::InitParams heapInit;
        heapInit.bucketCount     = NumBuckets;
        heapInit.bucketsMemChunk = new Heap::Bucket[heapInit.bucketCount];
        heapInit.pageCount       = PagesPerBucket * NumBuckets;
        heapInit.pagesMemChunk   = new Heap::Page[heapInit.pageCount];
        heapInit.bucketSizes     = bucketSizes;
        heap.init(heapInit);

        // Basic assumptions after init:
        SIMPLE_HEAP_TEST_ASSERT(heap.pages() == heapInit.pagesMemChunk);
        SIMPLE_HEAP_TEST_ASSERT(heap.buckets() == heapInit.bucketsMemChunk);
        SIMPLE_HEAP_TEST_ASSERT(heap.bucketSizes() == heapInit.bucketSizes);
        SIMPLE_HEAP_TEST_ASSERT(heap.pageCount() == heapInit.pageCount);
        SIMPLE_HEAP_TEST_ASSERT(heap.bucketCount() == heapInit.bucketCount);
        SIMPLE_HEAP_TEST_ASSERT(heap.usedPageCount() == 0);

        #if SIMPLE_HEAP_DEBUG
        heap.sanityCheck();
        #endif // SIMPLE_HEAP_DEBUG
    }

    void shutdown()
    {
        SIMPLE_HEAP_TEST_ASSERT(heap.usedPageCount() == 0);
        SIMPLE_HEAP_TEST_ASSERT(heap.committedBytes() == 0);

        #if SIMPLE_HEAP_DEBUG
        heap.sanityCheck();
        #endif // SIMPLE_HEAP_DEBUG

        delete[] heap.pages();
        delete[] heap.buckets();
        heap.shutdown();
    }

    void run()
    {
        // Test a single page in isolation:
        {
            SimpleHeap::FixedSizePage<128> pg;
            pg.init(32, 0);

            #if SIMPLE_HEAP_DEBUG
            pg.dump64("RAW PAGE DUMP 1st:");
            #endif // SIMPLE_HEAP_DEBUG

            void * p0 = pg.allocBlock();
            void * p1 = pg.allocBlock();
            #if !SIMPLE_HEAP_DEBUG
            void * p2 = pg.allocBlock();
            #endif // !SIMPLE_HEAP_DEBUG

            #if SIMPLE_HEAP_DEBUG
            pg.sanityCheck();
            pg.dump64("RAW PAGE DUMP 2nd:");
            #endif // SIMPLE_HEAP_DEBUG

            SIMPLE_HEAP_TEST_ASSERT(!pg.isEmpty());
            SIMPLE_HEAP_TEST_ASSERT(pg.isFull());
            SIMPLE_HEAP_TEST_ASSERT(pg.allocatedCount() == (SIMPLE_HEAP_DEBUG ? 2 : 3));

            pg.freeBlock(p0);
            pg.freeBlock(p1);
            #if !SIMPLE_HEAP_DEBUG
            pg.freeBlock(p2);
            #endif // !SIMPLE_HEAP_DEBUG

            #if SIMPLE_HEAP_DEBUG
            pg.sanityCheck();
            pg.dump64("RAW PAGE DUMP 3rd:");
            #endif // SIMPLE_HEAP_DEBUG

            SIMPLE_HEAP_TEST_ASSERT(pg.isEmpty());
            SIMPLE_HEAP_TEST_ASSERT(!pg.isFull());
            SIMPLE_HEAP_TEST_ASSERT(pg.allocatedCount() == 0);
        }

        // Test allocations of varying sizes, with sanity checks in between each:
        {
            const std::uint8_t fillPatterns[] = {
                '?', '*', '#', '+', 'x', '1', '@', '7', 'G', '=', '{', '%',
            };
            static_assert(std::size(fillPatterns) == NumBuckets, "Need to update this!");

            constexpr int NumAllocs = 10;
            void * allocs[NumAllocs][NumBuckets] = {};
            // Allocate and init our memory:
            for (int a = 0; a < NumAllocs; ++a)
            {
                for (int b = 0; b < NumBuckets; ++b)
                {
                    allocs[a][b] = heap.allocate(bucketSizes[b]);
                    SIMPLE_HEAP_TEST_ASSERT(allocs[a][b] != nullptr);

                    // Fill with something known, but different across buckets/allocs:
                    std::memset(allocs[a][b], (fillPatterns[b] ^ a), bucketSizes[b]);

                    #if SIMPLE_HEAP_DEBUG
                    heap.sanityCheck();
                    #endif // SIMPLE_HEAP_DEBUG
                }
            }

            // Validate our fill pattern remains intact and counts match:
            std::unordered_map<const Heap::Page *, int> pageAllocCount;
            for (int a = 0; a < NumAllocs; ++a)
            {
                for (int b = 0; b < NumBuckets; ++b)
                {
                    const int sizeBytes = bucketSizes[b];
                    const auto * ptr = static_cast<std::uint8_t *>(allocs[a][b]);

                    for (int i = 0; i < sizeBytes; ++i)
                    {
                        SIMPLE_HEAP_TEST_ASSERT(ptr[i] == (fillPatterns[b] ^ a));
                    }

                    const auto * pg = heap.pageForPtr(ptr);
                    SIMPLE_HEAP_TEST_ASSERT(pg != nullptr);
                    SIMPLE_HEAP_TEST_ASSERT(pg->blockSize() >= sizeBytes);
                    SIMPLE_HEAP_TEST_ASSERT(pg->bucketIndex() >= b);

                    auto mapIter = pageAllocCount.find(pg);
                    if (mapIter == std::end(pageAllocCount))
                    {
                        pageAllocCount[pg] += pg->allocatedCount();
                    }
                    else
                    {
                        SIMPLE_HEAP_TEST_ASSERT(mapIter->second == pg->allocatedCount());
                    }
                }
            }
            // All allocations for a bucket size across all of its pages must amount to NumAllocs
            // (or, more precisely, NumAllocs times the number of request sizes that mapped to that same bucket).
            std::unordered_map<int, int> bucketAllocCounts;
            std::unordered_map<int, int> expectedCounts;

            for (int b = 0; b < NumBuckets; ++b)
            {
                std::uint16_t roundedBucketSize;
                const int bucketIndex = heap.calcBucketForSize(bucketSizes[b], &roundedBucketSize);
                expectedCounts[bucketIndex] += NumAllocs;
                bucketAllocCounts[b] = 0;

                for (const auto pg : pageAllocCount)
                {
                    if (pg.first->blockSizeNoDebugOverhead() == roundedBucketSize)
                    {
                        bucketAllocCounts[bucketIndex] += pg.second;
                    }
                }
            }

            for (const auto counts : bucketAllocCounts)
            {
                SIMPLE_HEAP_TEST_ASSERT(counts.second == expectedCounts[counts.first]);
            }

            // Free the allocations and validate again:
            for (int a = 0; a < NumAllocs; ++a)
            {
                for (int b = 0; b < NumBuckets; ++b)
                {
                    heap.free(allocs[a][b]);
                    allocs[a][b] = nullptr;

                    #if SIMPLE_HEAP_DEBUG
                    heap.sanityCheck();
                    #endif // SIMPLE_HEAP_DEBUG
                }
            }

            // Allocate and free the whole heap:
            std::vector<void *> allPtrs;
            for (int b = 0;;)
            {
                void * ptr = heap.tryAllocate(bucketSizes[b]);
                if (ptr == nullptr)
                {
                    break;
                }
                allPtrs.push_back(ptr);
                b = (b + 1) % NumBuckets;
            }

            #if SIMPLE_HEAP_DEBUG
            heap.sanityCheck();
            #endif // SIMPLE_HEAP_DEBUG

            for (void * ptr : allPtrs)
            {
                heap.free(ptr);
            }

            #if SIMPLE_HEAP_DEBUG
            heap.sanityCheck();
            #endif // SIMPLE_HEAP_DEBUG

            SIMPLE_HEAP_TEST_ASSERT(heap.committedBytes() == 0);
            SIMPLE_HEAP_TEST_ASSERT(heap.usedPageCount() == 0);
        }
    }
}; // SimpleHeapTest

#endif // SIMPLE_HEAP_INCLUDE_TESTS