//
// General purpose minimal heap allocator.
//
// Keeps a freelist of chunks of arbitrary size. First round of allocations
// will use the whole heap block before it starts going into the freelist.
// Freeing just pushes the block back into the freelist and is constant time.
// Allocating might search the freelist if the whole heap was already allocated
// once.
//
// To reduce fragmentation we could merge free nodes either on free or allocate.
// Allocating from the freelist could also split nodes if the node size is bigger
// than the requested size. Right now it just returns the first node >= the alloc
// size.
//

#include <cassert>
#include <cstdint>
#include <cstddef>
#include <cstring>
#include <cstdio>

// ----------------------------------------------------------------------------

#if defined(__clang__) && defined(__has_feature)
    #if __has_feature(address_sanitizer)
        #include <sanitizer/asan_interface.h>
        #define MINIHEAP_ASAN 1
    #endif // address_sanitizer
#endif // __clang__ && __has_feature

#if !defined(MINIHEAP_ASAN)
    #define ASAN_POISON_MEMORY_REGION(addr, size)   /* nothing */
    #define ASAN_UNPOISON_MEMORY_REGION(addr, size) /* nothing */
    #define ASAN_NO_SANITIZE_ADDRESS_ATTRIBUTE      /* nothing */
#else // MINIHEAP_ASAN
    // ASAN_POISON_MEMORY_REGION() already defined in asan_interface.h
    // ASAN_UNPOISON_MEMORY_REGION() already defined in asan_interface.h
    #define ASAN_NO_SANITIZE_ADDRESS_ATTRIBUTE __attribute__((no_sanitize("address")))
#endif // MINIHEAP_ASAN

// ----------------------------------------------------------------------------
class MiniHeapAllocator final
{
public:

    MiniHeapAllocator(void * heapBuffer, const std::uint32_t heapSize)
    {
        assert(heapBuffer != nullptr);
        assert(heapSize != 0);

        m_HeapStart = static_cast<std::uint8_t *>(heapBuffer);
        m_HeapSize  = heapSize;
        m_HeapUsed  = 0;
        m_FirstNode = static_cast<Node *>(heapBuffer);
        m_FirstFree = nullptr;

        // Keep the whole heap initially poisoned.
        ASAN_POISON_MEMORY_REGION(m_HeapStart, m_HeapSize);
    }

    ~MiniHeapAllocator()
    {
        // Unpoison so it can be freed by the user.
        ASAN_UNPOISON_MEMORY_REGION(m_HeapStart, m_HeapSize);
    }
    ASAN_NO_SANITIZE_ADDRESS_ATTRIBUTE void * Allocate(std::size_t bytes)
    {
        if (bytes < kMinAllocSize)
        {
            bytes = kMinAllocSize;
        }

        // Every allocation is prefixed by a Node header.
        bytes += sizeof(Node);

        // First batch of allocations will just use the whole memory
        // range available before we even start recycling.
        if ((m_HeapUsed + bytes) <= m_HeapSize)
        {
            Node * newAlloc = reinterpret_cast<Node *>(m_HeapStart + m_HeapUsed);
            m_HeapUsed += bytes;

            newAlloc->Guard  = kGuardValue;
            newAlloc->Size   = bytes;
            newAlloc->IsFree = false;

            ASAN_UNPOISON_MEMORY_REGION(newAlloc + 1, bytes - sizeof(Node));
            return (newAlloc + 1); // User block follows Node header.
        }

        // Try recycling from the freelist:
        FreeNode * bestMatch = nullptr;
        for (FreeNode * iter = m_FirstFree; iter != nullptr; iter = iter->Next)
        {
            assert(iter->Guard == kGuardValue);
            assert(iter->IsFree == true);

            if (iter->Size >= bytes)
            {
                // TODO: Could split the FreeNode if size > bytes requested
                // (a stand-alone sketch of such a split follows this class).
                bestMatch = iter;
                break;
            }
        }

        // Cannot accommodate the block.
        if (bestMatch == nullptr)
        {
            // TODO: Could try to merge FreeNodes and retry.
            return nullptr;
        }

        // Remove from the freelist:
        if (bestMatch == m_FirstFree)
        {
            m_FirstFree = m_FirstFree->Next;
            if (m_FirstFree != nullptr)
            {
                m_FirstFree->Prev = nullptr; // New head of the freelist has no predecessor.
            }
        }
        else
        {
            FreeNode * prevNode = bestMatch->Prev;
            FreeNode * nextNode = bestMatch->Next;

            if (prevNode != nullptr)
            {
                assert(prevNode->Next == bestMatch);
                prevNode->Next = nextNode;
            }
            if (nextNode != nullptr)
            {
                assert(nextNode->Prev == bestMatch);
                nextNode->Prev = prevNode;
            }
        }

        bestMatch->IsFree = false;

        ASAN_UNPOISON_MEMORY_REGION(static_cast<Node *>(bestMatch) + 1, bytes - sizeof(Node));
        return (static_cast<Node *>(bestMatch) + 1); // User block follows Node header.
    }
    ASAN_NO_SANITIZE_ADDRESS_ATTRIBUTE void Free(void * ptr)
    {
        Node * header = static_cast<Node *>(ptr) - 1;

        assert(IsValidPtr(header));
        assert(header->Guard == kGuardValue);
        assert(header->Size >= sizeof(FreeNode));
        assert(header->IsFree == false);

        ASAN_POISON_MEMORY_REGION(header, header->Size);

        // Push into the freelist:
        FreeNode * toFree = static_cast<FreeNode *>(header);
        toFree->IsFree = true; // Mark it free so the freelist scan in Allocate() accepts it.

        if (m_FirstFree != nullptr)
        {
            assert(m_FirstFree->Prev == nullptr);
            m_FirstFree->Prev = toFree;
        }

        toFree->Next = m_FirstFree;
        toFree->Prev = nullptr;
        m_FirstFree  = toFree;
    }
    bool IsValidPtr(const void * const ptr) const
    {
        auto * testAddr = static_cast<const std::uint8_t *>(ptr);
        auto * heapEnd  = m_HeapStart + m_HeapSize;
        return (testAddr >= m_HeapStart && testAddr < heapEnd);
    }
private:

    static constexpr std::uint32_t kMinAllocSize = 16; // Enough room for FreeNode's Prev/Next pointers.
    static constexpr std::uint32_t kGuardValue   = 0xDEADBEEF;

    struct Node
    {
        std::uint32_t Guard;       // Used by asserts only.
        std::uint32_t Size   : 31; // Block size, including this header.
        std::uint32_t IsFree : 1;  // Set while the block sits in the freelist.
    };

    struct FreeNode : Node
    {
        FreeNode * Prev;
        FreeNode * Next;
    };
    static_assert(sizeof(Node) == 8, "MiniHeapAllocator::Node size expected to be 8 bytes");
    static_assert(sizeof(FreeNode) == 24, "MiniHeapAllocator::FreeNode size expected to be 24 bytes");

private:

    std::uint8_t * m_HeapStart;
    std::uint32_t  m_HeapSize;
    std::uint32_t  m_HeapUsed;
    Node *         m_FirstNode;
    FreeNode *     m_FirstFree;
};
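
// ----------------------------------------------------------------------------
// Stand-alone sketch of the node-splitting TODO above: one possible way
// Allocate() could carve a smaller block out of an oversized FreeNode and hand
// the leftover tail back to the freelist. This is illustrative only and is not
// wired into MiniHeapAllocator (whose Node/FreeNode types are private). The
// SketchNode/SketchFreeNode structs, SplitFreeNode() and the kMinSplitRemainder
// threshold are assumptions made for the example; like the allocator above, it
// ignores alignment concerns.
namespace miniheap_split_sketch
{
    struct SketchNode
    {
        std::uint32_t Guard;
        std::uint32_t Size   : 31; // Block size, header included.
        std::uint32_t IsFree : 1;
    };

    struct SketchFreeNode : SketchNode
    {
        SketchFreeNode * Prev;
        SketchFreeNode * Next;
    };

    // Shrinks 'node' down to 'bytes' (header included) and returns the leftover
    // tail as a new free node, or nullptr if the leftover would be too small to
    // hold a SketchFreeNode header plus a minimal payload.
    inline SketchFreeNode * SplitFreeNode(SketchFreeNode * node, const std::uint32_t bytes)
    {
        const std::uint32_t kMinSplitRemainder = static_cast<std::uint32_t>(sizeof(SketchFreeNode)) + 16;
        const std::uint32_t nodeSize = node->Size;

        if (nodeSize < bytes + kMinSplitRemainder)
        {
            return nullptr; // Not worth splitting; hand out the whole node instead.
        }

        auto * remainder = reinterpret_cast<SketchFreeNode *>(
                               reinterpret_cast<std::uint8_t *>(node) + bytes);
        remainder->Guard  = node->Guard;
        remainder->Size   = nodeSize - bytes;
        remainder->IsFree = true;
        remainder->Prev   = nullptr;
        remainder->Next   = nullptr;

        node->Size = bytes; // The caller hands this shrunken node out to the user...
        return remainder;   // ...and pushes the remainder back onto the freelist.
    }
} // namespace miniheap_split_sketch
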
// ----------------------------------------------------------------------------

int main()
{
    constexpr int kHeapSize = 1 * 1024 * 1024;
    MiniHeapAllocator miniHeap{ new std::uint8_t[kHeapSize], kHeapSize };

    const int sizes[] = { 4, 8, 16, 24, 32, 64, 128, 256, 512 };
    constexpr int kMaxSizes = sizeof(sizes) / sizeof(sizes[0]);
    constexpr int kMaxPtrs  = 2048;

    // 1st pass:
    {
        void * ptrs[kMaxPtrs] = {0};

        std::printf("Allocating...\n");
        for (int i = 0, s = 0; i < kMaxPtrs; ++i)
        {
            ptrs[i] = miniHeap.Allocate(sizes[s]);
            if (ptrs[i] == nullptr)
            {
                break;
            }
            std::memset(ptrs[i], 'X', sizes[s]);
            s = (s + 1) % kMaxSizes;
        }

        std::printf("Freeing...\n");
        for (int i = 0; i < kMaxPtrs; ++i)
        {
            if (ptrs[i] != nullptr)
            {
                miniHeap.Free(ptrs[i]);
            }
        }
    }

    // 2nd pass:
    {
        void * ptrs[kMaxPtrs] = {0};

        std::printf("Allocating (again)...\n");
        for (int i = 0, s = 0; i < kMaxPtrs; ++i)
        {
            ptrs[i] = miniHeap.Allocate(sizes[s]);
            if (ptrs[i] == nullptr)
            {
                break;
            }
            std::memset(ptrs[i], 'X', sizes[s]);
            s = (s + 1) % kMaxSizes;
        }

        std::printf("Freeing (again)...\n");
        for (int i = 0; i < kMaxPtrs; ++i)
        {
            if (ptrs[i] != nullptr)
            {
                miniHeap.Free(ptrs[i]);
            }
        }
    }

    std::printf("Done!\n");
}