Skip to content

Instantly share code, notes, and snippets.

@martinus
Last active June 5, 2022 20:42
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 1 You must be signed in to fork a gist
  • Save martinus/0cee4dd177beda78bb1e2e3d128c2d70 to your computer and use it in GitHub Desktop.
Save martinus/0cee4dd177beda78bb1e2e3d128c2d70 to your computer and use it in GitHub Desktop.
Fast & simple memory pool for node-based allocators. Woohoo!
/**
 * Memory resource for node-based containers: pools every allocation that is
 * at most MAX_BLOCK_SIZE_BYTES, uses pointer alignment, and is a multiple of
 * that alignment; everything else is forwarded to the upstream (default)
 * resource. Pooled memory is recycled through per-size free lists and only
 * returned to the system when the resource itself is destroyed.
 *
 * Usage is like so:
 *
 * using CCoinsMap = std::pmr::unordered_map<COutPoint, CCoinsCacheEntry, SaltedOutpointHasher>;
 * auto mr = NodePoolResource<256>(); // large enough to fit nodes size
 * auto map = CCoinsMap{0, SaltedOutpointHasher{}, std::equal_to<COutPoint>{}, &mr};
 */
template <size_t MAX_BLOCK_SIZE_BYTES>
class NodePoolResource : public std::pmr::memory_resource
{
    /**
     * In-place linked list of the allocations, used for the free list.
     */
    struct FreeList {
        FreeList* next = nullptr;
    };

    /** Size of the memory chunks that are carved up into pooled blocks. */
    static constexpr size_t CHUNK_SIZE_BYTES = 262144;

    /** Only allocations with exactly this alignment are eligible for pooling. */
    static constexpr size_t BLOCK_ALIGNMENT_BYTES = std::alignment_of_v<FreeList>;

    // A pooled block must be able to hold the in-place free list node, and a
    // single chunk must be able to satisfy the largest pooled request.
    static_assert(MAX_BLOCK_SIZE_BYTES >= sizeof(FreeList), "MAX_BLOCK_SIZE_BYTES too small for the free list node");
    static_assert(MAX_BLOCK_SIZE_BYTES <= CHUNK_SIZE_BYTES, "MAX_BLOCK_SIZE_BYTES cannot exceed the chunk size");

    /**
     * Fallback allocator when the pool is not used.
     */
    std::pmr::memory_resource* const m_upstream_resource = std::pmr::get_default_resource();

    /**
     * Contains all allocated chunks of memory, used to free the data in the destructor.
     */
    std::vector<std::unique_ptr<std::byte[]>> m_allocated_chunks{};

    /**
     * Singly linked lists of all data that came from deallocating, one list
     * per pooled block size (index 0 is unused — it means "not pooled").
     */
    std::vector<FreeList*> m_pools{MAX_BLOCK_SIZE_BYTES / BLOCK_ALIGNMENT_BYTES + 1};

    /**
     * Points to the beginning of available memory for carving out allocations.
     */
    std::byte* m_available_memory_it = nullptr;

    /**
     * Points to the end of available memory for carving out allocations.
     *
     * That member variable is redundant, and is always equal to
     * `m_allocated_chunks.back().get() + CHUNK_SIZE_BYTES` whenever it is
     * accessed, but caching it here helps clarity and efficiency.
     */
    std::byte* m_available_memory_end = nullptr;

    /**
     * Returns the free-list index for the given request, or 0 when the request
     * cannot be pooled and must go to the upstream resource.
     *
     * The `bytes % BLOCK_ALIGNMENT_BYTES == 0` criterion keeps
     * `m_available_memory_it` aligned after carving, and guarantees that any
     * pooled block (nonzero index) is large enough to hold a FreeList node
     * when it is later recycled in do_deallocate(). `bytes == 0` maps to
     * index 0 and is therefore handled upstream as well.
     */
    [[nodiscard]] static constexpr size_t poolIdxOr0(size_t bytes, size_t alignment)
    {
        if (bytes <= MAX_BLOCK_SIZE_BYTES && alignment == BLOCK_ALIGNMENT_BYTES && bytes % BLOCK_ALIGNMENT_BYTES == 0) {
            return bytes / BLOCK_ALIGNMENT_BYTES;
        }
        return 0;
    }

    void* do_allocate(size_t bytes, size_t alignment) override
    {
        if (const auto idx = poolIdxOr0(bytes, alignment)) {
            // Reuse a previously deallocated block of the same size, if any.
            if (nullptr != m_pools[idx]) {
                return std::exchange(m_pools[idx], m_pools[idx]->next);
            }
            // Otherwise carve a fresh block out of the current chunk; start a
            // new chunk when the remaining space cannot fit this request.
            if (m_available_memory_it + bytes > m_available_memory_end) {
                allocateChunk();
            }
            return std::exchange(m_available_memory_it, m_available_memory_it + bytes);
        }
        return m_upstream_resource->allocate(bytes, alignment);
    }

    void do_deallocate(void* p, size_t bytes, size_t alignment) override
    {
        if (const auto idx = poolIdxOr0(bytes, alignment)) {
            // Construct the free-list node in place and push it onto the list
            // for this block size; the memory is recycled, never freed here.
            auto* a = new (p) FreeList{};
            a->next = std::exchange(m_pools[idx], a);
        } else {
            m_upstream_resource->deallocate(p, bytes, alignment);
        }
    }

    bool do_is_equal(const std::pmr::memory_resource& other) const noexcept override
    {
        // Memory from this resource can only be freed through this very instance.
        return this == &other;
    }

    /**
     * Allocates a new chunk and points the carving iterators at it. Any
     * leftover tail of the previous chunk is abandoned (it stays owned by
     * m_allocated_chunks and is freed in the destructor).
     */
    void allocateChunk()
    {
        m_allocated_chunks.emplace_back(new std::byte[CHUNK_SIZE_BYTES]);
        m_available_memory_it = m_allocated_chunks.back().get();
        m_available_memory_end = m_available_memory_it + CHUNK_SIZE_BYTES;
    }

public:
    NodePoolResource()
    {
        allocateChunk();
    }
};
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment