// ================================================================================================
// -*- C++ -*-
// File: debug_allocator.hpp
// Author: Guilherme R. Lampert
// Created on: 03/12/16
//
// About:
// Debug allocator backed by virtual memory pages. Each allocation is aligned to one
// or more pages, with an additional protected header page and footer page, ensuring that
// any out-of-bounds memory access that hits the header or footer will trap immediately.
// Freed pages are also kept mapped but are marked as protected, so a use-after-free will
// always trap.
//
// License:
// This software is in the public domain. Where that dedication is not recognized,
// you are granted a perpetual, irrevocable license to copy, distribute, and modify
// this file as you see fit. Source code is provided "as is", without warranty of any
// kind, express or implied. No attribution is required, but a mention about the author
// is appreciated.
// ================================================================================================
#ifndef DEBUG_ALLOCATOR_HPP
#define DEBUG_ALLOCATOR_HPP
#include <cstddef>
#include <cstdint>
// ========================================================
// Platform switches:
// ========================================================
#if defined(__APPLE__) || defined(__linux__)
// Unix-like system, use mmap().
#define MEM_PLATFORM_UNIX_LIKE 1
#elif defined(_WIN32) || defined(_WIN64)
// Windows, use the VirtualAlloc APIs.
#define MEM_PLATFORM_WINDOWS 1
#else
#error "Unsupported platform!"
#endif // MEM_PLATFORM_XYZ
// ========================================================
// Configurable assert and error handlers:
// ========================================================
#ifndef MEM_ASSERT
#include <cassert>
#define MEM_ASSERT assert
#endif // MEM_ASSERT
#ifndef MEM_ERROR
#include <cstdio>
#define MEM_ERROR(...) do { std::fprintf(stderr, __VA_ARGS__); std::fprintf(stderr, "\n"); } while (0,0)
#endif // MEM_ERROR
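// Example (hypothetical): override these macros before including this header to
// route assertions and errors into your own engine/logging facilities. The names
// MY_ENGINE_ASSERT and MyEngine::logError below are made-up placeholders:
//
//   #define MEM_ASSERT(expr) MY_ENGINE_ASSERT(expr)
//   #define MEM_ERROR(...)   MyEngine::logError(__VA_ARGS__)
//   #include "debug_allocator.hpp"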
// ========================================================
// Memory helpers:
// ========================================================
namespace MemUtils
{
inline bool isPowerOfTwo(const std::size_t size)
{
return size != 0 && (size & (size - 1)) == 0;
}
inline bool isAlignedPtr(const void * ptr, const std::size_t alignment)
{
// Aligned if the pointer address is a multiple of the alignment value (which must be a power of two).
return (reinterpret_cast<std::uintptr_t>(ptr) & (alignment - 1)) == 0;
}
inline std::size_t alignSize(const std::size_t size, const std::size_t alignment)
{
// Add the minimum extra needed to the size for pointer alignment.
// This size can then be used to malloc() some memory
// and then have the pointer aligned with alignPtr().
return size + (alignment - 1);
}
template<typename T>
inline T * alignPtr(const T * ptr, const std::size_t alignment, std::size_t * outOffset = nullptr)
{
// Cast to integer and align:
const std::uintptr_t unalignedPtr = reinterpret_cast<std::uintptr_t>(ptr);
const std::uintptr_t alignedPtr = (unalignedPtr + (alignment - 1)) & ~(alignment - 1);
// Optionally return the amount the original ptr was offset by:
if (outOffset != nullptr)
{
(*outOffset) = alignedPtr - unalignedPtr;
}
// Re-cast to void*, validate and return:
T * userPtr = reinterpret_cast<T *>(alignedPtr);
MEM_ASSERT(isAlignedPtr(userPtr, alignment));
return userPtr;
}
template<typename T>
inline T * unalignPtr(const T * ptr, const std::size_t offset)
{
// 'offset' must be the value returned through alignPtr()'s outOffset parameter!
const std::uintptr_t alignedPtr = reinterpret_cast<std::uintptr_t>(ptr);
const std::uintptr_t unalignedPtr = alignedPtr - offset;
return reinterpret_cast<T *>(unalignedPtr);
}
} // namespace MemUtils {}
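// Example (hypothetical usage sketch): over-allocate with alignSize(), align
// the resulting pointer with alignPtr(), then recover the original pointer
// with unalignPtr() when freeing:
//
//   std::size_t offset = 0;
//   void * raw  = std::malloc(MemUtils::alignSize(128, 64)); // Asks for 128 + 63 bytes.
//   void * user = MemUtils::alignPtr(raw, 64, &offset);      // 64-byte aligned pointer.
//   std::free(MemUtils::unalignPtr(user, offset));           // Back to the malloc'd pointer.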
// ========================================================
// struct MemPage:
// ========================================================
struct MemPage final
{
enum Flags
{
IsFreed = 0, // Page is not allocated (address == nullptr)
AccessRead = 1 << 0, // Page memory can be read.
AccessWrite = 1 << 1, // Page memory can be written.
AccessProtected = 1 << 2, // Page memory is protected and cannot be accessed at all.
};
void * address; // Starting address of the page.
std::uint32_t flags; // Any of the above flags ORed together.
std::uint32_t allocatorId; // Unique identifier that links this page to the allocator that owns it.
};
// ========================================================
// struct MemBlock:
// ========================================================
struct MemBlock final
{
void * address; // Start of the user portion of the memory block (adjusted according to alignment).
const MemPage * firstPage; // Back pointer to the first valid page in the range this block was allocated from.
std::size_t sizeInBytes; // User size of the block (does not include alignment overhead, if any).
std::uint32_t alignment; // How many bytes the pointer is aligned to. Always a power-of-two.
std::uint32_t pageCount; // Number of memory pages this block spans, starting from the first page. Not counting header and footer pages.
void invalidate()
{
address = nullptr;
firstPage = nullptr;
sizeInBytes = 0;
alignment = 0;
pageCount = 0;
}
bool isValid() const
{
return (address != nullptr && firstPage != nullptr &&
sizeInBytes != 0 && alignment != 0 && pageCount != 0);
}
template<typename T> T * castTo()
{
MEM_ASSERT(sizeof(T) <= sizeInBytes);
return static_cast<T *>(address);
}
template<typename T> const T * castTo() const
{
MEM_ASSERT(sizeof(T) <= sizeInBytes);
return static_cast<const T *>(address);
}
};
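// Example (hypothetical usage, assuming a DebugPageAllocator instance named
// 'allocator' as declared further below; Vec3 is a made-up user type):
//
//   MemBlock block = allocator.allocate(sizeof(Vec3), alignof(Vec3));
//   Vec3 * v = block.castTo<Vec3>(); // Asserts sizeof(Vec3) <= block.sizeInBytes.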
// ========================================================
// class PageProviderWrapper:
// ========================================================
template<typename PageProviderImpl>
class PageProviderWrapper final
{
public:
PageProviderWrapper() = default;
// Not copyable.
PageProviderWrapper(const PageProviderWrapper & other) = delete;
PageProviderWrapper & operator = (const PageProviderWrapper & other) = delete;
// Get the size in bytes of each page returned by allocatePages().
// The page size is platform dependent, but we can assume it will
// never change while the program is running.
std::size_t getPageSizeInBytes()
{
// Cached for cheap access.
if (m_pageSizeBytes == 0)
{
m_pageSizeBytes = m_pageProvider.querySystemPageSize();
MEM_ASSERT(m_pageSizeBytes != 0);
}
return m_pageSizeBytes;
}
// Allocate a number of pages, each with the fixed page size used by the system.
// Access level of all pages will be Read+Write (RW).
bool allocatePages(MemPage * pages, const std::size_t count)
{
MEM_ASSERT(pages != nullptr);
MEM_ASSERT(count != 0);
return m_pageProvider.allocatePages(pages, count, getPageSizeInBytes());
}
// Frees a number of pages, allowing the system to recycle them and
// possibly return the same pages again on a future call to allocatePages().
bool freePages(MemPage * pages, const std::size_t count)
{
MEM_ASSERT(pages != nullptr);
MEM_ASSERT(count != 0);
return m_pageProvider.freePages(pages, count, getPageSizeInBytes());
}
// Change the access level of the pages to protected, effectively preventing any reads or writes.
bool protectPages(MemPage * pages, const std::size_t count)
{
MEM_ASSERT(pages != nullptr);
MEM_ASSERT(count != 0);
return m_pageProvider.protectPages(pages, count, getPageSizeInBytes());
}
private:
std::size_t m_pageSizeBytes = 0;
PageProviderImpl m_pageProvider;
};
// ========================================================
// struct UnixPageProvider:
// ========================================================
#ifdef MEM_PLATFORM_UNIX_LIKE
struct UnixPageProvider final
{
static std::size_t querySystemPageSize();
static bool allocatePages(MemPage * pages, std::size_t pageCount, std::size_t pageSizeBytes);
static bool freePages(MemPage * pages, std::size_t pageCount, std::size_t pageSizeBytes);
static bool protectPages(MemPage * pages, std::size_t pageCount, std::size_t pageSizeBytes);
};
using SystemPageProvider = PageProviderWrapper<UnixPageProvider>;
#endif // MEM_PLATFORM_UNIX_LIKE
// ========================================================
// struct WindowsPageProvider:
// ========================================================
#ifdef MEM_PLATFORM_WINDOWS
/*
struct WindowsPageProvider
{
// TODO
};
using SystemPageProvider = PageProviderWrapper<WindowsPageProvider>;
*/
#error "TODO - not implemented yet!"
#endif // MEM_PLATFORM_WINDOWS
// ========================================================
// class DebugPageAllocator:
// ========================================================
class DebugPageAllocator final
{
public:
// Create the debug allocator with a unique identifier.
// Make sure to pass a value that will not be repeated across
// instances of DebugPageAllocators. This value is used to tag
// pages allocated from this allocator to quickly be able to
// test if a block or page is owned by the allocator.
explicit DebugPageAllocator(std::uint32_t id);
// Not copyable.
DebugPageAllocator(const DebugPageAllocator & other) = delete;
DebugPageAllocator & operator = (const DebugPageAllocator & other) = delete;
// Allocate an aligned block of arbitrary size. The block will be rounded to
// one or more virtual memory pages, plus a protected header and footer page. When
// the block is freed, its pages are not unmapped but rather kept alive as protected.
// The header and footer help detect out-of-bounds memory accesses, while keeping
// the freed pages marked as protected helps detect use-after-free.
// If the block size is smaller than the rounded size in pages, it will be
// right-aligned to the page range by default, catching overruns but possibly
// missing underruns. You can change this behavior to either always left-align
// or alternate (ping-pong) between left and right for each allocation.
MemBlock allocate(std::size_t sizeInBytes, std::size_t alignment = 16);
// Marks all pages in the block as read/write protected, trapping on any
// subsequent attempt to access that memory. The pages will remain mapped
// until the program terminates.
void free(MemBlock * block);
// Cheap test if a block or page was sourced from this allocator.
// It uses the unique allocator id set on construction.
bool owns(const MemBlock & block) const;
bool owns(const MemPage & page) const;
// Change the default user block alignment inside the virtual memory pages
// to align to the left or right (place the user block after/before the header
// or footer pages).
void setLeftAlignAllocations(bool doLeftAlign);
// Alternate between left and right aligning the user blocks for each new allocation.
void setPingPongAlignAllocations(bool doPingPong);
// Enable filling newly allocated user memory with a known pattern.
// This is ON by default.
void setFillNewlyAllocatedBlocks(bool doFill);
// Change the byte pattern used to fill newly allocated blocks when filling is enabled.
void setNewBlockFillValue(std::uint8_t val);
private:
MemPage * newMemPages(std::size_t pageCount);
void freeMemPages(MemPage * pages, std::size_t count);
void * allocatePageRange(std::size_t pageCount, std::size_t blockSize, MemPage ** firstPageInBlock);
private:
SystemPageProvider m_sysPageAllocator;
const std::uint32_t m_allocatorId;
std::uint8_t m_fillValAllocedMem;
bool m_userBlockAlignLeft;
bool m_userBlockAlignPingPong;
bool m_lastAllocWasLeftAligned;
bool m_patternFillUserBlocks;
};
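// Example (hypothetical): choosing where the user block sits inside the page range.
//
//   DebugPageAllocator allocator{ 1 };
//   allocator.setLeftAlignAllocations(true);     // Start right after the header page; catches underruns.
//   allocator.setPingPongAlignAllocations(true); // Alternate left/right on each new allocation.
//   // Default (both off): right-aligned against the footer page; catches overruns.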
#endif // DEBUG_ALLOCATOR_HPP
// ================================================================================================
// -*- C++ -*-
// File: debug_allocator.cpp
// Author: Guilherme R. Lampert
// Created on: 03/12/16
//
// License:
// This software is in the public domain. Where that dedication is not recognized,
// you are granted a perpetual, irrevocable license to copy, distribute, and modify
// this file as you see fit. Source code is provided "as is", without warranty of any
// kind, express or implied. No attribution is required, but a mention about the author
// is appreciated.
// ================================================================================================
#include "debug_allocator.hpp"
#include <cstdlib>
#include <cstring>
#include <new> // For std::nothrow.
// ========================================================
// struct UnixPageProvider:
// ========================================================
#ifdef MEM_PLATFORM_UNIX_LIKE
#include <sys/types.h>
#include <sys/mman.h>
#include <unistd.h>
#include <errno.h>
// Not available on macOS -- according to the documentation, this flag is optional and may not be supported.
#if !defined(MAP_UNINITIALIZED)
#define MAP_UNINITIALIZED 0
#endif // MAP_UNINITIALIZED
std::size_t UnixPageProvider::querySystemPageSize()
{
    return static_cast<std::size_t>(sysconf(_SC_PAGE_SIZE));
}
bool UnixPageProvider::allocatePages(MemPage * pages, const std::size_t pageCount, const std::size_t pageSizeBytes)
{
const std::size_t lengthBytes = (pageCount * pageSizeBytes);
void * baseAddress = mmap(nullptr, lengthBytes, (PROT_READ | PROT_WRITE), (MAP_PRIVATE | MAP_ANONYMOUS | MAP_UNINITIALIZED), -1, 0);
if (baseAddress == nullptr || baseAddress == MAP_FAILED)
{
MEM_ERROR("mmap(%zu bytes, %zu pages, PROT_RW) failed with error code %d: '%s'",
lengthBytes, pageCount, errno, std::strerror(errno));
return false;
}
// Assign the addresses:
std::uint8_t * addr = static_cast<std::uint8_t *>(baseAddress);
for (std::size_t p = 0; p < pageCount; ++p)
{
pages[p].address = addr;
pages[p].flags = (MemPage::AccessRead | MemPage::AccessWrite);
addr += pageSizeBytes;
}
return true;
}
bool UnixPageProvider::freePages(MemPage * pages, const std::size_t pageCount, const std::size_t pageSizeBytes)
{
std::size_t successCount = 0;
for (std::size_t p = 0; p < pageCount; ++p)
{
if (munmap(pages[p].address, pageSizeBytes) != 0)
{
MEM_ERROR("munmap(%p) failed with error code %d: '%s'",
pages[p].address, errno, std::strerror(errno));
continue;
}
pages[p].address = nullptr;
pages[p].flags = MemPage::IsFreed;
++successCount;
}
return (successCount == pageCount);
}
bool UnixPageProvider::protectPages(MemPage * pages, const std::size_t pageCount, const std::size_t pageSizeBytes)
{
std::size_t successCount = 0;
for (std::size_t p = 0; p < pageCount; ++p)
{
if (mprotect(pages[p].address, pageSizeBytes, PROT_NONE) != 0)
{
MEM_ERROR("mprotect(%p, PROT_NONE) failed with error code %d: '%s'",
pages[p].address, errno, std::strerror(errno));
continue;
}
pages[p].flags = MemPage::AccessProtected;
++successCount;
}
return (successCount == pageCount);
}
#endif // MEM_PLATFORM_UNIX_LIKE
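// ========================================================
// struct WindowsPageProvider (illustrative sketch):
// ========================================================
// The Windows back-end is still a TODO in the header. The commented-out code
// below is a hypothetical sketch on top of the VirtualAlloc APIs, mirroring
// the UnixPageProvider interface; it is untested and assumes freePages()
// always receives the exact contiguous range previously returned by
// allocatePages(), since MEM_RELEASE must be given the base address of the
// original reservation with a size of zero.
/*
#ifdef MEM_PLATFORM_WINDOWS
#include <windows.h>
std::size_t WindowsPageProvider::querySystemPageSize()
{
    SYSTEM_INFO si;
    GetSystemInfo(&si);
    return si.dwPageSize;
}
bool WindowsPageProvider::allocatePages(MemPage * pages, const std::size_t pageCount, const std::size_t pageSizeBytes)
{
    // Reserve and commit the whole range with a single call:
    void * baseAddress = VirtualAlloc(nullptr, pageCount * pageSizeBytes, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
    if (baseAddress == nullptr)
    {
        MEM_ERROR("VirtualAlloc(%zu pages) failed with error code %lu", pageCount, GetLastError());
        return false;
    }
    std::uint8_t * addr = static_cast<std::uint8_t *>(baseAddress);
    for (std::size_t p = 0; p < pageCount; ++p)
    {
        pages[p].address = addr;
        pages[p].flags = (MemPage::AccessRead | MemPage::AccessWrite);
        addr += pageSizeBytes;
    }
    return true;
}
bool WindowsPageProvider::freePages(MemPage * pages, const std::size_t pageCount, const std::size_t pageSizeBytes)
{
    (void)pageSizeBytes; // Size must be zero when releasing a whole reservation.
    if (!VirtualFree(pages[0].address, 0, MEM_RELEASE))
    {
        MEM_ERROR("VirtualFree(%p) failed with error code %lu", pages[0].address, GetLastError());
        return false;
    }
    for (std::size_t p = 0; p < pageCount; ++p)
    {
        pages[p].address = nullptr;
        pages[p].flags = MemPage::IsFreed;
    }
    return true;
}
bool WindowsPageProvider::protectPages(MemPage * pages, const std::size_t pageCount, const std::size_t pageSizeBytes)
{
    std::size_t successCount = 0;
    for (std::size_t p = 0; p < pageCount; ++p)
    {
        DWORD oldProtect = 0;
        if (!VirtualProtect(pages[p].address, pageSizeBytes, PAGE_NOACCESS, &oldProtect))
        {
            MEM_ERROR("VirtualProtect(%p, PAGE_NOACCESS) failed with error code %lu", pages[p].address, GetLastError());
            continue;
        }
        pages[p].flags = MemPage::AccessProtected;
        ++successCount;
    }
    return (successCount == pageCount);
}
#endif // MEM_PLATFORM_WINDOWS
*/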
// ========================================================
// class DebugPageAllocator:
// ========================================================
DebugPageAllocator::DebugPageAllocator(const std::uint32_t id)
: m_allocatorId { id }
, m_fillValAllocedMem { 0xFA } // Arbitrary - change to whatever best suits your project
, m_userBlockAlignLeft { false }
, m_userBlockAlignPingPong { false }
, m_lastAllocWasLeftAligned{ false }
, m_patternFillUserBlocks { true }
{
}
MemBlock DebugPageAllocator::allocate(const std::size_t sizeInBytes, const std::size_t alignment)
{
MEM_ASSERT(sizeInBytes != 0);
MEM_ASSERT(alignment != 0);
MEM_ASSERT(MemUtils::isPowerOfTwo(alignment));
const std::size_t alignedSize = MemUtils::alignSize(sizeInBytes, alignment);
const std::size_t pageSize = m_sysPageAllocator.getPageSizeInBytes();
// Number of whole pages needed to fit the aligned size, rounded up (minimum of 1 page).
const std::size_t pageCount = (alignedSize + pageSize - 1) / pageSize;
MemBlock newBlock;
MemPage * firstPage;
void * addr = allocatePageRange(pageCount, alignedSize, &firstPage);
if (addr != nullptr)
{
newBlock.address = MemUtils::alignPtr(addr, alignment);
newBlock.firstPage = firstPage;
newBlock.sizeInBytes = sizeInBytes;
newBlock.alignment = static_cast<std::uint32_t>(alignment);
newBlock.pageCount = static_cast<std::uint32_t>(pageCount);
}
else
{
newBlock.invalidate();
}
return newBlock;
}
void DebugPageAllocator::free(MemBlock * block)
{
MEM_ASSERT(block != nullptr);
if (!block->isValid())
{
return;
}
// Should always free from the correct allocator!
MEM_ASSERT(owns(*block));
// Just mark all user pages in the block range as read/write protected and we are done.
// A use-after-free attempt will trap immediately.
m_sysPageAllocator.protectPages(const_cast<MemPage *>(block->firstPage), block->pageCount);
// Free the MemPage objects, including the header and footer pages.
freeMemPages(const_cast<MemPage *>(block->firstPage) - 1, block->pageCount + 2);
// Clear the mem block struct.
block->invalidate();
}
bool DebugPageAllocator::owns(const MemBlock & block) const
{
if (!block.isValid())
{
return false;
}
// Could also check all pages this block spans - currently just checking the head page for performance.
return (block.firstPage->allocatorId == m_allocatorId);
}
bool DebugPageAllocator::owns(const MemPage & page) const
{
return (page.address != nullptr && page.allocatorId == m_allocatorId);
}
void DebugPageAllocator::setLeftAlignAllocations(const bool doLeftAlign)
{
m_userBlockAlignLeft = doLeftAlign;
}
void DebugPageAllocator::setPingPongAlignAllocations(const bool doPingPong)
{
m_userBlockAlignPingPong = doPingPong;
m_userBlockAlignLeft = false;
}
void DebugPageAllocator::setFillNewlyAllocatedBlocks(const bool doFill)
{
m_patternFillUserBlocks = doFill;
}
void DebugPageAllocator::setNewBlockFillValue(const std::uint8_t val)
{
m_fillValAllocedMem = val;
}
MemPage * DebugPageAllocator::newMemPages(const std::size_t pageCount)
{
    // Use the nothrow form of operator new[] so the caller's null check is
    // meaningful (the plain form would throw std::bad_alloc instead).
    auto * pages = new(std::nothrow) MemPage[pageCount];
    if (pages == nullptr)
    {
        return nullptr;
    }
    // Link pages to this allocator:
    for (std::size_t p = 0; p < pageCount; ++p)
    {
        pages[p].allocatorId = m_allocatorId;
    }
    return pages;
}
void DebugPageAllocator::freeMemPages(MemPage * pages, std::size_t /*count*/)
{
delete[] pages;
}
void * DebugPageAllocator::allocatePageRange(std::size_t pageCount, const std::size_t blockSize, MemPage ** firstPageInBlock)
{
MEM_ASSERT(pageCount != 0);
MEM_ASSERT(firstPageInBlock != nullptr);
// Add a read/write protected footer and header to the range of pages.
pageCount += 2;
MemPage * pages = newMemPages(pageCount);
if (pages == nullptr)
{
MEM_ERROR("Failed to allocate %zu MemPages for the DebugPageAllocator!", pageCount);
return nullptr;
}
if (!m_sysPageAllocator.allocatePages(pages, pageCount))
{
MEM_ERROR("Failed to allocate %zu system memory pages! Probably out of virtual memory.", pageCount);
freeMemPages(pages, pageCount);
return nullptr;
}
MemPage * header = &pages[0];
MemPage * user = &pages[1];
MemPage * footer = &pages[pageCount - 1];
(*firstPageInBlock) = user;
// Header and footer are protected. Trying to read or write to those will trap immediately.
// User page(s) remain as read/write.
m_sysPageAllocator.protectPages(header, 1);
m_sysPageAllocator.protectPages(footer, 1);
bool alignLeft;
if (m_userBlockAlignLeft)
{
alignLeft = true;
m_lastAllocWasLeftAligned = true;
}
else if (m_userBlockAlignPingPong)
{
alignLeft = !m_lastAllocWasLeftAligned;
m_lastAllocWasLeftAligned = alignLeft;
}
else // Right-align to the last user page
{
alignLeft = false;
}
// If the allocation is going to be left-aligned with the pages, we don't have to do anything,
// the user block already starts immediately after the header. If we are aligning the user portion
// to the end of the page range (right-align), then we might have to shift the starting address.
void * userStartAddr = user->address;
if (!alignLeft)
{
userStartAddr = static_cast<std::uint8_t *>(footer->address) - blockSize;
}
// Optionally fill the whole range between header and footer with a recognizable pattern:
if (m_patternFillUserBlocks)
{
std::uint8_t * startAddr = static_cast<std::uint8_t *>(user->address);
const std::ptrdiff_t lengthBytes = static_cast<std::uint8_t *>(footer->address) - startAddr;
std::memset(startAddr, m_fillValAllocedMem, lengthBytes);
}
// Note: possibly not yet aligned to the requested alignment - the caller (allocate) runs it through MemUtils::alignPtr().
return userStartAddr;
}
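// ========================================================
// Usage sketch:
// ========================================================
// A minimal, hypothetical test driver (not part of the original gist), wrapped
// in '#if 0' so this file still builds as a standalone translation unit. It
// illustrates the trapping behavior described in the header.
#if 0
int main()
{
    DebugPageAllocator allocator{ 42 }; // Any id unique to this instance.
    MemBlock block = allocator.allocate(100);
    std::uint8_t * bytes = block.castTo<std::uint8_t>();
    bytes[0]  = 1; // OK - first byte of the user block.
    bytes[99] = 1; // OK - last byte of the user block.
    // bytes[200] = 1; // Would trap - lands in the protected footer page
    //                 // (blocks are right-aligned to the page range by default).
    allocator.free(&block);
    // bytes[0] = 1;   // Would trap - freed pages stay mapped but protected.
    return 0;
}
#endif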