Skip to content

Instantly share code, notes, and snippets.

@mlfarrell
Last active July 2, 2018 22:14
Show Gist options
  • Save mlfarrell/b72df6444e60653506dc69fdde9a0fa6 to your computer and use it in GitHub Desktop.
Save mlfarrell/b72df6444e60653506dc69fdde9a0fa6 to your computer and use it in GitHub Desktop.
Vulkan memory manager draft
#include <cassert>
#include <algorithm>
#include "VulkanMemoryManager.h"
using namespace std;
namespace vgl
{
namespace core
{
//singleton instance pointer: set by the constructor, read back by manager()
static VulkanMemoryManager *managerInstance = nullptr;
//active sizing strategy; note the "CONVERVATIVE" spelling is part of the public enum
VulkanMemoryManager::AllocationStrategy VulkanMemoryManager::allocationStrategy = VulkanMemoryManager::AS_MOBILE_CONVERVATIVE;
//[0][*] = request-size thresholds classifying small/med/large, [1][*] = pool block sizes per class;
//filled in by the constructor according to allocationStrategy
VkDeviceSize VulkanMemoryManager::allocSizeBoundaries[2][3];
static inline uint64_t fastCeil(uint64_t x, uint64_t y) { return (1 + ((x - 1) / y)); }
///Returns the singleton instance.  Precondition: a VulkanMemoryManager must already
///have been constructed (this accessor never creates one).
VulkanMemoryManager &VulkanMemoryManager::manager()
{
  auto *instance = managerInstance;

  return *instance;
}
///Constructs the (singleton) manager and derives the pool sizing tables from the
///configured allocation strategy, then caches the device's memory properties.
///allocSizeBoundaries[0][*] are request-size thresholds (small/med/large),
///allocSizeBoundaries[1][*] are the VkDeviceMemory block sizes allocated per class.
VulkanMemoryManager::VulkanMemoryManager(VkPhysicalDevice physicalDevice, VkDevice device)
  : physicalDevice(physicalDevice), device(device)
{
  //only one instance may exist at a time
  assert(!managerInstance);
  managerInstance = this;

  switch(allocationStrategy)
  {
    case AS_MOBILE_CONVERVATIVE:
      allocSizeBoundaries[0][0] = (512<<10); //512k
      allocSizeBoundaries[0][1] = (5<<20);   //5 mb
      allocSizeBoundaries[0][2] = (100<<20); //100 mb
      allocSizeBoundaries[1][0] = (1<<20);   //1 mb
      allocSizeBoundaries[1][1] = (25<<20);  //25 mb
      allocSizeBoundaries[1][2] = (100<<20); //100 mb
    break;
    case AS_DESKTOP:
      //bug fix: was (1<<10) (1 KB) which contradicted its own "1 mb" comment and was
      //smaller than even the mobile threshold; (1<<20) matches the documented intent
      allocSizeBoundaries[0][0] = (1<<20);   //1 mb
      allocSizeBoundaries[0][1] = (100<<20); //100 mb
      allocSizeBoundaries[0][2] = (500<<20); //500 mb
      allocSizeBoundaries[1][0] = (10<<20);  //10 mb
      allocSizeBoundaries[1][1] = (100<<20); //100 mb
      allocSizeBoundaries[1][2] = (250<<20); //250 mb
    break;
  }

  vkGetPhysicalDeviceMemoryProperties(physicalDevice, &memoryProperties);
}
///Frees every device-memory block still owned by the pools.
VulkanMemoryManager::~VulkanMemoryManager()
{
  for(uint32_t i = 0; i < VK_MAX_MEMORY_TYPES; i++)
  {
    for(auto &allocation : allocations[i])
    {
      if(allocation.memory)
        vkFreeMemory(device, allocation.memory, nullptr);
    }
  }

  //bug fix: clear the singleton pointer so a future manager can be constructed
  //(the constructor asserts !managerInstance, which previously dangled forever)
  if(managerInstance == this)
    managerInstance = nullptr;
}
///Returns a page-aligned suballocation of requiredSize bytes from a pool block of the
///appropriate size class, creating a new block if no existing one has room.
///Returns a null Suballocation when requiredSize is 0, exceeds the largest pooled
///class, or device memory could not be obtained.
VulkanMemoryManager::Suballocation VulkanMemoryManager::allocate(uint32_t typeFilter, VkMemoryPropertyFlags properties, VkDeviceSize requiredSize, VkDeviceSize requiredAlignment)
{
  if(requiredSize == 0)
    return {};

  //classify the request by size; bug fix: the thresholds must be tested largest-first —
  //the original ascending if/else chain made the AT_LARGE and rejection branches unreachable
  AllocationType allocType = AT_SMALL;

  if(requiredSize > allocSizeBoundaries[0][2])
    return {}; //too large for any pool class
  else if(requiredSize > allocSizeBoundaries[0][1])
    allocType = AT_LARGE;
  else if(requiredSize > allocSizeBoundaries[0][0])
    allocType = AT_MED;

  uint32_t memoryType = findMemoryType(typeFilter, properties);
  Suballocation suballoc = findSuballocation(memoryType, requiredSize, requiredAlignment, allocType);

  if(!suballoc)
  {
    //have to create a new allocation
    makeNewAllocation(memoryType, allocType);
    suballoc = findSuballocation(memoryType, requiredSize, requiredAlignment, allocType);

#ifdef DEBUG
    //this shouldn't be possible: suballocations start on page boundaries, so any
    //alignment dividing the page size is satisfied.  Bug fix: the old check threw
    //whenever offset > 0, instead of testing the offset against the alignment.
    if(suballoc && requiredAlignment > 0 && (suballoc.offset % requiredAlignment) != 0)
      throw runtime_error("VulkanMemoryManager::allocate alignment ate shit");
#endif
  }

  return suballoc;
}
///Returns a suballocation to its pool: marks its subregion free, coalesces it with
///adjacent free neighbors, and releases the whole device-memory block once its last
///suballocation is gone.  In DEBUG builds, throws if the suballocation is unknown.
void VulkanMemoryManager::free(const Suballocation &suballocation)
{
  auto &allocMap = allocationsMap[suballocation.memoryType];
  auto ai = allocMap.find(suballocation.allocationId);
  bool freed = false;

  if(ai != allocMap.end())
  {
    auto &allocation = allocations[suballocation.memoryType][ai->second];

    if(allocation.suballocationCount > 0)
    {
      //O(n) search for id here for now until I'm certain that I'm sticking with std::list
      //for these suballocations
      for(auto srIt = allocation.freeRegions.begin(); srIt != allocation.freeRegions.end(); srIt++)
      {
        auto srPrev = (srIt != allocation.freeRegions.begin()) ? prev(srIt) : allocation.freeRegions.end();
        auto srNext = next(srIt);
        auto sr = &(*srIt);

        if(sr->id == suballocation.subregionId)
        {
          freed = true;
          allocation.suballocationCount--;
          sr->free = true;

          //can we merge consecutive regions now; to keep this fast, only checking
          //left and right.  mergeSubregions(list, r1, r2) folds r1 into the region r2
          //that immediately FOLLOWS it, so argument order matters.
          if(srNext != allocation.freeRegions.end() && srNext->free)
          {
            mergeSubregions(allocation.freeRegions, srIt, srNext);
            srIt = srNext;
            srPrev = (srIt != allocation.freeRegions.begin()) ? prev(srIt) : allocation.freeRegions.end();
          }
          if(srPrev != allocation.freeRegions.end() && srPrev->free)
          {
            //bug fix: srPrev precedes srIt, so srPrev is region1 (absorbed) and srIt is
            //region2 — the old (srIt, srPrev) order corrupted srPrev's startPage/size
            mergeSubregions(allocation.freeRegions, srPrev, srIt);
          }

          if(allocation.suballocationCount == 0)
          {
            //this entire allocation is freed now
            //I may decide to defer this until later to keep the free regions
            //around on desktop machines..
            vkFreeMemory(device, allocation.memory, nullptr);
            allocation.memory = nullptr;
          }
          break;
        }
      }
    }
  }

  (void)freed; //only consulted in DEBUG builds
#ifdef DEBUG
  if(!freed)
  {
    throw runtime_error("VulkanMemoryManager::free() failed!");
  }
#endif
}
///Picks the first device memory type permitted by typeFilter that carries all of the
///requested property flags; throws runtime_error if none qualifies.
uint32_t VulkanMemoryManager::findMemoryType(uint32_t typeFilter, VkMemoryPropertyFlags properties)
{
  const uint32_t typeCount = memoryProperties.memoryTypeCount;

  for(uint32_t typeIndex = 0; typeIndex < typeCount; typeIndex++)
  {
    const bool allowedByFilter = (typeFilter & (1u << typeIndex)) != 0;
    const bool hasAllProperties = (memoryProperties.memoryTypes[typeIndex].propertyFlags & properties) == properties;

    if(allowedByFilter && hasAllProperties)
      return typeIndex;
  }

  throw runtime_error("VulkanMemoryManager::findMemoryType failed to find suitable memory type!");
}
///Allocates a dedicated VkDeviceMemory block of exactly requiredSize bytes (no pool
///rounding).  Returns VK_NULL_HANDLE (nullptr) if vkAllocateMemory fails.
//NOTE(review): the header says the caller is "entirely responsible" for this memory,
//yet the block is registered in allocations[]/allocationsMap[] below and the destructor
//frees every registered block with non-null memory — confirm this cannot double-free.
VkDeviceMemory VulkanMemoryManager::allocateDirect(uint32_t memoryType, VkDeviceSize requiredSize)
{
VkDeviceMemory result = nullptr;
VkMemoryAllocateInfo allocInfo = {};
allocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
allocInfo.allocationSize = requiredSize;
allocInfo.memoryTypeIndex = memoryType;
if(vkAllocateMemory(device, &allocInfo, nullptr, &result) == VK_SUCCESS)
{
//record the block so id -> index bookkeeping stays consistent with pooled allocations
Allocation alloc = {};
alloc.memory = result;
alloc.size = (uint32_t)fastCeil(requiredSize, pageSize); //sizes are tracked in pages
alloc.id = allocationIds++;
alloc.type = AT_UNKNOWN; //direct allocations belong to no pool size class
alloc.freeRegions = { { 0, alloc.size, true, subregionIds++ } }; //one free region spanning the block
allocationsMap[memoryType][alloc.id] = allocations[memoryType].size();
allocations[memoryType].emplace_back(alloc);
}
else
{
result = nullptr;
}
return result;
}
///Allocates a new pool block for the given memory type and size class (the block size
///comes from allocSizeBoundaries[1][type]) and registers it with one free region
///spanning the whole block.  Returns the raw VkDeviceMemory, or nullptr on failure.
///Throws runtime_error if called with AT_UNKNOWN.
VkDeviceMemory VulkanMemoryManager::makeNewAllocation(uint32_t memoryType, AllocationType type)
{
  if(type == AT_UNKNOWN)
    throw runtime_error("Unknown allocation type requested in VulkanMemoryManager::makeNewAllocation()");

  const VkDeviceSize blockSize = allocSizeBoundaries[1][(int)type];

  VkMemoryAllocateInfo allocInfo = {};
  allocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
  allocInfo.allocationSize = blockSize;
  allocInfo.memoryTypeIndex = memoryType;

  VkDeviceMemory blockMemory = nullptr;
  if(vkAllocateMemory(device, &allocInfo, nullptr, &blockMemory) != VK_SUCCESS)
    return nullptr;

  //register the block: sizes are tracked in pages, and the whole block starts free
  Allocation newAlloc = {};
  newAlloc.memory = blockMemory;
  newAlloc.size = (uint32_t)fastCeil(blockSize, pageSize);
  newAlloc.id = allocationIds++;
  newAlloc.type = type;
  newAlloc.freeRegions = { { 0, newAlloc.size, true, subregionIds++ } };
  allocationsMap[memoryType][newAlloc.id] = allocations[memoryType].size();
  allocations[memoryType].emplace_back(newAlloc);

  return blockMemory;
}
///Searches the existing pool blocks of the matching size class for a free region large
///enough for requiredSize, carves the suballocation out of it, and returns it.
///Returns a null Suballocation if no block has room.
VulkanMemoryManager::Suballocation VulkanMemoryManager::findSuballocation(uint32_t memoryType, VkDeviceSize requiredSize, VkDeviceSize requiredAlignment, AllocationType type)
{
  Suballocation result = nullptr;
  uint32_t requiredPageSize = (uint32_t)fastCeil(requiredSize, pageSize);

  //every suballocation starts on a page (4096) boundary, so any power-of-two alignment
  //up to the page size is automatically satisfied.  Bug fix: the old test
  //(requiredAlignment / pageSize != 0) wrongly rejected an alignment of exactly 4096,
  //which the original comment says should be fine.
  if(requiredAlignment > pageSize)
  {
    throw runtime_error("VulkanMemoryManager::findSuballocation() failed to obtain correct alignment for suballocation");
  }

  //O(n)? scary?
  for(auto &allocation : allocations[memoryType]) if(allocation.memory && allocation.type == type)
  {
    for(auto srIt = allocation.freeRegions.begin(); srIt != allocation.freeRegions.end(); srIt++)
    {
      auto sr = &(*srIt);

      if(sr->free && requiredPageSize <= sr->size)
      {
        result.memory = allocation.memory;
        result.offset = sr->startPage*pageSize;
        result.size = requiredPageSize;
        result.memoryType = memoryType;
        result.allocationId = allocation.id;

        auto region = divideSubregion(allocation.freeRegions, srIt, requiredPageSize);
        result.subregionId = region->id;
        allocation.suballocationCount++;

        //bug fix: return right away — the old `break` only exited the inner loop, so a
        //later block could be suballocated again, overwriting result and leaking the
        //reservation (and suballocationCount increment) made on this block
        return result;
      }
    }
  }

  return result;
}
///Carves an in-use region of sizeInPages off the FRONT of `region`, inserting it just
///before `region` so the list stays ordered by startPage.  The remainder of `region`
///shrinks and shifts forward.  Returns a pointer to the carved (non-free) subregion;
///std::list storage keeps that pointer stable.
VulkanMemoryManager::Subregion *VulkanMemoryManager::divideSubregion(list<Subregion> &regions, list<Subregion>::iterator region, uint32_t sizeInPages)
{
  auto carvedIter = regions.emplace(region, Subregion());
  Subregion &carved = *carvedIter;

  carved.id = subregionIds++;
  carved.startPage = region->startPage;
  carved.size = sizeInPages;
  carved.free = false;

  //note: if sizeInPages == region->size this leaves a zero-size free remainder behind
  region->startPage += sizeInPages;
  region->size -= sizeInPages;

  return &carved;
}
///Coalesces two adjacent free subregions: region1 is absorbed into region2 and erased.
///Precondition (implied by the arithmetic below): both regions are free and region2 is
///the region immediately FOLLOWING region1, i.e.
///region2->startPage == region1->startPage + region1->size — region2 then grows
///backwards to cover region1's pages.  Passing the arguments in the wrong order
///corrupts the page bookkeeping.
void VulkanMemoryManager::mergeSubregions(list<Subregion> &regions, list<Subregion>::iterator region1, std::list<Subregion>::iterator region2)
{
assert(region1->free && region2->free);
region2->startPage -= region1->size;
region2->size += region1->size;
regions.erase(region1); //only region1's iterator is invalidated; region2 stays valid
}
void VulkanMemoryManager::cleanupAllocations()
{
for(uint32_t i = 0; i < VK_MAX_MEMORY_TYPES; i++)
{
auto &v = allocations[i];
v.erase(remove_if(v.begin(), v.end(), [](const Allocation &a) { return (a.memory == nullptr); }), v.end());
allocationsMap[i].clear();
for(size_t j = 0; j < allocations[i].size(); j++)
allocationsMap[i][allocations[i][j].id] = j;
}
}
}
}
#pragma once
#include <cstddef>
#include <list>
#include <map>
#include <vector>
#include "vulkan.h"
namespace vgl
{
namespace core
{
///This class will get more advanced as time passes
///Pooled Vulkan device-memory manager: carves page-granular suballocations out of a
///small number of large VkDeviceMemory blocks, grouped by memory type and size class.
///This class will get more advanced as time passes.
class VulkanMemoryManager
{
public:
  enum AllocationStrategy
  {
    AS_MOBILE_CONVERVATIVE, //smaller pools & thresholds (spelling kept for source compatibility)
    AS_DESKTOP              //larger pools & thresholds
  };

  ///Unlike other classes with static accessors, one instance of this class must already
  ///exist before calling manager().  manager() will not create one for you.
  static VulkanMemoryManager &manager();

  VulkanMemoryManager(VkPhysicalDevice physicalDevice, VkDevice device);
  ~VulkanMemoryManager();

  ///Handle to a page-aligned slice of a pooled allocation.  Contextually convertible to
  ///bool: false means null / no memory.  Members are value-initialized so a
  ///default-constructed Suballocation is a well-defined null handle (previously the
  ///non-memory fields were left uninitialized), and std::nullptr_t is used instead of
  ///relying on unqualified nullptr_t being visible at global scope.
  struct Suballocation
  {
  public:
    VkDeviceMemory memory = VK_NULL_HANDLE;
    VkDeviceSize offset = 0;  //byte offset into memory
    uint32_t size = 0;        //size in pages
    uint32_t memoryType = 0;  //device memory-type index this came from
    uint64_t allocationId = 0, subregionId = 0; //bookkeeping ids used by free()

    Suballocation() = default;
    Suballocation(std::nullptr_t null) : memory(VK_NULL_HANDLE) {}
    void operator =(std::nullptr_t null) { memory = VK_NULL_HANDLE; }
    operator bool() const { return (memory != nullptr); }
    bool operator !() const { return (memory == nullptr); }
  };

  ///Obtains a suballocation from a suitable pool block (creating one on demand);
  ///returns a null Suballocation on failure or when requiredSize is 0
  Suballocation allocate(uint32_t typeFilter, VkMemoryPropertyFlags properties, VkDeviceSize requiredSize, VkDeviceSize requiredAlignment=0);

  ///Returns a previously obtained suballocation to its pool
  void free(const Suballocation &suballocation);

  ///When using this function, the caller is entirely responsible for the returned memory object
  VkDeviceMemory allocateDirect(uint32_t memoryType, VkDeviceSize requiredSize);

protected:
  ///Size class of a pooled block; AT_UNKNOWN marks direct (non-pooled) allocations
  enum AllocationType
  {
    AT_SMALL = 0, AT_MED, AT_LARGE, AT_UNKNOWN
  };

  //granularity (in bytes) of all suballocations
  static const int pageSize = 4096;

  static AllocationStrategy allocationStrategy;
  //[0][*] = request-size class thresholds, [1][*] = pool block sizes (set by the constructor)
  static VkDeviceSize allocSizeBoundaries[2][3];

  uint32_t findMemoryType(uint32_t typeFilter, VkMemoryPropertyFlags properties);
  VkDeviceMemory makeNewAllocation(uint32_t memoryType, AllocationType type);
  Suballocation findSuballocation(uint32_t memoryType, VkDeviceSize requiredSize, VkDeviceSize requiredAlignment, AllocationType type);
  void cleanupAllocations();

  VkPhysicalDevice physicalDevice;
  VkPhysicalDeviceMemoryProperties memoryProperties;

  ///A contiguous page range within an Allocation; free marks it available for suballocation
  struct Subregion
  {
    uint32_t startPage, size;
    bool free;
    uint64_t id;
    //Subregion *next = nullptr, *prev = nullptr;
    //static Subregion *create();
    //void destroy();
  };

  ///One VkDeviceMemory block plus its page-level bookkeeping
  struct Allocation
  {
    VkDeviceMemory memory;
    uint32_t size; //in pages
    uint32_t suballocationCount;
    uint64_t id;
    AllocationType type;
    std::list<Subregion> freeRegions; //ALL subregions (free and in-use), ordered by startPage
    //Subregion *freeRegions;
  };

  Subregion *divideSubregion(std::list<Subregion> &regions, std::list<Subregion>::iterator region, uint32_t sizeInPages);
  void mergeSubregions(std::list<Subregion> &regions, std::list<Subregion>::iterator region1, std::list<Subregion>::iterator region2);

  VkDevice device;
  std::vector<Allocation> allocations[VK_MAX_MEMORY_TYPES];
  //per memory type: allocation id -> index into allocations[type] (rebuilt by cleanupAllocations)
  std::map<uint64_t, uint64_t> allocationsMap[VK_MAX_MEMORY_TYPES];
  //std::vector<Subregion> subregionPools[VK_MAX_MEMORY_TYPES];
  uint64_t allocationIds = 1, subregionIds = 1;
};
}
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment