Memory Management code taken from my Crystal OS project (AMD64!).

Hello there

This code comes from my operating system project, written in Crystal. The target platform was AMD64, which is why it uses 64-bit integers all over the place. The whole thing isn't open source (yet?), but I stopped working on it, so..

Take what you want from it. However, a short source attribution, e.g. an in-source comment pointing to this gist or to me, would be really neat :)

module Kernel
module Memory
# Provides high-level memory management functionality for dynamic memory
# allocations.
#
# The two public methods are {#allocate} and {#free}.
module Allocator
extend self
CHILD_MAGIC = 0xAF96324Au32
PARENT_MAGIC = 0x7EAC0FFEu32
CHILD_FREE_MAGIC = CHILD_MAGIC - 1u32
# Forward declarations
struct Parent; end
struct Child; end
# Manages a small memory region inside a {Parent}
struct Child
@available : Bool
@size : UInt32
@parent : Parent*
@next_child : Child*?
@magic : UInt32 = CHILD_MAGIC
def available?; @available; end
def size; @size; end
def next_child; @next_child; end
def parent; @parent; end
def available=(v); @available = v; end
def size=(v); @size = v; end
def next_child=(v); @next_child = v; end
def initialize(@parent, @size, @available = false, @next_child = nil)
end
def dispose
@magic = CHILD_FREE_MAGIC
end
def verify
(@magic == CHILD_MAGIC)
end
def verify!
if @magic == CHILD_FREE_MAGIC
kernel_panic "Memory Corruption: Double free detected"
elsif @magic != CHILD_MAGIC
kernel_panic "Memory Corruption: Wrong MAGIC in child: ", @magic
end
end
end
# Manages a large memory region using many children.
# The available size of the region is {Physical::PAGE_SIZE} minus the
# size of the Parent managing it and at least one {Child}.
#
# Each parent manages one or more virtual memory pages as a single,
# linear memory region.
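#
# Layout of such a region (illustrative; the Parent header comes first,
# the first Child directly after it, and each allocation's data follows
# its Child header):
#   [Parent][Child][data ...][Child][data ...] ...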
struct Parent
@page_count : UInt32
@size_available : UInt32
@child : Child*
@next_parent : Parent*?
@magic : UInt32 = PARENT_MAGIC
def page_count; @page_count; end
def size_available; @size_available; end
def child; @child; end
def next_parent; @next_parent; end
def size_available=(v); @size_available = v; end
def child=(v); @child = v; end
def next_parent=(v); @next_parent = v; end
def initialize(
@child,
@page_count,
@size_available,
@next_parent = nil,
)
end
def verify
(@magic == PARENT_MAGIC)
end
def verify!
unless @magic == PARENT_MAGIC
kernel_panic "Memory Corruption: Wrong MAGIC in parent: ", @magic
end
end
end
# If the size of the child minus the size of the allocation is less than
# or equal to this amount, the child keeps its full size instead of
# being split into smaller chunks.
WASTE_THRESHOLD = sizeof(Child) * 2
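# For example, if sizeof(Child) were 24 (illustrative; the real value
# depends on the struct layout), the threshold would be 48: an allocation
# leaving more than 48 spare bytes in a child splits it into a used child
# plus a new free child, while anything less stays as slack.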
# Minimum overhead per parent.
BASE_OVERHEAD = (sizeof(Parent) + sizeof(Child)).to_u32
# Start of the linked-list
@@first_parent : Pointer(Parent) = Pointer(Parent).null
# First entry in the linked-list, which actually has space available.
@@first_with_available : Pointer(Parent) = Pointer(Parent).null
# Count of currently allocated pages in total
@@total_page_count : UInt32 = 0u32
def first_parent; @@first_parent; end
def total_page_count; @@total_page_count; end
# Initializes the allocator.
def start
@@total_page_count = 0u32
@@first_parent = create_new_parent
@@first_with_available = @@first_parent
end
private def create_new_parent(page_count : UInt32 = 1u32) : Pointer(Parent)
size_avail = (Physical::PAGE_SIZE * page_count) - BASE_OVERHEAD
parent = allocate_and_map_pages(page_count).as(Parent*)
child = (parent + 1).as(Child*)
child.value = Child.new(
available: true,
parent: parent,
size: size_avail.to_u32,
)
parent.value = Parent.new(
child: child,
page_count: page_count,
size_available: size_avail.to_u32,
)
parent
end
# Allocates a memory region with size in *bytes* and returns it as
# void pointer.
def allocate(bytes : UInt32) : Pointer(Void)
# Fast-path
if alloc = try_allocation_in_parent(@@first_with_available, bytes)
return alloc
end
# Find parent with enough space
previous = parent = @@first_parent
while parent
if alloc = try_allocation_in_parent(parent, bytes)
return alloc
end
previous, parent = parent, parent.value.next_parent
end
# If we end up here, there are no parents with enough available space
pages = (bytes + BASE_OVERHEAD) / Physical::PAGE_SIZE
new_parent = create_new_parent((pages + 1).to_u32) # Create new parent
@@first_with_available = new_parent # Remember this one
previous.value.next_parent = new_parent # Hook it up ...
do_allocation(new_parent, new_parent.value.child, bytes) # Allocate!
end
# ditto
def allocate(bytes)
allocate bytes.to_u32
end
private def try_allocation_in_parent(parent, bytes)
parent.value.verify!
if parent.value.size_available >= bytes
try_allocation(parent, bytes)
end
end
# Frees the given memory region. It must not be used for *anything*
# at all afterwards.
def free(ptr : Pointer(Void)) : Nil
child = ptr.as(Child*) - 1
parent = child.value.parent
child.value.verify! # Check for memory corruption
parent.value.verify!
if child.value.available?
kernel_panic "Memory Corruption: Double-free detected at ", ptr
end
child.value.available = true # Mark as available
parent.value.size_available += child.value.size
# Check if we can merge memory regions
# Merge with previous child if it is available
prev_child = find_previous_child(child)
if prev_child && prev_child.value.available?
child = prev_child
merge_children(child)
end
# Merge with the next child if it is available
next_child = child.value.next_child
if next_child
next_child.value.verify!
merge_children(child) if next_child.value.available?
end
if is_parent_empty(parent)
destroy_parent(parent)
@@first_with_available = @@first_parent
elsif @@first_with_available.value.size_available < parent.value.size_available
@@first_with_available = parent
end
end
# ditto
def free(ptr : Pointer)
free(ptr.as(Void*))
end
# ditto
def free(ptr : UInt64)
free(Pointer(Void).new(ptr))
end
# Verifies all parents and children. If one fails, the method
# will panic. If all structures are fine, it does nothing.
def verify_data_structures! : Nil
parent = @@first_parent
while parent
parent.value.verify!
child = parent.value.child
available = 0u32
while child
child.value.verify!
available += child.value.size if child.value.available?
child = child.value.next_child
end
if available != parent.value.size_available
kernel_panic "Wrong available size for parent ", parent, ": ", available, " != ", parent.value.size_available
end
parent = parent.value.next_parent
end
end
private def destroy_parent(parent)
previous = find_previous_parent(parent)
# Remove `parent` from the linked-list
previous.value.next_parent = parent.value.next_parent
free_parent(parent)
end
private def free_parent(parent) : Nil
@@total_page_count -= parent.value.page_count
# Unmap the virtual memory for it
vmm = Virtual.kernel_instance
parent.value.page_count.times do |page_off|
vmm.value.unmap(parent.address + page_off * Physical::PAGE_SIZE)
end
end
private def find_previous_parent(parent) : Pointer(Parent)
current = previous = @@first_parent
return parent if parent == current
while current != parent
current.value.verify!
current, previous = current.value.next_parent, current
return @@first_parent unless current
end
previous || @@first_parent
end
private def is_parent_empty(parent) : Bool
# Make sure we never deallocate the first parent
return false if parent == @@first_parent
max_available = (parent.value.page_count * Physical::PAGE_SIZE) - BASE_OVERHEAD
has_available = parent.value.size_available
if max_available == has_available
true
elsif has_available > max_available
kernel_panic "Parent has more space available than possible: ", has_available, " > ", max_available
else
false
end
end
private def find_previous_child(child) : Pointer(Child) | Nil
parent = child.value.parent
current = previous = parent.value.child
# Break if this is the first child.
return nil if current == child
while current != child
current.value.verify!
current, previous = current.value.next_child, current
return nil unless current
end
previous
end
private def merge_children(child) : Nil
partner = child.value.next_child
# We need a next_child to merge with
kernel_panic "Allocator#merge_children: next_child is nil" unless partner
# They must have the same parents
if child.value.parent != partner.value.parent
kernel_panic "Allocator#merge_children: Attempting to merge children of different parents"
end
# Update child
child.value.size += sizeof(Child) + partner.value.size
# Remove `partner` out of the linked list
child.value.next_child = partner.value.next_child
child.value.parent.value.size_available += sizeof(Child)
# Mark partner as disposed
partner.value.dispose
end
private def try_allocation(parent, bytes) : Pointer(Void) | Nil
child, previous = find_available_child(parent, bytes)
if child # Have we found a fitting child?
do_allocation(parent, child, bytes)
end
end
private def do_allocation(parent, child, bytes) : Pointer(Void)
child.value.verify!
child.value.available = false # This child is now in use
# Assertions
if child.value.size.to_u64.to_i64 - bytes.to_u64.to_i64 < 0
kernel_panic "Logic error in Allocator#do_allocation: Child ", child.value.size, " - ", bytes, " < 0!"
end
if parent.value.size_available.to_u64.to_i64 - bytes.to_u64.to_i64 < 0
kernel_panic "Logic error in Allocator#do_allocation: Parent ", parent.value.size_available, " < 0!"
end
# Split memory region if it makes sense
if child.value.size - bytes > WASTE_THRESHOLD
parent.value.size_available -= bytes
split_child(parent, child, bytes)
else
parent.value.size_available -= child.value.size
end
# Is the page full?
if parent == @@first_with_available && parent.value.size_available == 0
@@first_with_available = @@first_parent
end
# Return data pointer
(child + 1).as(Void*)
end
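# Illustrative before/after of a split (sizes not to scale):
#   before: [Child (size S)][ ................ S bytes ................ ]
#   after:  [Child (bytes)][ bytes ][Child (S - bytes - sizeof(Child), free)][ rest ]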
private def split_child(parent, child, bytes) : Nil
remaining = child.value.size.to_u64.to_i64 - bytes.to_u64.to_i64 - sizeof(Child)
if remaining < 0
kernel_panic "Logic error in Allocator#split_child: ", remaining, " < 0!"
end
# Update previous child
new_child = (child.as(UInt8*) + sizeof(Child) + bytes).as(Child*)
child.value.size = bytes
partner = child.value.next_child
child.value.next_child = new_child
# Update parent
parent.value.size_available -= sizeof(Child)
# Initialize new child
new_child.value = Child.new(
available: true,
size: remaining.to_u32,
parent: child.value.parent,
next_child: partner,
)
end
private def find_available_child(parent, min_size)
previous = child = parent.value.child
while child
child.value.verify!
if child.value.available? && child.value.size >= min_size
return { child, previous }
end
previous, child = child, child.value.next_child
end
{ nil, previous }
end
private def allocate_and_map_pages(count : Int = 1) : Pointer(Void)
@@total_page_count += count.to_u32
Virtual.kernel_instance.value.allocate_pages(count)
end
end
end
end
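
For orientation, here is how the allocator is meant to be driven end to end. A minimal sketch, assuming the virtual memory manager behind Virtual.kernel_instance is already running, since start maps fresh pages through it:

Kernel::Memory::Allocator.start                   # Set up the first parent
ptr = Kernel::Memory::Allocator.allocate(128u32)  # 128-byte region
# ... use the memory behind ptr ...
Kernel::Memory::Allocator.free(ptr)               # Hand it back
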
module Kernel
module Memory
# Used by {Physical} to keep track of in-use physical memory "pages".
struct Bitmap
ELEMENT_SIZE = 64
FULL_ELEMENT = 0xFFFFFFFFFFFFFFFFu64
@data : UInt64* = Pointer(UInt64).new(0u64)
@byte_size : UInt64 = 0u64
@elements : Int32 = 0
# Returns the size of the *managed* memory.
# This will be equivalent to {Physical.total_amount}.
def byte_size
@byte_size
end
# Count of elements in the bitmap
def elements
@elements
end
def data
@data
end
def total_pages
@byte_size / Physical::PAGE_SIZE
end
def pages_in_use
count = 0
@elements.times do |idx|
el = @data[idx]
next if el == 0
ELEMENT_SIZE.times do |off|
count += 1 if (el & (1u64 << off)) != 0
end
end
count
end
def reset!(data, byte_size)
@data = data
@byte_size = byte_size
@elements = ((byte_size / Physical::PAGE_SIZE) / ELEMENT_SIZE).to_i32
@elements.times do |idx|
@data[idx] = 0u64
end
end
# Returns the size of the bitmap in bytes
def bitmap_byte_size
@elements * sizeof(UInt64)
end
# Marks the page at *address* to be *in_use* (or not).
# The *address* may be an address inside the page.
def mark(address : UInt64, in_use : Bool)
off, bit = bitmap_index(address)
if in_use
@data[off] |= (1u64 << bit)
else
@data[off] &= ~(1u64 << bit)
end
end
def mark_used(address : UInt64)
off, bit = bitmap_index(address)
@data[off] |= (1u64 << bit)
end
def mark_free(address : UInt64)
off, bit = bitmap_index(address)
@data[off] &= ~(1u64 << bit)
end
def mark_page_used(page : UInt64)
off, bit = bitmap_page_index(page)
@data[off] |= (1u64 << bit)
end
def mark_page_free(page : UInt64)
off, bit = bitmap_page_index(page)
@data[off] &= ~(1u64 << bit)
end
# Marks all pages which are "touched" by the address range
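# For example (4 KiB pages): address 0x1F00 with length 0x300 ends at
# 0x21FF, touching pages 1 and 2, so both get marked.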
def mark_range(address : UInt64, length : UInt64, in_use : Bool)
page_first = address / Physical::PAGE_SIZE
page_last = (address + length) / Physical::PAGE_SIZE
pages = page_last - page_first + 1
if in_use
pages.times{|off| mark_page_used(page_first + off)}
else
pages.times{|off| mark_page_free(page_first + off)}
end
end
def in_use?(address : UInt64) : Bool
off, bit = bitmap_index(address)
(@data[off] & (1u64 << bit)) != 0
end
def free?(address : UInt64) : Bool
!in_use?(address)
end
# Finds an unused page in the bitmap and returns its memory address.
# Returns `nil` if no page is available
def search_free_page : UInt64 | Nil
@elements.times do |idx|
el = @data[idx]
next if el == FULL_ELEMENT
ELEMENT_SIZE.times do |off|
if (el & (1u64 << off)) == 0
page_no = (idx * ELEMENT_SIZE) + off
return page_no.to_u64 * Physical::PAGE_SIZE
end
end
end
nil
end
def to_unsafe
@data
end
private def bitmap_index(address)
bitmap_page_index(address / Physical::PAGE_SIZE)
end
private def bitmap_page_index(page_no)
off = page_no / ELEMENT_SIZE
bit = page_no % ELEMENT_SIZE
{ off, bit }
end
end
end
end
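
For intuition, the index math in bitmap_index/bitmap_page_index works out like this (standalone sketch, values assumed):

address = 0x5000u64          # somewhere inside page 5
page_no = address / 4096u64  # => 5
off = page_no / 64           # => 0, the first UInt64 element of @data
bit = page_no % 64           # => 5, the sixth bit of that element
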
# memset(3) - fill memory with a constant byte
fun memset(ptr : Pointer(UInt8), data : Int32, size : UInt64) : Pointer(Void)
byte = data.to_u8
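# Replicate the byte into all eight lanes of a 64-bit word so the fast
# loop below can store eight bytes at a time.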
byte64 = byte.to_u64 | (byte.to_u64 << 8) | (byte.to_u64 << 16) | \
(byte.to_u64 << 24) | (byte.to_u64 << 32) | (byte.to_u64 << 40) | \
(byte.to_u64 << 48) | (byte.to_u64 << 56)
start = ptr # memset(3) returns the original pointer
# Slow fill at the start, until the pointer is 8-byte aligned
while ptr.address % 8 != 0 && size > 0
ptr.value = byte
ptr += 1
size -= 1
end
# Fast fill afterwards, eight bytes per store
ptr64 = ptr.as(UInt64*)
size64 = size / sizeof(UInt64)
while size64 > 0
ptr64.value = byte64
ptr64 += 1
size64 -= 1
end
# Slow fill for the remaining size % 8 tail bytes, which the word loop
# above does not cover
ptr = ptr64.as(UInt8*)
(size % 8).times do
ptr.value = byte
ptr += 1
end
start.as(Void*)
end
# memcpy(3) - copy memory area
fun memcpy(dest : Pointer(UInt8), src : Pointer(UInt8), size : UInt64) : Pointer(UInt8)
# Slow copy at start
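# Copying size % 8 bytes up front leaves a multiple of eight for the
# word-wise loop below, so no tail pass is needed. Unaligned 64-bit
# accesses are legal on AMD64, just potentially slower.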
start = dest # memcpy(3) returns the original destination pointer
while size % 8 != 0
dest.value = src.value
dest += 1
src += 1
size -= 1
end
# Fast copy afterwards
dest64 = dest.as(UInt64*)
src64 = src.as(UInt64*)
size64 = size / sizeof(UInt64)
while size64 > 0
dest64.value = src64.value
dest64 += 1
src64 += 1
size64 -= 1
end
start
end
# bzero(3) - write zero-valued bytes
fun bzero(ptr : Pointer(UInt8), size : UInt64) : Nil
memset(ptr, 0, size)
end
module Kernel
module Memory
# Manages the physical memory.
# Remember: The first time we use this module, there's no real memory
# available. We don't even know how *much* memory we have.
module Physical
extend self
PAGE_SIZE = 4096u64 # 4KiB
# End-address of memory commonly used by DMA transfers.
DMA_END = (16 * 1024 * 1024).to_u64 # 16MiB
# Don't use the memory region 1MiB..16MiB
# This region is used by DMA devices.
MIN_ADDRESS = DMA_END
@@total_amount : UInt64 = 0u64
@@usable_amount : UInt64 = 0u64
@@map : Bitmap = Bitmap.new
@@success : Bool = false
# Returns the total amount of detected system memory. Not all of it may
# be usable though.
def total_amount
@@total_amount
end
# Returns the total amount of *usable* system memory.
def usable_amount
@@usable_amount
end
def success?
@@success
end
def map
@@map
end
def start(multiboot : Boot::Multiboot::Data*)
@@success = false
@@total_amount = (multiboot.value.mem_lower.to_u64 + multiboot.value.mem_upper.to_u64) * 1024u64
@@usable_amount = multiboot.value.mem_upper.to_u64 * 1024u64
@@map = Bitmap.new
mmap_ptr = Pointer(UInt8).new(multiboot.value.mmap_addr.to_u64)
mmap_size = multiboot.value.mmap_length.to_u64.to_i64
# Discover memory properties
count_sizes(mmap_ptr, mmap_size)
pmm_addr = find_first_empty(mmap_ptr, mmap_size, @@total_amount)
# Fall back to a fixed address if none was found
pmm_addr = MIN_ADDRESS unless pmm_addr
# Also fall back if the found address lies below the DMA region
pmm_addr = MIN_ADDRESS if pmm_addr < MIN_ADDRESS
# Initialize the physical memory (bit-)map at some available address
@@map.reset!(Pointer(UInt64).new(pmm_addr), @@total_amount)
# Mark pages as in-use
@@map.mark_range(0u64, DMA_END, true)
# The bitmap itself is in-use too
@@map.mark_range(pmm_addr, @@map.bitmap_byte_size.to_u64, true)
# And at last, mark everything that's in-use for other reasons
mark_used_pages(mmap_ptr, mmap_size)
@@success = true
end
# Allocates a page in physical memory and returns its address.
def allocate_page : Pointer(UInt8)
# Find free page and mark it to be in use
address = @@map.search_free_page
kernel_panic "Ran out of physical memory" unless address
@@map.mark_used(address)
Pointer(UInt8).new(address) # Done.
end
# Marks the page at *page_address* to be unused.
def free_page(page_address : UInt64)
@@map.mark_free page_address
end
# ditto
def free_page(page : Pointer)
@@map.mark_free page.address
end
def walk_memory_map(mmap : UInt8*, bytes : Int64) : Nil
while bytes > 0
cur = mmap.as(Boot::MemoryMap::Data*)
yield cur
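# The Multiboot mmap entry's `size` field does not count the field
# itself, hence the extra UInt32 when advancing to the next entry.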
size = cur.value.size + sizeof(UInt32)
mmap += size
bytes -= size
end
end
private def count_sizes(mmap, bytes)
walk_memory_map(mmap, bytes) do |entry|
if !entry.value.type.available? && entry.value.base_address < @@total_amount
@@usable_amount -= entry.value.length
end
end
end
private def find_first_empty(mmap, bytes, min_size)
walk_memory_map(mmap, bytes) do |entry|
if entry.value.type.available? && entry.value.length >= min_size
return entry.value.base_address
end
end
end
private def mark_used_pages(mmap, bytes)
walk_memory_map(mmap, bytes) do |entry|
next if entry.value.base_address > @@total_amount
unless entry.value.type.available?
@@map.mark_range(entry.value.base_address, entry.value.length, true)
end
end
end
end
end
end
# POSIX-style allocation methods
fun malloc(size : UInt64) : Void*
Kernel::Memory::Allocator.allocate(size)
end
fun free(ptr : Void*) : Void
Kernel::Memory::Allocator.free(ptr)
end
fun calloc(nmemb : UInt64, size : UInt64) : Void*
bytes = nmemb * size
# Guard against multiplication overflow
kernel_panic "calloc: allocation size overflows" if size != 0 && bytes / size != nmemb
ptr = Kernel::Memory::Allocator.allocate(bytes)
memset(ptr.as(UInt8*), 0, bytes)
ptr
end
fun realloc(ptr : Void*, size : UInt64) : Void*
# TODO: Implement this properly, e.g. grow in place when possible :)
old_size = (ptr.as(Kernel::Memory::Allocator::Child*) - 1).value.size.to_u64
dest = Kernel::Memory::Allocator.allocate(size)
# Copy only as many bytes as the old region holds, else we'd read past
# the old allocation when growing
memcpy(dest.as(UInt8*), ptr.as(UInt8*), old_size < size ? old_size : size)
Kernel::Memory::Allocator.free(ptr)
dest
end
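
With these in place, C-style code can allocate from the kernel heap (sketch):

buf = malloc(64u64).as(UInt8*)
buf.value = 42u8
free(buf.as(Void*))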