From ee603b7478308ed0518d3e0f6902afd8cf9eac3c Mon Sep 17 00:00:00 2001
From: Drew Galbraith
Date: Wed, 2 Aug 2023 00:54:37 -0700
Subject: [PATCH] [zion] Add a basic slab allocator to the kernel.

Freeing from the slab allocator is not implemented yet, but allocations
appear to work.
---
 zion/memory/kernel_heap.cpp  |  40 +++++++++++-
 zion/memory/kernel_heap.h    |  12 ++++
 zion/memory/slab_allocator.h | 118 +++++++++++++++++++++++++++++++++++
 zion/zion.cpp                |   1 +
 4 files changed, 169 insertions(+), 2 deletions(-)
 create mode 100644 zion/memory/slab_allocator.h

diff --git a/zion/memory/kernel_heap.cpp b/zion/memory/kernel_heap.cpp
index 0fb84fc..ef7f8f4 100644
--- a/zion/memory/kernel_heap.cpp
+++ b/zion/memory/kernel_heap.cpp
@@ -18,11 +18,47 @@ KernelHeap& GetKernelHeap() {
 }  // namespace
 
 KernelHeap::KernelHeap(uint64_t lower_bound, uint64_t upper_bound)
-    : next_addr_(lower_bound), upper_bound_(upper_bound) {
+    : next_slab_addr_(lower_bound),
+      first_unsized_addr_(lower_bound + (upper_bound - lower_bound) / 2),
+      next_addr_(first_unsized_addr_),
+      upper_bound_(upper_bound) {
   gKernelHeap = this;
 }
 
+void KernelHeap::InitializeSlabAllocators() {
+  slab_8_ = glcr::MakeUnique<SlabAllocator<8>>(next_slab_addr_, 4);
+  next_slab_addr_ += 0x4000;
+  slab_16_ = glcr::MakeUnique<SlabAllocator<16>>(next_slab_addr_, 6);
+  next_slab_addr_ += 0x6000;
+  slab_32_ = glcr::MakeUnique<SlabAllocator<32>>(next_slab_addr_, 6);
+  next_slab_addr_ += 0x6000;
+}
+
 void* KernelHeap::Allocate(uint64_t size) {
+#if K_HEAP_DEBUG
+  dbgln("Alloc (%x)", size);
+#endif
+  if ((size <= 8) && slab_8_) {
+    auto ptr_or = slab_8_->Allocate();
+    if (ptr_or.ok()) {
+      return ptr_or.value();
+    }
+    dbgln("Failed allocation (slab 8): %x", ptr_or.error());
+  }
+  if ((size <= 16) && slab_16_) {
+    auto ptr_or = slab_16_->Allocate();
+    if (ptr_or.ok()) {
+      return ptr_or.value();
+    }
+    dbgln("Failed allocation (slab 16): %x", ptr_or.error());
+  }
+  if ((size <= 32) && slab_32_) {
+    auto ptr_or = slab_32_->Allocate();
+    if (ptr_or.ok()) {
+      return ptr_or.value();
+    }
+    dbgln("Failed allocation (slab 32): %x", ptr_or.error());
+  }
   if (next_addr_ + size >= upper_bound_) {
     panic("Kernel Heap Overrun (next, size, max): %m, %x, %m", next_addr_,
           size, upper_bound_);
@@ -67,5 +103,5 @@ void KernelHeap::RecordSize(uint64_t size) {
 void* operator new(uint64_t size) { return GetKernelHeap().Allocate(size); }
 void* operator new[](uint64_t size) { return GetKernelHeap().Allocate(size); }
 
-void operator delete(void*, uint64_t) {}
+void operator delete(void*, uint64_t size) {}
 void operator delete[](void*) {}
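Note: the constructor change above splits the heap range in half. Slab pages
are carved from the bottom half starting at lower_bound, while the
pre-existing bump allocator now starts at the midpoint (first_unsized_addr_).
A standalone sketch of that arithmetic, using the bounds passed in zion.cpp
below (illustrative host code, not part of the patch):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint64_t lower = 0xFFFFFFFF'40000000;  // heap lower_bound
      uint64_t upper = 0xFFFFFFFF'80000000;  // heap upper_bound
      // first_unsized_addr_: midpoint where the bump allocator begins.
      uint64_t first_unsized = lower + (upper - lower) / 2;
      assert(first_unsized == 0xFFFFFFFF'60000000);
      // InitializeSlabAllocators reserves 4 + 6 + 6 pages (0x10000 bytes)
      // of the slab half for the 8-, 16-, and 32-byte size classes.
      assert((4 + 6 + 6) * 0x1000 == 0x10000);
      return 0;
    }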
diff --git a/zion/memory/kernel_heap.h b/zion/memory/kernel_heap.h
index ae8f772..f77354c 100644
--- a/zion/memory/kernel_heap.h
+++ b/zion/memory/kernel_heap.h
@@ -1,19 +1,31 @@
 #pragma once
 
+#include <glacier/memory/unique_ptr.h>
 #include <stdint.h>
 
+#include "memory/slab_allocator.h"
+
 class KernelHeap {
  public:
   KernelHeap(uint64_t lower_bound, uint64_t upper_bound);
+  void InitializeSlabAllocators();
 
   void* Allocate(uint64_t size);
+  void Free(void* address);
+
   static void DumpDistribution();
 
  private:
+  uint64_t next_slab_addr_;
+  uint64_t first_unsized_addr_;
   uint64_t next_addr_;
   uint64_t upper_bound_;
 
+  glcr::UniquePtr<SlabAllocator<8>> slab_8_;
+  glcr::UniquePtr<SlabAllocator<16>> slab_16_;
+  glcr::UniquePtr<SlabAllocator<32>> slab_32_;
+
   // Distribution collection for the purpose of investigating a slab allocator.
   // 0: 0-4B
   // 1: 4B-8B
diff --git a/zion/memory/slab_allocator.h b/zion/memory/slab_allocator.h
new file mode 100644
index 0000000..1e885c8
--- /dev/null
+++ b/zion/memory/slab_allocator.h
@@ -0,0 +1,118 @@
+#pragma once
+
+#include <glacier/container/intrusive_list.h>
+#include <glacier/memory/ref_counted.h>
+#include <glacier/memory/ref_ptr.h>
+#include <glacier/status/error_or.h>
+#include <stdint.h>
+
+#include "memory/paging_util.h"
+
+constexpr uint64_t kSlabSentinel = 0xDEADBEEF'C0DEC0DE;
+
+// TODO: Add variable sized slabs (i.e. multiple page slabs.)
+template <uint64_t ElemSize>
+class Slab : public glcr::RefCounted<Slab<ElemSize>>,
+             public glcr::IntrusiveListNode<Slab<ElemSize>> {
+ public:
+  explicit Slab(uint64_t addr) : page_addr_(addr) {
+    for (uint64_t i = 0; i < kBitmapLength; i++) {
+      bitmap_[i] = 0;
+    }
+
+    // Reserve the first 16 bytes (one or two elements, depending on
+    // ElemSize) for the sentinel and a back-pointer to this Slab.
+    bitmap_[0] = ElemSize <= 8 ? 0x3 : 0x1;
+    EnsureResident(page_addr_, 16);
+    uint64_t* first_elem = reinterpret_cast<uint64_t*>(page_addr_);
+    first_elem[0] = kSlabSentinel;
+    first_elem[1] = reinterpret_cast<uint64_t>(this);
+  }
+  Slab(Slab&) = delete;
+  Slab(Slab&&) = delete;
+
+  glcr::ErrorOr<void*> Allocate() {
+    uint64_t index = GetFirstFreeIndex();
+    if (index == 0) {
+      return glcr::EXHAUSTED;
+    }
+    bitmap_[index / 64] |= (0x1ul << (index % 64));
+    return reinterpret_cast<void*>(page_addr_ + (index * ElemSize));
+  }
+
+  glcr::ErrorCode Free(void* addr) {
+    uint64_t raw_addr = reinterpret_cast<uint64_t>(addr);
+    if (raw_addr < page_addr_ || raw_addr >= (page_addr_ + 0x1000)) {
+      return glcr::INVALID_ARGUMENT;
+    }
+    // FIXME: Check alignment.
+    uint64_t offset = raw_addr - page_addr_;
+    uint64_t index = offset / ElemSize;
+    if (index == 0) {
+      return glcr::FAILED_PRECONDITION;
+    }
+    bitmap_[index / 64] &= ~(0x1ul << (index % 64));
+    return glcr::OK;
+  }
+
+  bool IsFull() {
+    for (uint64_t i = 0; i < kBitmapLength; i++) {
+      if (bitmap_[i] != -1ul) {
+        return false;
+      }
+    }
+    return true;
+  }
+
+ private:
+  // FIXME: Likely a bug or two here if the number of elements doesn't evenly
+  // divide in to the bitmap length.
+  static constexpr uint64_t kBitmapLength = 0x1000 / ElemSize / 64;
+  static constexpr uint64_t kMaxElements = 0x99E / ElemSize;
+  uint64_t bitmap_[kBitmapLength];
+  uint64_t page_addr_;
+
+  uint64_t GetFirstFreeIndex() {
+    uint64_t bi = 0;
+    while (bi < kBitmapLength && (bitmap_[bi] == -1ul)) {
+      bi++;
+    }
+    if (bi == kBitmapLength) {
+      return 0;
+    }
+    // FIXME: Use hardware bit instructions here.
+    uint64_t bo = 0;
+    uint64_t bm = bitmap_[bi];
+    while (bm & 0x1) {
+      bm >>= 1;
+      bo += 1;
+    }
+    return (bi * 64) + bo;
+  }
+};
+
+template <uint64_t ElemSize>
+class SlabAllocator {
+ public:
+  SlabAllocator() = delete;
+  SlabAllocator(SlabAllocator&) = delete;
+
+  // TODO: Add a Kernel VMMO Struct to hold things like this.
+  SlabAllocator(uint64_t base_addr, uint64_t num_pages) {
+    for (uint64_t i = 0; i < num_pages; i++, base_addr += 0x1000) {
+      slabs_.PushBack(glcr::MakeRefCounted<Slab<ElemSize>>(base_addr));
+    }
+  }
+
+  glcr::ErrorOr<void*> Allocate() {
+    glcr::RefPtr<Slab<ElemSize>> curr = slabs_.PeekFront();
+    while (curr) {
+      if (curr->IsFull()) {
+        curr = curr->next_;
+        continue;
+      }
+      return curr->Allocate();
+    }
+    return glcr::EXHAUSTED;
+  }
+
+ private:
+  glcr::IntrusiveList<Slab<ElemSize>> slabs_;
+};
diff --git a/zion/zion.cpp b/zion/zion.cpp
index 75ca733..fc44e51 100644
--- a/zion/zion.cpp
+++ b/zion/zion.cpp
@@ -25,6 +25,7 @@ extern "C" void zion() {
   phys_mem::InitBootstrapPageAllocation();
   KernelHeap heap(0xFFFFFFFF'40000000, 0xFFFFFFFF'80000000);
   phys_mem::InitPhysicalMemoryManager();
+  heap.InitializeSlabAllocators();
   dbgln("[boot] Memory allocations available now.");
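Note: GetFirstFreeIndex above carries a FIXME to use hardware bit
instructions. One possible shape for that change, assuming the kernel builds
with GCC or Clang (both provide __builtin_ctzll); this is a sketch against
the same bitmap layout, not code from the patch:

    #include <stdint.h>

    // Returns the first clear bit across the bitmap words, or 0 if every
    // bit is set (index 0 is the slab's reserved sentinel slot anyway).
    uint64_t GetFirstFreeIndex(const uint64_t* bitmap, uint64_t len) {
      for (uint64_t i = 0; i < len; i++) {
        if (bitmap[i] != ~0ull) {
          // Inverting turns the first zero bit into the first one bit;
          // counting trailing zeros then yields its position.
          return i * 64 + __builtin_ctzll(~bitmap[i]);
        }
      }
      return 0;
    }

Each slab also stamps kSlabSentinel and a pointer to itself into the first
16 bytes of its page, presumably so that the declared-but-unimplemented
KernelHeap::Free can round a freed address down to its page boundary, verify
the sentinel, and dispatch to the owning Slab's Free.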