diff --git a/zion/CMakeLists.txt b/zion/CMakeLists.txt
index 7b174a3..ef48bff 100644
--- a/zion/CMakeLists.txt
+++ b/zion/CMakeLists.txt
@@ -17,6 +17,7 @@ add_executable(zion
   memory/kernel_stack_manager.cpp
   memory/paging_util.cpp
   memory/physical_memory.cpp
+  memory/slab_allocator.cpp
   memory/user_stack_manager.cpp
   object/address_space.cpp
   object/channel.cpp
diff --git a/zion/memory/kernel_heap.cpp b/zion/memory/kernel_heap.cpp
index 32b17d3..d3944e1 100644
--- a/zion/memory/kernel_heap.cpp
+++ b/zion/memory/kernel_heap.cpp
@@ -15,23 +15,26 @@ KernelHeap& GetKernelHeap() {
   }
   return *gKernelHeap;
 }
+
+static uint8_t kNewByte = 0xAB;
+
+void InitMemory(uint8_t* addr, uint64_t size) {
+  for (uint64_t i = 0; i < size; i++) {
+    addr[i] = kNewByte;
+  }
+}
+
 }  // namespace
 
 KernelHeap::KernelHeap(uint64_t lower_bound, uint64_t upper_bound)
-    : next_slab_addr_(lower_bound),
-      first_unsized_addr_(lower_bound + (upper_bound - lower_bound) / 2),
-      next_addr_(first_unsized_addr_),
-      upper_bound_(upper_bound) {
+    : next_addr_(lower_bound), upper_bound_(upper_bound) {
   gKernelHeap = this;
 }
 
 void KernelHeap::InitializeSlabAllocators() {
-  slab_8_ = glcr::MakeUnique<SlabAllocator<8>>(next_slab_addr_, 4);
-  next_slab_addr_ += 0x4000;
-  slab_16_ = glcr::MakeUnique<SlabAllocator<16>>(next_slab_addr_, 6);
-  next_slab_addr_ += 0x6000;
-  slab_32_ = glcr::MakeUnique<SlabAllocator<32>>(next_slab_addr_, 6);
-  next_slab_addr_ += 0x6000;
+  slab_8_ = glcr::MakeUnique<SlabAllocator>(8);
+  slab_16_ = glcr::MakeUnique<SlabAllocator>(16);
+  slab_32_ = glcr::MakeUnique<SlabAllocator>(32);
 }
 
 void* KernelHeap::Allocate(uint64_t size) {
@@ -106,8 +109,16 @@ void KernelHeap::RecordSize(uint64_t size) {
   distributions[index]++;
 }
 
-void* operator new(uint64_t size) { return GetKernelHeap().Allocate(size); }
-void* operator new[](uint64_t size) { return GetKernelHeap().Allocate(size); }
+void* operator new(uint64_t size) {
+  void* addr = GetKernelHeap().Allocate(size);
+  InitMemory(static_cast<uint8_t*>(addr), size);
+  return addr;
+}
+void* operator new[](uint64_t size) {
+  void* addr = GetKernelHeap().Allocate(size);
+  InitMemory(static_cast<uint8_t*>(addr), size);
+  return addr;
+}
 
 void operator delete(void*, uint64_t size) {}
 void operator delete[](void*) {}
diff --git a/zion/memory/kernel_heap.h b/zion/memory/kernel_heap.h
index f77354c..dc024f0 100644
--- a/zion/memory/kernel_heap.h
+++ b/zion/memory/kernel_heap.h
@@ -17,14 +17,12 @@ class KernelHeap {
   static void DumpDistribution();
 
  private:
-  uint64_t next_slab_addr_;
-  uint64_t first_unsized_addr_;
   uint64_t next_addr_;
   uint64_t upper_bound_;
 
-  glcr::UniquePtr<SlabAllocator<8>> slab_8_;
-  glcr::UniquePtr<SlabAllocator<16>> slab_16_;
-  glcr::UniquePtr<SlabAllocator<32>> slab_32_;
+  glcr::UniquePtr<SlabAllocator> slab_8_;
+  glcr::UniquePtr<SlabAllocator> slab_16_;
+  glcr::UniquePtr<SlabAllocator> slab_32_;
 
   // Distribution collection for the purpose of investigating a slab allocator.
   // 0: 0-4B
diff --git a/zion/memory/slab_allocator.cpp b/zion/memory/slab_allocator.cpp
new file mode 100644
index 0000000..28cd24a
--- /dev/null
+++ b/zion/memory/slab_allocator.cpp
@@ -0,0 +1,95 @@
+#include "slab_allocator.h"
+
+#include "debug/debug.h"
+#include "memory/paging_util.h"
+
+namespace {
+
+// TODO: Store these in a kernel VMM.
+const uint64_t kSlabSize = 0x1000;
+uint64_t gNextSlab = 0xFFFFFFFF'40000000;
+const uint64_t gMaxSlab = 0xFFFF'FFFF'6000'0000;
+
+uint64_t NextSlab() {
+  // FIXME: Synchronization.
+  uint64_t next_slab = gNextSlab;
+  gNextSlab += kSlabSize;
+  EnsureResident(next_slab, 1);
+
+  return next_slab;
+}
+
+}  // namespace
+
+Slab::Slab(uint64_t elem_size) : elem_size_(elem_size), num_allocated_(0) {
+  uint64_t first_elem_offset = ((sizeof(Slab) - 1) / elem_size_) + 1;
+  uint64_t entry_addr =
+      reinterpret_cast<uint64_t>(this) + first_elem_offset * elem_size_;
+  FreeListEntry* entry = reinterpret_cast<FreeListEntry*>(entry_addr);
+  first_free_ = entry;
+
+  uint64_t max_entry = reinterpret_cast<uint64_t>(this) + kSlabSize;
+  entry_addr += elem_size_;
+  while (entry_addr < max_entry) {
+    FreeListEntry* next_entry = reinterpret_cast<FreeListEntry*>(entry_addr);
+    next_entry->next = nullptr;
+    entry->next = next_entry;
+    entry = next_entry;
+    entry_addr += elem_size_;
+  }
+}
+
+void* Slab::Allocate() {
+  if (IsFull()) {
+    panic("Allocating from full slab.");
+  }
+
+  FreeListEntry* to_return = first_free_;
+  first_free_ = first_free_->next;
+  num_allocated_++;
+  return static_cast<void*>(to_return);
+}
+
+void Slab::Free(void* address) {
+  if (!IsContained(address)) {
+    panic("Freeing non-contained address from slab.");
+  }
+
+  uint64_t vaddr = reinterpret_cast<uint64_t>(address);
+  if ((vaddr & ~(elem_size_ - 1)) != vaddr) {
+    panic("Freeing non-aligned address");
+  }
+
+  FreeListEntry* new_free = static_cast<FreeListEntry*>(address);
+  new_free->next = first_free_;
+  first_free_ = new_free;
+  num_allocated_--;
+}
+
+bool Slab::IsFull() { return first_free_ == nullptr; }
+bool Slab::IsContained(void* addr) {
+  uint64_t uaddr = reinterpret_cast<uint64_t>(addr);
+  uint64_t uthis = reinterpret_cast<uint64_t>(this);
+  bool is_above = uaddr > (uthis + sizeof(this));
+  bool is_below = uaddr < (uthis + kSlabSize);
+  return is_above && is_below;
+}
+
+// FIXME: Move all of these to a new file that can be included anywhere.
+inline void* operator new(uint64_t, void* p) { return p; }
+
+glcr::ErrorOr<void*> SlabAllocator::Allocate() {
+  auto slab = slabs_.PeekFront();
+  while (slab && slab->IsFull()) {
+    slab = slab->next_;
+  }
+
+  if (slab) {
+    return slab->Allocate();
+  }
+
+  dbgln("Allocating new kernel slab size {}", elem_size_);
+  void* next_slab = (uint64_t*)NextSlab();
+  slabs_.PushFront(glcr::AdoptPtr(new (next_slab) Slab(elem_size_)));
+  return slabs_.PeekFront()->Allocate();
+}
diff --git a/zion/memory/slab_allocator.h b/zion/memory/slab_allocator.h
index 1e885c8..aab4c83 100644
--- a/zion/memory/slab_allocator.h
+++ b/zion/memory/slab_allocator.h
@@ -8,111 +8,38 @@
 
 #include "memory/paging_util.h"
 
-constexpr uint64_t kSlabSentinel = 0xDEADBEEF'C0DEC0DE;
-
-// TODO: Add variable sized slabs (i.e. multiple page slabs.)
-template <uint64_t ElemSize>
-class Slab : public glcr::RefCounted<Slab<ElemSize>>,
-             public glcr::IntrusiveListNode<Slab<ElemSize>> {
+class Slab : public glcr::RefCounted<Slab>,
+             public glcr::IntrusiveListNode<Slab> {
  public:
-  explicit Slab(uint64_t addr) : page_addr_(addr) {
-    for (uint64_t i = 0; i < kBitmapLength; i++) {
-      bitmap_[i] = 0;
-    }
-
-    bitmap_[0] = ElemSize <= 8 ? 0x3 : 0x1;
-    EnsureResident(page_addr_, 16);
-    uint64_t* first_elem = reinterpret_cast<uint64_t*>(page_addr_);
-    first_elem[0] = kSlabSentinel;
-    first_elem[1] = reinterpret_cast<uint64_t>(this);
-  }
+  explicit Slab(uint64_t elem_size);
   Slab(Slab&) = delete;
   Slab(Slab&&) = delete;
 
-  glcr::ErrorOr<void*> Allocate() {
-    uint64_t index = GetFirstFreeIndex();
-    if (index == 0) {
-      return glcr::EXHAUSTED;
-    }
-    bitmap_[index / 64] |= (0x1 << (index % 64));
-    return reinterpret_cast<void*>(page_addr_ + (index * ElemSize));
-  }
+  void* Allocate();
+  void Free(void* addr);
 
-  glcr::ErrorCode Free(void* addr) {
-    uint64_t raw_addr = reinterpret_cast<uint64_t>(addr);
-    if (raw_addr < page_addr_ || raw_addr > (page_addr_ + 0x1000)) {
-      return glcr::INVALID_ARGUMENT;
-    }
-    // FIXME: Check alignment.
-    uint64_t offset = raw_addr - page_addr_;
-    uint64_t index = offset / ElemSize;
-    if (index == 0) {
-      return glcr::FAILED_PRECONDITION;
-    }
-    bitmap_[index / 64] &= ~(0x1 << (index % 64));
-  }
-
-  bool IsFull() {
-    for (uint64_t i = 0; i < kBitmapLength; i++) {
-      if (bitmap_[i] != -1) {
-        return false;
-      }
-    }
-    return true;
-  }
+  bool IsFull();
 
  private:
-  // FIXME: Likely a bug or two here if the number of elements doesn't evenly
-  // divide in to the bitmap length.
-  static constexpr uint64_t kBitmapLength = 0x1000 / ElemSize / 64;
-  static constexpr uint64_t kMaxElements = 0x99E / ElemSize;
-  uint64_t bitmap_[kBitmapLength];
-  uint64_t page_addr_;
+  struct FreeListEntry {
+    FreeListEntry* next;
+  };
 
-  uint64_t GetFirstFreeIndex() {
-    uint64_t bi = 0;
-    while (bi < kBitmapLength && (bitmap_[bi] == -1)) {
-      bi++;
-    }
-    if (bi == kBitmapLength) {
-      return 0;
-    }
-    // FIXME: Use hardware bit instructions here.
-    uint64_t bo = 0;
-    uint64_t bm = bitmap_[bi];
-    while (bm & 0x1) {
-      bm >>= 1;
-      bo += 1;
-    }
-    return (bi * 64) + bo;
-  }
+  uint64_t elem_size_;
+  uint64_t num_allocated_;
+  FreeListEntry* first_free_;
+
+  bool IsContained(void* addr);
 };
 
-template <uint64_t ElemSize>
 class SlabAllocator {
  public:
-  SlabAllocator() = delete;
+  SlabAllocator(uint64_t elem_size) : elem_size_(elem_size) {}
   SlabAllocator(SlabAllocator&) = delete;
-  // TODO: Add a Kernel VMMO Struct to hold things like this.
-  SlabAllocator(uint64_t base_addr, uint64_t num_pages) {
-    for (uint64_t i = 0; i < num_pages; i++, base_addr += 0x1000) {
-      slabs_.PushBack(glcr::MakeRefCounted<Slab<ElemSize>>(base_addr));
-    }
-  }
-
-  glcr::ErrorOr<void*> Allocate() {
-    glcr::RefPtr<Slab<ElemSize>> curr = slabs_.PeekFront();
-    while (curr) {
-      if (curr->IsFull()) {
-        curr = curr->next_;
-        continue;
-      }
-      return curr->Allocate();
-    }
-    return glcr::EXHAUSTED;
-  }
+  glcr::ErrorOr<void*> Allocate();
 
  private:
-  glcr::IntrusiveList<Slab<ElemSize>> slabs_;
+  uint64_t elem_size_;
+  glcr::IntrusiveList<Slab> slabs_;
 };
 
diff --git a/zion/zion.cpp b/zion/zion.cpp
index 6e5d7cd..7f3c035 100644
--- a/zion/zion.cpp
+++ b/zion/zion.cpp
@@ -23,7 +23,7 @@ extern "C" void zion() {
 
   early_dbgln("[boot] Init Physical Memory Manager.");
   phys_mem::InitBootstrapPageAllocation();
-  KernelHeap heap(0xFFFFFFFF'40000000, 0xFFFFFFFF'80000000);
+  KernelHeap heap(0xFFFFFFFF'60000000, 0xFFFFFFFF'80000000);
   phys_mem::InitPhysicalMemoryManager();
   heap.InitializeSlabAllocators();
   phys_mem::DumpRegions();
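
The new Slab drops the old bitmap-and-sentinel layout in favor of an intrusive free list: the Slab header is placement-new'd at the start of its 4 KiB page, the remainder of the page is carved into elem_size slots, and the list of free slots is threaded through the slots themselves, so Allocate and Free are O(1) pointer swaps with no bitmap scan. The backing pages now come from the range between gNextSlab (0xFFFFFFFF'40000000) and gMaxSlab (0xFFFF'FFFF'6000'0000), which is why zion.cpp raises the heap's lower bound to 0xFFFFFFFF'60000000. Below is a minimal, host-buildable sketch of the same free-list technique; the names (DemoSlab, kPageSize) are hypothetical, and std::aligned_alloc stands in for NextSlab()/EnsureResident():

    // demo_slab.cc - illustration only; not zion code.
    #include <cassert>
    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>
    #include <new>

    constexpr uint64_t kPageSize = 0x1000;

    class DemoSlab {
     public:
      explicit DemoSlab(uint64_t elem_size) : elem_size_(elem_size) {
        // Slots begin after the header, rounded up to a multiple of elem_size.
        uint64_t base = reinterpret_cast<uint64_t>(this);
        uint64_t first = (((sizeof(DemoSlab) - 1) / elem_size_) + 1) * elem_size_;
        // Thread the free list through the unused slots themselves.
        FreeListEntry* prev = nullptr;
        for (uint64_t off = first; off + elem_size_ <= kPageSize;
             off += elem_size_) {
          auto* entry = reinterpret_cast<FreeListEntry*>(base + off);
          entry->next = nullptr;
          if (prev == nullptr) {
            first_free_ = entry;
          } else {
            prev->next = entry;
          }
          prev = entry;
        }
      }

      void* Allocate() {
        assert(first_free_ != nullptr && "slab is full");
        FreeListEntry* out = first_free_;
        first_free_ = out->next;
        return out;
      }

      void Free(void* addr) {
        // Freed slots are pushed back onto the front of the list (LIFO reuse).
        auto* entry = static_cast<FreeListEntry*>(addr);
        entry->next = first_free_;
        first_free_ = entry;
      }

     private:
      struct FreeListEntry {
        FreeListEntry* next;
      };

      uint64_t elem_size_;
      FreeListEntry* first_free_ = nullptr;
    };

    int main() {
      // Stand-in for NextSlab()/EnsureResident(): any page-sized, page-aligned
      // buffer works on a host.
      void* page = std::aligned_alloc(kPageSize, kPageSize);
      DemoSlab* slab = new (page) DemoSlab(/*elem_size=*/16);

      void* a = slab->Allocate();
      void* b = slab->Allocate();
      std::printf("a=%p b=%p\n", a, b);

      slab->Free(a);
      assert(slab->Allocate() == a);  // the freed slot is handed out again

      std::free(page);
      return 0;
    }

Because a FreeListEntry is a single pointer, even the smallest 8-byte slab class fits exactly one entry per slot, which is what lets the free list live entirely inside the free memory it tracks.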
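
KernelHeap now owns one SlabAllocator each for 8-, 16-, and 32-byte objects (slab_8_, slab_16_, slab_32_), but the hunk that routes requests inside KernelHeap::Allocate is not shown in this diff. The size-class selection presumably resembles the sketch below; PickSizeClass is a hypothetical helper used only for illustration, and anything larger than 32 bytes still falls through to bump allocation at next_addr_:

    #include <cstdint>
    #include <cstdio>

    // Hypothetical: map a request size to one of the fixed slab classes from
    // the diff (8/16/32 bytes), or 0 to indicate the bump-allocator fallback.
    uint64_t PickSizeClass(uint64_t size) {
      if (size <= 8) return 8;
      if (size <= 16) return 16;
      if (size <= 32) return 32;
      return 0;
    }

    int main() {
      const uint64_t requests[] = {1, 8, 9, 24, 32, 33, 256};
      for (uint64_t size : requests) {
        uint64_t cls = PickSizeClass(size);
        if (cls != 0) {
          std::printf("%3llu bytes -> slab_%llu_\n", (unsigned long long)size,
                      (unsigned long long)cls);
        } else {
          std::printf("%3llu bytes -> bump allocation at next_addr_\n",
                      (unsigned long long)size);
        }
      }
      return 0;
    }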
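
The operator new overloads now pass every fresh allocation through InitMemory, which fills it with the poison byte 0xAB. Memory that is read before it is ever written therefore shows up as the recognizable pattern 0xABAB... instead of stale page contents, which makes uninitialized-use bugs much easier to spot in crash dumps. A tiny host-side illustration of the effect (not kernel code):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      // Same idea as InitMemory(): stamp fresh memory with a known poison byte.
      uint8_t fresh[sizeof(uint64_t)];
      std::memset(fresh, 0xAB, sizeof(fresh));

      // A value read from memory that was never initialized after allocation
      // now has an unmistakable pattern rather than leftover data.
      uint64_t field;
      std::memcpy(&field, fresh, sizeof(field));
      std::printf("uninitialized field reads as 0x%016llx\n",
                  (unsigned long long)field);  // 0xabababababababab
      return 0;
    }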