From 659f122c9ecb79db5543fe35879de75d9922931c Mon Sep 17 00:00:00 2001
From: Drew Galbraith
Date: Wed, 15 Nov 2023 12:53:14 -0800
Subject: [PATCH] [Zion] Free memory that was allocated with the slab allocator.

This reduces the number of active allocs by ~200.

Size 8:  142 -> 2
Size 16: 319 -> 313
Size 32: 364 -> 312
---
 zion/memory/constants.h        | 11 +++++++++++
 zion/memory/kernel_heap.cpp    | 26 +++++++++++++++++++-------
 zion/memory/kernel_heap.h      |  7 ++++---
 zion/memory/slab_allocator.cpp | 17 +++++++++++++----
 zion/memory/slab_allocator.h   |  2 ++
 zion/zion.cpp                  |  2 +-
 6 files changed, 50 insertions(+), 15 deletions(-)
 create mode 100644 zion/memory/constants.h

diff --git a/zion/memory/constants.h b/zion/memory/constants.h
new file mode 100644
index 0000000..55555b7
--- /dev/null
+++ b/zion/memory/constants.h
@@ -0,0 +1,11 @@
+#pragma once
+
+#include <stdint.h>
+
+const uint64_t KiB = 0x400;
+const uint64_t kPageSize = 4 * KiB;
+
+const uint64_t kKernelSlabHeapStart = 0xFFFF'FFFF'4000'0000;
+const uint64_t kKernelSlabHeapEnd = 0xFFFF'FFFF'6000'0000;
+const uint64_t kKernelBuddyHeapStart = 0xFFFF'FFFF'6000'0000;
+const uint64_t kKernelBuddyHeapEnd = 0xFFFF'FFFF'8000'0000;
diff --git a/zion/memory/kernel_heap.cpp b/zion/memory/kernel_heap.cpp
index ecce12d..6822b0a 100644
--- a/zion/memory/kernel_heap.cpp
+++ b/zion/memory/kernel_heap.cpp
@@ -24,12 +24,15 @@ void InitMemory(uint8_t* addr, uint64_t size) {
   }
 }
 
+bool IsSlab(void* addr) {
+  uint64_t uaddr = reinterpret_cast<uint64_t>(addr);
+
+  return uaddr >= kKernelSlabHeapStart && uaddr < kKernelSlabHeapEnd;
+}
+
 }  // namespace
 
-KernelHeap::KernelHeap(uint64_t lower_bound, uint64_t upper_bound)
-    : next_addr_(lower_bound), upper_bound_(upper_bound) {
-  gKernelHeap = this;
-}
+KernelHeap::KernelHeap() { gKernelHeap = this; }
 
 void KernelHeap::InitializeSlabAllocators() {
   slab_8_ = glcr::MakeUnique<SlabAllocator>(8);
@@ -89,7 +92,8 @@ void KernelHeap::DumpDebugDataInternal() {
 
   dbgln("");
   dbgln("Heap Debug Statistics!");
-  dbgln("Pages used: {}", (next_addr_ - 0xFFFFFFFF60000000 - 1) / 0x1000 + 1);
+  dbgln("Pages used: {}",
+        (next_addr_ - kKernelBuddyHeapStart - 1) / 0x1000 + 1);
 
   // Active Allocs.
dbgln("Active Allocations: {}", alloc_count_); @@ -145,5 +149,13 @@ void* operator new[](uint64_t size) { return addr; } -void operator delete(void*, uint64_t size) {} -void operator delete[](void*) {} +void operator delete(void* addr, uint64_t size) { + if (IsSlab(addr)) { + SlabFree(addr); + } +} +void operator delete[](void* addr) { + if (IsSlab(addr)) { + SlabFree(addr); + } +} diff --git a/zion/memory/kernel_heap.h b/zion/memory/kernel_heap.h index 20ed9f1..d0eafad 100644 --- a/zion/memory/kernel_heap.h +++ b/zion/memory/kernel_heap.h @@ -3,11 +3,12 @@ #include #include +#include "memory/constants.h" #include "memory/slab_allocator.h" class KernelHeap { public: - KernelHeap(uint64_t lower_bound, uint64_t upper_bound); + KernelHeap(); void InitializeSlabAllocators(); void* Allocate(uint64_t size); @@ -17,8 +18,8 @@ class KernelHeap { static void DumpDebugData(); private: - uint64_t next_addr_; - uint64_t upper_bound_; + uint64_t next_addr_ = kKernelBuddyHeapStart; + uint64_t upper_bound_ = kKernelBuddyHeapEnd; uint64_t alloc_count_ = 0; diff --git a/zion/memory/slab_allocator.cpp b/zion/memory/slab_allocator.cpp index 19d22d2..e858258 100644 --- a/zion/memory/slab_allocator.cpp +++ b/zion/memory/slab_allocator.cpp @@ -1,21 +1,24 @@ #include "slab_allocator.h" #include "debug/debug.h" +#include "memory/constants.h" #include "memory/paging_util.h" namespace { // TODO: Store these in a kernel VMM. -const uint64_t kSlabSize = 0x1000; -uint64_t gNextSlab = 0xFFFFFFFF'40000000; -const uint64_t gMaxSlab = 0xFFFF'FFFF'6000'0000; +const uint64_t kSlabSize = 4 * KiB; +const uint64_t kSlabMask = ~(kSlabSize - 1); +uint64_t gNextSlab = kKernelSlabHeapStart; uint64_t NextSlab() { // FIXME: Synchronization. uint64_t next_slab = gNextSlab; + if (next_slab >= kKernelBuddyHeapEnd) { + panic("Slab heap overrun"); + } gNextSlab += kSlabSize; EnsureResident(next_slab, 1); - return next_slab; } @@ -113,3 +116,9 @@ uint64_t SlabAllocator::Allocations() { } return count; } + +void SlabFree(void* addr) { + Slab* slab = + reinterpret_cast(reinterpret_cast(addr) & kSlabMask); + slab->Free(addr); +} diff --git a/zion/memory/slab_allocator.h b/zion/memory/slab_allocator.h index 1e2c9ff..8f12126 100644 --- a/zion/memory/slab_allocator.h +++ b/zion/memory/slab_allocator.h @@ -48,3 +48,5 @@ class SlabAllocator { uint64_t elem_size_; glcr::IntrusiveList slabs_; }; + +void SlabFree(void* addr); diff --git a/zion/zion.cpp b/zion/zion.cpp index feb6b13..7a43027 100644 --- a/zion/zion.cpp +++ b/zion/zion.cpp @@ -23,7 +23,7 @@ extern "C" void zion() { early_dbgln("[boot] Init Physical Memory Manager."); phys_mem::InitBootstrapPageAllocation(); - KernelHeap heap(0xFFFFFFFF'60000000, 0xFFFFFFFF'80000000); + KernelHeap heap; phys_mem::InitPhysicalMemoryManager(); heap.InitializeSlabAllocators(); phys_mem::DumpRegions();