[Zion] Free memory that was allocated with the slab allocator.

This reduces the number of active allocs by ~200:
Size 8: 142 -> 2
Size 16: 319 -> 313
Size 32: 364 -> 312
Drew Galbraith 2023-11-15 12:53:14 -08:00
parent 6d27ee5dc5
commit 659f122c9e
6 changed files with 50 additions and 15 deletions

zion/memory/constants.h (new file, +11)

@@ -0,0 +1,11 @@
+#pragma once
+
+#include <stdint.h>
+
+const uint64_t KiB = 0x400;
+const uint64_t kPageSize = 4 * KiB;
+
+const uint64_t kKernelSlabHeapStart = 0xFFFF'FFFF'4000'0000;
+const uint64_t kKernelSlabHeapEnd = 0xFFFF'FFFF'6000'0000;
+const uint64_t kKernelBuddyHeapStart = 0xFFFF'FFFF'6000'0000;
+const uint64_t kKernelBuddyHeapEnd = 0xFFFF'FFFF'8000'0000;
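These bounds carve two adjacent 512 MiB windows out of the higher-half address space: one for slab allocations, one for the buddy heap. That adjacency is what makes the IsSlab check below a single range comparison. A quick hosted sanity sketch of the arithmetic (not part of the commit; MiB is a local helper):

#include <cstdint>

constexpr uint64_t KiB = 0x400;
constexpr uint64_t MiB = KiB * KiB;

// Each window spans 0x2000'0000 bytes, i.e. 512 MiB.
static_assert(0xFFFF'FFFF'6000'0000 - 0xFFFF'FFFF'4000'0000 == 512 * MiB,
              "slab heap window is 512 MiB");
static_assert(0xFFFF'FFFF'8000'0000 - 0xFFFF'FFFF'6000'0000 == 512 * MiB,
              "buddy heap window is 512 MiB");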

zion/memory/kernel_heap.cpp

@@ -24,12 +24,15 @@ void InitMemory(uint8_t* addr, uint64_t size) {
   }
 }
 
+bool IsSlab(void* addr) {
+  uint64_t uaddr = reinterpret_cast<uint64_t>(addr);
+  return uaddr >= kKernelSlabHeapStart && uaddr < kKernelSlabHeapEnd;
+}
+
 } // namespace
 
-KernelHeap::KernelHeap(uint64_t lower_bound, uint64_t upper_bound)
-    : next_addr_(lower_bound), upper_bound_(upper_bound) {
-  gKernelHeap = this;
-}
+KernelHeap::KernelHeap() { gKernelHeap = this; }
 
 void KernelHeap::InitializeSlabAllocators() {
   slab_8_ = glcr::MakeUnique<SlabAllocator>(8);
@@ -89,7 +92,8 @@ void KernelHeap::DumpDebugDataInternal() {
   dbgln("");
   dbgln("Heap Debug Statistics!");
-  dbgln("Pages used: {}", (next_addr_ - 0xFFFFFFFF60000000 - 1) / 0x1000 + 1);
+  dbgln("Pages used: {}",
+        (next_addr_ - kKernelBuddyHeapStart - 1) / 0x1000 + 1);
 
   // Active Allocs.
   dbgln("Active Allocations: {}", alloc_count_);
@@ -145,5 +149,13 @@ void* operator new[](uint64_t size) {
   return addr;
 }
 
-void operator delete(void*, uint64_t size) {}
-void operator delete[](void*) {}
+void operator delete(void* addr, uint64_t size) {
+  if (IsSlab(addr)) {
+    SlabFree(addr);
+  }
+}
+
+void operator delete[](void* addr) {
+  if (IsSlab(addr)) {
+    SlabFree(addr);
+  }
+}
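The new delete operators route purely by address range: anything inside the slab window goes back to its slab via SlabFree, while buddy-heap pointers still fall through without being reclaimed. A hosted sketch of that routing with stand-in bounds (IsSlabMock/Route are hypothetical stand-ins, not kernel code):

#include <cstdint>
#include <cstdio>

// Stand-in bounds for the slab window (hypothetical values).
constexpr uint64_t kSlabStart = 0x4000'0000;
constexpr uint64_t kSlabEnd = 0x6000'0000;

bool IsSlabMock(uint64_t addr) { return addr >= kSlabStart && addr < kSlabEnd; }

void Route(uint64_t addr) {
  if (IsSlabMock(addr)) {
    printf("%#llx -> SlabFree\n", (unsigned long long)addr);
  } else {
    printf("%#llx -> no-op (buddy memory is not reclaimed yet)\n",
           (unsigned long long)addr);
  }
}

int main() {
  Route(0x4000'2a10);  // inside the slab window: freed back to its slab
  Route(0x7000'0000);  // buddy-heap pointer: delete is still a no-op
}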

zion/memory/kernel_heap.h

@@ -3,11 +3,12 @@
 #include <glacier/memory/unique_ptr.h>
 #include <stdint.h>
 
+#include "memory/constants.h"
 #include "memory/slab_allocator.h"
 
 class KernelHeap {
  public:
-  KernelHeap(uint64_t lower_bound, uint64_t upper_bound);
+  KernelHeap();
 
   void InitializeSlabAllocators();
 
   void* Allocate(uint64_t size);
@@ -17,8 +18,8 @@ class KernelHeap {
   static void DumpDebugData();
 
  private:
-  uint64_t next_addr_;
-  uint64_t upper_bound_;
+  uint64_t next_addr_ = kKernelBuddyHeapStart;
+  uint64_t upper_bound_ = kKernelBuddyHeapEnd;
 
   uint64_t alloc_count_ = 0;

zion/memory/slab_allocator.cpp

@@ -1,21 +1,24 @@
 #include "slab_allocator.h"
 
 #include "debug/debug.h"
+#include "memory/constants.h"
 #include "memory/paging_util.h"
 
 namespace {
 
-// TODO: Store these in a kernel VMM.
-const uint64_t kSlabSize = 0x1000;
-uint64_t gNextSlab = 0xFFFFFFFF'40000000;
-const uint64_t gMaxSlab = 0xFFFF'FFFF'6000'0000;
+const uint64_t kSlabSize = 4 * KiB;
+const uint64_t kSlabMask = ~(kSlabSize - 1);
+uint64_t gNextSlab = kKernelSlabHeapStart;
 
 uint64_t NextSlab() {
   // FIXME: Synchronization.
   uint64_t next_slab = gNextSlab;
+  if (next_slab >= kKernelSlabHeapEnd) {
+    panic("Slab heap overrun");
+  }
   gNextSlab += kSlabSize;
   EnsureResident(next_slab, 1);
   return next_slab;
 }
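NextSlab is a page-granularity bump allocator over the slab window, and the added bound check turns a silent overrun into the buddy region into a panic. A minimal hosted sketch of the same scheme with stand-in addresses (EnsureResident and panic are elided):

#include <cstdint>
#include <cstdio>

uint64_t g_next = 0x4000;       // stand-in for gNextSlab
const uint64_t kEnd = 0x6000;   // stand-in for the slab window's end
const uint64_t kSize = 0x1000;  // one 4 KiB slab per call

// Returns the next slab base, or 0 once the window is exhausted
// (the kernel panics at this point instead of returning).
uint64_t TakeSlab() {
  if (g_next >= kEnd) return 0;
  uint64_t slab = g_next;
  g_next += kSize;
  return slab;
}

int main() {
  printf("%#llx\n", (unsigned long long)TakeSlab());  // 0x4000
  printf("%#llx\n", (unsigned long long)TakeSlab());  // 0x5000
  printf("%#llx\n", (unsigned long long)TakeSlab());  // 0: exhausted
}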
@@ -113,3 +116,9 @@ uint64_t SlabAllocator::Allocations() {
   }
   return count;
 }
+
+void SlabFree(void* addr) {
+  Slab* slab =
+      reinterpret_cast<Slab*>(reinterpret_cast<uint64_t>(addr) & kSlabMask);
+  slab->Free(addr);
+}
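SlabFree relies on every slab being kSlabSize-aligned with its Slab header at offset zero, so masking off the low 12 bits of any object pointer recovers the owning slab. A worked example of the mask arithmetic (hosted sketch, arbitrary sample address):

#include <cinttypes>
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t kSlabSize = 0x1000;
  const uint64_t kSlabMask = ~(kSlabSize - 1);  // 0xFFFF'FFFF'FFFF'F000
  uint64_t obj = 0xFFFF'FFFF'4000'3A10;         // an allocation inside some slab
  // Clearing the low 12 bits yields the slab base: 0xffffffff40003000.
  printf("slab base: %#" PRIx64 "\n", obj & kSlabMask);
}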

zion/memory/slab_allocator.h

@@ -48,3 +48,5 @@ class SlabAllocator {
   uint64_t elem_size_;
   glcr::IntrusiveList<Slab> slabs_;
 };
+
+void SlabFree(void* addr);

zion/zion.cpp

@@ -23,7 +23,7 @@ extern "C" void zion() {
   early_dbgln("[boot] Init Physical Memory Manager.");
   phys_mem::InitBootstrapPageAllocation();
-  KernelHeap heap(0xFFFFFFFF'60000000, 0xFFFFFFFF'80000000);
+  KernelHeap heap;
   phys_mem::InitPhysicalMemoryManager();
   heap.InitializeSlabAllocators();
   phys_mem::DumpRegions();