[Zion] Move to a kernel slab allocator that will allow easier dealloc.

Drew Galbraith 2023-11-15 12:03:42 -08:00
parent 792e5155ba
commit d71d543b2a
6 changed files with 141 additions and 109 deletions
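
The switch from bitmap-tracked slabs to an intrusive free list is what enables the "easier dealloc" in the title: each Slab header now sits at the base of its page (see memory/slab_allocator.cpp below), so a future free path can recover the owning slab from a raw pointer with a single mask. A minimal sketch of that lookup, assuming the page-aligned kSlabSize slabs this commit introduces (SlabFromAddress is hypothetical; this commit does not add it):

    // Hypothetical helper: each slab is one page (kSlabSize = 0x1000) and its
    // Slab header is placement-new'd at the page base, so masking the low
    // bits of an element pointer yields the owning slab's header.
    Slab* SlabFromAddress(void* addr) {
      uint64_t raw = reinterpret_cast<uint64_t>(addr);
      return reinterpret_cast<Slab*>(raw & ~(kSlabSize - 1));
    }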

View File

@@ -17,6 +17,7 @@ add_executable(zion
   memory/kernel_stack_manager.cpp
   memory/paging_util.cpp
   memory/physical_memory.cpp
+  memory/slab_allocator.cpp
   memory/user_stack_manager.cpp
   object/address_space.cpp
   object/channel.cpp

View File

@@ -15,23 +15,26 @@ KernelHeap& GetKernelHeap() {
   }
   return *gKernelHeap;
 }
 
+static uint8_t kNewByte = 0xAB;
+
+void InitMemory(uint8_t* addr, uint64_t size) {
+  for (uint64_t i = 0; i < size; i++) {
+    addr[i] = kNewByte;
+  }
+}
+
 }  // namespace
 
 KernelHeap::KernelHeap(uint64_t lower_bound, uint64_t upper_bound)
-    : next_slab_addr_(lower_bound),
-      first_unsized_addr_(lower_bound + (upper_bound - lower_bound) / 2),
-      next_addr_(first_unsized_addr_),
-      upper_bound_(upper_bound) {
+    : next_addr_(lower_bound), upper_bound_(upper_bound) {
   gKernelHeap = this;
 }
 
 void KernelHeap::InitializeSlabAllocators() {
-  slab_8_ = glcr::MakeUnique<SlabAllocator<8>>(next_slab_addr_, 4);
-  next_slab_addr_ += 0x4000;
-  slab_16_ = glcr::MakeUnique<SlabAllocator<16>>(next_slab_addr_, 6);
-  next_slab_addr_ += 0x6000;
-  slab_32_ = glcr::MakeUnique<SlabAllocator<32>>(next_slab_addr_, 6);
-  next_slab_addr_ += 0x6000;
+  slab_8_ = glcr::MakeUnique<SlabAllocator>(8);
+  slab_16_ = glcr::MakeUnique<SlabAllocator>(16);
+  slab_32_ = glcr::MakeUnique<SlabAllocator>(32);
 }
 
 void* KernelHeap::Allocate(uint64_t size) {
@@ -106,8 +109,16 @@ void KernelHeap::RecordSize(uint64_t size) {
   distributions[index]++;
 }
 
-void* operator new(uint64_t size) { return GetKernelHeap().Allocate(size); }
-void* operator new[](uint64_t size) { return GetKernelHeap().Allocate(size); }
+void* operator new(uint64_t size) {
+  void* addr = GetKernelHeap().Allocate(size);
+  InitMemory(static_cast<uint8_t*>(addr), size);
+  return addr;
+}
+
+void* operator new[](uint64_t size) {
+  void* addr = GetKernelHeap().Allocate(size);
+  InitMemory(static_cast<uint8_t*>(addr), size);
+  return addr;
+}
 
 void operator delete(void*, uint64_t size) {}
 void operator delete[](void*) {}

View File

@@ -17,14 +17,12 @@ class KernelHeap {
   static void DumpDistribution();
 
  private:
-  uint64_t next_slab_addr_;
-  uint64_t first_unsized_addr_;
   uint64_t next_addr_;
   uint64_t upper_bound_;
 
-  glcr::UniquePtr<SlabAllocator<8>> slab_8_;
-  glcr::UniquePtr<SlabAllocator<16>> slab_16_;
-  glcr::UniquePtr<SlabAllocator<32>> slab_32_;
+  glcr::UniquePtr<SlabAllocator> slab_8_;
+  glcr::UniquePtr<SlabAllocator> slab_16_;
+  glcr::UniquePtr<SlabAllocator> slab_32_;
 
   // Distribution collection for the purpose of investigating a slab allocator.
   // 0: 0-4B

View File

@@ -0,0 +1,95 @@
+#include "slab_allocator.h"
+
+#include "debug/debug.h"
+#include "memory/paging_util.h"
+
+namespace {
+
+// TODO: Store these in a kernel VMM.
+const uint64_t kSlabSize = 0x1000;
+uint64_t gNextSlab = 0xFFFFFFFF'40000000;
+const uint64_t gMaxSlab = 0xFFFF'FFFF'6000'0000;
+
+uint64_t NextSlab() {
+  // FIXME: Synchronization.
+  uint64_t next_slab = gNextSlab;
+  gNextSlab += kSlabSize;
+  EnsureResident(next_slab, 1);
+  return next_slab;
+}
+
+}  // namespace
+
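+// Each Slab occupies a single page with its header at the page base; the
+// rest of the page is carved into elem_size_-byte chunks threaded onto a
+// singly-linked free list.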
+Slab::Slab(uint64_t elem_size) : elem_size_(elem_size), num_allocated_(0) {
+  uint64_t first_elem_offset = ((sizeof(Slab) - 1) / elem_size_) + 1;
+  uint64_t entry_addr =
+      reinterpret_cast<uint64_t>(this) + first_elem_offset * elem_size_;
+  FreeListEntry* entry = reinterpret_cast<FreeListEntry*>(entry_addr);
+  first_free_ = entry;
+
+  uint64_t max_entry = reinterpret_cast<uint64_t>(this) + kSlabSize;
+  entry_addr += elem_size_;
+  while (entry_addr < max_entry) {
+    FreeListEntry* next_entry = reinterpret_cast<FreeListEntry*>(entry_addr);
+    next_entry->next = nullptr;
+    entry->next = next_entry;
+    entry = next_entry;
+    entry_addr += elem_size_;
+  }
+}
+
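+// Hands out the head of the free list in constant time. Callers must check
+// IsFull() first; allocating from a full slab panics.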
+void* Slab::Allocate() {
+  if (IsFull()) {
+    panic("Allocating from full slab.");
+  }
+  FreeListEntry* to_return = first_free_;
+  first_free_ = first_free_->next;
+  num_allocated_++;
+  return static_cast<void*>(to_return);
+}
+
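+// Freeing pushes the element back onto the head of the free list, also in
+// constant time.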
+void Slab::Free(void* address) {
+  if (!IsContained(address)) {
+    panic("Freeing non-contained address from slab.");
+  }
+  uint64_t vaddr = reinterpret_cast<uint64_t>(address);
+  if ((vaddr & ~(elem_size_ - 1)) != vaddr) {
+    panic("Freeing non-aligned address");
+  }
+  FreeListEntry* new_free = static_cast<FreeListEntry*>(address);
+  new_free->next = first_free_;
+  first_free_ = new_free;
+  num_allocated_--;
+}
+
+bool Slab::IsFull() { return first_free_ == nullptr; }
+
+bool Slab::IsContained(void* addr) {
+  uint64_t uaddr = reinterpret_cast<uint64_t>(addr);
+  uint64_t uthis = reinterpret_cast<uint64_t>(this);
+  bool is_above = uaddr >= (uthis + sizeof(Slab));
+  bool is_below = uaddr < (uthis + kSlabSize);
+  return is_above && is_below;
+}
+
+// FIXME: Move all of these to a new file that can be included anywhere.
+inline void* operator new(uint64_t, void* p) { return p; }
+
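+// First-fit: walk the slab list for one with a free element; if all are
+// full, map a fresh page-sized slab and allocate from it.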
+glcr::ErrorOr<void*> SlabAllocator::Allocate() {
+  auto slab = slabs_.PeekFront();
+  while (slab && slab->IsFull()) {
+    slab = slab->next_;
+  }
+  if (slab) {
+    return slab->Allocate();
+  }
+
+  dbgln("Allocating new kernel slab size {}", elem_size_);
+  void* next_slab = reinterpret_cast<void*>(NextSlab());
+  slabs_.PushFront(glcr::AdoptPtr(new (next_slab) Slab(elem_size_)));
+  return slabs_.PeekFront()->Allocate();
+}

View File

@@ -8,111 +8,38 @@
 #include "memory/paging_util.h"
 
-constexpr uint64_t kSlabSentinel = 0xDEADBEEF'C0DEC0DE;
-
 // TODO: Add variable sized slabs (i.e. multiple page slabs.)
-template <uint64_t ElemSize>
-class Slab : public glcr::RefCounted<Slab<ElemSize>>,
-             public glcr::IntrusiveListNode<Slab<ElemSize>> {
+class Slab : public glcr::RefCounted<Slab>,
+             public glcr::IntrusiveListNode<Slab> {
  public:
-  explicit Slab(uint64_t addr) : page_addr_(addr) {
-    for (uint64_t i = 0; i < kBitmapLength; i++) {
-      bitmap_[i] = 0;
-    }
-    bitmap_[0] = ElemSize <= 8 ? 0x3 : 0x1;
-
-    EnsureResident(page_addr_, 16);
-    uint64_t* first_elem = reinterpret_cast<uint64_t*>(page_addr_);
-    first_elem[0] = kSlabSentinel;
-    first_elem[1] = reinterpret_cast<uint64_t>(this);
-  }
+  explicit Slab(uint64_t elem_size);
   Slab(Slab&) = delete;
   Slab(Slab&&) = delete;
 
-  glcr::ErrorOr<void*> Allocate() {
-    uint64_t index = GetFirstFreeIndex();
-    if (index == 0) {
-      return glcr::EXHAUSTED;
-    }
-    bitmap_[index / 64] |= (0x1 << (index % 64));
-    return reinterpret_cast<void*>(page_addr_ + (index * ElemSize));
-  }
+  void* Allocate();
+  void Free(void* addr);
 
-  glcr::ErrorCode Free(void* addr) {
-    uint64_t raw_addr = reinterpret_cast<uint64_t>(addr);
-    if (raw_addr < page_addr_ || raw_addr > (page_addr_ + 0x1000)) {
-      return glcr::INVALID_ARGUMENT;
-    }
-    // FIXME: Check alignment.
-    uint64_t offset = raw_addr - page_addr_;
-    uint64_t index = offset / ElemSize;
-    if (index == 0) {
-      return glcr::FAILED_PRECONDITION;
-    }
-    bitmap_[index / 64] &= ~(0x1 << (index % 64));
-  }
-
-  bool IsFull() {
-    for (uint64_t i = 0; i < kBitmapLength; i++) {
-      if (bitmap_[i] != -1) {
-        return false;
-      }
-    }
-    return true;
-  }
+  bool IsFull();
 
  private:
-  // FIXME: Likely a bug or two here if the number of elements doesn't evenly
-  // divide in to the bitmap length.
-  static constexpr uint64_t kBitmapLength = 0x1000 / ElemSize / 64;
-  static constexpr uint64_t kMaxElements = 0x99E / ElemSize;
-  uint64_t bitmap_[kBitmapLength];
-  uint64_t page_addr_;
-
-  uint64_t GetFirstFreeIndex() {
-    uint64_t bi = 0;
-    while (bi < kBitmapLength && (bitmap_[bi] == -1)) {
-      bi++;
-    }
-    if (bi == kBitmapLength) {
-      return 0;
-    }
-    // FIXME: Use hardware bit instructions here.
-    uint64_t bo = 0;
-    uint64_t bm = bitmap_[bi];
-    while (bm & 0x1) {
-      bm >>= 1;
-      bo += 1;
-    }
-    return (bi * 64) + bo;
-  }
+  struct FreeListEntry {
+    FreeListEntry* next;
+  };
+
+  uint64_t elem_size_;
+  uint64_t num_allocated_;
+  FreeListEntry* first_free_;
+
+  bool IsContained(void* addr);
 };
 
-template <uint64_t ElemSize>
 class SlabAllocator {
  public:
-  SlabAllocator() = delete;
+  SlabAllocator(uint64_t elem_size) : elem_size_(elem_size) {}
   SlabAllocator(SlabAllocator&) = delete;
-  // TODO: Add a Kernel VMMO Struct to hold things like this.
-  SlabAllocator(uint64_t base_addr, uint64_t num_pages) {
-    for (uint64_t i = 0; i < num_pages; i++, base_addr += 0x1000) {
-      slabs_.PushBack(glcr::MakeRefCounted<Slab<ElemSize>>(base_addr));
-    }
-  }
 
-  glcr::ErrorOr<void*> Allocate() {
-    glcr::RefPtr<Slab<ElemSize>> curr = slabs_.PeekFront();
-    while (curr) {
-      if (curr->IsFull()) {
-        curr = curr->next_;
-        continue;
-      }
-      return curr->Allocate();
-    }
-    return glcr::EXHAUSTED;
-  }
+  glcr::ErrorOr<void*> Allocate();
 
  private:
-  glcr::IntrusiveList<Slab<ElemSize>> slabs_;
+  uint64_t elem_size_;
+  glcr::IntrusiveList<Slab> slabs_;
 };

View File

@@ -23,7 +23,7 @@ extern "C" void zion() {
   early_dbgln("[boot] Init Physical Memory Manager.");
   phys_mem::InitBootstrapPageAllocation();
-  KernelHeap heap(0xFFFFFFFF'40000000, 0xFFFFFFFF'80000000);
+  KernelHeap heap(0xFFFFFFFF'60000000, 0xFFFFFFFF'80000000);
   phys_mem::InitPhysicalMemoryManager();
   heap.InitializeSlabAllocators();
   phys_mem::DumpRegions();