[zion] Add a basic slab allocator to the kernel.
Freeing from the slab allocator isn't implemented yet, but allocations appear to work.
parent e3a425e274
commit ee603b7478
@@ -18,11 +18,47 @@ KernelHeap& GetKernelHeap() {
 }  // namespace

 KernelHeap::KernelHeap(uint64_t lower_bound, uint64_t upper_bound)
-    : next_addr_(lower_bound), upper_bound_(upper_bound) {
+    : next_slab_addr_(lower_bound),
+      first_unsized_addr_(lower_bound + (upper_bound - lower_bound) / 2),
+      next_addr_(first_unsized_addr_),
+      upper_bound_(upper_bound) {
   gKernelHeap = this;
 }
+
+void KernelHeap::InitializeSlabAllocators() {
+  slab_8_ = glcr::MakeUnique<SlabAllocator<8>>(next_slab_addr_, 4);
+  next_slab_addr_ += 0x4000;
+  slab_16_ = glcr::MakeUnique<SlabAllocator<16>>(next_slab_addr_, 6);
+  next_slab_addr_ += 0x6000;
+  slab_32_ = glcr::MakeUnique<SlabAllocator<32>>(next_slab_addr_, 6);
+  next_slab_addr_ += 0x6000;
+}
+
 void* KernelHeap::Allocate(uint64_t size) {
 #if K_HEAP_DEBUG
   dbgln("Alloc (%x)", size);
 #endif
+  if ((size <= 8) && slab_8_) {
+    auto ptr_or = slab_8_->Allocate();
+    if (ptr_or.ok()) {
+      return ptr_or.value();
+    }
+    dbgln("Failed allocation (slab 8): %x", ptr_or.error());
+  }
+  if ((size <= 16) && slab_16_) {
+    auto ptr_or = slab_16_->Allocate();
+    if (ptr_or.ok()) {
+      return ptr_or.value();
+    }
+    dbgln("Failed allocation (slab 16): %x", ptr_or.error());
+  }
+  if ((size <= 32) && slab_32_) {
+    auto ptr_or = slab_32_->Allocate();
+    if (ptr_or.ok()) {
+      return ptr_or.value();
+    }
+    dbgln("Failed allocation (slab 32): %x", ptr_or.error());
+  }
   if (next_addr_ + size >= upper_bound_) {
     panic("Kernel Heap Overrun (next, size, max): %m, %x, %m", next_addr_, size,
           upper_bound_);
@@ -67,5 +103,5 @@ void KernelHeap::RecordSize(uint64_t size) {
 void* operator new(uint64_t size) { return GetKernelHeap().Allocate(size); }
 void* operator new[](uint64_t size) { return GetKernelHeap().Allocate(size); }

-void operator delete(void*, uint64_t) {}
+void operator delete(void*, uint64_t size) {}
 void operator delete[](void*) {}
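With operator new and operator new[] forwarding into KernelHeap::Allocate, any small kernel-side allocation is routed to a slab first once InitializeSlabAllocators() has run; only sizes above 32 bytes (or a failed slab allocation) fall through to the bump pointer. A minimal sketch of that routing, assuming the heap is already set up; the struct and function names below are illustrative only, not part of the commit:

// Illustrative sketch of the new allocation routing.
struct SmallObject {
  uint64_t a;
  uint64_t b;  // sizeof(SmallObject) == 16, so Allocate(16) hits slab_16_.
};

void SlabRoutingSketch() {
  // Served from the 16-byte slab pages near the bottom of the heap range.
  SmallObject* obj = new SmallObject();

  // 64 bytes is larger than the biggest slab, so this still comes from the
  // bump pointer at next_addr_ in the upper half of the range.
  uint64_t* big = new uint64_t[8];

  // operator delete is still a no-op in this commit, so both simply leak.
  delete obj;
  delete[] big;
}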
@@ -1,19 +1,31 @@
 #pragma once

+#include <glacier/memory/unique_ptr.h>
 #include <stdint.h>

+#include "memory/slab_allocator.h"
+
 class KernelHeap {
  public:
   KernelHeap(uint64_t lower_bound, uint64_t upper_bound);
+  void InitializeSlabAllocators();

   void* Allocate(uint64_t size);

   void Free(void* address);

   static void DumpDistribution();

  private:
+  uint64_t next_slab_addr_;
+  uint64_t first_unsized_addr_;
   uint64_t next_addr_;
   uint64_t upper_bound_;

+  glcr::UniquePtr<SlabAllocator<8>> slab_8_;
+  glcr::UniquePtr<SlabAllocator<16>> slab_16_;
+  glcr::UniquePtr<SlabAllocator<32>> slab_32_;
+
   // Distribution collection for the purpose of investigating a slab allocator.
   // 0: 0-4B
   // 1: 4B-8B
@@ -0,0 +1,118 @@
+#pragma once
+
+#include <glacier/container/intrusive_list.h>
+#include <glacier/memory/ref_counted.h>
+#include <glacier/status/error.h>
+#include <glacier/status/error_or.h>
+#include <stdint.h>
+
+#include "memory/paging_util.h"
+
+constexpr uint64_t kSlabSentinel = 0xDEADBEEF'C0DEC0DE;
+
+// TODO: Add variable sized slabs (i.e. multiple page slabs.)
+template <uint64_t ElemSize>
+class Slab : public glcr::RefCounted<Slab<ElemSize>>,
+             public glcr::IntrusiveListNode<Slab<ElemSize>> {
+ public:
+  explicit Slab(uint64_t addr) : page_addr_(addr) {
+    for (uint64_t i = 0; i < kBitmapLength; i++) {
+      bitmap_[i] = 0;
+    }
+
+    bitmap_[0] = ElemSize <= 8 ? 0x3 : 0x1;
+    EnsureResident(page_addr_, 16);
+    uint64_t* first_elem = reinterpret_cast<uint64_t*>(page_addr_);
+    first_elem[0] = kSlabSentinel;
+    first_elem[1] = reinterpret_cast<uint64_t>(this);
+  }
+  Slab(Slab&) = delete;
+  Slab(Slab&&) = delete;
+
+  glcr::ErrorOr<void*> Allocate() {
+    uint64_t index = GetFirstFreeIndex();
+    if (index == 0) {
+      return glcr::EXHAUSTED;
+    }
+    bitmap_[index / 64] |= (1ull << (index % 64));
+    return reinterpret_cast<void*>(page_addr_ + (index * ElemSize));
+  }
+
+  glcr::ErrorCode Free(void* addr) {
+    uint64_t raw_addr = reinterpret_cast<uint64_t>(addr);
+    if (raw_addr < page_addr_ || raw_addr > (page_addr_ + 0x1000)) {
+      return glcr::INVALID_ARGUMENT;
+    }
+    // FIXME: Check alignment.
+    uint64_t offset = raw_addr - page_addr_;
+    uint64_t index = offset / ElemSize;
+    if (index == 0) {
+      return glcr::FAILED_PRECONDITION;
+    }
+    bitmap_[index / 64] &= ~(1ull << (index % 64));
+    return glcr::OK;
+  }
+
+  bool IsFull() {
+    for (uint64_t i = 0; i < kBitmapLength; i++) {
+      if (bitmap_[i] != -1) {
+        return false;
+      }
+    }
+    return true;
+  }
+
+ private:
+  // FIXME: Likely a bug or two here if the number of elements doesn't evenly
+  // divide into the bitmap length.
+  static constexpr uint64_t kBitmapLength = 0x1000 / ElemSize / 64;
+  static constexpr uint64_t kMaxElements = 0x99E / ElemSize;
+  uint64_t bitmap_[kBitmapLength];
+  uint64_t page_addr_;
+
+  uint64_t GetFirstFreeIndex() {
+    uint64_t bi = 0;
+    while (bi < kBitmapLength && (bitmap_[bi] == -1)) {
+      bi++;
+    }
+    if (bi == kBitmapLength) {
+      return 0;
+    }
+    // FIXME: Use hardware bit instructions here.
+    uint64_t bo = 0;
+    uint64_t bm = bitmap_[bi];
+    while (bm & 0x1) {
+      bm >>= 1;
+      bo += 1;
+    }
+    return (bi * 64) + bo;
+  }
+};
+
+template <uint64_t ElemSize>
+class SlabAllocator {
+ public:
+  SlabAllocator() = delete;
+  SlabAllocator(SlabAllocator&) = delete;
+
+  // TODO: Add a Kernel VMMO Struct to hold things like this.
+  SlabAllocator(uint64_t base_addr, uint64_t num_pages) {
+    for (uint64_t i = 0; i < num_pages; i++, base_addr += 0x1000) {
+      slabs_.PushBack(glcr::MakeRefCounted<Slab<ElemSize>>(base_addr));
+    }
+  }
+
+  glcr::ErrorOr<void*> Allocate() {
+    glcr::RefPtr<Slab<ElemSize>> curr = slabs_.PeekFront();
+    while (curr) {
+      if (curr->IsFull()) {
+        curr = curr->next_;
+        continue;
+      }
+      return curr->Allocate();
+    }
+    return glcr::EXHAUSTED;
+  }
+
+ private:
+  glcr::IntrusiveList<Slab<ElemSize>> slabs_;
+};
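For reference, the per-page bookkeeping in Slab boils down to a bitmap with one bit per ElemSize-byte slot. Slot 0 of every page is pre-reserved for kSlabSentinel plus a back-pointer to the owning Slab (two slots when ElemSize is 8, since that header needs 16 bytes), which is why GetFirstFreeIndex() can use 0 as its "nothing free" value. A compact restatement of that math, with hypothetical helper names that are not part of the commit:

// Hypothetical helpers restating the slot math used by Slab<ElemSize>.
constexpr uint64_t kPageSize = 0x1000;

// Slots per page for a given element size: 512 for the 8-byte slab,
// 256 for 16 bytes, 128 for 32 bytes.
template <uint64_t ElemSize>
constexpr uint64_t SlotsPerPage() {
  return kPageSize / ElemSize;
}

// Maps a pointer handed out by Allocate() back to its bitmap slot; Free()
// performs the same arithmetic before clearing the bit.
template <uint64_t ElemSize>
uint64_t SlotIndexFor(uint64_t page_addr, void* ptr) {
  return (reinterpret_cast<uint64_t>(ptr) - page_addr) / ElemSize;
}

// One uint64_t word of the bitmap tracks 64 slots.
inline void MarkUsed(uint64_t* bitmap, uint64_t index) {
  bitmap[index / 64] |= (1ull << (index % 64));
}
inline void MarkFree(uint64_t* bitmap, uint64_t index) {
  bitmap[index / 64] &= ~(1ull << (index % 64));
}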
@@ -25,6 +25,7 @@ extern "C" void zion() {
   phys_mem::InitBootstrapPageAllocation();
   KernelHeap heap(0xFFFFFFFF'40000000, 0xFFFFFFFF'80000000);
   phys_mem::InitPhysicalMemoryManager();
+  heap.InitializeSlabAllocators();

   dbgln("[boot] Memory allocations available now.");