[Zion] Move to a kernel slab allocator that will allow easier dealloc.
parent 792e5155ba
commit d71d543b2a
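The heart of this change: the old slab tracked occupancy with a per-page bitmap plus a sentinel word, while the new one threads every free element into an intrusive free list, so allocation pops the head and deallocation pushes the element back, both O(1). A minimal standalone sketch of that free-list idea (toy code under my own names, not the kernel's; the element size must be at least sizeof(void*) so an entry fits):

// Toy sketch of the intrusive free-list approach (illustration only).
#include <cstdint>
#include <vector>

struct FreeListEntry {
  FreeListEntry* next;
};

class ToySlab {
 public:
  // elem_size must be >= sizeof(FreeListEntry*) so an entry fits in a slot.
  ToySlab(uint64_t elem_size, uint64_t num_elems)
      : storage_(elem_size * num_elems) {
    // Thread every element onto the free list.
    for (uint64_t i = 0; i < num_elems; i++) {
      auto* entry =
          reinterpret_cast<FreeListEntry*>(storage_.data() + i * elem_size);
      entry->next = first_free_;
      first_free_ = entry;
    }
  }

  void* Allocate() {
    if (first_free_ == nullptr) {
      return nullptr;  // slab exhausted
    }
    FreeListEntry* entry = first_free_;
    first_free_ = entry->next;
    return entry;
  }

  void Free(void* addr) {
    auto* entry = static_cast<FreeListEntry*>(addr);
    entry->next = first_free_;
    first_free_ = entry;
  }

 private:
  std::vector<uint8_t> storage_;
  FreeListEntry* first_free_ = nullptr;
};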
@@ -17,6 +17,7 @@ add_executable(zion
 memory/kernel_stack_manager.cpp
 memory/paging_util.cpp
 memory/physical_memory.cpp
+memory/slab_allocator.cpp
 memory/user_stack_manager.cpp
 object/address_space.cpp
 object/channel.cpp
@@ -15,23 +15,26 @@ KernelHeap& GetKernelHeap() {
   }
   return *gKernelHeap;
 }
+
+static uint8_t kNewByte = 0xAB;
+
+void InitMemory(uint8_t* addr, uint64_t size) {
+  for (uint64_t i = 0; i < size; i++) {
+    addr[i] = kNewByte;
+  }
+}
+
 }  // namespace
 
 KernelHeap::KernelHeap(uint64_t lower_bound, uint64_t upper_bound)
-    : next_slab_addr_(lower_bound),
-      first_unsized_addr_(lower_bound + (upper_bound - lower_bound) / 2),
-      next_addr_(first_unsized_addr_),
-      upper_bound_(upper_bound) {
+    : next_addr_(lower_bound), upper_bound_(upper_bound) {
   gKernelHeap = this;
 }
 
 void KernelHeap::InitializeSlabAllocators() {
-  slab_8_ = glcr::MakeUnique<SlabAllocator<8>>(next_slab_addr_, 4);
-  next_slab_addr_ += 0x4000;
-  slab_16_ = glcr::MakeUnique<SlabAllocator<16>>(next_slab_addr_, 6);
-  next_slab_addr_ += 0x6000;
-  slab_32_ = glcr::MakeUnique<SlabAllocator<32>>(next_slab_addr_, 6);
-  next_slab_addr_ += 0x6000;
+  slab_8_ = glcr::MakeUnique<SlabAllocator>(8);
+  slab_16_ = glcr::MakeUnique<SlabAllocator>(16);
+  slab_32_ = glcr::MakeUnique<SlabAllocator>(32);
 }
 
 void* KernelHeap::Allocate(uint64_t size) {
@@ -106,8 +109,16 @@ void KernelHeap::RecordSize(uint64_t size) {
   distributions[index]++;
 }
 
-void* operator new(uint64_t size) { return GetKernelHeap().Allocate(size); }
-void* operator new[](uint64_t size) { return GetKernelHeap().Allocate(size); }
+void* operator new(uint64_t size) {
+  void* addr = GetKernelHeap().Allocate(size);
+  InitMemory(static_cast<uint8_t*>(addr), size);
+  return addr;
+}
+void* operator new[](uint64_t size) {
+  void* addr = GetKernelHeap().Allocate(size);
+  InitMemory(static_cast<uint8_t*>(addr), size);
+  return addr;
+}
 
 void operator delete(void*, uint64_t size) {}
 void operator delete[](void*) {}
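The operator new overloads now scribble kNewByte (0xAB) over every fresh allocation, so a field that a constructor forgot to set reads back as a recognizable poison pattern instead of leftover heap contents. A standalone illustration of that effect (the harness below is mine; only the 0xAB value comes from the commit):

// Illustration of the poison fill (hypothetical harness, not kernel code).
#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  constexpr uint8_t kNewByte = 0xAB;  // same fill byte the kernel now uses
  alignas(8) uint8_t allocation[16];
  std::memset(allocation, kNewByte, sizeof(allocation));  // what InitMemory does

  // An "uninitialized" 8-byte field reads back as 0xabababababababab,
  // which stands out immediately in a debugger or log.
  uint64_t field;
  std::memcpy(&field, allocation, sizeof(field));
  std::printf("uninitialized field: 0x%016llx\n",
              static_cast<unsigned long long>(field));
}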
@@ -17,14 +17,12 @@ class KernelHeap {
   static void DumpDistribution();
 
  private:
-  uint64_t next_slab_addr_;
-  uint64_t first_unsized_addr_;
   uint64_t next_addr_;
   uint64_t upper_bound_;
 
-  glcr::UniquePtr<SlabAllocator<8>> slab_8_;
-  glcr::UniquePtr<SlabAllocator<16>> slab_16_;
-  glcr::UniquePtr<SlabAllocator<32>> slab_32_;
+  glcr::UniquePtr<SlabAllocator> slab_8_;
+  glcr::UniquePtr<SlabAllocator> slab_16_;
+  glcr::UniquePtr<SlabAllocator> slab_32_;
 
   // Distribution collection for the purpose of investigating a slab allocator.
   // 0: 0-4B
@@ -0,0 +1,95 @@ memory/slab_allocator.cpp (new file)
+#include "slab_allocator.h"
+
+#include "debug/debug.h"
+#include "memory/paging_util.h"
+
+namespace {
+
+// TODO: Store these in a kernel VMM.
+const uint64_t kSlabSize = 0x1000;
+uint64_t gNextSlab = 0xFFFFFFFF'40000000;
+const uint64_t gMaxSlab = 0xFFFF'FFFF'6000'0000;
+
+uint64_t NextSlab() {
+  // FIXME: Synchronization.
+  uint64_t next_slab = gNextSlab;
+  gNextSlab += kSlabSize;
+  EnsureResident(next_slab, 1);
+
+  return next_slab;
+}
+
+}  // namespace
+
+Slab::Slab(uint64_t elem_size) : elem_size_(elem_size), num_allocated_(0) {
+  uint64_t first_elem_offset = ((sizeof(Slab) - 1) / elem_size_) + 1;
+  uint64_t entry_addr =
+      reinterpret_cast<uint64_t>(this) + first_elem_offset * elem_size_;
+  FreeListEntry* entry = reinterpret_cast<FreeListEntry*>(entry_addr);
+  first_free_ = entry;
+
+  uint64_t max_entry = reinterpret_cast<uint64_t>(this) + kSlabSize;
+  entry_addr += elem_size_;
+  while (entry_addr < max_entry) {
+    FreeListEntry* next_entry = reinterpret_cast<FreeListEntry*>(entry_addr);
+    next_entry->next = nullptr;
+    entry->next = next_entry;
+    entry = next_entry;
+    entry_addr += elem_size_;
+  }
+}
+
+void* Slab::Allocate() {
+  if (IsFull()) {
+    panic("Allocating from full slab.");
+  }
+
+  FreeListEntry* to_return = first_free_;
+  first_free_ = first_free_->next;
+  num_allocated_++;
+  return static_cast<void*>(to_return);
+}
+
+void Slab::Free(void* address) {
+  if (!IsContained(address)) {
+    panic("Freeing non-contained address from slab.");
+  }
+
+  uint64_t vaddr = reinterpret_cast<uint64_t>(address);
+  if ((vaddr & ~(elem_size_ - 1)) != vaddr) {
+    panic("Freeing non-aligned address");
+  }
+
+  FreeListEntry* new_free = static_cast<FreeListEntry*>(address);
+  new_free->next = first_free_;
+  first_free_ = new_free;
+  num_allocated_--;
+}
+
+bool Slab::IsFull() { return first_free_ == nullptr; }
+bool Slab::IsContained(void* addr) {
+  uint64_t uaddr = reinterpret_cast<uint64_t>(addr);
+  uint64_t uthis = reinterpret_cast<uint64_t>(this);
+  bool is_above = uaddr > (uthis + sizeof(this));
+  bool is_below = uaddr < (uthis + kSlabSize);
+  return is_above && is_below;
+}
+
+// FIXME: Move all of these to a new file that can be included anywhere.
+inline void* operator new(uint64_t, void* p) { return p; }
+
+glcr::ErrorOr<void*> SlabAllocator::Allocate() {
+  auto slab = slabs_.PeekFront();
+  while (slab && slab->IsFull()) {
+    slab = slab->next_;
+  }
+
+  if (slab) {
+    return slab->Allocate();
+  }
+
+  dbgln("Allocating new kernel slab size {}", elem_size_);
+  void* next_slab = (uint64_t*)NextSlab();
+  slabs_.PushFront(glcr::AdoptPtr(new (next_slab) Slab(elem_size_)));
+  return slabs_.PeekFront()->Allocate();
+}
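Each Slab header is constructed in place at the start of its own page (via the placement new on the address returned by NextSlab()), so the constructor has to round the header up to a whole number of elements before threading the free list. A worked example of that layout math; the header size used here is an assumption for illustration, the real value is sizeof(Slab) including its RefCounted and IntrusiveListNode bases:

// Worked example of Slab's layout math (kHeaderSize is assumed).
#include <cstdint>
#include <cstdio>

int main() {
  constexpr uint64_t kSlabSize = 0x1000;  // one page per slab, as in the commit
  constexpr uint64_t kHeaderSize = 48;    // assumption standing in for sizeof(Slab)
  constexpr uint64_t kElemSize = 32;

  // Same formula as the constructor: skip enough whole elements that the
  // first free-list entry does not overlap the Slab object itself.
  constexpr uint64_t first_elem_offset = ((kHeaderSize - 1) / kElemSize) + 1;  // 2
  constexpr uint64_t first_entry = first_elem_offset * kElemSize;              // 64
  constexpr uint64_t usable_elems = (kSlabSize - first_entry) / kElemSize;     // 126

  std::printf("first entry at +%llu, %llu usable elements per slab\n",
              static_cast<unsigned long long>(first_entry),
              static_cast<unsigned long long>(usable_elems));
}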
@@ -8,111 +8,38 @@
 
 #include "memory/paging_util.h"
 
-constexpr uint64_t kSlabSentinel = 0xDEADBEEF'C0DEC0DE;
-
-// TODO: Add variable sized slabs (i.e. multiple page slabs.)
-template <uint64_t ElemSize>
-class Slab : public glcr::RefCounted<Slab<ElemSize>>,
-             public glcr::IntrusiveListNode<Slab<ElemSize>> {
+class Slab : public glcr::RefCounted<Slab>,
+             public glcr::IntrusiveListNode<Slab> {
  public:
-  explicit Slab(uint64_t addr) : page_addr_(addr) {
-    for (uint64_t i = 0; i < kBitmapLength; i++) {
-      bitmap_[i] = 0;
-    }
-
-    bitmap_[0] = ElemSize <= 8 ? 0x3 : 0x1;
-    EnsureResident(page_addr_, 16);
-    uint64_t* first_elem = reinterpret_cast<uint64_t*>(page_addr_);
-    first_elem[0] = kSlabSentinel;
-    first_elem[1] = reinterpret_cast<uint64_t>(this);
-  }
+  explicit Slab(uint64_t elem_size);
   Slab(Slab&) = delete;
   Slab(Slab&&) = delete;
 
-  glcr::ErrorOr<void*> Allocate() {
-    uint64_t index = GetFirstFreeIndex();
-    if (index == 0) {
-      return glcr::EXHAUSTED;
-    }
-    bitmap_[index / 64] |= (0x1 << (index % 64));
-    return reinterpret_cast<void*>(page_addr_ + (index * ElemSize));
-  }
-
-  glcr::ErrorCode Free(void* addr) {
-    uint64_t raw_addr = reinterpret_cast<uint64_t>(addr);
-    if (raw_addr < page_addr_ || raw_addr > (page_addr_ + 0x1000)) {
-      return glcr::INVALID_ARGUMENT;
-    }
-    // FIXME: Check alignment.
-    uint64_t offset = raw_addr - page_addr_;
-    uint64_t index = offset / ElemSize;
-    if (index == 0) {
-      return glcr::FAILED_PRECONDITION;
-    }
-    bitmap_[index / 64] &= ~(0x1 << (index % 64));
-  }
-
-  bool IsFull() {
-    for (uint64_t i = 0; i < kBitmapLength; i++) {
-      if (bitmap_[i] != -1) {
-        return false;
-      }
-    }
-    return true;
-  }
+  void* Allocate();
+  void Free(void* addr);
+
+  bool IsFull();
 
  private:
-  // FIXME: Likely a bug or two here if the number of elements doesn't evenly
-  // divide in to the bitmap length.
-  static constexpr uint64_t kBitmapLength = 0x1000 / ElemSize / 64;
-  static constexpr uint64_t kMaxElements = 0x99E / ElemSize;
-  uint64_t bitmap_[kBitmapLength];
-  uint64_t page_addr_;
-
-  uint64_t GetFirstFreeIndex() {
-    uint64_t bi = 0;
-    while (bi < kBitmapLength && (bitmap_[bi] == -1)) {
-      bi++;
-    }
-    if (bi == kBitmapLength) {
-      return 0;
-    }
-    // FIXME: Use hardware bit instructions here.
-    uint64_t bo = 0;
-    uint64_t bm = bitmap_[bi];
-    while (bm & 0x1) {
-      bm >>= 1;
-      bo += 1;
-    }
-    return (bi * 64) + bo;
-  }
+  struct FreeListEntry {
+    FreeListEntry* next;
+  };
+
+  uint64_t elem_size_;
+  uint64_t num_allocated_;
+  FreeListEntry* first_free_;
+
+  bool IsContained(void* addr);
 };
 
-template <uint64_t ElemSize>
 class SlabAllocator {
  public:
-  SlabAllocator() = delete;
+  SlabAllocator(uint64_t elem_size) : elem_size_(elem_size) {}
  SlabAllocator(SlabAllocator&) = delete;
 
-  // TODO: Add a Kernel VMMO Struct to hold things like this.
-  SlabAllocator(uint64_t base_addr, uint64_t num_pages) {
-    for (uint64_t i = 0; i < num_pages; i++, base_addr += 0x1000) {
-      slabs_.PushBack(glcr::MakeRefCounted<Slab<ElemSize>>(base_addr));
-    }
-  }
-
-  glcr::ErrorOr<void*> Allocate() {
-    glcr::RefPtr<Slab<ElemSize>> curr = slabs_.PeekFront();
-    while (curr) {
-      if (curr->IsFull()) {
-        curr = curr->next_;
-        continue;
-      }
-      return curr->Allocate();
-    }
-    return glcr::EXHAUSTED;
-  }
+  glcr::ErrorOr<void*> Allocate();
 
  private:
-  glcr::IntrusiveList<Slab<ElemSize>> slabs_;
+  uint64_t elem_size_;
+  glcr::IntrusiveList<Slab> slabs_;
 };
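The other shift visible in this header: the element size moves from a template parameter to a runtime constructor argument, so SlabAllocator<8>, SlabAllocator<16>, and SlabAllocator<32> collapse into a single SlabAllocator type that KernelHeap can hold behind one glcr::UniquePtr<SlabAllocator>. A toy illustration of why that matters (std types stand in for the glcr ones; the names here are mine):

// Toy illustration of runtime-sized vs. template-sized allocators.
#include <cstdint>
#include <memory>
#include <vector>

class RuntimeSizedAllocator {
 public:
  explicit RuntimeSizedAllocator(uint64_t elem_size) : elem_size_(elem_size) {}
  uint64_t elem_size() const { return elem_size_; }

 private:
  uint64_t elem_size_;
};

int main() {
  // One container holds every size class; under the old template design,
  // Allocator<8>, Allocator<16>, and Allocator<32> would be unrelated types
  // that could not share a container or a pointer type.
  std::vector<std::unique_ptr<RuntimeSizedAllocator>> allocators;
  for (uint64_t size : {8, 16, 32}) {
    allocators.push_back(std::make_unique<RuntimeSizedAllocator>(size));
  }
}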
@@ -23,7 +23,7 @@ extern "C" void zion() {
 
   early_dbgln("[boot] Init Physical Memory Manager.");
   phys_mem::InitBootstrapPageAllocation();
-  KernelHeap heap(0xFFFFFFFF'40000000, 0xFFFFFFFF'80000000);
+  KernelHeap heap(0xFFFFFFFF'60000000, 0xFFFFFFFF'80000000);
   phys_mem::InitPhysicalMemoryManager();
   heap.InitializeSlabAllocators();
   phys_mem::DumpRegions();
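The heap's new lower bound lines up with the slab region carved out in slab_allocator.cpp (gNextSlab through gMaxSlab), so the slab pages and the heap's bump-pointer range no longer overlap. A compilable summary of the split; the constant names are mine, the values come from the commit:

// Address split implied by this change.
#include <cstdint>

constexpr uint64_t kSlabRegionStart = 0xFFFFFFFF'40000000;  // gNextSlab
constexpr uint64_t kSlabRegionEnd   = 0xFFFFFFFF'60000000;  // gMaxSlab
constexpr uint64_t kHeapStart       = 0xFFFFFFFF'60000000;  // new KernelHeap lower bound
constexpr uint64_t kHeapEnd         = 0xFFFFFFFF'80000000;  // unchanged upper bound

static_assert(kSlabRegionEnd == kHeapStart,
              "slab region and kernel heap no longer overlap");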