Compare commits

...

8 Commits

Author SHA1 Message Date
Drew Galbraith 92d8a02291 [Zion] Cleanup memory debug statistics slightly. 2023-11-15 13:10:53 -08:00
Drew Galbraith 10d16e129f [Zion] Use a slab allocator for 64 byte and 128 byte allocs.
This has no effect on page usage for now (each size class uses a small
number of pages, so there is some overhead).

However, it does reduce the number of active allocations:
234 -> 146 (64B)
61 -> 51 (128B)
2023-11-15 13:06:14 -08:00
Drew Galbraith cc16210e90 [Zion] Use slab allocators immediately during heap init. 2023-11-15 13:02:34 -08:00
Drew Galbraith 659f122c9e [Zion] Free memory that was allocated with the slab allocator.
This reduces the number of active allocs by ~200.
Size 8: 142 -> 2
Size 16: 319 -> 313
Size 32: 364 -> 312
2023-11-15 12:53:14 -08:00
Drew Galbraith 6d27ee5dc5 [Zion] Add more debug information about kernel heap usage.
This will be helpful as we work to improve it.

Without deallocations, we currently stand at the following alloc counts:
8B - 142
16B - 319
32B - 364

unsized - 305

Total page usage, including slabs, is 12.
2023-11-15 12:36:18 -08:00
Drew Galbraith 4657c46f73 [Zion] Fix kernel heap distribution calculation. 2023-11-15 12:14:58 -08:00
Drew Galbraith d71d543b2a [Zion] Move to a kernel slab allocator that will allow easier dealloc. 2023-11-15 12:04:35 -08:00
Drew Galbraith 792e5155ba [Glacier] When resizing vector use the proper T constructor.
Previously, when we reinterpret_cast from uint8_t[] to T[], we ended
up never properly initializing the objects in the array. This caused
issues where garbage memory returned by new was treated as a legitimate
object.

Potentially in the future it would make sense to back vectors with
a simple byte array and do memcpys to move objects in and out as
needed.
2023-11-15 12:00:53 -08:00
10 changed files with 290 additions and 168 deletions

View File

@@ -69,11 +69,13 @@ class Vector {
template <typename T>
void Vector<T>::Resize(uint64_t capacity) {
T* new_data = reinterpret_cast<T*>(new uint8_t[capacity * sizeof(T)]);
for (uint64_t i = 0; i < size_; i++) {
new_data[i] = glcr::Move(data_[i]);
T* new_data = new T[capacity];
if (data_) {
for (uint64_t i = 0; i < size_; i++) {
new_data[i] = glcr::Move(data_[i]);
}
delete[] data_;
}
delete[] data_;
data_ = new_data;
capacity_ = capacity;
}
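Why this fix matters: the old code reinterpret_cast a raw uint8_t buffer to T*, so no T constructor ever ran, and move-assigning into unconstructed objects is undefined behavior for any non-trivial T. A minimal sketch of the failure mode (Tracked and Broken are hypothetical names, not from the repo):

```cpp
#include <cstdint>
#include <utility>

struct Tracked {
  int* counter;  // non-trivial state
  Tracked() : counter(nullptr) {}
  Tracked& operator=(Tracked&& other) {
    // Real move-assignment operators read the destination's existing
    // state (e.g. to release an old buffer). If *this was never
    // constructed, that read sees garbage.
    counter = other.counter;
    other.counter = nullptr;
    return *this;
  }
};

void Broken(Tracked* src, uint64_t n) {
  // No Tracked constructor runs for this memory:
  Tracked* dst = reinterpret_cast<Tracked*>(new uint8_t[n * sizeof(Tracked)]);
  for (uint64_t i = 0; i < n; i++) {
    dst[i] = std::move(src[i]);  // assigns into unconstructed objects: UB
  }
  delete[] reinterpret_cast<uint8_t*>(dst);  // also skips ~Tracked
}
```

The fix, new T[capacity], default-constructs every element before the moves run. The byte-array-plus-memcpy alternative floated in the commit message would instead require placement new over raw storage.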

View File

@@ -17,6 +17,7 @@ add_executable(zion
memory/kernel_stack_manager.cpp
memory/paging_util.cpp
memory/physical_memory.cpp
memory/slab_allocator.cpp
memory/user_stack_manager.cpp
object/address_space.cpp
object/channel.cpp

View File

@@ -141,7 +141,7 @@ extern "C" void interrupt_apic_timer(InterruptFrame*) {
cnt++;
if (cnt % 20 == 0) {
if (cnt == 20) {
KernelHeap::DumpDistribution();
KernelHeap::DumpDebugData();
}
dbgln("timer: {}s", cnt * 50 / 1000);
}

zion/memory/constants.h (new file, 11 additions)
View File

@@ -0,0 +1,11 @@
#pragma once
#include <stdint.h>
const uint64_t KiB = 0x400;
const uint64_t kPageSize = 4 * KiB;
const uint64_t kKernelSlabHeapStart = 0xFFFF'FFFF'4000'0000;
const uint64_t kKernelSlabHeapEnd = 0xFFFF'FFFF'6000'0000;
const uint64_t kKernelBuddyHeapStart = 0xFFFF'FFFF'6000'0000;
const uint64_t kKernelBuddyHeapEnd = 0xFFFF'FFFF'8000'0000;
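These constants carve two adjacent 512 MiB virtual regions out of the kernel's upper address space: one for slabs, one for the buddy heap (currently served by a bump pointer, per the kernel_heap.cpp diff below). A couple of illustrative sanity checks, not present in the source:

```cpp
#include <cstdint>
#include "memory/constants.h"  // path as shown in the kernel_heap.h diff

// Each heap region spans 0x2000'0000 bytes = 512 MiB.
static_assert(kKernelSlabHeapEnd - kKernelSlabHeapStart == 512 * KiB * KiB,
              "slab heap is 512 MiB");
static_assert(kKernelBuddyHeapStart == kKernelSlabHeapEnd,
              "buddy heap begins where the slab heap ends");
static_assert(kPageSize == 4096, "4 KiB pages");
```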

View File

@@ -15,31 +15,28 @@ KernelHeap& GetKernelHeap() {
}
return *gKernelHeap;
}
static uint8_t kNewByte = 0xAB;
void InitMemory(uint8_t* addr, uint64_t size) {
for (uint64_t i = 0; i < size; i++) {
addr[i] = kNewByte;
}
}
bool IsSlab(void* addr) {
uint64_t uaddr = reinterpret_cast<uint64_t>(addr);
return uaddr >= kKernelSlabHeapStart && uaddr < kKernelSlabHeapEnd;
}
} // namespace
KernelHeap::KernelHeap(uint64_t lower_bound, uint64_t upper_bound)
: next_slab_addr_(lower_bound),
first_unsized_addr_(lower_bound + (upper_bound - lower_bound) / 2),
next_addr_(first_unsized_addr_),
upper_bound_(upper_bound) {
gKernelHeap = this;
}
void KernelHeap::InitializeSlabAllocators() {
slab_8_ = glcr::MakeUnique<SlabAllocator<8>>(next_slab_addr_, 4);
next_slab_addr_ += 0x4000;
slab_16_ = glcr::MakeUnique<SlabAllocator<16>>(next_slab_addr_, 6);
next_slab_addr_ += 0x6000;
slab_32_ = glcr::MakeUnique<SlabAllocator<32>>(next_slab_addr_, 6);
next_slab_addr_ += 0x6000;
}
KernelHeap::KernelHeap() { gKernelHeap = this; }
void* KernelHeap::Allocate(uint64_t size) {
#if K_HEAP_DEBUG
dbgln("Alloc ({x})", size);
#endif
if ((size <= 8) && slab_8_) {
auto ptr_or = slab_8_->Allocate();
if (size <= 8) {
auto ptr_or = slab_8_.Allocate();
if (ptr_or.ok()) {
return ptr_or.value();
}
@@ -47,8 +44,8 @@ void* KernelHeap::Allocate(uint64_t size) {
dbgln("Skipped allocation (slab 8): {x}", ptr_or.error());
#endif
}
if ((size <= 16) && slab_16_) {
auto ptr_or = slab_16_->Allocate();
if (size <= 16) {
auto ptr_or = slab_16_.Allocate();
if (ptr_or.ok()) {
return ptr_or.value();
}
@@ -56,13 +53,31 @@ void* KernelHeap::Allocate(uint64_t size) {
dbgln("Skipped allocation (slab 16): {x}", ptr_or.error());
#endif
}
if ((size <= 32) && slab_32_) {
auto ptr_or = slab_32_->Allocate();
if (size <= 32) {
auto ptr_or = slab_32_.Allocate();
if (ptr_or.ok()) {
return ptr_or.value();
}
#if K_HEAP_DEBUG
dbgln("Skipped allocation (slab 32): {x}", ptr_or.error());
#endif
}
if (size <= 64) {
auto ptr_or = slab_64_.Allocate();
if (ptr_or.ok()) {
return ptr_or.value();
}
#if K_HEAP_DEBUG
dbgln("Skipped allocation (slab 64): {x}", ptr_or.error());
#endif
}
if (size <= 128) {
auto ptr_or = slab_128_.Allocate();
if (ptr_or.ok()) {
return ptr_or.value();
}
#if K_HEAP_DEBUG
dbgln("Skipped allocation (slab 128): {x}", ptr_or.error());
#endif
}
if (next_addr_ + size >= upper_bound_) {
@@ -74,40 +89,78 @@ void* KernelHeap::Allocate(uint64_t size) {
#endif
EnsureResident(next_addr_, size);
uint64_t address = next_addr_;
alloc_count_ += 1;
next_addr_ += size;
return reinterpret_cast<void*>(address);
}
void KernelHeap::DumpDistribution() {
void KernelHeap::DumpDebugData() {
#if K_HEAP_DEBUG
uint64_t* distributions = gKernelHeap->distributions;
dbgln("<=4B: {}", distributions[0]);
dbgln("<=8B: {}", distributions[1]);
dbgln("<=16B: {}", distributions[2]);
dbgln("<=32B: {}", distributions[3]);
dbgln("<=64B: {}", distributions[4]);
dbgln("<=128B: {}", distributions[5]);
dbgln("<=256B: {}", distributions[6]);
dbgln("<=512B: {}", distributions[7]);
dbgln("<=1KiB: {}", distributions[8]);
dbgln("<=2KiB: {}", distributions[9]);
dbgln("<=4KiB: {}", distributions[10]);
dbgln("> 4KiB: {}", distributions[11]);
gKernelHeap->DumpDebugDataInternal();
#endif
}
void KernelHeap::DumpDebugDataInternal() {
dbgln("");
dbgln("Heap Debug Statistics!");
dbgln("Slab Statistics:");
dbgln("Slab 8: {} slabs, {} allocs", slab_8_.SlabCount(),
slab_8_.Allocations());
dbgln("Slab 16: {} slabs, {} allocs", slab_16_.SlabCount(),
slab_16_.Allocations());
dbgln("Slab 32: {} slabs, {} allocs", slab_32_.SlabCount(),
slab_32_.Allocations());
dbgln("Slab 64: {} slabs, {} allocs", slab_64_.SlabCount(),
slab_64_.Allocations());
dbgln("Slab 128: {} slabs, {} allocs", slab_128_.SlabCount(),
slab_128_.Allocations());
dbgln("");
dbgln("Size Distributions of non slab-allocated.");
dbgln("Pages used: {}",
(next_addr_ - kKernelBuddyHeapStart - 1) / 0x1000 + 1);
// Active Allocs.
dbgln("Active Allocations: {}", alloc_count_);
dbgln("<=256B: {}", distributions[0]);
dbgln("<=512B: {}", distributions[1]);
dbgln("<=1KiB: {}", distributions[2]);
dbgln("<=2KiB: {}", distributions[3]);
dbgln("<=4KiB: {}", distributions[4]);
dbgln("> 4KiB: {}", distributions[5]);
dbgln("");
}
void KernelHeap::RecordSize(uint64_t size) {
size >>= 3;
size -= 1;
size >>= 8;
uint64_t index = 0;
while (size && index < 11) {
while (size && index < 5) {
size >>= 1;
index++;
}
distributions[index]++;
}
void* operator new(uint64_t size) { return GetKernelHeap().Allocate(size); }
void* operator new[](uint64_t size) { return GetKernelHeap().Allocate(size); }
void* operator new(uint64_t size) {
void* addr = GetKernelHeap().Allocate(size);
InitMemory(static_cast<uint8_t*>(addr), size);
return addr;
}
void* operator new[](uint64_t size) {
void* addr = GetKernelHeap().Allocate(size);
InitMemory(static_cast<uint8_t*>(addr), size);
return addr;
}
void operator delete(void*, uint64_t size) {}
void operator delete[](void*) {}
void operator delete(void* addr, uint64_t size) {
if (IsSlab(addr)) {
SlabFree(addr);
}
}
void operator delete[](void* addr) {
if (IsSlab(addr)) {
SlabFree(addr);
}
}
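The reworked RecordSize maps a request size to one of six power-of-two buckets starting at 256 B: subtracting one and shifting right by eight leaves zero for anything up to 256 bytes, and each further halving steps one bucket up. A standalone re-implementation to illustrate (a hypothetical test harness, not part of the kernel):

```cpp
#include <cstdint>
#include <cstdio>

// Mirrors KernelHeap::RecordSize's bucket computation.
uint64_t Bucket(uint64_t size) {
  size -= 1;
  size >>= 8;                  // sizes <= 256 land in bucket 0
  uint64_t index = 0;
  while (size && index < 5) {  // each halving is one bucket; 5 is "> 4KiB"
    size >>= 1;
    index++;
  }
  return index;
}

int main() {
  const uint64_t sizes[] = {200, 257, 1024, 4096, 5000};
  // Expected: 200 -> 0 (<=256B), 257 -> 1 (<=512B), 1024 -> 2 (<=1KiB),
  //           4096 -> 4 (<=4KiB), 5000 -> 5 (>4KiB)
  for (uint64_t s : sizes) {
    std::printf("%llu -> bucket %llu\n",
                (unsigned long long)s, (unsigned long long)Bucket(s));
  }
}
```

The new InitMemory fill of 0xAB is a classic poison pattern: freshly allocated memory gets a recognizable junk value so code that reads it before initialization fails loudly instead of silently, which is exactly the bug class fixed in the Glacier vector commit above.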

View File

@@ -3,43 +3,41 @@
#include <glacier/memory/unique_ptr.h>
#include <stdint.h>
#include "memory/constants.h"
#include "memory/slab_allocator.h"
class KernelHeap {
public:
KernelHeap(uint64_t lower_bound, uint64_t upper_bound);
KernelHeap();
void InitializeSlabAllocators();
void* Allocate(uint64_t size);
void Free(void* address);
static void DumpDistribution();
static void DumpDebugData();
private:
uint64_t next_slab_addr_;
uint64_t first_unsized_addr_;
uint64_t next_addr_;
uint64_t upper_bound_;
uint64_t next_addr_ = kKernelBuddyHeapStart;
uint64_t upper_bound_ = kKernelBuddyHeapEnd;
glcr::UniquePtr<SlabAllocator<8>> slab_8_;
glcr::UniquePtr<SlabAllocator<16>> slab_16_;
glcr::UniquePtr<SlabAllocator<32>> slab_32_;
uint64_t alloc_count_ = 0;
// Distribution collection for the purpose of investigating a slab allocator.
// 0: 0-4B
// 1: 4B-8B
// 2: 8B-16B
// 3: 16B-32B
// 4: 32B-64B
// 5: 64B-128B
// 6: 128B-256B
// 7: 256B-512B
// 8: 512B-1KiB
// 9: 1KiB-2KiB
// 10: 2KiB-4KiB
// 11: 4KiB+
uint64_t distributions[12];
SlabAllocator slab_8_{8};
SlabAllocator slab_16_{16};
SlabAllocator slab_32_{32};
SlabAllocator slab_64_{64};
SlabAllocator slab_128_{128};
// 0: 128B-256B
// 1: 256B-512B
// 2: 512B-1KiB
// 3: 1KiB-2KiB
// 4: 2KiB-4KiB
// 5: 4KiB+
uint64_t distributions[6];
void RecordSize(uint64_t size);
void DumpDebugDataInternal();
};
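Replacing the glcr::UniquePtr<SlabAllocator<N>> members with plain value members (plus compile-time defaults for the heap bounds) is what lets commit cc16210e90 drop InitializeSlabAllocators(): a SlabAllocator now only records its element size at construction and maps its first slab page lazily on the first Allocate(), so the heap can serve slab-sized requests from its very first call. A compressed sketch of the pattern (class bodies elided):

```cpp
#include <cstdint>

// Sketch: lazily-initialized value members avoid a separate init step.
class SlabAllocator {
 public:
  explicit SlabAllocator(uint64_t elem_size) : elem_size_(elem_size) {}
  // The first Allocate() call maps a fresh slab page on demand.
 private:
  uint64_t elem_size_;
};

class KernelHeap {
  SlabAllocator slab_8_{8};  // usable as soon as KernelHeap is constructed
  SlabAllocator slab_16_{16};
  // ...
};
```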

View File

@@ -140,10 +140,10 @@ void InitBootstrapPageAllocation() {
// if we limit the number of pages this should be fine.
// Currently set to the minimum of 3 for one kernel heap allocation:
// PageDirectory + PageTable + Page
if (entry.type == 0 && entry.length >= 0x3000) {
if (entry.type == 0 && entry.length >= 0x5000) {
gBootstrap.init_page = entry.base;
gBootstrap.next_page = entry.base;
gBootstrap.max_page = entry.base + 0x3000;
gBootstrap.max_page = entry.base + 0x4000;
gBootstrapEnabled = true;
return;
}

View File

@@ -0,0 +1,124 @@
#include "slab_allocator.h"
#include "debug/debug.h"
#include "memory/constants.h"
#include "memory/paging_util.h"
namespace {
// TODO: Store these in a kernel VMM.
const uint64_t kSlabSize = 4 * KiB;
const uint64_t kSlabMask = ~(kSlabSize - 1);
uint64_t gNextSlab = kKernelSlabHeapStart;
uint64_t NextSlab() {
// FIXME: Synchronization.
uint64_t next_slab = gNextSlab;
if (next_slab >= kKernelBuddyHeapEnd) {
panic("Slab heap overrun");
}
gNextSlab += kSlabSize;
EnsureResident(next_slab, 1);
return next_slab;
}
} // namespace
Slab::Slab(uint64_t elem_size) : elem_size_(elem_size), num_allocated_(0) {
uint64_t first_elem_offset = ((sizeof(Slab) - 1) / elem_size_) + 1;
uint64_t entry_addr =
reinterpret_cast<uint64_t>(this) + first_elem_offset * elem_size_;
FreeListEntry* entry = reinterpret_cast<FreeListEntry*>(entry_addr);
first_free_ = entry;
uint64_t max_entry = reinterpret_cast<uint64_t>(this) + kSlabSize;
entry_addr += elem_size_;
while (entry_addr < max_entry) {
FreeListEntry* next_entry = reinterpret_cast<FreeListEntry*>(entry_addr);
next_entry->next = nullptr;
entry->next = next_entry;
entry = next_entry;
entry_addr += elem_size_;
}
}
void* Slab::Allocate() {
if (IsFull()) {
panic("Allocating from full slab.");
}
FreeListEntry* to_return = first_free_;
first_free_ = first_free_->next;
num_allocated_++;
return static_cast<void*>(to_return);
}
void Slab::Free(void* address) {
if (!IsContained(address)) {
panic("Freeing non-contained address from slab.");
}
uint64_t vaddr = reinterpret_cast<uint64_t>(address);
if ((vaddr & ~(elem_size_ - 1)) != vaddr) {
panic("Freeing non-aligned address");
}
FreeListEntry* new_free = static_cast<FreeListEntry*>(address);
new_free->next = first_free_;
first_free_ = new_free;
num_allocated_--;
}
bool Slab::IsFull() { return first_free_ == nullptr; }
bool Slab::IsContained(void* addr) {
uint64_t uaddr = reinterpret_cast<uint64_t>(addr);
uint64_t uthis = reinterpret_cast<uint64_t>(this);
bool is_above = uaddr > (uthis + sizeof(this));
bool is_below = uaddr < (uthis + kSlabSize);
return is_above && is_below;
}
// FIXME: Move all of these to a new file that can be included anywhere.
inline void* operator new(uint64_t, void* p) { return p; }
glcr::ErrorOr<void*> SlabAllocator::Allocate() {
auto slab = slabs_.PeekFront();
while (slab && slab->IsFull()) {
slab = slab->next_;
}
if (slab) {
return slab->Allocate();
}
dbgln("Allocating new kernel slab size {}", elem_size_);
void* next_slab = (uint64_t*)NextSlab();
slabs_.PushFront(glcr::AdoptPtr(new (next_slab) Slab(elem_size_)));
return slabs_.PeekFront()->Allocate();
}
uint64_t SlabAllocator::SlabCount() {
auto slab = slabs_.PeekFront();
uint64_t count = 0;
while (slab) {
count++;
slab = slab->next_;
}
return count;
}
uint64_t SlabAllocator::Allocations() {
auto slab = slabs_.PeekFront();
uint64_t count = 0;
while (slab) {
count += slab->Allocations();
slab = slab->next_;
}
return count;
}
void SlabFree(void* addr) {
Slab* slab =
reinterpret_cast<Slab*>(reinterpret_cast<uint64_t>(addr) & kSlabMask);
slab->Free(addr);
}
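Because every slab is a single 4 KiB, page-aligned page with its Slab header placement-new'd at the base, SlabFree can recover the owning header from any interior pointer just by masking off the low bits. A minimal standalone sketch of the idea (constant names match the diff):

```cpp
#include <cstdint>

constexpr uint64_t kSlabSize = 0x1000;            // 4 KiB, page-aligned
constexpr uint64_t kSlabMask = ~(kSlabSize - 1);  // clears the low 12 bits

// Any pointer inside a slab page maps back to the page base, where the
// Slab header lives. This is why slabs must be page-sized and page-aligned.
uint64_t OwningSlabBase(const void* addr) {
  return reinterpret_cast<uint64_t>(addr) & kSlabMask;
}
// e.g. OwningSlabBase((void*)0xFFFF'FFFF'4000'0A10) == 0xFFFF'FFFF'4000'0000
```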

View File

@@ -8,111 +8,45 @@
#include "memory/paging_util.h"
constexpr uint64_t kSlabSentinel = 0xDEADBEEF'C0DEC0DE;
// TODO: Add variable sized slabs (i.e. multiple page slabs.)
template <uint64_t ElemSize>
class Slab : public glcr::RefCounted<Slab<ElemSize>>,
public glcr::IntrusiveListNode<Slab<ElemSize>> {
class Slab : public glcr::RefCounted<Slab>,
public glcr::IntrusiveListNode<Slab> {
public:
explicit Slab(uint64_t addr) : page_addr_(addr) {
for (uint64_t i = 0; i < kBitmapLength; i++) {
bitmap_[i] = 0;
}
bitmap_[0] = ElemSize <= 8 ? 0x3 : 0x1;
EnsureResident(page_addr_, 16);
uint64_t* first_elem = reinterpret_cast<uint64_t*>(page_addr_);
first_elem[0] = kSlabSentinel;
first_elem[1] = reinterpret_cast<uint64_t>(this);
}
explicit Slab(uint64_t elem_size);
Slab(Slab&) = delete;
Slab(Slab&&) = delete;
glcr::ErrorOr<void*> Allocate() {
uint64_t index = GetFirstFreeIndex();
if (index == 0) {
return glcr::EXHAUSTED;
}
bitmap_[index / 64] |= (0x1 << (index % 64));
return reinterpret_cast<void*>(page_addr_ + (index * ElemSize));
}
void* Allocate();
void Free(void* addr);
glcr::ErrorCode Free(void* addr) {
uint64_t raw_addr = reinterpret_cast<uint64_t>(addr);
if (raw_addr < page_addr_ || raw_addr > (page_addr_ + 0x1000)) {
return glcr::INVALID_ARGUMENT;
}
// FIXME: Check alignment.
uint64_t offset = raw_addr - page_addr_;
uint64_t index = offset / ElemSize;
if (index == 0) {
return glcr::FAILED_PRECONDITION;
}
bitmap_[index / 64] &= ~(0x1 << (index % 64));
}
bool IsFull() {
for (uint64_t i = 0; i < kBitmapLength; i++) {
if (bitmap_[i] != -1) {
return false;
}
}
return true;
}
bool IsFull();
uint64_t Allocations() { return num_allocated_; }
private:
// FIXME: Likely a bug or two here if the number of elements doesn't evenly
// divide in to the bitmap length.
static constexpr uint64_t kBitmapLength = 0x1000 / ElemSize / 64;
static constexpr uint64_t kMaxElements = 0x99E / ElemSize;
uint64_t bitmap_[kBitmapLength];
uint64_t page_addr_;
struct FreeListEntry {
FreeListEntry* next;
};
uint64_t GetFirstFreeIndex() {
uint64_t bi = 0;
while (bi < kBitmapLength && (bitmap_[bi] == -1)) {
bi++;
}
if (bi == kBitmapLength) {
return 0;
}
// FIXME: Use hardware bit instructions here.
uint64_t bo = 0;
uint64_t bm = bitmap_[bi];
while (bm & 0x1) {
bm >>= 1;
bo += 1;
}
return (bi * 64) + bo;
}
uint64_t elem_size_;
uint64_t num_allocated_;
FreeListEntry* first_free_;
bool IsContained(void* addr);
};
template <uint64_t ElemSize>
class SlabAllocator {
public:
SlabAllocator() = delete;
SlabAllocator(uint64_t elem_size) : elem_size_(elem_size) {}
SlabAllocator(SlabAllocator&) = delete;
// TODO: Add a Kernel VMMO Struct to hold things like this.
SlabAllocator(uint64_t base_addr, uint64_t num_pages) {
for (uint64_t i = 0; i < num_pages; i++, base_addr += 0x1000) {
slabs_.PushBack(glcr::MakeRefCounted<Slab<ElemSize>>(base_addr));
}
}
glcr::ErrorOr<void*> Allocate();
glcr::ErrorOr<void*> Allocate() {
glcr::RefPtr<Slab<ElemSize>> curr = slabs_.PeekFront();
while (curr) {
if (curr->IsFull()) {
curr = curr->next_;
continue;
}
return curr->Allocate();
}
return glcr::EXHAUSTED;
}
// Stats:
uint64_t SlabCount();
uint64_t Allocations();
private:
glcr::IntrusiveList<Slab<ElemSize>> slabs_;
uint64_t elem_size_;
glcr::IntrusiveList<Slab> slabs_;
};
void SlabFree(void* addr);
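The rewrite drops the per-slab bitmap in favor of an intrusive free list: each free element's own bytes store the pointer to the next free element, so bookkeeping costs no memory beyond the header, and both Allocate and Free become O(1) pointer swaps instead of a first-free bitmap scan. A freestanding sketch of the pattern, assuming elem_size >= sizeof(void*) and a suitably aligned buffer:

```cpp
#include <cstdint>

struct FreeListEntry {
  FreeListEntry* next;
};

// Threads a free list through a raw buffer of `count` elements of
// `elem_size` bytes each; returns the head of the list.
FreeListEntry* BuildFreeList(uint8_t* buf, uint64_t elem_size, uint64_t count) {
  FreeListEntry* head = nullptr;
  for (uint64_t i = count; i-- > 0;) {
    auto* e = reinterpret_cast<FreeListEntry*>(buf + i * elem_size);
    e->next = head;  // the free element itself stores the link
    head = e;
  }
  return head;
}

// Pop = allocate, push = free; both constant time.
void* Pop(FreeListEntry*& head) {
  FreeListEntry* e = head;
  head = head->next;
  return e;
}

void Push(FreeListEntry*& head, void* addr) {
  auto* e = static_cast<FreeListEntry*>(addr);
  e->next = head;
  head = e;
}
```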

View File

@@ -23,9 +23,8 @@ extern "C" void zion() {
early_dbgln("[boot] Init Physical Memory Manager.");
phys_mem::InitBootstrapPageAllocation();
KernelHeap heap(0xFFFFFFFF'40000000, 0xFFFFFFFF'80000000);
KernelHeap heap;
phys_mem::InitPhysicalMemoryManager();
heap.InitializeSlabAllocators();
phys_mem::DumpRegions();
dbgln("[boot] Memory allocations available now.");
@@ -55,7 +54,7 @@ extern "C" void zion() {
LoadInitProgram();
dbgln("[boot] Allocs during boot:");
heap.DumpDistribution();
heap.DumpDebugData();
dbgln("[boot] Init finished, yielding.");
gScheduler->Enable();