#include "memory/kernel_heap.h"

#include "debug/debug.h"
#include "memory/paging_util.h"
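// When set to 1, Allocate() logs each request, and sizes that fall through to
// the bump allocator are recorded so DumpDistribution() can print a histogram.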
#define K_HEAP_DEBUG 0

namespace {

static KernelHeap* gKernelHeap = nullptr;

KernelHeap& GetKernelHeap() {
  if (!gKernelHeap) {
    panic("Kernel Heap not initialized.");
  }
  return *gKernelHeap;
}

}  // namespace
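// The heap owns the virtual range [lower_bound, upper_bound): the lower half
// is reserved for fixed-size slab regions and the upper half is handed out by
// a simple bump allocator. Constructing the heap installs it as the global
// instance backing operator new.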
KernelHeap::KernelHeap(uint64_t lower_bound, uint64_t upper_bound)
    : next_slab_addr_(lower_bound),
      first_unsized_addr_(lower_bound + (upper_bound - lower_bound) / 2),
      next_addr_(first_unsized_addr_),
      upper_bound_(upper_bound) {
  gKernelHeap = this;
}
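// Carves slab regions for 8-, 16-, and 32-byte objects out of the start of the
// heap. The second SlabAllocator argument appears to be the number of 4 KiB
// pages backing each slab (4 pages = 0x4000 bytes, 6 pages = 0x6000 bytes),
// which matches how far next_slab_addr_ is advanced after each one.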
void KernelHeap::InitializeSlabAllocators() {
  slab_8_ = glcr::MakeUnique<SlabAllocator<8>>(next_slab_addr_, 4);
  next_slab_addr_ += 0x4000;
  slab_16_ = glcr::MakeUnique<SlabAllocator<16>>(next_slab_addr_, 6);
  next_slab_addr_ += 0x6000;
  slab_32_ = glcr::MakeUnique<SlabAllocator<32>>(next_slab_addr_, 6);
  next_slab_addr_ += 0x6000;
}
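// Boot-time setup sketch (the bounds below are illustrative only, not the
// kernel's actual heap layout). InitializeSlabAllocators() is a separate step,
// presumably because glcr::MakeUnique itself allocates from this heap:
//
//   KernelHeap heap(/*lower_bound=*/0xFFFFFFFF40000000,
//                   /*upper_bound=*/0xFFFFFFFF80000000);
//   heap.InitializeSlabAllocators();
//   uint64_t* counters = new uint64_t[4];  // Served by the heap from now on.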
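// Requests of up to 8/16/32 bytes are served by the matching slab allocator
// when it is available; larger requests, and any slab failure, fall through to
// the bump allocator in the upper half of the heap.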
void* KernelHeap::Allocate(uint64_t size) {
#if K_HEAP_DEBUG
  dbgln("Alloc (%x)", size);
#endif
  if ((size <= 8) && slab_8_) {
    auto ptr_or = slab_8_->Allocate();
    if (ptr_or.ok()) {
      return ptr_or.value();
    }
    dbgln("Failed allocation (slab 8): %x", ptr_or.error());
  }
  if ((size <= 16) && slab_16_) {
    auto ptr_or = slab_16_->Allocate();
    if (ptr_or.ok()) {
      return ptr_or.value();
    }
    dbgln("Failed allocation (slab 16): %x", ptr_or.error());
  }
  if ((size <= 32) && slab_32_) {
    auto ptr_or = slab_32_->Allocate();
    if (ptr_or.ok()) {
      return ptr_or.value();
    }
    dbgln("Failed allocation (slab 32): %x", ptr_or.error());
  }
  if (next_addr_ + size >= upper_bound_) {
    panic("Kernel Heap Overrun (next, size, max): %m, %x, %m", next_addr_, size,
          upper_bound_);
  }
#if K_HEAP_DEBUG
  RecordSize(size);
#endif
  EnsureResident(next_addr_, size);
  uint64_t address = next_addr_;
  next_addr_ += size;
  return reinterpret_cast<void*>(address);
}
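// Debug-only dump of the allocation-size histogram. Only sizes that reached
// the bump allocator are counted; slab hits return from Allocate() before
// RecordSize() runs.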
void KernelHeap::DumpDistribution() {
#if K_HEAP_DEBUG
  uint64_t* distributions = gKernelHeap->distributions;
  dbgln("<=4B: %u", distributions[0]);
  dbgln("<=8B: %u", distributions[1]);
  dbgln("<=16B: %u", distributions[2]);
  dbgln("<=32B: %u", distributions[3]);
  dbgln("<=64B: %u", distributions[4]);
  dbgln("<=128B: %u", distributions[5]);
  dbgln("<=256B: %u", distributions[6]);
  dbgln("<=512B: %u", distributions[7]);
  dbgln("<=1KiB: %u", distributions[8]);
  dbgln("<=2KiB: %u", distributions[9]);
  dbgln("<=4KiB: %u", distributions[10]);
  dbgln("> 4KiB: %u", distributions[11]);
#endif
}
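// Buckets a size into distributions[0..11]: bucket 0 covers sizes under 8
// bytes, each subsequent bucket roughly doubles, and bucket 11 catches
// everything from 8 KiB up, so the labels printed above are approximate.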
void KernelHeap::RecordSize(uint64_t size) {
  size >>= 3;
  uint64_t index = 0;
  while (size && index < 11) {
    size >>= 1;
    index++;
  }
  distributions[index]++;
}
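// Route the kernel's global new/delete through the heap singleton. The delete
// overloads are empty, so freed objects are never actually returned to the
// heap.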
void* operator new(uint64_t size) { return GetKernelHeap().Allocate(size); }
void* operator new[](uint64_t size) { return GetKernelHeap().Allocate(size); }

void operator delete(void*, uint64_t size) {}
void operator delete[](void*) {}