[Zion] Use a slab allocator for 64 byte and 128 byte allocs.

This has no effect on page usage for now (each slab size still uses a
small number of pages, so there is some fixed overhead).

However, it does reduce the number of active non-slab allocations:
234 -> 146 (64B)
61 -> 51 (128B)
This commit is contained in:
Drew Galbraith 2023-11-15 13:06:14 -08:00
parent cc16210e90
commit 10d16e129f
2 changed files with 24 additions and 0 deletions

View File

@ -60,6 +60,24 @@ void* KernelHeap::Allocate(uint64_t size) {
}
#if K_HEAP_DEBUG
dbgln("Skipped allocation (slab 32): {x}", ptr_or.error());
#endif
}
if (size <= 64) {
auto ptr_or = slab_64_.Allocate();
if (ptr_or.ok()) {
return ptr_or.value();
}
#if K_HEAP_DEBUG
dbgln("Skipped allocation (slab 64): {x}", ptr_or.error());
#endif
}
if (size <= 128) {
auto ptr_or = slab_128_.Allocate();
if (ptr_or.ok()) {
return ptr_or.value();
}
#if K_HEAP_DEBUG
dbgln("Skipped allocation (slab 128): {x}", ptr_or.error());
#endif
}
if (next_addr_ + size >= upper_bound_) {
@ -98,6 +116,10 @@ void KernelHeap::DumpDebugDataInternal() {
slab_16_.Allocations());
dbgln("Slab 32: {} slabs, {} allocs", slab_32_.SlabCount(),
slab_32_.Allocations());
dbgln("Slab 64: {} slabs, {} allocs", slab_64_.SlabCount(),
slab_64_.Allocations());
dbgln("Slab 128: {} slabs, {} allocs", slab_128_.SlabCount(),
slab_128_.Allocations());
dbgln("");
dbgln("Size Distributions of non slab-allocated.");

View File

@ -26,6 +26,8 @@ class KernelHeap {
SlabAllocator slab_8_{8};
SlabAllocator slab_16_{16};
SlabAllocator slab_32_{32};
SlabAllocator slab_64_{64};
SlabAllocator slab_128_{128};
// Distribution collection for the purpose of investigating a slab allocator.
// 0: 0-8B