[Zion] Split free physical memory into two lists.

Allow partitioning of create requests so that for large continuous
requests we don't have to iterate past a bunch of single free pages.
Drew Galbraith 2023-11-20 10:02:17 -08:00
parent 12ca4e4e89
commit 9b43d615a9
1 changed file with 38 additions and 29 deletions
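A minimal, self-contained sketch of the two-list scheme this commit lands, with std::list standing in for glcr::LinkedList; MemBlock, kPageSize, and the method names follow the diff below, while the class name TwoListPmmSketch and everything else are illustrative:

    #include <cstdint>
    #include <list>

    constexpr uint64_t kPageSize = 0x1000;

    struct MemBlock {
      uint64_t base = 0;
      uint64_t num_pages = 0;
    };

    class TwoListPmmSketch {
     public:
      void AddMemoryRegion(uint64_t base, uint64_t num_pages) {
        blocks_.push_front({base, num_pages});
      }

      // Single-page requests drain the cheap singles list first, so page
      // churn never lengthens the scan AllocateContinuous has to make.
      // Assumes at least one region is available, as the kernel does.
      uint64_t AllocatePage() {
        if (!singles_.empty()) {
          uint64_t page = singles_.front();
          singles_.pop_front();
          return page;
        }
        while (!blocks_.empty() && blocks_.front().num_pages == 0) {
          blocks_.pop_front();
        }
        MemBlock& block = blocks_.front();
        uint64_t page = block.base;
        block.base += kPageSize;
        block.num_pages--;
        return page;
      }

      // First-fit over multi-page blocks only; freed single pages are
      // invisible here, which is the point of the split.
      uint64_t AllocateContinuous(uint64_t num_pages) {
        for (MemBlock& block : blocks_) {
          if (block.num_pages < num_pages) {
            continue;
          }
          uint64_t page = block.base;
          block.base += num_pages * kPageSize;
          block.num_pages -= num_pages;
          return page;
        }
        return 0;  // the kernel panics here instead of returning
      }

      // Freed pages pool in the singles list rather than becoming
      // one-page MemBlocks in the block list.
      void FreePage(uint64_t page) { singles_.push_front(page); }

     private:
      std::list<MemBlock> blocks_;    // initial regions plus freed chunks
      std::list<uint64_t> singles_;   // individually freed pages
    };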


@@ -56,20 +56,24 @@ class PhysicalMemoryManager {
   }
   uint64_t AllocatePage() {
-    if (memory_blocks.size() == 0) {
+    if (single_memory_pages_.size() > 0) {
+      return single_memory_pages_.PopFront();
+    }
+    if (memory_blocks_.size() == 0) {
       panic("No available memory regions.");
     }
-    while (memory_blocks.PeekFront().num_pages == 0) {
-      memory_blocks.PopFront();
+    while (memory_blocks_.PeekFront().num_pages == 0) {
+      memory_blocks_.PopFront();
     }
-    MemBlock& block = memory_blocks.PeekFront();
+    MemBlock& block = memory_blocks_.PeekFront();
     uint64_t page = block.base;
     block.base += kPageSize;
     block.num_pages--;
     if (block.num_pages == 0) {
-      memory_blocks.PopFront();
+      memory_blocks_.PopFront();
     }
 #if K_PHYS_DEBUG
     dbgln("Single {x}", page);
@@ -78,37 +82,37 @@ class PhysicalMemoryManager {
     return page;
   }
   uint64_t AllocateContinuous(uint64_t num_pages) {
-    if (memory_blocks.size() == 0) {
+    if (memory_blocks_.size() == 0) {
       panic("No available memory regions.");
     }
-    MemBlock& block = memory_blocks.PeekFront();
-    if (block.num_pages == 0) {
-      panic("Bad state, empty memory block.");
-    }
-    auto iter = memory_blocks.begin();
-    while (iter != memory_blocks.end() && iter->num_pages < num_pages) {
-      dbgln("Skipping block of size {} seeking {}", iter->num_pages, num_pages);
-      iter = iter.next();
-    }
-    if (iter == memory_blocks.end()) {
-      panic("No memory regions to allocate");
-    }
-    uint64_t page = iter->base;
-    iter->base += num_pages * kPageSize;
-    iter->num_pages -= num_pages;
+    // TODO: Add an easy way to delete an iterator from a LinkedList
+    // so we can keep the blocklist free from 0 sized pages.
+    // These occur when we allocate a continuous block the same size as
+    // an available MemoryBlock.
+    while (memory_blocks_.PeekFront().num_pages == 0) {
+      memory_blocks_.PopFront();
+    }
+    for (MemBlock& block : memory_blocks_) {
+      if (block.num_pages < num_pages) {
+        continue;
+      }
+      uint64_t page = block.base;
+      block.base += num_pages * kPageSize;
+      block.num_pages -= num_pages;
 #if K_PHYS_DEBUG
-    dbgln("Continuous {x}:{}", page, num_pages);
+      dbgln("Continuous {x}:{}", page, num_pages);
 #endif
-    allocated_pages_ += num_pages;
-    return page;
+      allocated_pages_ += num_pages;
+      return page;
+    }
+    panic("No memory regions to allocate");
+    UNREACHABLE
   }
   void FreePage(uint64_t page) {
-    AddMemoryRegion(page, 1);
+    single_memory_pages_.PushFront(page);
     allocated_pages_--;
   }
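The TODO in AllocateContinuous is load-bearing: taking exactly block.num_pages pages from a block leaves a zero-size entry that is only reclaimed once it reaches the front of the list. A small demonstration of that failure mode, again using std::list as a stand-in for glcr::LinkedList:

    #include <cstdint>
    #include <cstdio>
    #include <list>

    struct MemBlock {
      uint64_t base = 0;
      uint64_t num_pages = 0;
    };

    int main() {
      constexpr uint64_t kPageSize = 0x1000;
      std::list<MemBlock> blocks = {{0x200000, 8}, {0x100000, 4}};

      // First-fit takes all 8 pages of the first block, leaving a
      // zero-size entry behind in the list.
      for (MemBlock& block : blocks) {
        if (block.num_pages < 8) continue;
        block.base += 8 * kPageSize;
        block.num_pages -= 8;
        break;
      }
      std::printf("front block pages: %llu\n",
                  (unsigned long long)blocks.front().num_pages);  // 0

      // The head-trim loops in AllocatePage/AllocateContinuous discard
      // such entries, but only at the front; zero-size blocks elsewhere
      // linger until the LinkedList grows iterator erasure.
      while (!blocks.empty() && blocks.front().num_pages == 0) {
        blocks.pop_front();
      }
      std::printf("front block pages: %llu\n",
                  (unsigned long long)blocks.front().num_pages);  // 4
    }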
@@ -121,7 +125,7 @@ class PhysicalMemoryManager {
   uint64_t AvailablePages() {
     uint64_t available = 0;
-    for (const auto& mem_block : memory_blocks) {
+    for (const auto& mem_block : memory_blocks_) {
       available += mem_block.num_pages;
     }
     return available;
@@ -133,7 +137,12 @@ class PhysicalMemoryManager {
     uint64_t num_pages = 0;
   };
-  glcr::LinkedList<MemBlock> memory_blocks;
+  // memory_blocks_ contains the initial memory blocks
+  // as well as freed chunks. We relegate single freed
+  // pages to a separate list to avoid having to traverse far
+  // to find large ones.
+  glcr::LinkedList<MemBlock> memory_blocks_;
+  glcr::LinkedList<uint64_t> single_memory_pages_;
   uint64_t allocated_pages_ = 0;
@@ -142,7 +151,7 @@ class PhysicalMemoryManager {
       .base = base,
       .num_pages = num_pages,
     };
-    memory_blocks.PushFront(block);
+    memory_blocks_.PushFront(block);
   }
 };
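A usage sketch of the behavior the commit message describes, built on the hypothetical TwoListPmmSketch above:

    int main() {
      TwoListPmmSketch pmm;
      pmm.AddMemoryRegion(0x100000, 512);

      // Churn single pages. Previously each FreePage created a 1-page
      // MemBlock that AllocateContinuous had to iterate past; now they
      // pool in the separate singles list.
      for (int i = 0; i < 256; ++i) {
        pmm.FreePage(pmm.AllocatePage());
      }

      // The continuous path still sees one large block and succeeds on
      // its first list entry instead of skipping single-page blocks.
      uint64_t run = pmm.AllocateContinuous(64);
      return run == 0 ? 1 : 0;
    }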