From 0b7e6673684c1bab67a79952c190f803943611c5 Mon Sep 17 00:00:00 2001 From: Drew Galbraith Date: Thu, 18 May 2023 09:46:41 -0700 Subject: [PATCH] Add a basic kernel heap object. Currently allocation always fails because we don't have a way to allocate a physical page. --- zion/CMakeLists.txt | 1 + zion/memory/kernel_heap.cpp | 17 +++++++ zion/memory/kernel_heap.h | 14 ++++++ zion/memory/paging_util.cpp | 89 +++++++++++++++++++++++++++++++++++++ zion/memory/paging_util.h | 16 +++++++ zion/zion.cpp | 9 ++-- 6 files changed, 140 insertions(+), 6 deletions(-) create mode 100644 zion/memory/kernel_heap.cpp create mode 100644 zion/memory/kernel_heap.h diff --git a/zion/CMakeLists.txt b/zion/CMakeLists.txt index d1de879..272016f 100644 --- a/zion/CMakeLists.txt +++ b/zion/CMakeLists.txt @@ -5,6 +5,7 @@ add_executable(zion debug/debug.cpp interrupt/interrupt.cpp interrupt/interrupt_enter.s + memory/kernel_heap.cpp memory/paging_util.cpp zion.cpp) diff --git a/zion/memory/kernel_heap.cpp b/zion/memory/kernel_heap.cpp new file mode 100644 index 0000000..311aae1 --- /dev/null +++ b/zion/memory/kernel_heap.cpp @@ -0,0 +1,17 @@ +#include "memory/kernel_heap.h" + +#include "debug/debug.h" +#include "memory/paging_util.h" + +KernelHeap::KernelHeap(uint64_t lower_bound, uint64_t upper_bound) + : next_addr_(lower_bound), upper_bound_(upper_bound) {} + +void* KernelHeap::Allocate(uint64_t size) { + if (next_addr_ + size >= upper_bound_) { + panic("Kernel Heap Overrun"); + } + EnsureResident(next_addr_, size); + uint64_t address = next_addr_; + next_addr_ += size; + return reinterpret_cast<void*>(address); +} diff --git a/zion/memory/kernel_heap.h b/zion/memory/kernel_heap.h new file mode 100644 index 0000000..456b387 --- /dev/null +++ b/zion/memory/kernel_heap.h @@ -0,0 +1,14 @@ +#pragma once + +#include <stdint.h> + +class KernelHeap { + public: + KernelHeap(uint64_t lower_bound, uint64_t upper_bound); + + void* Allocate(uint64_t size); + + private: + uint64_t next_addr_; + uint64_t upper_bound_;
+}; diff --git a/zion/memory/paging_util.cpp b/zion/memory/paging_util.cpp index 48b74af..1ccc9f7 100644 --- a/zion/memory/paging_util.cpp +++ b/zion/memory/paging_util.cpp @@ -1,9 +1,45 @@ #include "memory/paging_util.h" #include "boot/boot_info.h" +#include "debug/debug.h" #define PRESENT_BIT 0x1 #define READ_WRITE_BIT 0x2 +#define SIGN_EXT 0xFFFF0000'00000000 +#define RECURSIVE ((uint64_t)0x1FE) +#define PML_OFFSET 39 +#define PDP_OFFSET 30 +#define PD_OFFSET 21 +#define PT_OFFSET 12 + +// How to recursively index into each page table structure assuming +// the PML4 is recursively mapped at the 510th entry (0x1FE). +#define PML_RECURSE 0xFFFFFF7F'BFDFE000 +#define PDP_RECURSE 0xFFFFFF7F'BFC00000 +#define PD_RECURSE 0xFFFFFF7F'80000000 +#define PT_RECURSE 0xFFFFFF00'00000000 + +namespace { + +uint64_t PageAlign(uint64_t addr) { return addr & ~0xFFF; } +uint64_t* PageAlign(uint64_t* addr) { + return reinterpret_cast<uint64_t*>(reinterpret_cast<uint64_t>(addr) & ~0xFFF); +} + +void ZeroOutPage(uint64_t* ptr) { + ptr = PageAlign(ptr); + for (uint64_t i = 0; i < 512; i++) { + ptr[i] = 0; + } +} + +uint64_t ShiftForEntryIndexing(uint64_t addr, uint64_t offset) { + addr &= ~0xFFFF0000'00000000; + addr >>= offset; + addr <<= 3; + return addr; +} +} // namespace void InitPaging() { uint64_t pml4_addr = 0; @@ -18,3 +54,56 @@ void InitializePml4(uint64_t pml4_physical_addr) { uint64_t recursive_entry = pml4_physical_addr | PRESENT_BIT | READ_WRITE_BIT; pml4_virtual[0x1FE] = recursive_entry; } + +void AllocatePageDirectoryPointer(uint64_t addr); +void AllocatePageDirectory(uint64_t addr); +void AllocatePageTable(uint64_t addr); +void AllocatePage(uint64_t addr) { panic("Page Allocation Not Implemented."); } + +void EnsureResident(uint64_t addr, uint64_t size) { + uint64_t max = addr + size; + addr = PageAlign(addr); + while (addr < max) { + if (!PageLoaded(addr)) { + AllocatePage(addr); + } + addr += 0x1000; + } +} + +uint64_t* Pml4Entry(uint64_t addr) { + return reinterpret_cast<uint64_t*>(PML_RECURSE |
+ ShiftForEntryIndexing(addr, PML_OFFSET)); +} + +uint64_t* PageDirectoryPointerEntry(uint64_t addr) { + return reinterpret_cast<uint64_t*>(PDP_RECURSE | + ShiftForEntryIndexing(addr, PDP_OFFSET)); +} + +uint64_t* PageDirectoryEntry(uint64_t addr) { + return reinterpret_cast<uint64_t*>(PD_RECURSE | + ShiftForEntryIndexing(addr, PD_OFFSET)); +} + +uint64_t* PageTableEntry(uint64_t addr) { + return reinterpret_cast<uint64_t*>(PT_RECURSE | + ShiftForEntryIndexing(addr, PT_OFFSET)); +} + +bool PageDirectoryPointerLoaded(uint64_t addr) { + return *Pml4Entry(addr) & PRESENT_BIT; +} + +bool PageDirectoryLoaded(uint64_t addr) { + return PageDirectoryPointerLoaded(addr) && + (*PageDirectoryPointerEntry(addr) & PRESENT_BIT); +} + +bool PageTableLoaded(uint64_t addr) { + return PageDirectoryLoaded(addr) && (*PageDirectoryEntry(addr) & PRESENT_BIT); +} + +bool PageLoaded(uint64_t addr) { + return PageTableLoaded(addr) && (*PageTableEntry(addr) & PRESENT_BIT); +} diff --git a/zion/memory/paging_util.h b/zion/memory/paging_util.h index 75644d0..fb261a7 100644 --- a/zion/memory/paging_util.h +++ b/zion/memory/paging_util.h @@ -4,3 +4,19 @@ void InitPaging(); void InitializePml4(uint64_t pml4_physical_addr); + +void AllocatePageDirectoryPointer(uint64_t addr); +void AllocatePageDirectory(uint64_t addr); +void AllocatePageTable(uint64_t addr); +void AllocatePage(uint64_t addr); +void EnsureResident(uint64_t addr, uint64_t size); + +uint64_t* Pml4Entry(uint64_t addr); +uint64_t* PageDirectoryPointerEntry(uint64_t addr); +uint64_t* PageDirectoryEntry(uint64_t addr); +uint64_t* PageTableEntry(uint64_t addr); + +bool PageDirectoryPointerLoaded(uint64_t addr); +bool PageDirectoryLoaded(uint64_t addr); +bool PageTableLoaded(uint64_t addr); +bool PageLoaded(uint64_t addr); diff --git a/zion/zion.cpp b/zion/zion.cpp index 9821400..003eb87 100644 --- a/zion/zion.cpp +++ b/zion/zion.cpp @@ -5,6 +5,7 @@ #include "common/gdt.h" #include "debug/debug.h" #include "interrupt/interrupt.h" +#include "memory/kernel_heap.h" #include
"memory/paging_util.h" extern "C" void zion() { @@ -12,12 +13,8 @@ extern "C" void zion() { InitIdt(); InitPaging(); - const limine_memmap_response& resp = boot::GetMemoryMap(); - dbgln("Base,Length,Type"); - for (uint64_t i = 0; i < resp.entry_count; i++) { - const limine_memmap_entry& entry = *resp.entries[i]; - dbgln("%m,%x,%u", entry.base, entry.length, entry.type); - } + KernelHeap heap(0xFFFFFFFF'40000000, 0xFFFFFFFF'80000000); + heap.Allocate(1); while (1) ;