Add a basic kernel heap object.
Currently allocation always fails because we don't have a way to allocate a physical page.
commit 0b7e667368
parent 45b5817a36
CMakeLists.txt
@@ -5,6 +5,7 @@ add_executable(zion
   debug/debug.cpp
   interrupt/interrupt.cpp
   interrupt/interrupt_enter.s
+  memory/kernel_heap.cpp
   memory/paging_util.cpp
   zion.cpp)
memory/kernel_heap.cpp (new file)
@@ -0,0 +1,17 @@
+#include "memory/kernel_heap.h"
+
+#include "debug/debug.h"
+#include "memory/paging_util.h"
+
+KernelHeap::KernelHeap(uint64_t lower_bound, uint64_t upper_bound)
+    : next_addr_(lower_bound), upper_bound_(upper_bound) {}
+
+void* KernelHeap::Allocate(uint64_t size) {
+  if (next_addr_ + size >= upper_bound_) {
+    panic("Kernel Heap Overrun");
+  }
+  EnsureResident(next_addr_, size);
+  uint64_t address = next_addr_;
+  next_addr_ += size;
+  return reinterpret_cast<void*>(address);
+}
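The new Allocate is a plain bump allocator over a fixed virtual range: it returns the current cursor, advances it by size, and panics once the range is exhausted; there is no alignment, no per-allocation header, and no free. A minimal host-side sketch of that arithmetic (the panic/EnsureResident stand-ins and the uint64_t return type are only there to make the sketch runnable outside the kernel, they are not the kernel's definitions):

#include <cassert>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

// Stand-ins for the kernel's panic() and EnsureResident(); illustrative only.
void panic(const char* msg) {
  std::printf("PANIC: %s\n", msg);
  std::abort();
}
void EnsureResident(uint64_t, uint64_t) { /* kernel would fault pages in here */ }

class KernelHeap {
 public:
  KernelHeap(uint64_t lower_bound, uint64_t upper_bound)
      : next_addr_(lower_bound), upper_bound_(upper_bound) {}

  // Bump allocation: hand out the cursor, then advance it by size.
  uint64_t Allocate(uint64_t size) {
    if (next_addr_ + size >= upper_bound_) {
      panic("Kernel Heap Overrun");
    }
    EnsureResident(next_addr_, size);
    uint64_t address = next_addr_;
    next_addr_ += size;
    return address;
  }

 private:
  uint64_t next_addr_;
  uint64_t upper_bound_;
};

int main() {
  // Same 1 GiB virtual window the commit uses in zion.cpp.
  KernelHeap heap(0xFFFFFFFF'40000000, 0xFFFFFFFF'80000000);
  uint64_t a = heap.Allocate(1);
  uint64_t b = heap.Allocate(16);
  assert(a == 0xFFFFFFFF'40000000);
  assert(b == a + 1);  // no alignment or headers, just a moving cursor
  std::printf("a=0x%llx b=0x%llx\n", (unsigned long long)a, (unsigned long long)b);
  return 0;
}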
memory/kernel_heap.h (new file)
@@ -0,0 +1,14 @@
+#pragma once
+
+#include <stdint.h>
+
+class KernelHeap {
+ public:
+  KernelHeap(uint64_t lower_bound, uint64_t upper_bound);
+
+  void* Allocate(uint64_t size);
+
+ private:
+  uint64_t next_addr_;
+  uint64_t upper_bound_;
+};
memory/paging_util.cpp
@@ -1,9 +1,45 @@
 #include "memory/paging_util.h"
 
 #include "boot/boot_info.h"
+#include "debug/debug.h"
 
 #define PRESENT_BIT 0x1
 #define READ_WRITE_BIT 0x2
+#define SIGN_EXT 0xFFFF0000'00000000
+#define RECURSIVE ((uint64_t)0x1FE)
+#define PML_OFFSET 39
+#define PDP_OFFSET 30
+#define PD_OFFSET 21
+#define PT_OFFSET 12
+
+// How to recursively index into each page table structure assuming
+// the PML4 is recursively mapped at the 510th entry (0x1FE).
+#define PML_RECURSE 0xFFFFFF7F'BFDFE000
+#define PDP_RECURSE 0xFFFFFF7F'BFC00000
+#define PD_RECURSE 0xFFFFFF7F'80000000
+#define PT_RECURSE 0xFFFFFF00'00000000
+
+namespace {
+
+uint64_t PageAlign(uint64_t addr) { return addr & ~0xFFF; }
+uint64_t* PageAlign(uint64_t* addr) {
+  return reinterpret_cast<uint64_t*>(reinterpret_cast<uint64_t>(addr) & ~0xFFF);
+}
+
+void ZeroOutPage(uint64_t* ptr) {
+  ptr = PageAlign(ptr);
+  for (uint64_t i = 0; i < 512; i++) {
+    ptr[i] = 0;
+  }
+}
+
+uint64_t ShiftForEntryIndexing(uint64_t addr, uint64_t offset) {
+  addr &= ~0xFFFF0000'00000000;
+  addr >>= offset;
+  addr <<= 3;
+  return addr;
+}
+}  // namespace
 
 void InitPaging() {
   uint64_t pml4_addr = 0;
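The *_RECURSE constants fall straight out of the recursive PML4 entry at index 0x1FE (installed in InitializePml4, shown in the next hunk): each level of lookup that is routed back through that slot pins nine more address bits to 0x1FE, and because bit 47 ends up set the address is sign-extended. A small host-side check, not part of the commit, that re-derives all four bases:

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t r = 0x1FE;                       // recursive PML4 index (510)
  const uint64_t sign_ext = 0xFFFF0000'00000000;  // canonical-address upper bits

  // One, two, three, or four trips through the recursive slot fix the
  // PML4/PDP/PD/PT indices to 0x1FE, one level at a time.
  uint64_t pt_recurse = sign_ext | (r << 39);
  uint64_t pd_recurse = pt_recurse | (r << 30);
  uint64_t pdp_recurse = pd_recurse | (r << 21);
  uint64_t pml_recurse = pdp_recurse | (r << 12);

  assert(pt_recurse == 0xFFFFFF00'00000000);   // PT_RECURSE
  assert(pd_recurse == 0xFFFFFF7F'80000000);   // PD_RECURSE
  assert(pdp_recurse == 0xFFFFFF7F'BFC00000);  // PDP_RECURSE
  assert(pml_recurse == 0xFFFFFF7F'BFDFE000);  // PML_RECURSE
  return 0;
}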
@@ -18,3 +54,56 @@ void InitializePml4(uint64_t pml4_physical_addr) {
   uint64_t recursive_entry = pml4_physical_addr | PRESENT_BIT | READ_WRITE_BIT;
   pml4_virtual[0x1FE] = recursive_entry;
 }
+
+void AllocatePageDirectoryPointer(uint64_t addr);
+void AllocatePageDirectory(uint64_t addr);
+void AllocatePageTable(uint64_t addr);
+void AllocatePage(uint64_t addr) { panic("Page Allocation Not Implemented."); }
+
+void EnsureResident(uint64_t addr, uint64_t size) {
+  uint64_t max = addr + size;
+  addr = PageAlign(addr);
+  while (addr < max) {
+    if (!PageLoaded(addr)) {
+      AllocatePage(addr);
+    }
+    addr += 0x1000;
+  }
+}
+
+uint64_t* Pml4Entry(uint64_t addr) {
+  return reinterpret_cast<uint64_t*>(PML_RECURSE |
+                                     ShiftForEntryIndexing(addr, PML_OFFSET));
+}
+
+uint64_t* PageDirectoryPointerEntry(uint64_t addr) {
+  return reinterpret_cast<uint64_t*>(PDP_RECURSE |
+                                     ShiftForEntryIndexing(addr, PDP_OFFSET));
+}
+
+uint64_t* PageDirectoryEntry(uint64_t addr) {
+  return reinterpret_cast<uint64_t*>(PD_RECURSE |
+                                     ShiftForEntryIndexing(addr, PD_OFFSET));
+}
+
+uint64_t* PageTableEntry(uint64_t addr) {
+  return reinterpret_cast<uint64_t*>(PT_RECURSE |
+                                     ShiftForEntryIndexing(addr, PT_OFFSET));
+}
+
+bool PageDirectoryPointerLoaded(uint64_t addr) {
+  return *Pml4Entry(addr) & PRESENT_BIT;
+}
+
+bool PageDirectoryLoaded(uint64_t addr) {
+  return PageDirectoryPointerLoaded(addr) &&
+         (*PageDirectoryPointerEntry(addr) & PRESENT_BIT);
+}
+
+bool PageTableLoaded(uint64_t addr) {
+  return PageDirectoryLoaded(addr) && (*PageDirectoryEntry(addr) & PRESENT_BIT);
+}
+
+bool PageLoaded(uint64_t addr) {
+  return PageTableLoaded(addr) && (*PageTableEntry(addr) & PRESENT_BIT);
+}
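Pml4Entry through PageTableEntry turn a virtual address into the virtual address of the entry that maps it: ShiftForEntryIndexing strips the sign-extension bits, shifts the relevant index bits down, and scales by 8 bytes per entry, and the result is ORed into the matching recursive base. A hypothetical worked example, not part of the commit, using the heap's lower bound from zion.cpp as the sample address:

#include <cassert>
#include <cstdint>

// Same logic as the anonymous-namespace helper in paging_util.cpp.
uint64_t ShiftForEntryIndexing(uint64_t addr, uint64_t offset) {
  addr &= ~0xFFFF0000'00000000;  // drop the sign extension
  addr >>= offset;               // bring the index bits down
  addr <<= 3;                    // 8 bytes per page-table entry
  return addr;
}

int main() {
  const uint64_t addr = 0xFFFFFFFF'40000000;  // first page of the kernel heap
  // Table indices for this address: PML4 511, PDP 509, PD 0, PT 0.
  assert(((addr >> 39) & 0x1FF) == 0x1FF);
  assert(((addr >> 30) & 0x1FF) == 0x1FD);
  assert(((addr >> 21) & 0x1FF) == 0);
  assert(((addr >> 12) & 0x1FF) == 0);
  // Through the recursive window, its page-table entry is addressable at:
  uint64_t pte = 0xFFFFFF00'00000000 /* PT_RECURSE */ |
                 ShiftForEntryIndexing(addr, 12 /* PT_OFFSET */);
  assert(pte == 0xFFFFFF7F'FFA00000);
  return 0;
}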
memory/paging_util.h
@@ -4,3 +4,19 @@
 
 void InitPaging();
 void InitializePml4(uint64_t pml4_physical_addr);
+
+void AllocatePageDirectoryPointer(uint64_t addr);
+void AllocatePageDirectory(uint64_t addr);
+void AllocatePageTable(uint64_t addr);
+void AllocatePage(uint64_t addr);
+void EnsureResident(uint64_t addr, uint64_t size);
+
+uint64_t* Pml4Entry(uint64_t addr);
+uint64_t* PageDirectoryPointerEntry(uint64_t addr);
+uint64_t* PageDirectoryEntry(uint64_t addr);
+uint64_t* PageTableEntry(uint64_t addr);
+
+bool PageDirectoryPointerLoaded(uint64_t addr);
+bool PageDirectoryLoaded(uint64_t addr);
+bool PageTableLoaded(uint64_t addr);
+bool PageLoaded(uint64_t addr);
zion.cpp
@@ -5,6 +5,7 @@
 #include "common/gdt.h"
 #include "debug/debug.h"
 #include "interrupt/interrupt.h"
+#include "memory/kernel_heap.h"
 #include "memory/paging_util.h"
 
 extern "C" void zion() {
@@ -12,12 +13,8 @@ extern "C" void zion() {
   InitIdt();
   InitPaging();
 
-  const limine_memmap_response& resp = boot::GetMemoryMap();
-  dbgln("Base,Length,Type");
-  for (uint64_t i = 0; i < resp.entry_count; i++) {
-    const limine_memmap_entry& entry = *resp.entries[i];
-    dbgln("%m,%x,%u", entry.base, entry.length, entry.type);
-  }
+  KernelHeap heap(0xFFFFFFFF'40000000, 0xFFFFFFFF'80000000);
+  heap.Allocate(1);
 
   while (1)
     ;