Compare commits

...

5 Commits

17 changed files with 134 additions and 82 deletions

View File

@@ -15,6 +15,7 @@ add_executable(zion
loader/init_loader.cpp
memory/kernel_heap.cpp
memory/kernel_stack_manager.cpp
memory/kernel_vmm.cpp
memory/paging_util.cpp
memory/physical_memory.cpp
memory/slab_allocator.cpp

View File

@@ -139,13 +139,18 @@ void ParseMcfg(SdtHeader* rsdt) {
}
void ParseMadt(SdtHeader* rsdt) {
#if K_ACPI_DEBUG
dbgsz(rsdt->signature, 4);
#endif
uint64_t max_addr = reinterpret_cast<uint64_t>(rsdt) + rsdt->length;
MadtHeader* header = reinterpret_cast<MadtHeader*>(rsdt);
#if K_ACPI_DEBUG
dbgln("Local APIC {x}", header->local_apic_address);
gLApicBase = header->local_apic_address;
dbgln("Flags: {x}", header->flags);
#endif
gLApicBase = header->local_apic_address;
MadtEntry* entry = &header->first_entry;
@@ -153,14 +158,18 @@ void ParseMadt(SdtHeader* rsdt) {
switch (entry->type) {
case 0: {
MadtLocalApic* local = reinterpret_cast<MadtLocalApic*>(entry);
#if K_ACPI_DEBUG
dbgln("Local APIC (Proc id, id, flags): {x}, {x}, {x}",
local->processor_id, local->apic_id, local->flags);
#endif
break;
}
case 1: {
MadtIoApic* io = reinterpret_cast<MadtIoApic*>(entry);
#if K_ACPI_DEBUG
dbgln("IO Apic (id, addr, gsi base): {x}, {x}, {x}", io->io_apic_id,
io->io_apic_address, io->global_system_interrupt_base);
#endif
if (gIOApicBase != 0) {
dbgln("More than one IOApic, unhandled");
}
@@ -170,16 +179,20 @@
case 2: {
MadtIoApicInterruptSource* src =
reinterpret_cast<MadtIoApicInterruptSource*>(entry);
#if K_ACPI_DEBUG
dbgln("IO Source (Bus, IRQ, GSI, flags): {x}, {x}, {x}, {x}",
src->bus_source, src->irq_source, src->global_system_interrupt,
src->flags);
#endif
break;
}
case 4: {
MadtLocalApicNonMaskable* lnmi =
reinterpret_cast<MadtLocalApicNonMaskable*>(entry);
#if K_ACPI_DEBUG
dbgln("Local NMI (proc id, flags, lint#): {x}, {x}, {x}",
lnmi->apic_processor_id, lnmi->flags, lnmi->lint_num);
#endif
break;
}
default:

View File

@@ -93,8 +93,6 @@ void InitGdt() {
: "rax");
}
void SetIst1(uint64_t* ist1) {
gTaskStateSegment.ist1 = reinterpret_cast<uint64_t>(ist1);
}
void SetIst1(uint64_t ist1) { gTaskStateSegment.ist1 = ist1; }
void SetRsp0(uint64_t rsp0) { gTaskStateSegment.rsp0 = rsp0; }

View File

@@ -4,6 +4,6 @@
void InitGdt();
void SetIst1(uint64_t* ist1);
void SetIst1(uint64_t ist1);
void SetRsp0(uint64_t rsp0);

View File

@@ -40,11 +40,6 @@ void AddProcPrefix() {
} // namespace
void early_dbgln(const char* str) {
dbgcstr(str);
dbgcstr("\n");
}
void dbgln(const glcr::StringView& str) {
AddProcPrefix();
dbg(str);

View File

@@ -6,20 +6,8 @@
#include "include/ztypes.h"
// Debug line without formatting for
// before allocations are available.
void early_dbgln(const char* str);
void dbgln(const glcr::StringView& str);
// TODO: Write a version of StrFormat that
// accepts a fix-sized buffer for output
// to use in the kernel. That way we make
// dbgln and panic calls without allocation.
// Optionally, add a dbgln_unbounded method for
// things like the Debug syscall where the formatted
// string may be fairly large.
template <typename... Args>
void dbgln(const char* str, Args... args) {
char buffer[256];

View File

@@ -9,3 +9,14 @@ const uint64_t kKernelSlabHeapStart = 0xFFFF'FFFF'4000'0000;
const uint64_t kKernelSlabHeapEnd = 0xFFFF'FFFF'6000'0000;
const uint64_t kKernelBuddyHeapStart = 0xFFFF'FFFF'6000'0000;
const uint64_t kKernelBuddyHeapEnd = 0xFFFF'FFFF'8000'0000;
// Note the kernel code isn't actually this large, we just reserve a lot of
// address space for it. (256 MiB).
const uint64_t kKernelCodeStart = 0xFFFF'FFFF'8000'0000;
const uint64_t kKernelCodeEnd = 0xFFFF'FFFF'9000'0000;
const uint64_t kKernelStackStart = 0xFFFF'FFFF'9000'0000;
const uint64_t kKernelStackEnd = 0xFFFF'FFFF'A000'0000;
const uint64_t kKernelStackSize = 3 * kPageSize;
const uint64_t kKernelStackOffset = 4 * kPageSize;
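
The new stack constants imply a guard gap: each kernel stack maps 3 pages inside a 4-page slot, leaving one page per slot unmapped (effectively a guard page below the next stack). A hypothetical set of compile-time checks, not part of this change, that could be appended to constants.h to document those invariants:

static_assert(kKernelStackSize < kKernelStackOffset,
              "each stack slot must leave at least one unmapped guard page");
static_assert(kKernelCodeEnd <= kKernelStackStart,
              "kernel code and kernel stack regions must not overlap");
static_assert((kKernelStackEnd - kKernelStackStart) % kKernelStackOffset == 0,
              "stack region should hold a whole number of stack slots");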

View File

@@ -2,30 +2,25 @@
#include "common/gdt.h"
#include "debug/debug.h"
#include "interrupt/interrupt.h"
#include "memory/constants.h"
#include "memory/paging_util.h"
#define KERNEL_STACK_START 0xFFFFFFFF'90000000
#define KERNEL_STACK_LIMIT 0xFFFFFFFF'9FFFFFFF
#define KERNEL_STACK_OFFSET 0x4000
KernelStackManager::KernelStackManager()
: next_stack_addr_(kKernelStackStart) {}
KernelStackManager* gKernelStackManager;
void KernelStackManager::Init() {
gKernelStackManager = new KernelStackManager();
SetIst1(gKernelStackManager->AllocateKernelStack());
void KernelStackManager::SetupInterruptStack() {
SetIst1(AllocateKernelStack());
UpdateFaultHandlersToIst1();
}
KernelStackManager::KernelStackManager()
: next_stack_addr_(KERNEL_STACK_START) {}
uint64_t* KernelStackManager::AllocateKernelStack() {
next_stack_addr_ += KERNEL_STACK_OFFSET;
if (next_stack_addr_ >= KERNEL_STACK_LIMIT) {
panic("No more kernelstack space");
uint64_t KernelStackManager::AllocateKernelStack() {
next_stack_addr_ += kKernelStackOffset;
if (next_stack_addr_ >= kKernelStackEnd) {
panic("No more kernel stack space");
}
EnsureResident(next_stack_addr_ - 0x3000, 0x3000);
return reinterpret_cast<uint64_t*>(next_stack_addr_) - 1;
EnsureResident(next_stack_addr_ - kKernelStackSize, kKernelStackSize);
return next_stack_addr_ - 8;
}
void KernelStackManager::FreeKernelStack(uint64_t stack_base) {

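The return value of AllocateKernelStack is unchanged numerically; only its type is. EnsureResident maps the 3 pages below next_stack_addr_, so the highest mapped 8-byte slot is next_stack_addr_ - 8, which is exactly what the old pointer arithmetic produced. A small illustrative snippet (not part of the diff):

#include <cstdint>

// Illustration only: the previous pointer-arithmetic form and the new
// raw-address form return the same value, 8 bytes below the slot top.
uint64_t OldReturn(uint64_t next_stack_addr) {
  return reinterpret_cast<uint64_t>(
      reinterpret_cast<uint64_t*>(next_stack_addr) - 1);
}

uint64_t NewReturn(uint64_t next_stack_addr) {
  return next_stack_addr - 8;  // == OldReturn(next_stack_addr)
}
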
View File

@@ -14,14 +14,15 @@
// class.
class KernelStackManager {
public:
static void Init();
KernelStackManager();
uint64_t* AllocateKernelStack();
void SetupInterruptStack();
uint64_t AllocateKernelStack();
void FreeKernelStack(uint64_t stack_base);
private:
KernelStackManager();
uint64_t next_stack_addr_;
uint64_t freed_stack_cnt_ = 0;
};

View File

@@ -0,0 +1,38 @@
#include "memory/kernel_vmm.h"
#include "debug/debug.h"
#include "memory/paging_util.h"
namespace {
KernelVmm* gKernelVmm = nullptr;
}
KernelVmm::KernelVmm() {
if (gKernelVmm) {
panic("KernelVmm double init.");
}
gKernelVmm = this;
stack_manager_ = glcr::MakeUnique<KernelStackManager>();
stack_manager_->SetupInterruptStack();
}
uint64_t KernelVmm::AcquireSlabHeapRegion(uint64_t slab_size_bytes) {
return gKernelVmm->AcquireSlabHeapRegionInternal(slab_size_bytes);
}
uint64_t KernelVmm::AcquireKernelStack() {
return gKernelVmm->stack_manager_->AllocateKernelStack();
}
uint64_t KernelVmm::AcquireSlabHeapRegionInternal(uint64_t slab_size_bytes) {
uint64_t next_slab = next_slab_heap_page_;
if (next_slab >= kKernelBuddyHeapEnd) {
panic("Slab heap overrun");
}
next_slab_heap_page_ += slab_size_bytes;
// TODO: Consider handling this as a part of a page-fault handler
// rather than auto mapping all over the place.
EnsureResident(next_slab, slab_size_bytes);
return next_slab;
}
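
One possible direction for the TODO above, sketched purely for illustration and not part of this change: back slab-heap pages lazily from the page-fault path instead of eagerly in AcquireSlabHeapRegionInternal. Only the constants, next_slab_heap_page_, and EnsureResident come from this diff; HandleKernelHeapFault is an invented name and would also need a declaration in kernel_vmm.h.

// Hypothetical fault hook for the kernel slab heap (sketch only).
bool KernelVmm::HandleKernelHeapFault(uint64_t vaddr) {
  if (vaddr < kKernelSlabHeapStart || vaddr >= kKernelBuddyHeapEnd) {
    return false;  // Outside the slab/buddy heap range; not ours to handle.
  }
  if (vaddr >= next_slab_heap_page_) {
    return false;  // Address space that was never handed out to a caller.
  }
  // Map only the faulting page instead of the whole region up front.
  EnsureResident(vaddr, 1);
  return true;
}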

zion/memory/kernel_vmm.h Normal file
View File

@@ -0,0 +1,27 @@
#pragma once
#include <stdint.h>
#include "memory/constants.h"
#include "memory/kernel_heap.h"
#include "memory/kernel_stack_manager.h"
class KernelVmm {
public:
KernelVmm();
// TODO: Create a "MemoryRegion" style class to hold the return
// types from this object.
static uint64_t AcquireSlabHeapRegion(uint64_t slab_size_bytes);
static uint64_t AcquireKernelStack();
static void FreeKernelStack(uint64_t);
private:
uint64_t next_slab_heap_page_ = kKernelSlabHeapStart;
KernelHeap heap_;
glcr::UniquePtr<KernelStackManager> stack_manager_;
uint64_t AcquireSlabHeapRegionInternal(uint64_t slab_size_bytes);
};
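
A minimal usage sketch of the new interface (hypothetical call site; the real consumers in this diff are slab_allocator.cpp, address_space.cpp, and zion.cpp). A single KernelVmm is constructed during boot, after which regions are handed out through the static methods; kSlabExample is invented for illustration.

#include "memory/kernel_vmm.h"

void ExampleUsage() {
  // Constructed once during boot (see zion.cpp); the constructor registers
  // the global instance and panics on a second construction.
  // KernelVmm kvmm;

  const uint64_t kSlabExample = 0x1000;  // 4 KiB, matching kSlabSize elsewhere.

  // Bump-allocates address space in the slab heap region and maps it
  // (EnsureResident) before returning.
  uint64_t slab_region = KernelVmm::AcquireSlabHeapRegion(kSlabExample);

  // Returns the top usable address of a freshly mapped kernel stack.
  uint64_t stack_top = KernelVmm::AcquireKernelStack();

  (void)slab_region;
  (void)stack_top;
}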

View File

@@ -153,10 +153,10 @@ void InitBootstrapPageAllocation() {
// if we limit the number of pages this should be fine.
// Currently set to the minimum of 3 for one kernel heap allocation:
// PageDirectory + PageTable + Page
if (entry.type == 0 && entry.length >= 0x5000) {
if (entry.type == 0 && entry.length >= 0x9000) {
gBootstrap.init_page = entry.base;
gBootstrap.next_page = entry.base;
gBootstrap.max_page = entry.base + 0x4000;
gBootstrap.max_page = entry.base + 0x9000;
gBootstrapEnabled = true;
return;
}
@@ -206,7 +206,7 @@ uint64_t AllocatePage() {
}
#if K_PHYS_DEBUG
early_dbgln("[PMM] Boostrap Alloc!");
dbgln("[PMM] Boostrap Alloc!");
#endif
uint64_t page = gBootstrap.next_page;

View File

@@ -2,25 +2,13 @@
#include "debug/debug.h"
#include "memory/constants.h"
#include "memory/kernel_vmm.h"
#include "memory/paging_util.h"
namespace {
// TODO: Store these in a kernel VMM.
const uint64_t kSlabSize = 4 * KiB;
const uint64_t kSlabMask = ~(kSlabSize - 1);
uint64_t gNextSlab = kKernelSlabHeapStart;
uint64_t NextSlab() {
// FIXME: Synchronization.
uint64_t next_slab = gNextSlab;
if (next_slab >= kKernelBuddyHeapEnd) {
panic("Slab heap overrun");
}
gNextSlab += kSlabSize;
EnsureResident(next_slab, 1);
return next_slab;
}
} // namespace
@@ -91,8 +79,8 @@ glcr::ErrorOr<void*> SlabAllocator::Allocate() {
return slab->Allocate();
}
dbgln("Allocating new kernel slab size {}", elem_size_);
void* next_slab = (uint64_t*)NextSlab();
void* next_slab =
reinterpret_cast<void*>(KernelVmm::AcquireSlabHeapRegion(kSlabSize));
slabs_.PushFront(glcr::AdoptPtr(new (next_slab) Slab(elem_size_)));
return slabs_.PeekFront()->Allocate();
}

View File

@@ -1,14 +1,12 @@
#include "object/address_space.h"
#include "debug/debug.h"
#include "memory/kernel_stack_manager.h"
#include "memory/kernel_vmm.h"
#include "memory/paging_util.h"
#include "memory/physical_memory.h"
#define K_VMAS_DEBUG 0
extern KernelStackManager* gKernelStackManager;
glcr::RefPtr<AddressSpace> AddressSpace::ForRoot() {
uint64_t cr3 = 0;
asm volatile("mov %%cr3, %0;" : "=r"(cr3));
@@ -49,8 +47,8 @@ uint64_t AddressSpace::MapInMemoryObject(
return vaddr;
}
uint64_t* AddressSpace::AllocateKernelStack() {
return gKernelStackManager->AllocateKernelStack();
uint64_t AddressSpace::AllocateKernelStack() {
return KernelVmm::AcquireKernelStack();
}
bool AddressSpace::HandlePageFault(uint64_t vaddr) {

View File

@@ -75,7 +75,7 @@ class AddressSpace : public KernelObject {
uint64_t MapInMemoryObject(const glcr::RefPtr<MemoryObject>& mem_obj);
// Kernel Mappings.
uint64_t* AllocateKernelStack();
uint64_t AllocateKernelStack();
// Returns true if the page fault has been resolved.
bool HandlePageFault(uint64_t vaddr);

View File

@@ -30,7 +30,8 @@ glcr::RefPtr<Thread> Thread::Create(Process& proc, uint64_t tid) {
}
Thread::Thread(Process& proc, uint64_t tid) : process_(proc), id_(tid) {
uint64_t* stack_ptr = proc.vmas()->AllocateKernelStack();
uint64_t* stack_ptr =
reinterpret_cast<uint64_t*>(proc.vmas()->AllocateKernelStack());
// 0: rip
*(stack_ptr) = reinterpret_cast<uint64_t>(thread_init);
// 1-4: rax, rcx, rdx, rbx

View File

@@ -10,6 +10,7 @@
#include "loader/init_loader.h"
#include "memory/kernel_heap.h"
#include "memory/kernel_stack_manager.h"
#include "memory/kernel_vmm.h"
#include "memory/paging_util.h"
#include "memory/physical_memory.h"
#include "scheduler/process_manager.h"
@@ -17,15 +18,22 @@
#include "syscall/syscall.h"
extern "C" void zion() {
early_dbgln("[boot] Init GDT & IDT.");
dbgln("[boot] Init GDT & IDT.");
InitGdt();
InitIdt();
early_dbgln("[boot] Init Physical Memory Manager.");
dbgln("[boot] Init Physical Memory Manager.");
phys_mem::InitBootstrapPageAllocation();
KernelHeap heap;
// - Must happen after BootstrapPageAllocation
// due to the heap using page allocations.
// - Must happen after InitIdt() as the kernel
// stack manager will update ist1.
KernelVmm kvmm;
// Must happen after KernelVmm init as it
// will do allocations to build the free list.
phys_mem::InitPhysicalMemoryManager();
phys_mem::DumpRegions();
dbgln("[boot] Memory allocations available now.");
@@ -36,13 +44,6 @@ extern "C" void zion() {
Apic::Init();
ApicTimer::Init();
dbgln("[boot] Init Kernel Stack Manager.");
KernelStackManager::Init();
// The KernelStackManager sets Ist1 as a part of initialization so we can use
// it now.
UpdateFaultHandlersToIst1();
dbgln("[boot] Init syscalls.");
InitSyscall();
@@ -53,9 +54,6 @@ extern "C" void zion() {
dbgln("[boot] Loading sys init program.");
LoadInitProgram();
dbgln("[boot] Allocs during boot:");
heap.DumpDebugData();
dbgln("[boot] Init finished, yielding.");
gScheduler->Enable();
gScheduler->Yield();