Compare commits

...

5 Commits

Author SHA1 Message Date
Drew Galbraith cb41953354 Scheduler with working threads.
Currently only one process but it is a start.
2023-05-18 13:24:02 -07:00
Drew Galbraith 960cbf9519 Add Scheduler wireframe.
Right now does nothing but has containing classes for process and thread
information.
2023-05-18 12:43:53 -07:00
Drew Galbraith de2c96b848 Internalize many paging_util functions. 2023-05-18 11:40:13 -07:00
Drew Galbraith fa2bb4df89 Added a PhysicalMemoryManager class.
Stores a linked list of free blocks of physical memory.
2023-05-18 11:34:45 -07:00
Drew Galbraith 4380590af2 Add new and delete operator implementations to the kernel heap.
For now delete does nothing.
2023-05-18 11:29:44 -07:00
13 changed files with 465 additions and 47 deletions

View File

@ -8,6 +8,10 @@ add_executable(zion
memory/kernel_heap.cpp
memory/paging_util.cpp
memory/physical_memory.cpp
scheduler/context_switch.s
scheduler/process.cpp
scheduler/scheduler.cpp
scheduler/thread.cpp
zion.cpp)
target_include_directories(zion
@ -17,12 +21,14 @@ target_include_directories(zion
# -c -- Don't run the linker (only necessary for the assembler)
# -ffreestanding
# -fno-rtti -- No runtime type information (might mess with polymorphism?)
# -fno-exceptions -- Disable exceptions.
# -nostdlib -- Don't include the standard library.
# -mabi=sysv -- Explicitly specify the ABI since we will rely on it.
# -mno-red-zone -- Don't put data below the stack pointer (clobbered by interrupts).
# -mcmodel=kernel -- Assume the kernel code is running in the higher half.
# -mgeneral-regs-only -- Prevent GCC from using a whole host of nonsense registers (that we have to enable).
set(_Z_COMPILE_FLAGS "${CMAKE_CXX_FLAGS} -c -ffreestanding -nostdlib -mabi=sysv -mno-red-zone -mcmodel=kernel -mgeneral-regs-only")
set(_Z_COMPILE_FLAGS "${CMAKE_CXX_FLAGS} -c -ffreestanding -fno-rtti -fno-exceptions -nostdlib -mabi=sysv -mno-red-zone -mcmodel=kernel -mgeneral-regs-only")
set(_Z_LINK_SCRIPT "${CMAKE_CURRENT_SOURCE_DIR}/linker.ld")

View File

@ -3,8 +3,22 @@
#include "debug/debug.h"
#include "memory/paging_util.h"
namespace {

// Singleton pointer to the kernel heap; set by the KernelHeap constructor.
static KernelHeap* gKernelHeap = nullptr;

// Returns the kernel heap, panicking if one has not been constructed yet.
KernelHeap& GetKernelHeap() {
  if (!gKernelHeap) {
    panic("Kernel Heap not initialized.");
  }
  return *gKernelHeap;
}

}  // namespace
KernelHeap::KernelHeap(uint64_t lower_bound, uint64_t upper_bound)
: next_addr_(lower_bound), upper_bound_(upper_bound) {}
: next_addr_(lower_bound), upper_bound_(upper_bound) {
gKernelHeap = this;
}
void* KernelHeap::Allocate(uint64_t size) {
if (next_addr_ + size >= upper_bound_) {
@ -15,3 +29,8 @@ void* KernelHeap::Allocate(uint64_t size) {
next_addr_ += size;
return reinterpret_cast<void*>(address);
}
// Route kernel-wide operator new/new[] through the global kernel heap.
// NOTE(review): the size parameter is spelled uint64_t rather than size_t;
// identical on x86-64, but size_t is the conforming spelling.
void* operator new(uint64_t size) { return GetKernelHeap().Allocate(size); }
void* operator new[](uint64_t size) { return GetKernelHeap().Allocate(size); }

// For now delete is a no-op, so heap allocations are never reclaimed.
void operator delete(void*, uint64_t) {}

View File

@ -41,6 +41,39 @@ uint64_t ShiftForEntryIndexing(uint64_t addr, uint64_t offset) {
return addr;
}
// The helpers below compute the virtual address of the paging-structure
// entry covering `addr`, using the *_RECURSE constants -- presumably a
// recursively mapped PML4 slot; confirm against the constants' definitions.

// Virtual address of the PML4 entry covering `addr`.
uint64_t* Pml4Entry(uint64_t addr) {
  return reinterpret_cast<uint64_t*>(PML_RECURSE |
                                     ShiftForEntryIndexing(addr, PML_OFFSET));
}

// Virtual address of the page-directory-pointer entry covering `addr`.
uint64_t* PageDirectoryPointerEntry(uint64_t addr) {
  return reinterpret_cast<uint64_t*>(PDP_RECURSE |
                                     ShiftForEntryIndexing(addr, PDP_OFFSET));
}

// Virtual address of the page-directory entry covering `addr`.
uint64_t* PageDirectoryEntry(uint64_t addr) {
  return reinterpret_cast<uint64_t*>(PD_RECURSE |
                                     ShiftForEntryIndexing(addr, PD_OFFSET));
}

// Virtual address of the page-table entry covering `addr`.
uint64_t* PageTableEntry(uint64_t addr) {
  return reinterpret_cast<uint64_t*>(PT_RECURSE |
                                     ShiftForEntryIndexing(addr, PT_OFFSET));
}

// Presence checks walk top-down and short-circuit so lower-level entries
// are only dereferenced when every parent level is present.

bool PageDirectoryPointerLoaded(uint64_t addr) {
  return *Pml4Entry(addr) & PRESENT_BIT;
}

bool PageDirectoryLoaded(uint64_t addr) {
  return PageDirectoryPointerLoaded(addr) &&
         (*PageDirectoryPointerEntry(addr) & PRESENT_BIT);
}

bool PageTableLoaded(uint64_t addr) {
  return PageDirectoryLoaded(addr) && (*PageDirectoryEntry(addr) & PRESENT_BIT);
}
void MapPage(uint64_t virt, uint64_t phys) {
if (PageLoaded(virt)) {
panic("Allocating Over Existing Page: %m", virt);
@ -97,39 +130,6 @@ void EnsureResident(uint64_t addr, uint64_t size) {
}
}
// NOTE(review): the definitions below duplicate Pml4Entry/...Loaded defined
// earlier in this file; in a single translation unit that is a redefinition
// error. This looks like a move/diff artifact -- keep only one copy.

uint64_t* Pml4Entry(uint64_t addr) {
  return reinterpret_cast<uint64_t*>(PML_RECURSE |
                                     ShiftForEntryIndexing(addr, PML_OFFSET));
}

uint64_t* PageDirectoryPointerEntry(uint64_t addr) {
  return reinterpret_cast<uint64_t*>(PDP_RECURSE |
                                     ShiftForEntryIndexing(addr, PDP_OFFSET));
}

uint64_t* PageDirectoryEntry(uint64_t addr) {
  return reinterpret_cast<uint64_t*>(PD_RECURSE |
                                     ShiftForEntryIndexing(addr, PD_OFFSET));
}

uint64_t* PageTableEntry(uint64_t addr) {
  return reinterpret_cast<uint64_t*>(PT_RECURSE |
                                     ShiftForEntryIndexing(addr, PT_OFFSET));
}

bool PageDirectoryPointerLoaded(uint64_t addr) {
  return *Pml4Entry(addr) & PRESENT_BIT;
}

bool PageDirectoryLoaded(uint64_t addr) {
  return PageDirectoryPointerLoaded(addr) &&
         (*PageDirectoryPointerEntry(addr) & PRESENT_BIT);
}

bool PageTableLoaded(uint64_t addr) {
  return PageDirectoryLoaded(addr) && (*PageDirectoryEntry(addr) & PRESENT_BIT);
}

// A page is resident only if every level above it is present as well.
bool PageLoaded(uint64_t addr) {
  return PageTableLoaded(addr) && (*PageTableEntry(addr) & PRESENT_BIT);
}

View File

@ -5,18 +5,7 @@
void InitPaging();
void InitializePml4(uint64_t pml4_physical_addr);

// Allocate backing structures for each level of the paging hierarchy.
void AllocatePageDirectoryPointer(uint64_t addr);
void AllocatePageDirectory(uint64_t addr);
void AllocatePageTable(uint64_t addr);
void AllocatePage(uint64_t addr);

// Maps pages as needed so that [addr, addr + size) is resident.
void EnsureResident(uint64_t addr, uint64_t size);

// NOTE(review): the accessors below are implemented in paging_util.cpp;
// if they are being internalized there, these declarations should be
// removed from this header -- confirm intent.
uint64_t* Pml4Entry(uint64_t addr);
uint64_t* PageDirectoryPointerEntry(uint64_t addr);
uint64_t* PageDirectoryEntry(uint64_t addr);
uint64_t* PageTableEntry(uint64_t addr);
bool PageDirectoryPointerLoaded(uint64_t addr);
bool PageDirectoryLoaded(uint64_t addr);
bool PageTableLoaded(uint64_t addr);
bool PageLoaded(uint64_t addr);

View File

@ -15,6 +15,69 @@ struct BootstrapMemory {
static BootstrapMemory gBootstrap;
static bool gBootstrapEnabled = false;
// Owns all usable physical memory not already consumed by the bootstrap
// allocator. Free memory is tracked as a singly linked list of contiguous
// page runs; pages are handed out from the front.
class PhysicalMemoryManager {
 public:
  // Reads the bootloader-provided memory map and takes
  // control of the available regions.
  PhysicalMemoryManager() {
    const limine_memmap_response& memmap = boot::GetMemoryMap();
    for (uint64_t i = 0; i < memmap.entry_count; i++) {
      const limine_memmap_entry& entry = *memmap.entries[i];
      // NOTE(review): 0 is presumably LIMINE_MEMMAP_USABLE -- prefer the
      // named constant if it is available.
      if (entry.type == 0) {
        uint64_t base = entry.base;
        uint64_t size = entry.length;
        // The bootstrap allocator carved its pages out of this region;
        // only take over the portion it has not already used.
        if (base == gBootstrap.init_page) {
          base = gBootstrap.next_page;
          uint64_t bootstrap_used = gBootstrap.next_page - gBootstrap.init_page;
          dbgln("[PMM] Taking over from bootstrap, used: %x", bootstrap_used);
          size -= bootstrap_used;
        }
        AddMemoryRegion(base, size);
      }
    }
  }

  // Returns the physical address of a free 4 KiB page.
  // Panics when no memory remains.
  uint64_t AllocatePage() {
    if (front_ == nullptr) {
      panic("No available memory regions.");
    }
    if (front_->num_pages == 0) {
      panic("Bad state, empty memory block.");
    }
    uint64_t page = front_->base;
    front_->base += 0x1000;
    front_->num_pages--;
    if (front_->num_pages == 0) {
      MemBlock* temp = front_;
      front_ = front_->next;
      delete temp;
    }
    return page;
  }

  // Returns a single 4 KiB page to the allocator.
  void FreePage(uint64_t page) { AddMemoryRegion(page, 0x1000); }

 private:
  // Pushes a region onto the front of the free list.
  // Regions smaller than one page are dropped: they can never satisfy an
  // allocation, and a zero-page block at the front would otherwise trip
  // the "Bad state" panic in AllocatePage().
  void AddMemoryRegion(uint64_t base, uint64_t size) {
    uint64_t num_pages = size >> 12;
    if (num_pages == 0) {
      return;
    }
    MemBlock* block = new MemBlock{
        .next = front_,
        .base = base,
        .num_pages = num_pages,
    };
    front_ = block;
  }

  // Free-list node: `num_pages` free pages starting at physical `base`.
  struct MemBlock {
    MemBlock* next = nullptr;
    uint64_t base = 0;
    uint64_t num_pages = 0;
  };

  MemBlock* front_ = nullptr;
};

// Set by InitPhysicalMemoryManager() once the kernel heap is available.
static PhysicalMemoryManager* gPmm = nullptr;
}; // namespace
void InitBootstrapPageAllocation() {
@ -36,11 +99,18 @@ void InitBootstrapPageAllocation() {
}
}
// Brings up the real physical memory manager; from here on AllocatePage()
// uses it instead of the bootstrap allocator. Requires operator new (the
// kernel heap) to be functional.
void InitPhysicalMemoryManager() { gPmm = new PhysicalMemoryManager(); }
uint64_t AllocatePage() {
if (gPmm != nullptr) {
return gPmm->AllocatePage();
}
if (!gBootstrapEnabled) {
panic("No Bootstrap Memory Manager");
}
dbgln("[PMM] Boostrap Alloc!");
uint64_t page = gBootstrap.next_page;
if (page == gBootstrap.max_page) {
panic("Bootstrap Memory Manager OOM");

View File

@ -0,0 +1,41 @@
# void context_switch(uint64_t* current_rsp, uint64_t* next_rsp)
#
# Saves the outgoing thread's general-purpose registers and cr3 on its own
# stack, stores the resulting stack pointer through %rdi, loads the incoming
# thread's stack pointer from %rsi, restores its cr3 and registers, then
# returns to whatever return address the incoming stack holds (thread_init
# for freshly created threads).
.global context_switch
context_switch:
    # Save all general-purpose registers on the outgoing stack.
    push %rax
    push %rcx
    push %rdx
    push %rbx
    push %rbp
    push %rsi
    push %rdi
    push %r8
    push %r9
    push %r10
    push %r11
    push %r12
    push %r13
    push %r14
    push %r15

    # Save the outgoing address-space root (cr3); cr3 cannot be pushed
    # directly, so stage it through %rax.
    mov %cr3, %rax
    push %rax

    mov %rsp, (%rdi) # Save rsp to the prev task.
    mov (%rsi), %rsp # Load the next task's rsp.

    # Restore the incoming thread's cr3 (writing cr3 also flushes
    # non-global TLB entries) and its registers in reverse order.
    pop %rax
    mov %rax, %cr3
    pop %r15
    pop %r14
    pop %r13
    pop %r12
    pop %r11
    pop %r10
    pop %r9
    pop %r8
    pop %rdi
    pop %rsi
    pop %rbp
    pop %rbx
    pop %rdx
    pop %rcx
    pop %rax

    retq

View File

@ -0,0 +1,48 @@
#include "scheduler/process.h"
#include "debug/debug.h"
#include "scheduler/thread.h"
namespace {
// Next process id to hand out.
// NOTE(review): never read in this file -- RootProcess hard-codes id 0;
// presumably intended for a future CreateProcess path.
static uint64_t gNextId = 1;
}  // namespace
// Builds the Process object describing the already-running boot flow of
// control: reads the live address-space root out of cr3 rather than
// allocating a new one, and wraps the currently executing code as thread 0.
// Caller takes ownership of the returned process.
Process* Process::RootProcess() {
  uint64_t pml4_addr = 0;
  // Capture the current page-table root from cr3.
  asm volatile("mov %%cr3, %0;" : "=r"(pml4_addr));
  Process* proc = new Process(0, pml4_addr);
  proc->thread_list_front_ = new ThreadEntry{
      .thread = Thread::RootThread(proc),
      .next = nullptr,
  };
  // Thread id 0 is taken by the root thread.
  proc->next_thread_id_ = 1;
  return proc;
}
// Creates a new thread on this process and appends it to the thread list.
// The process retains ownership of the thread.
Thread* Process::CreateThread() {
  Thread* thread = new Thread(this, next_thread_id_++);
  // Walk the links themselves so an empty list is appended to safely; the
  // previous version dereferenced thread_list_front_ unconditionally and
  // crashed if no thread existed yet.
  ThreadEntry** link = &thread_list_front_;
  while (*link != nullptr) {
    link = &(*link)->next;
  }
  *link = new ThreadEntry{
      .thread = thread,
      .next = nullptr,
  };
  return thread;
}
// Returns the thread with id `tid` on this process.
// Panics if no such thread exists.
Thread* Process::GetThread(uint64_t tid) {
  ThreadEntry* entry = thread_list_front_;
  while (entry != nullptr) {
    if (entry->thread->tid() == tid) {
      return entry->thread;
    }
    // Advance the cursor -- the original loop never did, so any lookup
    // that did not match the front entry spun forever.
    entry = entry->next;
  }
  panic("Bad thread access.");
  return nullptr;
}

33
zion/scheduler/process.h Normal file
View File

@ -0,0 +1,33 @@
#pragma once

#include <stdint.h>

// Forward decl due to cyclic dependency.
class Thread;

// A process: an address space root (cr3) plus the list of threads that
// run inside it.
class Process {
 public:
  // Caller takes ownership of returned process.
  static Process* RootProcess();
  // NOTE(review): declared but no definition is visible in process.cpp --
  // confirm it exists or remove the declaration.
  Process();

  uint64_t id() { return id_; }
  uint64_t cr3() { return cr3_; }

  // Creates a new thread owned by this process.
  Thread* CreateThread();
  // Returns the thread with the given id; panics if it does not exist.
  Thread* GetThread(uint64_t tid);

 private:
  Process(uint64_t id, uint64_t cr3) : id_(id), cr3_(cr3) {}

  uint64_t id_;
  uint64_t cr3_;
  uint64_t next_thread_id_ = 0;

  // FIXME: Make a better data structure for this.
  struct ThreadEntry {
    Thread* thread;
    ThreadEntry* next;
  };
  // Default-initialized to null so list walks on a fresh process are safe
  // (previously this pointer was left uninitialized).
  ThreadEntry* thread_list_front_ = nullptr;
};

View File

@ -0,0 +1,114 @@
#include "scheduler/scheduler.h"
#include "debug/debug.h"
namespace sched {
namespace {
extern "C" void context_switch(uint64_t* current_esp, uint64_t* next_esp);
// Simple linked list class with the intent of eventually replacing this with a
// map.
// Simple linked list class with the intent of eventually replacing this
// with a map.
class ProcList {
 public:
  ProcList() {}

  // Takes ownership of `proc` and appends it at the tail.
  void InsertProcess(Process* proc) {
    // Walk the links themselves so the empty and non-empty cases share a
    // single code path.
    ProcEntry** link = &front_;
    while (*link != nullptr) {
      link = &(*link)->next;
    }
    *link = new ProcEntry{
        .proc = proc,
        .next = nullptr,
    };
  }

 private:
  // List node wrapping one owned process.
  struct ProcEntry {
    Process* proc;
    ProcEntry* next;
  };
  ProcEntry* front_ = nullptr;
};
// Round-robin scheduler. The run queue is an intrusive singly linked list
// threaded through Thread::next_thread_, with current_thread_ at the front.
class Scheduler {
 public:
  Scheduler() {
    Process* root = Process::RootProcess();
    current_thread_ = root->GetThread(0);
    // Insert the same process that owns current_thread_. (Calling
    // Process::RootProcess() a second time here constructed a duplicate
    // root process and leaked the one actually being run.)
    proc_list_.InsertProcess(root);

    // FIXME: Don't enqueue threads here.
    Enqueue(root->CreateThread());
  }

  // Until enabled, Yield() is a no-op so early boot code may call it.
  void Enable() { enabled_ = true; }

  Process& CurrentProcess() { return current_thread_->process(); }
  Thread& CurrentThread() { return *current_thread_; }

  // Appends `thread` to the back of the run queue.
  void Enqueue(Thread* thread) {
    Thread* back = current_thread_;
    while (back->next_thread_ != nullptr) {
      back = back->next_thread_;
    }
    back->next_thread_ = thread;
  }

  // Rotates to the next runnable thread and context-switches to it.
  // Interrupts are disabled across the switch and restored afterwards.
  void Yield() {
    if (!enabled_) {
      return;
    }
    asm volatile("cli");
    if (current_thread_->next_thread_ == nullptr) {
      dbgln("No next thread, continue");
      // Re-enable interrupts before bailing out; the early return
      // previously left them disabled after the cli above.
      asm volatile("sti");
      return;
    }
    Thread* prev = current_thread_;
    current_thread_ = current_thread_->next_thread_;
    prev->next_thread_ = nullptr;
    Enqueue(prev);
    context_switch(prev->Rsp0Ptr(), current_thread_->Rsp0Ptr());
    asm volatile("sti");
  }

 private:
  bool enabled_ = false;
  ProcList proc_list_;
  Thread* current_thread_;
};
// The singleton scheduler, created by InitScheduler().
static Scheduler* gScheduler = nullptr;

// Returns the scheduler, panicking if InitScheduler() has not run yet.
Scheduler& GetScheduler() {
  if (!gScheduler) {
    panic("Scheduler not initialized");
  }
  return *gScheduler;
}
} // namespace
// Public sched:: entry points; thin wrappers over the scheduler singleton.
void InitScheduler() { gScheduler = new Scheduler(); }
void EnableScheduler() { GetScheduler().Enable(); }
void Yield() { GetScheduler().Yield(); }
Process& CurrentProcess() { return GetScheduler().CurrentProcess(); }
Thread& CurrentThread() { return GetScheduler().CurrentThread(); }
} // namespace sched

View File

@ -0,0 +1,26 @@
#pragma once

#include "scheduler/process.h"
#include "scheduler/thread.h"

namespace sched {

// Create the scheduler object in a disabled state,
// processes can be added but will not be scheduled.
void InitScheduler();

// Enables the scheduler such that processes will yield on ticks.
void EnableScheduler();

// Rotates to the next runnable thread, if any.
void Yield();

// Scheduler will take ownership
// of the created process.
// NOTE(review): no definitions for InsertProcess or EnqueueThread are
// visible in scheduler.cpp -- calling them will fail to link; confirm.
void InsertProcess(Process* proc);
void EnqueueThread(Thread* thread);

// Accessors for the currently scheduled process/thread.
Process& CurrentProcess();
Thread& CurrentThread();

}  // namespace sched

34
zion/scheduler/thread.cpp Normal file
View File

@ -0,0 +1,34 @@
#include "scheduler/thread.h"
#include "debug/debug.h"
#include "scheduler/process.h"
#include "scheduler/scheduler.h"
namespace {

// First code executed on a newly created thread: the Thread constructor
// installs its address as the return address on the fresh stack, so the
// scheduler's context_switch "returns" here. Never returns.
extern "C" void thread_init() {
  asm("sti");  // Interrupts were disabled across the context switch.
  dbgln("New Thread!");
  sched::Yield();
  panic("End of thread.");
}

}  // namespace
// The root thread wraps the already-running boot flow; no stack is built.
Thread* Thread::RootThread(Process* root_proc) { return new Thread(root_proc); }

// Builds a fresh 4 KiB kernel stack (512 * 8 bytes) pre-loaded with the
// register image context_switch expects, laid out from rsp0_ upward as
// [cr3, r15..r8, rdi, rsi, rbp, rbx, rdx, rcx, rax, rip], so the first
// switch into this thread "returns" into thread_init.
// NOTE(review): the stack allocation is never freed.
Thread::Thread(Process* proc, uint64_t tid) : process_(proc), id_(tid) {
  uint64_t* stack = new uint64_t[512];
  uint64_t* stack_ptr = stack + 511;
  // 0: rip
  *(stack_ptr) = reinterpret_cast<uint64_t>(thread_init);
  // 1-4: rax, rcx, rdx, rbx (left holding whatever the heap contained)
  // 5: rbp -- points one past the rip slot, i.e. the top of the stack.
  *(stack_ptr - 5) = reinterpret_cast<uint64_t>(stack_ptr + 1);
  // 6-15: rsi, rdi, r8, r9, r10, r11, r12, r13, r14, r15
  // 16: cr3 -- the owning process's address-space root.
  *(stack_ptr - 16) = proc->cr3();
  // Resume point: context_switch will pop cr3 first from this address.
  rsp0_ = reinterpret_cast<uint64_t>(stack_ptr - 16);
}

uint64_t Thread::pid() { return process_->id(); }

33
zion/scheduler/thread.h Normal file
View File

@ -0,0 +1,33 @@
#pragma once

#include <stdint.h>

// Forward decl due to cyclic dependency.
class Process;

// A single kernel thread of execution belonging to a Process.
class Thread {
 public:
  // Creates the thread object representing the bootstrap flow of control.
  static Thread* RootThread(Process* root_proc);

  explicit Thread(Process* proc, uint64_t tid);

  uint64_t tid() { return id_; }
  uint64_t pid();
  Process& process() { return *process_; }

  // Address of the saved stack pointer, as consumed by context_switch.
  uint64_t* Rsp0Ptr() { return &rsp0_; }

  // Intrusive run-queue link used by the scheduler; default-initialized to
  // null so a fresh thread is never linked to garbage (the scheduler walks
  // this field until it hits nullptr).
  // FIXME: Probably make this private.
  Thread* next_thread_ = nullptr;

 private:
  // Special constructor for the root thread only.
  Thread(Process* proc) : process_(proc), id_(0) {}

  Process* process_;
  uint64_t id_;
  // Stack pointer to take on resume.
  // Stack will contain the full thread context.
  // (For the root thread this is written by the first context_switch out.)
  uint64_t rsp0_ = 0;
};

View File

@ -6,6 +6,7 @@
#include "memory/kernel_heap.h"
#include "memory/paging_util.h"
#include "memory/physical_memory.h"
#include "scheduler/scheduler.h"
extern "C" void zion() {
InitGdt();
@ -14,7 +15,11 @@ extern "C" void zion() {
phys_mem::InitBootstrapPageAllocation();
KernelHeap heap(0xFFFFFFFF'40000000, 0xFFFFFFFF'80000000);
heap.Allocate(1);
phys_mem::InitPhysicalMemoryManager();
sched::InitScheduler();
sched::EnableScheduler();
sched::Yield();
dbgln("Sleeping!");
while (1)