Manage KernelStacks separately rather than just allocating bytes.
Create a global KernelStackManager that will handle the relevant allocs.
This commit is contained in:
parent
3c3341a90f
commit
f22dd66c8d
|
@ -9,8 +9,10 @@ add_executable(zion
|
|||
loader/elf_loader.cpp
|
||||
loader/init_loader.cpp
|
||||
memory/kernel_heap.cpp
|
||||
memory/kernel_stack_manager.cpp
|
||||
memory/paging_util.cpp
|
||||
memory/physical_memory.cpp
|
||||
memory/virtual_memory.cpp
|
||||
scheduler/context_switch.s
|
||||
scheduler/jump_user_space.s
|
||||
scheduler/process.cpp
|
||||
|
|
|
@ -0,0 +1,31 @@
|
|||
#include "memory/kernel_stack_manager.h"
|
||||
|
||||
#include "debug/debug.h"
|
||||
#include "memory/paging_util.h"
|
||||
|
||||
#define KERNEL_STACK_START 0xFFFFFFFF'90000000
|
||||
#define KERNEL_STACK_LIMIT 0xFFFFFFFF'9FFFFFFF
|
||||
#define KERNEL_STACK_OFFSET 0x4000
|
||||
|
||||
KernelStackManager* gKernelStackManager;
|
||||
|
||||
// Constructs the single global KernelStackManager instance.
// Must be called exactly once during early boot, after the kernel heap
// is available (it uses `new`) and before any kernel stack is requested.
void KernelStackManager::Init() {
  gKernelStackManager = new KernelStackManager();
}
|
||||
|
||||
// Private constructor (instances are only created through Init()).
// Starts handing out stacks from the bottom of the kernel-stack region.
KernelStackManager::KernelStackManager()
    : next_stack_addr_(KERNEL_STACK_START) {}
|
||||
|
||||
uint64_t* KernelStackManager::AllocateKernelStack() {
|
||||
next_stack_addr_ += KERNEL_STACK_OFFSET;
|
||||
if (next_stack_addr_ >= KERNEL_STACK_LIMIT) {
|
||||
panic("No more kernelstack space");
|
||||
}
|
||||
EnsureResident(next_stack_addr_ - 0x3000, 0x3000);
|
||||
return reinterpret_cast<uint64_t*>(next_stack_addr_) - 1;
|
||||
}
|
||||
|
||||
// Records that a kernel stack was released.
//
// NOTE(review): `stack_base` is currently unused -- the stack's pages
// are never unmapped or recycled, so both the address-space slot and
// the physical frames leak. The counter only tracks how much resident
// memory the leaked stacks occupy (12 KiB per stack, hence `* 12`).
// TODO: actually reclaim freed stacks.
void KernelStackManager::FreeKernelStack(uint64_t stack_base) {
  freed_stack_cnt_++;
  dbgln("Freed kernel stacks using %u KiB", freed_stack_cnt_ * 12);
}
|
|
@ -0,0 +1,27 @@
|
|||
#pragma once
|
||||
|
||||
#include <stdint.h>
|
||||
|
||||
// KernelStackManager doles out kernel stacks.
|
||||
//
|
||||
// KernelStacks are in the region:
|
||||
// 0xFFFFFFFF 90000000 - 0xFFFFFFFF 9FFFFFFF
|
||||
//
|
||||
// Each kernel stack is 12 KiB with a 4 Kib page boundary.
|
||||
//
|
||||
// It is global object that is only exposed via internal linkage
|
||||
// to the VirtualMemory class. All kernel stacks should be created through that
|
||||
// class.
|
||||
class KernelStackManager {
 public:
  // Allocates the global instance. Call exactly once during early boot,
  // after the kernel heap is usable.
  static void Init();

  // Returns a pointer to the top usable uint64_t of a freshly mapped
  // 12 KiB stack; the stack grows down from the returned pointer.
  // Panics when the kernel-stack region is exhausted.
  uint64_t* AllocateKernelStack();

  // Marks a stack as released. Currently only counts releases; the
  // underlying memory is not reclaimed yet.
  void FreeKernelStack(uint64_t stack_base);

 private:
  // Only constructible through Init().
  KernelStackManager();
  // Upper bound (exclusive) of the most recently handed-out slot;
  // bumped by a fixed stride on each allocation.
  uint64_t next_stack_addr_;
  // Number of stacks released via FreeKernelStack (they leak for now).
  uint64_t freed_stack_cnt_ = 0;
};
|
|
@ -0,0 +1,18 @@
|
|||
#include "memory/virtual_memory.h"
|
||||
|
||||
#include "memory/kernel_stack_manager.h"
|
||||
|
||||
extern KernelStackManager* gKernelStackManager;
|
||||
|
||||
// Hands out a new kernel stack by delegating to the global
// KernelStackManager. The returned pointer is the top of the stack
// (stacks grow down); requires KernelStackManager::Init() to have run.
uint64_t* VirtualMemory::AllocateKernelStack() {
  return gKernelStackManager->AllocateKernelStack();
}
|
||||
|
||||
// Reserves `size` bytes of the memory-map region and returns the base
// address of the reservation. Simple bump allocator: addresses are
// handed out monotonically and never reused.
// Panics once the cursor passes the region's hard upper bound.
uint64_t VirtualMemory::GetNextMemMapAddr(uint64_t size) {
  uint64_t addr = next_memmap_addr_;
  next_memmap_addr_ += size;
  // Check after the bump: the reservation just made must still fit
  // below the limit.
  if (next_memmap_addr_ >= 0x30'00000000) {
    panic("OOM: Memmap");
  }
  return addr;
}
|
|
@ -42,14 +42,10 @@ class VirtualMemory {
|
|||
VirtualMemory(const VirtualMemory&) = delete;
|
||||
VirtualMemory(VirtualMemory&&) = delete;
|
||||
|
||||
uint64_t GetNextMemMapAddr(uint64_t size) {
|
||||
uint64_t addr = next_memmap_addr_;
|
||||
next_memmap_addr_ += size;
|
||||
if (next_memmap_addr_ >= 0x30'00000000) {
|
||||
panic("OOM: Memmap");
|
||||
}
|
||||
return addr;
|
||||
}
|
||||
uint64_t GetNextMemMapAddr(uint64_t size);
|
||||
|
||||
// Kernel
|
||||
uint64_t* AllocateKernelStack();
|
||||
|
||||
private:
|
||||
uint64_t cr3_ = 0;
|
||||
|
|
|
@ -16,7 +16,7 @@ SharedPtr<Process> Process::RootProcess() {
|
|||
uint64_t pml4_addr = 0;
|
||||
asm volatile("mov %%cr3, %0;" : "=r"(pml4_addr));
|
||||
SharedPtr<Process> proc(new Process(0, pml4_addr));
|
||||
proc->threads_.PushBack(Thread::RootThread(proc.ptr()));
|
||||
proc->threads_.PushBack(Thread::RootThread(*proc));
|
||||
proc->next_thread_id_ = 1;
|
||||
|
||||
return proc;
|
||||
|
@ -28,7 +28,7 @@ Process::Process() : id_(gNextId++), state_(RUNNING) {
|
|||
}
|
||||
|
||||
void Process::CreateThread(uint64_t entry) {
|
||||
Thread* thread = new Thread(this, next_thread_id_++, entry);
|
||||
Thread* thread = new Thread(*this, next_thread_id_++, entry);
|
||||
threads_.PushBack(thread);
|
||||
gScheduler->Enqueue(thread);
|
||||
}
|
||||
|
|
|
@ -19,14 +19,14 @@ extern "C" void thread_init() {
|
|||
|
||||
} // namespace
|
||||
|
||||
SharedPtr<Thread> Thread::RootThread(Process* root_proc) {
|
||||
SharedPtr<Thread> Thread::RootThread(Process& root_proc) {
|
||||
return new Thread(root_proc);
|
||||
}
|
||||
|
||||
Thread::Thread(const SharedPtr<Process>& proc, uint64_t tid, uint64_t entry)
|
||||
Thread::Thread(Process& proc, uint64_t tid, uint64_t entry)
|
||||
: process_(proc), id_(tid), rip_(entry) {
|
||||
uint64_t* stack = new uint64_t[512];
|
||||
uint64_t* stack_ptr = stack + 511;
|
||||
uint64_t* stack_ptr = proc.vmm().AllocateKernelStack();
|
||||
dbgln("Kernel Stack at: %m", stack_ptr);
|
||||
// 0: rip
|
||||
*(stack_ptr) = reinterpret_cast<uint64_t>(thread_init);
|
||||
// 1-4: rax, rcx, rdx, rbx
|
||||
|
@ -34,12 +34,12 @@ Thread::Thread(const SharedPtr<Process>& proc, uint64_t tid, uint64_t entry)
|
|||
*(stack_ptr - 5) = reinterpret_cast<uint64_t>(stack_ptr + 1);
|
||||
// 6-15: rsi, rdi, r8, r9, r10, r11, r12, r13, r14, r15
|
||||
// 16: cr3
|
||||
*(stack_ptr - 16) = proc->cr3();
|
||||
*(stack_ptr - 16) = proc.cr3();
|
||||
rsp0_ = reinterpret_cast<uint64_t>(stack_ptr - 16);
|
||||
rsp0_start_ = reinterpret_cast<uint64_t>(stack_ptr);
|
||||
}
|
||||
|
||||
uint64_t Thread::pid() { return process_->id(); }
|
||||
uint64_t Thread::pid() const { return process_.id(); }
|
||||
|
||||
void Thread::Init() {
|
||||
dbgln("[%u.%u] thread start.", pid(), id_);
|
||||
|
@ -52,6 +52,6 @@ void Thread::Init() {
|
|||
void Thread::Exit() {
|
||||
dbgln("[%u.%u] Exiting", pid(), id_);
|
||||
state_ = FINISHED;
|
||||
process_->CheckState();
|
||||
process_.CheckState();
|
||||
gScheduler->Yield();
|
||||
}
|
||||
|
|
|
@ -15,14 +15,14 @@ class Thread {
|
|||
RUNNABLE,
|
||||
FINISHED,
|
||||
};
|
||||
static SharedPtr<Thread> RootThread(Process* root_proc);
|
||||
static SharedPtr<Thread> RootThread(Process& root_proc);
|
||||
|
||||
explicit Thread(const SharedPtr<Process>& proc, uint64_t tid, uint64_t entry);
|
||||
Thread(Process& proc, uint64_t tid, uint64_t entry);
|
||||
|
||||
uint64_t tid() { return id_; };
|
||||
uint64_t pid();
|
||||
uint64_t tid() const { return id_; };
|
||||
uint64_t pid() const;
|
||||
|
||||
Process& process() { return *process_; }
|
||||
Process& process() { return process_; }
|
||||
|
||||
uint64_t* Rsp0Ptr() { return &rsp0_; }
|
||||
uint64_t Rsp0Start() { return rsp0_start_; }
|
||||
|
@ -37,8 +37,8 @@ class Thread {
|
|||
|
||||
private:
|
||||
// Special constructor for the root thread only.
|
||||
Thread(Process* proc) : process_(proc), id_(0) {}
|
||||
SharedPtr<Process> process_;
|
||||
Thread(Process& proc) : process_(proc), id_(0) {}
|
||||
Process& process_;
|
||||
uint64_t id_;
|
||||
State state_ = RUNNABLE;
|
||||
|
||||
|
|
|
@ -6,6 +6,7 @@
|
|||
#include "interrupt/timer.h"
|
||||
#include "loader/init_loader.h"
|
||||
#include "memory/kernel_heap.h"
|
||||
#include "memory/kernel_stack_manager.h"
|
||||
#include "memory/paging_util.h"
|
||||
#include "memory/physical_memory.h"
|
||||
#include "scheduler/process_manager.h"
|
||||
|
@ -22,6 +23,11 @@ extern "C" void zion() {
|
|||
KernelHeap heap(0xFFFFFFFF'40000000, 0xFFFFFFFF'80000000);
|
||||
phys_mem::InitPhysicalMemoryManager();
|
||||
|
||||
dbgln("[boot] Memory allocations available now.");
|
||||
|
||||
dbgln("[boot] Init Kernel Stack Manager.");
|
||||
KernelStackManager::Init();
|
||||
|
||||
dbgln("[boot] Init syscalls.");
|
||||
InitSyscall();
|
||||
|
||||
|
|
Loading…
Reference in New Issue