Compare commits

...

17 Commits

Author SHA1 Message Date
Drew Galbraith 838ef01a2a [Zion] Add a keyboard interrupt handler and a driver manager. 2023-11-25 11:14:30 -08:00
Drew Galbraith d9a936db09 [Glacier] Fix div0 bug when calling .Contains on empty hashmap. 2023-11-25 11:13:09 -08:00
Drew Galbraith 46ae5de30a [Zion] Free paging structures after process exit. 2023-11-24 17:19:32 -08:00
Drew Galbraith 39ac0216dd [Zion] Free all user space mappings when exiting a process. 2023-11-24 17:08:37 -08:00
Drew Galbraith e50d3f8abc [Zion] Remove all capabilities on process cleanup. 2023-11-24 17:05:11 -08:00
Drew Galbraith 430dc36728 [Zion] Add TODO to separate user stacks for safety. 2023-11-24 16:57:00 -08:00
Drew Galbraith 85b701ce31 [Zion] Clarify kernel stack reuse in a comment. 2023-11-24 16:55:45 -08:00
Drew Galbraith 2dd69f5844 [Zion] Map user stacks in as regular MemoryObjects.
This allows us to easily track the physical memory so it
can be freed when the thread exits. It also simplifies the page fault
handler as it just needs to check regular mappings to find a user stack.
2023-11-24 16:51:35 -08:00
Drew Galbraith ba1b4df702 [Zion] Add more context to GPT faults in the interrupt handler. 2023-11-24 16:49:05 -08:00
Drew Galbraith 7695396980 [Zion] Add released User Stacks to pool for reuse. 2023-11-24 16:16:25 -08:00
Drew Galbraith d1ace374b6 [Zion] Return freed kernel stacks to pool. 2023-11-24 16:04:40 -08:00
Drew Galbraith 8fb5b7c03c [Zion] Move existing proc/thread cleanup calls to the cleanup thread. 2023-11-24 15:40:44 -08:00
Drew Galbraith 8bedc80caf [Zion] Add a max depth to stack unwinds. 2023-11-24 15:04:19 -08:00
Drew Galbraith cb590c96b8 [Zion] Add a proc/thread cleanup thread for future use. 2023-11-24 15:04:03 -08:00
Drew Galbraith 8e4cd1562f [Mammoth] Merge freed slices in buddy allocator. 2023-11-24 14:10:55 -08:00
Drew Galbraith 2a3d384336 [Glacier] Don't move const-refs in hashmap. 2023-11-24 14:10:16 -08:00
Drew Galbraith 7715a5898b [Mammoth] Naively free buddy allocated blocks.
We don't yet attempt to merge them with nearby blocks.
Currently this reduces the number of 2 page blocks we allocate down from
40 to 29.
2023-11-24 09:01:25 -08:00
35 changed files with 427 additions and 112 deletions

View File

@ -92,6 +92,9 @@ const V& HashMap<K, V, H>::at(const K& key) const {
template <typename K, typename V, class H>
bool HashMap<K, V, H>::Contains(const K& key) const {
if (data_.size() == 0) {
return false;
}
uint64_t hc = H()(key);
auto& ll = data_[hc % data_.size()];
@ -115,7 +118,7 @@ ErrorCode HashMap<K, V, H>::Insert(const K& key, const V& value) {
return ALREADY_EXISTS;
}
}
ll.PushFront({Move(key), Move(value)});
ll.PushFront({key, value});
size_++;
return OK;
}

View File

@ -31,6 +31,11 @@ struct BuddySlot {
uint64_t size;
};
// Returns the buddy of a block.
//
// In a buddy allocator a block of size S and its buddy differ only in the
// address bit equal to S, so XOR-ing the slot's address with its size yields
// the buddy's address.
BuddySlot* GetBuddy(BuddySlot* slot) {
  uint64_t slot_addr = reinterpret_cast<uint64_t>(slot);
  uint64_t buddy_addr = slot_addr ^ slot->size;
  return reinterpret_cast<BuddySlot*>(buddy_addr);
}
uint64_t NeededSize(uint64_t size) {
uint64_t needed = size + sizeof(BuddySlot);
// Start at 32 because sizeof(BuddySlot) is already 24;
@ -90,6 +95,32 @@ class BuddyAllocator {
return ptr;
}
// Returns a buddy-allocated block to the free list, coalescing it with its
// buddy where possible. Thread-safe via the allocator mutex.
void Free(void* ptr) {
  check(ZMutexLock(mutex_cap_));
  // The BuddySlot header lives immediately before the user pointer.
  BuddySlot* slot = ((BuddySlot*)ptr) - 1;
  // An allocated slot is expected to carry null links; a linked slot is
  // already on the free list.
  if (slot->next || slot->prev) {
    crash("Double free", glcr::INTERNAL);
  }
  BuddySlot* buddy = GetBuddy(slot);
  // Merge upward while the block is below 0x2000 bytes and the buddy is both
  // free and unsplit (same size as us).
  // NOTE(review): "buddy is free" is inferred from (next || prev) being
  // non-null, but a free block that is the *only* node on the list has both
  // links null and would be misread as allocated, preventing a legal merge --
  // confirm against the free-list invariants.
  while ((slot->size < 0x2000) && (buddy->next || buddy->prev) &&
         (buddy->size == slot->size)) {
    // Buddy is free! Merge!.
    Remove(buddy);
    // Keep the lower-addressed half as the merged block's header.
    if (buddy < slot) {
      slot = buddy;
    }
    slot->size *= 2;
    buddy = GetBuddy(slot);
  }
  // Push the (possibly merged) block onto the front of the free list.
  // NOTE(review): slot->prev is assumed to already be null here (checked
  // above for the original slot; presumably cleared by Remove() for merged
  // buddies) -- TODO confirm.
  if (free_front_) {
    free_front_->prev = slot;
  }
  slot->next = free_front_;
  free_front_ = slot;
  check(ZMutexRelease(mutex_cap_));
}
private:
BuddySlot* free_front_ = nullptr;
z_cap_t mutex_cap_ = 0;
@ -99,6 +130,9 @@ class BuddyAllocator {
BuddySlot* slot = reinterpret_cast<BuddySlot*>(vaddr);
slot->prev = nullptr;
slot->next = free_front_;
if (free_front_) {
free_front_->prev = slot;
}
free_front_ = slot;
slot->size = 0x2000;
}
@ -109,8 +143,7 @@ class BuddyAllocator {
}
slot->size /= 2;
BuddySlot* new_slot = reinterpret_cast<BuddySlot*>(
reinterpret_cast<uint64_t>(slot) ^ slot->size);
BuddySlot* new_slot = GetBuddy(slot);
new_slot->size = slot->size;
new_slot->next = slot->next;
new_slot->prev = slot;
@ -157,11 +190,13 @@ void* Allocate(uint64_t size) {
return ptr;
}
void Free(void* ptr) { gAllocator.Free(ptr); }
} // namespace
[[nodiscard]] void* operator new(uint64_t size) { return Allocate(size); }
[[nodiscard]] void* operator new[](uint64_t size) { return Allocate(size); }
void operator delete(void*, uint64_t) {}
void operator delete[](void*) {}
void operator delete[](void*, uint64_t) {}
void operator delete(void* ptr, uint64_t) { Free(ptr); }
void operator delete[](void* ptr) { Free(ptr); }
void operator delete[](void* ptr, uint64_t) { Free(ptr); }

View File

@ -183,7 +183,22 @@ glcr::ErrorCode AhciDriver::RegisterIrq() {
if (pci_device_header_->interrupt_pin == 0) {
crash("Can't register IRQ without a pin num", glcr::INVALID_ARGUMENT);
}
uint64_t irq_num = Z_IRQ_PCI_BASE + pci_device_header_->interrupt_pin - 1;
uint64_t irq_num = 0;
switch (pci_device_header_->interrupt_pin) {
case 1:
irq_num = kZIrqPci1;
break;
case 2:
irq_num = kZIrqPci2;
break;
case 3:
irq_num = kZIrqPci3;
break;
case 4:
irq_num = kZIrqPci4;
break;
}
RET_ERR(ZIrqRegister(irq_num, &irq_port_cap_));
irq_thread_ = Thread(interrupt_thread, this);
ahci_hba_->global_host_control |= kGhc_InteruptEnable;

View File

@ -10,6 +10,7 @@ add_executable(zion
debug/debug.cpp
interrupt/apic.cpp
interrupt/apic_timer.cpp
interrupt/driver_manager.cpp
interrupt/interrupt.cpp
interrupt/interrupt_enter.s
interrupt/timer.cpp
@ -34,6 +35,7 @@ add_executable(zion
object/reply_port.cpp
object/semaphore.cpp
object/thread.cpp
scheduler/cleanup.cpp
scheduler/context_switch.s
scheduler/jump_user_space.s
scheduler/process_manager.cpp

View File

@ -146,8 +146,8 @@ void ParseMadt(SdtHeader* rsdt) {
MadtHeader* header = reinterpret_cast<MadtHeader*>(rsdt);
#if K_ACPI_DEBUG
dbgln("Local APIC {x}", header->local_apic_address);
dbgln("Flags: {x}", header->flags);
dbgln("Local APIC {x}", +header->local_apic_address);
dbgln("Flags: {x}", +header->flags);
#endif
gLApicBase = header->local_apic_address;
@ -160,7 +160,7 @@ void ParseMadt(SdtHeader* rsdt) {
MadtLocalApic* local = reinterpret_cast<MadtLocalApic*>(entry);
#if K_ACPI_DEBUG
dbgln("Local APIC (Proc id, id, flags): {x}, {x}, {x}",
local->processor_id, local->apic_id, local->flags);
local->processor_id, local->apic_id, +local->flags);
#endif
break;
}
@ -168,7 +168,7 @@ void ParseMadt(SdtHeader* rsdt) {
MadtIoApic* io = reinterpret_cast<MadtIoApic*>(entry);
#if K_ACPI_DEBUG
dbgln("IO Apic (id, addr, gsi base): {x}, {x}, {x}", io->io_apic_id,
io->io_apic_address, io->global_system_interrupt_base);
+io->io_apic_address, +io->global_system_interrupt_base);
#endif
if (gIOApicBase != 0) {
dbgln("More than one IOApic, unhandled");
@ -181,8 +181,8 @@ void ParseMadt(SdtHeader* rsdt) {
reinterpret_cast<MadtIoApicInterruptSource*>(entry);
#if K_ACPI_DEBUG
dbgln("IO Source (Bus, IRQ, GSI, flags): {x}, {x}, {x}, {x}",
src->bus_source, src->irq_source, src->global_system_interrupt,
src->flags);
src->bus_source, src->irq_source, +src->global_system_interrupt,
+src->flags);
#endif
break;
}
@ -191,7 +191,7 @@ void ParseMadt(SdtHeader* rsdt) {
reinterpret_cast<MadtLocalApicNonMaskable*>(entry);
#if K_ACPI_DEBUG
dbgln("Local NMI (proc id, flags, lint#): {x}, {x}, {x}",
lnmi->apic_processor_id, lnmi->flags, lnmi->lint_num);
lnmi->apic_processor_id, +lnmi->flags, lnmi->lint_num);
#endif
break;
}
@ -254,7 +254,7 @@ void ProbeRsdp() {
#if K_ACPI_DEBUG
dbgln("ACPI Ver {}", rsdp->revision);
dbgln("RSDT Addr: {x}", rsdp->rsdt_addr);
dbgln("RSDT Addr: {x}", +rsdp->rsdt_addr);
#endif
ProbeRsdt(reinterpret_cast<SdtHeader*>(rsdp->rsdt_addr));
@ -270,7 +270,7 @@ void ProbeRsdp() {
}
#if K_ACPI_DEBUG
dbgln("XSDT Addr: {x}", rsdp->xsdt_addr);
dbgln("XSDT Addr: {x}", +rsdp->xsdt_addr);
#endif
}

View File

@ -33,3 +33,13 @@ glcr::RefPtr<Capability> CapabilityTable::ReleaseCapability(uint64_t id) {
(void)capabilities_.Delete(id);
return cap;
}
// Drops every capability this table has handed out.
//
// Ids are currently allocated sequentially below next_cap_id_ (see the
// randomization TODO in the header), so sweeping that range clears the
// table; Delete() is a no-op for ids that were already released. Warns if
// anything survives the sweep.
void CapabilityTable::ReleaseAll() {
  for (uint64_t cap_id = 0; cap_id < next_cap_id_; ++cap_id) {
    (void)capabilities_.Delete(cap_id);
  }
  if (capabilities_.size() != 0) {
    dbgln("Capabilities still remaining after clear: {x}",
          capabilities_.size());
  }
}

View File

@ -25,6 +25,8 @@ class CapabilityTable {
glcr::RefPtr<Capability> GetCapability(uint64_t id);
glcr::RefPtr<Capability> ReleaseCapability(uint64_t id);
void ReleaseAll();
private:
glcr::RefPtr<Mutex> lock_ = Mutex::Create();
// TODO: Do some randomization.

View File

@ -9,12 +9,14 @@ bool IsValid(uint64_t* rbp) { return rbp && *rbp != kStackBaseSentinel; }
} // namespace
void StackUnwind(uint64_t rbp) {
uint64_t depth_limit = 10;
dbgln("-- Begin Stack --");
uint64_t* rbp_ptr = reinterpret_cast<uint64_t*>(rbp);
while (IsValid(rbp_ptr)) {
while (IsValid(rbp_ptr) && depth_limit > 0) {
uint64_t rip = *(rbp_ptr + 1);
dbgln("RIP: {x}", rip);
rbp_ptr = reinterpret_cast<uint64_t*>(*rbp_ptr);
depth_limit--;
}
dbgln("-- End Stack --");
}

View File

@ -50,8 +50,6 @@ const uint64_t kZionReplyPortSend = 0x63;
const uint64_t kZionReplyPortRecv = 0x64;
const uint64_t kZionEndpointCall = 0x65;
#define Z_IRQ_PCI_BASE 0x30
// Capability Calls
const uint64_t kZionCapDuplicate = 0x70;
const uint64_t kZionCapRelease = 0x71;
@ -67,6 +65,13 @@ const uint64_t kZionSemaphoreSignal = 0x85;
// Debugging Calls.
const uint64_t kZionDebug = 0x1'0000;
// Irq Types
const uint64_t kZIrqKbd = 0x22;
const uint64_t kZIrqPci1 = 0x30;
const uint64_t kZIrqPci2 = 0x31;
const uint64_t kZIrqPci3 = 0x32;
const uint64_t kZIrqPci4 = 0x33;
/* ------------------------------
* Capability Types
* ------------------------------*/

View File

@ -136,6 +136,9 @@ Apic::Apic(const ApicConfiguration& config)
// FIXME: Get this offset from ACPI.
SetIoDoubleReg(0x14, 0x20 | APIC_MASK);
// Map Keyboard
SetIoDoubleReg(0x12, 0x22);
// For now set these based on the presets in the following spec.
// http://web.archive.org/web/20161130153145/http://download.intel.com/design/chipsets/datashts/29056601.pdf
// FIXME: However in the future we should likely use the MADT for legacy

View File

@ -0,0 +1,26 @@
#include "interrupt/driver_manager.h"
#include "debug/debug.h"
DriverManager* gDriverManager = nullptr;
DriverManager& DriverManager::Get() { return *gDriverManager; }
DriverManager::DriverManager() { gDriverManager = this; }
// Delivers an IRQ payload to the driver port registered for irq_num.
//
// If no driver has registered for the line, the message is dropped with a
// warning rather than treated as an error.
void DriverManager::WriteMessage(uint64_t irq_num, IpcMessage&& message) {
  if (driver_map_.Contains(irq_num)) {
    driver_map_.at(irq_num)->Send(glcr::Move(message));
  } else {
    dbgln("WARN IRQ for {x} with no registered driver", irq_num);
  }
}
// Binds `port` as the sole listener for irq_num.
//
// Returns ALREADY_EXISTS if a driver is already bound to the line; otherwise
// forwards the result of inserting the mapping.
glcr::ErrorCode DriverManager::RegisterListener(uint64_t irq_num,
                                                glcr::RefPtr<Port> port) {
  return driver_map_.Contains(irq_num) ? glcr::ALREADY_EXISTS
                                       : driver_map_.Insert(irq_num, port);
}

View File

@ -0,0 +1,22 @@
#pragma once
#include <glacier/container/hash_map.h>
#include <glacier/memory/ref_ptr.h>
#include "object/port.h"
// Routes hardware interrupt payloads to the userspace driver port registered
// for the corresponding IRQ line.
//
// A single instance is expected: the constructor publishes `this` to a global
// that Get() returns.
class DriverManager {
 public:
  static DriverManager& Get();

  DriverManager();

  // Non-copyable / non-movable: the global pointer registered in the
  // constructor must remain valid.
  DriverManager(const DriverManager&) = delete;
  DriverManager(DriverManager&&) = delete;

  // Sends `message` to the port registered for irq_num; drops it with a
  // warning if no driver has registered for the line.
  void WriteMessage(uint64_t irq_num, IpcMessage&& message);

  // Binds a port to an IRQ line; fails with ALREADY_EXISTS if the line
  // already has a listener.
  glcr::ErrorCode RegisterListener(uint64_t irq_num, glcr::RefPtr<Port> port);

 private:
  glcr::HashMap<uint64_t, glcr::RefPtr<Port>> driver_map_;
};

View File

@ -7,6 +7,7 @@
#include "debug/debug.h"
#include "interrupt/apic.h"
#include "interrupt/apic_timer.h"
#include "interrupt/driver_manager.h"
#include "memory/kernel_heap.h"
#include "memory/physical_memory.h"
#include "scheduler/scheduler.h"
@ -110,6 +111,8 @@ extern "C" void interrupt_protection_fault(InterruptFrame* frame) {
}
dbgln("Index: {}", err >> 3);
dbgln("RIP: {x}", frame->rip);
dbgln("RAX: {x}, RBX: {x}, RCX: {x}, RDX: {x}", frame->rax, frame->rbx,
frame->rcx, frame->rdx);
dbgln("RSP: {x}", frame->rsp);
StackUnwind(frame->rbp);
@ -194,27 +197,39 @@ extern "C" void interrupt_apic_timer(InterruptFrame*) {
gScheduler->Preempt();
}
glcr::RefPtr<Port> pci1_port;
extern "C" void isr_keyboard();
// Keyboard IRQ handler: forwards the pending scancode to the registered
// keyboard driver via the driver manager.
extern "C" void interrupt_keyboard(InterruptFrame*) {
  glcr::Array<uint8_t> data(1);
  // 0x60 is the PS/2 controller's data port; read the scancode that raised
  // this interrupt.
  data[0] = inb(0x60);
  IpcMessage msg{.data = glcr::Move(data)};
  DriverManager::Get().WriteMessage(kZIrqKbd, glcr::Move(msg));
  // Ack the interrupt at the local APIC so further interrupts are delivered.
  gApic->SignalEOI();
}
extern "C" void isr_pci1();
// PCI line 1 IRQ handler: wake the registered driver with an empty message,
// then ack the interrupt at the local APIC.
extern "C" void interrupt_pci1(InterruptFrame*) {
  DriverManager::Get().WriteMessage(kZIrqPci1, {});
  gApic->SignalEOI();
}

extern "C" void isr_pci2();
// PCI line 2 IRQ handler.
// NOTE(review): unlike line 1 this still logs every interrupt; consider
// removing the dbgln once line 2 has a real consumer.
extern "C" void interrupt_pci2(InterruptFrame*) {
  DriverManager::Get().WriteMessage(kZIrqPci2, {});
  dbgln("Interrupt PCI line 2");
  gApic->SignalEOI();
}

extern "C" void isr_pci3();
// PCI line 3 IRQ handler (logs each interrupt -- see note on line 2).
extern "C" void interrupt_pci3(InterruptFrame*) {
  DriverManager::Get().WriteMessage(kZIrqPci3, {});
  dbgln("Interrupt PCI line 3");
  gApic->SignalEOI();
}

extern "C" void isr_pci4();
// PCI line 4 IRQ handler (logs each interrupt -- see note on line 2).
extern "C" void interrupt_pci4(InterruptFrame*) {
  DriverManager::Get().WriteMessage(kZIrqPci4, {});
  dbgln("Interrupt PCI line 4");
  gApic->SignalEOI();
}
@ -228,6 +243,7 @@ void InitIdt() {
gIdt[0x20] = CreateDescriptor(isr_timer);
gIdt[0x21] = CreateDescriptor(isr_apic_timer);
gIdt[0x22] = CreateDescriptor(isr_keyboard);
gIdt[0x30] = CreateDescriptor(isr_pci1);
gIdt[0x31] = CreateDescriptor(isr_pci2);
@ -251,5 +267,3 @@ void UpdateFaultHandlersToIst1() {
};
asm volatile("lidt %0" ::"m"(idtp));
}
void RegisterPciPort(const glcr::RefPtr<Port>& port) { pci1_port = port; }

View File

@ -7,5 +7,3 @@
void InitIdt();
void UpdateFaultHandlersToIst1();
void RegisterPciPort(const glcr::RefPtr<Port>& port);

View File

@ -63,6 +63,7 @@ isr_handler fpe_fault
isr_handler timer
isr_handler apic_timer
isr_handler keyboard
isr_handler pci1
isr_handler pci2

View File

@ -3,6 +3,7 @@
#include <stdint.h>
const uint64_t KiB = 0x400;
const uint64_t MiB = KiB * KiB;
const uint64_t kPageSize = 4 * KiB;
const uint64_t kKernelSlabHeapStart = 0xFFFF'FFFF'4000'0000;
@ -20,3 +21,9 @@ const uint64_t kKernelStackEnd = 0xFFFF'FFFF'A000'0000;
const uint64_t kKernelStackSize = 3 * kPageSize;
const uint64_t kKernelStackOffset = 4 * kPageSize;
const uint64_t kUserStackMin = 0x00007FF0'00000000;
const uint64_t kUserStackMax = 0x00008000'00000000;
const uint64_t kUserStackSize = MiB;
const uint64_t kUserSpaceMax = 0x00008000'00000000;

View File

@ -15,6 +15,9 @@ void KernelStackManager::SetupInterruptStack() {
}
uint64_t KernelStackManager::AllocateKernelStack() {
if (!free_stacks_.empty()) {
return free_stacks_.PopFront();
}
next_stack_addr_ += kKernelStackOffset;
if (next_stack_addr_ >= kKernelStackEnd) {
panic("No more kernel stack space");
@ -24,6 +27,9 @@ uint64_t KernelStackManager::AllocateKernelStack() {
}
// Returns a kernel stack to the free pool so AllocateKernelStack() can hand
// the same address back out; the stack's pages stay mapped.
void KernelStackManager::FreeKernelStack(uint64_t stack_base) {
  freed_stack_cnt_++;
  // NOTE(review): this logs on every single free -- likely debug output that
  // should be removed or gated once stack reuse is proven out.
  dbgln("Freed kernel stacks using {} KiB", freed_stack_cnt_ * 12);
  // TODO: Validate this value.
  // Sanity check on the expected low bits of a stack base address --
  // presumably stacks sit just below a 0x4000 boundary; confirm the mask.
  if ((stack_base & 0x3FF8) != 0x3FF8) {
    dbgln("Odd kernel stack free {x}", stack_base);
  }
  free_stacks_.PushFront(stack_base);
}

View File

@ -1,5 +1,6 @@
#pragma once
#include <glacier/container/linked_list.h>
#include <stdint.h>
// KernelStackManager doles out kernel stacks.
@ -20,9 +21,12 @@ class KernelStackManager {
uint64_t AllocateKernelStack();
// For now we just reuse kernel stacks as-is (don't free the physical memory
// or unmap/remap the pages in virtual memory.)
void FreeKernelStack(uint64_t stack_base);
private:
uint64_t next_stack_addr_;
uint64_t freed_stack_cnt_ = 0;
glcr::LinkedList<uint64_t> free_stacks_;
};

View File

@ -87,6 +87,21 @@ uint64_t CurrCr3() {
return pml4_addr;
}
// Recursively frees the physical pages that back a page-table hierarchy.
//
// struct_phys: physical address of a paging structure, accessed through the
//   higher-half direct map.
// level: height above the leaf page tables (2 = PDPT, 1 = PD, 0 = PT).
//
// Only the paging structures themselves are freed here; the data pages they
// map are released separately before this runs.
void CleanupPageStructure(uint64_t struct_phys, uint64_t level) {
  uint64_t* struct_virtual =
      reinterpret_cast<uint64_t*>(boot::GetHigherHalfDirectMap() + struct_phys);
  if (level > 0) {
    // Every x86-64 paging structure holds 512 8-byte entries. (Scanning only
    // the first 256, as this code previously did, leaked the structures
    // referenced by the upper half of each table.)
    for (uint16_t i = 0; i < 512; i++) {
      if (struct_virtual[i] & PRESENT_BIT) {
        CleanupPageStructure(struct_virtual[i] & ~0xFFF, level - 1);
      }
    }
  }
  phys_mem::FreePage(struct_phys);
}
} // namespace
void InitializePml4(uint64_t pml4_physical_addr) {
@ -105,6 +120,20 @@ void InitializePml4(uint64_t pml4_physical_addr) {
pml4_virtual[Pml4Index(hhdm)] = *Pml4Entry(curr_cr3, hhdm);
}
// Frees every paging structure reachable from a process's PML4, then the
// PML4 page itself. Kernel mappings (upper half) are shared and untouched.
void CleanupPml4(uint64_t pml4_physical_addr) {
  uint64_t* pml4_virtual = reinterpret_cast<uint64_t*>(
      boot::GetHigherHalfDirectMap() + pml4_physical_addr);
  // Iterate the first half of the pml4 as it contains user-space mappings.
  // User space ends at kUserSpaceMax (2^47), i.e. PML4 entries [0, 256).
  // The previous loop stopped at 128 with a uint8_t index, which skipped
  // entries 128-255 -- leaking their paging structures, including the
  // user-stack region at the very top of user space -- and a uint8_t index
  // could never reach a bound of 256 anyway.
  for (uint16_t i = 0; i < 256; i++) {
    if (pml4_virtual[i] & PRESENT_BIT) {
      CleanupPageStructure(pml4_virtual[i] & ~0xFFF, 2);
    }
  }
  phys_mem::FreePage(pml4_physical_addr);
}
void MapPage(uint64_t cr3, uint64_t vaddr, uint64_t paddr) {
vaddr = PageAlign(vaddr);
paddr = PageAlign(paddr);

View File

@ -5,6 +5,7 @@
#include "object/process.h"
void InitializePml4(uint64_t pml4_physical_addr);
void CleanupPml4(uint64_t pml4_physical_addr);
void MapPage(uint64_t cr3, uint64_t vaddr, uint64_t paddr);

View File

@ -4,31 +4,21 @@
#include "memory/paging_util.h"
uint64_t UserStackManager::NewUserStack() {
if (!freed_stacks_.empty()) {
return freed_stacks_.PopFront();
}
next_stack_ -= kUserStackSize;
uint64_t stack = next_stack_;
next_stack_ -= kStackSize;
if (stack <= kStackMin) {
if (stack <= kUserStackMin) {
panic("Out of user stacks!");
}
if (stack == kStackMax) {
// Add a additional page boudary between kernel and user space.
stack -= 0x1000;
}
EnsureResident(stack - 1, 1);
return stack;
}
void UserStackManager::FreeUserStack(uint64_t stack_ptr) {
freed_stacks_++;
dbgln("{} freed user stacks", freed_stacks_);
}
bool UserStackManager::IsValidStack(uint64_t vaddr) {
if (vaddr < next_stack_ || vaddr > (kStackMax - 0x1000)) {
return false;
void UserStackManager::FreeUserStack(uint64_t stack_base) {
if (stack_base & (kUserStackSize - 1)) {
dbgln("WARN freeing unaligned user stack {x}", stack_base);
}
// Checks if the address is in the first page of the stack.
if (vaddr & 0xFF000) {
return true;
}
return false;
freed_stacks_.PushBack(stack_base);
}

View File

@ -1,7 +1,10 @@
#pragma once
#include <glacier/container/linked_list.h>
#include <stdint.h>
#include "memory/constants.h"
// Per-process class to manage user stacks.
//
// User stacks live at
@ -17,17 +20,13 @@ class UserStackManager {
UserStackManager() {}
UserStackManager(const UserStackManager&) = delete;
// TODO: Create some padding between user stacks to
// detect stack overruns.
uint64_t NewUserStack();
void FreeUserStack(uint64_t stack_ptr);
// Used to check if we should page in this address.
bool IsValidStack(uint64_t vaddr);
void FreeUserStack(uint64_t stack_base);
private:
const uint64_t kStackMax = 0x00008000'00000000;
const uint64_t kStackMin = 0x00007FF0'00000000;
const uint64_t kStackSize = 0x100000;
uint64_t next_stack_ = kUserStackMax;
uint64_t next_stack_ = kStackMax;
uint64_t freed_stacks_ = 0;
glcr::LinkedList<uint64_t> freed_stacks_;
};

View File

@ -18,12 +18,20 @@ AddressSpace::AddressSpace() {
InitializePml4(cr3_);
}
uint64_t AddressSpace::AllocateUserStack() {
return user_stacks_.NewUserStack();
AddressSpace::~AddressSpace() { CleanupPml4(cr3_); }
glcr::ErrorOr<uint64_t> AddressSpace::AllocateUserStack() {
uint64_t base = user_stacks_.NewUserStack();
auto mem_object = glcr::StaticCastRefPtr<MemoryObject>(
glcr::MakeRefCounted<VariableMemoryObject>(kUserStackSize));
RET_ERR(MapInMemoryObject(base, mem_object));
return base;
}
void AddressSpace::FreeUserStack(uint64_t rsp) {
return user_stacks_.FreeUserStack(rsp);
glcr::ErrorCode AddressSpace::FreeUserStack(uint64_t base) {
RET_ERR(FreeAddressRange(base, base + kUserStackSize));
user_stacks_.FreeUserStack(base);
return glcr::OK;
}
uint64_t AddressSpace::GetNextMemMapAddr(uint64_t size, uint64_t align) {
@ -70,11 +78,6 @@ bool AddressSpace::HandlePageFault(uint64_t vaddr) {
return false;
}
if (user_stacks_.IsValidStack(vaddr)) {
MapPage(cr3_, vaddr, phys_mem::AllocatePage());
return true;
}
auto offset_or = mapping_tree_.GetPhysicalPageAtVaddr(vaddr);
if (!offset_or.ok()) {
return false;

View File

@ -62,11 +62,15 @@ class AddressSpace : public KernelObject {
AddressSpace(const AddressSpace&) = delete;
AddressSpace(AddressSpace&&) = delete;
// Deconstructing an address space will free all paging structures associated
// with this address space.
~AddressSpace();
uint64_t cr3() { return cr3_; }
// User Mappings.
uint64_t AllocateUserStack();
void FreeUserStack(uint64_t);
glcr::ErrorOr<uint64_t> AllocateUserStack();
[[nodiscard]] glcr::ErrorCode FreeUserStack(uint64_t stack_base);
uint64_t GetNextMemMapAddr(uint64_t size, uint64_t align);
// Maps in a memory object at a specific address.

View File

@ -47,16 +47,6 @@ glcr::RefPtr<Thread> Process::GetThread(uint64_t tid) {
return threads_[tid];
}
void Process::CheckState() {
MutexHolder lock(mutex_);
for (uint64_t i = 0; i < threads_.size(); i++) {
if (!threads_[i]->IsDying()) {
return;
}
}
Exit();
}
glcr::RefPtr<Capability> Process::ReleaseCapability(uint64_t cid) {
return caps_.ReleaseCapability(cid);
}
@ -72,24 +62,51 @@ uint64_t Process::AddExistingCapability(const glcr::RefPtr<Capability>& cap) {
void Process::Exit() {
// TODO: Check this state elsewhere to ensure that we don't for instance
// create a running thread on a finished process.
state_ = FINISHED;
state_ = CLEANUP;
for (uint64_t i = 0; i < threads_.size(); i++) {
if (!threads_[i]->IsDying()) {
threads_[i]->Cleanup();
threads_[i]->SetState(Thread::CLEANUP);
}
}
// From this point onward no threads should be able to reach userspace.
gProcMan->CleanupProcess(id_);
// TODO: Unmap all userspace mappings.
// TODO: Clear capabilities.
// TODO: In the future consider removing this from the process manager.
// I need to think through the implications because the process object
// will be kept alive by the process that created it most likely.
// Technically we may get interrupted here the cleanup process may start,
// truthfully that is fine. Once each thread is flagged for cleanup then it
// will no longer be scheduled again or need to be.
if (gScheduler->CurrentProcess().id_ == id_) {
gScheduler->Yield();
}
}
// Tears down all of this process's resources. This *must not* run on one of
// the process's own threads (it frees the address space they execute in);
// it is intended to run on the dedicated cleanup thread.
void Process::Cleanup() {
  if (gScheduler->CurrentProcess().id_ == id_) {
    panic("Can't clean up process from itself.");
  }
  // Exit() should already have moved us to CLEANUP; warn but proceed if we
  // arrived here through some other path.
  if (state_ != CLEANUP) {
    dbgln("WARN: Cleaning up process with non-cleanup state {}",
          (uint64_t)state_);
    state_ = CLEANUP;
  }
  // 1. For each thread, call cleanup.
  //    Only threads already flagged CLEANUP are torn down here.
  for (uint64_t i = 0; i < threads_.size(); i++) {
    if (threads_[i]->GetState() == Thread::CLEANUP) {
      threads_[i]->Cleanup();
    }
  }
  // 2. Release all capabilities.
  caps_.ReleaseAll();
  // 3. Unmap all user memory.
  PANIC_ON_ERR(vmas_->FreeAddressRange(0, kUserSpaceMax),
               "Failed to cleanup userspace mappings in process exit.");
  // 4. Release paging structures. Resetting vmas_ drops this process's
  //    reference; ~AddressSpace frees the PML4 hierarchy once the last
  //    reference is gone.
  vmas_ = nullptr;
  state_ = FINISHED;
}

View File

@ -31,6 +31,7 @@ class Process : public KernelObject {
UNSPECIFIED,
SETUP,
RUNNING,
CLEANUP,
FINISHED,
};
static glcr::RefPtr<Process> RootProcess();
@ -55,14 +56,16 @@ class Process : public KernelObject {
}
uint64_t AddExistingCapability(const glcr::RefPtr<Capability>& cap);
// Checks the state of all child threads and transitions to
// finished if all have finished.
void CheckState();
State GetState() { return state_; }
// This stops all of the processes threads (they will no longer be scheduled)
// and flags the process for cleanup.
void Exit();
// This *should not* be called from a thread that belongs to this process.
// Rather it should be called from the cleanup thread.
void Cleanup();
private:
friend class glcr::MakeRefCountedFriend<Process>;
Process();

View File

@ -6,6 +6,7 @@
#include "memory/kernel_vmm.h"
#include "memory/paging_util.h"
#include "object/process.h"
#include "scheduler/process_manager.h"
#include "scheduler/scheduler.h"
#define K_THREAD_DEBUG 0
@ -64,17 +65,28 @@ void Thread::Start(uint64_t entry, uint64_t arg1, uint64_t arg2) {
}
void Thread::Init() {
if (is_kernel_) {
((void (*)(void*))rip_)(reinterpret_cast<void*>(arg1_));
panic("Returned from kernel thread.");
}
#if K_THREAD_DEBUG
dbgln("Thread start.", pid(), id_);
#endif
uint64_t rsp_ = process_.vmas()->AllocateUserStack();
// TODO: Investigate this further but without this GCC
// will emit movaps calls to non-16-bit-aligned stack
// addresses.
rsp_ -= 0x8;
*reinterpret_cast<uint64_t*>(rsp_) = kStackBaseSentinel;
auto stack_or = process_.vmas()->AllocateUserStack();
if (!stack_or.ok()) {
panic("Unable to allocate user stack: {}", stack_or.error());
}
user_stack_base_ = stack_or.value();
uint64_t rsp = user_stack_base_ + kUserStackSize - 0x8;
*reinterpret_cast<uint64_t*>(rsp) = kStackBaseSentinel;
SetRsp0(rsp0_start_);
jump_user_space(rip_, rsp_, arg1_, arg2_);
jump_user_space(rip_, rsp, arg1_, arg2_);
}
// Transitions the thread to `state`. Dying states (CLEANUP/FINISHED) are
// terminal -- any attempt to revive a dying thread is a kernel bug.
void Thread::SetState(State state) {
  if (IsDying()) {
    panic("Cannot set state on dying thread.");
  }
  state_ = state;
}
void Thread::Exit() {
@ -86,23 +98,36 @@ void Thread::Exit() {
panic("Thread::Exit called from [{}.{}] on [{}.{}]", curr_thread->pid(),
curr_thread->tid(), pid(), tid());
}
Cleanup();
process_.CheckState();
gProcMan->CleanupThread(curr_thread);
state_ = CLEANUP;
gScheduler->Yield();
}
void Thread::Cleanup() {
state_ = CLEANUP;
if (gScheduler->CurrentThread().get() == this) {
panic("Cannot cleanup thread from itself.");
}
if (state_ != CLEANUP) {
dbgln("WARN: Cleaning up thread with non-cleanup state {}",
(uint64_t)state_);
state_ = CLEANUP;
}
// 1. Release User Stack
PANIC_ON_ERR(process_.vmas()->FreeUserStack(user_stack_base_),
"Unable to free user stack.");
// 2. Unblock waiting threads.
while (blocked_threads_.size() != 0) {
auto thread = blocked_threads_.PopFront();
thread->SetState(Thread::RUNNABLE);
gScheduler->Enqueue(thread);
}
state_ = FINISHED;
// TODO: Race condition when called from exit, once kernel stack manager
// actually reuses stacks this will cause an issue
// 3. Release Kernel Stack
KernelVmm::FreeKernelStack(rsp0_start_);
process_.vmas()->FreeUserStack(rsp_);
state_ = FINISHED;
}
void Thread::Wait() {

View File

@ -45,6 +45,8 @@ class Thread : public KernelObject, public glcr::IntrusiveListNode<Thread> {
uint8_t* FxData() { return fx_data_; }
void SetKernel() { is_kernel_ = true; }
// Switches the thread's state to runnable and enqueues it.
void Start(uint64_t entry, uint64_t arg1, uint64_t arg2);
@ -53,7 +55,7 @@ class Thread : public KernelObject, public glcr::IntrusiveListNode<Thread> {
// State Management.
State GetState() { return state_; };
void SetState(State state) { state_ = state; }
void SetState(State state);
bool IsDying() { return state_ == CLEANUP || state_ == FINISHED; }
// Exits this thread.
@ -75,9 +77,10 @@ class Thread : public KernelObject, public glcr::IntrusiveListNode<Thread> {
Process& process_;
uint64_t id_;
State state_ = CREATED;
bool is_kernel_ = false;
uint64_t user_stack_base_;
// Startup Context for the thread.
uint64_t rsp_;
uint64_t rip_;
uint64_t arg1_;
uint64_t arg2_;

View File

@ -0,0 +1,34 @@
#include "scheduler/cleanup.h"
// Loop body for the dedicated cleanup kernel thread: tears down processes
// and threads flagged for cleanup, so teardown never runs on the stack or
// address space being freed. Never returns.
void ProcessCleanup::CleanupLoop() {
  while (true) {
    // Sleep until a producer queues work; each enqueue signals the
    // semaphore once.
    while (process_list_.empty() && thread_list_.empty()) {
      semaphore_.Wait();
    }
    // TODO: I think we need to protect these lists with a mutex as well.
    while (!process_list_.empty()) {
      auto proc = process_list_.PopFront();
      dbgln("CLEANUP Process: {}", proc->id());
      proc->Cleanup();
    }
    while (!thread_list_.empty()) {
      auto thread = thread_list_.PopFront();
      dbgln("CLEANUP Thread: {}.{}", thread->pid(), thread->tid());
      thread->Cleanup();
    }
  }
}
// Queues a process for teardown and wakes the cleanup loop.
// NOTE(review): producers run on other threads while CleanupLoop pops; the
// lists are currently unsynchronized (see the TODO in CleanupLoop).
void ProcessCleanup::CleanupProcess(glcr::RefPtr<Process> process) {
  process_list_.PushBack(process);
  semaphore_.Signal();
}

// Queues a thread for teardown and wakes the cleanup loop.
void ProcessCleanup::CleanupThread(glcr::RefPtr<Thread> thread) {
  thread_list_.PushBack(thread);
  semaphore_.Signal();
}

// Entry point for the dedicated cleanup kernel thread; never returns.
void CleanupThreadEntry(ProcessCleanup* cleanup) {
  cleanup->CleanupLoop();
  UNREACHABLE;
}

25
zion/scheduler/cleanup.h Normal file
View File

@ -0,0 +1,25 @@
#pragma once
#include <glacier/container/linked_list.h>
#include <glacier/memory/ref_ptr.h>
#include "object/process.h"
#include "object/semaphore.h"
#include "object/thread.h"
// Work queue consumed by a dedicated cleanup kernel thread.
//
// Processes and threads flagged for teardown are queued here and destroyed
// by CleanupLoop, so that no thread ever frees the kernel stack or address
// space it is currently running on.
class ProcessCleanup {
 public:
  ProcessCleanup() {}

  // Drains the queues forever, sleeping on the semaphore when idle; run
  // this on its own kernel thread.
  void CleanupLoop();

  // Producers: queue an object for teardown and wake the loop.
  void CleanupProcess(glcr::RefPtr<Process> process);
  void CleanupThread(glcr::RefPtr<Thread> thread);

 private:
  // Signaled once per queued item; CleanupLoop waits on it when both lists
  // are empty.
  Semaphore semaphore_;
  glcr::LinkedList<glcr::RefPtr<Thread>> thread_list_;
  glcr::LinkedList<glcr::RefPtr<Process>> process_list_;
};

// Thread entry point that runs cleanup->CleanupLoop(); never returns.
void CleanupThreadEntry(ProcessCleanup* cleanup);

View File

@ -19,3 +19,21 @@ Process& ProcessManager::FromId(uint64_t pid) {
}
return *proc_map_.at(pid);
}
// Spawns the dedicated cleanup kernel thread inside process 0 (the root
// process). Must run after the process manager and scheduler are
// initialized.
void ProcessManager::InitCleanup() {
  auto cleanup_thread = FromId(0).CreateThread();
  cleanup_thread->SetKernel();
  cleanup_thread->Start(reinterpret_cast<uint64_t>(CleanupThreadEntry),
                        reinterpret_cast<uint64_t>(&gProcMan->cleanup), 0);
}

// Hands the process with `pid` to the cleanup thread. Panics on an unknown
// pid, since that indicates corrupted kernel bookkeeping.
void ProcessManager::CleanupProcess(uint64_t pid) {
  if (!proc_map_.Contains(pid)) {
    panic("Bad proc access {}, have {} processes", pid, proc_map_.size());
  }
  cleanup.CleanupProcess(proc_map_.at(pid));
}

// Hands a thread to the cleanup thread for teardown.
void ProcessManager::CleanupThread(glcr::RefPtr<Thread> thread) {
  cleanup.CleanupThread(thread);
}

View File

@ -4,6 +4,7 @@
#include <glacier/memory/ref_ptr.h>
#include "object/process.h"
#include "scheduler/cleanup.h"
class ProcessManager {
public:
@ -16,8 +17,14 @@ class ProcessManager {
Process& FromId(uint64_t id);
void InitCleanup();
void CleanupProcess(uint64_t pid);
void CleanupThread(glcr::RefPtr<Thread> thread);
private:
glcr::HashMap<uint64_t, glcr::RefPtr<Process>> proc_map_;
ProcessCleanup cleanup;
};
extern ProcessManager* gProcMan;

View File

@ -2,6 +2,7 @@
#include "capability/capability.h"
#include "debug/debug.h"
#include "interrupt/driver_manager.h"
#include "interrupt/interrupt.h"
#include "object/endpoint.h"
#include "object/reply_port.h"
@ -157,13 +158,10 @@ glcr::ErrorCode PortPoll(ZPortPollReq* req) {
glcr::ErrorCode IrqRegister(ZIrqRegisterReq* req) {
auto& proc = gScheduler->CurrentProcess();
if (req->irq_num != Z_IRQ_PCI_BASE) {
// FIXME: Don't hardcode this nonsense.
return glcr::UNIMPLEMENTED;
}
glcr::RefPtr<Port> port = glcr::MakeRefCounted<Port>();
*req->port_cap = proc.AddNewCapability(port);
RegisterPciPort(port);
DriverManager::Get().RegisterListener(req->irq_num, port);
return glcr::OK;
}

View File

@ -2,6 +2,7 @@
#include "capability/capability.h"
#include "debug/debug.h"
#include "scheduler/cleanup.h"
#include "scheduler/process_manager.h"
#include "scheduler/scheduler.h"

View File

@ -6,6 +6,7 @@
#include "debug/debug.h"
#include "interrupt/apic.h"
#include "interrupt/apic_timer.h"
#include "interrupt/driver_manager.h"
#include "interrupt/interrupt.h"
#include "interrupt/timer.h"
#include "loader/init_loader.h"
@ -46,6 +47,7 @@ extern "C" void zion() {
// These two need to occur after memory allocation is available.
Apic::Init();
ApicTimer::Init();
DriverManager driver_manager;
dbgln("[boot] Init syscalls.");
InitSyscall();
@ -53,6 +55,7 @@ extern "C" void zion() {
dbgln("[boot] Init scheduler.");
ProcessManager::Init();
Scheduler::Init();
gProcMan->InitCleanup();
dbgln("[boot] Loading sys init program.");
LoadInitProgram();