Compare commits

No commits in common. "838ef01a2a767c6fcf033d30b48b26a805c02d51" and "0915cf45dc51b1647bb1cb205e889bc4aeebc2f6" have entirely different histories.

35 changed files with 113 additions and 428 deletions

View File

@@ -92,9 +92,6 @@ const V& HashMap<K, V, H>::at(const K& key) const {
template <typename K, typename V, class H>
bool HashMap<K, V, H>::Contains(const K& key) const {
if (data_.size() == 0) {
return false;
}
uint64_t hc = H()(key);
auto& ll = data_[hc % data_.size()];
@@ -118,7 +115,7 @@ ErrorCode HashMap<K, V, H>::Insert(const K& key, const V& value) {
return ALREADY_EXISTS;
}
}
ll.PushFront({key, value});
ll.PushFront({Move(key), Move(value)});
size_++;
return OK;
}

View File

@@ -31,11 +31,6 @@ struct BuddySlot {
uint64_t size;
};
BuddySlot* GetBuddy(BuddySlot* slot) {
return reinterpret_cast<BuddySlot*>(reinterpret_cast<uint64_t>(slot) ^
slot->size);
}
uint64_t NeededSize(uint64_t size) {
uint64_t needed = size + sizeof(BuddySlot);
// Start at 32 because sizeof(BuddySlot) is already 24;
@@ -95,32 +90,6 @@ class BuddyAllocator {
return ptr;
}
void Free(void* ptr) {
check(ZMutexLock(mutex_cap_));
BuddySlot* slot = ((BuddySlot*)ptr) - 1;
if (slot->next || slot->prev) {
crash("Double free", glcr::INTERNAL);
}
BuddySlot* buddy = GetBuddy(slot);
while ((slot->size < 0x2000) && (buddy->next || buddy->prev) &&
(buddy->size == slot->size)) {
// Buddy is free! Merge!
Remove(buddy);
if (buddy < slot) {
slot = buddy;
}
slot->size *= 2;
buddy = GetBuddy(slot);
}
if (free_front_) {
free_front_->prev = slot;
}
slot->next = free_front_;
free_front_ = slot;
check(ZMutexRelease(mutex_cap_));
}
private:
BuddySlot* free_front_ = nullptr;
z_cap_t mutex_cap_ = 0;
@@ -130,9 +99,6 @@ class BuddyAllocator {
BuddySlot* slot = reinterpret_cast<BuddySlot*>(vaddr);
slot->prev = nullptr;
slot->next = free_front_;
if (free_front_) {
free_front_->prev = slot;
}
free_front_ = slot;
slot->size = 0x2000;
}
@@ -143,7 +109,8 @@ class BuddyAllocator {
}
slot->size /= 2;
BuddySlot* new_slot = GetBuddy(slot);
BuddySlot* new_slot = reinterpret_cast<BuddySlot*>(
reinterpret_cast<uint64_t>(slot) ^ slot->size);
new_slot->size = slot->size;
new_slot->next = slot->next;
new_slot->prev = slot;
@@ -190,13 +157,11 @@ void* Allocate(uint64_t size) {
return ptr;
}
void Free(void* ptr) { gAllocator.Free(ptr); }
} // namespace
[[nodiscard]] void* operator new(uint64_t size) { return Allocate(size); }
[[nodiscard]] void* operator new[](uint64_t size) { return Allocate(size); }
void operator delete(void* ptr, uint64_t) { Free(ptr); }
void operator delete[](void* ptr) { Free(ptr); }
void operator delete[](void* ptr, uint64_t) { Free(ptr); }
void operator delete(void*, uint64_t) {}
void operator delete[](void*) {}
void operator delete[](void*, uint64_t) {}
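
For context on the buddy merge logic shown above: GetBuddy computes a block's buddy by XOR-ing its address with its size, which works because buddy-allocator blocks are power-of-two sized and aligned to their own size, so sibling blocks differ in exactly one address bit. A minimal standalone sketch of that invariant (the BuddyOf helper and the concrete addresses are illustrative assumptions, not part of the kernel):

#include <cassert>
#include <cstdint>

// For a size-aligned, power-of-two block, the buddy lives at addr ^ size:
// flipping the single bit equal to `size` toggles between the two halves
// of the parent block that was split to create them.
constexpr uint64_t BuddyOf(uint64_t addr, uint64_t size) { return addr ^ size; }

int main() {
  assert(BuddyOf(0x1000, 0x100) == 0x1100);  // left half -> right half
  assert(BuddyOf(0x1100, 0x100) == 0x1000);  // right half -> left half
  assert(BuddyOf(0x1000, 0x200) == 0x1200);  // after merging into a 0x200-byte block
  return 0;
}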

View File

@@ -183,22 +183,7 @@ glcr::ErrorCode AhciDriver::RegisterIrq() {
if (pci_device_header_->interrupt_pin == 0) {
crash("Can't register IRQ without a pin num", glcr::INVALID_ARGUMENT);
}
uint64_t irq_num = 0;
switch (pci_device_header_->interrupt_pin) {
case 1:
irq_num = kZIrqPci1;
break;
case 2:
irq_num = kZIrqPci2;
break;
case 3:
irq_num = kZIrqPci3;
break;
case 4:
irq_num = kZIrqPci4;
break;
}
uint64_t irq_num = Z_IRQ_PCI_BASE + pci_device_header_->interrupt_pin - 1;
RET_ERR(ZIrqRegister(irq_num, &irq_port_cap_));
irq_thread_ = Thread(interrupt_thread, this);
ahci_hba_->global_host_control |= kGhc_InteruptEnable;
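
For context on the IRQ hunk above: PCI interrupt pins are numbered 1 through 4 and the PCI IRQ vectors are laid out contiguously from Z_IRQ_PCI_BASE (0x30), so the per-pin switch and the arithmetic form compute the same vector. A small illustrative sketch (the kIrqPciBase constant and PciPinToIrq helper are assumptions for the example, mirroring the values in this diff):

#include <cassert>
#include <cstdint>

constexpr uint64_t kIrqPciBase = 0x30;  // same value as Z_IRQ_PCI_BASE in this diff

// Pins are 1-based; vectors are contiguous from the base.
constexpr uint64_t PciPinToIrq(uint64_t pin) { return kIrqPciBase + pin - 1; }

int main() {
  assert(PciPinToIrq(1) == 0x30);  // == kZIrqPci1 elsewhere in this diff
  assert(PciPinToIrq(2) == 0x31);  // == kZIrqPci2
  assert(PciPinToIrq(3) == 0x32);  // == kZIrqPci3
  assert(PciPinToIrq(4) == 0x33);  // == kZIrqPci4
  return 0;
}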

View File

@@ -10,7 +10,6 @@ add_executable(zion
debug/debug.cpp
interrupt/apic.cpp
interrupt/apic_timer.cpp
interrupt/driver_manager.cpp
interrupt/interrupt.cpp
interrupt/interrupt_enter.s
interrupt/timer.cpp
@@ -35,7 +34,6 @@ add_executable(zion
object/reply_port.cpp
object/semaphore.cpp
object/thread.cpp
scheduler/cleanup.cpp
scheduler/context_switch.s
scheduler/jump_user_space.s
scheduler/process_manager.cpp

View File

@@ -146,8 +146,8 @@ void ParseMadt(SdtHeader* rsdt) {
MadtHeader* header = reinterpret_cast<MadtHeader*>(rsdt);
#if K_ACPI_DEBUG
dbgln("Local APIC {x}", +header->local_apic_address);
dbgln("Flags: {x}", +header->flags);
dbgln("Local APIC {x}", header->local_apic_address);
dbgln("Flags: {x}", header->flags);
#endif
gLApicBase = header->local_apic_address;
@@ -160,7 +160,7 @@ void ParseMadt(SdtHeader* rsdt) {
MadtLocalApic* local = reinterpret_cast<MadtLocalApic*>(entry);
#if K_ACPI_DEBUG
dbgln("Local APIC (Proc id, id, flags): {x}, {x}, {x}",
local->processor_id, local->apic_id, +local->flags);
local->processor_id, local->apic_id, local->flags);
#endif
break;
}
@@ -168,7 +168,7 @@ void ParseMadt(SdtHeader* rsdt) {
MadtIoApic* io = reinterpret_cast<MadtIoApic*>(entry);
#if K_ACPI_DEBUG
dbgln("IO Apic (id, addr, gsi base): {x}, {x}, {x}", io->io_apic_id,
+io->io_apic_address, +io->global_system_interrupt_base);
io->io_apic_address, io->global_system_interrupt_base);
#endif
if (gIOApicBase != 0) {
dbgln("More than one IOApic, unhandled");
@@ -181,8 +181,8 @@ void ParseMadt(SdtHeader* rsdt) {
reinterpret_cast<MadtIoApicInterruptSource*>(entry);
#if K_ACPI_DEBUG
dbgln("IO Source (Bus, IRQ, GSI, flags): {x}, {x}, {x}, {x}",
src->bus_source, src->irq_source, +src->global_system_interrupt,
+src->flags);
src->bus_source, src->irq_source, src->global_system_interrupt,
src->flags);
#endif
break;
}
@@ -191,7 +191,7 @@ void ParseMadt(SdtHeader* rsdt) {
reinterpret_cast<MadtLocalApicNonMaskable*>(entry);
#if K_ACPI_DEBUG
dbgln("Local NMI (proc id, flags, lint#): {x}, {x}, {x}",
lnmi->apic_processor_id, +lnmi->flags, lnmi->lint_num);
lnmi->apic_processor_id, lnmi->flags, lnmi->lint_num);
#endif
break;
}
@@ -254,7 +254,7 @@ void ProbeRsdp() {
#if K_ACPI_DEBUG
dbgln("ACPI Ver {}", rsdp->revision);
dbgln("RSDT Addr: {x}", +rsdp->rsdt_addr);
dbgln("RSDT Addr: {x}", rsdp->rsdt_addr);
#endif
ProbeRsdt(reinterpret_cast<SdtHeader*>(rsdp->rsdt_addr));
@@ -270,7 +270,7 @@ void ProbeRsdp() {
}
#if K_ACPI_DEBUG
dbgln("XSDT Addr: {x}", +rsdp->xsdt_addr);
dbgln("XSDT Addr: {x}", rsdp->xsdt_addr);
#endif
}

View File

@@ -33,13 +33,3 @@ glcr::RefPtr<Capability> CapabilityTable::ReleaseCapability(uint64_t id) {
(void)capabilities_.Delete(id);
return cap;
}
void CapabilityTable::ReleaseAll() {
for (uint64_t i = 0; i < next_cap_id_; i++) {
(void)capabilities_.Delete(i);
}
if (capabilities_.size() != 0) {
dbgln("Capabilities still remaining after clear: {x}",
capabilities_.size());
}
}

View File

@@ -25,8 +25,6 @@ class CapabilityTable {
glcr::RefPtr<Capability> GetCapability(uint64_t id);
glcr::RefPtr<Capability> ReleaseCapability(uint64_t id);
void ReleaseAll();
private:
glcr::RefPtr<Mutex> lock_ = Mutex::Create();
// TODO: Do some randomization.

View File

@@ -9,14 +9,12 @@ bool IsValid(uint64_t* rbp) { return rbp && *rbp != kStackBaseSentinel; }
} // namespace
void StackUnwind(uint64_t rbp) {
uint64_t depth_limit = 10;
dbgln("-- Begin Stack --");
uint64_t* rbp_ptr = reinterpret_cast<uint64_t*>(rbp);
while (IsValid(rbp_ptr) && depth_limit > 0) {
while (IsValid(rbp_ptr)) {
uint64_t rip = *(rbp_ptr + 1);
dbgln("RIP: {x}", rip);
rbp_ptr = reinterpret_cast<uint64_t*>(*rbp_ptr);
depth_limit--;
}
dbgln("-- End Stack --");
}

View File

@@ -50,6 +50,8 @@ const uint64_t kZionReplyPortSend = 0x63;
const uint64_t kZionReplyPortRecv = 0x64;
const uint64_t kZionEndpointCall = 0x65;
#define Z_IRQ_PCI_BASE 0x30
// Capability Calls
const uint64_t kZionCapDuplicate = 0x70;
const uint64_t kZionCapRelease = 0x71;
@@ -65,13 +67,6 @@ const uint64_t kZionSemaphoreSignal = 0x85;
// Debugging Calls.
const uint64_t kZionDebug = 0x1'0000;
// Irq Types
const uint64_t kZIrqKbd = 0x22;
const uint64_t kZIrqPci1 = 0x30;
const uint64_t kZIrqPci2 = 0x31;
const uint64_t kZIrqPci3 = 0x32;
const uint64_t kZIrqPci4 = 0x33;
/* ------------------------------
* Capability Types
* ------------------------------*/

View File

@@ -136,9 +136,6 @@ Apic::Apic(const ApicConfiguration& config)
// FIXME: Get this offset from ACPI.
SetIoDoubleReg(0x14, 0x20 | APIC_MASK);
// Map Keyboard
SetIoDoubleReg(0x12, 0x22);
// For now set these based on the presets in the following spec.
// http://web.archive.org/web/20161130153145/http://download.intel.com/design/chipsets/datashts/29056601.pdf
// FIXME: However in the future we should likely use the MADT for legacy

View File

@@ -1,26 +0,0 @@
#include "interrupt/driver_manager.h"
#include "debug/debug.h"
DriverManager* gDriverManager = nullptr;
DriverManager& DriverManager::Get() { return *gDriverManager; }
DriverManager::DriverManager() { gDriverManager = this; }
void DriverManager::WriteMessage(uint64_t irq_num, IpcMessage&& message) {
if (!driver_map_.Contains(irq_num)) {
dbgln("WARN IRQ for {x} with no registered driver", irq_num);
return;
}
driver_map_.at(irq_num)->Send(glcr::Move(message));
}
glcr::ErrorCode DriverManager::RegisterListener(uint64_t irq_num,
glcr::RefPtr<Port> port) {
if (driver_map_.Contains(irq_num)) {
return glcr::ALREADY_EXISTS;
}
return driver_map_.Insert(irq_num, port);
}

View File

@@ -1,22 +0,0 @@
#pragma once
#include <glacier/container/hash_map.h>
#include <glacier/memory/ref_ptr.h>
#include "object/port.h"
class DriverManager {
public:
static DriverManager& Get();
DriverManager();
DriverManager(const DriverManager&) = delete;
DriverManager(DriverManager&&) = delete;
void WriteMessage(uint64_t irq_num, IpcMessage&& message);
glcr::ErrorCode RegisterListener(uint64_t irq_num, glcr::RefPtr<Port> port);
private:
glcr::HashMap<uint64_t, glcr::RefPtr<Port>> driver_map_;
};

View File

@@ -7,7 +7,6 @@
#include "debug/debug.h"
#include "interrupt/apic.h"
#include "interrupt/apic_timer.h"
#include "interrupt/driver_manager.h"
#include "memory/kernel_heap.h"
#include "memory/physical_memory.h"
#include "scheduler/scheduler.h"
@@ -111,8 +110,6 @@ extern "C" void interrupt_protection_fault(InterruptFrame* frame) {
}
dbgln("Index: {}", err >> 3);
dbgln("RIP: {x}", frame->rip);
dbgln("RAX: {x}, RBX: {x}, RCX: {x}, RDX: {x}", frame->rax, frame->rbx,
frame->rcx, frame->rdx);
dbgln("RSP: {x}", frame->rsp);
StackUnwind(frame->rbp);
@@ -197,39 +194,27 @@ extern "C" void interrupt_apic_timer(InterruptFrame*) {
gScheduler->Preempt();
}
extern "C" void isr_keyboard();
extern "C" void interrupt_keyboard(InterruptFrame*) {
glcr::Array<uint8_t> data(1);
data[0] = inb(0x60);
IpcMessage msg{.data = glcr::Move(data)};
DriverManager::Get().WriteMessage(kZIrqKbd, glcr::Move(msg));
gApic->SignalEOI();
}
glcr::RefPtr<Port> pci1_port;
extern "C" void isr_pci1();
extern "C" void interrupt_pci1(InterruptFrame*) {
DriverManager::Get().WriteMessage(kZIrqPci1, {});
pci1_port->Send({});
gApic->SignalEOI();
}
extern "C" void isr_pci2();
extern "C" void interrupt_pci2(InterruptFrame*) {
DriverManager::Get().WriteMessage(kZIrqPci2, {});
dbgln("Interrupt PCI line 2");
gApic->SignalEOI();
}
extern "C" void isr_pci3();
extern "C" void interrupt_pci3(InterruptFrame*) {
DriverManager::Get().WriteMessage(kZIrqPci3, {});
dbgln("Interrupt PCI line 3");
gApic->SignalEOI();
}
extern "C" void isr_pci4();
extern "C" void interrupt_pci4(InterruptFrame*) {
DriverManager::Get().WriteMessage(kZIrqPci4, {});
dbgln("Interrupt PCI line 4");
gApic->SignalEOI();
}
@@ -243,7 +228,6 @@ void InitIdt() {
gIdt[0x20] = CreateDescriptor(isr_timer);
gIdt[0x21] = CreateDescriptor(isr_apic_timer);
gIdt[0x22] = CreateDescriptor(isr_keyboard);
gIdt[0x30] = CreateDescriptor(isr_pci1);
gIdt[0x31] = CreateDescriptor(isr_pci2);
@@ -267,3 +251,5 @@ void UpdateFaultHandlersToIst1() {
};
asm volatile("lidt %0" ::"m"(idtp));
}
void RegisterPciPort(const glcr::RefPtr<Port>& port) { pci1_port = port; }

View File

@@ -7,3 +7,5 @@
void InitIdt();
void UpdateFaultHandlersToIst1();
void RegisterPciPort(const glcr::RefPtr<Port>& port);

View File

@@ -63,7 +63,6 @@ isr_handler fpe_fault
isr_handler timer
isr_handler apic_timer
isr_handler keyboard
isr_handler pci1
isr_handler pci2

View File

@@ -3,7 +3,6 @@
#include <stdint.h>
const uint64_t KiB = 0x400;
const uint64_t MiB = KiB * KiB;
const uint64_t kPageSize = 4 * KiB;
const uint64_t kKernelSlabHeapStart = 0xFFFF'FFFF'4000'0000;
@@ -21,9 +20,3 @@ const uint64_t kKernelStackEnd = 0xFFFF'FFFF'A000'0000;
const uint64_t kKernelStackSize = 3 * kPageSize;
const uint64_t kKernelStackOffset = 4 * kPageSize;
const uint64_t kUserStackMin = 0x00007FF0'00000000;
const uint64_t kUserStackMax = 0x00008000'00000000;
const uint64_t kUserStackSize = MiB;
const uint64_t kUserSpaceMax = 0x00008000'00000000;

View File

@@ -15,9 +15,6 @@ void KernelStackManager::SetupInterruptStack() {
}
uint64_t KernelStackManager::AllocateKernelStack() {
if (!free_stacks_.empty()) {
return free_stacks_.PopFront();
}
next_stack_addr_ += kKernelStackOffset;
if (next_stack_addr_ >= kKernelStackEnd) {
panic("No more kernel stack space");
@@ -27,9 +24,6 @@ uint64_t KernelStackManager::AllocateKernelStack() {
}
void KernelStackManager::FreeKernelStack(uint64_t stack_base) {
// TODO: Validate this value.
if ((stack_base & 0x3FF8) != 0x3FF8) {
dbgln("Odd kernel stack free {x}", stack_base);
}
free_stacks_.PushFront(stack_base);
freed_stack_cnt_++;
dbgln("Freed kernel stacks using {} KiB", freed_stack_cnt_ * 12);
}

View File

@@ -1,6 +1,5 @@
#pragma once
#include <glacier/container/linked_list.h>
#include <stdint.h>
// KernelStackManager doles out kernel stacks.
@@ -21,12 +20,9 @@ class KernelStackManager {
uint64_t AllocateKernelStack();
// For now we just reuse kernel stacks as-is (don't free the physical memory
// or unmap/remap the pages in virtual memory.)
void FreeKernelStack(uint64_t stack_base);
private:
uint64_t next_stack_addr_;
glcr::LinkedList<uint64_t> free_stacks_;
uint64_t freed_stack_cnt_ = 0;
};

View File

@@ -87,21 +87,6 @@ uint64_t CurrCr3() {
return pml4_addr;
}
void CleanupPageStructure(uint64_t struct_phys, uint64_t level) {
uint64_t* struct_virtual =
reinterpret_cast<uint64_t*>(boot::GetHigherHalfDirectMap() + struct_phys);
if (level > 0) {
for (uint16_t i = 0; i < 256; i++) {
if (struct_virtual[i] & PRESENT_BIT) {
CleanupPageStructure(struct_virtual[i] & ~0xFFF, level - 1);
}
}
}
phys_mem::FreePage(struct_phys);
}
} // namespace
void InitializePml4(uint64_t pml4_physical_addr) {
@@ -120,20 +105,6 @@ void InitializePml4(uint64_t pml4_physical_addr) {
pml4_virtual[Pml4Index(hhdm)] = *Pml4Entry(curr_cr3, hhdm);
}
void CleanupPml4(uint64_t pml4_physical_addr) {
uint64_t* pml4_virtual = reinterpret_cast<uint64_t*>(
boot::GetHigherHalfDirectMap() + pml4_physical_addr);
// Iterate the first half of the pml4 as it contains user-space mappings.
for (uint8_t i = 0; i < 128; i++) {
if (pml4_virtual[i] & PRESENT_BIT) {
CleanupPageStructure(pml4_virtual[i] & ~0xFFF, 2);
}
}
phys_mem::FreePage(pml4_physical_addr);
}
void MapPage(uint64_t cr3, uint64_t vaddr, uint64_t paddr) {
vaddr = PageAlign(vaddr);
paddr = PageAlign(paddr);

View File

@@ -5,7 +5,6 @@
#include "object/process.h"
void InitializePml4(uint64_t pml4_physical_addr);
void CleanupPml4(uint64_t pml4_physical_addr);
void MapPage(uint64_t cr3, uint64_t vaddr, uint64_t paddr);

View File

@@ -4,21 +4,31 @@
#include "memory/paging_util.h"
uint64_t UserStackManager::NewUserStack() {
if (!freed_stacks_.empty()) {
return freed_stacks_.PopFront();
}
next_stack_ -= kUserStackSize;
uint64_t stack = next_stack_;
if (stack <= kUserStackMin) {
next_stack_ -= kStackSize;
if (stack <= kStackMin) {
panic("Out of user stacks!");
}
if (stack == kStackMax) {
// Add an additional page boundary between kernel and user space.
stack -= 0x1000;
}
EnsureResident(stack - 1, 1);
return stack;
}
void UserStackManager::FreeUserStack(uint64_t stack_base) {
if (stack_base & (kUserStackSize - 1)) {
dbgln("WARN freeing unaligned user stack {x}", stack_base);
}
freed_stacks_.PushBack(stack_base);
void UserStackManager::FreeUserStack(uint64_t stack_ptr) {
freed_stacks_++;
dbgln("{} freed user stacks", freed_stacks_);
}
bool UserStackManager::IsValidStack(uint64_t vaddr) {
if (vaddr < next_stack_ || vaddr > (kStackMax - 0x1000)) {
return false;
}
// Checks if the address is in the first page of the stack.
if (vaddr & 0xFF000) {
return true;
}
return false;
}

View File

@@ -1,10 +1,7 @@
#pragma once
#include <glacier/container/linked_list.h>
#include <stdint.h>
#include "memory/constants.h"
// Per-process class to manage user stacks.
//
// User stacks live at
@@ -20,13 +17,17 @@ class UserStackManager {
UserStackManager() {}
UserStackManager(const UserStackManager&) = delete;
// TODO: Create some padding between user stacks to
// detect stack overruns.
uint64_t NewUserStack();
void FreeUserStack(uint64_t stack_base);
void FreeUserStack(uint64_t stack_ptr);
// Used to check if we should page in this address.
bool IsValidStack(uint64_t vaddr);
private:
uint64_t next_stack_ = kUserStackMax;
const uint64_t kStackMax = 0x00008000'00000000;
const uint64_t kStackMin = 0x00007FF0'00000000;
const uint64_t kStackSize = 0x100000;
glcr::LinkedList<uint64_t> freed_stacks_;
uint64_t next_stack_ = kStackMax;
uint64_t freed_stacks_ = 0;
};

View File

@@ -18,20 +18,12 @@ AddressSpace::AddressSpace() {
InitializePml4(cr3_);
}
AddressSpace::~AddressSpace() { CleanupPml4(cr3_); }
glcr::ErrorOr<uint64_t> AddressSpace::AllocateUserStack() {
uint64_t base = user_stacks_.NewUserStack();
auto mem_object = glcr::StaticCastRefPtr<MemoryObject>(
glcr::MakeRefCounted<VariableMemoryObject>(kUserStackSize));
RET_ERR(MapInMemoryObject(base, mem_object));
return base;
uint64_t AddressSpace::AllocateUserStack() {
return user_stacks_.NewUserStack();
}
glcr::ErrorCode AddressSpace::FreeUserStack(uint64_t base) {
RET_ERR(FreeAddressRange(base, base + kUserStackSize));
user_stacks_.FreeUserStack(base);
return glcr::OK;
void AddressSpace::FreeUserStack(uint64_t rsp) {
return user_stacks_.FreeUserStack(rsp);
}
uint64_t AddressSpace::GetNextMemMapAddr(uint64_t size, uint64_t align) {
@@ -78,6 +70,11 @@ bool AddressSpace::HandlePageFault(uint64_t vaddr) {
return false;
}
if (user_stacks_.IsValidStack(vaddr)) {
MapPage(cr3_, vaddr, phys_mem::AllocatePage());
return true;
}
auto offset_or = mapping_tree_.GetPhysicalPageAtVaddr(vaddr);
if (!offset_or.ok()) {
return false;

View File

@@ -62,15 +62,11 @@ class AddressSpace : public KernelObject {
AddressSpace(const AddressSpace&) = delete;
AddressSpace(AddressSpace&&) = delete;
// Destroying an address space will free all paging structures associated
// with this address space.
~AddressSpace();
uint64_t cr3() { return cr3_; }
// User Mappings.
glcr::ErrorOr<uint64_t> AllocateUserStack();
[[nodiscard]] glcr::ErrorCode FreeUserStack(uint64_t stack_base);
uint64_t AllocateUserStack();
void FreeUserStack(uint64_t);
uint64_t GetNextMemMapAddr(uint64_t size, uint64_t align);
// Maps in a memory object at a specific address.

View File

@@ -47,6 +47,16 @@ glcr::RefPtr<Thread> Process::GetThread(uint64_t tid) {
return threads_[tid];
}
void Process::CheckState() {
MutexHolder lock(mutex_);
for (uint64_t i = 0; i < threads_.size(); i++) {
if (!threads_[i]->IsDying()) {
return;
}
}
Exit();
}
glcr::RefPtr<Capability> Process::ReleaseCapability(uint64_t cid) {
return caps_.ReleaseCapability(cid);
}
@@ -62,51 +72,24 @@ uint64_t Process::AddExistingCapability(const glcr::RefPtr<Capability>& cap) {
void Process::Exit() {
// TODO: Check this state elsewhere to ensure that we don't for instance
// create a running thread on a finished process.
state_ = CLEANUP;
state_ = FINISHED;
for (uint64_t i = 0; i < threads_.size(); i++) {
if (!threads_[i]->IsDying()) {
threads_[i]->SetState(Thread::CLEANUP);
threads_[i]->Cleanup();
}
}
gProcMan->CleanupProcess(id_);
// From this point onward no threads should be able to reach userspace.
// Technically we may get interrupted here and the cleanup process may start;
// truthfully that is fine. Once each thread is flagged for cleanup then it
// will no longer be scheduled again or need to be.
// TODO: Unmap all userspace mappings.
// TODO: Clear capabilities.
// TODO: In the future consider removing this from the process manager.
// I need to think through the implications because the process object
// will be kept alive by the process that created it most likely.
if (gScheduler->CurrentProcess().id_ == id_) {
gScheduler->Yield();
}
}
void Process::Cleanup() {
if (gScheduler->CurrentProcess().id_ == id_) {
panic("Can't clean up process from itself.");
}
if (state_ != CLEANUP) {
dbgln("WARN: Cleaning up process with non-cleanup state {}",
(uint64_t)state_);
state_ = CLEANUP;
}
// 1. For each thread, call cleanup.
for (uint64_t i = 0; i < threads_.size(); i++) {
if (threads_[i]->GetState() == Thread::CLEANUP) {
threads_[i]->Cleanup();
}
}
// 2. Release all capabilities.
caps_.ReleaseAll();
// 3. Unmap all user memory.
PANIC_ON_ERR(vmas_->FreeAddressRange(0, kUserSpaceMax),
"Failed to cleanup userspace mappings in process exit.");
// 4. Release paging structures.
vmas_ = nullptr;
state_ = FINISHED;
}

View File

@@ -31,7 +31,6 @@ class Process : public KernelObject {
UNSPECIFIED,
SETUP,
RUNNING,
CLEANUP,
FINISHED,
};
static glcr::RefPtr<Process> RootProcess();
@@ -56,16 +55,14 @@ class Process : public KernelObject {
}
uint64_t AddExistingCapability(const glcr::RefPtr<Capability>& cap);
// Checks the state of all child threads and transitions to
// finished if all have finished.
void CheckState();
State GetState() { return state_; }
// This stops all of the process's threads (they will no longer be scheduled)
// and flags the process for cleanup.
void Exit();
// This *should not* be called from a thread that belongs to this process.
// Rather it should be called from the cleanup thread.
void Cleanup();
private:
friend class glcr::MakeRefCountedFriend<Process>;
Process();

View File

@@ -6,7 +6,6 @@
#include "memory/kernel_vmm.h"
#include "memory/paging_util.h"
#include "object/process.h"
#include "scheduler/process_manager.h"
#include "scheduler/scheduler.h"
#define K_THREAD_DEBUG 0
@@ -65,28 +64,17 @@ void Thread::Start(uint64_t entry, uint64_t arg1, uint64_t arg2) {
}
void Thread::Init() {
if (is_kernel_) {
((void (*)(void*))rip_)(reinterpret_cast<void*>(arg1_));
panic("Returned from kernel thread.");
}
#if K_THREAD_DEBUG
dbgln("Thread start.", pid(), id_);
#endif
auto stack_or = process_.vmas()->AllocateUserStack();
if (!stack_or.ok()) {
panic("Unable to allocate user stack: {}", stack_or.error());
}
user_stack_base_ = stack_or.value();
uint64_t rsp = user_stack_base_ + kUserStackSize - 0x8;
*reinterpret_cast<uint64_t*>(rsp) = kStackBaseSentinel;
uint64_t rsp_ = process_.vmas()->AllocateUserStack();
// TODO: Investigate this further, but without this GCC
// will emit movaps calls to non-16-byte-aligned stack
// addresses.
rsp_ -= 0x8;
*reinterpret_cast<uint64_t*>(rsp_) = kStackBaseSentinel;
SetRsp0(rsp0_start_);
jump_user_space(rip_, rsp, arg1_, arg2_);
}
void Thread::SetState(State state) {
if (IsDying()) {
panic("Cannot set state on dying thread.");
}
state_ = state;
jump_user_space(rip_, rsp_, arg1_, arg2_);
}
void Thread::Exit() {
@@ -98,36 +86,23 @@ void Thread::Exit() {
panic("Thread::Exit called from [{}.{}] on [{}.{}]", curr_thread->pid(),
curr_thread->tid(), pid(), tid());
}
gProcMan->CleanupThread(curr_thread);
state_ = CLEANUP;
Cleanup();
process_.CheckState();
gScheduler->Yield();
}
void Thread::Cleanup() {
if (gScheduler->CurrentThread().get() == this) {
panic("Cannot cleanup thread from itself.");
}
if (state_ != CLEANUP) {
dbgln("WARN: Cleaning up thread with non-cleanup state {}",
(uint64_t)state_);
state_ = CLEANUP;
}
// 1. Release User Stack
PANIC_ON_ERR(process_.vmas()->FreeUserStack(user_stack_base_),
"Unable to free user stack.");
// 2. Unblock waiting threads.
state_ = CLEANUP;
while (blocked_threads_.size() != 0) {
auto thread = blocked_threads_.PopFront();
thread->SetState(Thread::RUNNABLE);
gScheduler->Enqueue(thread);
}
// 3. Release Kernel Stack
KernelVmm::FreeKernelStack(rsp0_start_);
state_ = FINISHED;
// TODO: Race condition when called from Exit; once the kernel stack manager
// actually reuses stacks this will cause an issue.
KernelVmm::FreeKernelStack(rsp0_start_);
process_.vmas()->FreeUserStack(rsp_);
}
void Thread::Wait() {

View File

@@ -45,8 +45,6 @@ class Thread : public KernelObject, public glcr::IntrusiveListNode<Thread> {
uint8_t* FxData() { return fx_data_; }
void SetKernel() { is_kernel_ = true; }
// Switches the thread's state to runnable and enqueues it.
void Start(uint64_t entry, uint64_t arg1, uint64_t arg2);
@@ -55,7 +53,7 @@ class Thread : public KernelObject, public glcr::IntrusiveListNode<Thread> {
// State Management.
State GetState() { return state_; };
void SetState(State state);
void SetState(State state) { state_ = state; }
bool IsDying() { return state_ == CLEANUP || state_ == FINISHED; }
// Exits this thread.
@@ -77,10 +75,9 @@ class Thread : public KernelObject, public glcr::IntrusiveListNode<Thread> {
Process& process_;
uint64_t id_;
State state_ = CREATED;
bool is_kernel_ = false;
uint64_t user_stack_base_;
// Startup Context for the thread.
uint64_t rsp_;
uint64_t rip_;
uint64_t arg1_;
uint64_t arg2_;

View File

@@ -1,34 +0,0 @@
#include "scheduler/cleanup.h"
void ProcessCleanup::CleanupLoop() {
while (true) {
while (process_list_.empty() && thread_list_.empty()) {
semaphore_.Wait();
}
// TODO: I think we need to protect these lists with a mutex as well.
while (!process_list_.empty()) {
auto proc = process_list_.PopFront();
dbgln("CLEANUP Process: {}", proc->id());
proc->Cleanup();
}
while (!thread_list_.empty()) {
auto thread = thread_list_.PopFront();
dbgln("CLEANUP Thread: {}.{}", thread->pid(), thread->tid());
thread->Cleanup();
}
}
}
void ProcessCleanup::CleanupProcess(glcr::RefPtr<Process> process) {
process_list_.PushBack(process);
semaphore_.Signal();
}
void ProcessCleanup::CleanupThread(glcr::RefPtr<Thread> thread) {
thread_list_.PushBack(thread);
semaphore_.Signal();
}
void CleanupThreadEntry(ProcessCleanup* cleanup) {
cleanup->CleanupLoop();
UNREACHABLE;
}

View File

@@ -1,25 +0,0 @@
#pragma once
#include <glacier/container/linked_list.h>
#include <glacier/memory/ref_ptr.h>
#include "object/process.h"
#include "object/semaphore.h"
#include "object/thread.h"
class ProcessCleanup {
public:
ProcessCleanup() {}
void CleanupLoop();
void CleanupProcess(glcr::RefPtr<Process> process);
void CleanupThread(glcr::RefPtr<Thread> thread);
private:
Semaphore semaphore_;
glcr::LinkedList<glcr::RefPtr<Thread>> thread_list_;
glcr::LinkedList<glcr::RefPtr<Process>> process_list_;
};
void CleanupThreadEntry(ProcessCleanup* cleanup);

View File

@@ -19,21 +19,3 @@ Process& ProcessManager::FromId(uint64_t pid) {
}
return *proc_map_.at(pid);
}
void ProcessManager::InitCleanup() {
auto cleanup_thread = FromId(0).CreateThread();
cleanup_thread->SetKernel();
cleanup_thread->Start(reinterpret_cast<uint64_t>(CleanupThreadEntry),
reinterpret_cast<uint64_t>(&gProcMan->cleanup), 0);
}
void ProcessManager::CleanupProcess(uint64_t pid) {
if (!proc_map_.Contains(pid)) {
panic("Bad proc access {}, have {} processes", pid, proc_map_.size());
}
cleanup.CleanupProcess(proc_map_.at(pid));
}
void ProcessManager::CleanupThread(glcr::RefPtr<Thread> thread) {
cleanup.CleanupThread(thread);
}

View File

@@ -4,7 +4,6 @@
#include <glacier/memory/ref_ptr.h>
#include "object/process.h"
#include "scheduler/cleanup.h"
class ProcessManager {
public:
@@ -17,14 +16,8 @@ class ProcessManager {
Process& FromId(uint64_t id);
void InitCleanup();
void CleanupProcess(uint64_t pid);
void CleanupThread(glcr::RefPtr<Thread> thread);
private:
glcr::HashMap<uint64_t, glcr::RefPtr<Process>> proc_map_;
ProcessCleanup cleanup;
};
extern ProcessManager* gProcMan;

View File

@@ -2,7 +2,6 @@
#include "capability/capability.h"
#include "debug/debug.h"
#include "interrupt/driver_manager.h"
#include "interrupt/interrupt.h"
#include "object/endpoint.h"
#include "object/reply_port.h"
@@ -158,10 +157,13 @@ glcr::ErrorCode PortPoll(ZPortPollReq* req) {
glcr::ErrorCode IrqRegister(ZIrqRegisterReq* req) {
auto& proc = gScheduler->CurrentProcess();
if (req->irq_num != Z_IRQ_PCI_BASE) {
// FIXME: Don't hardcode this nonsense.
return glcr::UNIMPLEMENTED;
}
glcr::RefPtr<Port> port = glcr::MakeRefCounted<Port>();
*req->port_cap = proc.AddNewCapability(port);
DriverManager::Get().RegisterListener(req->irq_num, port);
RegisterPciPort(port);
return glcr::OK;
}

View File

@@ -2,7 +2,6 @@
#include "capability/capability.h"
#include "debug/debug.h"
#include "scheduler/cleanup.h"
#include "scheduler/process_manager.h"
#include "scheduler/scheduler.h"

View File

@@ -6,7 +6,6 @@
#include "debug/debug.h"
#include "interrupt/apic.h"
#include "interrupt/apic_timer.h"
#include "interrupt/driver_manager.h"
#include "interrupt/interrupt.h"
#include "interrupt/timer.h"
#include "loader/init_loader.h"
@@ -47,7 +46,6 @@ extern "C" void zion() {
// These two need to occur after memory allocation is available.
Apic::Init();
ApicTimer::Init();
DriverManager driver_manager;
dbgln("[boot] Init syscalls.");
InitSyscall();
@@ -55,7 +53,6 @@
dbgln("[boot] Init scheduler.");
ProcessManager::Init();
Scheduler::Init();
gProcMan->InitCleanup();
dbgln("[boot] Loading sys init program.");
LoadInitProgram();