From 2dd69f5844dc6677a8a9387e5a58eb229f26d3b4 Mon Sep 17 00:00:00 2001
From: Drew Galbraith
Date: Fri, 24 Nov 2023 16:51:35 -0800
Subject: [PATCH] [Zion] Map user stacks in as regular MemoryObjects.

This allows us to easily track the physical memory so it can be freed
when the thread exits. It also simplifies the page fault handler as it
just needs to check regular mappings to find a user stack.
---
 zion/memory/user_stack_manager.cpp | 23 +++++------------------
 zion/memory/user_stack_manager.h   |  5 +----
 zion/object/address_space.cpp      | 19 ++++++++++---------
 zion/object/address_space.h        |  4 ++--
 zion/object/thread.cpp             | 18 ++++++++++--------
 zion/object/thread.h               |  2 +-
 6 files changed, 29 insertions(+), 42 deletions(-)

diff --git a/zion/memory/user_stack_manager.cpp b/zion/memory/user_stack_manager.cpp
index ffb91ae..6226ab4 100644
--- a/zion/memory/user_stack_manager.cpp
+++ b/zion/memory/user_stack_manager.cpp
@@ -8,30 +8,17 @@ uint64_t UserStackManager::NewUserStack() {
     return freed_stacks_.PopFront();
   }
 
-  uint64_t stack = next_stack_;
   next_stack_ -= kUserStackSize;
+  uint64_t stack = next_stack_;
   if (stack <= kUserStackMin) {
     panic("Out of user stacks!");
   }
-  if (stack == kUserStackMax) {
-    // Add a additional page boudary between kernel and user space.
-    stack -= 0x1000;
-  }
-  EnsureResident(stack - 1, 1);
   return stack;
 }
 
-void UserStackManager::FreeUserStack(uint64_t stack_ptr) {
-  freed_stacks_.PushBack(stack_ptr);
-}
-
-bool UserStackManager::IsValidStack(uint64_t vaddr) {
-  if (vaddr < next_stack_ || vaddr > (kUserStackMax - 0x1000)) {
-    return false;
+void UserStackManager::FreeUserStack(uint64_t stack_base) {
+  if (stack_base & (kUserStackSize - 1)) {
+    dbgln("WARN freeing unaligned user stack {x}", stack_base);
   }
-  // Checks if the address is in the first page of the stack.
-  if (vaddr & 0xFF000) {
-    return true;
-  }
-  return false;
+  freed_stacks_.PushBack(stack_base);
 }
diff --git a/zion/memory/user_stack_manager.h b/zion/memory/user_stack_manager.h
index fa549c4..adfe94d 100644
--- a/zion/memory/user_stack_manager.h
+++ b/zion/memory/user_stack_manager.h
@@ -21,10 +21,7 @@ class UserStackManager {
   UserStackManager(const UserStackManager&) = delete;
 
   uint64_t NewUserStack();
-  void FreeUserStack(uint64_t stack_ptr);
-
-  // Used to check if we should page in this address.
-  bool IsValidStack(uint64_t vaddr);
+  void FreeUserStack(uint64_t stack_base);
 
  private:
  uint64_t next_stack_ = kUserStackMax;
diff --git a/zion/object/address_space.cpp b/zion/object/address_space.cpp
index 398bfc2..196593d 100644
--- a/zion/object/address_space.cpp
+++ b/zion/object/address_space.cpp
@@ -18,12 +18,18 @@ AddressSpace::AddressSpace() {
   InitializePml4(cr3_);
 }
 
-uint64_t AddressSpace::AllocateUserStack() {
-  return user_stacks_.NewUserStack();
+glcr::ErrorOr<uint64_t> AddressSpace::AllocateUserStack() {
+  uint64_t base = user_stacks_.NewUserStack();
+  auto mem_object = glcr::StaticCastRefPtr<MemoryObject>(
+      glcr::MakeRefCounted(kUserStackSize));
+  RET_ERR(MapInMemoryObject(base, mem_object));
+  return base;
 }
 
-void AddressSpace::FreeUserStack(uint64_t rsp) {
-  return user_stacks_.FreeUserStack(rsp);
+glcr::ErrorCode AddressSpace::FreeUserStack(uint64_t base) {
+  RET_ERR(FreeAddressRange(base, base + kUserStackSize));
+  user_stacks_.FreeUserStack(base);
+  return glcr::OK;
 }
 
 uint64_t AddressSpace::GetNextMemMapAddr(uint64_t size, uint64_t align) {
@@ -70,11 +76,6 @@ bool AddressSpace::HandlePageFault(uint64_t vaddr) {
     return false;
   }
 
-  if (user_stacks_.IsValidStack(vaddr)) {
-    MapPage(cr3_, vaddr, phys_mem::AllocatePage());
-    return true;
-  }
-
   auto offset_or = mapping_tree_.GetPhysicalPageAtVaddr(vaddr);
   if (!offset_or.ok()) {
     return false;
diff --git a/zion/object/address_space.h b/zion/object/address_space.h
index d6aebd7..0be9db1 100644
--- a/zion/object/address_space.h
+++ b/zion/object/address_space.h
@@ -65,8 +65,8 @@ class AddressSpace : public KernelObject {
   uint64_t cr3() { return cr3_; }
 
   // User Mappings.
-  uint64_t AllocateUserStack();
-  void FreeUserStack(uint64_t);
+  glcr::ErrorOr<uint64_t> AllocateUserStack();
+  [[nodiscard]] glcr::ErrorCode FreeUserStack(uint64_t stack_base);
   uint64_t GetNextMemMapAddr(uint64_t size, uint64_t align);
 
   // Maps in a memory object at a specific address.
diff --git a/zion/object/thread.cpp b/zion/object/thread.cpp
index 76d5acd..701792c 100644
--- a/zion/object/thread.cpp
+++ b/zion/object/thread.cpp
@@ -72,14 +72,15 @@ void Thread::Init() {
 #if K_THREAD_DEBUG
   dbgln("Thread start.", pid(), id_);
 #endif
-  uint64_t rsp_ = process_.vmas()->AllocateUserStack();
-  // TODO: Investigate this further but without this GCC
-  // will emit movaps calls to non-16-bit-aligned stack
-  // addresses.
-  rsp_ -= 0x8;
-  *reinterpret_cast<uint64_t*>(rsp_) = kStackBaseSentinel;
+  auto stack_or = process_.vmas()->AllocateUserStack();
+  if (!stack_or.ok()) {
+    panic("Unable to allocate user stack: {}", stack_or.error());
+  }
+  user_stack_base_ = stack_or.value();
+  uint64_t rsp = user_stack_base_ + kUserStackSize - 0x8;
+  *reinterpret_cast<uint64_t*>(rsp) = kStackBaseSentinel;
   SetRsp0(rsp0_start_);
-  jump_user_space(rip_, rsp_, arg1_, arg2_);
+  jump_user_space(rip_, rsp, arg1_, arg2_);
 }
 void Thread::SetState(State state) {
   if (IsDying()) {
@@ -113,7 +114,8 @@
   }
 
   // 1. Release User Stack
-  process_.vmas()->FreeUserStack(rsp_);
+  PANIC_ON_ERR(process_.vmas()->FreeUserStack(user_stack_base_),
+               "Unable to free user stack.");
 
   // 2. Unblock waiting threads.
   while (blocked_threads_.size() != 0) {
diff --git a/zion/object/thread.h b/zion/object/thread.h
index fcde8a8..53bee2b 100644
--- a/zion/object/thread.h
+++ b/zion/object/thread.h
@@ -78,9 +78,9 @@ class Thread : public KernelObject, public glcr::IntrusiveListNode {
   uint64_t id_;
   State state_ = CREATED;
   bool is_kernel_ = false;
+  uint64_t user_stack_base_;
 
   // Startup Context for the thread.
-  uint64_t rsp_;
   uint64_t rip_;
   uint64_t arg1_;
   uint64_t arg2_;
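
Below is a minimal standalone sketch (not applied by this patch) of the stack-slot
policy UserStackManager implements after this change: stacks are carved downward
from a fixed virtual region in stack-size-aligned slots, the value handed back is
the slot's base address (which AllocateUserStack maps in as a MemoryObject and
Thread::Init turns into an rsp of base + size - 8), and freed slots are recycled
before new ones are carved. The constants, the std::deque free list, and the
exception are illustrative stand-ins for the kernel's kUserStackSize /
kUserStackMin / kUserStackMax, its intrusive list, and panic; they are assumptions
made for the sketch, not Zion's actual types.

#include <cassert>
#include <cstdint>
#include <deque>
#include <stdexcept>

// Illustrative stand-ins for the kernel's stack-region constants.
constexpr uint64_t kStackSize = 0x100000;    // One slot per user stack.
constexpr uint64_t kStackMin = 0x40000000;   // Bottom of the stack region.
constexpr uint64_t kStackMax = 0x80000000;   // Top of the stack region.

class StackRegionAllocator {
 public:
  // Hands out the base (lowest address) of a slot-aligned stack region,
  // preferring recycled slots over carving new ones.
  uint64_t Allocate() {
    if (!freed_.empty()) {
      uint64_t base = freed_.front();
      freed_.pop_front();
      return base;
    }
    // Decrement first so the returned value is the slot's aligned base.
    next_ -= kStackSize;
    uint64_t base = next_;
    if (base <= kStackMin) {
      throw std::runtime_error("Out of user stacks!");
    }
    return base;
  }

  // Recycles a slot; mirrors the alignment check the patch adds to
  // FreeUserStack (an unaligned base means the caller passed an rsp,
  // not the mapped region's base).
  void Free(uint64_t base) {
    assert((base & (kStackSize - 1)) == 0 && "unaligned stack base");
    freed_.push_back(base);
  }

 private:
  uint64_t next_ = kStackMax;
  std::deque<uint64_t> freed_;
};

int main() {
  StackRegionAllocator stacks;
  uint64_t a = stacks.Allocate();       // kStackMax - 1 * kStackSize
  uint64_t b = stacks.Allocate();       // kStackMax - 2 * kStackSize
  assert(b == a - kStackSize);
  stacks.Free(a);
  assert(stacks.Allocate() == a);       // Freed slots are reused first.
  uint64_t rsp = a + kStackSize - 0x8;  // How Thread::Init derives its rsp.
  (void)rsp;
  return 0;
}

Decrementing next_ before reading the base is what keeps the returned address
slot-aligned; that is why NewUserStack swaps those two lines in this patch and
why FreeUserStack can now warn when it is handed an unaligned value.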