[Zion] Map user stacks in as regular MemoryObjects.
This allows us to easily track the physical memory backing each stack so it can be freed when the thread exits. It also simplifies the page fault handler, which now only needs to check the regular mappings to find a user stack.
parent ba1b4df702
commit 2dd69f5844
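At a high level, each user stack is now backed by a reference-counted memory object that is mapped into the address space like any other region, so tearing down the mapping releases the backing pages. A minimal user-space analogy of that ownership model (all names here are illustrative stand-ins, not Zion APIs):

#include <cstdint>
#include <map>
#include <memory>
#include <vector>

// Stand-in for a kernel MemoryObject: owns its backing pages and
// releases them when the last reference goes away.
struct BackingObject {
  explicit BackingObject(uint64_t size)
      : pages(size / 0x1000, std::vector<uint8_t>(0x1000)) {}
  std::vector<std::vector<uint8_t>> pages;
};

// Stand-in for an address space: maps a base vaddr to its backing object.
struct AddressSpaceModel {
  std::map<uint64_t, std::shared_ptr<BackingObject>> mappings;

  void MapStack(uint64_t base, uint64_t size) {
    mappings[base] = std::make_shared<BackingObject>(size);
  }
  void UnmapStack(uint64_t base) {
    mappings.erase(base);  // last reference dropped; pages freed here
  }
};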
@@ -8,30 +8,17 @@ uint64_t UserStackManager::NewUserStack() {
     return freed_stacks_.PopFront();
   }
 
-  uint64_t stack = next_stack_;
   next_stack_ -= kUserStackSize;
+  uint64_t stack = next_stack_;
   if (stack <= kUserStackMin) {
     panic("Out of user stacks!");
   }
-  if (stack == kUserStackMax) {
-    // Add an additional page boundary between kernel and user space.
-    stack -= 0x1000;
-  }
-  EnsureResident(stack - 1, 1);
   return stack;
 }
 
-void UserStackManager::FreeUserStack(uint64_t stack_ptr) {
-  freed_stacks_.PushBack(stack_ptr);
-}
-
-bool UserStackManager::IsValidStack(uint64_t vaddr) {
-  if (vaddr < next_stack_ || vaddr > (kUserStackMax - 0x1000)) {
-    return false;
+void UserStackManager::FreeUserStack(uint64_t stack_base) {
+  if (stack_base & (kUserStackSize - 1)) {
+    dbgln("WARN freeing unaligned user stack {x}", stack_base);
   }
-  // Checks if the address is in the first page of the stack.
-  if (vaddr & 0xFF000) {
-    return true;
-  }
-  return false;
+  freed_stacks_.PushBack(stack_base);
 }
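Two things change in NewUserStack: the decrement now happens before the returned value is captured, so callers receive the stack's base (lowest address) rather than its top, and the eager EnsureResident call disappears because the caller now backs the whole range with a memory object. A compilable sketch of the new arithmetic, with both constants assumed for illustration (their real values are defined elsewhere in Zion):

#include <cassert>
#include <cstdint>

int main() {
  constexpr uint64_t kUserStackMax = 0x8000000000;  // assumed value
  constexpr uint64_t kUserStackSize = 0x100000;     // assumed value

  uint64_t next_stack = kUserStackMax;
  next_stack -= kUserStackSize;  // first allocation
  uint64_t base = next_stack;    // 0x7ffff00000; what the caller now gets
  assert(base + kUserStackSize == kUserStackMax);  // stack grows down from the top
  return 0;
}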
@@ -21,10 +21,7 @@ class UserStackManager {
   UserStackManager(const UserStackManager&) = delete;
 
   uint64_t NewUserStack();
-  void FreeUserStack(uint64_t stack_ptr);
-
-  // Used to check if we should page in this address.
-  bool IsValidStack(uint64_t vaddr);
+  void FreeUserStack(uint64_t stack_base);
 
  private:
  uint64_t next_stack_ = kUserStackMax;
@@ -18,12 +18,18 @@ AddressSpace::AddressSpace() {
   InitializePml4(cr3_);
 }
 
-uint64_t AddressSpace::AllocateUserStack() {
-  return user_stacks_.NewUserStack();
+glcr::ErrorOr<uint64_t> AddressSpace::AllocateUserStack() {
+  uint64_t base = user_stacks_.NewUserStack();
+  auto mem_object = glcr::StaticCastRefPtr<MemoryObject>(
+      glcr::MakeRefCounted<VariableMemoryObject>(kUserStackSize));
+  RET_ERR(MapInMemoryObject(base, mem_object));
+  return base;
 }
 
-void AddressSpace::FreeUserStack(uint64_t rsp) {
-  return user_stacks_.FreeUserStack(rsp);
+glcr::ErrorCode AddressSpace::FreeUserStack(uint64_t base) {
+  RET_ERR(FreeAddressRange(base, base + kUserStackSize));
+  user_stacks_.FreeUserStack(base);
+  return glcr::OK;
 }
 
 uint64_t AddressSpace::GetNextMemMapAddr(uint64_t size, uint64_t align) {
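RET_ERR and the glcr error types come from Zion's support library and are not defined in this diff. For readers unfamiliar with the pattern, an early-return macro in this style looks roughly like the following hypothetical sketch (it assumes glcr::ErrorOr is constructible from an error code; the real macro may differ):

// Hypothetical approximation, for illustration only.
#define RET_ERR(expr)        \
  do {                       \
    auto err_ = (expr);      \
    if (err_ != glcr::OK) {  \
      return err_;           \
    }                        \
  } while (0)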
@@ -70,11 +76,6 @@ bool AddressSpace::HandlePageFault(uint64_t vaddr) {
     return false;
   }
 
-  if (user_stacks_.IsValidStack(vaddr)) {
-    MapPage(cr3_, vaddr, phys_mem::AllocatePage());
-    return true;
-  }
-
   auto offset_or = mapping_tree_.GetPhysicalPageAtVaddr(vaddr);
   if (!offset_or.ok()) {
     return false;
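With the special case gone, a fault on a user stack resolves through the same lookup as any other mapped region, exactly as the commit message promises. The condensed shape of the handler after this change (only the lines visible in this hunk are verbatim; the rest is elided):

bool AddressSpace::HandlePageFault(uint64_t vaddr) {
  // (earlier validity checks omitted)
  auto offset_or = mapping_tree_.GetPhysicalPageAtVaddr(vaddr);
  if (!offset_or.ok()) {
    return false;  // vaddr is not backed by any mapping: a genuine fault
  }
  // ...map the resolved page into cr3_ (continuation not shown in this hunk).
  return true;
}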
@@ -65,8 +65,8 @@ class AddressSpace : public KernelObject {
   uint64_t cr3() { return cr3_; }
 
   // User Mappings.
-  uint64_t AllocateUserStack();
-  void FreeUserStack(uint64_t);
+  glcr::ErrorOr<uint64_t> AllocateUserStack();
+  [[nodiscard]] glcr::ErrorCode FreeUserStack(uint64_t stack_base);
   uint64_t GetNextMemMapAddr(uint64_t size, uint64_t align);
 
   // Maps in a memory object at a specific address.
@@ -72,14 +72,15 @@ void Thread::Init() {
 #if K_THREAD_DEBUG
   dbgln("Thread start.", pid(), id_);
 #endif
-  uint64_t rsp_ = process_.vmas()->AllocateUserStack();
-  // TODO: Investigate this further, but without this GCC
-  // will emit movaps calls to non-16-byte-aligned stack
-  // addresses.
-  rsp_ -= 0x8;
-  *reinterpret_cast<uint64_t*>(rsp_) = kStackBaseSentinel;
+  auto stack_or = process_.vmas()->AllocateUserStack();
+  if (!stack_or.ok()) {
+    panic("Unable to allocate user stack: {}", stack_or.error());
+  }
+  user_stack_base_ = stack_or.value();
+  uint64_t rsp = user_stack_base_ + kUserStackSize - 0x8;
+  *reinterpret_cast<uint64_t*>(rsp) = kStackBaseSentinel;
   SetRsp0(rsp0_start_);
-  jump_user_space(rip_, rsp_, arg1_, arg2_);
+  jump_user_space(rip_, rsp, arg1_, arg2_);
 }
 void Thread::SetState(State state) {
   if (IsDying()) {
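The -0x8 survives the rewrite because of the ABI detail the deleted TODO described: a GCC-compiled function expects to be entered as if by a call, i.e. with rsp % 16 == 8, and emits 16-byte-aligned movaps stores on that assumption. The kStackBaseSentinel written at that slot sits where a return address would normally be. A compilable check of the new rsp computation, with kUserStackSize assumed for illustration:

#include <cassert>
#include <cstdint>

int main() {
  constexpr uint64_t kUserStackSize = 0x100000;  // assumed; any 16-byte multiple works
  uint64_t base = 0x7ffff00000;                  // example 16-byte-aligned stack base
  uint64_t rsp = base + kUserStackSize - 0x8;
  assert(rsp % 16 == 8);  // call-style entry alignment that movaps relies on
  return 0;
}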
@@ -113,7 +114,8 @@ void Thread::Cleanup() {
   }
 
   // 1. Release User Stack
-  process_.vmas()->FreeUserStack(rsp_);
+  PANIC_ON_ERR(process_.vmas()->FreeUserStack(user_stack_base_),
+               "Unable to free user stack.");
 
   // 2. Unblock waiting threads.
   while (blocked_threads_.size() != 0) {
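PANIC_ON_ERR is the counterpart used where failure is unrecoverable; like RET_ERR, its definition lives outside this diff. A hypothetical sketch of its shape:

// Hypothetical approximation, for illustration only.
#define PANIC_ON_ERR(expr, msg)  \
  do {                           \
    auto err_ = (expr);          \
    if (err_ != glcr::OK) {      \
      panic(msg);                \
    }                            \
  } while (0)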
@@ -78,9 +78,9 @@ class Thread : public KernelObject, public glcr::IntrusiveListNode<Thread> {
   uint64_t id_;
   State state_ = CREATED;
   bool is_kernel_ = false;
+  uint64_t user_stack_base_;
 
   // Startup Context for the thread.
-  uint64_t rsp_;
   uint64_t rip_;
   uint64_t arg1_;
   uint64_t arg2_;