// acadia/zion/object/thread.cpp

#include "object/thread.h"
#include "common/gdt.h"
#include "debug/debug.h"
#include "memory/kernel_vmm.h"
#include "memory/paging_util.h"
#include "object/process.h"
#include "scheduler/scheduler.h"
#define K_THREAD_DEBUG 0
namespace {

// Assembly routine that performs the final transition into user space:
// it sets the user rip/rsp and passes arg1/arg2 through to the new
// thread's entry point. Defined in assembly elsewhere; it does not
// return.
extern "C" void jump_user_space(uint64_t rip, uint64_t rsp, uint64_t arg1,
                                uint64_t arg2);

// First code executed on a fresh kernel stack: the Thread constructor
// stores this function's address in the saved-context rip slot, so the
// first context switch onto the new stack "returns" here. Interrupts
// are re-enabled, then control passes to the current thread's Init(),
// which jumps to user space and never returns — reaching panic() means
// something went badly wrong.
extern "C" void thread_init() {
  asm("sti");
  gScheduler->CurrentThread()->Init();
  panic("Reached end of thread.");
}

}  // namespace
2023-06-21 15:07:40 -07:00
glcr::RefPtr<Thread> Thread::RootThread(Process& root_proc) {
return glcr::MakeRefCounted<Thread>(root_proc);
}
2023-06-21 15:07:40 -07:00
glcr::RefPtr<Thread> Thread::Create(Process& proc, uint64_t tid) {
return glcr::MakeRefCounted<Thread>(proc, tid);
}
2023-06-06 18:40:32 -07:00
Thread::Thread(Process& proc, uint64_t tid) : process_(proc), id_(tid) {
uint64_t* stack_ptr =
reinterpret_cast<uint64_t*>(proc.vmas()->AllocateKernelStack());
// 0: rip
*(stack_ptr) = reinterpret_cast<uint64_t>(thread_init);
// 1-4: rax, rcx, rdx, rbx
// 5: rbp
*(stack_ptr - 5) = reinterpret_cast<uint64_t>(stack_ptr + 1);
// 6-15: rsi, rdi, r8, r9, r10, r11, r12, r13, r14, r15
// 16: cr3
*(stack_ptr - 16) = proc.vmas()->cr3();
rsp0_ = reinterpret_cast<uint64_t>(stack_ptr - 16);
2023-05-18 16:03:09 -07:00
rsp0_start_ = reinterpret_cast<uint64_t>(stack_ptr);
}
// Identifier of the process that owns this thread.
uint64_t Thread::pid() const {
  return process_.id();
}
2023-06-06 16:24:03 -07:00
void Thread::Start(uint64_t entry, uint64_t arg1, uint64_t arg2) {
rip_ = entry;
arg1_ = arg1;
arg2_ = arg2;
state_ = RUNNABLE;
// Get from parent to avoid creating a new shared ptr.
gScheduler->Enqueue(process_.GetThread(id_));
}
void Thread::Init() {
#if K_THREAD_DEBUG
2023-05-30 23:22:29 -07:00
dbgln("Thread start.", pid(), id_);
#endif
uint64_t rsp = process_.vmas()->AllocateUserStack();
// TODO: Investigate this further but without this GCC
// will emit movaps calls to non-16-bit-aligned stack
// addresses.
rsp -= 0x8;
SetRsp0(rsp0_start_);
2023-06-06 16:24:03 -07:00
jump_user_space(rip_, rsp, arg1_, arg2_);
}
// Terminates the calling thread. Must be called on the currently
// running thread; panics if invoked on any other. Releases this
// thread's resources, lets the owning process re-evaluate its own
// state, and yields — never to be scheduled again.
void Thread::Exit() {
#if K_THREAD_DEBUG
  dbgln("Exiting");
#endif
  auto running = gScheduler->CurrentThread();
  if (running->tid() != id_) {
    panic("Thread::Exit called from [{}.{}] on [{}.{}]", running->pid(),
          running->tid(), pid(), tid());
  }
  Cleanup();
  process_.CheckState();
  gScheduler->Yield();
}
void Thread::Cleanup() {
state_ = CLEANUP;
2023-11-02 22:36:48 -07:00
while (blocked_threads_.size() != 0) {
2023-06-22 02:17:50 -07:00
auto thread = blocked_threads_.PopFront();
thread->SetState(Thread::RUNNABLE);
gScheduler->Enqueue(thread);
}
state_ = FINISHED;
// TODO: Race condition when called from exit, once kernel stack manager
// actually reuses stacks this will cause an issue
KernelVmm::FreeKernelStack(rsp0_start_);
2023-06-22 02:17:50 -07:00
}
void Thread::Wait() {
2023-11-02 22:36:48 -07:00
// TODO: We need synchronization code here.
// Race condition is for A waiting on B.
// 1. A checks if B is finished.
// 2. Context Switch A -> B
// 3. B finishes.
// 4. Context Switch B -> A
// 5. A forever blocks on B.
if (IsDying()) {
2023-11-02 22:36:48 -07:00
return;
}
2023-06-22 02:17:50 -07:00
auto thread = gScheduler->CurrentThread();
thread->SetState(Thread::BLOCKED);
blocked_threads_.PushBack(thread);
gScheduler->Yield();
}