#include "object/process.h"

#include "debug/debug.h"
#include "include/zcall.h"
#include "memory/paging_util.h"
#include "memory/physical_memory.h"
#include "object/thread.h"
#include "scheduler/process_manager.h"
#include "scheduler/scheduler.h"

namespace {
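
// Counter used to hand out process ids. It starts at 1 because the root
// process takes id 0 (see RootProcess()).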
static uint64_t gNextId = 1;

}  // namespace
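
// Builds the root (initial) process by hand: it is given id 0 and its first
// thread comes from Thread::RootThread rather than CreateThread().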
glcr::RefPtr<Process> Process::RootProcess() {
  glcr::RefPtr<Process> proc = glcr::MakeRefCounted<Process>(0);
  proc->threads_.PushBack(Thread::RootThread(*proc));
  proc->next_thread_id_ = 1;

  return proc;
}
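
// Creates an empty process with a freshly assigned id and its own address
// space; no threads exist until CreateThread() is called.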
glcr::RefPtr<Process> Process::Create() {
  return glcr::MakeRefCounted<Process>();
}
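
// Assigns the next global id, gives the process a fresh address space, and
// starts it in the RUNNING state.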
Process::Process()
    : id_(gNextId++),
      vmas_(glcr::MakeRefCounted<AddressSpace>()),
      state_(RUNNING) {}
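
// Creates a new thread owned by this process. The process mutex guards both
// the thread id counter and the thread list.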
glcr::RefPtr<Thread> Process::CreateThread() {
  MutexHolder lock(mutex_);
  glcr::RefPtr<Thread> thread =
      glcr::MakeRefCounted<Thread>(*this, next_thread_id_++);
  threads_.PushBack(thread);
  return thread;
}
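
// Looks up a thread by id. Thread ids index directly into threads_, so an
// out-of-range id is treated as a kernel bug and triggers a panic rather than
// returning a null thread.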
glcr::RefPtr<Thread> Process::GetThread(uint64_t tid) {
  MutexHolder lock(mutex_);
  if (tid >= threads_.size()) {
    panic("Bad thread access {} on process {} with {} threads.", tid, id_,
          threads_.size());
  }
  return threads_[tid];
}
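
// Capability operations forward directly to this process's capability table
// (caps_).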
glcr::RefPtr<Capability> Process::ReleaseCapability(uint64_t cid) {
  return caps_.ReleaseCapability(cid);
}
glcr::RefPtr<Capability> Process::GetCapability(uint64_t cid) {
  return caps_.GetCapability(cid);
}
uint64_t Process::AddExistingCapability(const glcr::RefPtr<Capability>& cap) {
  return caps_.AddExistingCapability(cap);
}
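
// Flags this process and all of its threads for cleanup and hands the process
// id to the process manager. Exit() may be called from one of the process's
// own threads; in that case we yield so the exiting thread is never scheduled
// again. The actual teardown happens later in Cleanup().
//
// Rough lifecycle sketch (assuming the process manager later invokes Cleanup()
// from a different process, which this file does not show):
//
//   proc->Exit();     // flag threads for cleanup, notify gProcMan
//   ...
//   proc->Cleanup();  // run elsewhere; panics if run on proc itself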
void Process::Exit() {
  // TODO: Check this state elsewhere to ensure that we don't, for instance,
  // create a running thread on a finished process.
  state_ = CLEANUP;

  for (uint64_t i = 0; i < threads_.size(); i++) {
    if (!threads_[i]->IsDying()) {
      threads_[i]->SetState(Thread::CLEANUP);
    }
  }

  gProcMan->CleanupProcess(id_);

  // Technically we may be interrupted here and the cleanup may start before we
  // yield; that is fine. Once each thread is flagged for cleanup it will no
  // longer be scheduled, and it doesn't need to be.
  if (gScheduler->CurrentProcess().id_ == id_) {
    gScheduler->Yield();
  }
}
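
// Tears down a process that has been flagged for cleanup: reaps its threads,
// releases its capabilities, unmaps user memory, and drops the paging
// structures. Must be called from a different process; it panics otherwise.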
void Process::Cleanup() {
  if (gScheduler->CurrentProcess().id_ == id_) {
    panic("Can't clean up process from itself.");
  }
  if (state_ != CLEANUP) {
    dbgln("WARN: Cleaning up process with non-cleanup state {}",
          (uint64_t)state_);
    state_ = CLEANUP;
  }

  // 1. For each thread, call cleanup.
  for (uint64_t i = 0; i < threads_.size(); i++) {
    if (threads_[i]->GetState() == Thread::CLEANUP) {
      threads_[i]->Cleanup();
    }
  }

  // 2. Release all capabilities.
  caps_.ReleaseAll();

  // 3. Unmap all user memory.
  PANIC_ON_ERR(vmas_->FreeAddressRange(0, kUserSpaceMax),
               "Failed to clean up userspace mappings in process exit.");

  // 4. Release paging structures.
  vmas_ = nullptr;

  state_ = FINISHED;
}