Compare commits
5 Commits: 40ea359e9f ... 308dd6a203

Author | SHA1 | Date |
---|---|---|
Drew Galbraith | 308dd6a203 | |
Drew Galbraith | aa2d80b557 | |
Drew Galbraith | 6e227e1cf6 | |
Drew Galbraith | 6756d25e5c | |
Drew Galbraith | e1dc790155 | |
@@ -0,0 +1,222 @@
#pragma once

#include <stdint.h>

#include "glacier/container/array.h"
#include "glacier/container/linked_list.h"
#include "glacier/container/pair.h"
#include "glacier/status/error.h"

namespace glcr {

template <typename T>
struct HashFunc {
  uint64_t operator()(const T&);
};

template <>
struct HashFunc<uint64_t> {
  uint64_t operator()(const uint64_t& value) {
    // FIXME: Write a real hash function.
    return 0xABBAABBAABBAABBA ^ value;
  }
};

template <typename K, typename V, class H = HashFunc<K>>
class HashMap {
 public:
  HashMap() = default;
  HashMap(const HashMap&) = delete;
  HashMap& operator=(const HashMap&) = delete;
  // TODO: Implement Move.
  HashMap(HashMap&&) = delete;
  HashMap& operator=(HashMap&&) = delete;

  // Accessors.
  uint64_t size() { return size_; }
  uint64_t empty() { return size_ == 0; }

  // Returns load as a percentage (i.e. 60 means the load is 0.6).
  //
  // If data is a zero-size array, return load as 100 so it will be flagged for
  // resize.
  // TODO: Return a double here once FPE is enabled.
  uint64_t load() {
    if (data_.size() == 0) {
      return 100;
    }
    return size_ * 100 / data_.size();
  }

  V& at(const K&);
  const V& at(const K&) const;

  bool Contains(const K&) const;

  // Setters.
  [[nodiscard]] ErrorCode Insert(const K&, const V&);
  [[nodiscard]] ErrorCode Insert(K&&, V&&);

  [[nodiscard]] ErrorCode Update(const K&, const V&);
  [[nodiscard]] ErrorCode Update(const K&, V&&);

  [[nodiscard]] ErrorCode Delete(const K&);

  void Resize(uint64_t new_size);

 private:
  Array<LinkedList<Pair<K, V>>> data_;
  uint64_t size_ = 0;

  void ResizeIfNecessary();
};

template <typename K, typename V, class H>
V& HashMap<K, V, H>::at(const K& key) {
  uint64_t hc = H()(key);
  auto& ll = data_[hc % data_.size()];

  for (auto& pair : ll) {
    if (pair.first() == key) {
      return pair.second();
    }
  }
  // TODO: Add a failure mode here instead of constructing an object.
  ll.PushFront({key, {}});
  return ll.PeekFront().second();
}

template <typename K, typename V, class H>
const V& HashMap<K, V, H>::at(const K& key) const {
  uint64_t hc = H()(key);
  auto& ll = data_[hc % data_.size()];

  for (auto& pair : ll) {
    if (pair.first() == key) {
      return pair.second();
    }
  }
  // TODO: Add a failure mode here instead of constructing an object.
  ll.PushFront({key, {}});
  return ll.PeekFront().second();
}

template <typename K, typename V, class H>
bool HashMap<K, V, H>::Contains(const K& key) const {
  uint64_t hc = H()(key);
  auto& ll = data_[hc % data_.size()];

  for (auto& pair : ll) {
    if (pair.first() == key) {
      return true;
    }
  }
  return false;
}

template <typename K, typename V, class H>
ErrorCode HashMap<K, V, H>::Insert(const K& key, const V& value) {
  ResizeIfNecessary();

  uint64_t hc = H()(key);
  auto& ll = data_[hc % data_.size()];

  for (auto& pair : ll) {
    if (pair.first() == key) {
      return ALREADY_EXISTS;
    }
  }
  ll.PushFront({Move(key), Move(value)});
  size_++;
  return OK;
}

template <typename K, typename V, class H>
ErrorCode HashMap<K, V, H>::Insert(K&& key, V&& value) {
  ResizeIfNecessary();

  uint64_t hc = H()(key);
  auto& ll = data_[hc % data_.size()];

  for (auto& pair : ll) {
    if (pair.first() == key) {
      return ALREADY_EXISTS;
    }
  }
  ll.PushFront({Move(key), Move(value)});
  size_++;
  return OK;
}

template <typename K, typename V, class H>
ErrorCode HashMap<K, V, H>::Update(const K& key, const V& value) {
  ResizeIfNecessary();

  uint64_t hc = H()(key);
  auto& ll = data_[hc % data_.size()];

  for (auto& pair : ll) {
    if (pair.first() == key) {
      pair.second() = value;
      return OK;
    }
  }
  return NOT_FOUND;
}

template <typename K, typename V, class H>
ErrorCode HashMap<K, V, H>::Update(const K& key, V&& value) {
  ResizeIfNecessary();

  uint64_t hc = H()(key);
  auto& ll = data_[hc % data_.size()];

  for (auto& pair : ll) {
    if (pair.first() == key) {
      pair.second() = Move(value);
      return OK;
    }
  }
  return NOT_FOUND;
}

template <typename K, typename V, class H>
ErrorCode HashMap<K, V, H>::Delete(const K& key) {
  uint64_t hc = H()(key);
  auto& ll = data_[hc % data_.size()];

  for (auto& pair : ll) {
    if (pair.first() == key) {
      ll.Remove(pair);
      size_--;
      return OK;
    }
  }
  return NOT_FOUND;
}

template <typename K, typename V, class H>
void HashMap<K, V, H>::Resize(uint64_t new_size) {
  Array<LinkedList<Pair<K, V>>> new_data(new_size);

  for (uint64_t i = 0; i < data_.size(); i++) {
    auto& ll = data_[i];
    while (!ll.empty()) {
      auto pair = ll.PopFront();
      uint64_t hc = H()(pair.first());
      new_data[hc % new_size].PushFront(Move(pair));
    }
  }
  data_ = glcr::Move(new_data);
}

template <typename K, typename V, class H>
void HashMap<K, V, H>::ResizeIfNecessary() {
  if (data_.size() == 0) {
    Resize(8);
  } else if (load() > 75) {
    Resize(data_.size() * 2);
  }
}

}  // namespace glcr
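The container above resolves collisions by chaining entries through a per-bucket LinkedList and rehashes once the load passes 75%. A minimal usage sketch, not taken from the commits (error handling elided; the [[nodiscard]] results are deliberately consumed):

#include "glacier/container/hash_map.h"

void HashMapExample() {
  glcr::HashMap<uint64_t, uint64_t> map;
  if (map.Insert(42, 7) != glcr::OK) {  // a second Insert of the same key returns ALREADY_EXISTS
    return;
  }
  if (map.Contains(42)) {
    uint64_t v = map.at(42);  // note: at() on a missing key default-constructs a value (see TODO above)
    (void)v;
  }
  (void)map.Update(42, 9);  // overwrites the stored value, NOT_FOUND if the key is absent
  (void)map.Delete(42);     // unlinks the entry and decrements size()
}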
@@ -23,6 +23,8 @@ class LinkedList {
   T PopFront();
 
+  void Remove(const T& item);
+
   void PushFront(const T& item);
   void PushFront(T&& item);
 
@@ -53,7 +55,9 @@ class LinkedList {
   };
 
   Iterator begin() { return {front_}; }
+  const Iterator begin() const { return {front_}; }
   Iterator end() { return {nullptr}; }
+  const Iterator end() const { return {nullptr}; }
 
  private:
   uint64_t size_ = 0;
 
@@ -123,4 +127,21 @@ T LinkedList<T>::PopFront() {
   return Move(ret);
 }
 
+template <typename T>
+void LinkedList<T>::Remove(const T& item) {
+  if (front_->item == item) {
+    PopFront();
+    return;
+  }
+  ListItem* iter = front_;
+  while (iter != nullptr) {
+    if (iter->next != nullptr && iter->next->item == item) {
+      iter->next = iter->next->next;
+      size_--;
+      return;
+    }
+    iter = iter->next;
+  }
+}
+
 }  // namespace glcr
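A short sketch of the new Remove helper, not from the commits; as written above it assumes a non-empty list and unlinks only the first matching element (a front match delegates to PopFront()):

#include "glacier/container/linked_list.h"

void LinkedListRemoveExample() {
  glcr::LinkedList<uint64_t> list;
  list.PushFront(3);
  list.PushFront(2);
  list.PushFront(1);  // list is now 1 -> 2 -> 3

  list.Remove(2);  // unlinks the middle node: 1 -> 3
  list.Remove(1);  // front matches, so PopFront() runs: 3
}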
@@ -1,14 +1,23 @@
#pragma once

#include <stdint.h>

#include "glacier/memory/move.h"

namespace glcr {

template <typename T, typename U>
class Pair {
 public:
  Pair(const T& first, const U& second) : first_(first), second_(second) {}
  Pair(T&& first, U&& second) : first_(Move(first)), second_(Move(second)) {}
  T& first() { return first_; }
  U& second() { return second_; }

  bool operator==(const Pair& other) {
    return other.first_ == first_ && other.second_ == second_;
  }

 private:
  T first_;
  U second_;
@@ -14,8 +14,13 @@ ninja install
 
 sudo sh ${DIR}/build_image.sh disk.img
 
+QEMU_ARGS=
+if [[ $1 == "debug" ]]; then
+  QEMU_ARGS+="-S -s"
+fi
+
 # Use machine q35 to access PCI devices.
-qemu-system-x86_64 -machine q35 -d guest_errors -m 1G -serial stdio -hda disk.img
+qemu-system-x86_64 -machine q35 -d guest_errors -m 1G -serial stdio -hda disk.img ${QEMU_ARGS}
 
 popd
 
 # Extra options to add to this script in the future.
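With this change, passing debug as the script's first argument appends -S -s to the QEMU command line: -S freezes the CPU at startup and -s exposes a GDB stub on tcp::1234. A hypothetical session (the script name and kernel binary path are assumptions; only the flag behavior comes from the diff):

# Terminal 1: boot the image, frozen, with the GDB stub listening.
sh run.sh debug

# Terminal 2: attach GDB to the stub and resume the guest.
gdb -ex 'target remote localhost:1234' -ex 'continue' path/to/kernel.elf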
@@ -8,37 +8,26 @@ uint64_t CapabilityTable::AddExistingCapability(
     const glcr::RefPtr<Capability>& cap) {
   MutexHolder h(lock_);
   uint64_t id = next_cap_id_++;
-  capabilities_.PushBack({.id = id, .cap = cap});
+  if (capabilities_.Insert(id, cap) != glcr::OK) {
+    panic("Reusing capability id.");
+  }
   return id;
 }
 
 glcr::RefPtr<Capability> CapabilityTable::GetCapability(uint64_t id) {
   MutexHolder h(lock_);
-  auto iter = capabilities_.begin();
-  while (iter != capabilities_.end()) {
-    if (iter->cap && iter->id == id) {
-      return iter->cap;
-    }
-    ++iter;
+  if (!capabilities_.Contains(id)) {
+    panic("Bad cap access {}", id);
   }
-  dbgln("Bad cap access {}", id);
-  dbgln("Num caps: {}", capabilities_.size());
-  return {};
+  return capabilities_.at(id);
 }
 
 glcr::RefPtr<Capability> CapabilityTable::ReleaseCapability(uint64_t id) {
   MutexHolder h(lock_);
-  auto iter = capabilities_.begin();
-  while (iter != capabilities_.end()) {
-    if (iter->cap && iter->id == id) {
-      // FIXME: Do an actual release here.
-      auto cap = iter->cap;
-      iter->cap = {nullptr};
-      return cap;
-    }
-    ++iter;
+  if (!capabilities_.Contains(id)) {
+    panic("Bad cap release {}", id);
   }
-  dbgln("Bad cap release: {}", id);
-  dbgln("Num caps: {}", capabilities_.size());
-  return {};
+  auto cap = capabilities_.at(id);
+  (void)capabilities_.Delete(id);
+  return cap;
 }
@@ -1,9 +1,10 @@
 #pragma once
 
-#include <glacier/container/linked_list.h>
+#include <glacier/container/hash_map.h>
 #include <glacier/memory/ref_ptr.h>
 
 #include "capability/capability.h"
 #include "debug/debug.h"
 #include "object/mutex.h"
 
 class CapabilityTable {
 
@@ -28,12 +29,8 @@ class CapabilityTable {
   glcr::RefPtr<Mutex> lock_ = Mutex::Create();
   // TODO: Do some randomization.
   uint64_t next_cap_id_ = 0x100;
-  // FIXME: use a map data structure.
-  struct CapEntry {
-    uint64_t id;
-    glcr::RefPtr<Capability> cap;
-  };
-  glcr::LinkedList<CapEntry> capabilities_;
+  // TODO: Consider not holding a uniqueptr here instead of a refptr?
+  glcr::HashMap<uint64_t, glcr::RefPtr<Capability>> capabilities_;
 };
 
 template <typename T>
 
@@ -41,7 +38,9 @@ uint64_t CapabilityTable::AddNewCapability(const glcr::RefPtr<T>& object,
                                            uint64_t permissions) {
   MutexHolder h(lock_);
   uint64_t id = next_cap_id_++;
-  capabilities_.PushBack(
-      {.id = id, .cap = MakeRefCounted<Capability>(object, permissions)});
+  if (capabilities_.Insert(
+          id, MakeRefCounted<Capability>(object, permissions)) != glcr::OK) {
+    panic("Reusing capability id {}", id);
+  }
   return id;
 }
@@ -25,9 +25,15 @@ template <typename... Args>
 void panic(const char* str, Args... args) {
   dbgln(str, args...);
   dbgln("PANIC");
-  asm volatile("hlt;");
+  asm volatile("cli; hlt;");
 }
 
+#define PANIC_ON_ERR(expr, str) \
+  {                             \
+    if (expr != glcr::OK) {     \
+      panic(str);               \
+    }                           \
+  }
 #define UNREACHABLE \
   panic("Unreachable {}, {}", __FILE__, __LINE__); \
   __builtin_unreachable();
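PANIC_ON_ERR gives call sites a one-line way to assert on the [[nodiscard]] ErrorCode results introduced with the hash map; the ProcessManager change later in this comparison uses it for Insert. A hedged sketch of a call site (the function is hypothetical, the macro is the one above):

#include "debug/debug.h"                  // panic(), PANIC_ON_ERR
#include "glacier/container/hash_map.h"   // glcr::HashMap, glcr::OK

void InsertOrDie(glcr::HashMap<uint64_t, uint64_t>& map, uint64_t key, uint64_t value) {
  // Expands to: { if (map.Insert(key, value) != glcr::OK) { panic("Duplicate key"); } }
  PANIC_ON_ERR(map.Insert(key, value), "Duplicate key");
}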
@@ -16,6 +16,7 @@ class KernelVmm {
   static uint64_t AcquireKernelStack();
 
+  // Takes the base address to the stack. I.e. the highest value in it.
   static void FreeKernelStack(uint64_t);
 
  private:
@@ -5,6 +5,7 @@
 #include "memory/paging_util.h"
 #include "memory/physical_memory.h"
 #include "object/thread.h"
 #include "scheduler/process_manager.h"
 #include "scheduler/scheduler.h"
 
 namespace {
 
@@ -49,11 +50,11 @@ glcr::RefPtr<Thread> Process::GetThread(uint64_t tid) {
 void Process::CheckState() {
   MutexHolder lock(mutex_);
   for (uint64_t i = 0; i < threads_.size(); i++) {
-    if (threads_[i]->GetState() != Thread::FINISHED) {
+    if (!threads_[i]->IsDying()) {
       return;
     }
   }
-  state_ = FINISHED;
+  Exit();
 }
 
 glcr::RefPtr<Capability> Process::ReleaseCapability(uint64_t cid) {
 
@@ -67,3 +68,28 @@ glcr::RefPtr<Capability> Process::GetCapability(uint64_t cid) {
 uint64_t Process::AddExistingCapability(const glcr::RefPtr<Capability>& cap) {
   return caps_.AddExistingCapability(cap);
 }
+
+void Process::Exit() {
+  // TODO: Check this state elsewhere to ensure that we don't for instance
+  // create a running thread on a finished process.
+  state_ = FINISHED;
+
+  for (uint64_t i = 0; i < threads_.size(); i++) {
+    if (!threads_[i]->IsDying()) {
+      threads_[i]->Cleanup();
+    }
+  }
+
+  // From this point onward no threads should be able to reach userspace.
+
+  // TODO: Unmap all userspace mappings.
+  // TODO: Clear capabilities.
+
+  // TODO: In the future consider removing this from the process manager.
+  // I need to think through the implications because the process object
+  // will be kept alive by the process that created it most likely.
+
+  if (gScheduler->CurrentProcess().id_ == id_) {
+    gScheduler->Yield();
+  }
+}
@@ -61,6 +61,8 @@ class Process : public KernelObject {
   State GetState() { return state_; }
 
+  void Exit();
+
  private:
   friend class glcr::MakeRefCountedFriend<Process>;
   Process();
@@ -2,6 +2,7 @@
 #include "common/gdt.h"
 #include "debug/debug.h"
+#include "memory/kernel_vmm.h"
 #include "memory/paging_util.h"
 #include "object/process.h"
 #include "scheduler/scheduler.h"
 
@@ -68,14 +69,27 @@ void Thread::Exit() {
 #if K_THREAD_DEBUG
   dbgln("Exiting");
 #endif
-  state_ = FINISHED;
   auto curr_thread = gScheduler->CurrentThread();
   if (curr_thread->tid() != id_) {
     panic("Thread::Exit called from [{}.{}] on [{}.{}]", curr_thread->pid(),
           curr_thread->tid(), pid(), tid());
   }
+  Cleanup();
+  gScheduler->Yield();
+}
+
+void Thread::Cleanup() {
+  state_ = CLEANUP;
+  process_.CheckState();
   while (blocked_threads_.size() != 0) {
     auto thread = blocked_threads_.PopFront();
     thread->SetState(Thread::RUNNABLE);
     gScheduler->Enqueue(thread);
   }
-  gScheduler->Yield();
+  state_ = FINISHED;
+  // TODO: Race condition when called from exit, once kernel stack manager
+  // actually reuses stacks this will cause an issue
+  KernelVmm::FreeKernelStack(rsp0_start_);
 }
 
 void Thread::Wait() {
 
@@ -86,7 +100,7 @@ void Thread::Wait() {
   // 3. B finishes.
   // 4. Context Switch B -> A
   // 5. A forever blocks on B.
-  if (state_ == Thread::FINISHED) {
+  if (IsDying()) {
     return;
   }
   auto thread = gScheduler->CurrentThread();
@@ -29,6 +29,7 @@ class Thread : public KernelObject, public glcr::IntrusiveListNode<Thread> {
     RUNNING,
     RUNNABLE,
     BLOCKED,
+    CLEANUP,
     FINISHED,
   };
   static glcr::RefPtr<Thread> RootThread(Process& root_proc);
 
@@ -51,8 +52,17 @@ class Thread : public KernelObject, public glcr::IntrusiveListNode<Thread> {
   // State Management.
   State GetState() { return state_; };
   void SetState(State state) { state_ = state; }
+  bool IsDying() { return state_ == CLEANUP || state_ == FINISHED; }
 
+  // Exits this thread.
+  // Allows all blocked threads to run and releases the kernel stack.
+  // This function should only be called by the running thread on itself
+  // as it will yield.
   void Exit();
 
+  // Like Exit except it does not yield.
+  void Cleanup();
+
   void Wait();
 
  private:
@@ -10,19 +10,12 @@ void ProcessManager::Init() {
 }
 
 void ProcessManager::InsertProcess(const glcr::RefPtr<Process>& proc) {
-  proc_list_.PushBack(proc);
+  PANIC_ON_ERR(proc_map_.Insert(proc->id(), proc), "Reinserting process");
 }
 
 Process& ProcessManager::FromId(uint64_t pid) {
-  if (pid >= proc_list_.size()) {
-    panic("Bad proc access {}, have {} processes", pid, proc_list_.size());
-  }
-  return *proc_list_[pid];
-}
-
-void ProcessManager::DumpProcessStates() {
-  dbgln("Process States: {}", proc_list_.size());
-  for (uint64_t i = 0; i < proc_list_.size(); i++) {
-    dbgln("{}: {}", proc_list_[i]->id(), (uint64_t)proc_list_[i]->GetState());
+  if (!proc_map_.Contains(pid)) {
+    panic("Bad proc access {}, have {} processes", pid, proc_map_.size());
   }
+  return *proc_map_.at(pid);
 }
@@ -12,13 +12,12 @@ class ProcessManager {
   static void Init();
 
   void InsertProcess(const glcr::RefPtr<Process>& proc);
   void RemoveProcess(uint64_t id);
 
   Process& FromId(uint64_t id);
 
   void DumpProcessStates();
 
  private:
-  // TODO: This should be a hashmap.
-  glcr::Vector<glcr::RefPtr<Process>> proc_list_;
+  glcr::HashMap<uint64_t, glcr::RefPtr<Process>> proc_map_;
 };
 
 extern ProcessManager* gProcMan;
@@ -43,6 +43,8 @@ void Scheduler::Preempt() {
     return;
   }
 
+  ClearDeadThreadsFromFront();
+
   asm volatile("cli");
   if (current_thread_ == sleep_thread_) {
     // Sleep should never be preempted. (We should yield it if another thread
 
@@ -66,9 +68,14 @@ void Scheduler::Preempt() {
 
 void Scheduler::Yield() {
   if (!enabled_) {
+    // This is expected to fire once at the start when we enqueue the first
+    // thread before the scheduler is enabled. Maybe we should get rid of it?
     dbgln("WARN Scheduler skipped yield.");
     return;
   }
 
+  ClearDeadThreadsFromFront();
+
   asm volatile("cli");
 
   glcr::RefPtr<Thread> prev = current_thread_;
 
@@ -78,7 +85,9 @@ void Scheduler::Yield() {
       return;
     } else {
       current_thread_ = runnable_threads_.PopFront();
-      prev->SetState(Thread::RUNNABLE);
+      if (!prev->IsDying()) {
+        prev->SetState(Thread::RUNNABLE);
+      }
     }
   } else {
     if (runnable_threads_.size() == 0) {
 
@@ -90,3 +99,10 @@ void Scheduler::Yield() {
 
   SwapToCurrent(*prev);
 }
+
+void Scheduler::ClearDeadThreadsFromFront() {
+  while (runnable_threads_.size() > 0 &&
+         runnable_threads_.PeekFront()->IsDying()) {
+    runnable_threads_.PopFront();
+  }
+}
@@ -33,6 +33,8 @@ class Scheduler {
   Scheduler();
   void SwapToCurrent(Thread& prev);
+
+  void ClearDeadThreadsFromFront();
 };
 
 extern Scheduler* gScheduler;
@@ -8,8 +8,7 @@
 z_err_t ProcessExit(ZProcessExitReq* req) {
   auto curr_thread = gScheduler->CurrentThread();
   dbgln("Exit code: {x}", req->code);
-  // FIXME: kill process here.
-  curr_thread->Exit();
+  curr_thread->process().Exit();
   panic("Returned from thread exit");
   return glcr::UNIMPLEMENTED;
 }
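Taken together, these hunks reroute process teardown through the new map-backed bookkeeping: the syscall asks the owning Process to exit, the Process cleans up each live Thread without yielding, and the Scheduler discards dying threads instead of re-queuing them. A condensed trace of that flow (paraphrased from the hunks above, not code from the repository):

// ProcessExit syscall:
//   curr_thread->process().Exit();
//
// Process::Exit():
//   state_ = FINISHED;
//   for each thread t: if (!t->IsDying()) t->Cleanup();   // Cleanup does not yield
//   if the exiting process is the current one: gScheduler->Yield();
//
// Thread::Cleanup():
//   state_ = CLEANUP; process_.CheckState();
//   wake all blocked waiters, set state_ = FINISHED,
//   then KernelVmm::FreeKernelStack(rsp0_start_);
//
// Scheduler::Yield() / Preempt():
//   ClearDeadThreadsFromFront();   // drop runnable threads whose IsDying() is true
//   and never mark a dying prev thread RUNNABLE again.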