Compare commits


No commits in common. "d60b2bdc61898f6a72d17cc0fcde356c50641066" and "1edd5023ceefda3fb882a827c4716fce382736f6" have entirely different histories.

11 changed files with 183 additions and 165 deletions

View File

@@ -11,7 +11,6 @@ add_executable(zion
interrupt/interrupt_enter.s
interrupt/timer.cpp
lib/mutex.cpp
lib/message_queue.cpp
loader/init_loader.cpp
memory/kernel_heap.cpp
memory/kernel_stack_manager.cpp

View File

@@ -142,7 +142,7 @@ RefPtr<Port> pci1_port;
extern "C" void isr_pci1();
extern "C" void interrupt_pci1(InterruptFrame*) {
dbgln("Interrupt PCI line 1");
pci1_port->Write(0, nullptr, 0, nullptr);
pci1_port->Write({});
SignalEOI();
}
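
The empty brace argument above value-initializes a ZMessage, so the interrupt handler enqueues a zero-byte, zero-capability notification. Spelled out, and assuming the field names that the syscall hunks later in this diff use, the call is equivalent to this sketch:

// Sketch only: explicit form of Write({}). ZMessage is declared in
// include/ztypes.h; the field names here are inferred from the designated
// initializers in the syscall handlers further down in this diff.
ZMessage empty{
    .num_bytes = 0,   // no payload
    .data = nullptr,
    .num_caps = 0,    // no capabilities transferred
    .caps = nullptr,
};
pci1_port->Write(empty);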

View File

@@ -1,71 +0,0 @@
#include "lib/message_queue.h"
#include "scheduler/scheduler.h"
z_err_t UnboundedMessageQueue::PushBack(uint64_t num_bytes, const void* bytes,
uint64_t num_caps,
const z_cap_t* caps) {
if (num_bytes > 0x1000) {
dbgln("Large message size unimplemented: %x", num_bytes);
return Z_ERR_UNIMPLEMENTED;
}
auto message = MakeShared<Message>();
message->num_bytes = num_bytes;
message->bytes = new uint8_t[num_bytes];
for (uint64_t i = 0; i < num_bytes; i++) {
message->bytes[i] = static_cast<const uint8_t*>(bytes)[i];
}
for (uint64_t i = 0; i < num_caps; i++) {
// FIXME: This would feel safer closer to the relevant syscall.
auto cap = gScheduler->CurrentProcess().ReleaseCapability(caps[i]);
if (!cap) {
return Z_ERR_CAP_NOT_FOUND;
}
message->caps.PushBack(cap);
}
pending_messages_.PushBack(message);
return Z_OK;
}
z_err_t UnboundedMessageQueue::PopFront(uint64_t* num_bytes, void* bytes,
uint64_t* num_caps, z_cap_t* caps) {
auto next_msg = pending_messages_.PeekFront();
if (next_msg->num_bytes > *num_bytes) {
return Z_ERR_BUFF_SIZE;
}
if (next_msg->caps.size() > *num_caps) {
return Z_ERR_BUFF_SIZE;
}
next_msg = pending_messages_.PopFront();
*num_bytes = next_msg->num_bytes;
for (uint64_t i = 0; i < *num_bytes; i++) {
static_cast<uint8_t*>(bytes)[i] = next_msg->bytes[i];
}
*num_caps = next_msg->caps.size();
auto& proc = gScheduler->CurrentProcess();
for (uint64_t i = 0; i < *num_caps; i++) {
caps[i] = proc.AddExistingCapability(next_msg->caps.PopFront());
}
return Z_OK;
}
void UnboundedMessageQueue::WriteKernel(uint64_t init, RefPtr<Capability> cap) {
auto msg = MakeShared<Message>();
msg->bytes = new uint8_t[8];
msg->num_bytes = sizeof(init);
uint8_t* data = reinterpret_cast<uint8_t*>(&init);
for (uint8_t i = 0; i < sizeof(init); i++) {
msg->bytes[i] = data[i];
}
msg->caps.PushBack(cap);
pending_messages_.PushBack(msg);
}

View File

@@ -1,44 +0,0 @@
#pragma once
#include "capability/capability.h"
#include "include/ztypes.h"
#include "lib/linked_list.h"
#include "lib/shared_ptr.h"
class MessageQueue {
public:
virtual ~MessageQueue() {}
virtual z_err_t PushBack(uint64_t num_bytes, const void* bytes,
uint64_t num_caps, const z_cap_t* caps) = 0;
virtual z_err_t PopFront(uint64_t* num_bytes, void* bytes, uint64_t* num_caps,
z_cap_t* caps) = 0;
};
class UnboundedMessageQueue : public MessageQueue {
public:
UnboundedMessageQueue() {}
UnboundedMessageQueue(const UnboundedMessageQueue&) = delete;
UnboundedMessageQueue& operator=(const UnboundedMessageQueue&) = delete;
virtual ~UnboundedMessageQueue() override {}
z_err_t PushBack(uint64_t num_bytes, const void* bytes, uint64_t num_caps,
const z_cap_t* caps) override;
z_err_t PopFront(uint64_t* num_bytes, void* bytes, uint64_t* num_caps,
z_cap_t* caps) override;
void WriteKernel(uint64_t init, RefPtr<Capability> cap);
uint64_t size() { return pending_messages_.size(); }
bool empty() { return size() == 0; }
private:
struct Message {
uint64_t num_bytes;
uint8_t* bytes;
LinkedList<RefPtr<Capability>> caps;
};
LinkedList<SharedPtr<Message>> pending_messages_;
};

View File

@@ -11,18 +11,14 @@ Pair<RefPtr<Channel>, RefPtr<Channel>> Channel::CreateChannelPair() {
return {c1, c2};
}
z_err_t Channel::Write(uint64_t num_bytes, const void* bytes, uint64_t num_caps,
const z_cap_t* caps) {
return peer_->WriteInternal(num_bytes, bytes, num_caps, caps);
z_err_t Channel::Write(const ZMessage& msg) {
return peer_->EnqueueMessage(msg);
}
z_err_t Channel::Read(uint64_t* num_bytes, void* bytes, uint64_t* num_caps,
z_cap_t* caps) {
z_err_t Channel::Read(ZMessage& msg) {
mutex_.Lock();
while (message_queue_.empty()) {
auto thread = gScheduler->CurrentThread();
thread->SetState(Thread::BLOCKED);
blocked_threads_.PushBack(thread);
while (pending_messages_.size() == 0) {
blocked_threads_.PushBack(gScheduler->CurrentThread());
mutex_.Unlock();
gScheduler->Yield();
mutex_.Lock();
@@ -30,13 +26,58 @@ z_err_t Channel::Read(uint64_t* num_bytes, void* bytes, uint64_t* num_caps,
mutex_.Unlock();
MutexHolder lock(mutex_);
return message_queue_.PopFront(num_bytes, bytes, num_caps, caps);
auto next_msg = pending_messages_.PeekFront();
if (next_msg->num_bytes > msg.num_bytes) {
return Z_ERR_BUFF_SIZE;
}
if (next_msg->caps.size() > msg.num_caps) {
return Z_ERR_BUFF_SIZE;
}
z_err_t Channel::WriteInternal(uint64_t num_bytes, const void* bytes,
uint64_t num_caps, const z_cap_t* caps) {
msg.num_bytes = next_msg->num_bytes;
for (uint64_t i = 0; i < msg.num_bytes; i++) {
static_cast<uint8_t*>(msg.data)[i] = next_msg->bytes[i];
}
msg.num_caps = next_msg->caps.size();
auto& proc = gScheduler->CurrentProcess();
for (uint64_t i = 0; i < msg.num_caps; i++) {
msg.caps[i] = proc.AddExistingCapability(next_msg->caps.PopFront());
}
pending_messages_.PopFront();
return Z_OK;
}
z_err_t Channel::EnqueueMessage(const ZMessage& msg) {
if (msg.num_bytes > 0x1000) {
dbgln("Large message size unimplemented: %x", msg.num_bytes);
return Z_ERR_INVALID;
}
auto message = MakeShared<Message>();
// Copy Message body.
message->num_bytes = msg.num_bytes;
message->bytes = new uint8_t[msg.num_bytes];
for (uint64_t i = 0; i < msg.num_bytes; i++) {
message->bytes[i] = static_cast<uint8_t*>(msg.data)[i];
}
// Release and store capabilities.
for (uint64_t i = 0; i < msg.num_caps; i++) {
auto cap = gScheduler->CurrentProcess().ReleaseCapability(msg.caps[i]);
if (!cap) {
return Z_ERR_CAP_NOT_FOUND;
}
message->caps.PushBack(cap);
}
// Enqueue.
MutexHolder lock(mutex_);
RET_ERR(message_queue_.PushBack(num_bytes, bytes, num_caps, caps));
pending_messages_.PushBack(message);
if (blocked_threads_.size() > 0) {
auto thread = blocked_threads_.PopFront();

View File

@@ -3,7 +3,6 @@
#include "capability/capability.h"
#include "include/ztypes.h"
#include "lib/linked_list.h"
#include "lib/message_queue.h"
#include "lib/mutex.h"
#include "lib/pair.h"
#include "lib/ref_ptr.h"
@@ -25,10 +24,8 @@ class Channel : public KernelObject {
RefPtr<Channel> peer() { return peer_; }
z_err_t Write(uint64_t num_bytes, const void* bytes, uint64_t num_caps,
const z_cap_t* caps);
z_err_t Read(uint64_t* num_bytes, void* bytes, uint64_t* num_caps,
z_cap_t* caps);
z_err_t Write(const ZMessage& msg);
z_err_t Read(ZMessage& msg);
private:
// FIXME: We will likely never close the channel based on this
@@ -36,14 +33,22 @@ class Channel : public KernelObject {
RefPtr<Channel> peer_{nullptr};
Mutex mutex_{"channel"};
UnboundedMessageQueue message_queue_;
struct Message {
uint64_t num_bytes;
uint8_t* bytes;
LinkedList<RefPtr<Capability>> caps;
};
// FIXME: This is probably dangerous because of an
// implicit shallow copy.
LinkedList<SharedPtr<Message>> pending_messages_;
LinkedList<RefPtr<Thread>> blocked_threads_;
friend class MakeRefCountedFriend<Channel>;
Channel() {}
void SetPeer(const RefPtr<Channel>& peer) { peer_ = peer; }
z_err_t WriteInternal(uint64_t num_bytes, const void* bytes,
uint64_t num_caps, const z_cap_t* caps);
z_err_t EnqueueMessage(const ZMessage& msg);
};
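
For reference, a minimal sketch of the message struct and a caller of the new API. ZMessage itself is declared in include/ztypes.h and is not part of this diff; the layout below is an assumption inferred from the designated initializers in the syscall handlers, and SendByte is a hypothetical helper:

// Assumed ZMessage layout (illustrative; the real declaration lives in
// include/ztypes.h). On Write the counts describe the payload; on Read they
// pass buffer capacities in and receive the delivered counts back.
struct ZMessage {
  uint64_t num_bytes;
  void* data;
  uint64_t num_caps;
  z_cap_t* caps;
};

// Hypothetical kernel-side helper showing the new call shape.
z_err_t SendByte(const RefPtr<Channel>& chan, uint8_t byte) {
  ZMessage msg{
      .num_bytes = sizeof(byte),
      .data = &byte,
      .num_caps = 0,
      .caps = nullptr,
  };
  return chan->Write(msg);
}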

View File

@@ -4,10 +4,29 @@
Port::Port() {}
z_err_t Port::Write(uint64_t num_bytes, const void* bytes, uint64_t num_caps,
const z_cap_t* caps) {
MutexHolder h(mutex_);
RET_ERR(message_queue_.PushBack(num_bytes, bytes, num_caps, caps));
z_err_t Port::Write(const ZMessage& msg) {
if (msg.num_bytes > 0x1000) {
dbgln("Large message size unimplemented: %x", msg.num_bytes);
return Z_ERR_INVALID;
}
auto message = MakeShared<Message>();
message->num_bytes = msg.num_bytes;
message->bytes = new uint8_t[msg.num_bytes];
for (uint64_t i = 0; i < msg.num_bytes; i++) {
message->bytes[i] = static_cast<uint8_t*>(msg.data)[i];
}
for (uint64_t i = 0; i < msg.num_caps; i++) {
auto cap = gScheduler->CurrentProcess().ReleaseCapability(msg.caps[i]);
if (!cap) {
return Z_ERR_CAP_NOT_FOUND;
}
message->caps.PushBack(cap);
}
MutexHolder lock(mutex_);
pending_messages_.PushBack(message);
if (blocked_threads_.size() > 0) {
auto thread = blocked_threads_.PopFront();
thread->SetState(Thread::RUNNABLE);
@@ -16,13 +35,10 @@ z_err_t Port::Write(uint64_t num_bytes, const void* bytes, uint64_t num_caps,
return Z_OK;
}
z_err_t Port::Read(uint64_t* num_bytes, void* bytes, uint64_t* num_caps,
z_cap_t* caps) {
z_err_t Port::Read(ZMessage& msg) {
mutex_.Lock();
while (message_queue_.empty()) {
auto thread = gScheduler->CurrentThread();
thread->SetState(Thread::BLOCKED);
blocked_threads_.PushBack(thread);
while (pending_messages_.size() < 1) {
blocked_threads_.PushBack(gScheduler->CurrentThread());
mutex_.Unlock();
gScheduler->Yield();
mutex_.Lock();
@@ -30,15 +46,48 @@ z_err_t Port::Read(uint64_t* num_bytes, void* bytes, uint64_t* num_caps,
mutex_.Unlock();
MutexHolder lock(mutex_);
return message_queue_.PopFront(num_bytes, bytes, num_caps, caps);
auto next_msg = pending_messages_.PeekFront();
if (next_msg->num_bytes > msg.num_bytes) {
return Z_ERR_BUFF_SIZE;
}
if (next_msg->caps.size() > msg.num_caps) {
return Z_ERR_BUFF_SIZE;
}
msg.num_bytes = next_msg->num_bytes;
for (uint64_t i = 0; i < msg.num_bytes; i++) {
static_cast<uint8_t*>(msg.data)[i] = next_msg->bytes[i];
}
msg.num_caps = next_msg->caps.size();
auto& proc = gScheduler->CurrentProcess();
for (uint64_t i = 0; i < msg.num_caps; i++) {
msg.caps[i] = proc.AddExistingCapability(next_msg->caps.PopFront());
}
pending_messages_.PopFront();
return Z_OK;
}
void Port::WriteKernel(uint64_t init, RefPtr<Capability> cap) {
MutexHolder h(mutex_);
message_queue_.WriteKernel(init, cap);
auto msg = MakeShared<Message>();
msg->bytes = new uint8_t[8];
msg->num_bytes = sizeof(init);
uint8_t* data = reinterpret_cast<uint8_t*>(&init);
for (uint8_t i = 0; i < sizeof(init); i++) {
msg->bytes[i] = data[i];
}
msg->caps.PushBack(cap);
pending_messages_.PushBack(msg);
}
bool Port::HasMessages() {
MutexHolder h(mutex_);
return !message_queue_.empty();
return pending_messages_.size() != 0;
}
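
Together with the interrupt handler earlier in this diff, the port now acts as a simple blocking notification queue. A sketch of a reader, assuming the kernel-side names used above (the wrapper function itself is hypothetical):

// Sketch only: block until interrupt_pci1's Write({}) enqueues a message.
// A zero-byte, zero-capability message passes both size checks in
// Port::Read, so empty buffers suffice here.
z_err_t WaitForPci1Interrupt(const RefPtr<Port>& irq_port) {
  ZMessage msg{
      .num_bytes = 0,
      .data = nullptr,
      .num_caps = 0,
      .caps = nullptr,
  };
  // Port::Write marks one blocked thread RUNNABLE after enqueueing,
  // which lets this Read return.
  return irq_port->Read(msg);
}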

View File

@@ -2,7 +2,6 @@
#include "capability/capability.h"
#include "lib/linked_list.h"
#include "lib/message_queue.h"
#include "lib/mutex.h"
#include "lib/shared_ptr.h"
#include "object/kernel_object.h"
@@ -22,17 +21,23 @@ class Port : public KernelObject {
Port();
z_err_t Write(uint64_t num_bytes, const void* bytes, uint64_t num_caps,
const z_cap_t* caps);
z_err_t Read(uint64_t* num_bytes, void* bytes, uint64_t* num_caps,
z_cap_t* caps);
z_err_t Write(const ZMessage& msg);
z_err_t Read(ZMessage& msg);
void WriteKernel(uint64_t init, RefPtr<Capability> cap);
bool HasMessages();
private:
UnboundedMessageQueue message_queue_;
struct Message {
uint64_t num_bytes;
uint8_t* bytes;
LinkedList<RefPtr<Capability>> caps;
};
LinkedList<SharedPtr<Message>> pending_messages_;
LinkedList<RefPtr<Thread>> blocked_threads_;
Mutex mutex_{"Port"};

View File

@@ -22,7 +22,6 @@ class Thread : public KernelObject {
CREATED,
RUNNING,
RUNNABLE,
BLOCKED,
FINISHED,
};
static RefPtr<Thread> RootThread(Process& root_proc);

View File

@@ -19,7 +19,15 @@ z_err_t ChannelSend(ZChannelSendReq* req) {
auto chan = chan_cap->obj<Channel>();
RET_IF_NULL(chan);
return chan->Write(req->num_bytes, req->data, req->num_caps, req->caps);
// FIXME: Get rid of this hack.
ZMessage message{
.num_bytes = req->num_bytes,
.data = const_cast<void*>(req->data),
.num_caps = req->num_caps,
.caps = req->caps,
};
return chan->Write(message);
}
z_err_t ChannelRecv(ZChannelRecvReq* req) {
@@ -29,5 +37,16 @@ z_err_t ChannelRecv(ZChannelRecvReq* req) {
auto chan = chan_cap->obj<Channel>();
RET_IF_NULL(chan);
return chan->Read(req->num_bytes, req->data, req->num_caps, req->caps);
// FIXME: Get rid of this hack.
ZMessage message{
.num_bytes = *req->num_bytes,
.data = const_cast<void*>(req->data),
.num_caps = *req->num_caps,
.caps = req->caps,
};
RET_ERR(chan->Read(message));
*req->num_bytes = message.num_bytes;
*req->num_caps = message.num_caps;
return Z_OK;
}

View File

@@ -18,7 +18,13 @@ z_err_t PortSend(ZPortSendReq* req) {
auto port = port_cap->obj<Port>();
RET_IF_NULL(port);
return port->Write(req->num_bytes, req->data, req->num_caps, req->caps);
ZMessage message{
.num_bytes = req->num_bytes,
.data = const_cast<void*>(req->data),
.num_caps = req->num_caps,
.caps = req->caps,
};
return port->Write(message);
}
z_err_t PortRecv(ZPortRecvReq* req) {
@@ -34,7 +40,10 @@ z_err_t PortRecv(ZPortRecvReq* req) {
.num_caps = *req->num_caps,
.caps = req->caps,
};
return port->Read(req->num_bytes, req->data, req->num_caps, req->caps);
RET_ERR(port->Read(message));
*req->num_bytes = message.num_bytes;
*req->num_caps = message.num_caps;
return Z_OK;
}
z_err_t PortPoll(ZPortPollReq* req) {
@@ -44,12 +53,19 @@ z_err_t PortPoll(ZPortPollReq* req) {
auto port = port_cap->obj<Port>();
RET_IF_NULL(port);
// FIXME: Race condition here where this call could block if the last message
// is removed between this check and the port read.
if (!port->HasMessages()) {
return Z_ERR_EMPTY;
}
return port->Read(req->num_bytes, req->data, req->num_caps, req->caps);
ZMessage message{
.num_bytes = *req->num_bytes,
.data = const_cast<void*>(req->data),
.num_caps = *req->num_caps,
.caps = req->caps,
};
RET_ERR(port->Read(message));
*req->num_bytes = message.num_bytes;
*req->num_caps = message.num_caps;
return Z_OK;
}
z_err_t IrqRegister(ZIrqRegisterReq* req) {