2023-06-20 15:29:32 -07:00
|
|
|
#include "lib/message_queue.h"
|
|
|
|
|
2023-06-26 15:01:55 -07:00
|
|
|
#include "debug/debug.h"
|
2023-06-20 15:29:32 -07:00
|
|
|
#include "scheduler/scheduler.h"
|
|
|
|
|
2023-11-02 23:31:08 -07:00
|
|
|
// Enqueues one message (payload bytes, transmitted capabilities, and an
// optional reply capability) onto the queue and wakes one blocked receiver.
//
// Returns UNIMPLEMENTED for payloads larger than a page, CAP_NOT_FOUND /
// CAP_PERMISSION_DENIED if any transmitted capability is missing or lacks
// the transmit permission, OK otherwise.
glcr::ErrorCode UnboundedMessageQueue::PushBack(
    const glcr::ArrayView<uint8_t>& message,
    const glcr::ArrayView<z_cap_t>& caps, z_cap_t reply_cap) {
  if (message.size() > 0x1000) {
    dbgln("Large message size unimplemented: %x", message.size());
    return glcr::UNIMPLEMENTED;
  }

  // Validate every transmitted capability up front so that a bad entry
  // partway through the list can't strand capabilities (including the reply
  // cap) that were already released from the sending process.
  // FIXME: This would feel safer closer to the relevant syscall.
  // FIXME: Race conditions on get->check->release here. Would be better to
  // have that as a single call on the process. (This pattern repeats other
  // places too).
  for (uint64_t i = 0; i < caps.size(); i++) {
    auto cap = gScheduler->CurrentProcess().GetCapability(caps[i]);
    if (!cap) {
      return glcr::CAP_NOT_FOUND;
    }
    if (!cap->HasPermissions(kZionPerm_Transmit)) {
      return glcr::CAP_PERMISSION_DENIED;
    }
  }

  auto msg_struct = glcr::MakeShared<IpcMessage>();
  msg_struct->data = glcr::Array<uint8_t>(message);

  if (reply_cap != kZionInvalidCapability) {
    // FIXME: We're just trusting that capability has the correct permissions.
    msg_struct->reply_cap =
        gScheduler->CurrentProcess().ReleaseCapability(reply_cap);
  }

  msg_struct->caps.Resize(caps.size());
  for (uint64_t i = 0; i < caps.size(); i++) {
    auto cap = gScheduler->CurrentProcess().ReleaseCapability(caps[i]);
    if (!cap) {
      // The capability vanished between validation and release (see the race
      // FIXME above); caps released before this one are lost.
      return glcr::CAP_NOT_FOUND;
    }
    msg_struct->caps.PushBack(cap);
  }

  MutexHolder h(mutex_);
  pending_messages_.PushBack(msg_struct);

  // Wake one receiver blocked in PopFront, if any.
  if (blocked_threads_.size() > 0) {
    auto thread = blocked_threads_.PopFront();
    thread->SetState(Thread::RUNNABLE);
    gScheduler->Enqueue(thread);
  }
  return glcr::OK;
}
|
|
|
|
|
2023-11-02 21:55:12 -07:00
|
|
|
// Blocks until a message is available, then copies its payload into `bytes`,
// its capabilities into `caps`, and (if requested) its reply capability into
// `reply_cap`. `*num_bytes` / `*num_caps` carry the buffer capacities in and
// the actual counts out.
//
// Returns BUFFER_SIZE (message left queued) if either buffer is too small,
// INTERNAL if a reply cap was requested but the message carries none, OK
// otherwise.
glcr::ErrorCode UnboundedMessageQueue::PopFront(uint64_t* num_bytes,
                                                void* bytes, uint64_t* num_caps,
                                                z_cap_t* caps,
                                                z_cap_t* reply_cap) {
  mutex_->Lock();
  // Block until a message arrives. The emptiness predicate is re-checked
  // after every wakeup because another receiver may have consumed the
  // message first.
  while (pending_messages_.empty()) {
    auto thread = gScheduler->CurrentThread();
    thread->SetState(Thread::BLOCKED);
    blocked_threads_.PushBack(thread);
    mutex_->Release();
    gScheduler->Yield();
    mutex_->Lock();
  }

  // Keep the mutex held from the emptiness check through the pop. (It was
  // previously released and re-acquired here, which let a racing receiver
  // drain the queue and left this thread peeking an empty list.)
  auto next_msg = pending_messages_.PeekFront();
  if (next_msg->data.size() > *num_bytes) {
    mutex_->Release();
    return glcr::BUFFER_SIZE;
  }
  if (next_msg->caps.size() > *num_caps) {
    mutex_->Release();
    return glcr::BUFFER_SIZE;
  }

  next_msg = pending_messages_.PopFront();
  // The message is now exclusively owned by this thread; copying it out does
  // not need the lock.
  mutex_->Release();

  *num_bytes = next_msg->data.size();
  for (uint64_t i = 0; i < *num_bytes; i++) {
    static_cast<uint8_t*>(bytes)[i] = next_msg->data[i];
  }

  auto& proc = gScheduler->CurrentProcess();
  if (reply_cap != nullptr) {
    if (!next_msg->reply_cap) {
      dbgln("Tried to read reply capability off of a message without one");
      return glcr::INTERNAL;
    }
    *reply_cap = proc.AddExistingCapability(next_msg->reply_cap);
  }

  *num_caps = next_msg->caps.size();
  for (uint64_t i = 0; i < *num_caps; i++) {
    caps[i] = proc.AddExistingCapability(next_msg->caps[i]);
  }
  return glcr::OK;
}
|
|
|
|
|
2023-06-21 15:07:40 -07:00
|
|
|
// Enqueues a kernel-constructed message carrying the raw bytes of `init`
// plus a single capability, bypassing the userspace capability checks.
void UnboundedMessageQueue::WriteKernel(uint64_t init,
                                        glcr::RefPtr<Capability> cap) {
  // FIXME: Add synchronization here in case it is ever used outside of init.
  auto kernel_msg = glcr::MakeShared<IpcMessage>();
  kernel_msg->data = glcr::Array<uint8_t>(sizeof(init));

  // Copy the in-memory representation of `init` into the payload, byte by
  // byte.
  const uint8_t* raw_bytes = reinterpret_cast<const uint8_t*>(&init);
  for (uint64_t idx = 0; idx < sizeof(init); idx++) {
    kernel_msg->data[idx] = raw_bytes[idx];
  }

  kernel_msg->caps.PushBack(cap);

  pending_messages_.PushBack(kernel_msg);
}
|
2023-06-21 23:57:23 -07:00
|
|
|
|
2023-11-02 23:31:08 -07:00
|
|
|
// Writes the one-and-only message of this single-shot queue and wakes one
// blocked receiver. Reply ports are not supported on single message queues.
//
// Returns FAILED_PRECONDITION if a message was already written, INTERNAL if
// a reply port was supplied, CAP_NOT_FOUND / CAP_PERMISSION_DENIED for a
// missing or non-transmittable capability, OK otherwise.
glcr::ErrorCode SingleMessageQueue::PushBack(
    const glcr::ArrayView<uint8_t>& message,
    const glcr::ArrayView<z_cap_t>& caps, z_cap_t reply_port) {
  MutexHolder h(mutex_);
  if (has_written_) {
    return glcr::FAILED_PRECONDITION;
  }

  // Reject a reply port before touching any queue state.
  if (reply_port != kZionInvalidCapability) {
    dbgln("Sent a reply port to a single message queue");
    return glcr::INTERNAL;
  }

  // Validate every capability before mutating message_ or releasing anything
  // so a failed push leaves the queue empty and the sender's capabilities
  // intact. (Previously the payload was stored and caps were released one by
  // one before validation could fail, leaving partial state behind.)
  // FIXME: This would feel safer closer to the relevant syscall.
  for (uint64_t i = 0; i < caps.size(); i++) {
    auto cap = gScheduler->CurrentProcess().GetCapability(caps[i]);
    if (!cap) {
      return glcr::CAP_NOT_FOUND;
    }
    if (!cap->HasPermissions(kZionPerm_Transmit)) {
      return glcr::CAP_PERMISSION_DENIED;
    }
  }

  message_.data = message;
  message_.caps.Resize(caps.size());
  for (uint64_t i = 0; i < caps.size(); i++) {
    auto cap = gScheduler->CurrentProcess().ReleaseCapability(caps[i]);
    if (!cap) {
      // Vanished between validation and release (get->check->release race).
      return glcr::CAP_NOT_FOUND;
    }
    message_.caps.PushBack(cap);
  }

  has_written_ = true;

  // Wake one receiver blocked in PopFront, if any.
  if (blocked_threads_.size() > 0) {
    auto thread = blocked_threads_.PopFront();
    thread->SetState(Thread::RUNNABLE);
    gScheduler->Enqueue(thread);
  }

  return glcr::OK;
}
|
|
|
|
|
|
|
|
// Blocks until the single message has been written, then copies its payload
// and capabilities into the caller's buffers. The message may be read at
// most once. Reply ports are not supported on single message queues.
//
// Returns FAILED_PRECONDITION if already read, BUFFER_SIZE if either buffer
// is too small, INTERNAL if a reply port was requested, OK otherwise.
glcr::ErrorCode SingleMessageQueue::PopFront(uint64_t* num_bytes, void* bytes,
                                             uint64_t* num_caps, z_cap_t* caps,
                                             z_cap_t* reply_port) {
  mutex_->Lock();
  // Block until the message is written; re-check the predicate after every
  // wakeup.
  while (!has_written_) {
    auto thread = gScheduler->CurrentThread();
    thread->SetState(Thread::BLOCKED);
    blocked_threads_.PushBack(thread);
    mutex_->Release();
    gScheduler->Yield();
    mutex_->Lock();
  }

  // Hold the mutex from the wakeup check through claiming the message so two
  // racing receivers can't both read it. (The lock was previously dropped
  // and re-taken here, with has_read_ only set at the very end.)
  if (has_read_) {
    mutex_->Release();
    return glcr::FAILED_PRECONDITION;
  }
  if (message_.data.size() > *num_bytes) {
    mutex_->Release();
    return glcr::BUFFER_SIZE;
  }
  if (message_.caps.size() > *num_caps) {
    mutex_->Release();
    return glcr::BUFFER_SIZE;
  }
  // Check before copying anything so an erroneous call leaves the caller's
  // buffers untouched.
  if (reply_port != nullptr) {
    dbgln("Tried to read a reply port a single message queue");
    mutex_->Release();
    return glcr::INTERNAL;
  }

  // Claim the message while still under the lock; message_ is immutable
  // after has_written_ is set, so the copies below don't need it.
  has_read_ = true;
  mutex_->Release();

  *num_bytes = message_.data.size();
  for (uint64_t i = 0; i < *num_bytes; i++) {
    static_cast<uint8_t*>(bytes)[i] = message_.data[i];
  }

  *num_caps = message_.caps.size();
  auto& proc = gScheduler->CurrentProcess();
  for (uint64_t i = 0; i < *num_caps; i++) {
    caps[i] = proc.AddExistingCapability(message_.caps[i]);
  }

  return glcr::OK;
}
|