Compare commits

...

5 Commits

46 changed files with 667 additions and 295 deletions

View File

@ -32,12 +32,15 @@ class ErrorOr {
bool ok_;
};
#define ASSIGN_OR_RETURN(lhs, rhs) \
\
auto e##__LINE__ = rhs; \
if (!e##__LINE__.ok()) { \
return e##__LINE__.error(); \
} \
lhs = rhs.value();
#define AOR_INNER(a, b) a##b
#define AOR_VAR(a) AOR_INNER(e, a)
#define ASSIGN_OR_RETURN(lhs, rhs) \
\
auto AOR_VAR(__LINE__) = rhs; \
if (!AOR_VAR(__LINE__).ok()) { \
return AOR_VAR(__LINE__).error(); \
} \
lhs = AOR_VAR(__LINE__).value();
} // namespace glcr
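
Side note on the macro rewrite above: ## pastes its operands before argument macros are expanded, so the old e##__LINE__ always produced the literal identifier e__LINE__ (colliding when the macro is used twice in one scope), and its final lhs = rhs.value(); also re-evaluated rhs. Routing the paste through a helper (AOR_INNER/AOR_VAR) forces __LINE__ to expand first. A minimal standalone sketch of the difference (names hypothetical):

#include <cstdio>

#define DIRECT(a) e##a           // pastes before expansion: always e__LINE__
#define PASTE(a, b) a##b
#define INDIRECT(a) PASTE(e, a)  // a expands first, then pastes: e9, e10, ...

int main() {
  int INDIRECT(__LINE__) = 1;  // declares a variable named after this line
  int INDIRECT(__LINE__) = 2;  // a different name; no redefinition error
  // int DIRECT(__LINE__) = 3;  // would declare e__LINE__ both times and
  // int DIRECT(__LINE__) = 4;  // fail to compile on the second use
  printf("two distinct variables declared\n");
  return 0;
}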

View File

@ -1,6 +1,8 @@
add_library(mammoth_lib STATIC
src/channel.cpp
src/debug.cpp
src/endpoint_client.cpp
src/endpoint_server.cpp
src/init.cpp
src/memory_region.cpp
src/process.cpp

View File

@ -0,0 +1,37 @@
#pragma once
#include <glacier/container/pair.h>
#include <glacier/status/error_or.h>
#include <zcall.h>
#include <ztypes.h>
class EndpointClient {
public:
static EndpointClient AdoptEndpoint(z_cap_t cap);
template <typename Req, typename Resp>
glcr::ErrorOr<glcr::Pair<Resp, z_cap_t>> CallEndpoint(const Req& req);
private:
EndpointClient(uint64_t cap) : cap_(cap) {}
z_cap_t cap_;
};
template <typename Req, typename Resp>
glcr::ErrorOr<glcr::Pair<Resp, z_cap_t>> EndpointClient::CallEndpoint(
const Req& req) {
uint64_t reply_port_cap;
RET_ERR(ZEndpointSend(cap_, sizeof(Req), &req, &reply_port_cap));
Resp resp;
z_cap_t cap = 0;
uint64_t num_caps = 1;
uint64_t num_bytes = sizeof(Resp);
RET_ERR(ZReplyPortRecv(reply_port_cap, &num_bytes, &resp, &num_caps, &cap));
if (num_bytes != sizeof(resp) || num_caps != 1) {
return glcr::FAILED_PRECONDITION;
}
return glcr::Pair{resp, cap};
}
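
CallEndpoint bundles the round trip into one RPC-shaped call: send Req, receive a reply-port capability, then block on that port for Resp plus exactly one attached capability. A caller sketch, mirroring the DenaliClient change later in this diff (endpoint_cap is assumed to come from the spawn handshake):

EndpointClient client = EndpointClient::AdoptEndpoint(endpoint_cap);
DenaliRead req{.device_id = 0, .lba = 0, .size = 2};
auto pair_or = client.CallEndpoint<DenaliRead, DenaliReadResponse>(req);
if (!pair_or) {
  return pair_or.error();
}
auto pair = pair_or.value();
DenaliReadResponse& resp = pair.first();
z_cap_t mem_cap = pair.second();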

View File

@ -0,0 +1,25 @@
#pragma once
#include <glacier/status/error_or.h>
#include <ztypes.h>
#include "mammoth/endpoint_client.h"
class EndpointServer {
public:
static glcr::ErrorOr<EndpointServer> Create();
static EndpointServer Adopt(z_cap_t endpoint_cap);
glcr::ErrorOr<EndpointClient> CreateClient();
// FIXME: Release Cap here.
z_cap_t GetCap() { return endpoint_cap_; }
glcr::ErrorCode Recieve(uint64_t* num_bytes, void* data,
z_cap_t* reply_port_cap);
private:
z_cap_t endpoint_cap_;
EndpointServer(z_cap_t cap) : endpoint_cap_(cap) {}
};
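
The server side of the same protocol receives the request bytes together with the reply-port capability and answers on that port. A minimal one-iteration sketch under the same assumptions (request decoding elided; Recieve is the header's own spelling):

glcr::ErrorCode ServeOnce(EndpointServer& server) {
  uint8_t buffer[1024];
  uint64_t num_bytes = sizeof(buffer);
  z_cap_t reply_port_cap;
  RET_ERR(server.Recieve(&num_bytes, buffer, &reply_port_cap));
  // ... decode the request from buffer and produce a response ...
  DenaliReadResponse resp{};
  z_cap_t mem_cap = 0;  // capability handed back alongside the bytes
  return ZReplyPortSend(reply_port_cap, sizeof(resp), &resp, 1, &mem_cap);
}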

View File

@ -3,6 +3,6 @@
#include <glacier/status/error_or.h>
#include <stdint.h>
#include "mammoth/channel.h"
#include "mammoth/endpoint_client.h"
glcr::ErrorOr<Channel> SpawnProcessFromElfRegion(uint64_t program);
glcr::ErrorOr<EndpointClient> SpawnProcessFromElfRegion(uint64_t program);

View File

@ -0,0 +1,3 @@
#include "mammoth/endpoint_server.h"
EndpointClient EndpointClient::AdoptEndpoint(z_cap_t cap) { return {cap}; }

View File

@ -0,0 +1,23 @@
#include "mammoth/endpoint_server.h"
glcr::ErrorOr<EndpointServer> EndpointServer::Create() {
uint64_t cap;
RET_ERR(ZEndpointCreate(&cap));
return EndpointServer(cap);
}
EndpointServer EndpointServer::Adopt(z_cap_t endpoint_cap) {
return EndpointServer(endpoint_cap);
}
glcr::ErrorOr<EndpointClient> EndpointServer::CreateClient() {
uint64_t client_cap;
// FIXME: Restrict permissions to send-only here.
RET_ERR(ZCapDuplicate(endpoint_cap_, &client_cap));
return EndpointClient::AdoptEndpoint(client_cap);
}
glcr::ErrorCode EndpointServer::Recieve(uint64_t* num_bytes, void* data,
z_cap_t* reply_port_cap) {
return ZEndpointRecv(endpoint_cap_, num_bytes, data, reply_port_cap);
}

View File

@ -9,7 +9,7 @@
uint64_t gSelfProcCap = 0;
uint64_t gSelfVmasCap = 0;
uint64_t gInitChannelCap = 0;
uint64_t gInitEndpointCap = 0;
uint64_t gBootDenaliVmmoCap = 0;
@ -28,8 +28,8 @@ z_err_t ParseInitPort(uint64_t init_port_cap) {
dbgln("received vmas");
gSelfVmasCap = init_cap;
break;
case Z_INIT_CHANNEL:
gInitChannelCap = init_cap;
case Z_INIT_ENDPOINT:
gInitEndpointCap = init_cap;
break;
case Z_BOOT_DENALI_VMMO:
dbgln("received denali");

View File

@ -3,8 +3,8 @@
#include <glacier/status/error.h>
#include <zcall.h>
#include "mammoth/channel.h"
#include "mammoth/debug.h"
#include "mammoth/endpoint_server.h"
#include "mammoth/init.h"
#include "mammoth/port.h"
@ -95,20 +95,21 @@ uint64_t LoadElfProgram(uint64_t base, uint64_t as_cap) {
} // namespace
glcr::ErrorOr<Channel> SpawnProcessFromElfRegion(uint64_t program) {
Channel local, foreign;
check(CreateChannels(local, foreign));
glcr::ErrorOr<EndpointClient> SpawnProcessFromElfRegion(uint64_t program) {
ASSIGN_OR_RETURN(EndpointServer server, EndpointServer::Create());
ASSIGN_OR_RETURN(EndpointClient client, server.CreateClient());
uint64_t proc_cap;
uint64_t as_cap;
uint64_t foreign_port_id;
uint64_t port_cap;
#if MAM_PROC_DEBUG
dbgln("Port Create");
#endif
check(ZPortCreate(&port_cap));
RET_ERR(ZPortCreate(&port_cap));
uint64_t port_cap_donate;
check(ZCapDuplicate(port_cap, &port_cap_donate));
RET_ERR(ZCapDuplicate(port_cap, &port_cap_donate));
#if MAM_PROC_DEBUG
dbgln("Spawn");
@ -127,12 +128,12 @@ glcr::ErrorOr<Channel> SpawnProcessFromElfRegion(uint64_t program) {
Port p(port_cap);
check(p.WriteMessage<uint64_t>(Z_INIT_SELF_PROC, proc_cap));
check(p.WriteMessage<uint64_t>(Z_INIT_SELF_VMAS, as_cap));
check(p.WriteMessage<uint64_t>(Z_INIT_CHANNEL, foreign.release_cap()));
check(p.WriteMessage<uint64_t>(Z_INIT_ENDPOINT, server.GetCap()));
#if MAM_PROC_DEBUG
dbgln("Thread start");
#endif
check(ZThreadStart(thread_cap, entry_point, foreign_port_id, 0));
return local;
return client;
}

View File

@ -7,8 +7,11 @@
Command::~Command() {}
DmaReadCommand::DmaReadCommand(uint64_t lba, uint64_t sector_cnt,
DmaCallback callback)
: lba_(lba), sector_cnt_(sector_cnt), callback_(callback) {
DmaCallback callback, z_cap_t reply_port)
: reply_port_(reply_port),
lba_(lba),
sector_cnt_(sector_cnt),
callback_(callback) {
region_ = MappedMemoryRegion::ContiguousPhysical(sector_cnt * 512);
}
@ -46,4 +49,6 @@ void DmaReadCommand::PopulatePrdt(PhysicalRegionDescriptor* prdt) {
prdt[0].region_address = region_.paddr();
prdt[0].byte_count = region_.size();
}
void DmaReadCommand::Callback() { callback_(lba_, sector_cnt_, region_.cap()); }
void DmaReadCommand::Callback() {
callback_(reply_port_, lba_, sector_cnt_, region_.cap());
}

View File

@ -15,8 +15,9 @@ class Command {
class DmaReadCommand : public Command {
public:
typedef void (*DmaCallback)(uint64_t, uint64_t, uint64_t);
DmaReadCommand(uint64_t lba, uint64_t sector_cnt, DmaCallback callback);
typedef void (*DmaCallback)(z_cap_t, uint64_t, uint64_t, z_cap_t);
DmaReadCommand(uint64_t lba, uint64_t sector_cnt, DmaCallback callback,
z_cap_t reply_port);
virtual ~DmaReadCommand() override;
@ -26,6 +27,7 @@ class DmaReadCommand : public Command {
void Callback() override;
private:
z_cap_t reply_port_;
uint64_t lba_;
uint64_t sector_cnt_;
DmaCallback callback_;

View File

@ -4,18 +4,21 @@
#include "denali/denali.h"
MappedMemoryRegion DenaliClient::ReadSectors(uint64_t device_id, uint64_t lba,
uint64_t num_sectors) {
glcr::ErrorOr<MappedMemoryRegion> DenaliClient::ReadSectors(
uint64_t device_id, uint64_t lba, uint64_t num_sectors) {
DenaliRead read{
.device_id = device_id,
.lba = lba,
.size = num_sectors,
};
check(channel_.WriteStruct(&read));
auto pair_or = endpoint_.CallEndpoint<DenaliRead, DenaliReadResponse>(read);
if (!pair_or) {
return pair_or.error();
}
auto pair = pair_or.value();
DenaliReadResponse resp;
uint64_t mem_cap;
check(channel_.ReadStructAndCap(&resp, &mem_cap));
DenaliReadResponse& resp = pair.first();
z_cap_t& mem_cap = pair.second();
return MappedMemoryRegion::FromCapability(mem_cap);
}

View File

@ -1,5 +1,6 @@
#include <mammoth/channel.h>
#include <mammoth/debug.h>
#include <mammoth/endpoint_server.h>
#include <mammoth/init.h>
#include <stdint.h>
@ -11,7 +12,8 @@ uint64_t main(uint64_t init_port_cap) {
AhciDriver driver;
RET_ERR(driver.Init());
DenaliServer server(gInitChannelCap, driver);
EndpointServer endpoint = EndpointServer::Adopt(gInitEndpointCap);
DenaliServer server(endpoint, driver);
RET_ERR(server.RunServer());
// FIXME: Add thread join.
return 0;

View File

@ -6,22 +6,22 @@
namespace {
DenaliServer* gServer = nullptr;
void HandleResponse(uint64_t lba, uint64_t size, uint64_t cap) {
gServer->HandleResponse(lba, size, cap);
void HandleResponse(z_cap_t reply_port, uint64_t lba, uint64_t size,
z_cap_t mem) {
gServer->HandleResponse(reply_port, lba, size, mem);
}
} // namespace
DenaliServer::DenaliServer(uint64_t channel_cap, AhciDriver& driver)
: channel_cap_(channel_cap), driver_(driver) {
DenaliServer::DenaliServer(EndpointServer server, AhciDriver& driver)
: server_(server), driver_(driver) {
gServer = this;
}
glcr::ErrorCode DenaliServer::RunServer() {
while (true) {
uint64_t buff_size = kBuffSize;
uint64_t cap_size = 0;
RET_ERR(ZChannelRecv(channel_cap_, &buff_size, read_buffer_, &cap_size,
nullptr));
z_cap_t reply_port;
RET_ERR(server_.Recieve(&buff_size, read_buffer_, &reply_port));
if (buff_size < sizeof(uint64_t)) {
dbgln("Skipping invalid message");
continue;
@ -34,7 +34,7 @@ glcr::ErrorCode DenaliServer::RunServer() {
case DENALI_READ: {
DenaliRead* read_req = reinterpret_cast<DenaliRead*>(read_buffer_);
uint64_t memcap = 0;
RET_ERR(HandleRead(*read_req));
RET_ERR(HandleRead(*read_req, reply_port));
break;
}
default:
@ -44,21 +44,22 @@ glcr::ErrorCode DenaliServer::RunServer() {
}
}
glcr::ErrorCode DenaliServer::HandleRead(const DenaliRead& read) {
glcr::ErrorCode DenaliServer::HandleRead(const DenaliRead& read,
z_cap_t reply_port) {
ASSIGN_OR_RETURN(AhciDevice * device, driver_.GetDevice(read.device_id));
device->IssueCommand(
new DmaReadCommand(read.lba, read.size, ::HandleResponse));
new DmaReadCommand(read.lba, read.size, ::HandleResponse, reply_port));
return glcr::OK;
}
void DenaliServer::HandleResponse(uint64_t lba, uint64_t size, uint64_t cap) {
void DenaliServer::HandleResponse(z_cap_t reply_port, uint64_t lba,
uint64_t size, z_cap_t mem) {
DenaliReadResponse resp{
.device_id = 0,
.lba = lba,
.size = size,
};
check(ZChannelSend(channel_cap_, sizeof(resp),
reinterpret_cast<uint8_t*>(&resp), 1, &cap));
check(ZReplyPortSend(reply_port, sizeof(resp), &resp, 1, &mem));
}

View File

@ -1,24 +1,26 @@
#pragma once
#include <glacier/status/error.h>
#include <mammoth/endpoint_server.h>
#include "ahci/ahci_driver.h"
#include "denali/denali.h"
class DenaliServer {
public:
DenaliServer(uint64_t channel_cap, AhciDriver& driver);
DenaliServer(EndpointServer server, AhciDriver& driver);
glcr::ErrorCode RunServer();
void HandleResponse(uint64_t lba, uint64_t size, uint64_t cap);
void HandleResponse(z_cap_t reply_port, uint64_t lba, uint64_t size,
z_cap_t cap);
private:
static const uint64_t kBuffSize = 1024;
uint64_t channel_cap_;
EndpointServer server_;
uint8_t read_buffer_[kBuffSize];
AhciDriver& driver_;
glcr::ErrorCode HandleRead(const DenaliRead& read);
glcr::ErrorCode HandleRead(const DenaliRead& read, z_cap_t reply_port);
};

View File

@ -1,15 +1,17 @@
#pragma once
#include <mammoth/channel.h>
#include <glacier/status/error_or.h>
#include <mammoth/endpoint_client.h>
#include <mammoth/memory_region.h>
class DenaliClient {
public:
DenaliClient(const Channel& channel) : channel_(channel) {}
DenaliClient(const EndpointClient& endpoint) : endpoint_(endpoint) {}
MappedMemoryRegion ReadSectors(uint64_t device_id, uint64_t lba,
uint64_t num_sectors);
glcr::ErrorOr<MappedMemoryRegion> ReadSectors(uint64_t device_id,
uint64_t lba,
uint64_t num_sectors);
private:
Channel channel_;
EndpointClient endpoint_;
};

View File

@ -49,8 +49,9 @@ struct PartitionEntry {
GptReader::GptReader(const DenaliClient& client) : denali_(client) {}
z_err_t GptReader::ParsePartitionTables() {
MappedMemoryRegion lba_1_and_2 = denali_.ReadSectors(0, 0, 2);
glcr::ErrorCode GptReader::ParsePartitionTables() {
ASSIGN_OR_RETURN(MappedMemoryRegion lba_1_and_2,
denali_.ReadSectors(0, 0, 2));
uint16_t* mbr_sig = reinterpret_cast<uint16_t*>(lba_1_and_2.vaddr() + 0x1FE);
if (*mbr_sig != 0xAA55) {
return glcr::FAILED_PRECONDITION;
@ -83,8 +84,9 @@ z_err_t GptReader::ParsePartitionTables() {
dbgln("partition_entry_size: %x", entry_size);
dbgln("Num blocks: %x", num_blocks);
MappedMemoryRegion part_table =
denali_.ReadSectors(0, header->lba_partition_entries, num_blocks);
ASSIGN_OR_RETURN(
MappedMemoryRegion part_table,
denali_.ReadSectors(0, header->lba_partition_entries, num_blocks));
dbgln("Entries");
for (uint64_t i = 0; i < num_partitions; i++) {
PartitionEntry* entry = reinterpret_cast<PartitionEntry*>(

View File

@ -8,7 +8,7 @@ class GptReader {
public:
GptReader(const DenaliClient&);
z_err_t ParsePartitionTables();
glcr::ErrorCode ParsePartitionTables();
private:
DenaliClient denali_;

View File

@ -1,6 +1,6 @@
#include <denali/denali.h>
#include <mammoth/channel.h>
#include <mammoth/debug.h>
#include <mammoth/endpoint_client.h>
#include <mammoth/init.h>
#include <mammoth/process.h>
#include <zcall.h>
@ -17,13 +17,13 @@ uint64_t main(uint64_t port_cap) {
uint64_t vaddr;
check(ZAddressSpaceMap(gSelfVmasCap, 0, gBootDenaliVmmoCap, &vaddr));
auto local_or = SpawnProcessFromElfRegion(vaddr);
if (!local_or) {
check(local_or.error());
auto endpoint_or = SpawnProcessFromElfRegion(vaddr);
if (!endpoint_or) {
check(endpoint_or.error());
}
Channel local = local_or.value();
EndpointClient endpoint = endpoint_or.value();
DenaliClient client(local);
DenaliClient client(endpoint);
GptReader reader(client);
check(reader.ParsePartitionTables());

View File

@ -20,9 +20,12 @@ add_executable(zion
memory/user_stack_manager.cpp
object/address_space.cpp
object/channel.cpp
object/endpoint.cpp
object/ipc_object.cpp
object/memory_object.cpp
object/port.cpp
object/process.cpp
object/reply_port.cpp
object/thread.cpp
scheduler/context_switch.s
scheduler/jump_user_space.s
@ -30,10 +33,9 @@ add_executable(zion
scheduler/scheduler.cpp
syscall/address_space.cpp
syscall/capability.cpp
syscall/channel.cpp
syscall/debug.cpp
syscall/ipc.cpp
syscall/memory_object.cpp
syscall/port.cpp
syscall/process.cpp
syscall/syscall.cpp
syscall/syscall_enter.s

View File

@ -123,6 +123,16 @@ SYS5(PortPoll, z_cap_t, port_cap, uint64_t*, num_bytes, void*, data, uint64_t*,
SYS2(IrqRegister, uint64_t, irq_num, z_cap_t*, port_cap);
SYS1(EndpointCreate, z_cap_t*, endpoint_cap);
SYS4(EndpointSend, z_cap_t, endpoint_cap, uint64_t, num_bytes, const void*,
data, z_cap_t*, reply_port_cap);
SYS4(EndpointRecv, z_cap_t, endpoint_cap, uint64_t*, num_bytes, void*, data,
z_cap_t*, reply_port_cap);
SYS5(ReplyPortSend, z_cap_t, reply_port_cap, uint64_t, num_bytes, const void*,
data, uint64_t, num_caps, z_cap_t*, caps);
SYS5(ReplyPortRecv, z_cap_t, reply_port_cap, uint64_t*, num_bytes, void*, data,
uint64_t*, num_caps, z_cap_t*, caps);
SYS2(CapDuplicate, z_cap_t, cap_in, z_cap_t*, cap_out);
SYS1(Debug, const char*, message);

View File

@ -5,6 +5,6 @@
extern uint64_t gSelfProcCap;
extern uint64_t gSelfVmasCap;
extern uint64_t gInitChannelCap;
extern uint64_t gInitEndpointCap;
extern uint64_t gBootDenaliVmmoCap;

View File

@ -41,6 +41,13 @@ const uint64_t kZionPortPoll = 0x53;
const uint64_t kZionIrqRegister = 0x58;
const uint64_t kZionEndpointCreate = 0x60;
const uint64_t kZionEndpointSend = 0x61;
const uint64_t kZionEndpointRecv = 0x62;
const uint64_t kZionReplyPortSend = 0x63;
const uint64_t kZionReplyPortRecv = 0x64;
const uint64_t kZionEndpointCall = 0x65;
#define Z_IRQ_PCI_BASE 0x30
// Capability Calls
@ -78,6 +85,6 @@ typedef uint64_t z_cap_t;
#define Z_INIT_SELF_PROC 0x4000'0000
#define Z_INIT_SELF_VMAS 0x4000'0001
#define Z_INIT_CHANNEL 0x4100'0000
#define Z_INIT_ENDPOINT 0x4100'0000
#define Z_BOOT_DENALI_VMMO 0x4200'0000

View File

@ -142,7 +142,7 @@ glcr::RefPtr<Port> pci1_port;
extern "C" void isr_pci1();
extern "C" void interrupt_pci1(InterruptFrame*) {
dbgln("Interrupt PCI line 1");
pci1_port->Write(0, nullptr, 0, nullptr);
pci1_port->Send(0, nullptr, 0, nullptr);
SignalEOI();
}

View File

@ -11,6 +11,7 @@ class LinkedList {
LinkedList(const LinkedList&) = delete;
bool empty() const { return size_ == 0; }
uint64_t size() const { return size_; }
void PushBack(const T& item) {

View File

@ -26,12 +26,31 @@ z_err_t UnboundedMessageQueue::PushBack(uint64_t num_bytes, const void* bytes,
message->caps.PushBack(cap);
}
MutexHolder h(mutex_);
pending_messages_.PushBack(message);
if (blocked_threads_.size() > 0) {
auto thread = blocked_threads_.PopFront();
thread->SetState(Thread::RUNNABLE);
gScheduler->Enqueue(thread);
}
return glcr::OK;
}
z_err_t UnboundedMessageQueue::PopFront(uint64_t* num_bytes, void* bytes,
uint64_t* num_caps, z_cap_t* caps) {
mutex_.Lock();
while (pending_messages_.empty()) {
auto thread = gScheduler->CurrentThread();
thread->SetState(Thread::BLOCKED);
blocked_threads_.PushBack(thread);
mutex_.Unlock();
gScheduler->Yield();
mutex_.Lock();
}
mutex_.Unlock();
MutexHolder lock(mutex_);
auto next_msg = pending_messages_.PeekFront();
if (next_msg->num_bytes > *num_bytes) {
return glcr::BUFFER_SIZE;
@ -58,6 +77,7 @@ z_err_t UnboundedMessageQueue::PopFront(uint64_t* num_bytes, void* bytes,
void UnboundedMessageQueue::WriteKernel(uint64_t init,
glcr::RefPtr<Capability> cap) {
// FIXME: Add synchronization here in case it is ever used outside of init.
auto msg = glcr::MakeShared<Message>();
msg->bytes = new uint8_t[8];
msg->num_bytes = sizeof(init);
@ -70,3 +90,79 @@ void UnboundedMessageQueue::WriteKernel(uint64_t init,
pending_messages_.PushBack(msg);
}
glcr::ErrorCode SingleMessageQueue::PushBack(uint64_t num_bytes,
const void* bytes,
uint64_t num_caps,
const z_cap_t* caps) {
MutexHolder h(mutex_);
if (has_written_) {
return glcr::FAILED_PRECONDITION;
}
num_bytes_ = num_bytes;
bytes_ = new uint8_t[num_bytes];
for (uint64_t i = 0; i < num_bytes; i++) {
bytes_[i] = reinterpret_cast<const uint8_t*>(bytes)[i];
}
for (uint64_t i = 0; i < num_caps; i++) {
// FIXME: This would feel safer closer to the relevant syscall.
auto cap = gScheduler->CurrentProcess().ReleaseCapability(caps[i]);
if (!cap) {
return glcr::CAP_NOT_FOUND;
}
caps_.PushBack(cap);
}
has_written_ = true;
if (blocked_threads_.size() > 0) {
auto thread = blocked_threads_.PopFront();
thread->SetState(Thread::RUNNABLE);
gScheduler->Enqueue(thread);
}
return glcr::OK;
}
glcr::ErrorCode SingleMessageQueue::PopFront(uint64_t* num_bytes, void* bytes,
uint64_t* num_caps,
z_cap_t* caps) {
mutex_.Lock();
while (!has_written_) {
auto thread = gScheduler->CurrentThread();
thread->SetState(Thread::BLOCKED);
blocked_threads_.PushBack(thread);
mutex_.Unlock();
gScheduler->Yield();
mutex_.Lock();
}
mutex_.Unlock();
MutexHolder lock(mutex_);
if (has_read_) {
return glcr::FAILED_PRECONDITION;
}
if (num_bytes_ > *num_bytes) {
return glcr::BUFFER_SIZE;
}
if (caps_.size() > *num_caps) {
return glcr::BUFFER_SIZE;
}
*num_bytes = num_bytes_;
for (uint64_t i = 0; i < num_bytes_; i++) {
reinterpret_cast<uint8_t*>(bytes)[i] = bytes_[i];
}
*num_caps = caps_.size();
auto& proc = gScheduler->CurrentProcess();
for (uint64_t i = 0; i < *num_caps; i++) {
caps[i] = proc.AddExistingCapability(caps_.PopFront());
}
has_read_ = true;
return glcr::OK;
}
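
Both queue types above use the same hand-rolled sleep/wake handshake: the reader marks itself BLOCKED and yields in a loop until a writer enqueues data and moves one blocked thread back to RUNNABLE. As a user-space analogue (not kernel code), the pattern corresponds to a condition-variable queue; note the cv version holds the lock continuously from wakeup to dequeue, avoiding the brief unlocked window between the wait loop and the re-acquired MutexHolder above:

#include <condition_variable>
#include <mutex>
#include <queue>

template <typename T>
class BlockingQueue {
 public:
  void PushBack(T item) {  // kernel analogue: PushBack + wake one thread
    std::lock_guard<std::mutex> lock(mutex_);
    queue_.push(std::move(item));
    cv_.notify_one();  // ~ SetState(Thread::RUNNABLE) + gScheduler->Enqueue
  }

  T PopFront() {  // kernel analogue: the block/yield loop
    std::unique_lock<std::mutex> lock(mutex_);
    cv_.wait(lock, [this] { return !queue_.empty(); });
    T item = std::move(queue_.front());
    queue_.pop();
    return item;
  }

 private:
  std::mutex mutex_;
  std::condition_variable cv_;
  std::queue<T> queue_;
};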

View File

@ -1,20 +1,30 @@
#pragma once
#include <glacier/container/intrusive_list.h>
#include <glacier/memory/ref_ptr.h>
#include <glacier/memory/shared_ptr.h>
#include <glacier/status/error.h>
#include "capability/capability.h"
#include "include/ztypes.h"
#include "lib/linked_list.h"
#include "lib/mutex.h"
class MessageQueue {
public:
virtual ~MessageQueue() {}
virtual z_err_t PushBack(uint64_t num_bytes, const void* bytes,
uint64_t num_caps, const z_cap_t* caps) = 0;
virtual z_err_t PopFront(uint64_t* num_bytes, void* bytes, uint64_t* num_caps,
z_cap_t* caps) = 0;
virtual glcr::ErrorCode PushBack(uint64_t num_bytes, const void* bytes,
uint64_t num_caps, const z_cap_t* caps) = 0;
virtual glcr::ErrorCode PopFront(uint64_t* num_bytes, void* bytes,
uint64_t* num_caps, z_cap_t* caps) = 0;
virtual bool empty() = 0;
protected:
Mutex mutex_{"message"};
// FIXME: This maybe shouldn't be shared between classes since the
// SingleMessageQueue should only ever have one blocked thread.
glcr::IntrusiveList<Thread> blocked_threads_;
};
class UnboundedMessageQueue : public MessageQueue {
@ -24,15 +34,17 @@ class UnboundedMessageQueue : public MessageQueue {
UnboundedMessageQueue& operator=(const UnboundedMessageQueue&) = delete;
virtual ~UnboundedMessageQueue() override {}
z_err_t PushBack(uint64_t num_bytes, const void* bytes, uint64_t num_caps,
const z_cap_t* caps) override;
z_err_t PopFront(uint64_t* num_bytes, void* bytes, uint64_t* num_caps,
z_cap_t* caps) override;
glcr::ErrorCode PushBack(uint64_t num_bytes, const void* bytes,
uint64_t num_caps, const z_cap_t* caps) override;
glcr::ErrorCode PopFront(uint64_t* num_bytes, void* bytes, uint64_t* num_caps,
z_cap_t* caps) override;
void WriteKernel(uint64_t init, glcr::RefPtr<Capability> cap);
uint64_t size() { return pending_messages_.size(); }
bool empty() { return size() == 0; }
bool empty() override {
MutexHolder h(mutex_);
return pending_messages_.size() == 0;
}
private:
struct Message {
@ -44,3 +56,28 @@ class UnboundedMessageQueue : public MessageQueue {
LinkedList<glcr::SharedPtr<Message>> pending_messages_;
};
class SingleMessageQueue : public MessageQueue {
public:
SingleMessageQueue() {}
SingleMessageQueue(const SingleMessageQueue&) = delete;
SingleMessageQueue(SingleMessageQueue&&) = delete;
virtual ~SingleMessageQueue() override {}
glcr::ErrorCode PushBack(uint64_t num_bytes, const void* bytes,
uint64_t num_caps, const z_cap_t* caps) override;
glcr::ErrorCode PopFront(uint64_t* num_bytes, void* bytes, uint64_t* num_caps,
z_cap_t* caps) override;
bool empty() override {
MutexHolder h(mutex_);
return has_written_ == false;
};
private:
bool has_written_ = false;
bool has_read_ = false;
uint64_t num_bytes_;
uint8_t* bytes_;
LinkedList<glcr::RefPtr<Capability>> caps_;
};

View File

@ -5,7 +5,7 @@
void Mutex::Lock() {
while (__atomic_fetch_or(&lock_, 0x1, __ATOMIC_SEQ_CST) == 0x1) {
dbgln("Lock sleep: %s", name_);
// dbgln("Lock sleep: %s", name_);
gScheduler->Preempt();
}
}

View File

@ -5,44 +5,9 @@
glcr::Pair<glcr::RefPtr<Channel>, glcr::RefPtr<Channel>>
Channel::CreateChannelPair() {
auto c1 = glcr::MakeRefCounted<Channel>();
auto c2 = glcr::MakeRefCounted<Channel>();
auto c1 = glcr::AdoptPtr(new Channel);
auto c2 = glcr::AdoptPtr(new Channel);
c1->SetPeer(c2);
c2->SetPeer(c1);
return {c1, c2};
}
z_err_t Channel::Write(uint64_t num_bytes, const void* bytes, uint64_t num_caps,
const z_cap_t* caps) {
return peer_->WriteInternal(num_bytes, bytes, num_caps, caps);
}
z_err_t Channel::Read(uint64_t* num_bytes, void* bytes, uint64_t* num_caps,
z_cap_t* caps) {
mutex_.Lock();
while (message_queue_.empty()) {
auto thread = gScheduler->CurrentThread();
thread->SetState(Thread::BLOCKED);
blocked_threads_.PushBack(thread);
mutex_.Unlock();
gScheduler->Yield();
mutex_.Lock();
}
mutex_.Unlock();
MutexHolder lock(mutex_);
return message_queue_.PopFront(num_bytes, bytes, num_caps, caps);
}
z_err_t Channel::WriteInternal(uint64_t num_bytes, const void* bytes,
uint64_t num_caps, const z_cap_t* caps) {
MutexHolder lock(mutex_);
RET_ERR(message_queue_.PushBack(num_bytes, bytes, num_caps, caps));
if (blocked_threads_.size() > 0) {
auto thread = blocked_threads_.PopFront();
thread->SetState(Thread::RUNNABLE);
gScheduler->Enqueue(thread);
}
return glcr::OK;
}

View File

@ -1,6 +1,5 @@
#pragma once
#include <glacier/container/intrusive_list.h>
#include <glacier/container/pair.h>
#include <glacier/memory/ref_ptr.h>
@ -8,6 +7,7 @@
#include "include/ztypes.h"
#include "lib/message_queue.h"
#include "lib/mutex.h"
#include "object/ipc_object.h"
#include "object/kernel_object.h"
#include "usr/zcall_internal.h"
@ -18,7 +18,7 @@ struct KernelObjectTag<Channel> {
static const uint64_t type = KernelObject::CHANNEL;
};
class Channel : public KernelObject {
class Channel : public IpcObject {
public:
uint64_t TypeTag() override { return KernelObject::CHANNEL; }
static glcr::Pair<glcr::RefPtr<Channel>, glcr::RefPtr<Channel>>
@ -26,25 +26,20 @@ class Channel : public KernelObject {
glcr::RefPtr<Channel> peer() { return peer_; }
z_err_t Write(uint64_t num_bytes, const void* bytes, uint64_t num_caps,
const z_cap_t* caps);
z_err_t Read(uint64_t* num_bytes, void* bytes, uint64_t* num_caps,
z_cap_t* caps);
virtual MessageQueue& GetSendMessageQueue() override {
return peer_->message_queue_;
}
virtual MessageQueue& GetRecvMessageQueue() override {
return message_queue_;
}
private:
// FIXME: We will likely never close the channel based on this
// circular dependency.
glcr::RefPtr<Channel> peer_{nullptr};
Mutex mutex_{"channel"};
UnboundedMessageQueue message_queue_;
glcr::IntrusiveList<Thread> blocked_threads_;
friend class glcr::MakeRefCountedFriend<Channel>;
Channel() {}
void SetPeer(const glcr::RefPtr<Channel>& peer) { peer_ = peer; }
z_err_t WriteInternal(uint64_t num_bytes, const void* bytes,
uint64_t num_caps, const z_cap_t* caps);
};

zion/object/endpoint.cpp (new file)
View File

@ -0,0 +1,7 @@
#include "object/endpoint.h"
#include "scheduler/scheduler.h"
glcr::RefPtr<Endpoint> Endpoint::Create() {
return glcr::AdoptPtr(new Endpoint);
}

zion/object/endpoint.h (new file)
View File

@ -0,0 +1,39 @@
#pragma once
#include <glacier/container/intrusive_list.h>
#include <glacier/memory/ref_ptr.h>
#include <glacier/status/error.h>
#include "lib/message_queue.h"
#include "lib/mutex.h"
#include "object/ipc_object.h"
#include "object/kernel_object.h"
class Endpoint;
class ReplyPort;
template <>
struct KernelObjectTag<Endpoint> {
static const uint64_t type = KernelObject::ENDPOINT;
};
class Endpoint : public IpcObject {
public:
uint64_t TypeTag() override { return KernelObject::ENDPOINT; }
static glcr::RefPtr<Endpoint> Create();
glcr::ErrorCode Read(uint64_t* num_bytes, void* data,
z_cap_t* reply_port_cap);
virtual MessageQueue& GetSendMessageQueue() override {
return message_queue_;
}
virtual MessageQueue& GetRecvMessageQueue() override {
return message_queue_;
}
private:
UnboundedMessageQueue message_queue_;
Endpoint() {}
};

View File

@ -0,0 +1,15 @@
#include "object/ipc_object.h"
#include "scheduler/scheduler.h"
glcr::ErrorCode IpcObject::Send(uint64_t num_bytes, const void* bytes,
uint64_t num_caps, const z_cap_t* caps) {
auto& message_queue = GetSendMessageQueue();
return message_queue.PushBack(num_bytes, bytes, num_caps, caps);
}
glcr::ErrorCode IpcObject::Recv(uint64_t* num_bytes, void* bytes,
uint64_t* num_caps, z_cap_t* caps) {
auto& message_queue = GetRecvMessageQueue();
return message_queue.PopFront(num_bytes, bytes, num_caps, caps);
}

zion/object/ipc_object.h (new file)
View File

@ -0,0 +1,23 @@
#pragma once
#include <glacier/status/error.h>
#include "include/ztypes.h"
#include "lib/message_queue.h"
#include "object/kernel_object.h"
class IpcObject : public KernelObject {
public:
IpcObject(){};
virtual ~IpcObject() {}
virtual glcr::ErrorCode Send(uint64_t num_bytes, const void* bytes,
uint64_t num_caps, const z_cap_t* caps) final;
virtual glcr::ErrorCode Recv(uint64_t* num_bytes, void* bytes,
uint64_t* num_caps, z_cap_t* caps) final;
bool HasMessages() { return !GetRecvMessageQueue().empty(); }
virtual MessageQueue& GetSendMessageQueue() = 0;
virtual MessageQueue& GetRecvMessageQueue() = 0;
};
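
IpcObject is a small template-method base: Send and Recv are final and only route to the two queue hooks, so each concrete type just decides the wiring. In this change, Channel sends into its peer's queue and receives from its own, while Endpoint and ReplyPort return the same queue for both directions: a many-to-one request queue and a one-shot reply slot, respectively.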

View File

@ -12,6 +12,8 @@ class KernelObject : public glcr::RefCounted<KernelObject> {
MEMORY_OBJECT = 0x4,
CHANNEL = 0x5,
PORT = 0x6,
ENDPOINT = 0x7,
REPLY_PORT = 0x8,
};
virtual uint64_t TypeTag() = 0;

View File

@ -2,43 +2,6 @@
#include "scheduler/scheduler.h"
Port::Port() {}
z_err_t Port::Write(uint64_t num_bytes, const void* bytes, uint64_t num_caps,
const z_cap_t* caps) {
MutexHolder h(mutex_);
RET_ERR(message_queue_.PushBack(num_bytes, bytes, num_caps, caps));
if (blocked_threads_.size() > 0) {
auto thread = blocked_threads_.PopFront();
thread->SetState(Thread::RUNNABLE);
gScheduler->Enqueue(thread);
}
return glcr::OK;
}
z_err_t Port::Read(uint64_t* num_bytes, void* bytes, uint64_t* num_caps,
z_cap_t* caps) {
mutex_.Lock();
while (message_queue_.empty()) {
auto thread = gScheduler->CurrentThread();
thread->SetState(Thread::BLOCKED);
blocked_threads_.PushBack(thread);
mutex_.Unlock();
gScheduler->Yield();
mutex_.Lock();
}
mutex_.Unlock();
MutexHolder lock(mutex_);
return message_queue_.PopFront(num_bytes, bytes, num_caps, caps);
}
void Port::WriteKernel(uint64_t init, glcr::RefPtr<Capability> cap) {
MutexHolder h(mutex_);
message_queue_.WriteKernel(init, cap);
}
bool Port::HasMessages() {
MutexHolder h(mutex_);
return !message_queue_.empty();
}

View File

@ -6,6 +6,7 @@
#include "capability/capability.h"
#include "lib/message_queue.h"
#include "lib/mutex.h"
#include "object/ipc_object.h"
#include "object/kernel_object.h"
#include "object/thread.h"
#include "usr/zcall_internal.h"
@ -17,24 +18,21 @@ struct KernelObjectTag<Port> {
static const uint64_t type = KernelObject::PORT;
};
class Port : public KernelObject {
class Port : public IpcObject {
public:
uint64_t TypeTag() override { return KernelObject::PORT; }
Port();
z_err_t Write(uint64_t num_bytes, const void* bytes, uint64_t num_caps,
const z_cap_t* caps);
z_err_t Read(uint64_t* num_bytes, void* bytes, uint64_t* num_caps,
z_cap_t* caps);
Port() = default;
void WriteKernel(uint64_t init, glcr::RefPtr<Capability> cap);
bool HasMessages();
virtual MessageQueue& GetSendMessageQueue() override {
return message_queue_;
}
virtual MessageQueue& GetRecvMessageQueue() override {
return message_queue_;
}
private:
UnboundedMessageQueue message_queue_;
glcr::IntrusiveList<Thread> blocked_threads_;
Mutex mutex_{"Port"};
};

View File

@ -0,0 +1,7 @@
#include "object/reply_port.h"
#include "scheduler/scheduler.h"
glcr::RefPtr<ReplyPort> ReplyPort::Create() {
return glcr::AdoptPtr(new ReplyPort);
}

zion/object/reply_port.h (new file)
View File

@ -0,0 +1,33 @@
#pragma once
#include <glacier/memory/ref_ptr.h>
#include "lib/message_queue.h"
#include "lib/mutex.h"
#include "object/ipc_object.h"
#include "object/kernel_object.h"
class ReplyPort;
template <>
struct KernelObjectTag<ReplyPort> {
static const uint64_t type = KernelObject::REPLY_PORT;
};
class ReplyPort : public IpcObject {
public:
uint64_t TypeTag() override { return KernelObject::REPLY_PORT; }
static glcr::RefPtr<ReplyPort> Create();
virtual MessageQueue& GetSendMessageQueue() override {
return message_holder_;
}
virtual MessageQueue& GetRecvMessageQueue() override {
return message_holder_;
}
private:
SingleMessageQueue message_holder_;
ReplyPort() {}
};
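
Assembled from the syscalls added in this diff, one full request/reply cycle at the raw ABI level looks roughly like this (req, resp, server_buf, and the capability variables are hypothetical placeholders):

// Client: send a request, keeping a read capability to the new reply port.
z_cap_t reply_port_cap;
RET_ERR(ZEndpointSend(endpoint_cap, sizeof(req), &req, &reply_port_cap));

// Server: receive the request plus a write-capable reply-port capability...
uint64_t num_bytes = sizeof(server_buf);
z_cap_t server_reply_cap;
RET_ERR(ZEndpointRecv(endpoint_cap, &num_bytes, server_buf, &server_reply_cap));
// ...and answer exactly once, attaching any capabilities to hand back.
RET_ERR(ZReplyPortSend(server_reply_cap, sizeof(resp), &resp, 1, &mem_cap));

// Client: collect the response bytes and the attached capability.
uint64_t resp_bytes = sizeof(resp);
uint64_t num_caps = 1;
RET_ERR(ZReplyPortRecv(reply_port_cap, &resp_bytes, &resp, &num_caps, &mem_cap));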

View File

@ -1,31 +0,0 @@
#include "syscall/channel.h"
#include "capability/capability.h"
#include "scheduler/scheduler.h"
z_err_t ChannelCreate(ZChannelCreateReq* req) {
auto& proc = gScheduler->CurrentProcess();
auto chan_pair = Channel::CreateChannelPair();
*req->channel1 = proc.AddNewCapability(chan_pair.first(), ZC_WRITE | ZC_READ);
*req->channel2 =
proc.AddNewCapability(chan_pair.second(), ZC_WRITE | ZC_READ);
return glcr::OK;
}
z_err_t ChannelSend(ZChannelSendReq* req) {
auto& proc = gScheduler->CurrentProcess();
auto chan_cap = proc.GetCapability(req->chan_cap);
RET_ERR(ValidateCapability<Channel>(chan_cap, ZC_WRITE));
auto chan = chan_cap->obj<Channel>();
return chan->Write(req->num_bytes, req->data, req->num_caps, req->caps);
}
z_err_t ChannelRecv(ZChannelRecvReq* req) {
auto& proc = gScheduler->CurrentProcess();
auto chan_cap = proc.GetCapability(req->chan_cap);
RET_ERR(ValidateCapability<Channel>(chan_cap, ZC_READ));
auto chan = chan_cap->obj<Channel>();
return chan->Read(req->num_bytes, req->data, req->num_caps, req->caps);
}

View File

@ -1,7 +0,0 @@
#pragma once
#include "include/zcall.h"
z_err_t ChannelCreate(ZChannelCreateReq* resp);
z_err_t ChannelSend(ZChannelSendReq* req);
z_err_t ChannelRecv(ZChannelRecvReq* req);

zion/syscall/ipc.cpp (new file)
View File

@ -0,0 +1,145 @@
#include "syscall/ipc.h"
#include "capability/capability.h"
#include "interrupt/interrupt.h"
#include "object/endpoint.h"
#include "object/reply_port.h"
#include "scheduler/scheduler.h"
z_err_t ChannelCreate(ZChannelCreateReq* req) {
auto& proc = gScheduler->CurrentProcess();
auto chan_pair = Channel::CreateChannelPair();
*req->channel1 = proc.AddNewCapability(chan_pair.first(), ZC_WRITE | ZC_READ);
*req->channel2 =
proc.AddNewCapability(chan_pair.second(), ZC_WRITE | ZC_READ);
return glcr::OK;
}
z_err_t ChannelSend(ZChannelSendReq* req) {
auto& proc = gScheduler->CurrentProcess();
auto chan_cap = proc.GetCapability(req->chan_cap);
RET_ERR(ValidateCapability<Channel>(chan_cap, ZC_WRITE));
auto chan = chan_cap->obj<Channel>();
return chan->Send(req->num_bytes, req->data, req->num_caps, req->caps);
}
z_err_t ChannelRecv(ZChannelRecvReq* req) {
auto& proc = gScheduler->CurrentProcess();
auto chan_cap = proc.GetCapability(req->chan_cap);
RET_ERR(ValidateCapability<Channel>(chan_cap, ZC_READ));
auto chan = chan_cap->obj<Channel>();
return chan->Recv(req->num_bytes, req->data, req->num_caps, req->caps);
}
z_err_t PortCreate(ZPortCreateReq* req) {
auto& proc = gScheduler->CurrentProcess();
auto port = glcr::MakeRefCounted<Port>();
*req->port_cap = proc.AddNewCapability(port, ZC_WRITE | ZC_READ);
return glcr::OK;
}
z_err_t PortSend(ZPortSendReq* req) {
auto& proc = gScheduler->CurrentProcess();
auto port_cap = proc.GetCapability(req->port_cap);
RET_ERR(ValidateCapability<Port>(port_cap, ZC_WRITE));
auto port = port_cap->obj<Port>();
return port->Send(req->num_bytes, req->data, req->num_caps, req->caps);
}
z_err_t PortRecv(ZPortRecvReq* req) {
auto& proc = gScheduler->CurrentProcess();
auto port_cap = proc.GetCapability(req->port_cap);
RET_ERR(ValidateCapability<Port>(port_cap, ZC_READ));
auto port = port_cap->obj<Port>();
ZMessage message{
.num_bytes = *req->num_bytes,
.data = const_cast<void*>(req->data),
.num_caps = *req->num_caps,
.caps = req->caps,
};
return port->Recv(req->num_bytes, req->data, req->num_caps, req->caps);
}
z_err_t PortPoll(ZPortPollReq* req) {
auto& proc = gScheduler->CurrentProcess();
auto port_cap = proc.GetCapability(req->port_cap);
RET_ERR(ValidateCapability<Port>(port_cap, ZC_READ));
auto port = port_cap->obj<Port>();
// FIXME: Race condition here where this call could block if the last message
// is removed between this check and the port read.
if (!port->HasMessages()) {
return glcr::EMPTY;
}
return port->Recv(req->num_bytes, req->data, req->num_caps, req->caps);
}
z_err_t IrqRegister(ZIrqRegisterReq* req) {
auto& proc = gScheduler->CurrentProcess();
if (req->irq_num != Z_IRQ_PCI_BASE) {
// FIXME: Don't hardcode this nonsense.
return glcr::UNIMPLEMENTED;
}
glcr::RefPtr<Port> port = glcr::MakeRefCounted<Port>();
*req->port_cap = proc.AddNewCapability(port, ZC_READ | ZC_WRITE);
RegisterPciPort(port);
return glcr::OK;
}
glcr::ErrorCode EndpointCreate(ZEndpointCreateReq* req) {
auto& proc = gScheduler->CurrentProcess();
*req->endpoint_cap =
proc.AddNewCapability(Endpoint::Create(), ZC_READ | ZC_WRITE);
return glcr::OK;
}
glcr::ErrorCode EndpointSend(ZEndpointSendReq* req) {
auto& proc = gScheduler->CurrentProcess();
auto endpoint_cap = proc.GetCapability(req->endpoint_cap);
ValidateCapability<Endpoint>(endpoint_cap, ZC_WRITE);
auto endpoint = endpoint_cap->obj<Endpoint>();
auto reply_port = ReplyPort::Create();
*req->reply_port_cap = proc.AddNewCapability(reply_port, ZC_READ);
uint64_t reply_port_cap_to_send = proc.AddNewCapability(reply_port, ZC_WRITE);
return endpoint->Send(req->num_bytes, req->data, 1, &reply_port_cap_to_send);
}
glcr::ErrorCode EndpointRecv(ZEndpointRecvReq* req) {
auto& proc = gScheduler->CurrentProcess();
auto endpoint_cap = proc.GetCapability(req->endpoint_cap);
ValidateCapability<Endpoint>(endpoint_cap, ZC_READ);
auto endpoint = endpoint_cap->obj<Endpoint>();
uint64_t num_caps = 1;
RET_ERR(endpoint->Recv(req->num_bytes, req->data, &num_caps,
req->reply_port_cap));
if (num_caps != 1) {
return glcr::INTERNAL;
}
return glcr::OK;
}
glcr::ErrorCode ReplyPortSend(ZReplyPortSendReq* req) {
auto& proc = gScheduler->CurrentProcess();
auto reply_port_cap = proc.GetCapability(req->reply_port_cap);
ValidateCapability<ReplyPort>(reply_port_cap, ZC_WRITE);
auto reply_port = reply_port_cap->obj<ReplyPort>();
return reply_port->Send(req->num_bytes, req->data, req->num_caps, req->caps);
}
glcr::ErrorCode ReplyPortRecv(ZReplyPortRecvReq* req) {
auto& proc = gScheduler->CurrentProcess();
auto reply_port_cap = proc.GetCapability(req->reply_port_cap);
ValidateCapability<ReplyPort>(reply_port_cap, ZC_READ);
auto reply_port = reply_port_cap->obj<ReplyPort>();
return reply_port->Recv(req->num_bytes, req->data, req->num_caps, req->caps);
}
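
Note the capability choreography in EndpointSend above: the kernel mints one ReplyPort, gives the calling process a ZC_READ capability to it, and ships a separate ZC_WRITE capability to the receiver inside the message, so the server can only answer and the client can only listen. EndpointRecv then requires exactly one attached capability, returning glcr::INTERNAL otherwise.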

zion/syscall/ipc.h (new file)
View File

@ -0,0 +1,22 @@
#pragma once
#include <glacier/status/error.h>
#include "include/zcall.h"
glcr::ErrorCode ChannelCreate(ZChannelCreateReq* resp);
glcr::ErrorCode ChannelSend(ZChannelSendReq* req);
glcr::ErrorCode ChannelRecv(ZChannelRecvReq* req);
glcr::ErrorCode PortCreate(ZPortCreateReq* req);
glcr::ErrorCode PortSend(ZPortSendReq* req);
glcr::ErrorCode PortRecv(ZPortRecvReq* req);
glcr::ErrorCode PortPoll(ZPortPollReq* req);
glcr::ErrorCode IrqRegister(ZIrqRegisterReq* req);
glcr::ErrorCode EndpointCreate(ZEndpointCreateReq* req);
glcr::ErrorCode EndpointSend(ZEndpointSendReq* req);
glcr::ErrorCode EndpointRecv(ZEndpointRecvReq* req);
glcr::ErrorCode ReplyPortSend(ZReplyPortSendReq* req);
glcr::ErrorCode ReplyPortRecv(ZReplyPortRecvReq* req);

View File

@ -1,64 +0,0 @@
#include "syscall/port.h"
#include <glacier/status/error.h>
#include "capability/capability.h"
#include "interrupt/interrupt.h"
#include "scheduler/scheduler.h"
z_err_t PortCreate(ZPortCreateReq* req) {
auto& proc = gScheduler->CurrentProcess();
auto port = glcr::MakeRefCounted<Port>();
*req->port_cap = proc.AddNewCapability(port, ZC_WRITE | ZC_READ);
return glcr::OK;
}
z_err_t PortSend(ZPortSendReq* req) {
auto& proc = gScheduler->CurrentProcess();
auto port_cap = proc.GetCapability(req->port_cap);
RET_ERR(ValidateCapability<Port>(port_cap, ZC_WRITE));
auto port = port_cap->obj<Port>();
return port->Write(req->num_bytes, req->data, req->num_caps, req->caps);
}
z_err_t PortRecv(ZPortRecvReq* req) {
auto& proc = gScheduler->CurrentProcess();
auto port_cap = proc.GetCapability(req->port_cap);
RET_ERR(ValidateCapability<Port>(port_cap, ZC_READ));
auto port = port_cap->obj<Port>();
ZMessage message{
.num_bytes = *req->num_bytes,
.data = const_cast<void*>(req->data),
.num_caps = *req->num_caps,
.caps = req->caps,
};
return port->Read(req->num_bytes, req->data, req->num_caps, req->caps);
}
z_err_t PortPoll(ZPortPollReq* req) {
auto& proc = gScheduler->CurrentProcess();
auto port_cap = proc.GetCapability(req->port_cap);
RET_ERR(ValidateCapability<Port>(port_cap, ZC_READ));
auto port = port_cap->obj<Port>();
// FIXME: Race condition here where this call could block if the last message
// is removed between this check and the port read.
if (!port->HasMessages()) {
return glcr::EMPTY;
}
return port->Read(req->num_bytes, req->data, req->num_caps, req->caps);
}
z_err_t IrqRegister(ZIrqRegisterReq* req) {
auto& proc = gScheduler->CurrentProcess();
if (req->irq_num != Z_IRQ_PCI_BASE) {
// FIXME: Don't hardcode this nonsense.
return glcr::UNIMPLEMENTED;
}
glcr::RefPtr<Port> port = glcr::MakeRefCounted<Port>();
*req->port_cap = proc.AddNewCapability(port, ZC_READ | ZC_WRITE);
RegisterPciPort(port);
return glcr::OK;
}

View File

@ -1,9 +0,0 @@
#pragma once
#include "include/zcall.h"
z_err_t PortCreate(ZPortCreateReq* req);
z_err_t PortSend(ZPortSendReq* req);
z_err_t PortRecv(ZPortRecvReq* req);
z_err_t PortPoll(ZPortPollReq* req);
z_err_t IrqRegister(ZIrqRegisterReq* req);

View File

@ -6,10 +6,9 @@
#include "scheduler/scheduler.h"
#include "syscall/address_space.h"
#include "syscall/capability.h"
#include "syscall/channel.h"
#include "syscall/debug.h"
#include "syscall/ipc.h"
#include "syscall/memory_object.h"
#include "syscall/port.h"
#include "syscall/process.h"
#include "syscall/thread.h"
@ -63,16 +62,20 @@ extern "C" z_err_t SyscallHandler(uint64_t call_id, void* req) {
CASE(MemoryObjectCreatePhysical);
CASE(MemoryObjectCreateContiguous);
CASE(TempPcieConfigObjectCreate);
// syscall/channel.h
// syscall/ipc.h
CASE(ChannelCreate);
CASE(ChannelSend);
CASE(ChannelRecv);
// syscall/port.h
CASE(PortCreate);
CASE(PortSend);
CASE(PortRecv);
CASE(PortPoll);
CASE(IrqRegister);
CASE(EndpointCreate);
CASE(EndpointSend);
CASE(EndpointRecv);
CASE(ReplyPortSend);
CASE(ReplyPortRecv);
// syscall/capability.h
CASE(CapDuplicate);
// syscall/debug.h