[Zion] Add the ability to pass capabilities via endpoint call.
This commit is contained in:
parent 5b781bb394
commit b516087922
@@ -13,6 +13,11 @@ class CapBuffer {
 
   ~CapBuffer() { delete[] buffer_; }
 
+  void Reset() {
+    // FIXME: Zero out caps here?
+    used_slots_ = 0;
+  }
+
   uint64_t* RawPtr() { return buffer_; }
 
   uint64_t UsedSlots() { return used_slots_; }
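
For context, the CapBuffer extended above appears to be a flat array of capability slots plus a count of how many slots the serializer has filled. A rough sketch of that shape follows; the constructor and private members are assumptions, only Reset(), RawPtr(), and UsedSlots() come from the diff itself.

// Sketch of the assumed CapBuffer layout (not the repository's exact code).
#include <stdint.h>

namespace glcr {

class CapBuffer {
 public:
  explicit CapBuffer(uint64_t num_slots)
      : buffer_(new uint64_t[num_slots]), num_slots_(num_slots) {}
  ~CapBuffer() { delete[] buffer_; }

  void Reset() {
    // Open question from the commit: should the slots be zeroed as well?
    used_slots_ = 0;
  }

  uint64_t* RawPtr() { return buffer_; }
  uint64_t UsedSlots() { return used_slots_; }

 private:
  uint64_t* buffer_;    // assumed backing storage for capability slots
  uint64_t num_slots_;  // assumed capacity
  uint64_t used_slots_ = 0;
};

}  // namespace glcr
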
@@ -31,7 +31,7 @@ template <typename Req, typename Resp>
 glcr::ErrorOr<glcr::Pair<Resp, z_cap_t>> EndpointClient::CallEndpointGetCap(
     const Req& req) {
   uint64_t reply_port_cap;
-  RET_ERR(ZEndpointSend(cap_, sizeof(Req), &req, &reply_port_cap));
+  RET_ERR(ZEndpointSend(cap_, sizeof(Req), &req, 0, nullptr, &reply_port_cap));
 
   Resp resp;
   z_cap_t cap = 0;
@@ -49,7 +49,7 @@ glcr::ErrorOr<glcr::Pair<Resp, z_cap_t>> EndpointClient::CallEndpointGetCap(
 template <typename Req, typename Resp>
 glcr::ErrorOr<Resp> EndpointClient::CallEndpoint(const Req& req) {
   uint64_t reply_port_cap;
-  RET_ERR(ZEndpointSend(cap_, sizeof(Req), &req, &reply_port_cap));
+  RET_ERR(ZEndpointSend(cap_, sizeof(Req), &req, 0, nullptr, &reply_port_cap));
 
   Resp resp;
   uint64_t num_bytes = sizeof(Resp);
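
Both EndpointClient stubs above now route through the six-argument ZEndpointSend and simply pass zero capabilities. A minimal sketch of the new call shape; the SendWithCaps helper name is hypothetical and not part of the commit.

// Sketch: num_caps = 0 with caps = nullptr reproduces the old four-argument
// behavior; callers that want to transfer capabilities hand in an array.
template <typename Req>
z_err_t SendWithCaps(z_cap_t endpoint_cap, const Req& req, uint64_t num_caps,
                     const z_cap_t* caps, z_cap_t* reply_port_cap) {
  return ZEndpointSend(endpoint_cap, sizeof(Req), &req, num_caps, caps,
                       reply_port_cap);
}
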
@@ -22,8 +22,10 @@ void EndpointServer::ServerThread() {
   while (true) {
     uint64_t message_size = kBufferSize;
     uint64_t reply_port_cap = 0;
-    glcr::ErrorCode err = ZEndpointRecv(endpoint_cap_, &message_size,
-                                        recieve_buffer_, &reply_port_cap);
+    uint64_t num_caps = 0;
+    glcr::ErrorCode err =
+        ZEndpointRecv(endpoint_cap_, &message_size, recieve_buffer_, &num_caps,
+                      nullptr, &reply_port_cap);
     if (err != glcr::OK) {
       dbgln("Error in receive: %x", err);
       continue;
@@ -16,12 +16,12 @@ glcr::ErrorCode YellowstoneClient::GetRegister(const Empty& request, RegisterInf
   buffer_.WriteAt<uint32_t>(0, kSentinel);
   buffer_.WriteAt<uint64_t>(8, 0);
 
+  // FIXME: We need to reset the cap buffer here.
   uint64_t length = request.SerializeToBytes(buffer_, /*offset=*/16, cap_buffer_);
   buffer_.WriteAt<uint32_t>(4, 16 + length);
 
   z_cap_t reply_port_cap;
-  // FIXME: We need to be able to send capabilities via endpoint call.
-  RET_ERR(ZEndpointSend(endpoint_, 16 + length, buffer_.RawPtr(), &reply_port_cap));
+  RET_ERR(ZEndpointSend(endpoint_, 16 + length, buffer_.RawPtr(), cap_buffer_.UsedSlots(), cap_buffer_.RawPtr(), &reply_port_cap));
 
   // FIXME: Add a way to zero out the first buffer.
   RET_ERR(ZReplyPortRecv(reply_port_cap, &buffer_size, buffer_.RawPtr(), &cap_size, cap_buffer_.RawPtr()));
@@ -48,12 +48,12 @@ glcr::ErrorCode YellowstoneClient::GetAhciInfo(const Empty& request, AhciInfo& r
   buffer_.WriteAt<uint32_t>(0, kSentinel);
   buffer_.WriteAt<uint64_t>(8, 1);
 
+  // FIXME: We need to reset the cap buffer here.
   uint64_t length = request.SerializeToBytes(buffer_, /*offset=*/16, cap_buffer_);
   buffer_.WriteAt<uint32_t>(4, 16 + length);
 
   z_cap_t reply_port_cap;
-  // FIXME: We need to be able to send capabilities via endpoint call.
-  RET_ERR(ZEndpointSend(endpoint_, 16 + length, buffer_.RawPtr(), &reply_port_cap));
+  RET_ERR(ZEndpointSend(endpoint_, 16 + length, buffer_.RawPtr(), cap_buffer_.UsedSlots(), cap_buffer_.RawPtr(), &reply_port_cap));
 
   // FIXME: Add a way to zero out the first buffer.
   RET_ERR(ZReplyPortRecv(reply_port_cap, &buffer_size, buffer_.RawPtr(), &cap_size, cap_buffer_.RawPtr()));
@@ -80,12 +80,12 @@ glcr::ErrorCode YellowstoneClient::GetDenali(const Empty& request, DenaliInfo& r
   buffer_.WriteAt<uint32_t>(0, kSentinel);
   buffer_.WriteAt<uint64_t>(8, 2);
 
+  // FIXME: We need to reset the cap buffer here.
   uint64_t length = request.SerializeToBytes(buffer_, /*offset=*/16, cap_buffer_);
   buffer_.WriteAt<uint32_t>(4, 16 + length);
 
   z_cap_t reply_port_cap;
-  // FIXME: We need to be able to send capabilities via endpoint call.
-  RET_ERR(ZEndpointSend(endpoint_, 16 + length, buffer_.RawPtr(), &reply_port_cap));
+  RET_ERR(ZEndpointSend(endpoint_, 16 + length, buffer_.RawPtr(), cap_buffer_.UsedSlots(), cap_buffer_.RawPtr(), &reply_port_cap));
 
   // FIXME: Add a way to zero out the first buffer.
   RET_ERR(ZReplyPortRecv(reply_port_cap, &buffer_size, buffer_.RawPtr(), &cap_size, cap_buffer_.RawPtr()));
@@ -42,14 +42,15 @@ Thread YellowstoneServerBase::RunServer() {
 
 void YellowstoneServerBase::ServerThread() {
   glcr::ByteBuffer recv_buffer(0x1000);
+  glcr::CapBuffer recv_cap(0x10);
   glcr::ByteBuffer resp_buffer(0x1000);
-  uint64_t resp_cap_size = 0x10;
-  glcr::CapBuffer resp_cap(resp_cap_size);
+  glcr::CapBuffer resp_cap(0x10);
   z_cap_t reply_port_cap;
 
   while (true) {
+    uint64_t recv_cap_size = 0x10;
     uint64_t recv_buf_size = 0x1000;
-    glcr::ErrorCode recv_err = ZEndpointRecv(endpoint_, &recv_buf_size, recv_buffer.RawPtr(), &reply_port_cap);
+    glcr::ErrorCode recv_err = ZEndpointRecv(endpoint_, &recv_buf_size, recv_buffer.RawPtr(), &recv_cap_size, recv_cap.RawPtr(), &reply_port_cap);
     if (recv_err != glcr::OK) {
       dbgln("Error in receive: %x", recv_err);
       continue;
@@ -58,7 +59,7 @@ void YellowstoneServerBase::ServerThread() {
     uint64_t resp_length = 0;
 
     glcr::ErrorCode reply_err = glcr::OK;
-    glcr::ErrorCode err = HandleRequest(recv_buffer, resp_buffer, resp_length, resp_cap);
+    glcr::ErrorCode err = HandleRequest(recv_buffer, recv_cap, resp_buffer, resp_length, resp_cap);
     if (err != glcr::OK) {
       WriteError(resp_buffer, err);
       reply_err = ZReplyPortSend(reply_port_cap, kHeaderSize, resp_buffer.RawPtr(), 0, nullptr);
@@ -74,6 +75,7 @@ void YellowstoneServerBase::ServerThread() {
 }
 
 glcr::ErrorCode YellowstoneServerBase::HandleRequest(const glcr::ByteBuffer& request,
+                                                     const glcr::CapBuffer& req_caps,
                                                      glcr::ByteBuffer& response, uint64_t& resp_length,
                                                      glcr::CapBuffer& resp_caps) {
   if (request.At<uint32_t>(0) != kSentinel) {
@@ -87,7 +89,7 @@ glcr::ErrorCode YellowstoneServerBase::HandleRequest(const glcr::ByteBuffer& req
       Empty yunq_request;
       RegisterInfo yunq_response;
 
-      yunq_request.ParseFromBytes(request, kHeaderSize);
+      yunq_request.ParseFromBytes(request, kHeaderSize, req_caps);
 
       RET_ERR(HandleGetRegister(yunq_request, yunq_response));
 
@@ -98,7 +100,7 @@ glcr::ErrorCode YellowstoneServerBase::HandleRequest(const glcr::ByteBuffer& req
      Empty yunq_request;
      AhciInfo yunq_response;
 
-      yunq_request.ParseFromBytes(request, kHeaderSize);
+      yunq_request.ParseFromBytes(request, kHeaderSize, req_caps);
 
      RET_ERR(HandleGetAhciInfo(yunq_request, yunq_response));
 
@@ -109,7 +111,7 @@ glcr::ErrorCode YellowstoneServerBase::HandleRequest(const glcr::ByteBuffer& req
      Empty yunq_request;
      DenaliInfo yunq_response;
 
-      yunq_request.ParseFromBytes(request, kHeaderSize);
+      yunq_request.ParseFromBytes(request, kHeaderSize, req_caps);
 
      RET_ERR(HandleGetDenali(yunq_request, yunq_response));
 
@@ -34,8 +34,8 @@ class YellowstoneServerBase {
   friend void YellowstoneServerBaseThreadBootstrap(void*);
   void ServerThread();
 
-  [[nodiscard]] glcr::ErrorCode HandleRequest(const glcr::ByteBuffer& request, glcr::ByteBuffer& response,
-                                              uint64_t& resp_length,
+  [[nodiscard]] glcr::ErrorCode HandleRequest(const glcr::ByteBuffer& request, const glcr::CapBuffer& req_caps,
+                                              glcr::ByteBuffer& response, uint64_t& resp_length,
                                               glcr::CapBuffer& resp_caps);
 };
 
@@ -16,12 +16,12 @@ glcr::ErrorCode {{interface.name}}Client::{{method.name}}(const {{method.request
   buffer_.WriteAt<uint32_t>(0, kSentinel);
   buffer_.WriteAt<uint64_t>(8, {{loop.index0}});
 
+  cap_buffer_.Reset();
   uint64_t length = request.SerializeToBytes(buffer_, /*offset=*/16, cap_buffer_);
   buffer_.WriteAt<uint32_t>(4, 16 + length);
 
   z_cap_t reply_port_cap;
-  // FIXME: We need to be able to send capabilities via endpoint call.
-  RET_ERR(ZEndpointSend(endpoint_, 16 + length, buffer_.RawPtr(), &reply_port_cap));
+  RET_ERR(ZEndpointSend(endpoint_, 16 + length, buffer_.RawPtr(), cap_buffer_.UsedSlots(), cap_buffer_.RawPtr(), &reply_port_cap));
 
   // FIXME: Add a way to zero out the first buffer.
   RET_ERR(ZReplyPortRecv(reply_port_cap, &buffer_size, buffer_.RawPtr(), &cap_size, cap_buffer_.RawPtr()));
@@ -42,14 +42,15 @@ Thread {{interface.name}}ServerBase::RunServer() {
 
 void {{interface.name}}ServerBase::ServerThread() {
   glcr::ByteBuffer recv_buffer(0x1000);
+  glcr::CapBuffer recv_cap(0x10);
   glcr::ByteBuffer resp_buffer(0x1000);
-  uint64_t resp_cap_size = 0x10;
-  glcr::CapBuffer resp_cap(resp_cap_size);
+  glcr::CapBuffer resp_cap(0x10);
   z_cap_t reply_port_cap;
 
   while (true) {
+    uint64_t recv_cap_size = 0x10;
     uint64_t recv_buf_size = 0x1000;
-    glcr::ErrorCode recv_err = ZEndpointRecv(endpoint_, &recv_buf_size, recv_buffer.RawPtr(), &reply_port_cap);
+    glcr::ErrorCode recv_err = ZEndpointRecv(endpoint_, &recv_buf_size, recv_buffer.RawPtr(), &recv_cap_size, recv_cap.RawPtr(), &reply_port_cap);
     if (recv_err != glcr::OK) {
       dbgln("Error in receive: %x", recv_err);
       continue;
@@ -58,7 +59,7 @@ void {{interface.name}}ServerBase::ServerThread() {
     uint64_t resp_length = 0;
 
     glcr::ErrorCode reply_err = glcr::OK;
-    glcr::ErrorCode err = HandleRequest(recv_buffer, resp_buffer, resp_length, resp_cap);
+    glcr::ErrorCode err = HandleRequest(recv_buffer, recv_cap, resp_buffer, resp_length, resp_cap);
     if (err != glcr::OK) {
       WriteError(resp_buffer, err);
       reply_err = ZReplyPortSend(reply_port_cap, kHeaderSize, resp_buffer.RawPtr(), 0, nullptr);
@@ -74,6 +75,7 @@ void {{interface.name}}ServerBase::ServerThread() {
 }
 
 glcr::ErrorCode {{interface.name}}ServerBase::HandleRequest(const glcr::ByteBuffer& request,
+                                                            const glcr::CapBuffer& req_caps,
                                                             glcr::ByteBuffer& response, uint64_t& resp_length,
                                                             glcr::CapBuffer& resp_caps) {
   if (request.At<uint32_t>(0) != kSentinel) {
@@ -88,7 +90,7 @@ glcr::ErrorCode {{interface.name}}ServerBase::HandleRequest(const glcr::ByteBuff
      {{method.request}} yunq_request;
      {{method.response}} yunq_response;
 
-      yunq_request.ParseFromBytes(request, kHeaderSize);
+      yunq_request.ParseFromBytes(request, kHeaderSize, req_caps);
 
      RET_ERR(Handle{{method.name}}(yunq_request, yunq_response));
 
@@ -30,8 +30,8 @@ class {{interface.name}}ServerBase {
   friend void {{interface.name}}ServerBaseThreadBootstrap(void*);
   void ServerThread();
 
-  [[nodiscard]] glcr::ErrorCode HandleRequest(const glcr::ByteBuffer& request, glcr::ByteBuffer& response,
-                                              uint64_t& resp_length,
+  [[nodiscard]] glcr::ErrorCode HandleRequest(const glcr::ByteBuffer& request, const glcr::CapBuffer& req_caps,
+                                              glcr::ByteBuffer& response, uint64_t& resp_length,
                                               glcr::CapBuffer& resp_caps);
 };
 
@@ -39,6 +39,7 @@
 
 - Add syscalls for inspecting capabilities.
 - Randomize/obfuscate capability numbers passed to user space.
+- Remove ReplyPort capabilities once the response is sent.
 
 ## Scheduling
 
@@ -2,90 +2,9 @@
 
 #include <stdint.h>
 
+#include "zcall_macros.h"
 #include "ztypes.h"
 
-#define SYS0(name) \
-  struct Z##name##Req {}; \
-  [[nodiscard]] inline z_err_t Z##name() { \
-    Z##name##Req req{}; \
-    return SysCall1(kZion##name, &req); \
-  }
-
-#define SYS1(name, t1, a1) \
-  struct Z##name##Req { \
-    t1 a1; \
-  }; \
-  [[nodiscard]] inline z_err_t Z##name(t1 a1) { \
-    Z##name##Req req{ \
-        .a1 = a1, \
-    }; \
-    return SysCall1(kZion##name, &req); \
-  }
-
-#define SYS2(name, t1, a1, t2, a2) \
-  struct Z##name##Req { \
-    t1 a1; \
-    t2 a2; \
-  }; \
-  [[nodiscard]] inline z_err_t Z##name(t1 a1, t2 a2) { \
-    Z##name##Req req{ \
-        .a1 = a1, \
-        .a2 = a2, \
-    }; \
-    return SysCall1(kZion##name, &req); \
-  }
-
-#define SYS3(name, t1, a1, t2, a2, t3, a3) \
-  struct Z##name##Req { \
-    t1 a1; \
-    t2 a2; \
-    t3 a3; \
-  }; \
-  [[nodiscard]] inline z_err_t Z##name(t1 a1, t2 a2, t3 a3) { \
-    Z##name##Req req{ \
-        .a1 = a1, \
-        .a2 = a2, \
-        .a3 = a3, \
-    }; \
-    return SysCall1(kZion##name, &req); \
-  }
-
-#define SYS4(name, t1, a1, t2, a2, t3, a3, t4, a4) \
-  struct Z##name##Req { \
-    t1 a1; \
-    t2 a2; \
-    t3 a3; \
-    t4 a4; \
-  }; \
-  [[nodiscard]] inline z_err_t Z##name(t1 a1, t2 a2, t3 a3, t4 a4) { \
-    Z##name##Req req{ \
-        .a1 = a1, \
-        .a2 = a2, \
-        .a3 = a3, \
-        .a4 = a4, \
-    }; \
-    return SysCall1(kZion##name, &req); \
-  }
-
-#define SYS5(name, t1, a1, t2, a2, t3, a3, t4, a4, t5, a5) \
-  struct Z##name##Req { \
-    t1 a1; \
-    t2 a2; \
-    t3 a3; \
-    t4 a4; \
-    t5 a5; \
-  }; \
-  [[nodiscard]] inline z_err_t Z##name(t1 a1, t2 a2, t3 a3, t4 a4, t5 a5) { \
-    Z##name##Req req{ \
-        .a1 = a1, \
-        .a2 = a2, \
-        .a3 = a3, \
-        .a4 = a4, \
-        .a5 = a5, \
-    }; \
-    return SysCall1(kZion##name, &req); \
-  }
-
 z_err_t SysCall1(uint64_t code, const void* req);
 
 SYS1(ProcessExit, uint64_t, code);
@@ -127,10 +46,10 @@ SYS5(PortPoll, z_cap_t, port_cap, uint64_t*, num_bytes, void*, data, uint64_t*,
 SYS2(IrqRegister, uint64_t, irq_num, z_cap_t*, port_cap);
 
 SYS1(EndpointCreate, z_cap_t*, endpoint_cap);
-SYS4(EndpointSend, z_cap_t, endpoint_cap, uint64_t, num_bytes, const void*,
-     data, z_cap_t*, reply_port_cap);
-SYS4(EndpointRecv, z_cap_t, endpoint_cap, uint64_t*, num_bytes, void*, data,
-     z_cap_t*, reply_port_cap);
+SYS6(EndpointSend, z_cap_t, endpoint_cap, uint64_t, num_bytes, const void*,
+     data, uint64_t, num_caps, const z_cap_t*, caps, z_cap_t*, reply_port_cap);
+SYS6(EndpointRecv, z_cap_t, endpoint_cap, uint64_t*, num_bytes, void*, data,
+     uint64_t*, num_caps, z_cap_t*, caps, z_cap_t*, reply_port_cap);
 SYS5(ReplyPortSend, z_cap_t, reply_port_cap, uint64_t, num_bytes, const void*,
      data, uint64_t, num_caps, z_cap_t*, caps);
 SYS5(ReplyPortRecv, z_cap_t, reply_port_cap, uint64_t*, num_bytes, void*, data,
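
With the widened declarations above, a caller can now hand capabilities to the kernel directly on the send path. A hedged usage sketch follows; the function name, payload layout, and buffer sizes are illustrative only and not part of the commit.

// Sketch: send one capability alongside a request and wait on the reply port.
// `endpoint_cap` and `cap_to_send` are assumed to already exist in the
// caller's capability table; the payload bytes here are placeholders.
glcr::ErrorCode SendOneCap(z_cap_t endpoint_cap, z_cap_t cap_to_send) {
  uint64_t payload[2] = {0, 0};  // placeholder request bytes
  z_cap_t caps[1] = {cap_to_send};
  z_cap_t reply_port_cap;
  RET_ERR(ZEndpointSend(endpoint_cap, sizeof(payload), payload,
                        /*num_caps=*/1, caps, &reply_port_cap));

  uint64_t resp[16];
  uint64_t resp_bytes = sizeof(resp);
  z_cap_t resp_caps[4];
  uint64_t num_resp_caps = 4;
  RET_ERR(ZReplyPortRecv(reply_port_cap, &resp_bytes, resp, &num_resp_caps,
                         resp_caps));
  return glcr::OK;
}
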
@@ -0,0 +1,104 @@
+#pragma once
+#define SYS0(name) \
+  struct Z##name##Req {}; \
+  [[nodiscard]] inline z_err_t Z##name() { \
+    Z##name##Req req{}; \
+    return SysCall1(kZion##name, &req); \
+  }
+
+#define SYS1(name, t1, a1) \
+  struct Z##name##Req { \
+    t1 a1; \
+  }; \
+  [[nodiscard]] inline z_err_t Z##name(t1 a1) { \
+    Z##name##Req req{ \
+        .a1 = a1, \
+    }; \
+    return SysCall1(kZion##name, &req); \
+  }
+
+#define SYS2(name, t1, a1, t2, a2) \
+  struct Z##name##Req { \
+    t1 a1; \
+    t2 a2; \
+  }; \
+  [[nodiscard]] inline z_err_t Z##name(t1 a1, t2 a2) { \
+    Z##name##Req req{ \
+        .a1 = a1, \
+        .a2 = a2, \
+    }; \
+    return SysCall1(kZion##name, &req); \
+  }
+
+#define SYS3(name, t1, a1, t2, a2, t3, a3) \
+  struct Z##name##Req { \
+    t1 a1; \
+    t2 a2; \
+    t3 a3; \
+  }; \
+  [[nodiscard]] inline z_err_t Z##name(t1 a1, t2 a2, t3 a3) { \
+    Z##name##Req req{ \
+        .a1 = a1, \
+        .a2 = a2, \
+        .a3 = a3, \
+    }; \
+    return SysCall1(kZion##name, &req); \
+  }
+
+#define SYS4(name, t1, a1, t2, a2, t3, a3, t4, a4) \
+  struct Z##name##Req { \
+    t1 a1; \
+    t2 a2; \
+    t3 a3; \
+    t4 a4; \
+  }; \
+  [[nodiscard]] inline z_err_t Z##name(t1 a1, t2 a2, t3 a3, t4 a4) { \
+    Z##name##Req req{ \
+        .a1 = a1, \
+        .a2 = a2, \
+        .a3 = a3, \
+        .a4 = a4, \
+    }; \
+    return SysCall1(kZion##name, &req); \
+  }
+
+#define SYS5(name, t1, a1, t2, a2, t3, a3, t4, a4, t5, a5) \
+  struct Z##name##Req { \
+    t1 a1; \
+    t2 a2; \
+    t3 a3; \
+    t4 a4; \
+    t5 a5; \
+  }; \
+  [[nodiscard]] inline z_err_t Z##name(t1 a1, t2 a2, t3 a3, t4 a4, t5 a5) { \
+    Z##name##Req req{ \
+        .a1 = a1, \
+        .a2 = a2, \
+        .a3 = a3, \
+        .a4 = a4, \
+        .a5 = a5, \
+    }; \
+    return SysCall1(kZion##name, &req); \
+  }
+
+#define SYS6(name, t1, a1, t2, a2, t3, a3, t4, a4, t5, a5, t6, a6) \
+  struct Z##name##Req { \
+    t1 a1; \
+    t2 a2; \
+    t3 a3; \
+    t4 a4; \
+    t5 a5; \
+    t6 a6; \
+  }; \
+  [[nodiscard]] inline z_err_t Z##name(t1 a1, t2 a2, t3 a3, t4 a4, t5 a5, \
+                                       t6 a6) { \
+    Z##name##Req req{ \
+        .a1 = a1, \
+        .a2 = a2, \
+        .a3 = a3, \
+        .a4 = a4, \
+        .a5 = a5, \
+        .a6 = a6, \
+    }; \
+    return SysCall1(kZion##name, &req); \
+  }
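
For reference, this is what the new SYS6(EndpointSend, ...) declaration from the previous file expands to when run through the macro above; the expansion is reconstructed by hand and lightly reformatted.

// Hand expansion of SYS6(EndpointSend, z_cap_t, endpoint_cap, uint64_t,
// num_bytes, const void*, data, uint64_t, num_caps, const z_cap_t*, caps,
// z_cap_t*, reply_port_cap): one request struct plus a thin wrapper that
// traps into the kernel with a single pointer argument.
struct ZEndpointSendReq {
  z_cap_t endpoint_cap;
  uint64_t num_bytes;
  const void* data;
  uint64_t num_caps;
  const z_cap_t* caps;
  z_cap_t* reply_port_cap;
};
[[nodiscard]] inline z_err_t ZEndpointSend(z_cap_t endpoint_cap,
                                           uint64_t num_bytes, const void* data,
                                           uint64_t num_caps,
                                           const z_cap_t* caps,
                                           z_cap_t* reply_port_cap) {
  ZEndpointSendReq req{
      .endpoint_cap = endpoint_cap,
      .num_bytes = num_bytes,
      .data = data,
      .num_caps = num_caps,
      .caps = caps,
      .reply_port_cap = reply_port_cap,
  };
  return SysCall1(kZionEndpointSend, &req);
}
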
@@ -63,6 +63,8 @@ const uint64_t kZionDebug = 0x1'0000;
 
 typedef uint64_t z_cap_t;
 
+const uint64_t kZionInvalidCapability = 0x0;
+
 // General Capability Permissions
 const uint64_t kZionPerm_Write = 0x1;
 const uint64_t kZionPerm_Read = 0x2;
@@ -4,8 +4,8 @@
 #include "scheduler/scheduler.h"
 
 z_err_t UnboundedMessageQueue::PushBack(uint64_t num_bytes, const void* bytes,
-                                        uint64_t num_caps,
-                                        const z_cap_t* caps) {
+                                        uint64_t num_caps, const z_cap_t* caps,
+                                        z_cap_t reply_cap) {
   if (num_bytes > 0x1000) {
     dbgln("Large message size unimplemented: %x", num_bytes);
     return glcr::UNIMPLEMENTED;
@@ -18,6 +18,12 @@ z_err_t UnboundedMessageQueue::PushBack(uint64_t num_bytes, const void* bytes,
     message->bytes[i] = static_cast<const uint8_t*>(bytes)[i];
   }
 
+  if (reply_cap != kZionInvalidCapability) {
+    // FIXME: We're just trusting that capability has the correct permissions.
+    message->reply_cap =
+        gScheduler->CurrentProcess().ReleaseCapability(reply_cap);
+  }
+
   for (uint64_t i = 0; i < num_caps; i++) {
     // FIXME: This would feel safer closer to the relevant syscall.
     // FIXME: Race conditions on get->check->release here. Would be better to
@@ -46,7 +52,8 @@ z_err_t UnboundedMessageQueue::PushBack(uint64_t num_bytes, const void* bytes,
 }
 
 z_err_t UnboundedMessageQueue::PopFront(uint64_t* num_bytes, void* bytes,
-                                        uint64_t* num_caps, z_cap_t* caps) {
+                                        uint64_t* num_caps, z_cap_t* caps,
+                                        z_cap_t* reply_cap) {
   mutex_.Lock();
   while (pending_messages_.empty()) {
     auto thread = gScheduler->CurrentThread();
@@ -75,8 +82,16 @@ z_err_t UnboundedMessageQueue::PopFront(uint64_t* num_bytes, void* bytes,
     static_cast<uint8_t*>(bytes)[i] = next_msg->bytes[i];
   }
 
-  *num_caps = next_msg->caps.size();
   auto& proc = gScheduler->CurrentProcess();
+  if (reply_cap != nullptr) {
+    if (!next_msg->reply_cap) {
+      dbgln("Tried to read reply capability off of a message without one");
+      return glcr::INTERNAL;
+    }
+    *reply_cap = proc.AddExistingCapability(next_msg->reply_cap);
+  }
+
+  *num_caps = next_msg->caps.size();
   for (uint64_t i = 0; i < *num_caps; i++) {
     caps[i] = proc.AddExistingCapability(next_msg->caps.PopFront());
   }
@@ -102,7 +117,8 @@ void UnboundedMessageQueue::WriteKernel(uint64_t init,
 glcr::ErrorCode SingleMessageQueue::PushBack(uint64_t num_bytes,
                                              const void* bytes,
                                              uint64_t num_caps,
-                                             const z_cap_t* caps) {
+                                             const z_cap_t* caps,
+                                             z_cap_t reply_port) {
   MutexHolder h(mutex_);
   if (has_written_) {
     return glcr::FAILED_PRECONDITION;
@@ -114,6 +130,11 @@ glcr::ErrorCode SingleMessageQueue::PushBack(uint64_t num_bytes,
     bytes_[i] = reinterpret_cast<const uint8_t*>(bytes)[i];
   }
 
+  if (reply_port != kZionInvalidCapability) {
+    dbgln("Sent a reply port to a single message queue");
+    return glcr::INTERNAL;
+  }
+
   for (uint64_t i = 0; i < num_caps; i++) {
     // FIXME: This would feel safer closer to the relevant syscall.
     auto cap = gScheduler->CurrentProcess().GetCapability(caps[i]);
@@ -139,8 +160,8 @@ glcr::ErrorCode SingleMessageQueue::PushBack(uint64_t num_bytes,
 }
 
 glcr::ErrorCode SingleMessageQueue::PopFront(uint64_t* num_bytes, void* bytes,
-                                             uint64_t* num_caps,
-                                             z_cap_t* caps) {
+                                             uint64_t* num_caps, z_cap_t* caps,
+                                             z_cap_t* reply_port) {
   mutex_.Lock();
   while (!has_written_) {
     auto thread = gScheduler->CurrentThread();
@@ -169,6 +190,11 @@ glcr::ErrorCode SingleMessageQueue::PopFront(uint64_t* num_bytes, void* bytes,
     reinterpret_cast<uint8_t*>(bytes)[i] = bytes_[i];
   }
 
+  if (reply_port != nullptr) {
+    dbgln("Tried to read a reply port off of a single message queue");
+    return glcr::INTERNAL;
+  }
+
   *num_caps = caps_.size();
   auto& proc = gScheduler->CurrentProcess();
   for (uint64_t i = 0; i < *num_caps; i++) {
@@ -15,9 +15,11 @@ class MessageQueue {
   virtual ~MessageQueue() {}
 
   virtual glcr::ErrorCode PushBack(uint64_t num_bytes, const void* bytes,
-                                   uint64_t num_caps, const z_cap_t* caps) = 0;
+                                   uint64_t num_caps, const z_cap_t* caps,
+                                   z_cap_t reply_cap = 0) = 0;
   virtual glcr::ErrorCode PopFront(uint64_t* num_bytes, void* bytes,
-                                   uint64_t* num_caps, z_cap_t* caps) = 0;
+                                   uint64_t* num_caps, z_cap_t* caps,
+                                   z_cap_t* reply_cap = nullptr) = 0;
   virtual bool empty() = 0;
 
  protected:
@@ -35,9 +37,10 @@ class UnboundedMessageQueue : public MessageQueue {
   virtual ~UnboundedMessageQueue() override {}
 
   glcr::ErrorCode PushBack(uint64_t num_bytes, const void* bytes,
-                           uint64_t num_caps, const z_cap_t* caps) override;
+                           uint64_t num_caps, const z_cap_t* caps,
+                           z_cap_t reply_cap) override;
   glcr::ErrorCode PopFront(uint64_t* num_bytes, void* bytes, uint64_t* num_caps,
-                           z_cap_t* caps) override;
+                           z_cap_t* caps, z_cap_t* reply_cap) override;
 
   void WriteKernel(uint64_t init, glcr::RefPtr<Capability> cap);
 
@@ -52,6 +55,7 @@ class UnboundedMessageQueue : public MessageQueue {
     uint8_t* bytes;
 
     glcr::LinkedList<glcr::RefPtr<Capability>> caps;
+    glcr::RefPtr<Capability> reply_cap;
   };
 
   glcr::LinkedList<glcr::SharedPtr<Message>> pending_messages_;
@@ -65,9 +69,10 @@ class SingleMessageQueue : public MessageQueue {
   virtual ~SingleMessageQueue() override {}
 
   glcr::ErrorCode PushBack(uint64_t num_bytes, const void* bytes,
-                           uint64_t num_caps, const z_cap_t* caps) override;
+                           uint64_t num_caps, const z_cap_t* caps,
+                           z_cap_t reply_cap) override;
   glcr::ErrorCode PopFront(uint64_t* num_bytes, void* bytes, uint64_t* num_caps,
-                           z_cap_t* caps) override;
+                           z_cap_t* caps, z_cap_t* reply_cap) override;
 
   bool empty() override {
     MutexHolder h(mutex_);
@@ -5,3 +5,18 @@
 glcr::RefPtr<Endpoint> Endpoint::Create() {
   return glcr::AdoptPtr(new Endpoint);
 }
+
+glcr::ErrorCode Endpoint::Send(uint64_t num_bytes, const void* data,
+                               uint64_t num_caps, const z_cap_t* caps,
+                               z_cap_t reply_port_cap) {
+  auto& message_queue = GetSendMessageQueue();
+  return message_queue.PushBack(num_bytes, data, num_caps, caps,
+                                reply_port_cap);
+}
+glcr::ErrorCode Endpoint::Recv(uint64_t* num_bytes, void* data,
+                               uint64_t* num_caps, z_cap_t* caps,
+                               z_cap_t* reply_port_cap) {
+  auto& message_queue = GetRecvMessageQueue();
+  return message_queue.PopFront(num_bytes, data, num_caps, caps,
+                                reply_port_cap);
+}
@@ -27,8 +27,11 @@ class Endpoint : public IpcObject {
 
   static glcr::RefPtr<Endpoint> Create();
 
-  glcr::ErrorCode Read(uint64_t* num_bytes, void* data,
-                       z_cap_t* reply_port_cap);
+  // FIXME: These are hacky "almost" overrides that could lead to bugs.
+  glcr::ErrorCode Send(uint64_t num_bytes, const void* data, uint64_t num_caps,
+                       const z_cap_t* caps, z_cap_t reply_port_cap);
+  glcr::ErrorCode Recv(uint64_t* num_bytes, void* data, uint64_t* num_caps,
+                       z_cap_t* caps, z_cap_t* reply_port_cap);
 
   virtual MessageQueue& GetSendMessageQueue() override {
     return message_queue_;
@@ -106,7 +106,8 @@ glcr::ErrorCode EndpointSend(ZEndpointSendReq* req) {
   *req->reply_port_cap = proc.AddNewCapability(reply_port, kZionPerm_Read);
   uint64_t reply_port_cap_to_send =
       proc.AddNewCapability(reply_port, kZionPerm_Write | kZionPerm_Transmit);
-  return endpoint->Send(req->num_bytes, req->data, 1, &reply_port_cap_to_send);
+  return endpoint->Send(req->num_bytes, req->data, req->num_caps, req->caps,
+                        reply_port_cap_to_send);
 }
 
 glcr::ErrorCode EndpointRecv(ZEndpointRecvReq* req) {
@@ -117,7 +118,7 @@ glcr::ErrorCode EndpointRecv(ZEndpointRecvReq* req) {
   auto endpoint = endpoint_cap->obj<Endpoint>();
 
-  uint64_t num_caps = 1;
-  RET_ERR(endpoint->Recv(req->num_bytes, req->data, &num_caps,
+  RET_ERR(endpoint->Recv(req->num_bytes, req->data, req->num_caps, req->caps,
                          req->reply_port_cap));
-  if (num_caps != 1) {
-    return glcr::INTERNAL;