Compare commits


7 Commits

11 changed files with 371 additions and 31 deletions

View File

@@ -3,7 +3,7 @@
 namespace yunq {
 namespace {
-const uint64_t kIdentByte = 0x33441122;
+const uint32_t kIdentByte = 0x33441122;
 } // namespace

View File

@@ -100,7 +100,7 @@ template <typename T>
 void Serializer::WriteRepeatedMessage(uint64_t field_index,
                                       const glcr::Vector<T>& value) {
   uint64_t next_offset = next_extension_;
-  uint64_t length = 0;
+  uint64_t length = value.size();
   for (T& message : value) {
     uint64_t msg_length = 0;
@@ -110,7 +110,6 @@ void Serializer::WriteRepeatedMessage(uint64_t field_index,
     } else {
      msg_length = message.SerializeToBytes(buffer_, offset_ + next_offset);
     }
-    length += msg_length;
     next_offset += msg_length;
   }
@@ -119,7 +118,7 @@ void Serializer::WriteRepeatedMessage(uint64_t field_index,
       .length = (uint32_t)length,
   };
-  next_extension_ += length;
+  next_extension_ = next_offset;
   buffer_.WriteAt<ExtensionPointer>(field_offset(field_index), ptr);
 }

View File

@@ -26,13 +26,11 @@ impl TaskId {
 pub struct Task {
     id: TaskId,
-    // FIXME: This only needs to be sync because of the CURRENT_EXECUTOR
-    // needing to be shared between threads.
-    future: Pin<Box<dyn Future<Output = ()> + Sync>>,
+    future: Pin<Box<dyn Future<Output = ()> + Send>>,
 }
 impl Task {
-    pub fn new(future: impl Future<Output = ()> + Sync + 'static) -> Task {
+    pub fn new(future: impl Future<Output = ()> + Sync + Send + 'static) -> Task {
         Task {
             id: TaskId::new(),
             future: Box::pin(future),
@@ -72,7 +70,7 @@ impl Wake for TaskWaker {
 }
 pub struct Executor {
-    tasks: BTreeMap<TaskId, Task>,
+    tasks: Arc<Mutex<BTreeMap<TaskId, Task>>>,
     // TODO: Consider a better datastructure for this.
     task_queue: Arc<Mutex<VecDeque<TaskId>>>,
     waker_cache: BTreeMap<TaskId, Waker>,
@@ -81,7 +79,7 @@ pub struct Executor {
 impl Executor {
     pub fn new() -> Executor {
         Executor {
-            tasks: BTreeMap::new(),
+            tasks: Arc::new(Mutex::new(BTreeMap::new())),
             task_queue: Arc::new(Mutex::new(VecDeque::new())),
             waker_cache: BTreeMap::new(),
         }
@@ -89,7 +87,7 @@ impl Executor {
     pub fn spawn(&mut self, task: Task) {
         let task_id = task.id;
-        if self.tasks.insert(task_id, task).is_some() {
+        if self.tasks.lock().insert(task_id, task).is_some() {
             panic!("Task is already existed in executor map");
         }
         self.task_queue.lock().push_back(task_id);
@@ -97,7 +95,8 @@ impl Executor {
     fn run_ready_tasks(&mut self) {
         while let Some(task_id) = self.task_queue.lock().pop_front() {
-            let task = self.tasks.get_mut(&task_id).unwrap();
+            let mut tasks = self.tasks.lock();
+            let task = tasks.get_mut(&task_id).unwrap();
             let waker = self
                 .waker_cache
                 .entry(task_id)
@@ -105,7 +104,7 @@ impl Executor {
             let mut ctx = Context::from_waker(waker);
             match task.poll(&mut ctx) {
                 Poll::Ready(()) => {
-                    self.tasks.remove(&task_id);
+                    tasks.remove(&task_id);
                     self.waker_cache.remove(&task_id);
                 }
                 Poll::Pending => {}
@@ -120,6 +119,30 @@ impl Executor {
             syscall::thread_sleep(50).unwrap();
         }
     }
+    pub fn new_spawner(&self) -> Spawner {
+        Spawner::new(self.tasks.clone(), self.task_queue.clone())
+    }
 }
 static CURRENT_EXECUTOR: Option<Executor> = None;
+pub struct Spawner {
+    tasks: Arc<Mutex<BTreeMap<TaskId, Task>>>,
+    task_queue: Arc<Mutex<VecDeque<TaskId>>>,
+}
+impl Spawner {
+    fn new(
+        tasks: Arc<Mutex<BTreeMap<TaskId, Task>>>,
+        task_queue: Arc<Mutex<VecDeque<TaskId>>>,
+    ) -> Self {
+        Spawner { tasks, task_queue }
+    }
+    pub fn spawn(&self, task: Task) {
+        let task_id = task.id;
+        if self.tasks.lock().insert(task_id, task).is_some() {
+            panic!("Task is already existed in executor map");
+        }
+        self.task_queue.lock().push_back(task_id);
+    }
+}
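
Taken together, the executor changes above let code outside the executor feed it work through a Spawner that shares the executor's Arc<Mutex<...>> task map and queue. A minimal usage sketch, assuming the mammoth::task paths shown in this diff (the function name is illustrative, not part of the change):

use alloc::sync::Arc;
use mammoth::sync::Mutex;
use mammoth::task::{Executor, Task};

// Illustrative only: mirrors the pattern denali's main() uses later in this diff.
fn run_with_spawner() {
    let executor = Arc::new(Mutex::new(Executor::new()));

    // The Spawner clones the executor's shared task map and queue, so it can
    // be moved to another thread without holding the executor lock.
    let spawner = executor.lock().new_spawner();
    spawner.spawn(Task::new(async {
        mammoth::debug!("spawned from outside the executor");
    }));

    // run() drains the shared queue, so tasks added through the Spawner are
    // picked up on the next pass of run_ready_tasks().
    executor.lock().run();
}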

View File

@@ -1,7 +1,12 @@
+use core::future::Future;
 use crate::buffer::ByteBuffer;
+use alloc::sync::Arc;
 use alloc::vec::Vec;
+use mammoth::cap::Capability;
 use mammoth::syscall;
+use mammoth::task::Spawner;
+use mammoth::task::Task;
 use mammoth::thread;
 use mammoth::thread::JoinHandle;
 use mammoth::zion::z_cap_t;
@@ -59,3 +64,83 @@
 {
     thread::spawn(move || server.server_loop())
 }
+pub trait AsyncYunqServer
+where
+    Self: Send + Sync + 'static,
+{
+    fn server_loop(self: Arc<Self>, spawner: Spawner) {
+        loop {
+            let mut byte_buffer = ByteBuffer::<1024>::new();
+            let mut cap_buffer = vec![0; 10];
+            let (_, _, reply_port_cap) = syscall::endpoint_recv(
+                self.endpoint_cap(),
+                byte_buffer.mut_slice(),
+                &mut cap_buffer,
+            )
+            .expect("Failed to call endpoint recv");
+            let method = byte_buffer
+                .at::<u64>(8)
+                .expect("Failed to access request length.");
+            let self_clone = self.clone();
+            spawner.spawn(Task::new((async move || {
+                self_clone
+                    .handle_request_and_response(method, byte_buffer, cap_buffer, reply_port_cap)
+                    .await;
+            })()));
+        }
+    }
+    fn handle_request_and_response(
+        &self,
+        method: u64,
+        mut byte_buffer: ByteBuffer<1024>,
+        mut cap_buffer: Vec<u64>,
+        reply_port_cap: Capability,
+    ) -> impl Future<Output = ()> + Sync + Send {
+        async move {
+            let resp = self
+                .handle_request(method, &mut byte_buffer, &mut cap_buffer)
+                .await;
+            match resp {
+                Ok(resp_len) => syscall::reply_port_send(
+                    reply_port_cap,
+                    byte_buffer.slice(resp_len),
+                    &cap_buffer,
+                )
+                .expect("Failed to reply"),
+                Err(err) => {
+                    crate::message::serialize_error(&mut byte_buffer, err);
+                    syscall::reply_port_send(reply_port_cap, &byte_buffer.slice(0x10), &[])
+                        .expect("Failed to reply w/ error")
+                }
+            }
+            ()
+        }
+    }
+    fn endpoint_cap(&self) -> &Capability;
+    fn create_client_cap(&self) -> Result<Capability, ZError> {
+        self.endpoint_cap()
+            .duplicate(!mammoth::zion::kZionPerm_Read)
+    }
+    fn handle_request(
+        &self,
+        method_number: u64,
+        byte_buffer: &mut ByteBuffer<1024>,
+        cap_buffer: &mut Vec<z_cap_t>,
+    ) -> impl Future<Output = Result<usize, ZError>> + Sync + Send;
+}
+pub fn spawn_async_server_thread<T>(server: Arc<T>, spawner: Spawner) -> JoinHandle
+where
+    T: AsyncYunqServer + Send + Sync + 'static,
+{
+    thread::spawn(move || {
+        server.server_loop(spawner);
+    })
+}
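
The new AsyncYunqServer trait is normally implemented by yunq-derive's generated servers (see the codegen changes at the end of this diff), but a hand-written implementor only has to store an endpoint capability and supply handle_request. A hedged sketch: EchoServer is made up for illustration, and the ByteBuffer path is assumed from the imports above.

use alloc::sync::Arc;
use alloc::vec::Vec;
use mammoth::cap::Capability;
use mammoth::zion::{z_cap_t, ZError};
use yunq::buffer::ByteBuffer;
use yunq::server::{spawn_async_server_thread, AsyncYunqServer};

// Hypothetical server showing the minimum trait surface.
pub struct EchoServer {
    endpoint_cap: Capability,
}

impl AsyncYunqServer for EchoServer {
    fn endpoint_cap(&self) -> &Capability {
        &self.endpoint_cap
    }

    async fn handle_request(
        &self,
        _method_number: u64,
        _byte_buffer: &mut ByteBuffer<1024>,
        _cap_buffer: &mut Vec<z_cap_t>,
    ) -> Result<usize, ZError> {
        // A real handler dispatches on the method number and serializes a
        // response into the byte buffer; this stub replies with an empty payload.
        Ok(0)
    }
}

// server_loop() takes self: Arc<Self>, so servers are shared behind an Arc and
// driven on their own thread with a Spawner handed out by the executor.
fn start(server: Arc<EchoServer>, spawner: mammoth::task::Spawner) {
    let _handle = spawn_async_server_thread(server, spawner);
}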

View File

@@ -9,6 +9,7 @@ use alloc::boxed::Box;
 use alloc::sync::Arc;
 use mammoth::cap::Capability;
 use mammoth::sync::Mutex;
+use mammoth::syscall;
 use mammoth::{mem, thread};
 use mammoth::{mem::MemoryRegion, zion::ZError};
@@ -141,10 +142,12 @@ impl AhciController
         for port in self.ports.iter().flatten() {
             let sig = port.get_signature();
             if sig == 0x101 {
-                let command = Command::identify()?;
+                let mut command = Command::identify()?;
                 mammoth::debug!("IDENT!");
                 port.issue_command(&command)?.await;
-                let ident = command.memory_region.slice::<u16>();
+                let memory_region =
+                    MemoryRegion::from_cap(Capability::take(command.release_mem_cap()))?;
+                let ident = memory_region.slice::<u16>();
                 let new_sector_size = if ident[106] & (1 << 12) != 0 {
                     ident[117] as u32 | ((ident[118] as u32) << 16)
                 } else {
@@ -170,6 +173,18 @@ impl AhciController {
         }
         Ok(())
     }
+    pub async fn issue_command(&self, port_num: usize, command: &Command) -> Result<(), ZError> {
+        assert!(port_num < 32);
+        self.ports[port_num]
+            .as_ref()
+            .ok_or(ZError::INVALID_ARGUMENT)?
+            .issue_command(command)?
+            .await;
+        Ok(())
+    }
 }
 pub fn spawn_irq_thread(controller: Arc<AhciController>) -> thread::JoinHandle {
@@ -182,7 +197,6 @@ pub fn spawn_irq_thread(controller: Arc<AhciController>) -> thread::JoinHandle {
         });
         loop {
             irq_port.recv_null().unwrap();
-            mammoth::debug!("Interrupt!");
             controller.handle_irq();
         }
     };
@@ -202,11 +216,11 @@ enum CommandStatus {
 struct CommandFuture {
     status: Arc<Mutex<CommandStatus>>,
-    trigger: Box<dyn Fn() + Sync>,
+    trigger: Box<dyn Fn() + Sync + Send>,
 }
 impl CommandFuture {
-    fn new(status: Arc<Mutex<CommandStatus>>, trigger: Box<dyn Fn() + Sync>) -> Self {
+    fn new(status: Arc<Mutex<CommandStatus>>, trigger: Box<dyn Fn() + Sync + Send>) -> Self {
         Self { status, trigger }
     }
 }
@@ -236,28 +250,54 @@ impl Future for CommandFuture {
     }
 }
-struct Command {
+pub struct Command {
     command: SataCommand,
     lba: u64,
     sector_cnt: u16,
     paddr: u64,
-    #[allow(dead_code)] // We need to own this even if we never access it.
-    memory_region: MemoryRegion,
+    memory_region: Option<Capability>,
 }
 impl Command {
     pub fn identify() -> Result<Self, ZError> {
-        let (memory_region, paddr) = MemoryRegion::contiguous_physical(512)?;
+        let (memory_region, paddr) = syscall::memory_object_contiguous_physical(512)?;
         Ok(Self {
             command: SataCommand::IdentifyDevice,
             lba: 0,
             sector_cnt: 1,
             paddr,
-            memory_region,
+            memory_region: Some(memory_region),
         })
     }
+    pub fn read(lba: u64, lba_count: u16) -> Result<Self, ZError> {
+        let (memory_region, paddr) =
+            syscall::memory_object_contiguous_physical(512 * (lba_count as u64))?;
+        Ok(Self {
+            command: SataCommand::DmaReadExt,
+            lba,
+            sector_cnt: lba_count,
+            paddr,
+            memory_region: Some(memory_region),
+        })
+    }
+    pub fn read_manual(lba: u64, lba_count: u16, paddr: u64) -> Self {
+        Self {
+            command: SataCommand::DmaReadExt,
+            lba,
+            sector_cnt: lba_count,
+            paddr: paddr,
+            memory_region: None,
+        }
+    }
+    pub fn release_mem_cap(&mut self) -> u64 {
+        self.memory_region.take().unwrap().release()
+    }
 }
 impl From<&Command> for HostToDeviceRegisterFis {
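
With Command now handing its DMA buffer out as a capability, callers map the memory themselves once the command completes. A sketch of a single-sector read against port 0, assuming the paths re-exported from the denali and mammoth crates in this diff (read_first_sector is illustrative):

use alloc::sync::Arc;
use denali::ahci::{AhciController, Command};
use mammoth::cap::Capability;
use mammoth::mem::MemoryRegion;
use mammoth::zion::ZError;

// Illustrative helper: read LBA 0 from port 0 and map the DMA buffer.
async fn read_first_sector(controller: Arc<AhciController>) -> Result<(), ZError> {
    // Command::read allocates a contiguous physical buffer and keeps the
    // backing capability until release_mem_cap() is called.
    let mut command = Command::read(0, 1)?;
    controller.issue_command(0, &command).await?;

    // Take ownership of the buffer, mirroring what the identify path above
    // does once the command has completed.
    let region = MemoryRegion::from_cap(Capability::take(command.release_mem_cap()))?;
    let words = region.slice::<u16>();
    mammoth::debug!("first word of sector 0: {:#x}", words[0]);
    Ok(())
}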

View File

@@ -6,3 +6,4 @@ mod port;
 pub use controller::identify_ports;
 pub use controller::spawn_irq_thread;
 pub use controller::AhciController;
+pub use controller::Command;

View File

@@ -12,6 +12,9 @@ use mammoth::{
 };
 use denali::ahci::{identify_ports, spawn_irq_thread, AhciController};
+use denali::{denali_server::DenaliServerImpl, AsyncDenaliServer};
+use yellowstone_yunq::RegisterEndpointRequest;
+use yunq::server::AsyncYunqServer;
 define_entry!();
@@ -29,14 +32,36 @@ extern "C" fn main() -> z_err_t {
         ahci_info.ahci_region,
     )));
-    let mut executor = Executor::new();
+    let executor = Arc::new(Mutex::new(Executor::new()));
-    executor.spawn(Task::new(identify_ports(ahci_controller.clone())));
+    executor
+        .clone()
+        .lock()
+        .spawn(Task::new(identify_ports(ahci_controller.clone())));
     let thread = spawn_irq_thread(ahci_controller.clone());
-    executor.run();
+    let denali_server =
+        Arc::new(AsyncDenaliServer::new(DenaliServerImpl::new(ahci_controller.clone())).unwrap());
+    let server_thread = yunq::server::spawn_async_server_thread(
+        denali_server.clone(),
+        executor.lock().new_spawner(),
+    );
+    let yellowstone = yellowstone_yunq::from_init_endpoint();
+    yellowstone
+        .register_endpoint(&RegisterEndpointRequest {
+            endpoint_name: "denali".into(),
+            endpoint_capability: denali_server.create_client_cap().unwrap().release(),
+        })
+        .unwrap();
+    executor.clone().lock().run();
     thread.join().expect("Failed to wait on irq thread.");
+    server_thread
+        .join()
+        .expect("Failed to wait on server thread.");
     0
 }

View File

@@ -0,0 +1,61 @@
+use alloc::sync::Arc;
+use mammoth::{syscall, zion::ZError};
+use crate::{
+    ahci::{AhciController, Command},
+    AsyncDenaliServerHandler, ReadManyRequest, ReadRequest, ReadResponse,
+};
+pub struct DenaliServerImpl {
+    ahci_controller: Arc<AhciController>,
+}
+impl DenaliServerImpl {
+    pub fn new(controller: Arc<AhciController>) -> Self {
+        DenaliServerImpl {
+            ahci_controller: controller,
+        }
+    }
+}
+impl AsyncDenaliServerHandler for DenaliServerImpl {
+    async fn read(&self, req: ReadRequest) -> Result<ReadResponse, ZError> {
+        let mut command = Command::read(req.block.lba, req.block.size as u16)?;
+        self.ahci_controller
+            .issue_command(req.device_id as usize, &command)
+            .await?;
+        Ok(ReadResponse {
+            device_id: req.device_id,
+            size: req.block.size,
+            memory: command.release_mem_cap(),
+        })
+    }
+    async fn read_many(&self, req: ReadManyRequest) -> Result<ReadResponse, ZError> {
+        let total_sector_cnt = req.blocks.iter().map(|b| b.size).sum();
+        // FIXME: Don't hardcode this.
+        let sector_size = 512;
+        let (mem_cap, mut paddr) =
+            syscall::memory_object_contiguous_physical(sector_size * total_sector_cnt)?;
+        for block in req.blocks {
+            let command = Command::read_manual(block.lba, block.size as u16, paddr);
+            // TODO: We should actually be able to read all of these in parallel.
+            self.ahci_controller
+                .issue_command(req.device_id as usize, &command)
+                .await;
+            paddr += block.size * sector_size;
+        }
+        Ok(ReadResponse {
+            device_id: req.device_id,
+            size: total_sector_cnt,
+            memory: mem_cap.release(),
+        })
+    }
+}
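
The TODO in read_many flags that the per-block reads are still issued one at a time. A hedged sketch of the parallel direction, assuming a join_all-style combinator is available (e.g. from futures-util, which this tree may not vendor) and reusing the Command and controller API shown above:

use alloc::vec::Vec;
use denali::ahci::{AhciController, Command};
use mammoth::zion::ZError;

// Sketch only: build every block's command first, then await them together so
// the port can keep several command slots in flight.
async fn read_blocks_parallel(
    controller: &AhciController,
    device_id: usize,
    blocks: &[(u64, u16, u64)], // (lba, sector count, target paddr) per block
) -> Result<(), ZError> {
    let commands: Vec<Command> = blocks
        .iter()
        .map(|&(lba, cnt, paddr)| Command::read_manual(lba, cnt, paddr))
        .collect();

    let results = futures_util::future::join_all(
        commands
            .iter()
            .map(|cmd| controller.issue_command(device_id, cmd)),
    )
    .await;

    // Surface the first error, if any.
    results.into_iter().collect()
}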

View File

@@ -5,3 +5,4 @@ use core::include;
 include!(concat!(env!("OUT_DIR"), "/yunq.rs"));
 pub mod ahci;
+pub mod denali_server;

View File

@@ -76,7 +76,7 @@ glcr::ErrorOr<mmth::OwnedMemoryRegion> Ext2BlockReader::ReadBlocks(
   ReadResponse resp;
   auto status = denali_.Read(req, resp);
   if (!status.ok()) {
-    dbgln("Failed to read blocks: {}", status.message());
+    dbgln("Failed to read block: {}", status.code());
     return status.code();
   }
   return mmth::OwnedMemoryRegion::FromCapability(resp.memory());
@@ -102,7 +102,7 @@ glcr::ErrorOr<mmth::OwnedMemoryRegion> Ext2BlockReader::ReadBlocks(
   ReadResponse resp;
   auto status = denali_.ReadMany(req, resp);
   if (!status.ok()) {
-    dbgln("Failed to read blocks: {}", status.message());
+    dbgln("Failed to read blocks: {}", status.code());
     return status.code();
   }
   return mmth::OwnedMemoryRegion::FromCapability(resp.memory());

View File

@@ -112,7 +112,7 @@ fn parse_field(field: &Field) -> TokenStream {
                 let rep_offset = buf.at::<u32>(yunq::message::field_offset(offset, #ind))?;
                 let rep_len = buf.at::<u32>(yunq::message::field_offset(offset, #ind) + 4)?;
-                yunq::message::parse_repeated(buf, rep_offset as usize, rep_len as usize)?
+                yunq::message::parse_repeated(buf, offset + rep_offset as usize, rep_len as usize)?
            };
        },
        Type::I64 => unimplemented!(),
@@ -122,7 +122,7 @@ fn parse_field(field: &Field) -> TokenStream {
                 let rep_offset = buf.at::<u32>(yunq::message::field_offset(offset, #ind))?;
                 let rep_len = buf.at::<u32>(yunq::message::field_offset(offset, #ind) + 4)?;
-                yunq::message::parse_repeated_message(buf, rep_offset as usize, rep_len as usize, &caps)?
+                yunq::message::parse_repeated_message(buf, offset + rep_offset as usize, rep_len as usize, &caps)?
            };
        },
    }
@@ -206,6 +206,7 @@ fn generate_parse(message: &Message) -> TokenStream {
             Self: Sized,
         {
             if buf.at::<u32>(offset + 0)? != yunq::message::MESSAGE_IDENT {
+                mammoth::debug!("Expected IDENT at offest {:#x}, got {:#x}", offset, buf.at::<u32>(offset)?);
                 return Err(ZError::INVALID_ARGUMENT);
             }
             // TODO: Parse core size.
@@ -392,13 +393,116 @@ fn generate_server(interface: &Interface) -> TokenStream {
     }
 }
+fn generate_async_server_case(method: &Method) -> TokenStream {
+    let id = proc_macro2::Literal::u64_suffixed(method.number);
+    let name = ident(&method.name.to_case(Case::Snake));
+    let maybe_req = method.request.clone().map(|r| ident(&r));
+    let maybe_resp = method.response.clone().map(|r| ident(&r));
+    match (maybe_req, maybe_resp) {
+        (Some(req), Some(_)) => quote! {
+            #id => {
+                let req = #req::parse_from_request(byte_buffer, cap_buffer)?;
+                let resp = self.handler.#name(req).await?;
+                cap_buffer.resize(0, 0);
+                let resp_len = resp.serialize_as_request(0, byte_buffer, cap_buffer)?;
+                Ok(resp_len)
+            },
+        },
+        (Some(req), None) => quote! {
+            #id => {
+                let req = #req::parse_from_request(byte_buffer, cap_buffer)?;
+                self.handler.#name(req).await?;
+                cap_buffer.resize(0, 0);
+                // TODO: Implement serialization for EmptyMessage so this is less hacky.
+                yunq::message::serialize_error(byte_buffer, ZError::from(0));
+                Ok(0x10)
+            },
+        },
+        (None, Some(_)) => quote! {
+            #id => {
+                let resp = self.handler.#name().await?;
+                cap_buffer.resize(0, 0);
+                let resp_len = resp.serialize_as_request(0, byte_buffer, cap_buffer)?;
+                Ok(resp_len)
+            },
+        },
+        _ => unreachable!(),
+    }
+}
+fn generate_async_server_method(method: &Method) -> TokenStream {
+    let name = ident(&method.name.to_case(Case::Snake));
+    let maybe_req = method.request.clone().map(|r| ident(&r));
+    let maybe_resp = method.response.clone().map(|r| ident(&r));
+    match (maybe_req, maybe_resp) {
+        (Some(req), Some(resp)) => quote! {
+            fn #name (&self, req: #req) -> impl Future<Output= Result<#resp, ZError>> + Sync + Send;
+        },
+        (Some(req), None) => quote! {
+            fn #name (&self, req: #req) -> impl Future<Output = Result<(), ZError>> + Sync + Send;
+        },
+        (None, Some(resp)) => quote! {
+            fn #name (&self) -> impl Future<Output = Result<#resp, ZError>> + Sync + Send;
+        },
+        _ => unreachable!(),
+    }
+}
+fn generate_async_server(interface: &Interface) -> TokenStream {
+    let server_name = ident(&(String::from("Async") + &interface.name.clone() + "Server"));
+    let server_trait = ident(&(String::from("Async") + &interface.name.clone() + "ServerHandler"));
+    let server_trait_methods = interface.methods.iter().map(generate_async_server_method);
+    let server_match_cases = interface.methods.iter().map(generate_async_server_case);
+    quote! {
+        pub trait #server_trait {
+            #(#server_trait_methods)*
+        }
+        pub struct #server_name<T: #server_trait> {
+            endpoint_cap: Capability,
+            handler: T
+        }
+        impl<T: #server_trait> #server_name<T> {
+            pub fn new(handler: T) -> Result<Self, ZError> {
+                Ok(Self {
+                    endpoint_cap: syscall::endpoint_create()?,
+                    handler,
+                })
+            }
+        }
+        impl<T: #server_trait + Send + Sync + 'static> yunq::server::AsyncYunqServer for #server_name<T> {
+            fn endpoint_cap(&self) -> &Capability {
+                &self.endpoint_cap
+            }
+            async fn handle_request(
+                &self,
+                method_number: u64,
+                byte_buffer: &mut ByteBuffer<1024>,
+                cap_buffer: &mut Vec<z_cap_t>,
+            ) -> Result<usize, ZError> {
+                match method_number {
+                    #(#server_match_cases)*
+                    _ => Err(ZError::UNIMPLEMENTED)
+                }
+            }
+        }
+    }
+}
 fn generate_interface(interface: &Interface) -> TokenStream {
     let client = generate_client(interface);
     let server = generate_server(interface);
+    let async_server = generate_async_server(interface);
     quote! {
         #client
         #server
+        #async_server
     }
 }
@@ -428,6 +532,7 @@ pub fn generate_code(ast: &[Decl]) -> String {
     let interface_imports = if any_interfaces(ast) {
         quote! {
+            use core::future::Future;
             use mammoth::cap::Capability;
             use mammoth::syscall;