Register denali on yellowstone, handle read requests.

Drew Galbraith 2025-02-01 13:09:42 -08:00
parent a5cdd23f0b
commit 6f0dfa8719
6 changed files with 56 additions and 22 deletions

View File

@@ -26,9 +26,7 @@ impl TaskId {
 pub struct Task {
     id: TaskId,
-    // FIXME: This only needs to be sync because of the CURRENT_EXECUTOR
-    // needing to be shared between threads.
-    future: Pin<Box<dyn Future<Output = ()> + Sync + Send>>,
+    future: Pin<Box<dyn Future<Output = ()> + Send>>,
 }

 impl Task {
@@ -72,7 +70,7 @@ impl Wake for TaskWaker {
 }

 pub struct Executor {
-    tasks: BTreeMap<TaskId, Task>,
+    tasks: Arc<Mutex<BTreeMap<TaskId, Task>>>,
     // TODO: Consider a better datastructure for this.
     task_queue: Arc<Mutex<VecDeque<TaskId>>>,
     waker_cache: BTreeMap<TaskId, Waker>,
@@ -81,7 +79,7 @@ pub struct Executor {
 impl Executor {
     pub fn new() -> Executor {
         Executor {
-            tasks: BTreeMap::new(),
+            tasks: Arc::new(Mutex::new(BTreeMap::new())),
             task_queue: Arc::new(Mutex::new(VecDeque::new())),
             waker_cache: BTreeMap::new(),
         }
@@ -89,7 +87,7 @@ impl Executor {
     pub fn spawn(&mut self, task: Task) {
         let task_id = task.id;
-        if self.tasks.insert(task_id, task).is_some() {
+        if self.tasks.lock().insert(task_id, task).is_some() {
             panic!("Task is already existed in executor map");
         }
         self.task_queue.lock().push_back(task_id);
@@ -97,7 +95,8 @@ impl Executor {
     fn run_ready_tasks(&mut self) {
         while let Some(task_id) = self.task_queue.lock().pop_front() {
-            let task = self.tasks.get_mut(&task_id).unwrap();
+            let mut tasks = self.tasks.lock();
+            let task = tasks.get_mut(&task_id).unwrap();
             let waker = self
                 .waker_cache
                 .entry(task_id)
@@ -105,7 +104,7 @@ impl Executor {
             let mut ctx = Context::from_waker(waker);
             match task.poll(&mut ctx) {
                 Poll::Ready(()) => {
-                    self.tasks.remove(&task_id);
+                    tasks.remove(&task_id);
                     self.waker_cache.remove(&task_id);
                 }
                 Poll::Pending => {}
@@ -120,6 +119,30 @@ impl Executor {
             syscall::thread_sleep(50).unwrap();
         }
     }
+
+    pub fn new_spawner(&self) -> Spawner {
+        Spawner::new(self.tasks.clone(), self.task_queue.clone())
+    }
 }

-static CURRENT_EXECUTOR: Option<Executor> = None;
+pub struct Spawner {
+    tasks: Arc<Mutex<BTreeMap<TaskId, Task>>>,
+    task_queue: Arc<Mutex<VecDeque<TaskId>>>,
+}
+
+impl Spawner {
+    fn new(
+        tasks: Arc<Mutex<BTreeMap<TaskId, Task>>>,
+        task_queue: Arc<Mutex<VecDeque<TaskId>>>,
+    ) -> Self {
+        Spawner { tasks, task_queue }
+    }
+
+    pub fn spawn(&self, task: Task) {
+        let task_id = task.id;
+        if self.tasks.lock().insert(task_id, task).is_some() {
+            panic!("Task is already existed in executor map");
+        }
+        self.task_queue.lock().push_back(task_id);
+    }
+}

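Note: the Spawner added above exists so that task submission no longer requires exclusive access to the Executor; a Spawner only clones the shared task map and queue, so it can be moved to another thread while the Executor keeps the polling loop. A minimal usage sketch under those assumptions (Task::new accepting any Send future, and the executor shared behind mammoth::sync::Mutex as denali's main does below; spawner_example is illustrative only):

use alloc::sync::Arc;
use mammoth::sync::Mutex;
use mammoth::task::{Executor, Task};

fn spawner_example() {
    // One Executor owns the polling loop.
    let executor = Arc::new(Mutex::new(Executor::new()));

    // A Spawner is a cheap handle onto the executor's task map and queue;
    // it can be handed to another thread without locking the Executor.
    let spawner = executor.lock().new_spawner();
    spawner.spawn(Task::new(async {
        // async work enqueued from another thread
    }));

    // Polls ready tasks, sleeping between passes via thread_sleep.
    executor.lock().run();
}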
View File

@@ -4,9 +4,8 @@ use crate::buffer::ByteBuffer;
 use alloc::sync::Arc;
 use alloc::vec::Vec;
 use mammoth::cap::Capability;
-use mammoth::sync::Mutex;
 use mammoth::syscall;
-use mammoth::task::Executor;
+use mammoth::task::Spawner;
 use mammoth::task::Task;
 use mammoth::thread;
 use mammoth::thread::JoinHandle;
@@ -70,7 +69,7 @@ pub trait AsyncYunqServer
 where
     Self: Send + Sync + 'static,
 {
-    fn server_loop(self: Arc<Self>, executor: Arc<Mutex<Executor>>) {
+    fn server_loop(self: Arc<Self>, spawner: Spawner) {
         loop {
             let mut byte_buffer = ByteBuffer::<1024>::new();
             let mut cap_buffer = vec![0; 10];
@@ -85,7 +84,7 @@ where
                 .at::<u64>(8)
                 .expect("Failed to access request length.");
             let self_clone = self.clone();
-            executor.lock().spawn(Task::new((async move || {
+            spawner.spawn(Task::new((async move || {
                 self_clone
                     .handle_request_and_response(method, byte_buffer, cap_buffer, reply_port_cap)
                     .await;
@@ -137,11 +136,11 @@ where
     ) -> impl Future<Output = Result<usize, ZError>> + Sync + Send;
 }

-pub fn spawn_async_server_thread<T>(server: Arc<T>, executor: Arc<Mutex<Executor>>) -> JoinHandle
+pub fn spawn_async_server_thread<T>(server: Arc<T>, spawner: Spawner) -> JoinHandle
 where
     T: AsyncYunqServer + Send + Sync + 'static,
 {
     thread::spawn(move || {
-        server.server_loop(executor);
+        server.server_loop(spawner);
     })
 }

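Note: server_loop and spawn_async_server_thread now take a Spawner by value instead of an Arc<Mutex<Executor>>, so the request loop can enqueue handler futures without ever locking the executor. A sketch of the call site (denali's main below is the real one; executor and server are assumed to be set up as there):

// Hand the server thread a Spawner rather than the executor itself.
let spawner = executor.lock().new_spawner();
let _server_thread = yunq::server::spawn_async_server_thread(server.clone(), spawner);

// The executor stays on this thread and picks up the tasks that the
// server thread spawns for each incoming request.
executor.lock().run();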
View File

@@ -197,7 +197,6 @@ pub fn spawn_irq_thread(controller: Arc<AhciController>) -> thread::JoinHandle {
         });
         loop {
             irq_port.recv_null().unwrap();
-            mammoth::debug!("Interrupt!");
             controller.handle_irq();
         }
     };

View File

@@ -11,8 +11,10 @@ use mammoth::{
     zion::z_err_t,
 };

-use denali::ahci::{self, identify_ports, spawn_irq_thread, AhciController};
+use denali::ahci::{identify_ports, spawn_irq_thread, AhciController};
 use denali::{denali_server::DenaliServerImpl, AsyncDenaliServer};
+use yellowstone_yunq::RegisterEndpointRequest;
+use yunq::server::AsyncYunqServer;

 define_entry!();
@@ -42,7 +44,18 @@ extern "C" fn main() -> z_err_t {
     let denali_server =
         Arc::new(AsyncDenaliServer::new(DenaliServerImpl::new(ahci_controller.clone())).unwrap());

-    let server_thread = yunq::server::spawn_async_server_thread(denali_server, executor.clone());
+    let server_thread = yunq::server::spawn_async_server_thread(
+        denali_server.clone(),
+        executor.lock().new_spawner(),
+    );
+
+    let yellowstone = yellowstone_yunq::from_init_endpoint();
+    yellowstone
+        .register_endpoint(&RegisterEndpointRequest {
+            endpoint_name: "denali".into(),
+            endpoint_capability: denali_server.create_client_cap().unwrap().release(),
+        })
+        .unwrap();

     executor.clone().lock().run();

View File

@@ -32,7 +32,7 @@ impl AsyncDenaliServerHandler for DenaliServerImpl {
         })
     }

-    async fn read_many(&self, req: ReadManyRequest) -> Result<ReadResponse, ZError> {
+    async fn read_many(&self, _req: ReadManyRequest) -> Result<ReadResponse, ZError> {
         Err(ZError::UNIMPLEMENTED)
     }
 }

View File

@@ -435,13 +435,13 @@ fn generate_async_server_method(method: &Method) -> TokenStream {
     let maybe_resp = method.response.clone().map(|r| ident(&r));
     match (maybe_req, maybe_resp) {
         (Some(req), Some(resp)) => quote! {
-            fn #name (&self, req: #req) -> impl Future<Output= Result<#resp, ZError>> + Sync;
+            fn #name (&self, req: #req) -> impl Future<Output= Result<#resp, ZError>> + Sync + Send;
         },
         (Some(req), None) => quote! {
-            fn #name (&self, req: #req) -> impl Future<Output = Result<(), ZError>> + Sync;
+            fn #name (&self, req: #req) -> impl Future<Output = Result<(), ZError>> + Sync + Send;
         },
         (None, Some(resp)) => quote! {
-            fn #name (&self) -> impl Future<Output = Result<#resp, ZError>> + Sync;
+            fn #name (&self) -> impl Future<Output = Result<#resp, ZError>> + Sync + Send;
         },
         _ => unreachable!(),
     }
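
Note: the added Send bound is what allows the generated handler futures to be wrapped in a Task (whose boxed future is dyn Future + Send after this commit) and polled on the executor thread rather than the server thread. For denali's read_many, for instance, the (Some(req), Some(resp)) arm would now emit roughly:

fn read_many(&self, req: ReadManyRequest) -> impl Future<Output = Result<ReadResponse, ZError>> + Sync + Send;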