Compare commits


5 Commits

Author SHA1 Message Date
Drew Galbraith b8b6576b7f Cleanup AHCI Ident a bit and reduce logging 2023-06-12 23:32:24 -07:00
Drew Galbraith 72885190e9 Resolved page faults from user stacks 2023-06-12 23:28:23 -07:00
Drew Galbraith 6c13fdc801 Add a memory region type to mammoth 2023-06-12 23:24:05 -07:00
Drew Galbraith 6986f534f8 Add a method for blocking threads on ports. Additionally add the first lock class since we are becoming more concurrent. 2023-06-12 20:56:25 -07:00
Drew Galbraith b6735d3175 [zion] Fix an over-allocation bug when creating a memory object 2023-06-12 20:55:53 -07:00
26 changed files with 376 additions and 83 deletions

View File

@ -1,6 +1,7 @@
add_library(mammoth_lib STATIC
src/channel.cpp
src/debug.cpp
src/memory_region.cpp
src/process.cpp
src/thread.cpp
)

View File

@ -0,0 +1,29 @@
#pragma once
#include <stdint.h>
class MappedMemoryRegion {
public:
// FIXME: Introduce optional type to contain error or.
static MappedMemoryRegion DirectPhysical(uint64_t phys_addr, uint64_t size);
static MappedMemoryRegion ContiguousPhysical(uint64_t size);
static MappedMemoryRegion Default(uint64_t size);
MappedMemoryRegion() {}
// TODO: Disallow copy before doing any cleanup here.
~MappedMemoryRegion() {}
uint64_t paddr() { return paddr_; }
uint64_t vaddr() { return vaddr_; }
operator bool() { return vmmo_cap_ != 0; }
private:
MappedMemoryRegion(uint64_t vmmo_cap, uint64_t paddr, uint64_t vaddr,
uint64_t size)
: vmmo_cap_(vmmo_cap), paddr_(paddr), vaddr_(vaddr), size_(size) {}
uint64_t vmmo_cap_ = 0;
uint64_t paddr_ = 0;
uint64_t vaddr_ = 0;
uint64_t size_ = 0;
};

View File

@ -0,0 +1,36 @@
#include "mammoth/memory_region.h"
#include <zcall.h>
#include "mammoth/debug.h"
MappedMemoryRegion MappedMemoryRegion::DirectPhysical(uint64_t paddr,
uint64_t size) {
uint64_t vmmo_cap;
check(ZMemoryObjectCreatePhysical(paddr, size, &vmmo_cap));
uint64_t vaddr;
check(ZAddressSpaceMap(Z_INIT_VMAS_SELF, 0, vmmo_cap, &vaddr));
return MappedMemoryRegion(vmmo_cap, paddr, vaddr, size);
}
MappedMemoryRegion MappedMemoryRegion::ContiguousPhysical(uint64_t size) {
uint64_t vmmo_cap, paddr;
check(ZMemoryObjectCreateContiguous(size, &vmmo_cap, &paddr));
uint64_t vaddr;
check(ZAddressSpaceMap(Z_INIT_VMAS_SELF, 0, vmmo_cap, &vaddr));
return MappedMemoryRegion(vmmo_cap, paddr, vaddr, size);
}
MappedMemoryRegion MappedMemoryRegion::Default(uint64_t size) {
uint64_t vmmo_cap;
check(ZMemoryObjectCreate(size, &vmmo_cap));
uint64_t vaddr;
check(ZAddressSpaceMap(Z_INIT_VMAS_SELF, 0, vmmo_cap, &vaddr));
return MappedMemoryRegion(vmmo_cap, 0, vaddr, size);
}
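
Not part of the diff: a minimal usage sketch of the new MappedMemoryRegion helpers, showing the pattern the AHCI driver changes below adopt. The physical address and sizes here are made up for illustration.

// Hypothetical example only; kExamplePhys is not a real address in the tree.
#include "mammoth/memory_region.h"

void ExampleMapping() {
  constexpr uint64_t kExamplePhys = 0xFEB00000;  // made-up MMIO base
  // Map existing physical memory (e.g. device registers) into this process.
  MappedMemoryRegion regs = MappedMemoryRegion::DirectPhysical(kExamplePhys, 0x1000);
  // Allocate a physically contiguous buffer suitable for DMA.
  MappedMemoryRegion dma = MappedMemoryRegion::ContiguousPhysical(512);
  if (!regs || !dma) {
    return;  // operator bool is false if no memory object backs the region
  }
  // dma.paddr() is what gets handed to the device; dma.vaddr() is what the
  // driver reads back after the transfer completes.
}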

View File

@ -35,7 +35,7 @@ struct PciMsiCap {
uint8_t reserved;
uint64_t message_address;
uint16_t message_data;
};
} __attribute__((packed));
struct AhciHba {
uint32_t capabilities;
@ -90,7 +90,7 @@ struct PhysicalRegionDescriptor {
// 21:0 is byte count
// 31 is Interrupt on Completion
uint32_t byte_count;
};
} __attribute__((packed));
struct CommandTable {
uint8_t command_fis[64];
@ -110,9 +110,73 @@ typedef enum {
FIS_TYPE_DEV_BITS = 0xA1, // Set device bits FIS - device to host
} FIS_TYPE;
struct DmaFis {};
struct DmaFis {
// DWORD 0
uint8_t fis_type; // FIS_TYPE_DMA_SETUP
uint8_t pmport : 4; // Port multiplier
uint8_t rsv0 : 1; // Reserved
uint8_t d : 1; // Data transfer direction, 1 - device to host
uint8_t i : 1; // Interrupt bit
uint8_t a : 1; // Auto-activate. Specifies if DMA Activate FIS is needed
uint8_t rsved[2]; // Reserved
// DWORD 1&2
uint64_t DMAbufferID; // DMA Buffer Identifier. Used to Identify DMA buffer
// in host memory. SATA Spec says host specific and not
// in Spec. Trying AHCI spec might work.
// DWORD 3
uint32_t rsvd; // More reserved
// DWORD 4
uint32_t DMAbufOffset; // Byte offset into buffer. First 2 bits must be 0
// DWORD 5
uint32_t TransferCount; // Number of bytes to transfer. Bit 0 must be 0
// DWORD 6
uint32_t resvd; // Reserved
} __attribute__((packed));
struct PioSetupFis {
// DWORD 0
uint8_t fis_type; // FIS_TYPE_PIO_SETUP
uint8_t pmport : 4; // Port multiplier
uint8_t rsv0 : 1; // Reserved
uint8_t d : 1; // Data transfer direction, 1 - device to host
uint8_t i : 1; // Interrupt bit
uint8_t rsv1 : 1;
uint8_t status; // Status register
uint8_t error; // Error register
// DWORD 1
uint8_t lba0; // LBA low register, 7:0
uint8_t lba1; // LBA mid register, 15:8
uint8_t lba2; // LBA high register, 23:16
uint8_t device; // Device register
// DWORD 2
uint8_t lba3; // LBA register, 31:24
uint8_t lba4; // LBA register, 39:32
uint8_t lba5; // LBA register, 47:40
uint8_t rsv2; // Reserved
// DWORD 3
uint8_t countl; // Count register, 7:0
uint8_t counth; // Count register, 15:8
uint8_t rsv3; // Reserved
uint8_t e_status; // New value of status register
// DWORD 4
uint16_t tc; // Transfer count
uint8_t rsv4[2]; // Reserved
} __attribute__((packed));
struct PioSetupFis {};
struct HostToDeviceRegisterFis {
uint8_t fis_type; // FIS_TYPE_REG_H2D
uint8_t pmp_and_c;
@ -138,9 +202,36 @@ struct HostToDeviceRegisterFis {
// DWORD 4
uint32_t reserved; // Reserved
};
struct DeviceToHostRegisterFis {};
struct SetDeviceBitsFis {};
} __attribute__((packed));
struct DeviceToHostRegisterFis {
// DWORD 0
uint8_t fis_type; // FIS_TYPE_REG_D2H
uint8_t pmport_and_i;
uint8_t status; // Status register
uint8_t error; // Error register
// DWORD 1
uint8_t lba0; // LBA low register, 7:0
uint8_t lba1; // LBA mid register, 15:8
uint8_t lba2; // LBA high register, 23:16
uint8_t device; // Device register
// DWORD 2
uint8_t lba3; // LBA register, 31:24
uint8_t lba4; // LBA register, 39:32
uint8_t lba5; // LBA register, 47:40
uint8_t reserved1;
// DWORD 3
uint16_t count;
uint16_t reserved2;
uint32_t reserved3;
} __attribute__((packed));
struct SetDeviceBitsFis {
} __attribute__((packed));
struct ReceivedFis {
DmaFis dma_fis;
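
Not part of the commit: because these FIS layouts are consumed directly by the HBA, compile-time size checks against the SATA spec catch accidental padding. A hedged sketch, assuming the struct names above; the expected byte counts (DMA Setup 28, PIO Setup 20, Register D2H 20) come from the spec.

// Illustrative checks only; they could sit alongside the struct definitions.
static_assert(sizeof(DmaFis) == 28, "DMA Setup FIS must be 28 bytes");
static_assert(sizeof(PioSetupFis) == 20, "PIO Setup FIS must be 20 bytes");
static_assert(sizeof(DeviceToHostRegisterFis) == 20, "Register D2H FIS must be 20 bytes");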

View File

@ -4,6 +4,12 @@
#include <string.h>
#include <zcall.h>
namespace {
void HandleIdent(AhciDevice* dev) { dev->HandleIdentify(); }
} // namespace
AhciDevice::AhciDevice(AhciPort* port) : port_struct_(port) {
if ((port_struct_->sata_status & 0x103) != 0x103) {
return;
@ -15,26 +21,30 @@ AhciDevice::AhciDevice(AhciPort* port) : port_struct_(port) {
crash("Non adjacent cl & fis", Z_ERR_UNIMPLEMENTED);
}
check(ZMemoryObjectCreatePhysical(cl_page, 0x1000, &vmmo_cap_));
uint64_t vaddr;
check(ZAddressSpaceMap(Z_INIT_VMAS_SELF, 0, vmmo_cap_, &vaddr));
command_structures_ = MappedMemoryRegion::DirectPhysical(cl_page, 0x1000);
uint64_t cl_off = port_struct_->command_list_base & 0xFFF;
command_list_ = reinterpret_cast<CommandList*>(vaddr + cl_off);
command_list_ =
reinterpret_cast<CommandList*>(command_structures_.vaddr() + cl_off);
uint64_t fis_off = port_struct_->fis_base & 0xFFF;
received_fis_ = reinterpret_cast<ReceivedFis*>(vaddr + fis_off);
received_fis_ =
reinterpret_cast<ReceivedFis*>(command_structures_.vaddr() + fis_off);
// FIXME: Hacky
uint64_t ct_off =
command_list_->command_headers[0].command_table_base_addr & 0xFFF;
command_table_ = reinterpret_cast<CommandTable*>(vaddr + ct_off);
command_table_ =
reinterpret_cast<CommandTable*>(command_structures_.vaddr() + ct_off);
port_struct_->interrupt_enable = 0xFFFFFFFF;
if (port_struct_->signature == 0x101) {
SendIdentify();
}
}
z_err_t AhciDevice::SendIdentify(uint16_t** result) {
z_err_t AhciDevice::SendIdentify() {
HostToDeviceRegisterFis fis{
.fis_type = FIS_TYPE_REG_H2D,
.pmp_and_c = 0x80,
@ -63,22 +73,24 @@ z_err_t AhciDevice::SendIdentify(uint16_t** result) {
memcpy(command_table_->command_fis, &fis, sizeof(fis));
port_struct_->command_issue |= 1;
commands_[0].region = MappedMemoryRegion::ContiguousPhysical(512);
commands_[0].callback = HandleIdent;
uint64_t vmmo_cap, paddr;
RET_ERR(ZMemoryObjectCreateContiguous(512, &vmmo_cap, &paddr));
command_table_->prds[0].region_address = paddr;
command_table_->prds[0].region_address = commands_[0].region.paddr();
command_table_->prds[0].byte_count = 512;
uint64_t vaddr;
RET_ERR(ZAddressSpaceMap(Z_INIT_VMAS_SELF, 0, vmmo_cap, &vaddr));
*result = reinterpret_cast<uint16_t*>(vaddr);
port_struct_->command_issue |= 1;
commands_issued_ |= 1;
return Z_OK;
}
void AhciDevice::HandleIdentify() {
dbgln("Handling Idenify");
uint16_t* ident = reinterpret_cast<uint16_t*>(commands_[0].region.vaddr());
dbgln("Ident: %x", ident[0]);
}
void AhciDevice::DumpInfo() {
dbgln("Comlist: %lx", port_struct_->command_list_base);
dbgln("FIS: %lx", port_struct_->fis_base);
@ -87,7 +99,6 @@ void AhciDevice::DumpInfo() {
dbgln("SATA status: %x", port_struct_->sata_status);
dbgln("Int status: %x", port_struct_->interrupt_status);
dbgln("Int enable: %x", port_struct_->interrupt_enable);
dbgln("Int enable: %x", port_struct_->interrupt_enable);
// Just dump one command info for now.
for (uint64_t i = 0; i < 1; i++) {
@ -104,5 +115,36 @@ void AhciDevice::HandleIrq() {
// FIXME: Probably only clear the interrupts we know how to handle.
port_struct_->interrupt_status = int_status;
dbgln("int receieved: %x", int_status);
uint32_t commands_finished = commands_issued_ & ~port_struct_->command_issue;
for (uint64_t i = 0; i < 32; i++) {
if (commands_finished & (1 << i)) {
commands_[i].callback(this);
commands_issued_ &= ~(1 << i);
}
}
// TODO: Do something with this information.
if (int_status & 0x1) {
// Device to host.
DeviceToHostRegisterFis& fis = received_fis_->device_to_host_register_fis;
if (fis.fis_type != FIS_TYPE_REG_D2H) {
dbgln("BAD FIS TYPE (exp,act): %x, %x", FIS_TYPE_REG_D2H, fis.fis_type);
return;
}
if (fis.error) {
dbgln("D2H err: %x", fis.error);
}
}
if (int_status & 0x2) {
// PIO.
PioSetupFis& fis = received_fis_->pio_set_fis;
if (fis.fis_type != FIS_TYPE_PIO_SETUP) {
dbgln("BAD FIS TYPE (exp,act): %x, %x", FIS_TYPE_PIO_SETUP, fis.fis_type);
return;
}
if (fis.error) {
dbgln("PIO err: %x", fis.error);
}
}
}
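
Not from the diff: a small sketch of the slot bookkeeping HandleIrq relies on above. A slot counts as issued when its bit is set in commands_issued_, and as complete once the HBA clears the matching bit in the port's command_issue register; the struct below is illustrative only.

// Hypothetical helper showing the bitmask arithmetic used for completion.
struct SlotTracker {
  uint32_t issued = 0;

  void MarkIssued(int slot) { issued |= (1u << slot); }

  // Slots we issued that the hardware no longer reports as outstanding.
  uint32_t Completed(uint32_t hw_command_issue) const {
    return issued & ~hw_command_issue;
  }

  void Retire(int slot) { issued &= ~(1u << slot); }
};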

View File

@ -1,5 +1,6 @@
#pragma once
#include <mammoth/memory_region.h>
#include <zerrors.h>
#include "ahci/ahci.h"
@ -12,18 +13,26 @@ class AhciDevice {
void DumpInfo();
bool IsInit() { return port_struct_ != nullptr && vmmo_cap_ != 0; }
bool IsInit() { return port_struct_ != nullptr && command_structures_; }
// Result will point to a 512 byte (256 word array).
z_err_t SendIdentify(uint16_t** result);
z_err_t SendIdentify();
void HandleIdentify();
void HandleIrq();
private:
AhciPort* port_struct_ = nullptr;
uint64_t vmmo_cap_ = 0;
MappedMemoryRegion command_structures_;
CommandList* command_list_ = nullptr;
ReceivedFis* received_fis_ = nullptr;
CommandTable* command_table_ = nullptr;
struct Command {
typedef void (*Callback)(AhciDevice*);
MappedMemoryRegion region;
Callback callback;
};
Command commands_[32];
uint32_t commands_issued_ = 0;
};

View File

@ -13,7 +13,6 @@ const uint64_t kGhc_InteruptEnable = 0x2;
void interrupt_thread(void* void_driver) {
AhciDriver* driver = static_cast<AhciDriver*>(void_driver);
dbgln("this %lx", driver);
driver->InterruptLoop();
@ -24,17 +23,13 @@ void interrupt_thread(void* void_driver) {
z_err_t AhciDriver::Init() {
RET_ERR(LoadPciDeviceHeader());
RET_ERR(LoadCapabilities());
dbgln("ABAR: %x", pci_device_header_->abar);
dbgln("Interrupt line: %x", pci_device_header_->interrupt_line);
dbgln("Interrupt pin: %x", pci_device_header_->interrupt_pin);
// RET_ERR(LoadCapabilities());
RET_ERR(RegisterIrq());
RET_ERR(LoadHbaRegisters());
dbgln("Version: %x", ahci_hba_->version);
ahci_hba_->global_host_control |= kGhc_InteruptEnable;
RET_ERR(LoadDevices());
DumpCapabilities();
DumpPorts();
// DumpCapabilities();
// DumpPorts();
return Z_OK;
}
@ -42,8 +37,6 @@ void AhciDriver::DumpCapabilities() {
dbgln("AHCI Capabilities:");
uint32_t caps = ahci_hba_->capabilities;
dbgln("Num Ports: %u", (caps & 0x1F) + 1);
dbgln("Num Command Slots: %u", ((caps & 0x1F00) >> 8) + 1);
if (caps & 0x20) {
dbgln("External SATA");
}
@ -132,25 +125,21 @@ void AhciDriver::DumpPorts() {
}
void AhciDriver::InterruptLoop() {
dbgln("this %lx", this);
while (true) {
uint64_t type, bytes, caps;
check(ZPortRecv(irq_port_cap_, 0, 0, 0, 0, &type, &bytes, &caps));
for (uint64_t i = 0; i < 6; i++) {
if (devices_[i].IsInit()) {
for (uint64_t i = 0; i < 32; i++) {
if (devices_[i].IsInit() && (ahci_hba_->interrupt_status & (1 << i))) {
devices_[i].HandleIrq();
ahci_hba_->interrupt_status &= ~(1 << i);
}
}
}
}
z_err_t AhciDriver::LoadPciDeviceHeader() {
uint64_t vmmo_cap;
RET_ERR(ZMemoryObjectCreatePhysical(kSataPciPhys, kPciSize, &vmmo_cap));
uint64_t vaddr;
RET_ERR(ZAddressSpaceMap(Z_INIT_VMAS_SELF, 0, vmmo_cap, &vaddr));
pci_device_header_ = reinterpret_cast<PciDeviceHeader*>(vaddr);
pci_region_ = MappedMemoryRegion::DirectPhysical(kSataPciPhys, kPciSize);
pci_device_header_ = reinterpret_cast<PciDeviceHeader*>(pci_region_.vaddr());
return Z_OK;
}
@ -189,37 +178,28 @@ z_err_t AhciDriver::RegisterIrq() {
}
uint64_t irq_num = Z_IRQ_PCI_BASE + pci_device_header_->interrupt_pin - 1;
RET_ERR(ZIrqRegister(irq_num, &irq_port_cap_));
dbgln("this %lx", this);
irq_thread_ = Thread(interrupt_thread, this);
return Z_OK;
}
z_err_t AhciDriver::LoadHbaRegisters() {
uint64_t vmmo_cap;
RET_ERR(
ZMemoryObjectCreatePhysical(pci_device_header_->abar, 0x1100, &vmmo_cap));
ahci_region_ =
MappedMemoryRegion::DirectPhysical(pci_device_header_->abar, 0x1100);
ahci_hba_ = reinterpret_cast<AhciHba*>(ahci_region_.vaddr());
num_ports_ = (ahci_hba_->capabilities & 0x1F) + 1;
num_commands_ = ((ahci_hba_->capabilities & 0x1F00) >> 8) + 1;
uint64_t vaddr;
RET_ERR(ZAddressSpaceMap(Z_INIT_VMAS_SELF, 0, vmmo_cap, &vaddr));
ahci_hba_ = reinterpret_cast<AhciHba*>(vaddr);
return Z_OK;
}
z_err_t AhciDriver::LoadDevices() {
// FIXME: Don't set this up so we hardcode 6 devices.
for (uint8_t i = 0; i < 6; i++) {
for (uint8_t i = 0; i < 32; i++) {
if (!(ahci_hba_->port_implemented & (1 << i))) {
continue;
}
uint64_t port_addr =
reinterpret_cast<uint64_t>(ahci_hba_) + 0x100 + (0x80 * i);
devices_[i] = AhciDevice(reinterpret_cast<AhciPort*>(port_addr));
if (!devices_[i].IsInit()) {
continue;
}
dbgln("Identify %u", i);
uint16_t* identify;
devices_[i].SendIdentify(&identify);
}
return Z_OK;
}

View File

@ -16,14 +16,20 @@ class AhciDriver {
void DumpPorts();
private:
MappedMemoryRegion pci_region_;
PciDeviceHeader* pci_device_header_ = nullptr;
MappedMemoryRegion ahci_region_;
AhciHba* ahci_hba_ = nullptr;
AhciDevice devices_[6];
// TODO: Allocate these dynamically.
AhciDevice devices_[32];
Thread irq_thread_;
uint64_t irq_port_cap_ = 0;
uint64_t num_ports_;
uint64_t num_commands_;
z_err_t LoadPciDeviceHeader();
z_err_t LoadCapabilities();
z_err_t RegisterIrq();

View File

@ -10,6 +10,7 @@ add_executable(zion
interrupt/interrupt.cpp
interrupt/interrupt_enter.s
interrupt/timer.cpp
lib/mutex.cpp
loader/init_loader.cpp
memory/kernel_heap.cpp
memory/kernel_stack_manager.cpp

View File

@ -136,7 +136,6 @@ void ParseMcfg(SdtHeader* rsdt) {
void ParseMadt(SdtHeader* rsdt) {
#if K_ACPI_DEBUG
dbgsz(rsdt->signature, 4);
#endif
uint64_t max_addr = reinterpret_cast<uint64_t>(rsdt) + rsdt->length;
MadtHeader* header = reinterpret_cast<MadtHeader*>(rsdt);
@ -180,6 +179,7 @@ void ParseMadt(SdtHeader* rsdt) {
entry = reinterpret_cast<MadtEntry*>(reinterpret_cast<uint64_t>(entry) +
entry->length);
}
#endif
}
void ParseSdt(SdtHeader* rsdt) {

View File

@ -76,8 +76,8 @@ void MemToStr(uint64_t u, char* str) {
void AddProcPrefix() {
if (gScheduler != nullptr) {
auto& t = gScheduler->CurrentThread();
dbg("[%u.%u] ", t.pid(), t.tid());
auto t = gScheduler->CurrentThread();
dbg("[%u.%u] ", t->pid(), t->tid());
}
}

View File

@ -7,7 +7,7 @@
#include "common/port.h"
#include "debug/debug.h"
#define APIC_DEBUG 1
#define APIC_DEBUG 0
namespace {

zion/lib/mutex.cpp (new file, 11 lines)
View File

@ -0,0 +1,11 @@
#include "lib/mutex.h"
#include "debug/debug.h"
#include "scheduler/scheduler.h"
void Mutex::Lock() {
while (__atomic_fetch_or(&lock_, 0x1, __ATOMIC_SEQ_CST) == 0x1) {
dbgln("Lock sleep: %s", name_);
gScheduler->Preempt();
}
}

zion/lib/mutex.h (new file, 29 lines)
View File

@ -0,0 +1,29 @@
#pragma once
#include <stdint.h>
class Mutex {
public:
Mutex(const char* name) : name_(name) {}
void Lock();
void Unlock() { lock_ = false; }
private:
const char* name_;
uint8_t lock_ = 0;
};
class MutexHolder {
public:
MutexHolder(Mutex& mutex) : mutex_(mutex) { mutex_.Lock(); }
~MutexHolder() { mutex_.Unlock(); }
MutexHolder(MutexHolder&) = delete;
MutexHolder(MutexHolder&&) = delete;
private:
Mutex& mutex_;
};
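
Not part of the commit: a minimal sketch of how the new Mutex and MutexHolder are meant to be used, mirroring the Port and Process call sites later in this diff; the queue type here is made up.

// Illustrative only: guard a shared structure with the scoped holder.
#include "lib/linked_list.h"
#include "lib/mutex.h"

class ExampleQueue {
 public:
  void Push(uint64_t value) {
    MutexHolder lock(mutex_);  // Lock() in the constructor, Unlock() on scope exit
    items_.PushBack(value);
  }

 private:
  Mutex mutex_{"ExampleQueue"};
  LinkedList<uint64_t> items_;
};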

View File

@ -24,7 +24,8 @@ KernelHeap::KernelHeap(uint64_t lower_bound, uint64_t upper_bound)
void* KernelHeap::Allocate(uint64_t size) {
if (next_addr_ + size >= upper_bound_) {
panic("Kernel Heap Overrun");
panic("Kernel Heap Overrun (next, size, max): %m, %x, %m", next_addr_, size,
upper_bound_);
}
#if K_HEAP_DEBUG
RecordSize(size);

View File

@ -61,6 +61,9 @@ class PhysicalMemoryManager {
front_ = front_->next;
delete temp;
}
#if K_PHYS_DEBUG
dbgln("Single %m", page);
#endif
return page;
}
uint64_t AllocateContinuous(uint64_t num_pages) {
@ -89,6 +92,9 @@ class PhysicalMemoryManager {
front_ = front_->next;
delete temp;
}
#if K_PHYS_DEBUG
dbgln("Continuous %m:%u", page, num_pages);
#endif
return page;
}
void FreePage(uint64_t page) { AddMemoryRegion(page, 0x1000); }

View File

@ -21,3 +21,14 @@ void UserStackManager::FreeUserStack(uint64_t stack_ptr) {
freed_stacks_++;
dbgln("%u freed user stacks", freed_stacks_);
}
bool UserStackManager::IsValidStack(uint64_t vaddr) {
if (vaddr < next_stack_ || vaddr > (kStackMax - 0x1000)) {
return false;
}
// Checks if the address is in the first page of the stack.
if (vaddr & 0xFF000) {
return true;
}
return false;
}

View File

@ -20,6 +20,9 @@ class UserStackManager {
uint64_t NewUserStack();
void FreeUserStack(uint64_t stack_ptr);
// Used to check if we should page in this address.
bool IsValidStack(uint64_t vaddr);
private:
const uint64_t kStackMax = 0x00008000'00000000;
const uint64_t kStackMin = 0x00007FF0'00000000;

View File

@ -55,6 +55,11 @@ bool AddressSpace::HandlePageFault(uint64_t vaddr) {
#if K_VMAS_DEBUG
dbgln("[VMAS] Page Fault!");
#endif
if (user_stacks_.IsValidStack(vaddr)) {
MapPage(cr3_, vaddr, phys_mem::AllocatePage());
return true;
}
MemoryMapping* mapping = GetMemoryMappingForAddr(vaddr);
if (mapping == nullptr) {
return false;

View File

@ -1,5 +1,7 @@
#include "object/port.h"
#include "scheduler/scheduler.h"
Port::Port() {}
z_err_t Port::Write(const ZMessage& msg) {
@ -21,16 +23,26 @@ z_err_t Port::Write(const ZMessage& msg) {
for (uint64_t i = 0; i < msg.num_bytes; i++) {
message.bytes[i] = msg.bytes[i];
}
MutexHolder lock(mutex_);
pending_messages_.PushBack(message);
if (blocked_threads_.size() > 0) {
gScheduler->Enqueue(blocked_threads_.PopFront());
}
return Z_OK;
}
z_err_t Port::Read(ZMessage& msg) {
if (pending_messages_.size() < 1) {
dbgln("Implement blocking");
return Z_ERR_UNIMPLEMENTED;
mutex_.Lock();
while (pending_messages_.size() < 1) {
blocked_threads_.PushBack(gScheduler->CurrentThread());
mutex_.Unlock();
gScheduler->Yield();
mutex_.Lock();
}
mutex_.Unlock();
MutexHolder lock(mutex_);
Message next_msg = pending_messages_.PeekFront();
if (next_msg.num_bytes > msg.num_bytes) {
return Z_ERR_BUFF_SIZE;

View File

@ -1,7 +1,9 @@
#pragma once
#include "lib/linked_list.h"
#include "lib/mutex.h"
#include "object/kernel_object.h"
#include "object/thread.h"
#include "usr/zcall_internal.h"
class Port : public KernelObject {
@ -19,4 +21,8 @@ class Port : public KernelObject {
};
LinkedList<Message> pending_messages_;
LinkedList<RefPtr<Thread>> blocked_threads_;
Mutex mutex_{"Port"};
};

View File

@ -34,12 +34,14 @@ Process::Process()
: id_(gNextId++), vmas_(MakeRefCounted<AddressSpace>()), state_(RUNNING) {}
RefPtr<Thread> Process::CreateThread() {
MutexHolder lock(mutex_);
RefPtr<Thread> thread = MakeRefCounted<Thread>(*this, next_thread_id_++);
threads_.PushBack(thread);
return thread;
}
RefPtr<Thread> Process::GetThread(uint64_t tid) {
MutexHolder lock(mutex_);
auto iter = threads_.begin();
while (iter != threads_.end()) {
if (iter->tid() == tid) {
@ -52,6 +54,7 @@ RefPtr<Thread> Process::GetThread(uint64_t tid) {
}
void Process::CheckState() {
MutexHolder lock(mutex_);
auto iter = threads_.begin();
while (iter != threads_.end()) {
if (iter->GetState() != Thread::FINISHED) {
@ -63,6 +66,7 @@ void Process::CheckState() {
}
RefPtr<Capability> Process::ReleaseCapability(uint64_t cid) {
MutexHolder lock(mutex_);
auto iter = caps_.begin();
while (iter != caps_.end()) {
if (*iter && iter->id() == cid) {
@ -78,6 +82,7 @@ RefPtr<Capability> Process::ReleaseCapability(uint64_t cid) {
}
RefPtr<Capability> Process::GetCapability(uint64_t cid) {
MutexHolder lock(mutex_);
auto iter = caps_.begin();
while (iter != caps_.end()) {
if (*iter && iter->id() == cid) {
@ -91,11 +96,13 @@ RefPtr<Capability> Process::GetCapability(uint64_t cid) {
}
uint64_t Process::AddCapability(const RefPtr<Capability>& cap) {
MutexHolder lock(mutex_);
cap->set_id(next_cap_id_++);
caps_.PushBack(cap);
return cap->id();
}
uint64_t Process::AddCapability(const RefPtr<Thread>& thread) {
MutexHolder lock(mutex_);
uint64_t cap_id = next_cap_id_++;
caps_.PushBack(
MakeRefCounted<Capability>(thread, Capability::THREAD, cap_id, ZC_WRITE));
@ -103,24 +110,28 @@ uint64_t Process::AddCapability(const RefPtr<Thread>& thread) {
}
uint64_t Process::AddCapability(const RefPtr<Process>& p) {
MutexHolder lock(mutex_);
uint64_t cap_id = next_cap_id_++;
caps_.PushBack(MakeRefCounted<Capability>(p, Capability::PROCESS, cap_id,
ZC_WRITE | ZC_PROC_SPAWN_THREAD));
return cap_id;
}
uint64_t Process::AddCapability(const RefPtr<AddressSpace>& vmas) {
MutexHolder lock(mutex_);
uint64_t cap_id = next_cap_id_++;
caps_.PushBack(MakeRefCounted<Capability>(vmas, Capability::ADDRESS_SPACE,
cap_id, ZC_WRITE));
return cap_id;
}
uint64_t Process::AddCapability(const RefPtr<MemoryObject>& vmmo) {
MutexHolder lock(mutex_);
uint64_t cap_id = next_cap_id_++;
caps_.PushBack(MakeRefCounted<Capability>(vmmo, Capability::MEMORY_OBJECT,
cap_id, ZC_WRITE));
return cap_id;
}
uint64_t Process::AddCapability(const RefPtr<Channel>& chan) {
MutexHolder lock(mutex_);
uint64_t cap_id = next_cap_id_++;
caps_.PushBack(MakeRefCounted<Capability>(chan, Capability::CHANNEL, cap_id,
ZC_WRITE | ZC_READ));
@ -128,6 +139,7 @@ uint64_t Process::AddCapability(const RefPtr<Channel>& chan) {
}
uint64_t Process::AddCapability(const RefPtr<Port>& port) {
MutexHolder lock(mutex_);
uint64_t cap_id = next_cap_id_++;
caps_.PushBack(MakeRefCounted<Capability>(port, Capability::PORT, cap_id,
ZC_WRITE | ZC_READ));
@ -135,6 +147,7 @@ uint64_t Process::AddCapability(const RefPtr<Port>& port) {
}
void Process::AddCapability(uint64_t cap_id, const RefPtr<MemoryObject>& vmmo) {
MutexHolder lock(mutex_);
caps_.PushBack(MakeRefCounted<Capability>(vmmo, Capability::MEMORY_OBJECT,
cap_id, ZC_WRITE));
}

View File

@ -4,6 +4,7 @@
#include "capability/capability.h"
#include "lib/linked_list.h"
#include "lib/mutex.h"
#include "lib/ref_ptr.h"
#include "object/address_space.h"
#include "object/channel.h"
@ -51,6 +52,9 @@ class Process : public KernelObject {
friend class MakeRefCountedFriend<Process>;
Process();
Process(uint64_t id) : id_(id), vmas_(AddressSpace::ForRoot()) {}
Mutex mutex_{"Process"};
uint64_t id_;
RefPtr<AddressSpace> vmas_;
State state_;

View File

@ -15,7 +15,7 @@ extern "C" void jump_user_space(uint64_t rip, uint64_t rsp, uint64_t arg1,
extern "C" void thread_init() {
asm("sti");
gScheduler->CurrentThread().Init();
gScheduler->CurrentThread()->Init();
panic("Reached end of thread.");
}

View File

@ -13,7 +13,7 @@ class Scheduler {
void Enable() { enabled_ = true; }
Process& CurrentProcess() { return current_thread_->process(); }
Thread& CurrentThread() { return *current_thread_; }
RefPtr<Thread> CurrentThread() { return current_thread_; }
void Enqueue(const RefPtr<Thread>& thread) {
runnable_threads_.PushBack(thread);

View File

@ -24,7 +24,7 @@ extern "C" void syscall_enter();
// Used by syscall_enter.s
extern "C" uint64_t GetKernelRsp() {
return gScheduler->CurrentThread().Rsp0Start();
return gScheduler->CurrentThread()->Rsp0Start();
}
void InitSyscall() {
@ -138,7 +138,7 @@ z_err_t MemoryObjectCreatePhysical(ZMemoryObjectCreatePhysicalReq* req,
auto& curr_proc = gScheduler->CurrentProcess();
uint64_t paddr = req->paddr;
if (paddr == 0) {
paddr = phys_mem::AllocateContinuous((req->size - 1 / 0x1000) + 1);
paddr = phys_mem::AllocateContinuous(((req->size - 1) / 0x1000) + 1);
}
auto vmmo_ref = MakeRefCounted<FixedMemoryObject>(paddr, req->size);
resp->vmmo_cap =
@ -187,7 +187,6 @@ z_err_t ChannelRecv(ZChannelRecvReq* req) {
z_err_t PortRecv(ZPortRecvReq* req) {
auto& proc = gScheduler->CurrentProcess();
dbgln("Port cap %u", req->port_cap);
auto port_cap = proc.GetCapability(req->port_cap);
RET_ERR(ValidateCap(port_cap, Capability::PORT, ZC_READ));
@ -199,23 +198,21 @@ z_err_t IrqRegister(ZIrqRegisterReq* req, ZIrqRegisterResp* resp) {
auto& proc = gScheduler->CurrentProcess();
if (req->irq_num != Z_IRQ_PCI_BASE) {
// FIXME: Don't hardcode this nonsense.
dbgln("Irq %x", req->irq_num);
return Z_ERR_UNIMPLEMENTED;
}
RefPtr<Port> port = MakeRefCounted<Port>();
resp->port_cap = proc.AddCapability(port);
dbgln("Port cap %u", resp->port_cap);
RegisterPciPort(port);
return Z_OK;
}
extern "C" z_err_t SyscallHandler(uint64_t call_id, void* req, void* resp) {
Thread& thread = gScheduler->CurrentThread();
RefPtr<Thread> thread = gScheduler->CurrentThread();
switch (call_id) {
case Z_PROCESS_EXIT:
// FIXME: kill process here.
dbgln("Exit code: %u", req);
thread.Exit();
thread->Exit();
panic("Returned from thread exit");
break;
case Z_PROCESS_SPAWN:
@ -227,7 +224,7 @@ extern "C" z_err_t SyscallHandler(uint64_t call_id, void* req, void* resp) {
case Z_THREAD_START:
return ThreadStart(reinterpret_cast<ZThreadStartReq*>(req));
case Z_THREAD_EXIT:
thread.Exit();
thread->Exit();
panic("Returned from thread exit");
break;