Allow mapping the PCI config space so Yellowstone can access it.

This is a temp system call. Eventually we should probably supply the
root process with all of the system physical memory objects.
This commit is contained in:
Drew Galbraith 2023-06-07 22:45:42 -07:00
parent 71a601362d
commit 56789400d7
9 changed files with 126 additions and 2 deletions

View File

@ -1,5 +1,9 @@
add_executable(yellowstone
yellowstone.cpp)
hw/pcie.cpp
yellowstone.cpp
)
target_include_directories(yellowstone
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR})
target_link_libraries(yellowstone
cxx

View File

@ -0,0 +1,61 @@
#include "hw/pcie.h"
#include <mammoth/debug.h>
#include <zcall.h>
namespace {
// First 16 bytes of a PCI function's configuration-space header.
// This must match the hardware byte layout exactly — hence the packed
// attribute and the fixed-width field types. Field semantics follow the
// standard PCI configuration header (see PCI spec).
struct PciDeviceHeader {
uint16_t vendor_id;  // 0xFFFF means no device responds at this function.
uint16_t device_id;
uint16_t command_reg;
uint16_t status_reg;
uint8_t revision;
uint8_t prog_interface;
uint8_t subclass;
uint8_t class_code;
uint8_t cache_line_size;
uint8_t latency_timer;
uint8_t header_type;
uint8_t bist;
} __attribute__((packed));
// Logs identifying information for the PCI function whose config space is
// mapped at `fn_start`. Silently skips absent functions (vendor id 0xFFFF).
void DumpFn(uint64_t fn_start, uint64_t bus, uint64_t dev, uint64_t fun) {
  auto* header = reinterpret_cast<PciDeviceHeader*>(fn_start);
  // A vendor id of all-ones indicates nothing responds at this function.
  if (header->vendor_id != 0xFFFF) {
    dbgln(
        "[%u.%u.%u] (Vendor, Device): (%x, %x), (Type, Class, Sub, PIF): (%u, "
        "%x, %x, %x)",
        bus, dev, fun, header->vendor_id, header->device_id,
        header->header_type, header->class_code, header->subclass,
        header->prog_interface);
  }
}
// Walks the PCIe ECAM region mapped at `base` and dumps every present
// device function via DumpFn.
//
// ECAM offset layout: each bus occupies 1 MiB (bus << 20), each device
// 32 KiB (dev << 15), and each function 4 KiB (fn << 12).
//
// Fix: the previous version ignored `size` and always walked all 256
// buses, reading past the end of the mapping whenever the region covers
// fewer buses. Bound the bus scan by the mapped size (identical behavior
// when the full 256 MiB region is mapped).
void PciDump(uint64_t base, uint64_t size) {
  // Number of whole 1 MiB bus regions the mapping covers, capped at 256.
  uint64_t bus_limit = size >> 20;
  if (bus_limit > 0x100) {
    bus_limit = 0x100;
  }
  for (uint64_t b = 0; b < bus_limit; b++) {
    for (uint64_t d = 0; d <= 0x1F; d++) {
      for (uint64_t f = 0; f < 8; f++) {
        uint64_t fn_base = base + (b << 20) + (d << 15) + (f << 12);
        DumpFn(fn_base, b, d, f);
      }
    }
  }
}
} // namespace
// Obtains the PCIe config-space memory object from the kernel, maps it into
// this process's address space, and dumps every device function found in it.
void DumpPciEDevices() {
  dbgln("Creating PCI obj");
  uint64_t pcie_cap;
  uint64_t pcie_size;
  check(ZTempPcieConfigObjectCreate(&pcie_cap, &pcie_size));

  dbgln("Creating addr space");
  uint64_t pcie_vaddr;
  check(ZAddressSpaceMap(Z_INIT_VMAS_SELF, 0, pcie_cap, &pcie_vaddr));
  dbgln("Addr %x", pcie_vaddr);

  dbgln("Dumping PCI");
  PciDump(pcie_vaddr, pcie_size);
  dbgln("Done");
}

View File

@ -0,0 +1,3 @@
#pragma once

// Maps the PCIe config space via a kernel-provided memory object and dumps
// identifying information for every present device function to the debug log.
void DumpPciEDevices();

View File

@ -2,12 +2,16 @@
#include <mammoth/process.h>
#include <zcall.h>
#include "hw/pcie.h"
// Yellowstone entry point: spawns the process contained in the boot ELF
// module, then dumps the PCIe device tree.
uint64_t main() {
  dbgln("Yellowstone Initializing.");

  // Map the boot ELF module into our address space and launch it.
  uint64_t boot_elf_vaddr;
  check(ZAddressSpaceMap(Z_INIT_VMAS_SELF, 0, Z_INIT_BOOT_VMMO, &boot_elf_vaddr));
  check(SpawnProcessFromElfRegion(boot_elf_vaddr));

  DumpPciEDevices();

  dbgln("Yellowstone Finished Successfully.");
  return 0;
}

View File

@ -32,6 +32,8 @@
#define Z_MEMORY_OBJECT_CREATE 0x30
#define Z_TEMP_PCIE_CONFIG_OBJECT_CREATE 0x3F
#define Z_INIT_BOOT_VMMO 0x31
// IPC Calls
@ -65,6 +67,8 @@ void ZThreadExit();
[[nodiscard]] z_err_t ZAddressSpaceMap(uint64_t vmas_cap, uint64_t vmas_offset,
uint64_t vmmo_cap, uint64_t* vaddr);
[[nodiscard]] z_err_t ZMemoryObjectCreate(uint64_t size, uint64_t* vmmo_cap);
[[nodiscard]] z_err_t ZTempPcieConfigObjectCreate(uint64_t* vmmo_cap,
uint64_t* vmmo_size);
[[nodiscard]] z_err_t ZChannelCreate(uint64_t* channel1, uint64_t* channel2);
[[nodiscard]] z_err_t ZChannelSend(uint64_t chan_cap, uint64_t type,

View File

@ -20,11 +20,28 @@ class MemoryObject : public KernelObject {
void CopyBytesToObject(uint64_t source, uint64_t length);
protected:
// Hacky to avoid linked_list creation.
MemoryObject(uint64_t size, bool) : size_(size) {}
private:
// Always stores the full page-aligned size.
uint64_t size_;
uint64_t PageNumberToPhysAddr(uint64_t page_num);
virtual uint64_t PageNumberToPhysAddr(uint64_t page_num);
LinkedList<uint64_t> phys_page_list_;
};
// A MemoryObject backed by a fixed, caller-specified physical address range
// (e.g. an MMIO region like the PCIe config space) instead of dynamically
// tracked pages.
class FixedMemoryObject : public MemoryObject {
public:
// NOTE(review): the base class comment says size_ always stores the full
// page-aligned size — presumably `size` must already be page aligned here;
// confirm with callers.
FixedMemoryObject(uint64_t physical_addr, uint64_t size)
: MemoryObject(size, true), physical_addr_(physical_addr) {}
private:
uint64_t physical_addr_;
// Pages are contiguous starting at physical_addr_, 0x1000 (4 KiB) apart.
uint64_t PageNumberToPhysAddr(uint64_t page_num) override {
return physical_addr_ + (0x1000 * page_num);
}
};

View File

@ -2,6 +2,7 @@
#include <stdint.h>
#include "boot/acpi.h"
#include "common/msr.h"
#include "debug/debug.h"
#include "include/zcall.h"
@ -129,6 +130,20 @@ z_err_t MemoryObjectCreate(ZMemoryObjectCreateReq* req,
return Z_OK;
}
// Kernel handler for Z_TEMP_PCIE_CONFIG_OBJECT_CREATE: wraps the PCIe
// extended-config region in a FixedMemoryObject and hands the calling
// process a capability to it, along with the region's size.
z_err_t TempPcieConfigObjectCreate(ZTempPcieConfigObjectCreateResp* resp) {
  auto& proc = gScheduler->CurrentProcess();
  uint64_t config_base;
  uint64_t config_size;
  dbgln("Getting config");
  RET_ERR(GetPciExtendedConfiguration(&config_base, &config_size));
  dbgln("Making obj");
  auto config_object = MakeRefCounted<FixedMemoryObject>(config_base, config_size);
  dbgln("Adding cap");
  resp->vmmo_cap =
      proc.AddCapability(StaticCastRefPtr<MemoryObject>(config_object));
  resp->vmmo_size = config_size;
  return Z_OK;
}
z_err_t ChannelCreate(ZChannelCreateResp* resp) {
auto& proc = gScheduler->CurrentProcess();
auto chan_pair = Channel::CreateChannelPair();
@ -186,6 +201,9 @@ extern "C" z_err_t SyscallHandler(uint64_t call_id, void* req, void* resp) {
return MemoryObjectCreate(
reinterpret_cast<ZMemoryObjectCreateReq*>(req),
reinterpret_cast<ZMemoryObjectCreateResp*>(resp));
case Z_TEMP_PCIE_CONFIG_OBJECT_CREATE:
return TempPcieConfigObjectCreate(
reinterpret_cast<ZTempPcieConfigObjectCreateResp*>(resp));
case Z_CHANNEL_CREATE:
return ChannelCreate(reinterpret_cast<ZChannelCreateResp*>(resp));
case Z_CHANNEL_SEND:

View File

@ -83,6 +83,14 @@ z_err_t ZMemoryObjectCreate(uint64_t size, uint64_t* vmmo_cap) {
return ret;
}
// Requests a memory object covering the PCIe extended-config region from
// the kernel. On success, `vmmo_cap` receives the capability to the object
// and `vmmo_size` the region's size in bytes.
//
// Fix: only copy the response into the out-params on success — previously
// a failed syscall left `resp` uninitialized yet still wrote its contents
// to the caller's pointers, handing back indeterminate stack values.
z_err_t ZTempPcieConfigObjectCreate(uint64_t* vmmo_cap, uint64_t* vmmo_size) {
  ZTempPcieConfigObjectCreateResp resp;
  z_err_t ret = SysCall2(Z_TEMP_PCIE_CONFIG_OBJECT_CREATE, 0, &resp);
  if (ret != Z_OK) {
    return ret;
  }
  *vmmo_cap = resp.vmmo_cap;
  *vmmo_size = resp.vmmo_size;
  return ret;
}
z_err_t ZChannelCreate(uint64_t* channel1, uint64_t* channel2) {
ZChannelCreateResp resp;
z_err_t ret = SysCall2(Z_CHANNEL_CREATE, 0, &resp);

View File

@ -46,6 +46,11 @@ struct ZMemoryObjectCreateResp {
uint64_t vmmo_cap;
};
// Response payload for the Z_TEMP_PCIE_CONFIG_OBJECT_CREATE syscall.
struct ZTempPcieConfigObjectCreateResp {
uint64_t vmmo_cap;   // Capability to the PCIe config-region memory object.
uint64_t vmmo_size;  // Size of that region in bytes.
};
struct ZChannelCreateResp {
uint64_t chan_cap1;
uint64_t chan_cap2;