Compare commits
5 Commits
a16dcc2aa9...17ed4ca1f6
Author | SHA1 | Date
---|---|---
 | 17ed4ca1f6 |
 | 69aced2220 |
 | 4af19d010f |
 | 85564b018d |
 | 9ba26195d2 |
@@ -1,4 +1,4 @@
-add_library(glacier STATIC
+set(glacier_files
   string/string.cpp
   string/string_builder.cpp
   string/string_view.cpp
@@ -6,9 +6,24 @@ add_library(glacier STATIC
   string/str_split.cpp
 )

+add_library(glacier STATIC
+  ${glacier_files}
+)
+
 target_include_directories(glacier
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/..")

 set_target_properties(glacier PROPERTIES
   COMPILE_FLAGS "${CMAKE_CXX_FLAGS} ${BASE_COMPILE_FLAGS}")
+
+add_library(glacier_kernel STATIC
+  ${glacier_files}
+)
+
+target_include_directories(glacier_kernel
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
+  PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/..")
+
+set_target_properties(glacier_kernel PROPERTIES
+  COMPILE_FLAGS "${CMAKE_CXX_FLAGS} ${BASE_COMPILE_FLAGS} -mcmodel=kernel")
@@ -68,8 +68,40 @@ void BinaryTree<K, V>::Insert(K key, V&& value) {

 template <typename K, typename V>
 void BinaryTree<K, V>::Delete(K key) {
-  // TODO: Implement Delete.
-  return;
+  auto node = FindOrInsertionParent(key);
+  if (node.empty() || node->key != key) {
+    return;
+  }
+
+  RefPtr<BinaryNode> new_child = nullptr;
+  if (!node.left) {
+    // No children.
+    // Right child only.
+    new_child = node.right;
+  } else if (!node.right) {
+    // Left child only.
+    new_child = node.left;
+  } else {
+    // Find Successor.
+    auto successor = node.right;
+    while (successor.left) {
+      successor = successor.left;
+    }
+    new_child = successor;
+    if (successor != node.right) {
+      successor.parent.left = successor.right;
+    }
+  }
+
+  if (node == root_) {
+    root_ = new_child;
+  } else {
+    if (node.parent.right == node) {
+      node.parent.right = new_child;
+    } else {
+      node.parent.left = new_child;
+    }
+  }
+}

 template <typename K, typename V>
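Note: the Delete added above is textbook binary-search-tree removal: a node with at most one child is replaced by that child, and a node with two children is replaced by its in-order successor (the leftmost node of its right subtree). The sketch below restates only the replacement-selection logic with plain pointers, purely as an illustration; the Node struct and names here are not taken from the glacier code.

// Conceptual sketch (assumed names, not the glacier implementation).
#include <cstdint>

struct Node {
  uint64_t key;
  Node* left = nullptr;
  Node* right = nullptr;
  Node* parent = nullptr;
};

// Chooses the node that takes the deleted node's place, mirroring the
// branch structure of the Delete hunk above.
Node* ReplacementFor(Node* node) {
  if (!node->left) return node->right;  // No children, or right child only.
  if (!node->right) return node->left;  // Left child only.
  // Two children: use the in-order successor (leftmost of the right subtree),
  // detaching it from its old parent first.
  Node* successor = node->right;
  while (successor->left) successor = successor->left;
  if (successor != node->right) successor->parent->left = successor->right;
  return successor;
}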
@@ -54,6 +54,11 @@ void StrFormatValue(StringBuilder& builder, ErrorCode value, StringView opts) {
   StrFormatValue(builder, static_cast<uint64_t>(value), opts);
 }

+template <>
+void StrFormatValue(StringBuilder& builder, char value, StringView opts) {
+  builder.PushBack(value);
+}
+
 template <>
 void StrFormatValue(StringBuilder& builder, const char* value,
                     StringView opts) {
@@ -65,6 +70,11 @@ void StrFormatValue(StringBuilder& builder, StringView value, StringView opts) {
   StrFormatInternal(builder, value);
 }

+template <>
+void StrFormatValue(StringBuilder& builder, String value, StringView opts) {
+  StrFormatInternal(builder, value);
+}
+
 void StrFormatInternal(StringBuilder& builder, StringView format) {
   // TODO: Consider throwing an error if there are unhandled format
   builder.PushBack(format);
@@ -28,12 +28,18 @@ void StrFormatValue(StringBuilder& builder, uint64_t value, StringView opts);
 template <>
 void StrFormatValue(StringBuilder& builder, ErrorCode value, StringView opts);

+template <>
+void StrFormatValue(StringBuilder& builder, char value, StringView opts);
+
 template <>
 void StrFormatValue(StringBuilder& builder, const char* value, StringView opts);

 template <>
 void StrFormatValue(StringBuilder& builder, StringView value, StringView opts);

+template <>
+void StrFormatValue(StringBuilder& builder, String value, StringView opts);
+
 void StrFormatInternal(StringBuilder& builder, StringView format);

 template <typename T, typename... Args>
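Note: this header shows how types opt into glacier's brace-style formatting: each type gets a StrFormatValue specialization that appends its textual form to the StringBuilder. A hedged sketch of adding one more specialization, modeled directly on the ErrorCode case above; the PortState enum is invented for illustration, and it assumes the primary StrFormatValue template lives in namespace glcr earlier in this header.

// Sketch only: PortState is a hypothetical caller-side enum.
#include <glacier/string/str_format.h>

enum class PortState : uint64_t { kIdle = 0, kActive = 1 };

namespace glcr {

template <>
void StrFormatValue(StringBuilder& builder, PortState value, StringView opts) {
  // Delegate to the uint64_t overload, exactly as the ErrorCode
  // specialization in this diff does.
  StrFormatValue(builder, static_cast<uint64_t>(value), opts);
}

}  // namespace glcr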
@@ -119,7 +119,7 @@ void AhciDriver::DumpCapabilities() {
     dbgln("Aggressive device sleep management");
   }

-  dbgln("Control %x", ahci_hba_->global_host_control);
+  dbgln("Control {x}", ahci_hba_->global_host_control);
 }

 void AhciDriver::DumpPorts() {
@@ -52,7 +52,7 @@ void DenaliServerBase::ServerThread() {
     recv_cap.Reset();
     glcr::ErrorCode recv_err = static_cast<glcr::ErrorCode>(ZEndpointRecv(endpoint_, &recv_buf_size, recv_buffer.RawPtr(), &recv_cap_size, recv_cap.RawPtr(), &reply_port_cap));
     if (recv_err != glcr::OK) {
-      dbgln("Error in receive: %x", recv_err);
+      dbgln("Error in receive: {x}", recv_err);
       continue;
     }

@@ -69,7 +69,7 @@ void DenaliServerBase::ServerThread() {
       reply_err = static_cast<glcr::ErrorCode>(ZReplyPortSend(reply_port_cap, kHeaderSize + resp_length, resp_buffer.RawPtr(), resp_cap.UsedSlots(), resp_cap.RawPtr()));
     }
     if (reply_err != glcr::OK) {
-      dbgln("Error in reply: %x", reply_err);
+      dbgln("Error in reply: {x}", reply_err);
     }
   }

@@ -20,22 +20,22 @@ glcr::ErrorOr<Ext2Driver> Ext2Driver::Init(ScopedDenaliClient&& denali) {
 glcr::ErrorCode Ext2Driver::ProbePartition() {
   Superblock* superblock = ext2_reader_->GetSuperblock();
   if (superblock->magic != 0xEF53) {
-    dbgln("Invalid EXT2 magic code: %x");
+    dbgln("Invalid EXT2 magic code: {x}");
     return glcr::INVALID_ARGUMENT;
   }
-  dbgln("Block size: 0x%x", 1024 << superblock->log_block_size);
+  dbgln("Block size: 0x{x}", 1024 << superblock->log_block_size);

-  dbgln("Blocks: 0x%x (0x%x per group)", superblock->blocks_count,
+  dbgln("Blocks: 0x{x} (0x{x} per group)", superblock->blocks_count,
        superblock->blocks_per_group);
-  dbgln("Inodes: 0x%x (0x%x per group)", superblock->inodes_count,
+  dbgln("Inodes: 0x{x} (0x{x} per group)", superblock->inodes_count,
        superblock->inodes_per_group);
-  dbgln("Inode size: 0x%x", superblock->inode_size);
+  dbgln("Inode size: 0x{x}", superblock->inode_size);

-  dbgln("Mounts: 0x%x out of 0x%x", superblock->mnt_count,
+  dbgln("Mounts: 0x{x} out of 0x{x}", superblock->mnt_count,
        superblock->max_mnt_count);
-  dbgln("State: %x", superblock->state);
+  dbgln("State: {x}", superblock->state);

-  dbgln("Created by: %x", superblock->creator_os);
+  dbgln("Created by: {x}", superblock->creator_os);

   return glcr::OK;
 }
@@ -52,7 +52,7 @@ void VFSServerBase::ServerThread() {
     recv_cap.Reset();
     glcr::ErrorCode recv_err = static_cast<glcr::ErrorCode>(ZEndpointRecv(endpoint_, &recv_buf_size, recv_buffer.RawPtr(), &recv_cap_size, recv_cap.RawPtr(), &reply_port_cap));
     if (recv_err != glcr::OK) {
-      dbgln("Error in receive: %x", recv_err);
+      dbgln("Error in receive: {x}", recv_err);
       continue;
     }

@@ -69,7 +69,7 @@ void VFSServerBase::ServerThread() {
      reply_err = static_cast<glcr::ErrorCode>(ZReplyPortSend(reply_port_cap, kHeaderSize + resp_length, resp_buffer.RawPtr(), resp_cap.UsedSlots(), resp_cap.RawPtr()));
     }
     if (reply_err != glcr::OK) {
-      dbgln("Error in reply: %x", reply_err);
+      dbgln("Error in reply: {x}", reply_err);
     }
   }

@@ -52,7 +52,7 @@ void YellowstoneServerBase::ServerThread() {
     recv_cap.Reset();
     glcr::ErrorCode recv_err = static_cast<glcr::ErrorCode>(ZEndpointRecv(endpoint_, &recv_buf_size, recv_buffer.RawPtr(), &recv_cap_size, recv_cap.RawPtr(), &reply_port_cap));
     if (recv_err != glcr::OK) {
-      dbgln("Error in receive: %x", recv_err);
+      dbgln("Error in receive: {x}", recv_err);
       continue;
     }

@@ -69,7 +69,7 @@ void YellowstoneServerBase::ServerThread() {
      reply_err = static_cast<glcr::ErrorCode>(ZReplyPortSend(reply_port_cap, kHeaderSize + resp_length, resp_buffer.RawPtr(), resp_cap.UsedSlots(), resp_cap.RawPtr()));
     }
     if (reply_err != glcr::OK) {
-      dbgln("Error in reply: %x", reply_err);
+      dbgln("Error in reply: {x}", reply_err);
     }
   }

@@ -50,7 +50,7 @@ target_include_directories(zion
 )

 target_link_libraries(zion
-  glacier)
+  glacier_kernel)

 # -c -- Don't run the linker (only necessary for the assembler)
 # -ffreestanding
@@ -133,7 +133,7 @@ void ParseMcfg(SdtHeader* rsdt) {
     uint64_t bytes_per_fn = 0x1000;
     gPcieEcSize = num_busses * dev_per_bus * fns_per_dev * bytes_per_fn;
 #if K_ACPI_DEBUG
-    dbgln("PCI Map: %m:%x", gPcieEcBase, gPcieEcSize);
+    dbgln("PCI Map: {x}:{x}", gPcieEcBase, gPcieEcSize);
 #endif
   }
 }
@@ -143,9 +143,9 @@ void ParseMadt(SdtHeader* rsdt) {
   uint64_t max_addr = reinterpret_cast<uint64_t>(rsdt) + rsdt->length;
   MadtHeader* header = reinterpret_cast<MadtHeader*>(rsdt);

-  dbgln("Local APIC %x", header->local_apic_address);
+  dbgln("Local APIC {x}", header->local_apic_address);
   gLApicBase = header->local_apic_address;
-  dbgln("Flags: %x", header->flags);
+  dbgln("Flags: {x}", header->flags);

   MadtEntry* entry = &header->first_entry;

@@ -153,13 +153,13 @@ void ParseMadt(SdtHeader* rsdt) {
     switch (entry->type) {
       case 0: {
         MadtLocalApic* local = reinterpret_cast<MadtLocalApic*>(entry);
-        dbgln("Local APIC (Proc id, id, flags): %x, %x, %x",
+        dbgln("Local APIC (Proc id, id, flags): {x}, {x}, {x}",
               local->processor_id, local->apic_id, local->flags);
         break;
       }
       case 1: {
         MadtIoApic* io = reinterpret_cast<MadtIoApic*>(entry);
-        dbgln("IO Apic (id, addr, gsi base): %x, %x, %x", io->io_apic_id,
+        dbgln("IO Apic (id, addr, gsi base): {x}, {x}, {x}", io->io_apic_id,
               io->io_apic_address, io->global_system_interrupt_base);
         if (gIOApicBase != 0) {
           dbgln("More than one IOApic, unhandled");
@@ -170,7 +170,7 @@ void ParseMadt(SdtHeader* rsdt) {
       case 2: {
         MadtIoApicInterruptSource* src =
             reinterpret_cast<MadtIoApicInterruptSource*>(entry);
-        dbgln("IO Source (Bus, IRQ, GSI, flags): %x, %x, %x, %x",
+        dbgln("IO Source (Bus, IRQ, GSI, flags): {x}, {x}, {x}, {x}",
               src->bus_source, src->irq_source, src->global_system_interrupt,
               src->flags);
         break;
@@ -178,12 +178,12 @@ void ParseMadt(SdtHeader* rsdt) {
       case 4: {
         MadtLocalApicNonMaskable* lnmi =
             reinterpret_cast<MadtLocalApicNonMaskable*>(entry);
-        dbgln("Local NMI (proc id, flags, lint#): %x, %x, %x",
+        dbgln("Local NMI (proc id, flags, lint#): {x}, {x}, {x}",
               lnmi->apic_processor_id, lnmi->flags, lnmi->lint_num);
         break;
       }
       default:
-        dbgln("Unhandled entry type: %u", entry->type);
+        dbgln("Unhandled entry type: {}", entry->type);
     }
     entry = reinterpret_cast<MadtEntry*>(reinterpret_cast<uint64_t>(entry) +
                                          entry->length);
@@ -240,8 +240,8 @@ void ProbeRsdp() {
   }

 #if K_ACPI_DEBUG
-  dbgln("ACPI Ver %u", rsdp->revision);
-  dbgln("RSDT Addr: %m", rsdp->rsdt_addr);
+  dbgln("ACPI Ver {}", rsdp->revision);
+  dbgln("RSDT Addr: {x}", rsdp->rsdt_addr);
 #endif

   ProbeRsdt(reinterpret_cast<SdtHeader*>(rsdp->rsdt_addr));
@@ -257,7 +257,7 @@ void ProbeRsdp() {
   }

 #if K_ACPI_DEBUG
-  dbgln("XSDT Addr: %m", rsdp->xsdt_addr);
+  dbgln("XSDT Addr: {x}", rsdp->xsdt_addr);
 #endif
 }

@@ -21,8 +21,8 @@ glcr::RefPtr<Capability> CapabilityTable::GetCapability(uint64_t id) {
     }
     ++iter;
   }
-  dbgln("Bad cap access %u", id);
-  dbgln("Num caps: %u", capabilities_.size());
+  dbgln("Bad cap access {}", id);
+  dbgln("Num caps: {}", capabilities_.size());
   return {};
 }

@@ -38,7 +38,7 @@ glcr::RefPtr<Capability> CapabilityTable::ReleaseCapability(uint64_t id) {
     }
     ++iter;
   }
-  dbgln("Bad cap release: %u", id);
-  dbgln("Num caps: %u", capabilities_.size());
+  dbgln("Bad cap release: {}", id);
+  dbgln("Num caps: {}", capabilities_.size());
   return {};
 }
@@ -21,6 +21,12 @@ void dbgcstr(const char* str) {
   }
 }

+void dbg(const glcr::StringView& str) {
+  for (uint64_t i = 0; i < str.size(); i++) {
+    dbgputchar(str[i]);
+  }
+}
+
 void U64ToStr(uint64_t u, char* str) {
   uint64_t len = 0;
   uint64_t u2 = u;
@@ -77,7 +83,7 @@ void MemToStr(uint64_t u, char* str) {
 void AddProcPrefix() {
   if (gScheduler != nullptr) {
     auto t = gScheduler->CurrentThread();
-    dbg("[%u.%u] ", t->pid(), t->tid());
+    dbg(glcr::StrFormat("[{}.{}] ", t->pid(), t->tid()));
   }
 }

@@ -132,6 +138,14 @@ void dbg_internal(const char* fmt, va_list args) {

 } // namespace

+void early_dbgln(const char* str) { dbgcstr(str); }
+
+void dbgln(const glcr::StringView& str) {
+  AddProcPrefix();
+  dbg(str);
+  dbg("\n");
+}
+
 void dbg(const char* fmt, ...) {
   va_list arg;
   va_start(arg, fmt);
@@ -1,13 +1,27 @@
 #pragma once

+#include <glacier/string/str_format.h>
 #include <stdarg.h>

 #include "include/ztypes.h"

-void dbg(const char* fmt, ...);
-void dbgln(const char* str, ...);
-void panic(const char* str, ...);
+// Debug line without formatting for
+// before allocations are available.
+void early_dbgln(const char* str);
+
+void dbgln(const glcr::StringView& str);
+
+template <typename... Args>
+void dbgln(const char* str, Args... args) {
+  dbgln(glcr::StrFormat(str, args...));
+}
+
+template <typename... Args>
+void panic(const char* str, Args... args) {
+  dbgln(glcr::StrFormat(str, args...));
+  dbgln("PANIC");
+}

 #define UNREACHABLE \
-  panic("Unreachable %s, %s", __FILE__, __LINE__); \
+  panic("Unreachable {}, {}", __FILE__, __LINE__); \
   __builtin_unreachable();
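Note: with the templated dbgln/panic above, formatting moves from printf-style specifiers handled in dbg_internal to glcr::StrFormat's brace placeholders, which is why every call site in the rest of this comparison swaps %x/%u/%m/%s for {x} or {}. A before/after call site, taken from the MADT hunk earlier in this diff:

// Old printf-style call (pre-change):
dbgln("Flags: %x", header->flags);

// New call: the variadic dbgln template routes the arguments through
// glcr::StrFormat(str, args...) and prints the result via dbgln(StringView).
dbgln("Flags: {x}", header->flags);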
@@ -62,7 +62,7 @@ Apic* gApic = nullptr;
 void Apic::Init() {
   auto config_or = GetApicConfiguration();
   if (!config_or.ok()) {
-    panic("Error fetching APIC info from ACPI: %x", config_or.error());
+    panic("Error fetching APIC info from ACPI: {x}", config_or.error());
   }
   gApic = new Apic(config_or.value());
 }
@@ -70,32 +70,32 @@ void Apic::Init() {
 void Apic::DumpInfo() {
 #if APIC_DEBUG
   dbgln("APIC:");
-  dbgln("ID: %x", GetLocalReg(0x20));
-  dbgln("VER: %x", GetLocalReg(0x30));
-  dbgln("TPR: %x", GetLocalReg(0x80));
-  dbgln("APR: %x", GetLocalReg(0x90));
-  dbgln("PPR: %x", GetLocalReg(0xA0));
-  dbgln("LDR: %x", GetLocalReg(0xD0));
-  dbgln("DFR: %x", GetLocalReg(0xE0));
-  dbgln("SIV: %x", GetLocalReg(0xF0));
+  dbgln("ID: {x}", GetLocalReg(0x20));
+  dbgln("VER: {x}", GetLocalReg(0x30));
+  dbgln("TPR: {x}", GetLocalReg(0x80));
+  dbgln("APR: {x}", GetLocalReg(0x90));
+  dbgln("PPR: {x}", GetLocalReg(0xA0));
+  dbgln("LDR: {x}", GetLocalReg(0xD0));
+  dbgln("DFR: {x}", GetLocalReg(0xE0));
+  dbgln("SIV: {x}", GetLocalReg(0xF0));
   for (uint64_t i = 0; i < 8; i++) {
-    dbgln("ISR(%u): %x", i, GetLocalReg(0x100 + (0x10 * i)));
+    dbgln("ISR({}): {x}", i, GetLocalReg(0x100 + (0x10 * i)));
   }
   for (uint64_t i = 0; i < 8; i++) {
-    dbgln("TMR(%u): %x", i, GetLocalReg(0x180 + (0x10 * i)));
+    dbgln("TMR({}): {x}", i, GetLocalReg(0x180 + (0x10 * i)));
   }
   for (uint64_t i = 0; i < 8; i++) {
-    dbgln("IRR(%u): %x", i, GetLocalReg(0x200 + (0x10 * i)));
+    dbgln("IRR({}): {x}", i, GetLocalReg(0x200 + (0x10 * i)));
   }
-  dbgln("ESR: %x", GetLocalReg(0x280));
+  dbgln("ESR: {x}", GetLocalReg(0x280));

-  dbgln("IO ID: %x", GetIoReg(0x0));
-  dbgln("IO VER: %x", GetIoReg(0x1));
-  dbgln("IO ARB: %x", GetIoReg(0x2));
+  dbgln("IO ID: {x}", GetIoReg(0x0));
+  dbgln("IO VER: {x}", GetIoReg(0x1));
+  dbgln("IO ARB: {x}", GetIoReg(0x2));
   for (uint8_t i = 0x10; i < 0x3F; i += 2) {
-    dbgln("IO (%u): %x", i, GetIoEntry(i));
+    dbgln("IO ({}): {x}", i, GetIoEntry(i));
   }
-  dbgln("APIC MSR: %x", GetMSR(0x1B));
+  dbgln("APIC MSR: {x}", GetMSR(0x1B));
 #endif
 }

@@ -70,7 +70,7 @@ struct InterruptFrame {

 extern "C" void isr_divide_by_zero();
 extern "C" void interrupt_divide_by_zero(InterruptFrame* frame) {
-  dbgln("RIP: %m", frame->rip);
+  dbgln("RIP: {x}", frame->rip);
   panic("DIV0");
 }

@@ -86,9 +86,9 @@ extern "C" void interrupt_protection_fault(InterruptFrame* frame) {
   } else {
     dbgln("GDT");
   }
-  dbgln("Index: %u", err >> 3);
-  dbgln("RIP: %m", frame->rip);
-  dbgln("RSP: %m", frame->rsp);
+  dbgln("Index: {}", err >> 3);
+  dbgln("RIP: {x}", frame->rip);
+  dbgln("RSP: {x}", frame->rsp);

   panic("GP");
 }
@@ -124,8 +124,8 @@ extern "C" void interrupt_page_fault(InterruptFrame* frame) {
     dbgln("Instruction Fetch");
   }

-  dbgln("rip: %m", frame->rip);
-  dbgln("addr: %m", frame->cr2);
+  dbgln("rip: {x}", frame->rip);
+  dbgln("addr: {x}", frame->cr2);
   panic("PF");
 }

@@ -143,7 +143,7 @@ extern "C" void interrupt_apic_timer(InterruptFrame*) {
     if (cnt == 20) {
       KernelHeap::DumpDistribution();
     }
-    dbgln("timer: %us", cnt * 50 / 1000);
+    dbgln("timer: {}s", cnt * 50 / 1000);
   }
   gApic->SignalEOI();
   gScheduler->Preempt();
@@ -61,7 +61,7 @@ typedef struct {
 uint64_t LoadElfProgram(Process& dest_proc, uint64_t base, uint64_t offset) {
   Elf64Header* header = reinterpret_cast<Elf64Header*>(base);
 #if K_INIT_DEBUG
-  dbgln("phoff: %u phnum: %u", header->phoff, header->phnum);
+  dbgln("phoff: {} phnum: {}", header->phoff, header->phnum);
 #endif
   Elf64ProgramHeader* programs =
       reinterpret_cast<Elf64ProgramHeader*>(base + header->phoff);
@@ -69,8 +69,8 @@ uint64_t LoadElfProgram(Process& dest_proc, uint64_t base, uint64_t offset) {
     Elf64ProgramHeader& program = programs[i];
 #if K_INIT_DEBUG
     dbgln(
-        "prog: type: %u, flags: %u, offset: %u\n vaddr: %m, paddr: %m\n "
-        "filesz: %x, memsz: %x, align: %x",
+        "prog: type: {}, flags: {}, offset: {}\n vaddr: {x}, paddr: {x}\n "
+        "filesz: {x}, memsz: {x}, align: {x}",
         program.type, program.flags, program.offset, program.vaddr,
         program.paddr, program.filesz, program.memsz, program.align);
 #endif
@@ -101,7 +101,7 @@ void DumpModules() {
   dbgln("[boot] Dumping bootloader modules.");
   for (uint64_t i = 0; i < resp.module_count; i++) {
     const limine_file& file = *resp.modules[i];
-    dbgln(" %s,%m,%x", file.path, file.address, file.size);
+    dbgln(" {},{x},{}", file.path, file.address, file.size);
   }
 #endif
 }
@@ -114,7 +114,7 @@ const limine_file& GetInitProgram(glcr::String path) {
       return file;
     }
   }
-  panic("Program not found: %s", path);
+  panic("Program not found: {}", path);
   UNREACHABLE
 }

@@ -36,32 +36,32 @@ void KernelHeap::InitializeSlabAllocators() {

 void* KernelHeap::Allocate(uint64_t size) {
 #if K_HEAP_DEBUG
-  dbgln("Alloc (%x)", size);
+  dbgln("Alloc ({x})", size);
 #endif
   if ((size <= 8) && slab_8_) {
     auto ptr_or = slab_8_->Allocate();
     if (ptr_or.ok()) {
       return ptr_or.value();
     }
-    dbgln("Failed allocation (slab 8): %x", ptr_or.error());
+    dbgln("Failed allocation (slab 8): {x}", ptr_or.error());
   }
   if ((size <= 16) && slab_16_) {
     auto ptr_or = slab_16_->Allocate();
     if (ptr_or.ok()) {
       return ptr_or.value();
     }
-    dbgln("Failed allocation (slab 16): %x", ptr_or.error());
+    dbgln("Failed allocation (slab 16): {x}", ptr_or.error());
   }
   if ((size <= 32) && slab_32_) {
     auto ptr_or = slab_32_->Allocate();
     if (ptr_or.ok()) {
       return ptr_or.value();
     }
-    dbgln("Failed allocation (slab 32): %x", ptr_or.error());
+    dbgln("Failed allocation (slab 32): {x}", ptr_or.error());
   }
   if (next_addr_ + size >= upper_bound_) {
-    panic("Kernel Heap Overrun (next, size, max): %m, %x, %m", next_addr_, size,
-          upper_bound_);
+    panic("Kernel Heap Overrun (next, size, max): {x}, {x}, {x}", next_addr_,
+          size, upper_bound_);
   }
 #if K_HEAP_DEBUG
   RecordSize(size);
@@ -75,18 +75,18 @@ void* KernelHeap::Allocate(uint64_t size) {
 void KernelHeap::DumpDistribution() {
 #if K_HEAP_DEBUG
   uint64_t* distributions = gKernelHeap->distributions;
-  dbgln("<=4B: %u", distributions[0]);
-  dbgln("<=8B: %u", distributions[1]);
-  dbgln("<=16B: %u", distributions[2]);
-  dbgln("<=32B: %u", distributions[3]);
-  dbgln("<=64B: %u", distributions[4]);
-  dbgln("<=128B: %u", distributions[5]);
-  dbgln("<=256B: %u", distributions[6]);
-  dbgln("<=512B: %u", distributions[7]);
-  dbgln("<=1KiB: %u", distributions[8]);
-  dbgln("<=2KiB: %u", distributions[9]);
-  dbgln("<=4KiB: %u", distributions[10]);
-  dbgln("> 4KiB: %u", distributions[11]);
+  dbgln("<=4B: {}", distributions[0]);
+  dbgln("<=8B: {}", distributions[1]);
+  dbgln("<=16B: {}", distributions[2]);
+  dbgln("<=32B: {}", distributions[3]);
+  dbgln("<=64B: {}", distributions[4]);
+  dbgln("<=128B: {}", distributions[5]);
+  dbgln("<=256B: {}", distributions[6]);
+  dbgln("<=512B: {}", distributions[7]);
+  dbgln("<=1KiB: {}", distributions[8]);
+  dbgln("<=2KiB: {}", distributions[9]);
+  dbgln("<=4KiB: {}", distributions[10]);
+  dbgln("> 4KiB: {}", distributions[11]);
 #endif
 }

@@ -30,5 +30,5 @@ uint64_t* KernelStackManager::AllocateKernelStack() {

 void KernelStackManager::FreeKernelStack(uint64_t stack_base) {
   freed_stack_cnt_++;
-  dbgln("Freed kernel stacks using %u KiB", freed_stack_cnt_ * 12);
+  dbgln("Freed kernel stacks using {} KiB", freed_stack_cnt_ * 12);
 }
@@ -37,7 +37,7 @@ class PhysicalMemoryManager {
     for (uint64_t i = 0; i < memmap.entry_count; i++) {
       const limine_memmap_entry& entry = *memmap.entries[i];
 #if K_PHYS_DEBUG
-      dbgln("Region(%u) at %m:%x", entry.type, entry.base, entry.length);
+      dbgln("Region({}) at {x}:{x}", entry.type, entry.base, entry.length);
 #endif
       if (entry.type == 0) {
         uint64_t base = entry.base;
@@ -46,7 +46,7 @@ class PhysicalMemoryManager {
           base = gBootstrap.next_page;
           uint64_t bootstrap_used = gBootstrap.next_page - gBootstrap.init_page;
 #if K_PHYS_DEBUG
-          dbgln("[PMM] Taking over from bootstrap, used: %x", bootstrap_used);
+          dbgln("[PMM] Taking over from bootstrap, used: {x}", bootstrap_used);
 #endif
           size -= bootstrap_used;
         }
@@ -73,7 +73,7 @@ class PhysicalMemoryManager {
       delete temp;
     }
 #if K_PHYS_DEBUG
-    dbgln("Single %m", page);
+    dbgln("Single {x}", page);
 #endif
     return page;
   }
@@ -88,7 +88,7 @@ class PhysicalMemoryManager {

     MemBlock* block = front_;
     while (block != nullptr && block->num_pages < num_pages) {
-      dbgln("Skipping block of size %u seeking %u", block->num_pages,
+      dbgln("Skipping block of size {} seeking {}", block->num_pages,
             num_pages);
       block = block->next;
     }
@@ -106,7 +106,7 @@ class PhysicalMemoryManager {
       delete temp;
     }
 #if K_PHYS_DEBUG
-    dbgln("Continuous %m:%u", page, num_pages);
+    dbgln("Continuous {x}:{}", page, num_pages);
 #endif
     return page;
   }
@@ -19,7 +19,7 @@ uint64_t UserStackManager::NewUserStack() {

 void UserStackManager::FreeUserStack(uint64_t stack_ptr) {
   freed_stacks_++;
-  dbgln("%u freed user stacks", freed_stacks_);
+  dbgln("{} freed user stacks", freed_stacks_);
 }

 bool UserStackManager::IsValidStack(uint64_t vaddr) {
@@ -74,7 +74,7 @@ bool AddressSpace::HandlePageFault(uint64_t vaddr) {
     return false;
   }
 #if K_VMAS_DEBUG
-  dbgln("[VMAS] Mapping P(%m) at V(%m)", physical_addr, vaddr);
+  dbgln("[VMAS] Mapping P({x}) at V({x})", physical_addr, vaddr);
 #endif
   MapPage(cr3_, vaddr, physical_addr);
   return true;
@@ -92,6 +92,13 @@ class AddressSpace : public KernelObject {
     uint64_t vaddr;
     glcr::RefPtr<MemoryObject> mem_obj;
   };
+
+  // TODO: Consider adding a red-black tree implementation here.
+  // As is this tree functions about as well as a linked list
+  // because mappings are likely to be added in near-perfect ascedning order.
+  // Also worth considering creating a special tree implementation for
+  // just this purpose, or maybe a BinaryTree implementation that accepts
+  // ranges rather than a single key.
   glcr::BinaryTree<uint64_t, MemoryMapping> memory_mappings_;

   glcr::Optional<glcr::Ref<MemoryMapping>> GetMemoryMappingForAddr(
@@ -10,7 +10,7 @@ MemoryObject::MemoryObject(uint64_t size) : size_(size) {
   if ((size & 0xFFF) != 0) {
     size_ = (size & ~0xFFF) + 0x1000;
 #if K_MEM_DEBUG
-    dbgln("MemoryObject: aligned %x to %x", size, size_);
+    dbgln("MemoryObject: aligned {x} to {x}", size, size_);
 #endif
   }
   // FIXME: Do this lazily.
@@ -30,7 +30,7 @@ uint64_t MemoryObject::PhysicalPageAtOffset(uint64_t offset) {

 void MemoryObject::CopyBytesToObject(uint64_t source, uint64_t length) {
   if (length > size_) {
-    panic("Copy overruns memory object: %x too large for %x", length, size_);
+    panic("Copy overruns memory object: {x} too large for {x}", length, size_);
   }
   uint64_t hhdm = boot::GetHigherHalfDirectMap();
   uint64_t page_number = 0;
@@ -61,7 +61,7 @@ uint64_t MemoryObject::PageNumberToPhysAddr(uint64_t page_num) {

   if (*iter == 0) {
 #if K_MEM_DEBUG
-    dbgln("Allocating page num %u for mem object", page_num);
+    dbgln("Allocating page num {} for mem object", page_num);
 #endif
     *iter = phys_mem::AllocatePage();
   }
@@ -40,7 +40,7 @@ glcr::RefPtr<Thread> Process::CreateThread() {
 glcr::RefPtr<Thread> Process::GetThread(uint64_t tid) {
   MutexHolder lock(mutex_);
   if (tid >= threads_.size()) {
-    panic("Bad thread access %u on process %u with %u threads.", tid, id_,
+    panic("Bad thread access {} on process {} with {} threads.", tid, id_,
           threads_.size());
   }
   return threads_[tid];
@@ -15,14 +15,14 @@ void ProcessManager::InsertProcess(const glcr::RefPtr<Process>& proc) {

 Process& ProcessManager::FromId(uint64_t pid) {
   if (pid >= proc_list_.size()) {
-    panic("Bad proc access %u, have %u processes", pid, proc_list_.size());
+    panic("Bad proc access {}, have {} processes", pid, proc_list_.size());
   }
   return *proc_list_[pid];
 }

 void ProcessManager::DumpProcessStates() {
-  dbgln("Process States: %u", proc_list_.size());
+  dbgln("Process States: {}", proc_list_.size());
   for (uint64_t i = 0; i < proc_list_.size(); i++) {
-    dbgln("%u: %u", proc_list_[i]->id(), proc_list_[i]->GetState());
+    dbgln("{}: {}", proc_list_[i]->id(), (uint64_t)proc_list_[i]->GetState());
   }
 }
@@ -5,6 +5,6 @@
 #include "debug/debug.h"

 z_err_t Debug(ZDebugReq* req) {
-  dbgln("[Debug] %s", req->message);
+  dbgln("[Debug] {}", req->message);
   return glcr::OK;
 }
@@ -17,7 +17,7 @@ glcr::ArrayView<uint8_t> Buffer(const void* bytes, uint64_t num_bytes) {
 template <typename T>
 glcr::ErrorOr<IpcMessage> TranslateRequestToIpcMessage(const T& req) {
   if (req.num_bytes > 0x1000) {
-    dbgln("Large message size unimplemented: %x", req.num_bytes);
+    dbgln("Large message size unimplemented: {x}", req.num_bytes);
     return glcr::UNIMPLEMENTED;
   }

@@ -7,7 +7,7 @@

 z_err_t ProcessExit(ZProcessExitReq* req) {
   auto curr_thread = gScheduler->CurrentThread();
-  dbgln("Exit code: %x", req->code);
+  dbgln("Exit code: {x}", req->code);
   // FIXME: kill process here.
   curr_thread->Exit();
   panic("Returned from thread exit");
@@ -88,7 +88,7 @@ extern "C" z_err_t SyscallHandler(uint64_t call_id, void* req) {
     // syscall/debug.h
     CASE(Debug);
     default:
-      dbgln("Unhandled syscall number: %x", call_id);
+      dbgln("Unhandled syscall number: {x}", call_id);
       return glcr::UNIMPLEMENTED;
   }
   UNREACHABLE
@@ -17,11 +17,11 @@
 #include "syscall/syscall.h"

 extern "C" void zion() {
-  dbgln("[boot] Init GDT & IDT.");
+  early_dbgln("[boot] Init GDT & IDT.");
   InitGdt();
   InitIdt();

-  dbgln("[boot] Init Physical Memory Manager.");
+  early_dbgln("[boot] Init Physical Memory Manager.");
   phys_mem::InitBootstrapPageAllocation();
   KernelHeap heap(0xFFFFFFFF'40000000, 0xFFFFFFFF'80000000);
   phys_mem::InitPhysicalMemoryManager();