Compare commits

...

5 Commits

25 changed files with 213 additions and 119 deletions

View File

@@ -1,3 +1,2 @@
 add_subdirectory(glacier)
-add_subdirectory(libc)
 add_subdirectory(mammoth)

View File

@@ -1,16 +0,0 @@
-add_library(c STATIC
-  src/malloc.cpp
-  src/string.cpp
-)
-
-target_include_directories(c
-  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include)
-
-target_link_libraries(c
-  zion_stub
-)
-
-set_target_properties(c PROPERTIES
-  COMPILE_FLAGS "${CMAKE_CXX_FLAGS} ${BASE_COMPILE_FLAGS}")
-

View File

@@ -1,5 +0,0 @@
-#pragma once
-
-#include <stdint.h>
-
-typedef uint64_t size_t;

View File

@@ -1,5 +0,0 @@
-#pragma once
-
-#include "stddef.h"
-
-void* malloc(size_t size);

View File

@@ -1,5 +0,0 @@
-#pragma once
-
-#include "stddef.h"
-
-void* memcpy(void* dest, const void* src, size_t count);

View File

@@ -1,46 +0,0 @@
-#include <zcall.h>
-#include <zglobal.h>
-
-#include "stdlib.h"
-
-namespace {
-class NaiveAllocator {
- public:
-  constexpr static uint64_t kSize = 0x10000;
-  NaiveAllocator() {}
-  bool is_init() { return next_addr_ != 0; }
-  void Init() {
-    uint64_t vmmo_cap;
-    uint64_t err = ZMemoryObjectCreate(kSize, &vmmo_cap);
-    if (err != 0) {
-      ZProcessExit(err);
-    }
-    err = ZAddressSpaceMap(gSelfVmasCap, 0, vmmo_cap, &next_addr_);
-    max_addr_ = next_addr_ + kSize;
-  }
-
-  void* Allocate(size_t size) {
-    uint64_t addr = next_addr_;
-    next_addr_ += size;
-    if (next_addr_ >= max_addr_) {
-      ZProcessExit(0xBEEF);
-      return 0;
-    }
-    return reinterpret_cast<void*>(addr);
-  }
-
- private:
-  uint64_t next_addr_ = 0;
-  uint64_t max_addr_ = 0;
-};
-
-NaiveAllocator gAlloc;
-
-}  // namespace
-
-void* malloc(size_t size) {
-  if (!gAlloc.is_init()) {
-    gAlloc.Init();
-  }
-  return gAlloc.Allocate(size);
-}

View File

@@ -1,10 +0,0 @@
-#include "string.h"
-
-void* memcpy(void* dest, const void* src, size_t count) {
-  uint8_t* d = (uint8_t*)dest;
-  const uint8_t* s = (uint8_t*)src;
-  for (size_t i = 0; i < count; i++) {
-    d[i] = s[i];
-  }
-  return dest;
-}

View File

@@ -23,7 +23,6 @@ target_include_directories(mammoth
 
 target_link_libraries(mammoth
   glacier
-  c
   victoriafalls_yunq
   yellowstone_yunq
   zion_stub

View File

@@ -81,7 +81,7 @@ uint64_t LoadElfProgram(uint64_t base, uint64_t as_cap) {
     dbgln("Map Local");
 #endif
     uint64_t vaddr;
-    check(ZAddressSpaceMap(gSelfVmasCap, 0, mem_cap, &vaddr));
+    check(ZAddressSpaceMap(gSelfVmasCap, 0, mem_cap, 0, &vaddr));
     uint8_t* offset = reinterpret_cast<uint8_t*>(vaddr);
     for (uint64_t j = 0; j < size; j++) {
       offset[j] = 0;
@@ -95,8 +95,8 @@ uint64_t LoadElfProgram(uint64_t base, uint64_t as_cap) {
 #if MAM_PROC_DEBUG
     dbgln("Map Foreign");
 #endif
-    check(
-        ZAddressSpaceMap(as_cap, program.vaddr - page_offset, mem_cap, &vaddr));
+    check(ZAddressSpaceMap(as_cap, program.vaddr - page_offset, mem_cap, 0,
+                           &vaddr));
   }
   return header->entry;
 }

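The extra 0 threaded into these ZAddressSpaceMap calls is the alignment argument this change adds to the syscall (the SYS4-to-SYS5 stub change appears further down); 0 asks for no alignment beyond page granularity. A caller-side sketch, where mem_cap and frame_cap are hypothetical memory-object capabilities:

    uint64_t vaddr;
    // No alignment constraint: any page-aligned address will do.
    check(ZAddressSpaceMap(gSelfVmasCap, 0, mem_cap, /* align= */ 0, &vaddr));
    // Constrain the mapping to a 0x2000-byte boundary, as the new userspace
    // page allocator below does: vaddr % 0x2000 == 0 on return.
    check(ZAddressSpaceMap(gSelfVmasCap, 0, frame_cap, /* align= */ 0x2000,
                           &vaddr));
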
View File

@@ -3,7 +3,9 @@
 #include <glacier/status/error.h>
 #include <zcall.h>
 
-void dbgln(const glcr::String& string) { (void)ZDebug(string.cstr()); }
+void dbgln(glcr::StringView string) {
+  (void)ZDebug(string.data(), string.size());
+}
 
 void check(uint64_t code) {
   switch (code) {

View File

@@ -6,7 +6,7 @@
 #include <ztypes.h>
 
 // TODO: Take StringView here instead.
-void dbgln(const glcr::String& string);
+void dbgln(glcr::StringView string);
 
 template <typename... Args>
 void dbgln(const glcr::StringView& fmt, Args... args) {

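Since dbgln now takes a glcr::StringView and ZDebug carries an explicit size (see the SYS2(Debug, ...) stub and the kernel-side handler at the end of this diff), debug strings no longer need to be null-terminated. A minimal sketch, using the (pointer, size) StringView constructor that the kernel hunk also uses:

    char buf[4] = {'b', 'o', 'o', 't'};  // note: no trailing '\0'
    dbgln(glcr::StringView(buf, 4));     // forwards (data(), size()) to ZDebug
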
View File

@@ -36,7 +36,7 @@ OwnedMemoryRegion::~OwnedMemoryRegion() {
 
 OwnedMemoryRegion OwnedMemoryRegion::FromCapability(z_cap_t vmmo_cap) {
   uint64_t vaddr;
-  check(ZAddressSpaceMap(gSelfVmasCap, 0, vmmo_cap, &vaddr));
+  check(ZAddressSpaceMap(gSelfVmasCap, 0, vmmo_cap, 0, &vaddr));
 
   uint64_t size;
   check(ZMemoryObjectInspect(vmmo_cap, &size));
@@ -50,7 +50,7 @@ OwnedMemoryRegion OwnedMemoryRegion::ContiguousPhysical(uint64_t size,
   check(ZMemoryObjectCreateContiguous(size, &vmmo_cap, paddr));
 
   uint64_t vaddr;
-  check(ZAddressSpaceMap(gSelfVmasCap, 0, vmmo_cap, &vaddr));
+  check(ZAddressSpaceMap(gSelfVmasCap, 0, vmmo_cap, 0, &vaddr));
 
   return OwnedMemoryRegion(vmmo_cap, vaddr, size);
 }
@@ -61,7 +61,7 @@ OwnedMemoryRegion OwnedMemoryRegion::DirectPhysical(uint64_t paddr,
   check(ZMemoryObjectCreatePhysical(paddr, size, &vmmo_cap));
 
   uint64_t vaddr;
-  check(ZAddressSpaceMap(gSelfVmasCap, 0, vmmo_cap, &vaddr));
+  check(ZAddressSpaceMap(gSelfVmasCap, 0, vmmo_cap, 0, &vaddr));
 
   return OwnedMemoryRegion(vmmo_cap, vaddr, size);
 }

View File

@@ -1,8 +1,166 @@
+#include <glacier/container/intrusive_list.h>
+#include <glacier/string/str_format.h>
 #include <stdint.h>
-#include <stdlib.h>
 #include <zcall.h>
 #include <zglobal.h>
 
-[[nodiscard]] void* operator new(uint64_t size) { return malloc(size); }
-[[nodiscard]] void* operator new[](uint64_t size) { return malloc(size); }
+#include "util/debug.h"
+
+namespace {
+
+class PageAllocator {
+ public:
+  static uint64_t AllocatePagePair() {
+    uint64_t mem_cap;
+    check(ZMemoryObjectCreate(0x2000, &mem_cap));
+    uint64_t vaddr;
+    check(ZAddressSpaceMap(gSelfVmasCap, 0, mem_cap, /* align= */ 0x2000,
+                           &vaddr));
+    // The address space mapping will keep this memory capability alive.
+    check(ZCapRelease(mem_cap));
+    return vaddr;
+  }
+};
+
+struct BuddySlot {
+  BuddySlot* next;
+  BuddySlot* prev;
+  uint64_t size;
+};
+
+uint64_t NeededSize(uint64_t size) {
+  uint64_t needed = size + sizeof(BuddySlot);
+  // Start at 32 because sizeof(BuddySlot) is already 24.
+  uint64_t pow2 = 32;
+  while (pow2 < needed) {
+    pow2 <<= 1;
+  }
+  return pow2;
+}
+
+class BuddyAllocator {
+ public:
+  BuddyAllocator() {}
+
+  void Init() {
+    free_front_ = nullptr;
+    AddPage();
+    check(ZMutexCreate(&mutex_cap_));
+  }
+
+  void* Allocate(uint64_t size) {
+    check(ZMutexLock(mutex_cap_));
+    if (size > (0x2000 - sizeof(BuddySlot))) {
+      crash("Can't allocate greater than one page", glcr::UNIMPLEMENTED);
+    }
+    if (free_front_ == nullptr) {
+      AddPage();
+    }
+    BuddySlot* slot = free_front_;
+    uint64_t needed = NeededSize(size);
+    BuddySlot* best_fit = nullptr;
+    while (slot != nullptr) {
+      bool fits = slot->size >= needed;
+      bool better = best_fit == nullptr || slot->size < best_fit->size;
+      if (fits && better) {
+        best_fit = slot;
+      }
+      slot = slot->next;
+    }
+    if (best_fit == nullptr) {
+      AddPage();
+      best_fit = free_front_;
+    }
+    while (best_fit->size > needed) {
+      best_fit = Split(best_fit);
+    }
+    Remove(best_fit);
+    // TODO: We may need to align the pointer here.
+    void* ptr = reinterpret_cast<uint8_t*>(best_fit) + sizeof(BuddySlot);
+    check(ZMutexRelease(mutex_cap_));
+    return ptr;
+  }
+
+ private:
+  BuddySlot* free_front_ = nullptr;
+  z_cap_t mutex_cap_ = 0;
+
+  void AddPage() {
+    uint64_t vaddr = PageAllocator::AllocatePagePair();
+    BuddySlot* slot = reinterpret_cast<BuddySlot*>(vaddr);
+    slot->prev = nullptr;
+    slot->next = free_front_;
+    free_front_ = slot;
+    slot->size = 0x2000;
+  }
+
+  BuddySlot* Split(BuddySlot* slot) {
+    if (slot->size <= 32) {
+      crash("Splitting smallest buddy chunk", glcr::INTERNAL);
+    }
+    slot->size /= 2;
+    BuddySlot* new_slot = reinterpret_cast<BuddySlot*>(
+        reinterpret_cast<uint64_t>(slot) ^ slot->size);
+    new_slot->size = slot->size;
+    new_slot->next = slot->next;
+    new_slot->prev = slot;
+    if (slot->next) {
+      slot->next->prev = new_slot;
+    }
+    slot->next = new_slot;
+    return slot;
+  }
+
+  void Remove(BuddySlot* slot) {
+    if (slot->prev) {
+      slot->prev->next = slot->next;
+    }
+    if (slot->next) {
+      slot->next->prev = slot->prev;
+    }
+    if (free_front_ == slot) {
+      free_front_ = slot->next;
+    }
+    slot->next = nullptr;
+    slot->prev = nullptr;
+  }
+};
+
+BuddyAllocator gAllocator;
+bool gHasInit = false;
+
+// FIXME: Race condition.
+void* Allocate(uint64_t size) {
+  if (!gHasInit) {
+    // Call Init since we don't call global constructors yet.
+    gAllocator.Init();
+    gHasInit = true;
+  }
+  void* ptr = gAllocator.Allocate(size);
+
+  char buffer[64];
+  // glcr::FixedStringBuilder builder(buffer, 64);
+  // glcr::StrFormatIntoBuffer(builder, "Allocated {x}", (uint64_t)ptr);
+  // dbgln(builder.operator glcr::StringView());
+  return ptr;
+}
+
+}  // namespace
+
+[[nodiscard]] void* operator new(uint64_t size) { return Allocate(size); }
+[[nodiscard]] void* operator new[](uint64_t size) { return Allocate(size); }
 
 void operator delete(void*, uint64_t) {}
 void operator delete[](void*) {}

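Two properties of this allocator are worth spelling out. NeededSize rounds a request plus its 24-byte BuddySlot header up to a power of two of at least 32, so a 100-byte request lands in a 128-byte slot. And because AllocatePagePair maps every 0x2000-byte block with align = 0x2000, Split can locate a slot's buddy by XORing the slot's address with its halved size. A standalone, host-side sketch of that invariant (the addresses are illustrative):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint64_t base = 0x4000;             // a 0x2000-aligned block, as mapped
      assert((base ^ 0x1000) == 0x5000);  // buddies after the first split
      assert(((base | 0x1000) ^ 0x800) == 0x5800);  // one level deeper
      return 0;
    }

Without the 0x2000 alignment the XOR could land outside the mapped pair, which is exactly why the allocator asks the kernel for aligned mappings.
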
View File

@@ -2,7 +2,6 @@
 
 #include <glacier/status/error.h>
 #include <mammoth/util/debug.h>
-#include <string.h>
 #include <zcall.h>
 
 AhciDevice::AhciDevice(AhciPort* port) : port_struct_(port) {

View File

@@ -1,9 +1,20 @@
 #include "ahci/command.h"
 
-#include <string.h>
-
 #include "ahci/ahci.h"
 
+namespace {
+
+void* memcpy(void* dest, const void* src, uint64_t count) {
+  uint8_t* d = (uint8_t*)dest;
+  const uint8_t* s = (uint8_t*)src;
+  for (uint64_t i = 0; i < count; i++) {
+    d[i] = s[i];
+  }
+  return dest;
+}
+
+}  // namespace
+
 Command::~Command() {}
 
 DmaReadCommand::DmaReadCommand(uint64_t lba, uint64_t sector_cnt,

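With libc gone, this file keeps a private memcpy in an anonymous namespace, which only covers the explicit calls here; compilers are also free to synthesize calls to a global C-linkage memcpy for struct copies. A hedged alternative sketch (not what this change does) that would satisfy synthesized calls as well:

    extern "C" void* memcpy(void* dest, const void* src, unsigned long count) {
      auto* d = static_cast<unsigned char*>(dest);
      const auto* s = static_cast<const unsigned char*>(src);
      for (unsigned long i = 0; i < count; i++) {
        d[i] = s[i];  // plain byte copy; adequate for a freestanding fallback
      }
      return dest;
    }
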
View File

@@ -18,7 +18,7 @@ PciDeviceHeader* PciHeader(uint64_t base, uint64_t bus, uint64_t dev,
 
 PciReader::PciReader() {
   uint64_t vaddr;
-  check(ZAddressSpaceMap(gSelfVmasCap, 0, gBootPciVmmoCap, &vaddr));
+  check(ZAddressSpaceMap(gSelfVmasCap, 0, gBootPciVmmoCap, 0, &vaddr));
 
   PciDump(vaddr);

View File

@@ -6,7 +6,6 @@
 #include <mammoth/util/debug.h>
 #include <mammoth/util/init.h>
 #include <mammoth/util/memory_region.h>
-#include <stdlib.h>
 #include <zcall.h>
 
 #include "hw/gpt.h"

View File

@@ -17,8 +17,8 @@ SYS4(ThreadStart, z_cap_t, thread_cap, uint64_t, entry, uint64_t, arg1,
 SYS0(ThreadExit);
 SYS1(ThreadWait, z_cap_t, thread_cap);
 
-SYS4(AddressSpaceMap, z_cap_t, vmas_cap, uint64_t, vmas_offset, z_cap_t,
-     vmmo_cap, uint64_t*, vaddr);
+SYS5(AddressSpaceMap, z_cap_t, vmas_cap, uint64_t, vmas_offset, z_cap_t,
+     vmmo_cap, uint64_t, align, uint64_t*, vaddr);
 SYS3(AddressSpaceUnmap, z_cap_t, vmas_cap, uint64_t, lower_addr, uint64_t,
      upper_addr);
@@ -68,4 +68,4 @@ SYS1(SemaphoreCreate, z_cap_t*, semaphore_cap);
 SYS1(SemaphoreWait, z_cap_t, semaphore_cap);
 SYS1(SemaphoreSignal, z_cap_t, semaphore_cap);
 
-SYS1(Debug, const char*, message);
+SYS2(Debug, const char*, message, uint64_t, size);

View File

@@ -104,6 +104,7 @@ class PhysicalMemoryManager {
 #if K_PHYS_DEBUG
     dbgln("Continuous {x}:{}", page, num_pages);
 #endif
+    allocated_pages_ += num_pages;
     return page;
   }

View File

@@ -22,11 +22,21 @@ uint64_t AddressSpace::AllocateUserStack() {
   return user_stacks_.NewUserStack();
 }
 
-uint64_t AddressSpace::GetNextMemMapAddr(uint64_t size) {
+void AddressSpace::FreeUserStack(uint64_t rsp) {
+  return user_stacks_.FreeUserStack(rsp);
+}
+
+uint64_t AddressSpace::GetNextMemMapAddr(uint64_t size, uint64_t align) {
   if (size == 0) {
     panic("Zero size memmap");
   }
   size = ((size - 1) & ~0xFFF) + 0x1000;
+  // FIXME: We need to validate that align is a power of 2.
+  if (align > 0) {
+    while ((next_memmap_addr_ & (align - 1)) != 0) {
+      next_memmap_addr_ += kPageSize;
+    }
+  }
   uint64_t addr = next_memmap_addr_;
   next_memmap_addr_ += size;
   if (next_memmap_addr_ >= 0x30'00000000) {
@@ -41,8 +51,8 @@ glcr::ErrorCode AddressSpace::MapInMemoryObject(
 }
 
 glcr::ErrorOr<uint64_t> AddressSpace::MapInMemoryObject(
-    const glcr::RefPtr<MemoryObject>& mem_obj) {
-  uint64_t vaddr = GetNextMemMapAddr(mem_obj->size());
+    const glcr::RefPtr<MemoryObject>& mem_obj, uint64_t align) {
+  uint64_t vaddr = GetNextMemMapAddr(mem_obj->size(), align);
   RET_ERR(mapping_tree_.AddInMemoryObject(vaddr, mem_obj));
   return vaddr;
 }

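GetNextMemMapAddr walks forward one page at a time until the alignment holds. Given the FIXME already assumes align should be a power of two, a constant-time round-up is equivalent under that assumption; a sketch:

    // Equivalent to the kPageSize-stepping loop when align is a power of two
    // and next_memmap_addr_ is already page-aligned.
    if (align > 0) {
      next_memmap_addr_ = (next_memmap_addr_ + align - 1) & ~(align - 1);
    }
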
View File

@@ -66,7 +66,8 @@ class AddressSpace : public KernelObject {
 
   // User Mappings.
   uint64_t AllocateUserStack();
-  uint64_t GetNextMemMapAddr(uint64_t size);
+  void FreeUserStack(uint64_t);
+  uint64_t GetNextMemMapAddr(uint64_t size, uint64_t align);
 
   // Maps in a memory object at a specific address.
   // Note this is unsafe for now as it may clobber other mappings.
@@ -74,7 +75,7 @@ class AddressSpace : public KernelObject {
       uint64_t vaddr, const glcr::RefPtr<MemoryObject>& mem_obj);
 
   [[nodiscard]] glcr::ErrorOr<uint64_t> MapInMemoryObject(
-      const glcr::RefPtr<MemoryObject>& mem_obj);
+      const glcr::RefPtr<MemoryObject>& mem_obj, uint64_t align);
 
   [[nodiscard]] glcr::ErrorCode FreeAddressRange(uint64_t vaddr_base,
                                                  uint64_t vaddr_limit) {

View File

@@ -67,14 +67,14 @@ void Thread::Init() {
 #if K_THREAD_DEBUG
   dbgln("Thread start.", pid(), id_);
 #endif
-  uint64_t rsp = process_.vmas()->AllocateUserStack();
+  rsp_ = process_.vmas()->AllocateUserStack();
   // TODO: Investigate this further but without this GCC
   // will emit movaps calls to non-16-byte-aligned stack
   // addresses.
-  rsp -= 0x8;
-  *reinterpret_cast<uint64_t*>(rsp) = kStackBaseSentinel;
+  rsp_ -= 0x8;
+  *reinterpret_cast<uint64_t*>(rsp_) = kStackBaseSentinel;
   SetRsp0(rsp0_start_);
-  jump_user_space(rip_, rsp, arg1_, arg2_);
+  jump_user_space(rip_, rsp_, arg1_, arg2_);
 }
 
 void Thread::Exit() {
@@ -102,6 +102,7 @@ void Thread::Cleanup() {
   // TODO: Race condition when called from exit; once the kernel stack manager
   // actually reuses stacks this will cause an issue.
   KernelVmm::FreeKernelStack(rsp0_start_);
+  process_.vmas()->FreeUserStack(rsp_);
 }
 
 void Thread::Wait() {
void Thread::Wait() {

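Recording the user stack in the new rsp_ member pairs the allocation in Init with the release in Cleanup. The 8-byte bias kept above likely reflects the SysV x86-64 ABI, which expects rsp % 16 == 8 at function entry (as if a call had just pushed a return address), so GCC's movaps spills stay 16-byte aligned; a sketch of the arithmetic, with stack_top a hypothetical 16-byte-aligned stack base:

    uint64_t rsp = stack_top;  // stack_top % 16 == 0 from the stack allocator
    rsp -= 0x8;                // rsp % 16 == 8, matching post-call entry state
    // The sentinel stored at *rsp also serves as a recognizable base marker.
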
View File

@@ -77,6 +77,7 @@ class Thread : public KernelObject, public glcr::IntrusiveListNode<Thread> {
   State state_ = CREATED;
 
   // Startup Context for the thread.
+  uint64_t rsp_;
   uint64_t rip_;
   uint64_t arg1_;
   uint64_t arg2_;

View File

@@ -18,7 +18,7 @@ z_err_t AddressSpaceMap(ZAddressSpaceMapReq* req) {
     RET_ERR(vmas->MapInMemoryObject(req->vmas_offset, vmmo));
     *req->vaddr = req->vmas_offset;
   } else {
-    ASSIGN_OR_RETURN(*req->vaddr, vmas->MapInMemoryObject(vmmo));
+    ASSIGN_OR_RETURN(*req->vaddr, vmas->MapInMemoryObject(vmmo, req->align));
   }
 
   return glcr::OK;
 }

View File

@@ -5,6 +5,6 @@
 #include "debug/debug.h"
 
 z_err_t Debug(ZDebugReq* req) {
-  dbgln_large("[Debug] {}", req->message);
+  dbgln_large("[Debug] {}", glcr::StringView(req->message, req->size));
   return glcr::OK;
 }