Compare commits

...

2 Commits

11 changed files with 254 additions and 94 deletions

View File

@ -14,7 +14,8 @@ add_executable(zion
memory/paging_util.cpp
memory/physical_memory.cpp
memory/user_stack_manager.cpp
memory/virtual_memory.cpp
object/address_space.cpp
object/memory_object.cpp
object/process.cpp
object/thread.cpp
scheduler/context_switch.s

View File

@ -86,7 +86,15 @@ extern "C" void interrupt_protection_fault(InterruptFrame* frame) {
extern "C" void isr_page_fault();
extern "C" void interrupt_page_fault(InterruptFrame* frame) {
dbgln("Page Fault:");
dbgln("Page Fault - trying to resolve");
uint64_t cr2;
asm volatile("mov %%cr2, %0" : "=r"(cr2));
if (gScheduler->CurrentProcess().vmm().HandlePageFault(cr2)) {
dbgln("Handled");
return;
}
dbgln("Unable to handle:");
uint64_t err = frame->error_code;
if (err & 0x1) {
dbgln("Page Protection");
@ -100,12 +108,14 @@ extern "C" void interrupt_page_fault(InterruptFrame* frame) {
dbgln("Read");
}
if (err & 0x8) {
if (err & 0x4) {
dbgln("User Space");
}
if (err & 0x10) {
dbgln("Instruction Fetch");
}
uint64_t cr2;
asm volatile("mov %%cr2, %0" : "=r"(cr2));
dbgln("rip: %m", frame->rip);
dbgln("addr: %m", cr2);
panic("PF");

View File

@ -57,11 +57,12 @@ uint64_t LoadElfProgram(Process& dest_proc, uint64_t base, uint64_t offset) {
Elf64ProgramHeader& program = programs[i];
dbgln(
"prog: type: %u, flags: %u, offset: %u\n vaddr: %m, paddr: %m\n "
"filesz: %u, memsz: %u, align: %u",
"filesz: %x, memsz: %x, align: %x",
program.type, program.flags, program.offset, program.vaddr,
program.paddr, program.filesz, program.memsz, program.align);
CopyIntoNonResidentProcess(base + program.offset, program.filesz, dest_proc,
program.vaddr);
auto mem_obj = MakeRefCounted<MemoryObject>(program.filesz);
mem_obj->CopyBytesToObject(base + program.offset, program.filesz);
dest_proc.vmm().MapInMemoryObject(program.vaddr, mem_obj);
}
return header->entry;
}

View File

@ -89,45 +89,6 @@ uint64_t PagePhysIfResident(uint64_t cr3, uint64_t virt) {
return *pt_entry & ~0xFFF;
}
// Allocates a fresh physical page and maps it at virtual address `virt` in
// the address space rooted at `cr3`, creating any missing intermediate
// paging structures (PML4E -> PDPE -> PDE -> PTE) along the way.
// Returns the physical address of the newly allocated backing page.
// Panics if `virt` already has a present PTE.
uint64_t MapPage(uint64_t cr3, uint64_t virt) {
uint64_t access_bits = PRESENT_BIT | READ_WRITE_BIT;
// Anything below the higher-half split is a user-space address; the whole
// table walk must be user-accessible for it.
uint64_t higher_half = 0xffff8000'00000000;
if ((virt & higher_half) != higher_half) {
access_bits |= USER_MODE_BIT;
}
uint64_t* pml4_entry = Pml4Entry(cr3, virt);
if (!(*pml4_entry & PRESENT_BIT)) {
// Allocate the page-directory-pointer table and zero it so no stale
// entries appear PRESENT.
uint64_t page = phys_mem::AllocatePage();
*pml4_entry = page | access_bits;
ZeroOutPage(PageDirectoryPointerEntry(*pml4_entry, virt));
}
uint64_t* pdp_entry = PageDirectoryPointerEntry(*pml4_entry, virt);
if (!(*pdp_entry & PRESENT_BIT)) {
// Same for the page directory level.
uint64_t page = phys_mem::AllocatePage();
*pdp_entry = page | access_bits;
ZeroOutPage(PageDirectoryEntry(*pdp_entry, virt));
}
uint64_t* pd_entry = PageDirectoryEntry(*pdp_entry, virt);
if (!(*pd_entry & PRESENT_BIT)) {
// Same for the page table level.
uint64_t page = phys_mem::AllocatePage();
*(pd_entry) = page | access_bits;
ZeroOutPage(PageTableEntry(*pd_entry, virt));
}
uint64_t* pt_entry = PageTableEntry(*pd_entry, virt);
if (!(*pt_entry & PRESENT_BIT)) {
uint64_t phys = phys_mem::AllocatePage();
*pt_entry = PageAlign(phys) | access_bits;
// Zero the new page through the higher-half direct map before it is
// handed out.
ZeroOutPage(reinterpret_cast<uint64_t*>(boot::GetHigherHalfDirectMap() +
PageAlign(phys)));
return phys;
} else {
panic("Page already allocated.");
return 0;
}
}
// Returns the 9-bit PML4 (top-level) table index for `addr`.
uint64_t Pml4Index(uint64_t addr) { return (addr >> PML_OFFSET) & 0x1FF; }
uint64_t CurrCr3() {
@ -170,6 +131,42 @@ void InitializePml4(uint64_t pml4_physical_addr) {
pml4_virtual[Pml4Index(hhdm)] = *Pml4Entry(curr_cr3, hhdm);
}
// Maps the physical page containing `paddr` at the virtual page containing
// `vaddr` in the address space rooted at `cr3`, allocating and zeroing any
// missing intermediate paging structures. Unlike the allocating overload,
// the caller supplies the backing physical page.
// Panics if `vaddr` already has a present PTE.
void MapPage(uint64_t cr3, uint64_t vaddr, uint64_t paddr) {
vaddr = PageAlign(vaddr);
paddr = PageAlign(paddr);
uint64_t access_bits = PRESENT_BIT | READ_WRITE_BIT;
// Anything below the higher-half split is a user-space address; the whole
// table walk must be user-accessible for it.
uint64_t higher_half = 0xffff8000'00000000;
if ((vaddr & higher_half) != higher_half) {
access_bits |= USER_MODE_BIT;
}
uint64_t* pml4_entry = Pml4Entry(cr3, vaddr);
if (!(*pml4_entry & PRESENT_BIT)) {
// Allocate and zero a new page-directory-pointer table.
uint64_t page = phys_mem::AllocatePage();
*pml4_entry = page | access_bits;
ZeroOutPage(PageDirectoryPointerEntry(*pml4_entry, vaddr));
}
uint64_t* pdp_entry = PageDirectoryPointerEntry(*pml4_entry, vaddr);
if (!(*pdp_entry & PRESENT_BIT)) {
// Allocate and zero a new page directory.
uint64_t page = phys_mem::AllocatePage();
*pdp_entry = page | access_bits;
ZeroOutPage(PageDirectoryEntry(*pdp_entry, vaddr));
}
uint64_t* pd_entry = PageDirectoryEntry(*pdp_entry, vaddr);
if (!(*pd_entry & PRESENT_BIT)) {
// Allocate and zero a new page table.
uint64_t page = phys_mem::AllocatePage();
*(pd_entry) = page | access_bits;
ZeroOutPage(PageTableEntry(*pd_entry, vaddr));
}
uint64_t* pt_entry = PageTableEntry(*pd_entry, vaddr);
if (!(*pt_entry & PRESENT_BIT)) {
*pt_entry = paddr | access_bits;
} else {
panic("Page already allocated.");
}
}
uint64_t AllocatePageIfNecessary(uint64_t addr, uint64_t cr3) {
if (cr3 == 0) {
cr3 = CurrCr3();
@ -178,7 +175,12 @@ uint64_t AllocatePageIfNecessary(uint64_t addr, uint64_t cr3) {
if (phys) {
return phys;
}
return MapPage(cr3, addr);
phys = phys_mem::AllocatePage();
// FIXME: Maybe move this to the physical memory allocator.
ZeroOutPage(
reinterpret_cast<uint64_t*>(boot::GetHigherHalfDirectMap() + phys));
MapPage(cr3, addr, phys);
return phys;
}
void EnsureResident(uint64_t addr, uint64_t size) {

View File

@ -6,6 +6,8 @@
// Initializes a fresh PML4 at `pml4_physical_addr`, copying the kernel's
// higher-half entries from the currently active address space.
void InitializePml4(uint64_t pml4_physical_addr);
// Maps the physical page `paddr` at virtual address `vaddr` under `cr3`.
// Panics if `vaddr` is already mapped.
void MapPage(uint64_t cr3, uint64_t vaddr, uint64_t paddr);
// Ensures `addr` is backed by a physical page (allocating and zeroing one
// if needed) and returns its physical address. `cr3 == 0` means "use the
// current cr3".
uint64_t AllocatePageIfNecessary(uint64_t addr, uint64_t cr3 = 0);
void EnsureResident(uint64_t addr, uint64_t size);

View File

@ -1,35 +0,0 @@
#include "memory/virtual_memory.h"
#include "memory/kernel_stack_manager.h"
#include "memory/paging_util.h"
#include "memory/physical_memory.h"
extern KernelStackManager* gKernelStackManager;
// Wraps the bootstrap (root) address space by reading the live cr3
// register directly instead of allocating a new PML4.
VirtualMemory VirtualMemory::ForRoot() {
uint64_t cr3 = 0;
asm volatile("mov %%cr3, %0;" : "=r"(cr3));
return {cr3};
}
// Creates a new address space: allocates a fresh page for the PML4 and
// seeds it via InitializePml4.
VirtualMemory::VirtualMemory() {
cr3_ = phys_mem::AllocatePage();
InitializePml4(cr3_);
}
// Hands out a new user-mode stack from this space's stack manager and
// returns its address.
uint64_t VirtualMemory::AllocateUserStack() {
return user_stacks_.NewUserStack();
}
// Reserves `size` bytes in the memory-map region with a simple bump
// allocator; panics once the region reaches 0x30'00000000.
uint64_t VirtualMemory::GetNextMemMapAddr(uint64_t size) {
uint64_t addr = next_memmap_addr_;
next_memmap_addr_ += size;
if (next_memmap_addr_ >= 0x30'00000000) {
panic("OOM: Memmap");
}
return addr;
}
// Kernel stacks come from the global kernel stack manager rather than
// per-address-space state; just delegate.
uint64_t* VirtualMemory::AllocateKernelStack() {
return gKernelStackManager->AllocateKernelStack();
}

View File

@ -0,0 +1,70 @@
#include "object/address_space.h"
#include "memory/kernel_stack_manager.h"
#include "memory/paging_util.h"
#include "memory/physical_memory.h"
extern KernelStackManager* gKernelStackManager;
// Wraps the bootstrap (root) address space by reading the live cr3
// register directly instead of allocating a new PML4.
AddressSpace AddressSpace::ForRoot() {
uint64_t cr3 = 0;
asm volatile("mov %%cr3, %0;" : "=r"(cr3));
return {cr3};
}
// Creates a new address space: allocates a fresh page for the PML4 and
// seeds it via InitializePml4.
AddressSpace::AddressSpace() {
cr3_ = phys_mem::AllocatePage();
InitializePml4(cr3_);
}
// Hands out a new user-mode stack from this space's stack manager and
// returns its address.
uint64_t AddressSpace::AllocateUserStack() {
return user_stacks_.NewUserStack();
}
// Bump-allocates a `size`-byte region in the memory-map address range.
// Panics once the reserved range (up to 0x30'00000000) is exhausted.
uint64_t AddressSpace::GetNextMemMapAddr(uint64_t size) {
  const uint64_t region_start = next_memmap_addr_;
  next_memmap_addr_ += size;
  if (next_memmap_addr_ >= 0x30'00000000) {
    panic("OOM: Memmap");
  }
  return region_start;
}
// Records that `mem_obj` backs the virtual range starting at `vaddr`.
// No pages are mapped here; HandlePageFault maps them on first access.
void AddressSpace::MapInMemoryObject(uint64_t vaddr,
const RefPtr<MemoryObject>& mem_obj) {
memory_mappings_.PushBack({.vaddr = vaddr, .mem_obj = mem_obj});
}
// Kernel stacks come from the global kernel stack manager rather than
// per-address-space state; just delegate.
uint64_t* AddressSpace::AllocateKernelStack() {
return gKernelStackManager->AllocateKernelStack();
}
// Tries to resolve a page fault at `vaddr` by locating a registered
// memory-object mapping that covers the address and mapping in the
// backing physical page. Returns true on success; false when no mapping
// covers `vaddr` or the memory object has no valid page at that offset
// (the fault is then unhandled and the caller should treat it as fatal).
bool AddressSpace::HandlePageFault(uint64_t vaddr) {
MemoryMapping* mapping = GetMemoryMappingForAddr(vaddr);
if (mapping == nullptr) {
return false;
}
// Offset of the faulting address within the mapped object.
uint64_t offset = vaddr - mapping->vaddr;
uint64_t physical_addr = mapping->mem_obj->PhysicalPageAtOffset(offset);
if (physical_addr == 0) {
dbgln("WARN: Memory object returned invalid physical addr.");
return false;
}
dbgln("Mapping P(%m) at V(%m)", physical_addr, vaddr);
MapPage(cr3_, vaddr, physical_addr);
return true;
}
// Returns the registered mapping whose [vaddr, vaddr + mem_obj->size())
// range contains `vaddr`, or nullptr when no mapping covers the address.
AddressSpace::MemoryMapping* AddressSpace::GetMemoryMappingForAddr(
    uint64_t vaddr) {
  for (MemoryMapping& mapping : memory_mappings_) {
    if ((vaddr >= mapping.vaddr) &&
        (vaddr < (mapping.vaddr + mapping.mem_obj->size()))) {
      return &mapping;
    }
  }
  // Fix: return nullptr rather than the integer literal 0 for a pointer.
  return nullptr;
}

View File

@ -2,8 +2,9 @@
#include <stdint.h>
#include "debug/debug.h"
#include "lib/ref_ptr.h"
#include "memory/user_stack_manager.h"
#include "object/memory_object.h"
// VirtualMemory class holds a memory space for an individual process.
//
@ -23,7 +24,7 @@
// 0xFFFFFFFF 40000000 - 0xFFFFFFFF 7FFFFFFF : KERNEL_HEAP (1 GiB)
// 0xFFFFFFFF 80000000 - 0xFFFFFFFF 80FFFFFF : KERNEL_CODE (16 MiB)
// 0xFFFFFFFF 90000000 - 0xFFFFFFFF 9FFFFFFF : KERNEL_STACK (256 MiB)
class VirtualMemory {
class AddressSpace {
public:
enum MemoryType {
UNSPECIFIED,
@ -39,11 +40,11 @@ class VirtualMemory {
KERNEL_STACK,
};
static VirtualMemory ForRoot();
static AddressSpace ForRoot();
VirtualMemory();
VirtualMemory(const VirtualMemory&) = delete;
VirtualMemory(VirtualMemory&&) = delete;
AddressSpace();
AddressSpace(const AddressSpace&) = delete;
AddressSpace(AddressSpace&&) = delete;
uint64_t cr3() { return cr3_; }
@ -51,13 +52,28 @@ class VirtualMemory {
uint64_t AllocateUserStack();
uint64_t GetNextMemMapAddr(uint64_t size);
// Maps in a memory object at a specific address.
// Note this is unsafe for now as it may clobber other mappings.
void MapInMemoryObject(uint64_t vaddr, const RefPtr<MemoryObject>& mem_obj);
// Kernel Mappings.
uint64_t* AllocateKernelStack();
// Returns true if the page fault has been resolved.
bool HandlePageFault(uint64_t vaddr);
private:
VirtualMemory(uint64_t cr3) : cr3_(cr3) {}
AddressSpace(uint64_t cr3) : cr3_(cr3) {}
uint64_t cr3_ = 0;
UserStackManager user_stacks_;
uint64_t next_memmap_addr_ = 0x20'00000000;
struct MemoryMapping {
uint64_t vaddr;
RefPtr<MemoryObject> mem_obj;
};
LinkedList<MemoryMapping> memory_mappings_;
MemoryMapping* GetMemoryMappingForAddr(uint64_t vaddr);
};

View File

@ -0,0 +1,63 @@
#include "object/memory_object.h"
#include "boot/boot_info.h"
#include "debug/debug.h"
#include "memory/physical_memory.h"
// Constructs a memory object of (page-rounded) `size` bytes. One zeroed
// slot per page is pushed onto the page list; the actual physical pages
// are allocated on first use by PageNumberToPhysAddr.
MemoryObject::MemoryObject(uint64_t size) : size_(size) {
  if ((size & 0xFFF) != 0) {
    // Round the requested size up to the next 4 KiB page boundary.
    size_ = (size & ~0xFFF) + 0x1000;
    dbgln("MemoryObject: aligned %x to %x", size, size_);
  }
  // FIXME: Do this lazily.
  const uint64_t page_count = size_ / 0x1000;
  for (uint64_t slot = 0; slot < page_count; slot++) {
    phys_page_list_.PushBack(0);
  }
}
// Returns the physical address of the page backing byte `offset`,
// allocating the backing page on first use. Panics on an out-of-range
// offset.
uint64_t MemoryObject::PhysicalPageAtOffset(uint64_t offset) {
  // Fix: use >= — `offset == size_` is one past the last valid byte; the
  // old `>` check let it through, computing a page number equal to the
  // page count and walking one node past the end of the page list.
  if (offset >= size_) {
    panic("Invalid offset");
  }
  uint64_t page_num = offset / 0x1000;
  return PageNumberToPhysAddr(page_num);
}
// Copies `length` bytes from the address `source` into the start of this
// object, one page-sized chunk at a time, writing through the higher-half
// direct map of each backing page. Panics when `length` exceeds the
// object's size.
void MemoryObject::CopyBytesToObject(uint64_t source, uint64_t length) {
  if (length > size_) {
    panic("Copy overruns memory object: %x too large for %x", length, size_);
  }
  const uint64_t hhdm = boot::GetHigherHalfDirectMap();
  for (uint64_t page = 0; length > 0; page++) {
    // Destination is the current backing page, viewed through the HHDM.
    uint8_t* dest =
        reinterpret_cast<uint8_t*>(hhdm + PageNumberToPhysAddr(page));
    const uint8_t* src = reinterpret_cast<uint8_t*>(source);
    const uint64_t chunk = length < 0x1000 ? length : 0x1000;
    for (uint64_t i = 0; i < chunk; i++) {
      dest[i] = src[i];
    }
    length -= chunk;
    source += 0x1000;
  }
}
// Returns the physical address stored in page slot `page_num`, allocating
// a physical page for the slot on first use (a 0 entry marks an
// unallocated slot).
uint64_t MemoryObject::PageNumberToPhysAddr(uint64_t page_num) {
  // The page list is a linked list, so advance node by node to the slot.
  auto slot = phys_page_list_.begin();
  for (uint64_t i = 0; i < page_num; i++) {
    ++slot;
  }
  if (*slot == 0) {
    dbgln("Allocating page num %u for mem object", page_num);
    *slot = phys_mem::AllocatePage();
  }
  return *slot;
}

View File

@ -0,0 +1,30 @@
#pragma once
#include "lib/linked_list.h"
#include "object/kernel_object.h"
/*
* MemoryObject is a page-aligned set of memory that corresponds
* to physical pages.
*
* It can be mapped in to one or more address spaces.
* */
class MemoryObject : public KernelObject {
 public:
  // `size` is rounded up to the next page (0x1000-byte) boundary.
  // Fix: explicit — prevents accidental implicit uint64_t -> MemoryObject
  // conversions; all visible callers construct directly.
  explicit MemoryObject(uint64_t size);

  // Const-qualified accessors: neither reads mutates the object.
  uint64_t size() const { return size_; }
  uint64_t num_pages() const { return size_ / 0x1000; }

  // Returns the physical address backing byte `offset`, allocating the
  // backing page on first use. Panics on an out-of-range offset.
  uint64_t PhysicalPageAtOffset(uint64_t offset);

  // Copies `length` bytes from the address `source` into the start of
  // this object. Panics when `length` exceeds size().
  void CopyBytesToObject(uint64_t source, uint64_t length);

 private:
  // Always stores the full page-aligned size.
  uint64_t size_;

  uint64_t PageNumberToPhysAddr(uint64_t page_num);

  // One entry per page; 0 marks a slot whose physical page has not been
  // allocated yet.
  LinkedList<uint64_t> phys_page_list_;
};

View File

@ -6,7 +6,7 @@
#include "lib/linked_list.h"
#include "lib/ref_ptr.h"
#include "lib/shared_ptr.h"
#include "memory/virtual_memory.h"
#include "object/address_space.h"
// Forward decl due to cyclic dependency.
class Thread;
@ -23,7 +23,7 @@ class Process : public KernelObject {
static RefPtr<Process> Create();
uint64_t id() const { return id_; }
VirtualMemory& vmm() { return vmm_; }
AddressSpace& vmm() { return vmm_; }
RefPtr<Thread> CreateThread();
RefPtr<Thread> GetThread(uint64_t tid);
@ -39,9 +39,9 @@ class Process : public KernelObject {
private:
friend class MakeRefCountedFriend<Process>;
Process();
Process(uint64_t id) : id_(id), vmm_(VirtualMemory::ForRoot()) {}
Process(uint64_t id) : id_(id), vmm_(AddressSpace::ForRoot()) {}
uint64_t id_;
VirtualMemory vmm_;
AddressSpace vmm_;
State state_;
uint64_t next_thread_id_ = 0;