diff --git a/zion/CMakeLists.txt b/zion/CMakeLists.txt index 1931f2f..519dbe2 100644 --- a/zion/CMakeLists.txt +++ b/zion/CMakeLists.txt @@ -14,9 +14,10 @@ add_executable(zion memory/paging_util.cpp memory/physical_memory.cpp memory/user_stack_manager.cpp + object/address_space.cpp + object/memory_object.cpp object/process.cpp object/thread.cpp - object/address_space.cpp scheduler/context_switch.s scheduler/jump_user_space.s scheduler/process_manager.cpp diff --git a/zion/interrupt/interrupt.cpp b/zion/interrupt/interrupt.cpp index 963f03f..46a82d3 100644 --- a/zion/interrupt/interrupt.cpp +++ b/zion/interrupt/interrupt.cpp @@ -86,7 +86,15 @@ extern "C" void interrupt_protection_fault(InterruptFrame* frame) { extern "C" void isr_page_fault(); extern "C" void interrupt_page_fault(InterruptFrame* frame) { - dbgln("Page Fault:"); + dbgln("Page Fault - trying to resolve"); + uint64_t cr2; + asm volatile("mov %%cr2, %0" : "=r"(cr2)); + + if (gScheduler->CurrentProcess().vmm().HandlePageFault(cr2)) { + dbgln("Handled"); + return; + } + dbgln("Unable to handle:"); uint64_t err = frame->error_code; if (err & 0x1) { dbgln("Page Protection"); @@ -100,12 +108,14 @@ extern "C" void interrupt_page_fault(InterruptFrame* frame) { dbgln("Read"); } - if (err & 0x8) { + if (err & 0x4) { + dbgln("User Space"); + } + + if (err & 0x10) { dbgln("Instruction Fetch"); } - uint64_t cr2; - asm volatile("mov %%cr2, %0" : "=r"(cr2)); dbgln("rip: %m", frame->rip); dbgln("addr: %m", cr2); panic("PF"); diff --git a/zion/loader/elf_loader.cpp b/zion/loader/elf_loader.cpp index b336d8a..034ed21 100644 --- a/zion/loader/elf_loader.cpp +++ b/zion/loader/elf_loader.cpp @@ -57,11 +57,12 @@ uint64_t LoadElfProgram(Process& dest_proc, uint64_t base, uint64_t offset) { Elf64ProgramHeader& program = programs[i]; dbgln( "prog: type: %u, flags: %u, offset: %u\n vaddr: %m, paddr: %m\n " - "filesz: %u, memsz: %u, align: %u", + "filesz: %x, memsz: %x, align: %x", program.type, program.flags, program.offset, 
program.vaddr, program.paddr, program.filesz, program.memsz, program.align); - CopyIntoNonResidentProcess(base + program.offset, program.filesz, dest_proc, - program.vaddr); + auto mem_obj = MakeRefCounted<MemoryObject>(program.filesz); + mem_obj->CopyBytesToObject(base + program.offset, program.filesz); + dest_proc.vmm().MapInMemoryObject(program.vaddr, mem_obj); } return header->entry; } diff --git a/zion/memory/paging_util.cpp b/zion/memory/paging_util.cpp index d3fff13..98267f0 100644 --- a/zion/memory/paging_util.cpp +++ b/zion/memory/paging_util.cpp @@ -89,45 +89,6 @@ uint64_t PagePhysIfResident(uint64_t cr3, uint64_t virt) { return *pt_entry & ~0xFFF; } -uint64_t MapPage(uint64_t cr3, uint64_t virt) { - uint64_t access_bits = PRESENT_BIT | READ_WRITE_BIT; - uint64_t higher_half = 0xffff8000'00000000; - if ((virt & higher_half) != higher_half) { - access_bits |= USER_MODE_BIT; - } - - uint64_t* pml4_entry = Pml4Entry(cr3, virt); - if (!(*pml4_entry & PRESENT_BIT)) { - uint64_t page = phys_mem::AllocatePage(); - *pml4_entry = page | access_bits; - ZeroOutPage(PageDirectoryPointerEntry(*pml4_entry, virt)); - } - uint64_t* pdp_entry = PageDirectoryPointerEntry(*pml4_entry, virt); - if (!(*pdp_entry & PRESENT_BIT)) { - uint64_t page = phys_mem::AllocatePage(); - *pdp_entry = page | access_bits; - ZeroOutPage(PageDirectoryEntry(*pdp_entry, virt)); - } - uint64_t* pd_entry = PageDirectoryEntry(*pdp_entry, virt); - if (!(*pd_entry & PRESENT_BIT)) { - uint64_t page = phys_mem::AllocatePage(); - *(pd_entry) = page | access_bits; - ZeroOutPage(PageTableEntry(*pd_entry, virt)); - } - - uint64_t* pt_entry = PageTableEntry(*pd_entry, virt); - if (!(*pt_entry & PRESENT_BIT)) { - uint64_t phys = phys_mem::AllocatePage(); - *pt_entry = PageAlign(phys) | access_bits; - ZeroOutPage(reinterpret_cast<uint64_t*>(boot::GetHigherHalfDirectMap() + - PageAlign(phys))); - return phys; - } else { - panic("Page already allocated."); - return 0; - } -} - uint64_t Pml4Index(uint64_t addr) { return (addr >>
PML_OFFSET) & 0x1FF; } uint64_t CurrCr3() { @@ -170,6 +131,42 @@ void InitializePml4(uint64_t pml4_physical_addr) { pml4_virtual[Pml4Index(hhdm)] = *Pml4Entry(curr_cr3, hhdm); } +void MapPage(uint64_t cr3, uint64_t vaddr, uint64_t paddr) { + vaddr = PageAlign(vaddr); + paddr = PageAlign(paddr); + uint64_t access_bits = PRESENT_BIT | READ_WRITE_BIT; + uint64_t higher_half = 0xffff8000'00000000; + if ((vaddr & higher_half) != higher_half) { + access_bits |= USER_MODE_BIT; + } + + uint64_t* pml4_entry = Pml4Entry(cr3, vaddr); + if (!(*pml4_entry & PRESENT_BIT)) { + uint64_t page = phys_mem::AllocatePage(); + *pml4_entry = page | access_bits; + ZeroOutPage(PageDirectoryPointerEntry(*pml4_entry, vaddr)); + } + uint64_t* pdp_entry = PageDirectoryPointerEntry(*pml4_entry, vaddr); + if (!(*pdp_entry & PRESENT_BIT)) { + uint64_t page = phys_mem::AllocatePage(); + *pdp_entry = page | access_bits; + ZeroOutPage(PageDirectoryEntry(*pdp_entry, vaddr)); + } + uint64_t* pd_entry = PageDirectoryEntry(*pdp_entry, vaddr); + if (!(*pd_entry & PRESENT_BIT)) { + uint64_t page = phys_mem::AllocatePage(); + *(pd_entry) = page | access_bits; + ZeroOutPage(PageTableEntry(*pd_entry, vaddr)); + } + + uint64_t* pt_entry = PageTableEntry(*pd_entry, vaddr); + if (!(*pt_entry & PRESENT_BIT)) { + *pt_entry = paddr | access_bits; + } else { + panic("Page already allocated."); + } +} + uint64_t AllocatePageIfNecessary(uint64_t addr, uint64_t cr3) { if (cr3 == 0) { cr3 = CurrCr3(); @@ -178,7 +175,12 @@ uint64_t AllocatePageIfNecessary(uint64_t addr, uint64_t cr3) { if (phys) { return phys; } - return MapPage(cr3, addr); + phys = phys_mem::AllocatePage(); + // FIXME: Maybe move this to the physical memory allocator. 
+ ZeroOutPage( + reinterpret_cast<uint64_t*>(boot::GetHigherHalfDirectMap() + phys)); + MapPage(cr3, addr, phys); + return phys; } void EnsureResident(uint64_t addr, uint64_t size) { diff --git a/zion/memory/paging_util.h b/zion/memory/paging_util.h index 0bdae7c..fc56650 100644 --- a/zion/memory/paging_util.h +++ b/zion/memory/paging_util.h @@ -6,6 +6,8 @@ void InitializePml4(uint64_t pml4_physical_addr); +void MapPage(uint64_t cr3, uint64_t vaddr, uint64_t paddr); + uint64_t AllocatePageIfNecessary(uint64_t addr, uint64_t cr3 = 0); void EnsureResident(uint64_t addr, uint64_t size); diff --git a/zion/object/address_space.cpp b/zion/object/address_space.cpp index 0dcf904..897326c 100644 --- a/zion/object/address_space.cpp +++ b/zion/object/address_space.cpp @@ -30,6 +30,41 @@ uint64_t AddressSpace::GetNextMemMapAddr(uint64_t size) { return addr; } +void AddressSpace::MapInMemoryObject(uint64_t vaddr, + const RefPtr<MemoryObject>& mem_obj) { + memory_mappings_.PushBack({.vaddr = vaddr, .mem_obj = mem_obj}); +} + uint64_t* AddressSpace::AllocateKernelStack() { return gKernelStackManager->AllocateKernelStack(); } + +bool AddressSpace::HandlePageFault(uint64_t vaddr) { + MemoryMapping* mapping = GetMemoryMappingForAddr(vaddr); + if (mapping == nullptr) { + return false; + } + uint64_t offset = vaddr - mapping->vaddr; + uint64_t physical_addr = mapping->mem_obj->PhysicalPageAtOffset(offset); + if (physical_addr == 0) { + dbgln("WARN: Memory object returned invalid physical addr."); + return false; + } + dbgln("Mapping P(%m) at V(%m)", physical_addr, vaddr); + MapPage(cr3_, vaddr, physical_addr); + return true; +} + +AddressSpace::MemoryMapping* AddressSpace::GetMemoryMappingForAddr( + uint64_t vaddr) { + auto iter = memory_mappings_.begin(); + while (iter != memory_mappings_.end()) { + if ((vaddr >= (*iter).vaddr) && + (vaddr < ((*iter).vaddr + (*iter).mem_obj->size()))) { + return &(*iter); + } + ++iter; + } + + return 0; +} diff --git a/zion/object/address_space.h
b/zion/object/address_space.h index 932370d..5844824 100644 --- a/zion/object/address_space.h +++ b/zion/object/address_space.h @@ -2,8 +2,9 @@ #include <stdint.h> -#include "debug/debug.h" +#include "lib/ref_ptr.h" #include "memory/user_stack_manager.h" +#include "object/memory_object.h" // VirtualMemory class holds a memory space for an individual process. // @@ -51,13 +52,28 @@ class AddressSpace { uint64_t AllocateUserStack(); uint64_t GetNextMemMapAddr(uint64_t size); + // Maps in a memory object at a specific address. + // Note this is unsafe for now as it may clobber other mappings. + void MapInMemoryObject(uint64_t vaddr, const RefPtr<MemoryObject>& mem_obj); + // Kernel Mappings. uint64_t* AllocateKernelStack(); + // Returns true if the page fault has been resolved. + bool HandlePageFault(uint64_t vaddr); + private: AddressSpace(uint64_t cr3) : cr3_(cr3) {} uint64_t cr3_ = 0; UserStackManager user_stacks_; uint64_t next_memmap_addr_ = 0x20'00000000; + + struct MemoryMapping { + uint64_t vaddr; + RefPtr<MemoryObject> mem_obj; + }; + LinkedList<MemoryMapping> memory_mappings_; + + MemoryMapping* GetMemoryMappingForAddr(uint64_t vaddr); }; diff --git a/zion/object/memory_object.cpp b/zion/object/memory_object.cpp new file mode 100644 index 0000000..763ec51 --- /dev/null +++ b/zion/object/memory_object.cpp @@ -0,0 +1,63 @@ +#include "object/memory_object.h" + +#include "boot/boot_info.h" +#include "debug/debug.h" +#include "memory/physical_memory.h" + +MemoryObject::MemoryObject(uint64_t size) : size_(size) { + if ((size & 0xFFF) != 0) { + size_ = (size & ~0xFFF) + 0x1000; + dbgln("MemoryObject: aligned %x to %x", size, size_); + } + // FIXME: Do this lazily.
+ uint64_t num_pages = size_ / 0x1000; + for (uint64_t i = 0; i < num_pages; i++) { + phys_page_list_.PushBack(0); + } +} + +uint64_t MemoryObject::PhysicalPageAtOffset(uint64_t offset) { + if (offset > size_) { + panic("Invalid offset"); + } + uint64_t page_num = offset / 0x1000; + return PageNumberToPhysAddr(page_num); +} + +void MemoryObject::CopyBytesToObject(uint64_t source, uint64_t length) { + if (length > size_) { + panic("Copy overruns memory object: %x too large for %x", length, size_); + } + uint64_t hhdm = boot::GetHigherHalfDirectMap(); + uint64_t page_number = 0; + while (length > 0) { + uint64_t physical = hhdm + PageNumberToPhysAddr(page_number); + + uint64_t bytes_to_copy = length >= 0x1000 ? 0x1000 : length; + + uint8_t* srcptr = reinterpret_cast<uint8_t*>(source); + uint8_t* destptr = reinterpret_cast<uint8_t*>(physical); + for (uint64_t i = 0; i < bytes_to_copy; i++) { + destptr[i] = srcptr[i]; + } + + length -= bytes_to_copy; + source += 0x1000; + page_number++; + } +} + +uint64_t MemoryObject::PageNumberToPhysAddr(uint64_t page_num) { + auto iter = phys_page_list_.begin(); + uint64_t index = 0; + while (index < page_num) { + ++iter; + index++; + } + + if (*iter == 0) { + dbgln("Allocating page num %u for mem object", page_num); + *iter = phys_mem::AllocatePage(); + } + return *iter; +} diff --git a/zion/object/memory_object.h b/zion/object/memory_object.h new file mode 100644 index 0000000..c2bb42c --- /dev/null +++ b/zion/object/memory_object.h @@ -0,0 +1,30 @@ +#pragma once + +#include "lib/linked_list.h" +#include "object/kernel_object.h" + +/* + * MemoryObject is a page-aligned set of memory that corresponds + * to physical pages. + * + * It can be mapped in to one or more address spaces.
+ * */ +class MemoryObject : public KernelObject { + public: + MemoryObject(uint64_t size); + + uint64_t size() { return size_; } + uint64_t num_pages() { return size_ / 0x1000; } + + uint64_t PhysicalPageAtOffset(uint64_t offset); + + void CopyBytesToObject(uint64_t source, uint64_t length); + + private: + // Always stores the full page-aligned size. + uint64_t size_; + + uint64_t PageNumberToPhysAddr(uint64_t page_num); + + LinkedList<uint64_t> phys_page_list_; +};