Compare commits
No commits in common. "b06c76e477eee9ca6ee79456b5963d4e2b223302" and "b5ad454ad1e7271ef73f698344452e00d484fb5e" have entirely different histories.
b06c76e477...b5ad454ad1
@@ -14,8 +14,7 @@ add_executable(zion
   memory/paging_util.cpp
   memory/physical_memory.cpp
   memory/user_stack_manager.cpp
-  object/address_space.cpp
-  object/memory_object.cpp
+  memory/virtual_memory.cpp
   object/process.cpp
   object/thread.cpp
   scheduler/context_switch.s
@@ -86,15 +86,7 @@ extern "C" void interrupt_protection_fault(InterruptFrame* frame) {

 extern "C" void isr_page_fault();
 extern "C" void interrupt_page_fault(InterruptFrame* frame) {
-  dbgln("Page Fault - trying to resolve");
-  uint64_t cr2;
-  asm volatile("mov %%cr2, %0" : "=r"(cr2));
-
-  if (gScheduler->CurrentProcess().vmm().HandlePageFault(cr2)) {
-    dbgln("Handled");
-    return;
-  }
-  dbgln("Unable to handle:");
+  dbgln("Page Fault:");
   uint64_t err = frame->error_code;
   if (err & 0x1) {
     dbgln("Page Protection");
@@ -108,14 +100,12 @@ extern "C" void interrupt_page_fault(InterruptFrame* frame) {
     dbgln("Read");
   }

-  if (err & 0x4) {
-    dbgln("User Space");
-  }
-
-  if (err & 0x10) {
+  if (err & 0x8) {
     dbgln("Instruction Fetch");
   }

+  uint64_t cr2;
+  asm volatile("mov %%cr2, %0" : "=r"(cr2));
   dbgln("rip: %m", frame->rip);
   dbgln("addr: %m", cr2);
   panic("PF");
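Note on the error-code checks above: on x86-64 the page-fault error code in frame->error_code is architecturally defined as bit 0 = protection violation (vs. non-present page), bit 1 = write, bit 2 = user-mode access, bit 3 = reserved-bit violation, bit 4 = instruction fetch. The left-hand side tests 0x4 and 0x10 (user access, instruction fetch); the right-hand side's 0x8 corresponds to the reserved-bit flag. A minimal decode sketch (the helper name is illustrative, not from this repository):

#include <cstdint>

// Decodes the architectural x86-64 page-fault error code bits.
inline void DecodePageFaultError(uint64_t err) {
  bool protection = err & 0x1;  // 0 = page not present, 1 = protection violation
  bool write = err & 0x2;       // 0 = read, 1 = write
  bool user = err & 0x4;        // access came from CPL 3 (user mode)
  bool reserved = err & 0x8;    // reserved bit set in a paging-structure entry
  bool ifetch = err & 0x10;     // fault triggered by an instruction fetch
  (void)protection; (void)write; (void)user; (void)reserved; (void)ifetch;
}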
@@ -57,12 +57,11 @@ uint64_t LoadElfProgram(Process& dest_proc, uint64_t base, uint64_t offset) {
     Elf64ProgramHeader& program = programs[i];
     dbgln(
         "prog: type: %u, flags: %u, offset: %u\n vaddr: %m, paddr: %m\n "
-        "filesz: %x, memsz: %x, align: %x",
+        "filesz: %u, memsz: %u, align: %u",
         program.type, program.flags, program.offset, program.vaddr,
         program.paddr, program.filesz, program.memsz, program.align);
-    auto mem_obj = MakeRefCounted<MemoryObject>(program.filesz);
-    mem_obj->CopyBytesToObject(base + program.offset, program.filesz);
-    dest_proc.vmm().MapInMemoryObject(program.vaddr, mem_obj);
+    CopyIntoNonResidentProcess(base + program.offset, program.filesz, dest_proc,
+                               program.vaddr);
   }
   return header->entry;
 }
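The fields logged above come from the ELF64 program header. A sketch of that layout, assuming the repository's Elf64ProgramHeader mirrors the System V gABI Elf64_Phdr (which the field order in the dbgln suggests):

#include <cstdint>

// Standard ELF64 program header (System V gABI); 56 bytes, no padding.
struct Elf64Phdr {
  uint32_t type;    // PT_LOAD (1) segments are the ones a loader maps
  uint32_t flags;   // PF_X = 1, PF_W = 2, PF_R = 4
  uint64_t offset;  // file offset of the segment bytes
  uint64_t vaddr;   // virtual address to map the segment at
  uint64_t paddr;   // physical address (ignored on most platforms)
  uint64_t filesz;  // bytes stored in the file
  uint64_t memsz;   // bytes occupied in memory (>= filesz; excess is zero-filled)
  uint64_t align;   // required alignment, normally a page multiple
};
static_assert(sizeof(Elf64Phdr) == 56, "ELF64 program header is 56 bytes");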
@@ -89,6 +89,45 @@ uint64_t PagePhysIfResident(uint64_t cr3, uint64_t virt) {
   return *pt_entry & ~0xFFF;
 }

+uint64_t MapPage(uint64_t cr3, uint64_t virt) {
+  uint64_t access_bits = PRESENT_BIT | READ_WRITE_BIT;
+  uint64_t higher_half = 0xffff8000'00000000;
+  if ((virt & higher_half) != higher_half) {
+    access_bits |= USER_MODE_BIT;
+  }
+
+  uint64_t* pml4_entry = Pml4Entry(cr3, virt);
+  if (!(*pml4_entry & PRESENT_BIT)) {
+    uint64_t page = phys_mem::AllocatePage();
+    *pml4_entry = page | access_bits;
+    ZeroOutPage(PageDirectoryPointerEntry(*pml4_entry, virt));
+  }
+  uint64_t* pdp_entry = PageDirectoryPointerEntry(*pml4_entry, virt);
+  if (!(*pdp_entry & PRESENT_BIT)) {
+    uint64_t page = phys_mem::AllocatePage();
+    *pdp_entry = page | access_bits;
+    ZeroOutPage(PageDirectoryEntry(*pdp_entry, virt));
+  }
+  uint64_t* pd_entry = PageDirectoryEntry(*pdp_entry, virt);
+  if (!(*pd_entry & PRESENT_BIT)) {
+    uint64_t page = phys_mem::AllocatePage();
+    *(pd_entry) = page | access_bits;
+    ZeroOutPage(PageTableEntry(*pd_entry, virt));
+  }
+
+  uint64_t* pt_entry = PageTableEntry(*pd_entry, virt);
+  if (!(*pt_entry & PRESENT_BIT)) {
+    uint64_t phys = phys_mem::AllocatePage();
+    *pt_entry = PageAlign(phys) | access_bits;
+    ZeroOutPage(reinterpret_cast<uint64_t*>(boot::GetHigherHalfDirectMap() +
+                                            PageAlign(phys)));
+    return phys;
+  } else {
+    panic("Page already allocated.");
+    return 0;
+  }
+}
+
 uint64_t Pml4Index(uint64_t addr) { return (addr >> PML_OFFSET) & 0x1FF; }

 uint64_t CurrCr3() {
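Both MapPage variants in this file walk the four paging levels via Pml4Entry, PageDirectoryPointerEntry, PageDirectoryEntry, and PageTableEntry. For orientation, a sketch of how those indices come out of a canonical virtual address with 4 KiB pages (helper names are illustrative; only Pml4Index appears in the diff, with PML_OFFSET presumably 39):

#include <cstdint>

// 4-level x86-64 paging: each level consumes 9 bits of the address, bits 12..47.
constexpr uint64_t kPml4Shift = 39;  // matches Pml4Index above
constexpr uint64_t kPdptShift = 30;
constexpr uint64_t kPdShift = 21;
constexpr uint64_t kPtShift = 12;

inline uint64_t IndexAt(uint64_t virt, uint64_t shift) {
  return (virt >> shift) & 0x1FF;  // 512 entries per table
}

// Example: virt = 0x0000'7FFF'DEAD'B000 yields
//   PML4 index 255, PDPT index 511, PD index 245, PT index 219.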
@@ -131,42 +170,6 @@ void InitializePml4(uint64_t pml4_physical_addr) {
   pml4_virtual[Pml4Index(hhdm)] = *Pml4Entry(curr_cr3, hhdm);
 }

-void MapPage(uint64_t cr3, uint64_t vaddr, uint64_t paddr) {
-  vaddr = PageAlign(vaddr);
-  paddr = PageAlign(paddr);
-  uint64_t access_bits = PRESENT_BIT | READ_WRITE_BIT;
-  uint64_t higher_half = 0xffff8000'00000000;
-  if ((vaddr & higher_half) != higher_half) {
-    access_bits |= USER_MODE_BIT;
-  }
-
-  uint64_t* pml4_entry = Pml4Entry(cr3, vaddr);
-  if (!(*pml4_entry & PRESENT_BIT)) {
-    uint64_t page = phys_mem::AllocatePage();
-    *pml4_entry = page | access_bits;
-    ZeroOutPage(PageDirectoryPointerEntry(*pml4_entry, vaddr));
-  }
-  uint64_t* pdp_entry = PageDirectoryPointerEntry(*pml4_entry, vaddr);
-  if (!(*pdp_entry & PRESENT_BIT)) {
-    uint64_t page = phys_mem::AllocatePage();
-    *pdp_entry = page | access_bits;
-    ZeroOutPage(PageDirectoryEntry(*pdp_entry, vaddr));
-  }
-  uint64_t* pd_entry = PageDirectoryEntry(*pdp_entry, vaddr);
-  if (!(*pd_entry & PRESENT_BIT)) {
-    uint64_t page = phys_mem::AllocatePage();
-    *(pd_entry) = page | access_bits;
-    ZeroOutPage(PageTableEntry(*pd_entry, vaddr));
-  }
-
-  uint64_t* pt_entry = PageTableEntry(*pd_entry, vaddr);
-  if (!(*pt_entry & PRESENT_BIT)) {
-    *pt_entry = paddr | access_bits;
-  } else {
-    panic("Page already allocated.");
-  }
-}
-
 uint64_t AllocatePageIfNecessary(uint64_t addr, uint64_t cr3) {
   if (cr3 == 0) {
     cr3 = CurrCr3();
@@ -175,12 +178,7 @@ uint64_t AllocatePageIfNecessary(uint64_t addr, uint64_t cr3) {
   if (phys) {
     return phys;
   }
-  phys = phys_mem::AllocatePage();
-  // FIXME: Maybe move this to the physical memory allocator.
-  ZeroOutPage(
-      reinterpret_cast<uint64_t*>(boot::GetHigherHalfDirectMap() + phys));
-  MapPage(cr3, addr, phys);
-  return phys;
+  return MapPage(cr3, addr);
 }

 void EnsureResident(uint64_t addr, uint64_t size) {
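Both the removed lines above and the new MapPage zero freshly allocated frames through the higher-half direct map: adding boot::GetHigherHalfDirectMap() to a physical address gives a kernel-visible pointer to that frame. The diff never shows ZeroOutPage itself; a sketch of what such a helper could look like, assuming it clears the 4 KiB page containing the given address:

#include <cstdint>

constexpr uint64_t kPageSize = 0x1000;

// Assumed sketch only: clears the 4 KiB page that `addr` points into.
inline void ZeroOutPage(uint64_t* addr) {
  uint64_t base = reinterpret_cast<uint64_t>(addr) & ~(kPageSize - 1);
  uint64_t* page = reinterpret_cast<uint64_t*>(base);
  for (uint64_t i = 0; i < kPageSize / sizeof(uint64_t); i++) {
    page[i] = 0;
  }
}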
@@ -6,8 +6,6 @@

 void InitializePml4(uint64_t pml4_physical_addr);

-void MapPage(uint64_t cr3, uint64_t vaddr, uint64_t paddr);
-
 uint64_t AllocatePageIfNecessary(uint64_t addr, uint64_t cr3 = 0);
 void EnsureResident(uint64_t addr, uint64_t size);

@@ -0,0 +1,35 @@
+#include "memory/virtual_memory.h"
+
+#include "memory/kernel_stack_manager.h"
+#include "memory/paging_util.h"
+#include "memory/physical_memory.h"
+
+extern KernelStackManager* gKernelStackManager;
+
+VirtualMemory VirtualMemory::ForRoot() {
+  uint64_t cr3 = 0;
+  asm volatile("mov %%cr3, %0;" : "=r"(cr3));
+  return {cr3};
+}
+
+VirtualMemory::VirtualMemory() {
+  cr3_ = phys_mem::AllocatePage();
+  InitializePml4(cr3_);
+}
+
+uint64_t VirtualMemory::AllocateUserStack() {
+  return user_stacks_.NewUserStack();
+}
+
+uint64_t VirtualMemory::GetNextMemMapAddr(uint64_t size) {
+  uint64_t addr = next_memmap_addr_;
+  next_memmap_addr_ += size;
+  if (next_memmap_addr_ >= 0x30'00000000) {
+    panic("OOM: Memmap");
+  }
+  return addr;
+}
+
+uint64_t* VirtualMemory::AllocateKernelStack() {
+  return gKernelStackManager->AllocateKernelStack();
+}
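This new translation unit gives each process its own PML4 root. A hedged caller-side sketch, assuming "memory/virtual_memory.h" is included and the kernel allocators are initialized (the flow is illustrative, not taken from this diff):

// Illustrative usage of the new VirtualMemory type.
void ExampleAddressSpaceSetup() {
  VirtualMemory vmm;                                   // ctor allocates a fresh PML4 page
  uint64_t user_stack = vmm.AllocateUserStack();       // delegated to UserStackManager
  uint64_t mmap_base = vmm.GetNextMemMapAddr(0x4000);  // bump-allocated memmap region
  uint64_t cr3 = vmm.cr3();                            // value loaded on context switch
  (void)user_stack; (void)mmap_base; (void)cr3;
}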
@@ -2,9 +2,8 @@

 #include <stdint.h>

-#include "lib/ref_ptr.h"
+#include "debug/debug.h"
 #include "memory/user_stack_manager.h"
-#include "object/memory_object.h"

 // VirtualMemory class holds a memory space for an individual process.
 //
@@ -24,7 +23,7 @@
 // 0xFFFFFFFF 40000000 - 0xFFFFFFFF 7FFFFFFF : KERNEL_HEAP (1 GiB)
 // 0xFFFFFFFF 80000000 - 0xFFFFFFFF 80FFFFFF : KERNEL_CODE (16 MiB)
 // 0xFFFFFFFF 90000000 - 0xFFFFFFFF 9FFFFFFF : KERNEL_STACK (256 MiB)
-class AddressSpace {
+class VirtualMemory {
  public:
  enum MemoryType {
    UNSPECIFIED,
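The context comments above fix the kernel regions of the layout. Expressed as constants (purely illustrative; the repository may not define such names):

#include <cstdint>

constexpr uint64_t kKernelHeapBase = 0xFFFFFFFF'40000000;   // 1 GiB kernel heap
constexpr uint64_t kKernelCodeBase = 0xFFFFFFFF'80000000;   // 16 MiB kernel code
constexpr uint64_t kKernelStackBase = 0xFFFFFFFF'90000000;  // 256 MiB of kernel stacks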
@@ -40,11 +39,11 @@ class AddressSpace {
    KERNEL_STACK,
  };

-  static AddressSpace ForRoot();
+  static VirtualMemory ForRoot();

-  AddressSpace();
-  AddressSpace(const AddressSpace&) = delete;
-  AddressSpace(AddressSpace&&) = delete;
+  VirtualMemory();
+  VirtualMemory(const VirtualMemory&) = delete;
+  VirtualMemory(VirtualMemory&&) = delete;

  uint64_t cr3() { return cr3_; }

@@ -52,28 +51,13 @@ class AddressSpace {
  uint64_t AllocateUserStack();
  uint64_t GetNextMemMapAddr(uint64_t size);

-  // Maps in a memory object at a specific address.
-  // Note this is unsafe for now as it may clobber other mappings.
-  void MapInMemoryObject(uint64_t vaddr, const RefPtr<MemoryObject>& mem_obj);
-
  // Kernel Mappings.
  uint64_t* AllocateKernelStack();

-  // Returns true if the page fault has been resolved.
-  bool HandlePageFault(uint64_t vaddr);
-
  private:
-  AddressSpace(uint64_t cr3) : cr3_(cr3) {}
+  VirtualMemory(uint64_t cr3) : cr3_(cr3) {}
  uint64_t cr3_ = 0;

  UserStackManager user_stacks_;
  uint64_t next_memmap_addr_ = 0x20'00000000;
-
-  struct MemoryMapping {
-    uint64_t vaddr;
-    RefPtr<MemoryObject> mem_obj;
-  };
-  LinkedList<MemoryMapping> memory_mappings_;
-
-  MemoryMapping* GetMemoryMappingForAddr(uint64_t vaddr);
 };
@@ -1,70 +0,0 @@
-#include "object/address_space.h"
-
-#include "memory/kernel_stack_manager.h"
-#include "memory/paging_util.h"
-#include "memory/physical_memory.h"
-
-extern KernelStackManager* gKernelStackManager;
-
-AddressSpace AddressSpace::ForRoot() {
-  uint64_t cr3 = 0;
-  asm volatile("mov %%cr3, %0;" : "=r"(cr3));
-  return {cr3};
-}
-
-AddressSpace::AddressSpace() {
-  cr3_ = phys_mem::AllocatePage();
-  InitializePml4(cr3_);
-}
-
-uint64_t AddressSpace::AllocateUserStack() {
-  return user_stacks_.NewUserStack();
-}
-
-uint64_t AddressSpace::GetNextMemMapAddr(uint64_t size) {
-  uint64_t addr = next_memmap_addr_;
-  next_memmap_addr_ += size;
-  if (next_memmap_addr_ >= 0x30'00000000) {
-    panic("OOM: Memmap");
-  }
-  return addr;
-}
-
-void AddressSpace::MapInMemoryObject(uint64_t vaddr,
-                                     const RefPtr<MemoryObject>& mem_obj) {
-  memory_mappings_.PushBack({.vaddr = vaddr, .mem_obj = mem_obj});
-}
-
-uint64_t* AddressSpace::AllocateKernelStack() {
-  return gKernelStackManager->AllocateKernelStack();
-}
-
-bool AddressSpace::HandlePageFault(uint64_t vaddr) {
-  MemoryMapping* mapping = GetMemoryMappingForAddr(vaddr);
-  if (mapping == nullptr) {
-    return false;
-  }
-  uint64_t offset = vaddr - mapping->vaddr;
-  uint64_t physical_addr = mapping->mem_obj->PhysicalPageAtOffset(offset);
-  if (physical_addr == 0) {
-    dbgln("WARN: Memory object returned invalid physical addr.");
-    return false;
-  }
-  dbgln("Mapping P(%m) at V(%m)", physical_addr, vaddr);
-  MapPage(cr3_, vaddr, physical_addr);
-  return true;
-}
-
-AddressSpace::MemoryMapping* AddressSpace::GetMemoryMappingForAddr(
-    uint64_t vaddr) {
-  auto iter = memory_mappings_.begin();
-  while (iter != memory_mappings_.end()) {
-    if ((vaddr >= (*iter).vaddr) &&
-        (vaddr < ((*iter).vaddr + (*iter).mem_obj->size()))) {
-      return &(*iter);
-    }
-    ++iter;
-  }
-
-  return 0;
-}
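The deleted HandlePageFault above resolves a fault by locating the MemoryMapping that covers the address, asking its MemoryObject for the backing frame (allocated on first touch), and wiring it in with the three-argument MapPage. A worked example of the offset arithmetic, with invented addresses:

#include <cassert>
#include <cstdint>

int main() {
  uint64_t mapping_vaddr = 0x401000;              // where the MemoryObject is mapped
  uint64_t fault_vaddr = 0x402345;                // faulting address read from CR2
  uint64_t offset = fault_vaddr - mapping_vaddr;  // 0x1345 into the object
  uint64_t page_num = offset / 0x1000;            // 1 -> second backing page
  uint64_t pte_vaddr = fault_vaddr & ~0xFFFull;   // MapPage aligns to 0x402000
  assert(page_num == 1 && pte_vaddr == 0x402000);
  return 0;
}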
@@ -1,63 +0,0 @@
-#include "object/memory_object.h"
-
-#include "boot/boot_info.h"
-#include "debug/debug.h"
-#include "memory/physical_memory.h"
-
-MemoryObject::MemoryObject(uint64_t size) : size_(size) {
-  if ((size & 0xFFF) != 0) {
-    size_ = (size & ~0xFFF) + 0x1000;
-    dbgln("MemoryObject: aligned %x to %x", size, size_);
-  }
-  // FIXME: Do this lazily.
-  uint64_t num_pages = size_ / 0x1000;
-  for (uint64_t i = 0; i < num_pages; i++) {
-    phys_page_list_.PushBack(0);
-  }
-}
-
-uint64_t MemoryObject::PhysicalPageAtOffset(uint64_t offset) {
-  if (offset > size_) {
-    panic("Invalid offset");
-  }
-  uint64_t page_num = offset / 0x1000;
-  return PageNumberToPhysAddr(page_num);
-}
-
-void MemoryObject::CopyBytesToObject(uint64_t source, uint64_t length) {
-  if (length > size_) {
-    panic("Copy overruns memory object: %x too large for %x", length, size_);
-  }
-  uint64_t hhdm = boot::GetHigherHalfDirectMap();
-  uint64_t page_number = 0;
-  while (length > 0) {
-    uint64_t physical = hhdm + PageNumberToPhysAddr(page_number);
-
-    uint64_t bytes_to_copy = length >= 0x1000 ? 0x1000 : length;
-
-    uint8_t* srcptr = reinterpret_cast<uint8_t*>(source);
-    uint8_t* destptr = reinterpret_cast<uint8_t*>(physical);
-    for (uint64_t i = 0; i < bytes_to_copy; i++) {
-      destptr[i] = srcptr[i];
-    }
-
-    length -= bytes_to_copy;
-    source += 0x1000;
-    page_number++;
-  }
-}
-
-uint64_t MemoryObject::PageNumberToPhysAddr(uint64_t page_num) {
-  auto iter = phys_page_list_.begin();
-  uint64_t index = 0;
-  while (index < page_num) {
-    ++iter;
-    index++;
-  }
-
-  if (*iter == 0) {
-    dbgln("Allocating page num %u for mem object", page_num);
-    *iter = phys_mem::AllocatePage();
-  }
-  return *iter;
-}
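The deleted CopyBytesToObject copies page by page through the higher-half direct map, resolving each destination frame with PageNumberToPhysAddr (which allocates it on first use). A worked example of the chunking for an invented length:

#include <cassert>
#include <cstdint>

// A 0x2500-byte copy touches three backing pages: 0x1000 + 0x1000 + 0x500.
inline void ExampleChunking() {
  uint64_t length = 0x2500;
  uint64_t pages_touched = 0;
  while (length > 0) {
    uint64_t bytes_to_copy = length >= 0x1000 ? 0x1000 : length;
    length -= bytes_to_copy;
    pages_touched++;
  }
  assert(pages_touched == 3);
}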
@@ -1,30 +0,0 @@
-#pragma once
-
-#include "lib/linked_list.h"
-#include "object/kernel_object.h"
-
-/*
- * MemoryObject is a page-aligned set of memory that corresponds
- * to physical pages.
- *
- * It can be mapped in to one or more address spaces.
- * */
-class MemoryObject : public KernelObject {
- public:
-  MemoryObject(uint64_t size);
-
-  uint64_t size() { return size_; }
-  uint64_t num_pages() { return size_ / 0x1000; }
-
-  uint64_t PhysicalPageAtOffset(uint64_t offset);
-
-  void CopyBytesToObject(uint64_t source, uint64_t length);
-
- private:
-  // Always stores the full page-aligned size.
-  uint64_t size_;
-
-  uint64_t PageNumberToPhysAddr(uint64_t page_num);
-
-  LinkedList<uint64_t> phys_page_list_;
-};
@@ -6,7 +6,7 @@
 #include "lib/linked_list.h"
 #include "lib/ref_ptr.h"
 #include "lib/shared_ptr.h"
-#include "object/address_space.h"
+#include "memory/virtual_memory.h"

 // Forward decl due to cyclic dependency.
 class Thread;
@@ -23,7 +23,7 @@ class Process : public KernelObject {
  static RefPtr<Process> Create();

  uint64_t id() const { return id_; }
-  AddressSpace& vmm() { return vmm_; }
+  VirtualMemory& vmm() { return vmm_; }

  RefPtr<Thread> CreateThread();
  RefPtr<Thread> GetThread(uint64_t tid);
@@ -39,9 +39,9 @@ class Process : public KernelObject {
  private:
  friend class MakeRefCountedFriend<Process>;
  Process();
-  Process(uint64_t id) : id_(id), vmm_(AddressSpace::ForRoot()) {}
+  Process(uint64_t id) : id_(id), vmm_(VirtualMemory::ForRoot()) {}
  uint64_t id_;
-  AddressSpace vmm_;
+  VirtualMemory vmm_;
  State state_;

  uint64_t next_thread_id_ = 0;