#include "object/address_space.h"

#include "debug/debug.h"
#include "memory/kernel_vmm.h"
#include "memory/paging_util.h"
#include "memory/physical_memory.h"

#define K_VMAS_DEBUG 0
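
// Wraps the address space the kernel is already running on: reads the live
// PML4 root out of CR3 rather than allocating a new one.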
glcr::RefPtr<AddressSpace> AddressSpace::ForRoot() {
  uint64_t cr3 = 0;
  asm volatile("mov %%cr3, %0;" : "=r"(cr3));
  return glcr::MakeRefCounted<AddressSpace>(cr3);
}
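
// Fresh address spaces get a newly allocated, zeroed PML4 root, which
// InitializePml4 then populates (presumably with the shared kernel
// mappings).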
AddressSpace::AddressSpace() {
  cr3_ = phys_mem::AllocateAndZeroPage();
  InitializePml4(cr3_);
}
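
// Reserves a virtual region for a new user stack. No physical pages are
// mapped here; they are faulted in on demand by HandlePageFault below.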
uint64_t AddressSpace::AllocateUserStack() {
  return user_stacks_.NewUserStack();
}
void AddressSpace::FreeUserStack(uint64_t rsp) {
  user_stacks_.FreeUserStack(rsp);
}
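
// Hands out virtual regions for memory mappings using a simple bump cursor:
// the size is rounded up to whole pages, the cursor is advanced, and we
// panic once the region would pass 0x30'00000000. Addresses are never
// reused.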
uint64_t AddressSpace::GetNextMemMapAddr(uint64_t size, uint64_t align) {
  if (size == 0) {
    panic("Zero size memmap");
  }
  // Round size up to the next multiple of the 4 KiB page size,
  // e.g. 0x1801 -> 0x2000.
  size = ((size - 1) & ~0xFFF) + 0x1000;
  // FIXME: We need to validate that align is a power of 2.
  if (align > 0) {
    // Advance one page at a time until the cursor satisfies the alignment.
    while ((next_memmap_addr_ & (align - 1)) != 0) {
      next_memmap_addr_ += kPageSize;
    }
  }
  uint64_t addr = next_memmap_addr_;
  next_memmap_addr_ += size;
  if (next_memmap_addr_ >= 0x30'00000000) {
    panic("OOM: Memmap");
  }
  return addr;
}
|
2023-05-30 22:35:57 -07:00
|
|
|
|
2023-11-19 18:45:13 -08:00
|
|
|
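
// Maps a memory object at a caller-chosen virtual address.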
glcr::ErrorCode AddressSpace::MapInMemoryObject(
|
2023-06-21 15:07:40 -07:00
|
|
|
uint64_t vaddr, const glcr::RefPtr<MemoryObject>& mem_obj) {
|
2023-11-19 18:45:13 -08:00
|
|
|
return mapping_tree_.AddInMemoryObject(vaddr, mem_obj);
|
2023-06-06 21:44:10 -07:00
|
|
|
}
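
// Maps a memory object at the next free memmap address, honoring the
// requested alignment, and returns the chosen virtual address.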
glcr::ErrorOr<uint64_t> AddressSpace::MapInMemoryObject(
    const glcr::RefPtr<MemoryObject>& mem_obj, uint64_t align) {
  uint64_t vaddr = GetNextMemMapAddr(mem_obj->size(), align);
  RET_ERR(mapping_tree_.AddInMemoryObject(vaddr, mem_obj));
  return vaddr;
}
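
// Kernel stacks are handed out by the global KernelVmm rather than tracked
// per address space.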
uint64_t AddressSpace::AllocateKernelStack() {
  return KernelVmm::AcquireKernelStack();
}
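
// Lazily backs a faulting address with a physical page. Returns true if the
// fault was resolved (a valid user stack page or a mapped memory object),
// false if this address space has no backing for the address.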
bool AddressSpace::HandlePageFault(uint64_t vaddr) {
#if K_VMAS_DEBUG
  dbgln("[VMAS] Page Fault!");
#endif

  if (vaddr < kPageSize) {
    // Invalid page access: never map the zero page.
    return false;
  }

  // Demand-allocate user stack pages.
  if (user_stacks_.IsValidStack(vaddr)) {
    MapPage(cr3_, vaddr, phys_mem::AllocatePage());
    return true;
  }

  auto offset_or = mapping_tree_.GetPhysicalPageAtVaddr(vaddr);
  if (!offset_or.ok()) {
    return false;
  }
#if K_VMAS_DEBUG
  dbgln("[VMAS] Mapping P({x}) at V({x})", offset_or.value(), vaddr);
#endif
  MapPage(cr3_, vaddr, offset_or.value());
  return true;
}