Compare commits

..

2 Commits

6 changed files with 62 additions and 13 deletions

View File

@@ -81,12 +81,6 @@ uint64_t PagePhysIfResident(uint64_t cr3, uint64_t virt) {
// Returns the PML4 table index for |addr|: 9 bits (0x1FF = 512 entries)
// taken above PML_OFFSET. PML_OFFSET is declared elsewhere in this file;
// presumably 39 for x86-64 4-level paging — TODO confirm.
uint64_t Pml4Index(uint64_t addr) { return (addr >> PML_OFFSET) & 0x1FF; }
// Reads the CR3 register — the physical address of the current PML4 (plus
// CR3 flag bits, which are not masked off here). CR3 is only readable at
// CPL 0, so this must run in kernel mode.
uint64_t CurrCr3() {
uint64_t pml4_addr = 0;
// "=r": let the compiler pick any general-purpose output register.
asm volatile("mov %%cr3, %0;" : "=r"(pml4_addr));
return pml4_addr;
}
void CleanupPageStructure(uint64_t struct_phys, uint64_t level) {
uint64_t* struct_virtual =
reinterpret_cast<uint64_t*>(boot::GetHigherHalfDirectMap() + struct_phys);
@@ -102,6 +96,12 @@ void CleanupPageStructure(uint64_t struct_phys, uint64_t level) {
phys_mem::FreePage(struct_phys);
}
// Reads the CR3 register — the physical address of the current PML4 (plus
// CR3 flag bits, which are not masked off here). File-local (anonymous
// namespace). Must run at CPL 0.
uint64_t CurrCr3() {
uint64_t pml4_addr = 0;
// "=r": let the compiler pick any general-purpose output register.
asm volatile("mov %%cr3, %0;" : "=r"(pml4_addr));
return pml4_addr;
}
} // namespace
void InitializePml4(uint64_t pml4_physical_addr) {
@@ -167,6 +167,32 @@ void MapPage(uint64_t cr3, uint64_t vaddr, uint64_t paddr) {
}
}
// Clears the present bit on the PTE mapping the page containing |vaddr| in
// the address space rooted at |cr3|, then flushes that TLB entry if |cr3|
// is the live address space. Silently a no-op if any level of the
// PML4 -> PDP -> PD -> PT walk is itself not present.
void UnmapPage(uint64_t cr3, uint64_t vaddr) {
uint64_t* pml4_entry = Pml4Entry(cr3, vaddr);
if (!(*pml4_entry & PRESENT_BIT)) {
return;
}
uint64_t* pdp_entry = PageDirectoryPointerEntry(*pml4_entry, vaddr);
if (!(*pdp_entry & PRESENT_BIT)) {
return;
}
uint64_t* pd_entry = PageDirectoryEntry(*pdp_entry, vaddr);
if (!(*pd_entry & PRESENT_BIT)) {
return;
}
uint64_t* pt_entry = PageTableEntry(*pd_entry, vaddr);
if (!(*pt_entry & PRESENT_BIT)) {
return;
}
// Only the present bit is cleared; the frame address and other flags stay
// in the PTE, and page-table pages that become empty are not freed here
// (CleanupPageStructure handles wholesale teardown).
*pt_entry &= ~PRESENT_BIT;
// Flush the stale translation only when unmapping from the running address
// space; a non-current cr3's entries will be (re)walked on next switch.
// NOTE(review): no cross-CPU TLB shootdown — assumes a single CPU or that
// no other core has this translation cached; confirm.
// NOTE(review): the "b" constraint pins vaddr to rbx; a plain "r" would
// let the compiler choose freely — verify the constraint is intentional.
if (cr3 == CurrCr3()) {
asm volatile("invlpg (%0)" : : "b"(vaddr) : "memory");
}
}
uint64_t AllocatePageIfNecessary(uint64_t addr) {
uint64_t cr3 = CurrCr3();
uint64_t phys = PagePhysIfResident(cr3, addr);

View File

@@ -8,6 +8,7 @@ void InitializePml4(uint64_t pml4_physical_addr);
void CleanupPml4(uint64_t pml4_physical_addr);
void MapPage(uint64_t cr3, uint64_t vaddr, uint64_t paddr);
void UnmapPage(uint64_t cr3, uint64_t vaddr);
uint64_t AllocatePageIfNecessary(uint64_t addr);
void EnsureResident(uint64_t addr, uint64_t size);

View File

@@ -29,7 +29,8 @@ glcr::ErrorOr<uint64_t> AddressSpace::AllocateUserStack() {
}
glcr::ErrorCode AddressSpace::FreeUserStack(uint64_t base) {
RET_ERR(FreeAddressRange(base, base + kUserStackSize));
RET_ERR(FreeAddressRange(base, base + kUserStackSize,
/* is_dying_proc= */ false));
user_stacks_.FreeUserStack(base);
return glcr::OK;
}
@@ -65,6 +66,26 @@ glcr::ErrorOr<uint64_t> AddressSpace::MapInMemoryObject(
return vaddr;
}
// Releases the tracked mappings in [vaddr_base, vaddr_limit) and, for a live
// process, clears the corresponding page-table entries (and TLB lines via
// UnmapPage). When |is_dying_proc| is true the per-page unmap is skipped:
// the caller (process teardown) is about to release the paging structures
// wholesale, so touching each PTE individually would be wasted work.
glcr::ErrorCode AddressSpace::FreeAddressRange(uint64_t vaddr_base,
uint64_t vaddr_limit,
bool is_dying_proc) {
RET_ERR(mapping_tree_.FreeMemoryRange(vaddr_base, vaddr_limit));
if (is_dying_proc) {
return glcr::OK;
}
// TODO: Consider moving this to the Mapping Tree implementation to only
// call this instruction for pages that we know are mapped.
for (uint64_t addr = vaddr_base; addr < vaddr_limit; addr += kPageSize) {
UnmapPage(cr3_, addr);
}
// Clobber vaddr_limit as well in case of an alignment issue.
// NOTE(review): when base/limit are page-aligned this unmaps the page
// *at* vaddr_limit, one page beyond the requested range — confirm no
// adjacent mapping (e.g. a neighboring user stack) can live there.
UnmapPage(cr3_, vaddr_limit);
return glcr::OK;
}
// Acquires a kernel stack from the global KernelVmm. Pure pass-through:
// this AddressSpace records nothing about the returned stack.
uint64_t AddressSpace::AllocateKernelStack() {
return KernelVmm::AcquireKernelStack();
}

View File

@@ -82,9 +82,8 @@ class AddressSpace : public KernelObject {
const glcr::RefPtr<MemoryObject>& mem_obj, uint64_t align);
[[nodiscard]] glcr::ErrorCode FreeAddressRange(uint64_t vaddr_base,
uint64_t vaddr_limit) {
return mapping_tree_.FreeMemoryRange(vaddr_base, vaddr_limit);
}
uint64_t vaddr_limit,
bool is_dying_proc);
// Kernel Mappings.
uint64_t AllocateKernelStack();

View File

@@ -103,7 +103,8 @@ void Process::Cleanup() {
caps_.ReleaseAll();
// 3. Unmap all user memory.
PANIC_ON_ERR(vmas_->FreeAddressRange(0, kUserSpaceMax),
PANIC_ON_ERR(
vmas_->FreeAddressRange(0, kUserSpaceMax, /* is_dying_proc= */ true),
"Failed to cleanup userspace mappings in process exit.");
// 4. Release paging structures.

View File

@@ -29,5 +29,6 @@ z_err_t AddressSpaceUnmap(ZAddressSpaceUnmapReq* req) {
RET_ERR(ValidateCapability<AddressSpace>(vmas_cap, kZionPerm_Write));
auto vmas = vmas_cap->obj<AddressSpace>();
return vmas->FreeAddressRange(req->lower_addr, req->upper_addr);
return vmas->FreeAddressRange(req->lower_addr, req->upper_addr,
/* is_dying_proc= */ true);
}