From 815a603c1c8d646581f1286d3a2047b64bca1f9f Mon Sep 17 00:00:00 2001 From: Drew Galbraith Date: Tue, 5 Dec 2023 16:11:37 -0800 Subject: [PATCH] [Zion] Mark pages as not present before calling invlpg. --- zion/memory/paging_util.cpp | 30 ++++++++++++++++++++++++++++-- zion/memory/paging_util.h | 3 +-- zion/object/address_space.cpp | 21 ++++++++++++--------- zion/object/address_space.h | 3 ++- zion/object/process.cpp | 5 +++-- zion/syscall/address_space.cpp | 3 ++- 6 files changed, 48 insertions(+), 17 deletions(-) diff --git a/zion/memory/paging_util.cpp b/zion/memory/paging_util.cpp index f0eefce..1b712c1 100644 --- a/zion/memory/paging_util.cpp +++ b/zion/memory/paging_util.cpp @@ -96,14 +96,14 @@ void CleanupPageStructure(uint64_t struct_phys, uint64_t level) { phys_mem::FreePage(struct_phys); } -} // namespace - uint64_t CurrCr3() { uint64_t pml4_addr = 0; asm volatile("mov %%cr3, %0;" : "=r"(pml4_addr)); return pml4_addr; } +} // namespace + void InitializePml4(uint64_t pml4_physical_addr) { uint64_t* pml4_virtual = reinterpret_cast( boot::GetHigherHalfDirectMap() + pml4_physical_addr); @@ -167,6 +167,32 @@ void MapPage(uint64_t cr3, uint64_t vaddr, uint64_t paddr) { } } +void UnmapPage(uint64_t cr3, uint64_t vaddr) { + uint64_t* pml4_entry = Pml4Entry(cr3, vaddr); + if (!(*pml4_entry & PRESENT_BIT)) { + return; + } + + uint64_t* pdp_entry = PageDirectoryPointerEntry(*pml4_entry, vaddr); + if (!(*pdp_entry & PRESENT_BIT)) { + return; + } + uint64_t* pd_entry = PageDirectoryEntry(*pdp_entry, vaddr); + if (!(*pd_entry & PRESENT_BIT)) { + return; + } + + uint64_t* pt_entry = PageTableEntry(*pd_entry, vaddr); + if (!(*pt_entry & PRESENT_BIT)) { + return; + } + + *pt_entry &= ~PRESENT_BIT; + if (cr3 == CurrCr3()) { + asm volatile("invlpg (%0)" : : "b"(vaddr) : "memory"); + } +} + uint64_t AllocatePageIfNecessary(uint64_t addr) { uint64_t cr3 = CurrCr3(); uint64_t phys = PagePhysIfResident(cr3, addr); diff --git a/zion/memory/paging_util.h 
b/zion/memory/paging_util.h index 91c671b..ee7027e 100644 --- a/zion/memory/paging_util.h +++ b/zion/memory/paging_util.h @@ -4,12 +4,11 @@ #include "object/process.h" -uint64_t CurrCr3(); - void InitializePml4(uint64_t pml4_physical_addr); void CleanupPml4(uint64_t pml4_physical_addr); void MapPage(uint64_t cr3, uint64_t vaddr, uint64_t paddr); +void UnmapPage(uint64_t cr3, uint64_t vaddr); uint64_t AllocatePageIfNecessary(uint64_t addr); void EnsureResident(uint64_t addr, uint64_t size); diff --git a/zion/object/address_space.cpp b/zion/object/address_space.cpp index eaaf7b5..c5d99d0 100644 --- a/zion/object/address_space.cpp +++ b/zion/object/address_space.cpp @@ -29,7 +29,8 @@ glcr::ErrorOr AddressSpace::AllocateUserStack() { } glcr::ErrorCode AddressSpace::FreeUserStack(uint64_t base) { - RET_ERR(FreeAddressRange(base, base + kUserStackSize)); + RET_ERR(FreeAddressRange(base, base + kUserStackSize, + /* is_dying_proc= */ false)); user_stacks_.FreeUserStack(base); return glcr::OK; } @@ -66,19 +67,21 @@ glcr::ErrorOr AddressSpace::MapInMemoryObject( } glcr::ErrorCode AddressSpace::FreeAddressRange(uint64_t vaddr_base, - uint64_t vaddr_limit) { + uint64_t vaddr_limit, + bool is_dying_proc) { RET_ERR(mapping_tree_.FreeMemoryRange(vaddr_base, vaddr_limit)); - // If this is the current address space we need to invalidate any pages. + if (is_dying_proc) { + return glcr::OK; + } + // TODO: Consider moving this to the Mapping Tree implementation to only - call this instruction for pages that we know are mapped. - if (cr3_ == CurrCr3()) { - for (uint64_t addr = vaddr_base; addr < vaddr_limit; addr += kPageSize) { - asm volatile("invlpg (%0)" : : "b"(addr) : "memory"); - } - // Clobber vaddr_limit as well in case of an alignment issue. - asm volatile("invlpg (%0)" : : "b"(vaddr_limit) : "memory"); + for (uint64_t addr = vaddr_base; addr < vaddr_limit; addr += kPageSize) { + UnmapPage(cr3_, addr); } + // Clobber vaddr_limit as well in case of an alignment issue. 
+ UnmapPage(cr3_, vaddr_limit); return glcr::OK; } diff --git a/zion/object/address_space.h b/zion/object/address_space.h index b4b0291..160d51b 100644 --- a/zion/object/address_space.h +++ b/zion/object/address_space.h @@ -82,7 +82,8 @@ class AddressSpace : public KernelObject { const glcr::RefPtr& mem_obj, uint64_t align); [[nodiscard]] glcr::ErrorCode FreeAddressRange(uint64_t vaddr_base, - uint64_t vaddr_limit); + uint64_t vaddr_limit, + bool is_dying_proc); // Kernel Mappings. uint64_t AllocateKernelStack(); diff --git a/zion/object/process.cpp b/zion/object/process.cpp index ac53c97..8f45086 100644 --- a/zion/object/process.cpp +++ b/zion/object/process.cpp @@ -103,8 +103,9 @@ void Process::Cleanup() { caps_.ReleaseAll(); // 3. Unmap all user memory. - PANIC_ON_ERR(vmas_->FreeAddressRange(0, kUserSpaceMax), - "Failed to cleanup userspace mappings in process exit."); + PANIC_ON_ERR( + vmas_->FreeAddressRange(0, kUserSpaceMax, /* is_dying_proc= */ true), + "Failed to cleanup userspace mappings in process exit."); // 4. Release paging structures. vmas_ = nullptr; diff --git a/zion/syscall/address_space.cpp b/zion/syscall/address_space.cpp index b40124b..8ea2185 100644 --- a/zion/syscall/address_space.cpp +++ b/zion/syscall/address_space.cpp @@ -29,5 +29,6 @@ z_err_t AddressSpaceUnmap(ZAddressSpaceUnmapReq* req) { RET_ERR(ValidateCapability(vmas_cap, kZionPerm_Write)); auto vmas = vmas_cap->obj(); - return vmas->FreeAddressRange(req->lower_addr, req->upper_addr); + return vmas->FreeAddressRange(req->lower_addr, req->upper_addr, + /* is_dying_proc= */ false); }