From 1f8085f791e094c09dc78a52a74d142bcf1f197d Mon Sep 17 00:00:00 2001
From: Drew Galbraith
Date: Tue, 5 Dec 2023 15:53:42 -0800
Subject: [PATCH] [Zion] Call invlpg when unmapping memory.

---
 zion/memory/paging_util.cpp   | 12 ++++++------
 zion/memory/paging_util.h     |  2 ++
 zion/object/address_space.cpp | 18 ++++++++++++++++++
 zion/object/address_space.h   |  4 +---
 4 files changed, 27 insertions(+), 9 deletions(-)

diff --git a/zion/memory/paging_util.cpp b/zion/memory/paging_util.cpp
index 75b4c0a..f0eefce 100644
--- a/zion/memory/paging_util.cpp
+++ b/zion/memory/paging_util.cpp
@@ -81,12 +81,6 @@ uint64_t PagePhysIfResident(uint64_t cr3, uint64_t virt) {
 
 uint64_t Pml4Index(uint64_t addr) { return (addr >> PML_OFFSET) & 0x1FF; }
 
-uint64_t CurrCr3() {
-  uint64_t pml4_addr = 0;
-  asm volatile("mov %%cr3, %0;" : "=r"(pml4_addr));
-  return pml4_addr;
-}
-
 void CleanupPageStructure(uint64_t struct_phys, uint64_t level) {
   uint64_t* struct_virtual =
       reinterpret_cast<uint64_t*>(boot::GetHigherHalfDirectMap() + struct_phys);
@@ -104,6 +98,12 @@ void CleanupPageStructure(uint64_t struct_phys, uint64_t level) {
 }
 
 }  // namespace
 
+uint64_t CurrCr3() {
+  uint64_t pml4_addr = 0;
+  asm volatile("mov %%cr3, %0;" : "=r"(pml4_addr));
+  return pml4_addr;
+}
+
 void InitializePml4(uint64_t pml4_physical_addr) {
   uint64_t* pml4_virtual = reinterpret_cast<uint64_t*>(
       boot::GetHigherHalfDirectMap() + pml4_physical_addr);
diff --git a/zion/memory/paging_util.h b/zion/memory/paging_util.h
index 83c522a..91c671b 100644
--- a/zion/memory/paging_util.h
+++ b/zion/memory/paging_util.h
@@ -4,6 +4,8 @@
 
 #include "object/process.h"
 
+uint64_t CurrCr3();
+
 void InitializePml4(uint64_t pml4_physical_addr);
 void CleanupPml4(uint64_t pml4_physical_addr);
 
diff --git a/zion/object/address_space.cpp b/zion/object/address_space.cpp
index 791fdbf..eaaf7b5 100644
--- a/zion/object/address_space.cpp
+++ b/zion/object/address_space.cpp
@@ -65,6 +65,24 @@ glcr::ErrorOr<uint64_t> AddressSpace::MapInMemoryObject(
   return vaddr;
 }
 
+glcr::ErrorCode AddressSpace::FreeAddressRange(uint64_t vaddr_base,
+                                               uint64_t vaddr_limit) {
+  RET_ERR(mapping_tree_.FreeMemoryRange(vaddr_base, vaddr_limit));
+
+  // If this is the current address space, we need to invalidate any pages.
+  // TODO: Consider moving this to the Mapping Tree implementation to only
+  // call this instruction for pages that we know are mapped.
+  if (cr3_ == CurrCr3()) {
+    for (uint64_t addr = vaddr_base; addr < vaddr_limit; addr += kPageSize) {
+      asm volatile("invlpg (%0)" : : "b"(addr) : "memory");
+    }
+    // Clobber vaddr_limit as well in case of an alignment issue.
+    asm volatile("invlpg (%0)" : : "b"(vaddr_limit) : "memory");
+  }
+
+  return glcr::OK;
+}
+
 uint64_t AddressSpace::AllocateKernelStack() {
   return KernelVmm::AcquireKernelStack();
 }
diff --git a/zion/object/address_space.h b/zion/object/address_space.h
index 7c4daf3..b4b0291 100644
--- a/zion/object/address_space.h
+++ b/zion/object/address_space.h
@@ -82,9 +82,7 @@ class AddressSpace : public KernelObject {
       const glcr::RefPtr<MemoryObject>& mem_obj, uint64_t align);
 
   [[nodiscard]] glcr::ErrorCode FreeAddressRange(uint64_t vaddr_base,
-                                                 uint64_t vaddr_limit) {
-    return mapping_tree_.FreeMemoryRange(vaddr_base, vaddr_limit);
-  }
+                                                 uint64_t vaddr_limit);
 
   // Kernel Mappings.
   uint64_t AllocateKernelStack();
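
For context, the core of this change is the per-page TLB invalidation loop added to
AddressSpace::FreeAddressRange: after the mapping tree drops the range, each page that
may still have a stale translation cached is flushed with invlpg, but only when the
freed range belongs to the address space currently loaded in CR3. Below is a minimal
standalone sketch of that technique, not part of the Zion tree: InvalidatePage and
InvalidateRange are hypothetical names, kPageSize is assumed to be 4 KiB, and invlpg is
a privileged instruction, so this is only valid in ring 0.

#include <cstdint>

namespace {

constexpr uint64_t kPageSize = 0x1000;  // Assumes 4 KiB pages.

// Flush the TLB entry for a single virtual page on the executing core.
// Any general-purpose register works as the address operand here.
inline void InvalidatePage(uint64_t vaddr) {
  asm volatile("invlpg (%0)" : : "r"(vaddr) : "memory");
}

}  // namespace

// Invalidate every page in [vaddr_base, vaddr_limit], mirroring the loop
// this patch adds to FreeAddressRange.
void InvalidateRange(uint64_t vaddr_base, uint64_t vaddr_limit) {
  for (uint64_t addr = vaddr_base; addr < vaddr_limit; addr += kPageSize) {
    InvalidatePage(addr);
  }
  // Flush vaddr_limit as well in case the limit is not page aligned,
  // matching the extra invlpg after the loop in the patch.
  InvalidatePage(vaddr_limit);
}

Note that invlpg only flushes the TLB of the executing core, which is why the patch
guards the loop with the cr3_ == CurrCr3() check; invalidating a range in another
core's active address space would additionally require an IPI-based TLB shootdown.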