[Zion] Call invlpg when unmapping memory.
parent 0b9f83b321
commit 1f8085f791
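This change makes three coordinated edits: CurrCr3() moves out of the anonymous namespace in the paging utilities so other translation units can read the active CR3 value, the paging header gains a matching declaration, and AddressSpace::FreeAddressRange moves out of line so that, after unmapping, it can invalidate stale TLB entries with invlpg when the freed range belongs to the currently loaded address space.

First, CurrCr3() is removed from the file-local (anonymous) namespace in the paging utilities source: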
@@ -81,12 +81,6 @@ uint64_t PagePhysIfResident(uint64_t cr3, uint64_t virt) {
 
 uint64_t Pml4Index(uint64_t addr) { return (addr >> PML_OFFSET) & 0x1FF; }
 
-uint64_t CurrCr3() {
-  uint64_t pml4_addr = 0;
-  asm volatile("mov %%cr3, %0;" : "=r"(pml4_addr));
-  return pml4_addr;
-}
-
 void CleanupPageStructure(uint64_t struct_phys, uint64_t level) {
   uint64_t* struct_virtual =
       reinterpret_cast<uint64_t*>(boot::GetHigherHalfDirectMap() + struct_phys);
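It is re-added immediately after the namespace closes, giving it external linkage: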
@@ -104,6 +98,12 @@ void CleanupPageStructure(uint64_t struct_phys, uint64_t level) {
 
 }  // namespace
 
+uint64_t CurrCr3() {
+  uint64_t pml4_addr = 0;
+  asm volatile("mov %%cr3, %0;" : "=r"(pml4_addr));
+  return pml4_addr;
+}
+
 void InitializePml4(uint64_t pml4_physical_addr) {
   uint64_t* pml4_virtual = reinterpret_cast<uint64_t*>(
       boot::GetHigherHalfDirectMap() + pml4_physical_addr);
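The paging header declares the newly public function: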
@@ -4,6 +4,8 @@
 
 #include "object/process.h"
 
+uint64_t CurrCr3();
+
 void InitializePml4(uint64_t pml4_physical_addr);
 void CleanupPml4(uint64_t pml4_physical_addr);
 
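AddressSpace::FreeAddressRange then moves out of the class body. After the mapping tree releases the range, it compares the saved cr3_ against the live CR3 register; on a match the freed pages may still have TLB entries on this core, so each page in the range is flushed with invlpg: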
@@ -65,6 +65,24 @@ glcr::ErrorOr<uint64_t> AddressSpace::MapInMemoryObject(
   return vaddr;
 }
 
+glcr::ErrorCode AddressSpace::FreeAddressRange(uint64_t vaddr_base,
+                                               uint64_t vaddr_limit) {
+  RET_ERR(mapping_tree_.FreeMemoryRange(vaddr_base, vaddr_limit));
+
+  // If this is the current address space we need to invalidate any pages.
+  // TODO: Consider moving this to the Mapping Tree implementation to only
+  // call this instruction for pages that we know are mapped.
+  if (cr3_ == CurrCr3()) {
+    for (uint64_t addr = vaddr_base; addr < vaddr_limit; addr += kPageSize) {
+      asm volatile("invlpg (%0)" : : "b"(addr) : "memory");
+    }
+    // Clobber vaddr_limit as well in case of an alignment issue.
+    asm volatile("invlpg (%0)" : : "b"(vaddr_limit) : "memory");
+  }
+
+  return glcr::OK;
+}
+
 uint64_t AddressSpace::AllocateKernelStack() {
   return KernelVmm::AcquireKernelStack();
 }
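The trailing invlpg on vaddr_limit guards against an unaligned vaddr_base: the loop strides in kPageSize steps from an unaligned start, so its last iteration can fall short of the page holding the final freed byte. A minimal sketch of an equivalent loop that instead aligns both bounds up front (the helper and the local kPageSize definition are assumptions for illustration, not part of this commit):

#include <cstdint>

// Mirrors the kernel's page size constant (4 KiB pages on x86-64); the
// real constant comes from the project's own headers.
constexpr uint64_t kPageSize = 0x1000;

// Hypothetical helper: flush the TLB entry of every page overlapping
// [vaddr_base, vaddr_limit). Assumes kPageSize is a power of two and
// vaddr_limit > vaddr_base.
inline void InvlpgRange(uint64_t vaddr_base, uint64_t vaddr_limit) {
  uint64_t first_page = vaddr_base & ~(kPageSize - 1);        // align down
  uint64_t last_page = (vaddr_limit - 1) & ~(kPageSize - 1);  // page of last byte
  for (uint64_t addr = first_page; addr <= last_page; addr += kPageSize) {
    asm volatile("invlpg (%0)" : : "b"(addr) : "memory");
  }
}

Two properties of invlpg make the cr3_ == CurrCr3() guard sufficient here: the instruction only affects the executing logical processor, and reloading CR3 on a context switch already flushes non-global TLB entries (assuming PCIDs are not in use), so only the currently loaded address space needs the explicit per-page flush. Invalidating mappings cached by other cores would take a cross-CPU TLB shootdown, which this commit does not attempt.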
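Finally, the class declaration drops the old inline body, which released the mapping-tree range but never touched the TLB: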
@@ -82,9 +82,7 @@ class AddressSpace : public KernelObject {
       const glcr::RefPtr<MemoryObject>& mem_obj, uint64_t align);
 
   [[nodiscard]] glcr::ErrorCode FreeAddressRange(uint64_t vaddr_base,
-                                                 uint64_t vaddr_limit) {
-    return mapping_tree_.FreeMemoryRange(vaddr_base, vaddr_limit);
-  }
+                                                 uint64_t vaddr_limit);
 
   // Kernel Mappings.
   uint64_t AllocateKernelStack();
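FreeAddressRange keeps its [[nodiscard]] attribute, so call sites must consume the result. A hypothetical call site (names assumed, not taken from this diff):

// Unmap a range and propagate any failure; RET_ERR is the error-propagation
// macro already used in this codebase.
glcr::ErrorCode UnmapRange(AddressSpace& vmas, uint64_t base, uint64_t limit) {
  RET_ERR(vmas.FreeAddressRange(base, limit));
  return glcr::OK;
}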