[Zion] Mark pages as not present before calling invlpg.

Drew Galbraith 2023-12-05 16:11:37 -08:00
parent 1f8085f791
commit 815a603c1c
6 changed files with 48 additions and 17 deletions
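Context for the title: invlpg only evicts the cached TLB translation, so if the page-table entry is still marked present, the next access simply re-walks the tables and re-caches the stale mapping. The entry has to be cleared first, and the flush is only effective for the address space currently loaded in CR3. A minimal sketch of the pattern, assuming a hypothetical FindPteEntry helper in place of the kernel's Pml4Entry/PageDirectoryPointerEntry/PageDirectoryEntry/PageTableEntry chain:

#include <cstdint>

constexpr uint64_t kPresentBit = 1ull << 0;

// Hypothetical helper: walks PML4 -> PDPT -> PD -> PT under cr3 and returns
// the final page-table entry for vaddr, or nullptr if any intermediate
// level is not present.
uint64_t* FindPteEntry(uint64_t cr3, uint64_t vaddr);

inline uint64_t ReadCr3() {
  uint64_t cr3;
  asm volatile("mov %%cr3, %0" : "=r"(cr3));
  return cr3;
}

void UnmapPageSketch(uint64_t cr3, uint64_t vaddr) {
  uint64_t* pte = FindPteEntry(cr3, vaddr);
  if (pte == nullptr || !(*pte & kPresentBit)) {
    return;  // Nothing mapped, so there is no translation to flush.
  }
  // Clear PRESENT first so a subsequent page walk faults instead of
  // re-caching the old frame.
  *pte &= ~kPresentBit;
  // invlpg acts on the active address space only, so skip it for any
  // other CR3.
  if (cr3 == ReadCr3()) {
    asm volatile("invlpg (%0)" : : "r"(vaddr) : "memory");
  }
}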

View File

@@ -96,14 +96,14 @@ void CleanupPageStructure(uint64_t struct_phys, uint64_t level) {
     phys_mem::FreePage(struct_phys);
   }
-}  // namespace
 uint64_t CurrCr3() {
   uint64_t pml4_addr = 0;
   asm volatile("mov %%cr3, %0;" : "=r"(pml4_addr));
   return pml4_addr;
 }
+}  // namespace
 void InitializePml4(uint64_t pml4_physical_addr) {
   uint64_t* pml4_virtual = reinterpret_cast<uint64_t*>(
       boot::GetHigherHalfDirectMap() + pml4_physical_addr);
@@ -167,6 +167,32 @@ void MapPage(uint64_t cr3, uint64_t vaddr, uint64_t paddr) {
   }
 }
+void UnmapPage(uint64_t cr3, uint64_t vaddr) {
+  uint64_t* pml4_entry = Pml4Entry(cr3, vaddr);
+  if (!(*pml4_entry & PRESENT_BIT)) {
+    return;
+  }
+  uint64_t* pdp_entry = PageDirectoryPointerEntry(*pml4_entry, vaddr);
+  if (!(*pdp_entry & PRESENT_BIT)) {
+    return;
+  }
+  uint64_t* pd_entry = PageDirectoryEntry(*pdp_entry, vaddr);
+  if (!(*pd_entry & PRESENT_BIT)) {
+    return;
+  }
+  uint64_t* pt_entry = PageTableEntry(*pd_entry, vaddr);
+  if (!(*pt_entry & PRESENT_BIT)) {
+    return;
+  }
+  *pt_entry &= ~PRESENT_BIT;
+  if (cr3 == CurrCr3()) {
+    asm volatile("invlpg (%0)" : : "b"(vaddr) : "memory");
+  }
+}
 uint64_t AllocatePageIfNecessary(uint64_t addr) {
   uint64_t cr3 = CurrCr3();
   uint64_t phys = PagePhysIfResident(cr3, addr);
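The chain of early returns in UnmapPage mirrors the x86-64 four-level page walk, where each table is indexed by a 9-bit slice of the virtual address. A self-contained illustration of that decomposition (index arithmetic only; the Pml4Entry-style helpers above resolve these indices against the actual tables):

#include <cstdint>
#include <iostream>

// x86-64 4 KiB paging: bits 47..39 index the PML4, 38..30 the PDPT,
// 29..21 the page directory, 20..12 the page table; 11..0 is the offset.
constexpr uint64_t Pml4Index(uint64_t vaddr) { return (vaddr >> 39) & 0x1ff; }
constexpr uint64_t PdptIndex(uint64_t vaddr) { return (vaddr >> 30) & 0x1ff; }
constexpr uint64_t PdIndex(uint64_t vaddr) { return (vaddr >> 21) & 0x1ff; }
constexpr uint64_t PtIndex(uint64_t vaddr) { return (vaddr >> 12) & 0x1ff; }

int main() {
  uint64_t vaddr = 0x00007f1234567000;
  std::cout << "pml4=" << Pml4Index(vaddr) << " pdpt=" << PdptIndex(vaddr)
            << " pd=" << PdIndex(vaddr) << " pt=" << PtIndex(vaddr) << "\n";
}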

View File

@@ -4,12 +4,11 @@
 #include "object/process.h"
-uint64_t CurrCr3();
 void InitializePml4(uint64_t pml4_physical_addr);
 void CleanupPml4(uint64_t pml4_physical_addr);
 void MapPage(uint64_t cr3, uint64_t vaddr, uint64_t paddr);
+void UnmapPage(uint64_t cr3, uint64_t vaddr);
 uint64_t AllocatePageIfNecessary(uint64_t addr);
 void EnsureResident(uint64_t addr, uint64_t size);

View File

@@ -29,7 +29,8 @@ glcr::ErrorOr<uint64_t> AddressSpace::AllocateUserStack() {
 }
 glcr::ErrorCode AddressSpace::FreeUserStack(uint64_t base) {
-  RET_ERR(FreeAddressRange(base, base + kUserStackSize));
+  RET_ERR(FreeAddressRange(base, base + kUserStackSize,
+                           /* is_dying_proc= */ false));
   user_stacks_.FreeUserStack(base);
   return glcr::OK;
 }
@@ -66,19 +67,21 @@ glcr::ErrorOr<uint64_t> AddressSpace::MapInMemoryObject(
 }
 glcr::ErrorCode AddressSpace::FreeAddressRange(uint64_t vaddr_base,
-                                               uint64_t vaddr_limit) {
+                                               uint64_t vaddr_limit,
+                                               bool is_dying_proc) {
   RET_ERR(mapping_tree_.FreeMemoryRange(vaddr_base, vaddr_limit));
   // If this is the current address space we need to invalidate any pages.
+  if (is_dying_proc) {
+    return glcr::OK;
+  }
   // TODO: Consider moving this to the Mapping Tree implementation to only
   // call this instruction for pages that we know are mapped.
-  if (cr3_ == CurrCr3()) {
-    for (uint64_t addr = vaddr_base; addr < vaddr_limit; addr += kPageSize) {
-      asm volatile("invlpg (%0)" : : "b"(addr) : "memory");
-    }
-    // Clobber vaddr_limit as well in case of an alignment issue.
-    asm volatile("invlpg (%0)" : : "b"(vaddr_limit) : "memory");
-  }
+  for (uint64_t addr = vaddr_base; addr < vaddr_limit; addr += kPageSize) {
+    UnmapPage(cr3_, addr);
+  }
+  // Clobber vaddr_limit as well in case of an alignment issue.
+  UnmapPage(cr3_, vaddr_limit);
   return glcr::OK;
 }
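The trailing UnmapPage(cr3_, vaddr_limit) covers the case where vaddr_base is not page-aligned: stepping in page-sized strides from an unaligned start can end the loop without ever landing in the page that holds vaddr_limit - 1. A small runnable check of the arithmetic, using illustrative addresses:

#include <cstdint>
#include <cstdio>

constexpr uint64_t kPageSize = 0x1000;  // 4 KiB

int main() {
  // Unaligned base: the range [0x1800, 0x3800) spans pages 0x1000, 0x2000,
  // and 0x3000.
  uint64_t base = 0x1800, limit = 0x3800;
  for (uint64_t addr = base; addr < limit; addr += kPageSize) {
    // Visits 0x1800 and 0x2800, i.e. pages 0x1000 and 0x2000 only.
    std::printf("loop visits page 0x%llx\n",
                (unsigned long long)(addr & ~(kPageSize - 1)));
  }
  // The extra call on the limit reaches the missed page 0x3000.
  std::printf("limit call visits page 0x%llx\n",
              (unsigned long long)(limit & ~(kPageSize - 1)));
}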

View File

@@ -82,7 +82,8 @@ class AddressSpace : public KernelObject {
       const glcr::RefPtr<MemoryObject>& mem_obj, uint64_t align);
   [[nodiscard]] glcr::ErrorCode FreeAddressRange(uint64_t vaddr_base,
-                                                 uint64_t vaddr_limit);
+                                                 uint64_t vaddr_limit,
+                                                 bool is_dying_proc);
   // Kernel Mappings.
   uint64_t AllocateKernelStack();

View File

@@ -103,8 +103,9 @@ void Process::Cleanup() {
   caps_.ReleaseAll();
   // 3. Unmap all user memory.
-  PANIC_ON_ERR(vmas_->FreeAddressRange(0, kUserSpaceMax),
-               "Failed to cleanup userspace mappings in process exit.");
+  PANIC_ON_ERR(
+      vmas_->FreeAddressRange(0, kUserSpaceMax, /* is_dying_proc= */ true),
+      "Failed to cleanup userspace mappings in process exit.");
   // 4. Release paging structures.
   vmas_ = nullptr;

View File

@@ -29,5 +29,6 @@ z_err_t AddressSpaceUnmap(ZAddressSpaceUnmapReq* req) {
   RET_ERR(ValidateCapability<AddressSpace>(vmas_cap, kZionPerm_Write));
   auto vmas = vmas_cap->obj<AddressSpace>();
-  return vmas->FreeAddressRange(req->lower_addr, req->upper_addr);
+  return vmas->FreeAddressRange(req->lower_addr, req->upper_addr,
+                                /* is_dying_proc= */ true);
 }