Compare commits

2 Commits: 0b9f83b321 ... 815a603c1c

Author | SHA1 | Date
---|---|---
 | 815a603c1c |
 | 1f8085f791 |
@@ -81,12 +81,6 @@ uint64_t PagePhysIfResident(uint64_t cr3, uint64_t virt) {
 
 uint64_t Pml4Index(uint64_t addr) { return (addr >> PML_OFFSET) & 0x1FF; }
 
-uint64_t CurrCr3() {
-  uint64_t pml4_addr = 0;
-  asm volatile("mov %%cr3, %0;" : "=r"(pml4_addr));
-  return pml4_addr;
-}
-
 void CleanupPageStructure(uint64_t struct_phys, uint64_t level) {
   uint64_t* struct_virtual =
       reinterpret_cast<uint64_t*>(boot::GetHigherHalfDirectMap() + struct_phys);
@@ -102,6 +96,12 @@ void CleanupPageStructure(uint64_t struct_phys, uint64_t level) {
   phys_mem::FreePage(struct_phys);
 }
 
+uint64_t CurrCr3() {
+  uint64_t pml4_addr = 0;
+  asm volatile("mov %%cr3, %0;" : "=r"(pml4_addr));
+  return pml4_addr;
+}
+
 }  // namespace
 
 void InitializePml4(uint64_t pml4_physical_addr) {
@@ -167,6 +167,32 @@ void MapPage(uint64_t cr3, uint64_t vaddr, uint64_t paddr) {
   }
 }
 
+void UnmapPage(uint64_t cr3, uint64_t vaddr) {
+  uint64_t* pml4_entry = Pml4Entry(cr3, vaddr);
+  if (!(*pml4_entry & PRESENT_BIT)) {
+    return;
+  }
+
+  uint64_t* pdp_entry = PageDirectoryPointerEntry(*pml4_entry, vaddr);
+  if (!(*pdp_entry & PRESENT_BIT)) {
+    return;
+  }
+  uint64_t* pd_entry = PageDirectoryEntry(*pdp_entry, vaddr);
+  if (!(*pd_entry & PRESENT_BIT)) {
+    return;
+  }
+
+  uint64_t* pt_entry = PageTableEntry(*pd_entry, vaddr);
+  if (!(*pt_entry & PRESENT_BIT)) {
+    return;
+  }
+
+  *pt_entry &= ~PRESENT_BIT;
+  if (cr3 == CurrCr3()) {
+    asm volatile("invlpg (%0)" : : "b"(vaddr) : "memory");
+  }
+}
+
 uint64_t AllocatePageIfNecessary(uint64_t addr) {
   uint64_t cr3 = CurrCr3();
   uint64_t phys = PagePhysIfResident(cr3, addr);
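UnmapPage walks all four paging levels (PML4, page-directory-pointer, page-directory, then page-table entry) and returns early if any level is not present, so unmapping an address that was never mapped is a harmless no-op. Only the leaf PTE's PRESENT bit is cleared; the intermediate structures stay allocated. The invlpg runs only when cr3 matches the currently loaded address space, and invlpg invalidates only the local core's TLB; no cross-CPU shootdown appears in this patch.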
@@ -8,6 +8,7 @@ void InitializePml4(uint64_t pml4_physical_addr);
 void CleanupPml4(uint64_t pml4_physical_addr);
 
 void MapPage(uint64_t cr3, uint64_t vaddr, uint64_t paddr);
+void UnmapPage(uint64_t cr3, uint64_t vaddr);
 
 uint64_t AllocatePageIfNecessary(uint64_t addr);
 void EnsureResident(uint64_t addr, uint64_t size);
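The header change publishes the new entry point next to MapPage, making the map/unmap API symmetric. A minimal round-trip sketch, assuming cr3 is a valid address-space root and vaddr/paddr are page-aligned (none of these values appear in the patch):

    // Sketch only: install a translation, use it, then tear it down.
    MapPage(cr3, vaddr, paddr);  // map one 4 KiB page
    // ... access memory through vaddr ...
    UnmapPage(cr3, vaddr);       // clear PRESENT; invlpg if cr3 is active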
@@ -29,7 +29,8 @@ glcr::ErrorOr<uint64_t> AddressSpace::AllocateUserStack() {
 }
 
 glcr::ErrorCode AddressSpace::FreeUserStack(uint64_t base) {
-  RET_ERR(FreeAddressRange(base, base + kUserStackSize));
+  RET_ERR(FreeAddressRange(base, base + kUserStackSize,
+                           /* is_dying_proc= */ false));
   user_stacks_.FreeUserStack(base);
   return glcr::OK;
 }
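FreeUserStack passes /* is_dying_proc= */ false because the owning process is still alive: the freed stack's translations must actually be removed, and the TLB invalidated, before the range can be reused. The new FreeAddressRange below performs that work.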
@@ -65,6 +66,26 @@ glcr::ErrorOr<uint64_t> AddressSpace::MapInMemoryObject(
   return vaddr;
 }
 
+glcr::ErrorCode AddressSpace::FreeAddressRange(uint64_t vaddr_base,
+                                               uint64_t vaddr_limit,
+                                               bool is_dying_proc) {
+  RET_ERR(mapping_tree_.FreeMemoryRange(vaddr_base, vaddr_limit));
+
+  if (is_dying_proc) {
+    return glcr::OK;
+  }
+
+  // TODO: Consider moving this to the Mapping Tree implementation to only
+  // call this instruction for pages that we know are mapped.
+  for (uint64_t addr = vaddr_base; addr < vaddr_limit; addr += kPageSize) {
+    UnmapPage(cr3_, addr);
+  }
+  // Clobber vaddr_limit as well in case of an alignment issue.
+  UnmapPage(cr3_, vaddr_limit);
+
+  return glcr::OK;
+}
+
 uint64_t AddressSpace::AllocateKernelStack() {
   return KernelVmm::AcquireKernelStack();
 }
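This is the heart of the change: FreeAddressRange always releases the mapping-tree bookkeeping but performs the per-page UnmapPage walk only for a live process. For a dying process the whole paging hierarchy is reclaimed wholesale in Process::Cleanup (step 4 below), so clearing individual PTEs would be wasted work. The trailing UnmapPage(cr3_, vaddr_limit) guards against an unaligned base: with kPageSize of 0x1000, a hypothetical call FreeAddressRange(0x1800, 0x4800, false) unmaps pages 0x1000, 0x2000, and 0x3000 in the loop, and the extra call then catches page 0x4000, which the loop stepped over.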
@@ -82,9 +82,8 @@ class AddressSpace : public KernelObject {
       const glcr::RefPtr<MemoryObject>& mem_obj, uint64_t align);
 
   [[nodiscard]] glcr::ErrorCode FreeAddressRange(uint64_t vaddr_base,
-                                                 uint64_t vaddr_limit) {
-    return mapping_tree_.FreeMemoryRange(vaddr_base, vaddr_limit);
-  }
+                                                 uint64_t vaddr_limit,
+                                                 bool is_dying_proc);
 
   // Kernel Mappings.
   uint64_t AllocateKernelStack();
@@ -103,7 +103,8 @@ void Process::Cleanup() {
   caps_.ReleaseAll();
 
   // 3. Unmap all user memory.
-  PANIC_ON_ERR(vmas_->FreeAddressRange(0, kUserSpaceMax),
-               "Failed to cleanup userspace mappings in process exit.");
+  PANIC_ON_ERR(
+      vmas_->FreeAddressRange(0, kUserSpaceMax, /* is_dying_proc= */ true),
+      "Failed to cleanup userspace mappings in process exit.");
 
   // 4. Release paging structures.
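Here is_dying_proc is true: the process is exiting, so FreeAddressRange tears down only the mapping-tree records, and the page tables themselves are reclaimed immediately afterwards in step 4.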
@@ -29,5 +29,6 @@ z_err_t AddressSpaceUnmap(ZAddressSpaceUnmapReq* req) {
   RET_ERR(ValidateCapability<AddressSpace>(vmas_cap, kZionPerm_Write));
 
   auto vmas = vmas_cap->obj<AddressSpace>();
-  return vmas->FreeAddressRange(req->lower_addr, req->upper_addr);
+  return vmas->FreeAddressRange(req->lower_addr, req->upper_addr,
+                                /* is_dying_proc= */ true);
 }
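The explicit unmap syscall also passes /* is_dying_proc= */ true, so this path likewise skips the per-page PTE clearing and invlpg: only the mapping-tree records are released here.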