2023-05-30 20:55:03 -07:00
|
|
|
#pragma once
|
|
|
|
|
2023-11-03 19:47:39 -07:00
|
|
|
#include <glacier/container/binary_tree.h>
|
2023-06-21 15:07:40 -07:00
|
|
|
#include <glacier/memory/ref_ptr.h>
|
2023-05-30 20:55:03 -07:00
|
|
|
#include <stdint.h>
|
|
|
|
|
2023-08-01 18:22:41 -07:00
|
|
|
#include "include/ztypes.h"
|
2023-05-30 22:35:57 -07:00
|
|
|
#include "memory/user_stack_manager.h"
|
2023-06-06 21:44:10 -07:00
|
|
|
#include "object/memory_object.h"
|
2023-05-30 20:55:03 -07:00
|
|
|
|
2023-06-16 14:53:57 -07:00
|
|
|
class AddressSpace;
|
|
|
|
|
|
|
|
template <>
|
|
|
|
struct KernelObjectTag<AddressSpace> {
|
|
|
|
static const uint64_t type = KernelObject::ADDRESS_SPACE;
|
|
|
|
};
|
|
|
|
|
2023-05-30 20:55:03 -07:00
|
|
|
// AddressSpace class holds a memory space for an individual process.
//
// Memory Regions are predefined for simplicity for now. However, in general
// we try not to rely on these regions being static to allow for flexibility in
// the future.
//
// User Regions (Per Process):
// 0x00000000 00000000 - 0x0000000F FFFFFFFF : USER_CODE (64 GiB)
// 0x00000010 00000000 - 0x0000001F FFFFFFFF : USER_HEAP (64 GiB)
// 0x00000020 00000000 - 0x0000002F FFFFFFFF : MEM_MAP (64 GiB)
// 0x00000040 00000000 - 0x0000004F FFFFFFFF : IPC_BUF (64 GiB)
// 0x00007FF0 00000000 - 0x00007FFF FFFFFFFF : USER_STACK (64 GiB)
//
// Kernel Regions (Shared across processes):
// 0xFFFF8000 00000000 - 0xFFFF800F FFFFFFFF : HHDM (64 GiB)
// 0xFFFFFFFF 40000000 - 0xFFFFFFFF 7FFFFFFF : KERNEL_HEAP (1 GiB)
// 0xFFFFFFFF 80000000 - 0xFFFFFFFF 80FFFFFF : KERNEL_CODE (16 MiB)
// 0xFFFFFFFF 90000000 - 0xFFFFFFFF 9FFFFFFF : KERNEL_STACK (256 MiB)
|
2023-06-07 00:04:53 -07:00
|
|
|
class AddressSpace : public KernelObject {
|
2023-05-30 20:55:03 -07:00
|
|
|
public:
|
2023-06-16 14:53:57 -07:00
|
|
|
uint64_t TypeTag() override { return KernelObject::ADDRESS_SPACE; }
|
|
|
|
|
2023-08-01 18:43:48 -07:00
|
|
|
static uint64_t DefaultPermissions() {
|
|
|
|
return kZionPerm_Write | kZionPerm_Transmit;
|
|
|
|
}
|
2023-08-01 18:22:41 -07:00
|
|
|
|
2023-05-30 20:55:03 -07:00
|
|
|
enum MemoryType {
|
|
|
|
UNSPECIFIED,
|
|
|
|
UNMAPPED,
|
|
|
|
USER_CODE,
|
|
|
|
USER_HEAP,
|
|
|
|
MEM_MAP,
|
|
|
|
IPC_BUF,
|
|
|
|
USER_STACK,
|
|
|
|
HHDM,
|
|
|
|
KERNEL_HEAP,
|
|
|
|
KERNEL_CODE,
|
|
|
|
KERNEL_STACK,
|
|
|
|
};
|
|
|
|
|
2023-06-21 15:07:40 -07:00
|
|
|
static glcr::RefPtr<AddressSpace> ForRoot();
|
2023-05-30 21:39:19 -07:00
|
|
|
|
2023-06-06 20:43:15 -07:00
|
|
|
AddressSpace();
|
|
|
|
AddressSpace(const AddressSpace&) = delete;
|
|
|
|
AddressSpace(AddressSpace&&) = delete;
|
2023-05-30 20:55:03 -07:00
|
|
|
|
2023-05-30 21:39:19 -07:00
|
|
|
uint64_t cr3() { return cr3_; }
|
|
|
|
|
2023-05-30 22:35:57 -07:00
|
|
|
// User Mappings.
|
|
|
|
uint64_t AllocateUserStack();
|
2023-05-30 21:27:20 -07:00
|
|
|
uint64_t GetNextMemMapAddr(uint64_t size);
|
|
|
|
|
2023-06-06 21:44:10 -07:00
|
|
|
// Maps in a memory object at a specific address.
|
|
|
|
// Note this is unsafe for now as it may clobber other mappings.
|
2023-06-21 15:07:40 -07:00
|
|
|
void MapInMemoryObject(uint64_t vaddr,
|
|
|
|
const glcr::RefPtr<MemoryObject>& mem_obj);
|
2023-06-06 21:44:10 -07:00
|
|
|
|
2023-06-21 15:07:40 -07:00
|
|
|
uint64_t MapInMemoryObject(const glcr::RefPtr<MemoryObject>& mem_obj);
|
2023-06-07 00:04:53 -07:00
|
|
|
|
2023-05-30 22:35:57 -07:00
|
|
|
// Kernel Mappings.
|
2023-05-30 21:27:20 -07:00
|
|
|
uint64_t* AllocateKernelStack();
|
2023-05-30 20:55:03 -07:00
|
|
|
|
2023-06-06 21:44:10 -07:00
|
|
|
// Returns true if the page fault has been resolved.
|
|
|
|
bool HandlePageFault(uint64_t vaddr);
|
|
|
|
|
2023-05-30 20:55:03 -07:00
|
|
|
private:
|
2023-06-21 15:07:40 -07:00
|
|
|
friend class glcr::MakeRefCountedFriend<AddressSpace>;
|
2023-06-06 20:43:15 -07:00
|
|
|
AddressSpace(uint64_t cr3) : cr3_(cr3) {}
|
2023-05-30 20:55:03 -07:00
|
|
|
uint64_t cr3_ = 0;
|
|
|
|
|
2023-05-30 22:35:57 -07:00
|
|
|
UserStackManager user_stacks_;
|
2023-05-30 20:55:03 -07:00
|
|
|
uint64_t next_memmap_addr_ = 0x20'00000000;
|
2023-06-06 21:44:10 -07:00
|
|
|
|
|
|
|
struct MemoryMapping {
|
|
|
|
uint64_t vaddr;
|
2023-06-21 15:07:40 -07:00
|
|
|
glcr::RefPtr<MemoryObject> mem_obj;
|
2023-06-06 21:44:10 -07:00
|
|
|
};
|
2023-11-05 05:59:45 -08:00
|
|
|
|
|
|
|
// TODO: Consider adding a red-black tree implementation here.
|
|
|
|
// As is this tree functions about as well as a linked list
|
|
|
|
// because mappings are likely to be added in near-perfect ascedning order.
|
|
|
|
// Also worth considering creating a special tree implementation for
|
|
|
|
// just this purpose, or maybe a BinaryTree implementation that accepts
|
|
|
|
// ranges rather than a single key.
|
2023-11-03 19:47:39 -07:00
|
|
|
glcr::BinaryTree<uint64_t, MemoryMapping> memory_mappings_;
|
2023-06-06 21:44:10 -07:00
|
|
|
|
2023-11-03 19:47:39 -07:00
|
|
|
glcr::Optional<glcr::Ref<MemoryMapping>> GetMemoryMappingForAddr(
|
|
|
|
uint64_t vaddr);
|
2023-05-30 20:55:03 -07:00
|
|
|
};
|