[Zion] Make the base MemoryObject pure abstract.
commit a8ad225cf1
parent 126482f3e8
@@ -74,10 +74,12 @@ uint64_t LoadElfProgram(Process& dest_proc, uint64_t base, uint64_t offset) {
           program.type, program.flags, program.offset, program.vaddr,
           program.paddr, program.filesz, program.memsz, program.align);
 #endif
-    auto mem_obj = glcr::MakeRefCounted<MemoryObject>(program.memsz);
+    auto mem_obj = glcr::MakeRefCounted<VariableMemoryObject>(program.memsz);
     mem_obj->CopyBytesToObject(base + program.offset, program.filesz);
-    PANIC_ON_ERR(dest_proc.vmas()->MapInMemoryObject(program.vaddr, mem_obj),
-                 "Couldn't map in init program.");
+    PANIC_ON_ERR(
+        dest_proc.vmas()->MapInMemoryObject(
+            program.vaddr, glcr::StaticCastRefPtr<MemoryObject>(mem_obj)),
+        "Couldn't map in init program.");
   }
   return header->entry;
 }
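The mapping call above now needs an explicit upcast: the loader constructs the concrete VariableMemoryObject, and glcr::StaticCastRefPtr converts the ref-counted pointer back to the MemoryObject base before it is handed to MapInMemoryObject. A minimal sketch of the pattern, assuming glcr::RefPtr does not convert implicitly from derived to base (names mirror the hunk above):

    auto mem_obj = glcr::MakeRefCounted<VariableMemoryObject>(program.memsz);
    // Explicit upcast to the abstract base expected by the mapping interface.
    glcr::RefPtr<MemoryObject> base_obj =
        glcr::StaticCastRefPtr<MemoryObject>(mem_obj);
    PANIC_ON_ERR(dest_proc.vmas()->MapInMemoryObject(program.vaddr, base_obj),
                 "Couldn't map in init program.");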
@@ -121,8 +123,8 @@ const limine_file& GetInitProgram(const glcr::String& path) {
 
 void WriteInitProgram(glcr::RefPtr<Port> port, glcr::String name, uint64_t id) {
   const limine_file& prog = GetInitProgram(name);
-  glcr::RefPtr<MemoryObject> prog_vmmo =
-      glcr::MakeRefCounted<MemoryObject>(prog.size);
+  glcr::RefPtr<VariableMemoryObject> prog_vmmo =
+      glcr::MakeRefCounted<VariableMemoryObject>(prog.size);
   prog_vmmo->CopyBytesToObject(reinterpret_cast<uint64_t>(prog.address),
                                prog.size);
   port->WriteKernel(id, MakeRefCounted<Capability>(prog_vmmo));
@@ -155,8 +157,8 @@ void WriteFramebufferVmmo(glcr::RefPtr<Port> port) {
       .blue_mask_size = buf.blue_mask_size,
       .blue_mask_shift = buf.blue_mask_shift,
   };
-  glcr::RefPtr<MemoryObject> ubuf_vmmo =
-      glcr::MakeRefCounted<MemoryObject>(sizeof(ubuf));
+  glcr::RefPtr<VariableMemoryObject> ubuf_vmmo =
+      glcr::MakeRefCounted<VariableMemoryObject>(sizeof(ubuf));
   ubuf_vmmo->CopyBytesToObject(reinterpret_cast<uint64_t>(&ubuf), sizeof(ubuf));
   port->WriteKernel(Z_BOOT_FRAMEBUFFER_INFO_VMMO,
                     MakeRefCounted<Capability>(ubuf_vmmo));
@@ -6,30 +6,8 @@
 
 #define K_MEM_DEBUG 0
 
-MemoryObject::MemoryObject(uint64_t size) : size_(size) {
-  if ((size & 0xFFF) != 0) {
-    size_ = (size & ~0xFFF) + 0x1000;
-#if K_MEM_DEBUG
-    dbgln("MemoryObject: aligned {x} to {x}", size, size_);
-#endif
-  }
-  // FIXME: Do this lazily.
-  uint64_t num_pages = size_ / 0x1000;
-  for (uint64_t i = 0; i < num_pages; i++) {
-    phys_page_list_.PushBack(0);
-  }
-}
-
-MemoryObject::~MemoryObject() {
-  for (uint64_t page : phys_page_list_) {
-    if (page != 0) {
-      phys_mem::FreePage(page);
-    }
-  }
-}
-
 uint64_t MemoryObject::PhysicalPageAtOffset(uint64_t offset) {
-  if (offset > size_) {
+  if (offset > size()) {
     panic("Invalid offset");
   }
   uint64_t page_num = offset / 0x1000;
@@ -37,8 +15,8 @@ uint64_t MemoryObject::PhysicalPageAtOffset(uint64_t offset) {
 }
 
 void MemoryObject::CopyBytesToObject(uint64_t source, uint64_t length) {
-  if (length > size_) {
-    panic("Copy overruns memory object: {x} too large for {x}", length, size_);
+  if (length > size()) {
+    panic("Copy overruns memory object: {x} too large for {x}", length, size());
   }
   uint64_t hhdm = boot::GetHigherHalfDirectMap();
   uint64_t page_number = 0;
@@ -59,21 +37,38 @@ void MemoryObject::CopyBytesToObject(uint64_t source, uint64_t length) {
   }
 }
 
-uint64_t MemoryObject::PageNumberToPhysAddr(uint64_t page_num) {
-  auto iter = phys_page_list_.begin();
-  uint64_t index = 0;
-  while (index < page_num) {
-    ++iter;
-    index++;
-  }
-
-  if (*iter == 0) {
+VariableMemoryObject::VariableMemoryObject(uint64_t size) : size_(size) {
+  if ((size & 0xFFF) != 0) {
+    size_ = (size & ~0xFFF) + 0x1000;
+#if K_MEM_DEBUG
+    dbgln("MemoryObject: aligned {x} to {x}", size, size_);
+#endif
+  }
+  // FIXME: Do this lazily.
+  uint64_t num_pages = size_ / 0x1000;
+  phys_page_list_ = glcr::Array<uint64_t>(num_pages);
+  for (uint64_t i = 0; i < phys_page_list_.size(); i++) {
+    phys_page_list_[i] = 0;
+  }
+}
+
+VariableMemoryObject::~VariableMemoryObject() {
+  for (uint64_t p = 0; p < phys_page_list_.size(); p++) {
+    if (phys_page_list_[p] != 0) {
+      // TODO: We may be able to do some sort of coalescing here.
+      phys_mem::FreePage(phys_page_list_[p]);
+    }
+  }
+}
+
+uint64_t VariableMemoryObject::PageNumberToPhysAddr(uint64_t page_num) {
+  if (phys_page_list_[page_num] == 0) {
 #if K_MEM_DEBUG
     dbgln("Allocating page num {} for mem object", page_num);
 #endif
-    *iter = phys_mem::AllocatePage();
+    phys_page_list_[page_num] = phys_mem::AllocatePage();
   }
-  return *iter;
+  return phys_page_list_[page_num];
 }
 
 FixedMemoryObject::~FixedMemoryObject() {
@@ -1,10 +1,11 @@
 #pragma once
 
-#include <glacier/container/linked_list.h>
+#include <glacier/container/array.h>
 #include <glacier/memory/ref_ptr.h>
 #include <glacier/status/error_or.h>
 
 #include "include/ztypes.h"
+#include "memory/constants.h"
 #include "object/kernel_object.h"
 
 class MemoryObject;
@@ -27,32 +28,41 @@ class MemoryObject : public KernelObject {
            kZionPerm_Transmit;
   }
 
-  explicit MemoryObject(uint64_t size);
-  ~MemoryObject();
+  MemoryObject() = default;
+  virtual ~MemoryObject() = default;
 
-  uint64_t size() { return size_; }
-  uint64_t num_pages() { return ((size_ - 1) / 0x1000) + 1; }
+  virtual uint64_t size() = 0;
+  uint64_t num_pages() { return ((size() - 1) / kPageSize) + 1; }
+  virtual glcr::ErrorOr<glcr::RefPtr<MemoryObject>> Duplicate(
+      uint64_t offset, uint64_t length) = 0;
 
   uint64_t PhysicalPageAtOffset(uint64_t offset);
 
   void CopyBytesToObject(uint64_t source, uint64_t length);
 
-  virtual glcr::ErrorOr<glcr::RefPtr<MemoryObject>> Duplicate(uint64_t offset,
-                                                              uint64_t length) {
+ protected:
+  virtual uint64_t PageNumberToPhysAddr(uint64_t page_num) = 0;
+};
+
+class VariableMemoryObject : public MemoryObject {
+ public:
+  explicit VariableMemoryObject(uint64_t size);
+  ~VariableMemoryObject() override;
+
+  uint64_t size() override { return size_; }
+
+  virtual glcr::ErrorOr<glcr::RefPtr<MemoryObject>> Duplicate(
+      uint64_t offset, uint64_t length) override {
     return glcr::UNIMPLEMENTED;
   }
 
  protected:
-  // Hacky to avoid linked_list creation.
-  MemoryObject(uint64_t size, bool) : size_(size) {}
+  virtual uint64_t PageNumberToPhysAddr(uint64_t page_num) override;
 
  private:
   // Always stores the full page-aligned size.
   uint64_t size_;
 
-  virtual uint64_t PageNumberToPhysAddr(uint64_t page_num);
-
-  glcr::LinkedList<uint64_t> phys_page_list_;
+  glcr::Array<uint64_t> phys_page_list_;
 };
 
 class FixedMemoryObject : public MemoryObject {
@@ -60,20 +70,21 @@ class FixedMemoryObject : public MemoryObject {
   // FIXME: Validate that this is 4k aligned.
   // Create a new class object for should free.
   FixedMemoryObject(uint64_t physical_addr, uint64_t size, bool should_free)
-      : MemoryObject(size, true),
-        physical_addr_(physical_addr),
-        should_free_(should_free) {}
+      : size_(size), physical_addr_(physical_addr), should_free_(should_free) {}
 
-  ~FixedMemoryObject();
+  ~FixedMemoryObject() override;
+
+  virtual uint64_t size() override { return size_; }
 
   virtual glcr::ErrorOr<glcr::RefPtr<MemoryObject>> Duplicate(
       uint64_t offset, uint64_t length) override;
 
+ protected:
+  uint64_t PageNumberToPhysAddr(uint64_t page_num) override {
+    return physical_addr_ + (kPageSize * page_num);
+  }
+
  private:
+  uint64_t size_;
   uint64_t physical_addr_;
   bool should_free_;
-
-  uint64_t PageNumberToPhysAddr(uint64_t page_num) override {
-    return physical_addr_ + (0x1000 * page_num);
-  }
 };
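Taken together, the hunks above leave the memory object header with a pure-abstract base and two concrete implementations. A condensed sketch of the resulting hierarchy, reconstructed from the diff (member lists abbreviated):

    class MemoryObject : public KernelObject {
     public:
      MemoryObject() = default;
      virtual ~MemoryObject() = default;

      virtual uint64_t size() = 0;
      uint64_t num_pages() { return ((size() - 1) / kPageSize) + 1; }
      virtual glcr::ErrorOr<glcr::RefPtr<MemoryObject>> Duplicate(
          uint64_t offset, uint64_t length) = 0;

      // Shared helpers, written against the virtuals above.
      uint64_t PhysicalPageAtOffset(uint64_t offset);
      void CopyBytesToObject(uint64_t source, uint64_t length);

     protected:
      virtual uint64_t PageNumberToPhysAddr(uint64_t page_num) = 0;
    };

    // Backed by a glcr::Array of physical pages, allocated lazily on first use.
    class VariableMemoryObject : public MemoryObject { /* ... */ };

    // Wraps a fixed physical range; PageNumberToPhysAddr is plain arithmetic.
    class FixedMemoryObject : public MemoryObject { /* ... */ };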
@@ -6,8 +6,8 @@
 
 z_err_t MemoryObjectCreate(ZMemoryObjectCreateReq* req) {
   auto& curr_proc = gScheduler->CurrentProcess();
-  *req->vmmo_cap =
-      curr_proc.AddNewCapability(glcr::MakeRefCounted<MemoryObject>(req->size));
+  *req->vmmo_cap = curr_proc.AddNewCapability(
+      glcr::MakeRefCounted<VariableMemoryObject>(req->size));
   return glcr::OK;
 }