Compare commits

2 Commits: 126482f3e8 ... 12ca4e4e89

| Author | SHA1 | Date |
|---|---|---|
| | 12ca4e4e89 | |
| | a8ad225cf1 | |
@@ -74,10 +74,12 @@ uint64_t LoadElfProgram(Process& dest_proc, uint64_t base, uint64_t offset) {
           program.type, program.flags, program.offset, program.vaddr,
           program.paddr, program.filesz, program.memsz, program.align);
 #endif
-    auto mem_obj = glcr::MakeRefCounted<MemoryObject>(program.memsz);
+    auto mem_obj = glcr::MakeRefCounted<VariableMemoryObject>(program.memsz);
     mem_obj->CopyBytesToObject(base + program.offset, program.filesz);
-    PANIC_ON_ERR(dest_proc.vmas()->MapInMemoryObject(program.vaddr, mem_obj),
-                 "Couldn't map in init program.");
+    PANIC_ON_ERR(
+        dest_proc.vmas()->MapInMemoryObject(
+            program.vaddr, glcr::StaticCastRefPtr<MemoryObject>(mem_obj)),
+        "Couldn't map in init program.");
   }
   return header->entry;
 }
@@ -121,8 +123,8 @@ const limine_file& GetInitProgram(const glcr::String& path) {
 
 void WriteInitProgram(glcr::RefPtr<Port> port, glcr::String name, uint64_t id) {
   const limine_file& prog = GetInitProgram(name);
-  glcr::RefPtr<MemoryObject> prog_vmmo =
-      glcr::MakeRefCounted<MemoryObject>(prog.size);
+  glcr::RefPtr<VariableMemoryObject> prog_vmmo =
+      glcr::MakeRefCounted<VariableMemoryObject>(prog.size);
   prog_vmmo->CopyBytesToObject(reinterpret_cast<uint64_t>(prog.address),
                                prog.size);
   port->WriteKernel(id, MakeRefCounted<Capability>(prog_vmmo));
@@ -130,8 +132,8 @@ void WriteInitProgram(glcr::RefPtr<Port> port, glcr::String name, uint64_t id) {
 
 glcr::ErrorCode WritePciVmmo(glcr::RefPtr<Port> port, uint64_t id) {
   ASSIGN_OR_RETURN(PcieConfiguration config, GetPciExtendedConfiguration());
-  auto vmmo = glcr::MakeRefCounted<FixedMemoryObject>(config.base,
-                                                      config.offset, false);
+  auto vmmo =
+      glcr::MakeRefCounted<ViewMemoryObject>(config.base, config.offset);
 
   port->WriteKernel(id, MakeRefCounted<Capability>(vmmo));
 
@@ -155,8 +157,8 @@ void WriteFramebufferVmmo(glcr::RefPtr<Port> port) {
       .blue_mask_size = buf.blue_mask_size,
       .blue_mask_shift = buf.blue_mask_shift,
   };
-  glcr::RefPtr<MemoryObject> ubuf_vmmo =
-      glcr::MakeRefCounted<MemoryObject>(sizeof(ubuf));
+  glcr::RefPtr<VariableMemoryObject> ubuf_vmmo =
+      glcr::MakeRefCounted<VariableMemoryObject>(sizeof(ubuf));
   ubuf_vmmo->CopyBytesToObject(reinterpret_cast<uint64_t>(&ubuf), sizeof(ubuf));
   port->WriteKernel(Z_BOOT_FRAMEBUFFER_INFO_VMMO,
                     MakeRefCounted<Capability>(ubuf_vmmo));
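Editor's note: taken together, the hunks above split the boot-time capability writers by memory-object flavor. ELF images and the framebuffer-info struct are copied into freshly allocated VariableMemoryObjects, while the PCI extended configuration window is handed out as a ViewMemoryObject over memory the kernel does not own. A minimal sketch of the two patterns, assuming only the kernel APIs visible in these hunks (the `prog`, `config`, `port`, and `id` values stand in for the real ones):

```cpp
// Data the kernel copies into fresh pages: allocate, copy, publish.
auto copy_vmmo = glcr::MakeRefCounted<VariableMemoryObject>(prog.size);
copy_vmmo->CopyBytesToObject(reinterpret_cast<uint64_t>(prog.address),
                             prog.size);
port->WriteKernel(id, MakeRefCounted<Capability>(copy_vmmo));

// Memory that already exists and must never be freed by the object
// (here the PCI extended configuration region): wrap it in a view instead.
auto view_vmmo =
    glcr::MakeRefCounted<ViewMemoryObject>(config.base, config.offset);
port->WriteKernel(id, MakeRefCounted<Capability>(view_vmmo));
```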
@@ -6,30 +6,8 @@
 
 #define K_MEM_DEBUG 0
 
-MemoryObject::MemoryObject(uint64_t size) : size_(size) {
-  if ((size & 0xFFF) != 0) {
-    size_ = (size & ~0xFFF) + 0x1000;
-#if K_MEM_DEBUG
-    dbgln("MemoryObject: aligned {x} to {x}", size, size_);
-#endif
-  }
-  // FIXME: Do this lazily.
-  uint64_t num_pages = size_ / 0x1000;
-  for (uint64_t i = 0; i < num_pages; i++) {
-    phys_page_list_.PushBack(0);
-  }
-}
-
-MemoryObject::~MemoryObject() {
-  for (uint64_t page : phys_page_list_) {
-    if (page != 0) {
-      phys_mem::FreePage(page);
-    }
-  }
-}
-
 uint64_t MemoryObject::PhysicalPageAtOffset(uint64_t offset) {
-  if (offset > size_) {
+  if (offset > size()) {
     panic("Invalid offset");
   }
   uint64_t page_num = offset / 0x1000;
@@ -37,8 +15,8 @@ uint64_t MemoryObject::PhysicalPageAtOffset(uint64_t offset) {
 }
 
 void MemoryObject::CopyBytesToObject(uint64_t source, uint64_t length) {
-  if (length > size_) {
-    panic("Copy overruns memory object: {x} too large for {x}", length, size_);
+  if (length > size()) {
+    panic("Copy overruns memory object: {x} too large for {x}", length, size());
   }
   uint64_t hhdm = boot::GetHigherHalfDirectMap();
   uint64_t page_number = 0;
@@ -59,36 +37,50 @@ void MemoryObject::CopyBytesToObject(uint64_t source, uint64_t length) {
     }
   }
 
-uint64_t MemoryObject::PageNumberToPhysAddr(uint64_t page_num) {
-  auto iter = phys_page_list_.begin();
-  uint64_t index = 0;
-  while (index < page_num) {
-    ++iter;
-    index++;
+VariableMemoryObject::VariableMemoryObject(uint64_t size) : size_(size) {
+  if ((size & 0xFFF) != 0) {
+    size_ = (size & ~0xFFF) + 0x1000;
+#if K_MEM_DEBUG
+    dbgln("MemoryObject: aligned {x} to {x}", size, size_);
+#endif
+  }
+  // FIXME: Do this lazily.
+  uint64_t num_pages = size_ / 0x1000;
+  phys_page_list_ = glcr::Array<uint64_t>(num_pages);
+  for (uint64_t i = 0; i < phys_page_list_.size(); i++) {
+    phys_page_list_[i] = 0;
   }
+}
 
-  if (*iter == 0) {
+VariableMemoryObject::~VariableMemoryObject() {
+  for (uint64_t p = 0; p < phys_page_list_.size(); p++) {
+    if (phys_page_list_[p] != 0) {
+      // TODO: We may be able to do some sort of coalescing here.
+      phys_mem::FreePage(phys_page_list_[p]);
+    }
+  }
+}
+
+uint64_t VariableMemoryObject::PageNumberToPhysAddr(uint64_t page_num) {
+  if (phys_page_list_[page_num] == 0) {
 #if K_MEM_DEBUG
     dbgln("Allocating page num {} for mem object", page_num);
 #endif
-    *iter = phys_mem::AllocatePage();
+    phys_page_list_[page_num] = phys_mem::AllocatePage();
   }
-  return *iter;
+  return phys_page_list_[page_num];
 }
 
 FixedMemoryObject::~FixedMemoryObject() {
-  if (should_free_) {
-    phys_mem::FreePages(physical_addr_, num_pages());
-  }
+  phys_mem::FreePages(physical_addr_, num_pages());
 }
 
-glcr::ErrorOr<glcr::RefPtr<MemoryObject>> FixedMemoryObject::Duplicate(
+glcr::ErrorOr<glcr::RefPtr<MemoryObject>> ViewMemoryObject::Duplicate(
     uint64_t offset, uint64_t length) {
   if (offset + length > size()) {
     return glcr::INVALID_ARGUMENT;
   }
 
   return glcr::StaticCastRefPtr<MemoryObject>(
-      glcr::MakeRefCounted<FixedMemoryObject>(physical_addr_ + offset, length,
-                                              false));
+      glcr::MakeRefCounted<ViewMemoryObject>(physical_addr_ + offset, length));
 }
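Editor's note: the constructor and destructor that used to live on MemoryObject now live on VariableMemoryObject, and the linked-list walk in PageNumberToPhysAddr becomes direct indexing into a pre-sized array of zeroed slots, with a physical page allocated only on first use. A self-contained sketch of that lazy-allocation pattern follows; std::vector stands in for glcr::Array and the Fake* functions stand in for phys_mem::AllocatePage/FreePage, none of which are the kernel's real API.

```cpp
#include <cinttypes>
#include <cstdint>
#include <cstdio>
#include <vector>

namespace {
uint64_t next_fake_page = 0;
uint64_t FakeAllocatePage() { return next_fake_page += 0x1000; }
void FakeFreePage(uint64_t page) {
  std::printf("freeing %#" PRIx64 "\n", page);
}
}  // namespace

class LazyPagedObject {
 public:
  explicit LazyPagedObject(uint64_t size) : size_(size) {
    // Round the requested size up to a whole number of 4 KiB pages.
    if ((size & 0xFFF) != 0) {
      size_ = (size & ~0xFFFull) + 0x1000;
    }
    // Every slot starts at 0, meaning "no physical page backs this yet".
    pages_.assign(size_ / 0x1000, 0);
  }

  ~LazyPagedObject() {
    // Only pages that were actually touched need to be returned.
    for (uint64_t page : pages_) {
      if (page != 0) {
        FakeFreePage(page);
      }
    }
  }

  // A physical page is allocated the first time its page number is requested.
  uint64_t PageNumberToPhysAddr(uint64_t page_num) {
    if (pages_[page_num] == 0) {
      pages_[page_num] = FakeAllocatePage();
    }
    return pages_[page_num];
  }

 private:
  uint64_t size_;
  std::vector<uint64_t> pages_;
};

int main() {
  LazyPagedObject obj(0x2100);  // rounds up to three 4 KiB pages
  // Touch only page 1; pages 0 and 2 never get physical backing.
  std::printf("page 1 -> %#" PRIx64 "\n", obj.PageNumberToPhysAddr(1));
}
```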
@@ -1,10 +1,11 @@
 #pragma once
 
-#include <glacier/container/linked_list.h>
+#include <glacier/container/array.h>
 #include <glacier/memory/ref_ptr.h>
 #include <glacier/status/error_or.h>
 
 #include "include/ztypes.h"
+#include "memory/constants.h"
 #include "object/kernel_object.h"
 
 class MemoryObject;
@@ -27,53 +28,87 @@ class MemoryObject : public KernelObject {
          kZionPerm_Transmit;
   }
 
-  explicit MemoryObject(uint64_t size);
-  ~MemoryObject();
+  MemoryObject() = default;
+  virtual ~MemoryObject() = default;
 
-  uint64_t size() { return size_; }
-  uint64_t num_pages() { return ((size_ - 1) / 0x1000) + 1; }
+  virtual uint64_t size() = 0;
+  uint64_t num_pages() { return ((size() - 1) / kPageSize) + 1; }
+  virtual glcr::ErrorOr<glcr::RefPtr<MemoryObject>> Duplicate(
+      uint64_t offset, uint64_t length) = 0;
 
   uint64_t PhysicalPageAtOffset(uint64_t offset);
 
   void CopyBytesToObject(uint64_t source, uint64_t length);
 
-  virtual glcr::ErrorOr<glcr::RefPtr<MemoryObject>> Duplicate(uint64_t offset,
-                                                              uint64_t length) {
+ protected:
+  virtual uint64_t PageNumberToPhysAddr(uint64_t page_num) = 0;
+};
+
+class VariableMemoryObject : public MemoryObject {
+ public:
+  explicit VariableMemoryObject(uint64_t size);
+  ~VariableMemoryObject() override;
+
+  uint64_t size() override { return size_; }
+
+  virtual glcr::ErrorOr<glcr::RefPtr<MemoryObject>> Duplicate(
+      uint64_t offset, uint64_t length) override {
     return glcr::UNIMPLEMENTED;
   }
 
  protected:
-  // Hacky to avoid linked_list creation.
-  MemoryObject(uint64_t size, bool) : size_(size) {}
+  virtual uint64_t PageNumberToPhysAddr(uint64_t page_num) override;
 
  private:
   // Always stores the full page-aligned size.
   uint64_t size_;
 
-  virtual uint64_t PageNumberToPhysAddr(uint64_t page_num);
-
-  glcr::LinkedList<uint64_t> phys_page_list_;
+  glcr::Array<uint64_t> phys_page_list_;
 };
 
 class FixedMemoryObject : public MemoryObject {
  public:
   // FIXME: Validate that this is 4k aligned.
-  // Create a new class object for should free.
-  FixedMemoryObject(uint64_t physical_addr, uint64_t size, bool should_free)
-      : MemoryObject(size, true),
-        physical_addr_(physical_addr),
-        should_free_(should_free) {}
+  FixedMemoryObject(uint64_t physical_addr, uint64_t size)
+      : size_(size), physical_addr_(physical_addr) {}
 
-  ~FixedMemoryObject();
+  ~FixedMemoryObject() override;
 
+  virtual uint64_t size() override { return size_; }
+  virtual glcr::ErrorOr<glcr::RefPtr<MemoryObject>> Duplicate(
+      uint64_t offset, uint64_t length) override {
+    return glcr::UNIMPLEMENTED;
+  }
+
+ protected:
+  uint64_t PageNumberToPhysAddr(uint64_t page_num) override {
+    return physical_addr_ + (kPageSize * page_num);
+  }
+
+ private:
+  uint64_t size_;
+  uint64_t physical_addr_;
+};
+
+// Like a FixedMemoryObject except it doesn't release
+// it's pages when it is done. Should be used for things
+// like HBAs and the PCI config space.
+class ViewMemoryObject : public MemoryObject {
+ public:
+  ViewMemoryObject(uint64_t physical_addr, uint64_t size)
+      : size_(size), physical_addr_(physical_addr) {}
+
+  ~ViewMemoryObject(){};
+
+  virtual uint64_t size() override { return size_; }
   virtual glcr::ErrorOr<glcr::RefPtr<MemoryObject>> Duplicate(
       uint64_t offset, uint64_t length) override;
 
- private:
-  uint64_t physical_addr_;
-  bool should_free_;
-
  protected:
   uint64_t PageNumberToPhysAddr(uint64_t page_num) override {
-    return physical_addr_ + (0x1000 * page_num);
+    return physical_addr_ + (kPageSize * page_num);
   }
 
+ private:
+  uint64_t size_;
+  uint64_t physical_addr_;
 };
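Editor's note: the header now defines one abstract interface and three concrete implementations, and the choice between them encodes ownership: VariableMemoryObject lazily allocates and later frees its own pages, FixedMemoryObject owns a contiguous physical range and frees it on destruction, and ViewMemoryObject exposes an existing physical range (PCI config space, HBAs) that it never frees. A hedged usage sketch built only from the constructors and helpers shown in this diff; it is not buildable outside the kernel tree, and `pci_base`/`pci_size` are hypothetical placeholders.

```cpp
// Anonymous memory: physical pages appear on first touch, freed with the object.
auto anon = glcr::MakeRefCounted<VariableMemoryObject>(0x4000);

// Kernel-allocated contiguous range: the object owns it and frees it.
uint64_t paddr = phys_mem::AllocateContinuous(4);
auto owned = glcr::MakeRefCounted<FixedMemoryObject>(paddr, 4 * 0x1000);

// A window onto memory that already exists (e.g. PCI config space):
// PageNumberToPhysAddr just offsets into it, and nothing is ever freed.
auto view = glcr::MakeRefCounted<ViewMemoryObject>(pci_base, pci_size);

// Generic code keeps working against the base class interface.
glcr::RefPtr<MemoryObject> as_base = glcr::StaticCastRefPtr<MemoryObject>(anon);
uint64_t pages = as_base->num_pages();
```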
@@ -6,16 +6,15 @@
 
 z_err_t MemoryObjectCreate(ZMemoryObjectCreateReq* req) {
   auto& curr_proc = gScheduler->CurrentProcess();
-  *req->vmmo_cap =
-      curr_proc.AddNewCapability(glcr::MakeRefCounted<MemoryObject>(req->size));
+  *req->vmmo_cap = curr_proc.AddNewCapability(
+      glcr::MakeRefCounted<VariableMemoryObject>(req->size));
   return glcr::OK;
 }
 
 z_err_t MemoryObjectCreatePhysical(ZMemoryObjectCreatePhysicalReq* req) {
   auto& curr_proc = gScheduler->CurrentProcess();
   uint64_t paddr = req->paddr;
-  auto vmmo_ref =
-      glcr::MakeRefCounted<FixedMemoryObject>(paddr, req->size, false);
+  auto vmmo_ref = glcr::MakeRefCounted<ViewMemoryObject>(paddr, req->size);
   *req->vmmo_cap =
       curr_proc.AddNewCapability(StaticCastRefPtr<MemoryObject>(vmmo_ref));
   return glcr::OK;
@@ -25,8 +24,8 @@ z_err_t MemoryObjectCreateContiguous(ZMemoryObjectCreateContiguousReq* req) {
   auto& curr_proc = gScheduler->CurrentProcess();
   uint64_t num_pages = ((req->size - 1) / 0x1000) + 1;
   uint64_t paddr = phys_mem::AllocateContinuous(num_pages);
-  auto vmmo_ref =
-      glcr::MakeRefCounted<FixedMemoryObject>(paddr, req->size, true);
+  auto vmmo_ref = glcr::MakeRefCounted<FixedMemoryObject>(paddr, req->size);
   *req->vmmo_cap =
       curr_proc.AddNewCapability(StaticCastRefPtr<MemoryObject>(vmmo_ref));
   *req->paddr = paddr;
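Editor's note: as a condensed reading of the two hunks above (not additional code from the change), the creation syscalls now map onto the hierarchy as follows.

```cpp
// MemoryObjectCreate           -> VariableMemoryObject(req->size)
//   anonymous memory; physical pages allocated lazily, freed with the object.
// MemoryObjectCreatePhysical   -> ViewMemoryObject(req->paddr, req->size)
//   caller-named physical range; the kernel never frees it.
// MemoryObjectCreateContiguous -> FixedMemoryObject(paddr, req->size)
//   kernel-allocated contiguous range; freed when the object is destroyed.
```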