Compare commits


No commits in common. "344e84c313a05ceac677b5666762c39809215f6a" and "308dd6a2035f0e2d6ec1a6593fc06210ef5783ac" have entirely different histories.

55 changed files with 258 additions and 544 deletions

View File

@ -7,7 +7,6 @@
#include "glacier/memory/ref_ptr.h" #include "glacier/memory/ref_ptr.h"
#include "glacier/memory/reference.h" #include "glacier/memory/reference.h"
#include "glacier/memory/unique_ptr.h" #include "glacier/memory/unique_ptr.h"
#include "glacier/string/str_format.h"
namespace glcr { namespace glcr {
@ -27,8 +26,6 @@ class BinaryTree {
Optional<Ref<V>> Find(K key); Optional<Ref<V>> Find(K key);
void DebugTreeIntoStr(StringBuilder& builder) const;
private: private:
// TODO: Consider adding a sharedptr type to // TODO: Consider adding a sharedptr type to
// avoid making this "RefCounted". // avoid making this "RefCounted".
@ -48,9 +45,6 @@ class BinaryTree {
// If this node exists, return it. Otherwise, this // If this node exists, return it. Otherwise, this
// will be the parent of where this node would be inserted. // will be the parent of where this node would be inserted.
RefPtr<BinaryNode> FindOrInsertionParent(K key); RefPtr<BinaryNode> FindOrInsertionParent(K key);
static void DebugNodeIntoString(StringBuilder& builder, uint64_t indent_level,
const RefPtr<BinaryNode>& node);
}; };
template <typename K, typename V> template <typename K, typename V>
@ -80,45 +74,39 @@ void BinaryTree<K, V>::Delete(K key) {
} }
RefPtr<BinaryNode> new_child = nullptr; RefPtr<BinaryNode> new_child = nullptr;
if (!node->left) { if (!node.left) {
// No children. // No children.
// Right child only. // Right child only.
new_child = node->right; new_child = node.right;
} else if (!node->right) { } else if (!node.right) {
// Left child only. // Left child only.
new_child = node->left; new_child = node.left;
} else { } else {
// Find Successor. // Find Successor.
auto successor = node->right; auto successor = node.right;
while (successor->left) { while (successor.left) {
successor = successor->left; successor = successor.left;
} }
new_child = successor; new_child = successor;
if (successor != node->right) { if (successor != node.right) {
successor->parent->left = successor->right; successor.parent.left = successor.right;
} }
} }
if (node == root_) { if (node == root_) {
root_ = new_child; root_ = new_child;
} else { } else {
if (node->parent->right == node) { if (node.parent.right == node) {
node->parent->right = new_child; node.parent.right = new_child;
} else { } else {
node->parent->left = new_child; node.parent.left = new_child;
} }
} }
if (new_child) {
new_child->parent = node->parent;
}
} }
template <typename K, typename V> template <typename K, typename V>
Optional<Ref<V>> BinaryTree<K, V>::Predecessor(K key) { Optional<Ref<V>> BinaryTree<K, V>::Predecessor(K key) {
auto current = FindOrInsertionParent(key); auto current = FindOrInsertionParent(key);
if (current.empty()) {
return {};
}
// The case where the current is the insertion parent and // The case where the current is the insertion parent and
// the predecessor is unique. If the key was going to be // the predecessor is unique. If the key was going to be
@ -151,9 +139,6 @@ Optional<Ref<V>> BinaryTree<K, V>::Predecessor(K key) {
template <typename K, typename V> template <typename K, typename V>
Optional<Ref<V>> BinaryTree<K, V>::Successor(K key) { Optional<Ref<V>> BinaryTree<K, V>::Successor(K key) {
auto current = FindOrInsertionParent(key); auto current = FindOrInsertionParent(key);
if (current.empty()) {
return {};
}
// The case where the current is the insertion parent and // The case where the current is the insertion parent and
// the predecessor is unique. If the key was going to be // the predecessor is unique. If the key was going to be
@ -186,9 +171,6 @@ Optional<Ref<V>> BinaryTree<K, V>::Successor(K key) {
template <typename K, typename V> template <typename K, typename V>
Optional<Ref<V>> BinaryTree<K, V>::Find(K key) { Optional<Ref<V>> BinaryTree<K, V>::Find(K key) {
auto current = FindOrInsertionParent(key); auto current = FindOrInsertionParent(key);
if (current.empty()) {
return {};
}
if (current->key == key) { if (current->key == key) {
return Optional<Ref<V>>(current->value); return Optional<Ref<V>>(current->value);
} }
@ -220,36 +202,4 @@ BinaryTree<K, V>::FindOrInsertionParent(K key) {
} }
} }
template <typename K, typename V>
void StrFormatValue(StringBuilder& builder, const BinaryTree<K, V>& value,
StringView opts) {
value.DebugTreeIntoStr(builder);
}
template <typename K, typename V>
void BinaryTree<K, V>::DebugTreeIntoStr(StringBuilder& builder) const {
DebugNodeIntoString(builder, 0, root_);
}
template <typename K, typename V>
void BinaryTree<K, V>::DebugNodeIntoString(StringBuilder& builder,
uint64_t indent_level,
const RefPtr<BinaryNode>& node) {
if (node.empty()) {
return;
}
for (uint64_t i = 0; i < indent_level; i++) {
builder.PushBack('\t');
}
StrFormatValue(builder, node->value, "");
builder.PushBack('\n');
if (node->left) {
builder.PushBack('L');
DebugNodeIntoString(builder, indent_level + 1, node->left);
}
if (node->right) {
builder.PushBack('R');
DebugNodeIntoString(builder, indent_level + 1, node->right);
}
}
} // namespace glcr } // namespace glcr
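The left-hand side of the Delete hunk above also re-parents the replacement child after splicing a node out; without that fixup a stale parent pointer breaks later Predecessor/Successor walks. A minimal standalone sketch of the splice-and-reparent step, using a plain raw-pointer node rather than the glcr::RefPtr-based BinaryNode:

```cpp
// Sketch only: a plain binary-tree node, not the RefPtr-based BinaryNode above.
struct Node {
  int key;
  Node* parent = nullptr;
  Node* left = nullptr;
  Node* right = nullptr;
};

// After choosing new_child to replace `node`, relink both directions:
// the parent's child pointer *and* the child's parent pointer.
void SpliceOut(Node*& root, Node* node, Node* new_child) {
  if (node == root) {
    root = new_child;
  } else if (node->parent->right == node) {
    node->parent->right = new_child;
  } else {
    node->parent->left = new_child;
  }
  if (new_child) {
    new_child->parent = node->parent;  // the fixup kept on the left-hand side
  }
}
```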

View File

@ -11,12 +11,10 @@ class RefCounted {
virtual ~RefCounted() {} virtual ~RefCounted() {}
// FIXME: Rethink error handling in these cases now that we can't panic the // FIXME: Rethink error handling in these cases now that we can't panic the
// kernel. // kernel.
void AdoptPtr() { ref_count_ = 1; } void Adopt() { ref_count_ = 1; }
void AcquirePtr() { ref_count_++; } void Acquire() { ref_count_++; }
bool ReleasePtr() { return (--ref_count_) == 0; } bool Release() { return (--ref_count_) == 0; }
uint64_t ref_count() { return ref_count_; }
private: private:
// FIXME: This should be an atomic type. // FIXME: This should be an atomic type.
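For reference, the AdoptPtr/AcquirePtr/ReleasePtr trio kept on the left-hand side implements the usual intrusive ref-count protocol: adopt sets the count to 1 for a freshly constructed object, acquire adds an owner, and release removes one and reports when the last owner should delete. A self-contained sketch (the class name and layout are illustrative, not the exact glcr declaration):

```cpp
#include <cstdint>

// Illustrative intrusive ref-count base; the real glcr::RefCounted notes that
// the counter should eventually become an atomic type.
class RefCountedSketch {
 public:
  void AdoptPtr() { ref_count_ = 1; }              // first owner
  void AcquirePtr() { ++ref_count_; }              // additional owner
  bool ReleasePtr() { return --ref_count_ == 0; }  // true => caller deletes
  uint64_t ref_count() const { return ref_count_; }

 private:
  uint64_t ref_count_ = 0;
};
```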

View File

@ -18,16 +18,16 @@ class RefPtr {
RefPtr(decltype(nullptr)) : ptr_(nullptr) {} RefPtr(decltype(nullptr)) : ptr_(nullptr) {}
RefPtr(const RefPtr& other) : ptr_(other.ptr_) { RefPtr(const RefPtr& other) : ptr_(other.ptr_) {
if (ptr_) { if (ptr_) {
ptr_->AcquirePtr(); ptr_->Acquire();
} }
} }
RefPtr& operator=(const RefPtr& other) { RefPtr& operator=(const RefPtr& other) {
T* old = ptr_; T* old = ptr_;
ptr_ = other.ptr_; ptr_ = other.ptr_;
if (ptr_) { if (ptr_) {
ptr_->AcquirePtr(); ptr_->Acquire();
} }
if (old && old->ReleasePtr()) { if (old && old->Release()) {
delete old; delete old;
} }
@ -46,15 +46,7 @@ class RefPtr {
enum DontAdoptTag { enum DontAdoptTag {
DontAdopt, DontAdopt,
}; };
RefPtr(T* ptr, DontAdoptTag) : ptr_(ptr) { ptr->AcquirePtr(); } RefPtr(T* ptr, DontAdoptTag) : ptr_(ptr) { ptr->Acquire(); }
~RefPtr() {
if (ptr_) {
if (ptr_->ReleasePtr()) {
delete ptr_;
}
}
}
T* get() const { return ptr_; }; T* get() const { return ptr_; };
T& operator*() const { return *ptr_; } T& operator*() const { return *ptr_; }
@ -73,7 +65,7 @@ class RefPtr {
T* ptr_; T* ptr_;
friend RefPtr<T> AdoptPtr<T>(T* ptr); friend RefPtr<T> AdoptPtr<T>(T* ptr);
RefPtr(T* ptr) : ptr_(ptr) { ptr->AdoptPtr(); } RefPtr(T* ptr) : ptr_(ptr) { ptr->Adopt(); }
}; };
template <typename T> template <typename T>
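The destructor kept on the left-hand side is what closes the ownership loop: when the last RefPtr is destroyed, ReleasePtr() reaches zero and the pointee is deleted. A simplified, self-contained sketch of that construct/copy/destroy lifecycle (not the exact glcr implementation, which also handles assignment and the DontAdopt tag shown above):

```cpp
// Simplified stand-in showing the RAII release in the destructor; assignment
// operators are omitted for brevity.
template <typename T>
class RefPtrSketch {
 public:
  explicit RefPtrSketch(T* ptr) : ptr_(ptr) { ptr_->AdoptPtr(); }  // count = 1
  RefPtrSketch(const RefPtrSketch& other) : ptr_(other.ptr_) {
    if (ptr_) ptr_->AcquirePtr();  // another owner
  }
  ~RefPtrSketch() {
    if (ptr_ && ptr_->ReleasePtr()) {
      delete ptr_;  // last owner frees the object
    }
  }
  T* operator->() const { return ptr_; }

 private:
  T* ptr_;
};
```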

View File

@ -10,7 +10,6 @@ class Ref {
Ref(Ref&& other) = default; Ref(Ref&& other) = default;
operator T&() const { return ref_; } operator T&() const { return ref_; }
T& get() const { return ref_; }
private: private:
T& ref_; T& ref_;

View File

@ -20,32 +20,27 @@ void StrFormatNumber(StringBuilder& builder, uint64_t value, uint64_t base) {
} // namespace } // namespace
template <> template <>
void StrFormatValue(StringBuilder& builder, const uint8_t& value, void StrFormatValue(StringBuilder& builder, uint8_t value, StringView opts) {
StringView opts) {
StrFormatValue(builder, static_cast<uint64_t>(value), opts); StrFormatValue(builder, static_cast<uint64_t>(value), opts);
} }
template <> template <>
void StrFormatValue(StringBuilder& builder, const uint16_t& value, void StrFormatValue(StringBuilder& builder, uint16_t value, StringView opts) {
StringView opts) {
StrFormatValue(builder, static_cast<uint64_t>(value), opts); StrFormatValue(builder, static_cast<uint64_t>(value), opts);
} }
template <> template <>
void StrFormatValue(StringBuilder& builder, const int32_t& value, void StrFormatValue(StringBuilder& builder, int32_t value, StringView opts) {
StringView opts) {
StrFormatValue(builder, static_cast<uint64_t>(value), opts); StrFormatValue(builder, static_cast<uint64_t>(value), opts);
} }
template <> template <>
void StrFormatValue(StringBuilder& builder, const uint32_t& value, void StrFormatValue(StringBuilder& builder, uint32_t value, StringView opts) {
StringView opts) {
StrFormatValue(builder, static_cast<uint64_t>(value), opts); StrFormatValue(builder, static_cast<uint64_t>(value), opts);
} }
template <> template <>
void StrFormatValue(StringBuilder& builder, const uint64_t& value, void StrFormatValue(StringBuilder& builder, uint64_t value, StringView opts) {
StringView opts) {
if (opts.find('x') != opts.npos) { if (opts.find('x') != opts.npos) {
builder.PushBack("0x"); builder.PushBack("0x");
StrFormatNumber(builder, value, 16); StrFormatNumber(builder, value, 16);
@ -55,26 +50,28 @@ void StrFormatValue(StringBuilder& builder, const uint64_t& value,
} }
template <> template <>
void StrFormatValue(StringBuilder& builder, const ErrorCode& value, void StrFormatValue(StringBuilder& builder, ErrorCode value, StringView opts) {
StringView opts) {
StrFormatValue(builder, static_cast<uint64_t>(value), opts); StrFormatValue(builder, static_cast<uint64_t>(value), opts);
} }
template <> template <>
void StrFormatValue(StringBuilder& builder, const char& value, void StrFormatValue(StringBuilder& builder, char value, StringView opts) {
StringView opts) {
builder.PushBack(value); builder.PushBack(value);
} }
template <> template <>
void StrFormatValue(StringBuilder& builder, char const* const& value, void StrFormatValue(StringBuilder& builder, const char* value,
StringView opts) { StringView opts) {
StrFormatInternal(builder, StringView(value)); StrFormatValue(builder, StringView(value), opts);
} }
template <> template <>
void StrFormatValue(StringBuilder& builder, const StringView& value, void StrFormatValue(StringBuilder& builder, StringView value, StringView opts) {
StringView opts) { StrFormatInternal(builder, value);
}
template <>
void StrFormatValue(StringBuilder& builder, String value, StringView opts) {
StrFormatInternal(builder, value); StrFormatInternal(builder, value);
} }

View File

@ -7,51 +7,44 @@
namespace glcr { namespace glcr {
// FIXME: We need some meta-programming here to allow pass-by-value for pointers
// and primitives.
template <typename T> template <typename T>
void StrFormatValue(StringBuilder& builder, const T& value, StringView opts); void StrFormatValue(StringBuilder& builder, T value, StringView opts);
template <> template <>
void StrFormatValue(StringBuilder& builder, const uint8_t& value, void StrFormatValue(StringBuilder& builder, uint8_t value, StringView opts);
StringView opts);
template <> template <>
void StrFormatValue(StringBuilder& builder, const uint16_t& value, void StrFormatValue(StringBuilder& builder, uint16_t value, StringView opts);
StringView opts);
template <> template <>
void StrFormatValue(StringBuilder& builder, const int32_t& value, void StrFormatValue(StringBuilder& builder, int32_t value, StringView opts);
StringView opts);
template <> template <>
void StrFormatValue(StringBuilder& builder, const uint32_t& value, void StrFormatValue(StringBuilder& builder, uint32_t value, StringView opts);
StringView opts);
template <> template <>
void StrFormatValue(StringBuilder& builder, const uint64_t& value, void StrFormatValue(StringBuilder& builder, uint64_t value, StringView opts);
StringView opts);
template <> template <>
void StrFormatValue(StringBuilder& builder, const ErrorCode& value, void StrFormatValue(StringBuilder& builder, ErrorCode value, StringView opts);
StringView opts);
template <> template <>
void StrFormatValue(StringBuilder& builder, const char& value, StringView opts); void StrFormatValue(StringBuilder& builder, char value, StringView opts);
template <> template <>
void StrFormatValue(StringBuilder& builder, char const* const& value, void StrFormatValue(StringBuilder& builder, const char* value, StringView opts);
StringView opts);
template <> template <>
void StrFormatValue(StringBuilder& builder, const StringView& value, void StrFormatValue(StringBuilder& builder, StringView value, StringView opts);
StringView opts);
template <>
void StrFormatValue(StringBuilder& builder, String value, StringView opts);
void StrFormatInternal(StringBuilder& builder, StringView format); void StrFormatInternal(StringBuilder& builder, StringView format);
template <typename T, typename... Args> template <typename T, typename... Args>
void StrFormatInternal(StringBuilder& builder, StringView format, void StrFormatInternal(StringBuilder& builder, StringView format, T value,
const T& value, Args&&... args) { Args... args) {
uint64_t posl = format.find('{'); uint64_t posl = format.find('{');
uint64_t posr = format.find('}', posl); uint64_t posr = format.find('}', posl);
if (posl == format.npos || posr == format.npos) { if (posl == format.npos || posr == format.npos) {
@ -66,7 +59,7 @@ void StrFormatInternal(StringBuilder& builder, StringView format,
} }
template <typename... Args> template <typename... Args>
String StrFormat(StringView format, Args&&... args) { String StrFormat(StringView format, Args... args) {
VariableStringBuilder builder; VariableStringBuilder builder;
StrFormatInternal(builder, format, args...); StrFormatInternal(builder, format, args...);
return builder.ToString(); return builder.ToString();
@ -74,7 +67,7 @@ String StrFormat(StringView format, Args&&... args) {
template <typename... Args> template <typename... Args>
void StrFormatIntoBuffer(StringBuilder& builder, StringView format, void StrFormatIntoBuffer(StringBuilder& builder, StringView format,
Args&&... args) { Args... args) {
StrFormatInternal(builder, format, args...); StrFormatInternal(builder, format, args...);
} }
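As the `{x}` branch in the uint64_t specialization suggests, format strings use `{}` placeholders with optional option characters between the braces. A hedged usage sketch, assuming the implicit const char* to StringView conversion used elsewhere in this diff:

```cpp
#include "glacier/string/str_format.h"

void FormatExamples() {
  // "{}" consumes the next argument; uint64_t is formatted in decimal.
  glcr::String a = glcr::StrFormat("count: {}", uint64_t{42});

  // "{x}" selects hexadecimal for unsigned integers and adds a "0x" prefix.
  glcr::String b = glcr::StrFormat("cap: {x}", uint64_t{0x1F00});

  // StringView arguments are copied into the output verbatim.
  glcr::String c = glcr::StrFormat("name: {}", glcr::StringView("denali"));
}
```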

View File

@ -46,6 +46,5 @@ char String::operator[](uint64_t offset) const {
} }
String::operator StringView() const { return StringView(cstr_, length_); } String::operator StringView() const { return StringView(cstr_, length_); }
StringView String::view() const { return this->operator StringView(); }
} // namespace glcr } // namespace glcr

View File

@ -13,17 +13,13 @@ class String {
String(const char* cstr, uint64_t str_len); String(const char* cstr, uint64_t str_len);
String(StringView str); String(StringView str);
String(const String&) = delete;
const char* cstr() const { return cstr_; } const char* cstr() const { return cstr_; }
uint64_t length() const { return length_; } uint64_t length() const { return length_; }
bool operator==(const String& str); bool operator==(const String& str);
char operator[](uint64_t offset) const; char operator[](uint64_t offset) const;
operator StringView() const; operator StringView() const;
StringView view() const;
private: private:
char* cstr_; char* cstr_;

View File

@ -3,41 +3,32 @@
#include <stdint.h> #include <stdint.h>
#include <ztypes.h> #include <ztypes.h>
/* class MappedMemoryRegion {
* Memory Region class that unmaps its memory and releases its
* capability when it goes out of scope.
*/
class OwnedMemoryRegion {
public: public:
OwnedMemoryRegion() = default; // FIXME: Introduce optional type to contain error or.
static MappedMemoryRegion DirectPhysical(uint64_t phys_addr, uint64_t size);
static MappedMemoryRegion ContiguousPhysical(uint64_t size);
static MappedMemoryRegion Default(uint64_t size);
static MappedMemoryRegion FromCapability(z_cap_t vmmo_cap);
OwnedMemoryRegion(const OwnedMemoryRegion&) = delete; MappedMemoryRegion() {}
OwnedMemoryRegion& operator=(const OwnedMemoryRegion&) = delete; // TODO: Disallow copy before doing any cleanup here.
~MappedMemoryRegion() {}
OwnedMemoryRegion(OwnedMemoryRegion&&);
OwnedMemoryRegion& operator=(OwnedMemoryRegion&&);
~OwnedMemoryRegion();
static OwnedMemoryRegion FromCapability(z_cap_t vmmo_cap);
// TODO: Consider making this its own class.
static OwnedMemoryRegion ContiguousPhysical(uint64_t size, uint64_t* paddr);
static OwnedMemoryRegion DirectPhysical(uint64_t paddr, uint64_t size);
uint64_t paddr() { return paddr_; }
uint64_t vaddr() { return vaddr_; } uint64_t vaddr() { return vaddr_; }
uint64_t size() { return size_; } uint64_t size() { return size_; }
z_cap_t cap() { return vmmo_cap_; } uint64_t cap() { return vmmo_cap_; }
z_cap_t DuplicateCap();
bool empty() { return vmmo_cap_ != 0; } operator bool() { return vmmo_cap_ != 0; }
explicit operator bool() { return vmmo_cap_ != 0; }
private: private:
OwnedMemoryRegion(uint64_t vmmo_cap, uint64_t vaddr, uint64_t size) MappedMemoryRegion(uint64_t vmmo_cap, uint64_t paddr, uint64_t vaddr,
: vmmo_cap_(vmmo_cap), vaddr_(vaddr), size_(size) {} uint64_t size)
: vmmo_cap_(vmmo_cap), paddr_(paddr), vaddr_(vaddr), size_(size) {}
uint64_t vmmo_cap_ = 0; uint64_t vmmo_cap_ = 0;
uint64_t paddr_ = 0;
uint64_t vaddr_ = 0; uint64_t vaddr_ = 0;
// TODO: We may want to differentiate between VMMO size and mapped size?
uint64_t size_ = 0; uint64_t size_ = 0;
}; };
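The left-hand OwnedMemoryRegion is move-only and unmaps plus releases its capability in the destructor, so ownership has to be handed around explicitly. A small sketch under that API (the helper function is illustrative):

```cpp
#include <mammoth/memory_region.h>  // include path as used in the framebuffer hunk

// Sketch: map a capability, use the mapping, and hand ownership back to the
// caller. OwnedMemoryRegion is move-only, so returning it moves the region;
// nothing is unmapped until the final owner is destroyed.
OwnedMemoryRegion MapAndUse(z_cap_t vmmo_cap) {
  OwnedMemoryRegion region = OwnedMemoryRegion::FromCapability(vmmo_cap);
  uint32_t* data = reinterpret_cast<uint32_t*>(region.vaddr());
  (void)data;  // ... read or write through the mapping here ...
  return region;  // transfers ownership via the move constructor
}
```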

View File

@ -43,7 +43,6 @@ z_err_t ParseInitPort(uint64_t init_port_cap) {
break; break;
case Z_BOOT_FRAMEBUFFER_INFO_VMMO: case Z_BOOT_FRAMEBUFFER_INFO_VMMO:
gBootFramebufferVmmoCap = init_cap; gBootFramebufferVmmoCap = init_cap;
break;
default: default:
dbgln("Unexpected init type {x}, continuing.", init_sig); dbgln("Unexpected init type {x}, continuing.", init_sig);
} }

View File

@ -5,66 +5,41 @@
#include "mammoth/debug.h" #include "mammoth/debug.h"
#include "mammoth/init.h" #include "mammoth/init.h"
OwnedMemoryRegion::OwnedMemoryRegion(OwnedMemoryRegion&& other) MappedMemoryRegion MappedMemoryRegion::DirectPhysical(uint64_t paddr,
: OwnedMemoryRegion(other.vmmo_cap_, other.vaddr_, other.size_) { uint64_t size) {
other.vmmo_cap_ = 0;
other.vaddr_ = 0;
other.size_ = 0;
}
OwnedMemoryRegion& OwnedMemoryRegion::operator=(OwnedMemoryRegion&& other) {
if (vmmo_cap_) {
check(ZCapRelease(vmmo_cap_));
}
vmmo_cap_ = other.vmmo_cap_;
vaddr_ = other.vaddr_;
size_ = other.size_;
other.vmmo_cap_ = 0;
other.vaddr_ = 0;
other.size_ = 0;
return *this;
}
OwnedMemoryRegion::~OwnedMemoryRegion() {
if (vmmo_cap_ != 0) {
check(ZAddressSpaceUnmap(gSelfVmasCap, vaddr_, vaddr_ + size_));
check(ZCapRelease(vmmo_cap_));
}
}
OwnedMemoryRegion OwnedMemoryRegion::FromCapability(z_cap_t vmmo_cap) {
uint64_t vaddr;
check(ZAddressSpaceMap(gSelfVmasCap, 0, vmmo_cap, &vaddr));
uint64_t size;
check(ZMemoryObjectInspect(vmmo_cap, &size));
// FIXME: get the size here.
return OwnedMemoryRegion(vmmo_cap, vaddr, size);
}
OwnedMemoryRegion OwnedMemoryRegion::ContiguousPhysical(uint64_t size,
uint64_t* paddr) {
uint64_t vmmo_cap;
check(ZMemoryObjectCreateContiguous(size, &vmmo_cap, paddr));
uint64_t vaddr;
check(ZAddressSpaceMap(gSelfVmasCap, 0, vmmo_cap, &vaddr));
return OwnedMemoryRegion(vmmo_cap, vaddr, size);
}
OwnedMemoryRegion OwnedMemoryRegion::DirectPhysical(uint64_t paddr,
uint64_t size) {
uint64_t vmmo_cap; uint64_t vmmo_cap;
check(ZMemoryObjectCreatePhysical(paddr, size, &vmmo_cap)); check(ZMemoryObjectCreatePhysical(paddr, size, &vmmo_cap));
uint64_t vaddr; uint64_t vaddr;
check(ZAddressSpaceMap(gSelfVmasCap, 0, vmmo_cap, &vaddr)); check(ZAddressSpaceMap(gSelfVmasCap, 0, vmmo_cap, &vaddr));
return OwnedMemoryRegion(vmmo_cap, vaddr, size);
return MappedMemoryRegion(vmmo_cap, paddr, vaddr, size);
} }
z_cap_t OwnedMemoryRegion::DuplicateCap() { MappedMemoryRegion MappedMemoryRegion::ContiguousPhysical(uint64_t size) {
z_cap_t cap; uint64_t vmmo_cap, paddr;
check(ZCapDuplicate(vmmo_cap_, kZionPerm_All, &cap)); check(ZMemoryObjectCreateContiguous(size, &vmmo_cap, &paddr));
return cap;
uint64_t vaddr;
check(ZAddressSpaceMap(gSelfVmasCap, 0, vmmo_cap, &vaddr));
return MappedMemoryRegion(vmmo_cap, paddr, vaddr, size);
}
MappedMemoryRegion MappedMemoryRegion::Default(uint64_t size) {
uint64_t vmmo_cap;
check(ZMemoryObjectCreate(size, &vmmo_cap));
uint64_t vaddr;
check(ZAddressSpaceMap(gSelfVmasCap, 0, vmmo_cap, &vaddr));
return MappedMemoryRegion(vmmo_cap, 0, vaddr, size);
}
MappedMemoryRegion MappedMemoryRegion::FromCapability(z_cap_t vmmo_cap) {
uint64_t vaddr;
check(ZAddressSpaceMap(gSelfVmasCap, 0, vmmo_cap, &vaddr));
// FIXME: get the size here.
return MappedMemoryRegion(vmmo_cap, 0, vaddr, 0);
} }
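On the left-hand side, ContiguousPhysical reports the physical address through an out-parameter and callers share the region by duplicating the capability rather than handing out the owned one, mirroring the Denali hunks below. A hedged sketch of that call pattern (the helper name is illustrative):

```cpp
#include <mammoth/memory_region.h>  // include path as used elsewhere in this diff

// Sketch: allocate a physically contiguous buffer, keep the mapping locally,
// and hand out a *duplicate* capability so the local region can still unmap
// and release its own cap when it is destroyed.
z_cap_t AllocateDmaBuffer(uint64_t size_bytes, uint64_t* paddr_out) {
  OwnedMemoryRegion region =
      OwnedMemoryRegion::ContiguousPhysical(size_bytes, paddr_out);
  // ... program the device with *paddr_out and wait for the DMA to finish ...
  return region.DuplicateCap();  // shared copy for another process
}
```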

View File

@ -13,8 +13,8 @@ AhciDevice::AhciDevice(AhciPort* port) : port_struct_(port) {
// 0x0-0x400 -> Command List // 0x0-0x400 -> Command List
// 0x400-0x500 -> Received FIS // 0x400-0x500 -> Received FIS
// 0x500-0x2500 -> Command Tables (0x100 each) (Max PRDT Length is 8 for now) // 0x500-0x2500 -> Command Tables (0x100 each) (Max PRDT Length is 8 for now)
uint64_t paddr; command_structures_ = MappedMemoryRegion::ContiguousPhysical(0x2500);
command_structures_ = OwnedMemoryRegion::ContiguousPhysical(0x2500, &paddr); uint64_t paddr = command_structures_.paddr();
command_list_ = reinterpret_cast<CommandList*>(command_structures_.vaddr()); command_list_ = reinterpret_cast<CommandList*>(command_structures_.vaddr());
port_struct_->command_list_base = paddr; port_struct_->command_list_base = paddr;

View File

@ -26,7 +26,7 @@ class AhciDevice {
private: private:
AhciPort* port_struct_ = nullptr; AhciPort* port_struct_ = nullptr;
OwnedMemoryRegion command_structures_; MappedMemoryRegion command_structures_;
CommandList* command_list_ = nullptr; CommandList* command_list_ = nullptr;
ReceivedFis* received_fis_ = nullptr; ReceivedFis* received_fis_ = nullptr;

View File

@ -21,8 +21,8 @@ void interrupt_thread(void* void_driver) {
} // namespace } // namespace
glcr::ErrorOr<glcr::UniquePtr<AhciDriver>> AhciDriver::Init( glcr::ErrorOr<glcr::UniquePtr<AhciDriver>> AhciDriver::Init(
OwnedMemoryRegion&& pci_region) { MappedMemoryRegion pci_region) {
glcr::UniquePtr<AhciDriver> driver(new AhciDriver(glcr::Move(pci_region))); glcr::UniquePtr<AhciDriver> driver(new AhciDriver(pci_region));
// RET_ERR(driver->LoadCapabilities()); // RET_ERR(driver->LoadCapabilities());
RET_ERR(driver->LoadHbaRegisters()); RET_ERR(driver->LoadHbaRegisters());
RET_ERR(driver->LoadDevices()); RET_ERR(driver->LoadDevices());
@ -192,7 +192,7 @@ glcr::ErrorCode AhciDriver::RegisterIrq() {
glcr::ErrorCode AhciDriver::LoadHbaRegisters() { glcr::ErrorCode AhciDriver::LoadHbaRegisters() {
ahci_region_ = ahci_region_ =
OwnedMemoryRegion ::DirectPhysical(pci_device_header_->abar, 0x1100); MappedMemoryRegion::DirectPhysical(pci_device_header_->abar, 0x1100);
ahci_hba_ = reinterpret_cast<AhciHba*>(ahci_region_.vaddr()); ahci_hba_ = reinterpret_cast<AhciHba*>(ahci_region_.vaddr());
num_ports_ = (ahci_hba_->capabilities & 0x1F) + 1; num_ports_ = (ahci_hba_->capabilities & 0x1F) + 1;
num_commands_ = ((ahci_hba_->capabilities & 0x1F00) >> 8) + 1; num_commands_ = ((ahci_hba_->capabilities & 0x1F00) >> 8) + 1;

View File

@ -11,7 +11,7 @@
class AhciDriver { class AhciDriver {
public: public:
static glcr::ErrorOr<glcr::UniquePtr<AhciDriver>> Init( static glcr::ErrorOr<glcr::UniquePtr<AhciDriver>> Init(
OwnedMemoryRegion&& ahci_phys); MappedMemoryRegion ahci_phys);
glcr::ErrorCode RegisterIrq(); glcr::ErrorCode RegisterIrq();
void InterruptLoop(); void InterruptLoop();
@ -22,9 +22,9 @@ class AhciDriver {
void DumpPorts(); void DumpPorts();
private: private:
OwnedMemoryRegion pci_region_; MappedMemoryRegion pci_region_;
PciDeviceHeader* pci_device_header_ = nullptr; PciDeviceHeader* pci_device_header_ = nullptr;
OwnedMemoryRegion ahci_region_; MappedMemoryRegion ahci_region_;
AhciHba* ahci_hba_ = nullptr; AhciHba* ahci_hba_ = nullptr;
// TODO: Allocate these dynamically. // TODO: Allocate these dynamically.
@ -40,8 +40,8 @@ class AhciDriver {
glcr::ErrorCode LoadHbaRegisters(); glcr::ErrorCode LoadHbaRegisters();
glcr::ErrorCode LoadDevices(); glcr::ErrorCode LoadDevices();
AhciDriver(OwnedMemoryRegion&& pci_region) AhciDriver(MappedMemoryRegion pci_region)
: pci_region_(glcr::Move(pci_region)), : pci_region_(pci_region),
pci_device_header_( pci_device_header_(
reinterpret_cast<PciDeviceHeader*>(pci_region_.vaddr())) {} reinterpret_cast<PciDeviceHeader*>(pci_region_.vaddr())) {}
}; };

View File

@ -18,9 +18,9 @@ uint64_t main(uint64_t init_port_cap) {
Empty empty; Empty empty;
AhciInfo ahci; AhciInfo ahci;
RET_ERR(stub.GetAhciInfo(empty, ahci)); RET_ERR(stub.GetAhciInfo(empty, ahci));
OwnedMemoryRegion ahci_region = MappedMemoryRegion ahci_region =
OwnedMemoryRegion::FromCapability(ahci.ahci_region()); MappedMemoryRegion::FromCapability(ahci.ahci_region());
ASSIGN_OR_RETURN(auto driver, AhciDriver::Init(glcr::Move(ahci_region))); ASSIGN_OR_RETURN(auto driver, AhciDriver::Init(ahci_region));
ASSIGN_OR_RETURN(glcr::UniquePtr<DenaliServer> server, ASSIGN_OR_RETURN(glcr::UniquePtr<DenaliServer> server,
DenaliServer::Create(*driver)); DenaliServer::Create(*driver));

View File

@ -18,11 +18,10 @@ glcr::ErrorCode DenaliServer::HandleRead(const ReadRequest& req,
ASSIGN_OR_RETURN(Mutex mutex, Mutex::Create()); ASSIGN_OR_RETURN(Mutex mutex, Mutex::Create());
RET_ERR(mutex.Lock()); RET_ERR(mutex.Lock());
uint64_t paddr; MappedMemoryRegion region =
OwnedMemoryRegion region = MappedMemoryRegion::ContiguousPhysical(req.size() * 512);
OwnedMemoryRegion::ContiguousPhysical(req.size() * 512, &paddr);
DmaReadCommand command(req.lba(), req.size(), paddr, mutex); DmaReadCommand command(req.lba(), req.size(), region.paddr(), mutex);
device->IssueCommand(&command); device->IssueCommand(&command);
// Wait for read operation to complete. // Wait for read operation to complete.
@ -31,7 +30,7 @@ glcr::ErrorCode DenaliServer::HandleRead(const ReadRequest& req,
resp.set_device_id(req.device_id()); resp.set_device_id(req.device_id());
resp.set_size(req.size()); resp.set_size(req.size());
resp.set_memory(region.DuplicateCap()); resp.set_memory(region.cap());
return glcr::OK; return glcr::OK;
} }
@ -41,9 +40,8 @@ glcr::ErrorCode DenaliServer::HandleReadMany(const ReadManyRequest& req,
ASSIGN_OR_RETURN(Mutex mutex, Mutex::Create()); ASSIGN_OR_RETURN(Mutex mutex, Mutex::Create());
RET_ERR(mutex.Lock()); RET_ERR(mutex.Lock());
uint64_t region_paddr; MappedMemoryRegion region =
OwnedMemoryRegion region = OwnedMemoryRegion::ContiguousPhysical( MappedMemoryRegion::ContiguousPhysical(req.lba().size() * 512);
req.lba().size() * 512, &region_paddr);
auto& vec = req.lba(); auto& vec = req.lba();
uint64_t curr_run_start = 0; uint64_t curr_run_start = 0;
@ -53,7 +51,7 @@ glcr::ErrorCode DenaliServer::HandleReadMany(const ReadManyRequest& req,
} }
uint64_t lba = vec.at(curr_run_start); uint64_t lba = vec.at(curr_run_start);
uint64_t size = (i - curr_run_start) + 1; uint64_t size = (i - curr_run_start) + 1;
uint64_t paddr = region_paddr + curr_run_start * 512; uint64_t paddr = region.paddr() + curr_run_start * 512;
DmaReadCommand command(lba, size, paddr, mutex); DmaReadCommand command(lba, size, paddr, mutex);
device->IssueCommand(&command); device->IssueCommand(&command);
@ -65,6 +63,6 @@ glcr::ErrorCode DenaliServer::HandleReadMany(const ReadManyRequest& req,
resp.set_device_id(req.device_id()); resp.set_device_id(req.device_id());
resp.set_size(req.lba().size()); resp.set_size(req.lba().size());
resp.set_memory(region.DuplicateCap()); resp.set_memory(region.cap());
return glcr::OK; return glcr::OK;
} }

View File

@ -17,11 +17,11 @@ class ReadRequest {
void ParseFromBytes(const glcr::ByteBuffer&, uint64_t offset, const glcr::CapBuffer&); void ParseFromBytes(const glcr::ByteBuffer&, uint64_t offset, const glcr::CapBuffer&);
uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset) const; uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset) const;
uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset, glcr::CapBuffer&) const; uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset, glcr::CapBuffer&) const;
const uint64_t& device_id() const { return device_id_; } uint64_t device_id() const { return device_id_; }
void set_device_id(const uint64_t& value) { device_id_ = value; } void set_device_id(const uint64_t& value) { device_id_ = value; }
const uint64_t& lba() const { return lba_; } uint64_t lba() const { return lba_; }
void set_lba(const uint64_t& value) { lba_ = value; } void set_lba(const uint64_t& value) { lba_ = value; }
const uint64_t& size() const { return size_; } uint64_t size() const { return size_; }
void set_size(const uint64_t& value) { size_ = value; } void set_size(const uint64_t& value) { size_ = value; }
private: private:
@ -43,7 +43,7 @@ class ReadManyRequest {
void ParseFromBytes(const glcr::ByteBuffer&, uint64_t offset, const glcr::CapBuffer&); void ParseFromBytes(const glcr::ByteBuffer&, uint64_t offset, const glcr::CapBuffer&);
uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset) const; uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset) const;
uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset, glcr::CapBuffer&) const; uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset, glcr::CapBuffer&) const;
const uint64_t& device_id() const { return device_id_; } uint64_t device_id() const { return device_id_; }
void set_device_id(const uint64_t& value) { device_id_ = value; } void set_device_id(const uint64_t& value) { device_id_ = value; }
const glcr::Vector<uint64_t>& lba() const { return lba_; } const glcr::Vector<uint64_t>& lba() const { return lba_; }
void add_lba(const uint64_t& value) { lba_.PushBack(value); } void add_lba(const uint64_t& value) { lba_.PushBack(value); }
@ -66,11 +66,11 @@ class ReadResponse {
void ParseFromBytes(const glcr::ByteBuffer&, uint64_t offset, const glcr::CapBuffer&); void ParseFromBytes(const glcr::ByteBuffer&, uint64_t offset, const glcr::CapBuffer&);
uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset) const; uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset) const;
uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset, glcr::CapBuffer&) const; uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset, glcr::CapBuffer&) const;
const uint64_t& device_id() const { return device_id_; } uint64_t device_id() const { return device_id_; }
void set_device_id(const uint64_t& value) { device_id_ = value; } void set_device_id(const uint64_t& value) { device_id_ = value; }
const uint64_t& size() const { return size_; } uint64_t size() const { return size_; }
void set_size(const uint64_t& value) { size_ = value; } void set_size(const uint64_t& value) { size_ = value; }
const z_cap_t& memory() const { return memory_; } z_cap_t memory() const { return memory_; }
void set_memory(const z_cap_t& value) { memory_ = value; } void set_memory(const z_cap_t& value) { memory_ = value; }
private: private:

View File

@ -1,10 +1,12 @@
#include "framebuffer/framebuffer.h" #include "framebuffer/framebuffer.h"
#include <mammoth/memory_region.h>
Framebuffer::Framebuffer(const FramebufferInfo& info) : fb_info_(info) { Framebuffer::Framebuffer(const FramebufferInfo& info) : fb_info_(info) {
uint64_t buff_size_bytes = fb_info_.height() * fb_info_.pitch(); uint64_t buff_size_bytes = fb_info_.height() * fb_info_.pitch();
fb_memory_ = OwnedMemoryRegion::DirectPhysical(fb_info_.address_phys(), MappedMemoryRegion region = MappedMemoryRegion::DirectPhysical(
buff_size_bytes); fb_info_.address_phys(), buff_size_bytes);
fb_ = reinterpret_cast<uint32_t*>(fb_memory_.vaddr()); fb_ = reinterpret_cast<uint32_t*>(region.vaddr());
} }
void Framebuffer::DrawPixel(uint32_t row, uint32_t col, uint32_t pixel) { void Framebuffer::DrawPixel(uint32_t row, uint32_t col, uint32_t pixel) {

View File

@ -1,6 +1,5 @@
#pragma once #pragma once
#include <mammoth/memory_region.h>
#include <yellowstone/yellowstone.yunq.h> #include <yellowstone/yellowstone.yunq.h>
class Framebuffer { class Framebuffer {
@ -13,7 +12,5 @@ class Framebuffer {
// FIXME: Implement Yunq copy or move so we // FIXME: Implement Yunq copy or move so we
// don't have to store a reference here. // don't have to store a reference here.
const FramebufferInfo& fb_info_; const FramebufferInfo& fb_info_;
OwnedMemoryRegion fb_memory_;
uint32_t* fb_; uint32_t* fb_;
}; };

View File

@ -13,12 +13,12 @@ glcr::ErrorOr<glcr::SharedPtr<Ext2BlockReader>> Ext2BlockReader::Init(
req.set_size(2); req.set_size(2);
ReadResponse resp; ReadResponse resp;
RET_ERR(client.Read(req, resp)); RET_ERR(client.Read(req, resp));
OwnedMemoryRegion superblock = MappedMemoryRegion superblock =
OwnedMemoryRegion::FromCapability(resp.memory()); MappedMemoryRegion::FromCapability(resp.memory());
return glcr::SharedPtr<Ext2BlockReader>( return glcr::SharedPtr<Ext2BlockReader>(
new Ext2BlockReader(glcr::Move(client), denali_info.device_id(), new Ext2BlockReader(glcr::Move(client), denali_info.device_id(),
denali_info.lba_offset(), glcr::Move(superblock))); denali_info.lba_offset(), superblock));
} }
Superblock* Ext2BlockReader::GetSuperblock() { Superblock* Ext2BlockReader::GetSuperblock() {
@ -59,11 +59,11 @@ uint64_t Ext2BlockReader::InodeTableBlockSize() {
return (InodeSize() * GetSuperblock()->inodes_per_group) / BlockSize(); return (InodeSize() * GetSuperblock()->inodes_per_group) / BlockSize();
} }
glcr::ErrorOr<OwnedMemoryRegion> Ext2BlockReader::ReadBlock( glcr::ErrorOr<MappedMemoryRegion> Ext2BlockReader::ReadBlock(
uint64_t block_number) { uint64_t block_number) {
return ReadBlocks(block_number, 1); return ReadBlocks(block_number, 1);
} }
glcr::ErrorOr<OwnedMemoryRegion> Ext2BlockReader::ReadBlocks( glcr::ErrorOr<MappedMemoryRegion> Ext2BlockReader::ReadBlocks(
uint64_t block_number, uint64_t num_blocks) { uint64_t block_number, uint64_t num_blocks) {
ReadRequest req; ReadRequest req;
req.set_device_id(device_id_); req.set_device_id(device_id_);
@ -71,10 +71,10 @@ glcr::ErrorOr<OwnedMemoryRegion> Ext2BlockReader::ReadBlocks(
req.set_size(num_blocks * SectorsPerBlock()); req.set_size(num_blocks * SectorsPerBlock());
ReadResponse resp; ReadResponse resp;
RET_ERR(denali_.Read(req, resp)); RET_ERR(denali_.Read(req, resp));
return OwnedMemoryRegion::FromCapability(resp.memory()); return MappedMemoryRegion::FromCapability(resp.memory());
} }
glcr::ErrorOr<OwnedMemoryRegion> Ext2BlockReader::ReadBlocks( glcr::ErrorOr<MappedMemoryRegion> Ext2BlockReader::ReadBlocks(
const glcr::Vector<uint64_t>& block_list) { const glcr::Vector<uint64_t>& block_list) {
ReadManyRequest req; ReadManyRequest req;
req.set_device_id(device_id_); req.set_device_id(device_id_);
@ -88,13 +88,13 @@ glcr::ErrorOr<OwnedMemoryRegion> Ext2BlockReader::ReadBlocks(
} }
ReadResponse resp; ReadResponse resp;
RET_ERR(denali_.ReadMany(req, resp)); RET_ERR(denali_.ReadMany(req, resp));
return OwnedMemoryRegion::FromCapability(resp.memory()); return MappedMemoryRegion::FromCapability(resp.memory());
} }
Ext2BlockReader::Ext2BlockReader(DenaliClient&& denali, uint64_t device_id, Ext2BlockReader::Ext2BlockReader(DenaliClient&& denali, uint64_t device_id,
uint64_t lba_offset, uint64_t lba_offset,
OwnedMemoryRegion&& super_block) MappedMemoryRegion super_block)
: denali_(glcr::Move(denali)), : denali_(glcr::Move(denali)),
device_id_(device_id), device_id_(device_id),
lba_offset_(lba_offset), lba_offset_(lba_offset),
super_block_region_(glcr::Move(super_block)) {} super_block_region_(super_block) {}

View File

@ -29,21 +29,21 @@ class Ext2BlockReader {
// because the last table will likely be smaller. // because the last table will likely be smaller.
uint64_t InodeTableBlockSize(); uint64_t InodeTableBlockSize();
glcr::ErrorOr<OwnedMemoryRegion> ReadBlock(uint64_t block_number); glcr::ErrorOr<MappedMemoryRegion> ReadBlock(uint64_t block_number);
glcr::ErrorOr<OwnedMemoryRegion> ReadBlocks(uint64_t block_number, glcr::ErrorOr<MappedMemoryRegion> ReadBlocks(uint64_t block_number,
uint64_t num_blocks); uint64_t num_blocks);
glcr::ErrorOr<OwnedMemoryRegion> ReadBlocks( glcr::ErrorOr<MappedMemoryRegion> ReadBlocks(
const glcr::Vector<uint64_t>& block_list); const glcr::Vector<uint64_t>& block_list);
private: private:
DenaliClient denali_; DenaliClient denali_;
uint64_t device_id_; uint64_t device_id_;
uint64_t lba_offset_; uint64_t lba_offset_;
OwnedMemoryRegion super_block_region_; MappedMemoryRegion super_block_region_;
Ext2BlockReader(DenaliClient&& denali, uint64_t device_id, Ext2BlockReader(DenaliClient&& denali, uint64_t device_id,
uint64_t lba_offset, OwnedMemoryRegion&& super_block); uint64_t lba_offset, MappedMemoryRegion super_block);
uint64_t SectorsPerBlock(); uint64_t SectorsPerBlock();
}; };

View File

@ -8,10 +8,11 @@ glcr::ErrorOr<Ext2Driver> Ext2Driver::Init(const DenaliInfo& denali_info) {
Ext2BlockReader::Init(glcr::Move(denali_info))); Ext2BlockReader::Init(glcr::Move(denali_info)));
ASSIGN_OR_RETURN( ASSIGN_OR_RETURN(
OwnedMemoryRegion bgdt, MappedMemoryRegion bgdt,
reader->ReadBlocks(reader->BgdtBlockNum(), reader->BgdtBlockSize())); reader->ReadBlocks(reader->BgdtBlockNum(), reader->BgdtBlockSize()));
glcr::UniquePtr<InodeTable> inode_table( BlockGroupDescriptor* bgds =
new InodeTable(reader, glcr::Move(bgdt))); reinterpret_cast<BlockGroupDescriptor*>(bgdt.vaddr());
glcr::UniquePtr<InodeTable> inode_table(new InodeTable(reader, bgds));
return Ext2Driver(reader, glcr::Move(inode_table)); return Ext2Driver(reader, glcr::Move(inode_table));
} }
@ -63,7 +64,7 @@ glcr::ErrorOr<glcr::Vector<DirEntry>> Ext2Driver::ReadDirectory(
glcr::Vector<DirEntry> directory; glcr::Vector<DirEntry> directory;
for (uint64_t i = 0; i < real_block_cnt; i++) { for (uint64_t i = 0; i < real_block_cnt; i++) {
dbgln("Getting block {x}", inode->block[i]); dbgln("Getting block {x}", inode->block[i]);
ASSIGN_OR_RETURN(OwnedMemoryRegion block, ASSIGN_OR_RETURN(MappedMemoryRegion block,
ext2_reader_->ReadBlock(inode->block[i])); ext2_reader_->ReadBlock(inode->block[i]));
uint64_t addr = block.vaddr(); uint64_t addr = block.vaddr();
while (addr < block.vaddr() + ext2_reader_->BlockSize()) { while (addr < block.vaddr() + ext2_reader_->BlockSize()) {
@ -86,7 +87,7 @@ glcr::ErrorOr<glcr::Vector<DirEntry>> Ext2Driver::ReadDirectory(
return directory; return directory;
} }
glcr::ErrorOr<OwnedMemoryRegion> Ext2Driver::ReadFile(uint64_t inode_number) { glcr::ErrorOr<MappedMemoryRegion> Ext2Driver::ReadFile(uint64_t inode_number) {
ASSIGN_OR_RETURN(Inode * inode, inode_table_->GetInode(inode_number)); ASSIGN_OR_RETURN(Inode * inode, inode_table_->GetInode(inode_number));
if (!(inode->mode & 0x8000)) { if (!(inode->mode & 0x8000)) {
@ -108,7 +109,7 @@ glcr::ErrorOr<OwnedMemoryRegion> Ext2Driver::ReadFile(uint64_t inode_number) {
return glcr::UNIMPLEMENTED; return glcr::UNIMPLEMENTED;
} }
OwnedMemoryRegion indirect_block; MappedMemoryRegion indirect_block;
if (inode->block[12]) { if (inode->block[12]) {
ASSIGN_OR_RETURN(indirect_block, ext2_reader_->ReadBlock(inode->block[12])); ASSIGN_OR_RETURN(indirect_block, ext2_reader_->ReadBlock(inode->block[12]));
} }

View File

@ -18,7 +18,7 @@ class Ext2Driver {
glcr::ErrorOr<glcr::Vector<DirEntry>> ReadDirectory(uint32_t inode_number); glcr::ErrorOr<glcr::Vector<DirEntry>> ReadDirectory(uint32_t inode_number);
glcr::ErrorOr<OwnedMemoryRegion> ReadFile(uint64_t inode_number); glcr::ErrorOr<MappedMemoryRegion> ReadFile(uint64_t inode_number);
private: private:
glcr::SharedPtr<Ext2BlockReader> ext2_reader_; glcr::SharedPtr<Ext2BlockReader> ext2_reader_;

View File

@ -3,10 +3,8 @@
#include <mammoth/debug.h> #include <mammoth/debug.h>
InodeTable::InodeTable(const glcr::SharedPtr<Ext2BlockReader>& reader, InodeTable::InodeTable(const glcr::SharedPtr<Ext2BlockReader>& reader,
OwnedMemoryRegion&& bgdt_region) BlockGroupDescriptor* bgdt)
: ext2_reader_(reader), : ext2_reader_(reader), bgdt_(bgdt) {
bgdt_region_(glcr::Move(bgdt_region)),
bgdt_(reinterpret_cast<BlockGroupDescriptor*>(bgdt_region_.vaddr())) {
inode_tables_.Resize(ext2_reader_->NumberOfBlockGroups()); inode_tables_.Resize(ext2_reader_->NumberOfBlockGroups());
} }

View File

@ -8,16 +8,15 @@
class InodeTable { class InodeTable {
public: public:
InodeTable(const glcr::SharedPtr<Ext2BlockReader>& driver, InodeTable(const glcr::SharedPtr<Ext2BlockReader>& driver,
OwnedMemoryRegion&& bgdt_region); BlockGroupDescriptor* bgdt);
glcr::ErrorOr<Inode*> GetInode(uint32_t inode_num); glcr::ErrorOr<Inode*> GetInode(uint32_t inode_num);
private: private:
glcr::SharedPtr<Ext2BlockReader> ext2_reader_; glcr::SharedPtr<Ext2BlockReader> ext2_reader_;
OwnedMemoryRegion bgdt_region_;
BlockGroupDescriptor* bgdt_; BlockGroupDescriptor* bgdt_;
glcr::Vector<OwnedMemoryRegion> inode_tables_; glcr::Vector<MappedMemoryRegion> inode_tables_;
glcr::ErrorOr<Inode*> GetRootOfInodeTable(uint64_t block_group_num); glcr::ErrorOr<Inode*> GetRootOfInodeTable(uint64_t block_group_num);
}; };

View File

@ -17,7 +17,7 @@ class OpenFileRequest {
void ParseFromBytes(const glcr::ByteBuffer&, uint64_t offset, const glcr::CapBuffer&); void ParseFromBytes(const glcr::ByteBuffer&, uint64_t offset, const glcr::CapBuffer&);
uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset) const; uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset) const;
uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset, glcr::CapBuffer&) const; uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset, glcr::CapBuffer&) const;
const glcr::String& path() const { return path_; } glcr::String path() const { return path_; }
void set_path(const glcr::String& value) { path_ = value; } void set_path(const glcr::String& value) { path_ = value; }
private: private:
@ -37,11 +37,11 @@ class OpenFileResponse {
void ParseFromBytes(const glcr::ByteBuffer&, uint64_t offset, const glcr::CapBuffer&); void ParseFromBytes(const glcr::ByteBuffer&, uint64_t offset, const glcr::CapBuffer&);
uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset) const; uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset) const;
uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset, glcr::CapBuffer&) const; uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset, glcr::CapBuffer&) const;
const glcr::String& path() const { return path_; } glcr::String path() const { return path_; }
void set_path(const glcr::String& value) { path_ = value; } void set_path(const glcr::String& value) { path_ = value; }
const uint64_t& size() const { return size_; } uint64_t size() const { return size_; }
void set_size(const uint64_t& value) { size_ = value; } void set_size(const uint64_t& value) { size_ = value; }
const z_cap_t& memory() const { return memory_; } z_cap_t memory() const { return memory_; }
void set_memory(const z_cap_t& value) { memory_ = value; } void set_memory(const z_cap_t& value) { memory_ = value; }
private: private:

View File

@ -38,7 +38,7 @@ glcr::ErrorCode VFSServer::HandleOpenFile(const OpenFileRequest& request,
} }
uint64_t inode_num; uint64_t inode_num;
OwnedMemoryRegion region; MappedMemoryRegion region;
for (uint64_t j = 0; j < files.size(); j++) { for (uint64_t j = 0; j < files.size(); j++) {
if (path_tokens.at(path_tokens.size() - 1) == if (path_tokens.at(path_tokens.size() - 1) ==
glcr::StringView(files.at(j).name, files.at(j).name_len)) { glcr::StringView(files.at(j).name, files.at(j).name_len)) {
@ -53,10 +53,7 @@ glcr::ErrorCode VFSServer::HandleOpenFile(const OpenFileRequest& request,
} }
response.set_path(request.path()); response.set_path(request.path());
// FIXME: There isn't really a reason we need to map the file into memory then response.set_memory(region.cap());
// duplicate the cap. In the future just get the cap from the read then pass
// it to the caller directly.
response.set_memory(region.DuplicateCap());
// TODO: Consider folding this up into the actual read call. // TODO: Consider folding this up into the actual read call.
ASSIGN_OR_RETURN(Inode * inode, driver_.GetInode(inode_num)); ASSIGN_OR_RETURN(Inode * inode, driver_.GetInode(inode_num));
// FIXME: This technically only sets the lower 32 bits. // FIXME: This technically only sets the lower 32 bits.

View File

@ -64,8 +64,8 @@ glcr::ErrorCode GptReader::ParsePartitionTables() {
req.set_size(2); req.set_size(2);
ReadResponse resp; ReadResponse resp;
RET_ERR(denali_->Read(req, resp)); RET_ERR(denali_->Read(req, resp));
OwnedMemoryRegion lba_1_and_2 = MappedMemoryRegion lba_1_and_2 =
OwnedMemoryRegion::FromCapability(resp.memory()); MappedMemoryRegion::FromCapability(resp.memory());
uint16_t* mbr_sig = reinterpret_cast<uint16_t*>(lba_1_and_2.vaddr() + 0x1FE); uint16_t* mbr_sig = reinterpret_cast<uint16_t*>(lba_1_and_2.vaddr() + 0x1FE);
if (*mbr_sig != 0xAA55) { if (*mbr_sig != 0xAA55) {
dbgln("Invalid MBR Sig: {x}", *mbr_sig); dbgln("Invalid MBR Sig: {x}", *mbr_sig);
@ -106,8 +106,8 @@ glcr::ErrorCode GptReader::ParsePartitionTables() {
req.set_lba(header->lba_partition_entries); req.set_lba(header->lba_partition_entries);
req.set_size(num_blocks); req.set_size(num_blocks);
RET_ERR(denali_->Read(req, resp)); RET_ERR(denali_->Read(req, resp));
OwnedMemoryRegion part_table = MappedMemoryRegion part_table =
OwnedMemoryRegion::FromCapability(resp.memory()); MappedMemoryRegion::FromCapability(resp.memory());
for (uint64_t i = 0; i < num_partitions; i++) { for (uint64_t i = 0; i < num_partitions; i++) {
PartitionEntry* entry = reinterpret_cast<PartitionEntry*>( PartitionEntry* entry = reinterpret_cast<PartitionEntry*>(
part_table.vaddr() + (i * entry_size)); part_table.vaddr() + (i * entry_size));

View File

@ -17,9 +17,9 @@ class RegisterEndpointRequest {
void ParseFromBytes(const glcr::ByteBuffer&, uint64_t offset, const glcr::CapBuffer&); void ParseFromBytes(const glcr::ByteBuffer&, uint64_t offset, const glcr::CapBuffer&);
uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset) const; uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset) const;
uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset, glcr::CapBuffer&) const; uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset, glcr::CapBuffer&) const;
const glcr::String& endpoint_name() const { return endpoint_name_; } glcr::String endpoint_name() const { return endpoint_name_; }
void set_endpoint_name(const glcr::String& value) { endpoint_name_ = value; } void set_endpoint_name(const glcr::String& value) { endpoint_name_ = value; }
const z_cap_t& endpoint_capability() const { return endpoint_capability_; } z_cap_t endpoint_capability() const { return endpoint_capability_; }
void set_endpoint_capability(const z_cap_t& value) { endpoint_capability_ = value; } void set_endpoint_capability(const z_cap_t& value) { endpoint_capability_ = value; }
private: private:
@ -57,9 +57,9 @@ class AhciInfo {
void ParseFromBytes(const glcr::ByteBuffer&, uint64_t offset, const glcr::CapBuffer&); void ParseFromBytes(const glcr::ByteBuffer&, uint64_t offset, const glcr::CapBuffer&);
uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset) const; uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset) const;
uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset, glcr::CapBuffer&) const; uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset, glcr::CapBuffer&) const;
const z_cap_t& ahci_region() const { return ahci_region_; } z_cap_t ahci_region() const { return ahci_region_; }
void set_ahci_region(const z_cap_t& value) { ahci_region_ = value; } void set_ahci_region(const z_cap_t& value) { ahci_region_ = value; }
const uint64_t& region_length() const { return region_length_; } uint64_t region_length() const { return region_length_; }
void set_region_length(const uint64_t& value) { region_length_ = value; } void set_region_length(const uint64_t& value) { region_length_ = value; }
private: private:
@ -80,29 +80,29 @@ class FramebufferInfo {
void ParseFromBytes(const glcr::ByteBuffer&, uint64_t offset, const glcr::CapBuffer&); void ParseFromBytes(const glcr::ByteBuffer&, uint64_t offset, const glcr::CapBuffer&);
uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset) const; uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset) const;
uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset, glcr::CapBuffer&) const; uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset, glcr::CapBuffer&) const;
const uint64_t& address_phys() const { return address_phys_; } uint64_t address_phys() const { return address_phys_; }
void set_address_phys(const uint64_t& value) { address_phys_ = value; } void set_address_phys(const uint64_t& value) { address_phys_ = value; }
const uint64_t& width() const { return width_; } uint64_t width() const { return width_; }
void set_width(const uint64_t& value) { width_ = value; } void set_width(const uint64_t& value) { width_ = value; }
const uint64_t& height() const { return height_; } uint64_t height() const { return height_; }
void set_height(const uint64_t& value) { height_ = value; } void set_height(const uint64_t& value) { height_ = value; }
const uint64_t& pitch() const { return pitch_; } uint64_t pitch() const { return pitch_; }
void set_pitch(const uint64_t& value) { pitch_ = value; } void set_pitch(const uint64_t& value) { pitch_ = value; }
const uint64_t& bpp() const { return bpp_; } uint64_t bpp() const { return bpp_; }
void set_bpp(const uint64_t& value) { bpp_ = value; } void set_bpp(const uint64_t& value) { bpp_ = value; }
const uint64_t& memory_model() const { return memory_model_; } uint64_t memory_model() const { return memory_model_; }
void set_memory_model(const uint64_t& value) { memory_model_ = value; } void set_memory_model(const uint64_t& value) { memory_model_ = value; }
const uint64_t& red_mask_size() const { return red_mask_size_; } uint64_t red_mask_size() const { return red_mask_size_; }
void set_red_mask_size(const uint64_t& value) { red_mask_size_ = value; } void set_red_mask_size(const uint64_t& value) { red_mask_size_ = value; }
const uint64_t& red_mask_shift() const { return red_mask_shift_; } uint64_t red_mask_shift() const { return red_mask_shift_; }
void set_red_mask_shift(const uint64_t& value) { red_mask_shift_ = value; } void set_red_mask_shift(const uint64_t& value) { red_mask_shift_ = value; }
const uint64_t& green_mask_size() const { return green_mask_size_; } uint64_t green_mask_size() const { return green_mask_size_; }
void set_green_mask_size(const uint64_t& value) { green_mask_size_ = value; } void set_green_mask_size(const uint64_t& value) { green_mask_size_ = value; }
const uint64_t& green_mask_shift() const { return green_mask_shift_; } uint64_t green_mask_shift() const { return green_mask_shift_; }
void set_green_mask_shift(const uint64_t& value) { green_mask_shift_ = value; } void set_green_mask_shift(const uint64_t& value) { green_mask_shift_ = value; }
const uint64_t& blue_mask_size() const { return blue_mask_size_; } uint64_t blue_mask_size() const { return blue_mask_size_; }
void set_blue_mask_size(const uint64_t& value) { blue_mask_size_ = value; } void set_blue_mask_size(const uint64_t& value) { blue_mask_size_ = value; }
const uint64_t& blue_mask_shift() const { return blue_mask_shift_; } uint64_t blue_mask_shift() const { return blue_mask_shift_; }
void set_blue_mask_shift(const uint64_t& value) { blue_mask_shift_ = value; } void set_blue_mask_shift(const uint64_t& value) { blue_mask_shift_ = value; }
private: private:
@ -133,11 +133,11 @@ class DenaliInfo {
void ParseFromBytes(const glcr::ByteBuffer&, uint64_t offset, const glcr::CapBuffer&); void ParseFromBytes(const glcr::ByteBuffer&, uint64_t offset, const glcr::CapBuffer&);
uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset) const; uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset) const;
uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset, glcr::CapBuffer&) const; uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset, glcr::CapBuffer&) const;
const z_cap_t& denali_endpoint() const { return denali_endpoint_; } z_cap_t denali_endpoint() const { return denali_endpoint_; }
void set_denali_endpoint(const z_cap_t& value) { denali_endpoint_ = value; } void set_denali_endpoint(const z_cap_t& value) { denali_endpoint_ = value; }
const uint64_t& device_id() const { return device_id_; } uint64_t device_id() const { return device_id_; }
void set_device_id(const uint64_t& value) { device_id_ = value; } void set_device_id(const uint64_t& value) { device_id_ = value; }
const uint64_t& lba_offset() const { return lba_offset_; } uint64_t lba_offset() const { return lba_offset_; }
void set_lba_offset(const uint64_t& value) { lba_offset_ = value; } void set_lba_offset(const uint64_t& value) { lba_offset_ = value; }
private: private:

View File

@ -43,8 +43,8 @@ uint64_t main(uint64_t port_cap) {
OpenFileResponse response; OpenFileResponse response;
check(vfs_client->OpenFile(request, response)); check(vfs_client->OpenFile(request, response));
OwnedMemoryRegion filemem = MappedMemoryRegion filemem =
OwnedMemoryRegion::FromCapability(response.memory()); MappedMemoryRegion::FromCapability(response.memory());
glcr::String file(reinterpret_cast<const char*>(filemem.vaddr()), glcr::String file(reinterpret_cast<const char*>(filemem.vaddr()),
response.size()); response.size());

View File

@ -60,8 +60,8 @@ glcr::ErrorCode YellowstoneServer::HandleGetAhciInfo(const Empty&,
glcr::ErrorCode YellowstoneServer::HandleGetFramebufferInfo( glcr::ErrorCode YellowstoneServer::HandleGetFramebufferInfo(
const Empty&, FramebufferInfo& info) { const Empty&, FramebufferInfo& info) {
// FIXME: Don't do this for each request. // FIXME: Don't do this for each request.
OwnedMemoryRegion region = MappedMemoryRegion region =
OwnedMemoryRegion::FromCapability(gBootFramebufferVmmoCap); MappedMemoryRegion::FromCapability(gBootFramebufferVmmoCap);
ZFramebufferInfo* fb = reinterpret_cast<ZFramebufferInfo*>(region.vaddr()); ZFramebufferInfo* fb = reinterpret_cast<ZFramebufferInfo*>(region.vaddr());
info.set_address_phys(fb->address_phys); info.set_address_phys(fb->address_phys);
@ -92,7 +92,7 @@ glcr::ErrorCode YellowstoneServer::HandleGetDenali(const Empty&,
glcr::ErrorCode YellowstoneServer::HandleRegisterEndpoint( glcr::ErrorCode YellowstoneServer::HandleRegisterEndpoint(
const RegisterEndpointRequest& req, Empty&) { const RegisterEndpointRequest& req, Empty&) {
dbgln("Registering {}.", req.endpoint_name().view()); dbgln("Registering {}.", req.endpoint_name());
if (req.endpoint_name() == "denali") { if (req.endpoint_name() == "denali") {
// FIXME: Rather than blocking and calling the denali service // FIXME: Rather than blocking and calling the denali service
// immediately we should signal the main thread that it can continue init. // immediately we should signal the main thread that it can continue init.

View File

@ -23,7 +23,7 @@ class {{message.name}} {
{%- for field in message.fields %} {%- for field in message.fields %}
{%- if not field.repeated %} {%- if not field.repeated %}
const {{field.cpp_type()}}& {{field.name}}() const { return {{field.name}}_; } {{field.cpp_type()}} {{field.name}}() const { return {{field.name}}_; }
void set_{{field.name}}(const {{field.cpp_type()}}& value) { {{field.name}}_ = value; } void set_{{field.name}}(const {{field.cpp_type()}}& value) { {{field.name}}_ = value; }
{%- else %} {%- else %}
const glcr::Vector<{{field.cpp_type()}}>& {{field.name}}() const { return {{field.name}}_; } const glcr::Vector<{{field.cpp_type()}}>& {{field.name}}() const { return {{field.name}}_; }

View File

@ -11,7 +11,6 @@ add_executable(zion
interrupt/interrupt.cpp interrupt/interrupt.cpp
interrupt/interrupt_enter.s interrupt/interrupt_enter.s
interrupt/timer.cpp interrupt/timer.cpp
lib/memory_mapping_tree.cpp
lib/message_queue.cpp lib/message_queue.cpp
loader/init_loader.cpp loader/init_loader.cpp
memory/kernel_heap.cpp memory/kernel_heap.cpp
@ -64,7 +63,7 @@ target_link_libraries(zion
# -mno-red-zone -- Don't put data below the stack pointer (clobbered by interrupts). # -mno-red-zone -- Don't put data below the stack pointer (clobbered by interrupts).
# -mcmodel=kernel -- Assume the kernel code is running in the higher half. # -mcmodel=kernel -- Assume the kernel code is running in the higher half.
# -mgeneral-regs-only -- Prevent GCC from using a whole host of nonsense registers (that we have to enable). # -mgeneral-regs-only -- Prevent GCC from using a whole host of nonsense registers (that we have to enable).
set(_Z_COMPILE_FLAGS "${CMAKE_CXX_FLAGS} -c -ffreestanding -fno-rtti -fno-exceptions -fno-use-cxa-atexit -nostdlib -mabi=sysv -mno-red-zone -mcmodel=kernel -mgeneral-regs-only") set(_Z_COMPILE_FLAGS "${CMAKE_CXX_FLAGS} -c -ffreestanding -fno-rtti -fno-exceptions -nostdlib -mabi=sysv -mno-red-zone -mcmodel=kernel -mgeneral-regs-only")
set(_Z_LINK_SCRIPT "${CMAKE_CURRENT_SOURCE_DIR}/linker.ld") set(_Z_LINK_SCRIPT "${CMAKE_CURRENT_SOURCE_DIR}/linker.ld")

View File

@ -17,8 +17,7 @@ uint64_t CapabilityTable::AddExistingCapability(
glcr::RefPtr<Capability> CapabilityTable::GetCapability(uint64_t id) { glcr::RefPtr<Capability> CapabilityTable::GetCapability(uint64_t id) {
MutexHolder h(lock_); MutexHolder h(lock_);
if (!capabilities_.Contains(id)) { if (!capabilities_.Contains(id)) {
dbgln("Bad cap access {}", id); panic("Bad cap access {}", id);
return {};
} }
return capabilities_.at(id); return capabilities_.at(id);
} }
@ -26,8 +25,7 @@ glcr::RefPtr<Capability> CapabilityTable::GetCapability(uint64_t id) {
glcr::RefPtr<Capability> CapabilityTable::ReleaseCapability(uint64_t id) { glcr::RefPtr<Capability> CapabilityTable::ReleaseCapability(uint64_t id) {
MutexHolder h(lock_); MutexHolder h(lock_);
if (!capabilities_.Contains(id)) { if (!capabilities_.Contains(id)) {
dbgln("Bad cap release {}", id); panic("Bad cap release {}", id);
return {};
} }
auto cap = capabilities_.at(id); auto cap = capabilities_.at(id);
(void)capabilities_.Delete(id); (void)capabilities_.Delete(id);

View File

@ -9,7 +9,7 @@
void dbgln(const glcr::StringView& str); void dbgln(const glcr::StringView& str);
template <typename... Args> template <typename... Args>
void dbgln(const char* str, Args&&... args) { void dbgln(const char* str, Args... args) {
char buffer[256]; char buffer[256];
glcr::FixedStringBuilder builder(buffer, 256); glcr::FixedStringBuilder builder(buffer, 256);
glcr::StrFormatIntoBuffer(builder, str, args...); glcr::StrFormatIntoBuffer(builder, str, args...);
@ -17,12 +17,12 @@ void dbgln(const char* str, Args&&... args) {
} }
template <typename... Args> template <typename... Args>
void dbgln_large(const char* str, Args&&... args) { void dbgln_large(const char* str, Args... args) {
dbgln(glcr::StrFormat(str, args...)); dbgln(glcr::StrFormat(str, args...));
} }
template <typename... Args> template <typename... Args>
void panic(const char* str, Args&&... args) { void panic(const char* str, Args... args) {
dbgln(str, args...); dbgln(str, args...);
dbgln("PANIC"); dbgln("PANIC");
asm volatile("cli; hlt;"); asm volatile("cli; hlt;");
@ -34,6 +34,6 @@ void panic(const char* str, Args&&... args) {
panic(str); \ panic(str); \
} \ } \
} }
#define UNREACHABLE \ #define UNREACHABLE \
panic("Unreachable {}, {}", glcr::StringView(__FILE__), __LINE__); \ panic("Unreachable {}, {}", __FILE__, __LINE__); \
__builtin_unreachable(); __builtin_unreachable();
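A minimal usage sketch for the helpers above, assuming the glcr::StrFormat placeholder syntax ({} for default formatting, {x} for hexadecimal) that appears elsewhere in this change; the caller and its values are hypothetical:

#include <stdint.h>

#include "debug/debug.h"

void ReportMapping(uint64_t vaddr, uint64_t paddr) {
  // dbgln() formats into a fixed 256-byte buffer and logs the result.
  dbgln("Mapping P({x}) at V({x})", paddr, vaddr);
  if (paddr == 0) {
    // panic() logs the formatted message, prints PANIC, then halts (cli; hlt).
    panic("Invalid physical address for V({x})", vaddr);
  }
}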

View File

@ -19,8 +19,6 @@ SYS1(ThreadWait, z_cap_t, thread_cap);
SYS4(AddressSpaceMap, z_cap_t, vmas_cap, uint64_t, vmas_offset, z_cap_t, SYS4(AddressSpaceMap, z_cap_t, vmas_cap, uint64_t, vmas_offset, z_cap_t,
vmmo_cap, uint64_t*, vaddr); vmmo_cap, uint64_t*, vaddr);
SYS3(AddressSpaceUnmap, z_cap_t, vmas_cap, uint64_t, lower_addr, uint64_t,
upper_addr);
SYS2(MemoryObjectCreate, uint64_t, size, z_cap_t*, vmmo_cap); SYS2(MemoryObjectCreate, uint64_t, size, z_cap_t*, vmmo_cap);
SYS3(MemoryObjectCreatePhysical, uint64_t, paddr, uint64_t, size, z_cap_t*, SYS3(MemoryObjectCreatePhysical, uint64_t, paddr, uint64_t, size, z_cap_t*,
@ -30,7 +28,6 @@ SYS3(MemoryObjectCreateContiguous, uint64_t, size, z_cap_t*, vmmo_cap,
SYS4(MemoryObjectDuplicate, z_cap_t, vmmo_cap, uint64_t, base_offset, uint64_t, SYS4(MemoryObjectDuplicate, z_cap_t, vmmo_cap, uint64_t, base_offset, uint64_t,
length, z_cap_t*, new_vmmo_cap); length, z_cap_t*, new_vmmo_cap);
SYS2(MemoryObjectInspect, z_cap_t, vmmo_cap, uint64_t*, size);
SYS2(ChannelCreate, z_cap_t*, channel1, z_cap_t*, channel2); SYS2(ChannelCreate, z_cap_t*, channel1, z_cap_t*, channel2);
SYS5(ChannelSend, z_cap_t, chan_cap, uint64_t, num_bytes, const void*, data, SYS5(ChannelSend, z_cap_t, chan_cap, uint64_t, num_bytes, const void*, data,
@ -59,7 +56,6 @@ SYS5(ReplyPortRecv, z_cap_t, reply_port_cap, uint64_t*, num_bytes, void*, data,
uint64_t*, num_caps, z_cap_t*, caps); uint64_t*, num_caps, z_cap_t*, caps);
SYS3(CapDuplicate, z_cap_t, cap_in, z_perm_t, perm_mask, z_cap_t*, cap_out); SYS3(CapDuplicate, z_cap_t, cap_in, z_perm_t, perm_mask, z_cap_t*, cap_out);
SYS1(CapRelease, z_cap_t, cap);
SYS1(MutexCreate, z_cap_t*, mutex_cap); SYS1(MutexCreate, z_cap_t*, mutex_cap);
SYS1(MutexLock, z_cap_t, mutex_cap); SYS1(MutexLock, z_cap_t, mutex_cap);

View File

@ -21,14 +21,13 @@ const uint64_t kZionThreadWait = 0x13;
// Memory Calls // Memory Calls
const uint64_t kZionAddressSpaceMap = 0x21; const uint64_t kZionAddressSpaceMap = 0x21;
const uint64_t kZionAddressSpaceUnmap = 0x22; const uint64_t kZionAddressSpaceUnMap = 0x21;
const uint64_t kZionMemoryObjectCreate = 0x30; const uint64_t kZionMemoryObjectCreate = 0x30;
const uint64_t kZionMemoryObjectCreatePhysical = 0x31; const uint64_t kZionMemoryObjectCreatePhysical = 0x31;
const uint64_t kZionMemoryObjectCreateContiguous = 0x32; const uint64_t kZionMemoryObjectCreateContiguous = 0x32;
const uint64_t kZionMemoryObjectDuplicate = 0x38; const uint64_t kZionMemoryObjectDuplicate = 0x38;
const uint64_t kZionMemoryObjectInspect = 0x39;
// IPC Calls // IPC Calls
const uint64_t kZionChannelCreate = 0x40; const uint64_t kZionChannelCreate = 0x40;
@ -54,7 +53,6 @@ const uint64_t kZionEndpointCall = 0x65;
// Capability Calls // Capability Calls
const uint64_t kZionCapDuplicate = 0x70; const uint64_t kZionCapDuplicate = 0x70;
const uint64_t kZionCapRelease = 0x71;
// Synchronization Calls // Synchronization Calls
const uint64_t kZionMutexCreate = 0x80; const uint64_t kZionMutexCreate = 0x80;

View File

@ -1,93 +0,0 @@
#include "lib/memory_mapping_tree.h"
#include <glacier/string/str_format.h>
#include "debug/debug.h"
template <>
void glcr::StrFormatValue(glcr::StringBuilder& builder,
const MemoryMappingTree::MemoryMapping& value,
glcr::StringView opts) {
builder.PushBack(
glcr::StrFormat("Range {x}-{x}", value.vaddr_base, value.vaddr_limit));
}
glcr::ErrorCode MemoryMappingTree::AddInMemoryObject(
uint64_t vaddr, const glcr::RefPtr<MemoryObject>& object) {
// TODO: This implementation is inefficient as it traverses the tree a lot;
// we should have some iterator-based solution to avoid this.
auto predecessor_or = mapping_tree_.Predecessor(vaddr);
if (predecessor_or && predecessor_or.value().get().vaddr_limit > vaddr) {
return glcr::ALREADY_EXISTS;
}
if (mapping_tree_.Find(vaddr)) {
return glcr::ALREADY_EXISTS;
}
auto successor_or = mapping_tree_.Successor(vaddr);
if (successor_or &&
successor_or.value().get().vaddr_base < vaddr + object->size()) {
return glcr::ALREADY_EXISTS;
}
mapping_tree_.Insert(vaddr, MemoryMapping{
.vaddr_base = vaddr,
.vaddr_limit = vaddr + object->size(),
.mem_object = object,
});
return glcr::OK;
}
glcr::ErrorCode MemoryMappingTree::FreeMemoryRange(uint64_t vaddr_base,
uint64_t vaddr_limit) {
if (vaddr_limit <= vaddr_base) {
return glcr::INVALID_ARGUMENT;
}
auto predecessor_or = mapping_tree_.Predecessor(vaddr_base);
if (predecessor_or && predecessor_or.value().get().vaddr_limit > vaddr_base) {
return glcr::FAILED_PRECONDITION;
}
auto last_predecessor_or = mapping_tree_.Predecessor(vaddr_limit);
if (last_predecessor_or &&
last_predecessor_or.value().get().vaddr_limit > vaddr_limit) {
return glcr::FAILED_PRECONDITION;
}
auto find_or = mapping_tree_.Find(vaddr_base);
if (find_or) {
dbgln("Mem addr {x} refcnt {}",
(uint64_t)find_or.value().get().mem_object.get(),
find_or.value().get().mem_object->ref_count());
mapping_tree_.Delete(vaddr_base);
}
while (true) {
auto successor_or = mapping_tree_.Successor(vaddr_base);
if (!successor_or || successor_or.value().get().vaddr_base >= vaddr_limit) {
return glcr::OK;
}
mapping_tree_.Delete(successor_or.value().get().vaddr_base);
}
}
glcr::ErrorOr<uint64_t> MemoryMappingTree::GetPhysicalPageAtVaddr(
uint64_t vaddr) {
auto mapping_or = GetMemoryMappingForAddr(vaddr);
if (!mapping_or) {
return glcr::NOT_FOUND;
}
MemoryMapping& mapping = mapping_or.value();
return mapping.mem_object->PhysicalPageAtOffset(vaddr - mapping.vaddr_base);
}
glcr::Optional<glcr::Ref<MemoryMappingTree::MemoryMapping>>
MemoryMappingTree::GetMemoryMappingForAddr(uint64_t vaddr) {
auto mapping_or = mapping_tree_.Predecessor(vaddr + 1);
if (!mapping_or) {
return mapping_or;
}
MemoryMapping& mapping = mapping_or.value();
if (mapping.vaddr_base + mapping.mem_object->size() <= vaddr) {
return {};
}
return mapping_or;
}

View File

@ -1,41 +0,0 @@
#pragma once
#include <glacier/container/binary_tree.h>
#include "object/memory_object.h"
/* MemoryMappingTree stores memory objects keyed by virtual address
* ranges and ensures those ranges do not overlap.
*/
class MemoryMappingTree {
public:
MemoryMappingTree() = default;
MemoryMappingTree(const MemoryMappingTree&) = delete;
MemoryMappingTree(MemoryMappingTree&&) = delete;
glcr::ErrorCode AddInMemoryObject(uint64_t vaddr,
const glcr::RefPtr<MemoryObject>& object);
glcr::ErrorCode FreeMemoryRange(uint64_t vaddr_base, uint64_t vaddr_limit);
glcr::ErrorOr<uint64_t> GetPhysicalPageAtVaddr(uint64_t vaddr);
struct MemoryMapping {
uint64_t vaddr_base;
uint64_t vaddr_limit;
glcr::RefPtr<MemoryObject> mem_object;
};
private:
// TODO: Consider adding a red-black tree implementation here.
// As is, this tree performs about as well as a linked list
// because mappings are likely to be added in near-perfect ascending order.
// Also worth considering creating a special tree implementation for
// just this purpose, or maybe a BinaryTree implementation that accepts
// ranges rather than a single key.
glcr::BinaryTree<uint64_t, MemoryMapping> mapping_tree_;
glcr::Optional<glcr::Ref<MemoryMapping>> GetMemoryMappingForAddr(
uint64_t vaddr);
};
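A brief usage sketch of the class being removed here, with hypothetical addresses, a one-page MemoryObject, and a hypothetical wrapper function; the error-code behavior is taken from memory_mapping_tree.cpp above:

#include "debug/debug.h"
#include "lib/memory_mapping_tree.h"

void MemoryMappingTreeExample() {
  MemoryMappingTree tree;
  auto obj = glcr::MakeRefCounted<MemoryObject>(0x1000);
  // Inserts that would overlap an existing mapping return glcr::ALREADY_EXISTS.
  PANIC_ON_ERR(tree.AddInMemoryObject(0x20'00000000, obj),
               "Couldn't add memory object.");
  // Resolve an address inside the mapping to its backing physical page.
  auto page_or = tree.GetPhysicalPageAtVaddr(0x20'00000008);
  if (page_or.ok()) {
    dbgln("Backing page: {x}", page_or.value());
  }
  // Frees every mapping contained in [base, limit); a mapping that only
  // partially overlaps the range fails with glcr::FAILED_PRECONDITION.
  PANIC_ON_ERR(tree.FreeMemoryRange(0x20'00000000, 0x20'00001000),
               "Couldn't free range.");
}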

View File

@ -76,8 +76,7 @@ uint64_t LoadElfProgram(Process& dest_proc, uint64_t base, uint64_t offset) {
#endif #endif
auto mem_obj = glcr::MakeRefCounted<MemoryObject>(program.memsz); auto mem_obj = glcr::MakeRefCounted<MemoryObject>(program.memsz);
mem_obj->CopyBytesToObject(base + program.offset, program.filesz); mem_obj->CopyBytesToObject(base + program.offset, program.filesz);
PANIC_ON_ERR(dest_proc.vmas()->MapInMemoryObject(program.vaddr, mem_obj), dest_proc.vmas()->MapInMemoryObject(program.vaddr, mem_obj);
"Couldn't map in init program.");
} }
return header->entry; return header->entry;
} }
@ -107,7 +106,7 @@ void DumpModules() {
#endif #endif
} }
const limine_file& GetInitProgram(const glcr::String& path) { const limine_file& GetInitProgram(glcr::String path) {
const limine_module_response& resp = boot::GetModules(); const limine_module_response& resp = boot::GetModules();
for (uint64_t i = 0; i < resp.module_count; i++) { for (uint64_t i = 0; i < resp.module_count; i++) {
const limine_file& file = *resp.modules[i]; const limine_file& file = *resp.modules[i];
@ -115,7 +114,7 @@ const limine_file& GetInitProgram(const glcr::String& path) {
return file; return file;
} }
} }
panic("Program not found: {}", path.view()); panic("Program not found: {}", path);
UNREACHABLE UNREACHABLE
} }

View File

@ -164,8 +164,3 @@ void operator delete[](void* addr) {
SlabFree(addr); SlabFree(addr);
} }
} }
void operator delete[](void* addr, uint64_t size) {
if (IsSlab(addr)) {
SlabFree(addr);
}
}

View File

@ -25,10 +25,6 @@ uint64_t KernelVmm::AcquireKernelStack() {
return gKernelVmm->stack_manager_->AllocateKernelStack(); return gKernelVmm->stack_manager_->AllocateKernelStack();
} }
void KernelVmm::FreeKernelStack(uint64_t stack_addr) {
return gKernelVmm->stack_manager_->FreeKernelStack(stack_addr);
}
uint64_t KernelVmm::AcquireSlabHeapRegionInternal(uint64_t slab_size_bytes) { uint64_t KernelVmm::AcquireSlabHeapRegionInternal(uint64_t slab_size_bytes) {
uint64_t next_slab = next_slab_heap_page_; uint64_t next_slab = next_slab_heap_page_;
if (next_slab >= kKernelBuddyHeapEnd) { if (next_slab >= kKernelBuddyHeapEnd) {

View File

@ -35,15 +35,15 @@ uint64_t AddressSpace::GetNextMemMapAddr(uint64_t size) {
return addr; return addr;
} }
glcr::ErrorCode AddressSpace::MapInMemoryObject( void AddressSpace::MapInMemoryObject(
uint64_t vaddr, const glcr::RefPtr<MemoryObject>& mem_obj) { uint64_t vaddr, const glcr::RefPtr<MemoryObject>& mem_obj) {
return mapping_tree_.AddInMemoryObject(vaddr, mem_obj); memory_mappings_.Insert(vaddr, {.vaddr = vaddr, .mem_obj = mem_obj});
} }
glcr::ErrorOr<uint64_t> AddressSpace::MapInMemoryObject( uint64_t AddressSpace::MapInMemoryObject(
const glcr::RefPtr<MemoryObject>& mem_obj) { const glcr::RefPtr<MemoryObject>& mem_obj) {
uint64_t vaddr = GetNextMemMapAddr(mem_obj->size()); uint64_t vaddr = GetNextMemMapAddr(mem_obj->size());
RET_ERR(mapping_tree_.AddInMemoryObject(vaddr, mem_obj)); memory_mappings_.Insert(vaddr, {.vaddr = vaddr, .mem_obj = mem_obj});
return vaddr; return vaddr;
} }
@ -55,23 +55,38 @@ bool AddressSpace::HandlePageFault(uint64_t vaddr) {
#if K_VMAS_DEBUG #if K_VMAS_DEBUG
dbgln("[VMAS] Page Fault!"); dbgln("[VMAS] Page Fault!");
#endif #endif
if (vaddr < kPageSize) {
// Invalid page access.
return false;
}
if (user_stacks_.IsValidStack(vaddr)) { if (user_stacks_.IsValidStack(vaddr)) {
MapPage(cr3_, vaddr, phys_mem::AllocatePage()); MapPage(cr3_, vaddr, phys_mem::AllocatePage());
return true; return true;
} }
auto offset_or = mapping_tree_.GetPhysicalPageAtVaddr(vaddr); auto mapping_or = GetMemoryMappingForAddr(vaddr);
if (!offset_or.ok()) { if (!mapping_or) {
return false;
}
MemoryMapping& mapping = mapping_or.value();
uint64_t offset = vaddr - mapping.vaddr;
uint64_t physical_addr = mapping.mem_obj->PhysicalPageAtOffset(offset);
if (physical_addr == 0) {
dbgln("WARN: Memory object returned invalid physical addr.");
return false; return false;
} }
#if K_VMAS_DEBUG #if K_VMAS_DEBUG
dbgln("[VMAS] Mapping P({x}) at V({x})", physical_addr, vaddr); dbgln("[VMAS] Mapping P({x}) at V({x})", physical_addr, vaddr);
#endif #endif
MapPage(cr3_, vaddr, offset_or.value()); MapPage(cr3_, vaddr, physical_addr);
return true; return true;
} }
glcr::Optional<glcr::Ref<AddressSpace::MemoryMapping>>
AddressSpace::GetMemoryMappingForAddr(uint64_t vaddr) {
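// Predecessor() excludes its key, so query vaddr + 1 to also match a mapping that starts exactly at vaddr.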
auto mapping_or = memory_mappings_.Predecessor(vaddr + 1);
if (!mapping_or) {
return mapping_or;
}
MemoryMapping& mapping = mapping_or.value();
if (mapping.vaddr + mapping.mem_obj->size() <= vaddr) {
return {};
}
return mapping_or;
}

View File

@ -5,7 +5,6 @@
#include <stdint.h> #include <stdint.h>
#include "include/ztypes.h" #include "include/ztypes.h"
#include "lib/memory_mapping_tree.h"
#include "memory/user_stack_manager.h" #include "memory/user_stack_manager.h"
#include "object/memory_object.h" #include "object/memory_object.h"
@ -70,22 +69,16 @@ class AddressSpace : public KernelObject {
// Maps in a memory object at a specific address. // Maps in a memory object at a specific address.
// Note this is unsafe for now as it may clobber other mappings. // Note this is unsafe for now as it may clobber other mappings.
[[nodiscard]] glcr::ErrorCode MapInMemoryObject( void MapInMemoryObject(uint64_t vaddr,
uint64_t vaddr, const glcr::RefPtr<MemoryObject>& mem_obj); const glcr::RefPtr<MemoryObject>& mem_obj);
[[nodiscard]] glcr::ErrorOr<uint64_t> MapInMemoryObject( uint64_t MapInMemoryObject(const glcr::RefPtr<MemoryObject>& mem_obj);
const glcr::RefPtr<MemoryObject>& mem_obj);
[[nodiscard]] glcr::ErrorCode FreeAddressRange(uint64_t vaddr_base,
uint64_t vaddr_limit) {
return mapping_tree_.FreeMemoryRange(vaddr_base, vaddr_limit);
}
// Kernel Mappings. // Kernel Mappings.
uint64_t AllocateKernelStack(); uint64_t AllocateKernelStack();
// Returns true if the page fault has been resolved. // Returns true if the page fault has been resolved.
[[nodiscard]] bool HandlePageFault(uint64_t vaddr); bool HandlePageFault(uint64_t vaddr);
private: private:
friend class glcr::MakeRefCountedFriend<AddressSpace>; friend class glcr::MakeRefCountedFriend<AddressSpace>;
@ -95,5 +88,19 @@ class AddressSpace : public KernelObject {
UserStackManager user_stacks_; UserStackManager user_stacks_;
uint64_t next_memmap_addr_ = 0x20'00000000; uint64_t next_memmap_addr_ = 0x20'00000000;
MemoryMappingTree mapping_tree_; struct MemoryMapping {
uint64_t vaddr;
glcr::RefPtr<MemoryObject> mem_obj;
};
// TODO: Consider adding a red-black tree implementation here.
// As is, this tree performs about as well as a linked list
// because mappings are likely to be added in near-perfect ascending order.
// Also worth considering creating a special tree implementation for
// just this purpose, or maybe a BinaryTree implementation that accepts
// ranges rather than a single key.
glcr::BinaryTree<uint64_t, MemoryMapping> memory_mappings_;
glcr::Optional<glcr::Ref<MemoryMapping>> GetMemoryMappingForAddr(
uint64_t vaddr);
}; };

View File

@ -20,8 +20,6 @@ MemoryObject::MemoryObject(uint64_t size) : size_(size) {
} }
} }
MemoryObject::~MemoryObject() { dbgln("Memory Object Freed"); }
uint64_t MemoryObject::PhysicalPageAtOffset(uint64_t offset) { uint64_t MemoryObject::PhysicalPageAtOffset(uint64_t offset) {
if (offset > size_) { if (offset > size_) {
panic("Invalid offset"); panic("Invalid offset");

View File

@ -27,8 +27,7 @@ class MemoryObject : public KernelObject {
kZionPerm_Transmit; kZionPerm_Transmit;
} }
explicit MemoryObject(uint64_t size); MemoryObject(uint64_t size);
~MemoryObject();
uint64_t size() { return size_; } uint64_t size() { return size_; }
uint64_t num_pages() { return size_ / 0x1000; } uint64_t num_pages() { return size_ / 0x1000; }

View File

@ -75,12 +75,12 @@ void Thread::Exit() {
curr_thread->tid(), pid(), tid()); curr_thread->tid(), pid(), tid());
} }
Cleanup(); Cleanup();
process_.CheckState();
gScheduler->Yield(); gScheduler->Yield();
} }
void Thread::Cleanup() { void Thread::Cleanup() {
state_ = CLEANUP; state_ = CLEANUP;
process_.CheckState();
while (blocked_threads_.size() != 0) { while (blocked_threads_.size() != 0) {
auto thread = blocked_threads_.PopFront(); auto thread = blocked_threads_.PopFront();
thread->SetState(Thread::RUNNABLE); thread->SetState(Thread::RUNNABLE);

View File

@ -15,19 +15,10 @@ z_err_t AddressSpaceMap(ZAddressSpaceMapReq* req) {
// FIXME: Validation necessary. // FIXME: Validation necessary.
if (req->vmas_offset != 0) { if (req->vmas_offset != 0) {
RET_ERR(vmas->MapInMemoryObject(req->vmas_offset, vmmo)); vmas->MapInMemoryObject(req->vmas_offset, vmmo);
*req->vaddr = req->vmas_offset; *req->vaddr = req->vmas_offset;
} else { } else {
ASSIGN_OR_RETURN(*req->vaddr, vmas->MapInMemoryObject(vmmo)); *req->vaddr = vmas->MapInMemoryObject(vmmo);
} }
return glcr::OK; return glcr::OK;
} }
z_err_t AddressSpaceUnmap(ZAddressSpaceUnmapReq* req) {
auto& curr_proc = gScheduler->CurrentProcess();
auto vmas_cap = curr_proc.GetCapability(req->vmas_cap);
RET_ERR(ValidateCapability<AddressSpace>(vmas_cap, kZionPerm_Write));
auto vmas = vmas_cap->obj<AddressSpace>();
return vmas->FreeAddressRange(req->lower_addr, req->upper_addr);
}

View File

@ -3,4 +3,3 @@
#include "include/zcall.h" #include "include/zcall.h"
z_err_t AddressSpaceMap(ZAddressSpaceMapReq* req); z_err_t AddressSpaceMap(ZAddressSpaceMapReq* req);
z_err_t AddressSpaceUnmap(ZAddressSpaceUnmapReq* req);

View File

@ -18,11 +18,3 @@ z_err_t CapDuplicate(ZCapDuplicateReq* req) {
cap->permissions() & req->perm_mask); cap->permissions() & req->perm_mask);
return glcr::OK; return glcr::OK;
} }
z_err_t CapRelease(ZCapReleaseReq* req) {
auto& proc = gScheduler->CurrentProcess();
if (proc.ReleaseCapability(req->cap).empty()) {
return glcr::CAP_NOT_FOUND;
}
return glcr::OK;
}

View File

@ -3,4 +3,3 @@
#include "include/zcall.h" #include "include/zcall.h"
z_err_t CapDuplicate(ZCapDuplicateReq* req); z_err_t CapDuplicate(ZCapDuplicateReq* req);
z_err_t CapRelease(ZCapReleaseReq* req);

View File

@ -33,6 +33,7 @@ z_err_t MemoryObjectCreateContiguous(ZMemoryObjectCreateContiguousReq* req) {
z_err_t MemoryObjectDuplicate(ZMemoryObjectDuplicateReq* req) { z_err_t MemoryObjectDuplicate(ZMemoryObjectDuplicateReq* req) {
auto& curr_proc = gScheduler->CurrentProcess(); auto& curr_proc = gScheduler->CurrentProcess();
auto vmmo_cap = curr_proc.GetCapability(req->vmmo_cap); auto vmmo_cap = curr_proc.GetCapability(req->vmmo_cap);
// FIXME: Check a duplication permission here.
RET_ERR(ValidateCapability<MemoryObject>(vmmo_cap, kZionPerm_Duplicate)); RET_ERR(ValidateCapability<MemoryObject>(vmmo_cap, kZionPerm_Duplicate));
ASSIGN_OR_RETURN( ASSIGN_OR_RETURN(
@ -42,14 +43,3 @@ z_err_t MemoryObjectDuplicate(ZMemoryObjectDuplicateReq* req) {
curr_proc.AddNewCapability(new_vmmo, vmmo_cap->permissions()); curr_proc.AddNewCapability(new_vmmo, vmmo_cap->permissions());
return glcr::OK; return glcr::OK;
} }
z_err_t MemoryObjectInspect(ZMemoryObjectInspectReq* req) {
auto& curr_proc = gScheduler->CurrentProcess();
auto vmmo_cap = curr_proc.GetCapability(req->vmmo_cap);
RET_ERR(ValidateCapability<MemoryObject>(vmmo_cap, kZionPerm_Read));
auto vmmo = vmmo_cap->obj<MemoryObject>();
*req->size = vmmo->size();
return glcr::OK;
}

View File

@ -6,4 +6,3 @@ z_err_t MemoryObjectCreate(ZMemoryObjectCreateReq* req);
z_err_t MemoryObjectCreatePhysical(ZMemoryObjectCreatePhysicalReq* req); z_err_t MemoryObjectCreatePhysical(ZMemoryObjectCreatePhysicalReq* req);
z_err_t MemoryObjectCreateContiguous(ZMemoryObjectCreateContiguousReq* req); z_err_t MemoryObjectCreateContiguous(ZMemoryObjectCreateContiguousReq* req);
z_err_t MemoryObjectDuplicate(ZMemoryObjectDuplicateReq* req); z_err_t MemoryObjectDuplicate(ZMemoryObjectDuplicateReq* req);
z_err_t MemoryObjectInspect(ZMemoryObjectInspectReq* req);

View File

@ -60,13 +60,11 @@ extern "C" z_err_t SyscallHandler(uint64_t call_id, void* req) {
CASE(ThreadWait); CASE(ThreadWait);
// syscall/address_space.h // syscall/address_space.h
CASE(AddressSpaceMap); CASE(AddressSpaceMap);
CASE(AddressSpaceUnmap);
// syscall/memory_object.h // syscall/memory_object.h
CASE(MemoryObjectCreate); CASE(MemoryObjectCreate);
CASE(MemoryObjectCreatePhysical); CASE(MemoryObjectCreatePhysical);
CASE(MemoryObjectCreateContiguous); CASE(MemoryObjectCreateContiguous);
CASE(MemoryObjectDuplicate); CASE(MemoryObjectDuplicate);
CASE(MemoryObjectInspect);
// syscall/ipc.h // syscall/ipc.h
CASE(ChannelCreate); CASE(ChannelCreate);
CASE(ChannelSend); CASE(ChannelSend);
@ -83,7 +81,6 @@ extern "C" z_err_t SyscallHandler(uint64_t call_id, void* req) {
CASE(ReplyPortRecv); CASE(ReplyPortRecv);
// syscall/capability.h // syscall/capability.h
CASE(CapDuplicate); CASE(CapDuplicate);
CASE(CapRelease);
// syscall/syncronization.h // syscall/syncronization.h
CASE(MutexCreate); CASE(MutexCreate);
CASE(MutexLock); CASE(MutexLock);