Compare commits
No commits in common. "344e84c313a05ceac677b5666762c39809215f6a" and "308dd6a2035f0e2d6ec1a6593fc06210ef5783ac" have entirely different histories.
344e84c313 ... 308dd6a203

@@ -7,7 +7,6 @@
 #include "glacier/memory/ref_ptr.h"
 #include "glacier/memory/reference.h"
 #include "glacier/memory/unique_ptr.h"
-#include "glacier/string/str_format.h"
 
 namespace glcr {
 
@@ -27,8 +26,6 @@ class BinaryTree {
 
   Optional<Ref<V>> Find(K key);
 
-  void DebugTreeIntoStr(StringBuilder& builder) const;
-
  private:
  // TODO: Consider adding a sharedptr type to
  // avoid making this "RefCounted".
@@ -48,9 +45,6 @@ class BinaryTree {
  // If this node exists, return it. Otherwise, this
  // will be the parent of where this node would be inserted.
  RefPtr<BinaryNode> FindOrInsertionParent(K key);
-
-  static void DebugNodeIntoString(StringBuilder& builder, uint64_t indent_level,
-                                  const RefPtr<BinaryNode>& node);
 };
 
 template <typename K, typename V>
@@ -80,45 +74,39 @@ void BinaryTree<K, V>::Delete(K key) {
   }
 
   RefPtr<BinaryNode> new_child = nullptr;
-  if (!node->left) {
+  if (!node.left) {
     // No children.
     // Right child only.
-    new_child = node->right;
-  } else if (!node->right) {
+    new_child = node.right;
+  } else if (!node.right) {
     // Left child only.
-    new_child = node->left;
+    new_child = node.left;
   } else {
     // Find Successor.
-    auto successor = node->right;
-    while (successor->left) {
-      successor = successor->left;
+    auto successor = node.right;
+    while (successor.left) {
+      successor = successor.left;
     }
     new_child = successor;
-    if (successor != node->right) {
-      successor->parent->left = successor->right;
+    if (successor != node.right) {
+      successor.parent.left = successor.right;
     }
   }
 
   if (node == root_) {
     root_ = new_child;
   } else {
-    if (node->parent->right == node) {
-      node->parent->right = new_child;
+    if (node.parent.right == node) {
+      node.parent.right = new_child;
     } else {
-      node->parent->left = new_child;
+      node.parent.left = new_child;
     }
   }
   if (new_child) {
     new_child->parent = node->parent;
   }
 }
 
 template <typename K, typename V>
 Optional<Ref<V>> BinaryTree<K, V>::Predecessor(K key) {
   auto current = FindOrInsertionParent(key);
   if (current.empty()) {
     return {};
   }
 
   // The case where the current is the insertion parent and
   // the predecessor is unique. If the key was going to be
@@ -151,9 +139,6 @@ Optional<Ref<V>> BinaryTree<K, V>::Predecessor(K key) {
 template <typename K, typename V>
 Optional<Ref<V>> BinaryTree<K, V>::Successor(K key) {
   auto current = FindOrInsertionParent(key);
   if (current.empty()) {
     return {};
   }
 
   // The case where the current is the insertion parent and
   // the predecessor is unique. If the key was going to be
@@ -186,9 +171,6 @@ Optional<Ref<V>> BinaryTree<K, V>::Successor(K key) {
 template <typename K, typename V>
 Optional<Ref<V>> BinaryTree<K, V>::Find(K key) {
   auto current = FindOrInsertionParent(key);
   if (current.empty()) {
     return {};
   }
   if (current->key == key) {
     return Optional<Ref<V>>(current->value);
   }
@@ -220,36 +202,4 @@ BinaryTree<K, V>::FindOrInsertionParent(K key) {
   }
 }
 
-template <typename K, typename V>
-void StrFormatValue(StringBuilder& builder, const BinaryTree<K, V>& value,
-                    StringView opts) {
-  value.DebugTreeIntoStr(builder);
-}
-
-template <typename K, typename V>
-void BinaryTree<K, V>::DebugTreeIntoStr(StringBuilder& builder) const {
-  DebugNodeIntoString(builder, 0, root_);
-}
-
-template <typename K, typename V>
-void BinaryTree<K, V>::DebugNodeIntoString(StringBuilder& builder,
-                                           uint64_t indent_level,
-                                           const RefPtr<BinaryNode>& node) {
-  if (node.empty()) {
-    return;
-  }
-  for (uint64_t i = 0; i < indent_level; i++) {
-    builder.PushBack('\t');
-  }
-  StrFormatValue(builder, node->value, "");
-  builder.PushBack('\n');
-  if (node->left) {
-    builder.PushBack('L');
-    DebugNodeIntoString(builder, indent_level + 1, node->left);
-  }
-  if (node->right) {
-    builder.PushBack('R');
-    DebugNodeIntoString(builder, indent_level + 1, node->right);
-  }
-}
 }  // namespace glcr
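
Aside: the Delete() hunk above is the textbook binary-search-tree removal, where a node with two children is replaced by its in-order successor (the left-most node of its right subtree). A minimal standalone sketch of that technique in plain C++ follows; all names are illustrative and none are taken from this repository.

struct Node {
  int key;
  Node* left = nullptr;
  Node* right = nullptr;
};

// Removes `key` from the subtree rooted at `root`; returns the new subtree root.
Node* Delete(Node* root, int key) {
  if (root == nullptr) return nullptr;
  if (key < root->key) {
    root->left = Delete(root->left, key);
  } else if (key > root->key) {
    root->right = Delete(root->right, key);
  } else if (root->left == nullptr) {
    Node* child = root->right;  // Zero children, or right child only.
    delete root;
    return child;
  } else if (root->right == nullptr) {
    Node* child = root->left;  // Left child only.
    delete root;
    return child;
  } else {
    // Two children: copy the in-order successor's key into this node,
    // then remove the successor from the right subtree.
    Node* succ = root->right;
    while (succ->left != nullptr) succ = succ->left;
    root->key = succ->key;
    root->right = Delete(root->right, succ->key);
  }
  return root;
}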

@@ -11,12 +11,10 @@ class RefCounted {
   virtual ~RefCounted() {}
   // FIXME: Rethink error handling in these cases now that we can't panic the
   // kernel.
-  void AdoptPtr() { ref_count_ = 1; }
+  void Adopt() { ref_count_ = 1; }
 
-  void AcquirePtr() { ref_count_++; }
-  bool ReleasePtr() { return (--ref_count_) == 0; }
-
-  uint64_t ref_count() { return ref_count_; }
+  void Acquire() { ref_count_++; }
+  bool Release() { return (--ref_count_) == 0; }
 
  private:
   // FIXME: This should be an atomic type.

@@ -18,16 +18,16 @@ class RefPtr {
   RefPtr(decltype(nullptr)) : ptr_(nullptr) {}
   RefPtr(const RefPtr& other) : ptr_(other.ptr_) {
     if (ptr_) {
-      ptr_->AcquirePtr();
+      ptr_->Acquire();
     }
   }
   RefPtr& operator=(const RefPtr& other) {
     T* old = ptr_;
     ptr_ = other.ptr_;
     if (ptr_) {
-      ptr_->AcquirePtr();
+      ptr_->Acquire();
     }
-    if (old && old->ReleasePtr()) {
+    if (old && old->Release()) {
       delete old;
     }
@@ -46,15 +46,7 @@ class RefPtr {
   enum DontAdoptTag {
     DontAdopt,
   };
-  RefPtr(T* ptr, DontAdoptTag) : ptr_(ptr) { ptr->AcquirePtr(); }
-
-  ~RefPtr() {
-    if (ptr_) {
-      if (ptr_->ReleasePtr()) {
-        delete ptr_;
-      }
-    }
-  }
+  RefPtr(T* ptr, DontAdoptTag) : ptr_(ptr) { ptr->Acquire(); }
 
   T* get() const { return ptr_; };
   T& operator*() const { return *ptr_; }
@@ -73,7 +65,7 @@ class RefPtr {
   T* ptr_;
 
   friend RefPtr<T> AdoptPtr<T>(T* ptr);
-  RefPtr(T* ptr) : ptr_(ptr) { ptr->AdoptPtr(); }
+  RefPtr(T* ptr) : ptr_(ptr) { ptr->Adopt(); }
 };
 
 template <typename T>
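
Aside: the RefCounted and RefPtr hunks above implement intrusive reference counting; the count lives inside the pointee, and the smart pointer adjusts it on copy, assignment, and destruction. A minimal standalone sketch of the same pattern in plain C++ (illustrative names only, not the repository's API):

#include <cstdint>

class Counted {
 public:
  void Acquire() { ++ref_count_; }
  // Returns true when the last reference is dropped and the object can be freed.
  bool Release() { return --ref_count_ == 0; }

 private:
  uint64_t ref_count_ = 1;  // The creating pointer adopts the first reference.
};

template <typename T>
class Ptr {
 public:
  explicit Ptr(T* ptr) : ptr_(ptr) {}  // Adopts the initial reference.
  Ptr(const Ptr& other) : ptr_(other.ptr_) {
    if (ptr_) ptr_->Acquire();
  }
  Ptr& operator=(const Ptr& other) {
    T* old = ptr_;
    ptr_ = other.ptr_;
    if (ptr_) ptr_->Acquire();
    // Acquire before releasing `old` so self-assignment stays safe,
    // mirroring the ordering visible in the RefPtr hunk above.
    if (old && old->Release()) delete old;
    return *this;
  }
  ~Ptr() {
    if (ptr_ && ptr_->Release()) delete ptr_;
  }
  T* operator->() const { return ptr_; }

 private:
  T* ptr_;
};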

@@ -10,7 +10,6 @@ class Ref {
   Ref(Ref&& other) = default;
 
-  operator T&() const { return ref_; }
   T& get() const { return ref_; }
 
  private:
   T& ref_;

@@ -20,32 +20,27 @@ void StrFormatNumber(StringBuilder& builder, uint64_t value, uint64_t base) {
 }  // namespace
 
 template <>
-void StrFormatValue(StringBuilder& builder, const uint8_t& value,
-                    StringView opts) {
+void StrFormatValue(StringBuilder& builder, uint8_t value, StringView opts) {
   StrFormatValue(builder, static_cast<uint64_t>(value), opts);
 }
 
 template <>
-void StrFormatValue(StringBuilder& builder, const uint16_t& value,
-                    StringView opts) {
+void StrFormatValue(StringBuilder& builder, uint16_t value, StringView opts) {
   StrFormatValue(builder, static_cast<uint64_t>(value), opts);
 }
 
 template <>
-void StrFormatValue(StringBuilder& builder, const int32_t& value,
-                    StringView opts) {
+void StrFormatValue(StringBuilder& builder, int32_t value, StringView opts) {
   StrFormatValue(builder, static_cast<uint64_t>(value), opts);
 }
 
 template <>
-void StrFormatValue(StringBuilder& builder, const uint32_t& value,
-                    StringView opts) {
+void StrFormatValue(StringBuilder& builder, uint32_t value, StringView opts) {
   StrFormatValue(builder, static_cast<uint64_t>(value), opts);
 }
 
 template <>
-void StrFormatValue(StringBuilder& builder, const uint64_t& value,
-                    StringView opts) {
+void StrFormatValue(StringBuilder& builder, uint64_t value, StringView opts) {
   if (opts.find('x') != opts.npos) {
     builder.PushBack("0x");
     StrFormatNumber(builder, value, 16);
@@ -55,26 +50,28 @@ void StrFormatValue(StringBuilder& builder, const uint64_t& value,
 }
 
 template <>
-void StrFormatValue(StringBuilder& builder, const ErrorCode& value,
-                    StringView opts) {
+void StrFormatValue(StringBuilder& builder, ErrorCode value, StringView opts) {
   StrFormatValue(builder, static_cast<uint64_t>(value), opts);
 }
 
 template <>
-void StrFormatValue(StringBuilder& builder, const char& value,
-                    StringView opts) {
+void StrFormatValue(StringBuilder& builder, char value, StringView opts) {
   builder.PushBack(value);
 }
 
 template <>
-void StrFormatValue(StringBuilder& builder, char const* const& value,
+void StrFormatValue(StringBuilder& builder, const char* value,
                     StringView opts) {
-  StrFormatInternal(builder, StringView(value));
+  StrFormatValue(builder, StringView(value), opts);
 }
 
 template <>
-void StrFormatValue(StringBuilder& builder, const StringView& value,
-                    StringView opts) {
+void StrFormatValue(StringBuilder& builder, StringView value, StringView opts) {
   StrFormatInternal(builder, value);
 }
+
+template <>
+void StrFormatValue(StringBuilder& builder, String value, StringView opts) {
+  StrFormatInternal(builder, value);
+}

@@ -7,51 +7,44 @@
 
 namespace glcr {
 
-// FIXME: We need some meta-programming here to allow pass-by-value for pointers
-// and primitives.
 template <typename T>
-void StrFormatValue(StringBuilder& builder, const T& value, StringView opts);
+void StrFormatValue(StringBuilder& builder, T value, StringView opts);
 
 template <>
-void StrFormatValue(StringBuilder& builder, const uint8_t& value,
-                    StringView opts);
+void StrFormatValue(StringBuilder& builder, uint8_t value, StringView opts);
 
 template <>
-void StrFormatValue(StringBuilder& builder, const uint16_t& value,
-                    StringView opts);
+void StrFormatValue(StringBuilder& builder, uint16_t value, StringView opts);
 
 template <>
-void StrFormatValue(StringBuilder& builder, const int32_t& value,
-                    StringView opts);
+void StrFormatValue(StringBuilder& builder, int32_t value, StringView opts);
 
 template <>
-void StrFormatValue(StringBuilder& builder, const uint32_t& value,
-                    StringView opts);
+void StrFormatValue(StringBuilder& builder, uint32_t value, StringView opts);
 
 template <>
-void StrFormatValue(StringBuilder& builder, const uint64_t& value,
-                    StringView opts);
+void StrFormatValue(StringBuilder& builder, uint64_t value, StringView opts);
 
 template <>
-void StrFormatValue(StringBuilder& builder, const ErrorCode& value,
-                    StringView opts);
+void StrFormatValue(StringBuilder& builder, ErrorCode value, StringView opts);
 
 template <>
-void StrFormatValue(StringBuilder& builder, const char& value, StringView opts);
+void StrFormatValue(StringBuilder& builder, char value, StringView opts);
 
 template <>
-void StrFormatValue(StringBuilder& builder, char const* const& value,
-                    StringView opts);
+void StrFormatValue(StringBuilder& builder, const char* value, StringView opts);
 
 template <>
-void StrFormatValue(StringBuilder& builder, const StringView& value,
-                    StringView opts);
+void StrFormatValue(StringBuilder& builder, StringView value, StringView opts);
+
+template <>
+void StrFormatValue(StringBuilder& builder, String value, StringView opts);
 
 void StrFormatInternal(StringBuilder& builder, StringView format);
 
 template <typename T, typename... Args>
-void StrFormatInternal(StringBuilder& builder, StringView format,
-                       const T& value, Args&&... args) {
+void StrFormatInternal(StringBuilder& builder, StringView format, T value,
+                       Args... args) {
   uint64_t posl = format.find('{');
   uint64_t posr = format.find('}', posl);
   if (posl == format.npos || posr == format.npos) {
@@ -66,7 +59,7 @@ void StrFormatInternal(StringBuilder& builder, StringView format,
 }
 
 template <typename... Args>
-String StrFormat(StringView format, Args&&... args) {
+String StrFormat(StringView format, Args... args) {
   VariableStringBuilder builder;
   StrFormatInternal(builder, format, args...);
   return builder.ToString();
@@ -74,7 +67,7 @@ String StrFormat(StringView format, Args&&... args) {
 
 template <typename... Args>
 void StrFormatIntoBuffer(StringBuilder& builder, StringView format,
-                         Args&&... args) {
+                         Args... args) {
   StrFormatInternal(builder, format, args...);
 }
 
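
Aside: a hypothetical call site for the formatting API shown above (the identifiers come from these hunks, but this specific call is illustrative and not taken from the repository):

  glcr::String msg = glcr::StrFormat("read {} blocks at {x}", count, lba);

Each "{}" consumes the next argument through the matching StrFormatValue overload; "{x}" passes "x" as opts, which the uint64_t overload checks with opts.find('x') to emit a "0x"-prefixed hexadecimal value.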

@@ -46,6 +46,5 @@ char String::operator[](uint64_t offset) const {
 }
 
 String::operator StringView() const { return StringView(cstr_, length_); }
-StringView String::view() const { return this->operator StringView(); }
 
 }  // namespace glcr

@@ -13,17 +13,13 @@ class String {
   String(const char* cstr, uint64_t str_len);
   String(StringView str);
 
-  String(const String&) = delete;
-
   const char* cstr() const { return cstr_; }
   uint64_t length() const { return length_; }
 
   bool operator==(const String& str);
 
   char operator[](uint64_t offset) const;
 
   operator StringView() const;
-  StringView view() const;
 
  private:
   char* cstr_;

@@ -3,41 +3,32 @@
 #include <stdint.h>
 #include <ztypes.h>
 
-/*
- * Memory Region class that unmaps its memory and releases its
- * capability when it goes out of scope.
- */
-class OwnedMemoryRegion {
+class MappedMemoryRegion {
  public:
-  OwnedMemoryRegion() = default;
+  // FIXME: Introduce optional type to contain error or.
+  static MappedMemoryRegion DirectPhysical(uint64_t phys_addr, uint64_t size);
+  static MappedMemoryRegion ContiguousPhysical(uint64_t size);
+  static MappedMemoryRegion Default(uint64_t size);
+  static MappedMemoryRegion FromCapability(z_cap_t vmmo_cap);
 
-  OwnedMemoryRegion(const OwnedMemoryRegion&) = delete;
-  OwnedMemoryRegion& operator=(const OwnedMemoryRegion&) = delete;
-
-  OwnedMemoryRegion(OwnedMemoryRegion&&);
-  OwnedMemoryRegion& operator=(OwnedMemoryRegion&&);
-
-  ~OwnedMemoryRegion();
-
-  static OwnedMemoryRegion FromCapability(z_cap_t vmmo_cap);
-  // TODO: Consider making this its own class.
-  static OwnedMemoryRegion ContiguousPhysical(uint64_t size, uint64_t* paddr);
-  static OwnedMemoryRegion DirectPhysical(uint64_t paddr, uint64_t size);
+  MappedMemoryRegion() {}
+  // TODO: Disallow copy before doing any cleanup here.
+  ~MappedMemoryRegion() {}
 
+  uint64_t paddr() { return paddr_; }
   uint64_t vaddr() { return vaddr_; }
   uint64_t size() { return size_; }
 
-  z_cap_t cap() { return vmmo_cap_; }
-  z_cap_t DuplicateCap();
+  uint64_t cap() { return vmmo_cap_; }
 
-  bool empty() { return vmmo_cap_ != 0; }
-  explicit operator bool() { return vmmo_cap_ != 0; }
+  operator bool() { return vmmo_cap_ != 0; }
 
  private:
-  OwnedMemoryRegion(uint64_t vmmo_cap, uint64_t vaddr, uint64_t size)
-      : vmmo_cap_(vmmo_cap), vaddr_(vaddr), size_(size) {}
+  MappedMemoryRegion(uint64_t vmmo_cap, uint64_t paddr, uint64_t vaddr,
+                     uint64_t size)
+      : vmmo_cap_(vmmo_cap), paddr_(paddr), vaddr_(vaddr), size_(size) {}
   uint64_t vmmo_cap_ = 0;
+  uint64_t paddr_ = 0;
   uint64_t vaddr_ = 0;
-  // TODO: We may want to differentiate between VMMO size and mapped size?
   uint64_t size_ = 0;
 };
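
Aside: this hunk trades a move-only RAII wrapper (OwnedMemoryRegion, which unmaps its memory and releases its capability in the destructor) for a freely copyable handle (MappedMemoryRegion, which cleans up nothing). A minimal sketch of the move-only shape in plain C++, with an illustrative Release() standing in for the unmap and capability-release syscalls:

#include <cstdint>
#include <utility>

class OwnedRegion {
 public:
  OwnedRegion() = default;
  explicit OwnedRegion(uint64_t handle) : handle_(handle) {}

  OwnedRegion(const OwnedRegion&) = delete;  // Copying would double-free.
  OwnedRegion& operator=(const OwnedRegion&) = delete;

  OwnedRegion(OwnedRegion&& other) : handle_(std::exchange(other.handle_, 0)) {}
  OwnedRegion& operator=(OwnedRegion&& other) {
    Release();
    handle_ = std::exchange(other.handle_, 0);
    return *this;
  }

  ~OwnedRegion() { Release(); }

 private:
  void Release() {
    if (handle_ != 0) {
      // Real code would unmap the region and release the capability here.
      handle_ = 0;
    }
  }
  uint64_t handle_ = 0;  // 0 means empty, as with vmmo_cap_ above.
};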

@@ -43,7 +43,6 @@ z_err_t ParseInitPort(uint64_t init_port_cap) {
       break;
     case Z_BOOT_FRAMEBUFFER_INFO_VMMO:
       gBootFramebufferVmmoCap = init_cap;
       break;
     default:
       dbgln("Unexpected init type {x}, continuing.", init_sig);
   }

@@ -5,66 +5,41 @@
 #include "mammoth/debug.h"
 #include "mammoth/init.h"
 
-OwnedMemoryRegion::OwnedMemoryRegion(OwnedMemoryRegion&& other)
-    : OwnedMemoryRegion(other.vmmo_cap_, other.vaddr_, other.size_) {
-  other.vmmo_cap_ = 0;
-  other.vaddr_ = 0;
-  other.size_ = 0;
-}
-
-OwnedMemoryRegion& OwnedMemoryRegion::operator=(OwnedMemoryRegion&& other) {
-  if (vmmo_cap_) {
-    check(ZCapRelease(vmmo_cap_));
-  }
-  vmmo_cap_ = other.vmmo_cap_;
-  vaddr_ = other.vaddr_;
-  size_ = other.size_;
-  other.vmmo_cap_ = 0;
-  other.vaddr_ = 0;
-  other.size_ = 0;
-  return *this;
-}
-
-OwnedMemoryRegion::~OwnedMemoryRegion() {
-  if (vmmo_cap_ != 0) {
-    check(ZAddressSpaceUnmap(gSelfVmasCap, vaddr_, vaddr_ + size_));
-    check(ZCapRelease(vmmo_cap_));
-  }
-}
-
-OwnedMemoryRegion OwnedMemoryRegion::FromCapability(z_cap_t vmmo_cap) {
-  uint64_t vaddr;
-  check(ZAddressSpaceMap(gSelfVmasCap, 0, vmmo_cap, &vaddr));
-
-  uint64_t size;
-  check(ZMemoryObjectInspect(vmmo_cap, &size));
-  // FIXME: get the size here.
-  return OwnedMemoryRegion(vmmo_cap, vaddr, size);
-}
-
-OwnedMemoryRegion OwnedMemoryRegion::ContiguousPhysical(uint64_t size,
-                                                        uint64_t* paddr) {
-  uint64_t vmmo_cap;
-  check(ZMemoryObjectCreateContiguous(size, &vmmo_cap, paddr));
-
-  uint64_t vaddr;
-  check(ZAddressSpaceMap(gSelfVmasCap, 0, vmmo_cap, &vaddr));
-
-  return OwnedMemoryRegion(vmmo_cap, vaddr, size);
-}
-
-OwnedMemoryRegion OwnedMemoryRegion::DirectPhysical(uint64_t paddr,
+MappedMemoryRegion MappedMemoryRegion::DirectPhysical(uint64_t paddr,
                                                      uint64_t size) {
   uint64_t vmmo_cap;
   check(ZMemoryObjectCreatePhysical(paddr, size, &vmmo_cap));
 
   uint64_t vaddr;
   check(ZAddressSpaceMap(gSelfVmasCap, 0, vmmo_cap, &vaddr));
-  return OwnedMemoryRegion(vmmo_cap, vaddr, size);
+
+  return MappedMemoryRegion(vmmo_cap, paddr, vaddr, size);
 }
 
-z_cap_t OwnedMemoryRegion::DuplicateCap() {
-  z_cap_t cap;
-  check(ZCapDuplicate(vmmo_cap_, kZionPerm_All, &cap));
-  return cap;
+MappedMemoryRegion MappedMemoryRegion::ContiguousPhysical(uint64_t size) {
+  uint64_t vmmo_cap, paddr;
+  check(ZMemoryObjectCreateContiguous(size, &vmmo_cap, &paddr));
+
+  uint64_t vaddr;
+  check(ZAddressSpaceMap(gSelfVmasCap, 0, vmmo_cap, &vaddr));
+
+  return MappedMemoryRegion(vmmo_cap, paddr, vaddr, size);
 }
+
+MappedMemoryRegion MappedMemoryRegion::Default(uint64_t size) {
+  uint64_t vmmo_cap;
+  check(ZMemoryObjectCreate(size, &vmmo_cap));
+
+  uint64_t vaddr;
+  check(ZAddressSpaceMap(gSelfVmasCap, 0, vmmo_cap, &vaddr));
+
+  return MappedMemoryRegion(vmmo_cap, 0, vaddr, size);
+}
+
+MappedMemoryRegion MappedMemoryRegion::FromCapability(z_cap_t vmmo_cap) {
+  uint64_t vaddr;
+  check(ZAddressSpaceMap(gSelfVmasCap, 0, vmmo_cap, &vaddr));
+
+  // FIXME: get the size here.
+  return MappedMemoryRegion(vmmo_cap, 0, vaddr, 0);
+}

@@ -13,8 +13,8 @@ AhciDevice::AhciDevice(AhciPort* port) : port_struct_(port) {
   // 0x0-0x400 -> Command List
   // 0x400-0x500 -> Received FIS
   // 0x500-0x2500 -> Command Tables (0x100 each) (Max PRDT Length is 8 for now)
-  uint64_t paddr;
-  command_structures_ = OwnedMemoryRegion::ContiguousPhysical(0x2500, &paddr);
+  command_structures_ = MappedMemoryRegion::ContiguousPhysical(0x2500);
+  uint64_t paddr = command_structures_.paddr();
 
   command_list_ = reinterpret_cast<CommandList*>(command_structures_.vaddr());
   port_struct_->command_list_base = paddr;

@@ -26,7 +26,7 @@ class AhciDevice {
 
  private:
   AhciPort* port_struct_ = nullptr;
-  OwnedMemoryRegion command_structures_;
+  MappedMemoryRegion command_structures_;
 
   CommandList* command_list_ = nullptr;
   ReceivedFis* received_fis_ = nullptr;

@@ -21,8 +21,8 @@ void interrupt_thread(void* void_driver) {
 }  // namespace
 
 glcr::ErrorOr<glcr::UniquePtr<AhciDriver>> AhciDriver::Init(
-    OwnedMemoryRegion&& pci_region) {
-  glcr::UniquePtr<AhciDriver> driver(new AhciDriver(glcr::Move(pci_region)));
+    MappedMemoryRegion pci_region) {
+  glcr::UniquePtr<AhciDriver> driver(new AhciDriver(pci_region));
   // RET_ERR(driver->LoadCapabilities());
   RET_ERR(driver->LoadHbaRegisters());
   RET_ERR(driver->LoadDevices());
@@ -192,7 +192,7 @@ glcr::ErrorCode AhciDriver::RegisterIrq() {
 
 glcr::ErrorCode AhciDriver::LoadHbaRegisters() {
   ahci_region_ =
-      OwnedMemoryRegion ::DirectPhysical(pci_device_header_->abar, 0x1100);
+      MappedMemoryRegion::DirectPhysical(pci_device_header_->abar, 0x1100);
   ahci_hba_ = reinterpret_cast<AhciHba*>(ahci_region_.vaddr());
   num_ports_ = (ahci_hba_->capabilities & 0x1F) + 1;
   num_commands_ = ((ahci_hba_->capabilities & 0x1F00) >> 8) + 1;

@@ -11,7 +11,7 @@
 class AhciDriver {
  public:
   static glcr::ErrorOr<glcr::UniquePtr<AhciDriver>> Init(
-      OwnedMemoryRegion&& ahci_phys);
+      MappedMemoryRegion ahci_phys);
   glcr::ErrorCode RegisterIrq();
 
   void InterruptLoop();
@@ -22,9 +22,9 @@ class AhciDriver {
   void DumpPorts();
 
  private:
-  OwnedMemoryRegion pci_region_;
+  MappedMemoryRegion pci_region_;
   PciDeviceHeader* pci_device_header_ = nullptr;
-  OwnedMemoryRegion ahci_region_;
+  MappedMemoryRegion ahci_region_;
   AhciHba* ahci_hba_ = nullptr;
 
   // TODO: Allocate these dynamically.
@@ -40,8 +40,8 @@ class AhciDriver {
   glcr::ErrorCode LoadHbaRegisters();
   glcr::ErrorCode LoadDevices();
 
-  AhciDriver(OwnedMemoryRegion&& pci_region)
-      : pci_region_(glcr::Move(pci_region)),
+  AhciDriver(MappedMemoryRegion pci_region)
+      : pci_region_(pci_region),
         pci_device_header_(
             reinterpret_cast<PciDeviceHeader*>(pci_region_.vaddr())) {}
 };

@@ -18,9 +18,9 @@ uint64_t main(uint64_t init_port_cap) {
   Empty empty;
   AhciInfo ahci;
   RET_ERR(stub.GetAhciInfo(empty, ahci));
-  OwnedMemoryRegion ahci_region =
-      OwnedMemoryRegion::FromCapability(ahci.ahci_region());
-  ASSIGN_OR_RETURN(auto driver, AhciDriver::Init(glcr::Move(ahci_region)));
+  MappedMemoryRegion ahci_region =
+      MappedMemoryRegion::FromCapability(ahci.ahci_region());
+  ASSIGN_OR_RETURN(auto driver, AhciDriver::Init(ahci_region));
 
   ASSIGN_OR_RETURN(glcr::UniquePtr<DenaliServer> server,
                    DenaliServer::Create(*driver));

@@ -18,11 +18,10 @@ glcr::ErrorCode DenaliServer::HandleRead(const ReadRequest& req,
   ASSIGN_OR_RETURN(Mutex mutex, Mutex::Create());
   RET_ERR(mutex.Lock());
 
-  uint64_t paddr;
-  OwnedMemoryRegion region =
-      OwnedMemoryRegion::ContiguousPhysical(req.size() * 512, &paddr);
+  MappedMemoryRegion region =
+      MappedMemoryRegion::ContiguousPhysical(req.size() * 512);
 
-  DmaReadCommand command(req.lba(), req.size(), paddr, mutex);
+  DmaReadCommand command(req.lba(), req.size(), region.paddr(), mutex);
   device->IssueCommand(&command);
 
   // Wait for read operation to complete.
@@ -31,7 +30,7 @@ glcr::ErrorCode DenaliServer::HandleRead(const ReadRequest& req,
 
   resp.set_device_id(req.device_id());
   resp.set_size(req.size());
-  resp.set_memory(region.DuplicateCap());
+  resp.set_memory(region.cap());
   return glcr::OK;
 }
 
@@ -41,9 +40,8 @@ glcr::ErrorCode DenaliServer::HandleReadMany(const ReadManyRequest& req,
   ASSIGN_OR_RETURN(Mutex mutex, Mutex::Create());
   RET_ERR(mutex.Lock());
 
-  uint64_t region_paddr;
-  OwnedMemoryRegion region = OwnedMemoryRegion::ContiguousPhysical(
-      req.lba().size() * 512, &region_paddr);
+  MappedMemoryRegion region =
+      MappedMemoryRegion::ContiguousPhysical(req.lba().size() * 512);
 
   auto& vec = req.lba();
   uint64_t curr_run_start = 0;
@@ -53,7 +51,7 @@ glcr::ErrorCode DenaliServer::HandleReadMany(const ReadManyRequest& req,
     }
     uint64_t lba = vec.at(curr_run_start);
     uint64_t size = (i - curr_run_start) + 1;
-    uint64_t paddr = region_paddr + curr_run_start * 512;
+    uint64_t paddr = region.paddr() + curr_run_start * 512;
     DmaReadCommand command(lba, size, paddr, mutex);
     device->IssueCommand(&command);
 
@@ -65,6 +63,6 @@ glcr::ErrorCode DenaliServer::HandleReadMany(const ReadManyRequest& req,
 
   resp.set_device_id(req.device_id());
   resp.set_size(req.lba().size());
-  resp.set_memory(region.DuplicateCap());
+  resp.set_memory(region.cap());
   return glcr::OK;
 }

@@ -17,11 +17,11 @@ class ReadRequest {
   void ParseFromBytes(const glcr::ByteBuffer&, uint64_t offset, const glcr::CapBuffer&);
   uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset) const;
   uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset, glcr::CapBuffer&) const;
-  const uint64_t& device_id() const { return device_id_; }
+  uint64_t device_id() const { return device_id_; }
   void set_device_id(const uint64_t& value) { device_id_ = value; }
-  const uint64_t& lba() const { return lba_; }
+  uint64_t lba() const { return lba_; }
   void set_lba(const uint64_t& value) { lba_ = value; }
-  const uint64_t& size() const { return size_; }
+  uint64_t size() const { return size_; }
   void set_size(const uint64_t& value) { size_ = value; }
 
  private:
@@ -43,7 +43,7 @@ class ReadManyRequest {
   void ParseFromBytes(const glcr::ByteBuffer&, uint64_t offset, const glcr::CapBuffer&);
   uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset) const;
   uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset, glcr::CapBuffer&) const;
-  const uint64_t& device_id() const { return device_id_; }
+  uint64_t device_id() const { return device_id_; }
   void set_device_id(const uint64_t& value) { device_id_ = value; }
   const glcr::Vector<uint64_t>& lba() const { return lba_; }
   void add_lba(const uint64_t& value) { lba_.PushBack(value); }
@@ -66,11 +66,11 @@ class ReadResponse {
   void ParseFromBytes(const glcr::ByteBuffer&, uint64_t offset, const glcr::CapBuffer&);
   uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset) const;
   uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset, glcr::CapBuffer&) const;
-  const uint64_t& device_id() const { return device_id_; }
+  uint64_t device_id() const { return device_id_; }
   void set_device_id(const uint64_t& value) { device_id_ = value; }
-  const uint64_t& size() const { return size_; }
+  uint64_t size() const { return size_; }
   void set_size(const uint64_t& value) { size_ = value; }
-  const z_cap_t& memory() const { return memory_; }
+  z_cap_t memory() const { return memory_; }
   void set_memory(const z_cap_t& value) { memory_ = value; }
 
  private:

@@ -1,10 +1,12 @@
 #include "framebuffer/framebuffer.h"
 
+#include <mammoth/memory_region.h>
+
 Framebuffer::Framebuffer(const FramebufferInfo& info) : fb_info_(info) {
   uint64_t buff_size_bytes = fb_info_.height() * fb_info_.pitch();
-  fb_memory_ = OwnedMemoryRegion::DirectPhysical(fb_info_.address_phys(),
-                                                 buff_size_bytes);
-  fb_ = reinterpret_cast<uint32_t*>(fb_memory_.vaddr());
+  MappedMemoryRegion region = MappedMemoryRegion::DirectPhysical(
+      fb_info_.address_phys(), buff_size_bytes);
+  fb_ = reinterpret_cast<uint32_t*>(region.vaddr());
 }
 
 void Framebuffer::DrawPixel(uint32_t row, uint32_t col, uint32_t pixel) {

@@ -1,6 +1,5 @@
 #pragma once
 
-#include <mammoth/memory_region.h>
 #include <yellowstone/yellowstone.yunq.h>
 
 class Framebuffer {
@@ -13,7 +12,5 @@ class Framebuffer {
   // FIXME: Implement Yunq copy or move so we
   // don't have to store a reference here.
   const FramebufferInfo& fb_info_;
-
-  OwnedMemoryRegion fb_memory_;
   uint32_t* fb_;
 };

@@ -13,12 +13,12 @@ glcr::ErrorOr<glcr::SharedPtr<Ext2BlockReader>> Ext2BlockReader::Init(
   req.set_size(2);
   ReadResponse resp;
   RET_ERR(client.Read(req, resp));
-  OwnedMemoryRegion superblock =
-      OwnedMemoryRegion::FromCapability(resp.memory());
+  MappedMemoryRegion superblock =
+      MappedMemoryRegion::FromCapability(resp.memory());
 
   return glcr::SharedPtr<Ext2BlockReader>(
       new Ext2BlockReader(glcr::Move(client), denali_info.device_id(),
-                          denali_info.lba_offset(), glcr::Move(superblock)));
+                          denali_info.lba_offset(), superblock));
 }
 
 Superblock* Ext2BlockReader::GetSuperblock() {
@@ -59,11 +59,11 @@ uint64_t Ext2BlockReader::InodeTableBlockSize() {
   return (InodeSize() * GetSuperblock()->inodes_per_group) / BlockSize();
 }
 
-glcr::ErrorOr<OwnedMemoryRegion> Ext2BlockReader::ReadBlock(
+glcr::ErrorOr<MappedMemoryRegion> Ext2BlockReader::ReadBlock(
     uint64_t block_number) {
   return ReadBlocks(block_number, 1);
 }
-glcr::ErrorOr<OwnedMemoryRegion> Ext2BlockReader::ReadBlocks(
+glcr::ErrorOr<MappedMemoryRegion> Ext2BlockReader::ReadBlocks(
     uint64_t block_number, uint64_t num_blocks) {
   ReadRequest req;
   req.set_device_id(device_id_);
@@ -71,10 +71,10 @@ glcr::ErrorOr<OwnedMemoryRegion> Ext2BlockReader::ReadBlocks(
   req.set_size(num_blocks * SectorsPerBlock());
   ReadResponse resp;
   RET_ERR(denali_.Read(req, resp));
-  return OwnedMemoryRegion::FromCapability(resp.memory());
+  return MappedMemoryRegion::FromCapability(resp.memory());
 }
 
-glcr::ErrorOr<OwnedMemoryRegion> Ext2BlockReader::ReadBlocks(
+glcr::ErrorOr<MappedMemoryRegion> Ext2BlockReader::ReadBlocks(
     const glcr::Vector<uint64_t>& block_list) {
   ReadManyRequest req;
   req.set_device_id(device_id_);
@@ -88,13 +88,13 @@ glcr::ErrorOr<OwnedMemoryRegion> Ext2BlockReader::ReadBlocks(
   }
   ReadResponse resp;
   RET_ERR(denali_.ReadMany(req, resp));
-  return OwnedMemoryRegion::FromCapability(resp.memory());
+  return MappedMemoryRegion::FromCapability(resp.memory());
 }
 
 Ext2BlockReader::Ext2BlockReader(DenaliClient&& denali, uint64_t device_id,
                                  uint64_t lba_offset,
-                                 OwnedMemoryRegion&& super_block)
+                                 MappedMemoryRegion super_block)
     : denali_(glcr::Move(denali)),
       device_id_(device_id),
       lba_offset_(lba_offset),
-      super_block_region_(glcr::Move(super_block)) {}
+      super_block_region_(super_block) {}

@@ -29,21 +29,21 @@ class Ext2BlockReader {
   // because the last table will likely be smaller.
   uint64_t InodeTableBlockSize();
 
-  glcr::ErrorOr<OwnedMemoryRegion> ReadBlock(uint64_t block_number);
-  glcr::ErrorOr<OwnedMemoryRegion> ReadBlocks(uint64_t block_number,
+  glcr::ErrorOr<MappedMemoryRegion> ReadBlock(uint64_t block_number);
+  glcr::ErrorOr<MappedMemoryRegion> ReadBlocks(uint64_t block_number,
                                                uint64_t num_blocks);
 
-  glcr::ErrorOr<OwnedMemoryRegion> ReadBlocks(
+  glcr::ErrorOr<MappedMemoryRegion> ReadBlocks(
       const glcr::Vector<uint64_t>& block_list);
 
  private:
   DenaliClient denali_;
   uint64_t device_id_;
   uint64_t lba_offset_;
-  OwnedMemoryRegion super_block_region_;
+  MappedMemoryRegion super_block_region_;
 
   Ext2BlockReader(DenaliClient&& denali, uint64_t device_id,
-                  uint64_t lba_offset, OwnedMemoryRegion&& super_block);
+                  uint64_t lba_offset, MappedMemoryRegion super_block);
 
   uint64_t SectorsPerBlock();
 };

@@ -8,10 +8,11 @@ glcr::ErrorOr<Ext2Driver> Ext2Driver::Init(const DenaliInfo& denali_info) {
                    Ext2BlockReader::Init(glcr::Move(denali_info)));
 
   ASSIGN_OR_RETURN(
-      OwnedMemoryRegion bgdt,
+      MappedMemoryRegion bgdt,
       reader->ReadBlocks(reader->BgdtBlockNum(), reader->BgdtBlockSize()));
-  glcr::UniquePtr<InodeTable> inode_table(
-      new InodeTable(reader, glcr::Move(bgdt)));
+  BlockGroupDescriptor* bgds =
+      reinterpret_cast<BlockGroupDescriptor*>(bgdt.vaddr());
+  glcr::UniquePtr<InodeTable> inode_table(new InodeTable(reader, bgds));
 
   return Ext2Driver(reader, glcr::Move(inode_table));
 }
@@ -63,7 +64,7 @@ glcr::ErrorOr<glcr::Vector<DirEntry>> Ext2Driver::ReadDirectory(
   glcr::Vector<DirEntry> directory;
   for (uint64_t i = 0; i < real_block_cnt; i++) {
     dbgln("Getting block {x}", inode->block[i]);
-    ASSIGN_OR_RETURN(OwnedMemoryRegion block,
+    ASSIGN_OR_RETURN(MappedMemoryRegion block,
                      ext2_reader_->ReadBlock(inode->block[i]));
     uint64_t addr = block.vaddr();
     while (addr < block.vaddr() + ext2_reader_->BlockSize()) {
@@ -86,7 +87,7 @@ glcr::ErrorOr<glcr::Vector<DirEntry>> Ext2Driver::ReadDirectory(
   return directory;
 }
 
-glcr::ErrorOr<OwnedMemoryRegion> Ext2Driver::ReadFile(uint64_t inode_number) {
+glcr::ErrorOr<MappedMemoryRegion> Ext2Driver::ReadFile(uint64_t inode_number) {
   ASSIGN_OR_RETURN(Inode * inode, inode_table_->GetInode(inode_number));
 
   if (!(inode->mode & 0x8000)) {
@@ -108,7 +109,7 @@ glcr::ErrorOr<OwnedMemoryRegion> Ext2Driver::ReadFile(uint64_t inode_number) {
     return glcr::UNIMPLEMENTED;
   }
 
-  OwnedMemoryRegion indirect_block;
+  MappedMemoryRegion indirect_block;
   if (inode->block[12]) {
     ASSIGN_OR_RETURN(indirect_block, ext2_reader_->ReadBlock(inode->block[12]));
   }

@@ -18,7 +18,7 @@ class Ext2Driver {
 
   glcr::ErrorOr<glcr::Vector<DirEntry>> ReadDirectory(uint32_t inode_number);
 
-  glcr::ErrorOr<OwnedMemoryRegion> ReadFile(uint64_t inode_number);
+  glcr::ErrorOr<MappedMemoryRegion> ReadFile(uint64_t inode_number);
 
  private:
   glcr::SharedPtr<Ext2BlockReader> ext2_reader_;

@@ -3,10 +3,8 @@
 #include <mammoth/debug.h>
 
 InodeTable::InodeTable(const glcr::SharedPtr<Ext2BlockReader>& reader,
-                       OwnedMemoryRegion&& bgdt_region)
-    : ext2_reader_(reader),
-      bgdt_region_(glcr::Move(bgdt_region)),
-      bgdt_(reinterpret_cast<BlockGroupDescriptor*>(bgdt_region_.vaddr())) {
+                       BlockGroupDescriptor* bgdt)
+    : ext2_reader_(reader), bgdt_(bgdt) {
   inode_tables_.Resize(ext2_reader_->NumberOfBlockGroups());
 }
 

@@ -8,16 +8,15 @@
 class InodeTable {
  public:
   InodeTable(const glcr::SharedPtr<Ext2BlockReader>& driver,
-             OwnedMemoryRegion&& bgdt_region);
+             BlockGroupDescriptor* bgdt);
 
   glcr::ErrorOr<Inode*> GetInode(uint32_t inode_num);
 
  private:
   glcr::SharedPtr<Ext2BlockReader> ext2_reader_;
-  OwnedMemoryRegion bgdt_region_;
   BlockGroupDescriptor* bgdt_;
 
-  glcr::Vector<OwnedMemoryRegion> inode_tables_;
+  glcr::Vector<MappedMemoryRegion> inode_tables_;
 
   glcr::ErrorOr<Inode*> GetRootOfInodeTable(uint64_t block_group_num);
 };

@@ -17,7 +17,7 @@ class OpenFileRequest {
   void ParseFromBytes(const glcr::ByteBuffer&, uint64_t offset, const glcr::CapBuffer&);
   uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset) const;
   uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset, glcr::CapBuffer&) const;
-  const glcr::String& path() const { return path_; }
+  glcr::String path() const { return path_; }
   void set_path(const glcr::String& value) { path_ = value; }
 
  private:
@@ -37,11 +37,11 @@ class OpenFileResponse {
   void ParseFromBytes(const glcr::ByteBuffer&, uint64_t offset, const glcr::CapBuffer&);
   uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset) const;
   uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset, glcr::CapBuffer&) const;
-  const glcr::String& path() const { return path_; }
+  glcr::String path() const { return path_; }
   void set_path(const glcr::String& value) { path_ = value; }
-  const uint64_t& size() const { return size_; }
+  uint64_t size() const { return size_; }
   void set_size(const uint64_t& value) { size_ = value; }
-  const z_cap_t& memory() const { return memory_; }
+  z_cap_t memory() const { return memory_; }
   void set_memory(const z_cap_t& value) { memory_ = value; }
 
  private:

@@ -38,7 +38,7 @@ glcr::ErrorCode VFSServer::HandleOpenFile(const OpenFileRequest& request,
   }
 
   uint64_t inode_num;
-  OwnedMemoryRegion region;
+  MappedMemoryRegion region;
   for (uint64_t j = 0; j < files.size(); j++) {
     if (path_tokens.at(path_tokens.size() - 1) ==
         glcr::StringView(files.at(j).name, files.at(j).name_len)) {
@@ -53,10 +53,7 @@ glcr::ErrorCode VFSServer::HandleOpenFile(const OpenFileRequest& request,
   }
 
   response.set_path(request.path());
-  // FIXME: There isn't really a reason we need to map the file into memory then
-  // duplicate the cap. In the future just get the cap from the read then pass
-  // it to the caller directly.
-  response.set_memory(region.DuplicateCap());
+  response.set_memory(region.cap());
   // TODO: Consider folding this up into the actual read call.
   ASSIGN_OR_RETURN(Inode * inode, driver_.GetInode(inode_num));
   // FIXME: This technically only sets the lower 32 bits.

@@ -64,8 +64,8 @@ glcr::ErrorCode GptReader::ParsePartitionTables() {
   req.set_size(2);
   ReadResponse resp;
   RET_ERR(denali_->Read(req, resp));
-  OwnedMemoryRegion lba_1_and_2 =
-      OwnedMemoryRegion::FromCapability(resp.memory());
+  MappedMemoryRegion lba_1_and_2 =
+      MappedMemoryRegion::FromCapability(resp.memory());
   uint16_t* mbr_sig = reinterpret_cast<uint16_t*>(lba_1_and_2.vaddr() + 0x1FE);
   if (*mbr_sig != 0xAA55) {
     dbgln("Invalid MBR Sig: {x}", *mbr_sig);
@@ -106,8 +106,8 @@ glcr::ErrorCode GptReader::ParsePartitionTables() {
   req.set_lba(header->lba_partition_entries);
   req.set_size(num_blocks);
   RET_ERR(denali_->Read(req, resp));
-  OwnedMemoryRegion part_table =
-      OwnedMemoryRegion::FromCapability(resp.memory());
+  MappedMemoryRegion part_table =
+      MappedMemoryRegion::FromCapability(resp.memory());
   for (uint64_t i = 0; i < num_partitions; i++) {
     PartitionEntry* entry = reinterpret_cast<PartitionEntry*>(
         part_table.vaddr() + (i * entry_size));

@@ -17,9 +17,9 @@ class RegisterEndpointRequest {
   void ParseFromBytes(const glcr::ByteBuffer&, uint64_t offset, const glcr::CapBuffer&);
   uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset) const;
   uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset, glcr::CapBuffer&) const;
-  const glcr::String& endpoint_name() const { return endpoint_name_; }
+  glcr::String endpoint_name() const { return endpoint_name_; }
   void set_endpoint_name(const glcr::String& value) { endpoint_name_ = value; }
-  const z_cap_t& endpoint_capability() const { return endpoint_capability_; }
+  z_cap_t endpoint_capability() const { return endpoint_capability_; }
   void set_endpoint_capability(const z_cap_t& value) { endpoint_capability_ = value; }
 
  private:
@@ -57,9 +57,9 @@ class AhciInfo {
   void ParseFromBytes(const glcr::ByteBuffer&, uint64_t offset, const glcr::CapBuffer&);
   uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset) const;
   uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset, glcr::CapBuffer&) const;
-  const z_cap_t& ahci_region() const { return ahci_region_; }
+  z_cap_t ahci_region() const { return ahci_region_; }
   void set_ahci_region(const z_cap_t& value) { ahci_region_ = value; }
-  const uint64_t& region_length() const { return region_length_; }
+  uint64_t region_length() const { return region_length_; }
   void set_region_length(const uint64_t& value) { region_length_ = value; }
 
  private:
@@ -80,29 +80,29 @@ class FramebufferInfo {
   void ParseFromBytes(const glcr::ByteBuffer&, uint64_t offset, const glcr::CapBuffer&);
   uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset) const;
   uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset, glcr::CapBuffer&) const;
-  const uint64_t& address_phys() const { return address_phys_; }
+  uint64_t address_phys() const { return address_phys_; }
   void set_address_phys(const uint64_t& value) { address_phys_ = value; }
-  const uint64_t& width() const { return width_; }
+  uint64_t width() const { return width_; }
   void set_width(const uint64_t& value) { width_ = value; }
-  const uint64_t& height() const { return height_; }
+  uint64_t height() const { return height_; }
   void set_height(const uint64_t& value) { height_ = value; }
-  const uint64_t& pitch() const { return pitch_; }
+  uint64_t pitch() const { return pitch_; }
   void set_pitch(const uint64_t& value) { pitch_ = value; }
-  const uint64_t& bpp() const { return bpp_; }
+  uint64_t bpp() const { return bpp_; }
   void set_bpp(const uint64_t& value) { bpp_ = value; }
-  const uint64_t& memory_model() const { return memory_model_; }
+  uint64_t memory_model() const { return memory_model_; }
   void set_memory_model(const uint64_t& value) { memory_model_ = value; }
-  const uint64_t& red_mask_size() const { return red_mask_size_; }
+  uint64_t red_mask_size() const { return red_mask_size_; }
   void set_red_mask_size(const uint64_t& value) { red_mask_size_ = value; }
-  const uint64_t& red_mask_shift() const { return red_mask_shift_; }
+  uint64_t red_mask_shift() const { return red_mask_shift_; }
   void set_red_mask_shift(const uint64_t& value) { red_mask_shift_ = value; }
-  const uint64_t& green_mask_size() const { return green_mask_size_; }
+  uint64_t green_mask_size() const { return green_mask_size_; }
   void set_green_mask_size(const uint64_t& value) { green_mask_size_ = value; }
-  const uint64_t& green_mask_shift() const { return green_mask_shift_; }
+  uint64_t green_mask_shift() const { return green_mask_shift_; }
   void set_green_mask_shift(const uint64_t& value) { green_mask_shift_ = value; }
-  const uint64_t& blue_mask_size() const { return blue_mask_size_; }
+  uint64_t blue_mask_size() const { return blue_mask_size_; }
   void set_blue_mask_size(const uint64_t& value) { blue_mask_size_ = value; }
-  const uint64_t& blue_mask_shift() const { return blue_mask_shift_; }
+  uint64_t blue_mask_shift() const { return blue_mask_shift_; }
   void set_blue_mask_shift(const uint64_t& value) { blue_mask_shift_ = value; }
 
  private:
@@ -133,11 +133,11 @@ class DenaliInfo {
   void ParseFromBytes(const glcr::ByteBuffer&, uint64_t offset, const glcr::CapBuffer&);
   uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset) const;
   uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset, glcr::CapBuffer&) const;
-  const z_cap_t& denali_endpoint() const { return denali_endpoint_; }
+  z_cap_t denali_endpoint() const { return denali_endpoint_; }
   void set_denali_endpoint(const z_cap_t& value) { denali_endpoint_ = value; }
-  const uint64_t& device_id() const { return device_id_; }
+  uint64_t device_id() const { return device_id_; }
   void set_device_id(const uint64_t& value) { device_id_ = value; }
-  const uint64_t& lba_offset() const { return lba_offset_; }
+  uint64_t lba_offset() const { return lba_offset_; }
   void set_lba_offset(const uint64_t& value) { lba_offset_ = value; }
 
  private:

@@ -43,8 +43,8 @@ uint64_t main(uint64_t port_cap) {
   OpenFileResponse response;
   check(vfs_client->OpenFile(request, response));
 
-  OwnedMemoryRegion filemem =
-      OwnedMemoryRegion::FromCapability(response.memory());
+  MappedMemoryRegion filemem =
+      MappedMemoryRegion::FromCapability(response.memory());
   glcr::String file(reinterpret_cast<const char*>(filemem.vaddr()),
                     response.size());
 

@@ -60,8 +60,8 @@ glcr::ErrorCode YellowstoneServer::HandleGetAhciInfo(const Empty&,
 glcr::ErrorCode YellowstoneServer::HandleGetFramebufferInfo(
     const Empty&, FramebufferInfo& info) {
   // FIXME: Don't do this for each request.
-  OwnedMemoryRegion region =
-      OwnedMemoryRegion::FromCapability(gBootFramebufferVmmoCap);
+  MappedMemoryRegion region =
+      MappedMemoryRegion::FromCapability(gBootFramebufferVmmoCap);
   ZFramebufferInfo* fb = reinterpret_cast<ZFramebufferInfo*>(region.vaddr());
 
   info.set_address_phys(fb->address_phys);
@@ -92,7 +92,7 @@ glcr::ErrorCode YellowstoneServer::HandleGetDenali(const Empty&,
 
 glcr::ErrorCode YellowstoneServer::HandleRegisterEndpoint(
    const RegisterEndpointRequest& req, Empty&) {
-  dbgln("Registering {}.", req.endpoint_name().view());
+  dbgln("Registering {}.", req.endpoint_name());
   if (req.endpoint_name() == "denali") {
     // FIXME: Rather than blocking and calling the denali service
     // immediately we should signal the main thread that it can continue init.

@@ -23,7 +23,7 @@ class {{message.name}} {
 
 {%- for field in message.fields %}
 {%- if not field.repeated %}
-  const {{field.cpp_type()}}& {{field.name}}() const { return {{field.name}}_; }
+  {{field.cpp_type()}} {{field.name}}() const { return {{field.name}}_; }
   void set_{{field.name}}(const {{field.cpp_type()}}& value) { {{field.name}}_ = value; }
 {%- else %}
   const glcr::Vector<{{field.cpp_type()}}>& {{field.name}}() const { return {{field.name}}_; }

@@ -11,7 +11,6 @@ add_executable(zion
   interrupt/interrupt.cpp
   interrupt/interrupt_enter.s
   interrupt/timer.cpp
-  lib/memory_mapping_tree.cpp
   lib/message_queue.cpp
   loader/init_loader.cpp
   memory/kernel_heap.cpp
@@ -64,7 +63,7 @@ target_link_libraries(zion
 # -mno-red-zone -- Don't put data below the stack pointer (clobbered by interrupts).
 # -mcmodel=kernel -- Assume the kernel code is running in the higher half.
 # -mgeneral-regs-only -- Prevent GCC from using a whole host of nonsense registers (that we have to enable).
-set(_Z_COMPILE_FLAGS "${CMAKE_CXX_FLAGS} -c -ffreestanding -fno-rtti -fno-exceptions -fno-use-cxa-atexit -nostdlib -mabi=sysv -mno-red-zone -mcmodel=kernel -mgeneral-regs-only")
+set(_Z_COMPILE_FLAGS "${CMAKE_CXX_FLAGS} -c -ffreestanding -fno-rtti -fno-exceptions -nostdlib -mabi=sysv -mno-red-zone -mcmodel=kernel -mgeneral-regs-only")
 
 set(_Z_LINK_SCRIPT "${CMAKE_CURRENT_SOURCE_DIR}/linker.ld")
 

@@ -17,8 +17,7 @@ uint64_t CapabilityTable::AddExistingCapability(
 glcr::RefPtr<Capability> CapabilityTable::GetCapability(uint64_t id) {
   MutexHolder h(lock_);
   if (!capabilities_.Contains(id)) {
-    dbgln("Bad cap access {}", id);
-    return {};
+    panic("Bad cap access {}", id);
   }
   return capabilities_.at(id);
 }
@@ -26,8 +25,7 @@ glcr::RefPtr<Capability> CapabilityTable::GetCapability(uint64_t id) {
 glcr::RefPtr<Capability> CapabilityTable::ReleaseCapability(uint64_t id) {
   MutexHolder h(lock_);
   if (!capabilities_.Contains(id)) {
-    dbgln("Bad cap release {}", id);
-    return {};
+    panic("Bad cap release {}", id);
   }
   auto cap = capabilities_.at(id);
   (void)capabilities_.Delete(id);

@@ -9,7 +9,7 @@
 void dbgln(const glcr::StringView& str);
 
 template <typename... Args>
-void dbgln(const char* str, Args&&... args) {
+void dbgln(const char* str, Args... args) {
   char buffer[256];
   glcr::FixedStringBuilder builder(buffer, 256);
   glcr::StrFormatIntoBuffer(builder, str, args...);
@@ -17,12 +17,12 @@ void dbgln(const char* str, Args&&... args) {
 }
 
 template <typename... Args>
-void dbgln_large(const char* str, Args&&... args) {
+void dbgln_large(const char* str, Args... args) {
   dbgln(glcr::StrFormat(str, args...));
 }
 
 template <typename... Args>
-void panic(const char* str, Args&&... args) {
+void panic(const char* str, Args... args) {
   dbgln(str, args...);
   dbgln("PANIC");
   asm volatile("cli; hlt;");
@@ -35,5 +35,5 @@ void panic(const char* str, Args&&... args) {
   } \
 }
 #define UNREACHABLE \
-  panic("Unreachable {}, {}", glcr::StringView(__FILE__), __LINE__); \
+  panic("Unreachable {}, {}", __FILE__, __LINE__); \
   __builtin_unreachable();

@@ -19,8 +19,6 @@ SYS1(ThreadWait, z_cap_t, thread_cap);
 
 SYS4(AddressSpaceMap, z_cap_t, vmas_cap, uint64_t, vmas_offset, z_cap_t,
      vmmo_cap, uint64_t*, vaddr);
-SYS3(AddressSpaceUnmap, z_cap_t, vmas_cap, uint64_t, lower_addr, uint64_t,
-     upper_addr);
 
 SYS2(MemoryObjectCreate, uint64_t, size, z_cap_t*, vmmo_cap);
 SYS3(MemoryObjectCreatePhysical, uint64_t, paddr, uint64_t, size, z_cap_t*,
@@ -30,7 +28,6 @@ SYS3(MemoryObjectCreateContiguous, uint64_t, size, z_cap_t*, vmmo_cap,
 
 SYS4(MemoryObjectDuplicate, z_cap_t, vmmo_cap, uint64_t, base_offset, uint64_t,
      length, z_cap_t*, new_vmmo_cap);
-SYS2(MemoryObjectInspect, z_cap_t, vmmo_cap, uint64_t*, size);
 
 SYS2(ChannelCreate, z_cap_t*, channel1, z_cap_t*, channel2);
 SYS5(ChannelSend, z_cap_t, chan_cap, uint64_t, num_bytes, const void*, data,
@@ -59,7 +56,6 @@ SYS5(ReplyPortRecv, z_cap_t, reply_port_cap, uint64_t*, num_bytes, void*, data,
      uint64_t*, num_caps, z_cap_t*, caps);
 
 SYS3(CapDuplicate, z_cap_t, cap_in, z_perm_t, perm_mask, z_cap_t*, cap_out);
-SYS1(CapRelease, z_cap_t, cap);
 
 SYS1(MutexCreate, z_cap_t*, mutex_cap);
 SYS1(MutexLock, z_cap_t, mutex_cap);

@@ -21,14 +21,13 @@ const uint64_t kZionThreadWait = 0x13;
 
 // Memory Calls
-const uint64_t kZionAddressSpaceMap = 0x21;
-const uint64_t kZionAddressSpaceUnmap = 0x22;
+const uint64_t kZionAddressSpaceUnMap = 0x21;
 
 const uint64_t kZionMemoryObjectCreate = 0x30;
 const uint64_t kZionMemoryObjectCreatePhysical = 0x31;
 const uint64_t kZionMemoryObjectCreateContiguous = 0x32;
 
 const uint64_t kZionMemoryObjectDuplicate = 0x38;
-const uint64_t kZionMemoryObjectInspect = 0x39;
 
 // IPC Calls
 const uint64_t kZionChannelCreate = 0x40;
@@ -54,7 +53,6 @@ const uint64_t kZionEndpointCall = 0x65;
 
 // Capability Calls
 const uint64_t kZionCapDuplicate = 0x70;
-const uint64_t kZionCapRelease = 0x71;
 
 // Syncronization Calls
 const uint64_t kZionMutexCreate = 0x80;
@ -1,93 +0,0 @@
|
|||
#include "lib/memory_mapping_tree.h"
|
||||
|
||||
#include <glacier/string/str_format.h>
|
||||
|
||||
#include "debug/debug.h"
|
||||
|
||||
template <>
|
||||
void glcr::StrFormatValue(glcr::StringBuilder& builder,
|
||||
const MemoryMappingTree::MemoryMapping& value,
|
||||
glcr::StringView opts) {
|
||||
builder.PushBack(
|
||||
glcr::StrFormat("Range {x}-{x}", value.vaddr_base, value.vaddr_limit));
|
||||
}
|
||||
|
||||
glcr::ErrorCode MemoryMappingTree::AddInMemoryObject(
|
||||
uint64_t vaddr, const glcr::RefPtr<MemoryObject>& object) {
|
||||
// TODO: This implementation is inefficient as it traverses the tree a lot, we
|
||||
// should have some solution with iterators to avoid this.
|
||||
auto predecessor_or = mapping_tree_.Predecessor(vaddr);
|
||||
if (predecessor_or && predecessor_or.value().get().vaddr_limit > vaddr) {
|
||||
return glcr::ALREADY_EXISTS;
|
||||
}
|
||||
if (mapping_tree_.Find(vaddr)) {
|
||||
return glcr::ALREADY_EXISTS;
|
||||
}
|
||||
auto successor_or = mapping_tree_.Successor(vaddr);
|
||||
if (successor_or &&
|
||||
successor_or.value().get().vaddr_base < vaddr + object->size()) {
|
||||
return glcr::ALREADY_EXISTS;
|
||||
}
|
||||
|
||||
mapping_tree_.Insert(vaddr, MemoryMapping{
|
||||
.vaddr_base = vaddr,
|
||||
.vaddr_limit = vaddr + object->size(),
|
||||
.mem_object = object,
|
||||
});
|
||||
|
||||
return glcr::OK;
|
||||
}
|
||||
|
||||
glcr::ErrorCode MemoryMappingTree::FreeMemoryRange(uint64_t vaddr_base,
|
||||
uint64_t vaddr_limit) {
|
||||
if (vaddr_limit <= vaddr_base) {
|
||||
return glcr::INVALID_ARGUMENT;
|
||||
}
|
||||
auto predecessor_or = mapping_tree_.Predecessor(vaddr_base);
|
||||
if (predecessor_or && predecessor_or.value().get().vaddr_limit > vaddr_base) {
|
||||
return glcr::FAILED_PRECONDITION;
|
||||
}
|
||||
auto last_predecessor_or = mapping_tree_.Predecessor(vaddr_limit);
|
||||
if (last_predecessor_or &&
|
||||
last_predecessor_or.value().get().vaddr_limit > vaddr_limit) {
|
||||
return glcr::FAILED_PRECONDITION;
|
||||
}
|
||||
|
||||
auto find_or = mapping_tree_.Find(vaddr_base);
|
||||
if (find_or) {
|
||||
dbgln("Mem addr {x} refcnt {}",
|
||||
(uint64_t)find_or.value().get().mem_object.get(),
|
||||
find_or.value().get().mem_object->ref_count());
|
||||
mapping_tree_.Delete(vaddr_base);
|
||||
}
|
||||
while (true) {
|
||||
auto successor_or = mapping_tree_.Successor(vaddr_base);
|
||||
if (!successor_or || successor_or.value().get().vaddr_base >= vaddr_limit) {
|
||||
return glcr::OK;
|
||||
}
|
||||
mapping_tree_.Delete(successor_or.value().get().vaddr_base);
|
||||
}
|
||||
}
|
||||
|
||||
glcr::ErrorOr<uint64_t> MemoryMappingTree::GetPhysicalPageAtVaddr(
|
||||
uint64_t vaddr) {
|
||||
auto mapping_or = GetMemoryMappingForAddr(vaddr);
|
||||
if (!mapping_or) {
|
||||
return glcr::NOT_FOUND;
|
||||
}
|
||||
MemoryMapping& mapping = mapping_or.value();
|
||||
return mapping.mem_object->PhysicalPageAtOffset(vaddr - mapping.vaddr_base);
|
||||
}
|
||||
|
||||
glcr::Optional<glcr::Ref<MemoryMappingTree::MemoryMapping>>
|
||||
MemoryMappingTree::GetMemoryMappingForAddr(uint64_t vaddr) {
|
||||
auto mapping_or = mapping_tree_.Predecessor(vaddr + 1);
|
||||
if (!mapping_or) {
|
||||
return mapping_or;
|
||||
}
|
||||
MemoryMapping& mapping = mapping_or.value();
|
||||
if (mapping.vaddr_base + mapping.mem_object->size() <= vaddr) {
|
||||
return {};
|
||||
}
|
||||
return mapping_or;
|
||||
}
|
||||
@@ -1,41 +0,0 @@
-#pragma once
-
-#include <glacier/container/binary_tree.h>
-
-#include "object/memory_object.h"
-
-/* MemoryMappingTree stores memory objects referred to by
- * ranges and ensures those ranges do not overlap.
- */
-class MemoryMappingTree {
- public:
-  MemoryMappingTree() = default;
-
-  MemoryMappingTree(const MemoryMappingTree&) = delete;
-  MemoryMappingTree(MemoryMappingTree&&) = delete;
-
-  glcr::ErrorCode AddInMemoryObject(uint64_t vaddr,
-                                    const glcr::RefPtr<MemoryObject>& object);
-
-  glcr::ErrorCode FreeMemoryRange(uint64_t vaddr_base, uint64_t vaddr_limit);
-
-  glcr::ErrorOr<uint64_t> GetPhysicalPageAtVaddr(uint64_t vaddr);
-
-  struct MemoryMapping {
-    uint64_t vaddr_base;
-    uint64_t vaddr_limit;
-    glcr::RefPtr<MemoryObject> mem_object;
-  };
-
- private:
-  // TODO: Consider adding a red-black tree implementation here.
-  // As is, this tree functions about as well as a linked list
-  // because mappings are likely to be added in near-perfect ascending order.
-  // Also worth considering creating a special tree implementation for
-  // just this purpose, or maybe a BinaryTree implementation that accepts
-  // ranges rather than a single key.
-  glcr::BinaryTree<uint64_t, MemoryMapping> mapping_tree_;
-
-  glcr::Optional<glcr::Ref<MemoryMapping>> GetMemoryMappingForAddr(
-      uint64_t vaddr);
-};
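The TODO above is worth dwelling on: an unbalanced binary search tree fed near-ascending keys degenerates into a right spine, so every lookup becomes linear. A toy, self-contained demonstration (standard C++, not kernel code):

#include <cstdint>
#include <cstdio>
#include <memory>

// Minimal unbalanced BST node, analogous in shape to BinaryTree's nodes.
struct Node {
  uint64_t key = 0;
  std::unique_ptr<Node> left;
  std::unique_ptr<Node> right;
};

void Insert(std::unique_ptr<Node>& root, uint64_t key) {
  if (!root) {
    root = std::make_unique<Node>();
    root->key = key;
  } else if (key < root->key) {
    Insert(root->left, key);
  } else {
    Insert(root->right, key);
  }
}

int Depth(const Node* n) {
  if (n == nullptr) return 0;
  int l = Depth(n->left.get());
  int r = Depth(n->right.get());
  return 1 + (l > r ? l : r);
}

int main() {
  std::unique_ptr<Node> root;
  // Ascending keys, like mappings added at ever-higher virtual addresses:
  // every insert lands on the rightmost node, forming a chain.
  for (uint64_t k = 1; k <= 1000; ++k) Insert(root, k);
  std::printf("depth = %d for 1000 nodes\n", Depth(root));  // prints 1000
}

A red-black tree (or any self-balancing variant) caps the depth at O(log n) regardless of insertion order, which is exactly what the comment proposes.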
@@ -76,8 +76,7 @@ uint64_t LoadElfProgram(Process& dest_proc, uint64_t base, uint64_t offset) {
 #endif
     auto mem_obj = glcr::MakeRefCounted<MemoryObject>(program.memsz);
     mem_obj->CopyBytesToObject(base + program.offset, program.filesz);
-    PANIC_ON_ERR(dest_proc.vmas()->MapInMemoryObject(program.vaddr, mem_obj),
-                 "Couldn't map in init program.");
+    dest_proc.vmas()->MapInMemoryObject(program.vaddr, mem_obj);
   }
   return header->entry;
 }
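The removed (-) side wraps the mapping call in PANIC_ON_ERR, so a failed mapping of the init binary halts the kernel rather than continuing with a half-built address space; the added (+) side ignores the result. A macro in this style commonly expands to something like the following hedged sketch (the project's actual definition is not shown in this diff and may differ):

// Hypothetical expansion of a PANIC_ON_ERR-style macro; the error
// formatting in particular is a guess.
#define PANIC_ON_ERR(expr, msg)                            \
  do {                                                     \
    glcr::ErrorCode panic_err_ = (expr);                   \
    if (panic_err_ != glcr::OK) {                          \
      panic("{}: error {}", (msg), (uint64_t)panic_err_);  \
    }                                                      \
  } while (0)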
@@ -107,7 +106,7 @@ void DumpModules() {
 #endif
 }
 
-const limine_file& GetInitProgram(const glcr::String& path) {
+const limine_file& GetInitProgram(glcr::String path) {
   const limine_module_response& resp = boot::GetModules();
   for (uint64_t i = 0; i < resp.module_count; i++) {
     const limine_file& file = *resp.modules[i];

@@ -115,7 +114,7 @@ const limine_file& GetInitProgram(const glcr::String& path) {
       return file;
     }
   }
-  panic("Program not found: {}", path.view());
+  panic("Program not found: {}", path);
   UNREACHABLE
 }
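The signature change from `const glcr::String&` to `glcr::String` is the classic pass-by-reference versus pass-by-value tradeoff: taking the argument by value copies the string on every call, while a const reference binds without copying. In standard C++ terms (illustrative only; std::string stands in for glcr::String):

#include <string>

void ByValue(std::string path);       // copies (or moves) the argument in
void ByRef(const std::string& path);  // binds directly, no copy

// ByValue("init") constructs a temporary and then copies/moves it into
// the parameter; ByRef("init") just binds a reference to the temporary.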
@@ -164,8 +164,3 @@ void operator delete[](void* addr) {
     SlabFree(addr);
   }
 }
-void operator delete[](void* addr, uint64_t size) {
-  if (IsSlab(addr)) {
-    SlabFree(addr);
-  }
-}
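The removed overload is the C++14 sized deallocation function, which the compiler may call instead of the unsized form when it statically knows the allocation size (the standard spells the parameter std::size_t; uint64_t is the same width on x86-64). A freestanding kernel typically declares both forms and forwards them to the same allocator hook, sketched here under that assumption with hypothetical hook names:

#include <cstddef>

void* SlabAlloc(std::size_t size);  // hypothetical allocator hooks
void SlabFree(void* addr);

void* operator new[](std::size_t size) { return SlabAlloc(size); }

void operator delete[](void* addr) noexcept { SlabFree(addr); }

// Sized form: the size hint can let an allocator skip a metadata lookup,
// but forwarding to the same path as the unsized form is always correct.
void operator delete[](void* addr, std::size_t /*size*/) noexcept {
  SlabFree(addr);
}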
@@ -25,10 +25,6 @@ uint64_t KernelVmm::AcquireKernelStack() {
   return gKernelVmm->stack_manager_->AllocateKernelStack();
 }
 
-void KernelVmm::FreeKernelStack(uint64_t stack_addr) {
-  return gKernelVmm->stack_manager_->FreeKernelStack(stack_addr);
-}
-
 uint64_t KernelVmm::AcquireSlabHeapRegionInternal(uint64_t slab_size_bytes) {
   uint64_t next_slab = next_slab_heap_page_;
   if (next_slab >= kKernelBuddyHeapEnd) {
@@ -35,15 +35,15 @@ uint64_t AddressSpace::GetNextMemMapAddr(uint64_t size) {
   return addr;
 }
 
-glcr::ErrorCode AddressSpace::MapInMemoryObject(
+void AddressSpace::MapInMemoryObject(
     uint64_t vaddr, const glcr::RefPtr<MemoryObject>& mem_obj) {
-  return mapping_tree_.AddInMemoryObject(vaddr, mem_obj);
+  memory_mappings_.Insert(vaddr, {.vaddr = vaddr, .mem_obj = mem_obj});
 }
 
-glcr::ErrorOr<uint64_t> AddressSpace::MapInMemoryObject(
+uint64_t AddressSpace::MapInMemoryObject(
     const glcr::RefPtr<MemoryObject>& mem_obj) {
   uint64_t vaddr = GetNextMemMapAddr(mem_obj->size());
-  RET_ERR(mapping_tree_.AddInMemoryObject(vaddr, mem_obj));
+  memory_mappings_.Insert(vaddr, {.vaddr = vaddr, .mem_obj = mem_obj});
   return vaddr;
 }
 
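The removed (-) side of this hunk threads errors outward: the vaddr-specified overload returns glcr::ErrorCode and the allocating overload returns glcr::ErrorOr<uint64_t>, so a caller must acknowledge an overlap; the added (+) side returns void or a bare uint64_t and cannot report one. A hedged sketch of how a caller consumes the ErrorOr form (the ok()/error()/value() accessor names are assumptions extrapolated from the offset_or.ok() usage visible elsewhere in this diff):

// Hypothetical caller of the ErrorOr-returning overload.
glcr::ErrorOr<uint64_t> vaddr_or = vmas->MapInMemoryObject(mem_obj);
if (!vaddr_or.ok()) {
  return vaddr_or.error();  // propagate ALREADY_EXISTS etc. upward
}
uint64_t vaddr = vaddr_or.value();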
@@ -55,23 +55,38 @@ bool AddressSpace::HandlePageFault(uint64_t vaddr) {
 #if K_VMAS_DEBUG
   dbgln("[VMAS] Page Fault!");
 #endif
   if (vaddr < kPageSize) {
     // Invalid page access.
     return false;
   }
 
   if (user_stacks_.IsValidStack(vaddr)) {
     MapPage(cr3_, vaddr, phys_mem::AllocatePage());
     return true;
   }
 
-  auto offset_or = mapping_tree_.GetPhysicalPageAtVaddr(vaddr);
-  if (!offset_or.ok()) {
+  auto mapping_or = GetMemoryMappingForAddr(vaddr);
+  if (!mapping_or) {
     return false;
   }
+  MemoryMapping& mapping = mapping_or.value();
+  uint64_t offset = vaddr - mapping.vaddr;
+  uint64_t physical_addr = mapping.mem_obj->PhysicalPageAtOffset(offset);
+  if (physical_addr == 0) {
+    dbgln("WARN: Memory object returned invalid physical addr.");
+    return false;
+  }
 #if K_VMAS_DEBUG
   dbgln("[VMAS] Mapping P({x}) at V({x})", physical_addr, vaddr);
 #endif
-  MapPage(cr3_, vaddr, offset_or.value());
+  MapPage(cr3_, vaddr, physical_addr);
   return true;
 }
+
+glcr::Optional<glcr::Ref<AddressSpace::MemoryMapping>>
+AddressSpace::GetMemoryMappingForAddr(uint64_t vaddr) {
+  auto mapping_or = memory_mappings_.Predecessor(vaddr + 1);
+  if (!mapping_or) {
+    return mapping_or;
+  }
+  MemoryMapping& mapping = mapping_or.value();
+  if (mapping.vaddr + mapping.mem_obj->size() <= vaddr) {
+    return {};
+  }
+  return mapping_or;
+}
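Both versions of GetMemoryMappingForAddr lean on the same trick: Predecessor(vaddr + 1) finds the mapping with the greatest base less than or equal to vaddr, which is the only candidate that can contain vaddr, and it actually does only if vaddr falls below its limit. Restated over std::map (illustrative only; the container and names are assumptions, not the kernel's types):

#include <cstdint>
#include <map>
#include <optional>

struct Mapping {
  uint64_t base;
  uint64_t size;
};

std::optional<Mapping> FindMapping(const std::map<uint64_t, Mapping>& maps,
                                   uint64_t vaddr) {
  auto it = maps.upper_bound(vaddr);  // first mapping with base > vaddr
  if (it == maps.begin()) {
    return std::nullopt;  // nothing starts at or below vaddr
  }
  --it;  // greatest base <= vaddr: the only possible container of vaddr
  if (it->second.base + it->second.size <= vaddr) {
    return std::nullopt;  // candidate ends before vaddr
  }
  return it->second;
}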
@@ -5,7 +5,6 @@
 #include <stdint.h>
 
 #include "include/ztypes.h"
-#include "lib/memory_mapping_tree.h"
 #include "memory/user_stack_manager.h"
 #include "object/memory_object.h"
 
@@ -70,22 +69,16 @@ class AddressSpace : public KernelObject {
 
   // Maps in a memory object at a specific address.
   // Note this is unsafe for now as it may clobber other mappings.
-  [[nodiscard]] glcr::ErrorCode MapInMemoryObject(
-      uint64_t vaddr, const glcr::RefPtr<MemoryObject>& mem_obj);
-
-  [[nodiscard]] glcr::ErrorOr<uint64_t> MapInMemoryObject(
+  void MapInMemoryObject(uint64_t vaddr,
                          const glcr::RefPtr<MemoryObject>& mem_obj);
 
-  [[nodiscard]] glcr::ErrorCode FreeAddressRange(uint64_t vaddr_base,
-                                                 uint64_t vaddr_limit) {
-    return mapping_tree_.FreeMemoryRange(vaddr_base, vaddr_limit);
-  }
+  uint64_t MapInMemoryObject(const glcr::RefPtr<MemoryObject>& mem_obj);
 
   // Kernel Mappings.
   uint64_t AllocateKernelStack();
 
   // Returns true if the page fault has been resolved.
-  [[nodiscard]] bool HandlePageFault(uint64_t vaddr);
+  bool HandlePageFault(uint64_t vaddr);
 
  private:
   friend class glcr::MakeRefCountedFriend<AddressSpace>;
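The [[nodiscard]] attributes stripped on the added (+) side are standard C++17: they turn silently ignoring a returned error code into a compiler diagnostic. A self-contained illustration:

[[nodiscard]] int DoThing();  // any error-returning call works the same way

void Caller() {
  DoThing();           // warning: ignoring return value declared nodiscard
  (void)DoThing();     // explicit discard, compiles quietly
  int rc = DoThing();  // normal use
  (void)rc;
}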
@@ -95,5 +88,19 @@ class AddressSpace : public KernelObject {
   UserStackManager user_stacks_;
   uint64_t next_memmap_addr_ = 0x20'00000000;
 
-  MemoryMappingTree mapping_tree_;
+  struct MemoryMapping {
+    uint64_t vaddr;
+    glcr::RefPtr<MemoryObject> mem_obj;
+  };
+
+  // TODO: Consider adding a red-black tree implementation here.
+  // As is, this tree functions about as well as a linked list
+  // because mappings are likely to be added in near-perfect ascending order.
+  // Also worth considering creating a special tree implementation for
+  // just this purpose, or maybe a BinaryTree implementation that accepts
+  // ranges rather than a single key.
+  glcr::BinaryTree<uint64_t, MemoryMapping> memory_mappings_;
+
+  glcr::Optional<glcr::Ref<MemoryMapping>> GetMemoryMappingForAddr(
+      uint64_t vaddr);
 };
@@ -20,8 +20,6 @@ MemoryObject::MemoryObject(uint64_t size) : size_(size) {
   }
 }
 
-MemoryObject::~MemoryObject() { dbgln("Memory Object Freed"); }
-
 uint64_t MemoryObject::PhysicalPageAtOffset(uint64_t offset) {
   if (offset > size_) {
     panic("Invalid offset");
@@ -27,8 +27,7 @@ class MemoryObject : public KernelObject {
            kZionPerm_Transmit;
   }
 
-  explicit MemoryObject(uint64_t size);
-  ~MemoryObject();
+  MemoryObject(uint64_t size);
 
   uint64_t size() { return size_; }
   uint64_t num_pages() { return size_ / 0x1000; }
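The removed (-) side marks the single-argument constructor explicit, which blocks accidental implicit conversions from integers; the added (+) side allows them. An illustration in standard C++ (toy types, not the kernel's):

#include <cstdint>

struct Explicit {
  explicit Explicit(uint64_t size) : size_(size) {}
  uint64_t size_;
};

struct Implicit {
  Implicit(uint64_t size) : size_(size) {}
  uint64_t size_;
};

void UseE(Explicit) {}
void UseI(Implicit) {}

void Demo() {
  // UseE(0x1000);         // error: no implicit uint64_t -> Explicit
  UseE(Explicit(0x1000));  // fine: the conversion is spelled out
  UseI(0x1000);            // compiles: a bare integer becomes an object
}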
@@ -75,12 +75,12 @@ void Thread::Exit() {
           curr_thread->tid(), pid(), tid());
   }
-  Cleanup();
+  process_.CheckState();
   gScheduler->Yield();
 }
 
 void Thread::Cleanup() {
   state_ = CLEANUP;
   process_.CheckState();
   while (blocked_threads_.size() != 0) {
     auto thread = blocked_threads_.PopFront();
     thread->SetState(Thread::RUNNABLE);
@@ -15,19 +15,10 @@ z_err_t AddressSpaceMap(ZAddressSpaceMapReq* req) {
 
   // FIXME: Validation necessary.
   if (req->vmas_offset != 0) {
-    RET_ERR(vmas->MapInMemoryObject(req->vmas_offset, vmmo));
+    vmas->MapInMemoryObject(req->vmas_offset, vmmo);
     *req->vaddr = req->vmas_offset;
   } else {
-    ASSIGN_OR_RETURN(*req->vaddr, vmas->MapInMemoryObject(vmmo));
+    *req->vaddr = vmas->MapInMemoryObject(vmmo);
   }
   return glcr::OK;
 }
-
-z_err_t AddressSpaceUnmap(ZAddressSpaceUnmapReq* req) {
-  auto& curr_proc = gScheduler->CurrentProcess();
-  auto vmas_cap = curr_proc.GetCapability(req->vmas_cap);
-  RET_ERR(ValidateCapability<AddressSpace>(vmas_cap, kZionPerm_Write));
-
-  auto vmas = vmas_cap->obj<AddressSpace>();
-  return vmas->FreeAddressRange(req->lower_addr, req->upper_addr);
-}
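RET_ERR and ASSIGN_OR_RETURN on the removed (-) side are early-return helpers; the added (+) side calls the void/uint64_t overloads directly and stores the result. Hypothetical expansions of macros in this style (the actual glacier definitions are not shown in this diff and may differ; the ok()/error()/value() accessors are assumptions):

// Sketch only: returns early on any non-OK error code.
#define RET_ERR(expr)           \
  do {                          \
    auto ret_err_ = (expr);     \
    if (ret_err_ != glcr::OK) { \
      return ret_err_;          \
    }                           \
  } while (0)

// Sketch only: unwraps an ErrorOr-like value or propagates its error.
#define ASSIGN_OR_RETURN(lhs, expr) \
  {                                 \
    auto assign_or_ = (expr);       \
    if (!assign_or_.ok()) {         \
      return assign_or_.error();    \
    }                               \
    lhs = assign_or_.value();       \
  }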
@@ -3,4 +3,3 @@
 #include "include/zcall.h"
 
 z_err_t AddressSpaceMap(ZAddressSpaceMapReq* req);
-z_err_t AddressSpaceUnmap(ZAddressSpaceUnmapReq* req);
@@ -18,11 +18,3 @@ z_err_t CapDuplicate(ZCapDuplicateReq* req) {
                           cap->permissions() & req->perm_mask);
   return glcr::OK;
 }
-
-z_err_t CapRelease(ZCapReleaseReq* req) {
-  auto& proc = gScheduler->CurrentProcess();
-  if (proc.ReleaseCapability(req->cap).empty()) {
-    return glcr::CAP_NOT_FOUND;
-  }
-  return glcr::OK;
-}
@@ -3,4 +3,3 @@
 #include "include/zcall.h"
 
 z_err_t CapDuplicate(ZCapDuplicateReq* req);
-z_err_t CapRelease(ZCapReleaseReq* req);
@@ -33,6 +33,7 @@ z_err_t MemoryObjectCreateContiguous(ZMemoryObjectCreateContiguousReq* req) {
 z_err_t MemoryObjectDuplicate(ZMemoryObjectDuplicateReq* req) {
   auto& curr_proc = gScheduler->CurrentProcess();
   auto vmmo_cap = curr_proc.GetCapability(req->vmmo_cap);
+  // FIXME: Check a duplication permission here.
   RET_ERR(ValidateCapability<MemoryObject>(vmmo_cap, kZionPerm_Duplicate));
 
   ASSIGN_OR_RETURN(
@@ -42,14 +43,3 @@ z_err_t MemoryObjectDuplicate(ZMemoryObjectDuplicateReq* req) {
   curr_proc.AddNewCapability(new_vmmo, vmmo_cap->permissions());
   return glcr::OK;
 }
-
-z_err_t MemoryObjectInspect(ZMemoryObjectInspectReq* req) {
-  auto& curr_proc = gScheduler->CurrentProcess();
-  auto vmmo_cap = curr_proc.GetCapability(req->vmmo_cap);
-  RET_ERR(ValidateCapability<MemoryObject>(vmmo_cap, kZionPerm_Read));
-
-  auto vmmo = vmmo_cap->obj<MemoryObject>();
-  *req->size = vmmo->size();
-
-  return glcr::OK;
-}
@@ -6,4 +6,3 @@ z_err_t MemoryObjectCreate(ZMemoryObjectCreateReq* req);
 z_err_t MemoryObjectCreatePhysical(ZMemoryObjectCreatePhysicalReq* req);
 z_err_t MemoryObjectCreateContiguous(ZMemoryObjectCreateContiguousReq* req);
 z_err_t MemoryObjectDuplicate(ZMemoryObjectDuplicateReq* req);
-z_err_t MemoryObjectInspect(ZMemoryObjectInspectReq* req);
@@ -60,13 +60,11 @@ extern "C" z_err_t SyscallHandler(uint64_t call_id, void* req) {
     CASE(ThreadWait);
     // syscall/address_space.h
    CASE(AddressSpaceMap);
-    CASE(AddressSpaceUnmap);
     // syscall/memory_object.h
     CASE(MemoryObjectCreate);
     CASE(MemoryObjectCreatePhysical);
     CASE(MemoryObjectCreateContiguous);
     CASE(MemoryObjectDuplicate);
-    CASE(MemoryObjectInspect);
     // syscall/ipc.h
     CASE(ChannelCreate);
     CASE(ChannelSend);
@@ -83,7 +81,6 @@ extern "C" z_err_t SyscallHandler(uint64_t call_id, void* req) {
     CASE(ReplyPortRecv);
     // syscall/capability.h
     CASE(CapDuplicate);
-    CASE(CapRelease);
     // syscall/syncronization.h
     CASE(MutexCreate);
     CASE(MutexLock);
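For context on the CASE lines: SyscallHandler is a single switch over call_id, and each CASE line binds one syscall number to its typed handler. The macro plausibly looks something like the sketch below (a guess for illustration; the kernel's actual macro, the kZion## constant naming, and the error code for unknown ids are all assumptions not shown in this diff):

// Hypothetical dispatch macro: maps a syscall id constant to the handler
// that takes the matching request struct.
#define CASE(name) \
  case kZion##name: \
    return name(reinterpret_cast<Z##name##Req*>(req));

extern "C" z_err_t SyscallHandler(uint64_t call_id, void* req) {
  switch (call_id) {
    CASE(AddressSpaceMap);
    CASE(CapDuplicate);
    // ...
    default:
      // Assumption: unknown call ids map to some error code.
      return glcr::UNIMPLEMENTED;
  }
}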