Merge pull request #8013 from bunnei/kernel-slab-rework-v2

Kernel Memory Updates (Part 6): Use guest memory for slab heaps & update TLS.

This commit is contained in: commit 2db5076ec9
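For orientation before the hunks: a minimal usage sketch of the intrusive tree this commit reworks. The element names (MyItem, MyComparator) are hypothetical and not from the commit; the node, member-traits, and TreeType plumbing is assumed from what the hunks below show (the DeferredAssert variant's TreeType alias is visible in a later hunk), so treat this as an illustrative sketch rather than commit code.

    // An element embeds its own tree node; the member-traits class maps between
    // the node and its enclosing object, so insertion never allocates.
    struct MyItem {
        int key{};
        Common::IntrusiveRedBlackTreeNode node{};

        struct MyComparator {
            // FreeBSD comparator convention: negative / zero / positive, like strcmp().
            static constexpr int Compare(const MyItem& lhs, const MyItem& rhs) {
                return (lhs.key < rhs.key) ? -1 : (lhs.key > rhs.key) ? 1 : 0;
            }
        };
    };

    using MyTree =
        Common::IntrusiveRedBlackTreeMemberTraits<&MyItem::node>::TreeType<MyItem::MyComparator>;

    MyItem a{.key = 1}, b{.key = 2};
    MyTree tree;
    tree.insert(a); // links a.node into the tree; 'a' must outlive its membership
    tree.insert(b);
    tree.erase(tree.find(a));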
@@ -4,6 +4,8 @@
 
 #pragma once
 
+#include "common/alignment.h"
+#include "common/common_funcs.h"
 #include "common/parent_of_member.h"
 #include "common/tree.h"
 
@@ -15,32 +17,33 @@ class IntrusiveRedBlackTreeImpl;
 
 }
 
+#pragma pack(push, 4)
 struct IntrusiveRedBlackTreeNode {
+    YUZU_NON_COPYABLE(IntrusiveRedBlackTreeNode);
+
 public:
-    using EntryType = RBEntry<IntrusiveRedBlackTreeNode>;
-
-    constexpr IntrusiveRedBlackTreeNode() = default;
-
-    void SetEntry(const EntryType& new_entry) {
-        entry = new_entry;
-    }
-
-    [[nodiscard]] EntryType& GetEntry() {
-        return entry;
-    }
-
-    [[nodiscard]] const EntryType& GetEntry() const {
-        return entry;
-    }
+    using RBEntry = freebsd::RBEntry<IntrusiveRedBlackTreeNode>;
 
 private:
-    EntryType entry{};
+    RBEntry m_entry;
 
-    friend class impl::IntrusiveRedBlackTreeImpl;
+public:
+    explicit IntrusiveRedBlackTreeNode() = default;
 
-    template <class, class, class>
-    friend class IntrusiveRedBlackTree;
+    [[nodiscard]] constexpr RBEntry& GetRBEntry() {
+        return m_entry;
+    }
+    [[nodiscard]] constexpr const RBEntry& GetRBEntry() const {
+        return m_entry;
+    }
+
+    constexpr void SetRBEntry(const RBEntry& entry) {
+        m_entry = entry;
+    }
 };
+static_assert(sizeof(IntrusiveRedBlackTreeNode) ==
+              3 * sizeof(void*) + std::max<size_t>(sizeof(freebsd::RBColor), 4));
+#pragma pack(pop)
 
 template <class T, class Traits, class Comparator>
 class IntrusiveRedBlackTree;
@@ -48,12 +51,17 @@ class IntrusiveRedBlackTree;
 namespace impl {
 
 class IntrusiveRedBlackTreeImpl {
+    YUZU_NON_COPYABLE(IntrusiveRedBlackTreeImpl);
+
 private:
     template <class, class, class>
     friend class ::Common::IntrusiveRedBlackTree;
 
-    using RootType = RBHead<IntrusiveRedBlackTreeNode>;
-    RootType root;
+private:
+    using RootType = freebsd::RBHead<IntrusiveRedBlackTreeNode>;
+
+private:
+    RootType m_root;
 
 public:
     template <bool Const>
@@ -81,149 +89,150 @@ public:
                                              IntrusiveRedBlackTreeImpl::reference>;
 
     private:
-        pointer node;
+        pointer m_node;
 
     public:
-        explicit Iterator(pointer n) : node(n) {}
+        constexpr explicit Iterator(pointer n) : m_node(n) {}
 
-        bool operator==(const Iterator& rhs) const {
-            return this->node == rhs.node;
+        constexpr bool operator==(const Iterator& rhs) const {
+            return m_node == rhs.m_node;
         }
 
-        bool operator!=(const Iterator& rhs) const {
+        constexpr bool operator!=(const Iterator& rhs) const {
             return !(*this == rhs);
         }
 
-        pointer operator->() const {
-            return this->node;
+        constexpr pointer operator->() const {
+            return m_node;
         }
 
-        reference operator*() const {
-            return *this->node;
+        constexpr reference operator*() const {
+            return *m_node;
         }
 
-        Iterator& operator++() {
-            this->node = GetNext(this->node);
+        constexpr Iterator& operator++() {
+            m_node = GetNext(m_node);
             return *this;
         }
 
-        Iterator& operator--() {
-            this->node = GetPrev(this->node);
+        constexpr Iterator& operator--() {
+            m_node = GetPrev(m_node);
             return *this;
         }
 
-        Iterator operator++(int) {
+        constexpr Iterator operator++(int) {
             const Iterator it{*this};
             ++(*this);
             return it;
         }
 
-        Iterator operator--(int) {
+        constexpr Iterator operator--(int) {
             const Iterator it{*this};
             --(*this);
             return it;
         }
 
-        operator Iterator<true>() const {
-            return Iterator<true>(this->node);
+        constexpr operator Iterator<true>() const {
+            return Iterator<true>(m_node);
         }
     };
 
 private:
-    // Define accessors using RB_* functions.
-    bool EmptyImpl() const {
-        return root.IsEmpty();
+    constexpr bool EmptyImpl() const {
+        return m_root.IsEmpty();
     }
 
-    IntrusiveRedBlackTreeNode* GetMinImpl() const {
-        return RB_MIN(const_cast<RootType*>(&root));
+    constexpr IntrusiveRedBlackTreeNode* GetMinImpl() const {
+        return freebsd::RB_MIN(const_cast<RootType&>(m_root));
     }
 
-    IntrusiveRedBlackTreeNode* GetMaxImpl() const {
-        return RB_MAX(const_cast<RootType*>(&root));
+    constexpr IntrusiveRedBlackTreeNode* GetMaxImpl() const {
+        return freebsd::RB_MAX(const_cast<RootType&>(m_root));
    }
 
-    IntrusiveRedBlackTreeNode* RemoveImpl(IntrusiveRedBlackTreeNode* node) {
-        return RB_REMOVE(&root, node);
+    constexpr IntrusiveRedBlackTreeNode* RemoveImpl(IntrusiveRedBlackTreeNode* node) {
+        return freebsd::RB_REMOVE(m_root, node);
     }
 
 public:
-    static IntrusiveRedBlackTreeNode* GetNext(IntrusiveRedBlackTreeNode* node) {
-        return RB_NEXT(node);
+    static constexpr IntrusiveRedBlackTreeNode* GetNext(IntrusiveRedBlackTreeNode* node) {
+        return freebsd::RB_NEXT(node);
     }
 
-    static IntrusiveRedBlackTreeNode* GetPrev(IntrusiveRedBlackTreeNode* node) {
-        return RB_PREV(node);
+    static constexpr IntrusiveRedBlackTreeNode* GetPrev(IntrusiveRedBlackTreeNode* node) {
+        return freebsd::RB_PREV(node);
     }
 
-    static const IntrusiveRedBlackTreeNode* GetNext(const IntrusiveRedBlackTreeNode* node) {
+    static constexpr IntrusiveRedBlackTreeNode const* GetNext(
+        IntrusiveRedBlackTreeNode const* node) {
         return static_cast<const IntrusiveRedBlackTreeNode*>(
             GetNext(const_cast<IntrusiveRedBlackTreeNode*>(node)));
     }
 
-    static const IntrusiveRedBlackTreeNode* GetPrev(const IntrusiveRedBlackTreeNode* node) {
+    static constexpr IntrusiveRedBlackTreeNode const* GetPrev(
+        IntrusiveRedBlackTreeNode const* node) {
         return static_cast<const IntrusiveRedBlackTreeNode*>(
             GetPrev(const_cast<IntrusiveRedBlackTreeNode*>(node)));
     }
 
 public:
-    constexpr IntrusiveRedBlackTreeImpl() {}
+    constexpr IntrusiveRedBlackTreeImpl() = default;
 
     // Iterator accessors.
-    iterator begin() {
+    constexpr iterator begin() {
         return iterator(this->GetMinImpl());
     }
 
-    const_iterator begin() const {
+    constexpr const_iterator begin() const {
         return const_iterator(this->GetMinImpl());
     }
 
-    iterator end() {
+    constexpr iterator end() {
         return iterator(static_cast<IntrusiveRedBlackTreeNode*>(nullptr));
     }
 
-    const_iterator end() const {
+    constexpr const_iterator end() const {
         return const_iterator(static_cast<const IntrusiveRedBlackTreeNode*>(nullptr));
     }
 
-    const_iterator cbegin() const {
+    constexpr const_iterator cbegin() const {
         return this->begin();
     }
 
-    const_iterator cend() const {
+    constexpr const_iterator cend() const {
         return this->end();
     }
 
-    iterator iterator_to(reference ref) {
-        return iterator(&ref);
+    constexpr iterator iterator_to(reference ref) {
+        return iterator(std::addressof(ref));
     }
 
-    const_iterator iterator_to(const_reference ref) const {
-        return const_iterator(&ref);
+    constexpr const_iterator iterator_to(const_reference ref) const {
+        return const_iterator(std::addressof(ref));
     }
 
     // Content management.
-    bool empty() const {
+    constexpr bool empty() const {
         return this->EmptyImpl();
     }
 
-    reference back() {
+    constexpr reference back() {
         return *this->GetMaxImpl();
     }
 
-    const_reference back() const {
+    constexpr const_reference back() const {
         return *this->GetMaxImpl();
     }
 
-    reference front() {
+    constexpr reference front() {
         return *this->GetMinImpl();
     }
 
-    const_reference front() const {
+    constexpr const_reference front() const {
         return *this->GetMinImpl();
     }
 
-    iterator erase(iterator it) {
+    constexpr iterator erase(iterator it) {
         auto cur = std::addressof(*it);
         auto next = GetNext(cur);
         this->RemoveImpl(cur);
@@ -234,16 +243,16 @@ public:
 } // namespace impl
 
 template <typename T>
-concept HasLightCompareType = requires {
-    { std::is_same<typename T::LightCompareType, void>::value } -> std::convertible_to<bool>;
+concept HasRedBlackKeyType = requires {
+    { std::is_same<typename T::RedBlackKeyType, void>::value } -> std::convertible_to<bool>;
 };
 
 namespace impl {
 
 template <typename T, typename Default>
-consteval auto* GetLightCompareType() {
-    if constexpr (HasLightCompareType<T>) {
-        return static_cast<typename T::LightCompareType*>(nullptr);
+consteval auto* GetRedBlackKeyType() {
+    if constexpr (HasRedBlackKeyType<T>) {
+        return static_cast<typename T::RedBlackKeyType*>(nullptr);
     } else {
         return static_cast<Default*>(nullptr);
     }
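The detection idiom renamed above is a member-type probe; a sketch of a comparator that opts in (all names here are hypothetical, with u64, yuzu's integer alias, standing in for whatever key type a caller uses):

    // Because MyKeyedComparator declares RedBlackKeyType, HasRedBlackKeyType<...>
    // is satisfied and the tree's key_type (see the later hunks) resolves to u64,
    // letting lookups pass a bare key instead of constructing a whole element.
    struct MyKeyedComparator {
        using RedBlackKeyType = u64;

        // Key-vs-element and element-vs-element overloads, matching what the
        // tree's CompareKeyImpl/CompareImpl below expect.
        static constexpr int Compare(u64 key, const MyObject& rhs) {
            return (key < rhs.GetKey()) ? -1 : (key > rhs.GetKey()) ? 1 : 0;
        }
        static constexpr int Compare(const MyObject& lhs, const MyObject& rhs) {
            return Compare(lhs.GetKey(), rhs);
        }
    };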
@@ -252,16 +261,17 @@ namespace impl {
 } // namespace impl
 
 template <typename T, typename Default>
-using LightCompareType = std::remove_pointer_t<decltype(impl::GetLightCompareType<T, Default>())>;
+using RedBlackKeyType = std::remove_pointer_t<decltype(impl::GetRedBlackKeyType<T, Default>())>;
 
 template <class T, class Traits, class Comparator>
 class IntrusiveRedBlackTree {
+    YUZU_NON_COPYABLE(IntrusiveRedBlackTree);
+
 public:
     using ImplType = impl::IntrusiveRedBlackTreeImpl;
 
 private:
-    ImplType impl{};
+    ImplType m_impl;
 
 public:
     template <bool Const>
@@ -277,9 +287,9 @@ public:
     using iterator = Iterator<false>;
     using const_iterator = Iterator<true>;
 
-    using light_value_type = LightCompareType<Comparator, value_type>;
-    using const_light_pointer = const light_value_type*;
-    using const_light_reference = const light_value_type&;
+    using key_type = RedBlackKeyType<Comparator, value_type>;
+    using const_key_pointer = const key_type*;
+    using const_key_reference = const key_type&;
 
     template <bool Const>
     class Iterator {
@@ -298,183 +308,201 @@ public:
                                              IntrusiveRedBlackTree::reference>;
 
     private:
-        ImplIterator iterator;
+        ImplIterator m_impl;
 
     private:
-        explicit Iterator(ImplIterator it) : iterator(it) {}
+        constexpr explicit Iterator(ImplIterator it) : m_impl(it) {}
 
-        explicit Iterator(typename std::conditional<Const, ImplType::const_iterator,
-                                                    ImplType::iterator>::type::pointer ptr)
-            : iterator(ptr) {}
+        constexpr explicit Iterator(typename ImplIterator::pointer p) : m_impl(p) {}
 
-        ImplIterator GetImplIterator() const {
-            return this->iterator;
+        constexpr ImplIterator GetImplIterator() const {
+            return m_impl;
         }
 
     public:
-        bool operator==(const Iterator& rhs) const {
-            return this->iterator == rhs.iterator;
+        constexpr bool operator==(const Iterator& rhs) const {
+            return m_impl == rhs.m_impl;
         }
 
-        bool operator!=(const Iterator& rhs) const {
+        constexpr bool operator!=(const Iterator& rhs) const {
            return !(*this == rhs);
         }
 
-        pointer operator->() const {
-            return Traits::GetParent(std::addressof(*this->iterator));
+        constexpr pointer operator->() const {
+            return Traits::GetParent(std::addressof(*m_impl));
         }
 
-        reference operator*() const {
-            return *Traits::GetParent(std::addressof(*this->iterator));
+        constexpr reference operator*() const {
+            return *Traits::GetParent(std::addressof(*m_impl));
         }
 
-        Iterator& operator++() {
-            ++this->iterator;
+        constexpr Iterator& operator++() {
+            ++m_impl;
             return *this;
         }
 
-        Iterator& operator--() {
-            --this->iterator;
+        constexpr Iterator& operator--() {
+            --m_impl;
             return *this;
         }
 
-        Iterator operator++(int) {
+        constexpr Iterator operator++(int) {
             const Iterator it{*this};
-            ++this->iterator;
+            ++m_impl;
             return it;
         }
 
-        Iterator operator--(int) {
+        constexpr Iterator operator--(int) {
             const Iterator it{*this};
-            --this->iterator;
+            --m_impl;
             return it;
         }
 
-        operator Iterator<true>() const {
-            return Iterator<true>(this->iterator);
+        constexpr operator Iterator<true>() const {
+            return Iterator<true>(m_impl);
         }
     };
 
 private:
-    static int CompareImpl(const IntrusiveRedBlackTreeNode* lhs,
-                           const IntrusiveRedBlackTreeNode* rhs) {
+    static constexpr int CompareImpl(const IntrusiveRedBlackTreeNode* lhs,
+                                     const IntrusiveRedBlackTreeNode* rhs) {
         return Comparator::Compare(*Traits::GetParent(lhs), *Traits::GetParent(rhs));
     }
 
-    static int LightCompareImpl(const void* elm, const IntrusiveRedBlackTreeNode* rhs) {
-        return Comparator::Compare(*static_cast<const_light_pointer>(elm), *Traits::GetParent(rhs));
+    static constexpr int CompareKeyImpl(const_key_reference key,
+                                        const IntrusiveRedBlackTreeNode* rhs) {
+        return Comparator::Compare(key, *Traits::GetParent(rhs));
     }
 
     // Define accessors using RB_* functions.
-    IntrusiveRedBlackTreeNode* InsertImpl(IntrusiveRedBlackTreeNode* node) {
-        return RB_INSERT(&impl.root, node, CompareImpl);
+    constexpr IntrusiveRedBlackTreeNode* InsertImpl(IntrusiveRedBlackTreeNode* node) {
+        return freebsd::RB_INSERT(m_impl.m_root, node, CompareImpl);
     }
 
-    IntrusiveRedBlackTreeNode* FindImpl(const IntrusiveRedBlackTreeNode* node) const {
-        return RB_FIND(const_cast<ImplType::RootType*>(&impl.root),
-                       const_cast<IntrusiveRedBlackTreeNode*>(node), CompareImpl);
+    constexpr IntrusiveRedBlackTreeNode* FindImpl(IntrusiveRedBlackTreeNode const* node) const {
+        return freebsd::RB_FIND(const_cast<ImplType::RootType&>(m_impl.m_root),
+                                const_cast<IntrusiveRedBlackTreeNode*>(node), CompareImpl);
     }
 
-    IntrusiveRedBlackTreeNode* NFindImpl(const IntrusiveRedBlackTreeNode* node) const {
-        return RB_NFIND(const_cast<ImplType::RootType*>(&impl.root),
-                        const_cast<IntrusiveRedBlackTreeNode*>(node), CompareImpl);
+    constexpr IntrusiveRedBlackTreeNode* NFindImpl(IntrusiveRedBlackTreeNode const* node) const {
+        return freebsd::RB_NFIND(const_cast<ImplType::RootType&>(m_impl.m_root),
+                                 const_cast<IntrusiveRedBlackTreeNode*>(node), CompareImpl);
     }
 
-    IntrusiveRedBlackTreeNode* FindLightImpl(const_light_pointer lelm) const {
-        return RB_FIND_LIGHT(const_cast<ImplType::RootType*>(&impl.root),
-                             static_cast<const void*>(lelm), LightCompareImpl);
+    constexpr IntrusiveRedBlackTreeNode* FindKeyImpl(const_key_reference key) const {
+        return freebsd::RB_FIND_KEY(const_cast<ImplType::RootType&>(m_impl.m_root), key,
+                                    CompareKeyImpl);
     }
 
-    IntrusiveRedBlackTreeNode* NFindLightImpl(const_light_pointer lelm) const {
-        return RB_NFIND_LIGHT(const_cast<ImplType::RootType*>(&impl.root),
-                              static_cast<const void*>(lelm), LightCompareImpl);
+    constexpr IntrusiveRedBlackTreeNode* NFindKeyImpl(const_key_reference key) const {
+        return freebsd::RB_NFIND_KEY(const_cast<ImplType::RootType&>(m_impl.m_root), key,
+                                     CompareKeyImpl);
+    }
+
+    constexpr IntrusiveRedBlackTreeNode* FindExistingImpl(
+        IntrusiveRedBlackTreeNode const* node) const {
+        return freebsd::RB_FIND_EXISTING(const_cast<ImplType::RootType&>(m_impl.m_root),
+                                         const_cast<IntrusiveRedBlackTreeNode*>(node), CompareImpl);
+    }
+
+    constexpr IntrusiveRedBlackTreeNode* FindExistingKeyImpl(const_key_reference key) const {
+        return freebsd::RB_FIND_EXISTING_KEY(const_cast<ImplType::RootType&>(m_impl.m_root), key,
+                                             CompareKeyImpl);
     }
 
 public:
     constexpr IntrusiveRedBlackTree() = default;
 
     // Iterator accessors.
-    iterator begin() {
-        return iterator(this->impl.begin());
+    constexpr iterator begin() {
+        return iterator(m_impl.begin());
     }
 
-    const_iterator begin() const {
-        return const_iterator(this->impl.begin());
+    constexpr const_iterator begin() const {
+        return const_iterator(m_impl.begin());
     }
 
-    iterator end() {
-        return iterator(this->impl.end());
+    constexpr iterator end() {
+        return iterator(m_impl.end());
     }
 
-    const_iterator end() const {
-        return const_iterator(this->impl.end());
+    constexpr const_iterator end() const {
+        return const_iterator(m_impl.end());
     }
 
-    const_iterator cbegin() const {
+    constexpr const_iterator cbegin() const {
         return this->begin();
     }
 
-    const_iterator cend() const {
+    constexpr const_iterator cend() const {
         return this->end();
     }
 
-    iterator iterator_to(reference ref) {
-        return iterator(this->impl.iterator_to(*Traits::GetNode(std::addressof(ref))));
+    constexpr iterator iterator_to(reference ref) {
+        return iterator(m_impl.iterator_to(*Traits::GetNode(std::addressof(ref))));
     }
 
-    const_iterator iterator_to(const_reference ref) const {
-        return const_iterator(this->impl.iterator_to(*Traits::GetNode(std::addressof(ref))));
+    constexpr const_iterator iterator_to(const_reference ref) const {
+        return const_iterator(m_impl.iterator_to(*Traits::GetNode(std::addressof(ref))));
     }
 
     // Content management.
-    bool empty() const {
-        return this->impl.empty();
+    constexpr bool empty() const {
+        return m_impl.empty();
     }
 
-    reference back() {
-        return *Traits::GetParent(std::addressof(this->impl.back()));
+    constexpr reference back() {
+        return *Traits::GetParent(std::addressof(m_impl.back()));
     }
 
-    const_reference back() const {
-        return *Traits::GetParent(std::addressof(this->impl.back()));
+    constexpr const_reference back() const {
+        return *Traits::GetParent(std::addressof(m_impl.back()));
     }
 
-    reference front() {
-        return *Traits::GetParent(std::addressof(this->impl.front()));
+    constexpr reference front() {
+        return *Traits::GetParent(std::addressof(m_impl.front()));
     }
 
-    const_reference front() const {
-        return *Traits::GetParent(std::addressof(this->impl.front()));
+    constexpr const_reference front() const {
+        return *Traits::GetParent(std::addressof(m_impl.front()));
     }
 
-    iterator erase(iterator it) {
-        return iterator(this->impl.erase(it.GetImplIterator()));
+    constexpr iterator erase(iterator it) {
+        return iterator(m_impl.erase(it.GetImplIterator()));
     }
 
-    iterator insert(reference ref) {
+    constexpr iterator insert(reference ref) {
         ImplType::pointer node = Traits::GetNode(std::addressof(ref));
         this->InsertImpl(node);
         return iterator(node);
     }
 
-    iterator find(const_reference ref) const {
+    constexpr iterator find(const_reference ref) const {
         return iterator(this->FindImpl(Traits::GetNode(std::addressof(ref))));
     }
 
-    iterator nfind(const_reference ref) const {
+    constexpr iterator nfind(const_reference ref) const {
         return iterator(this->NFindImpl(Traits::GetNode(std::addressof(ref))));
     }
 
-    iterator find_light(const_light_reference ref) const {
-        return iterator(this->FindLightImpl(std::addressof(ref)));
+    constexpr iterator find_key(const_key_reference ref) const {
+        return iterator(this->FindKeyImpl(ref));
     }
 
-    iterator nfind_light(const_light_reference ref) const {
-        return iterator(this->NFindLightImpl(std::addressof(ref)));
+    constexpr iterator nfind_key(const_key_reference ref) const {
+        return iterator(this->NFindKeyImpl(ref));
+    }
+
+    constexpr iterator find_existing(const_reference ref) const {
+        return iterator(this->FindExistingImpl(Traits::GetNode(std::addressof(ref))));
+    }
+
+    constexpr iterator find_existing_key(const_key_reference ref) const {
+        return iterator(this->FindExistingKeyImpl(ref));
     }
 };
 
-template <auto T, class Derived = impl::GetParentType<T>>
+template <auto T, class Derived = Common::impl::GetParentType<T>>
 class IntrusiveRedBlackTreeMemberTraits;
 
 template <class Parent, IntrusiveRedBlackTreeNode Parent::*Member, class Derived>
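Continuing the hypothetical comparator sketched earlier, the renamed and newly added entry points read like this at a call site (an illustrative sketch, not commit code):

    // find_key/nfind_key take just the key; no dummy element is built.
    auto it = tree.find_key(key);            // exact match, or end()
    auto ge = tree.nfind_key(key);           // first element comparing >= key
    auto must = tree.find_existing_key(key); // for lookups the caller knows succeed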
@@ -498,19 +526,16 @@ private:
         return std::addressof(parent->*Member);
     }
 
-    static constexpr Derived* GetParent(IntrusiveRedBlackTreeNode* node) {
-        return GetParentPointer<Member, Derived>(node);
+    static Derived* GetParent(IntrusiveRedBlackTreeNode* node) {
+        return Common::GetParentPointer<Member, Derived>(node);
     }
 
-    static constexpr Derived const* GetParent(const IntrusiveRedBlackTreeNode* node) {
-        return GetParentPointer<Member, Derived>(node);
+    static Derived const* GetParent(IntrusiveRedBlackTreeNode const* node) {
+        return Common::GetParentPointer<Member, Derived>(node);
     }
-
-private:
-    static constexpr TypedStorage<Derived> DerivedStorage = {};
 };
 
-template <auto T, class Derived = impl::GetParentType<T>>
+template <auto T, class Derived = Common::impl::GetParentType<T>>
 class IntrusiveRedBlackTreeMemberTraitsDeferredAssert;
 
 template <class Parent, IntrusiveRedBlackTreeNode Parent::*Member, class Derived>
@@ -521,11 +546,6 @@ public:
         IntrusiveRedBlackTree<Derived, IntrusiveRedBlackTreeMemberTraitsDeferredAssert, Comparator>;
     using TreeTypeImpl = impl::IntrusiveRedBlackTreeImpl;
 
-    static constexpr bool IsValid() {
-        TypedStorage<Derived> DerivedStorage = {};
-        return GetParent(GetNode(GetPointer(DerivedStorage))) == GetPointer(DerivedStorage);
-    }
-
 private:
     template <class, class, class>
     friend class IntrusiveRedBlackTree;
@@ -540,30 +560,36 @@ private:
         return std::addressof(parent->*Member);
     }
 
-    static constexpr Derived* GetParent(IntrusiveRedBlackTreeNode* node) {
-        return GetParentPointer<Member, Derived>(node);
+    static Derived* GetParent(IntrusiveRedBlackTreeNode* node) {
+        return Common::GetParentPointer<Member, Derived>(node);
     }
 
-    static constexpr Derived const* GetParent(const IntrusiveRedBlackTreeNode* node) {
-        return GetParentPointer<Member, Derived>(node);
+    static Derived const* GetParent(IntrusiveRedBlackTreeNode const* node) {
+        return Common::GetParentPointer<Member, Derived>(node);
     }
 };
 
 template <class Derived>
-class IntrusiveRedBlackTreeBaseNode : public IntrusiveRedBlackTreeNode {
+class alignas(void*) IntrusiveRedBlackTreeBaseNode : public IntrusiveRedBlackTreeNode {
 public:
+    using IntrusiveRedBlackTreeNode::IntrusiveRedBlackTreeNode;
+
     constexpr Derived* GetPrev() {
-        return static_cast<Derived*>(impl::IntrusiveRedBlackTreeImpl::GetPrev(this));
+        return static_cast<Derived*>(static_cast<IntrusiveRedBlackTreeBaseNode*>(
+            impl::IntrusiveRedBlackTreeImpl::GetPrev(this)));
     }
     constexpr const Derived* GetPrev() const {
-        return static_cast<const Derived*>(impl::IntrusiveRedBlackTreeImpl::GetPrev(this));
+        return static_cast<const Derived*>(static_cast<const IntrusiveRedBlackTreeBaseNode*>(
+            impl::IntrusiveRedBlackTreeImpl::GetPrev(this)));
     }
 
     constexpr Derived* GetNext() {
-        return static_cast<Derived*>(impl::IntrusiveRedBlackTreeImpl::GetNext(this));
+        return static_cast<Derived*>(static_cast<IntrusiveRedBlackTreeBaseNode*>(
+            impl::IntrusiveRedBlackTreeImpl::GetNext(this)));
     }
     constexpr const Derived* GetNext() const {
-        return static_cast<const Derived*>(impl::IntrusiveRedBlackTreeImpl::GetNext(this));
+        return static_cast<const Derived*>(static_cast<const IntrusiveRedBlackTreeBaseNode*>(
+            impl::IntrusiveRedBlackTreeImpl::GetNext(this)));
     }
 };
 
@@ -581,19 +607,22 @@ private:
     friend class impl::IntrusiveRedBlackTreeImpl;
 
     static constexpr IntrusiveRedBlackTreeNode* GetNode(Derived* parent) {
-        return static_cast<IntrusiveRedBlackTreeNode*>(parent);
+        return static_cast<IntrusiveRedBlackTreeNode*>(
+            static_cast<IntrusiveRedBlackTreeBaseNode<Derived>*>(parent));
     }
 
     static constexpr IntrusiveRedBlackTreeNode const* GetNode(Derived const* parent) {
-        return static_cast<const IntrusiveRedBlackTreeNode*>(parent);
+        return static_cast<const IntrusiveRedBlackTreeNode*>(
+            static_cast<const IntrusiveRedBlackTreeBaseNode<Derived>*>(parent));
     }
 
     static constexpr Derived* GetParent(IntrusiveRedBlackTreeNode* node) {
-        return static_cast<Derived*>(node);
+        return static_cast<Derived*>(static_cast<IntrusiveRedBlackTreeBaseNode<Derived>*>(node));
     }
 
-    static constexpr Derived const* GetParent(const IntrusiveRedBlackTreeNode* node) {
-        return static_cast<const Derived*>(node);
+    static constexpr Derived const* GetParent(IntrusiveRedBlackTreeNode const* node) {
+        return static_cast<const Derived*>(
+            static_cast<const IntrusiveRedBlackTreeBaseNode<Derived>*>(node));
     }
 };
 
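For the base-node flavor reworked above, a two-line sketch (MyEntry is hypothetical): deriving from the node gives the element GetPrev()/GetNext() directly, and the traits in the hunk above recover the element from a node with static_casts routed through IntrusiveRedBlackTreeBaseNode<Derived>, presumably to keep the conversions well-defined now that the node is packed to 4 bytes and the base node is alignas(void*).

    class MyEntry : public Common::IntrusiveRedBlackTreeBaseNode<MyEntry> {
    public:
        u64 id{};
    };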
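The hunks that follow are from the FreeBSD-derived tree header (included above as "common/tree.h"), rewritten from runtime functions in namespace Common into constexpr, concept-constrained templates in namespace Common::freebsd. The 2lg(n+1) height bound quoted in the retained comment is the standard red-black result: a root of black-height $b$ has at least $2^b - 1$ internal nodes, and since no red node has a red child, at least half the nodes on any root-to-leaf path are black, so

    $n \ge 2^{h/2} - 1 \iff h \le 2\lg(n + 1)$.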
@@ -43,246 +43,445 @@
  * The maximum height of a red-black tree is 2lg (n+1).
  */
 
-#include "common/assert.h"
+namespace Common::freebsd {
 
-namespace Common {
-template <typename T>
-class RBHead {
-public:
-    [[nodiscard]] T* Root() {
-        return rbh_root;
-    }
-
-    [[nodiscard]] const T* Root() const {
-        return rbh_root;
-    }
-
-    void SetRoot(T* root) {
-        rbh_root = root;
-    }
-
-    [[nodiscard]] bool IsEmpty() const {
-        return Root() == nullptr;
-    }
-
-private:
-    T* rbh_root = nullptr;
-};
-
-enum class EntryColor {
-    Black,
-    Red,
+enum class RBColor {
+    RB_BLACK = 0,
+    RB_RED = 1,
 };
 
+#pragma pack(push, 4)
 template <typename T>
 class RBEntry {
 public:
-    [[nodiscard]] T* Left() {
-        return rbe_left;
-    }
+    constexpr RBEntry() = default;
 
-    [[nodiscard]] const T* Left() const {
-        return rbe_left;
+    [[nodiscard]] constexpr T* Left() {
+        return m_rbe_left;
+    }
+    [[nodiscard]] constexpr const T* Left() const {
+        return m_rbe_left;
     }
 
-    void SetLeft(T* left) {
-        rbe_left = left;
+    constexpr void SetLeft(T* e) {
+        m_rbe_left = e;
     }
 
-    [[nodiscard]] T* Right() {
-        return rbe_right;
-    }
+    [[nodiscard]] constexpr T* Right() {
+        return m_rbe_right;
+    }
+    [[nodiscard]] constexpr const T* Right() const {
+        return m_rbe_right;
+    }
 
-    [[nodiscard]] const T* Right() const {
-        return rbe_right;
+    constexpr void SetRight(T* e) {
+        m_rbe_right = e;
     }
 
-    void SetRight(T* right) {
-        rbe_right = right;
+    [[nodiscard]] constexpr T* Parent() {
+        return m_rbe_parent;
+    }
+    [[nodiscard]] constexpr const T* Parent() const {
+        return m_rbe_parent;
     }
 
-    [[nodiscard]] T* Parent() {
-        return rbe_parent;
-    }
-
-    [[nodiscard]] const T* Parent() const {
-        return rbe_parent;
-    }
-
-    void SetParent(T* parent) {
-        rbe_parent = parent;
-    }
-
-    [[nodiscard]] bool IsBlack() const {
-        return rbe_color == EntryColor::Black;
-    }
-
-    [[nodiscard]] bool IsRed() const {
-        return rbe_color == EntryColor::Red;
-    }
-
-    [[nodiscard]] EntryColor Color() const {
-        return rbe_color;
-    }
-
-    void SetColor(EntryColor color) {
-        rbe_color = color;
+    constexpr void SetParent(T* e) {
+        m_rbe_parent = e;
+    }
+
+    [[nodiscard]] constexpr bool IsBlack() const {
+        return m_rbe_color == RBColor::RB_BLACK;
+    }
+    [[nodiscard]] constexpr bool IsRed() const {
+        return m_rbe_color == RBColor::RB_RED;
+    }
+    [[nodiscard]] constexpr RBColor Color() const {
+        return m_rbe_color;
+    }
+
+    constexpr void SetColor(RBColor c) {
+        m_rbe_color = c;
     }
 
 private:
-    T* rbe_left = nullptr;
-    T* rbe_right = nullptr;
-    T* rbe_parent = nullptr;
-    EntryColor rbe_color{};
+    T* m_rbe_left{};
+    T* m_rbe_right{};
+    T* m_rbe_parent{};
+    RBColor m_rbe_color{RBColor::RB_BLACK};
 };
+#pragma pack(pop)
 
-template <typename Node>
-[[nodiscard]] RBEntry<Node>& RB_ENTRY(Node* node) {
-    return node->GetEntry();
-}
-
-template <typename Node>
-[[nodiscard]] const RBEntry<Node>& RB_ENTRY(const Node* node) {
-    return node->GetEntry();
-}
-
-template <typename Node>
-[[nodiscard]] Node* RB_PARENT(Node* node) {
-    return RB_ENTRY(node).Parent();
-}
-
-template <typename Node>
-[[nodiscard]] const Node* RB_PARENT(const Node* node) {
-    return RB_ENTRY(node).Parent();
-}
-
-template <typename Node>
-void RB_SET_PARENT(Node* node, Node* parent) {
-    return RB_ENTRY(node).SetParent(parent);
-}
-
-template <typename Node>
-[[nodiscard]] Node* RB_LEFT(Node* node) {
-    return RB_ENTRY(node).Left();
-}
-
-template <typename Node>
-[[nodiscard]] const Node* RB_LEFT(const Node* node) {
-    return RB_ENTRY(node).Left();
-}
-
-template <typename Node>
-void RB_SET_LEFT(Node* node, Node* left) {
-    return RB_ENTRY(node).SetLeft(left);
-}
-
-template <typename Node>
-[[nodiscard]] Node* RB_RIGHT(Node* node) {
-    return RB_ENTRY(node).Right();
-}
-
-template <typename Node>
-[[nodiscard]] const Node* RB_RIGHT(const Node* node) {
-    return RB_ENTRY(node).Right();
-}
-
-template <typename Node>
-void RB_SET_RIGHT(Node* node, Node* right) {
-    return RB_ENTRY(node).SetRight(right);
-}
-
-template <typename Node>
-[[nodiscard]] bool RB_IS_BLACK(const Node* node) {
-    return RB_ENTRY(node).IsBlack();
-}
-
-template <typename Node>
-[[nodiscard]] bool RB_IS_RED(const Node* node) {
-    return RB_ENTRY(node).IsRed();
-}
-
-template <typename Node>
-[[nodiscard]] EntryColor RB_COLOR(const Node* node) {
-    return RB_ENTRY(node).Color();
-}
-
-template <typename Node>
-void RB_SET_COLOR(Node* node, EntryColor color) {
-    return RB_ENTRY(node).SetColor(color);
-}
-
-template <typename Node>
-void RB_SET(Node* node, Node* parent) {
-    auto& entry = RB_ENTRY(node);
-    entry.SetParent(parent);
-    entry.SetLeft(nullptr);
-    entry.SetRight(nullptr);
-    entry.SetColor(EntryColor::Red);
-}
-
-template <typename Node>
-void RB_SET_BLACKRED(Node* black, Node* red) {
-    RB_SET_COLOR(black, EntryColor::Black);
-    RB_SET_COLOR(red, EntryColor::Red);
-}
-
-template <typename Node>
-void RB_ROTATE_LEFT(RBHead<Node>* head, Node* elm, Node*& tmp) {
+template <typename T>
+struct CheckRBEntry {
+    static constexpr bool value = false;
+};
+template <typename T>
+struct CheckRBEntry<RBEntry<T>> {
+    static constexpr bool value = true;
+};
+
+template <typename T>
+concept IsRBEntry = CheckRBEntry<T>::value;
+
+template <typename T>
+concept HasRBEntry = requires(T& t, const T& ct) {
+    { t.GetRBEntry() } -> std::same_as<RBEntry<T>&>;
+    { ct.GetRBEntry() } -> std::same_as<const RBEntry<T>&>;
+};
+
+template <typename T>
+requires HasRBEntry<T>
+class RBHead {
+private:
+    T* m_rbh_root = nullptr;
+
+public:
+    [[nodiscard]] constexpr T* Root() {
+        return m_rbh_root;
+    }
+    [[nodiscard]] constexpr const T* Root() const {
+        return m_rbh_root;
+    }
+    constexpr void SetRoot(T* root) {
+        m_rbh_root = root;
+    }
+
+    [[nodiscard]] constexpr bool IsEmpty() const {
+        return this->Root() == nullptr;
+    }
+};
+
+template <typename T>
+requires HasRBEntry<T>
+[[nodiscard]] constexpr RBEntry<T>& RB_ENTRY(T* t) {
+    return t->GetRBEntry();
+}
+template <typename T>
+requires HasRBEntry<T>
+[[nodiscard]] constexpr const RBEntry<T>& RB_ENTRY(const T* t) {
+    return t->GetRBEntry();
+}
+
+template <typename T>
+requires HasRBEntry<T>
+[[nodiscard]] constexpr T* RB_LEFT(T* t) {
+    return RB_ENTRY(t).Left();
+}
+template <typename T>
+requires HasRBEntry<T>
+[[nodiscard]] constexpr const T* RB_LEFT(const T* t) {
+    return RB_ENTRY(t).Left();
+}
+
+template <typename T>
+requires HasRBEntry<T>
+[[nodiscard]] constexpr T* RB_RIGHT(T* t) {
+    return RB_ENTRY(t).Right();
+}
+template <typename T>
+requires HasRBEntry<T>
+[[nodiscard]] constexpr const T* RB_RIGHT(const T* t) {
+    return RB_ENTRY(t).Right();
+}
+
+template <typename T>
+requires HasRBEntry<T>
+[[nodiscard]] constexpr T* RB_PARENT(T* t) {
+    return RB_ENTRY(t).Parent();
+}
+template <typename T>
+requires HasRBEntry<T>
+[[nodiscard]] constexpr const T* RB_PARENT(const T* t) {
+    return RB_ENTRY(t).Parent();
+}
+
+template <typename T>
+requires HasRBEntry<T>
+constexpr void RB_SET_LEFT(T* t, T* e) {
+    RB_ENTRY(t).SetLeft(e);
+}
+template <typename T>
+requires HasRBEntry<T>
+constexpr void RB_SET_RIGHT(T* t, T* e) {
+    RB_ENTRY(t).SetRight(e);
+}
+template <typename T>
+requires HasRBEntry<T>
+constexpr void RB_SET_PARENT(T* t, T* e) {
+    RB_ENTRY(t).SetParent(e);
+}
+
+template <typename T>
+requires HasRBEntry<T>
+[[nodiscard]] constexpr bool RB_IS_BLACK(const T* t) {
+    return RB_ENTRY(t).IsBlack();
+}
+template <typename T>
+requires HasRBEntry<T>
+[[nodiscard]] constexpr bool RB_IS_RED(const T* t) {
+    return RB_ENTRY(t).IsRed();
+}
+
+template <typename T>
+requires HasRBEntry<T>
+[[nodiscard]] constexpr RBColor RB_COLOR(const T* t) {
+    return RB_ENTRY(t).Color();
+}
+
+template <typename T>
+requires HasRBEntry<T>
+constexpr void RB_SET_COLOR(T* t, RBColor c) {
+    RB_ENTRY(t).SetColor(c);
+}
+
+template <typename T>
+requires HasRBEntry<T>
+constexpr void RB_SET(T* elm, T* parent) {
+    auto& rb_entry = RB_ENTRY(elm);
+    rb_entry.SetParent(parent);
+    rb_entry.SetLeft(nullptr);
+    rb_entry.SetRight(nullptr);
+    rb_entry.SetColor(RBColor::RB_RED);
+}
+
+template <typename T>
+requires HasRBEntry<T>
+constexpr void RB_SET_BLACKRED(T* black, T* red) {
+    RB_SET_COLOR(black, RBColor::RB_BLACK);
+    RB_SET_COLOR(red, RBColor::RB_RED);
+}
+
+template <typename T>
+requires HasRBEntry<T>
+constexpr void RB_ROTATE_LEFT(RBHead<T>& head, T* elm, T*& tmp) {
     tmp = RB_RIGHT(elm);
-    RB_SET_RIGHT(elm, RB_LEFT(tmp));
-    if (RB_RIGHT(elm) != nullptr) {
+    if (RB_SET_RIGHT(elm, RB_LEFT(tmp)); RB_RIGHT(elm) != nullptr) {
         RB_SET_PARENT(RB_LEFT(tmp), elm);
     }
 
-    RB_SET_PARENT(tmp, RB_PARENT(elm));
-    if (RB_PARENT(tmp) != nullptr) {
+    if (RB_SET_PARENT(tmp, RB_PARENT(elm)); RB_PARENT(tmp) != nullptr) {
         if (elm == RB_LEFT(RB_PARENT(elm))) {
             RB_SET_LEFT(RB_PARENT(elm), tmp);
         } else {
             RB_SET_RIGHT(RB_PARENT(elm), tmp);
         }
     } else {
-        head->SetRoot(tmp);
+        head.SetRoot(tmp);
     }
 
     RB_SET_LEFT(tmp, elm);
     RB_SET_PARENT(elm, tmp);
 }
 
-template <typename Node>
-void RB_ROTATE_RIGHT(RBHead<Node>* head, Node* elm, Node*& tmp) {
+template <typename T>
+requires HasRBEntry<T>
+constexpr void RB_ROTATE_RIGHT(RBHead<T>& head, T* elm, T*& tmp) {
     tmp = RB_LEFT(elm);
-    RB_SET_LEFT(elm, RB_RIGHT(tmp));
-    if (RB_LEFT(elm) != nullptr) {
+    if (RB_SET_LEFT(elm, RB_RIGHT(tmp)); RB_LEFT(elm) != nullptr) {
         RB_SET_PARENT(RB_RIGHT(tmp), elm);
     }
 
-    RB_SET_PARENT(tmp, RB_PARENT(elm));
-    if (RB_PARENT(tmp) != nullptr) {
+    if (RB_SET_PARENT(tmp, RB_PARENT(elm)); RB_PARENT(tmp) != nullptr) {
         if (elm == RB_LEFT(RB_PARENT(elm))) {
             RB_SET_LEFT(RB_PARENT(elm), tmp);
         } else {
             RB_SET_RIGHT(RB_PARENT(elm), tmp);
         }
     } else {
-        head->SetRoot(tmp);
+        head.SetRoot(tmp);
     }
 
     RB_SET_RIGHT(tmp, elm);
     RB_SET_PARENT(elm, tmp);
 }
 
-template <typename Node>
-void RB_INSERT_COLOR(RBHead<Node>* head, Node* elm) {
-    Node* parent = nullptr;
-    Node* tmp = nullptr;
+template <typename T>
+requires HasRBEntry<T>
+constexpr void RB_REMOVE_COLOR(RBHead<T>& head, T* parent, T* elm) {
+    T* tmp;
+    while ((elm == nullptr || RB_IS_BLACK(elm)) && elm != head.Root()) {
+        if (RB_LEFT(parent) == elm) {
+            tmp = RB_RIGHT(parent);
+            if (RB_IS_RED(tmp)) {
+                RB_SET_BLACKRED(tmp, parent);
+                RB_ROTATE_LEFT(head, parent, tmp);
+                tmp = RB_RIGHT(parent);
+            }
+
+            if ((RB_LEFT(tmp) == nullptr || RB_IS_BLACK(RB_LEFT(tmp))) &&
+                (RB_RIGHT(tmp) == nullptr || RB_IS_BLACK(RB_RIGHT(tmp)))) {
+                RB_SET_COLOR(tmp, RBColor::RB_RED);
+                elm = parent;
+                parent = RB_PARENT(elm);
+            } else {
+                if (RB_RIGHT(tmp) == nullptr || RB_IS_BLACK(RB_RIGHT(tmp))) {
+                    T* oleft;
+                    if ((oleft = RB_LEFT(tmp)) != nullptr) {
+                        RB_SET_COLOR(oleft, RBColor::RB_BLACK);
+                    }
+
+                    RB_SET_COLOR(tmp, RBColor::RB_RED);
+                    RB_ROTATE_RIGHT(head, tmp, oleft);
+                    tmp = RB_RIGHT(parent);
+                }
+
+                RB_SET_COLOR(tmp, RB_COLOR(parent));
+                RB_SET_COLOR(parent, RBColor::RB_BLACK);
+                if (RB_RIGHT(tmp)) {
+                    RB_SET_COLOR(RB_RIGHT(tmp), RBColor::RB_BLACK);
+                }
+
+                RB_ROTATE_LEFT(head, parent, tmp);
+                elm = head.Root();
+                break;
+            }
+        } else {
+            tmp = RB_LEFT(parent);
+            if (RB_IS_RED(tmp)) {
+                RB_SET_BLACKRED(tmp, parent);
+                RB_ROTATE_RIGHT(head, parent, tmp);
+                tmp = RB_LEFT(parent);
+            }
+
+            if ((RB_LEFT(tmp) == nullptr || RB_IS_BLACK(RB_LEFT(tmp))) &&
+                (RB_RIGHT(tmp) == nullptr || RB_IS_BLACK(RB_RIGHT(tmp)))) {
+                RB_SET_COLOR(tmp, RBColor::RB_RED);
+                elm = parent;
+                parent = RB_PARENT(elm);
+            } else {
+                if (RB_LEFT(tmp) == nullptr || RB_IS_BLACK(RB_LEFT(tmp))) {
+                    T* oright;
+                    if ((oright = RB_RIGHT(tmp)) != nullptr) {
+                        RB_SET_COLOR(oright, RBColor::RB_BLACK);
+                    }
+
+                    RB_SET_COLOR(tmp, RBColor::RB_RED);
+                    RB_ROTATE_LEFT(head, tmp, oright);
+                    tmp = RB_LEFT(parent);
+                }
+
+                RB_SET_COLOR(tmp, RB_COLOR(parent));
+                RB_SET_COLOR(parent, RBColor::RB_BLACK);
+
+                if (RB_LEFT(tmp)) {
+                    RB_SET_COLOR(RB_LEFT(tmp), RBColor::RB_BLACK);
+                }
+
+                RB_ROTATE_RIGHT(head, parent, tmp);
+                elm = head.Root();
+                break;
+            }
+        }
+    }
+
+    if (elm) {
+        RB_SET_COLOR(elm, RBColor::RB_BLACK);
+    }
+}
+
+template <typename T>
+requires HasRBEntry<T>
+constexpr T* RB_REMOVE(RBHead<T>& head, T* elm) {
+    T* child = nullptr;
+    T* parent = nullptr;
+    T* old = elm;
+    RBColor color = RBColor::RB_BLACK;
+
+    if (RB_LEFT(elm) == nullptr) {
+        child = RB_RIGHT(elm);
+    } else if (RB_RIGHT(elm) == nullptr) {
+        child = RB_LEFT(elm);
+    } else {
+        T* left;
+        elm = RB_RIGHT(elm);
+        while ((left = RB_LEFT(elm)) != nullptr) {
+            elm = left;
+        }
+
+        child = RB_RIGHT(elm);
+        parent = RB_PARENT(elm);
+        color = RB_COLOR(elm);
+
+        if (child) {
+            RB_SET_PARENT(child, parent);
+        }
+
+        if (parent) {
+            if (RB_LEFT(parent) == elm) {
+                RB_SET_LEFT(parent, child);
+            } else {
+                RB_SET_RIGHT(parent, child);
+            }
+        } else {
+            head.SetRoot(child);
+        }
+
+        if (RB_PARENT(elm) == old) {
+            parent = elm;
+        }
+
+        elm->SetRBEntry(old->GetRBEntry());
+
+        if (RB_PARENT(old)) {
+            if (RB_LEFT(RB_PARENT(old)) == old) {
+                RB_SET_LEFT(RB_PARENT(old), elm);
+            } else {
+                RB_SET_RIGHT(RB_PARENT(old), elm);
+            }
+        } else {
+            head.SetRoot(elm);
+        }
+
+        RB_SET_PARENT(RB_LEFT(old), elm);
+
+        if (RB_RIGHT(old)) {
+            RB_SET_PARENT(RB_RIGHT(old), elm);
+        }
+
+        if (parent) {
+            left = parent;
+        }
+
+        if (color == RBColor::RB_BLACK) {
+            RB_REMOVE_COLOR(head, parent, child);
+        }
+
+        return old;
+    }
+
+    parent = RB_PARENT(elm);
+    color = RB_COLOR(elm);
+
+    if (child) {
+        RB_SET_PARENT(child, parent);
+    }
+    if (parent) {
+        if (RB_LEFT(parent) == elm) {
+            RB_SET_LEFT(parent, child);
+        } else {
+            RB_SET_RIGHT(parent, child);
+        }
+    } else {
+        head.SetRoot(child);
+    }
+
+    if (color == RBColor::RB_BLACK) {
+        RB_REMOVE_COLOR(head, parent, child);
+    }
+
+    return old;
+}
+
+template <typename T>
+requires HasRBEntry<T>
+constexpr void RB_INSERT_COLOR(RBHead<T>& head, T* elm) {
+    T *parent = nullptr, *tmp = nullptr;
     while ((parent = RB_PARENT(elm)) != nullptr && RB_IS_RED(parent)) {
-        Node* gparent = RB_PARENT(parent);
+        T* gparent = RB_PARENT(parent);
         if (parent == RB_LEFT(gparent)) {
             tmp = RB_RIGHT(gparent);
             if (tmp && RB_IS_RED(tmp)) {
-                RB_SET_COLOR(tmp, EntryColor::Black);
+                RB_SET_COLOR(tmp, RBColor::RB_BLACK);
                 RB_SET_BLACKRED(parent, gparent);
                 elm = gparent;
                 continue;
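As a checkpoint on the HasRBEntry concept introduced above, here is a minimal node type the RB_* helpers would accept (RawNode and CompareRaw are hypothetical; in this commit the only real client is IntrusiveRedBlackTreeNode):

    struct RawNode {
        int value{};
        Common::freebsd::RBEntry<RawNode> entry{};

        [[nodiscard]] constexpr Common::freebsd::RBEntry<RawNode>& GetRBEntry() {
            return entry;
        }
        [[nodiscard]] constexpr const Common::freebsd::RBEntry<RawNode>& GetRBEntry() const {
            return entry;
        }
        constexpr void SetRBEntry(const Common::freebsd::RBEntry<RawNode>& e) {
            entry = e; // RB_REMOVE copies the victim's links through this
        }
    };

    constexpr int CompareRaw(RawNode* lhs, RawNode* rhs) {
        return (lhs->value < rhs->value) ? -1 : (lhs->value > rhs->value) ? 1 : 0;
    }
    // Usage: Common::freebsd::RBHead<RawNode> head;
    //        Common::freebsd::RB_INSERT(head, &node, CompareRaw);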
@@ -300,7 +499,7 @@ void RB_INSERT_COLOR(RBHead<Node>* head, Node* elm) {
         } else {
             tmp = RB_LEFT(gparent);
             if (tmp && RB_IS_RED(tmp)) {
-                RB_SET_COLOR(tmp, EntryColor::Black);
+                RB_SET_COLOR(tmp, RBColor::RB_BLACK);
                 RB_SET_BLACKRED(parent, gparent);
                 elm = gparent;
                 continue;
@ -318,194 +517,14 @@ void RB_INSERT_COLOR(RBHead<Node>* head, Node* elm) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
RB_SET_COLOR(head->Root(), EntryColor::Black);
|
RB_SET_COLOR(head.Root(), RBColor::RB_BLACK);
|
||||||
}
|
}
|
||||||
|
|
||||||
template <typename Node>
|
template <typename T, typename Compare>
|
||||||
void RB_REMOVE_COLOR(RBHead<Node>* head, Node* parent, Node* elm) {
|
requires HasRBEntry<T>
|
||||||
Node* tmp;
|
constexpr T* RB_INSERT(RBHead<T>& head, T* elm, Compare cmp) {
|
||||||
while ((elm == nullptr || RB_IS_BLACK(elm)) && elm != head->Root() && parent != nullptr) {
|
T* parent = nullptr;
|
||||||
if (RB_LEFT(parent) == elm) {
|
T* tmp = head.Root();
|
||||||
tmp = RB_RIGHT(parent);
|
|
||||||
if (!tmp) {
|
|
||||||
ASSERT_MSG(false, "tmp is invalid!");
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
if (RB_IS_RED(tmp)) {
|
|
||||||
RB_SET_BLACKRED(tmp, parent);
|
|
||||||
RB_ROTATE_LEFT(head, parent, tmp);
|
|
||||||
tmp = RB_RIGHT(parent);
|
|
||||||
}
|
|
||||||
|
|
||||||
if ((RB_LEFT(tmp) == nullptr || RB_IS_BLACK(RB_LEFT(tmp))) &&
|
|
||||||
(RB_RIGHT(tmp) == nullptr || RB_IS_BLACK(RB_RIGHT(tmp)))) {
|
|
||||||
RB_SET_COLOR(tmp, EntryColor::Red);
|
|
||||||
elm = parent;
|
|
||||||
parent = RB_PARENT(elm);
|
|
||||||
} else {
|
|
||||||
if (RB_RIGHT(tmp) == nullptr || RB_IS_BLACK(RB_RIGHT(tmp))) {
|
|
||||||
Node* oleft;
|
|
||||||
if ((oleft = RB_LEFT(tmp)) != nullptr) {
|
|
||||||
RB_SET_COLOR(oleft, EntryColor::Black);
|
|
||||||
}
|
|
||||||
|
|
||||||
RB_SET_COLOR(tmp, EntryColor::Red);
|
|
||||||
RB_ROTATE_RIGHT(head, tmp, oleft);
|
|
||||||
tmp = RB_RIGHT(parent);
|
|
||||||
}
|
|
||||||
|
|
||||||
-                RB_SET_COLOR(tmp, RB_COLOR(parent));
-                RB_SET_COLOR(parent, EntryColor::Black);
-                if (RB_RIGHT(tmp)) {
-                    RB_SET_COLOR(RB_RIGHT(tmp), EntryColor::Black);
-                }
-
-                RB_ROTATE_LEFT(head, parent, tmp);
-                elm = head->Root();
-                break;
-            }
-        } else {
-            tmp = RB_LEFT(parent);
-            if (RB_IS_RED(tmp)) {
-                RB_SET_BLACKRED(tmp, parent);
-                RB_ROTATE_RIGHT(head, parent, tmp);
-                tmp = RB_LEFT(parent);
-            }
-
-            if (!tmp) {
-                ASSERT_MSG(false, "tmp is invalid!");
-                break;
-            }
-
-            if ((RB_LEFT(tmp) == nullptr || RB_IS_BLACK(RB_LEFT(tmp))) &&
-                (RB_RIGHT(tmp) == nullptr || RB_IS_BLACK(RB_RIGHT(tmp)))) {
-                RB_SET_COLOR(tmp, EntryColor::Red);
-                elm = parent;
-                parent = RB_PARENT(elm);
-            } else {
-                if (RB_LEFT(tmp) == nullptr || RB_IS_BLACK(RB_LEFT(tmp))) {
-                    Node* oright;
-                    if ((oright = RB_RIGHT(tmp)) != nullptr) {
-                        RB_SET_COLOR(oright, EntryColor::Black);
-                    }
-
-                    RB_SET_COLOR(tmp, EntryColor::Red);
-                    RB_ROTATE_LEFT(head, tmp, oright);
-                    tmp = RB_LEFT(parent);
-                }
-
-                RB_SET_COLOR(tmp, RB_COLOR(parent));
-                RB_SET_COLOR(parent, EntryColor::Black);
-
-                if (RB_LEFT(tmp)) {
-                    RB_SET_COLOR(RB_LEFT(tmp), EntryColor::Black);
-                }
-
-                RB_ROTATE_RIGHT(head, parent, tmp);
-                elm = head->Root();
-                break;
-            }
-        }
-    }
-
-    if (elm) {
-        RB_SET_COLOR(elm, EntryColor::Black);
-    }
-}
-
-template <typename Node>
-Node* RB_REMOVE(RBHead<Node>* head, Node* elm) {
-    Node* child = nullptr;
-    Node* parent = nullptr;
-    Node* old = elm;
-    EntryColor color{};
-
-    const auto finalize = [&] {
-        if (color == EntryColor::Black) {
-            RB_REMOVE_COLOR(head, parent, child);
-        }
-
-        return old;
-    };
-
-    if (RB_LEFT(elm) == nullptr) {
-        child = RB_RIGHT(elm);
-    } else if (RB_RIGHT(elm) == nullptr) {
-        child = RB_LEFT(elm);
-    } else {
-        Node* left;
-        elm = RB_RIGHT(elm);
-        while ((left = RB_LEFT(elm)) != nullptr) {
-            elm = left;
-        }
-
-        child = RB_RIGHT(elm);
-        parent = RB_PARENT(elm);
-        color = RB_COLOR(elm);
-
-        if (child) {
-            RB_SET_PARENT(child, parent);
-        }
-        if (parent) {
-            if (RB_LEFT(parent) == elm) {
-                RB_SET_LEFT(parent, child);
-            } else {
-                RB_SET_RIGHT(parent, child);
-            }
-        } else {
-            head->SetRoot(child);
-        }
-
-        if (RB_PARENT(elm) == old) {
-            parent = elm;
-        }
-
-        elm->SetEntry(old->GetEntry());
-
-        if (RB_PARENT(old)) {
-            if (RB_LEFT(RB_PARENT(old)) == old) {
-                RB_SET_LEFT(RB_PARENT(old), elm);
-            } else {
-                RB_SET_RIGHT(RB_PARENT(old), elm);
-            }
-        } else {
-            head->SetRoot(elm);
-        }
-        RB_SET_PARENT(RB_LEFT(old), elm);
-        if (RB_RIGHT(old)) {
-            RB_SET_PARENT(RB_RIGHT(old), elm);
-        }
-        if (parent) {
-            left = parent;
-        }
-
-        return finalize();
-    }
-
-    parent = RB_PARENT(elm);
-    color = RB_COLOR(elm);
-
-    if (child) {
-        RB_SET_PARENT(child, parent);
-    }
-    if (parent) {
-        if (RB_LEFT(parent) == elm) {
-            RB_SET_LEFT(parent, child);
-        } else {
-            RB_SET_RIGHT(parent, child);
-        }
-    } else {
-        head->SetRoot(child);
-    }
-
-    return finalize();
-}
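The removed `RB_REMOVE` above follows the classic BSD strategy: a victim with two children is not unlinked in place; the code instead walks to the in-order successor (the leftmost node of the right subtree), splices that node out, and moves it into the victim's position before rebalancing. A minimal standalone sketch of that successor step, using a hypothetical plain node type rather than the header's accessors:

struct DemoNode {
    DemoNode* left{};
    DemoNode* right{};
    DemoNode* parent{};
};

// Mirrors the `while ((left = RB_LEFT(elm)) != nullptr)` walk above: the
// in-order successor of a node with two children is the leftmost node of
// its right subtree, and it has at most one (right) child, so it is easy
// to splice out and move into the removed node's place.
DemoNode* FindSuccessor(DemoNode* elm) {
    DemoNode* succ = elm->right;
    while (succ->left != nullptr) {
        succ = succ->left;
    }
    return succ;
}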
-
-// Inserts a node into the RB tree
-template <typename Node, typename CompareFunction>
-Node* RB_INSERT(RBHead<Node>* head, Node* elm, CompareFunction cmp) {
-    Node* parent = nullptr;
-    Node* tmp = head->Root();
     int comp = 0;

     while (tmp) {

@@ -529,17 +548,17 @@ Node* RB_INSERT(RBHead<Node>* head, Node* elm, CompareFunction cmp) {
             RB_SET_RIGHT(parent, elm);
         }
     } else {
-        head->SetRoot(elm);
+        head.SetRoot(elm);
     }

     RB_INSERT_COLOR(head, elm);
     return nullptr;
 }

-// Finds the node with the same key as elm
-template <typename Node, typename CompareFunction>
-Node* RB_FIND(RBHead<Node>* head, Node* elm, CompareFunction cmp) {
-    Node* tmp = head->Root();
+template <typename T, typename Compare>
+requires HasRBEntry<T>
+constexpr T* RB_FIND(RBHead<T>& head, T* elm, Compare cmp) {
+    T* tmp = head.Root();

     while (tmp) {
         const int comp = cmp(elm, tmp);

@@ -555,11 +574,11 @@ Node* RB_FIND(RBHead<Node>* head, Node* elm, CompareFunction cmp) {
     return nullptr;
 }

-// Finds the first node greater than or equal to the search key
-template <typename Node, typename CompareFunction>
-Node* RB_NFIND(RBHead<Node>* head, Node* elm, CompareFunction cmp) {
-    Node* tmp = head->Root();
-    Node* res = nullptr;
+template <typename T, typename Compare>
+requires HasRBEntry<T>
+constexpr T* RB_NFIND(RBHead<T>& head, T* elm, Compare cmp) {
+    T* tmp = head.Root();
+    T* res = nullptr;

     while (tmp) {
         const int comp = cmp(elm, tmp);

@@ -576,13 +595,13 @@ Node* RB_NFIND(RBHead<Node>* head, Node* elm, CompareFunction cmp) {
     return res;
 }
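`RB_FIND` and `RB_NFIND` share one comparator contract: negative means descend left, positive means descend right, zero means match; `RB_NFIND` additionally remembers the last node it passed while going left, which is exactly the smallest node not less than the query. A small sketch of a conforming three-way comparator (node type and field hypothetical):

struct DemoTreeNode {
    u64 key;
};

// Shape expected by the cmp parameter of RB_FIND/RB_NFIND above.
int DemoCompare(const DemoTreeNode* lhs, const DemoTreeNode* rhs) {
    if (lhs->key < rhs->key) {
        return -1; // continue in the left subtree
    }
    if (lhs->key > rhs->key) {
        return 1; // continue in the right subtree
    }
    return 0; // exact match; RB_FIND returns here
}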

-// Finds the node with the same key as lelm
-template <typename Node, typename CompareFunction>
-Node* RB_FIND_LIGHT(RBHead<Node>* head, const void* lelm, CompareFunction lcmp) {
-    Node* tmp = head->Root();
+template <typename T, typename U, typename Compare>
+requires HasRBEntry<T>
+constexpr T* RB_FIND_KEY(RBHead<T>& head, const U& key, Compare cmp) {
+    T* tmp = head.Root();

     while (tmp) {
-        const int comp = lcmp(lelm, tmp);
+        const int comp = cmp(key, tmp);
         if (comp < 0) {
             tmp = RB_LEFT(tmp);
         } else if (comp > 0) {

@@ -595,14 +614,14 @@ Node* RB_FIND_LIGHT(RBHead<Node>* head, const void* lelm, CompareFunction lcmp)
     return nullptr;
 }

-// Finds the first node greater than or equal to the search key
-template <typename Node, typename CompareFunction>
-Node* RB_NFIND_LIGHT(RBHead<Node>* head, const void* lelm, CompareFunction lcmp) {
-    Node* tmp = head->Root();
-    Node* res = nullptr;
+template <typename T, typename U, typename Compare>
+requires HasRBEntry<T>
+constexpr T* RB_NFIND_KEY(RBHead<T>& head, const U& key, Compare cmp) {
+    T* tmp = head.Root();
+    T* res = nullptr;

     while (tmp) {
-        const int comp = lcmp(lelm, tmp);
+        const int comp = cmp(key, tmp);
         if (comp < 0) {
             res = tmp;
             tmp = RB_LEFT(tmp);

@@ -616,8 +635,43 @@ Node* RB_NFIND_LIGHT(RBHead<Node>* head, const void* lelm, CompareFunction lcmp)
     return res;
 }

-template <typename Node>
-Node* RB_NEXT(Node* elm) {
+template <typename T, typename Compare>
+requires HasRBEntry<T>
+constexpr T* RB_FIND_EXISTING(RBHead<T>& head, T* elm, Compare cmp) {
+    T* tmp = head.Root();
+
+    while (true) {
+        const int comp = cmp(elm, tmp);
+        if (comp < 0) {
+            tmp = RB_LEFT(tmp);
+        } else if (comp > 0) {
+            tmp = RB_RIGHT(tmp);
+        } else {
+            return tmp;
+        }
+    }
+}
+
+template <typename T, typename U, typename Compare>
+requires HasRBEntry<T>
+constexpr T* RB_FIND_EXISTING_KEY(RBHead<T>& head, const U& key, Compare cmp) {
+    T* tmp = head.Root();
+
+    while (true) {
+        const int comp = cmp(key, tmp);
+        if (comp < 0) {
+            tmp = RB_LEFT(tmp);
+        } else if (comp > 0) {
+            tmp = RB_RIGHT(tmp);
+        } else {
+            return tmp;
+        }
+    }
+}
+
+template <typename T>
+requires HasRBEntry<T>
+constexpr T* RB_NEXT(T* elm) {
     if (RB_RIGHT(elm)) {
         elm = RB_RIGHT(elm);
         while (RB_LEFT(elm)) {

@@ -636,8 +690,9 @@ Node* RB_NEXT(Node* elm) {
     return elm;
 }

-template <typename Node>
-Node* RB_PREV(Node* elm) {
+template <typename T>
+requires HasRBEntry<T>
+constexpr T* RB_PREV(T* elm) {
     if (RB_LEFT(elm)) {
         elm = RB_LEFT(elm);
         while (RB_RIGHT(elm)) {

@@ -656,30 +711,32 @@ Node* RB_PREV(Node* elm) {
     return elm;
 }

-template <typename Node>
-Node* RB_MINMAX(RBHead<Node>* head, bool is_min) {
-    Node* tmp = head->Root();
-    Node* parent = nullptr;
+template <typename T>
+requires HasRBEntry<T>
+constexpr T* RB_MIN(RBHead<T>& head) {
+    T* tmp = head.Root();
+    T* parent = nullptr;

     while (tmp) {
         parent = tmp;
-        if (is_min) {
-            tmp = RB_LEFT(tmp);
-        } else {
-            tmp = RB_RIGHT(tmp);
-        }
+        tmp = RB_LEFT(tmp);
     }

     return parent;
 }

-template <typename Node>
-Node* RB_MIN(RBHead<Node>* head) {
-    return RB_MINMAX(head, true);
-}
-
-template <typename Node>
-Node* RB_MAX(RBHead<Node>* head) {
-    return RB_MINMAX(head, false);
-}
+template <typename T>
+requires HasRBEntry<T>
+constexpr T* RB_MAX(RBHead<T>& head) {
+    T* tmp = head.Root();
+    T* parent = nullptr;
+
+    while (tmp) {
+        parent = tmp;
+        tmp = RB_RIGHT(tmp);
+    }
+
+    return parent;
+}

-} // namespace Common
+} // namespace Common::freebsd
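With `RB_MINMAX` split apart, `RB_MIN` and `RB_MAX` each walk a single spine of the tree, and together with `RB_NEXT` they support the usual ascending iteration. A hedged sketch of how a caller of this header could visit every node in order (assumes a node type satisfying `HasRBEntry` and a populated `RBHead`; `Visit` is a hypothetical callback):

// `head` is an RBHead<DemoTreeNode> owned by the caller.
for (DemoTreeNode* node = RB_MIN(head); node != nullptr; node = RB_NEXT(node)) {
    Visit(node);
}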

@@ -207,6 +207,7 @@ add_library(core STATIC
     hle/kernel/k_memory_region.h
     hle/kernel/k_memory_region_type.h
     hle/kernel/k_page_bitmap.h
+    hle/kernel/k_page_buffer.h
     hle/kernel/k_page_heap.cpp
     hle/kernel/k_page_heap.h
     hle/kernel/k_page_linked_list.h
@@ -244,6 +245,8 @@ add_library(core STATIC
     hle/kernel/k_system_control.h
     hle/kernel/k_thread.cpp
     hle/kernel/k_thread.h
+    hle/kernel/k_thread_local_page.cpp
+    hle/kernel/k_thread_local_page.h
     hle/kernel/k_thread_queue.cpp
     hle/kernel/k_thread_queue.h
     hle/kernel/k_trace.h

@@ -385,7 +385,7 @@ public:
     T PopRaw();

     template <class T>
-    std::shared_ptr<T> PopIpcInterface() {
+    std::weak_ptr<T> PopIpcInterface() {
         ASSERT(context->Session()->IsDomain());
         ASSERT(context->GetDomainMessageHeader().input_object_count > 0);
         return context->GetDomainHandler<T>(Pop<u32>() - 1);
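Returning `std::weak_ptr` from `PopIpcInterface` means a caller can no longer extend the domain handler's lifetime by accident: it has to promote the pointer and cope with a handler that is already gone. A sketch of the expected call-site pattern (the interface type and method here are hypothetical):

// `rp` stands in for the parser/context exposing PopIpcInterface.
if (auto iface = rp.PopIpcInterface<IDemoInterface>().lock()) {
    iface->HandleRequest(); // the handler is still alive
} else {
    // The owning manager already released it; fail the request instead.
}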

@@ -45,7 +45,7 @@ bool SessionRequestManager::HasSessionRequestHandler(const HLERequestContext& co
             LOG_CRITICAL(IPC, "object_id {} is too big!", object_id);
             return false;
         }
-        return DomainHandler(object_id - 1) != nullptr;
+        return DomainHandler(object_id - 1).lock() != nullptr;
     } else {
         return session_handler != nullptr;
     }
@@ -53,9 +53,6 @@ bool SessionRequestManager::HasSessionRequestHandler(const HLERequestContext& co

 void SessionRequestHandler::ClientConnected(KServerSession* session) {
     session->ClientConnected(shared_from_this());
-
-    // Ensure our server session is tracked globally.
-    kernel.RegisterServerSession(session);
 }

 void SessionRequestHandler::ClientDisconnected(KServerSession* session) {

@@ -94,6 +94,7 @@ protected:
     std::weak_ptr<ServiceThread> service_thread;
 };

+using SessionRequestHandlerWeakPtr = std::weak_ptr<SessionRequestHandler>;
 using SessionRequestHandlerPtr = std::shared_ptr<SessionRequestHandler>;

 /**
@@ -139,7 +140,7 @@ public:
         }
     }

-    SessionRequestHandlerPtr DomainHandler(std::size_t index) const {
+    SessionRequestHandlerWeakPtr DomainHandler(std::size_t index) const {
         ASSERT_MSG(index < DomainHandlerCount(), "Unexpected handler index {}", index);
         return domain_handlers.at(index);
     }
@@ -328,10 +329,10 @@ public:

     template <typename T>
     std::shared_ptr<T> GetDomainHandler(std::size_t index) const {
-        return std::static_pointer_cast<T>(manager->DomainHandler(index));
+        return std::static_pointer_cast<T>(manager.lock()->DomainHandler(index).lock());
     }

-    void SetSessionRequestManager(std::shared_ptr<SessionRequestManager> manager_) {
+    void SetSessionRequestManager(std::weak_ptr<SessionRequestManager> manager_) {
         manager = std::move(manager_);
     }

@@ -374,7 +375,7 @@ private:
     u32 handles_offset{};
     u32 domain_offset{};

-    std::shared_ptr<SessionRequestManager> manager;
+    std::weak_ptr<SessionRequestManager> manager;

     KernelCore& kernel;
     Core::Memory::Memory& memory;

@@ -7,19 +7,23 @@
 #include "common/common_funcs.h"
 #include "common/common_types.h"
 #include "core/core.h"
+#include "core/device_memory.h"
 #include "core/hardware_properties.h"
 #include "core/hle/kernel/init/init_slab_setup.h"
 #include "core/hle/kernel/k_code_memory.h"
 #include "core/hle/kernel/k_event.h"
 #include "core/hle/kernel/k_memory_layout.h"
 #include "core/hle/kernel/k_memory_manager.h"
+#include "core/hle/kernel/k_page_buffer.h"
 #include "core/hle/kernel/k_port.h"
 #include "core/hle/kernel/k_process.h"
 #include "core/hle/kernel/k_resource_limit.h"
 #include "core/hle/kernel/k_session.h"
 #include "core/hle/kernel/k_shared_memory.h"
+#include "core/hle/kernel/k_shared_memory_info.h"
 #include "core/hle/kernel/k_system_control.h"
 #include "core/hle/kernel/k_thread.h"
+#include "core/hle/kernel/k_thread_local_page.h"
 #include "core/hle/kernel/k_transfer_memory.h"

 namespace Kernel::Init {

@@ -32,9 +36,13 @@ namespace Kernel::Init {
     HANDLER(KEvent, (SLAB_COUNT(KEvent)), ##__VA_ARGS__) \
     HANDLER(KPort, (SLAB_COUNT(KPort)), ##__VA_ARGS__) \
     HANDLER(KSharedMemory, (SLAB_COUNT(KSharedMemory)), ##__VA_ARGS__) \
+    HANDLER(KSharedMemoryInfo, (SLAB_COUNT(KSharedMemory) * 8), ##__VA_ARGS__) \
     HANDLER(KTransferMemory, (SLAB_COUNT(KTransferMemory)), ##__VA_ARGS__) \
     HANDLER(KCodeMemory, (SLAB_COUNT(KCodeMemory)), ##__VA_ARGS__) \
     HANDLER(KSession, (SLAB_COUNT(KSession)), ##__VA_ARGS__) \
+    HANDLER(KThreadLocalPage, \
+            (SLAB_COUNT(KProcess) + (SLAB_COUNT(KProcess) + SLAB_COUNT(KThread)) / 8), \
+            ##__VA_ARGS__) \
     HANDLER(KResourceLimit, (SLAB_COUNT(KResourceLimit)), ##__VA_ARGS__)

 namespace {
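The new `KThreadLocalPage` count budgets one page per process plus one page per eight processes-or-threads, since each page can serve several thread-local regions. Plugging in the default counts defined in the next hunk (80 processes, 800 threads) makes the formula concrete:

// Worked example of the HANDLER(KThreadLocalPage, ...) count above, using
// SLAB_COUNT(KProcess) == 80 and SLAB_COUNT(KThread) == 800:
constexpr size_t NumThreadLocalPages = 80 + (80 + 800) / 8; // 80 + 110
static_assert(NumThreadLocalPages == 190);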
@@ -50,38 +58,46 @@ enum KSlabType : u32 {
 // Constexpr counts.
 constexpr size_t SlabCountKProcess = 80;
 constexpr size_t SlabCountKThread = 800;
-constexpr size_t SlabCountKEvent = 700;
+constexpr size_t SlabCountKEvent = 900;
 constexpr size_t SlabCountKInterruptEvent = 100;
-constexpr size_t SlabCountKPort = 256 + 0x20; // Extra 0x20 ports over Nintendo for homebrew.
+constexpr size_t SlabCountKPort = 384;
 constexpr size_t SlabCountKSharedMemory = 80;
 constexpr size_t SlabCountKTransferMemory = 200;
 constexpr size_t SlabCountKCodeMemory = 10;
 constexpr size_t SlabCountKDeviceAddressSpace = 300;
-constexpr size_t SlabCountKSession = 933;
+constexpr size_t SlabCountKSession = 1133;
 constexpr size_t SlabCountKLightSession = 100;
 constexpr size_t SlabCountKObjectName = 7;
 constexpr size_t SlabCountKResourceLimit = 5;
 constexpr size_t SlabCountKDebug = Core::Hardware::NUM_CPU_CORES;
-constexpr size_t SlabCountKAlpha = 1;
-constexpr size_t SlabCountKBeta = 6;
+constexpr size_t SlabCountKIoPool = 1;
+constexpr size_t SlabCountKIoRegion = 6;

 constexpr size_t SlabCountExtraKThread = 160;

+/// Helper function to translate from the slab virtual address to the reserved location in physical
+/// memory.
+static PAddr TranslateSlabAddrToPhysical(KMemoryLayout& memory_layout, VAddr slab_addr) {
+    slab_addr -= memory_layout.GetSlabRegionAddress();
+    return slab_addr + Core::DramMemoryMap::SlabHeapBase;
+}
+
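`TranslateSlabAddrToPhysical` is a pure linear remap: subtract the base of the kernel-virtual slab region, then rebase onto the DRAM block reserved for slab heaps. A worked sketch with entirely made-up addresses, just to show the arithmetic (the real values come from KMemoryLayout and Core::DramMemoryMap::SlabHeapBase):

constexpr u64 DemoSlabRegionBase = 0xFFFF'0000'0000ULL; // hypothetical
constexpr u64 DemoSlabHeapBase = 0x8'0060'0000ULL;      // hypothetical

constexpr u64 DemoTranslate(u64 slab_addr) {
    return (slab_addr - DemoSlabRegionBase) + DemoSlabHeapBase;
}
static_assert(DemoTranslate(0xFFFF'0000'1234ULL) == 0x8'0060'1234ULL);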
 template <typename T>
 VAddr InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout, VAddr address,
                          size_t num_objects) {
-    // TODO(bunnei): This is just a place holder. We should initialize the appropriate KSlabHeap for
-    // kernel object type T with the backing kernel memory pointer once we emulate kernel memory.
-
     const size_t size = Common::AlignUp(sizeof(T) * num_objects, alignof(void*));
     VAddr start = Common::AlignUp(address, alignof(T));

-    // This is intentionally empty. Once KSlabHeap is fully implemented, we can replace this with
-    // the pointer to emulated memory to pass along. Until then, KSlabHeap will just allocate/free
-    // host memory.
-    void* backing_kernel_memory{};
+    // This should use the virtual memory address passed in, but currently, we do not setup the
+    // kernel virtual memory layout. Instead, we simply map these at a region of physical memory
+    // that we reserve for the slab heaps.
+    // TODO(bunnei): Fix this once we support the kernel virtual memory layout.

     if (size > 0) {
+        void* backing_kernel_memory{
+            system.DeviceMemory().GetPointer(TranslateSlabAddrToPhysical(memory_layout, start))};
+
         const KMemoryRegion* region = memory_layout.FindVirtual(start + size - 1);
         ASSERT(region != nullptr);
         ASSERT(region->IsDerivedFrom(KMemoryRegionType_KernelSlab));
@@ -91,6 +107,12 @@ VAddr InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout, VAd
     return start + size;
 }

+size_t CalculateSlabHeapGapSize() {
+    constexpr size_t KernelSlabHeapGapSize = 2_MiB - 296_KiB;
+    static_assert(KernelSlabHeapGapSize <= KernelSlabHeapGapsSizeMax);
+    return KernelSlabHeapGapSize;
+}
+
 } // namespace

 KSlabResourceCounts KSlabResourceCounts::CreateDefault() {
@@ -109,8 +131,8 @@ KSlabResourceCounts KSlabResourceCounts::CreateDefault() {
         .num_KObjectName = SlabCountKObjectName,
         .num_KResourceLimit = SlabCountKResourceLimit,
         .num_KDebug = SlabCountKDebug,
-        .num_KAlpha = SlabCountKAlpha,
-        .num_KBeta = SlabCountKBeta,
+        .num_KIoPool = SlabCountKIoPool,
+        .num_KIoRegion = SlabCountKIoRegion,
     };
 }
@@ -136,11 +158,34 @@ size_t CalculateTotalSlabHeapSize(const KernelCore& kernel) {
 #undef ADD_SLAB_SIZE

     // Add the reserved size.
-    size += KernelSlabHeapGapsSize;
+    size += CalculateSlabHeapGapSize();

     return size;
 }

+void InitializeKPageBufferSlabHeap(Core::System& system) {
+    auto& kernel = system.Kernel();
+
+    const auto& counts = kernel.SlabResourceCounts();
+    const size_t num_pages =
+        counts.num_KProcess + counts.num_KThread + (counts.num_KProcess + counts.num_KThread) / 8;
+    const size_t slab_size = num_pages * PageSize;
+
+    // Reserve memory from the system resource limit.
+    ASSERT(kernel.GetSystemResourceLimit()->Reserve(LimitableResource::PhysicalMemory, slab_size));
+
+    // Allocate memory for the slab.
+    constexpr auto AllocateOption = KMemoryManager::EncodeOption(
+        KMemoryManager::Pool::System, KMemoryManager::Direction::FromFront);
+    const PAddr slab_address =
+        kernel.MemoryManager().AllocateAndOpenContinuous(num_pages, 1, AllocateOption);
+    ASSERT(slab_address != 0);
+
+    // Initialize the slabheap.
+    KPageBuffer::InitializeSlabHeap(kernel, system.DeviceMemory().GetPointer(slab_address),
+                                    slab_size);
+}
+
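`InitializeKPageBufferSlabHeap` sizes its backing allocation from the same process and thread counts: one page each, plus one page per eight. With the default counts above (80 processes, 800 threads) and an assumed 0x1000-byte PageSize, the numbers work out as follows:

constexpr size_t DemoNumPages = 80 + 800 + (80 + 800) / 8; // 880 + 110 = 990
constexpr size_t DemoSlabSize = DemoNumPages * 0x1000;
static_assert(DemoSlabSize == 0x3DE000); // just under 4 MiB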
 void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
     auto& kernel = system.Kernel();
@@ -160,13 +205,13 @@ void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
     }

     // Create an array to represent the gaps between the slabs.
-    const size_t total_gap_size = KernelSlabHeapGapsSize;
+    const size_t total_gap_size = CalculateSlabHeapGapSize();
     std::array<size_t, slab_types.size()> slab_gaps;
-    for (size_t i = 0; i < slab_gaps.size(); i++) {
+    for (auto& slab_gap : slab_gaps) {
         // Note: This is an off-by-one error from Nintendo's intention, because GenerateRandomRange
         // is inclusive. However, Nintendo also has the off-by-one error, and it's "harmless", so we
         // will include it ourselves.
-        slab_gaps[i] = KSystemControl::GenerateRandomRange(0, total_gap_size);
+        slab_gap = KSystemControl::GenerateRandomRange(0, total_gap_size);
     }

     // Sort the array, so that we can treat differences between values as offsets to the starts of
@@ -177,13 +222,21 @@ void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
         }
     }

-    for (size_t i = 0; i < slab_types.size(); i++) {
+    // Track the gaps, so that we can free them to the unused slab tree.
+    VAddr gap_start = address;
+    size_t gap_size = 0;
+
+    for (size_t i = 0; i < slab_gaps.size(); i++) {
         // Add the random gap to the address.
-        address += (i == 0) ? slab_gaps[0] : slab_gaps[i] - slab_gaps[i - 1];
+        const auto cur_gap = (i == 0) ? slab_gaps[0] : slab_gaps[i] - slab_gaps[i - 1];
+        address += cur_gap;
+        gap_size += cur_gap;

 #define INITIALIZE_SLAB_HEAP(NAME, COUNT, ...) \
     case KSlabType_##NAME: \
+        if (COUNT > 0) { \
             address = InitializeSlabHeap<NAME>(system, memory_layout, address, COUNT); \
+        } \
         break;

     // Initialize the slabheap.
@@ -192,7 +245,13 @@ void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
         FOREACH_SLAB_TYPE(INITIALIZE_SLAB_HEAP)
         // If we somehow get an invalid type, abort.
         default:
-            UNREACHABLE();
+            UNREACHABLE_MSG("Unknown slab type: {}", slab_types[i]);
+        }
+
+        // If we've hit the end of a gap, free it.
+        if (gap_start + gap_size != address) {
+            gap_start = address;
+            gap_size = 0;
         }
     }
 }
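The gap logic above draws one random value per slab in [0, total_gap_size], sorts the draws, and applies consecutive differences as padding before each slab. Differences of a sorted sequence are non-negative and telescope to the largest draw, so the total padding can never exceed the budget. A tiny standalone illustration:

#include <algorithm>
#include <array>
#include <cstddef>

int main() {
    std::array<std::size_t, 3> gaps{5, 2, 9}; // pretend draws with budget G == 10
    std::sort(gaps.begin(), gaps.end());      // {2, 5, 9}

    std::size_t address = 0;
    for (std::size_t i = 0; i < gaps.size(); i++) {
        // Same increment rule as InitializeSlabHeaps above.
        address += (i == 0) ? gaps[0] : gaps[i] - gaps[i - 1];
    }
    // Increments were 2, 3, 4; their sum is 9, the largest draw, which is <= G.
    return address == 9 ? 0 : 1;
}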

@@ -32,12 +32,13 @@ struct KSlabResourceCounts {
     size_t num_KObjectName;
     size_t num_KResourceLimit;
     size_t num_KDebug;
-    size_t num_KAlpha;
-    size_t num_KBeta;
+    size_t num_KIoPool;
+    size_t num_KIoRegion;
 };

 void InitializeSlabResourceCounts(KernelCore& kernel);
 size_t CalculateTotalSlabHeapSize(const KernelCore& kernel);
+void InitializeKPageBufferSlabHeap(Core::System& system);
 void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout);

 } // namespace Kernel::Init

@@ -115,7 +115,7 @@ ResultCode KAddressArbiter::Signal(VAddr addr, s32 count) {
     {
         KScopedSchedulerLock sl(kernel);

-        auto it = thread_tree.nfind_light({addr, -1});
+        auto it = thread_tree.nfind_key({addr, -1});
         while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
                (it->GetAddressArbiterKey() == addr)) {
             // End the thread's wait.
@@ -148,7 +148,7 @@ ResultCode KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32
             return ResultInvalidState;
         }

-        auto it = thread_tree.nfind_light({addr, -1});
+        auto it = thread_tree.nfind_key({addr, -1});
         while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
                (it->GetAddressArbiterKey() == addr)) {
             // End the thread's wait.
@@ -171,7 +171,7 @@ ResultCode KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32
     {
         [[maybe_unused]] const KScopedSchedulerLock sl(kernel);

-        auto it = thread_tree.nfind_light({addr, -1});
+        auto it = thread_tree.nfind_key({addr, -1});
         // Determine the updated value.
         s32 new_value{};
         if (count <= 0) {
@@ -244,7 +244,7 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
     {
         KScopedSchedulerLock sl(kernel);

-        auto it = thread_tree.nfind_light({cv_key, -1});
+        auto it = thread_tree.nfind_key({cv_key, -1});
         while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
                (it->GetConditionVariableKey() == cv_key)) {
             KThread* target_thread = std::addressof(*it);
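`nfind_key` is the tree's lower-bound lookup, and `{addr, -1}` works as a sentinel because it compares below every real waiter on `addr`: the iterator lands on the first waiter for that address, and the loop walks forward while the key still matches. The same shape, sketched against a std::map for intuition:

#include <cstdint>
#include <map>
#include <utility>

// Waiters keyed by (address, id); {addr, -1} sorts before every real
// entry for addr, mimicking thread_tree.nfind_key({addr, -1}).
using DemoKey = std::pair<std::uint64_t, std::int64_t>;
std::map<DemoKey, const char*> demo_waiters;

void DemoSignalAll(std::uint64_t addr) {
    for (auto it = demo_waiters.lower_bound({addr, -1});
         it != demo_waiters.end() && it->first.first == addr;) {
        it = demo_waiters.erase(it); // end each matching thread's wait
    }
}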
@@ -57,11 +57,11 @@ constexpr std::size_t KernelPageTableHeapSize = GetMaximumOverheadSize(MainMemor
 constexpr std::size_t KernelInitialPageHeapSize = 128_KiB;

 constexpr std::size_t KernelSlabHeapDataSize = 5_MiB;
-constexpr std::size_t KernelSlabHeapGapsSize = 2_MiB - 64_KiB;
-constexpr std::size_t KernelSlabHeapSize = KernelSlabHeapDataSize + KernelSlabHeapGapsSize;
+constexpr std::size_t KernelSlabHeapGapsSizeMax = 2_MiB - 64_KiB;
+constexpr std::size_t KernelSlabHeapSize = KernelSlabHeapDataSize + KernelSlabHeapGapsSizeMax;

 // NOTE: This is calculated from KThread slab counts, assuming KThread size <= 0x860.
-constexpr std::size_t KernelSlabHeapAdditionalSize = 416_KiB;
+constexpr std::size_t KernelSlabHeapAdditionalSize = 0x68000;

 constexpr std::size_t KernelResourceSize =
     KernelPageTableHeapSize + KernelInitialPageHeapSize + KernelSlabHeapSize;
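The rename to `KernelSlabHeapGapsSizeMax` reflects that this constant is now only an upper bound; the actual gap total comes from `CalculateSlabHeapGapSize()` (2_MiB - 296_KiB), which the added static_assert keeps below this maximum. The `KernelSlabHeapAdditionalSize` change, by contrast, is pure notation:

static_assert(0x68000 == 416 * 1024); // the new hex literal equals the old 416_KiB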

src/core/hle/kernel/k_page_buffer.h (new file, 34 lines)
@@ -0,0 +1,34 @@
+// Copyright 2022 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <array>
+
+#include "common/alignment.h"
+#include "common/assert.h"
+#include "common/common_types.h"
+#include "core/core.h"
+#include "core/device_memory.h"
+#include "core/hle/kernel/memory_types.h"
+
+namespace Kernel {
+
+class KPageBuffer final : public KSlabAllocated<KPageBuffer> {
+public:
+    KPageBuffer() = default;
+
+    static KPageBuffer* FromPhysicalAddress(Core::System& system, PAddr phys_addr) {
+        ASSERT(Common::IsAligned(phys_addr, PageSize));
+        return reinterpret_cast<KPageBuffer*>(system.DeviceMemory().GetPointer(phys_addr));
+    }
+
+private:
+    [[maybe_unused]] alignas(PageSize) std::array<u8, PageSize> m_buffer{};
+};
+
+static_assert(sizeof(KPageBuffer) == PageSize);
+static_assert(alignof(KPageBuffer) == PageSize);
+
+} // namespace Kernel
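`KPageBuffer` is a page-sized, page-aligned wrapper whose two trailing static_asserts are what make `FromPhysicalAddress` sound: every page-aligned physical address corresponds to host memory that can legally be treated as exactly one `KPageBuffer`. A minimal usage sketch, assuming `system` is the running Core::System and `phys_addr` is a page-aligned address inside the page-buffer slab:

KPageBuffer* buffer = KPageBuffer::FromPhysicalAddress(system, phys_addr);
// `buffer` now aliases one 0x1000-byte page of device memory.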
@@ -424,6 +424,68 @@ ResultCode KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std
     return ResultSuccess;
 }

+VAddr KPageTable::FindFreeArea(VAddr region_start, std::size_t region_num_pages,
+                               std::size_t num_pages, std::size_t alignment, std::size_t offset,
+                               std::size_t guard_pages) {
+    VAddr address = 0;
+
+    if (num_pages <= region_num_pages) {
+        if (this->IsAslrEnabled()) {
+            // Try to directly find a free area up to 8 times.
+            for (std::size_t i = 0; i < 8; i++) {
+                const std::size_t random_offset =
+                    KSystemControl::GenerateRandomRange(
+                        0, (region_num_pages - num_pages - guard_pages) * PageSize / alignment) *
+                    alignment;
+                const VAddr candidate =
+                    Common::AlignDown((region_start + random_offset), alignment) + offset;
+
+                KMemoryInfo info = this->QueryInfoImpl(candidate);
+
+                if (info.state != KMemoryState::Free) {
+                    continue;
+                }
+                if (region_start > candidate) {
+                    continue;
+                }
+                if (info.GetAddress() + guard_pages * PageSize > candidate) {
+                    continue;
+                }
+
+                const VAddr candidate_end = candidate + (num_pages + guard_pages) * PageSize - 1;
+                if (candidate_end > info.GetLastAddress()) {
+                    continue;
+                }
+                if (candidate_end > region_start + region_num_pages * PageSize - 1) {
+                    continue;
+                }
+
+                address = candidate;
+                break;
+            }
+            // Fall back to finding the first free area with a random offset.
+            if (address == 0) {
+                // NOTE: Nintendo does not account for guard pages here.
+                // This may theoretically cause an offset to be chosen that cannot be mapped. We
+                // will account for guard pages.
+                const std::size_t offset_pages = KSystemControl::GenerateRandomRange(
+                    0, region_num_pages - num_pages - guard_pages);
+                address = block_manager->FindFreeArea(region_start + offset_pages * PageSize,
+                                                      region_num_pages - offset_pages, num_pages,
+                                                      alignment, offset, guard_pages);
+            }
+        }
+
+        // Find the first free area.
+        if (address == 0) {
+            address = block_manager->FindFreeArea(region_start, region_num_pages, num_pages,
+                                                  alignment, offset, guard_pages);
+        }
+    }
+
+    return address;
+}
+
 ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size,
                                           KPageTable& src_page_table, VAddr src_addr) {
     KScopedLightLock lk(general_lock);
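The randomized probe in `FindFreeArea` draws an aligned slot index, converts it to a candidate address, and rejects the candidate unless the whole `num_pages + guard_pages` window sits inside a single free block and inside the region. The random range itself counts the aligned slots that can hold the request, so every draw is in-bounds by construction; a worked check with small, hypothetical sizes:

// 64-page region, 8-page request, 1 guard page, page-granular alignment:
constexpr std::size_t DemoRegionPages = 64, DemoNumPages = 8, DemoGuardPages = 1;
constexpr std::size_t DemoPageSize = 0x1000, DemoAlignment = DemoPageSize;
constexpr std::size_t DemoMaxSlot =
    (DemoRegionPages - DemoNumPages - DemoGuardPages) * DemoPageSize / DemoAlignment;
static_assert(DemoMaxSlot == 55);
// Even the inclusive extreme, slot 55, ends the 9-page window exactly at the
// last byte of the region, so the bounds checks above still accept it.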
@@ -1055,6 +1117,46 @@ ResultCode KPageTable::MapPages(VAddr address, KPageLinkedList& page_linked_list
     return ResultSuccess;
 }

+ResultCode KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment,
+                                PAddr phys_addr, bool is_pa_valid, VAddr region_start,
+                                std::size_t region_num_pages, KMemoryState state,
+                                KMemoryPermission perm) {
+    ASSERT(Common::IsAligned(alignment, PageSize) && alignment >= PageSize);
+
+    // Ensure this is a valid map request.
+    R_UNLESS(this->CanContain(region_start, region_num_pages * PageSize, state),
+             ResultInvalidCurrentMemory);
+    R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory);
+
+    // Lock the table.
+    KScopedLightLock lk(general_lock);
+
+    // Find a random address to map at.
+    VAddr addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0,
+                                    this->GetNumGuardPages());
+    R_UNLESS(addr != 0, ResultOutOfMemory);
+    ASSERT(Common::IsAligned(addr, alignment));
+    ASSERT(this->CanContain(addr, num_pages * PageSize, state));
+    ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free,
+                                  KMemoryPermission::None, KMemoryPermission::None,
+                                  KMemoryAttribute::None, KMemoryAttribute::None)
+               .IsSuccess());
+
+    // Perform mapping operation.
+    if (is_pa_valid) {
+        R_TRY(this->Operate(addr, num_pages, perm, OperationType::Map, phys_addr));
+    } else {
+        UNIMPLEMENTED();
+    }
+
+    // Update the blocks.
+    block_manager->Update(addr, num_pages, state, perm);
+
+    // We successfully mapped the pages.
+    *out_addr = addr;
+    return ResultSuccess;
+}
+
 ResultCode KPageTable::UnmapPages(VAddr addr, const KPageLinkedList& page_linked_list) {
     ASSERT(this->IsLockedByCurrentThread());
@@ -1097,6 +1199,30 @@ ResultCode KPageTable::UnmapPages(VAddr addr, KPageLinkedList& page_linked_list,
     return ResultSuccess;
 }

+ResultCode KPageTable::UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state) {
+    // Check that the unmap is in range.
+    const std::size_t size = num_pages * PageSize;
+    R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+    // Lock the table.
+    KScopedLightLock lk(general_lock);
+
+    // Check the memory state.
+    std::size_t num_allocator_blocks{};
+    R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
+                                 KMemoryState::All, state, KMemoryPermission::None,
+                                 KMemoryPermission::None, KMemoryAttribute::All,
+                                 KMemoryAttribute::None));
+
+    // Perform the unmap.
+    R_TRY(Operate(address, num_pages, KMemoryPermission::None, OperationType::Unmap));
+
+    // Update the blocks.
+    block_manager->Update(address, num_pages, KMemoryState::Free, KMemoryPermission::None);
+
+    return ResultSuccess;
+}
+
 ResultCode KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size,
                                                   Svc::MemoryPermission svc_perm) {
     const size_t num_pages = size / PageSize;
@@ -46,7 +46,14 @@ public:
     ResultCode UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
     ResultCode MapPages(VAddr addr, KPageLinkedList& page_linked_list, KMemoryState state,
                         KMemoryPermission perm);
+    ResultCode MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment,
+                        PAddr phys_addr, KMemoryState state, KMemoryPermission perm) {
+        return this->MapPages(out_addr, num_pages, alignment, phys_addr, true,
+                              this->GetRegionAddress(state), this->GetRegionSize(state) / PageSize,
+                              state, perm);
+    }
     ResultCode UnmapPages(VAddr addr, KPageLinkedList& page_linked_list, KMemoryState state);
+    ResultCode UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state);
     ResultCode SetProcessMemoryPermission(VAddr addr, std::size_t size,
                                           Svc::MemoryPermission svc_perm);
     KMemoryInfo QueryInfo(VAddr addr);
@@ -91,6 +98,9 @@ private:
     ResultCode InitializeMemoryLayout(VAddr start, VAddr end);
     ResultCode MapPages(VAddr addr, const KPageLinkedList& page_linked_list,
                         KMemoryPermission perm);
+    ResultCode MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment,
+                        PAddr phys_addr, bool is_pa_valid, VAddr region_start,
+                        std::size_t region_num_pages, KMemoryState state, KMemoryPermission perm);
     ResultCode UnmapPages(VAddr addr, const KPageLinkedList& page_linked_list);
     bool IsRegionMapped(VAddr address, u64 size);
     bool IsRegionContiguous(VAddr addr, u64 size) const;
@@ -105,6 +115,9 @@ private:
     VAddr GetRegionAddress(KMemoryState state) const;
     std::size_t GetRegionSize(KMemoryState state) const;

+    VAddr FindFreeArea(VAddr region_start, std::size_t region_num_pages, std::size_t num_pages,
+                       std::size_t alignment, std::size_t offset, std::size_t guard_pages);
+
     ResultCode CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr,
                                           std::size_t size, KMemoryState state_mask,
                                           KMemoryState state, KMemoryPermission perm_mask,
@@ -137,7 +150,7 @@ private:
         return CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size,
                                 state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr);
     }
-    ResultCode CheckMemoryState(VAddr addr, size_t size, KMemoryState state_mask,
+    ResultCode CheckMemoryState(VAddr addr, std::size_t size, KMemoryState state_mask,
                                 KMemoryState state, KMemoryPermission perm_mask,
                                 KMemoryPermission perm, KMemoryAttribute attr_mask,
                                 KMemoryAttribute attr,
@@ -210,7 +223,7 @@ public:
     constexpr VAddr GetAliasCodeRegionSize() const {
         return alias_code_region_end - alias_code_region_start;
     }
-    size_t GetNormalMemorySize() {
+    std::size_t GetNormalMemorySize() {
         KScopedLightLock lk(general_lock);
         return GetHeapSize() + mapped_physical_memory_size;
     }
@@ -57,7 +57,12 @@ ResultCode KPort::EnqueueSession(KServerSession* session) {
     R_UNLESS(state == State::Normal, ResultPortClosed);

     server.EnqueueSession(session);
-    server.GetSessionRequestHandler()->ClientConnected(server.AcceptSession());
+
+    if (auto session_ptr = server.GetSessionRequestHandler().lock()) {
+        session_ptr->ClientConnected(server.AcceptSession());
+    } else {
+        UNREACHABLE();
+    }

     return ResultSuccess;
 }
@@ -70,58 +70,6 @@ void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority
 }
 } // Anonymous namespace

-// Represents a page used for thread-local storage.
-//
-// Each TLS page contains slots that may be used by processes and threads.
-// Every process and thread is created with a slot in some arbitrary page
-// (whichever page happens to have an available slot).
-class TLSPage {
-public:
-    static constexpr std::size_t num_slot_entries =
-        Core::Memory::PAGE_SIZE / Core::Memory::TLS_ENTRY_SIZE;
-
-    explicit TLSPage(VAddr address) : base_address{address} {}
-
-    bool HasAvailableSlots() const {
-        return !is_slot_used.all();
-    }
-
-    VAddr GetBaseAddress() const {
-        return base_address;
-    }
-
-    std::optional<VAddr> ReserveSlot() {
-        for (std::size_t i = 0; i < is_slot_used.size(); i++) {
-            if (is_slot_used[i]) {
-                continue;
-            }
-
-            is_slot_used[i] = true;
-            return base_address + (i * Core::Memory::TLS_ENTRY_SIZE);
-        }
-
-        return std::nullopt;
-    }
-
-    void ReleaseSlot(VAddr address) {
-        // Ensure that all given addresses are consistent with how TLS pages
-        // are intended to be used when releasing slots.
-        ASSERT(IsWithinPage(address));
-        ASSERT((address % Core::Memory::TLS_ENTRY_SIZE) == 0);
-
-        const std::size_t index = (address - base_address) / Core::Memory::TLS_ENTRY_SIZE;
-        is_slot_used[index] = false;
-    }
-
-private:
-    bool IsWithinPage(VAddr address) const {
-        return base_address <= address && address < base_address + Core::Memory::PAGE_SIZE;
-    }
-
-    VAddr base_address;
-    std::bitset<num_slot_entries> is_slot_used;
-};
-
 ResultCode KProcess::Initialize(KProcess* process, Core::System& system, std::string process_name,
                                 ProcessType type, KResourceLimit* res_limit) {
     auto& kernel = system.Kernel();
@@ -404,7 +352,7 @@ ResultCode KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata,
     }

     // Create TLS region
-    tls_region_address = CreateTLSRegion();
+    R_TRY(this->CreateThreadLocalRegion(std::addressof(tls_region_address)));
     memory_reservation.Commit();

     return handle_table.Initialize(capabilities.GetHandleTableSize());
@@ -444,7 +392,7 @@ void KProcess::PrepareForTermination() {

     stop_threads(kernel.System().GlobalSchedulerContext().GetThreadList());

-    FreeTLSRegion(tls_region_address);
+    this->DeleteThreadLocalRegion(tls_region_address);
     tls_region_address = 0;

     if (resource_limit) {
@@ -456,9 +404,6 @@ void KProcess::PrepareForTermination() {
 }

 void KProcess::Finalize() {
-    // Finalize the handle table and close any open handles.
-    handle_table.Finalize();
-
     // Free all shared memory infos.
     {
         auto it = shared_memory_list.begin();
@@ -483,67 +428,110 @@ void KProcess::Finalize() {
         resource_limit = nullptr;
     }

+    // Finalize the page table.
+    page_table.reset();
+
     // Perform inherited finalization.
     KAutoObjectWithSlabHeapAndContainer<KProcess, KWorkerTask>::Finalize();
 }

-/**
- * Attempts to find a TLS page that contains a free slot for
- * use by a thread.
- *
- * @returns If a page with an available slot is found, then an iterator
- *          pointing to the page is returned. Otherwise the end iterator
- *          is returned instead.
- */
-static auto FindTLSPageWithAvailableSlots(std::vector<TLSPage>& tls_pages) {
-    return std::find_if(tls_pages.begin(), tls_pages.end(),
-                        [](const auto& page) { return page.HasAvailableSlots(); });
-}
-
-VAddr KProcess::CreateTLSRegion() {
-    KScopedSchedulerLock lock(kernel);
-    if (auto tls_page_iter{FindTLSPageWithAvailableSlots(tls_pages)};
-        tls_page_iter != tls_pages.cend()) {
-        return *tls_page_iter->ReserveSlot();
-    }
-
-    Page* const tls_page_ptr{kernel.GetUserSlabHeapPages().Allocate()};
-    ASSERT(tls_page_ptr);
-
-    const VAddr start{page_table->GetKernelMapRegionStart()};
-    const VAddr size{page_table->GetKernelMapRegionEnd() - start};
-    const PAddr tls_map_addr{kernel.System().DeviceMemory().GetPhysicalAddr(tls_page_ptr)};
-    const VAddr tls_page_addr{page_table
-                                  ->AllocateAndMapMemory(1, PageSize, true, start, size / PageSize,
-                                                         KMemoryState::ThreadLocal,
-                                                         KMemoryPermission::UserReadWrite,
-                                                         tls_map_addr)
-                                  .ValueOr(0)};
-
-    ASSERT(tls_page_addr);
-
-    std::memset(tls_page_ptr, 0, PageSize);
-    tls_pages.emplace_back(tls_page_addr);
-
-    const auto reserve_result{tls_pages.back().ReserveSlot()};
-    ASSERT(reserve_result.has_value());
-
-    return *reserve_result;
-}
-
-void KProcess::FreeTLSRegion(VAddr tls_address) {
-    KScopedSchedulerLock lock(kernel);
-    const VAddr aligned_address = Common::AlignDown(tls_address, Core::Memory::PAGE_SIZE);
-    auto iter =
-        std::find_if(tls_pages.begin(), tls_pages.end(), [aligned_address](const auto& page) {
-            return page.GetBaseAddress() == aligned_address;
-        });
-
-    // Something has gone very wrong if we're freeing a region
-    // with no actual page available.
-    ASSERT(iter != tls_pages.cend());
-
-    iter->ReleaseSlot(tls_address);
-}
+ResultCode KProcess::CreateThreadLocalRegion(VAddr* out) {
+    KThreadLocalPage* tlp = nullptr;
+    VAddr tlr = 0;
+
+    // See if we can get a region from a partially used TLP.
+    {
+        KScopedSchedulerLock sl{kernel};
+
+        if (auto it = partially_used_tlp_tree.begin(); it != partially_used_tlp_tree.end()) {
+            tlr = it->Reserve();
+            ASSERT(tlr != 0);
+
+            if (it->IsAllUsed()) {
+                tlp = std::addressof(*it);
+                partially_used_tlp_tree.erase(it);
+                fully_used_tlp_tree.insert(*tlp);
+            }
+
+            *out = tlr;
+            return ResultSuccess;
+        }
+    }
+
+    // Allocate a new page.
+    tlp = KThreadLocalPage::Allocate(kernel);
+    R_UNLESS(tlp != nullptr, ResultOutOfMemory);
+    auto tlp_guard = SCOPE_GUARD({ KThreadLocalPage::Free(kernel, tlp); });
+
+    // Initialize the new page.
+    R_TRY(tlp->Initialize(kernel, this));
+
+    // Reserve a TLR.
+    tlr = tlp->Reserve();
+    ASSERT(tlr != 0);
+
+    // Insert into our tree.
+    {
+        KScopedSchedulerLock sl{kernel};
+        if (tlp->IsAllUsed()) {
+            fully_used_tlp_tree.insert(*tlp);
+        } else {
+            partially_used_tlp_tree.insert(*tlp);
+        }
+    }
+
+    // We succeeded!
+    tlp_guard.Cancel();
+    *out = tlr;
+    return ResultSuccess;
+}
+
+ResultCode KProcess::DeleteThreadLocalRegion(VAddr addr) {
+    KThreadLocalPage* page_to_free = nullptr;
+
+    // Release the region.
+    {
+        KScopedSchedulerLock sl{kernel};
+
+        // Try to find the page in the partially used list.
+        auto it = partially_used_tlp_tree.find_key(Common::AlignDown(addr, PageSize));
+        if (it == partially_used_tlp_tree.end()) {
+            // If we don't find it, it has to be in the fully used list.
+            it = fully_used_tlp_tree.find_key(Common::AlignDown(addr, PageSize));
+            R_UNLESS(it != fully_used_tlp_tree.end(), ResultInvalidAddress);
+
+            // Release the region.
+            it->Release(addr);
+
+            // Move the page out of the fully used list.
+            KThreadLocalPage* tlp = std::addressof(*it);
+            fully_used_tlp_tree.erase(it);
+            if (tlp->IsAllFree()) {
+                page_to_free = tlp;
+            } else {
+                partially_used_tlp_tree.insert(*tlp);
+            }
+        } else {
+            // Release the region.
+            it->Release(addr);
+
+            // Handle the all-free case.
+            KThreadLocalPage* tlp = std::addressof(*it);
+            if (tlp->IsAllFree()) {
+                partially_used_tlp_tree.erase(it);
+                page_to_free = tlp;
+            }
+        }
+    }
+
+    // If we should free the page it was in, do so.
+    if (page_to_free != nullptr) {
+        page_to_free->Finalize();
+
+        KThreadLocalPage::Free(kernel, page_to_free);
+    }
+
+    return ResultSuccess;
+}

 void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) {
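Both TLP trees are keyed by page base address, which works because a thread-local region handed out by `Reserve()` always lives inside a single page: regions are 0x200 bytes and a 0x1000-byte page holds exactly eight, the same split the removed comment in k_process.h (next hunk) described. A sketch of the key computation `find_key` relies on:

constexpr u64 DemoTlsEntrySize = 0x200;
constexpr u64 DemoPageSize = 0x1000;
static_assert(DemoPageSize / DemoTlsEntrySize == 8); // eight regions per page

constexpr u64 DemoPageKey(u64 tlr_addr) {
    return tlr_addr & ~(DemoPageSize - 1); // equivalent to AlignDown(addr, PageSize)
}
static_assert(DemoPageKey(0x8000'1600ULL) == 0x8000'1000ULL); // hypothetical address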
@@ -15,6 +15,7 @@
 #include "core/hle/kernel/k_condition_variable.h"
 #include "core/hle/kernel/k_handle_table.h"
 #include "core/hle/kernel/k_synchronization_object.h"
+#include "core/hle/kernel/k_thread_local_page.h"
 #include "core/hle/kernel/k_worker_task.h"
 #include "core/hle/kernel/process_capability.h"
 #include "core/hle/kernel/slab_helpers.h"
@@ -362,10 +363,10 @@ public:
     // Thread-local storage management

     // Marks the next available region as used and returns the address of the slot.
-    [[nodiscard]] VAddr CreateTLSRegion();
+    [[nodiscard]] ResultCode CreateThreadLocalRegion(VAddr* out);

     // Frees a used TLS slot identified by the given address
-    void FreeTLSRegion(VAddr tls_address);
+    ResultCode DeleteThreadLocalRegion(VAddr addr);

 private:
     void PinThread(s32 core_id, KThread* thread) {
@@ -413,13 +414,6 @@ private:
     /// The ideal CPU core for this process, threads are scheduled on this core by default.
     u8 ideal_core = 0;

-    /// The Thread Local Storage area is allocated as processes create threads,
-    /// each TLS area is 0x200 bytes, so one page (0x1000) is split up in 8 parts, and each part
-    /// holds the TLS for a specific thread. This vector contains which parts are in use for each
-    /// page as a bitmask.
-    /// This vector will grow as more pages are allocated for new threads.
-    std::vector<TLSPage> tls_pages;
-
     /// Contains the parsed process capability descriptors.
     ProcessCapabilities capabilities;

@@ -482,6 +476,12 @@ private:
     KThread* exception_thread{};

     KLightLock state_lock;
+
+    using TLPTree =
+        Common::IntrusiveRedBlackTreeBaseTraits<KThreadLocalPage>::TreeType<KThreadLocalPage>;
+    using TLPIterator = TLPTree::iterator;
+    TLPTree fully_used_tlp_tree;
+    TLPTree partially_used_tlp_tree;
 };

 } // namespace Kernel
@@ -30,11 +30,11 @@ public:

     /// Whether or not this server port has an HLE handler available.
     bool HasSessionRequestHandler() const {
-        return session_handler != nullptr;
+        return !session_handler.expired();
     }

     /// Gets the HLE handler for this port.
-    SessionRequestHandlerPtr GetSessionRequestHandler() const {
+    SessionRequestHandlerWeakPtr GetSessionRequestHandler() const {
         return session_handler;
     }

@@ -42,7 +42,7 @@ public:
     * Sets the HLE handler template for the port. ServerSessions created by connecting to this port
     * will inherit a reference to this handler.
     */
-    void SetSessionHandler(SessionRequestHandlerPtr&& handler) {
+    void SetSessionHandler(SessionRequestHandlerWeakPtr&& handler) {
         session_handler = std::move(handler);
     }

@@ -66,7 +66,7 @@ private:
     void CleanupSessions();

     SessionList session_list;
-    SessionRequestHandlerPtr session_handler;
+    SessionRequestHandlerWeakPtr session_handler;
     KPort* parent{};
 };
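The port now stores its HLE handler as a SessionRequestHandlerWeakPtr rather than a shared pointer, so the port no longer keeps the handler alive; a weak_ptr cannot be compared against nullptr, hence the expired() check, and every caller must promote it with lock() before use. A minimal sketch of the idiom with stand-in types (not the yuzu interfaces):

    #include <cassert>
    #include <memory>

    struct SessionRequestHandler {};

    std::weak_ptr<SessionRequestHandler> session_handler;

    bool HasSessionRequestHandler() {
        // expired() is the weak_ptr equivalent of a null check.
        return !session_handler.expired();
    }

    void HandleRequest() {
        // The handler may have been destroyed (e.g. during emulation
        // shutdown) since the port was configured, so promote first.
        if (auto handler = session_handler.lock()) {
            // ... dispatch to *handler ...
        } else {
            // Mirrors the UNREACHABLE() branch in HandleDomainSyncRequest.
            assert(false);
        }
    }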
@@ -27,10 +27,7 @@ namespace Kernel {

 KServerSession::KServerSession(KernelCore& kernel_) : KSynchronizationObject{kernel_} {}

-KServerSession::~KServerSession() {
-    // Ensure that the global list tracking server sessions does not hold on to a reference.
-    kernel.UnregisterServerSession(this);
-}
+KServerSession::~KServerSession() = default;

 void KServerSession::Initialize(KSession* parent_session_, std::string&& name_,
                                 std::shared_ptr<SessionRequestManager> manager_) {

@@ -49,6 +46,9 @@ void KServerSession::Destroy() {
     parent->OnServerClosed();

     parent->Close();
+
+    // Release host emulation members.
+    manager.reset();
 }

 void KServerSession::OnClientClosed() {

@@ -98,7 +98,12 @@ ResultCode KServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& co
             UNREACHABLE();
             return ResultSuccess; // Ignore error if asserts are off
         }
-        return manager->DomainHandler(object_id - 1)->HandleSyncRequest(*this, context);
+        if (auto strong_ptr = manager->DomainHandler(object_id - 1).lock()) {
+            return strong_ptr->HandleSyncRequest(*this, context);
+        } else {
+            UNREACHABLE();
+            return ResultSuccess;
+        }

     case IPC::DomainMessageHeader::CommandType::CloseVirtualHandle: {
         LOG_DEBUG(IPC, "CloseVirtualHandle, object_id=0x{:08X}", object_id);
@@ -16,39 +16,34 @@ class KernelCore;

 namespace impl {

-class KSlabHeapImpl final {
-public:
+class KSlabHeapImpl {
     YUZU_NON_COPYABLE(KSlabHeapImpl);
     YUZU_NON_MOVEABLE(KSlabHeapImpl);

+public:
     struct Node {
         Node* next{};
     };

+public:
     constexpr KSlabHeapImpl() = default;
-    constexpr ~KSlabHeapImpl() = default;

-    void Initialize(std::size_t size) {
-        ASSERT(head == nullptr);
-        obj_size = size;
-    }
-
-    constexpr std::size_t GetObjectSize() const {
-        return obj_size;
+    void Initialize() {
+        ASSERT(m_head == nullptr);
     }

     Node* GetHead() const {
-        return head;
+        return m_head;
     }

     void* Allocate() {
-        Node* ret = head.load();
+        Node* ret = m_head.load();

         do {
             if (ret == nullptr) {
                 break;
             }
-        } while (!head.compare_exchange_weak(ret, ret->next));
+        } while (!m_head.compare_exchange_weak(ret, ret->next));

         return ret;
     }

@@ -56,170 +51,157 @@ public:
     void Free(void* obj) {
         Node* node = static_cast<Node*>(obj);

-        Node* cur_head = head.load();
+        Node* cur_head = m_head.load();
         do {
             node->next = cur_head;
-        } while (!head.compare_exchange_weak(cur_head, node));
+        } while (!m_head.compare_exchange_weak(cur_head, node));
     }

 private:
-    std::atomic<Node*> head{};
-    std::size_t obj_size{};
+    std::atomic<Node*> m_head{};
 };

 } // namespace impl

-class KSlabHeapBase {
-public:
+template <bool SupportDynamicExpansion>
+class KSlabHeapBase : protected impl::KSlabHeapImpl {
     YUZU_NON_COPYABLE(KSlabHeapBase);
     YUZU_NON_MOVEABLE(KSlabHeapBase);

-    constexpr KSlabHeapBase() = default;
-    constexpr ~KSlabHeapBase() = default;
-
-    constexpr bool Contains(uintptr_t addr) const {
-        return start <= addr && addr < end;
-    }
-
-    constexpr std::size_t GetSlabHeapSize() const {
-        return (end - start) / GetObjectSize();
-    }
-
-    constexpr std::size_t GetObjectSize() const {
-        return impl.GetObjectSize();
-    }
-
-    constexpr uintptr_t GetSlabHeapAddress() const {
-        return start;
-    }
-
-    std::size_t GetObjectIndexImpl(const void* obj) const {
-        return (reinterpret_cast<uintptr_t>(obj) - start) / GetObjectSize();
-    }
-
-    std::size_t GetPeakIndex() const {
-        return GetObjectIndexImpl(reinterpret_cast<const void*>(peak));
-    }
-
-    void* AllocateImpl() {
-        return impl.Allocate();
-    }
-
-    void FreeImpl(void* obj) {
-        // Don't allow freeing an object that wasn't allocated from this heap
-        ASSERT(Contains(reinterpret_cast<uintptr_t>(obj)));
-
-        impl.Free(obj);
-    }
-
-    void InitializeImpl(std::size_t obj_size, void* memory, std::size_t memory_size) {
-        // Ensure we don't initialize a slab using null memory
-        ASSERT(memory != nullptr);
-
-        // Initialize the base allocator
-        impl.Initialize(obj_size);
-
-        // Set our tracking variables
-        const std::size_t num_obj = (memory_size / obj_size);
-        start = reinterpret_cast<uintptr_t>(memory);
-        end = start + num_obj * obj_size;
-        peak = start;
-
-        // Free the objects
-        u8* cur = reinterpret_cast<u8*>(end);
-
-        for (std::size_t i{}; i < num_obj; i++) {
-            cur -= obj_size;
-            impl.Free(cur);
-        }
-    }
+private:
+    size_t m_obj_size{};
+    uintptr_t m_peak{};
+    uintptr_t m_start{};
+    uintptr_t m_end{};

 private:
-    using Impl = impl::KSlabHeapImpl;
-
-    Impl impl;
-    uintptr_t peak{};
-    uintptr_t start{};
-    uintptr_t end{};
+    void UpdatePeakImpl(uintptr_t obj) {
+        static_assert(std::atomic_ref<uintptr_t>::is_always_lock_free);
+        std::atomic_ref<uintptr_t> peak_ref(m_peak);
+
+        const uintptr_t alloc_peak = obj + this->GetObjectSize();
+        uintptr_t cur_peak = m_peak;
+        do {
+            if (alloc_peak <= cur_peak) {
+                break;
+            }
+        } while (!peak_ref.compare_exchange_strong(cur_peak, alloc_peak));
+    }
+
+public:
+    constexpr KSlabHeapBase() = default;
+
+    bool Contains(uintptr_t address) const {
+        return m_start <= address && address < m_end;
+    }
+
+    void Initialize(size_t obj_size, void* memory, size_t memory_size) {
+        // Ensure we don't initialize a slab using null memory.
+        ASSERT(memory != nullptr);
+
+        // Set our object size.
+        m_obj_size = obj_size;
+
+        // Initialize the base allocator.
+        KSlabHeapImpl::Initialize();
+
+        // Set our tracking variables.
+        const size_t num_obj = (memory_size / obj_size);
+        m_start = reinterpret_cast<uintptr_t>(memory);
+        m_end = m_start + num_obj * obj_size;
+        m_peak = m_start;
+
+        // Free the objects.
+        u8* cur = reinterpret_cast<u8*>(m_end);
+
+        for (size_t i = 0; i < num_obj; i++) {
+            cur -= obj_size;
+            KSlabHeapImpl::Free(cur);
+        }
+    }
+
+    size_t GetSlabHeapSize() const {
+        return (m_end - m_start) / this->GetObjectSize();
+    }
+
+    size_t GetObjectSize() const {
+        return m_obj_size;
+    }
+
+    void* Allocate() {
+        void* obj = KSlabHeapImpl::Allocate();
+
+        return obj;
+    }
+
+    void Free(void* obj) {
+        // Don't allow freeing an object that wasn't allocated from this heap.
+        const bool contained = this->Contains(reinterpret_cast<uintptr_t>(obj));
+        ASSERT(contained);
+        KSlabHeapImpl::Free(obj);
+    }
+
+    size_t GetObjectIndex(const void* obj) const {
+        if constexpr (SupportDynamicExpansion) {
+            if (!this->Contains(reinterpret_cast<uintptr_t>(obj))) {
+                return std::numeric_limits<size_t>::max();
+            }
+        }
+
+        return (reinterpret_cast<uintptr_t>(obj) - m_start) / this->GetObjectSize();
+    }
+
+    size_t GetPeakIndex() const {
+        return this->GetObjectIndex(reinterpret_cast<const void*>(m_peak));
+    }
+
+    uintptr_t GetSlabHeapAddress() const {
+        return m_start;
+    }
+
+    size_t GetNumRemaining() const {
+        // Only calculate the number of remaining objects under debug configuration.
+        return 0;
+    }
 };

 template <typename T>
-class KSlabHeap final : public KSlabHeapBase {
-public:
-    enum class AllocationType {
-        Host,
-        Guest,
-    };
+class KSlabHeap final : public KSlabHeapBase<false> {
+private:
+    using BaseHeap = KSlabHeapBase<false>;

-    explicit constexpr KSlabHeap(AllocationType allocation_type_ = AllocationType::Host)
-        : KSlabHeapBase(), allocation_type{allocation_type_} {}
+public:
+    constexpr KSlabHeap() = default;

-    void Initialize(void* memory, std::size_t memory_size) {
-        if (allocation_type == AllocationType::Guest) {
-            InitializeImpl(sizeof(T), memory, memory_size);
-        }
+    void Initialize(void* memory, size_t memory_size) {
+        BaseHeap::Initialize(sizeof(T), memory, memory_size);
     }

     T* Allocate() {
-        switch (allocation_type) {
-        case AllocationType::Host:
-            // Fallback for cases where we do not yet support allocating guest memory from the slab
-            // heap, such as for kernel memory regions.
-            return new T;
-
-        case AllocationType::Guest:
-            T* obj = static_cast<T*>(AllocateImpl());
-            if (obj != nullptr) {
-                new (obj) T();
-            }
-            return obj;
-        }
-
-        UNREACHABLE_MSG("Invalid AllocationType {}", allocation_type);
-        return nullptr;
-    }
-
-    T* AllocateWithKernel(KernelCore& kernel) {
-        switch (allocation_type) {
-        case AllocationType::Host:
-            // Fallback for cases where we do not yet support allocating guest memory from the slab
-            // heap, such as for kernel memory regions.
-            return new T(kernel);
-
-        case AllocationType::Guest:
-            T* obj = static_cast<T*>(AllocateImpl());
-            if (obj != nullptr) {
-                new (obj) T(kernel);
-            }
-            return obj;
-        }
-
-        UNREACHABLE_MSG("Invalid AllocationType {}", allocation_type);
-        return nullptr;
-    }
+        T* obj = static_cast<T*>(BaseHeap::Allocate());
+
+        if (obj != nullptr) [[likely]] {
+            std::construct_at(obj);
+        }
+        return obj;
+    }
+
+    T* Allocate(KernelCore& kernel) {
+        T* obj = static_cast<T*>(BaseHeap::Allocate());
+
+        if (obj != nullptr) [[likely]] {
+            std::construct_at(obj, kernel);
+        }
+        return obj;
+    }

     void Free(T* obj) {
-        switch (allocation_type) {
-        case AllocationType::Host:
-            // Fallback for cases where we do not yet support allocating guest memory from the slab
-            // heap, such as for kernel memory regions.
-            delete obj;
-            return;
-
-        case AllocationType::Guest:
-            FreeImpl(obj);
-            return;
-        }
-
-        UNREACHABLE_MSG("Invalid AllocationType {}", allocation_type);
+        BaseHeap::Free(obj);
     }

-    constexpr std::size_t GetObjectIndex(const T* obj) const {
-        return GetObjectIndexImpl(obj);
+    size_t GetObjectIndex(const T* obj) const {
+        return BaseHeap::GetObjectIndex(obj);
     }
-
-private:
-    const AllocationType allocation_type;
 };

 } // namespace Kernel
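KSlabHeapImpl is a lock-free LIFO free list (a Treiber stack): Allocate pops the head with a compare_exchange_weak retry loop, and Free pushes a node back the same way, so the hot allocation path needs no lock. The new UpdatePeakImpl applies the same retry pattern to a plain uintptr_t member via std::atomic_ref instead of making the member itself atomic. A standalone sketch of the stack (the same CAS pattern, written as illustrative code rather than the yuzu class):

    #include <atomic>

    struct Node {
        Node* next{};
    };

    std::atomic<Node*> head{};

    Node* Allocate() {
        Node* ret = head.load();
        // Retry until head is atomically swung from ret to ret->next. The
        // weak CAS may fail spuriously, but it reloads ret on failure, so
        // each iteration works against a fresh head value.
        do {
            if (ret == nullptr) {
                break; // free list exhausted
            }
        } while (!head.compare_exchange_weak(ret, ret->next));
        return ret;
    }

    void Free(Node* node) {
        Node* cur_head = head.load();
        do {
            node->next = cur_head; // link in front of the current head
        } while (!head.compare_exchange_weak(cur_head, node));
    }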
@@ -210,7 +210,7 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
     if (owner != nullptr) {
         // Setup the TLS, if needed.
         if (type == ThreadType::User) {
-            tls_address = owner->CreateTLSRegion();
+            R_TRY(owner->CreateThreadLocalRegion(std::addressof(tls_address)));
         }

         parent = owner;

@@ -305,7 +305,7 @@ void KThread::Finalize() {
     // If the thread has a local region, delete it.
     if (tls_address != 0) {
-        parent->FreeTLSRegion(tls_address);
+        ASSERT(parent->DeleteThreadLocalRegion(tls_address).IsSuccess());
     }

     // Release any waiters.

@@ -326,6 +326,9 @@ void KThread::Finalize() {
         }
     }

+    // Release host emulation members.
+    host_context.reset();
+
     // Perform inherited finalization.
     KSynchronizationObject::Finalize();
 }

@@ -656,7 +656,7 @@ private:
     static_assert(sizeof(SyncObjectBuffer::sync_objects) == sizeof(SyncObjectBuffer::handles));

     struct ConditionVariableComparator {
-        struct LightCompareType {
+        struct RedBlackKeyType {
             u64 cv_key{};
             s32 priority{};

@@ -672,7 +672,7 @@ private:
         template <typename T>
         requires(
             std::same_as<T, KThread> ||
-            std::same_as<T, LightCompareType>) static constexpr int Compare(const T& lhs,
+            std::same_as<T, RedBlackKeyType>) static constexpr int Compare(const T& lhs,
                                                                            const KThread& rhs) {
             const u64 l_key = lhs.GetConditionVariableKey();
             const u64 r_key = rhs.GetConditionVariableKey();
src/core/hle/kernel/k_thread_local_page.cpp (new file, +65 lines)
@@ -0,0 +1,65 @@
+// Copyright 2022 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "common/scope_exit.h"
+#include "core/hle/kernel/k_memory_block.h"
+#include "core/hle/kernel/k_page_table.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/k_thread_local_page.h"
+#include "core/hle/kernel/kernel.h"
+
+namespace Kernel {
+
+ResultCode KThreadLocalPage::Initialize(KernelCore& kernel, KProcess* process) {
+    // Set that this process owns us.
+    m_owner = process;
+    m_kernel = &kernel;
+
+    // Allocate a new page.
+    KPageBuffer* page_buf = KPageBuffer::Allocate(kernel);
+    R_UNLESS(page_buf != nullptr, ResultOutOfMemory);
+    auto page_buf_guard = SCOPE_GUARD({ KPageBuffer::Free(kernel, page_buf); });
+
+    // Map the address in.
+    const auto phys_addr = kernel.System().DeviceMemory().GetPhysicalAddr(page_buf);
+    R_TRY(m_owner->PageTable().MapPages(std::addressof(m_virt_addr), 1, PageSize, phys_addr,
+                                        KMemoryState::ThreadLocal,
+                                        KMemoryPermission::UserReadWrite));
+
+    // We succeeded.
+    page_buf_guard.Cancel();
+
+    return ResultSuccess;
+}
+
+ResultCode KThreadLocalPage::Finalize() {
+    // Get the physical address of the page.
+    const PAddr phys_addr = m_owner->PageTable().GetPhysicalAddr(m_virt_addr);
+    ASSERT(phys_addr);
+
+    // Unmap the page.
+    R_TRY(m_owner->PageTable().UnmapPages(this->GetAddress(), 1, KMemoryState::ThreadLocal));
+
+    // Free the page.
+    KPageBuffer::Free(*m_kernel, KPageBuffer::FromPhysicalAddress(m_kernel->System(), phys_addr));
+
+    return ResultSuccess;
+}
+
+VAddr KThreadLocalPage::Reserve() {
+    for (size_t i = 0; i < m_is_region_free.size(); i++) {
+        if (m_is_region_free[i]) {
+            m_is_region_free[i] = false;
+            return this->GetRegionAddress(i);
+        }
+    }
+
+    return 0;
+}
+
+void KThreadLocalPage::Release(VAddr addr) {
+    m_is_region_free[this->GetRegionIndex(addr)] = true;
+}
+
+} // namespace Kernel
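KThreadLocalPage::Initialize above leans on the scope-guard-with-cancel idiom: the guard frees the freshly allocated page buffer on any early error return, and Cancel() disarms it once the mapping has succeeded. A hand-rolled sketch of the mechanism (yuzu's SCOPE_GUARD macro wraps the same idea; the helper names below are illustrative):

    #include <utility>

    template <typename F>
    class ScopeGuard {
    public:
        explicit ScopeGuard(F func) : func_(std::move(func)) {}
        ~ScopeGuard() {
            if (active_) {
                func_(); // runs on every early error return...
            }
        }
        void Cancel() {
            active_ = false; // ...unless the success path disarmed it
        }

    private:
        F func_;
        bool active_ = true;
    };

    // Usage mirroring Initialize(): free the page buffer on any failure
    // between allocation and the final Cancel().
    //   auto* page_buf = AllocatePage();                 // hypothetical helper
    //   ScopeGuard guard{[&] { FreePage(page_buf); }};   // hypothetical helper
    //   if (!MapPage(page_buf)) return Error;            // guard frees page_buf
    //   guard.Cancel();                                  // success: keep it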
src/core/hle/kernel/k_thread_local_page.h (new file, +112 lines)
@@ -0,0 +1,112 @@
+// Copyright 2022 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <algorithm>
+#include <array>
+
+#include "common/alignment.h"
+#include "common/assert.h"
+#include "common/common_types.h"
+#include "common/intrusive_red_black_tree.h"
+#include "core/hle/kernel/k_page_buffer.h"
+#include "core/hle/kernel/memory_types.h"
+#include "core/hle/kernel/slab_helpers.h"
+#include "core/hle/result.h"
+
+namespace Kernel {
+
+class KernelCore;
+class KProcess;
+
+class KThreadLocalPage final : public Common::IntrusiveRedBlackTreeBaseNode<KThreadLocalPage>,
+                               public KSlabAllocated<KThreadLocalPage> {
+public:
+    static constexpr size_t RegionsPerPage = PageSize / Svc::ThreadLocalRegionSize;
+    static_assert(RegionsPerPage > 0);
+
+public:
+    constexpr explicit KThreadLocalPage(VAddr addr = {}) : m_virt_addr(addr) {
+        m_is_region_free.fill(true);
+    }
+
+    constexpr VAddr GetAddress() const {
+        return m_virt_addr;
+    }
+
+    ResultCode Initialize(KernelCore& kernel, KProcess* process);
+    ResultCode Finalize();
+
+    VAddr Reserve();
+    void Release(VAddr addr);
+
+    bool IsAllUsed() const {
+        return std::ranges::all_of(m_is_region_free.begin(), m_is_region_free.end(),
+                                   [](bool is_free) { return !is_free; });
+    }
+
+    bool IsAllFree() const {
+        return std::ranges::all_of(m_is_region_free.begin(), m_is_region_free.end(),
+                                   [](bool is_free) { return is_free; });
+    }
+
+    bool IsAnyUsed() const {
+        return !this->IsAllFree();
+    }
+
+    bool IsAnyFree() const {
+        return !this->IsAllUsed();
+    }
+
+public:
+    using RedBlackKeyType = VAddr;
+
+    static constexpr RedBlackKeyType GetRedBlackKey(const RedBlackKeyType& v) {
+        return v;
+    }
+    static constexpr RedBlackKeyType GetRedBlackKey(const KThreadLocalPage& v) {
+        return v.GetAddress();
+    }
+
+    template <typename T>
+    requires(std::same_as<T, KThreadLocalPage> ||
+             std::same_as<T, RedBlackKeyType>) static constexpr int Compare(const T& lhs,
+                                                                            const KThreadLocalPage&
+                                                                                rhs) {
+        const VAddr lval = GetRedBlackKey(lhs);
+        const VAddr rval = GetRedBlackKey(rhs);
+
+        if (lval < rval) {
+            return -1;
+        } else if (lval == rval) {
+            return 0;
+        } else {
+            return 1;
+        }
+    }
+
+private:
+    constexpr VAddr GetRegionAddress(size_t i) const {
+        return this->GetAddress() + i * Svc::ThreadLocalRegionSize;
+    }
+
+    constexpr bool Contains(VAddr addr) const {
+        return this->GetAddress() <= addr && addr < this->GetAddress() + PageSize;
+    }
+
+    constexpr size_t GetRegionIndex(VAddr addr) const {
+        ASSERT(Common::IsAligned(addr, Svc::ThreadLocalRegionSize));
+        ASSERT(this->Contains(addr));
+        return (addr - this->GetAddress()) / Svc::ThreadLocalRegionSize;
+    }
+
+private:
+    VAddr m_virt_addr{};
+    KProcess* m_owner{};
+    KernelCore* m_kernel{};
+    std::array<bool, RegionsPerPage> m_is_region_free{};
+};
+
+} // namespace Kernel
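Compare() is constrained to accept either a full KThreadLocalPage or a bare RedBlackKeyType (a VAddr), which lets the intrusive tree be searched by address alone, without materializing a node. The same shape can be reproduced with a heterogeneous comparator over an ordinary sorted container; a standalone sketch under the 0x1000-byte-page assumption used above (illustrative names, not yuzu API):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    using VAddr = std::uintptr_t;

    struct Page {
        VAddr addr;
    };

    // Comparator usable with Page/Page, Page/VAddr, and VAddr/Page.
    struct ByAddress {
        static VAddr Key(const Page& p) { return p.addr; }
        static VAddr Key(VAddr v) { return v; }
        template <typename L, typename R>
        bool operator()(const L& lhs, const R& rhs) const {
            return Key(lhs) < Key(rhs);
        }
    };

    VAddr AlignDownToPage(VAddr addr) {
        return addr & ~VAddr{0xFFF}; // assuming 0x1000-byte pages
    }

    // Find the page owning a TLS slot address in a vector sorted by addr,
    // the kind of by-key lookup the kernel performs against its trees.
    Page* FindPage(std::vector<Page>& pages, VAddr slot) {
        const VAddr key = AlignDownToPage(slot);
        auto it = std::lower_bound(pages.begin(), pages.end(), key, ByAddress{});
        return (it != pages.end() && it->addr == key) ? &*it : nullptr;
    }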
@@ -52,7 +52,7 @@ namespace Kernel {

 struct KernelCore::Impl {
     explicit Impl(Core::System& system_, KernelCore& kernel_)
-        : time_manager{system_}, object_list_container{kernel_},
+        : time_manager{system_},
           service_threads_manager{1, "yuzu:ServiceThreadsManager"}, system{system_} {}

     void SetMulticore(bool is_multi) {

@@ -60,6 +60,7 @@ struct KernelCore::Impl {
     }

     void Initialize(KernelCore& kernel) {
+        global_object_list_container = std::make_unique<KAutoObjectWithListContainer>(kernel);
         global_scheduler_context = std::make_unique<Kernel::GlobalSchedulerContext>(kernel);
         global_handle_table = std::make_unique<Kernel::KHandleTable>(kernel);
         global_handle_table->Initialize(KHandleTable::MaxTableSize);

@@ -76,7 +77,7 @@ struct KernelCore::Impl {
         // Initialize kernel memory and resources.
         InitializeSystemResourceLimit(kernel, system.CoreTiming());
         InitializeMemoryLayout();
-        InitializePageSlab();
+        Init::InitializeKPageBufferSlabHeap(system);
         InitializeSchedulers();
         InitializeSuspendThreads();
         InitializePreemption(kernel);

@@ -107,19 +108,6 @@ struct KernelCore::Impl {
         for (auto* server_port : server_ports_) {
             server_port->Close();
         }
-        // Close all open server sessions.
-        std::unordered_set<KServerSession*> server_sessions_;
-        {
-            std::lock_guard lk(server_sessions_lock);
-            server_sessions_ = server_sessions;
-            server_sessions.clear();
-        }
-        for (auto* server_session : server_sessions_) {
-            server_session->Close();
-        }
-
-        // Ensure that the object list container is finalized and properly shutdown.
-        object_list_container.Finalize();
-
         // Ensures all service threads gracefully shutdown.
         ClearServiceThreads();

@@ -194,11 +182,15 @@ struct KernelCore::Impl {
         {
             std::lock_guard lk(registered_objects_lock);
             if (registered_objects.size()) {
-                LOG_WARNING(Kernel, "{} kernel objects were dangling on shutdown!",
-                            registered_objects.size());
+                LOG_DEBUG(Kernel, "{} kernel objects were dangling on shutdown!",
+                          registered_objects.size());
                 registered_objects.clear();
             }
         }
+
+        // Ensure that the object list container is finalized and properly shutdown.
+        global_object_list_container->Finalize();
+        global_object_list_container.reset();
     }

     void InitializePhysicalCores() {

@@ -291,15 +283,16 @@ struct KernelCore::Impl {

     // Gets the dummy KThread for the caller, allocating a new one if this is the first time
     KThread* GetHostDummyThread() {
-        auto make_thread = [this]() {
-            KThread* thread = KThread::Create(system.Kernel());
+        auto initialize = [this](KThread* thread) {
             ASSERT(KThread::InitializeDummyThread(thread).IsSuccess());
             thread->SetName(fmt::format("DummyThread:{}", GetHostThreadId()));
             return thread;
         };

-        thread_local KThread* saved_thread = make_thread();
-        return saved_thread;
+        thread_local auto raw_thread = KThread(system.Kernel());
+        thread_local auto thread = initialize(&raw_thread);
+
+        return thread;
     }

     /// Registers a CPU core thread by allocating a host thread ID for it

@@ -660,22 +653,6 @@ struct KernelCore::Impl {
             time_phys_addr, time_size, "Time:SharedMemory");
     }

-    void InitializePageSlab() {
-        // Allocate slab heaps
-        user_slab_heap_pages =
-            std::make_unique<KSlabHeap<Page>>(KSlabHeap<Page>::AllocationType::Guest);
-
-        // TODO(ameerj): This should be derived, not hardcoded within the kernel
-        constexpr u64 user_slab_heap_size{0x3de000};
-        // Reserve slab heaps
-        ASSERT(
-            system_resource_limit->Reserve(LimitableResource::PhysicalMemory, user_slab_heap_size));
-        // Initialize slab heap
-        user_slab_heap_pages->Initialize(
-            system.DeviceMemory().GetPointer(Core::DramMemoryMap::SlabHeapBase),
-            user_slab_heap_size);
-    }
-
     KClientPort* CreateNamedServicePort(std::string name) {
         auto search = service_interface_factory.find(name);
         if (search == service_interface_factory.end()) {

@@ -713,7 +690,6 @@ struct KernelCore::Impl {
     }

     std::mutex server_ports_lock;
-    std::mutex server_sessions_lock;
     std::mutex registered_objects_lock;
     std::mutex registered_in_use_objects_lock;

@@ -737,14 +713,13 @@ struct KernelCore::Impl {
     // stores all the objects in place.
     std::unique_ptr<KHandleTable> global_handle_table;

-    KAutoObjectWithListContainer object_list_container;
+    std::unique_ptr<KAutoObjectWithListContainer> global_object_list_container;

     /// Map of named ports managed by the kernel, which can be retrieved using
     /// the ConnectToPort SVC.
     std::unordered_map<std::string, ServiceInterfaceFactory> service_interface_factory;
     NamedPortTable named_ports;
     std::unordered_set<KServerPort*> server_ports;
-    std::unordered_set<KServerSession*> server_sessions;
     std::unordered_set<KAutoObject*> registered_objects;
     std::unordered_set<KAutoObject*> registered_in_use_objects;

@@ -756,7 +731,6 @@ struct KernelCore::Impl {

     // Kernel memory management
     std::unique_ptr<KMemoryManager> memory_manager;
-    std::unique_ptr<KSlabHeap<Page>> user_slab_heap_pages;

     // Shared memory for services
     Kernel::KSharedMemory* hid_shared_mem{};

@@ -915,11 +889,11 @@ const Core::ExclusiveMonitor& KernelCore::GetExclusiveMonitor() const {
 }

 KAutoObjectWithListContainer& KernelCore::ObjectListContainer() {
-    return impl->object_list_container;
+    return *impl->global_object_list_container;
 }

 const KAutoObjectWithListContainer& KernelCore::ObjectListContainer() const {
-    return impl->object_list_container;
+    return *impl->global_object_list_container;
 }

 void KernelCore::InvalidateAllInstructionCaches() {

@@ -949,16 +923,6 @@ KClientPort* KernelCore::CreateNamedServicePort(std::string name) {
     return impl->CreateNamedServicePort(std::move(name));
 }

-void KernelCore::RegisterServerSession(KServerSession* server_session) {
-    std::lock_guard lk(impl->server_sessions_lock);
-    impl->server_sessions.insert(server_session);
-}
-
-void KernelCore::UnregisterServerSession(KServerSession* server_session) {
-    std::lock_guard lk(impl->server_sessions_lock);
-    impl->server_sessions.erase(server_session);
-}
-
 void KernelCore::RegisterKernelObject(KAutoObject* object) {
     std::lock_guard lk(impl->registered_objects_lock);
     impl->registered_objects.insert(object);

@@ -1031,14 +995,6 @@ const KMemoryManager& KernelCore::MemoryManager() const {
     return *impl->memory_manager;
 }

-KSlabHeap<Page>& KernelCore::GetUserSlabHeapPages() {
-    return *impl->user_slab_heap_pages;
-}
-
-const KSlabHeap<Page>& KernelCore::GetUserSlabHeapPages() const {
-    return *impl->user_slab_heap_pages;
-}
-
 Kernel::KSharedMemory& KernelCore::GetHidSharedMem() {
     return *impl->hid_shared_mem;
 }
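GetHostDummyThread now builds the dummy thread in thread_local storage: the KThread is constructed once per host thread on first use and destroyed automatically at thread exit, which is why the manual SCOPE_EXIT Close() in the service-thread loop further below could be dropped. A minimal sketch of that lifetime behaviour with a stand-in type (not the yuzu class):

    #include <cstdio>

    struct DummyThread {
        DummyThread() { std::puts("constructed once per host thread"); }
        ~DummyThread() { std::puts("destroyed automatically at thread exit"); }
    };

    DummyThread* GetDummy() {
        // Function-scope thread_local: initialized lazily, exactly once per
        // thread, and torn down by the runtime when the thread exits.
        thread_local DummyThread raw_thread;
        thread_local DummyThread* thread = &raw_thread; // one-time init hook
        return thread;
    }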
@@ -43,6 +43,7 @@ class KHandleTable;
 class KLinkedListNode;
 class KMemoryLayout;
 class KMemoryManager;
+class KPageBuffer;
 class KPort;
 class KProcess;
 class KResourceLimit;

@@ -52,6 +53,7 @@ class KSession;
 class KSharedMemory;
 class KSharedMemoryInfo;
 class KThread;
+class KThreadLocalPage;
 class KTransferMemory;
 class KWorkerTaskManager;
 class KWritableEvent;

@@ -194,14 +196,6 @@ public:
     /// Opens a port to a service previously registered with RegisterNamedService.
     KClientPort* CreateNamedServicePort(std::string name);

-    /// Registers a server session with the global emulation state, to be freed on shutdown. This is
-    /// necessary because we do not emulate processes for HLE sessions.
-    void RegisterServerSession(KServerSession* server_session);
-
-    /// Unregisters a server session previously registered with RegisterServerSession when it was
-    /// destroyed during the current emulation session.
-    void UnregisterServerSession(KServerSession* server_session);
-
     /// Registers all kernel objects with the global emulation state, this is purely for tracking
     /// leaks after emulation has been shutdown.
     void RegisterKernelObject(KAutoObject* object);

@@ -239,12 +233,6 @@ public:
     /// Gets the virtual memory manager for the kernel.
     const KMemoryManager& MemoryManager() const;

-    /// Gets the slab heap allocated for user space pages.
-    KSlabHeap<Page>& GetUserSlabHeapPages();
-
-    /// Gets the slab heap allocated for user space pages.
-    const KSlabHeap<Page>& GetUserSlabHeapPages() const;
-
     /// Gets the shared memory object for HID services.
     Kernel::KSharedMemory& GetHidSharedMem();

@@ -336,6 +324,10 @@ public:
             return slab_heap_container->writeable_event;
         } else if constexpr (std::is_same_v<T, KCodeMemory>) {
             return slab_heap_container->code_memory;
+        } else if constexpr (std::is_same_v<T, KPageBuffer>) {
+            return slab_heap_container->page_buffer;
+        } else if constexpr (std::is_same_v<T, KThreadLocalPage>) {
+            return slab_heap_container->thread_local_page;
         }
     }

@@ -397,6 +389,8 @@ private:
         KSlabHeap<KTransferMemory> transfer_memory;
         KSlabHeap<KWritableEvent> writeable_event;
         KSlabHeap<KCodeMemory> code_memory;
+        KSlabHeap<KPageBuffer> page_buffer;
+        KSlabHeap<KThreadLocalPage> thread_local_page;
     };

     std::unique_ptr<SlabHeapContainer> slab_heap_container;
@@ -49,12 +49,9 @@ ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads, const std
         return;
     }

+    // Allocate a dummy guest thread for this host thread.
    kernel.RegisterHostThread();

-    // Ensure the dummy thread allocated for this host thread is closed on exit.
-    auto* dummy_thread = kernel.GetCurrentEmuThread();
-    SCOPE_EXIT({ dummy_thread->Close(); });
-
     while (true) {
         std::function<void()> task;
@@ -59,7 +59,7 @@ class KAutoObjectWithSlabHeapAndContainer : public Base {

 private:
     static Derived* Allocate(KernelCore& kernel) {
-        return kernel.SlabHeap<Derived>().AllocateWithKernel(kernel);
+        return kernel.SlabHeap<Derived>().Allocate(kernel);
     }

     static void Free(KernelCore& kernel, Derived* obj) {
@@ -96,4 +96,6 @@ constexpr inline s32 IdealCoreNoUpdate = -3;
 constexpr inline s32 LowestThreadPriority = 63;
 constexpr inline s32 HighestThreadPriority = 0;

+constexpr inline size_t ThreadLocalRegionSize = 0x200;
+
 } // namespace Kernel::Svc
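This constant pins down the TLS arithmetic that the removed KProcess comment used to describe in prose: with 0x1000-byte pages, each page splits into 0x1000 / 0x200 = 8 thread-local regions, which is the bound behind KThreadLocalPage::RegionsPerPage. As a quick standalone check:

    #include <cstddef>

    constexpr std::size_t PageSize = 0x1000;             // page size used above
    constexpr std::size_t ThreadLocalRegionSize = 0x200; // the new constant
    constexpr std::size_t RegionsPerPage = PageSize / ThreadLocalRegionSize;
    static_assert(RegionsPerPage == 8); // 8 TLS slots per page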
@@ -980,7 +980,7 @@ private:
         LOG_DEBUG(Service_AM, "called");

         IPC::RequestParser rp{ctx};
-        applet->GetBroker().PushNormalDataFromGame(rp.PopIpcInterface<IStorage>());
+        applet->GetBroker().PushNormalDataFromGame(rp.PopIpcInterface<IStorage>().lock());

         IPC::ResponseBuilder rb{ctx, 2};
         rb.Push(ResultSuccess);

@@ -1007,7 +1007,7 @@ private:
         LOG_DEBUG(Service_AM, "called");

         IPC::RequestParser rp{ctx};
-        applet->GetBroker().PushInteractiveDataFromGame(rp.PopIpcInterface<IStorage>());
+        applet->GetBroker().PushInteractiveDataFromGame(rp.PopIpcInterface<IStorage>().lock());

         ASSERT(applet->IsInitialized());
         applet->ExecuteInteractive();
@@ -17,21 +17,12 @@ namespace Service::KernelHelpers {

 ServiceContext::ServiceContext(Core::System& system_, std::string name_)
     : kernel(system_.Kernel()) {
-
-    // Create a resource limit for the process.
-    const auto physical_memory_size =
-        kernel.MemoryManager().GetSize(Kernel::KMemoryManager::Pool::System);
-    auto* resource_limit = Kernel::CreateResourceLimitForProcess(system_, physical_memory_size);
-
     // Create the process.
     process = Kernel::KProcess::Create(kernel);
     ASSERT(Kernel::KProcess::Initialize(process, system_, std::move(name_),
                                         Kernel::KProcess::ProcessType::KernelInternal,
-                                        resource_limit)
+                                        kernel.GetSystemResourceLimit())
                .IsSuccess());
-
-    // Close reference to our resource limit, as the process opens one.
-    resource_limit->Close();
 }

 ServiceContext::~ServiceContext() {
@@ -81,6 +81,8 @@ ResultVal<Kernel::KPort*> ServiceManager::GetServicePort(const std::string& name
     }

     auto* port = Kernel::KPort::Create(kernel);
+    SCOPE_EXIT({ port->Close(); });
+
     port->Initialize(ServerSessionCountMax, false, name);
     auto handler = it->second;
     port->GetServerPort().SetSessionHandler(std::move(handler));