core: Implement separate A32/A64 ARM interfaces.
commit c083ea7d78 (parent 6fc485a607)
@@ -595,8 +595,10 @@ endif()
if (ARCHITECTURE_x86_64)
    target_sources(core PRIVATE
        arm/dynarmic/arm_dynarmic.cpp
        arm/dynarmic/arm_dynarmic.h
        arm/dynarmic/arm_dynarmic_32.cpp
        arm/dynarmic/arm_dynarmic_32.h
        arm/dynarmic/arm_dynarmic_64.cpp
        arm/dynarmic/arm_dynarmic_64.h
        arm/dynarmic/arm_dynarmic_cp15.cpp
        arm/dynarmic/arm_dynarmic_cp15.h
    )
@@ -25,7 +25,20 @@ public:
    explicit ARM_Interface(System& system_) : system{system_} {}
    virtual ~ARM_Interface() = default;

    struct ThreadContext {
    struct ThreadContext32 {
        std::array<u32, 16> cpu_registers;
        u32 cpsr;
        std::array<u8, 4> padding;
        std::array<u64, 32> fprs;
        u32 fpscr;
        u32 fpexc;
        u32 tpidr;
    };
    // Internally within the kernel, it expects the AArch32 version of the
    // thread context to be 344 bytes in size.
    static_assert(sizeof(ThreadContext32) == 0x158);

    struct ThreadContext64 {
        std::array<u64, 31> cpu_registers;
        u64 sp;
        u64 pc;

@@ -38,7 +51,7 @@ public:
    };
    // Internally within the kernel, it expects the AArch64 version of the
    // thread context to be 800 bytes in size.
    static_assert(sizeof(ThreadContext) == 0x320);
    static_assert(sizeof(ThreadContext64) == 0x320);

    /// Runs the CPU until an event happens
    virtual void Run() = 0;

@@ -130,17 +143,10 @@ public:
     */
    virtual void SetTPIDR_EL0(u64 value) = 0;

    /**
     * Saves the current CPU context
     * @param ctx Thread context to save
     */
    virtual void SaveContext(ThreadContext& ctx) = 0;

    /**
     * Loads a CPU context
     * @param ctx Thread context to load
     */
    virtual void LoadContext(const ThreadContext& ctx) = 0;
    virtual void SaveContext(ThreadContext32& ctx) = 0;
    virtual void SaveContext(ThreadContext64& ctx) = 0;
    virtual void LoadContext(const ThreadContext32& ctx) = 0;
    virtual void LoadContext(const ThreadContext64& ctx) = 0;

    /// Clears the exclusive monitor's state.
    virtual void ClearExclusiveState() = 0;
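The size comments above can be checked in isolation. Below is a minimal standalone sketch (not taken from this commit; the u8/u32/u64 aliases stand in for yuzu's common_types) that mirrors the ThreadContext32 fields and confirms the 344-byte (0x158) figure the kernel expects:

#include <array>
#include <cstdint>

using u8 = std::uint8_t;
using u32 = std::uint32_t;
using u64 = std::uint64_t;

struct ThreadContext32 {
    std::array<u32, 16> cpu_registers; // 64 bytes
    u32 cpsr;                          // 4 bytes
    std::array<u8, 4> padding;         // 4 bytes, keeps fprs 8-byte aligned
    std::array<u64, 32> fprs;          // 256 bytes
    u32 fpscr;                         // 4 bytes
    u32 fpexc;                         // 4 bytes
    u32 tpidr;                         // 4 bytes
};
// 340 bytes of members, rounded up to the 8-byte alignment of the u64 array,
// gives sizeof == 344 == 0x158, matching the static_assert in the interface.
static_assert(alignof(ThreadContext32) == 8);
static_assert(sizeof(ThreadContext32) == 0x158);

int main() {}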
src/core/arm/dynarmic/arm_dynarmic_32.cpp (new file, 208 lines)
@@ -0,0 +1,208 @@
// Copyright 2020 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <cinttypes>
#include <memory>
#include <dynarmic/A32/a32.h>
#include <dynarmic/A32/config.h>
#include <dynarmic/A32/context.h>
#include "common/microprofile.h"
#include "core/arm/dynarmic/arm_dynarmic_32.h"
#include "core/arm/dynarmic/arm_dynarmic_64.h"
#include "core/arm/dynarmic/arm_dynarmic_cp15.h"
#include "core/core.h"
#include "core/core_manager.h"
#include "core/core_timing.h"
#include "core/hle/kernel/svc.h"
#include "core/memory.h"

namespace Core {

class DynarmicCallbacks32 : public Dynarmic::A32::UserCallbacks {
public:
    explicit DynarmicCallbacks32(ARM_Dynarmic_32& parent) : parent(parent) {}

    u8 MemoryRead8(u32 vaddr) override {
        return parent.system.Memory().Read8(vaddr);
    }
    u16 MemoryRead16(u32 vaddr) override {
        return parent.system.Memory().Read16(vaddr);
    }
    u32 MemoryRead32(u32 vaddr) override {
        return parent.system.Memory().Read32(vaddr);
    }
    u64 MemoryRead64(u32 vaddr) override {
        return parent.system.Memory().Read64(vaddr);
    }

    void MemoryWrite8(u32 vaddr, u8 value) override {
        parent.system.Memory().Write8(vaddr, value);
    }
    void MemoryWrite16(u32 vaddr, u16 value) override {
        parent.system.Memory().Write16(vaddr, value);
    }
    void MemoryWrite32(u32 vaddr, u32 value) override {
        parent.system.Memory().Write32(vaddr, value);
    }
    void MemoryWrite64(u32 vaddr, u64 value) override {
        parent.system.Memory().Write64(vaddr, value);
    }

    void InterpreterFallback(u32 pc, std::size_t num_instructions) override {
        UNIMPLEMENTED();
    }

    void ExceptionRaised(u32 pc, Dynarmic::A32::Exception exception) override {
        switch (exception) {
        case Dynarmic::A32::Exception::UndefinedInstruction:
        case Dynarmic::A32::Exception::UnpredictableInstruction:
            break;
        case Dynarmic::A32::Exception::Breakpoint:
            break;
        }
        LOG_CRITICAL(HW_GPU, "ExceptionRaised(exception = {}, pc = {:08X}, code = {:08X})",
                     static_cast<std::size_t>(exception), pc, MemoryReadCode(pc));
        UNIMPLEMENTED();
    }

    void CallSVC(u32 swi) override {
        Kernel::CallSVC(parent.system, swi);
    }

    void AddTicks(u64 ticks) override {
        // Divide the number of ticks by the number of CPU cores. TODO(Subv): This yields only a
        // rough approximation of the amount of executed ticks in the system, it may be thrown off
        // if not all cores are doing a similar amount of work. Instead of doing this, we should
        // devise a way so that timing is consistent across all cores without increasing the ticks 4
        // times.
        u64 amortized_ticks = (ticks - num_interpreted_instructions) / Core::NUM_CPU_CORES;
        // Always execute at least one tick.
        amortized_ticks = std::max<u64>(amortized_ticks, 1);

        parent.system.CoreTiming().AddTicks(amortized_ticks);
        num_interpreted_instructions = 0;
    }
    u64 GetTicksRemaining() override {
        return std::max(parent.system.CoreTiming().GetDowncount(), {});
    }

    ARM_Dynarmic_32& parent;
    std::size_t num_interpreted_instructions{};
    u64 tpidrro_el0{};
    u64 tpidr_el0{};
};

std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable& page_table,
                                                             std::size_t address_space_bits) const {
    Dynarmic::A32::UserConfig config;
    config.callbacks = cb.get();
    // TODO(bunnei): Implement page table for 32-bit
    // config.page_table = &page_table.pointers;
    config.coprocessors[15] = std::make_shared<DynarmicCP15>((u32*)&CP15_regs[0]);
    config.define_unpredictable_behaviour = true;
    return std::make_unique<Dynarmic::A32::Jit>(config);
}

MICROPROFILE_DEFINE(ARM_Jit_Dynarmic_32, "ARM JIT", "Dynarmic", MP_RGB(255, 64, 64));

void ARM_Dynarmic_32::Run() {
    MICROPROFILE_SCOPE(ARM_Jit_Dynarmic_32);
    jit->Run();
}

void ARM_Dynarmic_32::Step() {
    cb->InterpreterFallback(jit->Regs()[15], 1);
}

ARM_Dynarmic_32::ARM_Dynarmic_32(System& system, ExclusiveMonitor& exclusive_monitor,
                                 std::size_t core_index)
    : ARM_Interface{system},
      cb(std::make_unique<DynarmicCallbacks32>(*this)), core_index{core_index},
      exclusive_monitor{dynamic_cast<DynarmicExclusiveMonitor&>(exclusive_monitor)} {}

ARM_Dynarmic_32::~ARM_Dynarmic_32() = default;

void ARM_Dynarmic_32::SetPC(u64 pc) {
    jit->Regs()[15] = static_cast<u32>(pc);
}

u64 ARM_Dynarmic_32::GetPC() const {
    return jit->Regs()[15];
}

u64 ARM_Dynarmic_32::GetReg(int index) const {
    return jit->Regs()[index];
}

void ARM_Dynarmic_32::SetReg(int index, u64 value) {
    jit->Regs()[index] = static_cast<u32>(value);
}

u128 ARM_Dynarmic_32::GetVectorReg(int index) const {
    return {};
}

void ARM_Dynarmic_32::SetVectorReg(int index, u128 value) {}

u32 ARM_Dynarmic_32::GetPSTATE() const {
    return jit->Cpsr();
}

void ARM_Dynarmic_32::SetPSTATE(u32 cpsr) {
    jit->SetCpsr(cpsr);
}

u64 ARM_Dynarmic_32::GetTlsAddress() const {
    return CP15_regs[static_cast<std::size_t>(CP15Register::CP15_THREAD_URO)];
}

void ARM_Dynarmic_32::SetTlsAddress(VAddr address) {
    CP15_regs[static_cast<std::size_t>(CP15Register::CP15_THREAD_URO)] = static_cast<u32>(address);
}

u64 ARM_Dynarmic_32::GetTPIDR_EL0() const {
    return cb->tpidr_el0;
}

void ARM_Dynarmic_32::SetTPIDR_EL0(u64 value) {
    cb->tpidr_el0 = value;
}

void ARM_Dynarmic_32::SaveContext(ThreadContext32& ctx) {
    Dynarmic::A32::Context context;
    jit->SaveContext(context);
    ctx.cpu_registers = context.Regs();
    ctx.cpsr = context.Cpsr();
}

void ARM_Dynarmic_32::LoadContext(const ThreadContext32& ctx) {
    Dynarmic::A32::Context context;
    context.Regs() = ctx.cpu_registers;
    context.SetCpsr(ctx.cpsr);
    jit->LoadContext(context);
}

void ARM_Dynarmic_32::PrepareReschedule() {
    jit->HaltExecution();
}

void ARM_Dynarmic_32::ClearInstructionCache() {
    jit->ClearCache();
}

void ARM_Dynarmic_32::ClearExclusiveState() {}

void ARM_Dynarmic_32::PageTableChanged(Common::PageTable& page_table,
                                       std::size_t new_address_space_size_in_bits) {
    auto key = std::make_pair(&page_table, new_address_space_size_in_bits);
    auto iter = jit_cache.find(key);
    if (iter != jit_cache.end()) {
        jit = iter->second;
        return;
    }
    jit = MakeJit(page_table, new_address_space_size_in_bits);
    jit_cache.emplace(key, jit);
}

} // namespace Core
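The AddTicks() comment above describes the tick amortization rule. As a rough standalone illustration (NUM_CPU_CORES = 4 is assumed here to match the Switch's core count, and the free function name is hypothetical), executed ticks are split across the cores with a floor of one tick so core timing always advances:

#include <algorithm>
#include <cstdint>
#include <iostream>

constexpr std::uint64_t NUM_CPU_CORES = 4;

// Mirrors the arithmetic in DynarmicCallbacks32::AddTicks(): subtract any
// interpreted instructions, divide across cores, and never add zero ticks.
std::uint64_t AmortizedTicks(std::uint64_t ticks, std::uint64_t interpreted_instructions) {
    const std::uint64_t amortized = (ticks - interpreted_instructions) / NUM_CPU_CORES;
    return std::max<std::uint64_t>(amortized, 1);
}

int main() {
    std::cout << AmortizedTicks(400, 0) << '\n'; // 100
    std::cout << AmortizedTicks(3, 0) << '\n';   // 1, the floor applies
}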
src/core/arm/dynarmic/arm_dynarmic_32.h (new file, 77 lines)
@@ -0,0 +1,77 @@
// Copyright 2020 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <memory>
#include <unordered_map>

#include <dynarmic/A32/a32.h>
#include <dynarmic/A64/a64.h>
#include <dynarmic/A64/exclusive_monitor.h>
#include "common/common_types.h"
#include "common/hash.h"
#include "core/arm/arm_interface.h"
#include "core/arm/exclusive_monitor.h"

namespace Memory {
class Memory;
}

namespace Core {

class DynarmicCallbacks32;
class DynarmicExclusiveMonitor;
class System;

class ARM_Dynarmic_32 final : public ARM_Interface {
public:
    ARM_Dynarmic_32(System& system, ExclusiveMonitor& exclusive_monitor, std::size_t core_index);
    ~ARM_Dynarmic_32() override;

    void SetPC(u64 pc) override;
    u64 GetPC() const override;
    u64 GetReg(int index) const override;
    void SetReg(int index, u64 value) override;
    u128 GetVectorReg(int index) const override;
    void SetVectorReg(int index, u128 value) override;
    u32 GetPSTATE() const override;
    void SetPSTATE(u32 pstate) override;
    void Run() override;
    void Step() override;
    VAddr GetTlsAddress() const override;
    void SetTlsAddress(VAddr address) override;
    void SetTPIDR_EL0(u64 value) override;
    u64 GetTPIDR_EL0() const override;

    void SaveContext(ThreadContext32& ctx) override;
    void SaveContext(ThreadContext64& ctx) override {}
    void LoadContext(const ThreadContext32& ctx) override;
    void LoadContext(const ThreadContext64& ctx) override {}

    void PrepareReschedule() override;
    void ClearExclusiveState() override;

    void ClearInstructionCache() override;
    void PageTableChanged(Common::PageTable& new_page_table,
                          std::size_t new_address_space_size_in_bits) override;

private:
    std::shared_ptr<Dynarmic::A32::Jit> MakeJit(Common::PageTable& page_table,
                                                std::size_t address_space_bits) const;

    using JitCacheKey = std::pair<Common::PageTable*, std::size_t>;
    using JitCacheType =
        std::unordered_map<JitCacheKey, std::shared_ptr<Dynarmic::A32::Jit>, Common::PairHash>;

    friend class DynarmicCallbacks32;
    std::unique_ptr<DynarmicCallbacks32> cb;
    JitCacheType jit_cache;
    std::shared_ptr<Dynarmic::A32::Jit> jit;
    std::size_t core_index;
    DynarmicExclusiveMonitor& exclusive_monitor;
    std::array<u32, 84> CP15_regs{};
};

} // namespace Core
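A small self-contained sketch of the overload pattern this header uses (stand-in types only, not yuzu's real classes): each backend overrides both SaveContext overloads and leaves the flavour it cannot service as an empty stub, so callers can hand it either context type without caring which backend is active:

#include <array>
#include <cstdint>
#include <iostream>

struct ThreadContext32 { std::array<std::uint32_t, 16> cpu_registers{}; };
struct ThreadContext64 { std::array<std::uint64_t, 31> cpu_registers{}; };

class ARM_Interface {
public:
    virtual ~ARM_Interface() = default;
    virtual void SaveContext(ThreadContext32& ctx) = 0;
    virtual void SaveContext(ThreadContext64& ctx) = 0;
};

class ARM_32 final : public ARM_Interface {
public:
    void SaveContext(ThreadContext32& ctx) override { ctx.cpu_registers[0] = 0x32; }
    void SaveContext(ThreadContext64&) override {} // meaningless for an A32 core: stubbed out
};

class ARM_64 final : public ARM_Interface {
public:
    void SaveContext(ThreadContext32&) override {} // meaningless for an A64 core: stubbed out
    void SaveContext(ThreadContext64& ctx) override { ctx.cpu_registers[0] = 0x64; }
};

int main() {
    ARM_32 core;
    ThreadContext32 ctx32;
    ThreadContext64 ctx64;
    // A caller such as a scheduler can save both flavours unconditionally;
    // only the context matching the backend is actually filled in.
    core.SaveContext(ctx32);
    core.SaveContext(ctx64);
    std::cout << ctx32.cpu_registers[0] << ' ' << ctx64.cpu_registers[0] << '\n'; // prints "50 0"
}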
@@ -8,7 +8,7 @@
#include <dynarmic/A64/config.h>
#include "common/logging/log.h"
#include "common/microprofile.h"
#include "core/arm/dynarmic/arm_dynarmic.h"
#include "core/arm/dynarmic/arm_dynarmic_64.h"
#include "core/core.h"
#include "core/core_manager.h"
#include "core/core_timing.h"

@@ -25,9 +25,9 @@ namespace Core {

using Vector = Dynarmic::A64::Vector;

class ARM_Dynarmic_Callbacks : public Dynarmic::A64::UserCallbacks {
class DynarmicCallbacks64 : public Dynarmic::A64::UserCallbacks {
public:
    explicit ARM_Dynarmic_Callbacks(ARM_Dynarmic& parent) : parent(parent) {}
    explicit DynarmicCallbacks64(ARM_Dynarmic_64& parent) : parent(parent) {}

    u8 MemoryRead8(u64 vaddr) override {
        return parent.system.Memory().Read8(vaddr);

@@ -68,7 +68,7 @@ public:
        LOG_INFO(Core_ARM, "Unicorn fallback @ 0x{:X} for {} instructions (instr = {:08X})", pc,
                 num_instructions, MemoryReadCode(pc));

        ARM_Interface::ThreadContext ctx;
        ARM_Interface::ThreadContext64 ctx;
        parent.SaveContext(ctx);
        parent.inner_unicorn.LoadContext(ctx);
        parent.inner_unicorn.ExecuteInstructions(num_instructions);

@@ -90,7 +90,7 @@ public:
        parent.jit->HaltExecution();
        parent.SetPC(pc);
        Kernel::Thread* const thread = parent.system.CurrentScheduler().GetCurrentThread();
        parent.SaveContext(thread->GetContext());
        parent.SaveContext(thread->GetContext64());
        GDBStub::Break();
        GDBStub::SendTrap(thread, 5);
        return;

@@ -126,13 +126,13 @@ public:
        return Timing::CpuCyclesToClockCycles(parent.system.CoreTiming().GetTicks());
    }

    ARM_Dynarmic& parent;
    ARM_Dynarmic_64& parent;
    std::size_t num_interpreted_instructions = 0;
    u64 tpidrro_el0 = 0;
    u64 tpidr_el0 = 0;
};

std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic::MakeJit(Common::PageTable& page_table,
std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable& page_table,
                                                             std::size_t address_space_bits) const {
    Dynarmic::A64::UserConfig config;

@@ -162,76 +162,76 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic::MakeJit(Common::PageTable& pag
    return std::make_shared<Dynarmic::A64::Jit>(config);
}

MICROPROFILE_DEFINE(ARM_Jit_Dynarmic, "ARM JIT", "Dynarmic", MP_RGB(255, 64, 64));
MICROPROFILE_DEFINE(ARM_Jit_Dynarmic_64, "ARM JIT", "Dynarmic", MP_RGB(255, 64, 64));

void ARM_Dynarmic::Run() {
    MICROPROFILE_SCOPE(ARM_Jit_Dynarmic);
void ARM_Dynarmic_64::Run() {
    MICROPROFILE_SCOPE(ARM_Jit_Dynarmic_64);

    jit->Run();
}

void ARM_Dynarmic::Step() {
void ARM_Dynarmic_64::Step() {
    cb->InterpreterFallback(jit->GetPC(), 1);
}

ARM_Dynarmic::ARM_Dynarmic(System& system, ExclusiveMonitor& exclusive_monitor,
ARM_Dynarmic_64::ARM_Dynarmic_64(System& system, ExclusiveMonitor& exclusive_monitor,
                                 std::size_t core_index)
    : ARM_Interface{system},
      cb(std::make_unique<ARM_Dynarmic_Callbacks>(*this)), inner_unicorn{system},
      cb(std::make_unique<DynarmicCallbacks64>(*this)), inner_unicorn{system},
      core_index{core_index}, exclusive_monitor{
                                  dynamic_cast<DynarmicExclusiveMonitor&>(exclusive_monitor)} {}

ARM_Dynarmic::~ARM_Dynarmic() = default;
ARM_Dynarmic_64::~ARM_Dynarmic_64() = default;

void ARM_Dynarmic::SetPC(u64 pc) {
void ARM_Dynarmic_64::SetPC(u64 pc) {
    jit->SetPC(pc);
}

u64 ARM_Dynarmic::GetPC() const {
u64 ARM_Dynarmic_64::GetPC() const {
    return jit->GetPC();
}

u64 ARM_Dynarmic::GetReg(int index) const {
u64 ARM_Dynarmic_64::GetReg(int index) const {
    return jit->GetRegister(index);
}

void ARM_Dynarmic::SetReg(int index, u64 value) {
void ARM_Dynarmic_64::SetReg(int index, u64 value) {
    jit->SetRegister(index, value);
}

u128 ARM_Dynarmic::GetVectorReg(int index) const {
u128 ARM_Dynarmic_64::GetVectorReg(int index) const {
    return jit->GetVector(index);
}

void ARM_Dynarmic::SetVectorReg(int index, u128 value) {
void ARM_Dynarmic_64::SetVectorReg(int index, u128 value) {
    jit->SetVector(index, value);
}

u32 ARM_Dynarmic::GetPSTATE() const {
u32 ARM_Dynarmic_64::GetPSTATE() const {
    return jit->GetPstate();
}

void ARM_Dynarmic::SetPSTATE(u32 pstate) {
void ARM_Dynarmic_64::SetPSTATE(u32 pstate) {
    jit->SetPstate(pstate);
}

u64 ARM_Dynarmic::GetTlsAddress() const {
u64 ARM_Dynarmic_64::GetTlsAddress() const {
    return cb->tpidrro_el0;
}

void ARM_Dynarmic::SetTlsAddress(VAddr address) {
void ARM_Dynarmic_64::SetTlsAddress(VAddr address) {
    cb->tpidrro_el0 = address;
}

u64 ARM_Dynarmic::GetTPIDR_EL0() const {
u64 ARM_Dynarmic_64::GetTPIDR_EL0() const {
    return cb->tpidr_el0;
}

void ARM_Dynarmic::SetTPIDR_EL0(u64 value) {
void ARM_Dynarmic_64::SetTPIDR_EL0(u64 value) {
    cb->tpidr_el0 = value;
}

void ARM_Dynarmic::SaveContext(ThreadContext& ctx) {
void ARM_Dynarmic_64::SaveContext(ThreadContext64& ctx) {
    ctx.cpu_registers = jit->GetRegisters();
    ctx.sp = jit->GetSP();
    ctx.pc = jit->GetPC();

@@ -242,7 +242,7 @@ void ARM_Dynarmic::SaveContext(ThreadContext& ctx) {
    ctx.tpidr = cb->tpidr_el0;
}

void ARM_Dynarmic::LoadContext(const ThreadContext& ctx) {
void ARM_Dynarmic_64::LoadContext(const ThreadContext64& ctx) {
    jit->SetRegisters(ctx.cpu_registers);
    jit->SetSP(ctx.sp);
    jit->SetPC(ctx.pc);

@@ -253,19 +253,19 @@ void ARM_Dynarmic::LoadContext(const ThreadContext& ctx) {
    SetTPIDR_EL0(ctx.tpidr);
}

void ARM_Dynarmic::PrepareReschedule() {
void ARM_Dynarmic_64::PrepareReschedule() {
    jit->HaltExecution();
}

void ARM_Dynarmic::ClearInstructionCache() {
void ARM_Dynarmic_64::ClearInstructionCache() {
    jit->ClearCache();
}

void ARM_Dynarmic::ClearExclusiveState() {
void ARM_Dynarmic_64::ClearExclusiveState() {
    jit->ClearExclusiveState();
}

void ARM_Dynarmic::PageTableChanged(Common::PageTable& page_table,
void ARM_Dynarmic_64::PageTableChanged(Common::PageTable& page_table,
                                       std::size_t new_address_space_size_in_bits) {
    auto key = std::make_pair(&page_table, new_address_space_size_in_bits);
    auto iter = jit_cache.find(key);

@@ -277,8 +277,8 @@ void ARM_Dynarmic::PageTableChanged(Common::PageTable& page_table,
    jit_cache.emplace(key, jit);
}

DynarmicExclusiveMonitor::DynarmicExclusiveMonitor(Memory::Memory& memory_, std::size_t core_count)
    : monitor(core_count), memory{memory_} {}
DynarmicExclusiveMonitor::DynarmicExclusiveMonitor(Memory::Memory& memory, std::size_t core_count)
    : monitor(core_count), memory{memory} {}

DynarmicExclusiveMonitor::~DynarmicExclusiveMonitor() = default;
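Both backends key their JIT cache on a (page table pointer, address-space-bits) pair. Below is a standalone sketch of that caching idea (PageTable, Jit and PairHash are simplified stand-ins here, not the Common/Dynarmic types):

#include <cstddef>
#include <functional>
#include <memory>
#include <unordered_map>
#include <utility>

struct PageTable {};              // stand-in for Common::PageTable
struct Jit { std::size_t bits; }; // stand-in for a Dynarmic JIT instance

struct PairHash {
    std::size_t operator()(const std::pair<PageTable*, std::size_t>& key) const {
        return std::hash<PageTable*>{}(key.first) ^ (key.second << 1);
    }
};

using JitCacheKey = std::pair<PageTable*, std::size_t>;

std::unordered_map<JitCacheKey, std::shared_ptr<Jit>, PairHash> jit_cache;
std::shared_ptr<Jit> jit; // the currently active JIT

void PageTableChanged(PageTable& page_table, std::size_t address_space_bits) {
    const JitCacheKey key{&page_table, address_space_bits};
    if (const auto iter = jit_cache.find(key); iter != jit_cache.end()) {
        jit = iter->second; // reuse the JIT built for this page table earlier
        return;
    }
    jit = std::make_shared<Jit>(Jit{address_space_bits}); // MakeJit() stand-in
    jit_cache.emplace(key, jit);
}

int main() {
    PageTable a, b;
    PageTableChanged(a, 36);
    PageTableChanged(b, 39);
    PageTableChanged(a, 36); // the second switch to 'a' hits the cache
}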
@@ -21,18 +21,14 @@ class Memory;

namespace Core {

class ARM_Dynarmic_Callbacks;
class DynarmicCallbacks64;
class DynarmicExclusiveMonitor;
class System;

using JitCacheKey = std::pair<Common::PageTable*, std::size_t>;
using JitCacheType =
    std::unordered_map<JitCacheKey, std::shared_ptr<Dynarmic::A64::Jit>, Common::PairHash>;

class ARM_Dynarmic final : public ARM_Interface {
class ARM_Dynarmic_64 final : public ARM_Interface {
public:
    ARM_Dynarmic(System& system, ExclusiveMonitor& exclusive_monitor, std::size_t core_index);
    ~ARM_Dynarmic() override;
    ARM_Dynarmic_64(System& system, ExclusiveMonitor& exclusive_monitor, std::size_t core_index);
    ~ARM_Dynarmic_64() override;

    void SetPC(u64 pc) override;
    u64 GetPC() const override;

@@ -49,8 +45,10 @@ public:
    void SetTPIDR_EL0(u64 value) override;
    u64 GetTPIDR_EL0() const override;

    void SaveContext(ThreadContext& ctx) override;
    void LoadContext(const ThreadContext& ctx) override;
    void SaveContext(ThreadContext32& ctx) override {}
    void SaveContext(ThreadContext64& ctx) override;
    void LoadContext(const ThreadContext32& ctx) override {}
    void LoadContext(const ThreadContext64& ctx) override;

    void PrepareReschedule() override;
    void ClearExclusiveState() override;

@@ -63,8 +61,12 @@ private:
    std::shared_ptr<Dynarmic::A64::Jit> MakeJit(Common::PageTable& page_table,
                                                std::size_t address_space_bits) const;

    friend class ARM_Dynarmic_Callbacks;
    std::unique_ptr<ARM_Dynarmic_Callbacks> cb;
    using JitCacheKey = std::pair<Common::PageTable*, std::size_t>;
    using JitCacheType =
        std::unordered_map<JitCacheKey, std::shared_ptr<Dynarmic::A64::Jit>, Common::PairHash>;

    friend class DynarmicCallbacks64;
    std::unique_ptr<DynarmicCallbacks64> cb;
    JitCacheType jit_cache;
    std::shared_ptr<Dynarmic::A64::Jit> jit;
    ARM_Unicorn inner_unicorn;

@@ -75,7 +77,7 @@ private:

class DynarmicExclusiveMonitor final : public ExclusiveMonitor {
public:
    explicit DynarmicExclusiveMonitor(Memory::Memory& memory_, std::size_t core_count);
    explicit DynarmicExclusiveMonitor(Memory::Memory& memory, std::size_t core_count);
    ~DynarmicExclusiveMonitor() override;

    void SetExclusive(std::size_t core_index, VAddr addr) override;

@@ -88,7 +90,7 @@ public:
    bool ExclusiveWrite128(std::size_t core_index, VAddr vaddr, u128 value) override;

private:
    friend class ARM_Dynarmic;
    friend class ARM_Dynarmic_64;
    Dynarmic::A64::ExclusiveMonitor monitor;
    Memory::Memory& memory;
};
@@ -3,7 +3,7 @@
// Refer to the license.txt file included.

#ifdef ARCHITECTURE_x86_64
#include "core/arm/dynarmic/arm_dynarmic.h"
#include "core/arm/dynarmic/arm_dynarmic_64.h"
#endif
#include "core/arm/exclusive_monitor.h"
#include "core/memory.h"
@@ -53,7 +53,7 @@ static bool UnmappedMemoryHook(uc_engine* uc, uc_mem_type type, u64 addr, int si
                               void* user_data) {
    auto* const system = static_cast<System*>(user_data);

    ARM_Interface::ThreadContext ctx{};
    ARM_Interface::ThreadContext64 ctx{};
    system->CurrentArmInterface().SaveContext(ctx);
    ASSERT_MSG(false, "Attempted to read from unmapped memory: 0x{:X}, pc=0x{:X}, lr=0x{:X}", addr,
               ctx.pc, ctx.cpu_registers[30]);

@@ -179,7 +179,7 @@ void ARM_Unicorn::ExecuteInstructions(std::size_t num_instructions) {
    }

    Kernel::Thread* const thread = system.CurrentScheduler().GetCurrentThread();
    SaveContext(thread->GetContext());
    SaveContext(thread->GetContext64());
    if (last_bkpt_hit || GDBStub::IsMemoryBreak() || GDBStub::GetCpuStepFlag()) {
        last_bkpt_hit = false;
        GDBStub::Break();

@@ -188,7 +188,7 @@ void ARM_Unicorn::ExecuteInstructions(std::size_t num_instructions) {
    }
}

void ARM_Unicorn::SaveContext(ThreadContext& ctx) {
void ARM_Unicorn::SaveContext(ThreadContext64& ctx) {
    int uregs[32];
    void* tregs[32];

@@ -215,7 +215,7 @@ void ARM_Unicorn::SaveContext(ThreadContext& ctx) {
    CHECKED(uc_reg_read_batch(uc, uregs, tregs, 32));
}

void ARM_Unicorn::LoadContext(const ThreadContext& ctx) {
void ARM_Unicorn::LoadContext(const ThreadContext64& ctx) {
    int uregs[32];
    void* tregs[32];
@@ -30,8 +30,6 @@ public:
    void SetTlsAddress(VAddr address) override;
    void SetTPIDR_EL0(u64 value) override;
    u64 GetTPIDR_EL0() const override;
    void SaveContext(ThreadContext& ctx) override;
    void LoadContext(const ThreadContext& ctx) override;
    void PrepareReschedule() override;
    void ClearExclusiveState() override;
    void ExecuteInstructions(std::size_t num_instructions);

@@ -41,6 +39,11 @@ public:
    void PageTableChanged(Common::PageTable&, std::size_t) override {}
    void RecordBreak(GDBStub::BreakpointAddress bkpt);

    void SaveContext(ThreadContext32& ctx) override {}
    void SaveContext(ThreadContext64& ctx) override;
    void LoadContext(const ThreadContext32& ctx) override {}
    void LoadContext(const ThreadContext64& ctx) override;

private:
    static void InterruptHook(uc_engine* uc, u32 int_no, void* user_data);
@@ -6,9 +6,6 @@
#include <mutex>

#include "common/logging/log.h"
#ifdef ARCHITECTURE_x86_64
#include "core/arm/dynarmic/arm_dynarmic.h"
#endif
#include "core/arm/exclusive_monitor.h"
#include "core/arm/unicorn/arm_unicorn.h"
#include "core/core.h"
@@ -217,7 +217,7 @@ static u64 RegRead(std::size_t id, Kernel::Thread* thread = nullptr) {
        return 0;
    }

    const auto& thread_context = thread->GetContext();
    const auto& thread_context = thread->GetContext64();

    if (id < SP_REGISTER) {
        return thread_context.cpu_registers[id];

@@ -239,7 +239,7 @@ static void RegWrite(std::size_t id, u64 val, Kernel::Thread* thread = nullptr)
        return;
    }

    auto& thread_context = thread->GetContext();
    auto& thread_context = thread->GetContext64();

    if (id < SP_REGISTER) {
        thread_context.cpu_registers[id] = val;

@@ -259,7 +259,7 @@ static u128 FpuRead(std::size_t id, Kernel::Thread* thread = nullptr) {
        return u128{0};
    }

    auto& thread_context = thread->GetContext();
    auto& thread_context = thread->GetContext64();

    if (id >= UC_ARM64_REG_Q0 && id < FPCR_REGISTER) {
        return thread_context.vector_registers[id - UC_ARM64_REG_Q0];

@@ -275,7 +275,7 @@ static void FpuWrite(std::size_t id, u128 val, Kernel::Thread* thread = nullptr)
        return;
    }

    auto& thread_context = thread->GetContext();
    auto& thread_context = thread->GetContext64();

    if (id >= UC_ARM64_REG_Q0 && id < FPCR_REGISTER) {
        thread_context.vector_registers[id - UC_ARM64_REG_Q0] = val;

@@ -916,7 +916,7 @@ static void WriteRegister() {
    // Update ARM context, skipping scheduler - no running threads at this point
    Core::System::GetInstance()
        .ArmInterface(current_core)
        .LoadContext(current_thread->GetContext());
        .LoadContext(current_thread->GetContext64());

    SendReply("OK");
}

@@ -947,7 +947,7 @@ static void WriteRegisters() {
    // Update ARM context, skipping scheduler - no running threads at this point
    Core::System::GetInstance()
        .ArmInterface(current_core)
        .LoadContext(current_thread->GetContext());
        .LoadContext(current_thread->GetContext64());

    SendReply("OK");
}

@@ -1019,7 +1019,7 @@ static void Step() {
        // Update ARM context, skipping scheduler - no running threads at this point
        Core::System::GetInstance()
            .ArmInterface(current_core)
            .LoadContext(current_thread->GetContext());
            .LoadContext(current_thread->GetContext64());
    }
    step_loop = true;
    halt_loop = true;
@@ -186,6 +186,10 @@ struct KernelCore::Impl {
            return;
        }

        for (auto& core : cores) {
            core.SetIs64Bit(process->Is64BitProcess());
        }

        system.Memory().SetCurrentPageTable(*process);
    }
@@ -5,7 +5,8 @@
#include "common/logging/log.h"
#include "core/arm/arm_interface.h"
#ifdef ARCHITECTURE_x86_64
#include "core/arm/dynarmic/arm_dynarmic.h"
#include "core/arm/dynarmic/arm_dynarmic_32.h"
#include "core/arm/dynarmic/arm_dynarmic_64.h"
#endif
#include "core/arm/exclusive_monitor.h"
#include "core/arm/unicorn/arm_unicorn.h"

@@ -20,13 +21,17 @@ PhysicalCore::PhysicalCore(Core::System& system, std::size_t id,
                           Core::ExclusiveMonitor& exclusive_monitor)
    : core_index{id} {
#ifdef ARCHITECTURE_x86_64
    arm_interface = std::make_unique<Core::ARM_Dynarmic>(system, exclusive_monitor, core_index);
    arm_interface_32 =
        std::make_unique<Core::ARM_Dynarmic_32>(system, exclusive_monitor, core_index);
    arm_interface_64 =
        std::make_unique<Core::ARM_Dynarmic_64>(system, exclusive_monitor, core_index);

#else
    arm_interface = std::make_shared<Core::ARM_Unicorn>(system);
    LOG_WARNING(Core, "CPU JIT requested, but Dynarmic not available");
#endif

    scheduler = std::make_unique<Kernel::Scheduler>(system, *arm_interface, core_index);
    scheduler = std::make_unique<Kernel::Scheduler>(system, core_index);
}

PhysicalCore::~PhysicalCore() = default;

@@ -48,4 +53,12 @@ void PhysicalCore::Shutdown() {
    scheduler->Shutdown();
}

void PhysicalCore::SetIs64Bit(bool is_64_bit) {
    if (is_64_bit) {
        arm_interface = arm_interface_64.get();
    } else {
        arm_interface = arm_interface_32.get();
    }
}

} // namespace Kernel
@@ -68,10 +68,14 @@ public:
        return *scheduler;
    }

    void SetIs64Bit(bool is_64_bit);

private:
    std::size_t core_index;
    std::unique_ptr<Core::ARM_Interface> arm_interface;
    std::unique_ptr<Core::ARM_Interface> arm_interface_32;
    std::unique_ptr<Core::ARM_Interface> arm_interface_64;
    std::unique_ptr<Kernel::Scheduler> scheduler;
    Core::ARM_Interface* arm_interface{};
};

} // namespace Kernel
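A minimal sketch of the selection pattern shown above (all types here are simplified stand-ins): the physical core owns both backends and repoints a non-owning pointer at whichever one the current process needs, which is what the kernel does per core when a process is made current:

#include <iostream>
#include <memory>

struct ARM_Interface {
    virtual ~ARM_Interface() = default;
    virtual const char* Name() const = 0;
};
struct ARM_32 final : ARM_Interface { const char* Name() const override { return "AArch32"; } };
struct ARM_64 final : ARM_Interface { const char* Name() const override { return "AArch64"; } };

class PhysicalCore {
public:
    PhysicalCore()
        : arm_interface_32{std::make_unique<ARM_32>()},
          arm_interface_64{std::make_unique<ARM_64>()} {}

    void SetIs64Bit(bool is_64_bit) {
        arm_interface = is_64_bit ? arm_interface_64.get() : arm_interface_32.get();
    }

    ARM_Interface& ArmInterface() { return *arm_interface; }

private:
    std::unique_ptr<ARM_Interface> arm_interface_32;
    std::unique_ptr<ARM_Interface> arm_interface_64;
    ARM_Interface* arm_interface{}; // non-owning, selected per process
};

int main() {
    PhysicalCore core;
    core.SetIs64Bit(true);
    std::cout << core.ArmInterface().Name() << '\n'; // AArch64
    core.SetIs64Bit(false);
    std::cout << core.ArmInterface().Name() << '\n'; // AArch32
}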
@@ -42,7 +42,8 @@ void SetupMainThread(Process& owner_process, KernelCore& kernel, u32 priority) {

    // Register 1 must be a handle to the main thread
    const Handle thread_handle = owner_process.GetHandleTable().Create(thread).Unwrap();
    thread->GetContext().cpu_registers[1] = thread_handle;
    thread->GetContext32().cpu_registers[1] = thread_handle;
    thread->GetContext64().cpu_registers[1] = thread_handle;

    // Threads by default are dormant, wake up the main thread so it runs when the scheduler fires
    thread->ResumeFromWait();
@@ -383,8 +383,8 @@ void GlobalScheduler::Unlock() {
    // TODO(Blinkhawk): Setup the interrupts and change context on current core.
}

Scheduler::Scheduler(Core::System& system, Core::ARM_Interface& cpu_core, std::size_t core_id)
    : system(system), cpu_core(cpu_core), core_id(core_id) {}
Scheduler::Scheduler(Core::System& system, std::size_t core_id)
    : system{system}, core_id{core_id} {}

Scheduler::~Scheduler() = default;

@@ -422,9 +422,10 @@ void Scheduler::UnloadThread() {

    // Save context for previous thread
    if (previous_thread) {
        cpu_core.SaveContext(previous_thread->GetContext());
        system.ArmInterface(core_id).SaveContext(previous_thread->GetContext32());
        system.ArmInterface(core_id).SaveContext(previous_thread->GetContext64());
        // Save the TPIDR_EL0 system register in case it was modified.
        previous_thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
        previous_thread->SetTPIDR_EL0(system.ArmInterface(core_id).GetTPIDR_EL0());

        if (previous_thread->GetStatus() == ThreadStatus::Running) {
            // This is only the case when a reschedule is triggered without the current thread

@@ -451,9 +452,10 @@ void Scheduler::SwitchContext() {

    // Save context for previous thread
    if (previous_thread) {
        cpu_core.SaveContext(previous_thread->GetContext());
        system.ArmInterface(core_id).SaveContext(previous_thread->GetContext32());
        system.ArmInterface(core_id).SaveContext(previous_thread->GetContext64());
        // Save the TPIDR_EL0 system register in case it was modified.
        previous_thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
        previous_thread->SetTPIDR_EL0(system.ArmInterface(core_id).GetTPIDR_EL0());

        if (previous_thread->GetStatus() == ThreadStatus::Running) {
            // This is only the case when a reschedule is triggered without the current thread

@@ -481,9 +483,10 @@ void Scheduler::SwitchContext() {
            system.Kernel().MakeCurrentProcess(thread_owner_process);
        }

        cpu_core.LoadContext(new_thread->GetContext());
        cpu_core.SetTlsAddress(new_thread->GetTLSAddress());
        cpu_core.SetTPIDR_EL0(new_thread->GetTPIDR_EL0());
        system.ArmInterface(core_id).LoadContext(new_thread->GetContext32());
        system.ArmInterface(core_id).LoadContext(new_thread->GetContext64());
        system.ArmInterface(core_id).SetTlsAddress(new_thread->GetTLSAddress());
        system.ArmInterface(core_id).SetTPIDR_EL0(new_thread->GetTPIDR_EL0());
    } else {
        current_thread = nullptr;
        // Note: We do not reset the current process and current page table when idling because
@@ -181,7 +181,7 @@ private:

class Scheduler final {
public:
    explicit Scheduler(Core::System& system, Core::ARM_Interface& cpu_core, std::size_t core_id);
    explicit Scheduler(Core::System& system, std::size_t core_id);
    ~Scheduler();

    /// Returns whether there are any threads that are ready to run.

@@ -235,7 +235,6 @@ private:
    std::shared_ptr<Thread> selected_thread = nullptr;

    Core::System& system;
    Core::ARM_Interface& cpu_core;
    u64 last_context_switch_time = 0;
    u64 idle_selection_count = 0;
    const std::size_t core_id;
@@ -133,14 +133,15 @@ void Thread::CancelWait() {
    ResumeFromWait();
}

/**
 * Resets a thread context, making it ready to be scheduled and run by the CPU
 * @param context Thread context to reset
 * @param stack_top Address of the top of the stack
 * @param entry_point Address of entry point for execution
 * @param arg User argument for thread
 */
static void ResetThreadContext(Core::ARM_Interface::ThreadContext& context, VAddr stack_top,
static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top,
                                 u32 entry_point, u32 arg) {
    context = {};
    context.cpu_registers[0] = arg;
    context.cpu_registers[15] = entry_point;
    context.cpu_registers[13] = stack_top;
}

static void ResetThreadContext64(Core::ARM_Interface::ThreadContext64& context, VAddr stack_top,
                                 VAddr entry_point, u64 arg) {
    context = {};
    context.cpu_registers[0] = arg;

@@ -198,9 +199,9 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(KernelCore& kernel, std::strin

    thread->owner_process->RegisterThread(thread.get());

    // TODO(peachum): move to ScheduleThread() when scheduler is added so selected core is used
    // to initialize the context
    ResetThreadContext(thread->context, stack_top, entry_point, arg);
    ResetThreadContext32(thread->context_32, static_cast<u32>(stack_top),
                         static_cast<u32>(entry_point), static_cast<u32>(arg));
    ResetThreadContext64(thread->context_64, stack_top, entry_point, arg);

    return MakeResult<std::shared_ptr<Thread>>(std::move(thread));
}

@@ -213,11 +214,13 @@ void Thread::SetPriority(u32 priority) {
}

void Thread::SetWaitSynchronizationResult(ResultCode result) {
    context.cpu_registers[0] = result.raw;
    context_32.cpu_registers[0] = result.raw;
    context_64.cpu_registers[0] = result.raw;
}

void Thread::SetWaitSynchronizationOutput(s32 output) {
    context.cpu_registers[1] = output;
    context_32.cpu_registers[1] = output;
    context_64.cpu_registers[1] = output;
}

s32 Thread::GetSynchronizationObjectIndex(std::shared_ptr<SynchronizationObject> object) const {
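The new ResetThreadContext32() above encodes the AArch32 register conventions directly: the argument lands in r0, the stack top in r13 (SP) and the entry point in r15 (PC). A standalone sketch with a stand-in context type and made-up addresses:

#include <array>
#include <cstdint>
#include <iostream>

struct ThreadContext32 { std::array<std::uint32_t, 16> cpu_registers{}; };

void ResetThreadContext32(ThreadContext32& context, std::uint32_t stack_top,
                          std::uint32_t entry_point, std::uint32_t arg) {
    context = {};
    context.cpu_registers[0] = arg;          // r0: first argument
    context.cpu_registers[13] = stack_top;   // r13: stack pointer
    context.cpu_registers[15] = entry_point; // r15: program counter
}

int main() {
    ThreadContext32 ctx;
    ResetThreadContext32(ctx, 0x10000000, 0x00080000, 42); // example values only
    std::cout << std::hex << ctx.cpu_registers[15] << '\n'; // 80000
}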
@@ -102,7 +102,8 @@ public:

    using MutexWaitingThreads = std::vector<std::shared_ptr<Thread>>;

    using ThreadContext = Core::ARM_Interface::ThreadContext;
    using ThreadContext32 = Core::ARM_Interface::ThreadContext32;
    using ThreadContext64 = Core::ARM_Interface::ThreadContext64;

    using ThreadSynchronizationObjects = std::vector<std::shared_ptr<SynchronizationObject>>;

@@ -273,12 +274,20 @@ public:
        return status == ThreadStatus::WaitSynch;
    }

    ThreadContext& GetContext() {
        return context;
    ThreadContext32& GetContext32() {
        return context_32;
    }

    const ThreadContext& GetContext() const {
        return context;
    const ThreadContext32& GetContext32() const {
        return context_32;
    }

    ThreadContext64& GetContext64() {
        return context_64;
    }

    const ThreadContext64& GetContext64() const {
        return context_64;
    }

    ThreadStatus GetStatus() const {

@@ -466,7 +475,8 @@ private:
    void AdjustSchedulingOnPriority(u32 old_priority);
    void AdjustSchedulingOnAffinity(u64 old_affinity_mask, s32 old_core);

    Core::ARM_Interface::ThreadContext context{};
    ThreadContext32 context_32{};
    ThreadContext64 context_64{};

    u64 thread_id = 0;
@@ -111,7 +111,7 @@ json GetProcessorStateDataAuto(Core::System& system) {
    const auto& vm_manager{process->VMManager()};
    auto& arm{system.CurrentArmInterface()};

    Core::ARM_Interface::ThreadContext context{};
    Core::ARM_Interface::ThreadContext64 context{};
    arm.SaveContext(context);

    return GetProcessorStateData(process->Is64BitProcess() ? "AArch64" : "AArch32",
@@ -116,7 +116,7 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeCallstack::GetChildren() cons

    constexpr std::size_t BaseRegister = 29;
    auto& memory = Core::System::GetInstance().Memory();
    u64 base_pointer = thread.GetContext().cpu_registers[BaseRegister];
    u64 base_pointer = thread.GetContext64().cpu_registers[BaseRegister];

    while (base_pointer != 0) {
        const u64 lr = memory.Read64(base_pointer + sizeof(u64));

@@ -240,7 +240,7 @@ QString WaitTreeThread::GetText() const {
        break;
    }

    const auto& context = thread.GetContext();
    const auto& context = thread.GetContext64();
    const QString pc_info = tr(" PC = 0x%1 LR = 0x%2")
                                .arg(context.pc, 8, 16, QLatin1Char{'0'})
                                .arg(context.cpu_registers[30], 8, 16, QLatin1Char{'0'});
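The callstack walk above relies on the AArch64 frame-pointer chain: x29 points at a record whose first word is the previous x29 and whose second word is the saved x30 (the return address). A toy standalone version of that loop over a fake flat memory array (addresses and values are invented for the example):

#include <cstdint>
#include <iostream>
#include <vector>

// Fake flat memory, indexed by address / 8.
std::vector<std::uint64_t> memory(64, 0);

std::uint64_t Read64(std::uint64_t addr) {
    return memory[addr / 8];
}

int main() {
    // Two fake frames: the frame at 0x100 links to the frame at 0x180, which terminates the chain.
    memory[0x100 / 8] = 0x180;  // saved x29 (previous frame pointer)
    memory[0x108 / 8] = 0x1111; // saved x30 (return address)
    memory[0x180 / 8] = 0;      // end of chain
    memory[0x188 / 8] = 0x2222;

    std::uint64_t base_pointer = 0x100; // would come from cpu_registers[29]
    while (base_pointer != 0) {
        const std::uint64_t lr = Read64(base_pointer + sizeof(std::uint64_t));
        std::cout << "return address: 0x" << std::hex << lr << '\n';
        base_pointer = Read64(base_pointer);
    }
}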