dynarmic: Better interrupts
parent 51a8dd4919
commit f8b8af47ad
externals/dynarmic (vendored submodule): 2 changes
@@ -1 +1 @@
-Subproject commit 8bcd46b7e9dc487da217b216c908f2ef15e7a8cf
+Subproject commit 644172477eaf0d822178cb7e96c62b75caa96573
@@ -171,6 +171,9 @@ public:
     /// Prepare core for thread reschedule (if needed to correctly handle state)
     virtual void PrepareReschedule() = 0;
 
+    /// Signal an interrupt and ask the core to halt as soon as possible.
+    virtual void SignalInterrupt() = 0;
+
     struct BacktraceEntry {
         std::string module;
         u64 address;
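For orientation, here is a minimal, self-contained sketch of the pattern this interface change enables: an interrupt source asks a core to halt as soon as possible, and the core's run loop honors that request. This is not code from the commit; apart from the PrepareReschedule/SignalInterrupt names, every type here is an invented stand-in.

// Illustrative sketch only; not yuzu's ARM_Interface.
#include <atomic>
#include <cstdio>

class CoreInterface {
public:
    virtual ~CoreInterface() = default;
    virtual void PrepareReschedule() = 0;
    /// Signal an interrupt and ask the core to halt as soon as possible.
    virtual void SignalInterrupt() = 0;
};

class DummyCore final : public CoreInterface {
public:
    void PrepareReschedule() override { halt_requested = true; }
    void SignalInterrupt() override { halt_requested = true; }

    void Run() {
        // A real JIT stops between blocks of guest code; here we just poll a flag.
        while (!halt_requested) {
            // ... execute guest code ...
        }
        std::puts("core halted");
    }

    std::atomic<bool> halt_requested{false};
};

int main() {
    DummyCore core;
    core.SignalInterrupt(); // normally called from another thread
    core.Run();
}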
@@ -88,9 +88,8 @@ public:
     }
 
     void AddTicks(u64 ticks) override {
-        if (parent.uses_wall_clock) {
-            return;
-        }
+        ASSERT_MSG(!parent.uses_wall_clock, "This should never happen - dynarmic ticking disabled");
+
         // Divide the number of ticks by the amount of CPU cores. TODO(Subv): This yields only a
         // rough approximation of the amount of executed ticks in the system, it may be thrown off
         // if not all cores are doing a similar amount of work. Instead of doing this, we should
@@ -106,12 +105,8 @@ public:
     }
 
     u64 GetTicksRemaining() override {
-        if (parent.uses_wall_clock) {
-            if (!parent.interrupt_handlers[parent.core_index].IsInterrupted()) {
-                return minimum_run_cycles;
-            }
-            return 0U;
-        }
+        ASSERT_MSG(!parent.uses_wall_clock, "This should never happen - dynarmic ticking disabled");
+
         return std::max<s64>(parent.system.CoreTiming().GetDowncount(), 0);
     }
 
@@ -146,6 +141,7 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable*
 
     // Timing
     config.wall_clock_cntpct = uses_wall_clock;
+    config.enable_cycle_counting = !uses_wall_clock;
 
     // Code cache size
    config.code_cache_size = 512_MiB;
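The new enable_cycle_counting flag is the other half of the AddTicks/GetTicksRemaining change above: when the host wall clock drives CNTPCT, cycle counting is switched off, so dynarmic is not expected to call those callbacks at all, which is what the new ASSERT_MSG encodes. A small standalone sketch of that invariant follows; the config field names come from the hunk above, but the surrounding types and functions are invented for illustration.

// Illustrative sketch only; not dynarmic's real configuration structs.
#include <cassert>
#include <cstdio>

struct TimingConfig {
    bool wall_clock_cntpct = false;    // read CNTPCT from the host clock
    bool enable_cycle_counting = true; // drive time via AddTicks/GetTicksRemaining
};

TimingConfig MakeTimingConfig(bool uses_wall_clock) {
    TimingConfig config{};
    config.wall_clock_cntpct = uses_wall_clock;
    config.enable_cycle_counting = !uses_wall_clock;
    return config;
}

// Stands in for the JIT: only the cycle-counting path calls back into AddTicks.
void RunOneBlock(const TimingConfig& config, bool& add_ticks_called) {
    if (config.enable_cycle_counting) {
        add_ticks_called = true; // would call callbacks->AddTicks(...)
    }
}

int main() {
    bool add_ticks_called = false;
    RunOneBlock(MakeTimingConfig(/*uses_wall_clock=*/true), add_ticks_called);
    // Matches the ASSERT_MSG above: with the wall clock active, AddTicks is never reached.
    assert(!add_ticks_called);
    std::puts("wall-clock mode: no tick callbacks");
}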
@@ -222,13 +218,13 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable*
 
 void ARM_Dynarmic_32::Run() {
     while (true) {
-        jit->Run();
+        const auto hr = jit->Run();
         if (!svc_called) {
             break;
         }
         svc_called = false;
         Kernel::Svc::Call(system, svc_swi);
-        if (shutdown) {
+        if (shutdown || Has(hr, Dynarmic::HaltReason::UserDefined2)) {
             break;
         }
     }
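jit->Run() now reports why it stopped, and the loop also exits when the returned halt reasons include the bit that the new SignalInterrupt() raises (UserDefined2, implemented in the next hunk). As a rough illustration of the flag-testing pattern only: the enum values and the Has() helper below are re-declared stand-ins, not dynarmic's actual definitions.

// Illustrative sketch of a halt-reason bitmask.
#include <cstdint>
#include <cstdio>

enum class HaltReason : std::uint32_t {
    Step = 1u << 0,
    MemoryAbort = 1u << 1,
    UserDefined2 = 1u << 2, // the bit a SignalInterrupt()-style halt would set
};

constexpr HaltReason operator|(HaltReason a, HaltReason b) {
    return static_cast<HaltReason>(static_cast<std::uint32_t>(a) | static_cast<std::uint32_t>(b));
}

constexpr bool Has(HaltReason value, HaltReason bit) {
    return (static_cast<std::uint32_t>(value) & static_cast<std::uint32_t>(bit)) != 0;
}

int main() {
    // Pretend the JIT stopped for two reasons at once.
    const HaltReason hr = HaltReason::Step | HaltReason::UserDefined2;
    if (Has(hr, HaltReason::UserDefined2)) {
        std::puts("interrupt requested: leave the run loop");
    }
}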
@@ -318,6 +314,10 @@ void ARM_Dynarmic_32::PrepareReschedule() {
     shutdown = true;
 }
 
+void ARM_Dynarmic_32::SignalInterrupt() {
+    jit->HaltExecution(Dynarmic::HaltReason::UserDefined2);
+}
+
 void ARM_Dynarmic_32::ClearInstructionCache() {
     jit->ClearCache();
 }
@@ -57,6 +57,7 @@ public:
     void LoadContext(const ThreadContext64& ctx) override {}
 
     void PrepareReschedule() override;
+    void SignalInterrupt() override;
     void ClearExclusiveState() override;
 
     void ClearInstructionCache() override;
@@ -130,9 +130,7 @@ public:
     }
 
     void AddTicks(u64 ticks) override {
-        if (parent.uses_wall_clock) {
-            return;
-        }
+        ASSERT_MSG(!parent.uses_wall_clock, "This should never happen - dynarmic ticking disabled");
 
         // Divide the number of ticks by the amount of CPU cores. TODO(Subv): This yields only a
         // rough approximation of the amount of executed ticks in the system, it may be thrown off
@@ -147,12 +145,8 @@ public:
     }
 
     u64 GetTicksRemaining() override {
-        if (parent.uses_wall_clock) {
-            if (!parent.interrupt_handlers[parent.core_index].IsInterrupted()) {
-                return minimum_run_cycles;
-            }
-            return 0U;
-        }
+        ASSERT_MSG(!parent.uses_wall_clock, "This should never happen - dynarmic ticking disabled");
+
         return std::max<s64>(parent.system.CoreTiming().GetDowncount(), 0);
     }
 
@@ -208,6 +202,7 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable*
 
     // Timing
     config.wall_clock_cntpct = uses_wall_clock;
+    config.enable_cycle_counting = !uses_wall_clock;
 
     // Code cache size
     config.code_cache_size = 512_MiB;
@@ -284,13 +279,13 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable*
 
 void ARM_Dynarmic_64::Run() {
     while (true) {
-        jit->Run();
+        const auto hr = jit->Run();
         if (!svc_called) {
             break;
         }
         svc_called = false;
         Kernel::Svc::Call(system, svc_swi);
-        if (shutdown) {
+        if (shutdown || Has(hr, Dynarmic::HaltReason::UserDefined2)) {
             break;
         }
     }
@@ -385,6 +380,10 @@ void ARM_Dynarmic_64::PrepareReschedule() {
     shutdown = true;
 }
 
+void ARM_Dynarmic_64::SignalInterrupt() {
+    jit->HaltExecution(Dynarmic::HaltReason::UserDefined2);
+}
+
 void ARM_Dynarmic_64::ClearInstructionCache() {
     jit->ClearCache();
 }
@@ -51,6 +51,7 @@ public:
     void LoadContext(const ThreadContext64& ctx) override;
 
     void PrepareReschedule() override;
+    void SignalInterrupt() override;
     void ClearExclusiveState() override;
 
     void ClearInstructionCache() override;
@@ -58,6 +58,7 @@ bool PhysicalCore::IsInterrupted() const {
 void PhysicalCore::Interrupt() {
     guard->lock();
     interrupts[core_index].SetInterrupt(true);
+    arm_interface->SignalInterrupt();
     guard->unlock();
 }
 
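Taken together, the flow is: PhysicalCore::Interrupt() records the pending interrupt and calls SignalInterrupt(), which halts the JIT with UserDefined2, so the Run() loops above leave guest code at the next opportunity instead of waiting for a timer slice. A condensed, self-contained sketch of that flow with stand-in types (nothing here is yuzu's real PhysicalCore or dynarmic's JIT):

// Illustrative end-to-end sketch only; all types are invented stand-ins.
#include <atomic>
#include <cstdio>

struct FakeJit {
    std::atomic<bool> halted{false};
    void HaltExecution() { halted = true; }      // like jit->HaltExecution(UserDefined2)
    bool Run() { return halted.exchange(false); } // returns "was halted by an interrupt"
};

struct FakeCore {
    FakeJit jit;
    std::atomic<bool> interrupt_pending{false};

    void Interrupt() {               // cf. PhysicalCore::Interrupt()
        interrupt_pending = true;    // interrupts[core_index].SetInterrupt(true)
        jit.HaltExecution();         // arm_interface->SignalInterrupt()
    }

    void RunLoop() {                 // cf. ARM_Dynarmic_*::Run()
        while (true) {
            const bool interrupted = jit.Run();
            if (interrupted) {
                break;               // hand control back to the scheduler
            }
        }
    }
};

int main() {
    FakeCore core;
    core.Interrupt(); // normally done from another thread
    core.RunLoop();
    std::puts(core.interrupt_pending ? "interrupt pending for the scheduler" : "no interrupt");
}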