// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "common/assert.h"
|
|
|
|
#include "core/arm/exclusive_monitor.h"
|
|
|
|
#include "core/core.h"
|
|
|
|
#include "core/core_cpu.h"
|
2019-09-10 01:37:29 +00:00
|
|
|
#include "core/core_timing.h"
|
2018-11-22 06:27:23 +00:00
|
|
|
#include "core/cpu_core_manager.h"
|
|
|
|
#include "core/gdbstub/gdbstub.h"
|
|
|
|
#include "core/settings.h"
|
|
|
|
|
|
|
|
namespace Core {
namespace {

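// Entry point for each secondary core's host thread in multi-core mode; keeps
// running the given core until the emulated system reports it is powered off.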
void RunCpuCore(const System& system, Cpu& cpu_state) {
    while (system.IsPoweredOn()) {
        cpu_state.RunLoop(true);
    }
}

} // Anonymous namespace

CpuCoreManager::CpuCoreManager(System& system) : system{system} {}

CpuCoreManager::~CpuCoreManager() = default;

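// Creates the per-core CPU state and the exclusive monitor shared between cores.
// Thread creation is deferred to StartThreads().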
void CpuCoreManager::Initialize() {
    barrier = std::make_unique<CpuBarrier>();
    exclusive_monitor = Cpu::MakeExclusiveMonitor(system.Memory(), cores.size());

    for (std::size_t index = 0; index < cores.size(); ++index) {
        cores[index] = std::make_unique<Cpu>(system, *exclusive_monitor, *barrier, index);
    }
}

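// Registers the calling thread as core 0's host thread and, when multi-core is
// enabled, spawns one host thread per remaining core.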
void CpuCoreManager::StartThreads() {
    // Create threads for CPU cores 1-3, and build thread_to_cpu map
    // CPU core 0 is run on the main thread
    thread_to_cpu[std::this_thread::get_id()] = cores[0].get();

    if (!Settings::values.use_multi_core) {
        return;
    }

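    // core_threads only covers the secondary cores; cores[0] stays on the calling
    // thread, so entry N drives emulated core N + 1.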
    for (std::size_t index = 0; index < core_threads.size(); ++index) {
        core_threads[index] = std::make_unique<std::thread>(RunCpuCore, std::cref(system),
                                                            std::ref(*cores[index + 1]));
        thread_to_cpu[core_threads[index]->get_id()] = cores[index + 1].get();
    }
}

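// Tears everything down in reverse order: stop and join the worker threads, shut
// down and release each core, then release the shared exclusive monitor and barrier.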
void CpuCoreManager::Shutdown() {
    barrier->NotifyEnd();

    if (Settings::values.use_multi_core) {
        for (auto& thread : core_threads) {
            thread->join();
            thread.reset();
        }
    }

    thread_to_cpu.clear();

    for (auto& cpu_core : cores) {
        cpu_core->Shutdown();
        cpu_core.reset();
    }

    exclusive_monitor.reset();
    barrier.reset();
}

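// Bounds-checked access to an individual core by index.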
Cpu& CpuCoreManager::GetCore(std::size_t index) {
    return *cores.at(index);
}

const Cpu& CpuCoreManager::GetCore(std::size_t index) const {
    return *cores.at(index);
}

ExclusiveMonitor& CpuCoreManager::GetExclusiveMonitor() {
    return *exclusive_monitor;
}

const ExclusiveMonitor& CpuCoreManager::GetExclusiveMonitor() const {
    return *exclusive_monitor;
}

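// Resolves the core the calling host thread is responsible for. In multi-core mode
// this is looked up via thread_to_cpu; in single-core mode it is whichever core
// RunLoop last scheduled (active_core).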
Cpu& CpuCoreManager::GetCurrentCore() {
    if (Settings::values.use_multi_core) {
        const auto& search = thread_to_cpu.find(std::this_thread::get_id());
        ASSERT(search != thread_to_cpu.end());
        ASSERT(search->second);
        return *search->second;
    }

    // Otherwise, use single-threaded mode active_core variable
    return *cores[active_core];
}

const Cpu& CpuCoreManager::GetCurrentCore() const {
    if (Settings::values.use_multi_core) {
        const auto& search = thread_to_cpu.find(std::this_thread::get_id());
        ASSERT(search != thread_to_cpu.end());
        ASSERT(search->second);
        return *search->second;
    }

    // Otherwise, use single-threaded mode active_core variable
    return *cores[active_core];
}

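// Drives emulation from the calling thread: services the GDB stub if it is active,
// then steps each core under its core timing context until no context has work left.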
void CpuCoreManager::RunLoop(bool tight_loop) {
    // Update thread_to_cpu in case Core 0 is run from a different host thread
    thread_to_cpu[std::this_thread::get_id()] = cores[0].get();

    if (GDBStub::IsServerEnabled()) {
        GDBStub::HandlePacket();

        // If the loop is halted and we want to step, use a tiny (1) number of instructions to
        // execute. Otherwise, get out of the loop function.
        if (GDBStub::GetCpuHaltFlag()) {
            if (GDBStub::GetCpuStepFlag()) {
                tight_loop = false;
            } else {
                return;
            }
        }
    }

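    // Round-robin over the cores: switch the core timing context to each core and run
    // it while that context reports it can still make progress; repeat until no
    // context has work left for this slice.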
    auto& core_timing = system.CoreTiming();
    core_timing.ResetRun();
    bool keep_running{};
    do {
        keep_running = false;
        for (active_core = 0; active_core < NUM_CPU_CORES; ++active_core) {
            core_timing.SwitchContext(active_core);
            if (core_timing.CanCurrentContextRun()) {
                cores[active_core]->RunLoop(tight_loop);
            }
            keep_running |= core_timing.CanCurrentContextRun();
        }
    } while (keep_running);

    if (GDBStub::IsServerEnabled()) {
        GDBStub::SetCpuStepFlag(false);
    }
}

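// Clears the instruction caches of all cores, typically needed after guest code
// memory has been modified.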
void CpuCoreManager::InvalidateAllInstructionCaches() {
    for (auto& cpu : cores) {
        cpu->ArmInterface().ClearInstructionCache();
    }
}

} // namespace Core