video_core/gpu: Create threads separately from initialization
Like with CPU emulation, we generally don't want to fire off the threads immediately after the relevant classes are initialized; we want to do this only after all necessary data is done loading. This splits thread creation into its own interface member function, allowing control over when these threads in particular get created.
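The overall shape is a two-phase initialization: the GPU object is constructed up front, and its threads are only launched once a new Start() member function is called. A minimal sketch of the pattern, using simplified hypothetical class names rather than the actual yuzu types:

#include <thread>

// Interface: construction only wires up state, Start() launches any threads.
class Gpu {
public:
    virtual ~Gpu() = default;
    virtual void Start() = 0;
};

// Asynchronous backend: the worker thread is created in Start(), not the constructor.
class AsyncGpu final : public Gpu {
public:
    void Start() override {
        worker = std::thread([] { /* pump the command queue */ });
    }
    ~AsyncGpu() override {
        // joinable() is false if Start() was never called, so teardown stays safe.
        if (worker.joinable()) {
            worker.join();
        }
    }

private:
    std::thread worker;
};

// Caller: create early, start late.
//     auto gpu = std::make_unique<AsyncGpu>();
//     ... load the game and set up the main process ...
//     gpu->Start();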
commit 6d0551196d
parent f2331a804a
src/core/core.cpp

@@ -3,9 +3,7 @@
// Refer to the license.txt file included.

#include <array>
#include <map>
#include <memory>
#include <thread>
#include <utility>

#include "common/file_util.h"

@@ -38,8 +36,6 @@
#include "frontend/applets/software_keyboard.h"
#include "frontend/applets/web_browser.h"
#include "video_core/debug_utils/debug_utils.h"
#include "video_core/gpu_asynch.h"
#include "video_core/gpu_synch.h"
#include "video_core/renderer_base.h"
#include "video_core/video_core.h"

@@ -135,13 +131,9 @@ struct System::Impl {
            return ResultStatus::ErrorVideoCore;
        }

        is_powered_on = true;
        gpu_core = VideoCore::CreateGPU(system);

        if (Settings::values.use_asynchronous_gpu_emulation) {
            gpu_core = std::make_unique<VideoCommon::GPUAsynch>(system, *renderer);
        } else {
            gpu_core = std::make_unique<VideoCommon::GPUSynch>(system, *renderer);
        }
        is_powered_on = true;

        LOG_DEBUG(Core, "Initialized OK");

@@ -188,7 +180,8 @@ struct System::Impl {
        }

        // Main process has been loaded and been made current.
        // Begin CPU execution.
        // Begin GPU and CPU execution.
        gpu_core->Start();
        cpu_core_manager.StartThreads();

        status = ResultStatus::Success;
src/video_core/gpu.h

@@ -207,6 +207,11 @@ public:
        };
    } regs{};

    /// Performs any additional setup necessary in order to begin GPU emulation.
    /// This can be used to launch any necessary threads and register any necessary
    /// core timing events.
    virtual void Start() = 0;

    /// Push GPU command entries to be processed
    virtual void PushGPUEntries(Tegra::CommandList&& entries) = 0;
src/video_core/gpu_asynch.cpp

@@ -9,10 +9,14 @@
namespace VideoCommon {

GPUAsynch::GPUAsynch(Core::System& system, VideoCore::RendererBase& renderer)
    : Tegra::GPU(system, renderer), gpu_thread{system, renderer, *dma_pusher} {}
    : GPU(system, renderer), gpu_thread{system} {}

GPUAsynch::~GPUAsynch() = default;

void GPUAsynch::Start() {
    gpu_thread.StartThread(renderer, *dma_pusher);
}

void GPUAsynch::PushGPUEntries(Tegra::CommandList&& entries) {
    gpu_thread.SubmitList(std::move(entries));
}
src/video_core/gpu_asynch.h

@@ -13,16 +13,13 @@ class RendererBase;

namespace VideoCommon {

namespace GPUThread {
class ThreadManager;
} // namespace GPUThread

/// Implementation of GPU interface that runs the GPU asynchronously
class GPUAsynch : public Tegra::GPU {
public:
    explicit GPUAsynch(Core::System& system, VideoCore::RendererBase& renderer);
    ~GPUAsynch() override;

    void Start() override;
    void PushGPUEntries(Tegra::CommandList&& entries) override;
    void SwapBuffers(
        std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer) override;
src/video_core/gpu_synch.cpp

@@ -8,10 +8,12 @@
namespace VideoCommon {

GPUSynch::GPUSynch(Core::System& system, VideoCore::RendererBase& renderer)
    : Tegra::GPU(system, renderer) {}
    : GPU(system, renderer) {}

GPUSynch::~GPUSynch() = default;

void GPUSynch::Start() {}

void GPUSynch::PushGPUEntries(Tegra::CommandList&& entries) {
    dma_pusher->Push(std::move(entries));
    dma_pusher->DispatchCalls();
src/video_core/gpu_synch.h

@@ -18,6 +18,7 @@ public:
    explicit GPUSynch(Core::System& system, VideoCore::RendererBase& renderer);
    ~GPUSynch() override;

    void Start() override;
    void PushGPUEntries(Tegra::CommandList&& entries) override;
    void SwapBuffers(
        std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer) override;
src/video_core/gpu_thread.cpp

@@ -55,19 +55,24 @@ static void RunThread(VideoCore::RendererBase& renderer, Tegra::DmaPusher& dma_pusher
    }
}

ThreadManager::ThreadManager(Core::System& system, VideoCore::RendererBase& renderer,
                             Tegra::DmaPusher& dma_pusher)
    : system{system}, thread{RunThread, std::ref(renderer), std::ref(dma_pusher), std::ref(state)} {
    synchronization_event = system.CoreTiming().RegisterEvent(
        "GPUThreadSynch", [this](u64 fence, s64) { state.WaitForSynchronization(fence); });
}
ThreadManager::ThreadManager(Core::System& system) : system{system} {}

ThreadManager::~ThreadManager() {
    if (!thread.joinable()) {
        return;
    }

    // Notify GPU thread that a shutdown is pending
    PushCommand(EndProcessingCommand());
    thread.join();
}

void ThreadManager::StartThread(VideoCore::RendererBase& renderer, Tegra::DmaPusher& dma_pusher) {
    thread = std::thread{RunThread, std::ref(renderer), std::ref(dma_pusher), std::ref(state)};
    synchronization_event = system.CoreTiming().RegisterEvent(
        "GPUThreadSynch", [this](u64 fence, s64) { state.WaitForSynchronization(fence); });
}

void ThreadManager::SubmitList(Tegra::CommandList&& entries) {
    const u64 fence{PushCommand(SubmitListCommand(std::move(entries)))};
    const s64 synchronization_ticks{Core::Timing::usToCycles(9000)};
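The deferred start works because a default-constructed std::thread owns no thread of execution, so the joinable() check in the destructor naturally covers the case where StartThread() was never called. A small standalone illustration of that property (not project code):

#include <cassert>
#include <thread>

int main() {
    std::thread t;          // owns no thread yet
    assert(!t.joinable());  // destroying it in this state would be fine

    t = std::thread([] {}); // started later, analogous to StartThread()
    assert(t.joinable());
    t.join();               // must join (or detach) once it owns a thread
}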
src/video_core/gpu_thread.h

@@ -138,10 +138,12 @@ struct SynchState final {
/// Class used to manage the GPU thread
class ThreadManager final {
public:
    explicit ThreadManager(Core::System& system, VideoCore::RendererBase& renderer,
                           Tegra::DmaPusher& dma_pusher);
    explicit ThreadManager(Core::System& system);
    ~ThreadManager();

    /// Creates and starts the GPU thread.
    void StartThread(VideoCore::RendererBase& renderer, Tegra::DmaPusher& dma_pusher);

    /// Push GPU command entries to be processed
    void SubmitList(Tegra::CommandList&& entries);
src/video_core/video_core.cpp

@@ -5,6 +5,8 @@
#include <memory>
#include "core/core.h"
#include "core/settings.h"
#include "video_core/gpu_asynch.h"
#include "video_core/gpu_synch.h"
#include "video_core/renderer_base.h"
#include "video_core/renderer_opengl/renderer_opengl.h"
#include "video_core/video_core.h"

@@ -16,6 +18,14 @@ std::unique_ptr<RendererBase> CreateRenderer(Core::Frontend::EmuWindow& emu_window,
    return std::make_unique<OpenGL::RendererOpenGL>(emu_window, system);
}

std::unique_ptr<Tegra::GPU> CreateGPU(Core::System& system) {
    if (Settings::values.use_asynchronous_gpu_emulation) {
        return std::make_unique<VideoCommon::GPUAsynch>(system, system.Renderer());
    }

    return std::make_unique<VideoCommon::GPUSynch>(system, system.Renderer());
}

u16 GetResolutionScaleFactor(const RendererBase& renderer) {
    return static_cast<u16>(
        Settings::values.resolution_factor
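Putting the pieces together, the call sites in core.cpp (shown in the hunks above) now reduce to roughly the following: creation during initialization, and a separate Start() later in the load path once the main process has been made current:

// In System::Impl initialization, after the renderer is created:
gpu_core = VideoCore::CreateGPU(system);
is_powered_on = true;

// Later, once the main process has been loaded and made current:
gpu_core->Start();
cpu_core_manager.StartThreads();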
src/video_core/video_core.h

@@ -14,6 +14,10 @@ namespace Core::Frontend {
class EmuWindow;
}

namespace Tegra {
class GPU;
}

namespace VideoCore {

class RendererBase;

@@ -27,6 +31,9 @@ class RendererBase;
std::unique_ptr<RendererBase> CreateRenderer(Core::Frontend::EmuWindow& emu_window,
                                             Core::System& system);

/// Creates an emulated GPU instance using the given system context.
std::unique_ptr<Tegra::GPU> CreateGPU(Core::System& system);

u16 GetResolutionScaleFactor(const RendererBase& renderer);

} // namespace VideoCore