Core/HostTiming: Allow events to be advanced manually.

parent 1f7dd36499
commit 49a7e0984a

@@ -110,7 +110,7 @@ Fiber::Fiber(std::function<void(void*)>&& entry_point_func, void* start_paramete
                                       FiberStartFunc);
 }
 
-Fiber::Fiber() : guard{}, entry_point{}, start_parameter{}, previous_fiber{} {
+Fiber::Fiber() {
     impl = std::make_unique<FiberImpl>();
 }
 

@@ -42,14 +42,15 @@ public:
 
     u64 GetClockCycles() override {
         std::chrono::nanoseconds time_now = GetTimeNS();
-        const u128 temporal = Common::Multiply64Into128(time_now.count(), emulated_clock_frequency);
-        return Common::Divide128On32(temporal, 1000000000).first;
+        const u128 temporary =
+            Common::Multiply64Into128(time_now.count(), emulated_clock_frequency);
+        return Common::Divide128On32(temporary, 1000000000).first;
     }
 
     u64 GetCPUCycles() override {
         std::chrono::nanoseconds time_now = GetTimeNS();
-        const u128 temporal = Common::Multiply64Into128(time_now.count(), emulated_cpu_frequency);
-        return Common::Divide128On32(temporal, 1000000000).first;
+        const u128 temporary = Common::Multiply64Into128(time_now.count(), emulated_cpu_frequency);
+        return Common::Divide128On32(temporary, 1000000000).first;
     }
 
 private:

@@ -42,7 +42,7 @@ CoreTiming::CoreTiming() {
 CoreTiming::~CoreTiming() = default;
 
 void CoreTiming::ThreadEntry(CoreTiming& instance) {
-    instance.Advance();
+    instance.ThreadLoop();
 }
 
 void CoreTiming::Initialize() {

@@ -137,11 +137,8 @@ void CoreTiming::RemoveEvent(const std::shared_ptr<EventType>& event_type) {
     basic_lock.unlock();
 }
 
-void CoreTiming::Advance() {
-    has_started = true;
-    while (!shutting_down) {
-        while (!paused) {
-            paused_set = false;
+std::optional<u64> CoreTiming::Advance() {
+    advance_lock.lock();
     basic_lock.lock();
     global_timer = GetGlobalTimeNs().count();
 

@@ -159,16 +156,30 @@ void CoreTiming::Advance() {
     }
 
     if (!event_queue.empty()) {
-        std::chrono::nanoseconds next_time =
-            std::chrono::nanoseconds(event_queue.front().time - global_timer);
+        const u64 next_time = event_queue.front().time - global_timer;
         basic_lock.unlock();
-        event.WaitFor(next_time);
+        advance_lock.unlock();
+        return next_time;
     } else {
         basic_lock.unlock();
+        advance_lock.unlock();
+        return std::nullopt;
+    }
+}
+
+void CoreTiming::ThreadLoop() {
+    has_started = true;
+    while (!shutting_down) {
+        while (!paused) {
+            paused_set = false;
+            const auto next_time = Advance();
+            if (next_time) {
+                std::chrono::nanoseconds next_time_ns = std::chrono::nanoseconds(*next_time);
+                event.WaitFor(next_time_ns);
+            } else {
                 wait_set = true;
                 event.Wait();
             }
-
             wait_set = false;
         }
         paused_set = true;

@@ -103,6 +103,9 @@ public:
     /// Returns current time in nanoseconds.
     std::chrono::nanoseconds GetGlobalTimeNs() const;
 
+    /// Checks for events manually and returns time in nanoseconds for next event, threadsafe.
+    std::optional<u64> Advance();
+
 private:
     struct Event;
 

@@ -110,7 +113,7 @@ private:
     void ClearPendingEvents();
 
     static void ThreadEntry(CoreTiming& instance);
-    void Advance();
+    void ThreadLoop();
 
     std::unique_ptr<Common::WallClock> clock;
 

@@ -128,6 +131,7 @@ private:
     std::shared_ptr<EventType> ev_lost;
     Common::Event event{};
     Common::SpinLock basic_lock{};
+    Common::SpinLock advance_lock{};
     std::unique_ptr<std::thread> timer_thread;
     std::atomic<bool> paused{};
     std::atomic<bool> paused_set{};
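
The change, in short: Advance() no longer owns the sleep/poll loop. It drains every event that is due, returns the nanoseconds until the next pending event (or std::nullopt when the queue is empty), and ThreadLoop() is the only caller that actually waits on that value, which lets other code advance events by hand. The sketch below is a minimal standalone illustration of that contract, not yuzu code: ManualTimer, ScheduleEvent, the plain std::mutex and the caller-supplied now_ns clock are assumptions made for this example, standing in for CoreTiming, its spinlocks and its wall clock.

// Minimal standalone sketch of the "return the wait time" pattern (hypothetical names, not the yuzu API).
#include <cstdint>
#include <functional>
#include <mutex>
#include <optional>
#include <queue>
#include <utility>
#include <vector>

using u64 = std::uint64_t;

class ManualTimer {
public:
    // Queue a callback to fire once the clock reaches time_ns.
    void ScheduleEvent(u64 time_ns, std::function<void()> callback) {
        std::scoped_lock lock{mutex};
        queue.push(Event{time_ns, std::move(callback)});
    }

    // Run every event whose deadline has passed and return the number of
    // nanoseconds until the next pending event, or std::nullopt if none remain.
    // Mirrors the std::optional<u64> Advance() contract introduced in the diff.
    std::optional<u64> Advance(u64 now_ns) {
        std::unique_lock lock{mutex};
        while (!queue.empty() && queue.top().time_ns <= now_ns) {
            Event evt = queue.top();
            queue.pop();
            lock.unlock(); // do not hold the lock while running the callback
            evt.callback();
            lock.lock();
        }
        if (queue.empty()) {
            return std::nullopt;
        }
        return queue.top().time_ns - now_ns;
    }

private:
    struct Event {
        u64 time_ns;
        std::function<void()> callback;
        bool operator>(const Event& other) const {
            return time_ns > other.time_ns;
        }
    };

    std::mutex mutex;
    // Min-heap on time_ns: the earliest deadline is always at top().
    std::priority_queue<Event, std::vector<Event>, std::greater<>> queue;
};

int main() {
    ManualTimer timer;
    timer.ScheduleEvent(1'000, [] { /* fires at t = 1000 ns */ });
    timer.ScheduleEvent(5'000, [] { /* fires at t = 5000 ns */ });

    // Manual advancement: instead of sleeping, jump straight to each deadline.
    u64 now = 0;
    while (const auto next = timer.Advance(now)) {
        now += *next;
    }
    // A timer thread would instead wait on *next, as ThreadLoop() does in the diff.
}

Returning the wait time instead of blocking inside Advance() is the piece that enables manual advancement: a host thread can sleep on the returned duration, while a test or a synchronous caller can simply jump its clock to the next deadline and call Advance() again.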