2020-12-03 02:08:35 +00:00
|
|
|
// Copyright 2020 yuzu Emulator Project
|
|
|
|
// Licensed under GPLv2 or any later version
|
|
|
|
// Refer to the license.txt file included.
|
|
|
|
|
|
|
|
// This file references various implementation details from Atmosphere, an open-source firmware for
|
|
|
|
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
|
|
|
|
|
2021-01-15 07:02:57 +00:00
|
|
|
#include <bit>
|
|
|
|
|
2020-12-03 02:08:35 +00:00
|
|
|
#include "common/assert.h"
|
|
|
|
#include "common/bit_util.h"
|
|
|
|
#include "common/fiber.h"
|
|
|
|
#include "common/logging/log.h"
|
|
|
|
#include "core/arm/arm_interface.h"
|
|
|
|
#include "core/core.h"
|
|
|
|
#include "core/core_timing.h"
|
|
|
|
#include "core/cpu_manager.h"
|
2020-12-04 00:43:18 +00:00
|
|
|
#include "core/hle/kernel/k_scheduler.h"
|
2020-12-04 05:56:02 +00:00
|
|
|
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
|
2020-12-31 07:01:08 +00:00
|
|
|
#include "core/hle/kernel/k_thread.h"
|
2020-12-03 02:08:35 +00:00
|
|
|
#include "core/hle/kernel/kernel.h"
|
|
|
|
#include "core/hle/kernel/physical_core.h"
|
|
|
|
#include "core/hle/kernel/process.h"
|
|
|
|
#include "core/hle/kernel/time_manager.h"
|
|
|
|
|
|
|
|
namespace Kernel {
|
|
|
|
|
2020-12-31 07:01:08 +00:00
|
|
|
/// Bumps the scheduled count of the thread's owning process, if it has one.
static void IncrementScheduledCount(Kernel::KThread* thread) {
    auto* owner = thread->GetOwnerProcess();
    if (owner != nullptr) {
        owner->IncrementScheduledCount();
    }
}
|
|
|
|
|
2021-01-20 21:42:27 +00:00
|
|
|
/// Interrupts every core in the pending-reschedule mask so it re-runs scheduling.
/// If the calling core is itself in the mask (and we are not in phantom single-core
/// mode), the reschedule for it is performed directly at the end instead of via an
/// interrupt, bracketed by Exit/EnterSVCProfile.
void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule) {
    auto scheduler = kernel.CurrentScheduler();

    // 0xF is an out-of-range core id, so no core ever compares equal when we have no scheduler.
    u32 current_core{0xF};
    bool must_context_switch{};
    if (scheduler) {
        current_core = scheduler->core_id;
        // TODO(bunnei): Should be set to true when we deprecate single core
        must_context_switch = !kernel.IsPhantomModeForSingleCore();
    }

    // Walk the set bits from least-significant upward; each set bit is a core id.
    while (cores_pending_reschedule != 0) {
        const auto core = static_cast<u32>(std::countr_zero(cores_pending_reschedule));
        ASSERT(core < Core::Hardware::NUM_CPU_CORES);
        if (!must_context_switch || core != current_core) {
            // Remote core: signal it to reschedule itself.
            auto& phys_core = kernel.PhysicalCore(core);
            phys_core.Interrupt();
        } else {
            // NOTE(review): must_context_switch is already true on this branch, so this
            // assignment is redundant; kept as-is to preserve behavior.
            must_context_switch = true;
        }
        cores_pending_reschedule &= ~(1ULL << core);
    }
    if (must_context_switch) {
        // Reschedule the calling core inline, outside of SVC profiling.
        auto core_scheduler = kernel.CurrentScheduler();
        kernel.ExitSVCProfile();
        core_scheduler->RescheduleCurrentCore();
        kernel.EnterSVCProfile();
    }
}
|
|
|
|
|
2020-12-31 07:01:08 +00:00
|
|
|
/// Installs the given thread as this core's highest-priority (next-to-run) thread.
/// Returns a bitmask with this core's bit set when the highest-priority thread
/// actually changed (i.e. the core needs a reschedule), or 0 otherwise.
u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) {
    std::scoped_lock lock{guard};
    if (KThread* prev_highest_thread = state.highest_priority_thread;
        prev_highest_thread != highest_thread) {
        // Account for the outgoing thread being descheduled.
        if (prev_highest_thread != nullptr) {
            IncrementScheduledCount(prev_highest_thread);
            prev_highest_thread->SetLastScheduledTick(system.CoreTiming().GetCPUTicks());
        }
        // Idle accounting: either attribute the running thread to its process
        // (with the current idle count) or bump the idle counter.
        if (state.should_count_idle) {
            if (highest_thread != nullptr) {
                if (Process* process = highest_thread->GetOwnerProcess(); process != nullptr) {
                    process->SetRunningThread(core_id, highest_thread, state.idle_count);
                }
            } else {
                state.idle_count++;
            }
        }

        state.highest_priority_thread = highest_thread;
        state.needs_scheduling.store(true);
        return (1ULL << core_id);
    } else {
        // No change; no reschedule required for this core.
        return 0;
    }
}
|
|
|
|
|
2020-12-05 08:02:30 +00:00
|
|
|
/// Recomputes the highest-priority thread for every core and attempts to fill idle
/// cores by migrating suggested (affinity-compatible) threads. Must be called with
/// the global scheduler lock held. Returns the bitmask of cores needing a reschedule.
u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());

    // Clear that we need to update.
    ClearSchedulerUpdateNeeded(kernel);

    u64 cores_needing_scheduling = 0, idle_cores = 0;
    KThread* top_threads[Core::Hardware::NUM_CPU_CORES];
    auto& priority_queue = GetPriorityQueue(kernel);

    // We want to go over all cores, finding the highest priority thread and determining if
    // scheduling is needed for that core.
    for (size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
        KThread* top_thread = priority_queue.GetScheduledFront(static_cast<s32>(core_id));
        if (top_thread != nullptr) {
            // If the thread has no waiters, we need to check if the process has a thread pinned.
            if (top_thread->GetNumKernelWaiters() == 0) {
                if (Process* parent = top_thread->GetOwnerProcess(); parent != nullptr) {
                    if (KThread* pinned = parent->GetPinnedThread(static_cast<s32>(core_id));
                        pinned != nullptr && pinned != top_thread) {
                        // We prefer our parent's pinned thread if possible. However, we also don't
                        // want to schedule un-runnable threads.
                        if (pinned->GetRawState() == ThreadState::Runnable) {
                            top_thread = pinned;
                        } else {
                            top_thread = nullptr;
                        }
                    }
                }
            }
        } else {
            // No runnable thread scheduled for this core; remember it as idle.
            idle_cores |= (1ULL << core_id);
        }

        top_threads[core_id] = top_thread;
        cores_needing_scheduling |=
            kernel.Scheduler(core_id).UpdateHighestPriorityThread(top_threads[core_id]);
    }

    // Idle cores are bad. We're going to try to migrate threads to each idle core in turn.
    while (idle_cores != 0) {
        const auto core_id = static_cast<u32>(std::countr_zero(idle_cores));
        if (KThread* suggested = priority_queue.GetSuggestedFront(core_id); suggested != nullptr) {
            s32 migration_candidates[Core::Hardware::NUM_CPU_CORES];
            size_t num_candidates = 0;

            // While we have a suggested thread, try to migrate it!
            while (suggested != nullptr) {
                // Check if the suggested thread is the top thread on its core.
                const s32 suggested_core = suggested->GetActiveCore();
                if (KThread* top_thread =
                        (suggested_core >= 0) ? top_threads[suggested_core] : nullptr;
                    top_thread != suggested) {
                    // Make sure we're not dealing with threads too high priority for migration.
                    if (top_thread != nullptr &&
                        top_thread->GetPriority() < HighestCoreMigrationAllowedPriority) {
                        break;
                    }

                    // The suggested thread isn't bound to its core, so we can migrate it!
                    suggested->SetActiveCore(core_id);
                    priority_queue.ChangeCore(suggested_core, suggested);

                    top_threads[core_id] = suggested;
                    cores_needing_scheduling |=
                        kernel.Scheduler(core_id).UpdateHighestPriorityThread(top_threads[core_id]);
                    break;
                }

                // Note this core as a candidate for migration.
                ASSERT(num_candidates < Core::Hardware::NUM_CPU_CORES);
                migration_candidates[num_candidates++] = suggested_core;
                suggested = priority_queue.GetSuggestedNext(core_id, suggested);
            }

            // If suggested is nullptr, we failed to migrate a specific thread. So let's try all our
            // candidate cores' top threads.
            if (suggested == nullptr) {
                for (size_t i = 0; i < num_candidates; i++) {
                    // Check if there's some other thread that can run on the candidate core.
                    const s32 candidate_core = migration_candidates[i];
                    suggested = top_threads[candidate_core];
                    if (KThread* next_on_candidate_core =
                            priority_queue.GetScheduledNext(candidate_core, suggested);
                        next_on_candidate_core != nullptr) {
                        // The candidate core can run some other thread! We'll migrate its current
                        // top thread to us.
                        top_threads[candidate_core] = next_on_candidate_core;
                        cores_needing_scheduling |=
                            kernel.Scheduler(candidate_core)
                                .UpdateHighestPriorityThread(top_threads[candidate_core]);

                        // Perform the migration.
                        suggested->SetActiveCore(core_id);
                        priority_queue.ChangeCore(candidate_core, suggested);

                        top_threads[core_id] = suggested;
                        cores_needing_scheduling |=
                            kernel.Scheduler(core_id).UpdateHighestPriorityThread(
                                top_threads[core_id]);
                        break;
                    }
                }
            }
        }

        idle_cores &= ~(1ULL << core_id);
    }

    return cores_needing_scheduling;
}
|
|
|
|
|
2021-01-20 21:42:27 +00:00
|
|
|
/// Clears `thread` from every core scheduler's `prev_thread` slot, so no core keeps
/// a stale reference to it. Must be called with the global scheduler lock held.
void KScheduler::ClearPreviousThread(KernelCore& kernel, KThread* thread) {
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
    for (size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; ++i) {
        // Get an atomic reference to the core scheduler's previous thread.
        std::atomic_ref<KThread*> prev_thread(kernel.Scheduler(static_cast<s32>(i)).prev_thread);
        static_assert(std::atomic_ref<KThread*>::is_always_lock_free);

        // Atomically clear the previous thread if it's our target.
        // (compare_exchange_strong leaves the slot untouched when it holds another thread.)
        KThread* compare = thread;
        prev_thread.compare_exchange_strong(compare, nullptr);
    }
}
|
|
|
|
|
2020-12-31 07:01:08 +00:00
|
|
|
/// Keeps the global priority queue in sync with a thread's state transition:
/// threads leaving the Runnable state are removed, threads entering it are added.
/// Requires the global scheduler lock to be held.
void KScheduler::OnThreadStateChanged(KernelCore& kernel, KThread* thread, ThreadState old_state) {
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());

    // Nothing to do if the raw state did not actually change.
    const auto new_state = thread->GetRawState();
    if (new_state == old_state) {
        return;
    }

    auto& priority_queue = GetPriorityQueue(kernel);
    if (old_state == ThreadState::Runnable) {
        // Transitioned out of Runnable: drop from the queue.
        priority_queue.Remove(thread);
    } else if (new_state == ThreadState::Runnable) {
        // Transitioned into Runnable: enqueue at the back.
        priority_queue.PushBack(thread);
    } else {
        // Neither endpoint is Runnable; the queue is unaffected.
        return;
    }
    IncrementScheduledCount(thread);
    SetSchedulerUpdateNeeded(kernel);
}
|
|
|
|
|
2020-12-31 07:01:08 +00:00
|
|
|
/// Repositions a runnable thread within the priority queue after its priority changed.
/// Requires the global scheduler lock to be held.
void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, KThread* thread, s32 old_priority) {
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());

    // Only threads currently in the runnable queue need to be moved.
    if (thread->GetRawState() != ThreadState::Runnable) {
        return;
    }
    const bool is_running = (thread == kernel.CurrentScheduler()->GetCurrentThread());
    GetPriorityQueue(kernel).ChangePriority(old_priority, is_running, thread);
    IncrementScheduledCount(thread);
    SetSchedulerUpdateNeeded(kernel);
}
|
|
|
|
|
2020-12-31 07:01:08 +00:00
|
|
|
void KScheduler::OnThreadAffinityMaskChanged(KernelCore& kernel, KThread* thread,
|
2020-12-05 08:02:30 +00:00
|
|
|
const KAffinityMask& old_affinity, s32 old_core) {
|
2020-12-03 02:08:35 +00:00
|
|
|
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
|
|
|
|
|
2020-12-05 07:37:35 +00:00
|
|
|
// If the thread is runnable, we want to change its affinity in the queue.
|
2020-12-28 21:16:43 +00:00
|
|
|
if (thread->GetRawState() == ThreadState::Runnable) {
|
2020-12-03 02:08:35 +00:00
|
|
|
GetPriorityQueue(kernel).ChangeAffinityMask(old_core, old_affinity, thread);
|
|
|
|
IncrementScheduledCount(thread);
|
|
|
|
SetSchedulerUpdateNeeded(kernel);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Round-robin rotation for (core_id, priority): moves the front thread to the back
/// of its scheduled queue, then tries to pull in suggested threads from other cores
/// so this core doesn't run a worse thread than it has to. Requires the global
/// scheduler lock.
void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) {
    ASSERT(system.GlobalSchedulerContext().IsLocked());

    // Get a reference to the priority queue.
    auto& kernel = system.Kernel();
    auto& priority_queue = GetPriorityQueue(kernel);

    // Rotate the front of the queue to the end.
    KThread* top_thread = priority_queue.GetScheduledFront(core_id, priority);
    KThread* next_thread = nullptr;
    if (top_thread != nullptr) {
        next_thread = priority_queue.MoveToScheduledBack(top_thread);
        if (next_thread != top_thread) {
            IncrementScheduledCount(top_thread);
            IncrementScheduledCount(next_thread);
        }
    }

    // While we have a suggested thread, try to migrate it!
    {
        KThread* suggested = priority_queue.GetSuggestedFront(core_id, priority);
        while (suggested != nullptr) {
            // Check if the suggested thread is the top thread on its core.
            const s32 suggested_core = suggested->GetActiveCore();
            if (KThread* top_on_suggested_core =
                    (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core)
                                          : nullptr;
                top_on_suggested_core != suggested) {
                // If the next thread is a new thread that has been waiting longer than our
                // suggestion, we prefer it to our suggestion.
                if (top_thread != next_thread && next_thread != nullptr &&
                    next_thread->GetLastScheduledTick() < suggested->GetLastScheduledTick()) {
                    suggested = nullptr;
                    break;
                }

                // If we're allowed to do a migration, do one.
                // NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the suggestion
                // to the front of the queue.
                if (top_on_suggested_core == nullptr ||
                    top_on_suggested_core->GetPriority() >= HighestCoreMigrationAllowedPriority) {
                    suggested->SetActiveCore(core_id);
                    priority_queue.ChangeCore(suggested_core, suggested, true);
                    IncrementScheduledCount(suggested);
                    break;
                }
            }

            // Get the next suggestion.
            suggested = priority_queue.GetSamePriorityNext(core_id, suggested);
        }
    }

    // Now that we might have migrated a thread with the same priority, check if we can do better.

    {
        KThread* best_thread = priority_queue.GetScheduledFront(core_id);
        if (best_thread == GetCurrentThread()) {
            best_thread = priority_queue.GetScheduledNext(core_id, best_thread);
        }

        // If the best thread we can choose has a priority the same or worse than ours, try to
        // migrate a higher priority thread.
        if (best_thread != nullptr && best_thread->GetPriority() >= priority) {
            KThread* suggested = priority_queue.GetSuggestedFront(core_id);
            while (suggested != nullptr) {
                // If the suggestion's priority is the same as ours, don't bother.
                if (suggested->GetPriority() >= best_thread->GetPriority()) {
                    break;
                }

                // Check if the suggested thread is the top thread on its core.
                const s32 suggested_core = suggested->GetActiveCore();
                if (KThread* top_on_suggested_core =
                        (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core)
                                              : nullptr;
                    top_on_suggested_core != suggested) {
                    // If we're allowed to do a migration, do one.
                    // NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the
                    // suggestion to the front of the queue.
                    if (top_on_suggested_core == nullptr ||
                        top_on_suggested_core->GetPriority() >=
                            HighestCoreMigrationAllowedPriority) {
                        suggested->SetActiveCore(core_id);
                        priority_queue.ChangeCore(suggested_core, suggested, true);
                        IncrementScheduledCount(suggested);
                        break;
                    }
                }

                // Get the next suggestion.
                suggested = priority_queue.GetSuggestedNext(core_id, suggested);
            }
        }
    }

    // After a rotation, we need a scheduler update.
    SetSchedulerUpdateNeeded(kernel);
}
|
|
|
|
|
2020-12-05 08:02:30 +00:00
|
|
|
/// A thread may schedule when it holds at most one level of dispatch disablement
/// (the level it takes itself while scheduling).
bool KScheduler::CanSchedule(KernelCore& kernel) {
    auto* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
    return cur_thread->GetDisableDispatchCount() <= 1;
}
|
|
|
|
|
2020-12-05 08:02:30 +00:00
|
|
|
bool KScheduler::IsSchedulerUpdateNeeded(const KernelCore& kernel) {
|
2020-12-03 02:08:35 +00:00
|
|
|
return kernel.GlobalSchedulerContext().scheduler_update_needed.load(std::memory_order_acquire);
|
|
|
|
}
|
|
|
|
|
2020-12-05 08:02:30 +00:00
|
|
|
/// Raises the global "scheduler update needed" flag with release ordering.
void KScheduler::SetSchedulerUpdateNeeded(KernelCore& kernel) {
    auto& context = kernel.GlobalSchedulerContext();
    context.scheduler_update_needed.store(true, std::memory_order_release);
}
|
|
|
|
|
2020-12-05 08:02:30 +00:00
|
|
|
/// Lowers the global "scheduler update needed" flag with release ordering.
void KScheduler::ClearSchedulerUpdateNeeded(KernelCore& kernel) {
    auto& context = kernel.GlobalSchedulerContext();
    context.scheduler_update_needed.store(false, std::memory_order_release);
}
|
|
|
|
|
2020-12-05 08:02:30 +00:00
|
|
|
/// Increments the current thread's dispatch-disable count, deferring reschedules
/// until EnableScheduling is called. A no-op when no scheduler is current.
void KScheduler::DisableScheduling(KernelCore& kernel) {
    auto* scheduler = kernel.CurrentScheduler();
    if (scheduler == nullptr) {
        return;
    }
    auto* cur_thread = scheduler->GetCurrentThread();
    ASSERT(cur_thread->GetDisableDispatchCount() >= 0);
    cur_thread->DisableDispatch();
}
|
|
|
|
|
2021-01-20 21:42:27 +00:00
|
|
|
/// Decrements the current thread's dispatch-disable count and then reschedules the
/// cores indicated by `cores_needing_scheduling`.
void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling) {
    if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
        ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1);
        // NOTE(review): this guard repeats the ASSERT's condition; in release builds
        // (asserts disabled) it defensively prevents an underflowing EnableDispatch.
        if (scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1) {
            scheduler->GetCurrentThread()->EnableDispatch();
        }
    }
    // Reschedule runs even in phantom mode (when no current scheduler exists).
    RescheduleCores(kernel, cores_needing_scheduling);
}
|
|
|
|
|
2020-12-05 08:02:30 +00:00
|
|
|
/// Recomputes per-core highest-priority threads, but only when an update has been
/// requested. Returns the bitmask of cores needing a reschedule (0 when skipped).
u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) {
    // Skip the (comparatively expensive) full pass when nothing changed.
    if (!IsSchedulerUpdateNeeded(kernel)) {
        return 0;
    }
    return UpdateHighestPriorityThreadsImpl(kernel);
}
|
|
|
|
|
2020-12-05 08:02:30 +00:00
|
|
|
/// Accessor for the single global priority queue shared by all core schedulers.
KSchedulerPriorityQueue& KScheduler::GetPriorityQueue(KernelCore& kernel) {
    auto& global_context = kernel.GlobalSchedulerContext();
    return global_context.priority_queue;
}
|
|
|
|
|
2021-01-20 21:42:27 +00:00
|
|
|
/// svcSleepThread(0)-style yield: moves the current thread to the back of its
/// priority level on its own core, without considering threads from other cores.
void KScheduler::YieldWithoutCoreMigration(KernelCore& kernel) {
    // Validate preconditions.
    ASSERT(CanSchedule(kernel));
    ASSERT(kernel.CurrentProcess() != nullptr);

    // Get the current thread and process.
    KThread& cur_thread = Kernel::GetCurrentThread(kernel);
    Process& cur_process = *kernel.CurrentProcess();

    // If the thread's yield count matches, there's nothing for us to do.
    if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) {
        return;
    }

    // Get a reference to the priority queue.
    auto& priority_queue = GetPriorityQueue(kernel);

    // Perform the yield.
    {
        KScopedSchedulerLock lock(kernel);

        // Only a thread that is still runnable can actually yield.
        const auto cur_state = cur_thread.GetRawState();
        if (cur_state == ThreadState::Runnable) {
            // Put the current thread at the back of the queue.
            KThread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread));
            IncrementScheduledCount(std::addressof(cur_thread));

            // If the next thread is different, we have an update to perform.
            if (next_thread != std::addressof(cur_thread)) {
                SetSchedulerUpdateNeeded(kernel);
            } else {
                // Otherwise, set the thread's yield count so that we won't waste work until the
                // process is scheduled again.
                cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount());
            }
        }
    }
}
|
|
|
|
|
2021-01-20 21:42:27 +00:00
|
|
|
/// svcSleepThread(-1)-style yield: moves the current thread to the back of its
/// queue and additionally tries to migrate a suitable suggested thread from
/// another core to this one.
void KScheduler::YieldWithCoreMigration(KernelCore& kernel) {
    // Validate preconditions.
    ASSERT(CanSchedule(kernel));
    ASSERT(kernel.CurrentProcess() != nullptr);

    // Get the current thread and process.
    KThread& cur_thread = Kernel::GetCurrentThread(kernel);
    Process& cur_process = *kernel.CurrentProcess();

    // If the thread's yield count matches, there's nothing for us to do.
    if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) {
        return;
    }

    // Get a reference to the priority queue.
    auto& priority_queue = GetPriorityQueue(kernel);

    // Perform the yield.
    {
        KScopedSchedulerLock lock(kernel);

        // Only a thread that is still runnable can actually yield.
        const auto cur_state = cur_thread.GetRawState();
        if (cur_state == ThreadState::Runnable) {
            // Get the current active core.
            const s32 core_id = cur_thread.GetActiveCore();

            // Put the current thread at the back of the queue.
            KThread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread));
            IncrementScheduledCount(std::addressof(cur_thread));

            // While we have a suggested thread, try to migrate it!
            bool recheck = false;
            KThread* suggested = priority_queue.GetSuggestedFront(core_id);
            while (suggested != nullptr) {
                // Check if the suggested thread is the thread running on its core.
                const s32 suggested_core = suggested->GetActiveCore();

                if (KThread* running_on_suggested_core =
                        (suggested_core >= 0)
                            ? kernel.Scheduler(suggested_core).state.highest_priority_thread
                            : nullptr;
                    running_on_suggested_core != suggested) {
                    // If the current thread's priority is higher than our suggestion's we prefer
                    // the next thread to the suggestion. We also prefer the next thread when the
                    // current thread's priority is equal to the suggestions, but the next thread
                    // has been waiting longer.
                    if ((suggested->GetPriority() > cur_thread.GetPriority()) ||
                        (suggested->GetPriority() == cur_thread.GetPriority() &&
                         next_thread != std::addressof(cur_thread) &&
                         next_thread->GetLastScheduledTick() < suggested->GetLastScheduledTick())) {
                        suggested = nullptr;
                        break;
                    }

                    // If we're allowed to do a migration, do one.
                    // NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the
                    // suggestion to the front of the queue.
                    if (running_on_suggested_core == nullptr ||
                        running_on_suggested_core->GetPriority() >=
                            HighestCoreMigrationAllowedPriority) {
                        suggested->SetActiveCore(core_id);
                        priority_queue.ChangeCore(suggested_core, suggested, true);
                        IncrementScheduledCount(suggested);
                        break;
                    } else {
                        // We couldn't perform a migration, but we should check again on a future
                        // yield.
                        recheck = true;
                    }
                }

                // Get the next suggestion.
                suggested = priority_queue.GetSuggestedNext(core_id, suggested);
            }

            // If we still have a suggestion or the next thread is different, we have an update to
            // perform.
            if (suggested != nullptr || next_thread != std::addressof(cur_thread)) {
                SetSchedulerUpdateNeeded(kernel);
            } else if (!recheck) {
                // Otherwise if we don't need to re-check, set the thread's yield count so that we
                // won't waste work until the process is scheduled again.
                cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount());
            }
        }
    }
}
|
|
|
|
|
2021-01-20 21:42:27 +00:00
|
|
|
/// svcSleepThread(-2)-style yield: detaches the current thread from its core
/// (active core -1) and, if the core's scheduled queue becomes empty, tries to
/// migrate any suggested thread from another core to fill the gap.
void KScheduler::YieldToAnyThread(KernelCore& kernel) {
    // Validate preconditions.
    ASSERT(CanSchedule(kernel));
    ASSERT(kernel.CurrentProcess() != nullptr);

    // Get the current thread and process.
    KThread& cur_thread = Kernel::GetCurrentThread(kernel);
    Process& cur_process = *kernel.CurrentProcess();

    // If the thread's yield count matches, there's nothing for us to do.
    if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) {
        return;
    }

    // Get a reference to the priority queue.
    auto& priority_queue = GetPriorityQueue(kernel);

    // Perform the yield.
    {
        KScopedSchedulerLock lock(kernel);

        // Only a thread that is still runnable can actually yield.
        const auto cur_state = cur_thread.GetRawState();
        if (cur_state == ThreadState::Runnable) {
            // Get the current active core.
            const s32 core_id = cur_thread.GetActiveCore();

            // Migrate the current thread to core -1.
            cur_thread.SetActiveCore(-1);
            priority_queue.ChangeCore(core_id, std::addressof(cur_thread));
            IncrementScheduledCount(std::addressof(cur_thread));

            // If there's nothing scheduled, we can try to perform a migration.
            if (priority_queue.GetScheduledFront(core_id) == nullptr) {
                // While we have a suggested thread, try to migrate it!
                KThread* suggested = priority_queue.GetSuggestedFront(core_id);
                while (suggested != nullptr) {
                    // Check if the suggested thread is the top thread on its core.
                    const s32 suggested_core = suggested->GetActiveCore();
                    if (KThread* top_on_suggested_core =
                            (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core)
                                                  : nullptr;
                        top_on_suggested_core != suggested) {
                        // If we're allowed to do a migration, do one.
                        if (top_on_suggested_core == nullptr ||
                            top_on_suggested_core->GetPriority() >=
                                HighestCoreMigrationAllowedPriority) {
                            suggested->SetActiveCore(core_id);
                            priority_queue.ChangeCore(suggested_core, suggested);
                            IncrementScheduledCount(suggested);
                        }

                        // Regardless of whether we migrated, we had a candidate, so we're done.
                        break;
                    }

                    // Get the next suggestion.
                    suggested = priority_queue.GetSuggestedNext(core_id, suggested);
                }

                // If the suggestion is different from the current thread, we need to perform an
                // update.
                if (suggested != std::addressof(cur_thread)) {
                    SetSchedulerUpdateNeeded(kernel);
                } else {
                    // Otherwise, set the thread's yield count so that we won't waste work until the
                    // process is scheduled again.
                    cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount());
                }
            } else {
                // Otherwise, we have an update to perform.
                SetSchedulerUpdateNeeded(kernel);
            }
        }
    }
}
|
|
|
|
|
2021-01-20 21:42:27 +00:00
|
|
|
// Constructs the per-core scheduler: creates the host fiber used for context
// switching and resets all scheduling state to a clean "needs scheduling" state.
KScheduler::KScheduler(Core::System& system, s32 core_id) : system(system), core_id(core_id) {
    // Host fiber that runs SwitchToCurrent() (via OnSwitch) during context switches.
    switch_fiber = std::make_shared<Common::Fiber>(OnSwitch, this);
    // Start with scheduling requested so the first Schedule() pass runs.
    state.needs_scheduling.store(true);
    state.interrupt_task_thread_runnable = false;
    state.should_count_idle = false;
    state.idle_count = 0;
    state.idle_thread_stack = nullptr;
    // No thread selected yet; ScheduleImpl() substitutes the idle thread for null.
    state.highest_priority_thread = nullptr;
}
|
|
|
|
|
|
|
|
// Default destruction; owned members (e.g. switch_fiber) clean up via their own destructors.
KScheduler::~KScheduler() = default;
|
|
|
|
|
2020-12-31 07:01:08 +00:00
|
|
|
// Returns the thread currently scheduled on this core, or the core's idle
// thread when no guest thread is loaded.
KThread* KScheduler::GetCurrentThread() const {
    KThread* const scheduled = current_thread.load();
    return scheduled != nullptr ? scheduled : idle_thread;
}
|
|
|
|
|
|
|
|
// Returns the CPU tick count recorded at the most recent context switch on this core.
u64 KScheduler::GetLastContextSwitchTicks() const {
    return last_context_switch_time;
}
|
|
|
|
|
|
|
|
// Performs a reschedule pass on this core. Must be called with dispatch
// disabled exactly once (enforced by the assert below).
void KScheduler::RescheduleCurrentCore() {
    ASSERT(GetCurrentThread()->GetDisableDispatchCount() == 1);

    // Acknowledge any pending interrupt on this physical core before scheduling.
    auto& phys_core = system.Kernel().PhysicalCore(core_id);
    if (phys_core.IsInterrupted()) {
        phys_core.ClearInterrupt();
    }

    // NOTE: the guard is deliberately handed off when scheduling is needed —
    // Schedule() is entered with the guard held and is expected to release it
    // (ScheduleImpl() unlocks it on both of its paths); only the no-work path
    // unlocks here.
    guard.lock();
    if (state.needs_scheduling.load()) {
        Schedule();
    } else {
        guard.unlock();
    }
}
|
|
|
|
|
|
|
|
// Entry hook for a freshly started thread: completes the second half of the
// context switch (loading guest context, then re-checking for reschedules).
void KScheduler::OnThreadStart() {
    SwitchContextStep2();
}
|
|
|
|
|
2020-12-31 07:01:08 +00:00
|
|
|
// Unloads a thread from this core during a context switch, saving its guest
// CPU context so it can be resumed later. The thread's context_guard is
// expected to be held on entry (it is unlocked at the end of this function).
void KScheduler::Unload(KThread* thread) {
    LOG_TRACE(Kernel, "core {}, unload thread {}", core_id, thread ? thread->GetName() : "nullptr");

    if (thread) {
        // If the thread was switched out mid-SVC, exit the guest exceptionally
        // and clear the SVC flag so the thread resumes consistently.
        if (thread->IsCallingSvc()) {
            system.ArmInterface(core_id).ExceptionalExit();
            thread->ClearIsCallingSvc();
        }
        if (!thread->IsTerminationRequested()) {
            // Remember the outgoing thread and snapshot its guest context.
            prev_thread = thread;

            Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
            cpu_core.SaveContext(thread->GetContext32());
            cpu_core.SaveContext(thread->GetContext64());
            // Save the TPIDR_EL0 system register in case it was modified.
            thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
            // Drop any exclusive-monitor reservation held by the outgoing thread.
            cpu_core.ClearExclusiveState();
        } else {
            // A terminating thread will not run again; don't track it.
            prev_thread = nullptr;
        }
        // Release the per-thread guard taken by the scheduler before the switch.
        thread->context_guard.unlock();
    }
}
|
|
|
|
|
2020-12-31 07:01:08 +00:00
|
|
|
// Loads a thread onto this core during a context switch, restoring its guest
// CPU context and making its owner process current.
void KScheduler::Reload(KThread* thread) {
    LOG_TRACE(Kernel, "core {}, reload thread {}", core_id, thread ? thread->GetName() : "nullptr");

    if (thread) {
        // Only runnable threads may be loaded onto a core.
        ASSERT_MSG(thread->GetState() == ThreadState::Runnable, "Thread must be runnable.");

        // Switch the kernel's current process to the incoming thread's owner
        // (kernel-owned threads have no owner process and skip this).
        auto* const thread_owner_process = thread->GetOwnerProcess();
        if (thread_owner_process != nullptr) {
            system.Kernel().MakeCurrentProcess(thread_owner_process);
        }

        // Restore the full guest context, TLS pointer, and TPIDR_EL0, and
        // clear any stale exclusive-monitor reservation.
        Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
        cpu_core.LoadContext(thread->GetContext32());
        cpu_core.LoadContext(thread->GetContext64());
        cpu_core.SetTlsAddress(thread->GetTLSAddress());
        cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
        cpu_core.ClearExclusiveState();
    }
}
|
|
|
|
|
|
|
|
// Second half of a context switch, executed after the fiber swap: restores the
// incoming thread's guest context, then re-runs scheduling in case state
// changed while this core was switching.
void KScheduler::SwitchContextStep2() {
    // Load context of new thread
    Reload(current_thread.load());

    RescheduleCurrentCore();
}
|
|
|
|
|
|
|
|
// Core scheduling step: selects the highest-priority thread for this core and,
// if it differs from the running thread, performs the outgoing half of a
// context switch and yields to the scheduler fiber. Entered with the guard
// held; the guard is released on every path through this function.
void KScheduler::ScheduleImpl() {
    KThread* previous_thread = current_thread.load();
    KThread* next_thread = state.highest_priority_thread;

    // We are acting on the reschedule request now.
    state.needs_scheduling = false;

    // We never want to schedule a null thread, so use the idle thread if we don't have a next.
    if (next_thread == nullptr) {
        next_thread = idle_thread;
    }

    // If we're not actually switching thread, there's nothing to do.
    if (next_thread == current_thread.load()) {
        guard.unlock();
        return;
    }

    // Publish the incoming thread before unloading the outgoing one.
    current_thread.store(next_thread);

    Process* const previous_process = system.Kernel().CurrentProcess();

    // Account CPU time to the outgoing thread/process.
    UpdateLastContextSwitchTime(previous_thread, previous_process);

    // Save context for previous thread
    Unload(previous_thread);

    // Pick the host fiber to suspend: the outgoing thread's, or the idle
    // thread's when nothing was loaded.
    std::shared_ptr<Common::Fiber>* old_context;
    if (previous_thread != nullptr) {
        old_context = &previous_thread->GetHostContext();
    } else {
        old_context = &idle_thread->GetHostContext();
    }
    guard.unlock();

    // Suspend this fiber and run the scheduler fiber (SwitchToCurrent).
    Common::Fiber::YieldTo(*old_context, switch_fiber);
    // When this thread is resumed, it may be running on a different core, so
    // re-fetch the current scheduler rather than using `this`.
    auto& next_scheduler = *system.Kernel().CurrentScheduler();
    next_scheduler.SwitchContextStep2();
}
|
|
|
|
|
|
|
|
void KScheduler::OnSwitch(void* this_scheduler) {
|
|
|
|
KScheduler* sched = static_cast<KScheduler*>(this_scheduler);
|
|
|
|
sched->SwitchToCurrent();
|
|
|
|
}
|
|
|
|
|
|
|
|
// Body of the scheduler fiber: repeatedly selects the highest-priority thread,
// validates it is still runnable on this core (under its context_guard), and
// yields to its host fiber. Loops back whenever a new reschedule is requested.
void KScheduler::SwitchToCurrent() {
    while (true) {
        {
            // Latch the selected thread and clear the pending-reschedule flag
            // atomically with respect to other users of the guard.
            std::scoped_lock lock{guard};
            current_thread.store(state.highest_priority_thread);
            state.needs_scheduling.store(false);
        }
        // Re-checked after each yield; a true result restarts thread selection.
        const auto is_switch_pending = [this] {
            std::scoped_lock lock{guard};
            return state.needs_scheduling.load();
        };
        do {
            auto next_thread = current_thread.load();
            if (next_thread != nullptr) {
                // Take the thread's context_guard for the switch; Unload() on
                // the other side releases it. Bail out (and re-select) if the
                // thread is no longer runnable or has migrated off this core.
                next_thread->context_guard.lock();
                if (next_thread->GetRawState() != ThreadState::Runnable) {
                    next_thread->context_guard.unlock();
                    break;
                }
                if (next_thread->GetActiveCore() != core_id) {
                    next_thread->context_guard.unlock();
                    break;
                }
            }
            // Run the selected thread's fiber, or the idle thread's when none.
            std::shared_ptr<Common::Fiber>* next_context;
            if (next_thread != nullptr) {
                next_context = &next_thread->GetHostContext();
            } else {
                next_context = &idle_thread->GetHostContext();
            }
            Common::Fiber::YieldTo(switch_fiber, *next_context);
        } while (!is_switch_pending());
    }
}
|
|
|
|
|
2020-12-31 07:01:08 +00:00
|
|
|
// Accounts the CPU ticks elapsed since the previous context switch to the
// given thread and process (either may be null), then records the new
// switch timestamp.
void KScheduler::UpdateLastContextSwitchTime(KThread* thread, Process* process) {
    const u64 now_ticks = system.CoreTiming().GetCPUTicks();
    const u64 elapsed_ticks = now_ticks - last_context_switch_time;

    if (thread != nullptr) {
        thread->AddCpuTime(core_id, elapsed_ticks);
    }
    if (process != nullptr) {
        process->UpdateCPUTimeTicks(elapsed_ticks);
    }

    last_context_switch_time = now_ticks;
}
|
|
|
|
|
|
|
|
// Creates this core's idle thread, which runs whenever no guest thread is
// schedulable on the core.
void KScheduler::Initialize() {
    std::string name = "Idle Thread Id:" + std::to_string(core_id);
    std::function<void(void*)> init_func = Core::CpuManager::GetIdleThreadStartFunc();
    void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
    // Created at the dedicated idle-thread priority and pinned to this core.
    auto thread_res = KThread::Create(system, ThreadType::Main, name, 0,
                                      KThread::IdleThreadPriority, 0, static_cast<u32>(core_id), 0,
                                      nullptr, std::move(init_func), init_func_parameter);
    // NOTE(review): Unwrap() asserts on failure — idle thread creation is
    // assumed infallible here. Raw pointer is non-owning.
    idle_thread = thread_res.Unwrap().get();
}
|
|
|
|
|
2020-12-04 06:26:42 +00:00
|
|
|
// RAII helper that holds the global scheduler lock for the scope's lifetime,
// delegating acquisition/release to the KScopedLock base.
KScopedSchedulerLock::KScopedSchedulerLock(KernelCore& kernel)
    : KScopedLock(kernel.GlobalSchedulerContext().SchedulerLock()) {}

KScopedSchedulerLock::~KScopedSchedulerLock() = default;
|
2020-12-03 02:08:35 +00:00
|
|
|
|
|
|
|
} // namespace Kernel
|