// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <algorithm>
#include <cinttypes>
#include <iterator>
#include <mutex>
#include <vector>

#include "common/assert.h"
#include "common/logging/log.h"
#include "common/microprofile.h"
#include "common/string_util.h"
#include "core/arm/exclusive_monitor.h"
#include "core/core.h"
#include "core/core_cpu.h"
#include "core/core_timing.h"
#include "core/hle/kernel/address_arbiter.h"
#include "core/hle/kernel/client_port.h"
#include "core/hle/kernel/client_session.h"
#include "core/hle/kernel/event.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/mutex.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/resource_limit.h"
#include "core/hle/kernel/scheduler.h"
#include "core/hle/kernel/shared_memory.h"
#include "core/hle/kernel/svc.h"
#include "core/hle/kernel/svc_wrap.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/lock.h"
#include "core/hle/result.h"
#include "core/hle/service/service.h"

namespace Kernel {
namespace {
constexpr bool Is4KBAligned(VAddr address) {
    return (address & 0xFFF) == 0;
}
} // Anonymous namespace

/// Set the process heap to a given size. It can both extend and shrink the heap.
static ResultCode SetHeapSize(VAddr* heap_addr, u64 heap_size) {
    LOG_TRACE(Kernel_SVC, "called, heap_size=0x{:X}", heap_size);

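    // Note: the single mask test below covers both conditions at once: its low 21 bits catch
    // sizes that are not 2MB aligned, and its bits above bit 32 catch sizes that are too large.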
    // Size must be a multiple of 0x200000 (2MB) and be equal to or less than 4GB.
    if ((heap_size & 0xFFFFFFFE001FFFFF) != 0) {
        return ERR_INVALID_SIZE;
    }

    auto& process = *Core::CurrentProcess();
    CASCADE_RESULT(*heap_addr,
                   process.HeapAllocate(Memory::HEAP_VADDR, heap_size, VMAPermission::ReadWrite));
    return RESULT_SUCCESS;
}

static ResultCode SetMemoryAttribute(VAddr addr, u64 size, u32 state0, u32 state1) {
    LOG_WARNING(Kernel_SVC,
                "(STUBBED) called, addr=0x{:X}, size=0x{:X}, state0=0x{:X}, state1=0x{:X}", addr,
                size, state0, state1);
    return RESULT_SUCCESS;
}

/// Maps a memory range into a different range.
static ResultCode MapMemory(VAddr dst_addr, VAddr src_addr, u64 size) {
    LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr,
              src_addr, size);

    if (!Is4KBAligned(dst_addr) || !Is4KBAligned(src_addr)) {
        return ERR_INVALID_ADDRESS;
    }

    if (size == 0 || !Is4KBAligned(size)) {
        return ERR_INVALID_SIZE;
    }

    return Core::CurrentProcess()->MirrorMemory(dst_addr, src_addr, size);
}

/// Unmaps a region that was previously mapped with svcMapMemory
static ResultCode UnmapMemory(VAddr dst_addr, VAddr src_addr, u64 size) {
    LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr,
              src_addr, size);

    if (!Is4KBAligned(dst_addr) || !Is4KBAligned(src_addr)) {
        return ERR_INVALID_ADDRESS;
    }

    if (size == 0 || !Is4KBAligned(size)) {
        return ERR_INVALID_SIZE;
    }

    return Core::CurrentProcess()->UnmapMemory(dst_addr, src_addr, size);
}

/// Connect to an OS service given the port name, returns the handle to the port in out_handle
static ResultCode ConnectToNamedPort(Handle* out_handle, VAddr port_name_address) {
    if (!Memory::IsValidVirtualAddress(port_name_address)) {
        return ERR_NOT_FOUND;
    }

    static constexpr std::size_t PortNameMaxLength = 11;
    // Read 1 char beyond the max allowed port name to detect names that are too long.
    std::string port_name = Memory::ReadCString(port_name_address, PortNameMaxLength + 1);
    if (port_name.size() > PortNameMaxLength) {
        return ERR_PORT_NAME_TOO_LONG;
    }

    LOG_TRACE(Kernel_SVC, "called port_name={}", port_name);

    auto& kernel = Core::System::GetInstance().Kernel();
    auto it = kernel.FindNamedPort(port_name);
    if (!kernel.IsValidNamedPort(it)) {
        LOG_WARNING(Kernel_SVC, "tried to connect to unknown port: {}", port_name);
        return ERR_NOT_FOUND;
    }

    auto client_port = it->second;

    SharedPtr<ClientSession> client_session;
    CASCADE_RESULT(client_session, client_port->Connect());

    // Return the client session
    CASCADE_RESULT(*out_handle, kernel.HandleTable().Create(client_session));
    return RESULT_SUCCESS;
}

/// Makes a blocking IPC call to an OS service.
static ResultCode SendSyncRequest(Handle handle) {
    auto& kernel = Core::System::GetInstance().Kernel();
    SharedPtr<ClientSession> session = kernel.HandleTable().Get<ClientSession>(handle);
    if (!session) {
        LOG_ERROR(Kernel_SVC, "called with invalid handle=0x{:08X}", handle);
        return ERR_INVALID_HANDLE;
    }

    LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName());

    Core::System::GetInstance().PrepareReschedule();

    // TODO(Subv): svcSendSyncRequest should put the caller thread to sleep while the server
    // responds and cause a reschedule.
    return session->SendSyncRequest(GetCurrentThread());
}

/// Get the ID for the specified thread.
static ResultCode GetThreadId(u32* thread_id, Handle thread_handle) {
    LOG_TRACE(Kernel_SVC, "called thread=0x{:08X}", thread_handle);

    auto& kernel = Core::System::GetInstance().Kernel();
    const SharedPtr<Thread> thread = kernel.HandleTable().Get<Thread>(thread_handle);
    if (!thread) {
        return ERR_INVALID_HANDLE;
    }

    *thread_id = thread->GetThreadId();
    return RESULT_SUCCESS;
}

/// Get the ID of the specified process
static ResultCode GetProcessId(u32* process_id, Handle process_handle) {
    LOG_TRACE(Kernel_SVC, "called process=0x{:08X}", process_handle);

    auto& kernel = Core::System::GetInstance().Kernel();
    const SharedPtr<Process> process = kernel.HandleTable().Get<Process>(process_handle);
    if (!process) {
        return ERR_INVALID_HANDLE;
    }

    *process_id = process->process_id;
    return RESULT_SUCCESS;
}

/// Default thread wakeup callback for WaitSynchronization
static bool DefaultThreadWakeupCallback(ThreadWakeupReason reason, SharedPtr<Thread> thread,
                                        SharedPtr<WaitObject> object, std::size_t index) {
    ASSERT(thread->status == ThreadStatus::WaitSynchAny);

    if (reason == ThreadWakeupReason::Timeout) {
        thread->SetWaitSynchronizationResult(RESULT_TIMEOUT);
        return true;
    }

    ASSERT(reason == ThreadWakeupReason::Signal);
    thread->SetWaitSynchronizationResult(RESULT_SUCCESS);
    thread->SetWaitSynchronizationOutput(static_cast<u32>(index));
    return true;
};

/// Wait for the given handles to synchronize, timeout after the specified nanoseconds
static ResultCode WaitSynchronization(Handle* index, VAddr handles_address, u64 handle_count,
                                      s64 nano_seconds) {
    LOG_TRACE(Kernel_SVC, "called handles_address=0x{:X}, handle_count={}, nano_seconds={}",
              handles_address, handle_count, nano_seconds);

    if (!Memory::IsValidVirtualAddress(handles_address))
        return ERR_INVALID_POINTER;

    static constexpr u64 MaxHandles = 0x40;

    if (handle_count > MaxHandles)
        return ResultCode(ErrorModule::Kernel, ErrCodes::TooLarge);

    auto thread = GetCurrentThread();

    using ObjectPtr = SharedPtr<WaitObject>;
    std::vector<ObjectPtr> objects(handle_count);
    auto& kernel = Core::System::GetInstance().Kernel();

    for (u64 i = 0; i < handle_count; ++i) {
        const Handle handle = Memory::Read32(handles_address + i * sizeof(Handle));
        const auto object = kernel.HandleTable().Get<WaitObject>(handle);

        if (object == nullptr) {
            return ERR_INVALID_HANDLE;
        }

        objects[i] = object;
    }

    // Find the first object that is acquirable in the provided list of objects
    auto itr = std::find_if(objects.begin(), objects.end(), [thread](const ObjectPtr& object) {
        return !object->ShouldWait(thread);
    });

    if (itr != objects.end()) {
        // We found a ready object, acquire it and set the result value
        WaitObject* object = itr->get();
        object->Acquire(thread);
        *index = static_cast<s32>(std::distance(objects.begin(), itr));
        return RESULT_SUCCESS;
    }

    // No objects were ready to be acquired, prepare to suspend the thread.

    // If a timeout value of 0 was provided, just return the Timeout error code instead of
    // suspending the thread.
    if (nano_seconds == 0)
        return RESULT_TIMEOUT;

    for (auto& object : objects)
        object->AddWaitingThread(thread);

    thread->wait_objects = std::move(objects);
    thread->status = ThreadStatus::WaitSynchAny;

    // Create an event to wake the thread up after the specified nanosecond delay has passed
    thread->WakeAfterDelay(nano_seconds);
    thread->wakeup_callback = DefaultThreadWakeupCallback;

    Core::System::GetInstance().CpuCore(thread->processor_id).PrepareReschedule();

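    // The value returned here is provisional: once the thread is woken up again,
    // DefaultThreadWakeupCallback replaces it with the real result and signalled index.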
    return RESULT_TIMEOUT;
}

/// Resumes a thread waiting on WaitSynchronization
static ResultCode CancelSynchronization(Handle thread_handle) {
    LOG_TRACE(Kernel_SVC, "called thread=0x{:X}", thread_handle);

    auto& kernel = Core::System::GetInstance().Kernel();
    const SharedPtr<Thread> thread = kernel.HandleTable().Get<Thread>(thread_handle);
    if (!thread) {
        return ERR_INVALID_HANDLE;
    }

    ASSERT(thread->status == ThreadStatus::WaitSynchAny);
    thread->SetWaitSynchronizationResult(
        ResultCode(ErrorModule::Kernel, ErrCodes::SynchronizationCanceled));
    thread->ResumeFromWait();
    return RESULT_SUCCESS;
}

/// Attempts to lock a mutex, creating it if it does not already exist
static ResultCode ArbitrateLock(Handle holding_thread_handle, VAddr mutex_addr,
                                Handle requesting_thread_handle) {
    LOG_TRACE(Kernel_SVC,
              "called holding_thread_handle=0x{:08X}, mutex_addr=0x{:X}, "
              "requesting_current_thread_handle=0x{:08X}",
              holding_thread_handle, mutex_addr, requesting_thread_handle);

    if (Memory::IsKernelVirtualAddress(mutex_addr)) {
        return ERR_INVALID_ADDRESS_STATE;
    }

    auto& handle_table = Core::System::GetInstance().Kernel().HandleTable();
    return Mutex::TryAcquire(handle_table, mutex_addr, holding_thread_handle,
                             requesting_thread_handle);
}

/// Unlock a mutex
static ResultCode ArbitrateUnlock(VAddr mutex_addr) {
    LOG_TRACE(Kernel_SVC, "called mutex_addr=0x{:X}", mutex_addr);

    if (Memory::IsKernelVirtualAddress(mutex_addr)) {
        return ERR_INVALID_ADDRESS_STATE;
    }

    return Mutex::Release(mutex_addr);
}

/// Break program execution
static void Break(u64 reason, u64 info1, u64 info2) {
    LOG_CRITICAL(
        Debug_Emulated,
        "Emulated program broke execution! reason=0x{:016X}, info1=0x{:016X}, info2=0x{:016X}",
        reason, info1, info2);
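    // There is currently no debugger to hand the break off to, so emulation simply asserts here.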
    ASSERT(false);
}

/// Used to output a message on a debug hardware unit - does nothing on a retail unit
static void OutputDebugString(VAddr address, u64 len) {
    if (len == 0) {
        return;
    }

    std::string str(len, '\0');
    Memory::ReadBlock(address, str.data(), str.size());
    LOG_DEBUG(Debug_Emulated, "{}", str);
}

/// Gets system/memory information for the current process
static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id) {
    LOG_TRACE(Kernel_SVC, "called info_id=0x{:X}, info_sub_id=0x{:X}, handle=0x{:08X}", info_id,
              info_sub_id, handle);

    const auto& vm_manager = Core::CurrentProcess()->vm_manager;

    switch (static_cast<GetInfoType>(info_id)) {
    case GetInfoType::AllowedCpuIdBitmask:
        *result = Core::CurrentProcess()->allowed_processor_mask;
        break;
    case GetInfoType::AllowedThreadPrioBitmask:
        *result = Core::CurrentProcess()->allowed_thread_priority_mask;
        break;
    case GetInfoType::MapRegionBaseAddr:
        *result = Memory::MAP_REGION_VADDR;
        break;
    case GetInfoType::MapRegionSize:
        *result = Memory::MAP_REGION_SIZE;
        break;
    case GetInfoType::HeapRegionBaseAddr:
        *result = Memory::HEAP_VADDR;
        break;
    case GetInfoType::HeapRegionSize:
        *result = Memory::HEAP_SIZE;
        break;
    case GetInfoType::TotalMemoryUsage:
        *result = vm_manager.GetTotalMemoryUsage();
        break;
    case GetInfoType::TotalHeapUsage:
        *result = vm_manager.GetTotalHeapUsage();
        break;
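    // Debugging state and random entropy are not implemented yet; both queries report 0 for now.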
    case GetInfoType::IsCurrentProcessBeingDebugged:
        *result = 0;
        break;
    case GetInfoType::RandomEntropy:
        *result = 0;
        break;
    case GetInfoType::AddressSpaceBaseAddr:
        *result = vm_manager.GetAddressSpaceBaseAddr();
        break;
    case GetInfoType::AddressSpaceSize:
        *result = vm_manager.GetAddressSpaceSize();
        break;
    case GetInfoType::NewMapRegionBaseAddr:
        *result = Memory::NEW_MAP_REGION_VADDR;
        break;
    case GetInfoType::NewMapRegionSize:
        *result = Memory::NEW_MAP_REGION_SIZE;
        break;
    case GetInfoType::IsVirtualAddressMemoryEnabled:
        *result = Core::CurrentProcess()->is_virtual_address_memory_enabled;
        break;
    case GetInfoType::TitleId:
        *result = Core::CurrentProcess()->program_id;
        break;
    case GetInfoType::PrivilegedProcessId:
        LOG_WARNING(Kernel_SVC,
                    "(STUBBED) Attempted to query privileged process id bounds, returned 0");
        *result = 0;
        break;
    case GetInfoType::UserExceptionContextAddr:
        LOG_WARNING(Kernel_SVC,
                    "(STUBBED) Attempted to query user exception context address, returned 0");
        *result = 0;
        break;
    default:
        UNIMPLEMENTED();
    }

    return RESULT_SUCCESS;
}

/// Sets the thread activity
static ResultCode SetThreadActivity(Handle handle, u32 unknown) {
    LOG_WARNING(Kernel_SVC, "(STUBBED) called, handle=0x{:08X}, unknown=0x{:08X}", handle, unknown);
    return RESULT_SUCCESS;
}

/// Gets the thread context
static ResultCode GetThreadContext(Handle handle, VAddr addr) {
    LOG_WARNING(Kernel_SVC, "(STUBBED) called, handle=0x{:08X}, addr=0x{:X}", handle, addr);
    return RESULT_SUCCESS;
}

/// Gets the priority for the specified thread
static ResultCode GetThreadPriority(u32* priority, Handle handle) {
    auto& kernel = Core::System::GetInstance().Kernel();
    const SharedPtr<Thread> thread = kernel.HandleTable().Get<Thread>(handle);
    if (!thread)
        return ERR_INVALID_HANDLE;

    *priority = thread->GetPriority();
    return RESULT_SUCCESS;
}

/// Sets the priority for the specified thread
static ResultCode SetThreadPriority(Handle handle, u32 priority) {
    if (priority > THREADPRIO_LOWEST) {
        return ERR_INVALID_THREAD_PRIORITY;
    }

    auto& kernel = Core::System::GetInstance().Kernel();
    SharedPtr<Thread> thread = kernel.HandleTable().Get<Thread>(handle);
    if (!thread)
        return ERR_INVALID_HANDLE;

    // Note: The kernel uses the current process's resource limit instead of
    // the one from the thread owner's resource limit.
    SharedPtr<ResourceLimit>& resource_limit = Core::CurrentProcess()->resource_limit;
    if (resource_limit->GetMaxResourceValue(ResourceType::Priority) > priority) {
        return ERR_NOT_AUTHORIZED;
    }

    thread->SetPriority(priority);

    Core::System::GetInstance().CpuCore(thread->processor_id).PrepareReschedule();
    return RESULT_SUCCESS;
}

/// Get which CPU core is executing the current thread
static u32 GetCurrentProcessorNumber() {
    LOG_TRACE(Kernel_SVC, "called");
    return GetCurrentThread()->processor_id;
}

static ResultCode MapSharedMemory(Handle shared_memory_handle, VAddr addr, u64 size,
                                  u32 permissions) {
    LOG_TRACE(Kernel_SVC,
              "called, shared_memory_handle=0x{:X}, addr=0x{:X}, size=0x{:X}, permissions=0x{:08X}",
              shared_memory_handle, addr, size, permissions);

    if (!Is4KBAligned(addr)) {
        return ERR_INVALID_ADDRESS;
    }

    if (size == 0 || !Is4KBAligned(size)) {
        return ERR_INVALID_SIZE;
    }

    const auto permissions_type = static_cast<MemoryPermission>(permissions);
    if (permissions_type != MemoryPermission::Read &&
        permissions_type != MemoryPermission::ReadWrite) {
        LOG_ERROR(Kernel_SVC, "Invalid permissions=0x{:08X}", permissions);
        return ERR_INVALID_MEMORY_PERMISSIONS;
    }

    auto& kernel = Core::System::GetInstance().Kernel();
    auto shared_memory = kernel.HandleTable().Get<SharedMemory>(shared_memory_handle);
    if (!shared_memory) {
        return ERR_INVALID_HANDLE;
    }

    return shared_memory->Map(Core::CurrentProcess().get(), addr, permissions_type,
                              MemoryPermission::DontCare);
}

static ResultCode UnmapSharedMemory(Handle shared_memory_handle, VAddr addr, u64 size) {
    LOG_WARNING(Kernel_SVC, "called, shared_memory_handle=0x{:08X}, addr=0x{:X}, size=0x{:X}",
                shared_memory_handle, addr, size);

    if (!Is4KBAligned(addr)) {
        return ERR_INVALID_ADDRESS;
    }

    if (size == 0 || !Is4KBAligned(size)) {
        return ERR_INVALID_SIZE;
    }

    auto& kernel = Core::System::GetInstance().Kernel();
    auto shared_memory = kernel.HandleTable().Get<SharedMemory>(shared_memory_handle);

    return shared_memory->Unmap(Core::CurrentProcess().get(), addr);
}

/// Query process memory
static ResultCode QueryProcessMemory(MemoryInfo* memory_info, PageInfo* /*page_info*/,
                                     Handle process_handle, u64 addr) {
    auto& kernel = Core::System::GetInstance().Kernel();
    SharedPtr<Process> process = kernel.HandleTable().Get<Process>(process_handle);
    if (!process) {
        return ERR_INVALID_HANDLE;
    }
    auto vma = process->vm_manager.FindVMA(addr);
    memory_info->attributes = 0;
    if (vma == Core::CurrentProcess()->vm_manager.vma_map.end()) {
        memory_info->base_address = 0;
        memory_info->permission = static_cast<u32>(VMAPermission::None);
        memory_info->size = 0;
        memory_info->type = static_cast<u32>(MemoryState::Unmapped);
    } else {
        memory_info->base_address = vma->second.base;
        memory_info->permission = static_cast<u32>(vma->second.permissions);
        memory_info->size = vma->second.size;
        memory_info->type = static_cast<u32>(vma->second.meminfo_state);
    }

    LOG_TRACE(Kernel_SVC, "called process=0x{:08X} addr={:X}", process_handle, addr);
    return RESULT_SUCCESS;
}

/// Query memory
static ResultCode QueryMemory(MemoryInfo* memory_info, PageInfo* page_info, VAddr addr) {
    LOG_TRACE(Kernel_SVC, "called, addr={:X}", addr);
    return QueryProcessMemory(memory_info, page_info, CurrentProcess, addr);
}

/// Exits the current process
static void ExitProcess() {
    LOG_INFO(Kernel_SVC, "Process {} exiting", Core::CurrentProcess()->process_id);

    ASSERT_MSG(Core::CurrentProcess()->status == ProcessStatus::Running,
               "Process has already exited");

    Core::CurrentProcess()->status = ProcessStatus::Exited;

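    // Stop every other thread owned by this process on each of the four core schedulers
    // before finally killing the current thread itself.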
    auto stop_threads = [](const std::vector<SharedPtr<Thread>>& thread_list) {
        for (auto& thread : thread_list) {
            if (thread->owner_process != Core::CurrentProcess())
                continue;

            if (thread == GetCurrentThread())
                continue;

            // TODO(Subv): When are the other running/ready threads terminated?
            ASSERT_MSG(thread->status == ThreadStatus::WaitSynchAny ||
                           thread->status == ThreadStatus::WaitSynchAll,
                       "Exiting processes with non-waiting threads is currently unimplemented");

            thread->Stop();
        }
    };

    auto& system = Core::System::GetInstance();
    stop_threads(system.Scheduler(0)->GetThreadList());
    stop_threads(system.Scheduler(1)->GetThreadList());
    stop_threads(system.Scheduler(2)->GetThreadList());
    stop_threads(system.Scheduler(3)->GetThreadList());

    // Kill the current thread
    GetCurrentThread()->Stop();

    Core::System::GetInstance().PrepareReschedule();
}

/// Creates a new thread
static ResultCode CreateThread(Handle* out_handle, VAddr entry_point, u64 arg, VAddr stack_top,
                               u32 priority, s32 processor_id) {
    std::string name = fmt::format("thread-{:X}", entry_point);

    if (priority > THREADPRIO_LOWEST) {
        return ERR_INVALID_THREAD_PRIORITY;
    }

    SharedPtr<ResourceLimit>& resource_limit = Core::CurrentProcess()->resource_limit;
    if (resource_limit->GetMaxResourceValue(ResourceType::Priority) > priority) {
        return ERR_NOT_AUTHORIZED;
    }

    if (processor_id == THREADPROCESSORID_DEFAULT) {
        // Set the target CPU to the one specified in the process' exheader.
        processor_id = Core::CurrentProcess()->ideal_processor;
        ASSERT(processor_id != THREADPROCESSORID_DEFAULT);
    }

    switch (processor_id) {
    case THREADPROCESSORID_0:
    case THREADPROCESSORID_1:
    case THREADPROCESSORID_2:
    case THREADPROCESSORID_3:
        break;
    default:
        LOG_ERROR(Kernel_SVC, "Invalid thread processor ID: {}", processor_id);
        return ERR_INVALID_PROCESSOR_ID;
    }

    auto& kernel = Core::System::GetInstance().Kernel();
    CASCADE_RESULT(SharedPtr<Thread> thread,
                   Thread::Create(kernel, name, entry_point, priority, arg, processor_id, stack_top,
                                  Core::CurrentProcess()));
    CASCADE_RESULT(thread->guest_handle, kernel.HandleTable().Create(thread));
    *out_handle = thread->guest_handle;

    Core::System::GetInstance().CpuCore(thread->processor_id).PrepareReschedule();

    LOG_TRACE(Kernel_SVC,
              "called entrypoint=0x{:08X} ({}), arg=0x{:08X}, stacktop=0x{:08X}, "
              "threadpriority=0x{:08X}, processorid=0x{:08X} : created handle=0x{:08X}",
              entry_point, name, arg, stack_top, priority, processor_id, *out_handle);

    return RESULT_SUCCESS;
}

/// Starts the thread for the provided handle
static ResultCode StartThread(Handle thread_handle) {
    LOG_TRACE(Kernel_SVC, "called thread=0x{:08X}", thread_handle);

    auto& kernel = Core::System::GetInstance().Kernel();
    const SharedPtr<Thread> thread = kernel.HandleTable().Get<Thread>(thread_handle);
    if (!thread) {
        return ERR_INVALID_HANDLE;
    }

    ASSERT(thread->status == ThreadStatus::Dormant);

    thread->ResumeFromWait();
    Core::System::GetInstance().CpuCore(thread->processor_id).PrepareReschedule();

    return RESULT_SUCCESS;
}

/// Called when a thread exits
static void ExitThread() {
    LOG_TRACE(Kernel_SVC, "called, pc=0x{:08X}", Core::CurrentArmInterface().GetPC());

    ExitCurrentThread();
    Core::System::GetInstance().PrepareReschedule();
}

/// Sleep the current thread
static void SleepThread(s64 nanoseconds) {
    LOG_TRACE(Kernel_SVC, "called nanoseconds={}", nanoseconds);

    // Don't attempt to yield execution if there are no available threads to run,
    // this way we avoid a useless reschedule to the idle thread.
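    // A zero-length sleep therefore acts as a simple yield to any other ready thread.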
    if (nanoseconds == 0 && !Core::System::GetInstance().CurrentScheduler().HaveReadyThreads())
        return;

    // Sleep current thread and check for next thread to schedule
    WaitCurrentThread_Sleep();

    // Create an event to wake the thread up after the specified nanosecond delay has passed
    GetCurrentThread()->WakeAfterDelay(nanoseconds);

    Core::System::GetInstance().PrepareReschedule();
}

/// Wait process wide key atomic
static ResultCode WaitProcessWideKeyAtomic(VAddr mutex_addr, VAddr condition_variable_addr,
                                           Handle thread_handle, s64 nano_seconds) {
    LOG_TRACE(
        Kernel_SVC,
        "called mutex_addr={:X}, condition_variable_addr={:X}, thread_handle=0x{:08X}, timeout={}",
        mutex_addr, condition_variable_addr, thread_handle, nano_seconds);

    auto& kernel = Core::System::GetInstance().Kernel();
    SharedPtr<Thread> thread = kernel.HandleTable().Get<Thread>(thread_handle);
    ASSERT(thread);

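    // Release the mutex first, then park the current thread on the condition variable; it
    // remains in ThreadStatus::WaitMutex until SignalProcessWideKey wakes it or the timeout fires.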
    CASCADE_CODE(Mutex::Release(mutex_addr));

    SharedPtr<Thread> current_thread = GetCurrentThread();
    current_thread->condvar_wait_address = condition_variable_addr;
    current_thread->mutex_wait_address = mutex_addr;
    current_thread->wait_handle = thread_handle;
    current_thread->status = ThreadStatus::WaitMutex;
    current_thread->wakeup_callback = nullptr;

    current_thread->WakeAfterDelay(nano_seconds);

    // Note: Deliberately don't attempt to inherit the lock owner's priority.

    Core::System::GetInstance().CpuCore(current_thread->processor_id).PrepareReschedule();
    return RESULT_SUCCESS;
}

/// Signal process wide key
static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target) {
    LOG_TRACE(Kernel_SVC, "called, condition_variable_addr=0x{:X}, target=0x{:08X}",
              condition_variable_addr, target);

    auto RetrieveWaitingThreads = [](std::size_t core_index,
                                     std::vector<SharedPtr<Thread>>& waiting_threads,
                                     VAddr condvar_addr) {
        const auto& scheduler = Core::System::GetInstance().Scheduler(core_index);
        auto& thread_list = scheduler->GetThreadList();

        for (auto& thread : thread_list) {
            if (thread->condvar_wait_address == condvar_addr)
                waiting_threads.push_back(thread);
        }
    };

    // Retrieve a list of all threads that are waiting for this condition variable.
    std::vector<SharedPtr<Thread>> waiting_threads;
    RetrieveWaitingThreads(0, waiting_threads, condition_variable_addr);
    RetrieveWaitingThreads(1, waiting_threads, condition_variable_addr);
    RetrieveWaitingThreads(2, waiting_threads, condition_variable_addr);
    RetrieveWaitingThreads(3, waiting_threads, condition_variable_addr);
    // Sort them by priority, such that the highest priority ones come first.
    std::sort(waiting_threads.begin(), waiting_threads.end(),
              [](const SharedPtr<Thread>& lhs, const SharedPtr<Thread>& rhs) {
                  return lhs->current_priority < rhs->current_priority;
              });

    // Only process up to 'target' threads, unless 'target' is -1, in which case process
    // them all.
    std::size_t last = waiting_threads.size();
    if (target != -1)
        last = target;

    // If there are no threads waiting on this condition variable, just exit
    if (last > waiting_threads.size())
        return RESULT_SUCCESS;

    for (std::size_t index = 0; index < last; ++index) {
        auto& thread = waiting_threads[index];

        ASSERT(thread->condvar_wait_address == condition_variable_addr);

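        // Each signalled thread either acquires the mutex immediately (and is resumed as its new
        // owner), or is queued on the current owner once the 'has waiters' flag has been set.
        // Both updates go through the exclusive monitor so they stay consistent with guest code
        // touching the mutex word at the same time.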
        std::size_t current_core = Core::System::GetInstance().CurrentCoreIndex();

        auto& monitor = Core::System::GetInstance().Monitor();

        // Atomically read the value of the mutex.
        u32 mutex_val = 0;
        do {
            monitor.SetExclusive(current_core, thread->mutex_wait_address);

            // If the mutex is not yet acquired, acquire it.
            mutex_val = Memory::Read32(thread->mutex_wait_address);

            if (mutex_val != 0) {
                monitor.ClearExclusive();
                break;
            }
        } while (!monitor.ExclusiveWrite32(current_core, thread->mutex_wait_address,
                                           thread->wait_handle));

        if (mutex_val == 0) {
            // We were able to acquire the mutex, resume this thread.
            ASSERT(thread->status == ThreadStatus::WaitMutex);
            thread->ResumeFromWait();

            auto lock_owner = thread->lock_owner;
            if (lock_owner)
                lock_owner->RemoveMutexWaiter(thread);

            thread->lock_owner = nullptr;
            thread->mutex_wait_address = 0;
            thread->condvar_wait_address = 0;
            thread->wait_handle = 0;
        } else {
            // Atomically signal that the mutex now has a waiting thread.
            do {
                monitor.SetExclusive(current_core, thread->mutex_wait_address);

                // Ensure that the mutex value is still what we expect.
                u32 value = Memory::Read32(thread->mutex_wait_address);
                // TODO(Subv): When this happens, the kernel just clears the exclusive state and
                // retries the initial read for this thread.
                ASSERT_MSG(mutex_val == value, "Unhandled synchronization primitive case");
            } while (!monitor.ExclusiveWrite32(current_core, thread->mutex_wait_address,
                                               mutex_val | Mutex::MutexHasWaitersFlag));

            // The mutex is already owned by some other thread, make this thread wait on it.
            auto& kernel = Core::System::GetInstance().Kernel();
            Handle owner_handle = static_cast<Handle>(mutex_val & Mutex::MutexOwnerMask);
            auto owner = kernel.HandleTable().Get<Thread>(owner_handle);
            ASSERT(owner);
            ASSERT(thread->status == ThreadStatus::WaitMutex);
            thread->wakeup_callback = nullptr;

            owner->AddMutexWaiter(thread);

            Core::System::GetInstance().CpuCore(thread->processor_id).PrepareReschedule();
        }
    }

    return RESULT_SUCCESS;
}

// Wait for an address (via Address Arbiter)
static ResultCode WaitForAddress(VAddr address, u32 type, s32 value, s64 timeout) {
    LOG_WARNING(Kernel_SVC, "called, address=0x{:X}, type=0x{:X}, value=0x{:X}, timeout={}",
                address, type, value, timeout);
    // If the passed address is a kernel virtual address, return invalid memory state.
    if (Memory::IsKernelVirtualAddress(address)) {
        return ERR_INVALID_ADDRESS_STATE;
    }
    // If the address is not properly aligned to 4 bytes, return invalid address.
    if (address % sizeof(u32) != 0) {
        return ERR_INVALID_ADDRESS;
    }

    switch (static_cast<AddressArbiter::ArbitrationType>(type)) {
    case AddressArbiter::ArbitrationType::WaitIfLessThan:
        return AddressArbiter::WaitForAddressIfLessThan(address, value, timeout, false);
    case AddressArbiter::ArbitrationType::DecrementAndWaitIfLessThan:
        return AddressArbiter::WaitForAddressIfLessThan(address, value, timeout, true);
    case AddressArbiter::ArbitrationType::WaitIfEqual:
        return AddressArbiter::WaitForAddressIfEqual(address, value, timeout);
    default:
        return ERR_INVALID_ENUM_VALUE;
    }
}

// Signals to an address (via Address Arbiter)
static ResultCode SignalToAddress(VAddr address, u32 type, s32 value, s32 num_to_wake) {
    LOG_WARNING(Kernel_SVC, "called, address=0x{:X}, type=0x{:X}, value=0x{:X}, num_to_wake=0x{:X}",
                address, type, value, num_to_wake);
    // If the passed address is a kernel virtual address, return invalid memory state.
    if (Memory::IsKernelVirtualAddress(address)) {
        return ERR_INVALID_ADDRESS_STATE;
    }
    // If the address is not properly aligned to 4 bytes, return invalid address.
    if (address % sizeof(u32) != 0) {
        return ERR_INVALID_ADDRESS;
    }

    switch (static_cast<AddressArbiter::SignalType>(type)) {
    case AddressArbiter::SignalType::Signal:
        return AddressArbiter::SignalToAddress(address, num_to_wake);
    case AddressArbiter::SignalType::IncrementAndSignalIfEqual:
        return AddressArbiter::IncrementAndSignalToAddressIfEqual(address, value, num_to_wake);
    case AddressArbiter::SignalType::ModifyByWaitingCountAndSignalIfEqual:
        return AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(address, value,
                                                                             num_to_wake);
    default:
        return ERR_INVALID_ENUM_VALUE;
    }
}

/// This returns the total CPU ticks elapsed since the CPU was powered-on
static u64 GetSystemTick() {
    const u64 result{CoreTiming::GetTicks()};

    // Advance time to defeat dumb games that busy-wait for the frame to end.
    CoreTiming::AddTicks(400);

    return result;
}

/// Close a handle
static ResultCode CloseHandle(Handle handle) {
    LOG_TRACE(Kernel_SVC, "Closing handle 0x{:08X}", handle);

    auto& kernel = Core::System::GetInstance().Kernel();
    return kernel.HandleTable().Close(handle);
}

/// Reset an event
static ResultCode ResetSignal(Handle handle) {
    LOG_WARNING(Kernel_SVC, "(STUBBED) called handle 0x{:08X}", handle);

    auto& kernel = Core::System::GetInstance().Kernel();
    auto event = kernel.HandleTable().Get<Event>(handle);

    ASSERT(event != nullptr);

    event->Clear();
    return RESULT_SUCCESS;
}

/// Creates a TransferMemory object
static ResultCode CreateTransferMemory(Handle* handle, VAddr addr, u64 size, u32 permissions) {
    LOG_WARNING(Kernel_SVC, "(STUBBED) called addr=0x{:X}, size=0x{:X}, perms=0x{:08X}", addr, size,
                permissions);
    *handle = 0;
    return RESULT_SUCCESS;
}

static ResultCode GetThreadCoreMask(Handle thread_handle, u32* core, u64* mask) {
    LOG_TRACE(Kernel_SVC, "called, handle=0x{:08X}", thread_handle);

    auto& kernel = Core::System::GetInstance().Kernel();
    const SharedPtr<Thread> thread = kernel.HandleTable().Get<Thread>(thread_handle);
    if (!thread) {
        return ERR_INVALID_HANDLE;
    }

    *core = thread->ideal_core;
    *mask = thread->affinity_mask;

    return RESULT_SUCCESS;
}

static ResultCode SetThreadCoreMask(Handle thread_handle, u32 core, u64 mask) {
    LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, mask=0x{:16X}, core=0x{:X}", thread_handle,
              mask, core);

    auto& kernel = Core::System::GetInstance().Kernel();
    const SharedPtr<Thread> thread = kernel.HandleTable().Get<Thread>(thread_handle);
    if (!thread) {
        return ERR_INVALID_HANDLE;
    }

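    // A core of THREADPROCESSORID_DEFAULT selects the owning process' ideal core and collapses
    // the affinity mask down to just that core.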
    if (core == static_cast<u32>(THREADPROCESSORID_DEFAULT)) {
        ASSERT(thread->owner_process->ideal_processor !=
               static_cast<u8>(THREADPROCESSORID_DEFAULT));
        // Set the target CPU to the one specified in the process' exheader.
        core = thread->owner_process->ideal_processor;
        mask = 1ull << core;
    }

    if (mask == 0) {
        return ResultCode(ErrorModule::Kernel, ErrCodes::InvalidCombination);
    }

    /// This value is used to only change the affinity mask without changing the current ideal core.
    static constexpr u32 OnlyChangeMask = static_cast<u32>(-3);

    if (core == OnlyChangeMask) {
        core = thread->ideal_core;
    } else if (core >= Core::NUM_CPU_CORES && core != static_cast<u32>(-1)) {
        return ResultCode(ErrorModule::Kernel, ErrCodes::InvalidProcessorId);
    }

    // Error out if the input core isn't enabled in the input mask.
    if (core < Core::NUM_CPU_CORES && (mask & (1ull << core)) == 0) {
        return ResultCode(ErrorModule::Kernel, ErrCodes::InvalidCombination);
    }

    thread->ChangeCore(core, mask);

    return RESULT_SUCCESS;
}

static ResultCode CreateSharedMemory(Handle* handle, u64 size, u32 local_permissions,
                                     u32 remote_permissions) {
    LOG_TRACE(Kernel_SVC, "called, size=0x{:X}, localPerms=0x{:08X}, remotePerms=0x{:08X}", size,
              local_permissions, remote_permissions);

    // Size must be a multiple of 4KB and be less than or equal to
    // approx. 8 GB (actually (1GB - 512B) * 8)
    if (size == 0 || (size & 0xFFFFFFFE00000FFF) != 0) {
        return ERR_INVALID_SIZE;
    }

    const auto local_perms = static_cast<MemoryPermission>(local_permissions);
    if (local_perms != MemoryPermission::Read && local_perms != MemoryPermission::ReadWrite) {
        return ERR_INVALID_MEMORY_PERMISSIONS;
    }

    const auto remote_perms = static_cast<MemoryPermission>(remote_permissions);
    if (remote_perms != MemoryPermission::Read && remote_perms != MemoryPermission::ReadWrite &&
        remote_perms != MemoryPermission::DontCare) {
        return ERR_INVALID_MEMORY_PERMISSIONS;
    }

    auto& kernel = Core::System::GetInstance().Kernel();
    auto& handle_table = kernel.HandleTable();
    auto shared_mem_handle =
        SharedMemory::Create(kernel, handle_table.Get<Process>(KernelHandle::CurrentProcess), size,
                             local_perms, remote_perms);

    CASCADE_RESULT(*handle, handle_table.Create(shared_mem_handle));
    return RESULT_SUCCESS;
}

static ResultCode ClearEvent(Handle handle) {
    LOG_TRACE(Kernel_SVC, "called, event=0x{:08X}", handle);

    auto& kernel = Core::System::GetInstance().Kernel();
    SharedPtr<Event> evt = kernel.HandleTable().Get<Event>(handle);
    if (evt == nullptr)
        return ERR_INVALID_HANDLE;
    evt->Clear();
    return RESULT_SUCCESS;
}

namespace {
struct FunctionDef {
    using Func = void();

    u32 id;
    Func* func;
    const char* name;
};
} // namespace

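// Dispatch table for supervisor calls, ordered by SVC number; entries with a nullptr handler
// are SVCs that have not been implemented yet.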
static const FunctionDef SVC_Table[] = {
    {0x00, nullptr, "Unknown"},
    {0x01, SvcWrap<SetHeapSize>, "SetHeapSize"},
    {0x02, nullptr, "SetMemoryPermission"},
    {0x03, SvcWrap<SetMemoryAttribute>, "SetMemoryAttribute"},
    {0x04, SvcWrap<MapMemory>, "MapMemory"},
    {0x05, SvcWrap<UnmapMemory>, "UnmapMemory"},
    {0x06, SvcWrap<QueryMemory>, "QueryMemory"},
    {0x07, SvcWrap<ExitProcess>, "ExitProcess"},
    {0x08, SvcWrap<CreateThread>, "CreateThread"},
    {0x09, SvcWrap<StartThread>, "StartThread"},
    {0x0A, SvcWrap<ExitThread>, "ExitThread"},
    {0x0B, SvcWrap<SleepThread>, "SleepThread"},
    {0x0C, SvcWrap<GetThreadPriority>, "GetThreadPriority"},
    {0x0D, SvcWrap<SetThreadPriority>, "SetThreadPriority"},
    {0x0E, SvcWrap<GetThreadCoreMask>, "GetThreadCoreMask"},
    {0x0F, SvcWrap<SetThreadCoreMask>, "SetThreadCoreMask"},
    {0x10, SvcWrap<GetCurrentProcessorNumber>, "GetCurrentProcessorNumber"},
    {0x11, nullptr, "SignalEvent"},
    {0x12, SvcWrap<ClearEvent>, "ClearEvent"},
    {0x13, SvcWrap<MapSharedMemory>, "MapSharedMemory"},
    {0x14, SvcWrap<UnmapSharedMemory>, "UnmapSharedMemory"},
    {0x15, SvcWrap<CreateTransferMemory>, "CreateTransferMemory"},
    {0x16, SvcWrap<CloseHandle>, "CloseHandle"},
    {0x17, SvcWrap<ResetSignal>, "ResetSignal"},
    {0x18, SvcWrap<WaitSynchronization>, "WaitSynchronization"},
    {0x19, SvcWrap<CancelSynchronization>, "CancelSynchronization"},
    {0x1A, SvcWrap<ArbitrateLock>, "ArbitrateLock"},
    {0x1B, SvcWrap<ArbitrateUnlock>, "ArbitrateUnlock"},
    {0x1C, SvcWrap<WaitProcessWideKeyAtomic>, "WaitProcessWideKeyAtomic"},
    {0x1D, SvcWrap<SignalProcessWideKey>, "SignalProcessWideKey"},
    {0x1E, SvcWrap<GetSystemTick>, "GetSystemTick"},
    {0x1F, SvcWrap<ConnectToNamedPort>, "ConnectToNamedPort"},
    {0x20, nullptr, "SendSyncRequestLight"},
    {0x21, SvcWrap<SendSyncRequest>, "SendSyncRequest"},
    {0x22, nullptr, "SendSyncRequestWithUserBuffer"},
    {0x23, nullptr, "SendAsyncRequestWithUserBuffer"},
    {0x24, SvcWrap<GetProcessId>, "GetProcessId"},
    {0x25, SvcWrap<GetThreadId>, "GetThreadId"},
    {0x26, SvcWrap<Break>, "Break"},
    {0x27, SvcWrap<OutputDebugString>, "OutputDebugString"},
    {0x28, nullptr, "ReturnFromException"},
    {0x29, SvcWrap<GetInfo>, "GetInfo"},
    {0x2A, nullptr, "FlushEntireDataCache"},
    {0x2B, nullptr, "FlushDataCache"},
    {0x2C, nullptr, "MapPhysicalMemory"},
    {0x2D, nullptr, "UnmapPhysicalMemory"},
    {0x2E, nullptr, "GetNextThreadInfo"},
    {0x2F, nullptr, "GetLastThreadInfo"},
    {0x30, nullptr, "GetResourceLimitLimitValue"},
    {0x31, nullptr, "GetResourceLimitCurrentValue"},
    {0x32, SvcWrap<SetThreadActivity>, "SetThreadActivity"},
    {0x33, SvcWrap<GetThreadContext>, "GetThreadContext"},
    {0x34, SvcWrap<WaitForAddress>, "WaitForAddress"},
    {0x35, SvcWrap<SignalToAddress>, "SignalToAddress"},
    {0x36, nullptr, "Unknown"},
    {0x37, nullptr, "Unknown"},
    {0x38, nullptr, "Unknown"},
    {0x39, nullptr, "Unknown"},
    {0x3A, nullptr, "Unknown"},
    {0x3B, nullptr, "Unknown"},
    {0x3C, nullptr, "DumpInfo"},
    {0x3D, nullptr, "DumpInfoNew"},
    {0x3E, nullptr, "Unknown"},
    {0x3F, nullptr, "Unknown"},
    {0x40, nullptr, "CreateSession"},
    {0x41, nullptr, "AcceptSession"},
    {0x42, nullptr, "ReplyAndReceiveLight"},
    {0x43, nullptr, "ReplyAndReceive"},
    {0x44, nullptr, "ReplyAndReceiveWithUserBuffer"},
    {0x45, nullptr, "CreateEvent"},
    {0x46, nullptr, "Unknown"},
    {0x47, nullptr, "Unknown"},
    {0x48, nullptr, "AllocateUnsafeMemory"},
    {0x49, nullptr, "FreeUnsafeMemory"},
    {0x4A, nullptr, "SetUnsafeAllocationLimit"},
    {0x4B, nullptr, "CreateJitMemory"},
    {0x4C, nullptr, "MapJitMemory"},
    {0x4D, nullptr, "SleepSystem"},
    {0x4E, nullptr, "ReadWriteRegister"},
    {0x4F, nullptr, "SetProcessActivity"},
    {0x50, SvcWrap<CreateSharedMemory>, "CreateSharedMemory"},
    {0x51, nullptr, "MapTransferMemory"},
    {0x52, nullptr, "UnmapTransferMemory"},
    {0x53, nullptr, "CreateInterruptEvent"},
    {0x54, nullptr, "QueryPhysicalAddress"},
    {0x55, nullptr, "QueryIoMapping"},
    {0x56, nullptr, "CreateDeviceAddressSpace"},
    {0x57, nullptr, "AttachDeviceAddressSpace"},
    {0x58, nullptr, "DetachDeviceAddressSpace"},
    {0x59, nullptr, "MapDeviceAddressSpaceByForce"},
    {0x5A, nullptr, "MapDeviceAddressSpaceAligned"},
    {0x5B, nullptr, "MapDeviceAddressSpace"},
    {0x5C, nullptr, "UnmapDeviceAddressSpace"},
    {0x5D, nullptr, "InvalidateProcessDataCache"},
    {0x5E, nullptr, "StoreProcessDataCache"},
    {0x5F, nullptr, "FlushProcessDataCache"},
    {0x60, nullptr, "DebugActiveProcess"},
    {0x61, nullptr, "BreakDebugProcess"},
    {0x62, nullptr, "TerminateDebugProcess"},
    {0x63, nullptr, "GetDebugEvent"},
    {0x64, nullptr, "ContinueDebugEvent"},
    {0x65, nullptr, "GetProcessList"},
    {0x66, nullptr, "GetThreadList"},
    {0x67, nullptr, "GetDebugThreadContext"},
    {0x68, nullptr, "SetDebugThreadContext"},
    {0x69, nullptr, "QueryDebugProcessMemory"},
    {0x6A, nullptr, "ReadDebugProcessMemory"},
    {0x6B, nullptr, "WriteDebugProcessMemory"},
    {0x6C, nullptr, "SetHardwareBreakPoint"},
    {0x6D, nullptr, "GetDebugThreadParam"},
    {0x6E, nullptr, "Unknown"},
    {0x6F, nullptr, "GetMemoryInfo"},
    {0x70, nullptr, "CreatePort"},
    {0x71, nullptr, "ManageNamedPort"},
    {0x72, nullptr, "ConnectToPort"},
    {0x73, nullptr, "SetProcessMemoryPermission"},
    {0x74, nullptr, "MapProcessMemory"},
    {0x75, nullptr, "UnmapProcessMemory"},
    {0x76, nullptr, "QueryProcessMemory"},
    {0x77, nullptr, "MapProcessCodeMemory"},
    {0x78, nullptr, "UnmapProcessCodeMemory"},
    {0x79, nullptr, "CreateProcess"},
    {0x7A, nullptr, "StartProcess"},
    {0x7B, nullptr, "TerminateProcess"},
    {0x7C, nullptr, "GetProcessInfo"},
    {0x7D, nullptr, "CreateResourceLimit"},
    {0x7E, nullptr, "SetResourceLimitLimitValue"},
    {0x7F, nullptr, "CallSecureMonitor"},
};

static const FunctionDef* GetSVCInfo(u32 func_num) {
    if (func_num >= std::size(SVC_Table)) {
        LOG_ERROR(Kernel_SVC, "Unknown svc=0x{:02X}", func_num);
        return nullptr;
    }
    return &SVC_Table[func_num];
}

MICROPROFILE_DEFINE(Kernel_SVC, "Kernel", "SVC", MP_RGB(70, 200, 70));
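
// Dispatches the SVC selected by the immediate through SVC_Table, logging unimplemented and
// unknown calls.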
void CallSVC(u32 immediate) {
    MICROPROFILE_SCOPE(Kernel_SVC);

    // Lock the global kernel mutex when we enter the kernel HLE.
    std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock);

    const FunctionDef* info = GetSVCInfo(immediate);
    if (info) {
        if (info->func) {
            info->func();
        } else {
            LOG_CRITICAL(Kernel_SVC, "Unimplemented SVC function {}(..)", info->name);
        }
    } else {
        LOG_CRITICAL(Kernel_SVC, "Unknown SVC function 0x{:X}", immediate);
    }
}

} // namespace Kernel