// SPDX-FileCopyrightText: 2015 Citra Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#include <algorithm>
#include <cstring>

#include "common/alignment.h"
#include "common/assert.h"
#include "common/atomic_ops.h"
#include "common/cache_management.h"
#include "common/common_types.h"
#include "common/logging/log.h"
#include "common/page_table.h"
#include "common/settings.h"
#include "common/swap.h"
#include "core/core.h"
#include "core/device_memory.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_process.h"
#include "core/memory.h"
#include "video_core/gpu.h"

namespace Core::Memory {

// Implementation class used to keep the specifics of the memory subsystem hidden
// from outside classes. This also allows modification to the internals of the memory
// subsystem without needing to rebuild all files that make use of the memory interface.
struct Memory::Impl {
    explicit Impl(Core::System& system_) : system{system_} {}

    void SetCurrentPageTable(Kernel::KProcess& process, u32 core_id) {
        current_page_table = &process.PageTable().PageTableImpl();
        current_page_table->fastmem_arena = system.DeviceMemory().buffer.VirtualBasePointer();

        const std::size_t address_space_width = process.PageTable().GetAddressSpaceWidth();

        system.ArmInterface(core_id).PageTableChanged(*current_page_table, address_space_width);
    }

    void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, PAddr target) {
        ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
        ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", base);
        ASSERT_MSG(target >= DramMemoryMap::Base, "Out of bounds target: {:016X}", target);
        MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, target,
                 Common::PageType::Memory);

        if (Settings::IsFastmemEnabled()) {
            system.DeviceMemory().buffer.Map(base, target - DramMemoryMap::Base, size);
        }
    }
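
    // For illustration (hypothetical values, assuming the 4 KiB page size implied by
    // YUZU_PAGEBITS): MapMemoryRegion(pt, 0x8000000, 0x2000, target) passes
    // base = 0x8000 and size = 2 to MapPages, i.e. both arguments arrive measured in
    // pages, and two consecutive page-table entries are written.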

    void UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) {
        ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
        ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", base);
        MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, 0,
                 Common::PageType::Unmapped);

        if (Settings::IsFastmemEnabled()) {
            system.DeviceMemory().buffer.Unmap(base, size);
        }
    }

    [[nodiscard]] u8* GetPointerFromRasterizerCachedMemory(VAddr vaddr) const {
        const PAddr paddr{current_page_table->backing_addr[vaddr >> YUZU_PAGEBITS]};

        if (!paddr) {
            return {};
        }

        return system.DeviceMemory().GetPointer<u8>(paddr) + vaddr;
    }

    [[nodiscard]] u8* GetPointerFromDebugMemory(VAddr vaddr) const {
        const PAddr paddr{current_page_table->backing_addr[vaddr >> YUZU_PAGEBITS]};

        if (paddr == 0) {
            return {};
        }

        return system.DeviceMemory().GetPointer<u8>(paddr) + vaddr;
    }
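
    // Note: backing_addr entries are biased, not absolute: MapPages (below) stores
    // target - (base << YUZU_PAGEBITS), so adding the *full* vaddr above reconstructs
    // the physical address of the requested byte, page offset included.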

    u8 Read8(const VAddr addr) {
        return Read<u8>(addr);
    }

    u16 Read16(const VAddr addr) {
        if ((addr & 1) == 0) {
            return Read<u16_le>(addr);
        } else {
            const u32 a{Read<u8>(addr)};
            const u32 b{Read<u8>(addr + sizeof(u8))};
            return static_cast<u16>((b << 8) | a);
        }
    }

    u32 Read32(const VAddr addr) {
        if ((addr & 3) == 0) {
            return Read<u32_le>(addr);
        } else {
            const u32 a{Read16(addr)};
            const u32 b{Read16(addr + sizeof(u16))};
            return (b << 16) | a;
        }
    }

    u64 Read64(const VAddr addr) {
        if ((addr & 7) == 0) {
            return Read<u64_le>(addr);
        } else {
            const u32 a{Read32(addr)};
            const u32 b{Read32(addr + sizeof(u32))};
            return (static_cast<u64>(b) << 32) | a;
        }
    }
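
    // For illustration (hypothetical address): Read32(0x1002) is misaligned, so it
    // decomposes into a = Read16(0x1002) and b = Read16(0x1004) and recombines them
    // as (b << 16) | a; each half is itself 2-byte aligned here, so no further split
    // occurs. In the worst case the scheme bottoms out at byte reads. The Write
    // functions below mirror this decomposition in the other direction.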

    void Write8(const VAddr addr, const u8 data) {
        Write<u8>(addr, data);
    }

    void Write16(const VAddr addr, const u16 data) {
        if ((addr & 1) == 0) {
            Write<u16_le>(addr, data);
        } else {
            Write<u8>(addr, static_cast<u8>(data));
            Write<u8>(addr + sizeof(u8), static_cast<u8>(data >> 8));
        }
    }

    void Write32(const VAddr addr, const u32 data) {
        if ((addr & 3) == 0) {
            Write<u32_le>(addr, data);
        } else {
            Write16(addr, static_cast<u16>(data));
            Write16(addr + sizeof(u16), static_cast<u16>(data >> 16));
        }
    }

    void Write64(const VAddr addr, const u64 data) {
        if ((addr & 7) == 0) {
            Write<u64_le>(addr, data);
        } else {
            Write32(addr, static_cast<u32>(data));
            Write32(addr + sizeof(u32), static_cast<u32>(data >> 32));
        }
    }

    bool WriteExclusive8(const VAddr addr, const u8 data, const u8 expected) {
        return WriteExclusive<u8>(addr, data, expected);
    }

    bool WriteExclusive16(const VAddr addr, const u16 data, const u16 expected) {
        return WriteExclusive<u16_le>(addr, data, expected);
    }

    bool WriteExclusive32(const VAddr addr, const u32 data, const u32 expected) {
        return WriteExclusive<u32_le>(addr, data, expected);
    }

    bool WriteExclusive64(const VAddr addr, const u64 data, const u64 expected) {
        return WriteExclusive<u64_le>(addr, data, expected);
    }

    std::string ReadCString(VAddr vaddr, std::size_t max_length) {
        std::string string;
        string.reserve(max_length);
        for (std::size_t i = 0; i < max_length; ++i) {
            const char c = Read<s8>(vaddr);
            if (c == '\0') {
                break;
            }
            string.push_back(c);
            ++vaddr;
        }
        string.shrink_to_fit();
        return string;
    }
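
    // For illustration (hypothetical guest data): with the bytes "abc\0..." at vaddr,
    // ReadCString(vaddr, 16) returns "abc"; if no NUL terminator appears within
    // max_length bytes, exactly max_length characters are returned.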

    void WalkBlock(const Kernel::KProcess& process, const VAddr addr, const std::size_t size,
                   auto on_unmapped, auto on_memory, auto on_rasterizer, auto increment) {
        const auto& page_table = process.PageTable().PageTableImpl();
        std::size_t remaining_size = size;
        std::size_t page_index = addr >> YUZU_PAGEBITS;
        std::size_t page_offset = addr & YUZU_PAGEMASK;

        while (remaining_size) {
            const std::size_t copy_amount =
                std::min(static_cast<std::size_t>(YUZU_PAGESIZE) - page_offset, remaining_size);
            const auto current_vaddr =
                static_cast<VAddr>((page_index << YUZU_PAGEBITS) + page_offset);

            const auto [pointer, type] = page_table.pointers[page_index].PointerType();
            switch (type) {
            case Common::PageType::Unmapped: {
                on_unmapped(copy_amount, current_vaddr);
                break;
            }
            case Common::PageType::Memory: {
                u8* mem_ptr = pointer + page_offset + (page_index << YUZU_PAGEBITS);
                on_memory(copy_amount, mem_ptr);
                break;
            }
            case Common::PageType::DebugMemory: {
                u8* const mem_ptr{GetPointerFromDebugMemory(current_vaddr)};
                on_memory(copy_amount, mem_ptr);
                break;
            }
            case Common::PageType::RasterizerCachedMemory: {
                u8* const host_ptr{GetPointerFromRasterizerCachedMemory(current_vaddr)};
                on_rasterizer(current_vaddr, copy_amount, host_ptr);
                break;
            }
            default:
                UNREACHABLE();
            }

            page_index++;
            page_offset = 0;
            increment(copy_amount);
            remaining_size -= copy_amount;
        }
    }
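
    // For illustration (hypothetical caller): counting the accessible bytes in a
    // range with WalkBlock, using no-op callbacks where nothing needs to happen:
    //
    //   std::size_t accessible = 0;
    //   WalkBlock(
    //       process, addr, size,
    //       [](const std::size_t, const VAddr) {},                           // unmapped
    //       [&](const std::size_t n, u8*) { accessible += n; },              // plain/debug memory
    //       [&](const VAddr, const std::size_t n, u8*) { accessible += n; }, // rasterizer-cached
    //       [](const std::size_t) {});                                       // advance buffers
    //
    // ReadBlockImpl below is a real instance of the same pattern.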

    template <bool UNSAFE>
    void ReadBlockImpl(const Kernel::KProcess& process, const VAddr src_addr, void* dest_buffer,
                       const std::size_t size) {
        WalkBlock(
            process, src_addr, size,
            [src_addr, size, &dest_buffer](const std::size_t copy_amount,
                                           const VAddr current_vaddr) {
                LOG_ERROR(HW_Memory,
                          "Unmapped ReadBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
                          current_vaddr, src_addr, size);
                std::memset(dest_buffer, 0, copy_amount);
            },
            [&](const std::size_t copy_amount, const u8* const src_ptr) {
                std::memcpy(dest_buffer, src_ptr, copy_amount);
            },
            [&](const VAddr current_vaddr, const std::size_t copy_amount,
                const u8* const host_ptr) {
                if constexpr (!UNSAFE) {
                    system.GPU().FlushRegion(current_vaddr, copy_amount);
                }
                std::memcpy(dest_buffer, host_ptr, copy_amount);
            },
            [&](const std::size_t copy_amount) {
                dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount;
            });
    }

    void ReadBlock(const VAddr src_addr, void* dest_buffer, const std::size_t size) {
        ReadBlockImpl<false>(*system.CurrentProcess(), src_addr, dest_buffer, size);
    }

    void ReadBlockUnsafe(const VAddr src_addr, void* dest_buffer, const std::size_t size) {
        ReadBlockImpl<true>(*system.CurrentProcess(), src_addr, dest_buffer, size);
    }
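
    // Note: the "Unsafe" variants instantiate the walk with UNSAFE = true, which
    // skips the GPU flush/invalidate on rasterizer-cached pages. They are presumably
    // only appropriate when the caller already knows the GPU cannot hold a newer
    // copy of the region, or can tolerate stale data.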

    template <bool UNSAFE>
    void WriteBlockImpl(const Kernel::KProcess& process, const VAddr dest_addr,
                        const void* src_buffer, const std::size_t size) {
        WalkBlock(
            process, dest_addr, size,
            [dest_addr, size](const std::size_t copy_amount, const VAddr current_vaddr) {
                LOG_ERROR(HW_Memory,
                          "Unmapped WriteBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
                          current_vaddr, dest_addr, size);
            },
            [&](const std::size_t copy_amount, u8* const dest_ptr) {
                std::memcpy(dest_ptr, src_buffer, copy_amount);
            },
            [&](const VAddr current_vaddr, const std::size_t copy_amount, u8* const host_ptr) {
                if constexpr (!UNSAFE) {
                    system.GPU().InvalidateRegion(current_vaddr, copy_amount);
                }
                std::memcpy(host_ptr, src_buffer, copy_amount);
            },
            [&](const std::size_t copy_amount) {
                src_buffer = static_cast<const u8*>(src_buffer) + copy_amount;
            });
    }

    void WriteBlock(const VAddr dest_addr, const void* src_buffer, const std::size_t size) {
        WriteBlockImpl<false>(*system.CurrentProcess(), dest_addr, src_buffer, size);
    }

    void WriteBlockUnsafe(const VAddr dest_addr, const void* src_buffer, const std::size_t size) {
        WriteBlockImpl<true>(*system.CurrentProcess(), dest_addr, src_buffer, size);
    }

    void ZeroBlock(const Kernel::KProcess& process, const VAddr dest_addr, const std::size_t size) {
        WalkBlock(
            process, dest_addr, size,
            [dest_addr, size](const std::size_t copy_amount, const VAddr current_vaddr) {
                LOG_ERROR(HW_Memory,
                          "Unmapped ZeroBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
                          current_vaddr, dest_addr, size);
            },
            [](const std::size_t copy_amount, u8* const dest_ptr) {
                std::memset(dest_ptr, 0, copy_amount);
            },
            [&](const VAddr current_vaddr, const std::size_t copy_amount, u8* const host_ptr) {
                system.GPU().InvalidateRegion(current_vaddr, copy_amount);
                std::memset(host_ptr, 0, copy_amount);
            },
            [](const std::size_t copy_amount) {});
    }

    void CopyBlock(const Kernel::KProcess& process, VAddr dest_addr, VAddr src_addr,
                   const std::size_t size) {
        WalkBlock(
            process, dest_addr, size,
            [&](const std::size_t copy_amount, const VAddr current_vaddr) {
                LOG_ERROR(HW_Memory,
                          "Unmapped CopyBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
                          current_vaddr, src_addr, size);
                ZeroBlock(process, dest_addr, copy_amount);
            },
            [&](const std::size_t copy_amount, const u8* const src_ptr) {
                WriteBlockImpl<false>(process, dest_addr, src_ptr, copy_amount);
            },
            [&](const VAddr current_vaddr, const std::size_t copy_amount, u8* const host_ptr) {
                system.GPU().FlushRegion(current_vaddr, copy_amount);
                WriteBlockImpl<false>(process, dest_addr, host_ptr, copy_amount);
            },
            [&](const std::size_t copy_amount) {
                dest_addr += static_cast<VAddr>(copy_amount);
                src_addr += static_cast<VAddr>(copy_amount);
            });
    }

    template <typename Callback>
    Result PerformCacheOperation(const Kernel::KProcess& process, VAddr dest_addr, std::size_t size,
                                 Callback&& cb) {
        class InvalidMemoryException : public std::exception {};

        try {
            WalkBlock(
                process, dest_addr, size,
                [&](const std::size_t block_size, const VAddr current_vaddr) {
                    LOG_ERROR(HW_Memory, "Unmapped cache maintenance @ {:#018X}", current_vaddr);
                    // WalkBlock has no early-exit mechanism, so throwing is the only
                    // way to abort the walk when an unmapped page is encountered.
                    throw InvalidMemoryException();
                },
                [&](const std::size_t block_size, u8* const host_ptr) { cb(block_size, host_ptr); },
                [&](const VAddr current_vaddr, const std::size_t block_size, u8* const host_ptr) {
                    system.GPU().FlushRegion(current_vaddr, block_size);
                    cb(block_size, host_ptr);
                },
                [](const std::size_t block_size) {});
        } catch (InvalidMemoryException&) {
            return Kernel::ResultInvalidCurrentMemory;
        }

        return ResultSuccess;
    }

    Result InvalidateDataCache(const Kernel::KProcess& process, VAddr dest_addr, std::size_t size) {
        auto perform = [&](const std::size_t block_size, u8* const host_ptr) {
            // Do nothing; this operation (dc ivac) cannot be supported
            // from EL0
        };
        return PerformCacheOperation(process, dest_addr, size, perform);
    }

    Result StoreDataCache(const Kernel::KProcess& process, VAddr dest_addr, std::size_t size) {
        auto perform = [&](const std::size_t block_size, u8* const host_ptr) {
            // dc cvac: Store to point of coherency
            Common::DataCacheLineCleanByVAToPoC(host_ptr, block_size);
        };
        return PerformCacheOperation(process, dest_addr, size, perform);
    }

    Result FlushDataCache(const Kernel::KProcess& process, VAddr dest_addr, std::size_t size) {
        auto perform = [&](const std::size_t block_size, u8* const host_ptr) {
            // dc civac: Store to point of coherency, and invalidate from cache
            Common::DataCacheLineCleanAndInvalidateByVAToPoC(host_ptr, block_size);
        };
        return PerformCacheOperation(process, dest_addr, size, perform);
    }

    void MarkRegionDebug(VAddr vaddr, u64 size, bool debug) {
        if (vaddr == 0) {
            return;
        }

        // Iterate over a contiguous CPU address space, marking/unmarking the region.
        // The region is at a granularity of CPU pages.

        const u64 num_pages = ((vaddr + size - 1) >> YUZU_PAGEBITS) - (vaddr >> YUZU_PAGEBITS) + 1;
        for (u64 i = 0; i < num_pages; ++i, vaddr += YUZU_PAGESIZE) {
            const Common::PageType page_type{
                current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Type()};
            if (debug) {
                // Switch page type to debug if now debug
                switch (page_type) {
                case Common::PageType::Unmapped:
                    ASSERT_MSG(false, "Attempted to mark unmapped pages as debug");
                    break;
                case Common::PageType::RasterizerCachedMemory:
                case Common::PageType::DebugMemory:
                    // Page is already marked.
                    break;
                case Common::PageType::Memory:
                    current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
                        nullptr, Common::PageType::DebugMemory);
                    break;
                default:
                    UNREACHABLE();
                }
            } else {
                // Switch page type to non-debug if now non-debug
                switch (page_type) {
                case Common::PageType::Unmapped:
                    ASSERT_MSG(false, "Attempted to mark unmapped pages as non-debug");
                    break;
                case Common::PageType::RasterizerCachedMemory:
                case Common::PageType::Memory:
                    // Don't mess with already non-debug or rasterizer memory.
                    break;
                case Common::PageType::DebugMemory: {
                    u8* const pointer{GetPointerFromDebugMemory(vaddr & ~YUZU_PAGEMASK)};
                    current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
                        pointer - (vaddr & ~YUZU_PAGEMASK), Common::PageType::Memory);
                    break;
                }
                default:
                    UNREACHABLE();
                }
            }
        }
    }

    void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) {
        if (vaddr == 0) {
            return;
        }

        if (Settings::IsFastmemEnabled()) {
            const bool is_read_enable = Settings::IsGPULevelHigh() || !cached;
            system.DeviceMemory().buffer.Protect(vaddr, size, is_read_enable, !cached);
        }

        // Iterate over a contiguous CPU address space, which corresponds to the specified GPU
        // address space, marking the region as un/cached. The region is marked un/cached at a
        // granularity of CPU pages, hence why we iterate on a CPU page basis (note: GPU page size
        // is different). This assumes the specified GPU address region is contiguous as well.

        const u64 num_pages = ((vaddr + size - 1) >> YUZU_PAGEBITS) - (vaddr >> YUZU_PAGEBITS) + 1;
        for (u64 i = 0; i < num_pages; ++i, vaddr += YUZU_PAGESIZE) {
            const Common::PageType page_type{
                current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Type()};
            if (cached) {
                // Switch page type to cached if now cached
                switch (page_type) {
                case Common::PageType::Unmapped:
                    // It is not necessary for a process to have this region mapped into its address
                    // space, for example, a system module need not have a VRAM mapping.
                    break;
                case Common::PageType::DebugMemory:
                case Common::PageType::Memory:
                    current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
                        nullptr, Common::PageType::RasterizerCachedMemory);
                    break;
                case Common::PageType::RasterizerCachedMemory:
                    // There can be more than one GPU region mapped per CPU region, so it's common
                    // that this area is already marked as cached.
                    break;
                default:
                    UNREACHABLE();
                }
            } else {
                // Switch page type to uncached if now uncached
                switch (page_type) {
                case Common::PageType::Unmapped: // NOLINT(bugprone-branch-clone)
                    // It is not necessary for a process to have this region mapped into its address
                    // space, for example, a system module need not have a VRAM mapping.
                    break;
                case Common::PageType::DebugMemory:
                case Common::PageType::Memory:
                    // There can be more than one GPU region mapped per CPU region, so it's common
                    // that this area is already unmarked as cached.
                    break;
                case Common::PageType::RasterizerCachedMemory: {
                    u8* const pointer{GetPointerFromRasterizerCachedMemory(vaddr & ~YUZU_PAGEMASK)};
                    if (pointer == nullptr) {
                        // It's possible that this function has been called while updating the
                        // pagetable after unmapping a VMA. In that case the underlying VMA will no
                        // longer exist, and we should just leave the pagetable entry blank.
                        current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
                            nullptr, Common::PageType::Unmapped);
                    } else {
                        current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
                            pointer - (vaddr & ~YUZU_PAGEMASK), Common::PageType::Memory);
                    }
                    break;
                }
                default:
                    UNREACHABLE();
                }
            }
        }
    }

    /**
     * Maps a region of pages as a specific type.
     *
     * @param page_table The page table to use to perform the mapping.
     * @param base The base address to begin mapping at.
     * @param size The total size of the range in bytes.
     * @param target The target address to begin mapping from.
     * @param type The page type to map the memory as.
     */
    void MapPages(Common::PageTable& page_table, VAddr base, u64 size, PAddr target,
                  Common::PageType type) {
        LOG_DEBUG(HW_Memory, "Mapping {:016X} onto {:016X}-{:016X}", target, base * YUZU_PAGESIZE,
                  (base + size) * YUZU_PAGESIZE);

        // During boot, current_page_table might not be set yet, in which case we need not flush
        if (system.IsPoweredOn()) {
            auto& gpu = system.GPU();
            for (u64 i = 0; i < size; i++) {
                const auto page = base + i;
                if (page_table.pointers[page].Type() == Common::PageType::RasterizerCachedMemory) {
                    gpu.FlushAndInvalidateRegion(page << YUZU_PAGEBITS, YUZU_PAGESIZE);
                }
            }
        }

        const VAddr end = base + size;
        ASSERT_MSG(end <= page_table.pointers.size(), "out of range mapping at {:016X}",
                   base + page_table.pointers.size());

        if (!target) {
            ASSERT_MSG(type != Common::PageType::Memory,
                       "Mapping memory page without a pointer @ {:016x}", base * YUZU_PAGESIZE);

            while (base != end) {
                page_table.pointers[base].Store(nullptr, type);
                page_table.backing_addr[base] = 0;

                base += 1;
            }
        } else {
            while (base != end) {
                // Store the host pointer biased by the page's virtual base address, so a
                // later lookup can recover the host address of any byte in the page with a
                // single addition.
                page_table.pointers[base].Store(
                    system.DeviceMemory().GetPointer<u8>(target) - (base << YUZU_PAGEBITS), type);
                page_table.backing_addr[base] = target - (base << YUZU_PAGEBITS);

                ASSERT_MSG(page_table.pointers[base].Pointer(),
                           "memory mapping base yielded a nullptr within the table");

                base += 1;
                target += YUZU_PAGESIZE;
            }
        }
    }
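
    // For illustration (hypothetical values): after MapPages stores host pointer H for
    // page base = 0x10, pointers[0x10] holds H - (0x10 << YUZU_PAGEBITS). A later read
    // of vaddr = (0x10 << YUZU_PAGEBITS) + off then computes pointer + vaddr == H + off,
    // i.e. the page offset falls out of the bias with no extra masking (see the
    // fast path in GetPointerImpl below).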

    [[nodiscard]] u8* GetPointerImpl(VAddr vaddr, auto on_unmapped, auto on_rasterizer) const {
        // AARCH64 masks the upper 16 bit of all memory accesses
        vaddr &= 0xffffffffffffULL;

        if (vaddr >= 1uLL << current_page_table->GetAddressSpaceBits()) {
            on_unmapped();
            return nullptr;
        }

        // Avoid adding any extra logic to this fast-path block.
        // The page type is packed into the two low bits of the pointer (alignment
        // requirements leave them free); unpacking it in two steps, as below, keeps
        // MSVC from emitting an extra AND instruction on the hot path.
        const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Raw();
        if (u8* const pointer = Common::PageTable::PageInfo::ExtractPointer(raw_pointer)) {
            return &pointer[vaddr];
        }
        switch (Common::PageTable::PageInfo::ExtractType(raw_pointer)) {
        case Common::PageType::Unmapped:
            on_unmapped();
            return nullptr;
        case Common::PageType::Memory:
            ASSERT_MSG(false, "Mapped memory page without a pointer @ 0x{:016X}", vaddr);
            return nullptr;
        case Common::PageType::DebugMemory:
            return GetPointerFromDebugMemory(vaddr);
        case Common::PageType::RasterizerCachedMemory: {
            u8* const host_ptr{GetPointerFromRasterizerCachedMemory(vaddr)};
            on_rasterizer();
            return host_ptr;
        }
        default:
            UNREACHABLE();
        }
        return nullptr;
    }

    [[nodiscard]] u8* GetPointer(const VAddr vaddr) const {
        return GetPointerImpl(
            vaddr, [vaddr]() { LOG_ERROR(HW_Memory, "Unmapped GetPointer @ 0x{:016X}", vaddr); },
            []() {});
    }

    [[nodiscard]] u8* GetPointerSilent(const VAddr vaddr) const {
        return GetPointerImpl(
            vaddr, []() {}, []() {});
    }

    /**
     * Reads a particular data type out of memory at the given virtual address.
     *
     * @param vaddr The virtual address to read the data type from.
     *
     * @tparam T The data type to read out of memory. This type *must* be
     *           trivially copyable, otherwise the behavior of this function
     *           is undefined.
     *
     * @returns The instance of T read from the specified virtual address.
     */
    template <typename T>
    T Read(VAddr vaddr) {
        T result = 0;
        const u8* const ptr = GetPointerImpl(
            vaddr,
            [vaddr]() {
                LOG_ERROR(HW_Memory, "Unmapped Read{} @ 0x{:016X}", sizeof(T) * 8, vaddr);
            },
            [&]() { system.GPU().FlushRegion(vaddr, sizeof(T)); });
        if (ptr) {
            std::memcpy(&result, ptr, sizeof(T));
        }
        return result;
    }

    /**
     * Writes a particular data type to memory at the given virtual address.
     *
     * @param vaddr The virtual address to write the data type to.
     *
     * @tparam T The data type to write to memory. This type *must* be
     *           trivially copyable, otherwise the behavior of this function
     *           is undefined.
     */
    template <typename T>
    void Write(VAddr vaddr, const T data) {
        u8* const ptr = GetPointerImpl(
            vaddr,
            [vaddr, data]() {
                LOG_ERROR(HW_Memory, "Unmapped Write{} @ 0x{:016X} = 0x{:016X}", sizeof(T) * 8,
                          vaddr, static_cast<u64>(data));
            },
            [&]() { system.GPU().InvalidateRegion(vaddr, sizeof(T)); });
        if (ptr) {
            std::memcpy(ptr, &data, sizeof(T));
        }
    }

    template <typename T>
    bool WriteExclusive(VAddr vaddr, const T data, const T expected) {
        u8* const ptr = GetPointerImpl(
            vaddr,
            [vaddr, data]() {
                LOG_ERROR(HW_Memory, "Unmapped WriteExclusive{} @ 0x{:016X} = 0x{:016X}",
                          sizeof(T) * 8, vaddr, static_cast<u64>(data));
            },
            [&]() { system.GPU().InvalidateRegion(vaddr, sizeof(T)); });
        if (ptr) {
            const auto volatile_pointer = reinterpret_cast<volatile T*>(ptr);
            return Common::AtomicCompareAndSwap(volatile_pointer, data, expected);
        }
        return true;
    }
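
    // For illustration (hypothetical caller, e.g. the CPU JIT emulating an exclusive
    // store): the write only lands if memory still holds the previously observed value:
    //
    //   const u32 observed = Read32(vaddr);
    //   // ... compute updated from observed ...
    //   WriteExclusive32(vaddr, updated, observed); // compare-and-swap against observed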

    bool WriteExclusive128(VAddr vaddr, const u128 data, const u128 expected) {
        u8* const ptr = GetPointerImpl(
            vaddr,
            [vaddr, data]() {
                LOG_ERROR(HW_Memory, "Unmapped WriteExclusive128 @ 0x{:016X} = 0x{:016X}{:016X}",
                          vaddr, static_cast<u64>(data[1]), static_cast<u64>(data[0]));
            },
            [&]() { system.GPU().InvalidateRegion(vaddr, sizeof(u128)); });
        if (ptr) {
            const auto volatile_pointer = reinterpret_cast<volatile u64*>(ptr);
            return Common::AtomicCompareAndSwap(volatile_pointer, data, expected);
        }
        return true;
    }

    Common::PageTable* current_page_table = nullptr;
    Core::System& system;
};

Memory::Memory(Core::System& system_) : system{system_} {
    Reset();
}

Memory::~Memory() = default;

void Memory::Reset() {
    impl = std::make_unique<Impl>(system);
}

void Memory::SetCurrentPageTable(Kernel::KProcess& process, u32 core_id) {
    impl->SetCurrentPageTable(process, core_id);
}

void Memory::MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, PAddr target) {
    impl->MapMemoryRegion(page_table, base, size, target);
}

void Memory::UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) {
    impl->UnmapRegion(page_table, base, size);
}

bool Memory::IsValidVirtualAddress(const VAddr vaddr) const {
    const Kernel::KProcess& process = *system.CurrentProcess();
    const auto& page_table = process.PageTable().PageTableImpl();
    const size_t page = vaddr >> YUZU_PAGEBITS;
    if (page >= page_table.pointers.size()) {
        return false;
    }
    const auto [pointer, type] = page_table.pointers[page].PointerType();
    return pointer != nullptr || type == Common::PageType::RasterizerCachedMemory ||
           type == Common::PageType::DebugMemory;
}

bool Memory::IsValidVirtualAddressRange(VAddr base, u64 size) const {
    VAddr end = base + size;
    VAddr page = Common::AlignDown(base, YUZU_PAGESIZE);

    for (; page < end; page += YUZU_PAGESIZE) {
        if (!IsValidVirtualAddress(page)) {
            return false;
        }
    }

    return true;
}
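
// For illustration (hypothetical values, assuming 4 KiB pages): a range check with
// base = 0x1FFF and size = 2 spans a page boundary, so the loop above probes the
// pages at 0x1000 and 0x2000; the range is valid only if both probes succeed.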

u8* Memory::GetPointer(VAddr vaddr) {
    return impl->GetPointer(vaddr);
}

u8* Memory::GetPointerSilent(VAddr vaddr) {
    return impl->GetPointerSilent(vaddr);
}

const u8* Memory::GetPointer(VAddr vaddr) const {
    return impl->GetPointer(vaddr);
}

u8 Memory::Read8(const VAddr addr) {
    return impl->Read8(addr);
}

u16 Memory::Read16(const VAddr addr) {
    return impl->Read16(addr);
}

u32 Memory::Read32(const VAddr addr) {
    return impl->Read32(addr);
}

u64 Memory::Read64(const VAddr addr) {
    return impl->Read64(addr);
}

void Memory::Write8(VAddr addr, u8 data) {
    impl->Write8(addr, data);
}

void Memory::Write16(VAddr addr, u16 data) {
    impl->Write16(addr, data);
}

void Memory::Write32(VAddr addr, u32 data) {
    impl->Write32(addr, data);
}

void Memory::Write64(VAddr addr, u64 data) {
    impl->Write64(addr, data);
}

bool Memory::WriteExclusive8(VAddr addr, u8 data, u8 expected) {
    return impl->WriteExclusive8(addr, data, expected);
}

bool Memory::WriteExclusive16(VAddr addr, u16 data, u16 expected) {
    return impl->WriteExclusive16(addr, data, expected);
}

bool Memory::WriteExclusive32(VAddr addr, u32 data, u32 expected) {
    return impl->WriteExclusive32(addr, data, expected);
}

bool Memory::WriteExclusive64(VAddr addr, u64 data, u64 expected) {
    return impl->WriteExclusive64(addr, data, expected);
}

bool Memory::WriteExclusive128(VAddr addr, u128 data, u128 expected) {
    return impl->WriteExclusive128(addr, data, expected);
}

std::string Memory::ReadCString(VAddr vaddr, std::size_t max_length) {
    return impl->ReadCString(vaddr, max_length);
}

void Memory::ReadBlock(const Kernel::KProcess& process, const VAddr src_addr, void* dest_buffer,
                       const std::size_t size) {
    impl->ReadBlockImpl<false>(process, src_addr, dest_buffer, size);
}

void Memory::ReadBlock(const VAddr src_addr, void* dest_buffer, const std::size_t size) {
    impl->ReadBlock(src_addr, dest_buffer, size);
}

void Memory::ReadBlockUnsafe(const VAddr src_addr, void* dest_buffer, const std::size_t size) {
    impl->ReadBlockUnsafe(src_addr, dest_buffer, size);
}

void Memory::WriteBlock(const Kernel::KProcess& process, VAddr dest_addr, const void* src_buffer,
                        std::size_t size) {
    impl->WriteBlockImpl<false>(process, dest_addr, src_buffer, size);
}

void Memory::WriteBlock(const VAddr dest_addr, const void* src_buffer, const std::size_t size) {
    impl->WriteBlock(dest_addr, src_buffer, size);
}

void Memory::WriteBlockUnsafe(const VAddr dest_addr, const void* src_buffer,
                              const std::size_t size) {
    impl->WriteBlockUnsafe(dest_addr, src_buffer, size);
}

void Memory::CopyBlock(const Kernel::KProcess& process, VAddr dest_addr, VAddr src_addr,
                       const std::size_t size) {
    impl->CopyBlock(process, dest_addr, src_addr, size);
}

void Memory::ZeroBlock(const Kernel::KProcess& process, VAddr dest_addr, const std::size_t size) {
    impl->ZeroBlock(process, dest_addr, size);
}

Result Memory::InvalidateDataCache(const Kernel::KProcess& process, VAddr dest_addr,
                                   const std::size_t size) {
    return impl->InvalidateDataCache(process, dest_addr, size);
}

Result Memory::StoreDataCache(const Kernel::KProcess& process, VAddr dest_addr,
                              const std::size_t size) {
    return impl->StoreDataCache(process, dest_addr, size);
}

Result Memory::FlushDataCache(const Kernel::KProcess& process, VAddr dest_addr,
                              const std::size_t size) {
    return impl->FlushDataCache(process, dest_addr, size);
}

void Memory::RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) {
    impl->RasterizerMarkRegionCached(vaddr, size, cached);
}

void Memory::MarkRegionDebug(VAddr vaddr, u64 size, bool debug) {
    impl->MarkRegionDebug(vaddr, size, debug);
}

} // namespace Core::Memory