2021-08-05 20:29:43 +00:00
|
|
|
// Copyright 2015 Citra Emulator Project
|
2014-12-17 05:38:14 +00:00
|
|
|
// Licensed under GPLv2 or any later version
|
2014-04-08 23:15:46 +00:00
|
|
|
// Refer to the license.txt file included.
|
2013-09-19 03:52:51 +00:00
|
|
|
|
2018-01-27 15:16:39 +00:00
|
|
|
#include <algorithm>
|
2015-09-10 03:23:44 +00:00
|
|
|
#include <cstring>
|
2018-07-18 23:02:47 +00:00
|
|
|
|
2015-05-13 02:38:56 +00:00
|
|
|
#include "common/assert.h"
|
2020-03-07 22:59:42 +00:00
|
|
|
#include "common/atomic_ops.h"
|
2015-05-06 07:06:12 +00:00
|
|
|
#include "common/common_types.h"
|
|
|
|
#include "common/logging/log.h"
|
2019-03-02 20:20:28 +00:00
|
|
|
#include "common/page_table.h"
|
2020-01-19 00:49:30 +00:00
|
|
|
#include "common/settings.h"
|
2015-05-06 07:06:12 +00:00
|
|
|
#include "common/swap.h"
|
2017-09-24 21:44:13 +00:00
|
|
|
#include "core/core.h"
|
2020-04-09 02:50:46 +00:00
|
|
|
#include "core/device_memory.h"
|
2021-02-13 01:58:31 +00:00
|
|
|
#include "core/hle/kernel/k_page_table.h"
|
2021-04-24 05:04:28 +00:00
|
|
|
#include "core/hle/kernel/k_process.h"
|
2016-09-21 06:52:38 +00:00
|
|
|
#include "core/memory.h"
|
2019-02-23 04:38:45 +00:00
|
|
|
#include "video_core/gpu.h"
|
2016-04-16 22:57:57 +00:00
|
|
|
|
2020-03-31 19:10:44 +00:00
|
|
|
namespace Core::Memory {
|
2015-05-13 02:38:56 +00:00
|
|
|
|
2019-11-26 17:33:20 +00:00
|
|
|
// Implementation class used to keep the specifics of the memory subsystem hidden
|
|
|
|
// from outside classes. This also allows modification to the internals of the memory
|
|
|
|
// subsystem without needing to rebuild all files that make use of the memory interface.
|
|
|
|
struct Memory::Impl {
|
|
|
|
explicit Impl(Core::System& system_) : system{system_} {}
|
|
|
|
|
2021-04-24 05:04:28 +00:00
|
|
|
void SetCurrentPageTable(Kernel::KProcess& process, u32 core_id) {
|
2020-04-09 02:50:46 +00:00
|
|
|
current_page_table = &process.PageTable().PageTableImpl();
|
2020-01-19 00:49:30 +00:00
|
|
|
current_page_table->fastmem_arena = system.DeviceMemory().buffer.VirtualBasePointer();
|
2019-11-26 23:34:30 +00:00
|
|
|
|
2020-04-09 02:50:46 +00:00
|
|
|
const std::size_t address_space_width = process.PageTable().GetAddressSpaceWidth();
|
2019-11-26 23:34:30 +00:00
|
|
|
|
2020-02-25 02:04:12 +00:00
|
|
|
system.ArmInterface(core_id).PageTableChanged(*current_page_table, address_space_width);
|
2019-11-26 23:34:30 +00:00
|
|
|
}
|
|
|
|
|
2020-04-09 02:50:46 +00:00
|
|
|
void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, PAddr target) {
|
2019-11-26 18:09:12 +00:00
|
|
|
ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
|
|
|
|
ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
|
2020-01-19 00:49:30 +00:00
|
|
|
ASSERT_MSG(target >= DramMemoryMap::Base && target < DramMemoryMap::End,
|
|
|
|
"Out of bounds target: {:016X}", target);
|
2019-11-26 18:09:12 +00:00
|
|
|
MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, Common::PageType::Memory);
|
2020-01-19 00:49:30 +00:00
|
|
|
|
2021-06-06 07:57:24 +00:00
|
|
|
if (Settings::IsFastmemEnabled()) {
|
|
|
|
system.DeviceMemory().buffer.Map(base, target - DramMemoryMap::Base, size);
|
|
|
|
}
|
2019-11-26 18:09:12 +00:00
|
|
|
}
|
2019-11-26 17:33:20 +00:00
|
|
|
|
2019-11-26 18:09:12 +00:00
|
|
|
void UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) {
|
|
|
|
ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
|
|
|
|
ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
|
2020-04-09 02:50:46 +00:00
|
|
|
MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, 0, Common::PageType::Unmapped);
|
2020-01-19 00:49:30 +00:00
|
|
|
|
2021-06-06 07:57:24 +00:00
|
|
|
if (Settings::IsFastmemEnabled()) {
|
|
|
|
system.DeviceMemory().buffer.Unmap(base, size);
|
|
|
|
}
|
2019-11-26 18:09:12 +00:00
|
|
|
}
|
2018-05-03 02:36:51 +00:00
|
|
|
|
2021-08-05 20:11:14 +00:00
|
|
|
[[nodiscard]] u8* GetPointerFromRasterizerCachedMemory(VAddr vaddr) const {
|
2020-04-09 02:50:46 +00:00
|
|
|
const PAddr paddr{current_page_table->backing_addr[vaddr >> PAGE_BITS]};
|
2019-11-26 23:28:44 +00:00
|
|
|
|
2020-04-09 02:50:46 +00:00
|
|
|
if (!paddr) {
|
|
|
|
return {};
|
2019-11-26 23:28:44 +00:00
|
|
|
}
|
|
|
|
|
2020-04-09 02:50:46 +00:00
|
|
|
return system.DeviceMemory().GetPointer(paddr) + vaddr;
|
2019-11-26 23:28:44 +00:00
|
|
|
}
|
|
|
|
|
2019-11-26 21:29:34 +00:00
|
|
|
u8 Read8(const VAddr addr) {
|
|
|
|
return Read<u8>(addr);
|
|
|
|
}
|
|
|
|
|
|
|
|
u16 Read16(const VAddr addr) {
|
2020-04-09 03:03:25 +00:00
|
|
|
if ((addr & 1) == 0) {
|
|
|
|
return Read<u16_le>(addr);
|
|
|
|
} else {
|
2020-10-13 12:10:50 +00:00
|
|
|
const u32 a{Read<u8>(addr)};
|
|
|
|
const u32 b{Read<u8>(addr + sizeof(u8))};
|
|
|
|
return static_cast<u16>((b << 8) | a);
|
2020-04-09 03:03:25 +00:00
|
|
|
}
|
2019-11-26 21:29:34 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
u32 Read32(const VAddr addr) {
|
2020-04-09 03:03:25 +00:00
|
|
|
if ((addr & 3) == 0) {
|
|
|
|
return Read<u32_le>(addr);
|
|
|
|
} else {
|
2020-10-13 12:10:50 +00:00
|
|
|
const u32 a{Read16(addr)};
|
|
|
|
const u32 b{Read16(addr + sizeof(u16))};
|
|
|
|
return (b << 16) | a;
|
2020-04-09 03:03:25 +00:00
|
|
|
}
|
2019-11-26 21:29:34 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
u64 Read64(const VAddr addr) {
|
2020-04-09 03:03:25 +00:00
|
|
|
if ((addr & 7) == 0) {
|
|
|
|
return Read<u64_le>(addr);
|
|
|
|
} else {
|
|
|
|
const u32 a{Read32(addr)};
|
|
|
|
const u32 b{Read32(addr + sizeof(u32))};
|
|
|
|
return (static_cast<u64>(b) << 32) | a;
|
|
|
|
}
|
2019-11-26 21:29:34 +00:00
|
|
|
}
|
|
|
|
|
2019-11-26 22:39:57 +00:00
|
|
|
void Write8(const VAddr addr, const u8 data) {
|
|
|
|
Write<u8>(addr, data);
|
|
|
|
}
|
|
|
|
|
|
|
|
void Write16(const VAddr addr, const u16 data) {
|
2020-04-09 03:03:25 +00:00
|
|
|
if ((addr & 1) == 0) {
|
|
|
|
Write<u16_le>(addr, data);
|
|
|
|
} else {
|
|
|
|
Write<u8>(addr, static_cast<u8>(data));
|
|
|
|
Write<u8>(addr + sizeof(u8), static_cast<u8>(data >> 8));
|
|
|
|
}
|
2019-11-26 22:39:57 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void Write32(const VAddr addr, const u32 data) {
|
2020-04-09 03:03:25 +00:00
|
|
|
if ((addr & 3) == 0) {
|
|
|
|
Write<u32_le>(addr, data);
|
|
|
|
} else {
|
|
|
|
Write16(addr, static_cast<u16>(data));
|
|
|
|
Write16(addr + sizeof(u16), static_cast<u16>(data >> 16));
|
|
|
|
}
|
2019-11-26 22:39:57 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void Write64(const VAddr addr, const u64 data) {
|
2020-04-09 03:03:25 +00:00
|
|
|
if ((addr & 7) == 0) {
|
|
|
|
Write<u64_le>(addr, data);
|
|
|
|
} else {
|
|
|
|
Write32(addr, static_cast<u32>(data));
|
|
|
|
Write32(addr + sizeof(u32), static_cast<u32>(data >> 32));
|
|
|
|
}
|
2019-11-26 22:39:57 +00:00
|
|
|
}
|
|
|
|
|
2020-03-07 22:59:42 +00:00
|
|
|
bool WriteExclusive8(const VAddr addr, const u8 data, const u8 expected) {
|
|
|
|
return WriteExclusive<u8>(addr, data, expected);
|
|
|
|
}
|
|
|
|
|
|
|
|
bool WriteExclusive16(const VAddr addr, const u16 data, const u16 expected) {
|
|
|
|
return WriteExclusive<u16_le>(addr, data, expected);
|
|
|
|
}
|
|
|
|
|
|
|
|
bool WriteExclusive32(const VAddr addr, const u32 data, const u32 expected) {
|
|
|
|
return WriteExclusive<u32_le>(addr, data, expected);
|
|
|
|
}
|
|
|
|
|
|
|
|
bool WriteExclusive64(const VAddr addr, const u64 data, const u64 expected) {
|
|
|
|
return WriteExclusive<u64_le>(addr, data, expected);
|
|
|
|
}
|
|
|
|
|
2019-11-26 20:48:19 +00:00
|
|
|
std::string ReadCString(VAddr vaddr, std::size_t max_length) {
|
|
|
|
std::string string;
|
|
|
|
string.reserve(max_length);
|
|
|
|
for (std::size_t i = 0; i < max_length; ++i) {
|
2021-08-05 20:11:14 +00:00
|
|
|
const char c = Read<s8>(vaddr);
|
2019-11-26 20:48:19 +00:00
|
|
|
if (c == '\0') {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
string.push_back(c);
|
|
|
|
++vaddr;
|
|
|
|
}
|
|
|
|
string.shrink_to_fit();
|
|
|
|
return string;
|
|
|
|
}
|
|
|
|
|
2021-08-05 21:09:08 +00:00
|
|
|
void WalkBlock(const Kernel::KProcess& process, const VAddr addr, const std::size_t size,
|
2021-08-05 20:11:14 +00:00
|
|
|
auto on_unmapped, auto on_memory, auto on_rasterizer, auto increment) {
|
2020-04-09 02:50:46 +00:00
|
|
|
const auto& page_table = process.PageTable().PageTableImpl();
|
2019-11-26 21:29:34 +00:00
|
|
|
std::size_t remaining_size = size;
|
2021-08-05 20:11:14 +00:00
|
|
|
std::size_t page_index = addr >> PAGE_BITS;
|
|
|
|
std::size_t page_offset = addr & PAGE_MASK;
|
2019-11-26 21:29:34 +00:00
|
|
|
|
2021-08-05 20:11:14 +00:00
|
|
|
while (remaining_size) {
|
2019-11-26 21:29:34 +00:00
|
|
|
const std::size_t copy_amount =
|
|
|
|
std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size);
|
|
|
|
const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
|
|
|
|
|
core/memory: Read and write page table atomically
Squash attributes into the pointer's integer, making them an uintptr_t
pair containing 2 bits at the bottom and then the pointer. These bits
are currently unused thanks to alignment requirements.
Configure Dynarmic to mask out these bits on pointer reads.
While we are at it, remove some unused attributes carried over from
Citra.
Read/Write and other hot functions use a two step unpacking process that
is less readable to stop MSVC from emitting an extra AND instruction in
the hot path:
mov rdi,rcx
shr rdx,0Ch
mov r8,qword ptr [rax+8]
mov rax,qword ptr [r8+rdx*8]
mov rdx,rax
-and al,3
and rdx,0FFFFFFFFFFFFFFFCh
je Core::Memory::Memory::Impl::Read<unsigned char>
mov rax,qword ptr [vaddr]
movzx eax,byte ptr [rdx+rax]
2020-12-30 00:16:57 +00:00
|
|
|
const auto [pointer, type] = page_table.pointers[page_index].PointerType();
|
|
|
|
switch (type) {
|
2019-11-26 21:29:34 +00:00
|
|
|
case Common::PageType::Unmapped: {
|
2021-08-05 20:11:14 +00:00
|
|
|
on_unmapped(copy_amount, current_vaddr);
|
2019-11-26 21:29:34 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
case Common::PageType::Memory: {
|
core/memory: Read and write page table atomically
Squash attributes into the pointer's integer, making them an uintptr_t
pair containing 2 bits at the bottom and then the pointer. These bits
are currently unused thanks to alignment requirements.
Configure Dynarmic to mask out these bits on pointer reads.
While we are at it, remove some unused attributes carried over from
Citra.
Read/Write and other hot functions use a two step unpacking process that
is less readable to stop MSVC from emitting an extra AND instruction in
the hot path:
mov rdi,rcx
shr rdx,0Ch
mov r8,qword ptr [rax+8]
mov rax,qword ptr [r8+rdx*8]
mov rdx,rax
-and al,3
and rdx,0FFFFFFFFFFFFFFFCh
je Core::Memory::Memory::Impl::Read<unsigned char>
mov rax,qword ptr [vaddr]
movzx eax,byte ptr [rdx+rax]
2020-12-30 00:16:57 +00:00
|
|
|
DEBUG_ASSERT(pointer);
|
2021-08-05 20:11:14 +00:00
|
|
|
u8* mem_ptr = pointer + page_offset + (page_index << PAGE_BITS);
|
|
|
|
on_memory(copy_amount, mem_ptr);
|
2019-11-26 21:29:34 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
case Common::PageType::RasterizerCachedMemory: {
|
2021-08-05 20:11:14 +00:00
|
|
|
u8* const host_ptr{GetPointerFromRasterizerCachedMemory(current_vaddr)};
|
|
|
|
on_rasterizer(current_vaddr, copy_amount, host_ptr);
|
2019-11-26 21:29:34 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
default:
|
|
|
|
UNREACHABLE();
|
|
|
|
}
|
|
|
|
|
|
|
|
page_index++;
|
|
|
|
page_offset = 0;
|
2021-08-05 20:11:14 +00:00
|
|
|
increment(copy_amount);
|
2019-11-26 21:29:34 +00:00
|
|
|
remaining_size -= copy_amount;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-08-05 20:11:14 +00:00
|
|
|
template <bool UNSAFE>
|
|
|
|
void ReadBlockImpl(const Kernel::KProcess& process, const VAddr src_addr, void* dest_buffer,
|
|
|
|
const std::size_t size) {
|
|
|
|
WalkBlock(
|
|
|
|
process, src_addr, size,
|
|
|
|
[src_addr, size, &dest_buffer](const std::size_t copy_amount,
|
|
|
|
const VAddr current_vaddr) {
|
2020-04-05 21:23:49 +00:00
|
|
|
LOG_ERROR(HW_Memory,
|
|
|
|
"Unmapped ReadBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
|
|
|
|
current_vaddr, src_addr, size);
|
|
|
|
std::memset(dest_buffer, 0, copy_amount);
|
2021-08-05 20:11:14 +00:00
|
|
|
},
|
|
|
|
[&dest_buffer](const std::size_t copy_amount, const u8* const src_ptr) {
|
2020-04-05 21:23:49 +00:00
|
|
|
std::memcpy(dest_buffer, src_ptr, copy_amount);
|
2021-08-05 20:11:14 +00:00
|
|
|
},
|
|
|
|
[&system = system, &dest_buffer](const VAddr current_vaddr,
|
|
|
|
const std::size_t copy_amount,
|
|
|
|
const u8* const host_ptr) {
|
2021-08-05 20:29:43 +00:00
|
|
|
if constexpr (!UNSAFE) {
|
2021-08-05 20:11:14 +00:00
|
|
|
system.GPU().FlushRegion(current_vaddr, copy_amount);
|
|
|
|
}
|
2020-04-05 21:23:49 +00:00
|
|
|
std::memcpy(dest_buffer, host_ptr, copy_amount);
|
2021-08-05 20:11:14 +00:00
|
|
|
},
|
|
|
|
[&dest_buffer](const std::size_t copy_amount) {
|
|
|
|
dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount;
|
|
|
|
});
|
2020-04-05 21:23:49 +00:00
|
|
|
}
|
|
|
|
|
2019-11-26 21:29:34 +00:00
|
|
|
void ReadBlock(const VAddr src_addr, void* dest_buffer, const std::size_t size) {
|
2021-08-05 20:11:14 +00:00
|
|
|
ReadBlockImpl<false>(*system.CurrentProcess(), src_addr, dest_buffer, size);
|
2019-11-26 21:29:34 +00:00
|
|
|
}
|
|
|
|
|
2020-04-05 21:23:49 +00:00
|
|
|
void ReadBlockUnsafe(const VAddr src_addr, void* dest_buffer, const std::size_t size) {
|
2021-08-05 20:11:14 +00:00
|
|
|
ReadBlockImpl<true>(*system.CurrentProcess(), src_addr, dest_buffer, size);
|
2020-04-05 21:23:49 +00:00
|
|
|
}
|
|
|
|
|
2021-08-05 20:11:14 +00:00
|
|
|
template <bool UNSAFE>
|
|
|
|
void WriteBlockImpl(const Kernel::KProcess& process, const VAddr dest_addr,
|
|
|
|
const void* src_buffer, const std::size_t size) {
|
|
|
|
WalkBlock(
|
|
|
|
process, dest_addr, size,
|
|
|
|
[dest_addr, size](const std::size_t copy_amount, const VAddr current_vaddr) {
|
2019-11-26 22:39:57 +00:00
|
|
|
LOG_ERROR(HW_Memory,
|
|
|
|
"Unmapped WriteBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
|
|
|
|
current_vaddr, dest_addr, size);
|
2021-08-05 20:11:14 +00:00
|
|
|
},
|
|
|
|
[&src_buffer](const std::size_t copy_amount, u8* const dest_ptr) {
|
2019-11-26 22:39:57 +00:00
|
|
|
std::memcpy(dest_ptr, src_buffer, copy_amount);
|
2021-08-05 20:11:14 +00:00
|
|
|
},
|
|
|
|
[&system = system, &src_buffer](const VAddr current_vaddr,
|
|
|
|
const std::size_t copy_amount, u8* const host_ptr) {
|
2021-08-05 20:29:43 +00:00
|
|
|
if constexpr (!UNSAFE) {
|
2021-08-05 20:11:14 +00:00
|
|
|
system.GPU().InvalidateRegion(current_vaddr, copy_amount);
|
|
|
|
}
|
2020-04-05 21:23:49 +00:00
|
|
|
std::memcpy(host_ptr, src_buffer, copy_amount);
|
2021-08-05 20:11:14 +00:00
|
|
|
},
|
|
|
|
[&src_buffer](const std::size_t copy_amount) {
|
|
|
|
src_buffer = static_cast<const u8*>(src_buffer) + copy_amount;
|
|
|
|
});
|
2020-04-05 21:23:49 +00:00
|
|
|
}
|
|
|
|
|
2019-11-26 22:39:57 +00:00
|
|
|
void WriteBlock(const VAddr dest_addr, const void* src_buffer, const std::size_t size) {
|
2021-08-05 20:11:14 +00:00
|
|
|
WriteBlockImpl<false>(*system.CurrentProcess(), dest_addr, src_buffer, size);
|
2019-11-26 22:39:57 +00:00
|
|
|
}
|
|
|
|
|
2020-04-05 21:23:49 +00:00
|
|
|
void WriteBlockUnsafe(const VAddr dest_addr, const void* src_buffer, const std::size_t size) {
|
2021-08-05 20:11:14 +00:00
|
|
|
WriteBlockImpl<true>(*system.CurrentProcess(), dest_addr, src_buffer, size);
|
2020-04-05 21:23:49 +00:00
|
|
|
}
|
|
|
|
|
2021-04-24 05:04:28 +00:00
|
|
|
void ZeroBlock(const Kernel::KProcess& process, const VAddr dest_addr, const std::size_t size) {
|
2021-08-05 20:11:14 +00:00
|
|
|
WalkBlock(
|
|
|
|
process, dest_addr, size,
|
|
|
|
[dest_addr, size](const std::size_t copy_amount, const VAddr current_vaddr) {
|
2019-11-26 21:06:49 +00:00
|
|
|
LOG_ERROR(HW_Memory,
|
|
|
|
"Unmapped ZeroBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
|
|
|
|
current_vaddr, dest_addr, size);
|
2021-08-05 20:11:14 +00:00
|
|
|
},
|
|
|
|
[](const std::size_t copy_amount, u8* const dest_ptr) {
|
2019-11-26 21:06:49 +00:00
|
|
|
std::memset(dest_ptr, 0, copy_amount);
|
2021-08-05 20:11:14 +00:00
|
|
|
},
|
|
|
|
[&system = system](const VAddr current_vaddr, const std::size_t copy_amount,
|
|
|
|
u8* const host_ptr) {
|
2020-04-05 16:58:23 +00:00
|
|
|
system.GPU().InvalidateRegion(current_vaddr, copy_amount);
|
2019-11-26 21:06:49 +00:00
|
|
|
std::memset(host_ptr, 0, copy_amount);
|
2021-08-05 20:11:14 +00:00
|
|
|
},
|
|
|
|
[](const std::size_t copy_amount) {});
|
2019-11-26 21:06:49 +00:00
|
|
|
}
|
|
|
|
|
2021-04-24 05:04:28 +00:00
|
|
|
void CopyBlock(const Kernel::KProcess& process, VAddr dest_addr, VAddr src_addr,
|
2019-11-26 21:06:49 +00:00
|
|
|
const std::size_t size) {
|
2021-08-05 21:09:08 +00:00
|
|
|
WalkBlock(
|
|
|
|
process, dest_addr, size,
|
|
|
|
[this, &process, &dest_addr, &src_addr, size](const std::size_t copy_amount,
|
|
|
|
const VAddr current_vaddr) {
|
2019-11-26 21:06:49 +00:00
|
|
|
LOG_ERROR(HW_Memory,
|
|
|
|
"Unmapped CopyBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
|
|
|
|
current_vaddr, src_addr, size);
|
|
|
|
ZeroBlock(process, dest_addr, copy_amount);
|
2021-08-05 21:09:08 +00:00
|
|
|
},
|
|
|
|
[this, &process, &dest_addr](const std::size_t copy_amount, const u8* const src_ptr) {
|
2021-08-05 20:11:14 +00:00
|
|
|
WriteBlockImpl<false>(process, dest_addr, src_ptr, copy_amount);
|
2021-08-05 21:09:08 +00:00
|
|
|
},
|
|
|
|
[this, &system = system, &process, &dest_addr](
|
|
|
|
const VAddr current_vaddr, const std::size_t copy_amount, u8* const host_ptr) {
|
2020-04-05 16:58:23 +00:00
|
|
|
system.GPU().FlushRegion(current_vaddr, copy_amount);
|
2021-08-05 20:11:14 +00:00
|
|
|
WriteBlockImpl<false>(process, dest_addr, host_ptr, copy_amount);
|
2021-08-05 21:09:08 +00:00
|
|
|
},
|
|
|
|
[&dest_addr, &src_addr](const std::size_t copy_amount) {
|
|
|
|
dest_addr += static_cast<VAddr>(copy_amount);
|
|
|
|
src_addr += static_cast<VAddr>(copy_amount);
|
|
|
|
});
|
2019-11-26 21:06:49 +00:00
|
|
|
}
|
|
|
|
|
2019-11-26 20:56:13 +00:00
|
|
|
void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) {
|
|
|
|
if (vaddr == 0) {
|
|
|
|
return;
|
|
|
|
}
|
2020-01-19 00:49:30 +00:00
|
|
|
|
2021-06-06 07:57:24 +00:00
|
|
|
if (Settings::IsFastmemEnabled()) {
|
|
|
|
const bool is_read_enable = Settings::IsGPULevelHigh() || !cached;
|
|
|
|
system.DeviceMemory().buffer.Protect(vaddr, size, is_read_enable, !cached);
|
|
|
|
}
|
2020-01-19 00:49:30 +00:00
|
|
|
|
2019-11-26 20:56:13 +00:00
|
|
|
// Iterate over a contiguous CPU address space, which corresponds to the specified GPU
|
|
|
|
// address space, marking the region as un/cached. The region is marked un/cached at a
|
|
|
|
// granularity of CPU pages, hence why we iterate on a CPU page basis (note: GPU page size
|
|
|
|
// is different). This assumes the specified GPU address region is contiguous as well.
|
|
|
|
|
core/memory: Read and write page table atomically
Squash attributes into the pointer's integer, making them an uintptr_t
pair containing 2 bits at the bottom and then the pointer. These bits
are currently unused thanks to alignment requirements.
Configure Dynarmic to mask out these bits on pointer reads.
While we are at it, remove some unused attributes carried over from
Citra.
Read/Write and other hot functions use a two step unpacking process that
is less readable to stop MSVC from emitting an extra AND instruction in
the hot path:
mov rdi,rcx
shr rdx,0Ch
mov r8,qword ptr [rax+8]
mov rax,qword ptr [r8+rdx*8]
mov rdx,rax
-and al,3
and rdx,0FFFFFFFFFFFFFFFCh
je Core::Memory::Memory::Impl::Read<unsigned char>
mov rax,qword ptr [vaddr]
movzx eax,byte ptr [rdx+rax]
2020-12-30 00:16:57 +00:00
|
|
|
const u64 num_pages = ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1;
|
|
|
|
for (u64 i = 0; i < num_pages; ++i, vaddr += PAGE_SIZE) {
|
|
|
|
const Common::PageType page_type{
|
|
|
|
current_page_table->pointers[vaddr >> PAGE_BITS].Type()};
|
2019-11-26 20:56:13 +00:00
|
|
|
if (cached) {
|
|
|
|
// Switch page type to cached if now cached
|
|
|
|
switch (page_type) {
|
|
|
|
case Common::PageType::Unmapped:
|
|
|
|
// It is not necessary for a process to have this region mapped into its address
|
|
|
|
// space, for example, a system module need not have a VRAM mapping.
|
|
|
|
break;
|
|
|
|
case Common::PageType::Memory:
|
core/memory: Read and write page table atomically
Squash attributes into the pointer's integer, making them an uintptr_t
pair containing 2 bits at the bottom and then the pointer. These bits
are currently unused thanks to alignment requirements.
Configure Dynarmic to mask out these bits on pointer reads.
While we are at it, remove some unused attributes carried over from
Citra.
Read/Write and other hot functions use a two step unpacking process that
is less readable to stop MSVC from emitting an extra AND instruction in
the hot path:
mov rdi,rcx
shr rdx,0Ch
mov r8,qword ptr [rax+8]
mov rax,qword ptr [r8+rdx*8]
mov rdx,rax
-and al,3
and rdx,0FFFFFFFFFFFFFFFCh
je Core::Memory::Memory::Impl::Read<unsigned char>
mov rax,qword ptr [vaddr]
movzx eax,byte ptr [rdx+rax]
2020-12-30 00:16:57 +00:00
|
|
|
current_page_table->pointers[vaddr >> PAGE_BITS].Store(
|
|
|
|
nullptr, Common::PageType::RasterizerCachedMemory);
|
2019-11-26 20:56:13 +00:00
|
|
|
break;
|
|
|
|
case Common::PageType::RasterizerCachedMemory:
|
|
|
|
// There can be more than one GPU region mapped per CPU region, so it's common
|
|
|
|
// that this area is already marked as cached.
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
UNREACHABLE();
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// Switch page type to uncached if now uncached
|
|
|
|
switch (page_type) {
|
2021-08-05 20:29:43 +00:00
|
|
|
case Common::PageType::Unmapped: // NOLINT(bugprone-branch-clone)
|
2019-11-26 20:56:13 +00:00
|
|
|
// It is not necessary for a process to have this region mapped into its address
|
|
|
|
// space, for example, a system module need not have a VRAM mapping.
|
2021-08-05 20:29:43 +00:00
|
|
|
break;
|
2019-11-26 20:56:13 +00:00
|
|
|
case Common::PageType::Memory:
|
|
|
|
// There can be more than one GPU region mapped per CPU region, so it's common
|
|
|
|
// that this area is already unmarked as cached.
|
|
|
|
break;
|
|
|
|
case Common::PageType::RasterizerCachedMemory: {
|
core/memory: Read and write page table atomically
Squash attributes into the pointer's integer, making them an uintptr_t
pair containing 2 bits at the bottom and then the pointer. These bits
are currently unused thanks to alignment requirements.
Configure Dynarmic to mask out these bits on pointer reads.
While we are at it, remove some unused attributes carried over from
Citra.
Read/Write and other hot functions use a two step unpacking process that
is less readable to stop MSVC from emitting an extra AND instruction in
the hot path:
mov rdi,rcx
shr rdx,0Ch
mov r8,qword ptr [rax+8]
mov rax,qword ptr [r8+rdx*8]
mov rdx,rax
-and al,3
and rdx,0FFFFFFFFFFFFFFFCh
je Core::Memory::Memory::Impl::Read<unsigned char>
mov rax,qword ptr [vaddr]
movzx eax,byte ptr [rdx+rax]
2020-12-30 00:16:57 +00:00
|
|
|
u8* const pointer{GetPointerFromRasterizerCachedMemory(vaddr & ~PAGE_MASK)};
|
2019-11-26 20:56:13 +00:00
|
|
|
if (pointer == nullptr) {
|
|
|
|
// It's possible that this function has been called while updating the
|
|
|
|
// pagetable after unmapping a VMA. In that case the underlying VMA will no
|
|
|
|
// longer exist, and we should just leave the pagetable entry blank.
|
core/memory: Read and write page table atomically
Squash attributes into the pointer's integer, making them an uintptr_t
pair containing 2 bits at the bottom and then the pointer. These bits
are currently unused thanks to alignment requirements.
Configure Dynarmic to mask out these bits on pointer reads.
While we are at it, remove some unused attributes carried over from
Citra.
Read/Write and other hot functions use a two step unpacking process that
is less readable to stop MSVC from emitting an extra AND instruction in
the hot path:
mov rdi,rcx
shr rdx,0Ch
mov r8,qword ptr [rax+8]
mov rax,qword ptr [r8+rdx*8]
mov rdx,rax
-and al,3
and rdx,0FFFFFFFFFFFFFFFCh
je Core::Memory::Memory::Impl::Read<unsigned char>
mov rax,qword ptr [vaddr]
movzx eax,byte ptr [rdx+rax]
2020-12-30 00:16:57 +00:00
|
|
|
current_page_table->pointers[vaddr >> PAGE_BITS].Store(
|
|
|
|
nullptr, Common::PageType::Unmapped);
|
2019-11-26 20:56:13 +00:00
|
|
|
} else {
|
core/memory: Read and write page table atomically
Squash attributes into the pointer's integer, making them an uintptr_t
pair containing 2 bits at the bottom and then the pointer. These bits
are currently unused thanks to alignment requirements.
Configure Dynarmic to mask out these bits on pointer reads.
While we are at it, remove some unused attributes carried over from
Citra.
Read/Write and other hot functions use a two step unpacking process that
is less readable to stop MSVC from emitting an extra AND instruction in
the hot path:
mov rdi,rcx
shr rdx,0Ch
mov r8,qword ptr [rax+8]
mov rax,qword ptr [r8+rdx*8]
mov rdx,rax
-and al,3
and rdx,0FFFFFFFFFFFFFFFCh
je Core::Memory::Memory::Impl::Read<unsigned char>
mov rax,qword ptr [vaddr]
movzx eax,byte ptr [rdx+rax]
2020-12-30 00:16:57 +00:00
|
|
|
current_page_table->pointers[vaddr >> PAGE_BITS].Store(
|
|
|
|
pointer - (vaddr & ~PAGE_MASK), Common::PageType::Memory);
|
2019-11-26 20:56:13 +00:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
default:
|
|
|
|
UNREACHABLE();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-26 18:09:12 +00:00
|
|
|
/**
|
|
|
|
* Maps a region of pages as a specific type.
|
|
|
|
*
|
|
|
|
* @param page_table The page table to use to perform the mapping.
|
|
|
|
* @param base The base address to begin mapping at.
|
|
|
|
* @param size The total size of the range in bytes.
|
2020-09-23 17:39:00 +00:00
|
|
|
* @param target The target address to begin mapping from.
|
2019-11-26 18:09:12 +00:00
|
|
|
* @param type The page type to map the memory as.
|
|
|
|
*/
|
2020-04-09 02:50:46 +00:00
|
|
|
void MapPages(Common::PageTable& page_table, VAddr base, u64 size, PAddr target,
|
2019-11-26 18:09:12 +00:00
|
|
|
Common::PageType type) {
|
2020-04-09 02:50:46 +00:00
|
|
|
LOG_DEBUG(HW_Memory, "Mapping {:016X} onto {:016X}-{:016X}", target, base * PAGE_SIZE,
|
2019-11-26 18:09:12 +00:00
|
|
|
(base + size) * PAGE_SIZE);
|
|
|
|
|
|
|
|
// During boot, current_page_table might not be set yet, in which case we need not flush
|
|
|
|
if (system.IsPoweredOn()) {
|
|
|
|
auto& gpu = system.GPU();
|
|
|
|
for (u64 i = 0; i < size; i++) {
|
|
|
|
const auto page = base + i;
|
core/memory: Read and write page table atomically
Squash attributes into the pointer's integer, making them an uintptr_t
pair containing 2 bits at the bottom and then the pointer. These bits
are currently unused thanks to alignment requirements.
Configure Dynarmic to mask out these bits on pointer reads.
While we are at it, remove some unused attributes carried over from
Citra.
Read/Write and other hot functions use a two step unpacking process that
is less readable to stop MSVC from emitting an extra AND instruction in
the hot path:
mov rdi,rcx
shr rdx,0Ch
mov r8,qword ptr [rax+8]
mov rax,qword ptr [r8+rdx*8]
mov rdx,rax
-and al,3
and rdx,0FFFFFFFFFFFFFFFCh
je Core::Memory::Memory::Impl::Read<unsigned char>
mov rax,qword ptr [vaddr]
movzx eax,byte ptr [rdx+rax]
2020-12-30 00:16:57 +00:00
|
|
|
if (page_table.pointers[page].Type() == Common::PageType::RasterizerCachedMemory) {
|
2019-11-26 18:09:12 +00:00
|
|
|
gpu.FlushAndInvalidateRegion(page << PAGE_BITS, PAGE_SIZE);
|
|
|
|
}
|
2019-09-19 01:50:21 +00:00
|
|
|
}
|
|
|
|
}
|
2018-03-23 02:56:41 +00:00
|
|
|
|
2019-11-26 18:09:12 +00:00
|
|
|
const VAddr end = base + size;
|
|
|
|
ASSERT_MSG(end <= page_table.pointers.size(), "out of range mapping at {:016X}",
|
|
|
|
base + page_table.pointers.size());
|
2015-05-13 02:38:56 +00:00
|
|
|
|
2020-04-09 02:50:46 +00:00
|
|
|
if (!target) {
|
2020-07-05 10:25:08 +00:00
|
|
|
ASSERT_MSG(type != Common::PageType::Memory,
|
|
|
|
"Mapping memory page without a pointer @ {:016x}", base * PAGE_SIZE);
|
|
|
|
|
2020-04-09 02:50:46 +00:00
|
|
|
while (base != end) {
|
core/memory: Read and write page table atomically
Squash attributes into the pointer's integer, making them an uintptr_t
pair containing 2 bits at the bottom and then the pointer. These bits
are currently unused thanks to alignment requirements.
Configure Dynarmic to mask out these bits on pointer reads.
While we are at it, remove some unused attributes carried over from
Citra.
Read/Write and other hot functions use a two step unpacking process that
is less readable to stop MSVC from emitting an extra AND instruction in
the hot path:
mov rdi,rcx
shr rdx,0Ch
mov r8,qword ptr [rax+8]
mov rax,qword ptr [r8+rdx*8]
mov rdx,rax
-and al,3
and rdx,0FFFFFFFFFFFFFFFCh
je Core::Memory::Memory::Impl::Read<unsigned char>
mov rax,qword ptr [vaddr]
movzx eax,byte ptr [rdx+rax]
2020-12-30 00:16:57 +00:00
|
|
|
page_table.pointers[base].Store(nullptr, type);
|
2020-04-09 02:50:46 +00:00
|
|
|
page_table.backing_addr[base] = 0;
|
2015-05-13 02:38:56 +00:00
|
|
|
|
2020-04-09 02:50:46 +00:00
|
|
|
base += 1;
|
|
|
|
}
|
2019-11-26 18:09:12 +00:00
|
|
|
} else {
|
|
|
|
while (base != end) {
|
core/memory: Read and write page table atomically
Squash attributes into the pointer's integer, making them an uintptr_t
pair containing 2 bits at the bottom and then the pointer. These bits
are currently unused thanks to alignment requirements.
Configure Dynarmic to mask out these bits on pointer reads.
While we are at it, remove some unused attributes carried over from
Citra.
Read/Write and other hot functions use a two step unpacking process that
is less readable to stop MSVC from emitting an extra AND instruction in
the hot path:
mov rdi,rcx
shr rdx,0Ch
mov r8,qword ptr [rax+8]
mov rax,qword ptr [r8+rdx*8]
mov rdx,rax
-and al,3
and rdx,0FFFFFFFFFFFFFFFCh
je Core::Memory::Memory::Impl::Read<unsigned char>
mov rax,qword ptr [vaddr]
movzx eax,byte ptr [rdx+rax]
2020-12-30 00:16:57 +00:00
|
|
|
page_table.pointers[base].Store(
|
|
|
|
system.DeviceMemory().GetPointer(target) - (base << PAGE_BITS), type);
|
2020-04-09 02:50:46 +00:00
|
|
|
page_table.backing_addr[base] = target - (base << PAGE_BITS);
|
|
|
|
|
core/memory: Read and write page table atomically
Squash attributes into the pointer's integer, making them an uintptr_t
pair containing 2 bits at the bottom and then the pointer. These bits
are currently unused thanks to alignment requirements.
Configure Dynarmic to mask out these bits on pointer reads.
While we are at it, remove some unused attributes carried over from
Citra.
Read/Write and other hot functions use a two step unpacking process that
is less readable to stop MSVC from emitting an extra AND instruction in
the hot path:
mov rdi,rcx
shr rdx,0Ch
mov r8,qword ptr [rax+8]
mov rax,qword ptr [r8+rdx*8]
mov rdx,rax
-and al,3
and rdx,0FFFFFFFFFFFFFFFCh
je Core::Memory::Memory::Impl::Read<unsigned char>
mov rax,qword ptr [vaddr]
movzx eax,byte ptr [rdx+rax]
2020-12-30 00:16:57 +00:00
|
|
|
ASSERT_MSG(page_table.pointers[base].Pointer(),
|
2019-12-30 23:11:45 +00:00
|
|
|
"memory mapping base yield a nullptr within the table");
|
2019-02-27 22:22:47 +00:00
|
|
|
|
2019-11-26 18:09:12 +00:00
|
|
|
base += 1;
|
2020-04-09 02:50:46 +00:00
|
|
|
target += PAGE_SIZE;
|
2019-11-26 18:09:12 +00:00
|
|
|
}
|
2019-02-27 22:22:47 +00:00
|
|
|
}
|
2014-04-01 22:18:02 +00:00
|
|
|
}
|
2013-09-19 03:52:51 +00:00
|
|
|
|
2021-08-07 01:32:06 +00:00
|
|
|
[[nodiscard]] u8* GetPointerImpl(VAddr vaddr, auto on_unmapped, auto on_rasterizer) const {
|
2021-05-29 07:24:09 +00:00
|
|
|
// AARCH64 masks the upper 16 bit of all memory accesses
|
|
|
|
vaddr &= 0xffffffffffffLL;
|
|
|
|
|
|
|
|
if (vaddr >= 1uLL << current_page_table->GetAddressSpaceBits()) {
|
2021-08-07 01:32:06 +00:00
|
|
|
on_unmapped();
|
|
|
|
return nullptr;
|
2021-05-29 07:24:09 +00:00
|
|
|
}
|
|
|
|
|
2020-12-25 05:51:49 +00:00
|
|
|
// Avoid adding any extra logic to this fast-path block
|
core/memory: Read and write page table atomically
Squash attributes into the pointer's integer, making them an uintptr_t
pair containing 2 bits at the bottom and then the pointer. These bits
are currently unused thanks to alignment requirements.
Configure Dynarmic to mask out these bits on pointer reads.
While we are at it, remove some unused attributes carried over from
Citra.
Read/Write and other hot functions use a two step unpacking process that
is less readable to stop MSVC from emitting an extra AND instruction in
the hot path:
mov rdi,rcx
shr rdx,0Ch
mov r8,qword ptr [rax+8]
mov rax,qword ptr [r8+rdx*8]
mov rdx,rax
-and al,3
and rdx,0FFFFFFFFFFFFFFFCh
je Core::Memory::Memory::Impl::Read<unsigned char>
mov rax,qword ptr [vaddr]
movzx eax,byte ptr [rdx+rax]
2020-12-30 00:16:57 +00:00
|
|
|
const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> PAGE_BITS].Raw();
|
2021-08-07 01:32:06 +00:00
|
|
|
if (u8* const pointer = Common::PageTable::PageInfo::ExtractPointer(raw_pointer)) {
|
|
|
|
return &pointer[vaddr];
|
2019-11-26 21:29:34 +00:00
|
|
|
}
|
core/memory: Read and write page table atomically
Squash attributes into the pointer's integer, making them an uintptr_t
pair containing 2 bits at the bottom and then the pointer. These bits
are currently unused thanks to alignment requirements.
Configure Dynarmic to mask out these bits on pointer reads.
While we are at it, remove some unused attributes carried over from
Citra.
Read/Write and other hot functions use a two step unpacking process that
is less readable to stop MSVC from emitting an extra AND instruction in
the hot path:
mov rdi,rcx
shr rdx,0Ch
mov r8,qword ptr [rax+8]
mov rax,qword ptr [r8+rdx*8]
mov rdx,rax
-and al,3
and rdx,0FFFFFFFFFFFFFFFCh
je Core::Memory::Memory::Impl::Read<unsigned char>
mov rax,qword ptr [vaddr]
movzx eax,byte ptr [rdx+rax]
2020-12-30 00:16:57 +00:00
|
|
|
switch (Common::PageTable::PageInfo::ExtractType(raw_pointer)) {
|
2019-11-26 21:29:34 +00:00
|
|
|
case Common::PageType::Unmapped:
|
2021-08-07 01:32:06 +00:00
|
|
|
on_unmapped();
|
|
|
|
return nullptr;
|
2019-11-26 21:29:34 +00:00
|
|
|
case Common::PageType::Memory:
|
2021-08-07 01:32:06 +00:00
|
|
|
ASSERT_MSG(false, "Mapped memory page without a pointer @ 0x{:016X}", vaddr);
|
|
|
|
return nullptr;
|
2019-11-26 21:29:34 +00:00
|
|
|
case Common::PageType::RasterizerCachedMemory: {
|
2021-08-07 01:32:06 +00:00
|
|
|
u8* const host_ptr{GetPointerFromRasterizerCachedMemory(vaddr)};
|
|
|
|
on_rasterizer();
|
|
|
|
return host_ptr;
|
2019-11-26 21:29:34 +00:00
|
|
|
}
|
|
|
|
default:
|
|
|
|
UNREACHABLE();
|
|
|
|
}
|
2021-08-07 01:32:06 +00:00
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
|
|
|
|
[[nodiscard]] u8* GetPointer(const VAddr vaddr) const {
|
|
|
|
return GetPointerImpl(
|
2021-08-07 03:03:21 +00:00
|
|
|
vaddr, [vaddr]() { LOG_ERROR(HW_Memory, "Unmapped GetPointer @ 0x{:016X}", vaddr); },
|
2021-08-07 01:32:06 +00:00
|
|
|
[]() {});
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Reads a particular data type out of memory at the given virtual address.
|
|
|
|
*
|
|
|
|
* @param vaddr The virtual address to read the data type from.
|
|
|
|
*
|
|
|
|
* @tparam T The data type to read out of memory. This type *must* be
|
|
|
|
* trivially copyable, otherwise the behavior of this function
|
|
|
|
* is undefined.
|
|
|
|
*
|
|
|
|
* @returns The instance of T read from the specified virtual address.
|
|
|
|
*/
|
|
|
|
template <typename T>
|
|
|
|
T Read(VAddr vaddr) {
|
|
|
|
T result = 0;
|
|
|
|
const u8* const ptr = GetPointerImpl(
|
|
|
|
vaddr,
|
|
|
|
[vaddr]() {
|
2021-08-07 03:03:21 +00:00
|
|
|
LOG_ERROR(HW_Memory, "Unmapped Read{} @ 0x{:016X}", sizeof(T) * 8, vaddr);
|
2021-08-07 01:32:06 +00:00
|
|
|
},
|
|
|
|
[&system = system, vaddr]() { system.GPU().FlushRegion(vaddr, sizeof(T)); });
|
|
|
|
if (ptr) {
|
|
|
|
std::memcpy(&result, ptr, sizeof(T));
|
|
|
|
}
|
|
|
|
return result;
|
2019-11-26 21:29:34 +00:00
|
|
|
}
|
|
|
|
|
2019-11-26 22:39:57 +00:00
|
|
|
/**
|
|
|
|
* Writes a particular data type to memory at the given virtual address.
|
|
|
|
*
|
|
|
|
* @param vaddr The virtual address to write the data type to.
|
|
|
|
*
|
|
|
|
* @tparam T The data type to write to memory. This type *must* be
|
|
|
|
* trivially copyable, otherwise the behavior of this function
|
|
|
|
* is undefined.
|
|
|
|
*/
|
|
|
|
template <typename T>
|
2021-05-29 07:24:09 +00:00
|
|
|
void Write(VAddr vaddr, const T data) {
|
2021-08-07 01:32:06 +00:00
|
|
|
u8* const ptr = GetPointerImpl(
|
|
|
|
vaddr,
|
|
|
|
[vaddr, data]() {
|
2021-08-07 03:03:21 +00:00
|
|
|
LOG_ERROR(HW_Memory, "Unmapped Write{} @ 0x{:016X} = 0x{:016X}", sizeof(T) * 8,
|
2021-08-07 01:32:06 +00:00
|
|
|
vaddr, static_cast<u64>(data));
|
|
|
|
},
|
|
|
|
[&system = system, vaddr]() { system.GPU().InvalidateRegion(vaddr, sizeof(T)); });
|
|
|
|
if (ptr) {
|
|
|
|
std::memcpy(ptr, &data, sizeof(T));
|
2019-11-26 22:39:57 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-03-07 22:59:42 +00:00
|
|
|
template <typename T>
|
2021-05-29 07:24:09 +00:00
|
|
|
bool WriteExclusive(VAddr vaddr, const T data, const T expected) {
|
2021-08-07 01:32:06 +00:00
|
|
|
u8* const ptr = GetPointerImpl(
|
|
|
|
vaddr,
|
|
|
|
[vaddr, data]() {
|
2021-08-07 03:03:21 +00:00
|
|
|
LOG_ERROR(HW_Memory, "Unmapped WriteExclusive{} @ 0x{:016X} = 0x{:016X}",
|
|
|
|
sizeof(T) * 8, vaddr, static_cast<u64>(data));
|
2021-08-07 01:32:06 +00:00
|
|
|
},
|
|
|
|
[&system = system, vaddr]() { system.GPU().InvalidateRegion(vaddr, sizeof(T)); });
|
|
|
|
if (ptr) {
|
|
|
|
const auto volatile_pointer = reinterpret_cast<volatile T*>(ptr);
|
core/memory: Read and write page table atomically
Squash attributes into the pointer's integer, making them an uintptr_t
pair containing 2 bits at the bottom and then the pointer. These bits
are currently unused thanks to alignment requirements.
Configure Dynarmic to mask out these bits on pointer reads.
While we are at it, remove some unused attributes carried over from
Citra.
Read/Write and other hot functions use a two step unpacking process that
is less readable to stop MSVC from emitting an extra AND instruction in
the hot path:
mov rdi,rcx
shr rdx,0Ch
mov r8,qword ptr [rax+8]
mov rax,qword ptr [r8+rdx*8]
mov rdx,rax
-and al,3
and rdx,0FFFFFFFFFFFFFFFCh
je Core::Memory::Memory::Impl::Read<unsigned char>
mov rax,qword ptr [vaddr]
movzx eax,byte ptr [rdx+rax]
2020-12-30 00:16:57 +00:00
|
|
|
return Common::AtomicCompareAndSwap(volatile_pointer, data, expected);
|
2020-03-07 22:59:42 +00:00
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2021-05-29 07:24:09 +00:00
|
|
|
bool WriteExclusive128(VAddr vaddr, const u128 data, const u128 expected) {
|
2021-08-07 01:32:06 +00:00
|
|
|
u8* const ptr = GetPointerImpl(
|
|
|
|
vaddr,
|
|
|
|
[vaddr, data]() {
|
2021-08-07 03:03:21 +00:00
|
|
|
LOG_ERROR(HW_Memory, "Unmapped WriteExclusive128 @ 0x{:016X} = 0x{:016X}{:016X}",
|
2021-08-07 01:32:06 +00:00
|
|
|
vaddr, static_cast<u64>(data[1]), static_cast<u64>(data[0]));
|
|
|
|
},
|
|
|
|
[&system = system, vaddr]() { system.GPU().InvalidateRegion(vaddr, sizeof(u128)); });
|
|
|
|
if (ptr) {
|
|
|
|
const auto volatile_pointer = reinterpret_cast<volatile u64*>(ptr);
|
core/memory: Read and write page table atomically
Squash attributes into the pointer's integer, making them an uintptr_t
pair containing 2 bits at the bottom and then the pointer. These bits
are currently unused thanks to alignment requirements.
Configure Dynarmic to mask out these bits on pointer reads.
While we are at it, remove some unused attributes carried over from
Citra.
Read/Write and other hot functions use a two step unpacking process that
is less readable to stop MSVC from emitting an extra AND instruction in
the hot path:
mov rdi,rcx
shr rdx,0Ch
mov r8,qword ptr [rax+8]
mov rax,qword ptr [r8+rdx*8]
mov rdx,rax
-and al,3
and rdx,0FFFFFFFFFFFFFFFCh
je Core::Memory::Memory::Impl::Read<unsigned char>
mov rax,qword ptr [vaddr]
movzx eax,byte ptr [rdx+rax]
2020-12-30 00:16:57 +00:00
|
|
|
return Common::AtomicCompareAndSwap(volatile_pointer, data, expected);
|
2020-03-07 22:59:42 +00:00
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-11-26 23:34:30 +00:00
|
|
|
Common::PageTable* current_page_table = nullptr;
|
2019-11-26 18:09:12 +00:00
|
|
|
Core::System& system;
|
|
|
|
};
|
2014-04-26 05:27:25 +00:00
|
|
|
|
2021-04-03 00:06:21 +00:00
|
|
|
Memory::Memory(Core::System& system_) : system{system_} {
|
|
|
|
Reset();
|
|
|
|
}
|
|
|
|
|
2019-11-26 18:09:12 +00:00
|
|
|
Memory::~Memory() = default;
|
2016-01-30 18:41:04 +00:00
|
|
|
|
2021-04-03 00:06:21 +00:00
|
|
|
void Memory::Reset() {
|
|
|
|
impl = std::make_unique<Impl>(system);
|
|
|
|
}
|
|
|
|
|
2021-04-24 05:04:28 +00:00
|
|
|
void Memory::SetCurrentPageTable(Kernel::KProcess& process, u32 core_id) {
|
2020-02-25 02:04:12 +00:00
|
|
|
impl->SetCurrentPageTable(process, core_id);
|
2019-11-26 23:34:30 +00:00
|
|
|
}
|
|
|
|
|
2020-04-09 02:50:46 +00:00
|
|
|
void Memory::MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, PAddr target) {
|
2019-11-26 18:09:12 +00:00
|
|
|
impl->MapMemoryRegion(page_table, base, size, target);
|
2015-05-13 02:38:56 +00:00
|
|
|
}
|
2014-12-30 03:35:06 +00:00
|
|
|
|
2019-11-26 18:09:12 +00:00
|
|
|
void Memory::UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) {
|
|
|
|
impl->UnmapRegion(page_table, base, size);
|
|
|
|
}
|
2016-04-16 22:57:57 +00:00
|
|
|
|
2019-11-26 18:46:41 +00:00
|
|
|
bool Memory::IsValidVirtualAddress(const VAddr vaddr) const {
|
2021-08-05 20:11:14 +00:00
|
|
|
const Kernel::KProcess& process = *system.CurrentProcess();
|
2021-08-05 20:29:43 +00:00
|
|
|
const auto& page_table = process.PageTable().PageTableImpl();
|
|
|
|
const auto [pointer, type] = page_table.pointers[vaddr >> PAGE_BITS].PointerType();
|
2021-08-05 20:11:14 +00:00
|
|
|
return pointer != nullptr || type == Common::PageType::RasterizerCachedMemory;
|
2019-11-26 18:46:41 +00:00
|
|
|
}
|
|
|
|
|
2019-11-26 20:19:15 +00:00
|
|
|
u8* Memory::GetPointer(VAddr vaddr) {
|
|
|
|
return impl->GetPointer(vaddr);
|
|
|
|
}
|
|
|
|
|
|
|
|
const u8* Memory::GetPointer(VAddr vaddr) const {
|
|
|
|
return impl->GetPointer(vaddr);
|
|
|
|
}
|
|
|
|
|
2019-11-26 21:29:34 +00:00
|
|
|
u8 Memory::Read8(const VAddr addr) {
|
|
|
|
return impl->Read8(addr);
|
|
|
|
}
|
|
|
|
|
|
|
|
u16 Memory::Read16(const VAddr addr) {
|
|
|
|
return impl->Read16(addr);
|
|
|
|
}
|
|
|
|
|
|
|
|
u32 Memory::Read32(const VAddr addr) {
|
|
|
|
return impl->Read32(addr);
|
|
|
|
}
|
|
|
|
|
|
|
|
u64 Memory::Read64(const VAddr addr) {
|
|
|
|
return impl->Read64(addr);
|
|
|
|
}
|
|
|
|
|
2019-11-26 22:39:57 +00:00
|
|
|
void Memory::Write8(VAddr addr, u8 data) {
|
|
|
|
impl->Write8(addr, data);
|
|
|
|
}
|
|
|
|
|
|
|
|
void Memory::Write16(VAddr addr, u16 data) {
|
|
|
|
impl->Write16(addr, data);
|
|
|
|
}
|
|
|
|
|
|
|
|
void Memory::Write32(VAddr addr, u32 data) {
|
|
|
|
impl->Write32(addr, data);
|
|
|
|
}
|
|
|
|
|
|
|
|
void Memory::Write64(VAddr addr, u64 data) {
|
|
|
|
impl->Write64(addr, data);
|
|
|
|
}
|
|
|
|
|
2020-03-07 22:59:42 +00:00
|
|
|
bool Memory::WriteExclusive8(VAddr addr, u8 data, u8 expected) {
|
|
|
|
return impl->WriteExclusive8(addr, data, expected);
|
|
|
|
}
|
|
|
|
|
|
|
|
bool Memory::WriteExclusive16(VAddr addr, u16 data, u16 expected) {
|
|
|
|
return impl->WriteExclusive16(addr, data, expected);
|
|
|
|
}
|
|
|
|
|
|
|
|
bool Memory::WriteExclusive32(VAddr addr, u32 data, u32 expected) {
|
|
|
|
return impl->WriteExclusive32(addr, data, expected);
|
|
|
|
}
|
|
|
|
|
|
|
|
bool Memory::WriteExclusive64(VAddr addr, u64 data, u64 expected) {
|
|
|
|
return impl->WriteExclusive64(addr, data, expected);
|
|
|
|
}
|
|
|
|
|
|
|
|
bool Memory::WriteExclusive128(VAddr addr, u128 data, u128 expected) {
|
|
|
|
return impl->WriteExclusive128(addr, data, expected);
|
|
|
|
}
|
|
|
|
|
2019-11-26 20:48:19 +00:00
|
|
|
std::string Memory::ReadCString(VAddr vaddr, std::size_t max_length) {
|
|
|
|
return impl->ReadCString(vaddr, max_length);
|
|
|
|
}
|
|
|
|
|
2021-04-24 05:04:28 +00:00
|
|
|
void Memory::ReadBlock(const Kernel::KProcess& process, const VAddr src_addr, void* dest_buffer,
|
2019-11-26 21:29:34 +00:00
|
|
|
const std::size_t size) {
|
2021-08-05 20:11:14 +00:00
|
|
|
impl->ReadBlockImpl<false>(process, src_addr, dest_buffer, size);
|
2019-11-26 21:29:34 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void Memory::ReadBlock(const VAddr src_addr, void* dest_buffer, const std::size_t size) {
|
|
|
|
impl->ReadBlock(src_addr, dest_buffer, size);
|
|
|
|
}
|
|
|
|
|
2020-04-05 21:23:49 +00:00
|
|
|
void Memory::ReadBlockUnsafe(const VAddr src_addr, void* dest_buffer, const std::size_t size) {
|
|
|
|
impl->ReadBlockUnsafe(src_addr, dest_buffer, size);
|
|
|
|
}
|
|
|
|
|
2021-04-24 05:04:28 +00:00
|
|
|
void Memory::WriteBlock(const Kernel::KProcess& process, VAddr dest_addr, const void* src_buffer,
|
2019-11-26 22:39:57 +00:00
|
|
|
std::size_t size) {
|
2021-08-05 20:11:14 +00:00
|
|
|
impl->WriteBlockImpl<false>(process, dest_addr, src_buffer, size);
|
2019-11-26 22:39:57 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void Memory::WriteBlock(const VAddr dest_addr, const void* src_buffer, const std::size_t size) {
|
|
|
|
impl->WriteBlock(dest_addr, src_buffer, size);
|
|
|
|
}
|
|
|
|
|
2020-04-05 21:23:49 +00:00
|
|
|
void Memory::WriteBlockUnsafe(const VAddr dest_addr, const void* src_buffer,
|
|
|
|
const std::size_t size) {
|
|
|
|
impl->WriteBlockUnsafe(dest_addr, src_buffer, size);
|
|
|
|
}
|
|
|
|
|
2021-04-24 05:04:28 +00:00
|
|
|
void Memory::CopyBlock(const Kernel::KProcess& process, VAddr dest_addr, VAddr src_addr,
|
2019-11-26 21:06:49 +00:00
|
|
|
const std::size_t size) {
|
|
|
|
impl->CopyBlock(process, dest_addr, src_addr, size);
|
|
|
|
}
|
|
|
|
|
2019-11-26 20:56:13 +00:00
|
|
|
void Memory::RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) {
|
|
|
|
impl->RasterizerMarkRegionCached(vaddr, size, cached);
|
|
|
|
}
|
|
|
|
|
2020-03-31 19:10:44 +00:00
|
|
|
} // namespace Core::Memory
|