// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <algorithm>
#include <cstring>

#include "common/alignment.h"
#include "common/assert.h"
#include "core/core.h"
#include "core/hle/kernel/memory/page_table.h"
#include "core/hle/kernel/process.h"
#include "core/memory.h"
#include "video_core/gpu.h"
#include "video_core/memory_manager.h"
#include "video_core/rasterizer_interface.h"

namespace Tegra {

MemoryManager::MemoryManager(Core::System& system, VideoCore::RasterizerInterface& rasterizer)
    : system{system}, rasterizer{rasterizer}, page_table(page_table_size) {}

MemoryManager::~MemoryManager() = default;
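
// Helper for Map/Unmap/Allocate: writes one page table entry per page in
// [gpu_addr, gpu_addr + size). Each entry is page_entry advanced by the page's offset, so a
// contiguous GPU range maps onto a contiguous backing range; a final partial page passes its
// remaining size through to SetPageEntry.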
GPUVAddr MemoryManager::UpdateRange(GPUVAddr gpu_addr, PageEntry page_entry, std::size_t size) {
    u64 remaining_size{size};
    for (u64 offset{}; offset < size; offset += page_size) {
        if (remaining_size < page_size) {
            SetPageEntry(gpu_addr + offset, page_entry + offset, remaining_size);
        } else {
            SetPageEntry(gpu_addr + offset, page_entry + offset);
        }
        remaining_size -= page_size;
    }
    return gpu_addr;
}
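
// Maps size bytes of guest memory at cpu_addr into the GPU address space at the caller-chosen
// gpu_addr; any entries already covering the range are overwritten.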
GPUVAddr MemoryManager::Map(VAddr cpu_addr, GPUVAddr gpu_addr, std::size_t size) {
    return UpdateRange(gpu_addr, cpu_addr, size);
}
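
// As Map, but the memory manager chooses the GPU virtual address. The result of FindFreeRange is
// dereferenced unchecked, so address-space exhaustion is not handled here.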
GPUVAddr MemoryManager::MapAllocate(VAddr cpu_addr, std::size_t size, std::size_t align) {
    return Map(cpu_addr, *FindFreeRange(size, align), size);
}
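
// Removes the mapping for [gpu_addr, gpu_addr + size). Caches are flushed and invalidated first so
// no stale GPU data survives the unmap; gpu_addr is assumed to be mapped, since its CPU address is
// dereferenced unchecked.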
void MemoryManager::Unmap(GPUVAddr gpu_addr, std::size_t size) {
    if (!size) {
        return;
    }

    // Flush and invalidate through the GPU interface, to be asynchronous if possible.
    system.GPU().FlushAndInvalidateRegion(*GpuToCpuAddress(gpu_addr), size);

    UpdateRange(gpu_addr, PageEntry::State::Unmapped, size);
}
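
// Marks [gpu_addr, gpu_addr + size) as allocated (reserved, but not yet backed by CPU memory).
// Returns an empty optional if any page in the range is already in use.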
std::optional<GPUVAddr> MemoryManager::AllocateFixed(GPUVAddr gpu_addr, std::size_t size) {
    for (u64 offset{}; offset < size; offset += page_size) {
        if (!GetPageEntry(gpu_addr + offset).IsUnmapped()) {
            return {};
        }
    }

    return UpdateRange(gpu_addr, PageEntry::State::Allocated, size);
}
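
// As AllocateFixed, but at an address of the manager's choosing. Both optionals are dereferenced
// unchecked; allocation failure is not handled here.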
GPUVAddr MemoryManager::Allocate(std::size_t size, std::size_t align) {
    return *AllocateFixed(*FindFreeRange(size, align), size);
}
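
// Pins the guest memory backing a valid page entry in the current process' page table for device
// (GPU) use. Entries without a backing address are ignored.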
void MemoryManager::TryLockPage(PageEntry page_entry, std::size_t size) {
    if (!page_entry.IsValid()) {
        return;
    }

    ASSERT(system.CurrentProcess()
               ->PageTable()
               .LockForDeviceAddressSpace(page_entry.ToAddress(), size)
               .IsSuccess());
}
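
// Counterpart to TryLockPage: releases the device-address-space lock on the backing memory of a
// valid page entry.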
void MemoryManager::TryUnlockPage(PageEntry page_entry, std::size_t size) {
    if (!page_entry.IsValid()) {
        return;
    }

    ASSERT(system.CurrentProcess()
               ->PageTable()
               .UnlockForDeviceAddressSpace(page_entry.ToAddress(), size)
               .IsSuccess());
}
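
// Returns the raw page table entry covering gpu_addr.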
PageEntry MemoryManager::GetPageEntry(GPUVAddr gpu_addr) const {
    return page_table[PageEntryIndex(gpu_addr)];
}
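
// Writes a single page table entry. Lock tracking of the backing memory is disabled below; see the
// TODO inside.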
void MemoryManager::SetPageEntry(GPUVAddr gpu_addr, PageEntry page_entry, std::size_t size) {
    // TODO(bunnei): We should lock/unlock device regions. This currently causes issues due to
    // improper tracking, but should be fixed in the future.

    //// Unlock the old page
    // TryUnlockPage(page_table[PageEntryIndex(gpu_addr)], size);

    //// Lock the new page
    // TryLockPage(page_entry, size);

    page_table[PageEntryIndex(gpu_addr)] = page_entry;
}
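
// Linear first-fit scan for size bytes of unmapped GPU address space with the requested alignment
// (rounded up to at least one page). Returns an empty optional when no suitable gap exists.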
std::optional<GPUVAddr> MemoryManager::FindFreeRange(std::size_t size, std::size_t align) const {
    if (!align) {
        align = page_size;
    } else {
        align = Common::AlignUp(align, page_size);
    }

    u64 available_size{};
    GPUVAddr gpu_addr{address_space_start};
    while (gpu_addr + available_size < address_space_size) {
        if (GetPageEntry(gpu_addr + available_size).IsUnmapped()) {
            available_size += page_size;

            if (available_size >= size) {
                return gpu_addr;
            }
        } else {
            gpu_addr += available_size + page_size;
            available_size = 0;

            const auto remainder{gpu_addr % align};
            if (remainder) {
                gpu_addr = (gpu_addr - remainder) + align;
            }
        }
    }

    return {};
}
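
// Translates a GPU virtual address to the guest CPU address that backs it; returns an empty
// optional when the page entry holds no CPU address (unmapped or allocated-only).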
std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr gpu_addr) const {
    const auto page_entry{GetPageEntry(gpu_addr)};
    if (!page_entry.IsValid()) {
        return {};
    }

    return page_entry.ToAddress() + (gpu_addr & page_mask);
}
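
// Typed reads and writes through the page table. The fast path goes through GetPointer; a failed
// translation is considered a caller bug, hence UNREACHABLE().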
template <typename T>
T MemoryManager::Read(GPUVAddr addr) const {
    if (auto page_pointer{GetPointer(addr)}; page_pointer) {
        // NOTE: Avoid adding any extra logic to this fast-path block
        T value;
        std::memcpy(&value, page_pointer, sizeof(T));
        return value;
    }

    UNREACHABLE();

    return {};
}

template <typename T>
void MemoryManager::Write(GPUVAddr addr, T data) {
    if (auto page_pointer{GetPointer(addr)}; page_pointer) {
        // NOTE: Avoid adding any extra logic to this fast-path block
        std::memcpy(page_pointer, &data, sizeof(T));
        return;
    }

    UNREACHABLE();
}
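
// Explicit template instantiations for the supported access widths.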
template u8 MemoryManager::Read<u8>(GPUVAddr addr) const;
template u16 MemoryManager::Read<u16>(GPUVAddr addr) const;
template u32 MemoryManager::Read<u32>(GPUVAddr addr) const;
template u64 MemoryManager::Read<u64>(GPUVAddr addr) const;
template void MemoryManager::Write<u8>(GPUVAddr addr, u8 data);
template void MemoryManager::Write<u16>(GPUVAddr addr, u16 data);
template void MemoryManager::Write<u32>(GPUVAddr addr, u32 data);
template void MemoryManager::Write<u64>(GPUVAddr addr, u64 data);
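
// Returns a host pointer to the guest memory backing gpu_addr, or nullptr when the address is
// unmapped or has no backing memory.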
u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) {
    if (!GetPageEntry(gpu_addr).IsValid()) {
        return {};
    }

    const auto address{GpuToCpuAddress(gpu_addr)};
    if (!address) {
        return {};
    }

    return system.Memory().GetPointer(*address);
}
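
// Const overload with identical translation logic.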
const u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) const {
    if (!GetPageEntry(gpu_addr).IsValid()) {
        return {};
    }

    const auto address{GpuToCpuAddress(gpu_addr)};
    if (!address) {
        return {};
    }

    return system.Memory().GetPointer(*address);
}
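
// Copies size bytes from GPU memory into dest_buffer, page by page. Source pages are flushed
// through the rasterizer first so the data is coherent even in asynchronous GPU mode; unmapped
// pages are skipped, leaving the destination untouched.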
void MemoryManager::ReadBlock(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size) const {
    std::size_t remaining_size{size};
    std::size_t page_index{gpu_src_addr >> page_bits};
    std::size_t page_offset{gpu_src_addr & page_mask};

    while (remaining_size > 0) {
        const std::size_t copy_amount{
            std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)};

        if (const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; page_addr) {
            const auto src_addr{*page_addr + page_offset};

            // Flush must happen on the rasterizer interface, such that memory is always synchronous
            // when it is read (even when in asynchronous GPU mode). Fixes Dead Cells title menu.
            rasterizer.FlushRegion(src_addr, copy_amount);
            system.Memory().ReadBlockUnsafe(src_addr, dest_buffer, copy_amount);
        }

        page_index++;
        page_offset = 0;
        dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount;
        remaining_size -= copy_amount;
    }
}
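
// As ReadBlock, but without rasterizer synchronization: the caller is responsible for cache
// coherency. Unmapped source pages read back as zeros.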
void MemoryManager::ReadBlockUnsafe(GPUVAddr gpu_src_addr, void* dest_buffer,
                                    const std::size_t size) const {
    std::size_t remaining_size{size};
    std::size_t page_index{gpu_src_addr >> page_bits};
    std::size_t page_offset{gpu_src_addr & page_mask};

    while (remaining_size > 0) {
        const std::size_t copy_amount{
            std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)};

        if (const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; page_addr) {
            const auto src_addr{*page_addr + page_offset};
            system.Memory().ReadBlockUnsafe(src_addr, dest_buffer, copy_amount);
        } else {
            std::memset(dest_buffer, 0, copy_amount);
        }

        page_index++;
        page_offset = 0;
        dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount;
        remaining_size -= copy_amount;
    }
}
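
// Copies size bytes from src_buffer into GPU memory, invalidating written ranges through the
// rasterizer so stale cached data is discarded. Unmapped destination pages are skipped.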
void MemoryManager::WriteBlock(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size) {
    std::size_t remaining_size{size};
    std::size_t page_index{gpu_dest_addr >> page_bits};
    std::size_t page_offset{gpu_dest_addr & page_mask};

    while (remaining_size > 0) {
        const std::size_t copy_amount{
            std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)};

        if (const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; page_addr) {
            const auto dest_addr{*page_addr + page_offset};

            // Invalidate must happen on the rasterizer interface, such that memory is always
            // synchronous when it is written (even when in asynchronous GPU mode).
            rasterizer.InvalidateRegion(dest_addr, copy_amount);
            system.Memory().WriteBlockUnsafe(dest_addr, src_buffer, copy_amount);
        }

        page_index++;
        page_offset = 0;
        src_buffer = static_cast<const u8*>(src_buffer) + copy_amount;
        remaining_size -= copy_amount;
    }
}
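
// As WriteBlock, but without cache invalidation; for callers that handle coherency themselves.
// Writes to unmapped pages are dropped.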
void MemoryManager::WriteBlockUnsafe(GPUVAddr gpu_dest_addr, const void* src_buffer,
                                     std::size_t size) {
    std::size_t remaining_size{size};
    std::size_t page_index{gpu_dest_addr >> page_bits};
    std::size_t page_offset{gpu_dest_addr & page_mask};

    while (remaining_size > 0) {
        const std::size_t copy_amount{
            std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)};

        if (const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; page_addr) {
            const auto dest_addr{*page_addr + page_offset};
            system.Memory().WriteBlockUnsafe(dest_addr, src_buffer, copy_amount);
        }

        page_index++;
        page_offset = 0;
        src_buffer = static_cast<const u8*>(src_buffer) + copy_amount;
        remaining_size -= copy_amount;
    }
}
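
// GPU-to-GPU copy, staged through a temporary host buffer with full cache synchronization on both
// ends.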
void MemoryManager::CopyBlock(GPUVAddr gpu_dest_addr, GPUVAddr gpu_src_addr, std::size_t size) {
    std::vector<u8> tmp_buffer(size);
    ReadBlock(gpu_src_addr, tmp_buffer.data(), size);
    WriteBlock(gpu_dest_addr, tmp_buffer.data(), size);
}
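
// As CopyBlock, but using the unsynchronized block operations.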
void MemoryManager::CopyBlockUnsafe(GPUVAddr gpu_dest_addr, GPUVAddr gpu_src_addr,
                                    std::size_t size) {
    std::vector<u8> tmp_buffer(size);
    ReadBlockUnsafe(gpu_src_addr, tmp_buffer.data(), size);
    WriteBlockUnsafe(gpu_dest_addr, tmp_buffer.data(), size);
}
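
// Returns true when [gpu_addr, gpu_addr + size) lies within a single guest memory page
// (Core::Memory::PAGE_SIZE), i.e. when it is reachable through one contiguous host pointer.
// Unmapped addresses yield false.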
bool MemoryManager::IsGranularRange(GPUVAddr gpu_addr, std::size_t size) {
    const auto cpu_addr{GpuToCpuAddress(gpu_addr)};
    if (!cpu_addr) {
        return {};
    }
    const std::size_t page{(*cpu_addr & Core::Memory::PAGE_MASK) + size};
    return page <= Core::Memory::PAGE_SIZE;
}

} // namespace Tegra