2018-06-10 22:02:33 +00:00
|
|
|
// Copyright 2018 yuzu Emulator Project
|
|
|
|
// Licensed under GPLv2 or any later version
|
|
|
|
// Refer to the license.txt file included.
|
|
|
|
|
2019-02-16 03:05:17 +00:00
|
|
|
#include "common/assert.h"
|
2019-03-06 01:25:01 +00:00
|
|
|
#include "common/logging/log.h"
|
2018-11-06 20:26:27 +00:00
|
|
|
#include "core/core.h"
|
2019-07-24 23:18:17 +00:00
|
|
|
#include "core/settings.h"
|
2018-11-06 20:26:27 +00:00
|
|
|
#include "video_core/engines/maxwell_3d.h"
|
2018-06-10 22:02:33 +00:00
|
|
|
#include "video_core/engines/maxwell_dma.h"
|
2019-04-05 22:21:15 +00:00
|
|
|
#include "video_core/memory_manager.h"
|
2019-02-19 01:58:32 +00:00
|
|
|
#include "video_core/renderer_base.h"
|
2018-06-10 22:02:33 +00:00
|
|
|
#include "video_core/textures/decoders.h"
|
|
|
|
|
2018-10-20 19:58:06 +00:00
|
|
|
namespace Tegra::Engines {
|
2018-06-10 22:02:33 +00:00
|
|
|
|
2020-07-04 01:00:30 +00:00
|
|
|
using namespace Texture;
|
|
|
|
|
2019-08-30 18:08:00 +00:00
|
|
|
// Constructs the DMA engine, keeping references to the emulated system (used to
// notify the rasterizer of memory writes) and the GPU memory manager that
// performs all reads/writes and block copies.
MaxwellDMA::MaxwellDMA(Core::System& system, MemoryManager& memory_manager)
    : system{system}, memory_manager{memory_manager} {}
|
2018-06-10 22:02:33 +00:00
|
|
|
|
2020-04-28 01:47:58 +00:00
|
|
|
void MaxwellDMA::CallMethod(u32 method, u32 method_argument, bool is_last_call) {
    ASSERT_MSG(method < NUM_REGS, "Invalid MaxwellDMA register");

    // Latch the written value into the engine's register file.
    regs.reg_array[method] = method_argument;

    // A write to the launch_dma register triggers the programmed transfer.
    constexpr u32 launch_dma_method = offsetof(Regs, launch_dma) / sizeof(u32);
    if (method == launch_dma_method) {
        Launch();
    }
}
|
|
|
|
|
2020-04-20 17:42:14 +00:00
|
|
|
void MaxwellDMA::CallMultiMethod(u32 method, const u32* base_start, u32 amount,
                                 u32 methods_pending) {
    // Forward each queued argument to CallMethod, flagging the final write of
    // the batch so it behaves the same as individual method calls.
    for (u32 index = 0; index < amount; ++index) {
        const bool is_last_call = methods_pending - index <= 1;
        CallMethod(method, base_start[index], is_last_call);
    }
}
|
|
|
|
|
2020-07-04 01:00:30 +00:00
|
|
|
void MaxwellDMA::Launch() {
|
2019-07-18 12:31:38 +00:00
|
|
|
LOG_TRACE(HW_GPU, "Requested a DMA copy");
|
2018-06-10 22:02:33 +00:00
|
|
|
|
|
|
|
// TODO(Subv): Perform more research and implement all features of this engine.
|
2020-07-04 01:00:30 +00:00
|
|
|
const LaunchDMA& launch = regs.launch_dma;
|
|
|
|
ASSERT(launch.remap_enable == 0);
|
|
|
|
ASSERT(launch.semaphore_type == LaunchDMA::SemaphoreType::NONE);
|
|
|
|
ASSERT(launch.interrupt_type == LaunchDMA::InterruptType::NONE);
|
|
|
|
ASSERT(launch.data_transfer_type == LaunchDMA::DataTransferType::NON_PIPELINED);
|
|
|
|
ASSERT(regs.dst_params.origin.x == 0);
|
|
|
|
ASSERT(regs.dst_params.origin.y == 0);
|
|
|
|
|
|
|
|
const bool is_src_pitch = launch.src_memory_layout == LaunchDMA::MemoryLayout::PITCH;
|
|
|
|
const bool is_dst_pitch = launch.dst_memory_layout == LaunchDMA::MemoryLayout::PITCH;
|
|
|
|
|
|
|
|
if (!is_src_pitch && !is_dst_pitch) {
|
2018-10-18 01:29:10 +00:00
|
|
|
// If both the source and the destination are in block layout, assert.
|
|
|
|
UNREACHABLE_MSG("Tiled->Tiled DMA transfers are not yet implemented");
|
|
|
|
return;
|
|
|
|
}
|
2018-09-08 21:02:16 +00:00
|
|
|
|
2019-12-27 01:14:10 +00:00
|
|
|
// All copies here update the main memory, so mark all rasterizer states as invalid.
|
|
|
|
system.GPU().Maxwell3D().OnMemoryWrite();
|
|
|
|
|
2020-07-04 01:00:30 +00:00
|
|
|
if (is_src_pitch && is_dst_pitch) {
|
|
|
|
CopyPitchToPitch();
|
|
|
|
} else {
|
|
|
|
ASSERT(launch.multi_line_enable == 1);
|
2018-09-08 21:02:16 +00:00
|
|
|
|
2020-07-04 01:00:30 +00:00
|
|
|
if (!is_src_pitch && is_dst_pitch) {
|
|
|
|
CopyBlockLinearToPitch();
|
|
|
|
} else {
|
|
|
|
CopyPitchToBlockLinear();
|
2018-10-18 01:29:10 +00:00
|
|
|
}
|
2018-07-02 14:46:33 +00:00
|
|
|
}
|
2020-07-04 01:00:30 +00:00
|
|
|
}
|
2018-06-10 22:02:33 +00:00
|
|
|
|
2020-07-04 01:00:30 +00:00
|
|
|
void MaxwellDMA::CopyPitchToPitch() {
|
|
|
|
// When `multi_line_enable` bit is disabled the copy is performed as if we were copying a 1D
|
|
|
|
// buffer of length `line_length_in`.
|
|
|
|
// Otherwise we copy a 2D image of dimensions (line_length_in, line_count).
|
|
|
|
if (!regs.launch_dma.multi_line_enable) {
|
|
|
|
memory_manager.CopyBlock(regs.offset_out, regs.offset_in, regs.line_length_in);
|
|
|
|
return;
|
|
|
|
}
|
2019-02-24 05:15:35 +00:00
|
|
|
|
2020-07-04 01:00:30 +00:00
|
|
|
// Perform a line-by-line copy.
|
|
|
|
// We're going to take a subrect of size (line_length_in, line_count) from the source rectangle.
|
|
|
|
// There is no need to manually flush/invalidate the regions because CopyBlock does that for us.
|
|
|
|
for (u32 line = 0; line < regs.line_count; ++line) {
|
|
|
|
const GPUVAddr source_line = regs.offset_in + static_cast<size_t>(line) * regs.pitch_in;
|
|
|
|
const GPUVAddr dest_line = regs.offset_out + static_cast<size_t>(line) * regs.pitch_out;
|
|
|
|
memory_manager.CopyBlock(dest_line, source_line, regs.line_length_in);
|
|
|
|
}
|
|
|
|
}
|
2019-03-09 19:36:52 +00:00
|
|
|
|
2020-07-04 01:00:30 +00:00
|
|
|
void MaxwellDMA::CopyBlockLinearToPitch() {
|
|
|
|
ASSERT(regs.src_params.block_size.depth == 0);
|
2019-03-09 19:36:52 +00:00
|
|
|
|
2020-07-04 01:00:30 +00:00
|
|
|
// Optimized path for micro copies.
|
|
|
|
const size_t dst_size = static_cast<size_t>(regs.pitch_out) * regs.line_count;
|
|
|
|
if (dst_size < GetGOBSize() && regs.pitch_out <= 64) {
|
|
|
|
FastCopyBlockLinearToPitch();
|
|
|
|
return;
|
|
|
|
}
|
2018-10-18 01:29:10 +00:00
|
|
|
|
2020-07-04 01:00:30 +00:00
|
|
|
// Deswizzle the input and copy it over.
|
|
|
|
const u32 bytes_per_pixel = regs.pitch_out / regs.line_length_in;
|
|
|
|
const Parameters& src_params = regs.src_params;
|
|
|
|
const u32 width = src_params.width;
|
|
|
|
const u32 height = src_params.height;
|
|
|
|
const u32 depth = src_params.depth;
|
|
|
|
const u32 block_height = src_params.block_size.height;
|
|
|
|
const u32 block_depth = src_params.block_size.depth;
|
|
|
|
const size_t src_size =
|
|
|
|
CalculateSize(true, bytes_per_pixel, width, height, depth, block_height, block_depth);
|
|
|
|
const size_t src_layer_size =
|
|
|
|
CalculateSize(true, bytes_per_pixel, width, height, 1, block_height, block_depth);
|
|
|
|
|
|
|
|
if (read_buffer.size() < src_size) {
|
|
|
|
read_buffer.resize(src_size);
|
|
|
|
}
|
|
|
|
if (write_buffer.size() < dst_size) {
|
|
|
|
write_buffer.resize(dst_size);
|
|
|
|
}
|
2018-10-18 01:29:10 +00:00
|
|
|
|
2020-07-04 01:00:30 +00:00
|
|
|
if (Settings::IsGPULevelExtreme()) {
|
|
|
|
memory_manager.ReadBlock(regs.offset_in, read_buffer.data(), src_size);
|
|
|
|
memory_manager.ReadBlock(regs.offset_out, write_buffer.data(), dst_size);
|
2019-04-23 16:41:55 +00:00
|
|
|
} else {
|
2020-07-04 01:00:30 +00:00
|
|
|
memory_manager.ReadBlockUnsafe(regs.offset_in, read_buffer.data(), src_size);
|
|
|
|
memory_manager.ReadBlockUnsafe(regs.offset_out, write_buffer.data(), dst_size);
|
|
|
|
}
|
2018-10-18 01:29:10 +00:00
|
|
|
|
2020-07-04 01:00:30 +00:00
|
|
|
UnswizzleSubrect(regs.line_length_in, regs.line_count, regs.pitch_out, src_params.width,
|
|
|
|
bytes_per_pixel, read_buffer.data() + src_layer_size * src_params.layer,
|
|
|
|
write_buffer.data(), src_params.block_size.height, src_params.origin.x,
|
|
|
|
src_params.origin.y);
|
2018-10-18 01:29:10 +00:00
|
|
|
|
2020-07-04 01:00:30 +00:00
|
|
|
memory_manager.WriteBlock(regs.offset_out, write_buffer.data(), dst_size);
|
|
|
|
}
|
2018-10-18 01:29:10 +00:00
|
|
|
|
2020-07-04 01:00:30 +00:00
|
|
|
void MaxwellDMA::CopyPitchToBlockLinear() {
|
|
|
|
const auto& dst_params = regs.dst_params;
|
|
|
|
ASSERT(dst_params.block_size.depth == 0);
|
|
|
|
|
|
|
|
const u32 bytes_per_pixel = regs.pitch_in / regs.line_length_in;
|
|
|
|
const u32 width = dst_params.width;
|
|
|
|
const u32 height = dst_params.height;
|
|
|
|
const u32 depth = dst_params.depth;
|
|
|
|
const u32 block_height = dst_params.block_size.height;
|
|
|
|
const u32 block_depth = dst_params.block_size.depth;
|
|
|
|
const size_t dst_size =
|
|
|
|
CalculateSize(true, bytes_per_pixel, width, height, depth, block_height, block_depth);
|
|
|
|
const size_t dst_layer_size =
|
|
|
|
CalculateSize(true, bytes_per_pixel, width, height, 1, block_height, block_depth);
|
|
|
|
|
|
|
|
const size_t src_size = static_cast<size_t>(regs.pitch_in) * regs.line_count;
|
|
|
|
|
|
|
|
if (read_buffer.size() < src_size) {
|
|
|
|
read_buffer.resize(src_size);
|
|
|
|
}
|
|
|
|
if (write_buffer.size() < dst_size) {
|
|
|
|
write_buffer.resize(dst_size);
|
|
|
|
}
|
2018-10-18 01:29:10 +00:00
|
|
|
|
2020-07-04 01:00:30 +00:00
|
|
|
if (Settings::IsGPULevelExtreme()) {
|
|
|
|
memory_manager.ReadBlock(regs.offset_in, read_buffer.data(), src_size);
|
|
|
|
memory_manager.ReadBlock(regs.offset_out, write_buffer.data(), dst_size);
|
|
|
|
} else {
|
|
|
|
memory_manager.ReadBlockUnsafe(regs.offset_in, read_buffer.data(), src_size);
|
|
|
|
memory_manager.ReadBlockUnsafe(regs.offset_out, write_buffer.data(), dst_size);
|
|
|
|
}
|
2018-10-18 01:29:10 +00:00
|
|
|
|
2020-07-04 01:00:30 +00:00
|
|
|
// If the input is linear and the output is tiled, swizzle the input and copy it over.
|
|
|
|
SwizzleSubrect(regs.line_length_in, regs.line_count, regs.pitch_in, dst_params.width,
|
|
|
|
bytes_per_pixel, write_buffer.data() + dst_layer_size * dst_params.layer,
|
|
|
|
read_buffer.data(), dst_params.block_size.height, dst_params.origin.x,
|
|
|
|
dst_params.origin.y);
|
2019-04-23 16:41:55 +00:00
|
|
|
|
2020-07-04 01:00:30 +00:00
|
|
|
memory_manager.WriteBlock(regs.offset_out, write_buffer.data(), dst_size);
|
|
|
|
}
|
2019-04-23 16:41:55 +00:00
|
|
|
|
2020-07-04 01:00:30 +00:00
|
|
|
void MaxwellDMA::FastCopyBlockLinearToPitch() {
|
|
|
|
const u32 bytes_per_pixel = regs.pitch_out / regs.line_length_in;
|
|
|
|
const size_t src_size = GetGOBSize();
|
|
|
|
const size_t dst_size = static_cast<size_t>(regs.pitch_out) * regs.line_count;
|
|
|
|
u32 pos_x = regs.src_params.origin.x;
|
|
|
|
u32 pos_y = regs.src_params.origin.y;
|
|
|
|
const u64 offset = GetGOBOffset(regs.src_params.width, regs.src_params.height, pos_x, pos_y,
|
|
|
|
regs.src_params.block_size.height, bytes_per_pixel);
|
|
|
|
const u32 x_in_gob = 64 / bytes_per_pixel;
|
|
|
|
pos_x = pos_x % x_in_gob;
|
|
|
|
pos_y = pos_y % 8;
|
|
|
|
|
|
|
|
if (read_buffer.size() < src_size) {
|
|
|
|
read_buffer.resize(src_size);
|
|
|
|
}
|
2018-10-18 01:29:10 +00:00
|
|
|
|
2020-07-04 01:00:30 +00:00
|
|
|
if (write_buffer.size() < dst_size) {
|
|
|
|
write_buffer.resize(dst_size);
|
|
|
|
}
|
2019-04-23 16:41:55 +00:00
|
|
|
|
2020-07-04 01:00:30 +00:00
|
|
|
if (Settings::IsGPULevelExtreme()) {
|
|
|
|
memory_manager.ReadBlock(regs.offset_in + offset, read_buffer.data(), src_size);
|
|
|
|
memory_manager.ReadBlock(regs.offset_out, write_buffer.data(), dst_size);
|
|
|
|
} else {
|
|
|
|
memory_manager.ReadBlockUnsafe(regs.offset_in + offset, read_buffer.data(), src_size);
|
|
|
|
memory_manager.ReadBlockUnsafe(regs.offset_out, write_buffer.data(), dst_size);
|
2018-06-10 22:02:33 +00:00
|
|
|
}
|
2020-07-04 01:00:30 +00:00
|
|
|
|
|
|
|
UnswizzleSubrect(regs.line_length_in, regs.line_count, regs.pitch_out, regs.src_params.width,
|
|
|
|
bytes_per_pixel, read_buffer.data(), write_buffer.data(),
|
|
|
|
regs.src_params.block_size.height, pos_x, pos_y);
|
|
|
|
|
|
|
|
memory_manager.WriteBlock(regs.offset_out, write_buffer.data(), dst_size);
|
2018-06-10 22:02:33 +00:00
|
|
|
}
|
|
|
|
|
2018-10-20 19:58:06 +00:00
|
|
|
} // namespace Tegra::Engines
|