2018-11-24 04:20:56 +00:00
|
|
|
// Copyright 2018 yuzu Emulator Project
|
|
|
|
// Licensed under GPLv2 or any later version
|
|
|
|
// Refer to the license.txt file included.
|
|
|
|
|
2018-11-27 23:42:21 +00:00
|
|
|
#include "common/microprofile.h"
|
2018-11-24 04:20:56 +00:00
|
|
|
#include "core/core.h"
|
|
|
|
#include "core/memory.h"
|
|
|
|
#include "video_core/dma_pusher.h"
|
|
|
|
#include "video_core/engines/maxwell_3d.h"
|
|
|
|
#include "video_core/gpu.h"
|
2019-04-06 03:59:54 +00:00
|
|
|
#include "video_core/memory_manager.h"
|
2018-11-24 04:20:56 +00:00
|
|
|
|
|
|
|
namespace Tegra {
|
|
|
|
|
2020-04-19 20:12:06 +00:00
|
|
|
// Binds the pusher to the GPU whose FIFO it feeds and to the owning system.
DmaPusher::DmaPusher(Core::System& system, GPU& gpu) : gpu(gpu), system(system) {}
|
2018-11-24 04:20:56 +00:00
|
|
|
|
|
|
|
// Defaulted destructor, defined out-of-line in this translation unit.
DmaPusher::~DmaPusher() = default;
|
|
|
|
|
2018-11-27 23:42:21 +00:00
|
|
|
// Profiler token used by DispatchCalls() to time command buffer execution.
MICROPROFILE_DEFINE(DispatchCalls, "GPU", "Execute command buffer", MP_RGB(128, 128, 192));
|
|
|
|
|
2018-11-24 04:20:56 +00:00
|
|
|
void DmaPusher::DispatchCalls() {
|
2018-11-27 23:42:21 +00:00
|
|
|
MICROPROFILE_SCOPE(DispatchCalls);
|
|
|
|
|
2020-02-16 14:08:07 +00:00
|
|
|
gpu.SyncGuestHost();
|
2019-12-27 01:14:10 +00:00
|
|
|
// On entering GPU code, assume all memory may be touched by the ARM core.
|
|
|
|
gpu.Maxwell3D().OnMemoryWrite();
|
|
|
|
|
2018-11-28 00:17:33 +00:00
|
|
|
dma_pushbuffer_subindex = 0;
|
|
|
|
|
2020-04-19 20:12:06 +00:00
|
|
|
while (system.IsPoweredOn()) {
|
2018-11-24 04:20:56 +00:00
|
|
|
if (!Step()) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2019-07-26 18:20:43 +00:00
|
|
|
gpu.FlushCommands();
|
2020-02-16 13:51:37 +00:00
|
|
|
gpu.SyncGuestHost();
|
2020-02-16 20:24:37 +00:00
|
|
|
gpu.OnCommandListEnd();
|
2018-11-24 04:20:56 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
bool DmaPusher::Step() {
|
2019-02-19 09:26:58 +00:00
|
|
|
if (!ib_enable || dma_pushbuffer.empty()) {
|
|
|
|
// pushbuffer empty and IB empty or nonexistent - nothing to do
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
const CommandList& command_list{dma_pushbuffer.front()};
|
2019-05-19 00:51:54 +00:00
|
|
|
ASSERT_OR_EXECUTE(!command_list.empty(), {
|
|
|
|
// Somehow the command_list is empty, in order to avoid a crash
|
|
|
|
// We ignore it and assume its size is 0.
|
|
|
|
dma_pushbuffer.pop();
|
|
|
|
dma_pushbuffer_subindex = 0;
|
|
|
|
return true;
|
|
|
|
});
|
2019-03-08 07:06:54 +00:00
|
|
|
const CommandListHeader command_list_header{command_list[dma_pushbuffer_subindex++]};
|
2019-02-19 09:26:58 +00:00
|
|
|
GPUVAddr dma_get = command_list_header.addr;
|
|
|
|
GPUVAddr dma_put = dma_get + command_list_header.size * sizeof(u32);
|
|
|
|
bool non_main = command_list_header.is_non_main;
|
|
|
|
|
|
|
|
if (dma_pushbuffer_subindex >= command_list.size()) {
|
|
|
|
// We've gone through the current list, remove it from the queue
|
|
|
|
dma_pushbuffer.pop();
|
|
|
|
dma_pushbuffer_subindex = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (command_list_header.size == 0) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Push buffer non-empty, read a word
|
|
|
|
command_headers.resize(command_list_header.size);
|
2019-04-16 14:19:52 +00:00
|
|
|
gpu.MemoryManager().ReadBlockUnsafe(dma_get, command_headers.data(),
|
|
|
|
command_list_header.size * sizeof(u32));
|
2019-02-19 09:26:58 +00:00
|
|
|
|
2020-04-20 06:16:56 +00:00
|
|
|
for (std::size_t index = 0; index < command_headers.size();) {
|
|
|
|
const CommandHeader& command_header = command_headers[index];
|
|
|
|
|
|
|
|
if (dma_state.method_count) {
|
2019-02-19 09:26:58 +00:00
|
|
|
// Data word of methods command
|
2020-04-20 06:16:56 +00:00
|
|
|
if (dma_state.non_incrementing) {
|
|
|
|
const u32 max_write = static_cast<u32>(
|
|
|
|
std::min<std::size_t>(index + dma_state.method_count, command_headers.size()) -
|
|
|
|
index);
|
|
|
|
CallMultiMethod(&command_header.argument, max_write);
|
|
|
|
dma_state.method_count -= max_write;
|
|
|
|
index += max_write;
|
|
|
|
continue;
|
|
|
|
} else {
|
|
|
|
CallMethod(command_header.argument);
|
|
|
|
}
|
2019-02-19 09:26:58 +00:00
|
|
|
|
|
|
|
if (!dma_state.non_incrementing) {
|
|
|
|
dma_state.method++;
|
2019-02-19 08:44:33 +00:00
|
|
|
}
|
2018-11-24 04:20:56 +00:00
|
|
|
|
2019-02-19 09:26:58 +00:00
|
|
|
if (dma_increment_once) {
|
|
|
|
dma_state.non_incrementing = true;
|
|
|
|
}
|
2018-11-24 04:20:56 +00:00
|
|
|
|
2019-02-19 09:26:58 +00:00
|
|
|
dma_state.method_count--;
|
|
|
|
} else {
|
|
|
|
// No command active - this is the first word of a new one
|
|
|
|
switch (command_header.mode) {
|
|
|
|
case SubmissionMode::Increasing:
|
|
|
|
SetState(command_header);
|
|
|
|
dma_state.non_incrementing = false;
|
|
|
|
dma_increment_once = false;
|
|
|
|
break;
|
|
|
|
case SubmissionMode::NonIncreasing:
|
|
|
|
SetState(command_header);
|
|
|
|
dma_state.non_incrementing = true;
|
|
|
|
dma_increment_once = false;
|
|
|
|
break;
|
|
|
|
case SubmissionMode::Inline:
|
|
|
|
dma_state.method = command_header.method;
|
|
|
|
dma_state.subchannel = command_header.subchannel;
|
|
|
|
CallMethod(command_header.arg_count);
|
|
|
|
dma_state.non_incrementing = true;
|
|
|
|
dma_increment_once = false;
|
|
|
|
break;
|
|
|
|
case SubmissionMode::IncreaseOnce:
|
|
|
|
SetState(command_header);
|
|
|
|
dma_state.non_incrementing = false;
|
|
|
|
dma_increment_once = true;
|
|
|
|
break;
|
2019-04-03 07:33:36 +00:00
|
|
|
default:
|
|
|
|
break;
|
2019-02-19 09:26:58 +00:00
|
|
|
}
|
2018-11-28 00:17:33 +00:00
|
|
|
}
|
2020-04-20 06:16:56 +00:00
|
|
|
index++;
|
2019-02-19 09:26:58 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (!non_main) {
|
|
|
|
// TODO (degasus): This is dead code, as dma_mget is never read.
|
|
|
|
dma_mget = dma_put;
|
2018-11-24 04:20:56 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Latches the method/subchannel/count triple from a submission header; the
// following data words will be dispatched against this state.
void DmaPusher::SetState(const CommandHeader& command_header) {
    dma_state.method_count = command_header.method_count;
    dma_state.subchannel = command_header.subchannel;
    dma_state.method = command_header.method;
}
|
|
|
|
|
|
|
|
// Forwards a single method invocation, using the currently latched
// method/subchannel state, to the GPU with the given data word.
void DmaPusher::CallMethod(u32 argument) const {
    gpu.CallMethod({dma_state.method, argument, dma_state.subchannel, dma_state.method_count});
}
|
|
|
|
|
2020-04-20 06:16:56 +00:00
|
|
|
// Forwards a contiguous batch of data words for the currently latched method
// to the GPU in a single call. base_start points at the first argument word
// (in Step() this is a pointer into command_headers).
void DmaPusher::CallMultiMethod(const u32* base_start, u32 num_methods) const {
    gpu.CallMultiMethod(dma_state.method, dma_state.subchannel, base_start, num_methods,
                        dma_state.method_count);
}
|
|
|
|
|
2018-11-24 04:20:56 +00:00
|
|
|
} // namespace Tegra
|