Video Core: Initial implementation of InstanceDraw packaging
parent b31880dc5e
commit ba02d564f8
@@ -92,6 +92,10 @@ void Maxwell3D::InitializeRegisterDefaults() {
    // Some games (like Super Mario Odyssey) assume that SRGB is enabled.
    regs.framebuffer_srgb = 1;
    mme_inline[MAXWELL3D_REG_INDEX(draw.vertex_end_gl)] = true;
    mme_inline[MAXWELL3D_REG_INDEX(draw.vertex_begin_gl)] = true;
    mme_inline[MAXWELL3D_REG_INDEX(vertex_buffer.count)] = true;
    mme_inline[MAXWELL3D_REG_INDEX(index_array.count)] = true;
}

#define DIRTY_REGS_POS(field_name) (offsetof(Maxwell3D::DirtyRegs, field_name))
@@ -416,6 +420,76 @@ void Maxwell3D::CallMethod(const GPU::MethodCall& method_call) {
    }
}

void Maxwell3D::CallMethodFromMME(const GPU::MethodCall& method_call) {
    const u32 method = method_call.method;
    if (mme_inline[method]) {
        regs.reg_array[method] = method_call.argument;
        if (method == MAXWELL3D_REG_INDEX(vertex_buffer.count) ||
            method == MAXWELL3D_REG_INDEX(index_array.count)) {
            MMMEDrawMode expected_mode = method == MAXWELL3D_REG_INDEX(vertex_buffer.count)
                                             ? MMMEDrawMode::Array
                                             : MMMEDrawMode::Indexed;
            u32 count = method_call.argument;
            while (true) {
                if (mme_draw.current_mode == MMMEDrawMode::Undefined) {
                    mme_draw.current_mode = expected_mode;
                    mme_draw.current_count = count;
                    mme_draw.instance_count = 1;
                    break;
                } else {
                    if (mme_draw.current_mode == expected_mode && count == mme_draw.current_count) {
                        mme_draw.instance_count++;
                        break;
                    } else {
                        FlushMMEInlineDraw();
                    }
                }
            }
        }
    } else {
        if (mme_draw.current_mode != MMMEDrawMode::Undefined) {
            FlushMMEInlineDraw();
        }
        CallMethod(method_call);
    }
}

void Maxwell3D::FlushMMEInlineDraw() {
    LOG_DEBUG(HW_GPU, "called, topology={}, count={}", static_cast<u32>(regs.draw.topology.Value()),
              regs.vertex_buffer.count);
    ASSERT_MSG(!(regs.index_array.count && regs.vertex_buffer.count), "Both indexed and direct?");

    auto debug_context = system.GetGPUDebugContext();

    if (debug_context) {
        debug_context->OnEvent(Tegra::DebugContext::Event::IncomingPrimitiveBatch, nullptr);
    }

    // Both instance configuration registers can not be set at the same time.
    ASSERT_MSG(!regs.draw.instance_next || !regs.draw.instance_cont,
               "Illegal combination of instancing parameters");

    const bool is_indexed = mme_draw.current_mode == MMMEDrawMode::Indexed;
    rasterizer.AccelerateDrawMultiBatch(is_indexed);

    if (debug_context) {
        debug_context->OnEvent(Tegra::DebugContext::Event::FinishedPrimitiveBatch, nullptr);
    }

    // TODO(bunnei): Below, we reset vertex count so that we can use these registers to determine if
    // the game is trying to draw indexed or direct mode. This needs to be verified on HW still -
    // it's possible that it is incorrect and that there is some other register used to specify the
    // drawing mode.
    if (is_indexed) {
        regs.index_array.count = 0;
    } else {
        regs.vertex_buffer.count = 0;
    }
    mme_draw.current_mode = MMMEDrawMode::Undefined;
    mme_draw.current_count = 0;
    mme_draw.instance_count = 0;
}

void Maxwell3D::ProcessMacroUpload(u32 data) {
    ASSERT_MSG(regs.macros.upload_address < macro_memory.size(),
               "upload_address exceeded macro_memory size!");
@@ -811,8 +811,9 @@ public:
        INSERT_PADDING_WORDS(0x21);

        u32 vb_element_base;
        u32 vb_base_instance;

        INSERT_PADDING_WORDS(0x36);
        INSERT_PADDING_WORDS(0x35);

        union {
            BitField<0, 1, u32> c0;
@@ -1238,6 +1239,11 @@ public:
    /// Write the value to the register identified by method.
    void CallMethod(const GPU::MethodCall& method_call);

    /// Write the value to the register identified by method, as called from an MME macro.
    void CallMethodFromMME(const GPU::MethodCall& method_call);

    void FlushMMEInlineDraw();

    /// Given a Texture Handle, returns the TSC and TIC entries.
    Texture::FullTextureInfo GetTextureInfo(const Texture::TextureHandle tex_handle,
                                            std::size_t offset) const;
@@ -1263,6 +1269,18 @@ public:
        return execute_on;
    }

    enum class MMMEDrawMode : u32 {
        Undefined,
        Array,
        Indexed,
    };

    struct MMEDrawState {
        MMMEDrawMode current_mode{MMMEDrawMode::Undefined};
        u32 current_count;
        u32 instance_count;
    } mme_draw;

private:
    void InitializeRegisterDefaults();
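For orientation, a minimal sketch (illustration only, mirroring what the OpenGL rasterizer does further down in this commit) of how a backend can consume this packaged draw state when AccelerateDrawMultiBatch is invoked:

    // `maxwell3d` is the Tegra::Engines::Maxwell3D instance owning mme_draw.
    const auto& draw_state = maxwell3d.mme_draw;
    const bool is_indexed =
        draw_state.current_mode == Tegra::Engines::Maxwell3D::MMMEDrawMode::Indexed;
    const u32 count = draw_state.current_count;      // vertices or indices per instance
    const u32 instances = draw_state.instance_count; // identical draws folded into this batch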
@@ -1275,6 +1293,8 @@ private:
    /// Start offsets of each macro in macro_memory
    std::array<u32, 0x80> macro_positions = {};

    std::array<bool, Regs::NUM_REGS> mme_inline{};

    /// Memory for macro code
    MacroMemory macro_memory;
@@ -1402,6 +1422,7 @@ ASSERT_REG_POSITION(stencil_front_mask, 0x4E7);
ASSERT_REG_POSITION(frag_color_clamp, 0x4EA);
ASSERT_REG_POSITION(screen_y_control, 0x4EB);
ASSERT_REG_POSITION(vb_element_base, 0x50D);
ASSERT_REG_POSITION(vb_base_instance, 0x50E);
ASSERT_REG_POSITION(clip_distance_enabled, 0x544);
ASSERT_REG_POSITION(point_size, 0x546);
ASSERT_REG_POSITION(zeta_enable, 0x54E);
@@ -257,7 +257,7 @@ void MacroInterpreter::SetMethodAddress(u32 address) {
}

void MacroInterpreter::Send(u32 value) {
    maxwell3d.CallMethod({method_address.address, value});
    maxwell3d.CallMethodFromMME({method_address.address, value});
    // Increment the method address by the method increment.
    method_address.address.Assign(method_address.address.Value() +
                                  method_address.increment.Value());
@@ -31,6 +31,9 @@ public:
    /// Draw the current batch of vertex arrays
    virtual void DrawArrays() = 0;

    /// Draw the current batch of vertex arrays as a packaged (multi-instance) draw
    virtual void DrawMultiArrays() = 0;

    /// Clear the current framebuffer
    virtual void Clear() = 0;
@@ -73,6 +76,10 @@ public:
        return false;
    }

    virtual bool AccelerateDrawMultiBatch(bool is_indexed) {
        return false;
    }

    /// Increase/decrease the number of objects in pages touching the specified region
    virtual void UpdatePagesCachedCount(VAddr addr, u64 size, int delta) {}
@@ -405,6 +405,12 @@ bool RasterizerOpenGL::AccelerateDrawBatch(bool is_indexed) {
    return true;
}

bool RasterizerOpenGL::AccelerateDrawMultiBatch(bool is_indexed) {
    accelerate_draw = is_indexed ? AccelDraw::Indexed : AccelDraw::Arrays;
    DrawMultiArrays();
    return true;
}

template <typename Map, typename Interval>
static constexpr auto RangeFromInterval(Map& map, const Interval& interval) {
    return boost::make_iterator_range(map.equal_range(interval));
@@ -688,7 +694,7 @@ void RasterizerOpenGL::Clear() {
    }
}

void RasterizerOpenGL::DrawArrays() {
void RasterizerOpenGL::DrawPrelude() {
    if (accelerate_draw == AccelDraw::Disabled)
        return;
@@ -743,10 +749,7 @@ void RasterizerOpenGL::DrawArrays() {
    // Upload vertex and index data.
    SetupVertexBuffer(vao);
    SetupVertexInstances(vao);
    const GLintptr index_buffer_offset = SetupIndexBuffer();

    // Setup draw parameters. It will automatically choose what glDraw* method to use.
    const DrawParameters params = SetupDraw(index_buffer_offset);
    index_buffer_offset = SetupIndexBuffer();

    // Prepare packed bindings.
    bind_ubo_pushbuffer.Setup(0);
@@ -754,7 +757,8 @@ void RasterizerOpenGL::DrawArrays() {

    // Setup shaders and their used resources.
    texture_cache.GuardSamplers(true);
    SetupShaders(params.primitive_mode);
    const auto primitive_mode = MaxwellToGL::PrimitiveTopology(gpu.regs.draw.topology);
    SetupShaders(primitive_mode);
    texture_cache.GuardSamplers(false);

    ConfigureFramebuffers(state);
@@ -778,11 +782,80 @@ void RasterizerOpenGL::DrawArrays() {
    if (texture_cache.TextureBarrier()) {
        glTextureBarrier();
    }
}

    params.DispatchDraw();

void RasterizerOpenGL::DrawArrays() {
    DrawPrelude();

    auto& maxwell3d = system.GPU().Maxwell3D();
    auto& regs = maxwell3d.regs;
    auto current_instance = maxwell3d.state.current_instance;
    auto primitive_mode = MaxwellToGL::PrimitiveTopology(regs.draw.topology);
    if (accelerate_draw == AccelDraw::Indexed) {
        auto index_format = MaxwellToGL::IndexFormat(regs.index_array.format);
        auto count = regs.index_array.count;
        auto base_vertex = static_cast<GLint>(regs.vb_element_base);
        const auto index_buffer_ptr = reinterpret_cast<const void*>(index_buffer_offset);
        if (current_instance > 0) {
            glDrawElementsInstancedBaseVertexBaseInstance(primitive_mode, count, index_format,
                                                          index_buffer_ptr, 1, base_vertex,
                                                          current_instance);
        } else {
            glDrawElementsBaseVertex(primitive_mode, count, index_format, index_buffer_ptr,
                                     base_vertex);
        }
    } else {
        auto count = regs.vertex_buffer.count;
        auto vertex_first = regs.vertex_buffer.first;
        if (current_instance > 0) {
            glDrawArraysInstancedBaseInstance(primitive_mode, vertex_first, count, 1,
                                              current_instance);
        } else {
            glDrawArrays(primitive_mode, vertex_first, count);
        }
    }

    accelerate_draw = AccelDraw::Disabled;
    gpu.dirty.memory_general = false;
    maxwell3d.dirty.memory_general = false;
}

#pragma optimize("", off)

void RasterizerOpenGL::DrawMultiArrays() {
    DrawPrelude();

    auto& maxwell3d = system.GPU().Maxwell3D();
    auto& regs = maxwell3d.regs;
    auto& draw_setup = maxwell3d.mme_draw;
    auto num_instances = draw_setup.instance_count;
    auto base_instance = static_cast<GLint>(regs.vb_base_instance);
    auto primitive_mode = MaxwellToGL::PrimitiveTopology(regs.draw.topology);
    if (draw_setup.current_mode == Tegra::Engines::Maxwell3D::MMMEDrawMode::Indexed) {
        auto index_format = MaxwellToGL::IndexFormat(regs.index_array.format);
        auto count = regs.index_array.count;
        auto base_vertex = static_cast<GLint>(regs.vb_element_base);
        const auto index_buffer_ptr = reinterpret_cast<const void*>(index_buffer_offset);
        if (num_instances > 1) {
            glDrawElementsInstancedBaseVertexBaseInstance(primitive_mode, count, index_format,
                                                          index_buffer_ptr, num_instances,
                                                          base_vertex, base_instance);
        } else {
            glDrawElementsBaseVertex(primitive_mode, count, index_format, index_buffer_ptr,
                                     base_vertex);
        }
    } else {
        auto count = regs.vertex_buffer.count;
        auto vertex_first = regs.vertex_buffer.first;
        if (num_instances > 1) {
            glDrawArraysInstancedBaseInstance(primitive_mode, vertex_first, count, num_instances,
                                              base_instance);
        } else {
            glDrawArrays(primitive_mode, vertex_first, count);
        }
    }

    accelerate_draw = AccelDraw::Disabled;
    maxwell3d.dirty.memory_general = false;
}

void RasterizerOpenGL::DispatchCompute(GPUVAddr code_addr) {
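A note on the GL entry points used by the packaged path (background, not from the commit): with GL 4.2 / ARB_base_instance, an instanced call exposes gl_InstanceID in [0, num_instances) to the vertex shader, while base_instance only offsets the fetch of instanced (divisor != 0) vertex attributes and does not change gl_InstanceID. A minimal standalone sketch of the dispatch choice made above:

    // Hypothetical helper (illustration only): one packaged call replaces the num_instances
    // separate draws the macro would otherwise have issued.
    void DrawPackagedArrays(GLenum mode, GLint first, GLsizei count, GLsizei num_instances,
                            GLuint base_instance) {
        if (num_instances > 1) {
            glDrawArraysInstancedBaseInstance(mode, first, count, num_instances, base_instance);
        } else {
            glDrawArrays(mode, first, count);
        }
    }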
@@ -58,6 +58,7 @@ public:
    ~RasterizerOpenGL() override;

    void DrawArrays() override;
    void DrawMultiArrays() override;
    void Clear() override;
    void DispatchCompute(GPUVAddr code_addr) override;
    void FlushAll() override;
@@ -72,6 +73,7 @@ public:
    bool AccelerateDisplay(const Tegra::FramebufferConfig& config, VAddr framebuffer_addr,
                           u32 pixel_stride) override;
    bool AccelerateDrawBatch(bool is_indexed) override;
    bool AccelerateDrawMultiBatch(bool is_indexed) override;
    void UpdatePagesCachedCount(VAddr addr, u64 size, int delta) override;
    void LoadDiskResources(const std::atomic_bool& stop_loading,
                           const VideoCore::DiskResourceLoadCallback& callback) override;
@@ -136,6 +138,8 @@ private:
    void SetupGlobalMemory(const GLShader::GlobalMemoryEntry& entry, GPUVAddr gpu_addr,
                           std::size_t size);

    void DrawPrelude();

    /// Configures the current textures to use for the draw command. Returns shaders texture buffer
    /// usage.
    TextureBufferUsage SetupDrawTextures(Tegra::Engines::Maxwell3D::Regs::ShaderStage stage,
@@ -252,6 +256,8 @@ private:

    DrawParameters SetupDraw(GLintptr index_buffer_offset);

    GLintptr index_buffer_offset;

    void SetupShaders(GLenum primitive_mode);

    enum class AccelDraw { Disabled, Arrays, Indexed };
@@ -964,7 +964,7 @@ private:
        switch (element) {
        case 2:
            // Config pack's first value is instance_id.
            return {"config_pack[0]", Type::Uint};
            return {"gl_InstanceID", Type::Uint};
        case 3:
            return {"gl_VertexID", Type::Int};
        }