From 780240e6979b198e7bd10feaad5399b8b4b63762 Mon Sep 17 00:00:00 2001
From: Wollnashorn <Wollnashorn@users.noreply.github.com>
Date: Wed, 5 Apr 2023 01:29:46 +0200
Subject: [PATCH 1/4] shader_recompiler: Add subpixel offset for correct
 rounding at `ImageGather`

On AMD, a subpixel offset of 1/512 of the texel size is applied to the texture
coordinates of an ImageGather call to ensure that rounding at the texel centers
matches Maxwell and other Nvidia architectures. See
https://www.reedbeta.com/blog/texture-gathers-and-coordinate-precision/ for
more details on why this might be necessary.

This should fix shadow artifacts at object edges in Zelda: Breath of the Wild (#9957, #6956).
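
For illustration, with the nudge applied the emitted GLSL for a 2D gather is
roughly equivalent to the sketch below (`tex`, `coords` and `component` are
placeholder names; 0.001953125 is 1/512, i.e. 0x1p-9):

    vec4 texel = textureGather(tex,
        coords + vec2(0.001953125) / vec2(textureSize(tex, 0)),
        component);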
---
 .../backend/glsl/emit_glsl_image.cpp          | 29 ++++++++++++++
 .../backend/spirv/emit_spirv_image.cpp        | 39 +++++++++++++++++++
 src/shader_recompiler/profile.h               |  4 ++
 src/video_core/renderer_opengl/gl_device.cpp  |  1 +
 src/video_core/renderer_opengl/gl_device.h    |  5 +++
 .../renderer_opengl/gl_shader_cache.cpp       |  1 +
 .../renderer_vulkan/vk_pipeline_cache.cpp     |  1 +
 .../vulkan_common/vulkan_device.cpp           |  1 +
 src/video_core/vulkan_common/vulkan_device.h  |  5 +++
 9 files changed, 86 insertions(+)

diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp
index f335c8af0..418505475 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp
+++ b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp
@@ -143,6 +143,21 @@ IR::Inst* PrepareSparse(IR::Inst& inst) {
     }
     return sparse_inst;
 }
+
+std::string ImageGatherSubpixelOffset(const IR::TextureInstInfo& info, std::string_view texture,
+                                      std::string_view coords) {
+    switch (info.type) {
+    case TextureType::Color2D:
+    case TextureType::Color2DRect:
+        return fmt::format("{}+vec2(0.001953125)/vec2(textureSize({}, 0))", coords, texture);
+    case TextureType::ColorArray2D:
+    case TextureType::ColorCube:
+        return fmt::format("vec3({0}.xy+vec2(0.001953125)/vec2(textureSize({1}, 0)),{0}.z)", coords,
+                           texture);
+    default:
+        return std::string{coords};
+    }
+}
 } // Anonymous namespace
 
 void EmitImageSampleImplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
@@ -340,6 +355,13 @@ void EmitImageGather(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
         LOG_WARNING(Shader_GLSL, "Device does not support sparse texture queries. STUBBING");
         ctx.AddU1("{}=true;", *sparse_inst);
     }
+    std::string coords_with_subpixel_offset;
+    if (ctx.profile.need_gather_subpixel_offset) {
+        // Apply a subpixel offset of 1/512 of the texel size to ensure the same rounding on AMD
+        // hardware as on Maxwell or other Nvidia architectures.
+        coords_with_subpixel_offset = ImageGatherSubpixelOffset(info, texture, coords);
+        coords = coords_with_subpixel_offset;
+    }
     if (!sparse_inst || !supports_sparse) {
         if (offset.IsEmpty()) {
             ctx.Add("{}=textureGather({},{},int({}));", texel, texture, coords,
@@ -387,6 +409,13 @@ void EmitImageGatherDref(EmitContext& ctx, IR::Inst& inst, const IR::Value& inde
         LOG_WARNING(Shader_GLSL, "Device does not support sparse texture queries. STUBBING");
         ctx.AddU1("{}=true;", *sparse_inst);
     }
+    std::string coords_with_subpixel_offset;
+    if (ctx.profile.need_gather_subpixel_offset) {
+        // Apply a subpixel offset of 1/512 of the texel size to ensure the same rounding on AMD
+        // hardware as on Maxwell or other Nvidia architectures.
+        coords_with_subpixel_offset = ImageGatherSubpixelOffset(info, texture, coords);
+        coords = coords_with_subpixel_offset;
+    }
     if (!sparse_inst || !supports_sparse) {
         if (offset.IsEmpty()) {
             ctx.Add("{}=textureGather({},{},{});", texel, texture, coords, dref);
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp
index 02073c420..968901d42 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp
@@ -261,6 +261,39 @@ Id BitTest(EmitContext& ctx, Id mask, Id bit) {
     const Id bit_value{ctx.OpBitwiseAnd(ctx.U32[1], shifted, ctx.Const(1u))};
     return ctx.OpINotEqual(ctx.U1, bit_value, ctx.u32_zero_value);
 }
+
+Id ImageGatherSubpixelOffset(EmitContext& ctx, const IR::TextureInstInfo& info, Id texture,
+                             Id coords) {
+    // Apply a subpixel offset of 1/512 of the texel size to ensure the same rounding on AMD
+    // hardware as on Maxwell or other Nvidia architectures.
+    const auto calculate_offset{[&](size_t dim) -> std::array<Id, 2> {
+        const Id nudge{ctx.Const(0x1p-9f)};
+        const Id image_size{ctx.OpImageQuerySizeLod(ctx.U32[dim], texture, ctx.u32_zero_value)};
+        const Id offset_x{ctx.OpFDiv(
+            ctx.F32[1], nudge,
+            ctx.OpConvertUToF(ctx.F32[1], ctx.OpCompositeExtract(ctx.U32[1], image_size, 0)))};
+        const Id offset_y{ctx.OpFDiv(
+            ctx.F32[1], nudge,
+            ctx.OpConvertUToF(ctx.F32[1], ctx.OpCompositeExtract(ctx.U32[1], image_size, 1)))};
+        return {ctx.OpFAdd(ctx.F32[1], ctx.OpCompositeExtract(ctx.F32[1], coords, 0), offset_x),
+                ctx.OpFAdd(ctx.F32[1], ctx.OpCompositeExtract(ctx.F32[1], coords, 1), offset_y)};
+    }};
+    switch (info.type) {
+    case TextureType::Color2D:
+    case TextureType::Color2DRect: {
+        const auto offset{calculate_offset(2)};
+        return ctx.OpCompositeConstruct(ctx.F32[2], offset[0], offset[1]);
+    }
+    case TextureType::ColorArray2D:
+    case TextureType::ColorCube: {
+        const auto offset{calculate_offset(3)};
+        return ctx.OpCompositeConstruct(ctx.F32[3], offset[0], offset[1],
+                                        ctx.OpCompositeExtract(ctx.F32[1], coords, 2));
+    }
+    default:
+        return coords;
+    }
+}
 } // Anonymous namespace
 
 Id EmitBindlessImageSampleImplicitLod(EmitContext&) {
@@ -423,6 +456,9 @@ Id EmitImageGather(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id
                    const IR::Value& offset, const IR::Value& offset2) {
     const auto info{inst->Flags<IR::TextureInstInfo>()};
     const ImageOperands operands(ctx, offset, offset2);
+    if (ctx.profile.need_gather_subpixel_offset) {
+        coords = ImageGatherSubpixelOffset(ctx, info, TextureImage(ctx, info, index), coords);
+    }
     return Emit(&EmitContext::OpImageSparseGather, &EmitContext::OpImageGather, ctx, inst,
                 ctx.F32[4], Texture(ctx, info, index), coords, ctx.Const(info.gather_component),
                 operands.MaskOptional(), operands.Span());
@@ -432,6 +468,9 @@ Id EmitImageGatherDref(EmitContext& ctx, IR::Inst* inst, const IR::Value& index,
                        const IR::Value& offset, const IR::Value& offset2, Id dref) {
     const auto info{inst->Flags<IR::TextureInstInfo>()};
     const ImageOperands operands(ctx, offset, offset2);
+    if (ctx.profile.need_gather_subpixel_offset) {
+        coords = ImageGatherSubpixelOffset(ctx, info, TextureImage(ctx, info, index), coords);
+    }
     return Emit(&EmitContext::OpImageSparseDrefGather, &EmitContext::OpImageDrefGather, ctx, inst,
                 ctx.F32[4], Texture(ctx, info, index), coords, dref, operands.MaskOptional(),
                 operands.Span());
diff --git a/src/shader_recompiler/profile.h b/src/shader_recompiler/profile.h
index 253e0d0bd..31390e869 100644
--- a/src/shader_recompiler/profile.h
+++ b/src/shader_recompiler/profile.h
@@ -52,6 +52,10 @@ struct Profile {
     bool need_declared_frag_colors{};
     /// Prevents fast math optimizations that may cause inaccuracies
     bool need_fastmath_off{};
+    /// Some GPU vendors use a lower fixed point format of 16.8 when calculating pixel coordinates
+    /// in the ImageGather instruction than the Maxwell architecture does. Applying an offset does
+    /// fix this mismatching rounding behaviour.
+    bool need_gather_subpixel_offset{};
 
     /// OpFClamp is broken and OpFMax + OpFMin should be used instead
     bool has_broken_spirv_clamp{};
diff --git a/src/video_core/renderer_opengl/gl_device.cpp b/src/video_core/renderer_opengl/gl_device.cpp
index 22ed16ebf..d36a0a7a1 100644
--- a/src/video_core/renderer_opengl/gl_device.cpp
+++ b/src/video_core/renderer_opengl/gl_device.cpp
@@ -169,6 +169,7 @@ Device::Device(Core::Frontend::EmuWindow& emu_window) {
     has_draw_texture = GLAD_GL_NV_draw_texture;
     warp_size_potentially_larger_than_guest = !is_nvidia && !is_intel;
     need_fastmath_off = is_nvidia;
+    need_gather_subpixel_offset = is_amd;
     can_report_memory = GLAD_GL_NVX_gpu_memory_info;
 
     // At the moment of writing this, only Nvidia's driver optimizes BufferSubData on exclusive
diff --git a/src/video_core/renderer_opengl/gl_device.h b/src/video_core/renderer_opengl/gl_device.h
index 3ff8cad83..e8104c4de 100644
--- a/src/video_core/renderer_opengl/gl_device.h
+++ b/src/video_core/renderer_opengl/gl_device.h
@@ -160,6 +160,10 @@ public:
         return need_fastmath_off;
     }
 
+    bool NeedsGatherSubpixelOffset() const {
+        return need_gather_subpixel_offset;
+    }
+
     bool HasCbufFtouBug() const {
         return has_cbuf_ftou_bug;
     }
@@ -225,6 +229,7 @@ private:
     bool has_draw_texture{};
     bool warp_size_potentially_larger_than_guest{};
     bool need_fastmath_off{};
+    bool need_gather_subpixel_offset{};
     bool has_cbuf_ftou_bug{};
     bool has_bool_ref_bug{};
     bool can_report_memory{};
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.cpp b/src/video_core/renderer_opengl/gl_shader_cache.cpp
index 479bb8ba3..b40aa6f5e 100644
--- a/src/video_core/renderer_opengl/gl_shader_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_cache.cpp
@@ -218,6 +218,7 @@ ShaderCache::ShaderCache(RasterizerOpenGL& rasterizer_, Core::Frontend::EmuWindo
           .lower_left_origin_mode = true,
           .need_declared_frag_colors = true,
           .need_fastmath_off = device.NeedsFastmathOff(),
+          .need_gather_subpixel_offset = device.NeedsGatherSubpixelOffset(),
 
           .has_broken_spirv_clamp = true,
           .has_broken_unsigned_image_offsets = true,
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
index 0684cceed..f51257267 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
@@ -329,6 +329,7 @@ PipelineCache::PipelineCache(RasterizerVulkan& rasterizer_, const Device& device
 
         .lower_left_origin_mode = false,
         .need_declared_frag_colors = false,
+        .need_gather_subpixel_offset = device.NeedsGatherSubpixelOffset(),
 
         .has_broken_spirv_clamp = driver_id == VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS,
         .has_broken_spirv_position_input = driver_id == VK_DRIVER_ID_QUALCOMM_PROPRIETARY,
diff --git a/src/video_core/vulkan_common/vulkan_device.cpp b/src/video_core/vulkan_common/vulkan_device.cpp
index 6f288b3f8..0939b62c9 100644
--- a/src/video_core/vulkan_common/vulkan_device.cpp
+++ b/src/video_core/vulkan_common/vulkan_device.cpp
@@ -431,6 +431,7 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR
                         "AMD GCN4 and earlier have broken VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT");
             has_broken_cube_compatibility = true;
         }
+        need_gather_subpixel_offset = true;
     }
     if (extensions.sampler_filter_minmax && is_amd) {
         // Disable ext_sampler_filter_minmax on AMD GCN4 and lower as it is broken.
diff --git a/src/video_core/vulkan_common/vulkan_device.h b/src/video_core/vulkan_common/vulkan_device.h
index 41b5da18a..50e95bcca 100644
--- a/src/video_core/vulkan_common/vulkan_device.h
+++ b/src/video_core/vulkan_common/vulkan_device.h
@@ -554,6 +554,10 @@ public:
         return features.robustness2.nullDescriptor;
     }
 
+    bool NeedsGatherSubpixelOffset() const {
+        return need_gather_subpixel_offset;
+    }
+
     u32 GetMaxVertexInputAttributes() const {
         return properties.properties.limits.maxVertexInputAttributes;
     }
@@ -664,6 +668,7 @@ private:
     bool must_emulate_bgr565{};             ///< Emulates BGR565 by swizzling RGB565 format.
     bool dynamic_state3_blending{};         ///< Has all blending features of dynamic_state3.
     bool dynamic_state3_enables{};          ///< Has all enables features of dynamic_state3.
+    bool need_gather_subpixel_offset{};     ///< Needs offset at ImageGather for correct rounding.
     u64 device_access_memory{};             ///< Total size of device local memory in bytes.
     u32 sets_per_pool{};                    ///< Sets per Description Pool
 

From fe91066f4673f7a3ee87235f08b72db4910eb01c Mon Sep 17 00:00:00 2001
From: Wollnashorn <Wollnashorn@users.noreply.github.com>
Date: Wed, 5 Apr 2023 03:02:24 +0200
Subject: [PATCH 2/4] video_core: Enable ImageGather with subpixel offset on
 Intel

---
 src/shader_recompiler/profile.h                      | 6 +++---
 src/video_core/renderer_opengl/gl_device.cpp         | 1 -
 src/video_core/renderer_opengl/gl_device.h           | 9 ++++-----
 src/video_core/renderer_opengl/gl_shader_cache.cpp   | 2 +-
 src/video_core/renderer_vulkan/vk_pipeline_cache.cpp | 4 +++-
 src/video_core/vulkan_common/vulkan_device.cpp       | 1 -
 src/video_core/vulkan_common/vulkan_device.h         | 5 -----
 7 files changed, 11 insertions(+), 17 deletions(-)

diff --git a/src/shader_recompiler/profile.h b/src/shader_recompiler/profile.h
index 31390e869..9f88fb440 100644
--- a/src/shader_recompiler/profile.h
+++ b/src/shader_recompiler/profile.h
@@ -52,9 +52,9 @@ struct Profile {
     bool need_declared_frag_colors{};
     /// Prevents fast math optimizations that may cause inaccuracies
     bool need_fastmath_off{};
-    /// Some GPU vendors use a lower fixed point format of 16.8 when calculating pixel coordinates
-    /// in the ImageGather instruction than the Maxwell architecture does. Applying an offset does
-    /// fix this mismatching rounding behaviour.
+    /// Some GPU vendors use a different rounding precision than the Maxwell architecture when
+    /// calculating texture pixel coordinates with the 16.8 fixed-point format in the ImageGather
+    /// instruction. Applying a small offset fixes this mismatched rounding behaviour.
     bool need_gather_subpixel_offset{};
 
     /// OpFClamp is broken and OpFMax + OpFMin should be used instead
diff --git a/src/video_core/renderer_opengl/gl_device.cpp b/src/video_core/renderer_opengl/gl_device.cpp
index d36a0a7a1..22ed16ebf 100644
--- a/src/video_core/renderer_opengl/gl_device.cpp
+++ b/src/video_core/renderer_opengl/gl_device.cpp
@@ -169,7 +169,6 @@ Device::Device(Core::Frontend::EmuWindow& emu_window) {
     has_draw_texture = GLAD_GL_NV_draw_texture;
     warp_size_potentially_larger_than_guest = !is_nvidia && !is_intel;
     need_fastmath_off = is_nvidia;
-    need_gather_subpixel_offset = is_amd;
     can_report_memory = GLAD_GL_NVX_gpu_memory_info;
 
     // At the moment of writing this, only Nvidia's driver optimizes BufferSubData on exclusive
diff --git a/src/video_core/renderer_opengl/gl_device.h b/src/video_core/renderer_opengl/gl_device.h
index e8104c4de..cc0b95f1a 100644
--- a/src/video_core/renderer_opengl/gl_device.h
+++ b/src/video_core/renderer_opengl/gl_device.h
@@ -160,10 +160,6 @@ public:
         return need_fastmath_off;
     }
 
-    bool NeedsGatherSubpixelOffset() const {
-        return need_gather_subpixel_offset;
-    }
-
     bool HasCbufFtouBug() const {
         return has_cbuf_ftou_bug;
     }
@@ -180,6 +176,10 @@ public:
         return vendor_name == "ATI Technologies Inc.";
     }
 
+    bool IsIntel() const {
+        return vendor_name == "Intel";
+    }
+
     bool CanReportMemoryUsage() const {
         return can_report_memory;
     }
@@ -229,7 +229,6 @@ private:
     bool has_draw_texture{};
     bool warp_size_potentially_larger_than_guest{};
     bool need_fastmath_off{};
-    bool need_gather_subpixel_offset{};
     bool has_cbuf_ftou_bug{};
     bool has_bool_ref_bug{};
     bool can_report_memory{};
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.cpp b/src/video_core/renderer_opengl/gl_shader_cache.cpp
index b40aa6f5e..6ecda2984 100644
--- a/src/video_core/renderer_opengl/gl_shader_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_cache.cpp
@@ -218,7 +218,7 @@ ShaderCache::ShaderCache(RasterizerOpenGL& rasterizer_, Core::Frontend::EmuWindo
           .lower_left_origin_mode = true,
           .need_declared_frag_colors = true,
           .need_fastmath_off = device.NeedsFastmathOff(),
-          .need_gather_subpixel_offset = device.NeedsGatherSubpixelOffset(),
+          .need_gather_subpixel_offset = device.IsAmd() || device.IsIntel(),
 
           .has_broken_spirv_clamp = true,
           .has_broken_unsigned_image_offsets = true,
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
index f51257267..8963b6a66 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
@@ -329,7 +329,9 @@ PipelineCache::PipelineCache(RasterizerVulkan& rasterizer_, const Device& device
 
         .lower_left_origin_mode = false,
         .need_declared_frag_colors = false,
-        .need_gather_subpixel_offset = device.NeedsGatherSubpixelOffset(),
+        .need_gather_subpixel_offset = driver_id == VK_DRIVER_ID_AMD_PROPRIETARY ||
+                                       driver_id == VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS ||
+                                       driver_id == VK_DRIVER_ID_INTEL_OPEN_SOURCE_MESA,
 
         .has_broken_spirv_clamp = driver_id == VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS,
         .has_broken_spirv_position_input = driver_id == VK_DRIVER_ID_QUALCOMM_PROPRIETARY,
diff --git a/src/video_core/vulkan_common/vulkan_device.cpp b/src/video_core/vulkan_common/vulkan_device.cpp
index 0939b62c9..6f288b3f8 100644
--- a/src/video_core/vulkan_common/vulkan_device.cpp
+++ b/src/video_core/vulkan_common/vulkan_device.cpp
@@ -431,7 +431,6 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR
                         "AMD GCN4 and earlier have broken VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT");
             has_broken_cube_compatibility = true;
         }
-        need_gather_subpixel_offset = true;
     }
     if (extensions.sampler_filter_minmax && is_amd) {
         // Disable ext_sampler_filter_minmax on AMD GCN4 and lower as it is broken.
diff --git a/src/video_core/vulkan_common/vulkan_device.h b/src/video_core/vulkan_common/vulkan_device.h
index 50e95bcca..41b5da18a 100644
--- a/src/video_core/vulkan_common/vulkan_device.h
+++ b/src/video_core/vulkan_common/vulkan_device.h
@@ -554,10 +554,6 @@ public:
         return features.robustness2.nullDescriptor;
     }
 
-    bool NeedsGatherSubpixelOffset() const {
-        return need_gather_subpixel_offset;
-    }
-
     u32 GetMaxVertexInputAttributes() const {
         return properties.properties.limits.maxVertexInputAttributes;
     }
@@ -668,7 +664,6 @@ private:
     bool must_emulate_bgr565{};             ///< Emulates BGR565 by swizzling RGB565 format.
     bool dynamic_state3_blending{};         ///< Has all blending features of dynamic_state3.
     bool dynamic_state3_enables{};          ///< Has all enables features of dynamic_state3.
-    bool need_gather_subpixel_offset{};     ///< Needs offset at ImageGather for correct rounding.
     u64 device_access_memory{};             ///< Total size of device local memory in bytes.
     u32 sets_per_pool{};                    ///< Sets per Description Pool
 

From 82b78cde7374c04e5c3a4d6255ddb6c26ecae946 Mon Sep 17 00:00:00 2001
From: Wollnashorn <Wollnashorn@users.noreply.github.com>
Date: Wed, 5 Apr 2023 18:00:35 +0200
Subject: [PATCH 3/4] shader_recompiler: Use vector arithmetic rather than
 component-wise operations in ImageGatherSubpixelOffset

Should be more efficient and more readable.
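
In GLSL terms, the SPIR-V emitted for the 2D case now corresponds roughly to
the sketch below: a single vector divide and add instead of per-component
extracts, divides, adds and a recomposition (`tex` and `coords` are placeholder
names; the 2D-array/cube path uses a three-component nudge with a zero third
component, so the layer/face coordinate is left unchanged):

    vec2 offset = vec2(1.0 / 512.0) / vec2(textureSize(tex, 0));
    vec2 nudged_coords = coords + offset;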

---
 .../backend/spirv/emit_spirv_image.cpp        | 27 +++++++------------
 1 file changed, 9 insertions(+), 18 deletions(-)

diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp
index 968901d42..7d901c04b 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp
@@ -266,30 +266,21 @@ Id ImageGatherSubpixelOffset(EmitContext& ctx, const IR::TextureInstInfo& info,
                              Id coords) {
     // Apply a subpixel offset of 1/512 of the texel size to ensure the same rounding on AMD
     // hardware as on Maxwell or other Nvidia architectures.
-    const auto calculate_offset{[&](size_t dim) -> std::array<Id, 2> {
+    const auto calculate_coords{[&](size_t dim) {
         const Id nudge{ctx.Const(0x1p-9f)};
         const Id image_size{ctx.OpImageQuerySizeLod(ctx.U32[dim], texture, ctx.u32_zero_value)};
-        const Id offset_x{ctx.OpFDiv(
-            ctx.F32[1], nudge,
-            ctx.OpConvertUToF(ctx.F32[1], ctx.OpCompositeExtract(ctx.U32[1], image_size, 0)))};
-        const Id offset_y{ctx.OpFDiv(
-            ctx.F32[1], nudge,
-            ctx.OpConvertUToF(ctx.F32[1], ctx.OpCompositeExtract(ctx.U32[1], image_size, 1)))};
-        return {ctx.OpFAdd(ctx.F32[1], ctx.OpCompositeExtract(ctx.F32[1], coords, 0), offset_x),
-                ctx.OpFAdd(ctx.F32[1], ctx.OpCompositeExtract(ctx.F32[1], coords, 1), offset_y)};
+        Id offset{dim == 2 ? ctx.ConstantComposite(ctx.F32[dim], nudge, nudge)
+                           : ctx.ConstantComposite(ctx.F32[dim], nudge, nudge, ctx.f32_zero_value)};
+        offset = ctx.OpFDiv(ctx.F32[dim], offset, ctx.OpConvertUToF(ctx.F32[dim], image_size));
+        return ctx.OpFAdd(ctx.F32[dim], coords, offset);
     }};
     switch (info.type) {
     case TextureType::Color2D:
-    case TextureType::Color2DRect: {
-        const auto offset{calculate_offset(2)};
-        return ctx.OpCompositeConstruct(ctx.F32[2], offset[0], offset[1]);
-    }
+    case TextureType::Color2DRect:
+        return calculate_coords(2);
     case TextureType::ColorArray2D:
-    case TextureType::ColorCube: {
-        const auto offset{calculate_offset(3)};
-        return ctx.OpCompositeConstruct(ctx.F32[3], offset[0], offset[1],
-                                        ctx.OpCompositeExtract(ctx.F32[1], coords, 2));
-    }
+    case TextureType::ColorCube:
+        return calculate_coords(3);
     default:
         return coords;
     }

From c0e5ecc399cc08e4cd54b04c8d63b99e1fcbddc7 Mon Sep 17 00:00:00 2001
From: Wollnashorn <Wollnashorn@users.noreply.github.com>
Date: Wed, 12 Apr 2023 17:11:02 +0200
Subject: [PATCH 4/4] video_core: Enable ImageGather rounding fix on AMD open
 source drivers

---
 src/video_core/renderer_vulkan/vk_pipeline_cache.cpp | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
index 8963b6a66..985cc3203 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
@@ -330,6 +330,8 @@ PipelineCache::PipelineCache(RasterizerVulkan& rasterizer_, const Device& device
         .lower_left_origin_mode = false,
         .need_declared_frag_colors = false,
         .need_gather_subpixel_offset = driver_id == VK_DRIVER_ID_AMD_PROPRIETARY ||
+                                       driver_id == VK_DRIVER_ID_AMD_OPEN_SOURCE ||
+                                       driver_id == VK_DRIVER_ID_MESA_RADV ||
                                        driver_id == VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS ||
                                        driver_id == VK_DRIVER_ID_INTEL_OPEN_SOURCE_MESA,