author    Samuel Pitoiset <samuel.pitoiset@gmail.com>  2024-05-08 16:10:54 +0200
committer Marge Bot <emma+marge@anholt.net>            2024-05-14 15:32:07 +0000
commit    9532b0f1b2d25c437bde67bf706386e1bd0c0fa3 (patch)
tree      564a5621e3f63edfb0f76aadf65daf68d6a5ea45
parent    fca40bcce37e500f4a9e0fcf42c3188bbc7b885a (diff)
radv: emit graphics pipelines directly from the cmdbuf
This allows us to unify emitting monolithic graphics pipelines and shader objects. This temporarily reduces performance in some games due to more context rolls, but the following commits fix that by using the recent mechanism for tracking context register writes.

Signed-off-by: Samuel Pitoiset <samuel.pitoiset@gmail.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/28983>
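A minimal sketch of such write-tracking, for illustration only and not RADV's actual implementation: cache the last value written to each context register and emit the packet only when the value changes. It assumes the driver's existing struct radeon_cmdbuf and radeon_set_context_reg(); the names radv_tracked_regs and radv_set_context_reg_tracked and the register count are hypothetical.

#include <stdbool.h>
#include <stdint.h>

#define RADV_NUM_TRACKED_CTX_REGS 1024 /* hypothetical bound on tracked registers */

struct radv_tracked_regs {
   uint32_t value[RADV_NUM_TRACKED_CTX_REGS];
   bool written[RADV_NUM_TRACKED_CTX_REGS]; /* false until the register is first written */
};

static void
radv_set_context_reg_tracked(struct radeon_cmdbuf *cs, struct radv_tracked_regs *tracked,
                             unsigned idx, uint32_t reg, uint32_t value)
{
   /* Every SET_CONTEXT_REG packet can force the hardware to roll to a fresh
    * context; eliding writes of unchanged values avoids those rolls. */
   if (tracked->written[idx] && tracked->value[idx] == value)
      return;

   tracked->value[idx] = value;
   tracked->written[idx] = true;
   radeon_set_context_reg(cs, reg, value);
}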
-rw-r--r--  src/amd/vulkan/radv_cmd_buffer.c        | 917
-rw-r--r--  src/amd/vulkan/radv_pipeline.c          |   3
-rw-r--r--  src/amd/vulkan/radv_pipeline.h          |   4
-rw-r--r--  src/amd/vulkan/radv_pipeline_graphics.c | 798
-rw-r--r--  src/amd/vulkan/radv_pipeline_graphics.h |  40
5 files changed, 820 insertions, 942 deletions
diff --git a/src/amd/vulkan/radv_cmd_buffer.c b/src/amd/vulkan/radv_cmd_buffer.c
index 5a59baecd95..0077114b3f5 100644
--- a/src/amd/vulkan/radv_cmd_buffer.c
+++ b/src/amd/vulkan/radv_cmd_buffer.c
@@ -31,6 +31,7 @@
#include "vk_util.h"
#include "ac_debug.h"
+#include "ac_nir.h"
#include "ac_shader_args.h"
#include "aco_interface.h"
@@ -1915,6 +1916,821 @@ radv_emit_compute_shader(const struct radv_physical_device *pdev, struct radeon_
}
static void
+radv_emit_vgt_gs_mode(struct radv_cmd_buffer *cmd_buffer)
+{
+ const struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
+ const struct radv_physical_device *pdev = radv_device_physical(device);
+ const struct radv_shader_info *info = &cmd_buffer->state.last_vgt_shader->info;
+ unsigned vgt_primitiveid_en = 0;
+ uint32_t vgt_gs_mode = 0;
+
+ if (info->is_ngg)
+ return;
+
+ if (info->stage == MESA_SHADER_GEOMETRY) {
+ vgt_gs_mode = ac_vgt_gs_mode(info->gs.vertices_out, pdev->info.gfx_level);
+ } else if (info->outinfo.export_prim_id || info->uses_prim_id) {
+ vgt_gs_mode = S_028A40_MODE(V_028A40_GS_SCENARIO_A);
+ vgt_primitiveid_en |= S_028A84_PRIMITIVEID_EN(1);
+ }
+
+ radeon_set_context_reg(cmd_buffer->cs, R_028A84_VGT_PRIMITIVEID_EN, vgt_primitiveid_en);
+ radeon_set_context_reg(cmd_buffer->cs, R_028A40_VGT_GS_MODE, vgt_gs_mode);
+}
+
+static void
+radv_emit_hw_vs(struct radv_cmd_buffer *cmd_buffer, const struct radv_shader *shader)
+{
+ const struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
+ const struct radv_physical_device *pdev = radv_device_physical(device);
+ const uint64_t va = radv_shader_get_va(shader);
+
+ radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B120_SPI_SHADER_PGM_LO_VS, 4);
+ radeon_emit(cmd_buffer->cs, va >> 8);
+ radeon_emit(cmd_buffer->cs, S_00B124_MEM_BASE(va >> 40));
+ radeon_emit(cmd_buffer->cs, shader->config.rsrc1);
+ radeon_emit(cmd_buffer->cs, shader->config.rsrc2);
+
+ radeon_set_context_reg(cmd_buffer->cs, R_0286C4_SPI_VS_OUT_CONFIG, shader->info.regs.spi_vs_out_config);
+ radeon_set_context_reg(cmd_buffer->cs, R_02870C_SPI_SHADER_POS_FORMAT, shader->info.regs.spi_shader_pos_format);
+ radeon_set_context_reg(cmd_buffer->cs, R_02881C_PA_CL_VS_OUT_CNTL, shader->info.regs.pa_cl_vs_out_cntl);
+
+ if (pdev->info.gfx_level <= GFX8)
+ radeon_set_context_reg(cmd_buffer->cs, R_028AB4_VGT_REUSE_OFF, shader->info.regs.vs.vgt_reuse_off);
+
+ if (pdev->info.gfx_level >= GFX7) {
+ radeon_set_sh_reg_idx(pdev, cmd_buffer->cs, R_00B118_SPI_SHADER_PGM_RSRC3_VS, 3,
+ shader->info.regs.vs.spi_shader_pgm_rsrc3_vs);
+ radeon_set_sh_reg(cmd_buffer->cs, R_00B11C_SPI_SHADER_LATE_ALLOC_VS,
+ shader->info.regs.vs.spi_shader_late_alloc_vs);
+
+ if (pdev->info.gfx_level >= GFX10) {
+ radeon_set_uconfig_reg(cmd_buffer->cs, R_030980_GE_PC_ALLOC, shader->info.regs.ge_pc_alloc);
+
+ if (shader->info.stage == MESA_SHADER_TESS_EVAL) {
+ radeon_set_context_reg(cmd_buffer->cs, R_028A44_VGT_GS_ONCHIP_CNTL, shader->info.regs.vgt_gs_onchip_cntl);
+ }
+ }
+ }
+}
+
+static void
+radv_emit_hw_es(struct radv_cmd_buffer *cmd_buffer, const struct radv_shader *shader)
+{
+ const uint64_t va = radv_shader_get_va(shader);
+
+ radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B320_SPI_SHADER_PGM_LO_ES, 4);
+ radeon_emit(cmd_buffer->cs, va >> 8);
+ radeon_emit(cmd_buffer->cs, S_00B324_MEM_BASE(va >> 40));
+ radeon_emit(cmd_buffer->cs, shader->config.rsrc1);
+ radeon_emit(cmd_buffer->cs, shader->config.rsrc2);
+}
+
+static void
+radv_emit_hw_ls(struct radv_cmd_buffer *cmd_buffer, const struct radv_shader *shader)
+{
+ const uint64_t va = radv_shader_get_va(shader);
+
+ radeon_set_sh_reg(cmd_buffer->cs, R_00B520_SPI_SHADER_PGM_LO_LS, va >> 8);
+
+ radeon_set_sh_reg(cmd_buffer->cs, R_00B528_SPI_SHADER_PGM_RSRC1_LS, shader->config.rsrc1);
+}
+
+static void
+radv_emit_hw_ngg(struct radv_cmd_buffer *cmd_buffer, const struct radv_shader *es, const struct radv_shader *shader)
+{
+ const struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
+ const struct radv_physical_device *pdev = radv_device_physical(device);
+ const uint64_t va = radv_shader_get_va(shader);
+ gl_shader_stage es_type;
+ const struct gfx10_ngg_info *ngg_state = &shader->info.ngg_info;
+
+ if (shader->info.stage == MESA_SHADER_GEOMETRY) {
+ if (shader->info.merged_shader_compiled_separately) {
+ es_type = es->info.stage;
+ } else {
+ es_type = shader->info.gs.es_type;
+ }
+ } else {
+ es_type = shader->info.stage;
+ }
+
+ if (!shader->info.merged_shader_compiled_separately) {
+ radeon_set_sh_reg(cmd_buffer->cs, R_00B320_SPI_SHADER_PGM_LO_ES, va >> 8);
+
+ radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B228_SPI_SHADER_PGM_RSRC1_GS, 2);
+ radeon_emit(cmd_buffer->cs, shader->config.rsrc1);
+ radeon_emit(cmd_buffer->cs, shader->config.rsrc2);
+ }
+
+ const struct radv_vs_output_info *outinfo = &shader->info.outinfo;
+
+ const bool es_enable_prim_id = outinfo->export_prim_id || (es && es->info.uses_prim_id);
+ bool break_wave_at_eoi = false;
+
+ if (es_type == MESA_SHADER_TESS_EVAL) {
+ if (es_enable_prim_id || (shader->info.uses_prim_id))
+ break_wave_at_eoi = true;
+ }
+
+ radeon_set_context_reg(cmd_buffer->cs, R_0286C4_SPI_VS_OUT_CONFIG, shader->info.regs.spi_vs_out_config);
+
+ radeon_set_context_reg_seq(cmd_buffer->cs, R_028708_SPI_SHADER_IDX_FORMAT, 2);
+ radeon_emit(cmd_buffer->cs, shader->info.regs.ngg.spi_shader_idx_format);
+ radeon_emit(cmd_buffer->cs, shader->info.regs.spi_shader_pos_format);
+
+ radeon_set_context_reg(cmd_buffer->cs, R_02881C_PA_CL_VS_OUT_CNTL, shader->info.regs.pa_cl_vs_out_cntl);
+
+ radeon_set_context_reg(cmd_buffer->cs, R_028A84_VGT_PRIMITIVEID_EN,
+ shader->info.regs.ngg.vgt_primitiveid_en | S_028A84_PRIMITIVEID_EN(es_enable_prim_id));
+
+ radeon_set_context_reg(cmd_buffer->cs, R_0287FC_GE_MAX_OUTPUT_PER_SUBGROUP,
+ shader->info.regs.ngg.ge_max_output_per_subgroup);
+
+ radeon_set_context_reg(cmd_buffer->cs, R_028B4C_GE_NGG_SUBGRP_CNTL, shader->info.regs.ngg.ge_ngg_subgrp_cntl);
+
+ radeon_set_context_reg(cmd_buffer->cs, R_028B90_VGT_GS_INSTANCE_CNT, shader->info.regs.vgt_gs_instance_cnt);
+
+ uint32_t ge_cntl = shader->info.regs.ngg.ge_cntl;
+ if (pdev->info.gfx_level >= GFX11) {
+ ge_cntl |= S_03096C_BREAK_PRIMGRP_AT_EOI(break_wave_at_eoi);
+ } else {
+ ge_cntl |= S_03096C_BREAK_WAVE_AT_EOI(break_wave_at_eoi);
+
+ /* Bug workaround for a possible hang with non-tessellation cases.
+ * Tessellation always sets GE_CNTL.VERT_GRP_SIZE = 0
+ *
+ * Requirement: GE_CNTL.VERT_GRP_SIZE = VGT_GS_ONCHIP_CNTL.ES_VERTS_PER_SUBGRP - 5
+ */
+ if (pdev->info.gfx_level == GFX10 && es_type != MESA_SHADER_TESS_EVAL && ngg_state->hw_max_esverts != 256) {
+ ge_cntl &= C_03096C_VERT_GRP_SIZE;
+
+ if (ngg_state->hw_max_esverts > 5) {
+ ge_cntl |= S_03096C_VERT_GRP_SIZE(ngg_state->hw_max_esverts - 5);
+ }
+ }
+
+ radeon_set_context_reg(cmd_buffer->cs, R_028A44_VGT_GS_ONCHIP_CNTL, shader->info.regs.vgt_gs_onchip_cntl);
+ }
+
+ radeon_set_uconfig_reg(cmd_buffer->cs, R_03096C_GE_CNTL, ge_cntl);
+
+ radeon_set_sh_reg_idx(pdev, cmd_buffer->cs, R_00B21C_SPI_SHADER_PGM_RSRC3_GS, 3,
+ shader->info.regs.spi_shader_pgm_rsrc3_gs);
+ radeon_set_sh_reg_idx(pdev, cmd_buffer->cs, R_00B204_SPI_SHADER_PGM_RSRC4_GS, 3,
+ shader->info.regs.spi_shader_pgm_rsrc4_gs);
+
+ radeon_set_uconfig_reg(cmd_buffer->cs, R_030980_GE_PC_ALLOC, shader->info.regs.ge_pc_alloc);
+}
+
+static void
+radv_emit_hw_hs(struct radv_cmd_buffer *cmd_buffer, const struct radv_shader *shader)
+{
+ const struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
+ const struct radv_physical_device *pdev = radv_device_physical(device);
+ const uint64_t va = radv_shader_get_va(shader);
+
+ if (pdev->info.gfx_level >= GFX9) {
+ if (pdev->info.gfx_level >= GFX10) {
+ radeon_set_sh_reg(cmd_buffer->cs, R_00B520_SPI_SHADER_PGM_LO_LS, va >> 8);
+ } else {
+ radeon_set_sh_reg(cmd_buffer->cs, R_00B410_SPI_SHADER_PGM_LO_LS, va >> 8);
+ }
+
+ radeon_set_sh_reg(cmd_buffer->cs, R_00B428_SPI_SHADER_PGM_RSRC1_HS, shader->config.rsrc1);
+ } else {
+ radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B420_SPI_SHADER_PGM_LO_HS, 4);
+ radeon_emit(cmd_buffer->cs, va >> 8);
+ radeon_emit(cmd_buffer->cs, S_00B424_MEM_BASE(va >> 40));
+ radeon_emit(cmd_buffer->cs, shader->config.rsrc1);
+ radeon_emit(cmd_buffer->cs, shader->config.rsrc2);
+ }
+}
+
+static void
+radv_emit_vertex_shader(struct radv_cmd_buffer *cmd_buffer)
+{
+ const struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
+ const struct radv_physical_device *pdev = radv_device_physical(device);
+ const struct radv_shader *vs = cmd_buffer->state.shaders[MESA_SHADER_VERTEX];
+
+ if (vs->info.merged_shader_compiled_separately) {
+ assert(vs->info.next_stage == MESA_SHADER_TESS_CTRL || vs->info.next_stage == MESA_SHADER_GEOMETRY);
+
+ const struct radv_userdata_info *loc = &vs->info.user_sgprs_locs.shader_data[AC_UD_NEXT_STAGE_PC];
+ const struct radv_shader *next_stage = cmd_buffer->state.shaders[vs->info.next_stage];
+ const uint32_t base_reg = vs->info.user_data_0;
+
+ assert(loc->sgpr_idx != -1 && loc->num_sgprs == 1);
+
+ if (!vs->info.vs.has_prolog) {
+ uint32_t rsrc1, rsrc2;
+
+ if (vs->info.next_stage == MESA_SHADER_TESS_CTRL) {
+ radv_shader_combine_cfg_vs_tcs(vs, next_stage, &rsrc1, NULL);
+
+ if (pdev->info.gfx_level >= GFX10) {
+ radeon_set_sh_reg(cmd_buffer->cs, R_00B520_SPI_SHADER_PGM_LO_LS, vs->va >> 8);
+ } else {
+ radeon_set_sh_reg(cmd_buffer->cs, R_00B410_SPI_SHADER_PGM_LO_LS, vs->va >> 8);
+ }
+
+ radeon_set_sh_reg(cmd_buffer->cs, R_00B428_SPI_SHADER_PGM_RSRC1_HS, rsrc1);
+ } else {
+ radv_shader_combine_cfg_vs_gs(vs, next_stage, &rsrc1, &rsrc2);
+
+ if (pdev->info.gfx_level >= GFX10) {
+ radeon_set_sh_reg(cmd_buffer->cs, R_00B320_SPI_SHADER_PGM_LO_ES, vs->va >> 8);
+ } else {
+ radeon_set_sh_reg(cmd_buffer->cs, R_00B210_SPI_SHADER_PGM_LO_ES, vs->va >> 8);
+ }
+
+ unsigned lds_size;
+ if (next_stage->info.is_ngg) {
+ lds_size = DIV_ROUND_UP(next_stage->info.ngg_info.lds_size, pdev->info.lds_encode_granularity);
+ } else {
+ lds_size = next_stage->info.gs_ring_info.lds_size;
+ }
+
+ radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B228_SPI_SHADER_PGM_RSRC1_GS, 2);
+ radeon_emit(cmd_buffer->cs, rsrc1);
+ radeon_emit(cmd_buffer->cs, rsrc2 | S_00B22C_LDS_SIZE(lds_size));
+ }
+ }
+
+ radv_emit_shader_pointer(device, cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, next_stage->va, false);
+ return;
+ }
+
+ if (vs->info.vs.as_ls)
+ radv_emit_hw_ls(cmd_buffer, vs);
+ else if (vs->info.vs.as_es)
+ radv_emit_hw_es(cmd_buffer, vs);
+ else if (vs->info.is_ngg)
+ radv_emit_hw_ngg(cmd_buffer, NULL, vs);
+ else
+ radv_emit_hw_vs(cmd_buffer, vs);
+}
+
+static void
+radv_emit_tess_ctrl_shader(struct radv_cmd_buffer *cmd_buffer)
+{
+ const struct radv_shader *tcs = cmd_buffer->state.shaders[MESA_SHADER_TESS_CTRL];
+
+ if (tcs->info.merged_shader_compiled_separately) {
+ /* When VS+TCS are compiled separately on GFX9+, the VS will jump to the TCS and everything is
+ * emitted as part of the VS.
+ */
+ return;
+ }
+
+ radv_emit_hw_hs(cmd_buffer, tcs);
+}
+
+static void
+radv_emit_tess_eval_shader(struct radv_cmd_buffer *cmd_buffer)
+{
+ const struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
+ const struct radv_physical_device *pdev = radv_device_physical(device);
+ const struct radv_shader *tes = cmd_buffer->state.shaders[MESA_SHADER_TESS_EVAL];
+
+ if (tes->info.merged_shader_compiled_separately) {
+ assert(tes->info.next_stage == MESA_SHADER_GEOMETRY);
+
+ const struct radv_userdata_info *loc = &tes->info.user_sgprs_locs.shader_data[AC_UD_NEXT_STAGE_PC];
+ const struct radv_shader *gs = cmd_buffer->state.shaders[MESA_SHADER_GEOMETRY];
+ const uint32_t base_reg = tes->info.user_data_0;
+ uint32_t rsrc1, rsrc2;
+
+ assert(loc->sgpr_idx != -1 && loc->num_sgprs == 1);
+
+ radv_shader_combine_cfg_tes_gs(tes, gs, &rsrc1, &rsrc2);
+
+ radeon_set_sh_reg(cmd_buffer->cs, R_00B210_SPI_SHADER_PGM_LO_ES, tes->va >> 8);
+
+ unsigned lds_size;
+ if (gs->info.is_ngg) {
+ lds_size = DIV_ROUND_UP(gs->info.ngg_info.lds_size, pdev->info.lds_encode_granularity);
+ } else {
+ lds_size = gs->info.gs_ring_info.lds_size;
+ }
+
+ radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B228_SPI_SHADER_PGM_RSRC1_GS, 2);
+ radeon_emit(cmd_buffer->cs, rsrc1);
+ radeon_emit(cmd_buffer->cs, rsrc2 | S_00B22C_LDS_SIZE(lds_size));
+
+ radv_emit_shader_pointer(device, cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, gs->va, false);
+ return;
+ }
+
+ if (tes->info.is_ngg) {
+ radv_emit_hw_ngg(cmd_buffer, NULL, tes);
+ } else if (tes->info.tes.as_es) {
+ radv_emit_hw_es(cmd_buffer, tes);
+ } else {
+ radv_emit_hw_vs(cmd_buffer, tes);
+ }
+}
+
+static void
+radv_emit_hw_gs(struct radv_cmd_buffer *cmd_buffer, const struct radv_shader *gs)
+{
+ const struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
+ const struct radv_physical_device *pdev = radv_device_physical(device);
+ const struct radv_legacy_gs_info *gs_state = &gs->info.gs_ring_info;
+ const uint64_t va = radv_shader_get_va(gs);
+
+ radeon_set_context_reg_seq(cmd_buffer->cs, R_028A60_VGT_GSVS_RING_OFFSET_1, 3);
+ radeon_emit(cmd_buffer->cs, gs->info.regs.gs.vgt_gsvs_ring_offset[0]);
+ radeon_emit(cmd_buffer->cs, gs->info.regs.gs.vgt_gsvs_ring_offset[1]);
+ radeon_emit(cmd_buffer->cs, gs->info.regs.gs.vgt_gsvs_ring_offset[2]);
+
+ radeon_set_context_reg(cmd_buffer->cs, R_028AB0_VGT_GSVS_RING_ITEMSIZE, gs->info.regs.gs.vgt_gsvs_ring_itemsize);
+
+ radeon_set_context_reg_seq(cmd_buffer->cs, R_028B5C_VGT_GS_VERT_ITEMSIZE, 4);
+ radeon_emit(cmd_buffer->cs, gs->info.regs.gs.vgt_gs_vert_itemsize[0]);
+ radeon_emit(cmd_buffer->cs, gs->info.regs.gs.vgt_gs_vert_itemsize[1]);
+ radeon_emit(cmd_buffer->cs, gs->info.regs.gs.vgt_gs_vert_itemsize[2]);
+ radeon_emit(cmd_buffer->cs, gs->info.regs.gs.vgt_gs_vert_itemsize[3]);
+
+ radeon_set_context_reg(cmd_buffer->cs, R_028B90_VGT_GS_INSTANCE_CNT, gs->info.regs.gs.vgt_gs_instance_cnt);
+
+ if (pdev->info.gfx_level >= GFX9) {
+ if (!gs->info.merged_shader_compiled_separately) {
+
+ if (pdev->info.gfx_level >= GFX10) {
+ radeon_set_sh_reg(cmd_buffer->cs, R_00B320_SPI_SHADER_PGM_LO_ES, va >> 8);
+ } else {
+ radeon_set_sh_reg(cmd_buffer->cs, R_00B210_SPI_SHADER_PGM_LO_ES, va >> 8);
+ }
+
+ radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B228_SPI_SHADER_PGM_RSRC1_GS, 2);
+ radeon_emit(cmd_buffer->cs, gs->config.rsrc1);
+ radeon_emit(cmd_buffer->cs, gs->config.rsrc2 | S_00B22C_LDS_SIZE(gs_state->lds_size));
+ }
+
+ radeon_set_context_reg(cmd_buffer->cs, R_028A44_VGT_GS_ONCHIP_CNTL, gs->info.regs.vgt_gs_onchip_cntl);
+ radeon_set_context_reg(cmd_buffer->cs, R_028A94_VGT_GS_MAX_PRIMS_PER_SUBGROUP,
+ gs->info.regs.gs.vgt_gs_max_prims_per_subgroup);
+ } else {
+ radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B220_SPI_SHADER_PGM_LO_GS, 4);
+ radeon_emit(cmd_buffer->cs, va >> 8);
+ radeon_emit(cmd_buffer->cs, S_00B224_MEM_BASE(va >> 40));
+ radeon_emit(cmd_buffer->cs, gs->config.rsrc1);
+ radeon_emit(cmd_buffer->cs, gs->config.rsrc2);
+
+ /* GFX6-8: ESGS offchip ring buffer is allocated according to VGT_ESGS_RING_ITEMSIZE.
+ * GFX9+: Only used to set the GS input VGPRs, emulated in shaders.
+ */
+ radeon_set_context_reg(cmd_buffer->cs, R_028AAC_VGT_ESGS_RING_ITEMSIZE, gs->info.regs.gs.vgt_esgs_ring_itemsize);
+ }
+
+ radeon_set_sh_reg_idx(pdev, cmd_buffer->cs, R_00B21C_SPI_SHADER_PGM_RSRC3_GS, 3,
+ gs->info.regs.spi_shader_pgm_rsrc3_gs);
+
+ if (pdev->info.gfx_level >= GFX10) {
+ radeon_set_sh_reg_idx(pdev, cmd_buffer->cs, R_00B204_SPI_SHADER_PGM_RSRC4_GS, 3,
+ gs->info.regs.spi_shader_pgm_rsrc4_gs);
+ }
+}
+
+static void
+radv_emit_geometry_shader(struct radv_cmd_buffer *cmd_buffer)
+{
+ const struct radv_shader *gs = cmd_buffer->state.shaders[MESA_SHADER_GEOMETRY];
+ const struct radv_shader *es = cmd_buffer->state.shaders[MESA_SHADER_TESS_EVAL]
+ ? cmd_buffer->state.shaders[MESA_SHADER_TESS_EVAL]
+ : cmd_buffer->state.shaders[MESA_SHADER_VERTEX];
+ if (gs->info.is_ngg) {
+ radv_emit_hw_ngg(cmd_buffer, es, gs);
+ } else {
+ radv_emit_hw_gs(cmd_buffer, gs);
+ radv_emit_hw_vs(cmd_buffer, cmd_buffer->state.gs_copy_shader);
+ }
+
+ radeon_set_context_reg(cmd_buffer->cs, R_028B38_VGT_GS_MAX_VERT_OUT, gs->info.regs.vgt_gs_max_vert_out);
+
+ if (gs->info.merged_shader_compiled_separately) {
+ const struct radv_userdata_info *vgt_esgs_ring_itemsize = radv_get_user_sgpr(gs, AC_UD_VGT_ESGS_RING_ITEMSIZE);
+
+ assert(vgt_esgs_ring_itemsize->sgpr_idx != -1 && vgt_esgs_ring_itemsize->num_sgprs == 1);
+
+ radeon_set_sh_reg(cmd_buffer->cs, gs->info.user_data_0 + vgt_esgs_ring_itemsize->sgpr_idx * 4,
+ es->info.esgs_itemsize / 4);
+
+ if (gs->info.is_ngg) {
+ const struct radv_userdata_info *ngg_lds_layout = radv_get_user_sgpr(gs, AC_UD_NGG_LDS_LAYOUT);
+
+ assert(ngg_lds_layout->sgpr_idx != -1 && ngg_lds_layout->num_sgprs == 1);
+ assert(!(gs->info.ngg_info.esgs_ring_size & 0xffff0000) && !(gs->info.ngg_info.scratch_lds_base & 0xffff0000));
+
+ radeon_set_sh_reg(cmd_buffer->cs, gs->info.user_data_0 + ngg_lds_layout->sgpr_idx * 4,
+ SET_SGPR_FIELD(NGG_LDS_LAYOUT_GS_OUT_VERTEX_BASE, gs->info.ngg_info.esgs_ring_size) |
+ SET_SGPR_FIELD(NGG_LDS_LAYOUT_SCRATCH_BASE, gs->info.ngg_info.scratch_lds_base));
+ }
+ }
+}
+
+static void
+radv_emit_vgt_gs_out(struct radv_cmd_buffer *cmd_buffer, uint32_t vgt_gs_out_prim_type)
+{
+ const struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
+ const struct radv_physical_device *pdev = radv_device_physical(device);
+
+ if (pdev->info.gfx_level >= GFX11) {
+ radeon_set_uconfig_reg(cmd_buffer->cs, R_030998_VGT_GS_OUT_PRIM_TYPE, vgt_gs_out_prim_type);
+ } else {
+ radeon_set_context_reg(cmd_buffer->cs, R_028A6C_VGT_GS_OUT_PRIM_TYPE, vgt_gs_out_prim_type);
+ }
+}
+
+static void
+radv_emit_mesh_shader(struct radv_cmd_buffer *cmd_buffer)
+{
+ const struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
+ const struct radv_physical_device *pdev = radv_device_physical(device);
+ const struct radv_shader *ms = cmd_buffer->state.shaders[MESA_SHADER_MESH];
+ const uint32_t gs_out = radv_conv_gl_prim_to_gs_out(ms->info.ms.output_prim);
+
+ radv_emit_hw_ngg(cmd_buffer, NULL, ms);
+ radeon_set_context_reg(cmd_buffer->cs, R_028B38_VGT_GS_MAX_VERT_OUT, ms->info.regs.vgt_gs_max_vert_out);
+ radeon_set_uconfig_reg_idx(pdev, cmd_buffer->cs, R_030908_VGT_PRIMITIVE_TYPE, 1, V_008958_DI_PT_POINTLIST);
+
+ if (pdev->mesh_fast_launch_2) {
+ radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B2B0_SPI_SHADER_GS_MESHLET_DIM, 2);
+ radeon_emit(cmd_buffer->cs, ms->info.regs.ms.spi_shader_gs_meshlet_dim);
+ radeon_emit(cmd_buffer->cs, ms->info.regs.ms.spi_shader_gs_meshlet_exp_alloc);
+ }
+
+ radv_emit_vgt_gs_out(cmd_buffer, gs_out);
+}
+
+enum radv_ps_in_type {
+ radv_ps_in_interpolated,
+ radv_ps_in_flat,
+ radv_ps_in_explicit,
+ radv_ps_in_explicit_strict,
+ radv_ps_in_interpolated_fp16,
+ radv_ps_in_interpolated_fp16_hi,
+ radv_ps_in_per_prim_gfx103,
+ radv_ps_in_per_prim_gfx11,
+};
+
+static uint32_t
+offset_to_ps_input(const uint32_t offset, const enum radv_ps_in_type type)
+{
+ assert(offset != AC_EXP_PARAM_UNDEFINED);
+
+ if (offset >= AC_EXP_PARAM_DEFAULT_VAL_0000 && offset <= AC_EXP_PARAM_DEFAULT_VAL_1111) {
+ /* The input is a DEFAULT_VAL constant. */
+ return S_028644_OFFSET(0x20) | S_028644_DEFAULT_VAL(offset - AC_EXP_PARAM_DEFAULT_VAL_0000);
+ }
+
+ assert(offset <= AC_EXP_PARAM_OFFSET_31);
+ uint32_t ps_input_cntl = S_028644_OFFSET(offset);
+
+ switch (type) {
+ case radv_ps_in_explicit_strict:
+ /* Rotate parameter cache contents to strict vertex order. */
+ ps_input_cntl |= S_028644_ROTATE_PC_PTR(1);
+ FALLTHROUGH;
+ case radv_ps_in_explicit:
+ /* Force parameter cache to be read in passthrough mode. */
+ ps_input_cntl |= S_028644_OFFSET(1 << 5);
+ FALLTHROUGH;
+ case radv_ps_in_flat:
+ ps_input_cntl |= S_028644_FLAT_SHADE(1);
+ break;
+ case radv_ps_in_interpolated_fp16_hi:
+ ps_input_cntl |= S_028644_ATTR1_VALID(1);
+ FALLTHROUGH;
+ case radv_ps_in_interpolated_fp16:
+ /* These must be set even if only the high 16 bits are used. */
+ ps_input_cntl |= S_028644_FP16_INTERP_MODE(1) | S_028644_ATTR0_VALID(1);
+ break;
+ case radv_ps_in_per_prim_gfx11:
+ ps_input_cntl |= S_028644_PRIM_ATTR(1);
+ break;
+ case radv_ps_in_interpolated:
+ case radv_ps_in_per_prim_gfx103:
+ break;
+ }
+
+ return ps_input_cntl;
+}
+
+static void
+slot_to_ps_input(const struct radv_vs_output_info *outinfo, unsigned slot, uint32_t *ps_input_cntl, unsigned *ps_offset,
+ const bool use_default_0, const enum radv_ps_in_type type)
+{
+ unsigned vs_offset = outinfo->vs_output_param_offset[slot];
+
+ if (vs_offset == AC_EXP_PARAM_UNDEFINED) {
+ if (use_default_0)
+ vs_offset = AC_EXP_PARAM_DEFAULT_VAL_0000;
+ else
+ return;
+ }
+
+ ps_input_cntl[*ps_offset] = offset_to_ps_input(vs_offset, type);
+ ++(*ps_offset);
+}
+
+static void
+input_mask_to_ps_inputs(const struct radv_vs_output_info *outinfo, const struct radv_shader *ps, uint32_t input_mask,
+ uint32_t *ps_input_cntl, unsigned *ps_offset, const enum radv_ps_in_type default_type)
+{
+ u_foreach_bit (i, input_mask) {
+ unsigned vs_offset = outinfo->vs_output_param_offset[VARYING_SLOT_VAR0 + i];
+ if (vs_offset == AC_EXP_PARAM_UNDEFINED) {
+ ps_input_cntl[*ps_offset] = S_028644_OFFSET(0x20);
+ ++(*ps_offset);
+ continue;
+ }
+
+ enum radv_ps_in_type type = default_type;
+
+ if (ps->info.ps.flat_shaded_mask & BITFIELD_BIT(*ps_offset))
+ type = radv_ps_in_flat;
+ else if (ps->info.ps.explicit_shaded_mask & BITFIELD_BIT(*ps_offset))
+ type = radv_ps_in_explicit;
+ else if (ps->info.ps.explicit_strict_shaded_mask & BITFIELD_BIT(*ps_offset))
+ type = radv_ps_in_explicit_strict;
+ else if (ps->info.ps.float16_hi_shaded_mask & BITFIELD_BIT(*ps_offset))
+ type = radv_ps_in_interpolated_fp16_hi;
+ else if (ps->info.ps.float16_shaded_mask & BITFIELD_BIT(*ps_offset))
+ type = radv_ps_in_interpolated_fp16;
+
+ ps_input_cntl[*ps_offset] = offset_to_ps_input(vs_offset, type);
+ ++(*ps_offset);
+ }
+}
+
+static void
+radv_emit_ps_inputs(struct radv_cmd_buffer *cmd_buffer)
+{
+ const struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
+ const struct radv_physical_device *pdev = radv_device_physical(device);
+ const struct radv_shader *ps = cmd_buffer->state.shaders[MESA_SHADER_FRAGMENT];
+ const struct radv_shader *last_vgt_shader = cmd_buffer->state.last_vgt_shader;
+ const struct radv_vs_output_info *outinfo = &last_vgt_shader->info.outinfo;
+ const bool mesh = last_vgt_shader->info.stage == MESA_SHADER_MESH;
+ const bool gfx11plus = pdev->info.gfx_level >= GFX11;
+ const enum radv_ps_in_type per_prim = gfx11plus ? radv_ps_in_per_prim_gfx11 : radv_ps_in_per_prim_gfx103;
+
+ uint32_t ps_input_cntl[32];
+ unsigned ps_offset = 0;
+
+ if (ps->info.ps.prim_id_input && !mesh)
+ slot_to_ps_input(outinfo, VARYING_SLOT_PRIMITIVE_ID, ps_input_cntl, &ps_offset, false, radv_ps_in_flat);
+
+ if (ps->info.ps.layer_input && !mesh)
+ slot_to_ps_input(outinfo, VARYING_SLOT_LAYER, ps_input_cntl, &ps_offset, true, radv_ps_in_flat);
+
+ if (ps->info.ps.viewport_index_input && !mesh)
+ slot_to_ps_input(outinfo, VARYING_SLOT_VIEWPORT, ps_input_cntl, &ps_offset, true, radv_ps_in_flat);
+
+ if (ps->info.ps.has_pcoord)
+ ps_input_cntl[ps_offset++] = S_028644_PT_SPRITE_TEX(1) | S_028644_OFFSET(0x20);
+
+ if (ps->info.ps.input_clips_culls_mask & 0x0f)
+ slot_to_ps_input(outinfo, VARYING_SLOT_CLIP_DIST0, ps_input_cntl, &ps_offset, false, radv_ps_in_interpolated);
+
+ if (ps->info.ps.input_clips_culls_mask & 0xf0)
+ slot_to_ps_input(outinfo, VARYING_SLOT_CLIP_DIST1, ps_input_cntl, &ps_offset, false, radv_ps_in_interpolated);
+
+ input_mask_to_ps_inputs(outinfo, ps, ps->info.ps.input_mask, ps_input_cntl, &ps_offset, radv_ps_in_interpolated);
+
+ /* Per-primitive PS inputs: the HW needs these to be last. */
+
+ if (ps->info.ps.prim_id_input && mesh)
+ slot_to_ps_input(outinfo, VARYING_SLOT_PRIMITIVE_ID, ps_input_cntl, &ps_offset, false, per_prim);
+
+ if (ps->info.ps.layer_input && mesh)
+ slot_to_ps_input(outinfo, VARYING_SLOT_LAYER, ps_input_cntl, &ps_offset, true, per_prim);
+
+ if (ps->info.ps.viewport_index_input && mesh)
+ slot_to_ps_input(outinfo, VARYING_SLOT_VIEWPORT, ps_input_cntl, &ps_offset, true, per_prim);
+
+ input_mask_to_ps_inputs(outinfo, ps, ps->info.ps.input_per_primitive_mask, ps_input_cntl, &ps_offset, per_prim);
+
+ if (ps_offset) {
+ radeon_set_context_reg_seq(cmd_buffer->cs, R_028644_SPI_PS_INPUT_CNTL_0, ps_offset);
+ for (unsigned i = 0; i < ps_offset; i++) {
+ radeon_emit(cmd_buffer->cs, ps_input_cntl[i]);
+ }
+ }
+}
+
+static void
+radv_emit_fragment_shader(struct radv_cmd_buffer *cmd_buffer)
+{
+ const struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
+ const struct radv_physical_device *pdev = radv_device_physical(device);
+ const struct radv_shader *ps = cmd_buffer->state.shaders[MESA_SHADER_FRAGMENT];
+ const uint64_t va = radv_shader_get_va(ps);
+
+ radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B020_SPI_SHADER_PGM_LO_PS, 4);
+ radeon_emit(cmd_buffer->cs, va >> 8);
+ radeon_emit(cmd_buffer->cs, S_00B024_MEM_BASE(va >> 40));
+ radeon_emit(cmd_buffer->cs, ps->config.rsrc1);
+ radeon_emit(cmd_buffer->cs, ps->config.rsrc2);
+
+ radeon_set_context_reg_seq(cmd_buffer->cs, R_0286CC_SPI_PS_INPUT_ENA, 2);
+ radeon_emit(cmd_buffer->cs, ps->config.spi_ps_input_ena);
+ radeon_emit(cmd_buffer->cs, ps->config.spi_ps_input_addr);
+
+ radeon_set_context_reg(cmd_buffer->cs, R_0286D8_SPI_PS_IN_CONTROL, ps->info.regs.ps.spi_ps_in_control);
+ radeon_set_context_reg(cmd_buffer->cs, R_028710_SPI_SHADER_Z_FORMAT, ps->info.regs.ps.spi_shader_z_format);
+
+ if (pdev->info.gfx_level >= GFX9 && pdev->info.gfx_level < GFX11)
+ radeon_set_context_reg(cmd_buffer->cs, R_028C40_PA_SC_SHADER_CONTROL, ps->info.regs.ps.pa_sc_shader_control);
+}
+
+static void
+radv_emit_vgt_reuse(struct radv_cmd_buffer *cmd_buffer, const struct radv_vgt_shader_key *key)
+{
+ const struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
+ const struct radv_physical_device *pdev = radv_device_physical(device);
+ const struct radv_shader *tes = radv_get_shader(cmd_buffer->state.shaders, MESA_SHADER_TESS_EVAL);
+
+ if (pdev->info.gfx_level == GFX10_3) {
+ /* Legacy Tess+GS should disable reuse to prevent hangs on GFX10.3. */
+ const bool has_legacy_tess_gs = key->tess && key->gs && !key->ngg;
+
+ radeon_set_context_reg(cmd_buffer->cs, R_028AB4_VGT_REUSE_OFF, S_028AB4_REUSE_OFF(has_legacy_tess_gs));
+ }
+
+ if (pdev->info.family >= CHIP_POLARIS10 && pdev->info.gfx_level < GFX10) {
+ unsigned vtx_reuse_depth = 30;
+ if (tes && tes->info.tes.spacing == TESS_SPACING_FRACTIONAL_ODD) {
+ vtx_reuse_depth = 14;
+ }
+ radeon_set_context_reg(cmd_buffer->cs, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL,
+ S_028C58_VTX_REUSE_DEPTH(vtx_reuse_depth));
+ }
+}
+
+static void
+radv_emit_vgt_shader_config(struct radv_cmd_buffer *cmd_buffer, const struct radv_vgt_shader_key *key)
+{
+ const struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
+ const struct radv_physical_device *pdev = radv_device_physical(device);
+ uint32_t stages = 0;
+
+ if (key->tess) {
+ stages |= S_028B54_LS_EN(V_028B54_LS_STAGE_ON) | S_028B54_HS_EN(1) | S_028B54_DYNAMIC_HS(1);
+
+ if (key->gs)
+ stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_DS) | S_028B54_GS_EN(1);
+ else if (key->ngg)
+ stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_DS);
+ else
+ stages |= S_028B54_VS_EN(V_028B54_VS_STAGE_DS);
+ } else if (key->gs) {
+ stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_REAL) | S_028B54_GS_EN(1);
+ } else if (key->mesh) {
+ assert(!key->ngg_passthrough);
+ unsigned gs_fast_launch = pdev->mesh_fast_launch_2 ? 2 : 1;
+ stages |=
+ S_028B54_GS_EN(1) | S_028B54_GS_FAST_LAUNCH(gs_fast_launch) | S_028B54_NGG_WAVE_ID_EN(key->mesh_scratch_ring);
+ } else if (key->ngg) {
+ stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_REAL);
+ }
+
+ if (key->ngg) {
+ stages |= S_028B54_PRIMGEN_EN(1) | S_028B54_NGG_WAVE_ID_EN(key->ngg_streamout) |
+ S_028B54_PRIMGEN_PASSTHRU_EN(key->ngg_passthrough) |
+ S_028B54_PRIMGEN_PASSTHRU_NO_MSG(key->ngg_passthrough && pdev->info.family >= CHIP_NAVI23);
+ } else if (key->gs) {
+ stages |= S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER);
+ }
+
+ if (pdev->info.gfx_level >= GFX9)
+ stages |= S_028B54_MAX_PRIMGRP_IN_WAVE(2);
+
+ if (pdev->info.gfx_level >= GFX10) {
+ stages |= S_028B54_HS_W32_EN(key->hs_wave32) | S_028B54_GS_W32_EN(key->gs_wave32) |
+ S_028B54_VS_W32_EN(pdev->info.gfx_level < GFX11 && key->vs_wave32);
+ /* Legacy GS only supports Wave64. Read it as an implication. */
+ assert(!(key->gs && !key->ngg) || !key->gs_wave32);
+ }
+
+ radeon_set_context_reg(cmd_buffer->cs, R_028B54_VGT_SHADER_STAGES_EN, stages);
+}
+
+static void
+gfx103_emit_vgt_draw_payload_cntl(struct radv_cmd_buffer *cmd_buffer)
+{
+ const struct radv_shader *mesh_shader = cmd_buffer->state.shaders[MESA_SHADER_MESH];
+ const bool enable_vrs = cmd_buffer->state.uses_vrs;
+ bool enable_prim_payload = false;
+
+ /* Enables the second channel of the primitive export instruction.
+ * This channel contains: VRS rate x, y, viewport and layer.
+ */
+ if (mesh_shader) {
+ const struct radv_vs_output_info *outinfo = &mesh_shader->info.outinfo;
+
+ enable_prim_payload = (outinfo->writes_viewport_index_per_primitive || outinfo->writes_layer_per_primitive ||
+ outinfo->writes_primitive_shading_rate_per_primitive);
+ }
+
+ radeon_set_context_reg(cmd_buffer->cs, R_028A98_VGT_DRAW_PAYLOAD_CNTL,
+ S_028A98_EN_VRS_RATE(enable_vrs) | S_028A98_EN_PRIM_PAYLOAD(enable_prim_payload));
+}
+
+static void
+gfx103_emit_vrs_state(struct radv_cmd_buffer *cmd_buffer)
+{
+ const struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
+ const struct radv_physical_device *pdev = radv_device_physical(device);
+ const struct radv_shader *ps = cmd_buffer->state.shaders[MESA_SHADER_FRAGMENT];
+ const bool force_vrs_per_vertex = cmd_buffer->state.last_vgt_shader->info.force_vrs_per_vertex;
+ const bool enable_vrs_coarse_shading = cmd_buffer->state.uses_vrs_coarse_shading;
+ uint32_t mode = V_028064_SC_VRS_COMB_MODE_PASSTHRU;
+ uint8_t rate_x = 0, rate_y = 0;
+
+ if (enable_vrs_coarse_shading) {
+ /* When per-draw VRS is not enabled at all, try enabling VRS coarse shading 2x2 if the driver
+ * determined that it's safe to enable.
+ */
+ mode = V_028064_SC_VRS_COMB_MODE_OVERRIDE;
+ rate_x = rate_y = 1;
+ } else if (force_vrs_per_vertex) {
+ /* Otherwise, if per-draw VRS is not enabled statically, try forcing per-vertex VRS if
+ * requested by the user. Note that vkd3d-proton always has to declare VRS as dynamic because
+ * in DX12 it's fully dynamic.
+ */
+ radeon_set_context_reg(cmd_buffer->cs, R_028848_PA_CL_VRS_CNTL,
+ S_028848_SAMPLE_ITER_COMBINER_MODE(V_028848_SC_VRS_COMB_MODE_OVERRIDE) |
+ S_028848_VERTEX_RATE_COMBINER_MODE(V_028848_SC_VRS_COMB_MODE_OVERRIDE));
+
+ /* If the shader is using discard, turn off coarse shading because discard at 2x2 pixel
+ * granularity degrades quality too much. MIN allows sample shading but not coarse shading.
+ */
+ mode = ps->info.ps.can_discard ? V_028064_SC_VRS_COMB_MODE_MIN : V_028064_SC_VRS_COMB_MODE_PASSTHRU;
+ }
+
+ if (pdev->info.gfx_level < GFX11) {
+ radeon_set_context_reg(cmd_buffer->cs, R_028064_DB_VRS_OVERRIDE_CNTL,
+ S_028064_VRS_OVERRIDE_RATE_COMBINER_MODE(mode) | S_028064_VRS_OVERRIDE_RATE_X(rate_x) |
+ S_028064_VRS_OVERRIDE_RATE_Y(rate_y));
+ }
+}
+
+static void
+radv_emit_graphics_shaders(struct radv_cmd_buffer *cmd_buffer)
+{
+ struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
+ const struct radv_physical_device *pdev = radv_device_physical(device);
+
+ radv_foreach_stage(s, cmd_buffer->state.active_stages & RADV_GRAPHICS_STAGE_BITS)
+ {
+ switch (s) {
+ case MESA_SHADER_VERTEX:
+ radv_emit_vertex_shader(cmd_buffer);
+ break;
+ case MESA_SHADER_TESS_CTRL:
+ radv_emit_tess_ctrl_shader(cmd_buffer);
+ break;
+ case MESA_SHADER_TESS_EVAL:
+ radv_emit_tess_eval_shader(cmd_buffer);
+ break;
+ case MESA_SHADER_GEOMETRY:
+ radv_emit_geometry_shader(cmd_buffer);
+ break;
+ case MESA_SHADER_FRAGMENT:
+ radv_emit_fragment_shader(cmd_buffer);
+ radv_emit_ps_inputs(cmd_buffer);
+ break;
+ case MESA_SHADER_MESH:
+ radv_emit_mesh_shader(cmd_buffer);
+ break;
+ case MESA_SHADER_TASK:
+ radv_emit_compute_shader(pdev, cmd_buffer->gang.cs, cmd_buffer->state.shaders[MESA_SHADER_TASK]);
+ break;
+ default:
+ unreachable("invalid bind stage");
+ }
+ }
+
+ const struct radv_vgt_shader_key vgt_shader_cfg_key =
+ radv_get_vgt_shader_key(device, cmd_buffer->state.shaders, cmd_buffer->state.gs_copy_shader);
+
+ radv_emit_vgt_gs_mode(cmd_buffer);
+ radv_emit_vgt_reuse(cmd_buffer, &vgt_shader_cfg_key);
+ radv_emit_vgt_shader_config(cmd_buffer, &vgt_shader_cfg_key);
+
+ if (pdev->info.gfx_level >= GFX10_3) {
+ gfx103_emit_vgt_draw_payload_cntl(cmd_buffer);
+ gfx103_emit_vrs_state(cmd_buffer);
+ }
+
+ cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_GRAPHICS_SHADERS;
+}
+
+static void
radv_emit_graphics_pipeline(struct radv_cmd_buffer *cmd_buffer)
{
struct radv_graphics_pipeline *pipeline = cmd_buffer->state.graphics_pipeline;
@@ -1948,15 +2764,7 @@ radv_emit_graphics_pipeline(struct radv_cmd_buffer *cmd_buffer)
cmd_buffer->state.dirty |= RADV_CMD_DIRTY_FRAMEBUFFER;
}
- radeon_emit_array(cmd_buffer->cs, pipeline->base.cs.buf, pipeline->base.cs.cdw);
-
- if (!cmd_buffer->state.emitted_graphics_pipeline ||
- cmd_buffer->state.emitted_graphics_pipeline->base.ctx_cs.cdw != pipeline->base.ctx_cs.cdw ||
- cmd_buffer->state.emitted_graphics_pipeline->base.ctx_cs_hash != pipeline->base.ctx_cs_hash ||
- memcmp(cmd_buffer->state.emitted_graphics_pipeline->base.ctx_cs.buf, pipeline->base.ctx_cs.buf,
- pipeline->base.ctx_cs.cdw * 4)) {
- radeon_emit_array(cmd_buffer->cs, pipeline->base.ctx_cs.buf, pipeline->base.ctx_cs.cdw);
- }
+ radv_emit_graphics_shaders(cmd_buffer);
if (device->pbb_allowed) {
const struct radv_binning_settings *settings = &pdev->binning_settings;
@@ -1974,14 +2782,9 @@ radv_emit_graphics_pipeline(struct radv_cmd_buffer *cmd_buffer)
if (pipeline->sqtt_shaders_reloc) {
/* Emit shaders relocation because RGP requires them to be contiguous in memory. */
radv_sqtt_emit_relocated_shaders(cmd_buffer, pipeline);
- }
- struct radv_shader *task_shader = cmd_buffer->state.shaders[MESA_SHADER_TASK];
- if (task_shader) {
- radv_emit_compute_shader(pdev, cmd_buffer->gang.cs, task_shader);
-
- /* Relocate the task shader because RGP requires shaders to be contiguous in memory. */
- if (pipeline->sqtt_shaders_reloc) {
+ struct radv_shader *task_shader = cmd_buffer->state.shaders[MESA_SHADER_TASK];
+ if (task_shader) {
const struct radv_sqtt_shaders_reloc *reloc = pipeline->sqtt_shaders_reloc;
const uint64_t va = reloc->va[MESA_SHADER_TASK];
@@ -2340,7 +3143,7 @@ radv_emit_primitive_topology(struct radv_cmd_buffer *cmd_buffer)
radeon_set_config_reg(cmd_buffer->cs, R_008958_VGT_PRIMITIVE_TYPE, d->vk.ia.primitive_topology);
}
- radv_emit_vgt_gs_out(device, cmd_buffer->cs, cmd_buffer->cs, vgt_gs_out_prim_type);
+ radv_emit_vgt_gs_out(cmd_buffer, vgt_gs_out_prim_type);
if (loc->sgpr_idx == -1)
return;
@@ -9483,86 +10286,6 @@ radv_cmdbuf_get_last_vgt_api_stage(const struct radv_cmd_buffer *cmd_buffer)
}
static void
-radv_emit_graphics_shaders(struct radv_cmd_buffer *cmd_buffer)
-{
- struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
- const struct radv_physical_device *pdev = radv_device_physical(device);
- const struct radv_shader *last_vgt_shader = cmd_buffer->state.last_vgt_shader;
- struct radeon_cmdbuf *cs = cmd_buffer->cs;
-
- radv_foreach_stage(s, cmd_buffer->state.active_stages & RADV_GRAPHICS_STAGE_BITS)
- {
- switch (s) {
- case MESA_SHADER_VERTEX: {
- const struct radv_shader *vs = cmd_buffer->state.shaders[MESA_SHADER_VERTEX];
- struct radv_shader *next_stage = NULL;
-
- if (vs->info.merged_shader_compiled_separately) {
- assert(vs->info.next_stage == MESA_SHADER_TESS_CTRL || vs->info.next_stage == MESA_SHADER_GEOMETRY);
- next_stage = cmd_buffer->state.shaders[vs->info.next_stage];
- }
-
- radv_emit_vertex_shader(device, cs, cs, vs, next_stage);
- break;
- }
- case MESA_SHADER_TESS_CTRL:
- radv_emit_tess_ctrl_shader(device, cs, cmd_buffer->state.shaders[MESA_SHADER_TESS_CTRL]);
- break;
- case MESA_SHADER_TESS_EVAL: {
- const struct radv_shader *tes = cmd_buffer->state.shaders[MESA_SHADER_TESS_EVAL];
- struct radv_shader *gs = NULL;
-
- if (tes->info.merged_shader_compiled_separately) {
- assert(tes->info.next_stage == MESA_SHADER_GEOMETRY);
- gs = cmd_buffer->state.shaders[MESA_SHADER_GEOMETRY];
- }
-
- radv_emit_tess_eval_shader(device, cs, cs, tes, gs);
- break;
- }
- case MESA_SHADER_GEOMETRY: {
- struct radv_shader *es = cmd_buffer->state.shaders[MESA_SHADER_TESS_EVAL]
- ? cmd_buffer->state.shaders[MESA_SHADER_TESS_EVAL]
- : cmd_buffer->state.shaders[MESA_SHADER_VERTEX];
-
- radv_emit_geometry_shader(device, cs, cs, cmd_buffer->state.shaders[MESA_SHADER_GEOMETRY], es,
- cmd_buffer->state.gs_copy_shader);
- break;
- }
- case MESA_SHADER_FRAGMENT:
- radv_emit_fragment_shader(device, cs, cs, cmd_buffer->state.shaders[MESA_SHADER_FRAGMENT]);
- radv_emit_ps_inputs(device, cs, last_vgt_shader, cmd_buffer->state.shaders[MESA_SHADER_FRAGMENT]);
- break;
- case MESA_SHADER_MESH:
- radv_emit_mesh_shader(device, cs, cs, cmd_buffer->state.shaders[MESA_SHADER_MESH]);
- break;
- case MESA_SHADER_TASK:
- radv_emit_compute_shader(pdev, cmd_buffer->gang.cs, cmd_buffer->state.shaders[MESA_SHADER_TASK]);
- break;
- default:
- unreachable("invalid bind stage");
- }
- }
-
- /* Emit graphics states related to shaders. */
- const struct radv_vgt_shader_key vgt_shader_cfg_key =
- radv_get_vgt_shader_key(device, cmd_buffer->state.shaders, cmd_buffer->state.gs_copy_shader);
-
- radv_emit_vgt_gs_mode(device, cs, last_vgt_shader);
- radv_emit_vgt_reuse(device, cs, radv_get_shader(cmd_buffer->state.shaders, MESA_SHADER_TESS_EVAL),
- &vgt_shader_cfg_key);
- radv_emit_vgt_shader_config(device, cs, &vgt_shader_cfg_key);
-
- if (pdev->info.gfx_level >= GFX10_3) {
- gfx103_emit_vgt_draw_payload_cntl(cs, cmd_buffer->state.shaders[MESA_SHADER_MESH], cmd_buffer->state.uses_vrs);
- gfx103_emit_vrs_state(device, cs, cmd_buffer->state.shaders[MESA_SHADER_FRAGMENT],
- cmd_buffer->state.uses_vrs_coarse_shading, last_vgt_shader->info.force_vrs_per_vertex);
- }
-
- cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_GRAPHICS_SHADERS;
-}
-
-static void
radv_emit_color_output_state(struct radv_cmd_buffer *cmd_buffer)
{
uint32_t col_format_compacted = radv_compact_spi_shader_col_format(cmd_buffer->state.spi_shader_col_format);
diff --git a/src/amd/vulkan/radv_pipeline.c b/src/amd/vulkan/radv_pipeline.c
index a50805207f9..e5c0aee56aa 100644
--- a/src/amd/vulkan/radv_pipeline.c
+++ b/src/amd/vulkan/radv_pipeline.c
@@ -97,9 +97,6 @@ radv_pipeline_destroy(struct radv_device *device, struct radv_pipeline *pipeline
unreachable("invalid pipeline type");
}
- if (pipeline->cs.buf)
- free(pipeline->cs.buf);
-
radv_rmv_log_resource_destroy(device, (uint64_t)radv_pipeline_to_handle(pipeline));
vk_object_base_finish(&pipeline->base);
vk_free2(&device->vk.alloc, allocator, pipeline);
diff --git a/src/amd/vulkan/radv_pipeline.h b/src/amd/vulkan/radv_pipeline.h
index 6a7be01db3e..43b3f94d2a5 100644
--- a/src/amd/vulkan/radv_pipeline.h
+++ b/src/amd/vulkan/radv_pipeline.h
@@ -51,10 +51,6 @@ struct radv_pipeline {
struct radv_shader *shaders[MESA_VULKAN_SHADER_STAGES];
struct radv_shader *gs_copy_shader;
- struct radeon_cmdbuf cs;
- uint32_t ctx_cs_hash;
- struct radeon_cmdbuf ctx_cs;
-
uint32_t user_data_0[MESA_VULKAN_SHADER_STAGES];
/* Unique pipeline hash identifier. */
diff --git a/src/amd/vulkan/radv_pipeline_graphics.c b/src/amd/vulkan/radv_pipeline_graphics.c
index cb770265de6..0b394f01765 100644
--- a/src/amd/vulkan/radv_pipeline_graphics.c
+++ b/src/amd/vulkan/radv_pipeline_graphics.c
@@ -2753,624 +2753,6 @@ done:
return result;
}
-void
-radv_emit_vgt_gs_mode(const struct radv_device *device, struct radeon_cmdbuf *ctx_cs,
- const struct radv_shader *last_vgt_api_shader)
-{
- const struct radv_physical_device *pdev = radv_device_physical(device);
- const struct radv_shader_info *info = &last_vgt_api_shader->info;
- unsigned vgt_primitiveid_en = 0;
- uint32_t vgt_gs_mode = 0;
-
- if (info->is_ngg)
- return;
-
- if (info->stage == MESA_SHADER_GEOMETRY) {
- vgt_gs_mode = ac_vgt_gs_mode(info->gs.vertices_out, pdev->info.gfx_level);
- } else if (info->outinfo.export_prim_id || info->uses_prim_id) {
- vgt_gs_mode = S_028A40_MODE(V_028A40_GS_SCENARIO_A);
- vgt_primitiveid_en |= S_028A84_PRIMITIVEID_EN(1);
- }
-
- radeon_set_context_reg(ctx_cs, R_028A84_VGT_PRIMITIVEID_EN, vgt_primitiveid_en);
- radeon_set_context_reg(ctx_cs, R_028A40_VGT_GS_MODE, vgt_gs_mode);
-}
-
-static void
-radv_emit_hw_vs(const struct radv_device *device, struct radeon_cmdbuf *ctx_cs, struct radeon_cmdbuf *cs,
- const struct radv_shader *shader)
-{
- const struct radv_physical_device *pdev = radv_device_physical(device);
- uint64_t va = radv_shader_get_va(shader);
-
- radeon_set_sh_reg_seq(cs, R_00B120_SPI_SHADER_PGM_LO_VS, 4);
- radeon_emit(cs, va >> 8);
- radeon_emit(cs, S_00B124_MEM_BASE(va >> 40));
- radeon_emit(cs, shader->config.rsrc1);
- radeon_emit(cs, shader->config.rsrc2);
-
- radeon_set_context_reg(ctx_cs, R_0286C4_SPI_VS_OUT_CONFIG, shader->info.regs.spi_vs_out_config);
- radeon_set_context_reg(ctx_cs, R_02870C_SPI_SHADER_POS_FORMAT, shader->info.regs.spi_shader_pos_format);
- radeon_set_context_reg(ctx_cs, R_02881C_PA_CL_VS_OUT_CNTL, shader->info.regs.pa_cl_vs_out_cntl);
-
- if (pdev->info.gfx_level <= GFX8)
- radeon_set_context_reg(ctx_cs, R_028AB4_VGT_REUSE_OFF, shader->info.regs.vs.vgt_reuse_off);
-
- if (pdev->info.gfx_level >= GFX7) {
- radeon_set_sh_reg_idx(pdev, cs, R_00B118_SPI_SHADER_PGM_RSRC3_VS, 3,
- shader->info.regs.vs.spi_shader_pgm_rsrc3_vs);
- radeon_set_sh_reg(cs, R_00B11C_SPI_SHADER_LATE_ALLOC_VS, shader->info.regs.vs.spi_shader_late_alloc_vs);
-
- if (pdev->info.gfx_level >= GFX10) {
- radeon_set_uconfig_reg(cs, R_030980_GE_PC_ALLOC, shader->info.regs.ge_pc_alloc);
-
- if (shader->info.stage == MESA_SHADER_TESS_EVAL) {
- radeon_set_context_reg(ctx_cs, R_028A44_VGT_GS_ONCHIP_CNTL, shader->info.regs.vgt_gs_onchip_cntl);
- }
- }
- }
-}
-
-static void
-radv_emit_hw_es(struct radeon_cmdbuf *cs, const struct radv_shader *shader)
-{
- uint64_t va = radv_shader_get_va(shader);
-
- radeon_set_sh_reg_seq(cs, R_00B320_SPI_SHADER_PGM_LO_ES, 4);
- radeon_emit(cs, va >> 8);
- radeon_emit(cs, S_00B324_MEM_BASE(va >> 40));
- radeon_emit(cs, shader->config.rsrc1);
- radeon_emit(cs, shader->config.rsrc2);
-}
-
-static void
-radv_emit_hw_ls(struct radeon_cmdbuf *cs, const struct radv_shader *shader)
-{
- uint64_t va = radv_shader_get_va(shader);
-
- radeon_set_sh_reg(cs, R_00B520_SPI_SHADER_PGM_LO_LS, va >> 8);
-
- radeon_set_sh_reg(cs, R_00B528_SPI_SHADER_PGM_RSRC1_LS, shader->config.rsrc1);
-}
-
-static void
-radv_emit_hw_ngg(const struct radv_device *device, struct radeon_cmdbuf *ctx_cs, struct radeon_cmdbuf *cs,
- const struct radv_shader *es, const struct radv_shader *shader)
-{
- const struct radv_physical_device *pdev = radv_device_physical(device);
- uint64_t va = radv_shader_get_va(shader);
- gl_shader_stage es_type;
- const struct gfx10_ngg_info *ngg_state = &shader->info.ngg_info;
-
- if (shader->info.stage == MESA_SHADER_GEOMETRY) {
- if (shader->info.merged_shader_compiled_separately) {
- es_type = es->info.stage;
- } else {
- es_type = shader->info.gs.es_type;
- }
- } else {
- es_type = shader->info.stage;
- }
-
- if (!shader->info.merged_shader_compiled_separately) {
- radeon_set_sh_reg(cs, R_00B320_SPI_SHADER_PGM_LO_ES, va >> 8);
-
- radeon_set_sh_reg_seq(cs, R_00B228_SPI_SHADER_PGM_RSRC1_GS, 2);
- radeon_emit(cs, shader->config.rsrc1);
- radeon_emit(cs, shader->config.rsrc2);
- }
-
- const struct radv_vs_output_info *outinfo = &shader->info.outinfo;
-
- bool es_enable_prim_id = outinfo->export_prim_id || (es && es->info.uses_prim_id);
- bool break_wave_at_eoi = false;
-
- if (es_type == MESA_SHADER_TESS_EVAL) {
- if (es_enable_prim_id || (shader->info.uses_prim_id))
- break_wave_at_eoi = true;
- }
-
- radeon_set_context_reg(ctx_cs, R_0286C4_SPI_VS_OUT_CONFIG, shader->info.regs.spi_vs_out_config);
-
- radeon_set_context_reg_seq(ctx_cs, R_028708_SPI_SHADER_IDX_FORMAT, 2);
- radeon_emit(ctx_cs, shader->info.regs.ngg.spi_shader_idx_format);
- radeon_emit(ctx_cs, shader->info.regs.spi_shader_pos_format);
-
- radeon_set_context_reg(ctx_cs, R_02881C_PA_CL_VS_OUT_CNTL, shader->info.regs.pa_cl_vs_out_cntl);
-
- radeon_set_context_reg(ctx_cs, R_028A84_VGT_PRIMITIVEID_EN,
- shader->info.regs.ngg.vgt_primitiveid_en | S_028A84_PRIMITIVEID_EN(es_enable_prim_id));
-
- radeon_set_context_reg(ctx_cs, R_0287FC_GE_MAX_OUTPUT_PER_SUBGROUP,
- shader->info.regs.ngg.ge_max_output_per_subgroup);
-
- radeon_set_context_reg(ctx_cs, R_028B4C_GE_NGG_SUBGRP_CNTL, shader->info.regs.ngg.ge_ngg_subgrp_cntl);
-
- radeon_set_context_reg(ctx_cs, R_028B90_VGT_GS_INSTANCE_CNT, shader->info.regs.vgt_gs_instance_cnt);
-
- uint32_t ge_cntl = shader->info.regs.ngg.ge_cntl;
- if (pdev->info.gfx_level >= GFX11) {
- ge_cntl |= S_03096C_BREAK_PRIMGRP_AT_EOI(break_wave_at_eoi);
- } else {
- ge_cntl |= S_03096C_BREAK_WAVE_AT_EOI(break_wave_at_eoi);
-
- /* Bug workaround for a possible hang with non-tessellation cases.
- * Tessellation always sets GE_CNTL.VERT_GRP_SIZE = 0
- *
- * Requirement: GE_CNTL.VERT_GRP_SIZE = VGT_GS_ONCHIP_CNTL.ES_VERTS_PER_SUBGRP - 5
- */
- if (pdev->info.gfx_level == GFX10 && es_type != MESA_SHADER_TESS_EVAL && ngg_state->hw_max_esverts != 256) {
- ge_cntl &= C_03096C_VERT_GRP_SIZE;
-
- if (ngg_state->hw_max_esverts > 5) {
- ge_cntl |= S_03096C_VERT_GRP_SIZE(ngg_state->hw_max_esverts - 5);
- }
- }
-
- radeon_set_context_reg(ctx_cs, R_028A44_VGT_GS_ONCHIP_CNTL, shader->info.regs.vgt_gs_onchip_cntl);
- }
-
- radeon_set_uconfig_reg(cs, R_03096C_GE_CNTL, ge_cntl);
-
- radeon_set_sh_reg_idx(pdev, cs, R_00B21C_SPI_SHADER_PGM_RSRC3_GS, 3, shader->info.regs.spi_shader_pgm_rsrc3_gs);
- radeon_set_sh_reg_idx(pdev, cs, R_00B204_SPI_SHADER_PGM_RSRC4_GS, 3, shader->info.regs.spi_shader_pgm_rsrc4_gs);
-
- radeon_set_uconfig_reg(cs, R_030980_GE_PC_ALLOC, shader->info.regs.ge_pc_alloc);
-}
-
-static void
-radv_emit_hw_hs(const struct radv_device *device, struct radeon_cmdbuf *cs, const struct radv_shader *shader)
-{
- const struct radv_physical_device *pdev = radv_device_physical(device);
- uint64_t va = radv_shader_get_va(shader);
-
- if (pdev->info.gfx_level >= GFX9) {
- if (pdev->info.gfx_level >= GFX10) {
- radeon_set_sh_reg(cs, R_00B520_SPI_SHADER_PGM_LO_LS, va >> 8);
- } else {
- radeon_set_sh_reg(cs, R_00B410_SPI_SHADER_PGM_LO_LS, va >> 8);
- }
-
- radeon_set_sh_reg(cs, R_00B428_SPI_SHADER_PGM_RSRC1_HS, shader->config.rsrc1);
- } else {
- radeon_set_sh_reg_seq(cs, R_00B420_SPI_SHADER_PGM_LO_HS, 4);
- radeon_emit(cs, va >> 8);
- radeon_emit(cs, S_00B424_MEM_BASE(va >> 40));
- radeon_emit(cs, shader->config.rsrc1);
- radeon_emit(cs, shader->config.rsrc2);
- }
-}
-
-void
-radv_emit_vertex_shader(const struct radv_device *device, struct radeon_cmdbuf *ctx_cs, struct radeon_cmdbuf *cs,
- const struct radv_shader *vs, const struct radv_shader *next_stage)
-{
- const struct radv_physical_device *pdev = radv_device_physical(device);
-
- if (vs->info.merged_shader_compiled_separately) {
- const struct radv_userdata_info *loc = &vs->info.user_sgprs_locs.shader_data[AC_UD_NEXT_STAGE_PC];
- const uint32_t base_reg = vs->info.user_data_0;
-
- assert(loc->sgpr_idx != -1 && loc->num_sgprs == 1);
-
- if (!vs->info.vs.has_prolog) {
- uint32_t rsrc1, rsrc2;
-
- if (vs->info.next_stage == MESA_SHADER_TESS_CTRL) {
- radv_shader_combine_cfg_vs_tcs(vs, next_stage, &rsrc1, NULL);
-
- if (pdev->info.gfx_level >= GFX10) {
- radeon_set_sh_reg(cs, R_00B520_SPI_SHADER_PGM_LO_LS, vs->va >> 8);
- } else {
- radeon_set_sh_reg(cs, R_00B410_SPI_SHADER_PGM_LO_LS, vs->va >> 8);
- }
-
- radeon_set_sh_reg(cs, R_00B428_SPI_SHADER_PGM_RSRC1_HS, rsrc1);
- } else {
- radv_shader_combine_cfg_vs_gs(vs, next_stage, &rsrc1, &rsrc2);
-
- if (pdev->info.gfx_level >= GFX10) {
- radeon_set_sh_reg(cs, R_00B320_SPI_SHADER_PGM_LO_ES, vs->va >> 8);
- } else {
- radeon_set_sh_reg(cs, R_00B210_SPI_SHADER_PGM_LO_ES, vs->va >> 8);
- }
-
- unsigned lds_size;
- if (next_stage->info.is_ngg) {
- lds_size = DIV_ROUND_UP(next_stage->info.ngg_info.lds_size, pdev->info.lds_encode_granularity);
- } else {
- lds_size = next_stage->info.gs_ring_info.lds_size;
- }
-
- radeon_set_sh_reg_seq(cs, R_00B228_SPI_SHADER_PGM_RSRC1_GS, 2);
- radeon_emit(cs, rsrc1);
- radeon_emit(cs, rsrc2 | S_00B22C_LDS_SIZE(lds_size));
- }
- }
-
- radv_emit_shader_pointer(device, cs, base_reg + loc->sgpr_idx * 4, next_stage->va, false);
- return;
- }
-
- if (vs->info.vs.as_ls)
- radv_emit_hw_ls(cs, vs);
- else if (vs->info.vs.as_es)
- radv_emit_hw_es(cs, vs);
- else if (vs->info.is_ngg)
- radv_emit_hw_ngg(device, ctx_cs, cs, NULL, vs);
- else
- radv_emit_hw_vs(device, ctx_cs, cs, vs);
-}
-
-void
-radv_emit_tess_ctrl_shader(const struct radv_device *device, struct radeon_cmdbuf *cs, const struct radv_shader *tcs)
-{
- if (tcs->info.merged_shader_compiled_separately) {
- /* When VS+TCS are compiled separately on GFX9+, the VS will jump to the TCS and everything is
- * emitted as part of the VS.
- */
- return;
- }
-
- radv_emit_hw_hs(device, cs, tcs);
-}
-
-void
-radv_emit_tess_eval_shader(const struct radv_device *device, struct radeon_cmdbuf *ctx_cs, struct radeon_cmdbuf *cs,
- const struct radv_shader *tes, const struct radv_shader *gs)
-{
- const struct radv_physical_device *pdev = radv_device_physical(device);
-
- if (tes->info.merged_shader_compiled_separately) {
- const struct radv_userdata_info *loc = &tes->info.user_sgprs_locs.shader_data[AC_UD_NEXT_STAGE_PC];
- const uint32_t base_reg = tes->info.user_data_0;
- uint32_t rsrc1, rsrc2;
-
- assert(loc->sgpr_idx != -1 && loc->num_sgprs == 1);
-
- radv_shader_combine_cfg_tes_gs(tes, gs, &rsrc1, &rsrc2);
-
- radeon_set_sh_reg(cs, R_00B210_SPI_SHADER_PGM_LO_ES, tes->va >> 8);
-
- unsigned lds_size;
- if (gs->info.is_ngg) {
- lds_size = DIV_ROUND_UP(gs->info.ngg_info.lds_size, pdev->info.lds_encode_granularity);
- } else {
- lds_size = gs->info.gs_ring_info.lds_size;
- }
-
- radeon_set_sh_reg_seq(cs, R_00B228_SPI_SHADER_PGM_RSRC1_GS, 2);
- radeon_emit(cs, rsrc1);
- radeon_emit(cs, rsrc2 | S_00B22C_LDS_SIZE(lds_size));
-
- radv_emit_shader_pointer(device, cs, base_reg + loc->sgpr_idx * 4, gs->va, false);
- return;
- }
-
- if (tes->info.is_ngg) {
- radv_emit_hw_ngg(device, ctx_cs, cs, NULL, tes);
- } else if (tes->info.tes.as_es) {
- radv_emit_hw_es(cs, tes);
- } else {
- radv_emit_hw_vs(device, ctx_cs, cs, tes);
- }
-}
-
-static void
-radv_emit_hw_gs(const struct radv_device *device, struct radeon_cmdbuf *ctx_cs, struct radeon_cmdbuf *cs,
- const struct radv_shader *gs)
-{
- const struct radv_physical_device *pdev = radv_device_physical(device);
- const struct radv_legacy_gs_info *gs_state = &gs->info.gs_ring_info;
- const uint64_t va = radv_shader_get_va(gs);
-
- radeon_set_context_reg_seq(ctx_cs, R_028A60_VGT_GSVS_RING_OFFSET_1, 3);
- radeon_emit(ctx_cs, gs->info.regs.gs.vgt_gsvs_ring_offset[0]);
- radeon_emit(ctx_cs, gs->info.regs.gs.vgt_gsvs_ring_offset[1]);
- radeon_emit(ctx_cs, gs->info.regs.gs.vgt_gsvs_ring_offset[2]);
- radeon_set_context_reg(ctx_cs, R_028AB0_VGT_GSVS_RING_ITEMSIZE, gs->info.regs.gs.vgt_gsvs_ring_itemsize);
-
- radeon_set_context_reg_seq(ctx_cs, R_028B5C_VGT_GS_VERT_ITEMSIZE, 4);
- radeon_emit(ctx_cs, gs->info.regs.gs.vgt_gs_vert_itemsize[0]);
- radeon_emit(ctx_cs, gs->info.regs.gs.vgt_gs_vert_itemsize[1]);
- radeon_emit(ctx_cs, gs->info.regs.gs.vgt_gs_vert_itemsize[2]);
- radeon_emit(ctx_cs, gs->info.regs.gs.vgt_gs_vert_itemsize[3]);
-
- radeon_set_context_reg(ctx_cs, R_028B90_VGT_GS_INSTANCE_CNT, gs->info.regs.gs.vgt_gs_instance_cnt);
-
- if (pdev->info.gfx_level >= GFX9) {
- if (!gs->info.merged_shader_compiled_separately) {
- if (pdev->info.gfx_level >= GFX10) {
- radeon_set_sh_reg(cs, R_00B320_SPI_SHADER_PGM_LO_ES, va >> 8);
- } else {
- radeon_set_sh_reg(cs, R_00B210_SPI_SHADER_PGM_LO_ES, va >> 8);
- }
-
- radeon_set_sh_reg_seq(cs, R_00B228_SPI_SHADER_PGM_RSRC1_GS, 2);
- radeon_emit(cs, gs->config.rsrc1);
- radeon_emit(cs, gs->config.rsrc2 | S_00B22C_LDS_SIZE(gs_state->lds_size));
- }
-
- radeon_set_context_reg(ctx_cs, R_028A44_VGT_GS_ONCHIP_CNTL, gs->info.regs.vgt_gs_onchip_cntl);
- radeon_set_context_reg(ctx_cs, R_028A94_VGT_GS_MAX_PRIMS_PER_SUBGROUP,
- gs->info.regs.gs.vgt_gs_max_prims_per_subgroup);
- } else {
- radeon_set_sh_reg_seq(cs, R_00B220_SPI_SHADER_PGM_LO_GS, 4);
- radeon_emit(cs, va >> 8);
- radeon_emit(cs, S_00B224_MEM_BASE(va >> 40));
- radeon_emit(cs, gs->config.rsrc1);
- radeon_emit(cs, gs->config.rsrc2);
-
- /* GFX6-8: ESGS offchip ring buffer is allocated according to VGT_ESGS_RING_ITEMSIZE.
- * GFX9+: Only used to set the GS input VGPRs, emulated in shaders.
- */
- radeon_set_context_reg(ctx_cs, R_028AAC_VGT_ESGS_RING_ITEMSIZE, gs->info.regs.gs.vgt_esgs_ring_itemsize);
- }
-
- radeon_set_sh_reg_idx(pdev, cs, R_00B21C_SPI_SHADER_PGM_RSRC3_GS, 3, gs->info.regs.spi_shader_pgm_rsrc3_gs);
-
- if (pdev->info.gfx_level >= GFX10) {
- radeon_set_sh_reg_idx(pdev, cs, R_00B204_SPI_SHADER_PGM_RSRC4_GS, 3, gs->info.regs.spi_shader_pgm_rsrc4_gs);
- }
-}
-
-void
-radv_emit_geometry_shader(const struct radv_device *device, struct radeon_cmdbuf *ctx_cs, struct radeon_cmdbuf *cs,
- const struct radv_shader *gs, const struct radv_shader *es,
- const struct radv_shader *gs_copy_shader)
-{
- if (gs->info.is_ngg) {
- radv_emit_hw_ngg(device, ctx_cs, cs, es, gs);
- } else {
- radv_emit_hw_gs(device, ctx_cs, cs, gs);
- radv_emit_hw_vs(device, ctx_cs, cs, gs_copy_shader);
- }
-
- radeon_set_context_reg(ctx_cs, R_028B38_VGT_GS_MAX_VERT_OUT, gs->info.regs.vgt_gs_max_vert_out);
-
- if (gs->info.merged_shader_compiled_separately) {
- const struct radv_userdata_info *vgt_esgs_ring_itemsize = radv_get_user_sgpr(gs, AC_UD_VGT_ESGS_RING_ITEMSIZE);
-
- assert(vgt_esgs_ring_itemsize->sgpr_idx != -1 && vgt_esgs_ring_itemsize->num_sgprs == 1);
-
- radeon_set_sh_reg(cs, gs->info.user_data_0 + vgt_esgs_ring_itemsize->sgpr_idx * 4, es->info.esgs_itemsize / 4);
-
- if (gs->info.is_ngg) {
- const struct radv_userdata_info *ngg_lds_layout = radv_get_user_sgpr(gs, AC_UD_NGG_LDS_LAYOUT);
-
- assert(ngg_lds_layout->sgpr_idx != -1 && ngg_lds_layout->num_sgprs == 1);
- assert(!(gs->info.ngg_info.esgs_ring_size & 0xffff0000) && !(gs->info.ngg_info.scratch_lds_base & 0xffff0000));
-
- radeon_set_sh_reg(cs, gs->info.user_data_0 + ngg_lds_layout->sgpr_idx * 4,
- SET_SGPR_FIELD(NGG_LDS_LAYOUT_GS_OUT_VERTEX_BASE, gs->info.ngg_info.esgs_ring_size) |
- SET_SGPR_FIELD(NGG_LDS_LAYOUT_SCRATCH_BASE, gs->info.ngg_info.scratch_lds_base));
- }
- }
-}
-
-void
-radv_emit_mesh_shader(const struct radv_device *device, struct radeon_cmdbuf *ctx_cs, struct radeon_cmdbuf *cs,
- const struct radv_shader *ms)
-{
- const struct radv_physical_device *pdev = radv_device_physical(device);
- const uint32_t gs_out = radv_conv_gl_prim_to_gs_out(ms->info.ms.output_prim);
-
- radv_emit_hw_ngg(device, ctx_cs, cs, NULL, ms);
- radeon_set_context_reg(ctx_cs, R_028B38_VGT_GS_MAX_VERT_OUT, ms->info.regs.vgt_gs_max_vert_out);
- radeon_set_uconfig_reg_idx(pdev, cs, R_030908_VGT_PRIMITIVE_TYPE, 1, V_008958_DI_PT_POINTLIST);
-
- if (pdev->mesh_fast_launch_2) {
- radeon_set_sh_reg_seq(cs, R_00B2B0_SPI_SHADER_GS_MESHLET_DIM, 2);
- radeon_emit(cs, ms->info.regs.ms.spi_shader_gs_meshlet_dim);
- radeon_emit(cs, ms->info.regs.ms.spi_shader_gs_meshlet_exp_alloc);
- }
-
- radv_emit_vgt_gs_out(device, ctx_cs, cs, gs_out);
-}
-
-enum radv_ps_in_type {
- radv_ps_in_interpolated,
- radv_ps_in_flat,
- radv_ps_in_explicit,
- radv_ps_in_explicit_strict,
- radv_ps_in_interpolated_fp16,
- radv_ps_in_interpolated_fp16_hi,
- radv_ps_in_per_prim_gfx103,
- radv_ps_in_per_prim_gfx11,
-};
-
-static uint32_t
-offset_to_ps_input(const uint32_t offset, const enum radv_ps_in_type type)
-{
- assert(offset != AC_EXP_PARAM_UNDEFINED);
-
- if (offset >= AC_EXP_PARAM_DEFAULT_VAL_0000 && offset <= AC_EXP_PARAM_DEFAULT_VAL_1111) {
- /* The input is a DEFAULT_VAL constant. */
- return S_028644_OFFSET(0x20) | S_028644_DEFAULT_VAL(offset - AC_EXP_PARAM_DEFAULT_VAL_0000);
- }
-
- assert(offset <= AC_EXP_PARAM_OFFSET_31);
- uint32_t ps_input_cntl = S_028644_OFFSET(offset);
-
- switch (type) {
- case radv_ps_in_explicit_strict:
- /* Rotate parameter cache contents to strict vertex order. */
- ps_input_cntl |= S_028644_ROTATE_PC_PTR(1);
- FALLTHROUGH;
- case radv_ps_in_explicit:
- /* Force parameter cache to be read in passthrough mode. */
- ps_input_cntl |= S_028644_OFFSET(1 << 5);
- FALLTHROUGH;
- case radv_ps_in_flat:
- ps_input_cntl |= S_028644_FLAT_SHADE(1);
- break;
- case radv_ps_in_interpolated_fp16_hi:
- ps_input_cntl |= S_028644_ATTR1_VALID(1);
- FALLTHROUGH;
- case radv_ps_in_interpolated_fp16:
- /* These must be set even if only the high 16 bits are used. */
- ps_input_cntl |= S_028644_FP16_INTERP_MODE(1) | S_028644_ATTR0_VALID(1);
- break;
- case radv_ps_in_per_prim_gfx11:
- ps_input_cntl |= S_028644_PRIM_ATTR(1);
- break;
- case radv_ps_in_interpolated:
- case radv_ps_in_per_prim_gfx103:
- break;
- }
-
- return ps_input_cntl;
-}
-
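offset_to_ps_input() produces one SPI_PS_INPUT_CNTL_n dword per pixel-shader input. Below is a self-contained sketch of the two basic encodings it builds, with hand-written shifts standing in for the generated S_028644_* macros; the field positions are assumptions made for illustration, not taken from the register database:

#include <stdint.h>
#include <stdio.h>

/* Assumed layout: OFFSET in bits [5:0], DEFAULT_VAL in bits [9:8],
 * FLAT_SHADE in bit 10. */
#define PS_IN_OFFSET(x)      ((uint32_t)(x) & 0x3f)
#define PS_IN_DEFAULT_VAL(x) (((uint32_t)(x) & 0x3) << 8)
#define PS_IN_FLAT_SHADE(x)  (((uint32_t)(x) & 0x1) << 10)

int main(void)
{
   /* Input exported to param-cache slot 3, flat shaded. */
   uint32_t flat = PS_IN_OFFSET(3) | PS_IN_FLAT_SHADE(1);

   /* Input that was never exported: OFFSET 0x20 means "no param-cache slot"
    * and DEFAULT_VAL selects a constant instead (0 is the all-zero constant
    * that slot_to_ps_input() falls back to when use_default_0 is set). */
   uint32_t def = PS_IN_OFFSET(0x20) | PS_IN_DEFAULT_VAL(0);

   printf("flat=0x%08x default=0x%08x\n", (unsigned)flat, (unsigned)def);
   return 0;
}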
-static void
-slot_to_ps_input(const struct radv_vs_output_info *outinfo, unsigned slot, uint32_t *ps_input_cntl, unsigned *ps_offset,
- const bool use_default_0, const enum radv_ps_in_type type)
-{
- unsigned vs_offset = outinfo->vs_output_param_offset[slot];
-
- if (vs_offset == AC_EXP_PARAM_UNDEFINED) {
- if (use_default_0)
- vs_offset = AC_EXP_PARAM_DEFAULT_VAL_0000;
- else
- return;
- }
-
- ps_input_cntl[*ps_offset] = offset_to_ps_input(vs_offset, type);
- ++(*ps_offset);
-}
-
-static void
-input_mask_to_ps_inputs(const struct radv_vs_output_info *outinfo, const struct radv_shader *ps, uint32_t input_mask,
- uint32_t *ps_input_cntl, unsigned *ps_offset, const enum radv_ps_in_type default_type)
-{
- u_foreach_bit (i, input_mask) {
- unsigned vs_offset = outinfo->vs_output_param_offset[VARYING_SLOT_VAR0 + i];
- if (vs_offset == AC_EXP_PARAM_UNDEFINED) {
- ps_input_cntl[*ps_offset] = S_028644_OFFSET(0x20);
- ++(*ps_offset);
- continue;
- }
-
- enum radv_ps_in_type type = default_type;
-
- if (ps->info.ps.flat_shaded_mask & BITFIELD_BIT(*ps_offset))
- type = radv_ps_in_flat;
- else if (ps->info.ps.explicit_shaded_mask & BITFIELD_BIT(*ps_offset))
- type = radv_ps_in_explicit;
- else if (ps->info.ps.explicit_strict_shaded_mask & BITFIELD_BIT(*ps_offset))
- type = radv_ps_in_explicit_strict;
- else if (ps->info.ps.float16_hi_shaded_mask & BITFIELD_BIT(*ps_offset))
- type = radv_ps_in_interpolated_fp16_hi;
- else if (ps->info.ps.float16_shaded_mask & BITFIELD_BIT(*ps_offset))
- type = radv_ps_in_interpolated_fp16;
-
- ps_input_cntl[*ps_offset] = offset_to_ps_input(vs_offset, type);
- ++(*ps_offset);
- }
-}
-
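input_mask_to_ps_inputs() walks the set bits of the input mask with u_foreach_bit, mapping bit i to VARYING_SLOT_VAR0 + i and picking an interpolation type per slot. A portable equivalent of that iteration pattern, written as a hypothetical helper rather than Mesa's actual macro:

#include <stdint.h>
#include <stdio.h>

static void foreach_bit(uint32_t mask, void (*fn)(unsigned bit))
{
   while (mask) {
      unsigned bit = (unsigned)__builtin_ctz(mask); /* GCC/Clang: index of lowest set bit */
      fn(bit);
      mask &= mask - 1; /* clear the bit just visited */
   }
}

static void visit(unsigned bit) { printf("VAR%u\n", bit); }

int main(void)
{
   foreach_bit(0x29, visit); /* visits bits 0, 3, 5 -> VAR0, VAR3, VAR5 */
   return 0;
}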
-void
-radv_emit_ps_inputs(const struct radv_device *device, struct radeon_cmdbuf *ctx_cs,
- const struct radv_shader *last_vgt_shader, const struct radv_shader *ps)
-{
- const struct radv_physical_device *pdev = radv_device_physical(device);
- const struct radv_vs_output_info *outinfo = &last_vgt_shader->info.outinfo;
- const bool mesh = last_vgt_shader->info.stage == MESA_SHADER_MESH;
- const bool gfx11plus = pdev->info.gfx_level >= GFX11;
- const enum radv_ps_in_type per_prim = gfx11plus ? radv_ps_in_per_prim_gfx11 : radv_ps_in_per_prim_gfx103;
-
- uint32_t ps_input_cntl[32];
- unsigned ps_offset = 0;
-
- if (ps->info.ps.prim_id_input && !mesh)
- slot_to_ps_input(outinfo, VARYING_SLOT_PRIMITIVE_ID, ps_input_cntl, &ps_offset, false, radv_ps_in_flat);
-
- if (ps->info.ps.layer_input && !mesh)
- slot_to_ps_input(outinfo, VARYING_SLOT_LAYER, ps_input_cntl, &ps_offset, true, radv_ps_in_flat);
-
- if (ps->info.ps.viewport_index_input && !mesh)
- slot_to_ps_input(outinfo, VARYING_SLOT_VIEWPORT, ps_input_cntl, &ps_offset, true, radv_ps_in_flat);
-
- if (ps->info.ps.has_pcoord)
- ps_input_cntl[ps_offset++] = S_028644_PT_SPRITE_TEX(1) | S_028644_OFFSET(0x20);
-
- if (ps->info.ps.input_clips_culls_mask & 0x0f)
- slot_to_ps_input(outinfo, VARYING_SLOT_CLIP_DIST0, ps_input_cntl, &ps_offset, false, radv_ps_in_interpolated);
-
- if (ps->info.ps.input_clips_culls_mask & 0xf0)
- slot_to_ps_input(outinfo, VARYING_SLOT_CLIP_DIST1, ps_input_cntl, &ps_offset, false, radv_ps_in_interpolated);
-
- input_mask_to_ps_inputs(outinfo, ps, ps->info.ps.input_mask, ps_input_cntl, &ps_offset, radv_ps_in_interpolated);
-
- /* Per-primitive PS inputs: the HW needs these to be last. */
-
- if (ps->info.ps.prim_id_input && mesh)
- slot_to_ps_input(outinfo, VARYING_SLOT_PRIMITIVE_ID, ps_input_cntl, &ps_offset, false, per_prim);
-
- if (ps->info.ps.layer_input && mesh)
- slot_to_ps_input(outinfo, VARYING_SLOT_LAYER, ps_input_cntl, &ps_offset, true, per_prim);
-
- if (ps->info.ps.viewport_index_input && mesh)
- slot_to_ps_input(outinfo, VARYING_SLOT_VIEWPORT, ps_input_cntl, &ps_offset, true, per_prim);
-
- input_mask_to_ps_inputs(outinfo, ps, ps->info.ps.input_per_primitive_mask, ps_input_cntl, &ps_offset, per_prim);
-
- if (ps_offset) {
- radeon_set_context_reg_seq(ctx_cs, R_028644_SPI_PS_INPUT_CNTL_0, ps_offset);
- for (unsigned i = 0; i < ps_offset; i++) {
- radeon_emit(ctx_cs, ps_input_cntl[i]);
- }
- }
-}
-
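The closing radeon_set_context_reg_seq()/radeon_emit() loop writes every accumulated SPI_PS_INPUT_CNTL_* value as one run of consecutive registers. A simplified sketch of that sequence-write shape, with a placeholder header dword instead of the real PM4 packet encoding:

#include <stdint.h>

struct dw_stream { uint32_t *buf; unsigned cdw; };

static void emit_dw(struct dw_stream *s, uint32_t dw)
{
   s->buf[s->cdw++] = dw;
}

/* One header, one starting register offset, then one payload dword per
 * consecutive register - the same shape as the loop above. */
static void set_reg_seq(struct dw_stream *s, uint32_t first_reg, const uint32_t *vals, unsigned n)
{
   emit_dw(s, 0xC0000000u | n); /* placeholder packet header, not real PM4 */
   emit_dw(s, first_reg);
   for (unsigned i = 0; i < n; i++)
      emit_dw(s, vals[i]);
}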
-void
-radv_emit_fragment_shader(const struct radv_device *device, struct radeon_cmdbuf *ctx_cs, struct radeon_cmdbuf *cs,
- const struct radv_shader *ps)
-{
- const struct radv_physical_device *pdev = radv_device_physical(device);
- uint64_t va;
-
- va = radv_shader_get_va(ps);
-
- radeon_set_sh_reg_seq(cs, R_00B020_SPI_SHADER_PGM_LO_PS, 4);
- radeon_emit(cs, va >> 8);
- radeon_emit(cs, S_00B024_MEM_BASE(va >> 40));
- radeon_emit(cs, ps->config.rsrc1);
- radeon_emit(cs, ps->config.rsrc2);
-
- radeon_set_context_reg_seq(ctx_cs, R_0286CC_SPI_PS_INPUT_ENA, 2);
- radeon_emit(ctx_cs, ps->config.spi_ps_input_ena);
- radeon_emit(ctx_cs, ps->config.spi_ps_input_addr);
-
- radeon_set_context_reg(ctx_cs, R_0286D8_SPI_PS_IN_CONTROL, ps->info.regs.ps.spi_ps_in_control);
- radeon_set_context_reg(ctx_cs, R_028710_SPI_SHADER_Z_FORMAT, ps->info.regs.ps.spi_shader_z_format);
-
- if (pdev->info.gfx_level >= GFX9 && pdev->info.gfx_level < GFX11)
- radeon_set_context_reg(ctx_cs, R_028C40_PA_SC_SHADER_CONTROL, ps->info.regs.ps.pa_sc_shader_control);
-}
-
-void
-radv_emit_vgt_reuse(const struct radv_device *device, struct radeon_cmdbuf *ctx_cs, const struct radv_shader *tes,
- const struct radv_vgt_shader_key *key)
-{
- const struct radv_physical_device *pdev = radv_device_physical(device);
-
- if (pdev->info.gfx_level == GFX10_3) {
- /* Legacy Tess+GS should disable reuse to prevent hangs on GFX10.3. */
- const bool has_legacy_tess_gs = key->tess && key->gs && !key->ngg;
-
- radeon_set_context_reg(ctx_cs, R_028AB4_VGT_REUSE_OFF, S_028AB4_REUSE_OFF(has_legacy_tess_gs));
- }
-
- if (pdev->info.family >= CHIP_POLARIS10 && pdev->info.gfx_level < GFX10) {
- unsigned vtx_reuse_depth = 30;
- if (tes && tes->info.tes.spacing == TESS_SPACING_FRACTIONAL_ODD) {
- vtx_reuse_depth = 14;
- }
- radeon_set_context_reg(ctx_cs, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL, S_028C58_VTX_REUSE_DEPTH(vtx_reuse_depth));
- }
-}
-
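The reuse-depth selection in radv_emit_vgt_reuse() reduces to a two-way choice; isolated below with the values taken directly from the code above:

#include <stdbool.h>

/* Polaris10+ up to GFX9: vertex reuse depth defaults to 30 and drops to 14
 * when the tessellation evaluation shader uses fractional-odd spacing. */
static unsigned vtx_reuse_depth(bool tes_uses_fractional_odd_spacing)
{
   return tes_uses_fractional_odd_spacing ? 14 : 30;
}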
struct radv_vgt_shader_key
radv_get_vgt_shader_key(const struct radv_device *device, struct radv_shader **shaders,
const struct radv_shader *gs_copy_shader)
@@ -3418,86 +2800,6 @@ radv_get_vgt_shader_key(const struct radv_device *device, struct radv_shader **s
return key;
}
-void
-radv_emit_vgt_shader_config(const struct radv_device *device, struct radeon_cmdbuf *ctx_cs,
- const struct radv_vgt_shader_key *key)
-{
- const struct radv_physical_device *pdev = radv_device_physical(device);
- uint32_t stages = 0;
-
- if (key->tess) {
- stages |= S_028B54_LS_EN(V_028B54_LS_STAGE_ON) | S_028B54_HS_EN(1) | S_028B54_DYNAMIC_HS(1);
-
- if (key->gs)
- stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_DS) | S_028B54_GS_EN(1);
- else if (key->ngg)
- stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_DS);
- else
- stages |= S_028B54_VS_EN(V_028B54_VS_STAGE_DS);
- } else if (key->gs) {
- stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_REAL) | S_028B54_GS_EN(1);
- } else if (key->mesh) {
- assert(!key->ngg_passthrough);
- unsigned gs_fast_launch = pdev->mesh_fast_launch_2 ? 2 : 1;
- stages |=
- S_028B54_GS_EN(1) | S_028B54_GS_FAST_LAUNCH(gs_fast_launch) | S_028B54_NGG_WAVE_ID_EN(key->mesh_scratch_ring);
- } else if (key->ngg) {
- stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_REAL);
- }
-
- if (key->ngg) {
- stages |= S_028B54_PRIMGEN_EN(1) | S_028B54_NGG_WAVE_ID_EN(key->ngg_streamout) |
- S_028B54_PRIMGEN_PASSTHRU_EN(key->ngg_passthrough) |
- S_028B54_PRIMGEN_PASSTHRU_NO_MSG(key->ngg_passthrough && pdev->info.family >= CHIP_NAVI23);
- } else if (key->gs) {
- stages |= S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER);
- }
-
- if (pdev->info.gfx_level >= GFX9)
- stages |= S_028B54_MAX_PRIMGRP_IN_WAVE(2);
-
- if (pdev->info.gfx_level >= GFX10) {
- stages |= S_028B54_HS_W32_EN(key->hs_wave32) | S_028B54_GS_W32_EN(key->gs_wave32) |
- S_028B54_VS_W32_EN(pdev->info.gfx_level < GFX11 && key->vs_wave32);
- /* Legacy GS only supports Wave64; the assert checks the implication (legacy GS => !gs_wave32). */
- assert(!(key->gs && !key->ngg) || !key->gs_wave32);
- }
-
- radeon_set_context_reg(ctx_cs, R_028B54_VGT_SHADER_STAGES_EN, stages);
-}
-
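radv_emit_vgt_shader_config() maps the enabled API stages onto hardware stages. The legacy (non-NGG, non-mesh) half of that mapping written out as a plain decision table; the strings are descriptive only, not register values:

#include <stdbool.h>
#include <stdio.h>

static const char *legacy_hw_stages(bool tess, bool gs)
{
   if (tess && gs)
      return "LS/HS on, tess-eval runs as ES, GS on, copy shader on the VS stage";
   if (tess)
      return "LS/HS on, tess-eval runs on the VS stage";
   if (gs)
      return "vertex shader runs as ES, GS on, copy shader on the VS stage";
   return "plain hardware VS";
}

int main(void)
{
   printf("tess+gs: %s\n", legacy_hw_stages(true, true));
   printf("gs only: %s\n", legacy_hw_stages(false, true));
   return 0;
}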
-void
-radv_emit_vgt_gs_out(const struct radv_device *device, struct radeon_cmdbuf *ctx_cs, struct radeon_cmdbuf *cs,
- uint32_t vgt_gs_out_prim_type)
-{
- const struct radv_physical_device *pdev = radv_device_physical(device);
-
- if (pdev->info.gfx_level >= GFX11) {
- radeon_set_uconfig_reg(cs, R_030998_VGT_GS_OUT_PRIM_TYPE, vgt_gs_out_prim_type);
- } else {
- radeon_set_context_reg(ctx_cs, R_028A6C_VGT_GS_OUT_PRIM_TYPE, vgt_gs_out_prim_type);
- }
-}
-
-void
-gfx103_emit_vgt_draw_payload_cntl(struct radeon_cmdbuf *ctx_cs, const struct radv_shader *mesh_shader, bool enable_vrs)
-{
- bool enable_prim_payload = false;
-
- /* Enables the second channel of the primitive export instruction.
- * This channel contains the VRS rate (X, Y), the viewport index and the layer.
- */
- if (mesh_shader) {
- const struct radv_vs_output_info *outinfo = &mesh_shader->info.outinfo;
-
- enable_prim_payload = (outinfo->writes_viewport_index_per_primitive || outinfo->writes_layer_per_primitive ||
- outinfo->writes_primitive_shading_rate_per_primitive);
- }
-
- radeon_set_context_reg(ctx_cs, R_028A98_VGT_DRAW_PAYLOAD_CNTL,
- S_028A98_EN_VRS_RATE(enable_vrs) | S_028A98_EN_PRIM_PAYLOAD(enable_prim_payload));
-}
-
static bool
gfx103_pipeline_vrs_coarse_shading(const struct radv_device *device, const struct radv_graphics_pipeline *pipeline)
{
@@ -3517,104 +2819,6 @@ gfx103_pipeline_vrs_coarse_shading(const struct radv_device *device, const struc
return true;
}
-void
-gfx103_emit_vrs_state(const struct radv_device *device, struct radeon_cmdbuf *ctx_cs, const struct radv_shader *ps,
- bool enable_vrs_coarse_shading, bool force_vrs_per_vertex)
-{
- const struct radv_physical_device *pdev = radv_device_physical(device);
- uint32_t mode = V_028064_SC_VRS_COMB_MODE_PASSTHRU;
- uint8_t rate_x = 0, rate_y = 0;
-
- if (enable_vrs_coarse_shading) {
- /* When per-draw VRS is not enabled at all, try enabling 2x2 VRS coarse shading if the driver
- * has determined that it is safe to do so.
- */
- mode = V_028064_SC_VRS_COMB_MODE_OVERRIDE;
- rate_x = rate_y = 1;
- } else if (force_vrs_per_vertex) {
- /* Otherwise, if per-draw VRS is not enabled statically, try forcing per-vertex VRS if
- * requested by the user. Note that vkd3d-proton always has to declare VRS as dynamic because
- * in DX12 it's fully dynamic.
- */
- radeon_set_context_reg(ctx_cs, R_028848_PA_CL_VRS_CNTL,
- S_028848_SAMPLE_ITER_COMBINER_MODE(V_028848_SC_VRS_COMB_MODE_OVERRIDE) |
- S_028848_VERTEX_RATE_COMBINER_MODE(V_028848_SC_VRS_COMB_MODE_OVERRIDE));
-
- /* If the shader is using discard, turn off coarse shading because discard at 2x2 pixel
- * granularity degrades quality too much. MIN allows sample shading but not coarse shading.
- */
- mode = ps->info.ps.can_discard ? V_028064_SC_VRS_COMB_MODE_MIN : V_028064_SC_VRS_COMB_MODE_PASSTHRU;
- }
-
- if (pdev->info.gfx_level < GFX11) {
- radeon_set_context_reg(ctx_cs, R_028064_DB_VRS_OVERRIDE_CNTL,
- S_028064_VRS_OVERRIDE_RATE_COMBINER_MODE(mode) | S_028064_VRS_OVERRIDE_RATE_X(rate_x) |
- S_028064_VRS_OVERRIDE_RATE_Y(rate_y));
- }
-}
-
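The combiner-mode selection in gfx103_emit_vrs_state() collapses to a small decision function, sketched here with placeholder enum names standing in for the V_028064_SC_VRS_COMB_MODE_* values:

#include <stdbool.h>

enum vrs_comb_mode { VRS_PASSTHRU, VRS_OVERRIDE, VRS_MIN };

static enum vrs_comb_mode pick_vrs_comb_mode(bool coarse_2x2, bool force_per_vertex, bool ps_can_discard)
{
   if (coarse_2x2)
      return VRS_OVERRIDE; /* force the 2x2 rate */
   if (force_per_vertex)
      /* Discard at 2x2 granularity costs too much quality, so MIN only
       * allows sample shading, not coarse shading. */
      return ps_can_discard ? VRS_MIN : VRS_PASSTHRU;
   return VRS_PASSTHRU;
}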
-static void
-radv_pipeline_emit_pm4(const struct radv_device *device, struct radv_graphics_pipeline *pipeline)
-{
- const struct radv_physical_device *pdev = radv_device_physical(device);
- const struct radv_shader *last_vgt_shader = radv_get_last_vgt_shader(pipeline);
- const struct radv_shader *ps = pipeline->base.shaders[MESA_SHADER_FRAGMENT];
- struct radeon_cmdbuf *ctx_cs = &pipeline->base.ctx_cs;
- struct radeon_cmdbuf *cs = &pipeline->base.cs;
-
- cs->reserved_dw = cs->max_dw = 64;
- ctx_cs->reserved_dw = ctx_cs->max_dw = 256;
- cs->buf = malloc(4 * (cs->max_dw + ctx_cs->max_dw));
- ctx_cs->buf = cs->buf + cs->max_dw;
-
- const struct radv_vgt_shader_key vgt_shader_key =
- radv_get_vgt_shader_key(device, pipeline->base.shaders, pipeline->base.gs_copy_shader);
-
- radv_emit_vgt_gs_mode(device, ctx_cs, pipeline->base.shaders[pipeline->last_vgt_api_stage]);
-
- if (radv_pipeline_has_stage(pipeline, MESA_SHADER_VERTEX)) {
- radv_emit_vertex_shader(device, ctx_cs, cs, pipeline->base.shaders[MESA_SHADER_VERTEX], NULL);
- }
-
- if (radv_pipeline_has_stage(pipeline, MESA_SHADER_MESH)) {
- radv_emit_mesh_shader(device, ctx_cs, cs, pipeline->base.shaders[MESA_SHADER_MESH]);
- }
-
- if (radv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_CTRL)) {
- radv_emit_tess_ctrl_shader(device, cs, pipeline->base.shaders[MESA_SHADER_TESS_CTRL]);
-
- if (radv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_EVAL)) {
- radv_emit_tess_eval_shader(device, ctx_cs, cs, pipeline->base.shaders[MESA_SHADER_TESS_EVAL], NULL);
- }
- }
-
- if (radv_pipeline_has_stage(pipeline, MESA_SHADER_GEOMETRY)) {
- const struct radv_shader *gs = pipeline->base.shaders[MESA_SHADER_GEOMETRY];
- const struct radv_shader *es = pipeline->base.shaders[gs->info.gs.es_type];
-
- radv_emit_geometry_shader(device, ctx_cs, cs, gs, es, pipeline->base.gs_copy_shader);
- }
-
- if (ps) {
- radv_emit_fragment_shader(device, ctx_cs, cs, ps);
- radv_emit_ps_inputs(device, ctx_cs, last_vgt_shader, ps);
- }
-
- radv_emit_vgt_reuse(device, ctx_cs, radv_get_shader(pipeline->base.shaders, MESA_SHADER_TESS_EVAL), &vgt_shader_key);
- radv_emit_vgt_shader_config(device, ctx_cs, &vgt_shader_key);
-
- if (pdev->info.gfx_level >= GFX10_3) {
- gfx103_emit_vgt_draw_payload_cntl(ctx_cs, pipeline->base.shaders[MESA_SHADER_MESH], pipeline->uses_vrs);
- gfx103_emit_vrs_state(device, ctx_cs, pipeline->base.shaders[MESA_SHADER_FRAGMENT],
- pipeline->uses_vrs_coarse_shading, pipeline->force_vrs_per_vertex);
- }
-
- pipeline->base.ctx_cs_hash = _mesa_hash_data(ctx_cs->buf, ctx_cs->cdw * 4);
-
- assert(ctx_cs->cdw <= ctx_cs->max_dw);
- assert(cs->cdw <= cs->max_dw);
-}
-
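radv_pipeline_emit_pm4() backed both pipeline command streams with a single allocation: one malloc sized for both, with the context-register stream starting where the SH-register stream's capacity ends. The ctx_cs hash computed at the end is what let the driver recognize an identical context stream on the next pipeline bind and skip re-emitting it. A stripped-down sketch of the allocation layout, with a stand-in for struct radeon_cmdbuf:

#include <stdint.h>
#include <stdlib.h>

struct dw_stream { uint32_t *buf; unsigned cdw, max_dw; };

static int alloc_pipeline_streams(struct dw_stream *cs, struct dw_stream *ctx_cs)
{
   cs->max_dw = 64;      /* SH-register stream, fixed capacity */
   ctx_cs->max_dw = 256; /* context-register stream, fixed capacity */

   cs->buf = malloc(4 * (cs->max_dw + ctx_cs->max_dw)); /* one block for both */
   if (!cs->buf)
      return -1;

   ctx_cs->buf = cs->buf + cs->max_dw; /* second stream aliases the same block */
   cs->cdw = ctx_cs->cdw = 0;
   return 0;
}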
static void
radv_pipeline_init_vertex_input_state(const struct radv_device *device, struct radv_graphics_pipeline *pipeline,
const struct vk_graphics_pipeline_state *state)
@@ -3940,8 +3144,6 @@ radv_graphics_pipeline_init(struct radv_graphics_pipeline *pipeline, struct radv
radv_pipeline_init_extra(pipeline, extra, &state);
}
- radv_pipeline_emit_pm4(device, pipeline);
-
return result;
}
diff --git a/src/amd/vulkan/radv_pipeline_graphics.h b/src/amd/vulkan/radv_pipeline_graphics.h
index 2a90ffa45fa..0453670d04a 100644
--- a/src/amd/vulkan/radv_pipeline_graphics.h
+++ b/src/amd/vulkan/radv_pipeline_graphics.h
@@ -608,31 +608,6 @@ void radv_graphics_shaders_compile(struct radv_device *device, struct vk_pipelin
struct radv_shader **shaders, struct radv_shader_binary **binaries,
struct radv_shader **gs_copy_shader, struct radv_shader_binary **gs_copy_binary);
-void radv_emit_vgt_gs_mode(const struct radv_device *device, struct radeon_cmdbuf *ctx_cs,
- const struct radv_shader *last_vgt_api_shader);
-
-void radv_emit_vertex_shader(const struct radv_device *device, struct radeon_cmdbuf *ctx_cs, struct radeon_cmdbuf *cs,
- const struct radv_shader *vs, const struct radv_shader *next_stage);
-
-void radv_emit_tess_ctrl_shader(const struct radv_device *device, struct radeon_cmdbuf *cs,
- const struct radv_shader *tcs);
-
-void radv_emit_tess_eval_shader(const struct radv_device *device, struct radeon_cmdbuf *ctx_cs,
- struct radeon_cmdbuf *cs, const struct radv_shader *tes, const struct radv_shader *gs);
-
-void radv_emit_geometry_shader(const struct radv_device *device, struct radeon_cmdbuf *ctx_cs, struct radeon_cmdbuf *cs,
- const struct radv_shader *gs, const struct radv_shader *es,
- const struct radv_shader *gs_copy_shader);
-
-void radv_emit_mesh_shader(const struct radv_device *device, struct radeon_cmdbuf *ctx_cs, struct radeon_cmdbuf *cs,
- const struct radv_shader *ms);
-
-void radv_emit_ps_inputs(const struct radv_device *device, struct radeon_cmdbuf *cs,
- const struct radv_shader *last_vgt_shader, const struct radv_shader *ps);
-
-void radv_emit_fragment_shader(const struct radv_device *device, struct radeon_cmdbuf *ctx_cs, struct radeon_cmdbuf *cs,
- const struct radv_shader *ps);
-
struct radv_vgt_shader_key {
uint8_t tess : 1;
uint8_t gs : 1;
@@ -646,24 +621,9 @@ struct radv_vgt_shader_key {
uint8_t vs_wave32 : 1;
};
-void radv_emit_vgt_reuse(const struct radv_device *device, struct radeon_cmdbuf *ctx_cs, const struct radv_shader *tes,
- const struct radv_vgt_shader_key *key);
-
struct radv_vgt_shader_key radv_get_vgt_shader_key(const struct radv_device *device, struct radv_shader **shaders,
const struct radv_shader *gs_copy_shader);
-void radv_emit_vgt_shader_config(const struct radv_device *device, struct radeon_cmdbuf *ctx_cs,
- const struct radv_vgt_shader_key *key);
-
-void radv_emit_vgt_gs_out(const struct radv_device *device, struct radeon_cmdbuf *ctx_cs, struct radeon_cmdbuf *cs,
- uint32_t vgt_gs_out_prim_type);
-
-void gfx103_emit_vgt_draw_payload_cntl(struct radeon_cmdbuf *ctx_cs, const struct radv_shader *mesh_shader,
- bool enable_vrs);
-
-void gfx103_emit_vrs_state(const struct radv_device *device, struct radeon_cmdbuf *ctx_cs, const struct radv_shader *ps,
- bool enable_vrs_coarse_shading, bool force_vrs_per_vertex);
-
uint32_t radv_get_vgt_gs_out(struct radv_shader **shaders, uint32_t primitive_topology);
bool radv_needs_null_export_workaround(const struct radv_device *device, const struct radv_shader *ps,