author     Nicolai Hähnle <nicolai.haehnle@amd.com>    2017-06-24 17:48:22 +0200
committer  Nicolai Hähnle <nicolai.haehnle@amd.com>    2017-07-05 12:33:56 +0200
commit     ed37fd493277473871a6c6fd9c023f9fe177c3b7 (patch)
tree       2f6725b1065a4f45d1d690e62a4827bcb3950793
parent     0fa97015990813aea8174daa43f58d6bbc08012e (diff)

radeonsi/nir: perform radeonsi-specific lowering and optimization passes
-rw-r--r--  src/gallium/drivers/radeonsi/si_shader_nir.c  41
1 file changed, 41 insertions(+), 0 deletions(-)
diff --git a/src/gallium/drivers/radeonsi/si_shader_nir.c b/src/gallium/drivers/radeonsi/si_shader_nir.c
index dc2ef8bd40..83c7ab11fe 100644
--- a/src/gallium/drivers/radeonsi/si_shader_nir.c
+++ b/src/gallium/drivers/radeonsi/si_shader_nir.c
@@ -337,6 +337,47 @@ si_lower_nir(struct si_shader_selector* sel)
variable->data.driver_location += 1;
}
}
+
+	/* Perform lowerings (and optimizations) of code.
+	 *
+	 * Performance considerations aside, we must:
+	 * - lower certain ALU operations
+	 * - ensure constant offsets for texture instructions are folded
+	 *   and copy-propagated
+	 */
+	NIR_PASS_V(sel->nir, nir_lower_returns);
+	NIR_PASS_V(sel->nir, nir_lower_vars_to_ssa);
+	NIR_PASS_V(sel->nir, nir_lower_alu_to_scalar);
+	NIR_PASS_V(sel->nir, nir_lower_phis_to_scalar);
+
+	bool progress;
+	do {
+		progress = false;
+
+		/* (Constant) copy propagation is needed for txf with offsets. */
+		NIR_PASS(progress, sel->nir, nir_copy_prop);
+		NIR_PASS(progress, sel->nir, nir_opt_remove_phis);
+		NIR_PASS(progress, sel->nir, nir_opt_dce);
+		if (nir_opt_trivial_continues(sel->nir)) {
+			progress = true;
+			NIR_PASS(progress, sel->nir, nir_copy_prop);
+			NIR_PASS(progress, sel->nir, nir_opt_dce);
+		}
+		NIR_PASS(progress, sel->nir, nir_opt_if);
+		NIR_PASS(progress, sel->nir, nir_opt_dead_cf);
+		NIR_PASS(progress, sel->nir, nir_opt_cse);
+		NIR_PASS(progress, sel->nir, nir_opt_peephole_select, 8);
+
+		/* Needed for algebraic lowering */
+		NIR_PASS(progress, sel->nir, nir_opt_algebraic);
+		NIR_PASS(progress, sel->nir, nir_opt_constant_folding);
+
+		NIR_PASS(progress, sel->nir, nir_opt_undef);
+		NIR_PASS(progress, sel->nir, nir_opt_conditional_discard);
+		if (sel->nir->options->max_unroll_iterations) {
+			NIR_PASS(progress, sel->nir, nir_opt_loop_unroll, 0);
+		}
+	} while (progress);
}

static void declare_nir_input_vs(struct si_shader_context *ctx,
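
The loop added above follows the standard NIR fixed-point idiom: run one round of
optimization passes, let each NIR_PASS invocation OR its pass's "made progress"
return value into a flag, and repeat until a full round changes nothing. Below is
a minimal standalone sketch of that idiom, not part of the patch: the helper name
example_optimize_nir and the reduced pass list are hypothetical, while
NIR_PASS/NIR_PASS_V and the individual nir_* passes are the real Mesa entry
points used in the hunk.

#include "nir.h"  /* assumption: Mesa's NIR headers are on the include path */

/* Hypothetical helper that shrinks the patch's loop to its essential shape. */
static void
example_optimize_nir(nir_shader *nir)
{
	bool progress;

	/* Lowerings that only need to run once stay outside the loop. */
	NIR_PASS_V(nir, nir_lower_vars_to_ssa);

	do {
		progress = false;

		/* NIR_PASS ORs each pass's return value into |progress|,
		 * so the loop stops once no pass changes the shader.
		 */
		NIR_PASS(progress, nir, nir_copy_prop);
		NIR_PASS(progress, nir, nir_opt_dce);
		NIR_PASS(progress, nir, nir_opt_cse);
		NIR_PASS(progress, nir, nir_opt_algebraic);
		NIR_PASS(progress, nir, nir_opt_constant_folding);
	} while (progress);
}

As the in-code comment notes, keeping nir_copy_prop and nir_opt_constant_folding
in the same loop is what ensures the offsets of txf instructions have been folded
down to constants by the time the radeonsi backend consumes the shader.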