author     Ian Romanick <ian.d.romanick@intel.com>    2018-07-23 18:31:37 -0700
committer  Ian Romanick <ian.d.romanick@intel.com>    2018-08-02 11:20:02 -0700
commit     99f20d50bba1be19c3de9d78c6058e8528eb11ae (patch)
tree       d35544b846672842fdfa76c1903a70892d345fb0
parent     7abc8f47d4a870283c742065d9b3ad569772cc8b (diff)
WIP: intel/compiler: Use Boolean expression minimization pass
-rw-r--r--   src/intel/compiler/brw_nir.c | 16
1 file changed, 16 insertions(+), 0 deletions(-)
diff --git a/src/intel/compiler/brw_nir.c b/src/intel/compiler/brw_nir.c
index ab188c991c3..b40386dbeb1 100644
--- a/src/intel/compiler/brw_nir.c
+++ b/src/intel/compiler/brw_nir.c
@@ -656,6 +656,8 @@ brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir)
     * of some int64 instructions.
     */
    OPT(nir_opt_algebraic);
+   if (is_scalar)
+      OPT(nir_opt_minimize_boolean);
 
    /* Lower int64 instructions before nir_optimize so that loop unrolling
     * sees their actual cost.
@@ -666,6 +668,9 @@ brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir)
 
    nir = brw_nir_optimize(nir, compiler, is_scalar);
 
+   if (is_scalar)
+      OPT(nir_opt_minimize_boolean);
+
    /* This needs to be run after the first optimization pass but before we
     * lower indirect derefs away
     */
@@ -684,6 +689,9 @@ brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir)
 
    OPT(nir_lower_system_values);
 
+   if (is_scalar)
+      OPT(nir_opt_minimize_boolean);
+
    const nir_lower_subgroups_options subgroups_options = {
       .subgroup_size = BRW_SUBGROUP_SIZE,
       .ballot_bit_size = 32,
@@ -781,6 +789,14 @@ brw_postprocess_nir(nir_shader *nir, const struct brw_compiler *compiler,
 
    OPT(nir_opt_algebraic_late);
 
+   if (is_scalar) {
+      if (OPT(nir_opt_minimize_boolean)) {
+         OPT(nir_opt_cse);
+         OPT(nir_copy_prop);
+         OPT(nir_opt_dce);
+      }
+   }
+
    OPT(nir_lower_to_source_mods);
    OPT(nir_copy_prop);
    OPT(nir_opt_dce);
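
For context on the last hunk: the OPT() helper in brw_nir.c wraps NIR_PASS() and evaluates to whether the wrapped pass reported progress, which is what lets brw_postprocess_nir gate the nir_opt_cse / nir_copy_prop / nir_opt_dce cleanup on nir_opt_minimize_boolean actually changing the shader. Below is a minimal, self-contained C sketch of that progress-gated-cleanup idiom only; struct shader, minimize_booleans(), and run_cleanup() are hypothetical stand-ins for illustration, not Mesa or NIR API.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for a NIR shader; not Mesa API. */
struct shader {
   int num_bool_instructions;
};

/* Stand-in for nir_opt_minimize_boolean: returns true only when it
 * actually rewrote something, mirroring how OPT()/NIR_PASS() report
 * per-pass progress. */
static bool minimize_booleans(struct shader *s)
{
   if (s->num_bool_instructions > 4) {
      s->num_bool_instructions -= 2;
      return true;
   }
   return false;
}

/* Stand-in for the nir_opt_cse / nir_copy_prop / nir_opt_dce cleanup. */
static void run_cleanup(struct shader *s)
{
   printf("cleanup ran, %d boolean instructions remain\n",
          s->num_bool_instructions);
}

int main(void)
{
   struct shader s = { .num_bool_instructions = 6 };

   /* Mirrors the brw_postprocess_nir hunk: only pay for the extra
    * cleanup passes when the minimization pass made progress. */
   if (minimize_booleans(&s))
      run_cleanup(&s);

   return 0;
}

The design point is that the extra cleanup work only runs on shaders where the new pass fired, so compile time stays flat for shaders with no reducible boolean logic.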