From 5476d1818300835d120c21bcd5bd5967f5b66c84 Mon Sep 17 00:00:00 2001
From: Rhys Perry
Date: Wed, 9 Oct 2019 15:03:45 +0100
Subject: [PATCH] nir/algebraic: add patterns for a >> #b << #b

Fixes compilation of a Battlefront 2 shader with ACO by removing VGPR
spilling. The reassociation makes it worse on LLVM though.

pipeline-db (ACO):
Totals from affected shaders:
SGPRS: 10704 -> 10688 (-0.15 %)
VGPRS: 18736 -> 18528 (-1.11 %)
Spilled SGPRs: 70 -> 70 (0.00 %)
Spilled VGPRs: 0 -> 0 (0.00 %)
Private memory VGPRs: 0 -> 0 (0.00 %)
Scratch size: 0 -> 0 (0.00 %) dwords per thread
Code Size: 909696 -> 885796 (-2.63 %) bytes
LDS: 225 -> 225 (0.00 %) blocks
Max Waves: 1115 -> 1129 (1.26 %)

pipeline-db (LLVM):
Totals from affected shaders:
SGPRS: 8472 -> 8424 (-0.57 %)
VGPRS: 14284 -> 14368 (0.59 %)
Spilled SGPRs: 0 -> 0 (0.00 %)
Spilled VGPRs: 442 -> 503 (13.80 %)
Private memory VGPRs: 0 -> 0 (0.00 %)
Scratch size: 268 -> 396 (47.76 %) dwords per thread
Code Size: 862568 -> 853028 (-1.11 %) bytes
LDS: 0 -> 0 (0.00 %) blocks
Max Waves: 971 -> 964 (-0.72 %)

Signed-off-by: Rhys Perry
Reviewed-by: Connor Abbott
Part-of:
---
 src/compiler/nir/nir_opt_algebraic.py | 16 ++++++++++++++++
 src/compiler/nir/nir_search_helpers.h | 26 ++++++++++++++++++++++++++
 2 files changed, 42 insertions(+)

diff --git a/src/compiler/nir/nir_opt_algebraic.py b/src/compiler/nir/nir_opt_algebraic.py
index 7b9a6a8..fd0007b 100644
--- a/src/compiler/nir/nir_opt_algebraic.py
+++ b/src/compiler/nir/nir_opt_algebraic.py
@@ -255,6 +255,22 @@ for s in [8, 16, 32, 64]:
        (ishr, a, ('imin', ('iadd', ('iand', b, mask), ('iand', c, mask)), s - 1))),
    ])
 
+# Optimize a pattern of address calculation created by DXVK where the offset is
+# divided by 4 and then multiplied by 4. This can be turned into an iand and the
+# additions before it can be reassociated to CSE the iand instruction.
+for log2 in range(1, 7): # powers of two from 2 to 64
+   v = 1 << log2
+   mask = 0xffffffff & ~(v - 1)
+   b_is_multiple = '#b(is_unsigned_multiple_of_{})'.format(v)
+
+   optimizations.extend([
+       # 'a >> #b << #b' -> 'a & ~((1 << #b) - 1)'
+       (('ishl@32', ('ushr@32', a, log2), log2), ('iand', a, mask)),
+
+       # Reassociate for improved CSE
+       (('iand@32', ('iadd@32', a, b_is_multiple), mask), ('iadd', ('iand', a, mask), b)),
+   ])
+
 optimizations.extend([
    # This is common for address calculations.  Reassociating may enable the
    # 'a<<c' to be CSE'd.  It also helps architectures that have an ISHLADD
diff --git a/src/compiler/nir/nir_search_helpers.h b/src/compiler/nir/nir_search_helpers.h
--- a/src/compiler/nir/nir_search_helpers.h
+++ b/src/compiler/nir/nir_search_helpers.h
@@ ... @@
+#define MULTIPLE(test)                                                  \
+static inline bool                                                      \
+is_unsigned_multiple_of_ ## test(UNUSED struct hash_table *ht,          \
+                                 nir_alu_instr *instr,                  \
+                                 unsigned src, unsigned num_components, \
+                                 const uint8_t *swizzle)                \
+{                                                                       \
+   if (!nir_src_is_const(instr->src[src].src))                          \
+      return false;                                                     \
+                                                                        \
+   for (unsigned i = 0; i < num_components; i++) {                      \
+      uint64_t val = nir_src_comp_as_uint(instr->src[src].src, swizzle[i]); \
+      if (val % test != 0)                                              \
+         return false;                                                  \
+   }                                                                    \
+                                                                        \
+   return true;                                                         \
+}
+
+MULTIPLE(2)
+MULTIPLE(4)
+MULTIPLE(8)
+MULTIPLE(16)
+MULTIPLE(32)
+MULTIPLE(64)
+
 static inline bool
 is_zero_to_one(UNUSED struct hash_table *ht, nir_alu_instr *instr,
                unsigned src, unsigned num_components,
-- 
2.7.4
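
A quick sanity check of the two rewrites (a sketch, not part of the commit):
both hold only under 32-bit wrapping arithmetic, and the reassociation holds
only because #b is constant and an unsigned multiple of the alignment, which
is exactly what is_unsigned_multiple_of_* guarantees; otherwise the mask
could clear low bits of b. The identities can be brute-forced in plain
Python; M32, the value ranges, and the assert harness below are illustrative
choices, not Mesa code:

M32 = 0xffffffff

for log2 in range(1, 7):                   # same alignments as the patch: 2..64
    v = 1 << log2
    mask = M32 & ~(v - 1)
    # cover small values and values near the 32-bit wraparound boundary
    for a in list(range(1024)) + list(range(M32 - 1023, M32 + 1)):
        # pattern 1: 'a >> #b << #b' == 'a & ~((1 << #b) - 1)'
        assert ((a >> log2) << log2) & M32 == a & mask
        for b in range(0, 1024, v):        # b is an unsigned multiple of v
            # pattern 2: '(a + b) & mask' == '(a & mask) + b' (mod 2^32)
            assert (a + b) & mask == ((a & mask) + b) & M32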