/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir_builder.h"

/**
 * Some ALU operations may not be supported in hardware at specific bit-sizes.
 * This pass allows implementations to selectively lower such operations to
 * a bit-size that is supported natively and then converts the result back to
 * the original bit-size.
 */
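
/*
 * A minimal illustrative sketch (not part of the original file): a driver
 * that only supports 32-bit integer ALU operations natively could request
 * lowering of all 8- and 16-bit ALU instructions with a callback along
 * these lines (lower_bit_size_cb is a hypothetical name).  Returning 0
 * from the callback leaves an instruction untouched; any other value is
 * the bit-size to lower to.
 *
 *    static unsigned
 *    lower_bit_size_cb(const nir_instr *instr, UNUSED void *data)
 *    {
 *       if (instr->type != nir_instr_type_alu)
 *          return 0;
 *
 *       unsigned sz = nir_instr_as_alu(instr)->dest.dest.ssa.bit_size;
 *       return (sz == 8 || sz == 16) ? 32 : 0;
 *    }
 *
 *    nir_lower_bit_size(shader, lower_bit_size_cb, NULL);
 */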

static nir_ssa_def *
convert_to_bit_size(nir_builder *bld, nir_ssa_def *src,
                    nir_alu_type type, unsigned bit_size)
{
   assert(src->bit_size < bit_size);

   /* Emit b2i32(a) directly rather than i2i32(b2i8(a)) or i2i32(b2i16(a)) */
   nir_alu_instr *alu = nir_src_as_alu_instr(nir_src_for_ssa(src));
   if ((type & (nir_type_uint | nir_type_int)) && bit_size == 32 &&
       alu && (alu->op == nir_op_b2i8 || alu->op == nir_op_b2i16)) {
      nir_alu_instr *instr = nir_alu_instr_create(bld->shader, nir_op_b2i32);
      nir_alu_src_copy(&instr->src[0], &alu->src[0], instr);
      return nir_builder_alu_instr_finish_and_insert(bld, instr);
   }

   return nir_convert_to_bit_size(bld, src, type, bit_size);
}

static void
lower_alu_instr(nir_builder *bld, nir_alu_instr *alu, unsigned bit_size)
{
   const nir_op op = alu->op;
   unsigned dst_bit_size = alu->dest.dest.ssa.bit_size;

   bld->cursor = nir_before_instr(&alu->instr);

   /* Convert each source to the requested bit-size */
   nir_ssa_def *srcs[NIR_MAX_VEC_COMPONENTS] = { NULL };
   for (unsigned i = 0; i < nir_op_infos[op].num_inputs; i++) {
      nir_ssa_def *src = nir_ssa_for_alu_src(bld, alu, i);

      nir_alu_type type = nir_op_infos[op].input_types[i];
      if (nir_alu_type_get_type_size(type) == 0)
         src = convert_to_bit_size(bld, src, type, bit_size);

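      /* Shift counts wrap at the bit-size of the value being shifted, so
       * after widening we mask the count to preserve the original
       * (smaller) wrapping behaviour; otherwise counts >= dst_bit_size
       * would shift differently at the wider size.
       */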
      if (i == 1 && (op == nir_op_ishl || op == nir_op_ishr || op == nir_op_ushr)) {
         assert(util_is_power_of_two_nonzero(dst_bit_size));
         src = nir_iand(bld, src, nir_imm_int(bld, dst_bit_size - 1));
      }

      srcs[i] = src;
   }

   /* Emit the lowered ALU instruction */
   nir_ssa_def *lowered_dst = NULL;
   if (op == nir_op_imul_high || op == nir_op_umul_high) {
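      /* Multiply in the wider type and shift the high half of the product
       * back down; this only works if the lowered bit-size is at least
       * twice the original.
       */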
      assert(dst_bit_size * 2 <= bit_size);
      lowered_dst = nir_imul(bld, srcs[0], srcs[1]);
      if (nir_op_infos[op].output_type & nir_type_uint)
         lowered_dst = nir_ushr_imm(bld, lowered_dst, dst_bit_size);
      else
         lowered_dst = nir_ishr_imm(bld, lowered_dst, dst_bit_size);
   } else if (op == nir_op_iadd_sat || op == nir_op_isub_sat || op == nir_op_uadd_sat ||
              op == nir_op_uadd_carry) {
      if (op == nir_op_isub_sat)
         lowered_dst = nir_isub(bld, srcs[0], srcs[1]);
      else
         lowered_dst = nir_iadd(bld, srcs[0], srcs[1]);

      /* The add_sat and sub_sat instructions need to clamp the result to the
       * range of the original type.
       */
      if (op == nir_op_iadd_sat || op == nir_op_isub_sat) {
         const int64_t int_max = u_intN_max(dst_bit_size);
         const int64_t int_min = u_intN_min(dst_bit_size);

         lowered_dst = nir_iclamp(bld, lowered_dst,
                                  nir_imm_intN_t(bld, int_min, bit_size),
                                  nir_imm_intN_t(bld, int_max, bit_size));
      } else if (op == nir_op_uadd_sat) {
         const uint64_t uint_max = u_uintN_max(dst_bit_size);

         lowered_dst = nir_umin(bld, lowered_dst,
                                nir_imm_intN_t(bld, uint_max, bit_size));
      } else {
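         /* For uadd_carry, the carry out of the original add is simply bit
          * dst_bit_size of the widened sum.
          */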
         assert(op == nir_op_uadd_carry);
         lowered_dst = nir_ushr_imm(bld, lowered_dst, dst_bit_size);
      }
   } else {
      lowered_dst = nir_build_alu_src_arr(bld, op, srcs);
   }

   /* Convert result back to the original bit-size */
   if (nir_alu_type_get_type_size(nir_op_infos[op].output_type) == 0 &&
       dst_bit_size != bit_size) {
      nir_alu_type type = nir_op_infos[op].output_type;
      nir_ssa_def *dst = nir_convert_to_bit_size(bld, lowered_dst, type, dst_bit_size);
      nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, dst);
   } else {
      nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, lowered_dst);
   }
}

static void
lower_intrinsic_instr(nir_builder *b, nir_intrinsic_instr *intrin,
                      unsigned bit_size)
{
   switch (intrin->intrinsic) {
   case nir_intrinsic_read_invocation:
   case nir_intrinsic_read_first_invocation:
   case nir_intrinsic_vote_feq:
   case nir_intrinsic_vote_ieq:
   case nir_intrinsic_shuffle:
   case nir_intrinsic_shuffle_xor:
   case nir_intrinsic_shuffle_up:
   case nir_intrinsic_shuffle_down:
   case nir_intrinsic_quad_broadcast:
   case nir_intrinsic_quad_swap_horizontal:
   case nir_intrinsic_quad_swap_vertical:
   case nir_intrinsic_quad_swap_diagonal:
   case nir_intrinsic_reduce:
   case nir_intrinsic_inclusive_scan:
   case nir_intrinsic_exclusive_scan: {
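      /* These are lowered by cloning the intrinsic, widening its source
       * (and, except for the votes, its destination) and then narrowing
       * the result back to the original bit-size.
       */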
      assert(intrin->src[0].is_ssa && intrin->dest.is_ssa);
      const unsigned old_bit_size = intrin->dest.ssa.bit_size;
      assert(old_bit_size < bit_size);

      nir_alu_type type = nir_type_uint;
      if (nir_intrinsic_has_reduction_op(intrin))
         type = nir_op_infos[nir_intrinsic_reduction_op(intrin)].input_types[0];
      else if (intrin->intrinsic == nir_intrinsic_vote_feq)
         type = nir_type_float;

      b->cursor = nir_before_instr(&intrin->instr);
      nir_intrinsic_instr *new_intrin =
         nir_instr_as_intrinsic(nir_instr_clone(b->shader, &intrin->instr));

      nir_ssa_def *new_src = nir_convert_to_bit_size(b, intrin->src[0].ssa,
                                                     type, bit_size);
      new_intrin->src[0] = nir_src_for_ssa(new_src);

      if (intrin->intrinsic == nir_intrinsic_vote_feq ||
          intrin->intrinsic == nir_intrinsic_vote_ieq) {
         /* These return a Boolean; it's always 1-bit */
         assert(new_intrin->dest.ssa.bit_size == 1);
      } else {
         /* These return the same bit size as the source, so we widen the
          * destination here and emit a down-cast afterwards.
          */
         assert(intrin->src[0].ssa->bit_size == intrin->dest.ssa.bit_size);
         new_intrin->dest.ssa.bit_size = bit_size;
      }

      nir_builder_instr_insert(b, &new_intrin->instr);

      nir_ssa_def *res = &new_intrin->dest.ssa;
      if (intrin->intrinsic == nir_intrinsic_exclusive_scan) {
         /* For exclusive scan, we have to be careful because the identity
          * value for the higher bit size may get added into the mix by
          * disabled channels.  For some cases (imin/imax in particular),
          * this value won't convert to the right identity value when we
          * down-cast, so we have to clamp it.
          */
         switch (nir_intrinsic_reduction_op(intrin)) {
         case nir_op_imin: {
            int64_t int_max = (1ull << (old_bit_size - 1)) - 1;
            res = nir_imin(b, res, nir_imm_intN_t(b, int_max, bit_size));
            break;
         }
         case nir_op_imax: {
            int64_t int_min = -(int64_t)(1ull << (old_bit_size - 1));
            res = nir_imax(b, res, nir_imm_intN_t(b, int_min, bit_size));
            break;
         }
         default:
            break;
         }
      }

      if (intrin->intrinsic != nir_intrinsic_vote_feq &&
          intrin->intrinsic != nir_intrinsic_vote_ieq)
         res = nir_u2uN(b, res, old_bit_size);

      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, res);
      break;
   }

   default:
      unreachable("Unsupported instruction");
   }
}

static void
lower_phi_instr(nir_builder *b, nir_phi_instr *phi, unsigned bit_size,
                nir_phi_instr *last_phi)
{
   assert(phi->dest.is_ssa);
   unsigned old_bit_size = phi->dest.ssa.bit_size;
   assert(old_bit_size < bit_size);

   nir_foreach_phi_src(src, phi) {
      b->cursor = nir_after_block_before_jump(src->pred);
      assert(src->src.is_ssa);
      nir_ssa_def *new_src = nir_u2uN(b, src->src.ssa, bit_size);

      nir_instr_rewrite_src(&phi->instr, &src->src, nir_src_for_ssa(new_src));
   }

   phi->dest.ssa.bit_size = bit_size;

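   /* Phis must come first in their block, so the conversion back to the
    * original bit-size has to be emitted after the last phi rather than
    * immediately after this one.
    */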
   b->cursor = nir_after_instr(&last_phi->instr);

   nir_ssa_def *new_dest = nir_u2uN(b, &phi->dest.ssa, old_bit_size);
   nir_ssa_def_rewrite_uses_after(&phi->dest.ssa, new_dest,
                                  new_dest->parent_instr);
}

static bool
lower_impl(nir_function_impl *impl,
           nir_lower_bit_size_callback callback,
           void *callback_data)
{
   nir_builder b = nir_builder_create(impl);
   bool progress = false;

   nir_foreach_block(block, impl) {
      /* Stash this so we can rewrite phi destinations quickly. */
      nir_phi_instr *last_phi = nir_block_last_phi_instr(block);

      nir_foreach_instr_safe(instr, block) {
         unsigned lower_bit_size = callback(instr, callback_data);
         if (lower_bit_size == 0)
            continue;

         switch (instr->type) {
         case nir_instr_type_alu:
            lower_alu_instr(&b, nir_instr_as_alu(instr), lower_bit_size);
            break;

         case nir_instr_type_intrinsic:
            lower_intrinsic_instr(&b, nir_instr_as_intrinsic(instr),
                                  lower_bit_size);
            break;

         case nir_instr_type_phi:
            lower_phi_instr(&b, nir_instr_as_phi(instr),
                            lower_bit_size, last_phi);
            break;

         default:
            unreachable("Unsupported instruction type");
         }
         progress = true;
      }
   }

   if (progress) {
      nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance);
   } else {
      nir_metadata_preserve(impl, nir_metadata_all);
   }

   return progress;
}

bool
nir_lower_bit_size(nir_shader *shader,
                   nir_lower_bit_size_callback callback,
                   void *callback_data)
{
   bool progress = false;

   nir_foreach_function_impl(impl, shader) {
      progress |= lower_impl(impl, callback, callback_data);
   }

   return progress;
}

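/* Split a 64-bit phi into two 32-bit phis (the low and high halves of each
 * source) and re-assemble the result with a pack emitted after the phis.
 */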
static void
split_phi(nir_builder *b, nir_phi_instr *phi)
{
   nir_phi_instr *lowered[2] = {
      nir_phi_instr_create(b->shader),
      nir_phi_instr_create(b->shader)
   };
   int num_components = phi->dest.ssa.num_components;
   assert(phi->dest.ssa.bit_size == 64);

   nir_foreach_phi_src(src, phi) {
      assert(num_components == src->src.ssa->num_components);

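      /* For a phi source, nir_before_src places the cursor at the end of
       * the corresponding predecessor block, so the unpacks are emitted
       * where the source value is available.
       */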
      b->cursor = nir_before_src(&src->src);

      nir_ssa_def *x = nir_unpack_64_2x32_split_x(b, src->src.ssa);
      nir_ssa_def *y = nir_unpack_64_2x32_split_y(b, src->src.ssa);

      nir_phi_instr_add_src(lowered[0], src->pred, nir_src_for_ssa(x));
      nir_phi_instr_add_src(lowered[1], src->pred, nir_src_for_ssa(y));
   }

   nir_ssa_dest_init(&lowered[0]->instr, &lowered[0]->dest, num_components,
                     32);
   nir_ssa_dest_init(&lowered[1]->instr, &lowered[1]->dest, num_components,
                     32);

   b->cursor = nir_before_instr(&phi->instr);
   nir_builder_instr_insert(b, &lowered[0]->instr);
   nir_builder_instr_insert(b, &lowered[1]->instr);

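   /* The pack may not be interleaved with the phis, so emit it after the
    * last phi in the block.
    */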
   b->cursor = nir_after_phis(nir_cursor_current_block(b->cursor));
   nir_ssa_def *merged = nir_pack_64_2x32_split(b, &lowered[0]->dest.ssa, &lowered[1]->dest.ssa);
   nir_ssa_def_rewrite_uses(&phi->dest.ssa, merged);
   nir_instr_remove(&phi->instr);
}

static bool
lower_64bit_phi_instr(nir_builder *b, nir_instr *instr, UNUSED void *cb_data)
{
   if (instr->type != nir_instr_type_phi)
      return false;

   nir_phi_instr *phi = nir_instr_as_phi(instr);
   assert(phi->dest.is_ssa);

   if (phi->dest.ssa.bit_size <= 32)
      return false;

   split_phi(b, phi);
   return true;
}

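/* Lower every phi wider than 32 bits; in practice these are 64-bit phis
 * (split_phi asserts as much), which become pairs of 32-bit phis.
 */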
bool
nir_lower_64bit_phis(nir_shader *shader)
{
   return nir_shader_instructions_pass(shader, lower_64bit_phi_instr,
                                       nir_metadata_block_index |
                                       nir_metadata_dominance,
                                       NULL);
}