/**************************************************************************
 *
 * Copyright 2019 Red Hat.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **************************************************************************/

#include "lp_bld_nir.h"
#include "lp_bld_arit.h"
#include "lp_bld_bitarit.h"
#include "lp_bld_const.h"
#include "lp_bld_conv.h"
#include "lp_bld_gather.h"
#include "lp_bld_logic.h"
#include "lp_bld_quad.h"
#include "lp_bld_flow.h"
#include "lp_bld_intr.h"
#include "lp_bld_struct.h"
#include "lp_bld_debug.h"
#include "lp_bld_printf.h"
#include "nir_deref.h"
#include "nir_search_helpers.h"


// Doing AOS (and linear) codegen?
static bool
is_aos(const struct lp_build_nir_context *bld_base)
{
   // AOS is used for vectors of uint8[16]
   return bld_base->base.type.length == 16 && bld_base->base.type.width == 8;
}


static void
visit_cf_list(struct lp_build_nir_context *bld_base,
              struct exec_list *list);

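/**
 * Bitcast a value to the gallivm vector type matching the given NIR alu
 * type and bit size.  Types the switch does not handle are returned
 * unchanged.
 */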
static LLVMValueRef
cast_type(struct lp_build_nir_context *bld_base, LLVMValueRef val,
          nir_alu_type alu_type, unsigned bit_size)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   switch (alu_type) {
   case nir_type_float:
      switch (bit_size) {
      case 16:
         return LLVMBuildBitCast(builder, val, bld_base->half_bld.vec_type, "");
      case 32:
         return LLVMBuildBitCast(builder, val, bld_base->base.vec_type, "");
      case 64:
         return LLVMBuildBitCast(builder, val, bld_base->dbl_bld.vec_type, "");
      default:
         assert(0);
         break;
      }
      break;
   case nir_type_int:
      switch (bit_size) {
      case 8:
         return LLVMBuildBitCast(builder, val, bld_base->int8_bld.vec_type, "");
      case 16:
         return LLVMBuildBitCast(builder, val, bld_base->int16_bld.vec_type, "");
      case 32:
         return LLVMBuildBitCast(builder, val, bld_base->int_bld.vec_type, "");
      case 64:
         return LLVMBuildBitCast(builder, val, bld_base->int64_bld.vec_type, "");
      default:
         assert(0);
         break;
      }
      break;
   case nir_type_uint:
      switch (bit_size) {
      case 8:
         return LLVMBuildBitCast(builder, val, bld_base->uint8_bld.vec_type, "");
      case 16:
         return LLVMBuildBitCast(builder, val, bld_base->uint16_bld.vec_type, "");
      case 1:
      case 32:
         return LLVMBuildBitCast(builder, val, bld_base->uint_bld.vec_type, "");
      case 64:
         return LLVMBuildBitCast(builder, val, bld_base->uint64_bld.vec_type, "");
      default:
         assert(0);
         break;
      }
      break;
   case nir_type_uint32:
      return LLVMBuildBitCast(builder, val, bld_base->uint_bld.vec_type, "");
   default:
      return val;
   }
   return NULL;
}

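/**
 * Map a GLSL sampler dimension (plus array flag) to the corresponding
 * pipe texture target.  Subpass inputs are treated as 2D arrays.
 */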
static unsigned
glsl_sampler_to_pipe(int sampler_dim, bool is_array)
{
   unsigned pipe_target = PIPE_BUFFER;
   switch (sampler_dim) {
   case GLSL_SAMPLER_DIM_1D:
      pipe_target = is_array ? PIPE_TEXTURE_1D_ARRAY : PIPE_TEXTURE_1D;
      break;
   case GLSL_SAMPLER_DIM_2D:
      pipe_target = is_array ? PIPE_TEXTURE_2D_ARRAY : PIPE_TEXTURE_2D;
      break;
   case GLSL_SAMPLER_DIM_SUBPASS:
   case GLSL_SAMPLER_DIM_SUBPASS_MS:
      pipe_target = PIPE_TEXTURE_2D_ARRAY;
      break;
   case GLSL_SAMPLER_DIM_3D:
      pipe_target = PIPE_TEXTURE_3D;
      break;
   case GLSL_SAMPLER_DIM_MS:
      pipe_target = is_array ? PIPE_TEXTURE_2D_ARRAY : PIPE_TEXTURE_2D;
      break;
   case GLSL_SAMPLER_DIM_CUBE:
      pipe_target = is_array ? PIPE_TEXTURE_CUBE_ARRAY : PIPE_TEXTURE_CUBE;
      break;
   case GLSL_SAMPLER_DIM_RECT:
      pipe_target = PIPE_TEXTURE_RECT;
      break;
   case GLSL_SAMPLER_DIM_BUF:
      pipe_target = PIPE_BUFFER;
      break;
   default:
      break;
   }
   return pipe_target;
}


static LLVMValueRef get_ssa_src(struct lp_build_nir_context *bld_base, nir_ssa_def *ssa)
{
   return bld_base->ssa_defs[ssa->index];
}


static LLVMValueRef
get_src(struct lp_build_nir_context *bld_base, nir_src src);

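/**
 * Load a value from a NIR register source.  The register's storage was
 * allocated up front and is found via the bld_base->regs hash table;
 * indirect addressing is handled by the backend's load_reg hook.
 */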
static LLVMValueRef
get_reg_src(struct lp_build_nir_context *bld_base, nir_reg_src src)
{
   struct hash_entry *entry = _mesa_hash_table_search(bld_base->regs, src.reg);
   LLVMValueRef reg_storage = (LLVMValueRef)entry->data;
   struct lp_build_context *reg_bld = get_int_bld(bld_base, true, src.reg->bit_size);
   LLVMValueRef indir_src = NULL;
   if (src.indirect)
      indir_src = get_src(bld_base, *src.indirect);
   return bld_base->load_reg(bld_base, reg_bld, &src, indir_src, reg_storage);
}


static LLVMValueRef
get_src(struct lp_build_nir_context *bld_base, nir_src src)
{
   if (src.is_ssa)
      return get_ssa_src(bld_base, src.ssa);
   else
      return get_reg_src(bld_base, src.reg);
}


static void
assign_ssa(struct lp_build_nir_context *bld_base, int idx, LLVMValueRef ptr)
{
   bld_base->ssa_defs[idx] = ptr;
}


static void
assign_ssa_dest(struct lp_build_nir_context *bld_base, const nir_ssa_def *ssa,
                LLVMValueRef vals[NIR_MAX_VEC_COMPONENTS])
{
   if (ssa->num_components == 1 || is_aos(bld_base)) {
      assign_ssa(bld_base, ssa->index, vals[0]);
   } else {
      assign_ssa(bld_base, ssa->index,
             lp_nir_array_build_gather_values(bld_base->base.gallivm->builder,
                                              vals, ssa->num_components));
   }
}


static void
assign_reg(struct lp_build_nir_context *bld_base, const nir_reg_dest *reg,
           unsigned write_mask,
           LLVMValueRef vals[NIR_MAX_VEC_COMPONENTS])
{
   assert(write_mask != 0x0);
   struct hash_entry *entry = _mesa_hash_table_search(bld_base->regs, reg->reg);
   LLVMValueRef reg_storage = (LLVMValueRef)entry->data;
   struct lp_build_context *reg_bld = get_int_bld(bld_base, true, reg->reg->bit_size);
   LLVMValueRef indir_src = NULL;
   if (reg->indirect)
      indir_src = get_src(bld_base, *reg->indirect);
   bld_base->store_reg(bld_base, reg_bld, reg, write_mask,
                       indir_src, reg_storage, vals);
}


static void
assign_dest(struct lp_build_nir_context *bld_base,
            const nir_dest *dest,
            LLVMValueRef vals[NIR_MAX_VEC_COMPONENTS])
{
   if (dest->is_ssa)
      assign_ssa_dest(bld_base, &dest->ssa, vals);
   else
      assign_reg(bld_base, &dest->reg, 0xf, vals);
}


static void
assign_alu_dest(struct lp_build_nir_context *bld_base,
                const nir_alu_dest *dest,
                LLVMValueRef vals[NIR_MAX_VEC_COMPONENTS])
{
   if (dest->dest.is_ssa)
      assign_ssa_dest(bld_base, &dest->dest.ssa, vals);
   else
      assign_reg(bld_base, &dest->dest.reg, dest->write_mask, vals);
}

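/**
 * Compare an integer value against zero and return the result as a
 * 32-bit boolean mask (~0 for true, 0 for false), sign-extending 16-bit
 * and truncating 64-bit comparison results to 32 bits.
 */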
static LLVMValueRef
int_to_bool32(struct lp_build_nir_context *bld_base,
              uint32_t src_bit_size,
              bool is_unsigned,
              LLVMValueRef val)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   struct lp_build_context *int_bld =
      get_int_bld(bld_base, is_unsigned, src_bit_size);
   LLVMValueRef result = lp_build_compare(bld_base->base.gallivm,
                                          int_bld->type, PIPE_FUNC_NOTEQUAL,
                                          val, int_bld->zero);
   if (src_bit_size == 16)
      result = LLVMBuildSExt(builder, result, bld_base->int_bld.vec_type, "");
   else if (src_bit_size == 64)
      result = LLVMBuildTrunc(builder, result, bld_base->int_bld.vec_type, "");
   return result;
}


static LLVMValueRef
flt_to_bool32(struct lp_build_nir_context *bld_base,
              uint32_t src_bit_size,
              LLVMValueRef val)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   struct lp_build_context *flt_bld = get_flt_bld(bld_base, src_bit_size);
   LLVMValueRef result =
      lp_build_cmp(flt_bld, PIPE_FUNC_NOTEQUAL, val, flt_bld->zero);
   if (src_bit_size == 64)
      result = LLVMBuildTrunc(builder, result, bld_base->int_bld.vec_type, "");
   if (src_bit_size == 16)
      result = LLVMBuildSExt(builder, result, bld_base->int_bld.vec_type, "");
   return result;
}

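/**
 * Emit a float comparison returning a 32-bit boolean mask.  Every
 * function except "not equal" uses the ordered form, so NaN operands
 * compare false; unordered "not equal" compares true for NaN.
 */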
static LLVMValueRef
fcmp32(struct lp_build_nir_context *bld_base,
       enum pipe_compare_func compare,
       uint32_t src_bit_size,
       LLVMValueRef src[NIR_MAX_VEC_COMPONENTS])
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   struct lp_build_context *flt_bld = get_flt_bld(bld_base, src_bit_size);
   LLVMValueRef result;

   if (compare != PIPE_FUNC_NOTEQUAL)
      result = lp_build_cmp_ordered(flt_bld, compare, src[0], src[1]);
   else
      result = lp_build_cmp(flt_bld, compare, src[0], src[1]);
   if (src_bit_size == 64)
      result = LLVMBuildTrunc(builder, result, bld_base->int_bld.vec_type, "");
   else if (src_bit_size == 16)
      result = LLVMBuildSExt(builder, result, bld_base->int_bld.vec_type, "");
   return result;
}


static LLVMValueRef
icmp32(struct lp_build_nir_context *bld_base,
       enum pipe_compare_func compare,
       bool is_unsigned,
       uint32_t src_bit_size,
       LLVMValueRef src[NIR_MAX_VEC_COMPONENTS])
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   struct lp_build_context *i_bld =
      get_int_bld(bld_base, is_unsigned, src_bit_size);
   LLVMValueRef result = lp_build_cmp(i_bld, compare, src[0], src[1]);
   if (src_bit_size < 32)
      result = LLVMBuildSExt(builder, result, bld_base->int_bld.vec_type, "");
   else if (src_bit_size == 64)
      result = LLVMBuildTrunc(builder, result, bld_base->int_bld.vec_type, "");
   return result;
}


/**
 * Get a source register value for an ALU instruction.
 * This is where swizzles are handled.  There should be no negation
 * or absolute value modifiers.
 * num_components indicates the number of components needed in the
 * returned array or vector.
 */
static LLVMValueRef
get_alu_src(struct lp_build_nir_context *bld_base,
            nir_alu_src src,
            unsigned num_components)
{
   assert(!src.negate);
   assert(!src.abs);
   assert(num_components >= 1);
   assert(num_components <= 4);

   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   const unsigned src_components = nir_src_num_components(src.src);
   assert(src_components > 0);
   LLVMValueRef value = get_src(bld_base, src.src);
   assert(value);

   /* check if swizzling needed for the src vector */
   bool need_swizzle = false;
   for (unsigned i = 0; i < src_components; ++i) {
      if (src.swizzle[i] != i) {
         need_swizzle = true;
         break;
      }
   }

   if (is_aos(bld_base) && !need_swizzle) {
      return value;
   }

   if (need_swizzle || num_components != src_components) {
      if (is_aos(bld_base) && need_swizzle) {
         // Handle swizzle for AOS
         assert(LLVMGetTypeKind(LLVMTypeOf(value)) == LLVMVectorTypeKind);

         // swizzle vector of ((r,g,b,a), (r,g,b,a), (r,g,b,a), (r,g,b,a))
         assert(bld_base->base.type.width == 8);
         assert(bld_base->base.type.length == 16);

         // Do our own swizzle here since lp_build_swizzle_aos_n() does
         // not do what we want.
         // Ex: value = {r0,g0,b0,a0, r1,g1,b1,a1, r2,g2,b2,a2, r3,g3,b3,a3}.
         // aos swizzle = {2,1,0,3}  // swap red/blue
         // shuffles = {2,1,0,3, 6,5,4,7, 10,9,8,11, 14,13,12,15}
         // result = {b0,g0,r0,a0, b1,g1,r1,a1, b2,g2,r2,a2, b3,g3,r3,a3}.
         LLVMValueRef shuffles[LP_MAX_VECTOR_WIDTH];
         for (unsigned i = 0; i < 16; i++) {
            unsigned chan = i % 4;
            /* apply src register swizzle */
            if (chan < num_components) {
               chan = src.swizzle[chan];
            } else {
               chan = src.swizzle[0];
            }
            /* apply aos swizzle */
            chan = lp_nir_aos_swizzle(bld_base, chan);
            shuffles[i] = lp_build_const_int32(gallivm, (i & ~3) + chan);
         }
         value = LLVMBuildShuffleVector(builder, value,
                                        LLVMGetUndef(LLVMTypeOf(value)),
                                        LLVMConstVector(shuffles, 16), "");
      } else if (src_components > 1 && num_components == 1) {
         value = LLVMBuildExtractValue(gallivm->builder, value,
                                       src.swizzle[0], "");
      } else if (src_components == 1 && num_components > 1) {
         LLVMValueRef values[] = {value, value, value, value,
                                  value, value, value, value,
                                  value, value, value, value,
                                  value, value, value, value};
         value = lp_nir_array_build_gather_values(builder, values, num_components);
      } else {
         LLVMValueRef arr = LLVMGetUndef(LLVMArrayType(LLVMTypeOf(LLVMBuildExtractValue(builder, value, 0, "")), num_components));
         for (unsigned i = 0; i < num_components; i++)
            arr = LLVMBuildInsertValue(builder, arr, LLVMBuildExtractValue(builder, value, src.swizzle[i], ""), i, "");
         value = arr;
      }
   }

   return value;
}

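/**
 * Convert a 32-bit boolean mask to a float: AND the mask with the bit
 * pattern of 1.0, so true lanes become 1.0 and false lanes 0.0, then
 * convert to the requested float bit size.
 */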
static LLVMValueRef
emit_b2f(struct lp_build_nir_context *bld_base,
         LLVMValueRef src0,
         unsigned bitsize)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   LLVMValueRef result =
      LLVMBuildAnd(builder, cast_type(bld_base, src0, nir_type_int, 32),
                   LLVMBuildBitCast(builder,
                                    lp_build_const_vec(bld_base->base.gallivm,
                                                       bld_base->base.type,
                                                       1.0),
                                    bld_base->int_bld.vec_type, ""),
                   "");
   result = LLVMBuildBitCast(builder, result, bld_base->base.vec_type, "");
   switch (bitsize) {
   case 16:
      result = LLVMBuildFPTrunc(builder, result,
                                bld_base->half_bld.vec_type, "");
      break;
   case 32:
      break;
   case 64:
      result = LLVMBuildFPExt(builder, result,
                              bld_base->dbl_bld.vec_type, "");
      break;
   default:
      unreachable("unsupported bit size.");
   }
   return result;
}


static LLVMValueRef
emit_b2i(struct lp_build_nir_context *bld_base,
         LLVMValueRef src0,
         unsigned bitsize)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   LLVMValueRef result = LLVMBuildAnd(builder,
                          cast_type(bld_base, src0, nir_type_int, 32),
                          lp_build_const_int_vec(bld_base->base.gallivm,
                                                 bld_base->base.type, 1), "");
   switch (bitsize) {
   case 8:
      return LLVMBuildTrunc(builder, result, bld_base->int8_bld.vec_type, "");
   case 16:
      return LLVMBuildTrunc(builder, result, bld_base->int16_bld.vec_type, "");
   case 32:
      return result;
   case 64:
      return LLVMBuildZExt(builder, result, bld_base->int64_bld.vec_type, "");
   default:
      unreachable("unsupported bit size.");
   }
}


static LLVMValueRef
emit_b32csel(struct lp_build_nir_context *bld_base,
             unsigned src_bit_size[NIR_MAX_VEC_COMPONENTS],
             LLVMValueRef src[NIR_MAX_VEC_COMPONENTS])
{
   LLVMValueRef sel = cast_type(bld_base, src[0], nir_type_int, 32);
   LLVMValueRef v = lp_build_compare(bld_base->base.gallivm, bld_base->int_bld.type, PIPE_FUNC_NOTEQUAL, sel, bld_base->int_bld.zero);
   struct lp_build_context *bld = get_int_bld(bld_base, false, src_bit_size[1]);
   return lp_build_select(bld, v, src[1], src[2]);
}

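/**
 * Select the low (hi == false) or high (hi == true) 32-bit halves of a
 * vector of 64-bit values by bitcasting to a twice-as-long i32 vector
 * and shuffling out every other element.
 */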
static LLVMValueRef
split_64bit(struct lp_build_nir_context *bld_base,
            LLVMValueRef src,
            bool hi)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMValueRef shuffles[LP_MAX_VECTOR_WIDTH/32];
   LLVMValueRef shuffles2[LP_MAX_VECTOR_WIDTH/32];
   int len = bld_base->base.type.length * 2;
   for (unsigned i = 0; i < bld_base->base.type.length; i++) {
#if UTIL_ARCH_LITTLE_ENDIAN
      shuffles[i] = lp_build_const_int32(gallivm, i * 2);
      shuffles2[i] = lp_build_const_int32(gallivm, (i * 2) + 1);
#else
      shuffles[i] = lp_build_const_int32(gallivm, (i * 2) + 1);
      shuffles2[i] = lp_build_const_int32(gallivm, (i * 2));
#endif
   }

   src = LLVMBuildBitCast(gallivm->builder, src,
           LLVMVectorType(LLVMInt32TypeInContext(gallivm->context), len), "");
   return LLVMBuildShuffleVector(gallivm->builder, src,
                                 LLVMGetUndef(LLVMTypeOf(src)),
                                 LLVMConstVector(hi ? shuffles2 : shuffles,
                                                 bld_base->base.type.length),
                                 "");
}


static LLVMValueRef
merge_64bit(struct lp_build_nir_context *bld_base,
            LLVMValueRef input,
            LLVMValueRef input2)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   int i;
   LLVMValueRef shuffles[2 * (LP_MAX_VECTOR_WIDTH/32)];
   int len = bld_base->base.type.length * 2;
   assert(len <= (2 * (LP_MAX_VECTOR_WIDTH/32)));

   for (i = 0; i < bld_base->base.type.length * 2; i+=2) {
#if UTIL_ARCH_LITTLE_ENDIAN
      shuffles[i] = lp_build_const_int32(gallivm, i / 2);
      shuffles[i + 1] = lp_build_const_int32(gallivm, i / 2 + bld_base->base.type.length);
#else
      shuffles[i] = lp_build_const_int32(gallivm, i / 2 + bld_base->base.type.length);
      shuffles[i + 1] = lp_build_const_int32(gallivm, i / 2);
#endif
   }
   return LLVMBuildShuffleVector(builder, input, input2, LLVMConstVector(shuffles, len), "");
}


static LLVMValueRef
split_16bit(struct lp_build_nir_context *bld_base,
            LLVMValueRef src,
            bool hi)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMValueRef shuffles[LP_MAX_VECTOR_WIDTH/32];
   LLVMValueRef shuffles2[LP_MAX_VECTOR_WIDTH/32];
   int len = bld_base->base.type.length * 2;
   for (unsigned i = 0; i < bld_base->base.type.length; i++) {
#if UTIL_ARCH_LITTLE_ENDIAN
      shuffles[i] = lp_build_const_int32(gallivm, i * 2);
      shuffles2[i] = lp_build_const_int32(gallivm, (i * 2) + 1);
#else
      shuffles[i] = lp_build_const_int32(gallivm, (i * 2) + 1);
      shuffles2[i] = lp_build_const_int32(gallivm, (i * 2));
#endif
   }

   src = LLVMBuildBitCast(gallivm->builder, src, LLVMVectorType(LLVMInt16TypeInContext(gallivm->context), len), "");
   return LLVMBuildShuffleVector(gallivm->builder, src,
                                 LLVMGetUndef(LLVMTypeOf(src)),
                                 LLVMConstVector(hi ? shuffles2 : shuffles,
                                                 bld_base->base.type.length),
                                 "");
}


static LLVMValueRef
merge_16bit(struct lp_build_nir_context *bld_base,
            LLVMValueRef input,
            LLVMValueRef input2)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   int i;
   LLVMValueRef shuffles[2 * (LP_MAX_VECTOR_WIDTH/32)];
   int len = bld_base->int16_bld.type.length * 2;
   assert(len <= (2 * (LP_MAX_VECTOR_WIDTH/32)));

   for (i = 0; i < bld_base->int_bld.type.length * 2; i+=2) {
#if UTIL_ARCH_LITTLE_ENDIAN
      shuffles[i] = lp_build_const_int32(gallivm, i / 2);
      shuffles[i + 1] = lp_build_const_int32(gallivm, i / 2 + bld_base->base.type.length);
#else
      shuffles[i] = lp_build_const_int32(gallivm, i / 2 + bld_base->base.type.length);
      shuffles[i + 1] = lp_build_const_int32(gallivm, i / 2);
#endif
   }
   return LLVMBuildShuffleVector(builder, input, input2, LLVMConstVector(shuffles, len), "");
}

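/**
 * Sanitize a divisor for signed division: INT_MIN / -1 overflows (and
 * raises SIGFPE on x86), so lanes where the numerator is the minimum
 * value and the divisor is -1 get their divisor replaced with 1.
 */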
static LLVMValueRef
get_signed_divisor(struct gallivm_state *gallivm,
                   struct lp_build_context *int_bld,
                   struct lp_build_context *mask_bld,
                   int src_bit_size,
                   LLVMValueRef src, LLVMValueRef divisor)
{
   LLVMBuilderRef builder = gallivm->builder;
   /* For signed divides, SIGFPE can occur if the numerator is INT_MIN
      and the divisor is -1. */
   /* set mask if numerator == INT_MIN */
   long long min_val;
   switch (src_bit_size) {
   case 8:
      min_val = INT8_MIN;
      break;
   case 16:
      min_val = INT16_MIN;
      break;
   default:
   case 32:
      min_val = INT_MIN;
      break;
   case 64:
      min_val = INT64_MIN;
      break;
   }
   LLVMValueRef div_mask2 = lp_build_cmp(mask_bld, PIPE_FUNC_EQUAL, src,
                                         lp_build_const_int_vec(gallivm, int_bld->type, min_val));
   /* set another mask if divisor is -1 */
   LLVMValueRef div_mask3 = lp_build_cmp(mask_bld, PIPE_FUNC_EQUAL, divisor,
                                         lp_build_const_int_vec(gallivm, int_bld->type, -1));
   div_mask2 = LLVMBuildAnd(builder, div_mask2, div_mask3, "");

   divisor = lp_build_select(mask_bld, div_mask2, int_bld->one, divisor);
   return divisor;
}

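/**
 * Integer division with well-defined behaviour for zero divisors: zero
 * divisors are patched before dividing, and the affected lanes yield
 * ~0 for unsigned division and 0 for signed division.
 */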
static LLVMValueRef
do_int_divide(struct lp_build_nir_context *bld_base,
              bool is_unsigned, unsigned src_bit_size,
              LLVMValueRef src, LLVMValueRef src2)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   struct lp_build_context *int_bld = get_int_bld(bld_base, is_unsigned, src_bit_size);
   struct lp_build_context *mask_bld = get_int_bld(bld_base, true, src_bit_size);

   /* avoid divide by 0: convert a zero divisor to -1 */
   LLVMValueRef div_mask = lp_build_cmp(mask_bld, PIPE_FUNC_EQUAL, src2,
                                        mask_bld->zero);

   LLVMValueRef divisor = LLVMBuildOr(builder, div_mask, src2, "");
   if (!is_unsigned) {
      divisor = get_signed_divisor(gallivm, int_bld, mask_bld,
                                   src_bit_size, src, divisor);
   }
   LLVMValueRef result = lp_build_div(int_bld, src, divisor);

   if (!is_unsigned) {
      /* signed divide by zero yields 0 in the affected lanes */
      LLVMValueRef not_div_mask = LLVMBuildNot(builder, div_mask, "");
      return LLVMBuildAnd(builder, not_div_mask, result, "");
   } else
      /* udiv by zero is guaranteed to return 0xffffffff, at least with
       * d3d10, so force those lanes to ~0 */
      return LLVMBuildOr(builder, div_mask, result, "");
}


static LLVMValueRef
do_int_mod(struct lp_build_nir_context *bld_base,
           bool is_unsigned, unsigned src_bit_size,
           LLVMValueRef src, LLVMValueRef src2)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   struct lp_build_context *int_bld = get_int_bld(bld_base, is_unsigned, src_bit_size);
   struct lp_build_context *mask_bld = get_int_bld(bld_base, true, src_bit_size);
   LLVMValueRef div_mask = lp_build_cmp(mask_bld, PIPE_FUNC_EQUAL, src2,
                                        mask_bld->zero);
   LLVMValueRef divisor = LLVMBuildOr(builder,
                                      div_mask,
                                      src2, "");
   if (!is_unsigned) {
      divisor = get_signed_divisor(gallivm, int_bld, mask_bld,
                                   src_bit_size, src, divisor);
   }
   LLVMValueRef result = lp_build_mod(int_bld, src, divisor);
   return LLVMBuildOr(builder, div_mask, result, "");
}

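/**
 * Round a 32-bit float to half precision and back, flushing results
 * below the smallest normal half to zero (0x38800000 is the bit
 * pattern of 2^-14, the smallest normal fp16 value, as a float).
 */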
static LLVMValueRef
do_quantize_to_f16(struct lp_build_nir_context *bld_base,
                   LLVMValueRef src)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef result, cond, cond2, temp;

   result = LLVMBuildFPTrunc(builder, src, bld_base->half_bld.vec_type, "");
   result = LLVMBuildFPExt(builder, result, bld_base->base.vec_type, "");

   temp = lp_build_abs(get_flt_bld(bld_base, 32), result);
   cond = LLVMBuildFCmp(builder, LLVMRealOGT,
                        LLVMBuildBitCast(builder, lp_build_const_int_vec(gallivm, bld_base->uint_bld.type, 0x38800000), bld_base->base.vec_type, ""),
                        temp, "");
   cond2 = LLVMBuildFCmp(builder, LLVMRealONE, temp, bld_base->base.zero, "");
   cond = LLVMBuildAnd(builder, cond, cond2, "");
   result = LLVMBuildSelect(builder, cond, bld_base->base.zero, result, "");
   return result;
}

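/**
 * Emit the LLVM IR for one NIR ALU opcode.  In SoA mode the sources
 * have already been split into per-channel values and cast to the
 * input types the opcode expects (see visit_alu below).
 */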
static LLVMValueRef
do_alu_action(struct lp_build_nir_context *bld_base,
              const nir_alu_instr *instr,
              unsigned src_bit_size[NIR_MAX_VEC_COMPONENTS],
              LLVMValueRef src[NIR_MAX_VEC_COMPONENTS])
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef result;

   switch (instr->op) {
   case nir_op_b2f16:
      result = emit_b2f(bld_base, src[0], 16);
      break;
   case nir_op_b2f32:
      result = emit_b2f(bld_base, src[0], 32);
      break;
   case nir_op_b2f64:
      result = emit_b2f(bld_base, src[0], 64);
      break;
   case nir_op_b2i8:
      result = emit_b2i(bld_base, src[0], 8);
      break;
   case nir_op_b2i16:
      result = emit_b2i(bld_base, src[0], 16);
      break;
   case nir_op_b2i32:
      result = emit_b2i(bld_base, src[0], 32);
      break;
   case nir_op_b2i64:
      result = emit_b2i(bld_base, src[0], 64);
      break;
   case nir_op_b32csel:
      result = emit_b32csel(bld_base, src_bit_size, src);
      break;
   case nir_op_bit_count:
      result = lp_build_popcount(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      if (src_bit_size[0] < 32)
         result = LLVMBuildZExt(builder, result, bld_base->int_bld.vec_type, "");
      else if (src_bit_size[0] > 32)
         result = LLVMBuildTrunc(builder, result, bld_base->int_bld.vec_type, "");
      break;
   case nir_op_bitfield_select:
      result = lp_build_xor(&bld_base->uint_bld, src[2], lp_build_and(&bld_base->uint_bld, src[0], lp_build_xor(&bld_base->uint_bld, src[1], src[2])));
      break;
   case nir_op_bitfield_reverse:
      result = lp_build_bitfield_reverse(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_f2b32:
      result = flt_to_bool32(bld_base, src_bit_size[0], src[0]);
      break;
   case nir_op_f2f16:
      if (src_bit_size[0] == 64)
         src[0] = LLVMBuildFPTrunc(builder, src[0],
                                   bld_base->base.vec_type, "");
      result = LLVMBuildFPTrunc(builder, src[0],
                                bld_base->half_bld.vec_type, "");
      break;
   case nir_op_f2f32:
      if (src_bit_size[0] < 32)
         result = LLVMBuildFPExt(builder, src[0],
                                 bld_base->base.vec_type, "");
      else
         result = LLVMBuildFPTrunc(builder, src[0],
                                   bld_base->base.vec_type, "");
      break;
   case nir_op_f2f64:
      result = LLVMBuildFPExt(builder, src[0],
                              bld_base->dbl_bld.vec_type, "");
      break;
   case nir_op_f2i8:
      result = LLVMBuildFPToSI(builder,
                               src[0],
                               bld_base->uint8_bld.vec_type, "");
      break;
   case nir_op_f2i16:
      result = LLVMBuildFPToSI(builder,
                               src[0],
                               bld_base->uint16_bld.vec_type, "");
      break;
   case nir_op_f2i32:
      result = LLVMBuildFPToSI(builder, src[0], bld_base->base.int_vec_type, "");
      break;
   case nir_op_f2u8:
      result = LLVMBuildFPToUI(builder,
                               src[0],
                               bld_base->uint8_bld.vec_type, "");
      break;
   case nir_op_f2u16:
      result = LLVMBuildFPToUI(builder,
                               src[0],
                               bld_base->uint16_bld.vec_type, "");
      break;
   case nir_op_f2u32:
      result = LLVMBuildFPToUI(builder,
                               src[0],
                               bld_base->base.int_vec_type, "");
      break;
   case nir_op_f2i64:
      result = LLVMBuildFPToSI(builder,
                               src[0],
                               bld_base->int64_bld.vec_type, "");
      break;
   case nir_op_f2u64:
      result = LLVMBuildFPToUI(builder,
                               src[0],
                               bld_base->uint64_bld.vec_type, "");
      break;
   case nir_op_fabs:
      result = lp_build_abs(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fadd:
      result = lp_build_add(get_flt_bld(bld_base, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_fceil:
      result = lp_build_ceil(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fcos:
      result = lp_build_cos(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fddx:
   case nir_op_fddx_coarse:
   case nir_op_fddx_fine:
      result = lp_build_ddx(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fddy:
   case nir_op_fddy_coarse:
   case nir_op_fddy_fine:
      result = lp_build_ddy(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fdiv:
      result = lp_build_div(get_flt_bld(bld_base, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_feq32:
      result = fcmp32(bld_base, PIPE_FUNC_EQUAL, src_bit_size[0], src);
      break;
   case nir_op_fexp2:
      result = lp_build_exp2(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_ffloor:
      result = lp_build_floor(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_ffma:
      result = lp_build_fmuladd(builder, src[0], src[1], src[2]);
      break;
   case nir_op_ffract: {
      struct lp_build_context *flt_bld = get_flt_bld(bld_base, src_bit_size[0]);
      LLVMValueRef tmp = lp_build_floor(flt_bld, src[0]);
      result = lp_build_sub(flt_bld, src[0], tmp);
      break;
   }
   case nir_op_fge:
   case nir_op_fge32:
      result = fcmp32(bld_base, PIPE_FUNC_GEQUAL, src_bit_size[0], src);
      break;
   case nir_op_find_lsb: {
      struct lp_build_context *int_bld = get_int_bld(bld_base, false, src_bit_size[0]);
      result = lp_build_cttz(int_bld, src[0]);
      if (src_bit_size[0] < 32)
         result = LLVMBuildZExt(builder, result, bld_base->uint_bld.vec_type, "");
      else if (src_bit_size[0] > 32)
         result = LLVMBuildTrunc(builder, result, bld_base->uint_bld.vec_type, "");
      break;
   }
   case nir_op_fisfinite32:
      unreachable("Should have been lowered in nir_opt_algebraic_late.");
   case nir_op_flog2:
      result = lp_build_log2_safe(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_flt:
   case nir_op_flt32:
      result = fcmp32(bld_base, PIPE_FUNC_LESS, src_bit_size[0], src);
      break;
   case nir_op_fmax:
   case nir_op_fmin: {
      enum gallivm_nan_behavior minmax_nan;
      int first = 0;

      /* If one of the sources is known to be a number (i.e., not NaN), then
       * better code can be generated by passing that information along.
       */
      if (is_a_number(bld_base->range_ht, instr, 1,
                      0 /* unused num_components */,
                      NULL /* unused swizzle */)) {
         minmax_nan = GALLIVM_NAN_RETURN_OTHER_SECOND_NONNAN;
      } else if (is_a_number(bld_base->range_ht, instr, 0,
                             0 /* unused num_components */,
                             NULL /* unused swizzle */)) {
         first = 1;
         minmax_nan = GALLIVM_NAN_RETURN_OTHER_SECOND_NONNAN;
      } else {
         minmax_nan = GALLIVM_NAN_RETURN_OTHER;
      }

      if (instr->op == nir_op_fmin) {
         result = lp_build_min_ext(get_flt_bld(bld_base, src_bit_size[0]),
                                   src[first], src[1 - first], minmax_nan);
      } else {
         result = lp_build_max_ext(get_flt_bld(bld_base, src_bit_size[0]),
                                   src[first], src[1 - first], minmax_nan);
      }
      break;
   }
   case nir_op_fmod: {
      struct lp_build_context *flt_bld = get_flt_bld(bld_base, src_bit_size[0]);
      result = lp_build_div(flt_bld, src[0], src[1]);
      result = lp_build_floor(flt_bld, result);
      result = lp_build_mul(flt_bld, src[1], result);
      result = lp_build_sub(flt_bld, src[0], result);
      break;
   }
   case nir_op_fmul:
      result = lp_build_mul(get_flt_bld(bld_base, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_fneu32:
      result = fcmp32(bld_base, PIPE_FUNC_NOTEQUAL, src_bit_size[0], src);
      break;
   case nir_op_fneg:
      result = lp_build_negate(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fpow:
      result = lp_build_pow(get_flt_bld(bld_base, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_fquantize2f16:
      result = do_quantize_to_f16(bld_base, src[0]);
      break;
   case nir_op_frcp:
      result = lp_build_rcp(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fround_even:
      if (src_bit_size[0] == 16) {
         struct lp_build_context *bld = get_flt_bld(bld_base, 16);
         char intrinsic[64];
         lp_format_intrinsic(intrinsic, 64, "llvm.roundeven", bld->vec_type);
         result = lp_build_intrinsic_unary(builder, intrinsic, bld->vec_type, src[0]);
      } else {
         result = lp_build_round(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      }
      break;
   case nir_op_frsq:
      result = lp_build_rsqrt(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fsat:
      result = lp_build_clamp_zero_one_nanzero(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fsign:
      result = lp_build_sgn(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fsin:
      result = lp_build_sin(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fsqrt:
      result = lp_build_sqrt(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_ftrunc:
      result = lp_build_trunc(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_i2b32:
      result = int_to_bool32(bld_base, src_bit_size[0], false, src[0]);
      break;
   case nir_op_i2f16:
      result = LLVMBuildSIToFP(builder, src[0],
                               bld_base->half_bld.vec_type, "");
      break;
   case nir_op_i2f32:
      result = lp_build_int_to_float(&bld_base->base, src[0]);
      break;
   case nir_op_i2f64:
      result = lp_build_int_to_float(&bld_base->dbl_bld, src[0]);
      break;
   case nir_op_i2i8:
      result = LLVMBuildTrunc(builder, src[0], bld_base->int8_bld.vec_type, "");
      break;
   case nir_op_i2i16:
      if (src_bit_size[0] < 16)
         result = LLVMBuildSExt(builder, src[0], bld_base->int16_bld.vec_type, "");
      else
         result = LLVMBuildTrunc(builder, src[0], bld_base->int16_bld.vec_type, "");
      break;
   case nir_op_i2i32:
      if (src_bit_size[0] < 32)
         result = LLVMBuildSExt(builder, src[0], bld_base->int_bld.vec_type, "");
      else
         result = LLVMBuildTrunc(builder, src[0], bld_base->int_bld.vec_type, "");
      break;
   case nir_op_i2i64:
      result = LLVMBuildSExt(builder, src[0], bld_base->int64_bld.vec_type, "");
      break;
   case nir_op_iabs:
      result = lp_build_abs(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_iadd:
      result = lp_build_add(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_iand:
      result = lp_build_and(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_idiv:
      result = do_int_divide(bld_base, false, src_bit_size[0], src[0], src[1]);
      break;
   case nir_op_ieq32:
      result = icmp32(bld_base, PIPE_FUNC_EQUAL, false, src_bit_size[0], src);
      break;
   case nir_op_ige32:
      result = icmp32(bld_base, PIPE_FUNC_GEQUAL, false, src_bit_size[0], src);
      break;
   case nir_op_ilt32:
      result = icmp32(bld_base, PIPE_FUNC_LESS, false, src_bit_size[0], src);
      break;
   case nir_op_imax:
      result = lp_build_max(get_int_bld(bld_base, false, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_imin:
      result = lp_build_min(get_int_bld(bld_base, false, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_imul:
   case nir_op_imul24:
      result = lp_build_mul(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_imul_high: {
      LLVMValueRef hi_bits;
      lp_build_mul_32_lohi(get_int_bld(bld_base, false, src_bit_size[0]), src[0], src[1], &hi_bits);
      result = hi_bits;
      break;
   }
   case nir_op_ine32:
      result = icmp32(bld_base, PIPE_FUNC_NOTEQUAL, false, src_bit_size[0], src);
      break;
   case nir_op_ineg:
      result = lp_build_negate(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_inot:
      result = lp_build_not(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_ior:
      result = lp_build_or(get_int_bld(bld_base, false, src_bit_size[0]),
                           src[0], src[1]);
      break;
   case nir_op_imod:
   case nir_op_irem:
      result = do_int_mod(bld_base, false, src_bit_size[0], src[0], src[1]);
      break;
   case nir_op_ishl: {
      struct lp_build_context *uint_bld = get_int_bld(bld_base, true, src_bit_size[0]);
      struct lp_build_context *int_bld = get_int_bld(bld_base, false, src_bit_size[0]);
      if (src_bit_size[0] == 64)
         src[1] = LLVMBuildZExt(builder, src[1], uint_bld->vec_type, "");
      if (src_bit_size[0] < 32)
         src[1] = LLVMBuildTrunc(builder, src[1], uint_bld->vec_type, "");
      src[1] = lp_build_and(uint_bld, src[1], lp_build_const_int_vec(gallivm, uint_bld->type, (src_bit_size[0] - 1)));
      result = lp_build_shl(int_bld, src[0], src[1]);
      break;
   }
   case nir_op_ishr: {
      struct lp_build_context *uint_bld = get_int_bld(bld_base, true, src_bit_size[0]);
      struct lp_build_context *int_bld = get_int_bld(bld_base, false, src_bit_size[0]);
      if (src_bit_size[0] == 64)
         src[1] = LLVMBuildZExt(builder, src[1], uint_bld->vec_type, "");
      if (src_bit_size[0] < 32)
         src[1] = LLVMBuildTrunc(builder, src[1], uint_bld->vec_type, "");
      src[1] = lp_build_and(uint_bld, src[1], lp_build_const_int_vec(gallivm, uint_bld->type, (src_bit_size[0] - 1)));
      result = lp_build_shr(int_bld, src[0], src[1]);
      break;
   }
   case nir_op_isign:
      result = lp_build_sgn(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_isub:
      result = lp_build_sub(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_ixor:
      result = lp_build_xor(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_mov:
      result = src[0];
      break;
   case nir_op_unpack_64_2x32_split_x:
      result = split_64bit(bld_base, src[0], false);
      break;
   case nir_op_unpack_64_2x32_split_y:
      result = split_64bit(bld_base, src[0], true);
      break;

   case nir_op_pack_32_2x16_split: {
      LLVMValueRef tmp = merge_16bit(bld_base, src[0], src[1]);
      result = LLVMBuildBitCast(builder, tmp, bld_base->base.vec_type, "");
      break;
   }
   case nir_op_unpack_32_2x16_split_x:
      result = split_16bit(bld_base, src[0], false);
      break;
   case nir_op_unpack_32_2x16_split_y:
      result = split_16bit(bld_base, src[0], true);
      break;
   case nir_op_pack_64_2x32_split: {
      LLVMValueRef tmp = merge_64bit(bld_base, src[0], src[1]);
      result = LLVMBuildBitCast(builder, tmp, bld_base->uint64_bld.vec_type, "");
      break;
   }
   case nir_op_pack_32_4x8_split: {
      LLVMValueRef tmp1 = merge_16bit(bld_base, src[0], src[1]);
      LLVMValueRef tmp2 = merge_16bit(bld_base, src[2], src[3]);
      tmp1 = LLVMBuildBitCast(builder, tmp1, bld_base->uint16_bld.vec_type, "");
      tmp2 = LLVMBuildBitCast(builder, tmp2, bld_base->uint16_bld.vec_type, "");
      LLVMValueRef tmp = merge_16bit(bld_base, tmp1, tmp2);
      result = LLVMBuildBitCast(builder, tmp, bld_base->uint_bld.vec_type, "");
      break;
   }
   case nir_op_u2f16:
      result = LLVMBuildUIToFP(builder, src[0],
                               bld_base->half_bld.vec_type, "");
      break;
   case nir_op_u2f32:
      result = LLVMBuildUIToFP(builder, src[0], bld_base->base.vec_type, "");
      break;
   case nir_op_u2f64:
      result = LLVMBuildUIToFP(builder, src[0], bld_base->dbl_bld.vec_type, "");
      break;
   case nir_op_u2u8:
      result = LLVMBuildTrunc(builder, src[0], bld_base->uint8_bld.vec_type, "");
      break;
   case nir_op_u2u16:
      if (src_bit_size[0] < 16)
         result = LLVMBuildZExt(builder, src[0], bld_base->uint16_bld.vec_type, "");
      else
         result = LLVMBuildTrunc(builder, src[0], bld_base->uint16_bld.vec_type, "");
      break;
   case nir_op_u2u32:
      if (src_bit_size[0] < 32)
         result = LLVMBuildZExt(builder, src[0], bld_base->uint_bld.vec_type, "");
      else
         result = LLVMBuildTrunc(builder, src[0], bld_base->uint_bld.vec_type, "");
      break;
   case nir_op_u2u64:
      result = LLVMBuildZExt(builder, src[0], bld_base->uint64_bld.vec_type, "");
      break;
   case nir_op_udiv:
      result = do_int_divide(bld_base, true, src_bit_size[0], src[0], src[1]);
      break;
   case nir_op_ufind_msb: {
      struct lp_build_context *uint_bld = get_int_bld(bld_base, true, src_bit_size[0]);
      result = lp_build_ctlz(uint_bld, src[0]);
      result = lp_build_sub(uint_bld, lp_build_const_int_vec(gallivm, uint_bld->type, src_bit_size[0] - 1), result);
      if (src_bit_size[0] < 32)
         result = LLVMBuildZExt(builder, result, bld_base->uint_bld.vec_type, "");
      else
         result = LLVMBuildTrunc(builder, result, bld_base->uint_bld.vec_type, "");
      break;
   }
   case nir_op_uge32:
      result = icmp32(bld_base, PIPE_FUNC_GEQUAL, true, src_bit_size[0], src);
      break;
   case nir_op_ult32:
      result = icmp32(bld_base, PIPE_FUNC_LESS, true, src_bit_size[0], src);
      break;
   case nir_op_umax:
      result = lp_build_max(get_int_bld(bld_base, true, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_umin:
      result = lp_build_min(get_int_bld(bld_base, true, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_umod:
      result = do_int_mod(bld_base, true, src_bit_size[0], src[0], src[1]);
      break;
   case nir_op_umul_high: {
      LLVMValueRef hi_bits;
      lp_build_mul_32_lohi(get_int_bld(bld_base, true, src_bit_size[0]), src[0], src[1], &hi_bits);
      result = hi_bits;
      break;
   }
   case nir_op_ushr: {
      struct lp_build_context *uint_bld = get_int_bld(bld_base, true, src_bit_size[0]);
      if (src_bit_size[0] == 64)
         src[1] = LLVMBuildZExt(builder, src[1], uint_bld->vec_type, "");
      if (src_bit_size[0] < 32)
         src[1] = LLVMBuildTrunc(builder, src[1], uint_bld->vec_type, "");
      src[1] = lp_build_and(uint_bld, src[1], lp_build_const_int_vec(gallivm, uint_bld->type, (src_bit_size[0] - 1)));
      result = lp_build_shr(uint_bld, src[0], src[1]);
      break;
   }
   case nir_op_bcsel: {
      LLVMTypeRef src1_type = LLVMTypeOf(src[1]);
      LLVMTypeRef src2_type = LLVMTypeOf(src[2]);

      if (LLVMGetTypeKind(src1_type) == LLVMPointerTypeKind &&
          LLVMGetTypeKind(src2_type) != LLVMPointerTypeKind) {
         src[2] = LLVMBuildIntToPtr(builder, src[2], src1_type, "");
      } else if (LLVMGetTypeKind(src2_type) == LLVMPointerTypeKind &&
                 LLVMGetTypeKind(src1_type) != LLVMPointerTypeKind) {
         src[1] = LLVMBuildIntToPtr(builder, src[1], src2_type, "");
      }

      for (int i = 1; i <= 2; i++) {
         LLVMTypeRef type = LLVMTypeOf(src[i]);
         if (LLVMGetTypeKind(type) == LLVMPointerTypeKind)
            break;
         src[i] = LLVMBuildBitCast(builder, src[i], get_int_bld(bld_base, true, src_bit_size[i])->vec_type, "");
      }
      return LLVMBuildSelect(builder, src[0], src[1], src[2], "");
   }
   default:
      assert(0);
      break;
   }
   return result;
}

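/**
 * Visit a NIR ALU instruction: fetch and swizzle the sources, handle
 * the vecN/fsum special cases, invoke do_alu_action per channel (or
 * once, for AOS), and store the result to the destination.
 */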
static void
visit_alu(struct lp_build_nir_context *bld_base,
          const nir_alu_instr *instr)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMValueRef src[NIR_MAX_VEC_COMPONENTS];
   unsigned src_bit_size[NIR_MAX_VEC_COMPONENTS];
   const unsigned num_components = nir_dest_num_components(instr->dest.dest);
   unsigned src_components;

   switch (instr->op) {
   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4:
   case nir_op_vec8:
   case nir_op_vec16:
      src_components = 1;
      break;
   case nir_op_pack_half_2x16:
      src_components = 2;
      break;
   case nir_op_unpack_half_2x16:
      src_components = 1;
      break;
   case nir_op_cube_face_coord_amd:
   case nir_op_cube_face_index_amd:
      src_components = 3;
      break;
   case nir_op_fsum2:
   case nir_op_fsum3:
   case nir_op_fsum4:
      src_components = nir_op_infos[instr->op].input_sizes[0];
      break;
   default:
      src_components = num_components;
      break;
   }

   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      src[i] = get_alu_src(bld_base, instr->src[i], src_components);
      src_bit_size[i] = nir_src_bit_size(instr->src[i].src);
   }

   if (instr->op == nir_op_mov &&
       is_aos(bld_base) &&
       !instr->dest.dest.is_ssa) {
      for (unsigned i = 0; i < 4; i++) {
         if (instr->dest.write_mask & (1 << i)) {
            assign_reg(bld_base, &instr->dest.dest.reg, (1 << i), src);
         }
      }
      return;
   }

   LLVMValueRef result[NIR_MAX_VEC_COMPONENTS];
   if (instr->op == nir_op_vec4 ||
       instr->op == nir_op_vec3 ||
       instr->op == nir_op_vec2 ||
       instr->op == nir_op_vec8 ||
       instr->op == nir_op_vec16) {
      for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
         result[i] = cast_type(bld_base, src[i],
                               nir_op_infos[instr->op].input_types[i],
                               src_bit_size[i]);
      }
   } else if (instr->op == nir_op_fsum4 ||
              instr->op == nir_op_fsum3 ||
              instr->op == nir_op_fsum2) {
      for (unsigned c = 0; c < nir_op_infos[instr->op].input_sizes[0]; c++) {
         LLVMValueRef temp_chan = LLVMBuildExtractValue(gallivm->builder,
                                                          src[0], c, "");
         temp_chan = cast_type(bld_base, temp_chan,
                               nir_op_infos[instr->op].input_types[0],
                               src_bit_size[0]);
         result[0] = (c == 0) ? temp_chan
            : lp_build_add(get_flt_bld(bld_base, src_bit_size[0]),
                           result[0], temp_chan);
      }
   } else if (is_aos(bld_base)) {
      result[0] = do_alu_action(bld_base, instr, src_bit_size, src);
   } else {
      /* Loop for R,G,B,A channels */
      for (unsigned c = 0; c < num_components; c++) {
         LLVMValueRef src_chan[NIR_MAX_VEC_COMPONENTS];

         /* Loop over instruction operands */
         for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
            if (num_components > 1) {
               src_chan[i] = LLVMBuildExtractValue(gallivm->builder,
                                                     src[i], c, "");
            } else {
               src_chan[i] = src[i];
            }
            src_chan[i] = cast_type(bld_base, src_chan[i],
                                    nir_op_infos[instr->op].input_types[i],
                                    src_bit_size[i]);
         }
         result[c] = do_alu_action(bld_base, instr, src_bit_size, src_chan);
         result[c] = cast_type(bld_base, result[c],
                               nir_op_infos[instr->op].output_type,
                               nir_dest_bit_size(instr->dest.dest));
      }
   }
   assign_alu_dest(bld_base, &instr->dest, result);
}


static void
visit_load_const(struct lp_build_nir_context *bld_base,
                 const nir_load_const_instr *instr)
{
   LLVMValueRef result[NIR_MAX_VEC_COMPONENTS];
   bld_base->load_const(bld_base, instr, result);
   assign_ssa_dest(bld_base, &instr->def, result);
}

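/**
 * Walk a deref chain and split the total IO offset into a constant
 * part (counted in attribute slots) and an optional indirect LLVM
 * value.  For per-vertex IO the leading array index is returned
 * separately through vertex_index_out / vertex_index_ref.
 */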
1340 static void
1341 get_deref_offset(struct lp_build_nir_context *bld_base, nir_deref_instr *instr,
1342                  bool vs_in, unsigned *vertex_index_out,
1343                  LLVMValueRef *vertex_index_ref,
1344                  unsigned *const_out, LLVMValueRef *indir_out)
1345 {
1346    LLVMBuilderRef builder = bld_base->base.gallivm->builder;
1347    nir_variable *var = nir_deref_instr_get_variable(instr);
1348    nir_deref_path path;
1349    unsigned idx_lvl = 1;
1350
1351    nir_deref_path_init(&path, instr, NULL);
1352
1353    if (vertex_index_out != NULL || vertex_index_ref != NULL) {
1354       if (vertex_index_ref) {
1355          *vertex_index_ref = get_src(bld_base, path.path[idx_lvl]->arr.index);
1356          if (vertex_index_out)
1357             *vertex_index_out = 0;
1358       } else {
1359          *vertex_index_out = nir_src_as_uint(path.path[idx_lvl]->arr.index);
1360       }
1361       ++idx_lvl;
1362    }
1363
1364    uint32_t const_offset = 0;
1365    LLVMValueRef offset = NULL;
1366
1367    if (var->data.compact && nir_src_is_const(instr->arr.index)) {
1368       assert(instr->deref_type == nir_deref_type_array);
1369       const_offset = nir_src_as_uint(instr->arr.index);
1370       goto out;
1371    }
1372
1373    for (; path.path[idx_lvl]; ++idx_lvl) {
1374       const struct glsl_type *parent_type = path.path[idx_lvl - 1]->type;
1375       if (path.path[idx_lvl]->deref_type == nir_deref_type_struct) {
1376          unsigned index = path.path[idx_lvl]->strct.index;
1377
1378          for (unsigned i = 0; i < index; i++) {
1379             const struct glsl_type *ft = glsl_get_struct_field(parent_type, i);
1380             const_offset += glsl_count_attribute_slots(ft, vs_in);
1381          }
1382       } else if (path.path[idx_lvl]->deref_type == nir_deref_type_array) {
1383          unsigned size = glsl_count_attribute_slots(path.path[idx_lvl]->type, vs_in);
1384          if (nir_src_is_const(path.path[idx_lvl]->arr.index)) {
1385             const_offset += nir_src_comp_as_int(path.path[idx_lvl]->arr.index, 0) * size;
1386          } else {
1387             LLVMValueRef idx_src = get_src(bld_base, path.path[idx_lvl]->arr.index);
1388             idx_src = cast_type(bld_base, idx_src, nir_type_uint, 32);
1389             LLVMValueRef array_off = lp_build_mul(&bld_base->uint_bld, lp_build_const_int_vec(bld_base->base.gallivm, bld_base->base.type, size),
1390                                                   idx_src);
1391             if (offset)
1392                offset = lp_build_add(&bld_base->uint_bld, offset, array_off);
1393             else
1394                offset = array_off;
1395          }
1396       } else
1397          unreachable("Unhandled deref type in get_deref_offset");
1398    }
1399
1400 out:
1401    nir_deref_path_finish(&path);
1402
1403    if (const_offset && offset)
1404       offset = LLVMBuildAdd(builder, offset,
1405                             lp_build_const_int_vec(bld_base->base.gallivm, bld_base->uint_bld.type, const_offset),
1406                             "");
1407    *const_out = const_offset;
1408    *indir_out = offset;
1409 }
1410
1411
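/* Load a shader input via the lowered-IO path: rebuild a minimal
 * nir_variable from the intrinsic's IO semantics and hand it to the
 * backend's load_var hook.
 */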
1412 static void
1413 visit_load_input(struct lp_build_nir_context *bld_base,
1414                  nir_intrinsic_instr *instr,
1415                  LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1416 {
1417    nir_variable var = {0};
1418    var.data.location = nir_intrinsic_io_semantics(instr).location;
1419    var.data.driver_location = nir_intrinsic_base(instr);
1420    var.data.location_frac = nir_intrinsic_component(instr);
1421
1422    unsigned nc = nir_dest_num_components(instr->dest);
1423    unsigned bit_size = nir_dest_bit_size(instr->dest);
1424
1425    nir_src offset = *nir_get_io_offset_src(instr);
1426    bool indirect = !nir_src_is_const(offset);
1427    if (!indirect)
1428       assert(nir_src_as_uint(offset) == 0);
1429    LLVMValueRef indir_index = indirect ? get_src(bld_base, offset) : NULL;
1430
1431    bld_base->load_var(bld_base, nir_var_shader_in, nc, bit_size, &var, 0, NULL, 0, indir_index, result);
1432 }
1433
1434
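/* Store a shader output via the lowered-IO path.  A single-component
 * write of an array-typed source is unwrapped to its first element
 * before being passed to the store_var hook.
 */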
1435 static void
1436 visit_store_output(struct lp_build_nir_context *bld_base,
1437                    nir_intrinsic_instr *instr)
1438 {
1439    nir_variable var = {0};
1440    var.data.location = nir_intrinsic_io_semantics(instr).location;
1441    var.data.driver_location = nir_intrinsic_base(instr);
1442    var.data.location_frac = nir_intrinsic_component(instr);
1443
1444    unsigned mask = nir_intrinsic_write_mask(instr);
1445
1446    unsigned bit_size = nir_src_bit_size(instr->src[0]);
1447    LLVMValueRef src = get_src(bld_base, instr->src[0]);
1448
1449    nir_src offset = *nir_get_io_offset_src(instr);
1450    bool indirect = !nir_src_is_const(offset);
1451    if (!indirect)
1452       assert(nir_src_as_uint(offset) == 0);
1453    LLVMValueRef indir_index = indirect ? get_src(bld_base, offset) : NULL;
1454
1455    if (mask == 0x1 && LLVMGetTypeKind(LLVMTypeOf(src)) == LLVMArrayTypeKind) {
1456       src = LLVMBuildExtractValue(bld_base->base.gallivm->builder,
1457                                   src, 0, "");
1458    }
1459
1460    bld_base->store_var(bld_base, nir_var_shader_out, util_last_bit(mask),
1461                        bit_size, &var, mask, NULL, 0, indir_index, src);
1462 }
1463
1464
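/* Load through a variable deref.  Depending on stage and mode this may
 * also need a (possibly indirect) vertex index, e.g. for GS inputs and
 * TCS/TES per-vertex IO, in addition to the constant/indirect offsets
 * computed by get_deref_offset().
 */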
1465 static void
1466 visit_load_var(struct lp_build_nir_context *bld_base,
1467                nir_intrinsic_instr *instr,
1468                LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1469 {
1470    nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
1471    nir_variable *var = nir_deref_instr_get_variable(deref);
1472    assert(util_bitcount(deref->modes) == 1);
1473    nir_variable_mode mode = deref->modes;
1474    unsigned const_index = 0;
1475    LLVMValueRef indir_index = NULL;
1476    LLVMValueRef indir_vertex_index = NULL;
1477    unsigned vertex_index = 0;
1478    unsigned nc = nir_dest_num_components(instr->dest);
1479    unsigned bit_size = nir_dest_bit_size(instr->dest);
1480    if (var) {
1481       bool vs_in = bld_base->shader->info.stage == MESA_SHADER_VERTEX &&
1482          var->data.mode == nir_var_shader_in;
1483       bool gs_in = bld_base->shader->info.stage == MESA_SHADER_GEOMETRY &&
1484          var->data.mode == nir_var_shader_in;
1485       bool tcs_in = bld_base->shader->info.stage == MESA_SHADER_TESS_CTRL &&
1486          var->data.mode == nir_var_shader_in;
1487       bool tcs_out = bld_base->shader->info.stage == MESA_SHADER_TESS_CTRL &&
1488          var->data.mode == nir_var_shader_out && !var->data.patch;
1489       bool tes_in = bld_base->shader->info.stage == MESA_SHADER_TESS_EVAL &&
1490          var->data.mode == nir_var_shader_in && !var->data.patch;
1491
1492       mode = var->data.mode;
1493
1494       get_deref_offset(bld_base, deref, vs_in,
1495                    gs_in ? &vertex_index : NULL,
1496                    (tcs_in || tcs_out || tes_in) ? &indir_vertex_index : NULL,
1497                    &const_index, &indir_index);
1498    }
1499    bld_base->load_var(bld_base, mode, nc, bit_size, var, vertex_index,
1500                       indir_vertex_index, const_index, indir_index, result);
1501 }
1502
1503
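/* Store through a variable deref; only TCS outputs need an indirect
 * vertex index here.
 */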
1504 static void
1505 visit_store_var(struct lp_build_nir_context *bld_base,
1506                 nir_intrinsic_instr *instr)
1507 {
1508    nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
1509    nir_variable *var = nir_deref_instr_get_variable(deref);
1510    assert(util_bitcount(deref->modes) == 1);
1511    nir_variable_mode mode = deref->modes;
1512    int writemask = instr->const_index[0];
1513    unsigned bit_size = nir_src_bit_size(instr->src[1]);
1514    LLVMValueRef src = get_src(bld_base, instr->src[1]);
1515    unsigned const_index = 0;
1516    LLVMValueRef indir_index = NULL, indir_vertex_index = NULL;
1517    if (var) {
1518       bool tcs_out = bld_base->shader->info.stage == MESA_SHADER_TESS_CTRL &&
1519          var->data.mode == nir_var_shader_out && !var->data.patch;
1520       get_deref_offset(bld_base, deref, false, NULL,
1521                        tcs_out ? &indir_vertex_index : NULL,
1522                        &const_index, &indir_index);
1523    }
1524    bld_base->store_var(bld_base, mode, instr->num_components, bit_size,
1525                        var, writemask, indir_vertex_index, const_index,
1526                        indir_index, src);
1527 }
1528
1529
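/* Load from a UBO.  The buffer index is assumed to be uniform across
 * the SIMD vector, so lane 0 is extracted and used as a scalar.
 */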
1530 static void
1531 visit_load_ubo(struct lp_build_nir_context *bld_base,
1532                nir_intrinsic_instr *instr,
1533                LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1534 {
1535    struct gallivm_state *gallivm = bld_base->base.gallivm;
1536    LLVMBuilderRef builder = gallivm->builder;
1537    LLVMValueRef idx = get_src(bld_base, instr->src[0]);
1538    LLVMValueRef offset = get_src(bld_base, instr->src[1]);
1539
1540    bool offset_is_uniform = nir_src_is_always_uniform(instr->src[1]);
1541    idx = LLVMBuildExtractElement(builder, idx, lp_build_const_int32(gallivm, 0), "");
1542    bld_base->load_ubo(bld_base, nir_dest_num_components(instr->dest),
1543                       nir_dest_bit_size(instr->dest),
1544                       offset_is_uniform, idx, offset, result);
1545 }
1546
1547
1548 static void
1549 visit_load_push_constant(struct lp_build_nir_context *bld_base,
1550                          nir_intrinsic_instr *instr,
1551                          LLVMValueRef result[4])
1552 {
1553    struct gallivm_state *gallivm = bld_base->base.gallivm;
1554    LLVMValueRef offset = get_src(bld_base, instr->src[0]);
1555    LLVMValueRef idx = lp_build_const_int32(gallivm, 0);
1556    bool offset_is_uniform = nir_src_is_always_uniform(instr->src[0]);
1557
1558    bld_base->load_ubo(bld_base, nir_dest_num_components(instr->dest),
1559                       nir_dest_bit_size(instr->dest),
1560                       offset_is_uniform, idx, offset, result);
1561 }
1562
1563
1564 static void
1565 visit_load_ssbo(struct lp_build_nir_context *bld_base,
1566                 nir_intrinsic_instr *instr,
1567                 LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1568 {
1569    LLVMValueRef idx = cast_type(bld_base, get_src(bld_base, instr->src[0]), nir_type_uint, 32);
1570    LLVMValueRef offset = get_src(bld_base, instr->src[1]);
1571    bool index_and_offset_are_uniform = nir_src_is_always_uniform(instr->src[0]) && nir_src_is_always_uniform(instr->src[1]);
1572    bld_base->load_mem(bld_base, nir_dest_num_components(instr->dest), nir_dest_bit_size(instr->dest),
1573                       index_and_offset_are_uniform, idx, offset, result);
1574 }
1575
1576
1577 static void
1578 visit_store_ssbo(struct lp_build_nir_context *bld_base,
1579                  nir_intrinsic_instr *instr)
1580 {
1581    LLVMValueRef val = get_src(bld_base, instr->src[0]);
1582    LLVMValueRef idx = cast_type(bld_base, get_src(bld_base, instr->src[1]), nir_type_uint, 32);
1583    LLVMValueRef offset = get_src(bld_base, instr->src[2]);
1584    bool index_and_offset_are_uniform = nir_src_is_always_uniform(instr->src[1]) && nir_src_is_always_uniform(instr->src[2]);
1585    int writemask = instr->const_index[0];
1586    int nc = nir_src_num_components(instr->src[0]);
1587    int bitsize = nir_src_bit_size(instr->src[0]);
1588    bld_base->store_mem(bld_base, writemask, nc, bitsize, index_and_offset_are_uniform, idx, offset, val);
1589 }
1590
1591
1592 static void
1593 visit_get_ssbo_size(struct lp_build_nir_context *bld_base,
1594                     nir_intrinsic_instr *instr,
1595                     LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1596 {
1597    LLVMValueRef idx = cast_type(bld_base,
1598                                 get_src(bld_base, instr->src[0]),
1599                                 nir_type_uint, 32);
1600    result[0] = bld_base->get_ssbo_size(bld_base, idx);
1601 }
1602
1603
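/* SSBO atomics: src[2] is the operand value; comp_swap carries its
 * second (swap) value in src[3].
 */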
1604 static void
1605 visit_ssbo_atomic(struct lp_build_nir_context *bld_base,
1606                   nir_intrinsic_instr *instr,
1607                   LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1608 {
1609    LLVMValueRef idx = cast_type(bld_base, get_src(bld_base, instr->src[0]),
1610                                 nir_type_uint, 32);
1611    LLVMValueRef offset = get_src(bld_base, instr->src[1]);
1612    LLVMValueRef val = get_src(bld_base, instr->src[2]);
1613    LLVMValueRef val2 = NULL;
1614    int bitsize = nir_src_bit_size(instr->src[2]);
1615    if (instr->intrinsic == nir_intrinsic_ssbo_atomic_comp_swap)
1616       val2 = get_src(bld_base, instr->src[3]);
1617
1618    bld_base->atomic_mem(bld_base, instr->intrinsic, bitsize, idx,
1619                         offset, val, val2, &result[0]);
1620 }
1621
1622
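/* Image load: gather up to four coordinate channels (1D arrays keep
 * their layer in coords[2]) plus an optional MS sample index, then
 * dispatch through the image_op hook.
 */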
1623 static void
1624 visit_load_image(struct lp_build_nir_context *bld_base,
1625                  nir_intrinsic_instr *instr,
1626                  LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1627 {
1628    struct gallivm_state *gallivm = bld_base->base.gallivm;
1629    LLVMBuilderRef builder = gallivm->builder;
1630    LLVMValueRef coord_val = get_src(bld_base, instr->src[1]);
1631    LLVMValueRef coords[5];
1632    struct lp_img_params params;
1633
1634    memset(&params, 0, sizeof(params));
1635    params.target = glsl_sampler_to_pipe(nir_intrinsic_image_dim(instr),
1636                                         nir_intrinsic_image_array(instr));
1637    for (unsigned i = 0; i < 4; i++)
1638       coords[i] = LLVMBuildExtractValue(builder, coord_val, i, "");
1639    if (params.target == PIPE_TEXTURE_1D_ARRAY)
1640       coords[2] = coords[1];
1641
1642    params.coords = coords;
1643    params.outdata = result;
1644    params.img_op = LP_IMG_LOAD;
1645    if (nir_intrinsic_image_dim(instr) == GLSL_SAMPLER_DIM_MS ||
1646        nir_intrinsic_image_dim(instr) == GLSL_SAMPLER_DIM_SUBPASS_MS)
1647       params.ms_index = cast_type(bld_base, get_src(bld_base, instr->src[2]),
1648                                   nir_type_uint, 32);
1649    if (nir_src_is_const(instr->src[0]))
1650       params.image_index = nir_src_as_int(instr->src[0]);
1651    else
1652       params.image_index_offset = get_src(bld_base, instr->src[0]);
1653    bld_base->image_op(bld_base, &params);
1654 }
1655
1656
1657 static void
1658 visit_store_image(struct lp_build_nir_context *bld_base,
1659                   nir_intrinsic_instr *instr)
1660 {
1661    struct gallivm_state *gallivm = bld_base->base.gallivm;
1662    LLVMBuilderRef builder = gallivm->builder;
1663    LLVMValueRef coord_val = get_src(bld_base, instr->src[1]);
1664    LLVMValueRef in_val = get_src(bld_base, instr->src[3]);
1665    LLVMValueRef coords[5];
1666    struct lp_img_params params;
1667
1668    memset(&params, 0, sizeof(params));
1669    params.target = glsl_sampler_to_pipe(nir_intrinsic_image_dim(instr), nir_intrinsic_image_array(instr));
1670    for (unsigned i = 0; i < 4; i++)
1671       coords[i] = LLVMBuildExtractValue(builder, coord_val, i, "");
1672    if (params.target == PIPE_TEXTURE_1D_ARRAY)
1673       coords[2] = coords[1];
1674    params.coords = coords;
1675
1676    for (unsigned i = 0; i < 4; i++) {
1677       params.indata[i] = LLVMBuildExtractValue(builder, in_val, i, "");
1678       params.indata[i] = LLVMBuildBitCast(builder, params.indata[i], bld_base->base.vec_type, "");
1679    }
1680    if (nir_intrinsic_image_dim(instr) == GLSL_SAMPLER_DIM_MS)
1681       params.ms_index = get_src(bld_base, instr->src[2]);
1682    params.img_op = LP_IMG_STORE;
1683    if (nir_src_is_const(instr->src[0]))
1684       params.image_index = nir_src_as_int(instr->src[0]);
1685    else
1686       params.image_index_offset = get_src(bld_base, instr->src[0]);
1687
1690    bld_base->image_op(bld_base, &params);
1691 }
1692
1693
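/* Image atomics: map the NIR intrinsic onto an LLVM atomicrmw opcode;
 * comp_swap is handled separately as a compare-and-swap image op.
 */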
1694 static void
1695 visit_atomic_image(struct lp_build_nir_context *bld_base,
1696                    nir_intrinsic_instr *instr,
1697                    LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1698 {
1699    struct gallivm_state *gallivm = bld_base->base.gallivm;
1700    LLVMBuilderRef builder = gallivm->builder;
1701    struct lp_img_params params;
1702    LLVMValueRef coord_val = get_src(bld_base, instr->src[1]);
1703    LLVMValueRef in_val = get_src(bld_base, instr->src[3]);
1704    LLVMValueRef coords[5];
1705
1706    memset(&params, 0, sizeof(params));
1707
1708    switch (instr->intrinsic) {
1709    case nir_intrinsic_image_atomic_add:
1710       params.op = LLVMAtomicRMWBinOpAdd;
1711       break;
1712    case nir_intrinsic_image_atomic_exchange:
1713       params.op = LLVMAtomicRMWBinOpXchg;
1714       break;
1715    case nir_intrinsic_image_atomic_and:
1716       params.op = LLVMAtomicRMWBinOpAnd;
1717       break;
1718    case nir_intrinsic_image_atomic_or:
1719       params.op = LLVMAtomicRMWBinOpOr;
1720       break;
1721    case nir_intrinsic_image_atomic_xor:
1722       params.op = LLVMAtomicRMWBinOpXor;
1723       break;
1724    case nir_intrinsic_image_atomic_umin:
1725       params.op = LLVMAtomicRMWBinOpUMin;
1726       break;
1727    case nir_intrinsic_image_atomic_umax:
1728       params.op = LLVMAtomicRMWBinOpUMax;
1729       break;
1730    case nir_intrinsic_image_atomic_imin:
1731       params.op = LLVMAtomicRMWBinOpMin;
1732       break;
1733    case nir_intrinsic_image_atomic_imax:
1734       params.op = LLVMAtomicRMWBinOpMax;
1735       break;
1736    case nir_intrinsic_image_atomic_fadd:
1737       params.op = LLVMAtomicRMWBinOpFAdd;
1738       break;
1739 #if LLVM_VERSION_MAJOR >= 15
1740    case nir_intrinsic_image_atomic_fmin:
1741       params.op = LLVMAtomicRMWBinOpFMin;
1742       break;
1743    case nir_intrinsic_image_atomic_fmax:
1744       params.op = LLVMAtomicRMWBinOpFMax;
1745       break;
1746 #endif
1747    default:
1748       break;
1749    }
1750
1751    params.target = glsl_sampler_to_pipe(nir_intrinsic_image_dim(instr),
1752                                         nir_intrinsic_image_array(instr));
1753    for (unsigned i = 0; i < 4; i++) {
1754       coords[i] = LLVMBuildExtractValue(builder, coord_val, i, "");
1755    }
1756    if (params.target == PIPE_TEXTURE_1D_ARRAY) {
1757       coords[2] = coords[1];
1758    }
1759
1760    params.coords = coords;
1761
1762    if (nir_intrinsic_image_dim(instr) == GLSL_SAMPLER_DIM_MS)
1763       params.ms_index = get_src(bld_base, instr->src[2]);
1764    params.indata[0] = in_val;
1765    if (instr->intrinsic == nir_intrinsic_image_atomic_comp_swap) {
1766       LLVMValueRef cas_val = get_src(bld_base, instr->src[4]);
1767       params.indata2[0] = cas_val;
1768    }
1771
1772    params.outdata = result;
1773    params.img_op =
1774       (instr->intrinsic == nir_intrinsic_image_atomic_comp_swap)
1775       ? LP_IMG_ATOMIC_CAS : LP_IMG_ATOMIC;
1776    if (nir_src_is_const(instr->src[0]))
1777       params.image_index = nir_src_as_int(instr->src[0]);
1778    else
1779       params.image_index_offset = get_src(bld_base, instr->src[0]);
1780
1781    bld_base->image_op(bld_base, &params);
1782 }
1783
1784
1785 static void
1786 visit_image_size(struct lp_build_nir_context *bld_base,
1787                  nir_intrinsic_instr *instr,
1788                  LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1789 {
1790    struct lp_sampler_size_query_params params = { 0 };
1791
1792    if (nir_src_is_const(instr->src[0]))
1793       params.texture_unit = nir_src_as_int(instr->src[0]);
1794    else
1795       params.texture_unit_offset = get_src(bld_base, instr->src[0]);
1796    params.target = glsl_sampler_to_pipe(nir_intrinsic_image_dim(instr),
1797                                         nir_intrinsic_image_array(instr));
1798    params.sizes_out = result;
1799
1800    bld_base->image_size(bld_base, &params);
1801 }
1802
1803
1804 static void
1805 visit_image_samples(struct lp_build_nir_context *bld_base,
1806                     nir_intrinsic_instr *instr,
1807                     LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1808 {
1809    struct lp_sampler_size_query_params params = { 0 };
1810
1811    if (nir_src_is_const(instr->src[0]))
1812       params.texture_unit = nir_src_as_int(instr->src[0]);
1813    else
1814       params.texture_unit_offset = get_src(bld_base, instr->src[0]);
1815    params.target = glsl_sampler_to_pipe(nir_intrinsic_image_dim(instr),
1816                                         nir_intrinsic_image_array(instr));
1817    params.sizes_out = result;
1818    params.samples_only = true;
1819
1820    bld_base->image_size(bld_base, &params);
1821 }
1822
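/* Workgroup-shared memory loads reuse the SSBO load_mem hook with a
 * NULL buffer index.
 */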
1823 static void
1824 visit_shared_load(struct lp_build_nir_context *bld_base,
1825                   nir_intrinsic_instr *instr,
1826                   LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1827 {
1828    LLVMValueRef offset = get_src(bld_base, instr->src[0]);
1829    bool offset_is_uniform = nir_src_is_always_uniform(instr->src[0]);
1830    bld_base->load_mem(bld_base, nir_dest_num_components(instr->dest), nir_dest_bit_size(instr->dest),
1831                       offset_is_uniform, NULL, offset, result);
1832 }
1833
1834
1835 static void
1836 visit_shared_store(struct lp_build_nir_context *bld_base,
1837                    nir_intrinsic_instr *instr)
1838 {
1839    LLVMValueRef val = get_src(bld_base, instr->src[0]);
1840    LLVMValueRef offset = get_src(bld_base, instr->src[1]);
1841    bool offset_is_uniform = nir_src_is_always_uniform(instr->src[1]);
1842    int writemask = instr->const_index[1];
1843    int nc = nir_src_num_components(instr->src[0]);
1844    int bitsize = nir_src_bit_size(instr->src[0]);
1845    bld_base->store_mem(bld_base, writemask, nc, bitsize, offset_is_uniform, NULL, offset, val);
1846 }
1847
1848
1849 static void
1850 visit_shared_atomic(struct lp_build_nir_context *bld_base,
1851                     nir_intrinsic_instr *instr,
1852                     LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1853 {
1854    LLVMValueRef offset = get_src(bld_base, instr->src[0]);
1855    LLVMValueRef val = get_src(bld_base, instr->src[1]);
1856    LLVMValueRef val2 = NULL;
1857    int bitsize = nir_src_bit_size(instr->src[1]);
1858    if (instr->intrinsic == nir_intrinsic_shared_atomic_comp_swap)
1859       val2 = get_src(bld_base, instr->src[2]);
1860
1861    bld_base->atomic_mem(bld_base, instr->intrinsic, bitsize, NULL, offset, val, val2, &result[0]);
1862 }
1863
1864
1865 static void
1866 visit_barrier(struct lp_build_nir_context *bld_base)
1867 {
1868    bld_base->barrier(bld_base);
1869 }
1870
1871
1872 static void
1873 visit_discard(struct lp_build_nir_context *bld_base,
1874               nir_intrinsic_instr *instr)
1875 {
1876    LLVMValueRef cond = NULL;
1877    if (instr->intrinsic == nir_intrinsic_discard_if) {
1878       cond = get_src(bld_base, instr->src[0]);
1879       cond = cast_type(bld_base, cond, nir_type_int, 32);
1880    }
1881    bld_base->discard(bld_base, cond);
1882 }
1883
1884
1885 static void
1886 visit_load_kernel_input(struct lp_build_nir_context *bld_base,
1887                         nir_intrinsic_instr *instr,
1888                         LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1889 {
1890    LLVMValueRef offset = get_src(bld_base, instr->src[0]);
1891
1892    bool offset_is_uniform = nir_src_is_always_uniform(instr->src[0]);
1893    bld_base->load_kernel_arg(bld_base, nir_dest_num_components(instr->dest),
1894                              nir_dest_bit_size(instr->dest),
1895                              nir_src_bit_size(instr->src[0]),
1896                              offset_is_uniform, offset, result);
1897 }
1898
1899
1900 static void
1901 visit_load_global(struct lp_build_nir_context *bld_base,
1902                   nir_intrinsic_instr *instr,
1903                   LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1904 {
1905    LLVMValueRef addr = get_src(bld_base, instr->src[0]);
1906    bool offset_is_uniform = nir_src_is_always_uniform(instr->src[0]);
1907    bld_base->load_global(bld_base, nir_dest_num_components(instr->dest), nir_dest_bit_size(instr->dest),
1908                          nir_src_bit_size(instr->src[0]),
1909                          offset_is_uniform, addr, result);
1910 }
1911
1912
1913 static void
1914 visit_store_global(struct lp_build_nir_context *bld_base,
1915                    nir_intrinsic_instr *instr)
1916 {
1917    LLVMValueRef val = get_src(bld_base, instr->src[0]);
1918    int nc = nir_src_num_components(instr->src[0]);
1919    int bitsize = nir_src_bit_size(instr->src[0]);
1920    LLVMValueRef addr = get_src(bld_base, instr->src[1]);
1921    int addr_bitsize = nir_src_bit_size(instr->src[1]);
1922    int writemask = instr->const_index[0];
1923    bld_base->store_global(bld_base, writemask, nc, bitsize,
1924                           addr_bitsize, addr, val);
1925 }
1926
1927
1928 static void
1929 visit_global_atomic(struct lp_build_nir_context *bld_base,
1930                     nir_intrinsic_instr *instr,
1931                     LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1932 {
1933    LLVMValueRef addr = get_src(bld_base, instr->src[0]);
1934    LLVMValueRef val = get_src(bld_base, instr->src[1]);
1935    LLVMValueRef val2 = NULL;
1936    int addr_bitsize = nir_src_bit_size(instr->src[0]);
1937    int val_bitsize = nir_src_bit_size(instr->src[1]);
1938    if (instr->intrinsic == nir_intrinsic_global_atomic_comp_swap)
1939       val2 = get_src(bld_base, instr->src[2]);
1940
1941    bld_base->atomic_global(bld_base, instr->intrinsic, addr_bitsize,
1942                            val_bitsize, addr, val, val2, &result[0]);
1943 }
1944
1945 #if LLVM_VERSION_MAJOR >= 10
1946 static void visit_shuffle(struct lp_build_nir_context *bld_base,
1947                           nir_intrinsic_instr *instr,
1948                           LLVMValueRef dst[4])
1949 {
1950    LLVMValueRef src = get_src(bld_base, instr->src[0]);
1951    src = cast_type(bld_base, src, nir_type_int, nir_src_bit_size(instr->src[0]));
1952    LLVMValueRef index = get_src(bld_base, instr->src[1]);
1953    index = cast_type(bld_base, index, nir_type_uint, nir_src_bit_size(instr->src[1]));
1954
1955    bld_base->shuffle(bld_base, src, index, instr, dst);
1956 }
1957 #endif
1958
1959
1960 static void
1961 visit_interp(struct lp_build_nir_context *bld_base,
1962              nir_intrinsic_instr *instr,
1963              LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1964 {
1965    struct gallivm_state *gallivm = bld_base->base.gallivm;
1966    LLVMBuilderRef builder = gallivm->builder;
1967    nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
1968    unsigned num_components = nir_dest_num_components(instr->dest);
1969    nir_variable *var = nir_deref_instr_get_variable(deref);
1970    unsigned const_index;
1971    LLVMValueRef indir_index;
1972    LLVMValueRef offsets[2] = { NULL, NULL };
1973    get_deref_offset(bld_base, deref, false, NULL, NULL,
1974                     &const_index, &indir_index);
1975    bool centroid = instr->intrinsic == nir_intrinsic_interp_deref_at_centroid;
1976    bool sample = false;
1977    if (instr->intrinsic == nir_intrinsic_interp_deref_at_offset) {
1978       for (unsigned i = 0; i < 2; i++) {
1979          offsets[i] = LLVMBuildExtractValue(builder, get_src(bld_base, instr->src[1]), i, "");
1980          offsets[i] = cast_type(bld_base, offsets[i], nir_type_float, 32);
1981       }
1982    } else if (instr->intrinsic == nir_intrinsic_interp_deref_at_sample) {
1983       offsets[0] = get_src(bld_base, instr->src[1]);
1984       offsets[0] = cast_type(bld_base, offsets[0], nir_type_int, 32);
1985       sample = true;
1986    }
1987    bld_base->interp_at(bld_base, num_components, var, centroid, sample,
1988                        const_index, indir_index, offsets, result);
1989 }
1990
1991
1992 static void
1993 visit_load_scratch(struct lp_build_nir_context *bld_base,
1994                    nir_intrinsic_instr *instr,
1995                    LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1996 {
1997    LLVMValueRef offset = get_src(bld_base, instr->src[0]);
1998
1999    bld_base->load_scratch(bld_base, nir_dest_num_components(instr->dest),
2000                           nir_dest_bit_size(instr->dest), offset, result);
2001 }
2002
2003
2004 static void
2005 visit_store_scratch(struct lp_build_nir_context *bld_base,
2006                     nir_intrinsic_instr *instr)
2007 {
2008    LLVMValueRef val = get_src(bld_base, instr->src[0]);
2009    LLVMValueRef offset = get_src(bld_base, instr->src[1]);
2010    int writemask = instr->const_index[2];
2011    int nc = nir_src_num_components(instr->src[0]);
2012    int bitsize = nir_src_bit_size(instr->src[0]);
2013    bld_base->store_scratch(bld_base, writemask, nc, bitsize, offset, val);
2014 }
2015
2016
2017 static void
2018 visit_intrinsic(struct lp_build_nir_context *bld_base,
2019                 nir_intrinsic_instr *instr)
2020 {
2021    LLVMValueRef result[NIR_MAX_VEC_COMPONENTS] = {0};
2022    switch (instr->intrinsic) {
2023    case nir_intrinsic_load_input:
2024       visit_load_input(bld_base, instr, result);
2025       break;
2026    case nir_intrinsic_store_output:
2027       visit_store_output(bld_base, instr);
2028       break;
2029    case nir_intrinsic_load_deref:
2030       visit_load_var(bld_base, instr, result);
2031       break;
2032    case nir_intrinsic_store_deref:
2033       visit_store_var(bld_base, instr);
2034       break;
2035    case nir_intrinsic_load_ubo:
2036       visit_load_ubo(bld_base, instr, result);
2037       break;
2038    case nir_intrinsic_load_push_constant:
2039       visit_load_push_constant(bld_base, instr, result);
2040       break;
2041    case nir_intrinsic_load_ssbo:
2042       visit_load_ssbo(bld_base, instr, result);
2043       break;
2044    case nir_intrinsic_store_ssbo:
2045       visit_store_ssbo(bld_base, instr);
2046       break;
2047    case nir_intrinsic_get_ssbo_size:
2048       visit_get_ssbo_size(bld_base, instr, result);
2049       break;
2050    case nir_intrinsic_load_vertex_id:
2051    case nir_intrinsic_load_primitive_id:
2052    case nir_intrinsic_load_instance_id:
2053    case nir_intrinsic_load_base_instance:
2054    case nir_intrinsic_load_base_vertex:
2055    case nir_intrinsic_load_first_vertex:
2056    case nir_intrinsic_load_workgroup_id:
2057    case nir_intrinsic_load_local_invocation_id:
2058    case nir_intrinsic_load_local_invocation_index:
2059    case nir_intrinsic_load_num_workgroups:
2060    case nir_intrinsic_load_invocation_id:
2061    case nir_intrinsic_load_front_face:
2062    case nir_intrinsic_load_draw_id:
2063    case nir_intrinsic_load_workgroup_size:
2064    case nir_intrinsic_load_work_dim:
2065    case nir_intrinsic_load_tess_coord:
2066    case nir_intrinsic_load_tess_level_outer:
2067    case nir_intrinsic_load_tess_level_inner:
2068    case nir_intrinsic_load_patch_vertices_in:
2069    case nir_intrinsic_load_sample_id:
2070    case nir_intrinsic_load_sample_pos:
2071    case nir_intrinsic_load_sample_mask_in:
2072    case nir_intrinsic_load_view_index:
2073    case nir_intrinsic_load_subgroup_invocation:
2074    case nir_intrinsic_load_subgroup_id:
2075    case nir_intrinsic_load_num_subgroups:
2076       bld_base->sysval_intrin(bld_base, instr, result);
2077       break;
2078    case nir_intrinsic_load_helper_invocation:
2079       bld_base->helper_invocation(bld_base, &result[0]);
2080       break;
2081    case nir_intrinsic_discard_if:
2082    case nir_intrinsic_discard:
2083       visit_discard(bld_base, instr);
2084       break;
2085    case nir_intrinsic_emit_vertex:
2086       bld_base->emit_vertex(bld_base, nir_intrinsic_stream_id(instr));
2087       break;
2088    case nir_intrinsic_end_primitive:
2089       bld_base->end_primitive(bld_base, nir_intrinsic_stream_id(instr));
2090       break;
2091    case nir_intrinsic_ssbo_atomic_add:
2092    case nir_intrinsic_ssbo_atomic_imin:
2093    case nir_intrinsic_ssbo_atomic_imax:
2094    case nir_intrinsic_ssbo_atomic_umin:
2095    case nir_intrinsic_ssbo_atomic_umax:
2096    case nir_intrinsic_ssbo_atomic_and:
2097    case nir_intrinsic_ssbo_atomic_or:
2098    case nir_intrinsic_ssbo_atomic_xor:
2099    case nir_intrinsic_ssbo_atomic_exchange:
2100    case nir_intrinsic_ssbo_atomic_comp_swap:
2101    case nir_intrinsic_ssbo_atomic_fadd:
2102    case nir_intrinsic_ssbo_atomic_fmin:
2103    case nir_intrinsic_ssbo_atomic_fmax:
2104       visit_ssbo_atomic(bld_base, instr, result);
2105       break;
2106    case nir_intrinsic_image_load:
2107       visit_load_image(bld_base, instr, result);
2108       break;
2109    case nir_intrinsic_image_store:
2110       visit_store_image(bld_base, instr);
2111       break;
2112    case nir_intrinsic_image_atomic_add:
2113    case nir_intrinsic_image_atomic_imin:
2114    case nir_intrinsic_image_atomic_imax:
2115    case nir_intrinsic_image_atomic_umin:
2116    case nir_intrinsic_image_atomic_umax:
2117    case nir_intrinsic_image_atomic_and:
2118    case nir_intrinsic_image_atomic_or:
2119    case nir_intrinsic_image_atomic_xor:
2120    case nir_intrinsic_image_atomic_exchange:
2121    case nir_intrinsic_image_atomic_comp_swap:
2122    case nir_intrinsic_image_atomic_fadd:
2123    case nir_intrinsic_image_atomic_fmin:
2124    case nir_intrinsic_image_atomic_fmax:
2125       visit_atomic_image(bld_base, instr, result);
2126       break;
2127    case nir_intrinsic_image_size:
2128       visit_image_size(bld_base, instr, result);
2129       break;
2130    case nir_intrinsic_image_samples:
2131       visit_image_samples(bld_base, instr, result);
2132       break;
2133    case nir_intrinsic_load_shared:
2134       visit_shared_load(bld_base, instr, result);
2135       break;
2136    case nir_intrinsic_store_shared:
2137       visit_shared_store(bld_base, instr);
2138       break;
2139    case nir_intrinsic_shared_atomic_add:
2140    case nir_intrinsic_shared_atomic_imin:
2141    case nir_intrinsic_shared_atomic_umin:
2142    case nir_intrinsic_shared_atomic_imax:
2143    case nir_intrinsic_shared_atomic_umax:
2144    case nir_intrinsic_shared_atomic_and:
2145    case nir_intrinsic_shared_atomic_or:
2146    case nir_intrinsic_shared_atomic_xor:
2147    case nir_intrinsic_shared_atomic_exchange:
2148    case nir_intrinsic_shared_atomic_comp_swap:
2149    case nir_intrinsic_shared_atomic_fadd:
2150    case nir_intrinsic_shared_atomic_fmin:
2151    case nir_intrinsic_shared_atomic_fmax:
2152       visit_shared_atomic(bld_base, instr, result);
2153       break;
2154    case nir_intrinsic_control_barrier:
2155    case nir_intrinsic_scoped_barrier:
2156       visit_barrier(bld_base);
2157       break;
2158    case nir_intrinsic_group_memory_barrier:
2159    case nir_intrinsic_memory_barrier:
2160    case nir_intrinsic_memory_barrier_shared:
2161    case nir_intrinsic_memory_barrier_buffer:
2162    case nir_intrinsic_memory_barrier_image:
2163    case nir_intrinsic_memory_barrier_tcs_patch:
2164       break;
2165    case nir_intrinsic_load_kernel_input:
2166       visit_load_kernel_input(bld_base, instr, result);
2167       break;
2168    case nir_intrinsic_load_global:
2169    case nir_intrinsic_load_global_constant:
2170       visit_load_global(bld_base, instr, result);
2171       break;
2172    case nir_intrinsic_store_global:
2173       visit_store_global(bld_base, instr);
2174       break;
2175    case nir_intrinsic_global_atomic_add:
2176    case nir_intrinsic_global_atomic_imin:
2177    case nir_intrinsic_global_atomic_umin:
2178    case nir_intrinsic_global_atomic_imax:
2179    case nir_intrinsic_global_atomic_umax:
2180    case nir_intrinsic_global_atomic_and:
2181    case nir_intrinsic_global_atomic_or:
2182    case nir_intrinsic_global_atomic_xor:
2183    case nir_intrinsic_global_atomic_exchange:
2184    case nir_intrinsic_global_atomic_comp_swap:
2185    case nir_intrinsic_global_atomic_fadd:
2186    case nir_intrinsic_global_atomic_fmin:
2187    case nir_intrinsic_global_atomic_fmax:
2188       visit_global_atomic(bld_base, instr, result);
2189       break;
2190    case nir_intrinsic_vote_all:
2191    case nir_intrinsic_vote_any:
2192    case nir_intrinsic_vote_ieq:
2193    case nir_intrinsic_vote_feq:
2194       bld_base->vote(bld_base, cast_type(bld_base, get_src(bld_base, instr->src[0]), nir_type_int, nir_src_bit_size(instr->src[0])), instr, result);
2195       break;
2196    case nir_intrinsic_elect:
2197       bld_base->elect(bld_base, result);
2198       break;
2199    case nir_intrinsic_reduce:
2200    case nir_intrinsic_inclusive_scan:
2201    case nir_intrinsic_exclusive_scan:
2202       bld_base->reduce(bld_base, cast_type(bld_base, get_src(bld_base, instr->src[0]), nir_type_int, nir_src_bit_size(instr->src[0])), instr, result);
2203       break;
2204    case nir_intrinsic_ballot:
2205       bld_base->ballot(bld_base, cast_type(bld_base, get_src(bld_base, instr->src[0]), nir_type_int, 32), instr, result);
2206       break;
2207 #if LLVM_VERSION_MAJOR >= 10
2208    case nir_intrinsic_shuffle:
2209       visit_shuffle(bld_base, instr, result);
2210       break;
2211 #endif
2212    case nir_intrinsic_read_invocation:
2213    case nir_intrinsic_read_first_invocation: {
2214       LLVMValueRef src1 = NULL;
2215       LLVMValueRef src0 = get_src(bld_base, instr->src[0]);
2216       if (instr->intrinsic == nir_intrinsic_read_invocation) {
2217          src1 = cast_type(bld_base, get_src(bld_base, instr->src[1]), nir_type_int, 32);
2218          src0 = cast_type(bld_base, src0, nir_type_int, nir_src_bit_size(instr->src[0]));
2219       }
2220       bld_base->read_invocation(bld_base, src0, nir_src_bit_size(instr->src[0]), src1, result);
2221       break;
2222    }
2223    case nir_intrinsic_interp_deref_at_offset:
2224    case nir_intrinsic_interp_deref_at_centroid:
2225    case nir_intrinsic_interp_deref_at_sample:
2226       visit_interp(bld_base, instr, result);
2227       break;
2228    case nir_intrinsic_load_scratch:
2229       visit_load_scratch(bld_base, instr, result);
2230       break;
2231    case nir_intrinsic_store_scratch:
2232       visit_store_scratch(bld_base, instr);
2233       break;
2234    case nir_intrinsic_shader_clock:
2235       bld_base->clock(bld_base, result);
2236       break;
2237    default:
2238       fprintf(stderr, "Unsupported intrinsic: ");
2239       nir_print_instr(&instr->instr, stderr);
2240       fprintf(stderr, "\n");
2241       assert(0);
2242       break;
2243    }
2244    if (result[0]) {
2245       assign_dest(bld_base, &instr->dest, result);
2246    }
2247 }
2248
2249
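/* Texture size/levels/samples queries.  query_levels returns the level
 * count in the last component of the size-query result.
 */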
2250 static void
2251 visit_txs(struct lp_build_nir_context *bld_base, nir_tex_instr *instr)
2252 {
2253    struct lp_sampler_size_query_params params = { 0 };
2254    LLVMValueRef sizes_out[NIR_MAX_VEC_COMPONENTS];
2255    LLVMValueRef explicit_lod = NULL;
2256    LLVMValueRef texture_unit_offset = NULL;
2257    for (unsigned i = 0; i < instr->num_srcs; i++) {
2258       switch (instr->src[i].src_type) {
2259       case nir_tex_src_lod:
2260          explicit_lod = cast_type(bld_base,
2261                                   get_src(bld_base, instr->src[i].src),
2262                                   nir_type_int, 32);
2263          break;
2264       case nir_tex_src_texture_offset:
2265          texture_unit_offset = get_src(bld_base, instr->src[i].src);
2266          break;
2267       default:
2268          break;
2269       }
2270    }
2271
2272    params.target = glsl_sampler_to_pipe(instr->sampler_dim, instr->is_array);
2273    params.texture_unit = instr->texture_index;
2274    params.explicit_lod = explicit_lod;
2275    params.is_sviewinfo = TRUE;
2276    params.sizes_out = sizes_out;
2277    params.samples_only = (instr->op == nir_texop_texture_samples);
2278    params.texture_unit_offset = texture_unit_offset;
2279
2280    if (instr->op == nir_texop_query_levels)
2281       params.explicit_lod = bld_base->uint_bld.zero;
2282    bld_base->tex_size(bld_base, &params);
2283    assign_dest(bld_base, &instr->dest,
2284                &sizes_out[instr->op == nir_texop_query_levels ? 3 : 0]);
2285 }
2286
2287
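/* Decide how the LOD varies across the SIMD vector: scalar when the
 * source is uniform, per-quad in fragment shaders (unless quad LOD is
 * disabled for performance), per-element everywhere else.
 */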
2288 static enum lp_sampler_lod_property
2289 lp_build_nir_lod_property(struct lp_build_nir_context *bld_base,
2290                           nir_src lod_src)
2291 {
2292    enum lp_sampler_lod_property lod_property;
2293
2294    if (nir_src_is_always_uniform(lod_src)) {
2295       lod_property = LP_SAMPLER_LOD_SCALAR;
2296    } else if (bld_base->shader->info.stage == MESA_SHADER_FRAGMENT) {
2297       if (gallivm_perf & GALLIVM_PERF_NO_QUAD_LOD)
2298          lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
2299       else
2300          lod_property = LP_SAMPLER_LOD_PER_QUAD;
2301    } else {
2302       lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
2303    }
2304    return lod_property;
2305 }
2306
2307
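/* Translate a NIR texture instruction into lp_sampler_params: build the
 * sample key, coordinates, derivatives, offsets and LOD, invoke the
 * backend tex hook, and narrow the returned texels if the destination
 * is 16-bit.
 */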
2308 static void
2309 visit_tex(struct lp_build_nir_context *bld_base, nir_tex_instr *instr)
2310 {
2311    struct gallivm_state *gallivm = bld_base->base.gallivm;
2312    LLVMBuilderRef builder = gallivm->builder;
2313    LLVMValueRef coords[5];
2314    LLVMValueRef offsets[3] = { NULL };
2315    LLVMValueRef explicit_lod = NULL, ms_index = NULL;
2316    struct lp_sampler_params params;
2317    struct lp_derivatives derivs;
2318    unsigned sample_key = 0;
2319    nir_deref_instr *texture_deref_instr = NULL;
2320    nir_deref_instr *sampler_deref_instr = NULL;
2321    LLVMValueRef texture_unit_offset = NULL;
2322    LLVMValueRef texel[NIR_MAX_VEC_COMPONENTS];
2323    unsigned lod_src = 0;
2324    LLVMValueRef coord_undef = LLVMGetUndef(bld_base->base.int_vec_type);
2325    unsigned coord_vals = is_aos(bld_base) ? 1 : instr->coord_components;
2326    memset(&params, 0, sizeof(params));
2327    enum lp_sampler_lod_property lod_property = LP_SAMPLER_LOD_SCALAR;
2328
2329    if (instr->op == nir_texop_txs || instr->op == nir_texop_query_levels || instr->op == nir_texop_texture_samples) {
2330       visit_txs(bld_base, instr);
2331       return;
2332    }
2333    if (instr->op == nir_texop_txf || instr->op == nir_texop_txf_ms)
2334       sample_key |= LP_SAMPLER_OP_FETCH << LP_SAMPLER_OP_TYPE_SHIFT;
2335    else if (instr->op == nir_texop_tg4) {
2336       sample_key |= LP_SAMPLER_OP_GATHER << LP_SAMPLER_OP_TYPE_SHIFT;
2337       sample_key |= (instr->component << LP_SAMPLER_GATHER_COMP_SHIFT);
2338    } else if (instr->op == nir_texop_lod)
2339       sample_key |= LP_SAMPLER_OP_LODQ << LP_SAMPLER_OP_TYPE_SHIFT;
2340    for (unsigned i = 0; i < instr->num_srcs; i++) {
2341       switch (instr->src[i].src_type) {
2342       case nir_tex_src_coord: {
2343          LLVMValueRef coord = get_src(bld_base, instr->src[i].src);
2344          if (coord_vals == 1)
2345             coords[0] = coord;
2346          else {
2347             for (unsigned chan = 0; chan < instr->coord_components; ++chan)
2348                coords[chan] = LLVMBuildExtractValue(builder, coord,
2349                                                     chan, "");
2350          }
2351          for (unsigned chan = coord_vals; chan < 5; chan++)
2352             coords[chan] = coord_undef;
2353
2354          break;
2355       }
2356       case nir_tex_src_texture_deref:
2357          texture_deref_instr = nir_src_as_deref(instr->src[i].src);
2358          break;
2359       case nir_tex_src_sampler_deref:
2360          sampler_deref_instr = nir_src_as_deref(instr->src[i].src);
2361          break;
2362       case nir_tex_src_comparator:
2363          sample_key |= LP_SAMPLER_SHADOW;
2364          coords[4] = get_src(bld_base, instr->src[i].src);
2365          coords[4] = cast_type(bld_base, coords[4], nir_type_float, 32);
2366          break;
2367       case nir_tex_src_bias:
2368          sample_key |= LP_SAMPLER_LOD_BIAS << LP_SAMPLER_LOD_CONTROL_SHIFT;
2369          lod_src = i;
2370          explicit_lod = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_float, 32);
2371          break;
2372       case nir_tex_src_lod:
2373          sample_key |= LP_SAMPLER_LOD_EXPLICIT << LP_SAMPLER_LOD_CONTROL_SHIFT;
2374          lod_src = i;
2375          if (instr->op == nir_texop_txf)
2376             explicit_lod = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_int, 32);
2377          else
2378             explicit_lod = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_float, 32);
2379          break;
2380       case nir_tex_src_ddx: {
2381          int deriv_cnt = instr->coord_components;
2382          if (instr->is_array)
2383             deriv_cnt--;
2384          LLVMValueRef deriv_val = get_src(bld_base, instr->src[i].src);
2385          if (deriv_cnt == 1)
2386             derivs.ddx[0] = deriv_val;
2387          else
2388             for (unsigned chan = 0; chan < deriv_cnt; ++chan)
2389                derivs.ddx[chan] = LLVMBuildExtractValue(builder, deriv_val,
2390                                                         chan, "");
2391          for (unsigned chan = 0; chan < deriv_cnt; ++chan)
2392             derivs.ddx[chan] = cast_type(bld_base, derivs.ddx[chan], nir_type_float, 32);
2393          break;
2394       }
2395       case nir_tex_src_ddy: {
2396          int deriv_cnt = instr->coord_components;
2397          if (instr->is_array)
2398             deriv_cnt--;
2399          LLVMValueRef deriv_val = get_src(bld_base, instr->src[i].src);
2400          if (deriv_cnt == 1)
2401             derivs.ddy[0] = deriv_val;
2402          else
2403             for (unsigned chan = 0; chan < deriv_cnt; ++chan)
2404                derivs.ddy[chan] = LLVMBuildExtractValue(builder, deriv_val,
2405                                                         chan, "");
2406          for (unsigned chan = 0; chan < deriv_cnt; ++chan)
2407             derivs.ddy[chan] = cast_type(bld_base, derivs.ddy[chan], nir_type_float, 32);
2408          break;
2409       }
2410       case nir_tex_src_offset: {
2411          int offset_cnt = instr->coord_components;
2412          if (instr->is_array)
2413             offset_cnt--;
2414          LLVMValueRef offset_val = get_src(bld_base, instr->src[i].src);
2415          sample_key |= LP_SAMPLER_OFFSETS;
2416          if (offset_cnt == 1)
2417             offsets[0] = cast_type(bld_base, offset_val, nir_type_int, 32);
2418          else {
2419             for (unsigned chan = 0; chan < offset_cnt; ++chan) {
2420                offsets[chan] = LLVMBuildExtractValue(builder, offset_val,
2421                                                      chan, "");
2422                offsets[chan] = cast_type(bld_base, offsets[chan], nir_type_int, 32);
2423             }
2424          }
2425          break;
2426       }
2427       case nir_tex_src_ms_index:
2428          sample_key |= LP_SAMPLER_FETCH_MS;
2429          ms_index = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_int, 32);
2430          break;
2431
2432       case nir_tex_src_texture_offset:
2433          texture_unit_offset = get_src(bld_base, instr->src[i].src);
2434          break;
2435       case nir_tex_src_sampler_offset:
2436          break;
2437       default:
2438          assert(0);
2439          break;
2440       }
2441    }
2442    if (!sampler_deref_instr)
2443       sampler_deref_instr = texture_deref_instr;
2444
2445    if (explicit_lod)
2446       lod_property = lp_build_nir_lod_property(bld_base, instr->src[lod_src].src);
2447
2448    if (instr->op == nir_texop_tex || instr->op == nir_texop_tg4 || instr->op == nir_texop_txb ||
2449        instr->op == nir_texop_txl || instr->op == nir_texop_txd || instr->op == nir_texop_lod)
2450       for (unsigned chan = 0; chan < coord_vals; ++chan)
2451          coords[chan] = cast_type(bld_base, coords[chan], nir_type_float, 32);
2452    else if (instr->op == nir_texop_txf || instr->op == nir_texop_txf_ms)
2453       for (unsigned chan = 0; chan < instr->coord_components; ++chan)
2454          coords[chan] = cast_type(bld_base, coords[chan], nir_type_int, 32);
2455
2456    if (instr->is_array && instr->sampler_dim == GLSL_SAMPLER_DIM_1D) {
2457       /* move layer coord for 1d arrays. */
2458       coords[2] = coords[1];
2459       coords[1] = coord_undef;
2460    }
2461
2462    uint32_t samp_base_index = 0, tex_base_index = 0;
2463    if (!sampler_deref_instr) {
2464       int samp_src_index = nir_tex_instr_src_index(instr, nir_tex_src_sampler_handle);
2465       if (samp_src_index == -1) {
2466          samp_base_index = instr->sampler_index;
2467       }
2468    }
2469    if (!texture_deref_instr) {
2470       int tex_src_index = nir_tex_instr_src_index(instr, nir_tex_src_texture_handle);
2471       if (tex_src_index == -1) {
2472          tex_base_index = instr->texture_index;
2473       }
2474    }
2475
2476    if (instr->op == nir_texop_txd) {
2477       sample_key |= LP_SAMPLER_LOD_DERIVATIVES << LP_SAMPLER_LOD_CONTROL_SHIFT;
2478       params.derivs = &derivs;
2479       if (bld_base->shader->info.stage == MESA_SHADER_FRAGMENT) {
2480          if (gallivm_perf & GALLIVM_PERF_NO_QUAD_LOD)
2481             lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
2482          else
2483             lod_property = LP_SAMPLER_LOD_PER_QUAD;
2484       } else
2485          lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
2486    }
2487
2488    sample_key |= lod_property << LP_SAMPLER_LOD_PROPERTY_SHIFT;
2489    params.sample_key = sample_key;
2490    params.offsets = offsets;
2491    params.texture_index = tex_base_index;
2492    params.texture_index_offset = texture_unit_offset;
2493    params.sampler_index = samp_base_index;
2494    params.coords = coords;
2495    params.texel = texel;
2496    params.lod = explicit_lod;
2497    params.ms_index = ms_index;
2498    params.aniso_filter_table = bld_base->aniso_filter_table;
2499    bld_base->tex(bld_base, &params);
2500
2501    if (nir_dest_bit_size(instr->dest) != 32) {
2502       assert(nir_dest_bit_size(instr->dest) == 16);
2503       LLVMTypeRef vec_type = NULL;
2504       bool is_float = false;
2505       switch (nir_alu_type_get_base_type(instr->dest_type)) {
2506       case nir_type_float:
2507          is_float = true;
2508          break;
2509       case nir_type_int:
2510          vec_type = bld_base->int16_bld.vec_type;
2511          break;
2512       case nir_type_uint:
2513          vec_type = bld_base->uint16_bld.vec_type;
2514          break;
2515       default:
2516          unreachable("unexpected alu type");
2517       }
2518       for (int i = 0; i < nir_dest_num_components(instr->dest); ++i) {
2519          if (is_float) {
2520             texel[i] = lp_build_float_to_half(gallivm, texel[i]);
2521          } else {
2522             texel[i] = LLVMBuildBitCast(builder, texel[i], bld_base->int_bld.vec_type, "");
2523             texel[i] = LLVMBuildTrunc(builder, texel[i], vec_type, "");
2524          }
2525       }
2526    }
2527
2528    assign_dest(bld_base, &instr->dest, texel);
2529 }
2530
2531
2532 static void
2533 visit_ssa_undef(struct lp_build_nir_context *bld_base,
2534                 const nir_ssa_undef_instr *instr)
2535 {
2536    unsigned num_components = instr->def.num_components;
2537    LLVMValueRef undef[NIR_MAX_VEC_COMPONENTS];
2538    struct lp_build_context *undef_bld = get_int_bld(bld_base, true,
2539                                                     instr->def.bit_size);
2540    for (unsigned i = 0; i < num_components; i++)
2541       undef[i] = LLVMGetUndef(undef_bld->vec_type);
2542    memset(&undef[num_components], 0, (NIR_MAX_VEC_COMPONENTS - num_components) * sizeof(LLVMValueRef));
2543    assign_ssa_dest(bld_base, &instr->def, undef);
2544 }
2545
2546
2547 static void
2548 visit_jump(struct lp_build_nir_context *bld_base,
2549            const nir_jump_instr *instr)
2550 {
2551    switch (instr->type) {
2552    case nir_jump_break:
2553       bld_base->break_stmt(bld_base);
2554       break;
2555    case nir_jump_continue:
2556       bld_base->continue_stmt(bld_base);
2557       break;
2558    default:
2559       unreachable("Unknown jump instr");
2560    }
2561 }
2562
2563
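/* Only shared/global memory derefs are materialized as values here;
 * variable derefs for other modes are resolved by the load/store
 * intrinsic handlers instead.
 */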
2564 static void
2565 visit_deref(struct lp_build_nir_context *bld_base,
2566             nir_deref_instr *instr)
2567 {
2568    if (!nir_deref_mode_is_one_of(instr, nir_var_mem_shared |
2569                                         nir_var_mem_global)) {
2570       return;
2571    }
2572
2573    LLVMValueRef result = NULL;
2574    switch(instr->deref_type) {
2575    case nir_deref_type_var: {
2576       struct hash_entry *entry =
2577          _mesa_hash_table_search(bld_base->vars, instr->var);
2578       result = entry->data;
2579       break;
2580    }
2581    default:
2582       unreachable("Unhandled deref_instr deref type");
2583    }
2584
2585    assign_ssa(bld_base, instr->dest.ssa.index, result);
2586 }
2587
2588
2589 static void
2590 visit_block(struct lp_build_nir_context *bld_base, nir_block *block)
2591 {
2592    nir_foreach_instr(instr, block)
2593    {
2594       switch (instr->type) {
2595       case nir_instr_type_alu:
2596          visit_alu(bld_base, nir_instr_as_alu(instr));
2597          break;
2598       case nir_instr_type_load_const:
2599          visit_load_const(bld_base, nir_instr_as_load_const(instr));
2600          break;
2601       case nir_instr_type_intrinsic:
2602          visit_intrinsic(bld_base, nir_instr_as_intrinsic(instr));
2603          break;
2604       case nir_instr_type_tex:
2605          visit_tex(bld_base, nir_instr_as_tex(instr));
2606          break;
2607       case nir_instr_type_phi:
2608          assert(0);
2609          break;
2610       case nir_instr_type_ssa_undef:
2611          visit_ssa_undef(bld_base, nir_instr_as_ssa_undef(instr));
2612          break;
2613       case nir_instr_type_jump:
2614          visit_jump(bld_base, nir_instr_as_jump(instr));
2615          break;
2616       case nir_instr_type_deref:
2617          visit_deref(bld_base, nir_instr_as_deref(instr));
2618          break;
2619       default:
2620          fprintf(stderr, "Unknown NIR instr type: ");
2621          nir_print_instr(instr, stderr);
2622          fprintf(stderr, "\n");
2623          abort();
2624       }
2625    }
2626 }
2627
2628
2629 static void
2630 visit_if(struct lp_build_nir_context *bld_base, nir_if *if_stmt)
2631 {
2632    LLVMValueRef cond = get_src(bld_base, if_stmt->condition);
2633
2634    bld_base->if_cond(bld_base, cond);
2635    visit_cf_list(bld_base, &if_stmt->then_list);
2636
2637    if (!exec_list_is_empty(&if_stmt->else_list)) {
2638       bld_base->else_stmt(bld_base);
2639       visit_cf_list(bld_base, &if_stmt->else_list);
2640    }
2641    bld_base->endif_stmt(bld_base);
2642 }
2643
2644
2645 static void
2646 visit_loop(struct lp_build_nir_context *bld_base, nir_loop *loop)
2647 {
2648    bld_base->bgnloop(bld_base);
2649    visit_cf_list(bld_base, &loop->body);
2650    bld_base->endloop(bld_base);
2651 }
2652
2653
2654 static void
2655 visit_cf_list(struct lp_build_nir_context *bld_base,
2656               struct exec_list *list)
2657 {
2658    foreach_list_typed(nir_cf_node, node, node, list)
2659    {
2660       switch (node->type) {
2661       case nir_cf_node_block:
2662          visit_block(bld_base, nir_cf_node_as_block(node));
2663          break;
2664       case nir_cf_node_if:
2665          visit_if(bld_base, nir_cf_node_as_if(node));
2666          break;
2667       case nir_cf_node_loop:
2668          visit_loop(bld_base, nir_cf_node_as_loop(node));
2669          break;
2670       default:
2671          assert(0);
2672       }
2673    }
2674 }
2675
2676
2677 static void
2678 handle_shader_output_decl(struct lp_build_nir_context *bld_base,
2679                           struct nir_shader *nir,
2680                           struct nir_variable *variable)
2681 {
2682    bld_base->emit_var_decl(bld_base, variable);
2683 }
2684
2685
2686 /* Vector registers are stored as arrays on the LLVM side so that we
2687    can use GEP on them: exec-mask stores need to operate on a single
2688    component at a time.
2689    The arrays are laid out as:
2690    0.x, 1.x, 2.x, 3.x
2691    0.y, 1.y, 2.y, 3.y
2692    ....
2693 */
2694 static LLVMTypeRef
2695 get_register_type(struct lp_build_nir_context *bld_base,
2696                   nir_register *reg)
2697 {
2698    if (is_aos(bld_base))
2699       return bld_base->base.int_vec_type;
2700
2701    struct lp_build_context *int_bld =
2702       get_int_bld(bld_base, true, reg->bit_size == 1 ? 32 : reg->bit_size);
2703
2704    LLVMTypeRef type = int_bld->vec_type;
2705    if (reg->num_array_elems)
2706       type = LLVMArrayType(type, reg->num_array_elems);
2707    if (reg->num_components > 1)
2708       type = LLVMArrayType(type, reg->num_components);
2709
2710    return type;
2711 }
2712
2713
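/* Main entry point: lower the NIR shader out of SSA into registers,
 * declare the shader outputs, allocate LLVM storage for each register,
 * then translate the control-flow list of the single function.
 */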
2714 bool lp_build_nir_llvm(struct lp_build_nir_context *bld_base,
2715                        struct nir_shader *nir)
2716 {
2717    struct nir_function *func;
2718
2719    nir_convert_from_ssa(nir, true);
2720    nir_lower_locals_to_regs(nir);
2721    nir_remove_dead_derefs(nir);
2722    nir_remove_dead_variables(nir, nir_var_function_temp, NULL);
2723
2724    if (is_aos(bld_base)) {
2725       nir_move_vec_src_uses_to_dest(nir);
2726       nir_lower_vec_to_movs(nir, NULL, NULL);
2727    }
2728
2729    nir_foreach_shader_out_variable(variable, nir)
2730       handle_shader_output_decl(bld_base, nir, variable);
2731
2732    if (nir->info.io_lowered) {
2733       uint64_t outputs_written = nir->info.outputs_written;
2734
2735       while (outputs_written) {
2736          unsigned location = u_bit_scan64(&outputs_written);
2737          nir_variable var = {0};
2738
2739          var.type = glsl_vec4_type();
2740          var.data.mode = nir_var_shader_out;
2741          var.data.location = location;
2742          var.data.driver_location = util_bitcount64(nir->info.outputs_written &
2743                                                     BITFIELD64_MASK(location));
2744          bld_base->emit_var_decl(bld_base, &var);
2745       }
2746    }
2747
2748    bld_base->regs = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
2749                                             _mesa_key_pointer_equal);
2750    bld_base->vars = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
2751                                             _mesa_key_pointer_equal);
2752    bld_base->range_ht = _mesa_pointer_hash_table_create(NULL);
2753
2754    func = (struct nir_function *)exec_list_get_head(&nir->functions);
2755
2756    nir_foreach_register(reg, &func->impl->registers) {
2757       LLVMTypeRef type = get_register_type(bld_base, reg);
2758       LLVMValueRef reg_alloc = lp_build_alloca(bld_base->base.gallivm,
2759                                                type, "reg");
2760       _mesa_hash_table_insert(bld_base->regs, reg, reg_alloc);
2761    }
2762    nir_index_ssa_defs(func->impl);
2763    bld_base->ssa_defs = calloc(func->impl->ssa_alloc, sizeof(LLVMValueRef));
2764    visit_cf_list(bld_base, &func->impl->body);
2765
2766    free(bld_base->ssa_defs);
2767    ralloc_free(bld_base->vars);
2768    ralloc_free(bld_base->regs);
2769    ralloc_free(bld_base->range_ht);
2770    return true;
2771 }
2772
2773
2774 /* Run some basic NIR optimizations to remove constructs we don't want the backend to see. */
2775 void
2776 lp_build_opt_nir(struct nir_shader *nir)
2777 {
2778    bool progress;
2779
2780    static const struct nir_lower_tex_options lower_tex_options = {
2781       .lower_tg4_offsets = true,
2782       .lower_txp = ~0u,
2783       .lower_invalid_implicit_lod = true,
2784    };
2785    NIR_PASS_V(nir, nir_lower_tex, &lower_tex_options);
2786    NIR_PASS_V(nir, nir_lower_frexp);
2787
2788    NIR_PASS_V(nir, nir_lower_flrp, 16|32|64, true);
2789    NIR_PASS_V(nir, nir_lower_fp16_casts);
2790    do {
2791       progress = false;
2792       NIR_PASS(progress, nir, nir_opt_constant_folding);
2793       NIR_PASS(progress, nir, nir_opt_algebraic);
2794       NIR_PASS(progress, nir, nir_lower_pack);
2795
2796       nir_lower_tex_options options = { .lower_invalid_implicit_lod = true, };
2797       NIR_PASS_V(nir, nir_lower_tex, &options);
2798
2799       const nir_lower_subgroups_options subgroups_options = {
2800          .subgroup_size = lp_native_vector_width / 32,
2801          .ballot_bit_size = 32,
2802          .ballot_components = 1,
2803          .lower_to_scalar = true,
2804          .lower_subgroup_masks = true,
2805          .lower_relative_shuffle = true,
2806       };
2807       NIR_PASS(progress, nir, nir_lower_subgroups, &subgroups_options);
2808    } while (progress);
2809
2810    do {
2811       progress = false;
2812       NIR_PASS(progress, nir, nir_opt_algebraic_late);
2813       if (progress) {
2814          NIR_PASS_V(nir, nir_copy_prop);
2815          NIR_PASS_V(nir, nir_opt_dce);
2816          NIR_PASS_V(nir, nir_opt_cse);
2817       }
2818    } while (progress);
2819
2820    if (nir_lower_bool_to_int32(nir)) {
2821       NIR_PASS_V(nir, nir_copy_prop);
2822       NIR_PASS_V(nir, nir_opt_dce);
2823    }
2824 }