/*
 * Copyright © 2018 Valve Corporation
 * Copyright © 2018 Google
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "aco_instruction_selection.h"

#include "aco_builder.h"
#include "aco_interface.h"

#include "common/ac_nir.h"
#include "common/sid.h"

#include "util/fast_idiv_by_const.h"
#include "util/memstream.h"

#define isel_err(...) _isel_err(ctx, __FILE__, __LINE__, __VA_ARGS__)

static void
_isel_err(isel_context* ctx, const char* file, unsigned line, const nir_instr* instr,
          const char* msg)
{
   char* out;
   size_t outsize;
   struct u_memstream mem;
   u_memstream_open(&mem, &out, &outsize);
   FILE* const memf = u_memstream_get(&mem);

   fprintf(memf, "%s: ", msg);
   nir_print_instr(instr, memf);
   u_memstream_close(&mem);

   _aco_err(ctx->program, file, line, out);
   free(out);
}

struct if_context {
   bool exec_potentially_empty_discard_old;
   bool exec_potentially_empty_break_old;
   uint16_t exec_potentially_empty_break_depth_old;

   bool uniform_has_then_branch;
   bool then_branch_divergent;
};

struct loop_context {
   unsigned header_idx_old;

   bool divergent_cont_old;
   bool divergent_branch_old;
   bool divergent_if_old;
};

static bool visit_cf_list(struct isel_context* ctx, struct exec_list* list);

static void
add_logical_edge(unsigned pred_idx, Block* succ)
{
   succ->logical_preds.emplace_back(pred_idx);
}

static void
add_linear_edge(unsigned pred_idx, Block* succ)
{
   succ->linear_preds.emplace_back(pred_idx);
}

static void
add_edge(unsigned pred_idx, Block* succ)
{
   add_logical_edge(pred_idx, succ);
   add_linear_edge(pred_idx, succ);
}

static void
append_logical_start(Block* b)
{
   Builder(NULL, b).pseudo(aco_opcode::p_logical_start);
}

static void
append_logical_end(Block* b)
{
   Builder(NULL, b).pseudo(aco_opcode::p_logical_end);
}

Temp
get_ssa_temp(struct isel_context* ctx, nir_ssa_def* def)
{
   uint32_t id = ctx->first_temp_id + def->index;
   return Temp(id, ctx->program->temp_rc[id]);
}

Temp
emit_mbcnt(isel_context* ctx, Temp dst, Operand mask = Operand(), Operand base = Operand::zero())
{
   Builder bld(ctx->program, ctx->block);
   assert(mask.isUndefined() || mask.isTemp() || (mask.isFixed() && mask.physReg() == exec));
   assert(mask.isUndefined() || mask.bytes() == bld.lm.bytes());

   if (ctx->program->wave_size == 32) {
      Operand mask_lo = mask.isUndefined() ? Operand::c32(-1u) : mask;
      return bld.vop3(aco_opcode::v_mbcnt_lo_u32_b32, Definition(dst), mask_lo, base);
   }

   Operand mask_lo = Operand::c32(-1u);
   Operand mask_hi = Operand::c32(-1u);

   if (mask.isTemp()) {
      RegClass rc = RegClass(mask.regClass().type(), 1);
      Builder::Result mask_split =
         bld.pseudo(aco_opcode::p_split_vector, bld.def(rc), bld.def(rc), mask);
      mask_lo = Operand(mask_split.def(0).getTemp());
      mask_hi = Operand(mask_split.def(1).getTemp());
   } else if (mask.physReg() == exec) {
      mask_lo = Operand(exec_lo, s1);
      mask_hi = Operand(exec_hi, s1);
   }

   Temp mbcnt_lo = bld.vop3(aco_opcode::v_mbcnt_lo_u32_b32, bld.def(v1), mask_lo, base);

   if (ctx->program->gfx_level <= GFX7)
      return bld.vop2(aco_opcode::v_mbcnt_hi_u32_b32, Definition(dst), mask_hi, mbcnt_lo);

   return bld.vop3(aco_opcode::v_mbcnt_hi_u32_b32_e64, Definition(dst), mask_hi, mbcnt_lo);
}

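/* Illustrative note (not from the original source): v_mbcnt_{lo,hi} gives each
 * lane the number of set mask bits strictly below its own lane index, plus
 * `base`. With mask = exec and all lanes active, lane i therefore receives i,
 * which is how callers compute a compacted per-lane index. On wave64 the
 * 64-bit mask must be split because each instruction only counts 32 bits:
 * mbcnt_hi(mask_hi, mbcnt_lo(mask_lo, base)).
 */
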
static Temp
emit_wqm(Builder& bld, Temp src, Temp dst = Temp(0, s1), bool program_needs_wqm = false)
{
   if (bld.program->stage != fragment_fs) {
      if (!dst.id())
         return src;
      return bld.copy(Definition(dst), src);
   } else if (!dst.id()) {
      dst = bld.tmp(src.regClass());
   }

   assert(src.size() == dst.size());
   bld.pseudo(aco_opcode::p_wqm, Definition(dst), src);
   bld.program->needs_wqm |= program_needs_wqm;
   return dst;
}

static Temp
emit_bpermute(isel_context* ctx, Builder& bld, Temp index, Temp data)
{
   if (index.regClass() == s1)
      return bld.readlane(bld.def(s1), data, index);

   if (ctx->options->gfx_level <= GFX7) {
      /* GFX6-7: there is no bpermute instruction */
      Operand index_op(index);
      Operand input_data(data);
      index_op.setLateKill(true);
      input_data.setLateKill(true);

      return bld.pseudo(aco_opcode::p_bpermute, bld.def(v1), bld.def(bld.lm), bld.def(bld.lm, vcc),
                        index_op, input_data);
   } else if (ctx->options->gfx_level >= GFX10 && ctx->program->wave_size == 64) {

      /* GFX10 wave64 mode: emulate full-wave bpermute */
      Temp index_is_lo =
         bld.vopc(aco_opcode::v_cmp_ge_u32, bld.def(bld.lm), Operand::c32(31u), index);
      Builder::Result index_is_lo_split =
         bld.pseudo(aco_opcode::p_split_vector, bld.def(s1), bld.def(s1), index_is_lo);
      Temp index_is_lo_n1 = bld.sop1(aco_opcode::s_not_b32, bld.def(s1), bld.def(s1, scc),
                                     index_is_lo_split.def(1).getTemp());
      Operand same_half = bld.pseudo(aco_opcode::p_create_vector, bld.def(s2),
                                     index_is_lo_split.def(0).getTemp(), index_is_lo_n1);
      Operand index_x4 = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand::c32(2u), index);
      Operand input_data(data);

      index_x4.setLateKill(true);
      input_data.setLateKill(true);
      same_half.setLateKill(true);

      /* We need one pair of shared VGPRs:
       * Note, that these have twice the allocation granularity of normal VGPRs */
      ctx->program->config->num_shared_vgprs = 2 * ctx->program->dev.vgpr_alloc_granule;

      return bld.pseudo(aco_opcode::p_bpermute, bld.def(v1), bld.def(s2), bld.def(s1, scc),
                        index_x4, input_data, same_half);
   } else {
      /* GFX8-9 or GFX10 wave32: bpermute works normally */
      Temp index_x4 = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand::c32(2u), index);
      return bld.ds(aco_opcode::ds_bpermute_b32, bld.def(v1), index_x4, data);
   }
}

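/* Illustrative note (not from the original source): on GFX10, ds_bpermute_b32
 * can only read lanes within the same 32-lane half of a wave64, so e.g. lane 2
 * asking for lane 33 would not get valid data. The shared-VGPR pseudo above
 * lets the lowering exchange data between the two halves; `same_half` tells it
 * which lanes can take the fast path. The index is pre-multiplied by 4 because
 * the instruction addresses lanes in bytes (lane = addr / 4).
 */
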
static Temp
emit_masked_swizzle(isel_context* ctx, Builder& bld, Temp src, unsigned mask)
{
   if (ctx->options->gfx_level >= GFX8) {
      unsigned and_mask = mask & 0x1f;
      unsigned or_mask = (mask >> 5) & 0x1f;
      unsigned xor_mask = (mask >> 10) & 0x1f;

      uint16_t dpp_ctrl = 0xffff;

      if (and_mask == 0x1f && or_mask < 4 && xor_mask < 4) {
         unsigned res[4] = {0, 1, 2, 3};
         for (unsigned i = 0; i < 4; i++)
            res[i] = ((res[i] | or_mask) ^ xor_mask) & 0x3;
         dpp_ctrl = dpp_quad_perm(res[0], res[1], res[2], res[3]);
      } else if (and_mask == 0x1f && !or_mask && xor_mask == 8) {
         dpp_ctrl = dpp_row_rr(8);
      } else if (and_mask == 0x1f && !or_mask && xor_mask == 0xf) {
         dpp_ctrl = dpp_row_mirror;
      } else if (and_mask == 0x1f && !or_mask && xor_mask == 0x7) {
         dpp_ctrl = dpp_row_half_mirror;
      } else if (ctx->options->gfx_level >= GFX10 && (and_mask & 0x18) == 0x18 && or_mask < 8 &&
                 xor_mask < 8) {
         /* DPP8 comes last, as it does not allow several modifiers like `abs`
          * that are available with DPP16. */
         Builder::Result ret = bld.vop1_dpp8(aco_opcode::v_mov_b32, bld.def(v1), src);
         for (unsigned i = 0; i < 8; i++) {
            ret.instr->dpp8().lane_sel[i] = (((i & and_mask) | or_mask) ^ xor_mask) & 0x7;
         }
         return ret;
      }

      if (dpp_ctrl != 0xffff)
         return bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), src, dpp_ctrl);
   }

   return bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), src, mask, 0, false);
}

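/* Illustrative example (assumption, based on the ds_swizzle_b32 encoding used
 * above): the 15-bit mask packs and_mask in bits [4:0], or_mask in [9:5] and
 * xor_mask in [14:10]; each lane then reads lane ((id & and) | or) ^ xor.
 * For instance, mask = (0x7 << 10) | 0x1f keeps the lane id (and = 0x1f,
 * or = 0) and XORs it with 7, mirroring lanes within groups of eight -- the
 * same effect as the dpp_row_half_mirror fast path.
 */
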
Temp
as_vgpr(Builder& bld, Temp val)
{
   if (val.type() == RegType::sgpr)
      return bld.copy(bld.def(RegType::vgpr, val.size()), val);
   assert(val.type() == RegType::vgpr);
   return val;
}

Temp
as_vgpr(isel_context* ctx, Temp val)
{
   Builder bld(ctx->program, ctx->block);
   return as_vgpr(bld, val);
}

// assumes a != 0xffffffff
void
emit_v_div_u32(isel_context* ctx, Temp dst, Temp a, uint32_t b)
{
   assert(b != 0);
   Builder bld(ctx->program, ctx->block);

   if (util_is_power_of_two_or_zero(b)) {
      bld.vop2(aco_opcode::v_lshrrev_b32, Definition(dst), Operand::c32(util_logbase2(b)), a);
      return;
   }

   util_fast_udiv_info info = util_compute_fast_udiv_info(b, 32, 32);

   assert(info.multiplier <= 0xffffffff);

   bool pre_shift = info.pre_shift != 0;
   bool increment = info.increment != 0;
   bool multiply = true;
   bool post_shift = info.post_shift != 0;

   if (!pre_shift && !increment && !multiply && !post_shift) {
      bld.copy(Definition(dst), a);
      return;
   }

   Temp pre_shift_dst = a;
   if (pre_shift) {
      pre_shift_dst = (increment || multiply || post_shift) ? bld.tmp(v1) : dst;
      bld.vop2(aco_opcode::v_lshrrev_b32, Definition(pre_shift_dst), Operand::c32(info.pre_shift),
               a);
   }

   Temp increment_dst = pre_shift_dst;
   if (increment) {
      increment_dst = (post_shift || multiply) ? bld.tmp(v1) : dst;
      bld.vadd32(Definition(increment_dst), Operand::c32(info.increment), pre_shift_dst);
   }

   Temp multiply_dst = increment_dst;
   if (multiply) {
      multiply_dst = post_shift ? bld.tmp(v1) : dst;
      bld.vop3(aco_opcode::v_mul_hi_u32, Definition(multiply_dst), increment_dst,
               bld.copy(bld.def(v1), Operand::c32(info.multiplier)));
   }

   if (post_shift) {
      bld.vop2(aco_opcode::v_lshrrev_b32, Definition(dst), Operand::c32(info.post_shift),
               multiply_dst);
   }
}

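/* Worked example (assumption, standard fast-division constants): for b = 5,
 * util_compute_fast_udiv_info yields pre_shift = 0, increment = 0,
 * multiplier = 0xCCCCCCCD and post_shift = 2, so the emitted code computes
 * dst = mul_hi(a, 0xCCCCCCCD) >> 2, which equals a / 5 for any 32-bit a.
 */
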
void
emit_extract_vector(isel_context* ctx, Temp src, uint32_t idx, Temp dst)
{
   Builder bld(ctx->program, ctx->block);
   bld.pseudo(aco_opcode::p_extract_vector, Definition(dst), src, Operand::c32(idx));
}

Temp
emit_extract_vector(isel_context* ctx, Temp src, uint32_t idx, RegClass dst_rc)
{
   /* no need to extract the whole vector */
   if (src.regClass() == dst_rc) {
      assert(idx == 0);
      return src;
   }

   assert(src.bytes() > (idx * dst_rc.bytes()));
   Builder bld(ctx->program, ctx->block);
   auto it = ctx->allocated_vec.find(src.id());
   if (it != ctx->allocated_vec.end() && dst_rc.bytes() == it->second[idx].regClass().bytes()) {
      if (it->second[idx].regClass() == dst_rc) {
         return it->second[idx];
      } else {
         assert(!dst_rc.is_subdword());
         assert(dst_rc.type() == RegType::vgpr && it->second[idx].type() == RegType::sgpr);
         return bld.copy(bld.def(dst_rc), it->second[idx]);
      }
   }

   if (dst_rc.is_subdword())
      src = as_vgpr(ctx, src);

   if (src.bytes() == dst_rc.bytes()) {
      assert(idx == 0);
      return bld.copy(bld.def(dst_rc), src);
   } else {
      Temp dst = bld.tmp(dst_rc);
      emit_extract_vector(ctx, src, idx, dst);
      return dst;
   }
}

void
emit_split_vector(isel_context* ctx, Temp vec_src, unsigned num_components)
{
   if (num_components == 1)
      return;
   if (ctx->allocated_vec.find(vec_src.id()) != ctx->allocated_vec.end())
      return;
   RegClass rc;
   if (num_components > vec_src.size()) {
      if (vec_src.type() == RegType::sgpr) {
         /* should still help get_alu_src() */
         emit_split_vector(ctx, vec_src, vec_src.size());
         return;
      }
      /* sub-dword split */
      rc = RegClass(RegType::vgpr, vec_src.bytes() / num_components).as_subdword();
   } else {
      rc = RegClass(vec_src.type(), vec_src.size() / num_components);
   }
   aco_ptr<Pseudo_instruction> split{create_instruction<Pseudo_instruction>(
      aco_opcode::p_split_vector, Format::PSEUDO, 1, num_components)};
   split->operands[0] = Operand(vec_src);
   std::array<Temp, NIR_MAX_VEC_COMPONENTS> elems;
   for (unsigned i = 0; i < num_components; i++) {
      elems[i] = ctx->program->allocateTmp(rc);
      split->definitions[i] = Definition(elems[i]);
   }
   ctx->block->instructions.emplace_back(std::move(split));
   ctx->allocated_vec.emplace(vec_src.id(), elems);
}

/* This vector expansion uses a mask to determine which elements in the new vector
 * come from the original vector. The other elements are undefined. */
void
expand_vector(isel_context* ctx, Temp vec_src, Temp dst, unsigned num_components, unsigned mask,
              bool zero_padding = false)
{
   assert(vec_src.type() == RegType::vgpr);
   Builder bld(ctx->program, ctx->block);

   if (dst.type() == RegType::sgpr && num_components > dst.size()) {
      Temp tmp_dst = bld.tmp(RegClass::get(RegType::vgpr, 2 * num_components));
      expand_vector(ctx, vec_src, tmp_dst, num_components, mask, zero_padding);
      bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), tmp_dst);
      ctx->allocated_vec[dst.id()] = ctx->allocated_vec[tmp_dst.id()];
      return;
   }

   emit_split_vector(ctx, vec_src, util_bitcount(mask));

   if (vec_src == dst)
      return;

   if (num_components == 1) {
      if (dst.type() == RegType::sgpr)
         bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), vec_src);
      else
         bld.copy(Definition(dst), vec_src);
      return;
   }

   unsigned component_bytes = dst.bytes() / num_components;
   RegClass src_rc = RegClass::get(RegType::vgpr, component_bytes);
   RegClass dst_rc = RegClass::get(dst.type(), component_bytes);
   assert(dst.type() == RegType::vgpr || !src_rc.is_subdword());
   std::array<Temp, NIR_MAX_VEC_COMPONENTS> elems;

   Temp padding = Temp(0, dst_rc);
   if (zero_padding)
      padding = bld.copy(bld.def(dst_rc), Operand::zero(component_bytes));

   aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(
      aco_opcode::p_create_vector, Format::PSEUDO, num_components, 1)};
   vec->definitions[0] = Definition(dst);
   unsigned k = 0;
   for (unsigned i = 0; i < num_components; i++) {
      if (mask & (1 << i)) {
         Temp src = emit_extract_vector(ctx, vec_src, k++, src_rc);
         if (dst.type() == RegType::sgpr)
            src = bld.as_uniform(src);
         vec->operands[i] = Operand(src);
         elems[i] = src;
      } else {
         vec->operands[i] = Operand::zero(component_bytes);
         elems[i] = padding;
      }
   }
   ctx->block->instructions.emplace_back(std::move(vec));
   ctx->allocated_vec.emplace(dst.id(), elems);
}

/* adjust misaligned small bit size loads */
void
byte_align_scalar(isel_context* ctx, Temp vec, Operand offset, Temp dst)
{
   Builder bld(ctx->program, ctx->block);
   Operand shift;
   Temp select = Temp();
   if (offset.isConstant()) {
      assert(offset.constantValue() && offset.constantValue() < 4);
      shift = Operand::c32(offset.constantValue() * 8);
   } else {
      /* bit_offset = 8 * (offset & 0x3) */
      Temp tmp =
         bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), offset, Operand::c32(3u));
      select = bld.tmp(s1);
      shift = bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.scc(Definition(select)), tmp,
                       Operand::c32(3u));
   }

   if (vec.size() == 1) {
      bld.sop2(aco_opcode::s_lshr_b32, Definition(dst), bld.def(s1, scc), vec, shift);
   } else if (vec.size() == 2) {
      Temp tmp = dst.size() == 2 ? dst : bld.tmp(s2);
      bld.sop2(aco_opcode::s_lshr_b64, Definition(tmp), bld.def(s1, scc), vec, shift);
      if (tmp == dst)
         emit_split_vector(ctx, dst, 2);
      else
         emit_extract_vector(ctx, tmp, 0, dst);
   } else if (vec.size() == 3 || vec.size() == 4) {
      Temp lo = bld.tmp(s2), hi;
      if (vec.size() == 3) {
         /* this can happen if we use VMEM for a uniform load */
         hi = bld.tmp(s1);
         bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), vec);
      } else {
         hi = bld.tmp(s2);
         bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), vec);
         hi = bld.pseudo(aco_opcode::p_extract_vector, bld.def(s1), hi, Operand::zero());
      }
      if (select != Temp())
         hi =
            bld.sop2(aco_opcode::s_cselect_b32, bld.def(s1), hi, Operand::zero(), bld.scc(select));

      lo = bld.sop2(aco_opcode::s_lshr_b64, bld.def(s2), bld.def(s1, scc), lo, shift);
      Temp mid = bld.tmp(s1);
      lo = bld.pseudo(aco_opcode::p_split_vector, bld.def(s1), Definition(mid), lo);
      hi = bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.def(s1, scc), hi, shift);
      mid = bld.sop2(aco_opcode::s_or_b32, bld.def(s1), bld.def(s1, scc), hi, mid);
      bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, mid);
      emit_split_vector(ctx, dst, 2);
   }
}

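/* Illustrative example (not from the original source): for a single dword and
 * a constant byte offset of 2, this reduces to dst = vec >> 16, i.e. the two
 * misaligned bytes are shifted down into place. The multi-dword paths apply
 * the same shift across (lo, hi) dword pairs via s_lshr_b64.
 */
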
void
byte_align_vector(isel_context* ctx, Temp vec, Operand offset, Temp dst, unsigned component_size)
{
   Builder bld(ctx->program, ctx->block);
   if (offset.isTemp()) {
      Temp tmp[4] = {vec, vec, vec, vec};

      if (vec.size() == 4) {
         tmp[0] = bld.tmp(v1), tmp[1] = bld.tmp(v1), tmp[2] = bld.tmp(v1), tmp[3] = bld.tmp(v1);
         bld.pseudo(aco_opcode::p_split_vector, Definition(tmp[0]), Definition(tmp[1]),
                    Definition(tmp[2]), Definition(tmp[3]), vec);
      } else if (vec.size() == 3) {
         tmp[0] = bld.tmp(v1), tmp[1] = bld.tmp(v1), tmp[2] = bld.tmp(v1);
         bld.pseudo(aco_opcode::p_split_vector, Definition(tmp[0]), Definition(tmp[1]),
                    Definition(tmp[2]), vec);
      } else if (vec.size() == 2) {
         tmp[0] = bld.tmp(v1), tmp[1] = bld.tmp(v1), tmp[2] = tmp[1];
         bld.pseudo(aco_opcode::p_split_vector, Definition(tmp[0]), Definition(tmp[1]), vec);
      }
      for (unsigned i = 0; i < dst.size(); i++)
         tmp[i] = bld.vop3(aco_opcode::v_alignbyte_b32, bld.def(v1), tmp[i + 1], tmp[i], offset);

      if (dst.size() == 2)
         vec = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), tmp[0], tmp[1]);
      else
         vec = tmp[0];

      offset = Operand::zero();
   }

   unsigned num_components = vec.bytes() / component_size;
   if (vec.regClass() == dst.regClass()) {
      assert(offset.constantValue() == 0);
      bld.copy(Definition(dst), vec);
      emit_split_vector(ctx, dst, num_components);
      return;
   }

   emit_split_vector(ctx, vec, num_components);
   std::array<Temp, NIR_MAX_VEC_COMPONENTS> elems;
   RegClass rc = RegClass(RegType::vgpr, component_size).as_subdword();

   assert(offset.constantValue() % component_size == 0);
   unsigned skip = offset.constantValue() / component_size;
   for (unsigned i = skip; i < num_components; i++)
      elems[i - skip] = emit_extract_vector(ctx, vec, i, rc);

   if (dst.type() == RegType::vgpr) {
      /* if dst is vgpr - split the src and create a shrunk version according to the mask. */
      num_components = dst.bytes() / component_size;
      aco_ptr<Pseudo_instruction> create_vec{create_instruction<Pseudo_instruction>(
         aco_opcode::p_create_vector, Format::PSEUDO, num_components, 1)};
      for (unsigned i = 0; i < num_components; i++)
         create_vec->operands[i] = Operand(elems[i]);
      create_vec->definitions[0] = Definition(dst);
      bld.insert(std::move(create_vec));

   } else if (skip) {
      /* if dst is sgpr - split the src, but move the original to sgpr. */
      vec = bld.pseudo(aco_opcode::p_as_uniform, bld.def(RegClass(RegType::sgpr, vec.size())), vec);
      byte_align_scalar(ctx, vec, offset, dst);
   } else {
      assert(dst.size() == vec.size());
      bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), vec);
   }

   ctx->allocated_vec.emplace(dst.id(), elems);
}

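/* Illustrative note (not from the original source): in the variable-offset
 * path, v_alignbyte_b32 concatenates two dwords and extracts an unaligned
 * dword: alignbyte(hi, lo, b) = ((hi:lo) >> (8 * b)) & 0xffffffff, so the
 * loop shifts the whole vector down by `offset` bytes one dword at a time.
 */
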
Temp
get_ssa_temp_tex(struct isel_context* ctx, nir_ssa_def* def, bool is_16bit)
{
   RegClass rc = RegClass::get(RegType::vgpr, (is_16bit ? 2 : 4) * def->num_components);
   Temp tmp = get_ssa_temp(ctx, def);
   if (tmp.bytes() != rc.bytes())
      return emit_extract_vector(ctx, tmp, 0, rc);
   else
      return tmp;
}

Temp
bool_to_vector_condition(isel_context* ctx, Temp val, Temp dst = Temp(0, s2))
{
   Builder bld(ctx->program, ctx->block);
   if (!dst.id())
      dst = bld.tmp(bld.lm);

   assert(val.regClass() == s1);
   assert(dst.regClass() == bld.lm);

   return bld.sop2(Builder::s_cselect, Definition(dst), Operand::c32(-1), Operand::zero(),
                   bld.scc(val));
}

Temp
bool_to_scalar_condition(isel_context* ctx, Temp val, Temp dst = Temp(0, s1))
{
   Builder bld(ctx->program, ctx->block);
   if (!dst.id())
      dst = bld.tmp(s1);

   assert(val.regClass() == bld.lm);
   assert(dst.regClass() == s1);

   /* if we're currently in WQM mode, ensure that the source is also computed in WQM */
   bld.sop2(Builder::s_and, bld.def(bld.lm), bld.scc(Definition(dst)), val, Operand(exec, bld.lm));
   return dst;
}

/**
 * Copies the first src_bits of the input to the output Temp. Input bits at positions larger than
 * src_bits and dst_bits are truncated.
 *
 * Sign extension may be applied using the sign_extend parameter. The position of the input sign
 * bit is indicated by src_bits in this case.
 *
 * If dst.bytes() is larger than dst_bits/8, the value of the upper bits is undefined.
 */
Temp
convert_int(isel_context* ctx, Builder& bld, Temp src, unsigned src_bits, unsigned dst_bits,
            bool sign_extend, Temp dst = Temp())
{
   assert(!(sign_extend && dst_bits < src_bits) &&
          "Shrinking integers is not supported for signed inputs");

   if (!dst.id()) {
      if (dst_bits % 32 == 0 || src.type() == RegType::sgpr)
         dst = bld.tmp(src.type(), DIV_ROUND_UP(dst_bits, 32u));
      else
         dst = bld.tmp(RegClass(RegType::vgpr, dst_bits / 8u).as_subdword());
   }

   assert(src.type() == RegType::sgpr || src_bits == src.bytes() * 8);
   assert(dst.type() == RegType::sgpr || dst_bits == dst.bytes() * 8);

   if (dst.bytes() == src.bytes() && dst_bits < src_bits) {
      /* Copy the raw value, leaving an undefined value in the upper bits for
       * the caller to handle appropriately */
      return bld.copy(Definition(dst), src);
   } else if (dst.bytes() < src.bytes()) {
      return bld.pseudo(aco_opcode::p_extract_vector, Definition(dst), src, Operand::zero());
   }

   Temp tmp = dst;
   if (dst_bits == 64)
      tmp = src_bits == 32 ? src : bld.tmp(src.type(), 1);

   if (tmp == src) {
   } else if (src.regClass() == s1) {
      assert(src_bits < 32);
      bld.pseudo(aco_opcode::p_extract, Definition(tmp), bld.def(s1, scc), src, Operand::zero(),
                 Operand::c32(src_bits), Operand::c32((unsigned)sign_extend));
   } else {
      assert(src_bits < 32);
      bld.pseudo(aco_opcode::p_extract, Definition(tmp), src, Operand::zero(), Operand::c32(src_bits),
                 Operand::c32((unsigned)sign_extend));
   }

   if (dst_bits == 64) {
      if (sign_extend && dst.regClass() == s2) {
         Temp high =
            bld.sop2(aco_opcode::s_ashr_i32, bld.def(s1), bld.def(s1, scc), tmp, Operand::c32(31u));
         bld.pseudo(aco_opcode::p_create_vector, Definition(dst), tmp, high);
      } else if (sign_extend && dst.regClass() == v2) {
         Temp high = bld.vop2(aco_opcode::v_ashrrev_i32, bld.def(v1), Operand::c32(31u), tmp);
         bld.pseudo(aco_opcode::p_create_vector, Definition(dst), tmp, high);
      } else {
         bld.pseudo(aco_opcode::p_create_vector, Definition(dst), tmp, Operand::zero());
      }
   }

   return dst;
}

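/* Usage sketch (assumption): convert_int(ctx, bld, byte_val, 8, 32, true)
 * sign-extends an 8-bit value to 32 bits. On SGPRs this becomes a single
 * p_extract (bitfield extract) of bits [7:0] with the sign_extend flag set;
 * widening to 64 bits additionally builds the high dword from the sign via
 * s_ashr_i32 / v_ashrrev_i32 by 31.
 */
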
enum sgpr_extract_mode {
   sgpr_extract_sext,
   sgpr_extract_zext,
   sgpr_extract_undef,
};

Temp
extract_8_16_bit_sgpr_element(isel_context* ctx, Temp dst, nir_alu_src* src, sgpr_extract_mode mode)
{
   Temp vec = get_ssa_temp(ctx, src->src.ssa);
   unsigned src_size = src->src.ssa->bit_size;
   unsigned swizzle = src->swizzle[0];

   if (vec.size() > 1) {
      assert(src_size == 16);
      vec = emit_extract_vector(ctx, vec, swizzle / 2, s1);
      swizzle = swizzle & 1;
   }

   Builder bld(ctx->program, ctx->block);
   Temp tmp = dst.regClass() == s2 ? bld.tmp(s1) : dst;

   if (mode == sgpr_extract_undef && swizzle == 0)
      bld.copy(Definition(tmp), vec);
   else
      bld.pseudo(aco_opcode::p_extract, Definition(tmp), bld.def(s1, scc), Operand(vec),
                 Operand::c32(swizzle), Operand::c32(src_size),
                 Operand::c32((mode == sgpr_extract_sext)));

   if (dst.regClass() == s2)
      convert_int(ctx, bld, tmp, 32, 64, mode == sgpr_extract_sext, dst);

   return dst;
}

Temp
get_alu_src(struct isel_context* ctx, nir_alu_src src, unsigned size = 1)
{
   if (src.src.ssa->num_components == 1 && size == 1)
      return get_ssa_temp(ctx, src.src.ssa);

   Temp vec = get_ssa_temp(ctx, src.src.ssa);
   unsigned elem_size = src.src.ssa->bit_size / 8u;
   bool identity_swizzle = true;

   for (unsigned i = 0; identity_swizzle && i < size; i++) {
      if (src.swizzle[i] != i)
         identity_swizzle = false;
   }
   if (identity_swizzle)
      return emit_extract_vector(ctx, vec, 0, RegClass::get(vec.type(), elem_size * size));

   assert(elem_size > 0);
   assert(vec.bytes() % elem_size == 0);

   if (elem_size < 4 && vec.type() == RegType::sgpr && size == 1) {
      assert(src.src.ssa->bit_size == 8 || src.src.ssa->bit_size == 16);
      return extract_8_16_bit_sgpr_element(ctx, ctx->program->allocateTmp(s1), &src,
                                           sgpr_extract_undef);
   }

   bool as_uniform = elem_size < 4 && vec.type() == RegType::sgpr;
   if (as_uniform)
      vec = as_vgpr(ctx, vec);

   RegClass elem_rc = elem_size < 4 ? RegClass(vec.type(), elem_size).as_subdword()
                                    : RegClass(vec.type(), elem_size / 4);
   if (size == 1) {
      return emit_extract_vector(ctx, vec, src.swizzle[0], elem_rc);
   } else {
      assert(size <= 4);
      std::array<Temp, NIR_MAX_VEC_COMPONENTS> elems;
      aco_ptr<Pseudo_instruction> vec_instr{create_instruction<Pseudo_instruction>(
         aco_opcode::p_create_vector, Format::PSEUDO, size, 1)};
      for (unsigned i = 0; i < size; ++i) {
         elems[i] = emit_extract_vector(ctx, vec, src.swizzle[i], elem_rc);
         vec_instr->operands[i] = Operand{elems[i]};
      }
      Temp dst = ctx->program->allocateTmp(RegClass(vec.type(), elem_size * size / 4));
      vec_instr->definitions[0] = Definition(dst);
      ctx->block->instructions.emplace_back(std::move(vec_instr));
      ctx->allocated_vec.emplace(dst.id(), elems);
      return vec.type() == RegType::sgpr ? Builder(ctx->program, ctx->block).as_uniform(dst) : dst;
   }
}

Temp
get_alu_src_vop3p(struct isel_context* ctx, nir_alu_src src)
{
   /* returns v2b or v1 for vop3p usage.
    * The source expects exactly 2 16bit components
    * which are within the same dword.
    */
   assert(src.src.ssa->bit_size == 16);
   assert(src.swizzle[0] >> 1 == src.swizzle[1] >> 1);

   Temp tmp = get_ssa_temp(ctx, src.src.ssa);
   if (tmp.size() == 1)
      return tmp;

   /* the size is larger than 1 dword: check the swizzle */
   unsigned dword = src.swizzle[0] >> 1;

   /* extract a full dword if possible */
   if (tmp.bytes() >= (dword + 1) * 4) {
      /* if the source is split into components, use p_create_vector */
      auto it = ctx->allocated_vec.find(tmp.id());
      if (it != ctx->allocated_vec.end()) {
         unsigned index = dword << 1;
         Builder bld(ctx->program, ctx->block);
         if (it->second[index].regClass() == v2b)
            return bld.pseudo(aco_opcode::p_create_vector, bld.def(v1), it->second[index],
                              it->second[index + 1]);
      }
      return emit_extract_vector(ctx, tmp, dword, v1);
   } else {
      /* This must be a swizzled access to %a.zz where %a is v6b */
      assert(((src.swizzle[0] | src.swizzle[1]) & 1) == 0);
      assert(tmp.regClass() == v6b && dword == 1);
      return emit_extract_vector(ctx, tmp, dword * 2, v2b);
   }
}

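/* Illustrative example (not from the original source): for a source like
 * %a.zw of a 16-bit vec4 held in v2, both components live in dword 1, so this
 * returns that whole dword and leaves the low/high half selection to the
 * opsel bits derived from the swizzle in emit_vop3p_instruction below.
 */
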
uint32_t
get_alu_src_ub(isel_context* ctx, nir_alu_instr* instr, int src_idx)
{
   nir_ssa_scalar scalar =
      nir_ssa_scalar{instr->src[src_idx].src.ssa, instr->src[src_idx].swizzle[0]};
   return nir_unsigned_upper_bound(ctx->shader, ctx->range_ht, scalar, &ctx->ub_config);
}

Temp
convert_pointer_to_64_bit(isel_context* ctx, Temp ptr, bool non_uniform = false)
{
   if (ptr.size() == 2)
      return ptr;
   Builder bld(ctx->program, ctx->block);
   if (ptr.type() == RegType::vgpr && !non_uniform)
      ptr = bld.as_uniform(ptr);
   return bld.pseudo(aco_opcode::p_create_vector, bld.def(RegClass(ptr.type(), 2)), ptr,
                     Operand::c32((unsigned)ctx->options->address32_hi));
}

void
emit_sop2_instruction(isel_context* ctx, nir_alu_instr* instr, aco_opcode op, Temp dst,
                      bool writes_scc, uint8_t uses_ub = 0)
{
   aco_ptr<SOP2_instruction> sop2{
      create_instruction<SOP2_instruction>(op, Format::SOP2, 2, writes_scc ? 2 : 1)};
   sop2->operands[0] = Operand(get_alu_src(ctx, instr->src[0]));
   sop2->operands[1] = Operand(get_alu_src(ctx, instr->src[1]));
   sop2->definitions[0] = Definition(dst);
   if (instr->no_unsigned_wrap)
      sop2->definitions[0].setNUW(true);
   if (writes_scc)
      sop2->definitions[1] = Definition(ctx->program->allocateId(s1), scc, s1);

   for (int i = 0; i < 2; i++) {
      if (uses_ub & (1 << i)) {
         uint32_t src_ub = get_alu_src_ub(ctx, instr, i);
         if (src_ub <= 0xffff)
            sop2->operands[i].set16bit(true);
         else if (src_ub <= 0xffffff)
            sop2->operands[i].set24bit(true);
      }
   }

   ctx->block->instructions.emplace_back(std::move(sop2));
}

void
emit_vop2_instruction(isel_context* ctx, nir_alu_instr* instr, aco_opcode opc, Temp dst,
                      bool commutative, bool swap_srcs = false, bool flush_denorms = false,
                      bool nuw = false, uint8_t uses_ub = 0)
{
   Builder bld(ctx->program, ctx->block);
   bld.is_precise = instr->exact;

   Temp src0 = get_alu_src(ctx, instr->src[swap_srcs ? 1 : 0]);
   Temp src1 = get_alu_src(ctx, instr->src[swap_srcs ? 0 : 1]);
   if (src1.type() == RegType::sgpr) {
      if (commutative && src0.type() == RegType::vgpr) {
         Temp t = src0;
         src0 = src1;
         src1 = t;
      } else {
         src1 = as_vgpr(ctx, src1);
      }
   }

   Operand op[2] = {Operand(src0), Operand(src1)};

   for (int i = 0; i < 2; i++) {
      if (uses_ub & (1 << i)) {
         uint32_t src_ub = get_alu_src_ub(ctx, instr, swap_srcs ? !i : i);
         if (src_ub <= 0xffff)
            op[i].set16bit(true);
         else if (src_ub <= 0xffffff)
            op[i].set24bit(true);
      }
   }

   if (flush_denorms && ctx->program->gfx_level < GFX9) {
      assert(dst.size() == 1);
      Temp tmp = bld.vop2(opc, bld.def(v1), op[0], op[1]);
      bld.vop2(aco_opcode::v_mul_f32, Definition(dst), Operand::c32(0x3f800000u), tmp);
   } else if (nuw) {
      bld.nuw().vop2(opc, Definition(dst), op[0], op[1]);
   } else {
      bld.vop2(opc, Definition(dst), op[0], op[1]);
   }
}

void
emit_vop2_instruction_logic64(isel_context* ctx, nir_alu_instr* instr, aco_opcode op, Temp dst)
{
   Builder bld(ctx->program, ctx->block);
   bld.is_precise = instr->exact;

   Temp src0 = get_alu_src(ctx, instr->src[0]);
   Temp src1 = get_alu_src(ctx, instr->src[1]);

   if (src1.type() == RegType::sgpr) {
      assert(src0.type() == RegType::vgpr);
      std::swap(src0, src1);
   }

   Temp src00 = bld.tmp(src0.type(), 1);
   Temp src01 = bld.tmp(src0.type(), 1);
   bld.pseudo(aco_opcode::p_split_vector, Definition(src00), Definition(src01), src0);
   Temp src10 = bld.tmp(v1);
   Temp src11 = bld.tmp(v1);
   bld.pseudo(aco_opcode::p_split_vector, Definition(src10), Definition(src11), src1);
   Temp lo = bld.vop2(op, bld.def(v1), src00, src10);
   Temp hi = bld.vop2(op, bld.def(v1), src01, src11);
   bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, hi);
}

void
emit_vop3a_instruction(isel_context* ctx, nir_alu_instr* instr, aco_opcode op, Temp dst,
                       bool flush_denorms = false, unsigned num_sources = 2, bool swap_srcs = false)
{
   assert(num_sources == 2 || num_sources == 3);
   Temp src[3] = {Temp(0, v1), Temp(0, v1), Temp(0, v1)};
   bool has_sgpr = false;
   for (unsigned i = 0; i < num_sources; i++) {
      src[i] = get_alu_src(ctx, instr->src[swap_srcs ? 1 - i : i]);
      if (has_sgpr)
         src[i] = as_vgpr(ctx, src[i]);
      else
         has_sgpr = src[i].type() == RegType::sgpr;
   }

   Builder bld(ctx->program, ctx->block);
   bld.is_precise = instr->exact;
   if (flush_denorms && ctx->program->gfx_level < GFX9) {
      Temp tmp;
      if (num_sources == 3)
         tmp = bld.vop3(op, bld.def(dst.regClass()), src[0], src[1], src[2]);
      else
         tmp = bld.vop3(op, bld.def(dst.regClass()), src[0], src[1]);
      if (dst.size() == 1)
         bld.vop2(aco_opcode::v_mul_f32, Definition(dst), Operand::c32(0x3f800000u), tmp);
      else
         bld.vop3(aco_opcode::v_mul_f64, Definition(dst), Operand::c64(0x3FF0000000000000), tmp);
   } else if (num_sources == 3) {
      bld.vop3(op, Definition(dst), src[0], src[1], src[2]);
   } else {
      bld.vop3(op, Definition(dst), src[0], src[1]);
   }
}

Builder::Result
emit_vop3p_instruction(isel_context* ctx, nir_alu_instr* instr, aco_opcode op, Temp dst,
                       bool swap_srcs = false)
{
   Temp src0 = get_alu_src_vop3p(ctx, instr->src[swap_srcs]);
   Temp src1 = get_alu_src_vop3p(ctx, instr->src[!swap_srcs]);
   if (src0.type() == RegType::sgpr && src1.type() == RegType::sgpr)
      src1 = as_vgpr(ctx, src1);
   assert(instr->dest.dest.ssa.num_components == 2);

   /* swizzle to opsel: all swizzles are either 0 (x) or 1 (y) */
   unsigned opsel_lo =
      (instr->src[!swap_srcs].swizzle[0] & 1) << 1 | (instr->src[swap_srcs].swizzle[0] & 1);
   unsigned opsel_hi =
      (instr->src[!swap_srcs].swizzle[1] & 1) << 1 | (instr->src[swap_srcs].swizzle[1] & 1);

   Builder bld(ctx->program, ctx->block);
   bld.is_precise = instr->exact;
   Builder::Result res = bld.vop3p(op, Definition(dst), src0, src1, opsel_lo, opsel_hi);
   return res;
}

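/* Worked example (assumption): for nir sources %a.yy and %b.xy, opsel_lo ends
 * up as 0b01 and opsel_hi as 0b11. Bit 0 of opsel_lo picks the half of src0
 * and bit 1 the half of src1 used for the low 16-bit result (y of %a, x of
 * %b); opsel_hi selects the halves feeding the high result the same way.
 */
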
void
emit_idot_instruction(isel_context* ctx, nir_alu_instr* instr, aco_opcode op, Temp dst, bool clamp)
{
   Temp src[3] = {Temp(0, v1), Temp(0, v1), Temp(0, v1)};
   bool has_sgpr = false;
   for (unsigned i = 0; i < 3; i++) {
      src[i] = get_alu_src(ctx, instr->src[i]);
      if (has_sgpr)
         src[i] = as_vgpr(ctx, src[i]);
      else
         has_sgpr = src[i].type() == RegType::sgpr;
   }

   Builder bld(ctx->program, ctx->block);
   bld.is_precise = instr->exact;
   bld.vop3p(op, Definition(dst), src[0], src[1], src[2], 0x0, 0x7).instr->vop3p().clamp = clamp;
}

void
emit_vop1_instruction(isel_context* ctx, nir_alu_instr* instr, aco_opcode op, Temp dst)
{
   Builder bld(ctx->program, ctx->block);
   bld.is_precise = instr->exact;
   if (dst.type() == RegType::sgpr)
      bld.pseudo(aco_opcode::p_as_uniform, Definition(dst),
                 bld.vop1(op, bld.def(RegType::vgpr, dst.size()), get_alu_src(ctx, instr->src[0])));
   else
      bld.vop1(op, Definition(dst), get_alu_src(ctx, instr->src[0]));
}

void
emit_vopc_instruction(isel_context* ctx, nir_alu_instr* instr, aco_opcode op, Temp dst)
{
   Temp src0 = get_alu_src(ctx, instr->src[0]);
   Temp src1 = get_alu_src(ctx, instr->src[1]);
   assert(src0.size() == src1.size());

   aco_ptr<Instruction> vopc;
   if (src1.type() == RegType::sgpr) {
      if (src0.type() == RegType::vgpr) {
         /* to swap the operands, we might also have to change the opcode */
         switch (op) {
         case aco_opcode::v_cmp_lt_f16: op = aco_opcode::v_cmp_gt_f16; break;
         case aco_opcode::v_cmp_ge_f16: op = aco_opcode::v_cmp_le_f16; break;
         case aco_opcode::v_cmp_lt_i16: op = aco_opcode::v_cmp_gt_i16; break;
         case aco_opcode::v_cmp_ge_i16: op = aco_opcode::v_cmp_le_i16; break;
         case aco_opcode::v_cmp_lt_u16: op = aco_opcode::v_cmp_gt_u16; break;
         case aco_opcode::v_cmp_ge_u16: op = aco_opcode::v_cmp_le_u16; break;
         case aco_opcode::v_cmp_lt_f32: op = aco_opcode::v_cmp_gt_f32; break;
         case aco_opcode::v_cmp_ge_f32: op = aco_opcode::v_cmp_le_f32; break;
         case aco_opcode::v_cmp_lt_i32: op = aco_opcode::v_cmp_gt_i32; break;
         case aco_opcode::v_cmp_ge_i32: op = aco_opcode::v_cmp_le_i32; break;
         case aco_opcode::v_cmp_lt_u32: op = aco_opcode::v_cmp_gt_u32; break;
         case aco_opcode::v_cmp_ge_u32: op = aco_opcode::v_cmp_le_u32; break;
         case aco_opcode::v_cmp_lt_f64: op = aco_opcode::v_cmp_gt_f64; break;
         case aco_opcode::v_cmp_ge_f64: op = aco_opcode::v_cmp_le_f64; break;
         case aco_opcode::v_cmp_lt_i64: op = aco_opcode::v_cmp_gt_i64; break;
         case aco_opcode::v_cmp_ge_i64: op = aco_opcode::v_cmp_le_i64; break;
         case aco_opcode::v_cmp_lt_u64: op = aco_opcode::v_cmp_gt_u64; break;
         case aco_opcode::v_cmp_ge_u64: op = aco_opcode::v_cmp_le_u64; break;
         default: /* eq and ne are commutative */ break;
         }
         Temp t = src0;
         src0 = src1;
         src1 = t;
      } else {
         src1 = as_vgpr(ctx, src1);
      }
   }

   Builder bld(ctx->program, ctx->block);
   bld.vopc(op, Definition(dst), src0, src1);
}

void
emit_sopc_instruction(isel_context* ctx, nir_alu_instr* instr, aco_opcode op, Temp dst)
{
   Temp src0 = get_alu_src(ctx, instr->src[0]);
   Temp src1 = get_alu_src(ctx, instr->src[1]);
   Builder bld(ctx->program, ctx->block);

   assert(dst.regClass() == bld.lm);
   assert(src0.type() == RegType::sgpr);
   assert(src1.type() == RegType::sgpr);
   assert(src0.regClass() == src1.regClass());

   /* Emit the SALU comparison instruction */
   Temp cmp = bld.sopc(op, bld.scc(bld.def(s1)), src0, src1);
   /* Turn the result into a per-lane bool */
   bool_to_vector_condition(ctx, cmp, dst);
}

void
emit_comparison(isel_context* ctx, nir_alu_instr* instr, Temp dst, aco_opcode v16_op,
                aco_opcode v32_op, aco_opcode v64_op, aco_opcode s32_op = aco_opcode::num_opcodes,
                aco_opcode s64_op = aco_opcode::num_opcodes)
{
   aco_opcode s_op = instr->src[0].src.ssa->bit_size == 64   ? s64_op
                     : instr->src[0].src.ssa->bit_size == 32 ? s32_op
                                                             : aco_opcode::num_opcodes;
   aco_opcode v_op = instr->src[0].src.ssa->bit_size == 64   ? v64_op
                     : instr->src[0].src.ssa->bit_size == 32 ? v32_op
                                                             : v16_op;
   bool use_valu = s_op == aco_opcode::num_opcodes || nir_dest_is_divergent(instr->dest.dest) ||
                   get_ssa_temp(ctx, instr->src[0].src.ssa).type() == RegType::vgpr ||
                   get_ssa_temp(ctx, instr->src[1].src.ssa).type() == RegType::vgpr;
   aco_opcode op = use_valu ? v_op : s_op;
   assert(op != aco_opcode::num_opcodes);
   assert(dst.regClass() == ctx->program->lane_mask);

   if (use_valu)
      emit_vopc_instruction(ctx, instr, op, dst);
   else
      emit_sopc_instruction(ctx, instr, op, dst);
}

void
emit_boolean_logic(isel_context* ctx, nir_alu_instr* instr, Builder::WaveSpecificOpcode op,
                   Temp dst)
{
   Builder bld(ctx->program, ctx->block);
   Temp src0 = get_alu_src(ctx, instr->src[0]);
   Temp src1 = get_alu_src(ctx, instr->src[1]);

   assert(dst.regClass() == bld.lm);
   assert(src0.regClass() == bld.lm);
   assert(src1.regClass() == bld.lm);

   bld.sop2(op, Definition(dst), bld.def(s1, scc), src0, src1);
}

void
emit_bcsel(isel_context* ctx, nir_alu_instr* instr, Temp dst)
{
   Builder bld(ctx->program, ctx->block);
   Temp cond = get_alu_src(ctx, instr->src[0]);
   Temp then = get_alu_src(ctx, instr->src[1]);
   Temp els = get_alu_src(ctx, instr->src[2]);

   assert(cond.regClass() == bld.lm);

   if (dst.type() == RegType::vgpr) {
      aco_ptr<Instruction> bcsel;
      if (dst.size() == 1) {
         then = as_vgpr(ctx, then);
         els = as_vgpr(ctx, els);

         bld.vop2(aco_opcode::v_cndmask_b32, Definition(dst), els, then, cond);
      } else if (dst.size() == 2) {
         Temp then_lo = bld.tmp(v1), then_hi = bld.tmp(v1);
         bld.pseudo(aco_opcode::p_split_vector, Definition(then_lo), Definition(then_hi), then);
         Temp else_lo = bld.tmp(v1), else_hi = bld.tmp(v1);
         bld.pseudo(aco_opcode::p_split_vector, Definition(else_lo), Definition(else_hi), els);

         Temp dst0 = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), else_lo, then_lo, cond);
         Temp dst1 = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), else_hi, then_hi, cond);

         bld.pseudo(aco_opcode::p_create_vector, Definition(dst), dst0, dst1);
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      return;
   }

   if (instr->dest.dest.ssa.bit_size == 1) {
      assert(dst.regClass() == bld.lm);
      assert(then.regClass() == bld.lm);
      assert(els.regClass() == bld.lm);
   }

   if (!nir_src_is_divergent(instr->src[0].src)) { /* uniform condition and values in sgpr */
      if (dst.regClass() == s1 || dst.regClass() == s2) {
         assert((then.regClass() == s1 || then.regClass() == s2) &&
                els.regClass() == then.regClass());
         assert(dst.size() == then.size());
         aco_opcode op =
            dst.regClass() == s1 ? aco_opcode::s_cselect_b32 : aco_opcode::s_cselect_b64;
         bld.sop2(op, Definition(dst), then, els, bld.scc(bool_to_scalar_condition(ctx, cond)));
      } else {
         isel_err(&instr->instr, "Unimplemented uniform bcsel bit size");
      }
      return;
   }

   /* divergent boolean bcsel
    * this implements bcsel on bools: dst = s0 ? s1 : s2
    * the result is computed as: dst = (s0 & s1) | (~s0 & s2) */
   assert(instr->dest.dest.ssa.bit_size == 1);

   if (cond.id() != then.id())
      then = bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), cond, then);

   if (cond.id() == els.id())
      bld.copy(Definition(dst), then);
   else
      bld.sop2(Builder::s_or, Definition(dst), bld.def(s1, scc), then,
               bld.sop2(Builder::s_andn2, bld.def(bld.lm), bld.def(s1, scc), els, cond));
}

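/* Illustrative note (not from the original source): divergent boolean bcsel
 * is plain lane-mask algebra, dst = (cond & then) | (~cond & els). The id()
 * checks fold degenerate cases: bcsel(c, c, e) needs no AND since c & c == c,
 * and bcsel(c, t, c) reduces to c & t because ~c & c is zero.
 */
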
void
emit_scaled_op(isel_context* ctx, Builder& bld, Definition dst, Temp val, aco_opcode op,
               uint32_t undo)
{
   /* multiply by 16777216 to handle denormals */
   Temp is_denormal = bld.vopc(aco_opcode::v_cmp_class_f32, bld.def(bld.lm), as_vgpr(ctx, val),
                               bld.copy(bld.def(v1), Operand::c32((1u << 7) | (1u << 4))));
   Temp scaled = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), Operand::c32(0x4b800000u), val);
   scaled = bld.vop1(op, bld.def(v1), scaled);
   scaled = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), Operand::c32(undo), scaled);

   Temp not_scaled = bld.vop1(op, bld.def(v1), val);

   bld.vop2(aco_opcode::v_cndmask_b32, dst, not_scaled, scaled, is_denormal);
}

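/* Worked example (assumption): for rcp, the caller passes undo = 0x4b800000
 * (2^24 as a float). A denormal x is first scaled by 2^24 into the normal
 * range, then rcp(x * 2^24) = (1/x) * 2^-24 is multiplied by 2^24 again,
 * recovering 1/x. v_cmp_class with mask (1<<4)|(1<<7) tests for +/- denormal.
 */
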
void
emit_rcp(isel_context* ctx, Builder& bld, Definition dst, Temp val)
{
   if (ctx->block->fp_mode.denorm32 == 0) {
      bld.vop1(aco_opcode::v_rcp_f32, dst, val);
      return;
   }
   emit_scaled_op(ctx, bld, dst, val, aco_opcode::v_rcp_f32, 0x4b800000u);
}

void
emit_rsq(isel_context* ctx, Builder& bld, Definition dst, Temp val)
{
   if (ctx->block->fp_mode.denorm32 == 0) {
      bld.vop1(aco_opcode::v_rsq_f32, dst, val);
      return;
   }
   emit_scaled_op(ctx, bld, dst, val, aco_opcode::v_rsq_f32, 0x45800000u);
}

void
emit_sqrt(isel_context* ctx, Builder& bld, Definition dst, Temp val)
{
   if (ctx->block->fp_mode.denorm32 == 0) {
      bld.vop1(aco_opcode::v_sqrt_f32, dst, val);
      return;
   }
   emit_scaled_op(ctx, bld, dst, val, aco_opcode::v_sqrt_f32, 0x39800000u);
}

void
emit_log2(isel_context* ctx, Builder& bld, Definition dst, Temp val)
{
   if (ctx->block->fp_mode.denorm32 == 0) {
      bld.vop1(aco_opcode::v_log_f32, dst, val);
      return;
   }
   emit_scaled_op(ctx, bld, dst, val, aco_opcode::v_log_f32, 0xc1c00000u);
}

Temp
emit_trunc_f64(isel_context* ctx, Builder& bld, Definition dst, Temp val)
{
   if (ctx->options->gfx_level >= GFX7)
      return bld.vop1(aco_opcode::v_trunc_f64, Definition(dst), val);

   /* GFX6 doesn't support V_TRUNC_F64, lower it. */
   /* TODO: create more efficient code! */
   if (val.type() == RegType::sgpr)
      val = as_vgpr(ctx, val);

   /* Split the input value. */
   Temp val_lo = bld.tmp(v1), val_hi = bld.tmp(v1);
   bld.pseudo(aco_opcode::p_split_vector, Definition(val_lo), Definition(val_hi), val);

   /* Extract the exponent and compute the unbiased value. */
   Temp exponent =
      bld.vop3(aco_opcode::v_bfe_u32, bld.def(v1), val_hi, Operand::c32(20u), Operand::c32(11u));
   exponent = bld.vsub32(bld.def(v1), exponent, Operand::c32(1023u));

   /* Extract the fractional part. */
   Temp fract_mask = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), Operand::c32(-1u),
                                Operand::c32(0x000fffffu));
   fract_mask = bld.vop3(aco_opcode::v_lshr_b64, bld.def(v2), fract_mask, exponent);

   Temp fract_mask_lo = bld.tmp(v1), fract_mask_hi = bld.tmp(v1);
   bld.pseudo(aco_opcode::p_split_vector, Definition(fract_mask_lo), Definition(fract_mask_hi),
              fract_mask);

   Temp fract_lo = bld.tmp(v1), fract_hi = bld.tmp(v1);
   Temp tmp = bld.vop1(aco_opcode::v_not_b32, bld.def(v1), fract_mask_lo);
   fract_lo = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), val_lo, tmp);
   tmp = bld.vop1(aco_opcode::v_not_b32, bld.def(v1), fract_mask_hi);
   fract_hi = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), val_hi, tmp);

   /* Get the sign bit. */
   Temp sign = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand::c32(0x80000000u), val_hi);

   /* Decide the operation to apply depending on the unbiased exponent. */
   Temp exp_lt0 =
      bld.vopc_e64(aco_opcode::v_cmp_lt_i32, bld.def(bld.lm), exponent, Operand::zero());
   Temp dst_lo = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), fract_lo,
                          bld.copy(bld.def(v1), Operand::zero()), exp_lt0);
   Temp dst_hi = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), fract_hi, sign, exp_lt0);
   Temp exp_gt51 = bld.vopc_e64(aco_opcode::v_cmp_gt_i32, bld.def(s2), exponent, Operand::c32(51u));
   dst_lo = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), dst_lo, val_lo, exp_gt51);
   dst_hi = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), dst_hi, val_hi, exp_gt51);

   return bld.pseudo(aco_opcode::p_create_vector, Definition(dst), dst_lo, dst_hi);
}

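/* Illustrative summary (not from the original source): with unbiased exponent
 * e = bits[62:52] - 1023, a double has 52 - e fractional bits. The code
 * clears them by masking with ~(0x000fffffffffffff >> e); e < 0 means
 * |x| < 1, so the result is +/-0 (keeping only the sign bit), and e > 51
 * means x is already integral, so the input passes through unchanged.
 */
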
Temp
emit_floor_f64(isel_context* ctx, Builder& bld, Definition dst, Temp val)
{
   if (ctx->options->gfx_level >= GFX7)
      return bld.vop1(aco_opcode::v_floor_f64, Definition(dst), val);

   /* GFX6 doesn't support V_FLOOR_F64, lower it (note that it's actually
    * lowered at NIR level for precision reasons). */
   Temp src0 = as_vgpr(ctx, val);

   Temp mask = bld.copy(bld.def(s1), Operand::c32(3u)); /* isnan */
   Temp min_val = bld.pseudo(aco_opcode::p_create_vector, bld.def(s2), Operand::c32(-1u),
                             Operand::c32(0x3fefffffu));

   Temp isnan = bld.vopc_e64(aco_opcode::v_cmp_class_f64, bld.def(bld.lm), src0, mask);
   Temp fract = bld.vop1(aco_opcode::v_fract_f64, bld.def(v2), src0);
   Temp min = bld.vop3(aco_opcode::v_min_f64, bld.def(v2), fract, min_val);

   Temp then_lo = bld.tmp(v1), then_hi = bld.tmp(v1);
   bld.pseudo(aco_opcode::p_split_vector, Definition(then_lo), Definition(then_hi), src0);
   Temp else_lo = bld.tmp(v1), else_hi = bld.tmp(v1);
   bld.pseudo(aco_opcode::p_split_vector, Definition(else_lo), Definition(else_hi), min);

   Temp dst0 = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), else_lo, then_lo, isnan);
   Temp dst1 = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), else_hi, then_hi, isnan);

   Temp v = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), dst0, dst1);

   Instruction* add = bld.vop3(aco_opcode::v_add_f64, Definition(dst), src0, v);
   add->vop3().neg[1] = true;

   return add->definitions[0].getTemp();
}

Temp
uadd32_sat(Builder& bld, Definition dst, Temp src0, Temp src1)
{
   if (bld.program->gfx_level < GFX8) {
      Builder::Result add = bld.vadd32(bld.def(v1), src0, src1, true);
      return bld.vop2_e64(aco_opcode::v_cndmask_b32, dst, add.def(0).getTemp(), Operand::c32(-1),
                          add.def(1).getTemp());
   }

   Builder::Result add(NULL);
   if (bld.program->gfx_level >= GFX9) {
      add = bld.vop2_e64(aco_opcode::v_add_u32, dst, src0, src1);
   } else {
      add = bld.vop2_e64(aco_opcode::v_add_co_u32, dst, bld.def(bld.lm), src0, src1);
   }
   add.instr->vop3().clamp = 1;
   return dst.getTemp();
}

Temp
usub32_sat(Builder& bld, Definition dst, Temp src0, Temp src1)
{
   if (bld.program->gfx_level < GFX8) {
      Builder::Result sub = bld.vsub32(bld.def(v1), src0, src1, true);
      return bld.vop2_e64(aco_opcode::v_cndmask_b32, dst, sub.def(0).getTemp(), Operand::c32(0u),
                          sub.def(1).getTemp());
   }

   Builder::Result sub(NULL);
   if (bld.program->gfx_level >= GFX9) {
      sub = bld.vop2_e64(aco_opcode::v_sub_u32, dst, src0, src1);
   } else {
      sub = bld.vop2_e64(aco_opcode::v_sub_co_u32, dst, bld.def(bld.lm), src0, src1);
   }
   sub.instr->vop3().clamp = 1;
   return dst.getTemp();
}

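/* Usage sketch (assumption): uadd32_sat clamps instead of wrapping, e.g.
 * 0xffffffff + 1 -> 0xffffffff. On GFX9+ v_add_u32 with the VOP3 clamp bit
 * does this directly; GFX8 uses v_add_co_u32 (which also defines a carry)
 * with clamp, and pre-GFX8 selects the saturated constant with v_cndmask_b32
 * on the carry-out.
 */
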
void
visit_alu_instr(isel_context* ctx, nir_alu_instr* instr)
{
   if (!instr->dest.dest.is_ssa) {
      isel_err(&instr->instr, "nir alu dst not in ssa");
      abort();
   }
   Builder bld(ctx->program, ctx->block);
   bld.is_precise = instr->exact;
   Temp dst = get_ssa_temp(ctx, &instr->dest.dest.ssa);
   switch (instr->op) {
   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4:
   case nir_op_vec5:
   case nir_op_vec8:
   case nir_op_vec16: {
      std::array<Temp, NIR_MAX_VEC_COMPONENTS> elems;
      unsigned num = instr->dest.dest.ssa.num_components;
      for (unsigned i = 0; i < num; ++i)
         elems[i] = get_alu_src(ctx, instr->src[i]);

      if (instr->dest.dest.ssa.bit_size >= 32 || dst.type() == RegType::vgpr) {
         aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(
            aco_opcode::p_create_vector, Format::PSEUDO, instr->dest.dest.ssa.num_components, 1)};
         RegClass elem_rc = RegClass::get(RegType::vgpr, instr->dest.dest.ssa.bit_size / 8u);
         for (unsigned i = 0; i < num; ++i) {
            if (elems[i].type() == RegType::sgpr && elem_rc.is_subdword())
               elems[i] = emit_extract_vector(ctx, elems[i], 0, elem_rc);
            vec->operands[i] = Operand{elems[i]};
         }
         vec->definitions[0] = Definition(dst);
         ctx->block->instructions.emplace_back(std::move(vec));
         ctx->allocated_vec.emplace(dst.id(), elems);
      } else {
         bool use_s_pack = ctx->program->gfx_level >= GFX9;
         Temp mask = bld.copy(bld.def(s1), Operand::c32((1u << instr->dest.dest.ssa.bit_size) - 1));

         std::array<Temp, NIR_MAX_VEC_COMPONENTS> packed;
         uint32_t const_vals[NIR_MAX_VEC_COMPONENTS] = {};
         for (unsigned i = 0; i < num; i++) {
            unsigned packed_size = use_s_pack ? 16 : 32;
            unsigned idx = i * instr->dest.dest.ssa.bit_size / packed_size;
            unsigned offset = i * instr->dest.dest.ssa.bit_size % packed_size;
            if (nir_src_is_const(instr->src[i].src)) {
               const_vals[idx] |= nir_src_as_uint(instr->src[i].src) << offset;
               continue;
            }
            if (nir_src_is_undef(instr->src[i].src))
               continue;

            if (offset != packed_size - instr->dest.dest.ssa.bit_size)
               elems[i] =
                  bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), elems[i], mask);

            if (offset)
               elems[i] = bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.def(s1, scc), elems[i],
                                   Operand::c32(offset));

            if (packed[idx].id())
               packed[idx] = bld.sop2(aco_opcode::s_or_b32, bld.def(s1), bld.def(s1, scc), elems[i],
                                      packed[idx]);
            else
               packed[idx] = elems[i];
         }

         if (use_s_pack) {
            for (unsigned i = 0; i < dst.size(); i++) {
               bool same = !!packed[i * 2].id() == !!packed[i * 2 + 1].id();

               if (packed[i * 2].id() && packed[i * 2 + 1].id())
                  packed[i] = bld.sop2(aco_opcode::s_pack_ll_b32_b16, bld.def(s1), packed[i * 2],
                                       packed[i * 2 + 1]);
               else if (packed[i * 2 + 1].id())
                  packed[i] = bld.sop2(aco_opcode::s_pack_ll_b32_b16, bld.def(s1),
                                       Operand::c32(const_vals[i * 2]), packed[i * 2 + 1]);
               else if (packed[i * 2].id())
                  packed[i] = bld.sop2(aco_opcode::s_pack_ll_b32_b16, bld.def(s1), packed[i * 2],
                                       Operand::c32(const_vals[i * 2 + 1]));

               if (same)
                  const_vals[i] = const_vals[i * 2] | (const_vals[i * 2 + 1] << 16);
               else
                  const_vals[i] = 0;
            }
         }

         for (unsigned i = 0; i < dst.size(); i++) {
            if (const_vals[i] && packed[i].id())
               packed[i] = bld.sop2(aco_opcode::s_or_b32, bld.def(s1), bld.def(s1, scc),
                                    Operand::c32(const_vals[i]), packed[i]);
            else if (!packed[i].id())
               packed[i] = bld.copy(bld.def(s1), Operand::c32(const_vals[i]));
         }

         if (dst.size() == 1)
            bld.copy(Definition(dst), packed[0]);
         else if (dst.size() == 2)
            bld.pseudo(aco_opcode::p_create_vector, Definition(dst), packed[0], packed[1]);
         else
            bld.pseudo(aco_opcode::p_create_vector, Definition(dst), packed[0], packed[1],
                       packed[2]);
      }
      break;
   }
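   /* Illustrative example (not from the original source): an SGPR vec2 of
    * 16-bit values on GFX9+ becomes a single s_pack_ll_b32_b16, and fully
    * constant components are folded into const_vals instead, so e.g.
    * vec2(x, 1.0h) needs one pack with the constant 0x3c00 as the second
    * operand.
    */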
   case nir_op_mov: {
      Temp src = get_alu_src(ctx, instr->src[0]);
      if (src.type() == RegType::vgpr && dst.type() == RegType::sgpr) {
         /* use size() instead of bytes() for 8/16-bit */
         assert(src.size() == dst.size() && "wrong src or dst register class for nir_op_mov");
         bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), src);
      } else {
         assert(src.bytes() == dst.bytes() && "wrong src or dst register class for nir_op_mov");
         bld.copy(Definition(dst), src);
      }
      break;
   }
   case nir_op_inot: {
      Temp src = get_alu_src(ctx, instr->src[0]);
      if (dst.regClass() == v1 || dst.regClass() == v2b || dst.regClass() == v1b) {
         emit_vop1_instruction(ctx, instr, aco_opcode::v_not_b32, dst);
      } else if (dst.regClass() == v2) {
         Temp lo = bld.tmp(v1), hi = bld.tmp(v1);
         bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), src);
         lo = bld.vop1(aco_opcode::v_not_b32, bld.def(v1), lo);
         hi = bld.vop1(aco_opcode::v_not_b32, bld.def(v1), hi);
         bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, hi);
      } else if (dst.type() == RegType::sgpr) {
         aco_opcode opcode = dst.size() == 1 ? aco_opcode::s_not_b32 : aco_opcode::s_not_b64;
         bld.sop1(opcode, Definition(dst), bld.def(s1, scc), src);
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
   case nir_op_iabs: {
      if (dst.regClass() == v1 && instr->dest.dest.ssa.bit_size == 16) {
         Temp src = get_alu_src_vop3p(ctx, instr->src[0]);

         unsigned opsel_lo = (instr->src[0].swizzle[0] & 1) << 1;
         unsigned opsel_hi = ((instr->src[0].swizzle[1] & 1) << 1) | 1;

         Temp sub = bld.vop3p(aco_opcode::v_pk_sub_u16, Definition(bld.tmp(v1)), Operand::zero(),
                              src, opsel_lo, opsel_hi);
         bld.vop3p(aco_opcode::v_pk_max_i16, Definition(dst), sub, src, opsel_lo, opsel_hi);
         break;
      }
      Temp src = get_alu_src(ctx, instr->src[0]);
      if (dst.regClass() == s1) {
         bld.sop1(aco_opcode::s_abs_i32, Definition(dst), bld.def(s1, scc), src);
      } else if (dst.regClass() == v1) {
         bld.vop2(aco_opcode::v_max_i32, Definition(dst), src,
                  bld.vsub32(bld.def(v1), Operand::zero(), src));
      } else if (dst.regClass() == v2b && ctx->program->gfx_level >= GFX10) {
         bld.vop3(
            aco_opcode::v_max_i16_e64, Definition(dst), src,
            bld.vop3(aco_opcode::v_sub_u16_e64, Definition(bld.tmp(v2b)), Operand::zero(2), src));
      } else if (dst.regClass() == v2b) {
         src = as_vgpr(ctx, src);
         bld.vop2(aco_opcode::v_max_i16, Definition(dst), src,
                  bld.vop2(aco_opcode::v_sub_u16, Definition(bld.tmp(v2b)), Operand::zero(2), src));
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
   case nir_op_isign: {
      Temp src = get_alu_src(ctx, instr->src[0]);
      if (dst.regClass() == s1) {
         Temp tmp =
            bld.sop2(aco_opcode::s_max_i32, bld.def(s1), bld.def(s1, scc), src, Operand::c32(-1));
         bld.sop2(aco_opcode::s_min_i32, Definition(dst), bld.def(s1, scc), tmp, Operand::c32(1u));
      } else if (dst.regClass() == s2) {
         Temp neg =
            bld.sop2(aco_opcode::s_ashr_i64, bld.def(s2), bld.def(s1, scc), src, Operand::c32(63u));
         Temp neqz;
         if (ctx->program->gfx_level >= GFX8)
            neqz = bld.sopc(aco_opcode::s_cmp_lg_u64, bld.def(s1, scc), src, Operand::zero());
         else
            neqz =
               bld.sop2(aco_opcode::s_or_b64, bld.def(s2), bld.def(s1, scc), src, Operand::zero())
                  .def(1)
                  .getTemp();
         /* SCC gets zero-extended to 64 bit */
         bld.sop2(aco_opcode::s_or_b64, Definition(dst), bld.def(s1, scc), neg, bld.scc(neqz));
      } else if (dst.regClass() == v1) {
         bld.vop3(aco_opcode::v_med3_i32, Definition(dst), Operand::c32(-1), src, Operand::c32(1u));
      } else if (dst.regClass() == v2b && ctx->program->gfx_level >= GFX9) {
         bld.vop3(aco_opcode::v_med3_i16, Definition(dst), Operand::c16(-1), src, Operand::c16(1u));
      } else if (dst.regClass() == v2b) {
         src = as_vgpr(ctx, src);
         bld.vop2(aco_opcode::v_max_i16, Definition(dst), Operand::c16(-1),
                  bld.vop2(aco_opcode::v_min_i16, Definition(bld.tmp(v1)), Operand::c16(1u), src));
      } else if (dst.regClass() == v2) {
         Temp upper = emit_extract_vector(ctx, src, 1, v1);
         Temp neg = bld.vop2(aco_opcode::v_ashrrev_i32, bld.def(v1), Operand::c32(31u), upper);
         Temp gtz = bld.vopc(aco_opcode::v_cmp_ge_i64, bld.def(bld.lm), Operand::zero(), src);
         Temp lower = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), Operand::c32(1u), neg, gtz);
         upper = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), Operand::zero(), neg, gtz);
         bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper);
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
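   /* Illustrative note (not from the original source): the 32-bit VALU path
    * uses v_med3_i32(-1, x, 1), the median of three values: any negative x
    * yields -1, zero yields 0 and any positive x yields 1, i.e. sign(x) in a
    * single instruction.
    */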
   case nir_op_imax: {
      if (dst.regClass() == v2b && ctx->program->gfx_level >= GFX10) {
         emit_vop3a_instruction(ctx, instr, aco_opcode::v_max_i16_e64, dst);
      } else if (dst.regClass() == v2b) {
         emit_vop2_instruction(ctx, instr, aco_opcode::v_max_i16, dst, true);
      } else if (dst.regClass() == v1 && instr->dest.dest.ssa.bit_size == 16) {
         emit_vop3p_instruction(ctx, instr, aco_opcode::v_pk_max_i16, dst);
      } else if (dst.regClass() == v1) {
         emit_vop2_instruction(ctx, instr, aco_opcode::v_max_i32, dst, true);
      } else if (dst.regClass() == s1) {
         emit_sop2_instruction(ctx, instr, aco_opcode::s_max_i32, dst, true);
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
   case nir_op_umax: {
      if (dst.regClass() == v2b && ctx->program->gfx_level >= GFX10) {
         emit_vop3a_instruction(ctx, instr, aco_opcode::v_max_u16_e64, dst);
      } else if (dst.regClass() == v2b) {
         emit_vop2_instruction(ctx, instr, aco_opcode::v_max_u16, dst, true);
      } else if (dst.regClass() == v1 && instr->dest.dest.ssa.bit_size == 16) {
         emit_vop3p_instruction(ctx, instr, aco_opcode::v_pk_max_u16, dst);
      } else if (dst.regClass() == v1) {
         emit_vop2_instruction(ctx, instr, aco_opcode::v_max_u32, dst, true);
      } else if (dst.regClass() == s1) {
         emit_sop2_instruction(ctx, instr, aco_opcode::s_max_u32, dst, true);
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
   case nir_op_imin: {
      if (dst.regClass() == v2b && ctx->program->gfx_level >= GFX10) {
         emit_vop3a_instruction(ctx, instr, aco_opcode::v_min_i16_e64, dst);
      } else if (dst.regClass() == v2b) {
         emit_vop2_instruction(ctx, instr, aco_opcode::v_min_i16, dst, true);
      } else if (dst.regClass() == v1 && instr->dest.dest.ssa.bit_size == 16) {
         emit_vop3p_instruction(ctx, instr, aco_opcode::v_pk_min_i16, dst);
      } else if (dst.regClass() == v1) {
         emit_vop2_instruction(ctx, instr, aco_opcode::v_min_i32, dst, true);
      } else if (dst.regClass() == s1) {
         emit_sop2_instruction(ctx, instr, aco_opcode::s_min_i32, dst, true);
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
   case nir_op_umin: {
      if (dst.regClass() == v2b && ctx->program->gfx_level >= GFX10) {
         emit_vop3a_instruction(ctx, instr, aco_opcode::v_min_u16_e64, dst);
      } else if (dst.regClass() == v2b) {
         emit_vop2_instruction(ctx, instr, aco_opcode::v_min_u16, dst, true);
      } else if (dst.regClass() == v1 && instr->dest.dest.ssa.bit_size == 16) {
         emit_vop3p_instruction(ctx, instr, aco_opcode::v_pk_min_u16, dst);
      } else if (dst.regClass() == v1) {
         emit_vop2_instruction(ctx, instr, aco_opcode::v_min_u32, dst, true);
      } else if (dst.regClass() == s1) {
         emit_sop2_instruction(ctx, instr, aco_opcode::s_min_u32, dst, true);
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
   case nir_op_ior: {
      if (instr->dest.dest.ssa.bit_size == 1) {
         emit_boolean_logic(ctx, instr, Builder::s_or, dst);
      } else if (dst.regClass() == v1 || dst.regClass() == v2b || dst.regClass() == v1b) {
         emit_vop2_instruction(ctx, instr, aco_opcode::v_or_b32, dst, true);
      } else if (dst.regClass() == v2) {
         emit_vop2_instruction_logic64(ctx, instr, aco_opcode::v_or_b32, dst);
      } else if (dst.regClass() == s1) {
         emit_sop2_instruction(ctx, instr, aco_opcode::s_or_b32, dst, true);
      } else if (dst.regClass() == s2) {
         emit_sop2_instruction(ctx, instr, aco_opcode::s_or_b64, dst, true);
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
   case nir_op_iand: {
      if (instr->dest.dest.ssa.bit_size == 1) {
         emit_boolean_logic(ctx, instr, Builder::s_and, dst);
      } else if (dst.regClass() == v1 || dst.regClass() == v2b || dst.regClass() == v1b) {
         emit_vop2_instruction(ctx, instr, aco_opcode::v_and_b32, dst, true);
      } else if (dst.regClass() == v2) {
         emit_vop2_instruction_logic64(ctx, instr, aco_opcode::v_and_b32, dst);
      } else if (dst.regClass() == s1) {
         emit_sop2_instruction(ctx, instr, aco_opcode::s_and_b32, dst, true);
      } else if (dst.regClass() == s2) {
         emit_sop2_instruction(ctx, instr, aco_opcode::s_and_b64, dst, true);
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
1677 if (instr->dest.dest.ssa.bit_size == 1) {
1678 emit_boolean_logic(ctx, instr, Builder::s_xor, dst);
1679 } else if (dst.regClass() == v1 || dst.regClass() == v2b || dst.regClass() == v1b) {
1680 emit_vop2_instruction(ctx, instr, aco_opcode::v_xor_b32, dst, true);
1681 } else if (dst.regClass() == v2) {
1682 emit_vop2_instruction_logic64(ctx, instr, aco_opcode::v_xor_b32, dst);
1683 } else if (dst.regClass() == s1) {
1684 emit_sop2_instruction(ctx, instr, aco_opcode::s_xor_b32, dst, true);
1685 } else if (dst.regClass() == s2) {
1686 emit_sop2_instruction(ctx, instr, aco_opcode::s_xor_b64, dst, true);
1688 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      if (dst.regClass() == v2b && ctx->program->gfx_level >= GFX10) {
         emit_vop3a_instruction(ctx, instr, aco_opcode::v_lshrrev_b16_e64, dst, false, 2, true);
      } else if (dst.regClass() == v2b) {
         emit_vop2_instruction(ctx, instr, aco_opcode::v_lshrrev_b16, dst, false, true);
      } else if (dst.regClass() == v1 && instr->dest.dest.ssa.bit_size == 16) {
         emit_vop3p_instruction(ctx, instr, aco_opcode::v_pk_lshrrev_b16, dst, true);
      } else if (dst.regClass() == v1) {
         emit_vop2_instruction(ctx, instr, aco_opcode::v_lshrrev_b32, dst, false, true);
      } else if (dst.regClass() == v2 && ctx->program->gfx_level >= GFX8) {
         bld.vop3(aco_opcode::v_lshrrev_b64, Definition(dst), get_alu_src(ctx, instr->src[1]),
                  get_alu_src(ctx, instr->src[0]));
      } else if (dst.regClass() == v2) {
         emit_vop3a_instruction(ctx, instr, aco_opcode::v_lshr_b64, dst);
      } else if (dst.regClass() == s2) {
         emit_sop2_instruction(ctx, instr, aco_opcode::s_lshr_b64, dst, true);
      } else if (dst.regClass() == s1) {
         emit_sop2_instruction(ctx, instr, aco_opcode::s_lshr_b32, dst, true);
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
   case nir_op_ishl: {
      if (dst.regClass() == v2b && ctx->program->gfx_level >= GFX10) {
         emit_vop3a_instruction(ctx, instr, aco_opcode::v_lshlrev_b16_e64, dst, false, 2, true);
      } else if (dst.regClass() == v2b) {
         emit_vop2_instruction(ctx, instr, aco_opcode::v_lshlrev_b16, dst, false, true);
      } else if (dst.regClass() == v1 && instr->dest.dest.ssa.bit_size == 16) {
         emit_vop3p_instruction(ctx, instr, aco_opcode::v_pk_lshlrev_b16, dst, true);
      } else if (dst.regClass() == v1) {
         emit_vop2_instruction(ctx, instr, aco_opcode::v_lshlrev_b32, dst, false, true, false,
                               false, 2);
      } else if (dst.regClass() == v2 && ctx->program->gfx_level >= GFX8) {
         bld.vop3(aco_opcode::v_lshlrev_b64, Definition(dst), get_alu_src(ctx, instr->src[1]),
                  get_alu_src(ctx, instr->src[0]));
      } else if (dst.regClass() == v2) {
         emit_vop3a_instruction(ctx, instr, aco_opcode::v_lshl_b64, dst);
      } else if (dst.regClass() == s1) {
         emit_sop2_instruction(ctx, instr, aco_opcode::s_lshl_b32, dst, true, 1);
      } else if (dst.regClass() == s2) {
         emit_sop2_instruction(ctx, instr, aco_opcode::s_lshl_b64, dst, true);
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
   case nir_op_ishr: {
      if (dst.regClass() == v2b && ctx->program->gfx_level >= GFX10) {
         emit_vop3a_instruction(ctx, instr, aco_opcode::v_ashrrev_i16_e64, dst, false, 2, true);
      } else if (dst.regClass() == v2b) {
         emit_vop2_instruction(ctx, instr, aco_opcode::v_ashrrev_i16, dst, false, true);
      } else if (dst.regClass() == v1 && instr->dest.dest.ssa.bit_size == 16) {
         emit_vop3p_instruction(ctx, instr, aco_opcode::v_pk_ashrrev_i16, dst, true);
      } else if (dst.regClass() == v1) {
         emit_vop2_instruction(ctx, instr, aco_opcode::v_ashrrev_i32, dst, false, true);
      } else if (dst.regClass() == v2 && ctx->program->gfx_level >= GFX8) {
         bld.vop3(aco_opcode::v_ashrrev_i64, Definition(dst), get_alu_src(ctx, instr->src[1]),
                  get_alu_src(ctx, instr->src[0]));
      } else if (dst.regClass() == v2) {
         emit_vop3a_instruction(ctx, instr, aco_opcode::v_ashr_i64, dst);
      } else if (dst.regClass() == s1) {
         emit_sop2_instruction(ctx, instr, aco_opcode::s_ashr_i32, dst, true);
      } else if (dst.regClass() == s2) {
         emit_sop2_instruction(ctx, instr, aco_opcode::s_ashr_i64, dst, true);
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
   case nir_op_find_lsb: {
      Temp src = get_alu_src(ctx, instr->src[0]);
      if (src.regClass() == s1) {
         bld.sop1(aco_opcode::s_ff1_i32_b32, Definition(dst), src);
      } else if (src.regClass() == v1) {
         emit_vop1_instruction(ctx, instr, aco_opcode::v_ffbl_b32, dst);
      } else if (src.regClass() == s2) {
         bld.sop1(aco_opcode::s_ff1_i32_b64, Definition(dst), src);
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
   case nir_op_ufind_msb:
   case nir_op_ifind_msb: {
      Temp src = get_alu_src(ctx, instr->src[0]);
      if (src.regClass() == s1 || src.regClass() == s2) {
         aco_opcode op = src.regClass() == s2
                            ? (instr->op == nir_op_ufind_msb ? aco_opcode::s_flbit_i32_b64
                                                             : aco_opcode::s_flbit_i32_i64)
                            : (instr->op == nir_op_ufind_msb ? aco_opcode::s_flbit_i32_b32
                                                             : aco_opcode::s_flbit_i32);
         Temp msb_rev = bld.sop1(op, bld.def(s1), src);
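         /* s_flbit counts the position from the MSB (returning -1 when no set/sign bit is
          * found), while NIR wants an LSB-based index, so compute (bits - 1) - msb_rev. The
          * borrow (SCC) from the subtraction flags the "not found" case for the s_cselect. */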
         Builder::Result sub = bld.sop2(aco_opcode::s_sub_u32, bld.def(s1), bld.def(s1, scc),
                                        Operand::c32(src.size() * 32u - 1u), msb_rev);
         Temp msb = sub.def(0).getTemp();
         Temp carry = sub.def(1).getTemp();

         bld.sop2(aco_opcode::s_cselect_b32, Definition(dst), Operand::c32(-1), msb,
                  bld.scc(carry));
      } else if (src.regClass() == v1) {
         aco_opcode op =
            instr->op == nir_op_ufind_msb ? aco_opcode::v_ffbh_u32 : aco_opcode::v_ffbh_i32;
         Temp msb_rev = bld.tmp(v1);
         emit_vop1_instruction(ctx, instr, op, msb_rev);
         Temp msb = bld.tmp(v1);
         Temp carry =
            bld.vsub32(Definition(msb), Operand::c32(31u), Operand(msb_rev), true).def(1).getTemp();
         bld.vop2(aco_opcode::v_cndmask_b32, Definition(dst), msb, msb_rev, carry);
      } else if (src.regClass() == v2) {
         aco_opcode op =
            instr->op == nir_op_ufind_msb ? aco_opcode::v_ffbh_u32 : aco_opcode::v_ffbh_i32;

         Temp lo = bld.tmp(v1), hi = bld.tmp(v1);
         bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), src);
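         /* Search each half separately: bias the low half's result by 32 (saturating, so a
          * "not found" -1 stays -1) and prefer the high half's result whenever it found a
          * set bit. */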
         lo = uadd32_sat(bld, bld.def(v1), bld.copy(bld.def(s1), Operand::c32(32u)),
                         bld.vop1(op, bld.def(v1), lo));
         hi = bld.vop1(op, bld.def(v1), hi);
         Temp found_hi = bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(bld.lm), Operand::c32(-1), hi);

         Temp msb_rev = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), lo, hi, found_hi);

         Temp msb = bld.tmp(v1);
         Temp carry =
            bld.vsub32(Definition(msb), Operand::c32(63u), Operand(msb_rev), true).def(1).getTemp();
         bld.vop2(aco_opcode::v_cndmask_b32, Definition(dst), msb, msb_rev, carry);
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
   case nir_op_uclz: {
      Temp src = get_alu_src(ctx, instr->src[0]);
      if (src.regClass() == s1) {
         Temp msb_rev = bld.sop1(aco_opcode::s_flbit_i32_b32, bld.def(s1), src);
         bld.sop2(aco_opcode::s_min_u32, Definition(dst), Operand::c32(32u), msb_rev);
      } else if (src.regClass() == v1) {
         Temp msb_rev = bld.vop1(aco_opcode::v_ffbh_u32, bld.def(v1), src);
         bld.vop2(aco_opcode::v_min_u32, Definition(dst), Operand::c32(32u), msb_rev);
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
   case nir_op_bitfield_reverse: {
      if (dst.regClass() == s1) {
         bld.sop1(aco_opcode::s_brev_b32, Definition(dst), get_alu_src(ctx, instr->src[0]));
      } else if (dst.regClass() == v1) {
         bld.vop1(aco_opcode::v_bfrev_b32, Definition(dst), get_alu_src(ctx, instr->src[0]));
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
   case nir_op_iadd: {
      if (dst.regClass() == s1) {
         emit_sop2_instruction(ctx, instr, aco_opcode::s_add_u32, dst, true);
         break;
      } else if (dst.bytes() <= 2 && ctx->program->gfx_level >= GFX10) {
         emit_vop3a_instruction(ctx, instr, aco_opcode::v_add_u16_e64, dst);
         break;
      } else if (dst.bytes() <= 2 && ctx->program->gfx_level >= GFX8) {
         emit_vop2_instruction(ctx, instr, aco_opcode::v_add_u16, dst, true);
         break;
      } else if (dst.regClass() == v1 && instr->dest.dest.ssa.bit_size == 16) {
         emit_vop3p_instruction(ctx, instr, aco_opcode::v_pk_add_u16, dst);
         break;
      }

      Temp src0 = get_alu_src(ctx, instr->src[0]);
      Temp src1 = get_alu_src(ctx, instr->src[1]);
      if (dst.type() == RegType::vgpr && dst.bytes() <= 4) {
         bld.vadd32(Definition(dst), Operand(src0), Operand(src1));
         break;
      }

      assert(src0.size() == 2 && src1.size() == 2);
      Temp src00 = bld.tmp(src0.type(), 1);
      Temp src01 = bld.tmp(dst.type(), 1);
      bld.pseudo(aco_opcode::p_split_vector, Definition(src00), Definition(src01), src0);
      Temp src10 = bld.tmp(src1.type(), 1);
      Temp src11 = bld.tmp(dst.type(), 1);
      bld.pseudo(aco_opcode::p_split_vector, Definition(src10), Definition(src11), src1);

      if (dst.regClass() == s2) {
         Temp carry = bld.tmp(s1);
         Temp dst0 =
            bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.scc(Definition(carry)), src00, src10);
         Temp dst1 = bld.sop2(aco_opcode::s_addc_u32, bld.def(s1), bld.def(s1, scc), src01, src11,
                              bld.scc(carry));
         bld.pseudo(aco_opcode::p_create_vector, Definition(dst), dst0, dst1);
      } else if (dst.regClass() == v2) {
         Temp dst0 = bld.tmp(v1);
         Temp carry = bld.vadd32(Definition(dst0), src00, src10, true).def(1).getTemp();
         Temp dst1 = bld.vadd32(bld.def(v1), src01, src11, false, carry);
         bld.pseudo(aco_opcode::p_create_vector, Definition(dst), dst0, dst1);
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
   case nir_op_uadd_sat: {
      if (dst.regClass() == v1 && instr->dest.dest.ssa.bit_size == 16) {
         Instruction* add_instr =
            emit_vop3p_instruction(ctx, instr, aco_opcode::v_pk_add_u16, dst);
         add_instr->vop3p().clamp = 1;
         break;
      }
      Temp src0 = get_alu_src(ctx, instr->src[0]);
      Temp src1 = get_alu_src(ctx, instr->src[1]);
      if (dst.regClass() == s1) {
         Temp tmp = bld.tmp(s1), carry = bld.tmp(s1);
         bld.sop2(aco_opcode::s_add_u32, Definition(tmp), bld.scc(Definition(carry)), src0, src1);
         bld.sop2(aco_opcode::s_cselect_b32, Definition(dst), Operand::c32(-1), tmp,
                  bld.scc(carry));
         break;
      } else if (dst.regClass() == v2b) {
         Instruction* add_instr;
         if (ctx->program->gfx_level >= GFX10) {
            add_instr = bld.vop3(aco_opcode::v_add_u16_e64, Definition(dst), src0, src1).instr;
         } else {
            if (src1.type() == RegType::sgpr)
               std::swap(src0, src1);
            add_instr =
               bld.vop2_e64(aco_opcode::v_add_u16, Definition(dst), src0, as_vgpr(ctx, src1)).instr;
         }
         add_instr->vop3().clamp = 1;
         break;
      } else if (dst.regClass() == v1) {
         uadd32_sat(bld, Definition(dst), src0, src1);
         break;
      }

      assert(src0.size() == 2 && src1.size() == 2);

      Temp src00 = bld.tmp(src0.type(), 1);
      Temp src01 = bld.tmp(src0.type(), 1);
      bld.pseudo(aco_opcode::p_split_vector, Definition(src00), Definition(src01), src0);
      Temp src10 = bld.tmp(src1.type(), 1);
      Temp src11 = bld.tmp(src1.type(), 1);
      bld.pseudo(aco_opcode::p_split_vector, Definition(src10), Definition(src11), src1);

      if (dst.regClass() == s2) {
         Temp carry0 = bld.tmp(s1);
         Temp carry1 = bld.tmp(s1);

         Temp no_sat0 =
            bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.scc(Definition(carry0)), src00, src10);
         Temp no_sat1 = bld.sop2(aco_opcode::s_addc_u32, bld.def(s1), bld.scc(Definition(carry1)),
                                 src01, src11, bld.scc(carry0));

         Temp no_sat = bld.pseudo(aco_opcode::p_create_vector, bld.def(s2), no_sat0, no_sat1);

         bld.sop2(aco_opcode::s_cselect_b64, Definition(dst), Operand::c64(-1), no_sat,
                  bld.scc(carry1));
      } else if (dst.regClass() == v2) {
         Temp no_sat0 = bld.tmp(v1);
         Temp dst0 = bld.tmp(v1);
         Temp dst1 = bld.tmp(v1);

         Temp carry0 = bld.vadd32(Definition(no_sat0), src00, src10, true).def(1).getTemp();
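         /* GFX8+ can apply the saturating clamp directly to the carry-in add; older chips
          * compute the unsaturated result and select 0xffffffff manually when it carried out. */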
         Temp carry1;

         if (ctx->program->gfx_level >= GFX8) {
            carry1 = bld.tmp(bld.lm);
            bld.vop2_e64(aco_opcode::v_addc_co_u32, Definition(dst1), Definition(carry1),
                         as_vgpr(ctx, src01), as_vgpr(ctx, src11), carry0)
               .instr->vop3()
               .clamp = 1;
         } else {
            Temp no_sat1 = bld.tmp(v1);
            carry1 = bld.vadd32(Definition(no_sat1), src01, src11, true, carry0).def(1).getTemp();
            bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(dst1), no_sat1, Operand::c32(-1),
                         carry1);
         }

         bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(dst0), no_sat0, Operand::c32(-1),
                      carry0);
         bld.pseudo(aco_opcode::p_create_vector, Definition(dst), dst0, dst1);
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
   case nir_op_iadd_sat: {
      if (dst.regClass() == v1 && instr->dest.dest.ssa.bit_size == 16) {
         Instruction* add_instr =
            emit_vop3p_instruction(ctx, instr, aco_opcode::v_pk_add_i16, dst);
         add_instr->vop3p().clamp = 1;
         break;
      }
      Temp src0 = get_alu_src(ctx, instr->src[0]);
      Temp src1 = get_alu_src(ctx, instr->src[1]);
      if (dst.regClass() == s1) {
         Temp cond = bld.sopc(aco_opcode::s_cmp_lt_i32, bld.def(s1, scc), src1, Operand::zero());
         Temp bound = bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.scc(bld.def(s1, scc)),
                               Operand::c32(INT32_MAX), cond);
         Temp overflow = bld.tmp(s1);
         Temp add =
            bld.sop2(aco_opcode::s_add_i32, bld.def(s1), bld.scc(Definition(overflow)), src0, src1);
         bld.sop2(aco_opcode::s_cselect_b32, Definition(dst), bound, add, bld.scc(overflow));
         break;
      }

      src1 = as_vgpr(ctx, src1);

      if (dst.regClass() == v2b) {
         Instruction* add_instr =
            bld.vop3(aco_opcode::v_add_i16, Definition(dst), src0, src1).instr;
         add_instr->vop3().clamp = 1;
      } else if (dst.regClass() == v1) {
         Instruction* add_instr =
            bld.vop3(aco_opcode::v_add_i32, Definition(dst), src0, src1).instr;
         add_instr->vop3().clamp = 1;
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
   case nir_op_uadd_carry: {
      Temp src0 = get_alu_src(ctx, instr->src[0]);
      Temp src1 = get_alu_src(ctx, instr->src[1]);
      if (dst.regClass() == s1) {
         bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.scc(Definition(dst)), src0, src1);
         break;
      }
      if (dst.regClass() == v1) {
         Temp carry = bld.vadd32(bld.def(v1), src0, src1, true).def(1).getTemp();
         bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(dst), Operand::zero(), Operand::c32(1u),
                      carry);
         break;
      }

      Temp src00 = bld.tmp(src0.type(), 1);
      Temp src01 = bld.tmp(dst.type(), 1);
      bld.pseudo(aco_opcode::p_split_vector, Definition(src00), Definition(src01), src0);
      Temp src10 = bld.tmp(src1.type(), 1);
      Temp src11 = bld.tmp(dst.type(), 1);
      bld.pseudo(aco_opcode::p_split_vector, Definition(src10), Definition(src11), src1);
      if (dst.regClass() == s2) {
         Temp carry = bld.tmp(s1);
         bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.scc(Definition(carry)), src00, src10);
         carry = bld.sop2(aco_opcode::s_addc_u32, bld.def(s1), bld.scc(bld.def(s1)), src01, src11,
                          bld.scc(carry))
                    .def(1)
                    .getTemp();
         bld.pseudo(aco_opcode::p_create_vector, Definition(dst), carry, Operand::zero());
      } else if (dst.regClass() == v2) {
         Temp carry = bld.vadd32(bld.def(v1), src00, src10, true).def(1).getTemp();
         carry = bld.vadd32(bld.def(v1), src01, src11, true, carry).def(1).getTemp();
         carry = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), Operand::zero(),
                              Operand::c32(1u), carry);
         bld.pseudo(aco_opcode::p_create_vector, Definition(dst), carry, Operand::zero());
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
   case nir_op_isub: {
      if (dst.regClass() == s1) {
         emit_sop2_instruction(ctx, instr, aco_opcode::s_sub_i32, dst, true);
         break;
      } else if (dst.regClass() == v1 && instr->dest.dest.ssa.bit_size == 16) {
         emit_vop3p_instruction(ctx, instr, aco_opcode::v_pk_sub_u16, dst);
         break;
      }

      Temp src0 = get_alu_src(ctx, instr->src[0]);
      Temp src1 = get_alu_src(ctx, instr->src[1]);
      if (dst.regClass() == v1) {
         bld.vsub32(Definition(dst), src0, src1);
         break;
      } else if (dst.bytes() <= 2) {
         if (ctx->program->gfx_level >= GFX10)
            bld.vop3(aco_opcode::v_sub_u16_e64, Definition(dst), src0, src1);
         else if (src1.type() == RegType::sgpr)
            bld.vop2(aco_opcode::v_subrev_u16, Definition(dst), src1, as_vgpr(ctx, src0));
         else if (ctx->program->gfx_level >= GFX8)
            bld.vop2(aco_opcode::v_sub_u16, Definition(dst), src0, as_vgpr(ctx, src1));
         else
            bld.vsub32(Definition(dst), src0, src1);
         break;
      }

      Temp src00 = bld.tmp(src0.type(), 1);
      Temp src01 = bld.tmp(dst.type(), 1);
      bld.pseudo(aco_opcode::p_split_vector, Definition(src00), Definition(src01), src0);
      Temp src10 = bld.tmp(src1.type(), 1);
      Temp src11 = bld.tmp(dst.type(), 1);
      bld.pseudo(aco_opcode::p_split_vector, Definition(src10), Definition(src11), src1);
      if (dst.regClass() == s2) {
         Temp borrow = bld.tmp(s1);
         Temp dst0 =
            bld.sop2(aco_opcode::s_sub_u32, bld.def(s1), bld.scc(Definition(borrow)), src00, src10);
         Temp dst1 = bld.sop2(aco_opcode::s_subb_u32, bld.def(s1), bld.def(s1, scc), src01, src11,
                              bld.scc(borrow));
         bld.pseudo(aco_opcode::p_create_vector, Definition(dst), dst0, dst1);
      } else if (dst.regClass() == v2) {
         Temp lower = bld.tmp(v1);
         Temp borrow = bld.vsub32(Definition(lower), src00, src10, true).def(1).getTemp();
         Temp upper = bld.vsub32(bld.def(v1), src01, src11, false, borrow);
         bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper);
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
   case nir_op_usub_borrow: {
      Temp src0 = get_alu_src(ctx, instr->src[0]);
      Temp src1 = get_alu_src(ctx, instr->src[1]);
      if (dst.regClass() == s1) {
         bld.sop2(aco_opcode::s_sub_u32, bld.def(s1), bld.scc(Definition(dst)), src0, src1);
         break;
      } else if (dst.regClass() == v1) {
         Temp borrow = bld.vsub32(bld.def(v1), src0, src1, true).def(1).getTemp();
         bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(dst), Operand::zero(), Operand::c32(1u),
                      borrow);
         break;
      }

      Temp src00 = bld.tmp(src0.type(), 1);
      Temp src01 = bld.tmp(dst.type(), 1);
      bld.pseudo(aco_opcode::p_split_vector, Definition(src00), Definition(src01), src0);
      Temp src10 = bld.tmp(src1.type(), 1);
      Temp src11 = bld.tmp(dst.type(), 1);
      bld.pseudo(aco_opcode::p_split_vector, Definition(src10), Definition(src11), src1);
      if (dst.regClass() == s2) {
         Temp borrow = bld.tmp(s1);
         bld.sop2(aco_opcode::s_sub_u32, bld.def(s1), bld.scc(Definition(borrow)), src00, src10);
         borrow = bld.sop2(aco_opcode::s_subb_u32, bld.def(s1), bld.scc(bld.def(s1)), src01, src11,
                           bld.scc(borrow))
                     .def(1)
                     .getTemp();
         bld.pseudo(aco_opcode::p_create_vector, Definition(dst), borrow, Operand::zero());
      } else if (dst.regClass() == v2) {
         Temp borrow = bld.vsub32(bld.def(v1), src00, src10, true).def(1).getTemp();
         borrow = bld.vsub32(bld.def(v1), src01, src11, true, Operand(borrow)).def(1).getTemp();
         borrow = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), Operand::zero(),
                               Operand::c32(1u), borrow);
         bld.pseudo(aco_opcode::p_create_vector, Definition(dst), borrow, Operand::zero());
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
   case nir_op_usub_sat: {
      if (dst.regClass() == v1 && instr->dest.dest.ssa.bit_size == 16) {
         Instruction* sub_instr = emit_vop3p_instruction(ctx, instr, aco_opcode::v_pk_sub_u16, dst);
         sub_instr->vop3p().clamp = 1;
         break;
      }
      Temp src0 = get_alu_src(ctx, instr->src[0]);
      Temp src1 = get_alu_src(ctx, instr->src[1]);
      if (dst.regClass() == s1) {
         Temp tmp = bld.tmp(s1), carry = bld.tmp(s1);
         bld.sop2(aco_opcode::s_sub_u32, Definition(tmp), bld.scc(Definition(carry)), src0, src1);
         bld.sop2(aco_opcode::s_cselect_b32, Definition(dst), Operand::c32(0), tmp, bld.scc(carry));
         break;
      } else if (dst.regClass() == v2b) {
         Instruction* sub_instr;
         if (ctx->program->gfx_level >= GFX10) {
            sub_instr = bld.vop3(aco_opcode::v_sub_u16_e64, Definition(dst), src0, src1).instr;
         } else {
            aco_opcode op = aco_opcode::v_sub_u16;
            if (src1.type() == RegType::sgpr) {
               std::swap(src0, src1);
               op = aco_opcode::v_subrev_u16;
            }
            sub_instr = bld.vop2_e64(op, Definition(dst), src0, as_vgpr(ctx, src1)).instr;
         }
         sub_instr->vop3().clamp = 1;
         break;
      } else if (dst.regClass() == v1) {
         usub32_sat(bld, Definition(dst), src0, as_vgpr(ctx, src1));
         break;
      }

      assert(src0.size() == 2 && src1.size() == 2);
      Temp src00 = bld.tmp(src0.type(), 1);
      Temp src01 = bld.tmp(src0.type(), 1);
      bld.pseudo(aco_opcode::p_split_vector, Definition(src00), Definition(src01), src0);
      Temp src10 = bld.tmp(src1.type(), 1);
      Temp src11 = bld.tmp(src1.type(), 1);
      bld.pseudo(aco_opcode::p_split_vector, Definition(src10), Definition(src11), src1);

      if (dst.regClass() == s2) {
         Temp carry0 = bld.tmp(s1);
         Temp carry1 = bld.tmp(s1);

         Temp no_sat0 =
            bld.sop2(aco_opcode::s_sub_u32, bld.def(s1), bld.scc(Definition(carry0)), src00, src10);
         Temp no_sat1 = bld.sop2(aco_opcode::s_subb_u32, bld.def(s1), bld.scc(Definition(carry1)),
                                 src01, src11, bld.scc(carry0));

         Temp no_sat = bld.pseudo(aco_opcode::p_create_vector, bld.def(s2), no_sat0, no_sat1);

         bld.sop2(aco_opcode::s_cselect_b64, Definition(dst), Operand::c64(0ull), no_sat,
                  bld.scc(carry1));
      } else if (dst.regClass() == v2) {
         Temp no_sat0 = bld.tmp(v1);
         Temp dst0 = bld.tmp(v1);
         Temp dst1 = bld.tmp(v1);

         Temp carry0 = bld.vsub32(Definition(no_sat0), src00, src10, true).def(1).getTemp();
         Temp carry1;

         if (ctx->program->gfx_level >= GFX8) {
            carry1 = bld.tmp(bld.lm);
            bld.vop2_e64(aco_opcode::v_subb_co_u32, Definition(dst1), Definition(carry1),
                         as_vgpr(ctx, src01), as_vgpr(ctx, src11), carry0)
               .instr->vop3()
               .clamp = 1;
         } else {
            Temp no_sat1 = bld.tmp(v1);
            carry1 = bld.vsub32(Definition(no_sat1), src01, src11, true, carry0).def(1).getTemp();
            bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(dst1), no_sat1, Operand::c32(0u),
                         carry1);
         }

         bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(dst0), no_sat0, Operand::c32(0u),
                      carry0);
         bld.pseudo(aco_opcode::p_create_vector, Definition(dst), dst0, dst1);
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
   case nir_op_isub_sat: {
      if (dst.regClass() == v1 && instr->dest.dest.ssa.bit_size == 16) {
         Instruction* sub_instr = emit_vop3p_instruction(ctx, instr, aco_opcode::v_pk_sub_i16, dst);
         sub_instr->vop3p().clamp = 1;
         break;
      }
      Temp src0 = get_alu_src(ctx, instr->src[0]);
      Temp src1 = get_alu_src(ctx, instr->src[1]);
      if (dst.regClass() == s1) {
         Temp cond = bld.sopc(aco_opcode::s_cmp_gt_i32, bld.def(s1, scc), src1, Operand::zero());
         Temp bound = bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.scc(bld.def(s1, scc)),
                               Operand::c32(INT32_MAX), cond);
         Temp overflow = bld.tmp(s1);
         Temp sub =
            bld.sop2(aco_opcode::s_sub_i32, bld.def(s1), bld.scc(Definition(overflow)), src0, src1);
         bld.sop2(aco_opcode::s_cselect_b32, Definition(dst), bound, sub, bld.scc(overflow));
         break;
      }

      src1 = as_vgpr(ctx, src1);

      if (dst.regClass() == v2b) {
         Instruction* sub_instr =
            bld.vop3(aco_opcode::v_sub_i16, Definition(dst), src0, src1).instr;
         sub_instr->vop3().clamp = 1;
      } else if (dst.regClass() == v1) {
         Instruction* sub_instr =
            bld.vop3(aco_opcode::v_sub_i32, Definition(dst), src0, src1).instr;
         sub_instr->vop3().clamp = 1;
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
   case nir_op_imul: {
      if (dst.bytes() <= 2 && ctx->program->gfx_level >= GFX10) {
         emit_vop3a_instruction(ctx, instr, aco_opcode::v_mul_lo_u16_e64, dst);
      } else if (dst.bytes() <= 2 && ctx->program->gfx_level >= GFX8) {
         emit_vop2_instruction(ctx, instr, aco_opcode::v_mul_lo_u16, dst, true);
      } else if (dst.regClass() == v1 && instr->dest.dest.ssa.bit_size == 16) {
         emit_vop3p_instruction(ctx, instr, aco_opcode::v_pk_mul_lo_u16, dst);
      } else if (dst.type() == RegType::vgpr) {
         uint32_t src0_ub = get_alu_src_ub(ctx, instr, 0);
         uint32_t src1_ub = get_alu_src_ub(ctx, instr, 1);
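         /* When both operands are provably below 2^24, the cheaper v_mul_u32_u24 (which only
          * reads 24 bits of each source) still produces the full 32-bit product exactly. */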
         if (src0_ub <= 0xffffff && src1_ub <= 0xffffff) {
            bool nuw_16bit = src0_ub <= 0xffff && src1_ub <= 0xffff && src0_ub * src1_ub <= 0xffff;
            emit_vop2_instruction(ctx, instr, aco_opcode::v_mul_u32_u24, dst,
                                  true /* commutative */, false, false, nuw_16bit);
         } else if (nir_src_is_const(instr->src[0].src)) {
            bld.v_mul_imm(Definition(dst), get_alu_src(ctx, instr->src[1]),
                          nir_src_as_uint(instr->src[0].src), false);
         } else if (nir_src_is_const(instr->src[1].src)) {
            bld.v_mul_imm(Definition(dst), get_alu_src(ctx, instr->src[0]),
                          nir_src_as_uint(instr->src[1].src), false);
         } else {
            emit_vop3a_instruction(ctx, instr, aco_opcode::v_mul_lo_u32, dst);
         }
      } else if (dst.regClass() == s1) {
         emit_sop2_instruction(ctx, instr, aco_opcode::s_mul_i32, dst, false);
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
   case nir_op_umul_high: {
      if (dst.regClass() == s1 && ctx->options->gfx_level >= GFX9) {
         emit_sop2_instruction(ctx, instr, aco_opcode::s_mul_hi_u32, dst, false);
      } else if (dst.bytes() == 4) {
         uint32_t src0_ub = get_alu_src_ub(ctx, instr, 0);
         uint32_t src1_ub = get_alu_src_ub(ctx, instr, 1);

         Temp tmp = dst.regClass() == s1 ? bld.tmp(v1) : dst;
         if (src0_ub <= 0xffffff && src1_ub <= 0xffffff) {
            emit_vop2_instruction(ctx, instr, aco_opcode::v_mul_hi_u32_u24, tmp, true);
         } else {
            emit_vop3a_instruction(ctx, instr, aco_opcode::v_mul_hi_u32, tmp);
         }

         if (dst.regClass() == s1)
            bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), tmp);
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
   case nir_op_imul_high: {
      if (dst.regClass() == v1) {
         emit_vop3a_instruction(ctx, instr, aco_opcode::v_mul_hi_i32, dst);
      } else if (dst.regClass() == s1 && ctx->options->gfx_level >= GFX9) {
         emit_sop2_instruction(ctx, instr, aco_opcode::s_mul_hi_i32, dst, false);
      } else if (dst.regClass() == s1) {
         Temp tmp = bld.vop3(aco_opcode::v_mul_hi_i32, bld.def(v1), get_alu_src(ctx, instr->src[0]),
                             as_vgpr(ctx, get_alu_src(ctx, instr->src[1])));
         bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), tmp);
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
   case nir_op_fmul: {
      if (dst.regClass() == v2b) {
         emit_vop2_instruction(ctx, instr, aco_opcode::v_mul_f16, dst, true);
      } else if (dst.regClass() == v1 && instr->dest.dest.ssa.bit_size == 16) {
         emit_vop3p_instruction(ctx, instr, aco_opcode::v_pk_mul_f16, dst);
      } else if (dst.regClass() == v1) {
         emit_vop2_instruction(ctx, instr, aco_opcode::v_mul_f32, dst, true);
      } else if (dst.regClass() == v2) {
         emit_vop3a_instruction(ctx, instr, aco_opcode::v_mul_f64, dst);
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
   case nir_op_fmulz: {
      if (dst.regClass() == v1) {
         emit_vop2_instruction(ctx, instr, aco_opcode::v_mul_legacy_f32, dst, true);
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
   case nir_op_fadd: {
      if (dst.regClass() == v2b) {
         emit_vop2_instruction(ctx, instr, aco_opcode::v_add_f16, dst, true);
      } else if (dst.regClass() == v1 && instr->dest.dest.ssa.bit_size == 16) {
         emit_vop3p_instruction(ctx, instr, aco_opcode::v_pk_add_f16, dst);
      } else if (dst.regClass() == v1) {
         emit_vop2_instruction(ctx, instr, aco_opcode::v_add_f32, dst, true);
      } else if (dst.regClass() == v2) {
         emit_vop3a_instruction(ctx, instr, aco_opcode::v_add_f64, dst);
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
   case nir_op_fsub: {
      if (dst.regClass() == v1 && instr->dest.dest.ssa.bit_size == 16) {
         Instruction* add = emit_vop3p_instruction(ctx, instr, aco_opcode::v_pk_add_f16, dst);
         VOP3P_instruction& sub = add->vop3p();
         sub.neg_lo[1] = true;
         sub.neg_hi[1] = true;
         break;
      }

      Temp src0 = get_alu_src(ctx, instr->src[0]);
      Temp src1 = get_alu_src(ctx, instr->src[1]);
      if (dst.regClass() == v2b) {
         if (src1.type() == RegType::vgpr || src0.type() != RegType::vgpr)
            emit_vop2_instruction(ctx, instr, aco_opcode::v_sub_f16, dst, false);
         else
            emit_vop2_instruction(ctx, instr, aco_opcode::v_subrev_f16, dst, true);
      } else if (dst.regClass() == v1) {
         if (src1.type() == RegType::vgpr || src0.type() != RegType::vgpr)
            emit_vop2_instruction(ctx, instr, aco_opcode::v_sub_f32, dst, false);
         else
            emit_vop2_instruction(ctx, instr, aco_opcode::v_subrev_f32, dst, true);
      } else if (dst.regClass() == v2) {
         Instruction* add = bld.vop3(aco_opcode::v_add_f64, Definition(dst), as_vgpr(ctx, src0),
                                     as_vgpr(ctx, src1));
         add->vop3().neg[1] = true;
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
   case nir_op_ffma: {
      if (dst.regClass() == v2b) {
         emit_vop3a_instruction(ctx, instr, aco_opcode::v_fma_f16, dst, false, 3);
      } else if (dst.regClass() == v1 && instr->dest.dest.ssa.bit_size == 16) {
         assert(instr->dest.dest.ssa.num_components == 2);

         Temp src0 = as_vgpr(ctx, get_alu_src_vop3p(ctx, instr->src[0]));
         Temp src1 = as_vgpr(ctx, get_alu_src_vop3p(ctx, instr->src[1]));
         Temp src2 = as_vgpr(ctx, get_alu_src_vop3p(ctx, instr->src[2]));

         /* swizzle to opsel: all swizzles are either 0 (x) or 1 (y) */
         unsigned opsel_lo = 0, opsel_hi = 0;
         for (unsigned i = 0; i < 3; i++) {
            opsel_lo |= (instr->src[i].swizzle[0] & 1) << i;
            opsel_hi |= (instr->src[i].swizzle[1] & 1) << i;
         }

         bld.vop3p(aco_opcode::v_pk_fma_f16, Definition(dst), src0, src1, src2, opsel_lo, opsel_hi);
      } else if (dst.regClass() == v1) {
         emit_vop3a_instruction(ctx, instr, aco_opcode::v_fma_f32, dst,
                                ctx->block->fp_mode.must_flush_denorms32, 3);
      } else if (dst.regClass() == v2) {
         emit_vop3a_instruction(ctx, instr, aco_opcode::v_fma_f64, dst, false, 3);
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
   case nir_op_ffmaz: {
      if (dst.regClass() == v1) {
         emit_vop3a_instruction(ctx, instr, aco_opcode::v_fma_legacy_f32, dst,
                                ctx->block->fp_mode.must_flush_denorms32, 3);
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
   case nir_op_fmax: {
      if (dst.regClass() == v2b) {
         // TODO: check fp_mode.must_flush_denorms16_64
         emit_vop2_instruction(ctx, instr, aco_opcode::v_max_f16, dst, true);
      } else if (dst.regClass() == v1 && instr->dest.dest.ssa.bit_size == 16) {
         emit_vop3p_instruction(ctx, instr, aco_opcode::v_pk_max_f16, dst);
      } else if (dst.regClass() == v1) {
         emit_vop2_instruction(ctx, instr, aco_opcode::v_max_f32, dst, true, false,
                               ctx->block->fp_mode.must_flush_denorms32);
      } else if (dst.regClass() == v2) {
         emit_vop3a_instruction(ctx, instr, aco_opcode::v_max_f64, dst,
                                ctx->block->fp_mode.must_flush_denorms16_64);
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
   case nir_op_fmin: {
      if (dst.regClass() == v2b) {
         // TODO: check fp_mode.must_flush_denorms16_64
         emit_vop2_instruction(ctx, instr, aco_opcode::v_min_f16, dst, true);
      } else if (dst.regClass() == v1 && instr->dest.dest.ssa.bit_size == 16) {
         emit_vop3p_instruction(ctx, instr, aco_opcode::v_pk_min_f16, dst, true);
      } else if (dst.regClass() == v1) {
         emit_vop2_instruction(ctx, instr, aco_opcode::v_min_f32, dst, true, false,
                               ctx->block->fp_mode.must_flush_denorms32);
      } else if (dst.regClass() == v2) {
         emit_vop3a_instruction(ctx, instr, aco_opcode::v_min_f64, dst,
                                ctx->block->fp_mode.must_flush_denorms16_64);
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
   case nir_op_sdot_4x8_iadd: {
      emit_idot_instruction(ctx, instr, aco_opcode::v_dot4_i32_i8, dst, false);
      break;
   }
   case nir_op_sdot_4x8_iadd_sat: {
      emit_idot_instruction(ctx, instr, aco_opcode::v_dot4_i32_i8, dst, true);
      break;
   }
   case nir_op_udot_4x8_uadd: {
      emit_idot_instruction(ctx, instr, aco_opcode::v_dot4_u32_u8, dst, false);
      break;
   }
   case nir_op_udot_4x8_uadd_sat: {
      emit_idot_instruction(ctx, instr, aco_opcode::v_dot4_u32_u8, dst, true);
      break;
   }
   case nir_op_sdot_2x16_iadd: {
      emit_idot_instruction(ctx, instr, aco_opcode::v_dot2_i32_i16, dst, false);
      break;
   }
   case nir_op_sdot_2x16_iadd_sat: {
      emit_idot_instruction(ctx, instr, aco_opcode::v_dot2_i32_i16, dst, true);
      break;
   }
   case nir_op_udot_2x16_uadd: {
      emit_idot_instruction(ctx, instr, aco_opcode::v_dot2_u32_u16, dst, false);
      break;
   }
   case nir_op_udot_2x16_uadd_sat: {
      emit_idot_instruction(ctx, instr, aco_opcode::v_dot2_u32_u16, dst, true);
      break;
   }
   case nir_op_cube_face_coord_amd: {
      Temp in = get_alu_src(ctx, instr->src[0], 3);
      Temp src[3] = {emit_extract_vector(ctx, in, 0, v1), emit_extract_vector(ctx, in, 1, v1),
                     emit_extract_vector(ctx, in, 2, v1)};
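      /* v_cubema_f32 returns 2.0 * the major axis, so multiplying sc/tc by rcp(ma) and
       * adding 0.5 maps the face coordinates into the [0, 1] range used for sampling. */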
      Temp ma = bld.vop3(aco_opcode::v_cubema_f32, bld.def(v1), src[0], src[1], src[2]);
      ma = bld.vop1(aco_opcode::v_rcp_f32, bld.def(v1), ma);
      Temp sc = bld.vop3(aco_opcode::v_cubesc_f32, bld.def(v1), src[0], src[1], src[2]);
      Temp tc = bld.vop3(aco_opcode::v_cubetc_f32, bld.def(v1), src[0], src[1], src[2]);
      sc = bld.vop2(aco_opcode::v_add_f32, bld.def(v1), Operand::c32(0x3f000000u /*0.5*/),
                    bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), sc, ma));
      tc = bld.vop2(aco_opcode::v_add_f32, bld.def(v1), Operand::c32(0x3f000000u /*0.5*/),
                    bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), tc, ma));
      bld.pseudo(aco_opcode::p_create_vector, Definition(dst), sc, tc);
      break;
   }
   case nir_op_cube_face_index_amd: {
      Temp in = get_alu_src(ctx, instr->src[0], 3);
      Temp src[3] = {emit_extract_vector(ctx, in, 0, v1), emit_extract_vector(ctx, in, 1, v1),
                     emit_extract_vector(ctx, in, 2, v1)};
      bld.vop3(aco_opcode::v_cubeid_f32, Definition(dst), src[0], src[1], src[2]);
      break;
   }
   case nir_op_bcsel: {
      emit_bcsel(ctx, instr, dst);
      break;
   }
   case nir_op_frsq: {
      if (dst.regClass() == v2b) {
         emit_vop1_instruction(ctx, instr, aco_opcode::v_rsq_f16, dst);
      } else if (dst.regClass() == v1) {
         Temp src = get_alu_src(ctx, instr->src[0]);
         emit_rsq(ctx, bld, Definition(dst), src);
      } else if (dst.regClass() == v2) {
         /* Lowered at NIR level for precision reasons. */
         emit_vop1_instruction(ctx, instr, aco_opcode::v_rsq_f64, dst);
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
   case nir_op_fneg: {
      if (dst.regClass() == v1 && instr->dest.dest.ssa.bit_size == 16) {
         Temp src = get_alu_src_vop3p(ctx, instr->src[0]);
         Instruction* vop3p =
            bld.vop3p(aco_opcode::v_pk_mul_f16, Definition(dst), src, Operand::c16(0x3C00),
                      instr->src[0].swizzle[0] & 1, instr->src[0].swizzle[1] & 1);
         vop3p->vop3p().neg_lo[0] = true;
         vop3p->vop3p().neg_hi[0] = true;
         break;
      }
      Temp src = get_alu_src(ctx, instr->src[0]);
      if (dst.regClass() == v2b) {
         bld.vop2(aco_opcode::v_mul_f16, Definition(dst), Operand::c16(0xbc00u), as_vgpr(ctx, src));
      } else if (dst.regClass() == v1) {
         bld.vop2(aco_opcode::v_mul_f32, Definition(dst), Operand::c32(0xbf800000u),
                  as_vgpr(ctx, src));
      } else if (dst.regClass() == v2) {
         if (ctx->block->fp_mode.must_flush_denorms16_64)
            src = bld.vop3(aco_opcode::v_mul_f64, bld.def(v2), Operand::c64(0x3FF0000000000000),
                           as_vgpr(ctx, src));
         Temp upper = bld.tmp(v1), lower = bld.tmp(v1);
         bld.pseudo(aco_opcode::p_split_vector, Definition(lower), Definition(upper), src);
         upper = bld.vop2(aco_opcode::v_xor_b32, bld.def(v1), Operand::c32(0x80000000u), upper);
         bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper);
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
   case nir_op_fabs: {
      if (dst.regClass() == v1 && instr->dest.dest.ssa.bit_size == 16) {
         Temp src = get_alu_src_vop3p(ctx, instr->src[0]);
         Instruction* vop3p =
            bld.vop3p(aco_opcode::v_pk_max_f16, Definition(dst), src, src,
                      instr->src[0].swizzle[0] & 1 ? 3 : 0, instr->src[0].swizzle[1] & 1 ? 3 : 0)
               .instr;
         vop3p->vop3p().neg_lo[1] = true;
         vop3p->vop3p().neg_hi[1] = true;
         break;
      }
      Temp src = get_alu_src(ctx, instr->src[0]);
      if (dst.regClass() == v2b) {
         Instruction* mul = bld.vop2_e64(aco_opcode::v_mul_f16, Definition(dst),
                                         Operand::c16(0x3c00), as_vgpr(ctx, src))
                               .instr;
         mul->vop3().abs[1] = true;
      } else if (dst.regClass() == v1) {
         Instruction* mul = bld.vop2_e64(aco_opcode::v_mul_f32, Definition(dst),
                                         Operand::c32(0x3f800000u), as_vgpr(ctx, src))
                               .instr;
         mul->vop3().abs[1] = true;
      } else if (dst.regClass() == v2) {
         if (ctx->block->fp_mode.must_flush_denorms16_64)
            src = bld.vop3(aco_opcode::v_mul_f64, bld.def(v2), Operand::c64(0x3FF0000000000000),
                           as_vgpr(ctx, src));
         Temp upper = bld.tmp(v1), lower = bld.tmp(v1);
         bld.pseudo(aco_opcode::p_split_vector, Definition(lower), Definition(upper), src);
         upper = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand::c32(0x7FFFFFFFu), upper);
         bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper);
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
   case nir_op_fsat: {
      if (dst.regClass() == v1 && instr->dest.dest.ssa.bit_size == 16) {
         Temp src = get_alu_src_vop3p(ctx, instr->src[0]);
         Instruction* vop3p =
            bld.vop3p(aco_opcode::v_pk_mul_f16, Definition(dst), src, Operand::c16(0x3C00),
                      instr->src[0].swizzle[0] & 1, instr->src[0].swizzle[1] & 1);
         vop3p->vop3p().clamp = true;
         break;
      }
      Temp src = get_alu_src(ctx, instr->src[0]);
      if (dst.regClass() == v2b) {
         bld.vop3(aco_opcode::v_med3_f16, Definition(dst), Operand::c16(0u), Operand::c16(0x3c00),
                  src);
      } else if (dst.regClass() == v1) {
         bld.vop3(aco_opcode::v_med3_f32, Definition(dst), Operand::zero(),
                  Operand::c32(0x3f800000u), src);
         /* apparently, it is not necessary to flush denorms if this instruction is used with these
          * operands */
         // TODO: confirm that this holds under any circumstances
      } else if (dst.regClass() == v2) {
         Instruction* add = bld.vop3(aco_opcode::v_add_f64, Definition(dst), src, Operand::zero());
         add->vop3().clamp = true;
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
   case nir_op_flog2: {
      if (dst.regClass() == v2b) {
         emit_vop1_instruction(ctx, instr, aco_opcode::v_log_f16, dst);
      } else if (dst.regClass() == v1) {
         Temp src = get_alu_src(ctx, instr->src[0]);
         emit_log2(ctx, bld, Definition(dst), src);
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
   case nir_op_frcp: {
      if (dst.regClass() == v2b) {
         emit_vop1_instruction(ctx, instr, aco_opcode::v_rcp_f16, dst);
      } else if (dst.regClass() == v1) {
         Temp src = get_alu_src(ctx, instr->src[0]);
         emit_rcp(ctx, bld, Definition(dst), src);
      } else if (dst.regClass() == v2) {
         /* Lowered at NIR level for precision reasons. */
         emit_vop1_instruction(ctx, instr, aco_opcode::v_rcp_f64, dst);
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
   case nir_op_fexp2: {
      if (dst.regClass() == v2b) {
         emit_vop1_instruction(ctx, instr, aco_opcode::v_exp_f16, dst);
      } else if (dst.regClass() == v1) {
         emit_vop1_instruction(ctx, instr, aco_opcode::v_exp_f32, dst);
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
   case nir_op_fsqrt: {
      if (dst.regClass() == v2b) {
         emit_vop1_instruction(ctx, instr, aco_opcode::v_sqrt_f16, dst);
      } else if (dst.regClass() == v1) {
         Temp src = get_alu_src(ctx, instr->src[0]);
         emit_sqrt(ctx, bld, Definition(dst), src);
      } else if (dst.regClass() == v2) {
         /* Lowered at NIR level for precision reasons. */
         emit_vop1_instruction(ctx, instr, aco_opcode::v_sqrt_f64, dst);
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
   case nir_op_ffract: {
      if (dst.regClass() == v2b) {
         emit_vop1_instruction(ctx, instr, aco_opcode::v_fract_f16, dst);
      } else if (dst.regClass() == v1) {
         emit_vop1_instruction(ctx, instr, aco_opcode::v_fract_f32, dst);
      } else if (dst.regClass() == v2) {
         emit_vop1_instruction(ctx, instr, aco_opcode::v_fract_f64, dst);
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
   case nir_op_ffloor: {
      if (dst.regClass() == v2b) {
         emit_vop1_instruction(ctx, instr, aco_opcode::v_floor_f16, dst);
      } else if (dst.regClass() == v1) {
         emit_vop1_instruction(ctx, instr, aco_opcode::v_floor_f32, dst);
      } else if (dst.regClass() == v2) {
         Temp src = get_alu_src(ctx, instr->src[0]);
         emit_floor_f64(ctx, bld, Definition(dst), src);
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
   case nir_op_fceil: {
      if (dst.regClass() == v2b) {
         emit_vop1_instruction(ctx, instr, aco_opcode::v_ceil_f16, dst);
      } else if (dst.regClass() == v1) {
         emit_vop1_instruction(ctx, instr, aco_opcode::v_ceil_f32, dst);
      } else if (dst.regClass() == v2) {
         if (ctx->options->gfx_level >= GFX7) {
            emit_vop1_instruction(ctx, instr, aco_opcode::v_ceil_f64, dst);
         } else {
            /* GFX6 doesn't support V_CEIL_F64, lower it. */
            /* trunc = trunc(src0)
             * if (src0 > 0.0 && src0 != trunc)
             *    trunc += 1.0
             */
            Temp src0 = get_alu_src(ctx, instr->src[0]);
            Temp trunc = emit_trunc_f64(ctx, bld, bld.def(v2), src0);
            Temp tmp0 =
               bld.vopc_e64(aco_opcode::v_cmp_gt_f64, bld.def(bld.lm), src0, Operand::zero());
            Temp tmp1 = bld.vopc(aco_opcode::v_cmp_lg_f64, bld.def(bld.lm), src0, trunc);
            Temp cond = bld.sop2(aco_opcode::s_and_b64, bld.def(s2), bld.def(s1, scc), tmp0, tmp1);
            Temp add = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1),
                                bld.copy(bld.def(v1), Operand::zero()),
                                bld.copy(bld.def(v1), Operand::c32(0x3ff00000u)), cond);
            add = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2),
                             bld.copy(bld.def(v1), Operand::zero()), add);
            bld.vop3(aco_opcode::v_add_f64, Definition(dst), trunc, add);
         }
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
   case nir_op_ftrunc: {
      if (dst.regClass() == v2b) {
         emit_vop1_instruction(ctx, instr, aco_opcode::v_trunc_f16, dst);
      } else if (dst.regClass() == v1) {
         emit_vop1_instruction(ctx, instr, aco_opcode::v_trunc_f32, dst);
      } else if (dst.regClass() == v2) {
         Temp src = get_alu_src(ctx, instr->src[0]);
         emit_trunc_f64(ctx, bld, Definition(dst), src);
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
   case nir_op_fround_even: {
      if (dst.regClass() == v2b) {
         emit_vop1_instruction(ctx, instr, aco_opcode::v_rndne_f16, dst);
      } else if (dst.regClass() == v1) {
         emit_vop1_instruction(ctx, instr, aco_opcode::v_rndne_f32, dst);
      } else if (dst.regClass() == v2) {
         if (ctx->options->gfx_level >= GFX7) {
            emit_vop1_instruction(ctx, instr, aco_opcode::v_rndne_f64, dst);
         } else {
            /* GFX6 doesn't support V_RNDNE_F64, lower it. */
            Temp src0_lo = bld.tmp(v1), src0_hi = bld.tmp(v1);
            Temp src0 = get_alu_src(ctx, instr->src[0]);
            bld.pseudo(aco_opcode::p_split_vector, Definition(src0_lo), Definition(src0_hi), src0);
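            /* Round-to-nearest-even via the classic 2^52 trick: v_bfi_b32 builds ±2^52
             * (0x43300000 with the source's sign bit), and adding then subtracting it shifts
             * all fraction bits out so the FPU's round-to-nearest does the rounding. Inputs
             * with |src| > 0x432fffff_ffffffff are already integral and pass through. */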
            Temp bitmask = bld.sop1(aco_opcode::s_brev_b32, bld.def(s1),
                                    bld.copy(bld.def(s1), Operand::c32(-2u)));
            Temp bfi =
               bld.vop3(aco_opcode::v_bfi_b32, bld.def(v1), bitmask,
                        bld.copy(bld.def(v1), Operand::c32(0x43300000u)), as_vgpr(ctx, src0_hi));
            Temp tmp =
               bld.vop3(aco_opcode::v_add_f64, bld.def(v2), src0,
                        bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), Operand::zero(), bfi));
            Instruction* sub =
               bld.vop3(aco_opcode::v_add_f64, bld.def(v2), tmp,
                        bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), Operand::zero(), bfi));
            sub->vop3().neg[1] = true;
            tmp = sub->definitions[0].getTemp();

            Temp v = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), Operand::c32(-1u),
                                Operand::c32(0x432fffffu));
            Instruction* vop3 = bld.vopc_e64(aco_opcode::v_cmp_gt_f64, bld.def(bld.lm), src0, v);
            vop3->vop3().abs[0] = true;
            Temp cond = vop3->definitions[0].getTemp();

            Temp tmp_lo = bld.tmp(v1), tmp_hi = bld.tmp(v1);
            bld.pseudo(aco_opcode::p_split_vector, Definition(tmp_lo), Definition(tmp_hi), tmp);
            Temp dst0 = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), tmp_lo,
                                     as_vgpr(ctx, src0_lo), cond);
            Temp dst1 = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), tmp_hi,
                                     as_vgpr(ctx, src0_hi), cond);

            bld.pseudo(aco_opcode::p_create_vector, Definition(dst), dst0, dst1);
         }
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
   case nir_op_fsin_amd:
   case nir_op_fcos_amd: {
      Temp src = as_vgpr(ctx, get_alu_src(ctx, instr->src[0]));
      aco_ptr<Instruction> norm;
      if (dst.regClass() == v2b) {
         aco_opcode opcode =
            instr->op == nir_op_fsin_amd ? aco_opcode::v_sin_f16 : aco_opcode::v_cos_f16;
         bld.vop1(opcode, Definition(dst), src);
      } else if (dst.regClass() == v1) {
         /* before GFX9, v_sin_f32 and v_cos_f32 had a valid input domain of [-256, +256] */
         if (ctx->options->gfx_level < GFX9)
            src = bld.vop1(aco_opcode::v_fract_f32, bld.def(v1), src);

         aco_opcode opcode =
            instr->op == nir_op_fsin_amd ? aco_opcode::v_sin_f32 : aco_opcode::v_cos_f32;
         bld.vop1(opcode, Definition(dst), src);
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
   case nir_op_ldexp: {
      if (dst.regClass() == v2b) {
         emit_vop2_instruction(ctx, instr, aco_opcode::v_ldexp_f16, dst, false);
      } else if (dst.regClass() == v1) {
         emit_vop3a_instruction(ctx, instr, aco_opcode::v_ldexp_f32, dst);
      } else if (dst.regClass() == v2) {
         emit_vop3a_instruction(ctx, instr, aco_opcode::v_ldexp_f64, dst);
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
   case nir_op_frexp_sig: {
      if (dst.regClass() == v2b) {
         emit_vop1_instruction(ctx, instr, aco_opcode::v_frexp_mant_f16, dst);
      } else if (dst.regClass() == v1) {
         emit_vop1_instruction(ctx, instr, aco_opcode::v_frexp_mant_f32, dst);
      } else if (dst.regClass() == v2) {
         emit_vop1_instruction(ctx, instr, aco_opcode::v_frexp_mant_f64, dst);
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
   case nir_op_frexp_exp: {
      if (instr->src[0].src.ssa->bit_size == 16) {
         Temp src = get_alu_src(ctx, instr->src[0]);
         Temp tmp = bld.vop1(aco_opcode::v_frexp_exp_i16_f16, bld.def(v1), src);
         tmp = bld.pseudo(aco_opcode::p_extract_vector, bld.def(v1b), tmp, Operand::zero());
         convert_int(ctx, bld, tmp, 8, 32, true, dst);
      } else if (instr->src[0].src.ssa->bit_size == 32) {
         emit_vop1_instruction(ctx, instr, aco_opcode::v_frexp_exp_i32_f32, dst);
      } else if (instr->src[0].src.ssa->bit_size == 64) {
         emit_vop1_instruction(ctx, instr, aco_opcode::v_frexp_exp_i32_f64, dst);
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
   case nir_op_fsign: {
      Temp src = as_vgpr(ctx, get_alu_src(ctx, instr->src[0]));
      if (dst.regClass() == v2b) {
         assert(ctx->program->gfx_level >= GFX9);
         /* replace negative zero with positive zero */
         src = bld.vop2(aco_opcode::v_add_f16, bld.def(v2b), Operand::zero(), src);
         src =
            bld.vop3(aco_opcode::v_med3_i16, bld.def(v2b), Operand::c16(-1), src, Operand::c16(1u));
         bld.vop1(aco_opcode::v_cvt_f16_i16, Definition(dst), src);
      } else if (dst.regClass() == v1) {
         src = bld.vop2(aco_opcode::v_add_f32, bld.def(v1), Operand::zero(), src);
         src =
            bld.vop3(aco_opcode::v_med3_i32, bld.def(v1), Operand::c32(-1), src, Operand::c32(1u));
         bld.vop1(aco_opcode::v_cvt_f32_i32, Definition(dst), src);
      } else if (dst.regClass() == v2) {
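         /* Assemble the result's high dword directly: 1.0's high word for positive inputs,
          * the source's high word for ±0.0 (preserving the zero's sign), and -1.0's high word
          * for negative inputs; the low dword of ±1.0/±0.0 is always zero. */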
         Temp cond = bld.vopc(aco_opcode::v_cmp_nlt_f64, bld.def(bld.lm), Operand::zero(), src);
         Temp tmp = bld.copy(bld.def(v1), Operand::c32(0x3FF00000u));
         Temp upper = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), tmp,
                                   emit_extract_vector(ctx, src, 1, v1), cond);

         cond = bld.vopc(aco_opcode::v_cmp_le_f64, bld.def(bld.lm), Operand::zero(), src);
         tmp = bld.copy(bld.def(v1), Operand::c32(0xBFF00000u));
         upper = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), tmp, upper, cond);

         bld.pseudo(aco_opcode::p_create_vector, Definition(dst), Operand::zero(), upper);
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
   case nir_op_f2f16:
   case nir_op_f2f16_rtne: {
      Temp src = get_alu_src(ctx, instr->src[0]);
      if (instr->src[0].src.ssa->bit_size == 64)
         src = bld.vop1(aco_opcode::v_cvt_f32_f64, bld.def(v1), src);
      if (instr->op == nir_op_f2f16_rtne && ctx->block->fp_mode.round16_64 != fp_round_ne)
         /* We emit s_round_mode/s_setreg_imm32 in lower_to_hw_instr to
          * keep value numbering and the scheduler simpler.
          */
         bld.vop1(aco_opcode::p_cvt_f16_f32_rtne, Definition(dst), src);
      else
         bld.vop1(aco_opcode::v_cvt_f16_f32, Definition(dst), src);
      break;
   }
   case nir_op_f2f16_rtz: {
      Temp src = get_alu_src(ctx, instr->src[0]);
      if (instr->src[0].src.ssa->bit_size == 64)
         src = bld.vop1(aco_opcode::v_cvt_f32_f64, bld.def(v1), src);
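      /* v_cvt_pkrtz_f16_f32 always rounds towards zero regardless of the current hardware
       * round mode; if the mode is already RTZ, the plain conversion suffices. */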
      if (ctx->block->fp_mode.round16_64 == fp_round_tz)
         bld.vop1(aco_opcode::v_cvt_f16_f32, Definition(dst), src);
      else if (ctx->program->gfx_level == GFX8 || ctx->program->gfx_level == GFX9)
         bld.vop3(aco_opcode::v_cvt_pkrtz_f16_f32_e64, Definition(dst), src, Operand::zero());
      else
         bld.vop2(aco_opcode::v_cvt_pkrtz_f16_f32, Definition(dst), src, as_vgpr(ctx, src));
      break;
   }
   case nir_op_f2f32: {
      if (instr->src[0].src.ssa->bit_size == 16) {
         emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_f32_f16, dst);
      } else if (instr->src[0].src.ssa->bit_size == 64) {
         emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_f32_f64, dst);
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
   case nir_op_f2f64: {
      Temp src = get_alu_src(ctx, instr->src[0]);
      if (instr->src[0].src.ssa->bit_size == 16)
         src = bld.vop1(aco_opcode::v_cvt_f32_f16, bld.def(v1), src);
      bld.vop1(aco_opcode::v_cvt_f64_f32, Definition(dst), src);
      break;
   }
   case nir_op_i2f16: {
      assert(dst.regClass() == v2b);
      Temp src = get_alu_src(ctx, instr->src[0]);
      const unsigned input_size = instr->src[0].src.ssa->bit_size;
      if (input_size <= 16) {
         /* Expand integer to the size expected by the uint→float converter used below */
         unsigned target_size = (ctx->program->gfx_level >= GFX8 ? 16 : 32);
         if (input_size != target_size) {
            src = convert_int(ctx, bld, src, input_size, target_size, true);
         }
      } else if (input_size == 64) {
         /* Truncate down to 32 bits; if any of the upper bits are relevant,
          * the value does not fall into the single-precision float range
          * anyway. SPIR-V does not mandate any specific behavior for such
          * large inputs.
          */
         src = convert_int(ctx, bld, src, 64, 32, false);
      }

      if (ctx->program->gfx_level >= GFX8 && input_size <= 16) {
         bld.vop1(aco_opcode::v_cvt_f16_i16, Definition(dst), src);
      } else {
         /* Convert to f32 and then down to f16. This is needed to handle
          * inputs slightly outside the range [INT16_MIN, INT16_MAX],
          * which are representable via f16 but wouldn't be converted
          * correctly by v_cvt_f16_i16.
          *
          * This is also the fallback path taken on GFX7 and earlier, which
          * do not support direct f16⟷i16 conversions.
          */
         src = bld.vop1(aco_opcode::v_cvt_f32_i32, bld.def(v1), src);
         bld.vop1(aco_opcode::v_cvt_f16_f32, Definition(dst), src);
      }
      break;
   }
   case nir_op_i2f32: {
      assert(dst.size() == 1);
      Temp src = get_alu_src(ctx, instr->src[0]);
      const unsigned input_size = instr->src[0].src.ssa->bit_size;
      if (input_size <= 32) {
         if (input_size <= 16) {
            /* Sign-extend to 32 bits */
            src = convert_int(ctx, bld, src, input_size, 32, true);
         }
         bld.vop1(aco_opcode::v_cvt_f32_i32, Definition(dst), src);
      } else {
         assert(input_size == 64);
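         /* Convert both halves exactly through f64: low half as unsigned, high half as signed
          * scaled by 2^32 via v_ldexp_f64, then round the sum once to f32. */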
         RegClass rc = RegClass(src.type(), 1);
         Temp lower = bld.tmp(rc), upper = bld.tmp(rc);
         bld.pseudo(aco_opcode::p_split_vector, Definition(lower), Definition(upper), src);
         lower = bld.vop1(aco_opcode::v_cvt_f64_u32, bld.def(v2), lower);
         upper = bld.vop1(aco_opcode::v_cvt_f64_i32, bld.def(v2), upper);
         upper = bld.vop3(aco_opcode::v_ldexp_f64, bld.def(v2), upper, Operand::c32(32u));
         upper = bld.vop3(aco_opcode::v_add_f64, bld.def(v2), lower, upper);
         bld.vop1(aco_opcode::v_cvt_f32_f64, Definition(dst), upper);
      }
      break;
   }
   case nir_op_i2f64: {
      if (instr->src[0].src.ssa->bit_size <= 32) {
         Temp src = get_alu_src(ctx, instr->src[0]);
         if (instr->src[0].src.ssa->bit_size <= 16)
            src = convert_int(ctx, bld, src, instr->src[0].src.ssa->bit_size, 32, true);
         bld.vop1(aco_opcode::v_cvt_f64_i32, Definition(dst), src);
      } else if (instr->src[0].src.ssa->bit_size == 64) {
         Temp src = get_alu_src(ctx, instr->src[0]);
         RegClass rc = RegClass(src.type(), 1);
         Temp lower = bld.tmp(rc), upper = bld.tmp(rc);
         bld.pseudo(aco_opcode::p_split_vector, Definition(lower), Definition(upper), src);
         lower = bld.vop1(aco_opcode::v_cvt_f64_u32, bld.def(v2), lower);
         upper = bld.vop1(aco_opcode::v_cvt_f64_i32, bld.def(v2), upper);
         upper = bld.vop3(aco_opcode::v_ldexp_f64, bld.def(v2), upper, Operand::c32(32u));
         bld.vop3(aco_opcode::v_add_f64, Definition(dst), lower, upper);
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
   case nir_op_u2f16: {
      assert(dst.regClass() == v2b);
      Temp src = get_alu_src(ctx, instr->src[0]);
      const unsigned input_size = instr->src[0].src.ssa->bit_size;
      if (input_size <= 16) {
         /* Expand integer to the size expected by the uint→float converter used below */
         unsigned target_size = (ctx->program->gfx_level >= GFX8 ? 16 : 32);
         if (input_size != target_size) {
            src = convert_int(ctx, bld, src, input_size, target_size, false);
         }
      } else if (input_size == 64) {
         /* Truncate down to 32 bits; if any of the upper bits are non-zero,
          * the value does not fall into the single-precision float range
          * anyway. SPIR-V does not mandate any specific behavior for such
          * large inputs.
          */
         src = convert_int(ctx, bld, src, 64, 32, false);
      }

      if (ctx->program->gfx_level >= GFX8) {
         /* float16 has a range of [0, 65519]. Converting from larger
          * inputs is UB, so we just need to consider the lower 16 bits */
         bld.vop1(aco_opcode::v_cvt_f16_u16, Definition(dst), src);
      } else {
         /* GFX7 and earlier do not support direct f16⟷u16 conversions */
         src = bld.vop1(aco_opcode::v_cvt_f32_u32, bld.def(v1), src);
         bld.vop1(aco_opcode::v_cvt_f16_f32, Definition(dst), src);
      }
      break;
   }
   case nir_op_u2f32: {
      assert(dst.size() == 1);
      Temp src = get_alu_src(ctx, instr->src[0]);
      const unsigned input_size = instr->src[0].src.ssa->bit_size;
      if (input_size == 8) {
         bld.vop1(aco_opcode::v_cvt_f32_ubyte0, Definition(dst), src);
      } else if (input_size <= 32) {
         if (input_size == 16)
            src = convert_int(ctx, bld, src, instr->src[0].src.ssa->bit_size, 32, false);
         bld.vop1(aco_opcode::v_cvt_f32_u32, Definition(dst), src);
      } else {
         assert(input_size == 64);
         RegClass rc = RegClass(src.type(), 1);
         Temp lower = bld.tmp(rc), upper = bld.tmp(rc);
         bld.pseudo(aco_opcode::p_split_vector, Definition(lower), Definition(upper), src);
         lower = bld.vop1(aco_opcode::v_cvt_f64_u32, bld.def(v2), lower);
         upper = bld.vop1(aco_opcode::v_cvt_f64_u32, bld.def(v2), upper);
         upper = bld.vop3(aco_opcode::v_ldexp_f64, bld.def(v2), upper, Operand::c32(32u));
         upper = bld.vop3(aco_opcode::v_add_f64, bld.def(v2), lower, upper);
         bld.vop1(aco_opcode::v_cvt_f32_f64, Definition(dst), upper);
      }
      break;
   }
   case nir_op_u2f64: {
      if (instr->src[0].src.ssa->bit_size <= 32) {
         Temp src = get_alu_src(ctx, instr->src[0]);
         if (instr->src[0].src.ssa->bit_size <= 16)
            src = convert_int(ctx, bld, src, instr->src[0].src.ssa->bit_size, 32, false);
         bld.vop1(aco_opcode::v_cvt_f64_u32, Definition(dst), src);
      } else if (instr->src[0].src.ssa->bit_size == 64) {
         Temp src = get_alu_src(ctx, instr->src[0]);
         RegClass rc = RegClass(src.type(), 1);
         Temp lower = bld.tmp(rc), upper = bld.tmp(rc);
         bld.pseudo(aco_opcode::p_split_vector, Definition(lower), Definition(upper), src);
         lower = bld.vop1(aco_opcode::v_cvt_f64_u32, bld.def(v2), lower);
         upper = bld.vop1(aco_opcode::v_cvt_f64_u32, bld.def(v2), upper);
         upper = bld.vop3(aco_opcode::v_ldexp_f64, bld.def(v2), upper, Operand::c32(32u));
         bld.vop3(aco_opcode::v_add_f64, Definition(dst), lower, upper);
      } else {
         isel_err(&instr->instr, "Unimplemented NIR instr bit size");
      }
      break;
   }
   case nir_op_f2i8:
3075 case nir_op_f2i16: {
3076 if (instr->src[0].src.ssa->bit_size == 16) {
3077 if (ctx->program->gfx_level >= GFX8) {
3078 emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_i16_f16, dst);
3080 /* GFX7 and earlier do not support direct f16⟷i16 conversions */
3081 Temp tmp = bld.tmp(v1);
3082 emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_f32_f16, tmp);
3083 tmp = bld.vop1(aco_opcode::v_cvt_i32_f32, bld.def(v1), tmp);
3084 tmp = convert_int(ctx, bld, tmp, 32, instr->dest.dest.ssa.bit_size, false,
3085 (dst.type() == RegType::sgpr) ? Temp() : dst);
3086 if (dst.type() == RegType::sgpr) {
3087 bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), tmp);
3090 } else if (instr->src[0].src.ssa->bit_size == 32) {
3091 emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_i32_f32, dst);
3092 } else {
3093 emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_i32_f64, dst);
3094 }
3095 break;
3096 }
3098 case nir_op_f2u16: {
3099 if (instr->src[0].src.ssa->bit_size == 16) {
3100 if (ctx->program->gfx_level >= GFX8) {
3101 emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_u16_f16, dst);
3103 /* GFX7 and earlier do not support direct f16⟷u16 conversions */
3104 Temp tmp = bld.tmp(v1);
3105 emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_f32_f16, tmp);
3106 tmp = bld.vop1(aco_opcode::v_cvt_u32_f32, bld.def(v1), tmp);
3107 tmp = convert_int(ctx, bld, tmp, 32, instr->dest.dest.ssa.bit_size, false,
3108 (dst.type() == RegType::sgpr) ? Temp() : dst);
3109 if (dst.type() == RegType::sgpr) {
3110 bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), tmp);
3111 }
3112 }
3113 } else if (instr->src[0].src.ssa->bit_size == 32) {
3114 emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_u32_f32, dst);
3115 } else {
3116 emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_u32_f64, dst);
3117 }
3118 break;
3119 }
3120 case nir_op_f2i32: {
3121 Temp src = get_alu_src(ctx, instr->src[0]);
3122 if (instr->src[0].src.ssa->bit_size == 16) {
3123 Temp tmp = bld.vop1(aco_opcode::v_cvt_f32_f16, bld.def(v1), src);
3124 if (dst.type() == RegType::vgpr) {
3125 bld.vop1(aco_opcode::v_cvt_i32_f32, Definition(dst), tmp);
3126 } else {
3127 bld.pseudo(aco_opcode::p_as_uniform, Definition(dst),
3128 bld.vop1(aco_opcode::v_cvt_i32_f32, bld.def(v1), tmp));
3129 }
3130 } else if (instr->src[0].src.ssa->bit_size == 32) {
3131 emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_i32_f32, dst);
3132 } else if (instr->src[0].src.ssa->bit_size == 64) {
3133 emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_i32_f64, dst);
3134 } else {
3135 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
3136 }
3137 break;
3138 }
3139 case nir_op_f2u32: {
3140 Temp src = get_alu_src(ctx, instr->src[0]);
3141 if (instr->src[0].src.ssa->bit_size == 16) {
3142 Temp tmp = bld.vop1(aco_opcode::v_cvt_f32_f16, bld.def(v1), src);
3143 if (dst.type() == RegType::vgpr) {
3144 bld.vop1(aco_opcode::v_cvt_u32_f32, Definition(dst), tmp);
3145 } else {
3146 bld.pseudo(aco_opcode::p_as_uniform, Definition(dst),
3147 bld.vop1(aco_opcode::v_cvt_u32_f32, bld.def(v1), tmp));
3148 }
3149 } else if (instr->src[0].src.ssa->bit_size == 32) {
3150 emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_u32_f32, dst);
3151 } else if (instr->src[0].src.ssa->bit_size == 64) {
3152 emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_u32_f64, dst);
3153 } else {
3154 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
3155 }
3156 break;
3157 }
3158 case nir_op_f2i64: {
3159 Temp src = get_alu_src(ctx, instr->src[0]);
3160 if (instr->src[0].src.ssa->bit_size == 16)
3161 src = bld.vop1(aco_opcode::v_cvt_f32_f16, bld.def(v1), src);
3163 if (instr->src[0].src.ssa->bit_size <= 32 && dst.type() == RegType::vgpr) {
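/* Manual f32 -> i64: clamp the unbiased exponent to [0, 64], place the
 * 24-bit mantissa (with its implicit leading one) in a 64-bit value and
 * shift it right by 63 - exponent. A borrow from that subtraction means the
 * exponent is out of range and the result saturates; the sign is applied
 * last by xor/subtract (two's-complement negation). */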
3164 Temp exponent = bld.vop1(aco_opcode::v_frexp_exp_i32_f32, bld.def(v1), src);
3165 exponent = bld.vop3(aco_opcode::v_med3_i32, bld.def(v1), Operand::zero(), exponent,
3166 Operand::c32(64u));
3167 Temp mantissa = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand::c32(0x7fffffu), src);
3168 Temp sign = bld.vop2(aco_opcode::v_ashrrev_i32, bld.def(v1), Operand::c32(31u), src);
3169 mantissa = bld.vop2(aco_opcode::v_or_b32, bld.def(v1), Operand::c32(0x800000u), mantissa);
3170 mantissa = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand::c32(7u), mantissa);
3171 mantissa = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), Operand::zero(), mantissa);
3172 Temp new_exponent = bld.tmp(v1);
3173 Temp borrow =
3174 bld.vsub32(Definition(new_exponent), Operand::c32(63u), exponent, true).def(1).getTemp();
3175 if (ctx->program->gfx_level >= GFX8)
3176 mantissa = bld.vop3(aco_opcode::v_lshrrev_b64, bld.def(v2), new_exponent, mantissa);
3177 else
3178 mantissa = bld.vop3(aco_opcode::v_lshr_b64, bld.def(v2), mantissa, new_exponent);
3179 Temp saturate = bld.vop1(aco_opcode::v_bfrev_b32, bld.def(v1), Operand::c32(0xfffffffeu));
3180 Temp lower = bld.tmp(v1), upper = bld.tmp(v1);
3181 bld.pseudo(aco_opcode::p_split_vector, Definition(lower), Definition(upper), mantissa);
3182 lower = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), lower,
3183 Operand::c32(0xffffffffu), borrow);
3184 upper = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), upper, saturate, borrow);
3185 lower = bld.vop2(aco_opcode::v_xor_b32, bld.def(v1), sign, lower);
3186 upper = bld.vop2(aco_opcode::v_xor_b32, bld.def(v1), sign, upper);
3187 Temp new_lower = bld.tmp(v1);
3188 borrow = bld.vsub32(Definition(new_lower), lower, sign, true).def(1).getTemp();
3189 Temp new_upper = bld.vsub32(bld.def(v1), upper, sign, false, borrow);
3190 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), new_lower, new_upper);
3192 } else if (instr->src[0].src.ssa->bit_size <= 32 && dst.type() == RegType::sgpr) {
3193 if (src.type() == RegType::vgpr)
3194 src = bld.as_uniform(src);
3195 Temp exponent = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc), src,
3196 Operand::c32(0x80017u));
3197 exponent = bld.sop2(aco_opcode::s_sub_i32, bld.def(s1), bld.def(s1, scc), exponent,
3198 Operand::c32(126u));
3199 exponent = bld.sop2(aco_opcode::s_max_i32, bld.def(s1), bld.def(s1, scc), Operand::zero(),
3200 exponent);
3201 exponent = bld.sop2(aco_opcode::s_min_i32, bld.def(s1), bld.def(s1, scc),
3202 Operand::c32(64u), exponent);
3203 Temp mantissa = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc),
3204 Operand::c32(0x7fffffu), src);
3205 Temp sign =
3206 bld.sop2(aco_opcode::s_ashr_i32, bld.def(s1), bld.def(s1, scc), src, Operand::c32(31u));
3207 mantissa = bld.sop2(aco_opcode::s_or_b32, bld.def(s1), bld.def(s1, scc),
3208 Operand::c32(0x800000u), mantissa);
3209 mantissa = bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.def(s1, scc), mantissa,
3210 Operand::c32(7u));
3211 mantissa = bld.pseudo(aco_opcode::p_create_vector, bld.def(s2), Operand::zero(), mantissa);
3212 exponent = bld.sop2(aco_opcode::s_sub_u32, bld.def(s1), bld.def(s1, scc),
3213 Operand::c32(63u), exponent);
3214 mantissa =
3215 bld.sop2(aco_opcode::s_lshr_b64, bld.def(s2), bld.def(s1, scc), mantissa, exponent);
3216 Temp cond = bld.sopc(aco_opcode::s_cmp_eq_u32, bld.def(s1, scc), exponent,
3217 Operand::c32(0xffffffffu)); // exp >= 64
3218 Temp saturate = bld.sop1(aco_opcode::s_brev_b64, bld.def(s2), Operand::c32(0xfffffffeu));
3219 mantissa = bld.sop2(aco_opcode::s_cselect_b64, bld.def(s2), saturate, mantissa, cond);
3220 Temp lower = bld.tmp(s1), upper = bld.tmp(s1);
3221 bld.pseudo(aco_opcode::p_split_vector, Definition(lower), Definition(upper), mantissa);
3222 lower = bld.sop2(aco_opcode::s_xor_b32, bld.def(s1), bld.def(s1, scc), sign, lower);
3223 upper = bld.sop2(aco_opcode::s_xor_b32, bld.def(s1), bld.def(s1, scc), sign, upper);
3224 Temp borrow = bld.tmp(s1);
3225 lower =
3226 bld.sop2(aco_opcode::s_sub_u32, bld.def(s1), bld.scc(Definition(borrow)), lower, sign);
3227 upper = bld.sop2(aco_opcode::s_subb_u32, bld.def(s1), bld.def(s1, scc), upper, sign,
3228 bld.scc(borrow));
3229 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper);
3231 } else if (instr->src[0].src.ssa->bit_size == 64) {
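/* f64 -> 64-bit int without a native instruction: 0x3df00000 is 2^-32 and
 * 0xc1f00000 is -2^32 as the high dword of a double. upper = floor(trunc(x)
 * * 2^-32) and lower = fma(upper, -2^32, trunc(x)) split the value into two
 * halves that each fit a 32-bit conversion. */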
3232 Temp vec = bld.pseudo(aco_opcode::p_create_vector, bld.def(s2), Operand::zero(),
3233 Operand::c32(0x3df00000u));
3234 Temp trunc = emit_trunc_f64(ctx, bld, bld.def(v2), src);
3235 Temp mul = bld.vop3(aco_opcode::v_mul_f64, bld.def(v2), trunc, vec);
3236 vec = bld.pseudo(aco_opcode::p_create_vector, bld.def(s2), Operand::zero(),
3237 Operand::c32(0xc1f00000u));
3238 Temp floor = emit_floor_f64(ctx, bld, bld.def(v2), mul);
3239 Temp fma = bld.vop3(aco_opcode::v_fma_f64, bld.def(v2), floor, vec, trunc);
3240 Temp lower = bld.vop1(aco_opcode::v_cvt_u32_f64, bld.def(v1), fma);
3241 Temp upper = bld.vop1(aco_opcode::v_cvt_i32_f64, bld.def(v1), floor);
3242 if (dst.type() == RegType::sgpr) {
3243 lower = bld.as_uniform(lower);
3244 upper = bld.as_uniform(upper);
3245 }
3246 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper);
3248 } else {
3249 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
3250 }
3251 break;
3252 }
3253 case nir_op_f2u64: {
3254 Temp src = get_alu_src(ctx, instr->src[0]);
3255 if (instr->src[0].src.ssa->bit_size == 16)
3256 src = bld.vop1(aco_opcode::v_cvt_f32_f16, bld.def(v1), src);
3258 if (instr->src[0].src.ssa->bit_size <= 32 && dst.type() == RegType::vgpr) {
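/* Manual f32 -> u64: a too-large exponent fails exponent_in_range and
 * saturates the result to ~0ull below. Exponents below 24 take the "small"
 * path, where the mantissa is shifted right and fits in 32 bits; larger
 * exponents shift the 24-bit mantissa left into a 64-bit value. */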
3259 Temp exponent = bld.vop1(aco_opcode::v_frexp_exp_i32_f32, bld.def(v1), src);
3260 Temp exponent_in_range =
3261 bld.vopc(aco_opcode::v_cmp_ge_i32, bld.def(bld.lm), Operand::c32(64u), exponent);
3262 exponent = bld.vop2(aco_opcode::v_max_i32, bld.def(v1), Operand::zero(), exponent);
3263 Temp mantissa = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand::c32(0x7fffffu), src);
3264 mantissa = bld.vop2(aco_opcode::v_or_b32, bld.def(v1), Operand::c32(0x800000u), mantissa);
3265 Temp exponent_small = bld.vsub32(bld.def(v1), Operand::c32(24u), exponent);
3266 Temp small = bld.vop2(aco_opcode::v_lshrrev_b32, bld.def(v1), exponent_small, mantissa);
3267 mantissa = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), Operand::zero(), mantissa);
3268 Temp new_exponent = bld.tmp(v1);
3269 Temp cond_small =
3270 bld.vsub32(Definition(new_exponent), exponent, Operand::c32(24u), true).def(1).getTemp();
3271 if (ctx->program->gfx_level >= GFX8)
3272 mantissa = bld.vop3(aco_opcode::v_lshlrev_b64, bld.def(v2), new_exponent, mantissa);
3273 else
3274 mantissa = bld.vop3(aco_opcode::v_lshl_b64, bld.def(v2), mantissa, new_exponent);
3275 Temp lower = bld.tmp(v1), upper = bld.tmp(v1);
3276 bld.pseudo(aco_opcode::p_split_vector, Definition(lower), Definition(upper), mantissa);
3277 lower = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), lower, small, cond_small);
3278 upper = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), upper, Operand::zero(),
3279 cond_small);
3280 lower = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), Operand::c32(0xffffffffu), lower,
3281 exponent_in_range);
3282 upper = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), Operand::c32(0xffffffffu), upper,
3283 exponent_in_range);
3284 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper);
3286 } else if (instr->src[0].src.ssa->bit_size <= 32 && dst.type() == RegType::sgpr) {
3287 if (src.type() == RegType::vgpr)
3288 src = bld.as_uniform(src);
3289 Temp exponent = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc), src,
3290 Operand::c32(0x80017u));
3291 exponent = bld.sop2(aco_opcode::s_sub_i32, bld.def(s1), bld.def(s1, scc), exponent,
3292 Operand::c32(126u));
3293 exponent = bld.sop2(aco_opcode::s_max_i32, bld.def(s1), bld.def(s1, scc), Operand::zero(),
3294 exponent);
3295 Temp mantissa = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc),
3296 Operand::c32(0x7fffffu), src);
3297 mantissa = bld.sop2(aco_opcode::s_or_b32, bld.def(s1), bld.def(s1, scc),
3298 Operand::c32(0x800000u), mantissa);
3299 Temp exponent_small = bld.sop2(aco_opcode::s_sub_u32, bld.def(s1), bld.def(s1, scc),
3300 Operand::c32(24u), exponent);
3301 Temp small = bld.sop2(aco_opcode::s_lshr_b32, bld.def(s1), bld.def(s1, scc), mantissa,
3302 exponent_small);
3303 mantissa = bld.pseudo(aco_opcode::p_create_vector, bld.def(s2), Operand::zero(), mantissa);
3304 Temp exponent_large = bld.sop2(aco_opcode::s_sub_u32, bld.def(s1), bld.def(s1, scc),
3305 exponent, Operand::c32(24u));
3306 mantissa = bld.sop2(aco_opcode::s_lshl_b64, bld.def(s2), bld.def(s1, scc), mantissa,
3307 exponent_large);
3308 Temp cond =
3309 bld.sopc(aco_opcode::s_cmp_ge_i32, bld.def(s1, scc), Operand::c32(64u), exponent);
3310 mantissa = bld.sop2(aco_opcode::s_cselect_b64, bld.def(s2), mantissa,
3311 Operand::c32(0xffffffffu), cond);
3312 Temp lower = bld.tmp(s1), upper = bld.tmp(s1);
3313 bld.pseudo(aco_opcode::p_split_vector, Definition(lower), Definition(upper), mantissa);
3314 Temp cond_small =
3315 bld.sopc(aco_opcode::s_cmp_le_i32, bld.def(s1, scc), exponent, Operand::c32(24u));
3316 lower = bld.sop2(aco_opcode::s_cselect_b32, bld.def(s1), small, lower, cond_small);
3317 upper =
3318 bld.sop2(aco_opcode::s_cselect_b32, bld.def(s1), Operand::zero(), upper, cond_small);
3319 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper);
3321 } else if (instr->src[0].src.ssa->bit_size == 64) {
3322 Temp vec = bld.pseudo(aco_opcode::p_create_vector, bld.def(s2), Operand::zero(),
3323 Operand::c32(0x3df00000u));
3324 Temp trunc = emit_trunc_f64(ctx, bld, bld.def(v2), src);
3325 Temp mul = bld.vop3(aco_opcode::v_mul_f64, bld.def(v2), trunc, vec);
3326 vec = bld.pseudo(aco_opcode::p_create_vector, bld.def(s2), Operand::zero(),
3327 Operand::c32(0xc1f00000u));
3328 Temp floor = emit_floor_f64(ctx, bld, bld.def(v2), mul);
3329 Temp fma = bld.vop3(aco_opcode::v_fma_f64, bld.def(v2), floor, vec, trunc);
3330 Temp lower = bld.vop1(aco_opcode::v_cvt_u32_f64, bld.def(v1), fma);
3331 Temp upper = bld.vop1(aco_opcode::v_cvt_u32_f64, bld.def(v1), floor);
3332 if (dst.type() == RegType::sgpr) {
3333 lower = bld.as_uniform(lower);
3334 upper = bld.as_uniform(upper);
3335 }
3336 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper);
3338 } else {
3339 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
3340 }
3341 break;
3342 }
3343 case nir_op_b2f16: {
3344 Temp src = get_alu_src(ctx, instr->src[0]);
3345 assert(src.regClass() == bld.lm);
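/* Booleans are lane masks (bld.lm): an SGPR destination multiplies the
 * scalar condition by 1.0 in half precision (0x3c00), while a VGPR
 * destination selects 0.0 or 1.0 per lane with v_cndmask_b32. */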
3347 if (dst.regClass() == s1) {
3348 src = bool_to_scalar_condition(ctx, src);
3349 bld.sop2(aco_opcode::s_mul_i32, Definition(dst), Operand::c32(0x3c00u), src);
3350 } else if (dst.regClass() == v2b) {
3351 Temp one = bld.copy(bld.def(v1), Operand::c32(0x3c00u));
3352 bld.vop2(aco_opcode::v_cndmask_b32, Definition(dst), Operand::zero(), one, src);
3353 } else {
3354 unreachable("Wrong destination register class for nir_op_b2f16.");
3355 }
3356 break;
3357 }
3358 case nir_op_b2f32: {
3359 Temp src = get_alu_src(ctx, instr->src[0]);
3360 assert(src.regClass() == bld.lm);
3362 if (dst.regClass() == s1) {
3363 src = bool_to_scalar_condition(ctx, src);
3364 bld.sop2(aco_opcode::s_mul_i32, Definition(dst), Operand::c32(0x3f800000u), src);
3365 } else if (dst.regClass() == v1) {
3366 bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(dst), Operand::zero(),
3367 Operand::c32(0x3f800000u), src);
3368 } else {
3369 unreachable("Wrong destination register class for nir_op_b2f32.");
3370 }
3371 break;
3372 }
3373 case nir_op_b2f64: {
3374 Temp src = get_alu_src(ctx, instr->src[0]);
3375 assert(src.regClass() == bld.lm);
3377 if (dst.regClass() == s2) {
3378 src = bool_to_scalar_condition(ctx, src);
3379 bld.sop2(aco_opcode::s_cselect_b64, Definition(dst), Operand::c32(0x3f800000u),
3380 Operand::zero(), bld.scc(src));
3381 } else if (dst.regClass() == v2) {
3382 Temp one = bld.copy(bld.def(v1), Operand::c32(0x3FF00000u));
3383 Temp upper =
3384 bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), Operand::zero(), one, src);
3385 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), Operand::zero(), upper);
3386 } else {
3387 unreachable("Wrong destination register class for nir_op_b2f64.");
3388 }
3389 break;
3390 }
3391 case nir_op_i2i8:
3392 case nir_op_i2i16:
3393 case nir_op_i2i32:
3394 case nir_op_i2i64: {
3395 if (dst.type() == RegType::sgpr && instr->src[0].src.ssa->bit_size < 32) {
3396 /* no need to do the extract in get_alu_src() */
3397 sgpr_extract_mode mode = instr->dest.dest.ssa.bit_size > instr->src[0].src.ssa->bit_size
3398 ? sgpr_extract_sext
3399 : sgpr_extract_undef;
3400 extract_8_16_bit_sgpr_element(ctx, dst, &instr->src[0], mode);
3401 } else {
3402 const unsigned input_bitsize = instr->src[0].src.ssa->bit_size;
3403 const unsigned output_bitsize = instr->dest.dest.ssa.bit_size;
3404 convert_int(ctx, bld, get_alu_src(ctx, instr->src[0]), input_bitsize, output_bitsize,
3405 output_bitsize > input_bitsize, dst);
3406 }
3407 break;
3408 }
3409 case nir_op_u2u8:
3410 case nir_op_u2u16:
3411 case nir_op_u2u32:
3412 case nir_op_u2u64: {
3413 if (dst.type() == RegType::sgpr && instr->src[0].src.ssa->bit_size < 32) {
3414 /* no need to do the extract in get_alu_src() */
3415 sgpr_extract_mode mode = instr->dest.dest.ssa.bit_size > instr->src[0].src.ssa->bit_size
3416 ? sgpr_extract_zext
3417 : sgpr_extract_undef;
3418 extract_8_16_bit_sgpr_element(ctx, dst, &instr->src[0], mode);
3419 } else {
3420 convert_int(ctx, bld, get_alu_src(ctx, instr->src[0]), instr->src[0].src.ssa->bit_size,
3421 instr->dest.dest.ssa.bit_size, false, dst);
3422 }
3423 break;
3424 }
3425 case nir_op_b2b32:
3426 case nir_op_b2i8:
3427 case nir_op_b2i16:
3428 case nir_op_b2i32:
3429 case nir_op_b2i64: {
3430 Temp src = get_alu_src(ctx, instr->src[0]);
3431 assert(src.regClass() == bld.lm);
3433 Temp tmp = dst.bytes() == 8 ? bld.tmp(RegClass::get(dst.type(), 4)) : dst;
3434 if (tmp.regClass() == s1) {
3435 bool_to_scalar_condition(ctx, src, tmp);
3436 } else if (tmp.type() == RegType::vgpr) {
3437 bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(tmp), Operand::zero(), Operand::c32(1u),
3438 src);
3439 } else {
3440 unreachable("Invalid register class for b2i32");
3441 }
3443 if (tmp != dst)
3444 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), tmp, Operand::zero());
3445 break;
3446 }
3447 case nir_op_b2b1:
3448 case nir_op_i2b1: {
3449 Temp src = get_alu_src(ctx, instr->src[0]);
3450 assert(dst.regClass() == bld.lm);
3452 if (src.type() == RegType::vgpr) {
3453 assert(src.regClass() == v1 || src.regClass() == v2);
3454 assert(dst.regClass() == bld.lm);
3455 bld.vopc(src.size() == 2 ? aco_opcode::v_cmp_lg_u64 : aco_opcode::v_cmp_lg_u32,
3456 Definition(dst), Operand::zero(), src);
3458 assert(src.regClass() == s1 || src.regClass() == s2);
3459 Temp tmp;
3460 if (src.regClass() == s2 && ctx->program->gfx_level <= GFX7) {
3461 tmp =
3462 bld.sop2(aco_opcode::s_or_b64, bld.def(s2), bld.def(s1, scc), Operand::zero(), src)
3463 .def(1)
3464 .getTemp();
3465 } else {
3466 tmp = bld.sopc(src.size() == 2 ? aco_opcode::s_cmp_lg_u64 : aco_opcode::s_cmp_lg_u32,
3467 bld.scc(bld.def(s1)), Operand::zero(), src);
3468 }
3469 bool_to_vector_condition(ctx, tmp, dst);
3470 }
3471 break;
3472 }
3473 case nir_op_unpack_64_2x32:
3474 case nir_op_unpack_32_2x16:
3475 case nir_op_unpack_64_4x16:
3476 bld.copy(Definition(dst), get_alu_src(ctx, instr->src[0]));
3477 emit_split_vector(ctx, dst, instr->op == nir_op_unpack_64_4x16 ? 4 : 2);
3478 break;
3479 case nir_op_pack_64_2x32_split: {
3480 Temp src0 = get_alu_src(ctx, instr->src[0]);
3481 Temp src1 = get_alu_src(ctx, instr->src[1]);
3483 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), src0, src1);
3484 break;
3485 }
3486 case nir_op_unpack_64_2x32_split_x:
3487 bld.pseudo(aco_opcode::p_split_vector, Definition(dst), bld.def(dst.regClass()),
3488 get_alu_src(ctx, instr->src[0]));
3489 break;
3490 case nir_op_unpack_64_2x32_split_y:
3491 bld.pseudo(aco_opcode::p_split_vector, bld.def(dst.regClass()), Definition(dst),
3492 get_alu_src(ctx, instr->src[0]));
3493 break;
3494 case nir_op_unpack_32_2x16_split_x:
3495 if (dst.type() == RegType::vgpr) {
3496 bld.pseudo(aco_opcode::p_split_vector, Definition(dst), bld.def(dst.regClass()),
3497 get_alu_src(ctx, instr->src[0]));
3498 } else {
3499 bld.copy(Definition(dst), get_alu_src(ctx, instr->src[0]));
3500 }
3501 break;
3502 case nir_op_unpack_32_2x16_split_y:
3503 if (dst.type() == RegType::vgpr) {
3504 bld.pseudo(aco_opcode::p_split_vector, bld.def(dst.regClass()), Definition(dst),
3505 get_alu_src(ctx, instr->src[0]));
3506 } else {
3507 bld.pseudo(aco_opcode::p_extract, Definition(dst), bld.def(s1, scc),
3508 get_alu_src(ctx, instr->src[0]), Operand::c32(1u), Operand::c32(16u),
3509 Operand::zero());
3510 }
3511 break;
3512 case nir_op_pack_32_2x16_split: {
3513 Temp src0 = get_alu_src(ctx, instr->src[0]);
3514 Temp src1 = get_alu_src(ctx, instr->src[1]);
3515 if (dst.regClass() == v1) {
3516 src0 = emit_extract_vector(ctx, src0, 0, v2b);
3517 src1 = emit_extract_vector(ctx, src1, 0, v2b);
3518 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), src0, src1);
3519 } else {
3520 src0 = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), src0,
3521 Operand::c32(0xFFFFu));
3522 src1 = bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.def(s1, scc), src1,
3523 Operand::c32(16u));
3524 bld.sop2(aco_opcode::s_or_b32, Definition(dst), bld.def(s1, scc), src0, src1);
3525 }
3526 break;
3527 }
3528 case nir_op_pack_32_4x8: bld.copy(Definition(dst), get_alu_src(ctx, instr->src[0], 4)); break;
3529 case nir_op_pack_half_2x16_split: {
3530 if (dst.regClass() == v1) {
3531 if (ctx->program->gfx_level == GFX8 || ctx->program->gfx_level == GFX9)
3532 emit_vop3a_instruction(ctx, instr, aco_opcode::v_cvt_pkrtz_f16_f32_e64, dst);
3533 else
3534 emit_vop2_instruction(ctx, instr, aco_opcode::v_cvt_pkrtz_f16_f32, dst, false);
3535 } else {
3536 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
3537 }
3538 break;
3539 }
3540 case nir_op_pack_unorm_2x16:
3541 case nir_op_pack_snorm_2x16: {
3542 Temp src = get_alu_src(ctx, instr->src[0], 2);
3543 Temp src0 = emit_extract_vector(ctx, src, 0, v1);
3544 Temp src1 = emit_extract_vector(ctx, src, 1, v1);
3545 aco_opcode opcode = instr->op == nir_op_pack_unorm_2x16 ? aco_opcode::v_cvt_pknorm_u16_f32
3546 : aco_opcode::v_cvt_pknorm_i16_f32;
3547 bld.vop3(opcode, Definition(dst), src0, src1);
3548 break;
3549 }
3550 case nir_op_pack_uint_2x16:
3551 case nir_op_pack_sint_2x16: {
3552 Temp src = get_alu_src(ctx, instr->src[0], 2);
3553 Temp src0 = emit_extract_vector(ctx, src, 0, v1);
3554 Temp src1 = emit_extract_vector(ctx, src, 1, v1);
3555 aco_opcode opcode = instr->op == nir_op_pack_uint_2x16 ? aco_opcode::v_cvt_pk_u16_u32
3556 : aco_opcode::v_cvt_pk_i16_i32;
3557 bld.vop3(opcode, Definition(dst), src0, src1);
3558 break;
3559 }
3560 case nir_op_unpack_half_2x16_split_x_flush_to_zero:
3561 case nir_op_unpack_half_2x16_split_x: {
3562 Temp src = get_alu_src(ctx, instr->src[0]);
3563 if (src.regClass() == v1)
3564 src = bld.pseudo(aco_opcode::p_split_vector, bld.def(v2b), bld.def(v2b), src);
3565 if (dst.regClass() == v1) {
3566 assert(ctx->block->fp_mode.must_flush_denorms16_64 ==
3567 (instr->op == nir_op_unpack_half_2x16_split_x_flush_to_zero));
3568 bld.vop1(aco_opcode::v_cvt_f32_f16, Definition(dst), src);
3569 } else {
3570 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
3571 }
3572 break;
3573 }
3574 case nir_op_unpack_half_2x16_split_y_flush_to_zero:
3575 case nir_op_unpack_half_2x16_split_y: {
3576 Temp src = get_alu_src(ctx, instr->src[0]);
3577 if (src.regClass() == s1)
3578 src = bld.pseudo(aco_opcode::p_extract, bld.def(s1), bld.def(s1, scc), src,
3579 Operand::c32(1u), Operand::c32(16u), Operand::zero());
3580 else
3581 src =
3582 bld.pseudo(aco_opcode::p_split_vector, bld.def(v2b), bld.def(v2b), src).def(1).getTemp();
3583 if (dst.regClass() == v1) {
3584 assert(ctx->block->fp_mode.must_flush_denorms16_64 ==
3585 (instr->op == nir_op_unpack_half_2x16_split_y_flush_to_zero));
3586 bld.vop1(aco_opcode::v_cvt_f32_f16, Definition(dst), src);
3587 } else {
3588 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
3589 }
3590 break;
3591 }
3592 case nir_op_sad_u8x4: {
3593 assert(dst.regClass() == v1);
3594 emit_vop3a_instruction(ctx, instr, aco_opcode::v_sad_u8, dst, false, 3u, false);
3595 break;
3596 }
3597 case nir_op_fquantize2f16: {
3598 Temp src = get_alu_src(ctx, instr->src[0]);
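/* Quantize to f16 precision by converting down and back up; the compares
 * below flush values that become f16 denormals to zero, keeping the sign of
 * zero only when the mode requires signed zeros to be preserved. */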
3599 Temp f16 = bld.vop1(aco_opcode::v_cvt_f16_f32, bld.def(v2b), src);
3600 Temp f32, cmp_res;
3602 if (ctx->program->gfx_level >= GFX8) {
3603 Temp mask = bld.copy(
3604 bld.def(s1), Operand::c32(0x36Fu)); /* value is NOT a negative/positive denormal */
3605 cmp_res = bld.vopc_e64(aco_opcode::v_cmp_class_f16, bld.def(bld.lm), f16, mask);
3606 f32 = bld.vop1(aco_opcode::v_cvt_f32_f16, bld.def(v1), f16);
3607 } else {
3608 /* 0x38800000 is the smallest normal half-float value (2^-14) as a 32-bit float,
3609 * so compare the result and flush to 0 if it's smaller.
3610 */
3611 f32 = bld.vop1(aco_opcode::v_cvt_f32_f16, bld.def(v1), f16);
3612 Temp smallest = bld.copy(bld.def(s1), Operand::c32(0x38800000u));
3613 Instruction* tmp0 = bld.vopc_e64(aco_opcode::v_cmp_lt_f32, bld.def(bld.lm), f32, smallest);
3614 tmp0->vop3().abs[0] = true;
3615 Temp tmp1 = bld.vopc(aco_opcode::v_cmp_lg_f32, bld.def(bld.lm), Operand::zero(), f32);
3616 cmp_res = bld.sop2(aco_opcode::s_nand_b64, bld.def(s2), bld.def(s1, scc),
3617 tmp0->definitions[0].getTemp(), tmp1);
3618 }
3620 if (ctx->block->fp_mode.preserve_signed_zero_inf_nan32) {
3621 Temp copysign_0 =
3622 bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), Operand::zero(), as_vgpr(ctx, src));
3623 bld.vop2(aco_opcode::v_cndmask_b32, Definition(dst), copysign_0, f32, cmp_res);
3624 } else {
3625 bld.vop2(aco_opcode::v_cndmask_b32, Definition(dst), Operand::zero(), f32, cmp_res);
3626 }
3627 break;
3628 }
3629 case nir_op_bfm: {
3630 Temp bits = get_alu_src(ctx, instr->src[0]);
3631 Temp offset = get_alu_src(ctx, instr->src[1]);
3633 if (dst.regClass() == s1) {
3634 bld.sop2(aco_opcode::s_bfm_b32, Definition(dst), bits, offset);
3635 } else if (dst.regClass() == v1) {
3636 bld.vop3(aco_opcode::v_bfm_b32, Definition(dst), bits, offset);
3638 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
3639 }
3640 break;
3641 }
3642 case nir_op_bitfield_select: {
3644 /* dst = (insert & bitmask) | (base & ~bitmask) */
3645 if (dst.regClass() == s1) {
3646 Temp bitmask = get_alu_src(ctx, instr->src[0]);
3647 Temp insert = get_alu_src(ctx, instr->src[1]);
3648 Temp base = get_alu_src(ctx, instr->src[2]);
3649 aco_ptr<Instruction> sop2;
3650 nir_const_value* const_bitmask = nir_src_as_const_value(instr->src[0].src);
3651 nir_const_value* const_insert = nir_src_as_const_value(instr->src[1].src);
3652 Operand lhs;
3653 if (const_insert && const_bitmask) {
3654 lhs = Operand::c32(const_insert->u32 & const_bitmask->u32);
3655 } else {
3656 insert =
3657 bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), insert, bitmask);
3658 lhs = Operand(insert);
3659 }
3661 Operand rhs;
3662 nir_const_value* const_base = nir_src_as_const_value(instr->src[2].src);
3663 if (const_base && const_bitmask) {
3664 rhs = Operand::c32(const_base->u32 & ~const_bitmask->u32);
3665 } else {
3666 base = bld.sop2(aco_opcode::s_andn2_b32, bld.def(s1), bld.def(s1, scc), base, bitmask);
3667 rhs = Operand(base);
3668 }
3670 bld.sop2(aco_opcode::s_or_b32, Definition(dst), bld.def(s1, scc), rhs, lhs);
3672 } else if (dst.regClass() == v1) {
3673 emit_vop3a_instruction(ctx, instr, aco_opcode::v_bfi_b32, dst, false, 3);
3674 } else {
3675 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
3676 }
3677 break;
3678 }
3679 case nir_op_ubfe:
3680 case nir_op_ibfe: {
3681 if (dst.bytes() != 4)
3682 unreachable("Unsupported BFE bit size");
3684 if (dst.type() == RegType::sgpr) {
3685 Temp base = get_alu_src(ctx, instr->src[0]);
3687 nir_const_value* const_offset = nir_src_as_const_value(instr->src[1].src);
3688 nir_const_value* const_bits = nir_src_as_const_value(instr->src[2].src);
3689 if (const_offset && const_bits) {
3690 uint32_t extract = (const_bits->u32 << 16) | (const_offset->u32 & 0x1f);
3691 aco_opcode opcode =
3692 instr->op == nir_op_ubfe ? aco_opcode::s_bfe_u32 : aco_opcode::s_bfe_i32;
3693 bld.sop2(opcode, Definition(dst), bld.def(s1, scc), base, Operand::c32(extract));
3694 break;
3695 }
3697 Temp offset = get_alu_src(ctx, instr->src[1]);
3698 Temp bits = get_alu_src(ctx, instr->src[2]);
3699 if (instr->op == nir_op_ubfe) {
3700 Temp mask = bld.sop2(aco_opcode::s_bfm_b32, bld.def(s1), bits, offset);
3701 Temp masked =
3702 bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), base, mask);
3703 bld.sop2(aco_opcode::s_lshr_b32, Definition(dst), bld.def(s1, scc), masked, offset);
3704 } else {
3705 Operand bits_op = const_bits ? Operand::c32(const_bits->u32 << 16)
3706 : bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1),
3707 bld.def(s1, scc), bits, Operand::c32(16u));
3708 Operand offset_op = const_offset
3709 ? Operand::c32(const_offset->u32 & 0x1fu)
3710 : bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc),
3711 offset, Operand::c32(0x1fu));
3713 Temp extract =
3714 bld.sop2(aco_opcode::s_or_b32, bld.def(s1), bld.def(s1, scc), bits_op, offset_op);
3715 bld.sop2(aco_opcode::s_bfe_i32, Definition(dst), bld.def(s1, scc), base, extract);
3716 }
3717 } else {
3719 aco_opcode opcode =
3720 instr->op == nir_op_ubfe ? aco_opcode::v_bfe_u32 : aco_opcode::v_bfe_i32;
3721 emit_vop3a_instruction(ctx, instr, opcode, dst, false, 3);
3722 }
3723 break;
3724 }
3725 case nir_op_extract_u8:
3726 case nir_op_extract_i8:
3727 case nir_op_extract_u16:
3728 case nir_op_extract_i16: {
3729 bool is_signed = instr->op == nir_op_extract_i16 || instr->op == nir_op_extract_i8;
3730 unsigned comp = instr->op == nir_op_extract_u8 || instr->op == nir_op_extract_i8 ? 4 : 2;
3731 uint32_t bits = comp == 4 ? 8 : 16;
3732 unsigned index = nir_src_as_uint(instr->src[1].src);
3733 if (bits >= instr->dest.dest.ssa.bit_size || index * bits >= instr->dest.dest.ssa.bit_size) {
3734 assert(index == 0);
3735 bld.copy(Definition(dst), get_alu_src(ctx, instr->src[0]));
3736 } else if (dst.regClass() == s1 && instr->dest.dest.ssa.bit_size == 16) {
3737 Temp vec = get_ssa_temp(ctx, instr->src[0].src.ssa);
3738 unsigned swizzle = instr->src[0].swizzle[0];
3739 if (vec.size() > 1) {
3740 vec = emit_extract_vector(ctx, vec, swizzle / 2, s1);
3741 swizzle = swizzle & 1;
3742 }
3743 index += swizzle * instr->dest.dest.ssa.bit_size / bits;
3744 bld.pseudo(aco_opcode::p_extract, Definition(dst), bld.def(s1, scc), Operand(vec),
3745 Operand::c32(index), Operand::c32(bits), Operand::c32(is_signed));
3746 } else {
3747 Temp src = get_alu_src(ctx, instr->src[0]);
3748 Definition def(dst);
3749 if (dst.bytes() == 8) {
3750 src = emit_extract_vector(ctx, src, index / comp, RegClass(src.type(), 1));
3751 index %= comp;
3752 def = bld.def(src.type(), 1);
3753 }
3754 assert(def.bytes() <= 4);
3755 if (def.regClass() == s1) {
3756 bld.pseudo(aco_opcode::p_extract, def, bld.def(s1, scc), Operand(src),
3757 Operand::c32(index), Operand::c32(bits), Operand::c32(is_signed));
3758 } else {
3759 src = emit_extract_vector(ctx, src, 0, def.regClass());
3760 bld.pseudo(aco_opcode::p_extract, def, Operand(src), Operand::c32(index),
3761 Operand::c32(bits), Operand::c32(is_signed));
3762 }
3763 if (dst.size() == 2)
3764 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), def.getTemp(),
3765 Operand::zero());
3766 }
3767 break;
3768 }
3769 case nir_op_insert_u8:
3770 case nir_op_insert_u16: {
3771 unsigned comp = instr->op == nir_op_insert_u8 ? 4 : 2;
3772 uint32_t bits = comp == 4 ? 8 : 16;
3773 unsigned index = nir_src_as_uint(instr->src[1].src);
3774 if (bits >= instr->dest.dest.ssa.bit_size || index * bits >= instr->dest.dest.ssa.bit_size) {
3775 assert(index == 0);
3776 bld.copy(Definition(dst), get_alu_src(ctx, instr->src[0]));
3777 } else {
3778 Temp src = get_alu_src(ctx, instr->src[0]);
3779 Definition def(dst);
3780 bool swap = false;
3781 if (dst.bytes() == 8) {
3782 src = emit_extract_vector(ctx, src, 0u, RegClass(src.type(), 1));
3783 swap = index >= comp;
3784 index %= comp;
3785 def = bld.def(src.type(), 1);
3786 }
3787 if (def.regClass() == s1) {
3788 bld.pseudo(aco_opcode::p_insert, def, bld.def(s1, scc), Operand(src),
3789 Operand::c32(index), Operand::c32(bits));
3790 } else {
3791 src = emit_extract_vector(ctx, src, 0, def.regClass());
3792 bld.pseudo(aco_opcode::p_insert, def, Operand(src), Operand::c32(index),
3793 Operand::c32(bits));
3794 }
3795 if (dst.size() == 2 && swap)
3796 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), Operand::zero(),
3797 def.getTemp());
3798 else if (dst.size() == 2)
3799 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), def.getTemp(),
3800 Operand::zero());
3801 }
3802 break;
3803 }
3804 case nir_op_bit_count: {
3805 Temp src = get_alu_src(ctx, instr->src[0]);
3806 if (src.regClass() == s1) {
3807 bld.sop1(aco_opcode::s_bcnt1_i32_b32, Definition(dst), bld.def(s1, scc), src);
3808 } else if (src.regClass() == v1) {
3809 bld.vop3(aco_opcode::v_bcnt_u32_b32, Definition(dst), src, Operand::zero());
3810 } else if (src.regClass() == v2) {
3811 bld.vop3(aco_opcode::v_bcnt_u32_b32, Definition(dst), emit_extract_vector(ctx, src, 1, v1),
3812 bld.vop3(aco_opcode::v_bcnt_u32_b32, bld.def(v1),
3813 emit_extract_vector(ctx, src, 0, v1), Operand::zero()));
3814 } else if (src.regClass() == s2) {
3815 bld.sop1(aco_opcode::s_bcnt1_i32_b64, Definition(dst), bld.def(s1, scc), src);
3816 } else {
3817 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
3818 }
3819 break;
3820 }
3821 case nir_op_flt: {
3822 emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_lt_f16, aco_opcode::v_cmp_lt_f32,
3823 aco_opcode::v_cmp_lt_f64);
3824 break;
3825 }
3826 case nir_op_fge: {
3827 emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_ge_f16, aco_opcode::v_cmp_ge_f32,
3828 aco_opcode::v_cmp_ge_f64);
3829 break;
3830 }
3831 case nir_op_feq: {
3832 emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_eq_f16, aco_opcode::v_cmp_eq_f32,
3833 aco_opcode::v_cmp_eq_f64);
3834 break;
3835 }
3836 case nir_op_fneu: {
3837 emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_neq_f16, aco_opcode::v_cmp_neq_f32,
3838 aco_opcode::v_cmp_neq_f64);
3839 break;
3840 }
3841 case nir_op_ilt: {
3842 emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_lt_i16, aco_opcode::v_cmp_lt_i32,
3843 aco_opcode::v_cmp_lt_i64, aco_opcode::s_cmp_lt_i32);
3844 break;
3845 }
3846 case nir_op_ige: {
3847 emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_ge_i16, aco_opcode::v_cmp_ge_i32,
3848 aco_opcode::v_cmp_ge_i64, aco_opcode::s_cmp_ge_i32);
3849 break;
3850 }
3851 case nir_op_ieq: {
3852 if (instr->src[0].src.ssa->bit_size == 1)
3853 emit_boolean_logic(ctx, instr, Builder::s_xnor, dst);
3854 else
3855 emit_comparison(
3856 ctx, instr, dst, aco_opcode::v_cmp_eq_i16, aco_opcode::v_cmp_eq_i32,
3857 aco_opcode::v_cmp_eq_i64, aco_opcode::s_cmp_eq_i32,
3858 ctx->program->gfx_level >= GFX8 ? aco_opcode::s_cmp_eq_u64 : aco_opcode::num_opcodes);
3859 break;
3860 }
3861 case nir_op_ine: {
3862 if (instr->src[0].src.ssa->bit_size == 1)
3863 emit_boolean_logic(ctx, instr, Builder::s_xor, dst);
3864 else
3865 emit_comparison(
3866 ctx, instr, dst, aco_opcode::v_cmp_lg_i16, aco_opcode::v_cmp_lg_i32,
3867 aco_opcode::v_cmp_lg_i64, aco_opcode::s_cmp_lg_i32,
3868 ctx->program->gfx_level >= GFX8 ? aco_opcode::s_cmp_lg_u64 : aco_opcode::num_opcodes);
3869 break;
3870 }
3871 case nir_op_ult: {
3872 emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_lt_u16, aco_opcode::v_cmp_lt_u32,
3873 aco_opcode::v_cmp_lt_u64, aco_opcode::s_cmp_lt_u32);
3874 break;
3875 }
3876 case nir_op_uge: {
3877 emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_ge_u16, aco_opcode::v_cmp_ge_u32,
3878 aco_opcode::v_cmp_ge_u64, aco_opcode::s_cmp_ge_u32);
3879 break;
3880 }
3881 case nir_op_fddx:
3882 case nir_op_fddy:
3883 case nir_op_fddx_fine:
3884 case nir_op_fddy_fine:
3885 case nir_op_fddx_coarse:
3886 case nir_op_fddy_coarse: {
3887 if (!nir_src_is_divergent(instr->src[0].src)) {
3888 /* Source is the same in all lanes, so the derivative is zero.
3889 * This also avoids emitting invalid IR.
3890 */
3891 bld.copy(Definition(dst), Operand::zero());
3892 break;
3893 }
3895 Temp src = as_vgpr(ctx, get_alu_src(ctx, instr->src[0]));
3896 uint16_t dpp_ctrl1, dpp_ctrl2;
3897 if (instr->op == nir_op_fddx_fine) {
3898 dpp_ctrl1 = dpp_quad_perm(0, 0, 2, 2);
3899 dpp_ctrl2 = dpp_quad_perm(1, 1, 3, 3);
3900 } else if (instr->op == nir_op_fddy_fine) {
3901 dpp_ctrl1 = dpp_quad_perm(0, 1, 0, 1);
3902 dpp_ctrl2 = dpp_quad_perm(2, 3, 2, 3);
3903 } else {
3904 dpp_ctrl1 = dpp_quad_perm(0, 0, 0, 0);
3905 if (instr->op == nir_op_fddx || instr->op == nir_op_fddx_coarse)
3906 dpp_ctrl2 = dpp_quad_perm(1, 1, 1, 1);
3907 else
3908 dpp_ctrl2 = dpp_quad_perm(2, 2, 2, 2);
3909 }
3911 Temp tmp;
3912 if (ctx->program->gfx_level >= GFX8) {
3913 Temp tl = bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), src, dpp_ctrl1);
3914 tmp = bld.vop2_dpp(aco_opcode::v_sub_f32, bld.def(v1), src, tl, dpp_ctrl2);
3915 } else {
3916 Temp tl = bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), src, (1 << 15) | dpp_ctrl1);
3917 Temp tr = bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), src, (1 << 15) | dpp_ctrl2);
3918 tmp = bld.vop2(aco_opcode::v_sub_f32, bld.def(v1), tr, tl);
3919 }
3920 emit_wqm(bld, tmp, dst, true);
3921 break;
3922 }
3923 default: isel_err(&instr->instr, "Unknown NIR ALU instr");
3924 }
3925 }
3927 void
3928 visit_load_const(isel_context* ctx, nir_load_const_instr* instr)
3929 {
3930 Temp dst = get_ssa_temp(ctx, &instr->def);
3932 // TODO: we really want to have the resulting type as this would allow for 64-bit literals
3933 // which get truncated: the lsb if double and the msb if int
3934 // for now, we only use s_mov_b64 with 64bit inline constants
3935 assert(instr->def.num_components == 1 && "Vector load_const should be lowered to scalar.");
3936 assert(dst.type() == RegType::sgpr);
3938 Builder bld(ctx->program, ctx->block);
3940 if (instr->def.bit_size == 1) {
3941 assert(dst.regClass() == bld.lm);
3942 int val = instr->value[0].b ? -1 : 0;
3943 Operand op = bld.lm.size() == 1 ? Operand::c32(val) : Operand::c64(val);
3944 bld.copy(Definition(dst), op);
3945 } else if (instr->def.bit_size == 8) {
3946 bld.copy(Definition(dst), Operand::c32(instr->value[0].u8));
3947 } else if (instr->def.bit_size == 16) {
3948 /* sign-extend to use s_movk_i32 instead of a literal */
3949 bld.copy(Definition(dst), Operand::c32(instr->value[0].i16));
3950 } else if (dst.size() == 1) {
3951 bld.copy(Definition(dst), Operand::c32(instr->value[0].u32));
3952 } else {
3953 assert(dst.size() != 1);
3954 aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(
3955 aco_opcode::p_create_vector, Format::PSEUDO, dst.size(), 1)};
3956 if (instr->def.bit_size == 64)
3957 for (unsigned i = 0; i < dst.size(); i++)
3958 vec->operands[i] = Operand::c32(instr->value[0].u64 >> i * 32);
3959 else
3960 for (unsigned i = 0; i < dst.size(); i++)
3961 vec->operands[i] = Operand::c32(instr->value[i].u32);
3963 vec->definitions[0] = Definition(dst);
3964 ctx->block->instructions.emplace_back(std::move(vec));
3965 }
3966 }
3968 bool
3969 can_use_byte_align_for_global_load(unsigned num_components, unsigned component_size,
3970 unsigned align_, bool support_12_byte)
3971 {
3972 /* Only use byte-align for 8/16-bit loads if we won't have to increase its size and won't have
3973 * to use unsupported load sizes.
3974 */
3975 assert(util_is_power_of_two_nonzero(align_));
3977 assert(component_size < 4);
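/* Example: a 4-byte load with align_ == 2 would have to grow to
 * align(4 + (4 - 2), 4) == 8 bytes, which differs from align(4, 4) == 4, so
 * this returns false and the caller falls back to a wider aligned load. */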
3978 unsigned load_size = num_components * component_size;
3979 int new_size = align(load_size + (4 - align_), 4);
3980 return new_size == align(load_size, 4) && (new_size != 12 || support_12_byte);
3981 }
3985 struct LoadEmitInfo {
3986 Operand offset;
3987 Temp dst;
3988 unsigned num_components;
3989 unsigned component_size;
3990 Temp resource = Temp(0, s1); /* buffer resource or base 64-bit address */
3991 unsigned component_stride = 0;
3992 unsigned const_offset = 0;
3993 unsigned align_mul = 0;
3994 unsigned align_offset = 0;
3995 bool glc = false;
3996 bool slc = false;
3998 unsigned swizzle_component_size = 0;
3999 memory_sync_info sync;
4000 Temp soffset = Temp(0, s1);
4001 };
4003 struct EmitLoadParameters {
4004 using Callback = Temp (*)(Builder& bld, const LoadEmitInfo& info, Temp offset,
4005 unsigned bytes_needed, unsigned align, unsigned const_offset,
4006 Temp dst_hint);
4008 Callback callback;
4009 bool byte_align_loads;
4010 bool supports_8bit_16bit_loads;
4011 unsigned max_const_offset_plus_one;
4012 };
4014 void
4015 emit_load(isel_context* ctx, Builder& bld, const LoadEmitInfo& info,
4016 const EmitLoadParameters& params)
4017 {
4018 unsigned load_size = info.num_components * info.component_size;
4019 unsigned component_size = info.component_size;
4021 unsigned num_vals = 0;
4022 Temp* const vals = (Temp*)alloca(info.dst.bytes() * sizeof(Temp));
4024 unsigned const_offset = info.const_offset;
4026 const unsigned align_mul = info.align_mul ? info.align_mul : component_size;
4027 unsigned align_offset = (info.align_offset + const_offset) % align_mul;
4029 unsigned bytes_read = 0;
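/* Each loop iteration emits the widest load the callback supports for the
 * remaining bytes; unaligned 8/16-bit loads may be widened to a dword here
 * and shifted right after the load (byte_align_scalar/byte_align_vector). */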
4030 while (bytes_read < load_size) {
4031 unsigned bytes_needed = load_size - bytes_read;
4033 /* add buffer for unaligned loads */
4034 int byte_align = 0;
4035 if (params.byte_align_loads) {
4036 byte_align = align_mul % 4 == 0 ? align_offset % 4 : -1;
4037 }
4039 if (byte_align) {
4040 if (bytes_needed > 2 || (bytes_needed == 2 && (align_mul % 2 || align_offset % 2)) ||
4041 !params.supports_8bit_16bit_loads) {
4042 if (info.component_stride) {
4043 assert(params.supports_8bit_16bit_loads && "unimplemented");
4044 bytes_needed = 2;
4045 byte_align = 0;
4046 } else {
4047 bytes_needed += byte_align == -1 ? 4 - info.align_mul : byte_align;
4048 bytes_needed = align(bytes_needed, 4);
4049 }
4050 } else {
4051 byte_align = 0;
4052 }
4053 }
4055 if (info.swizzle_component_size)
4056 bytes_needed = MIN2(bytes_needed, info.swizzle_component_size);
4057 if (info.component_stride)
4058 bytes_needed = MIN2(bytes_needed, info.component_size);
4060 bool need_to_align_offset = byte_align && (align_mul % 4 || align_offset % 4);
4062 /* reduce constant offset */
4063 Operand offset = info.offset;
4064 unsigned reduced_const_offset = const_offset;
4065 bool remove_const_offset_completely = need_to_align_offset;
4066 if (const_offset &&
4067 (remove_const_offset_completely || const_offset >= params.max_const_offset_plus_one)) {
4068 unsigned to_add = const_offset;
4069 if (remove_const_offset_completely) {
4070 reduced_const_offset = 0;
4071 } else {
4072 to_add =
4073 const_offset / params.max_const_offset_plus_one * params.max_const_offset_plus_one;
4074 reduced_const_offset %= params.max_const_offset_plus_one;
4075 }
4076 Temp offset_tmp = offset.isTemp() ? offset.getTemp() : Temp();
4077 if (offset.isConstant()) {
4078 offset = Operand::c32(offset.constantValue() + to_add);
4079 } else if (offset_tmp.regClass() == s1) {
4080 offset = bld.sop2(aco_opcode::s_add_i32, bld.def(s1), bld.def(s1, scc), offset_tmp,
4081 Operand::c32(to_add));
4082 } else if (offset_tmp.regClass() == v1) {
4083 offset = bld.vadd32(bld.def(v1), offset_tmp, Operand::c32(to_add));
4084 } else {
4085 Temp lo = bld.tmp(offset_tmp.type(), 1);
4086 Temp hi = bld.tmp(offset_tmp.type(), 1);
4087 bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), offset_tmp);
4089 if (offset_tmp.regClass() == s2) {
4090 Temp carry = bld.tmp(s1);
4091 lo = bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.scc(Definition(carry)), lo,
4092 Operand::c32(to_add));
4093 hi = bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.def(s1, scc), hi, carry);
4094 offset = bld.pseudo(aco_opcode::p_create_vector, bld.def(s2), lo, hi);
4095 } else {
4096 Temp new_lo = bld.tmp(v1);
4097 Temp carry =
4098 bld.vadd32(Definition(new_lo), lo, Operand::c32(to_add), true).def(1).getTemp();
4099 hi = bld.vadd32(bld.def(v1), hi, Operand::zero(), false, carry);
4100 offset = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), new_lo, hi);
4101 }
4102 }
4103 }
4105 /* align offset down if needed */
4106 Operand aligned_offset = offset;
4107 unsigned align = align_offset ? 1 << (ffs(align_offset) - 1) : align_mul;
4108 if (need_to_align_offset) {
4109 align = 4;
4110 Temp offset_tmp = offset.isTemp() ? offset.getTemp() : Temp();
4111 if (offset.isConstant()) {
4112 aligned_offset = Operand::c32(offset.constantValue() & 0xfffffffcu);
4113 } else if (offset_tmp.regClass() == s1) {
4114 aligned_offset = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc),
4115 Operand::c32(0xfffffffcu), offset_tmp);
4116 } else if (offset_tmp.regClass() == s2) {
4117 aligned_offset = bld.sop2(aco_opcode::s_and_b64, bld.def(s2), bld.def(s1, scc),
4118 Operand::c64(0xfffffffffffffffcllu), offset_tmp);
4119 } else if (offset_tmp.regClass() == v1) {
4120 aligned_offset =
4121 bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand::c32(0xfffffffcu), offset_tmp);
4122 } else if (offset_tmp.regClass() == v2) {
4123 Temp hi = bld.tmp(v1), lo = bld.tmp(v1);
4124 bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), offset_tmp);
4125 lo = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand::c32(0xfffffffcu), lo);
4126 aligned_offset = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), lo, hi);
4127 }
4128 }
4129 Temp aligned_offset_tmp =
4130 aligned_offset.isTemp() ? aligned_offset.getTemp() : bld.copy(bld.def(s1), aligned_offset);
4132 Temp val = params.callback(bld, info, aligned_offset_tmp, bytes_needed, align,
4133 reduced_const_offset, byte_align ? Temp() : info.dst);
4135 /* the callback wrote directly to dst */
4136 if (val == info.dst) {
4137 assert(num_vals == 0);
4138 emit_split_vector(ctx, info.dst, info.num_components);
4139 return;
4140 }
4142 /* shift result right if needed */
4143 if (params.byte_align_loads && info.component_size < 4) {
4144 Operand byte_align_off = Operand::c32(byte_align);
4145 if (byte_align == -1) {
4146 if (offset.isConstant())
4147 byte_align_off = Operand::c32(offset.constantValue() % 4u);
4148 else if (offset.size() == 2)
4149 byte_align_off = Operand(emit_extract_vector(ctx, offset.getTemp(), 0,
4150 RegClass(offset.getTemp().type(), 1)));
4151 else
4152 byte_align_off = offset;
4153 }
4155 assert(val.bytes() >= load_size && "unimplemented");
4156 if (val.type() == RegType::sgpr)
4157 byte_align_scalar(ctx, val, byte_align_off, info.dst);
4158 else
4159 byte_align_vector(ctx, val, byte_align_off, info.dst, component_size);
4163 /* add result to list and advance */
4164 if (info.component_stride) {
4165 assert(val.bytes() == info.component_size && "unimplemented");
4166 const_offset += info.component_stride;
4167 align_offset = (align_offset + info.component_stride) % align_mul;
4168 } else {
4169 const_offset += val.bytes();
4170 align_offset = (align_offset + val.bytes()) % align_mul;
4171 }
4172 bytes_read += val.bytes();
4173 vals[num_vals++] = val;
4174 }
4176 /* create array of components */
4177 unsigned components_split = 0;
4178 std::array<Temp, NIR_MAX_VEC_COMPONENTS> allocated_vec;
4179 bool has_vgprs = false;
4180 for (unsigned i = 0; i < num_vals;) {
4181 Temp* const tmp = (Temp*)alloca(num_vals * sizeof(Temp));
4182 unsigned num_tmps = 0;
4183 unsigned tmp_size = 0;
4184 RegType reg_type = RegType::sgpr;
4185 while ((!tmp_size || (tmp_size % component_size)) && i < num_vals) {
4186 if (vals[i].type() == RegType::vgpr)
4187 reg_type = RegType::vgpr;
4188 tmp_size += vals[i].bytes();
4189 tmp[num_tmps++] = vals[i++];
4190 }
4191 if (num_tmps > 1) {
4192 aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(
4193 aco_opcode::p_create_vector, Format::PSEUDO, num_tmps, 1)};
4194 for (unsigned j = 0; j < num_tmps; j++)
4195 vec->operands[j] = Operand(tmp[j]);
4196 tmp[0] = bld.tmp(RegClass::get(reg_type, tmp_size));
4197 vec->definitions[0] = Definition(tmp[0]);
4198 bld.insert(std::move(vec));
4199 }
4201 if (tmp[0].bytes() % component_size) {
4203 assert(i == num_vals);
4204 RegClass new_rc =
4205 RegClass::get(reg_type, tmp[0].bytes() / component_size * component_size);
4206 tmp[0] =
4207 bld.pseudo(aco_opcode::p_extract_vector, bld.def(new_rc), tmp[0], Operand::zero());
4208 }
4210 RegClass elem_rc = RegClass::get(reg_type, component_size);
4212 unsigned start = components_split;
4214 if (tmp_size == elem_rc.bytes()) {
4215 allocated_vec[components_split++] = tmp[0];
4216 } else {
4217 assert(tmp_size % elem_rc.bytes() == 0);
4218 aco_ptr<Pseudo_instruction> split{create_instruction<Pseudo_instruction>(
4219 aco_opcode::p_split_vector, Format::PSEUDO, 1, tmp_size / elem_rc.bytes())};
4220 for (auto& def : split->definitions) {
4221 Temp component = bld.tmp(elem_rc);
4222 allocated_vec[components_split++] = component;
4223 def = Definition(component);
4224 }
4225 split->operands[0] = Operand(tmp[0]);
4226 bld.insert(std::move(split));
4227 }
4229 /* try to p_as_uniform early so we can create more optimizable code and
4230 * also update allocated_vec */
4231 for (unsigned j = start; j < components_split; j++) {
4232 if (allocated_vec[j].bytes() % 4 == 0 && info.dst.type() == RegType::sgpr)
4233 allocated_vec[j] = bld.as_uniform(allocated_vec[j]);
4234 has_vgprs |= allocated_vec[j].type() == RegType::vgpr;
4235 }
4236 }
4238 /* concatenate components and p_as_uniform() result if needed */
4239 if (info.dst.type() == RegType::vgpr || !has_vgprs)
4240 ctx->allocated_vec.emplace(info.dst.id(), allocated_vec);
4242 int padding_bytes =
4243 MAX2((int)info.dst.bytes() - int(allocated_vec[0].bytes() * info.num_components), 0);
4245 aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(
4246 aco_opcode::p_create_vector, Format::PSEUDO, info.num_components + !!padding_bytes, 1)};
4247 for (unsigned i = 0; i < info.num_components; i++)
4248 vec->operands[i] = Operand(allocated_vec[i]);
4249 if (padding_bytes)
4250 vec->operands[info.num_components] = Operand(RegClass::get(RegType::vgpr, padding_bytes));
4251 if (info.dst.type() == RegType::sgpr && has_vgprs) {
4252 Temp tmp = bld.tmp(RegType::vgpr, info.dst.size());
4253 vec->definitions[0] = Definition(tmp);
4254 bld.insert(std::move(vec));
4255 bld.pseudo(aco_opcode::p_as_uniform, Definition(info.dst), tmp);
4256 } else {
4257 vec->definitions[0] = Definition(info.dst);
4258 bld.insert(std::move(vec));
4259 }
4260 }
4262 Operand
4263 load_lds_size_m0(Builder& bld)
4264 {
4265 /* m0 does not need to be initialized on GFX9+ */
4266 if (bld.program->gfx_level >= GFX9)
4267 return Operand(s1);
4269 return bld.m0((Temp)bld.copy(bld.def(s1, m0), Operand::c32(0xffffffffu)));
4270 }
4272 Temp
4273 lds_load_callback(Builder& bld, const LoadEmitInfo& info, Temp offset, unsigned bytes_needed,
4274 unsigned align, unsigned const_offset, Temp dst_hint)
4275 {
4276 offset = offset.regClass() == s1 ? bld.copy(bld.def(v1), offset) : offset;
4278 Operand m = load_lds_size_m0(bld);
4280 bool large_ds_read = bld.program->gfx_level >= GFX7;
4281 bool usable_read2 = bld.program->gfx_level >= GFX7;
4283 aco_opcode op;
4284 unsigned size = 0;
4285 bool read2 = false;
4286 if (bytes_needed >= 16 && align % 16 == 0 && large_ds_read) {
4287 size = 16;
4288 op = aco_opcode::ds_read_b128;
4289 } else if (bytes_needed >= 16 && align % 8 == 0 && const_offset % 8 == 0 && usable_read2) {
4290 size = 16;
4291 read2 = true;
4292 op = aco_opcode::ds_read2_b64;
4293 } else if (bytes_needed >= 12 && align % 16 == 0 && large_ds_read) {
4294 size = 12;
4295 op = aco_opcode::ds_read_b96;
4296 } else if (bytes_needed >= 8 && align % 8 == 0) {
4297 size = 8;
4298 op = aco_opcode::ds_read_b64;
4299 } else if (bytes_needed >= 8 && align % 4 == 0 && const_offset % 4 == 0 && usable_read2) {
4300 size = 8;
4301 read2 = true;
4302 op = aco_opcode::ds_read2_b32;
4303 } else if (bytes_needed >= 4 && align % 4 == 0) {
4304 size = 4;
4305 op = aco_opcode::ds_read_b32;
4306 } else if (bytes_needed >= 2 && align % 2 == 0) {
4307 size = 2;
4308 op = bld.program->gfx_level >= GFX9 ? aco_opcode::ds_read_u16_d16 : aco_opcode::ds_read_u16;
4309 } else {
4310 size = 1;
4311 op = bld.program->gfx_level >= GFX9 ? aco_opcode::ds_read_u8_d16 : aco_opcode::ds_read_u8;
4312 }
4314 unsigned const_offset_unit = read2 ? size / 2u : 1u;
4315 unsigned const_offset_range = read2 ? 255 * const_offset_unit : 65536;
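/* read2 immediates count in elements (two 8-bit offsets of 4 or 8 bytes
 * each), while plain ds_read takes a 16-bit byte offset; anything beyond
 * that range is folded into the address with a separate add below. */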
4317 if (const_offset > (const_offset_range - const_offset_unit)) {
4318 unsigned excess = const_offset - (const_offset % const_offset_range);
4319 offset = bld.vadd32(bld.def(v1), offset, Operand::c32(excess));
4320 const_offset -= excess;
4321 }
4323 const_offset /= const_offset_unit;
4325 RegClass rc = RegClass::get(RegType::vgpr, size);
4326 Temp val = rc == info.dst.regClass() && dst_hint.id() ? dst_hint : bld.tmp(rc);
4327 Instruction* instr;
4328 if (read2)
4329 instr = bld.ds(op, Definition(val), offset, m, const_offset, const_offset + 1);
4330 else
4331 instr = bld.ds(op, Definition(val), offset, m, const_offset);
4332 instr->ds().sync = info.sync;
4334 if (m.isUndefined())
4335 instr->operands.pop_back();
4337 return val;
4338 }
4340 const EmitLoadParameters lds_load_params{lds_load_callback, false, true, UINT32_MAX};
4342 Temp
4343 smem_load_callback(Builder& bld, const LoadEmitInfo& info, Temp offset, unsigned bytes_needed,
4344 unsigned align, unsigned const_offset, Temp dst_hint)
4345 {
4346 assert(align >= 4u);
4348 bool buffer = info.resource.id() && info.resource.bytes() == 16;
4349 Temp addr = info.resource;
4350 if (!buffer && !addr.id()) {
4351 addr = offset;
4352 offset = Temp();
4353 }
4355 bytes_needed = MIN2(bytes_needed, 64);
4356 unsigned needed_round_up = util_next_power_of_two(bytes_needed);
4357 unsigned needed_round_down = needed_round_up >> (needed_round_up != bytes_needed ? 1 : 0);
4358 /* Only round-up global loads if it's aligned so that it won't cross pages */
4359 bytes_needed = buffer || align % needed_round_up == 0 ? needed_round_up : needed_round_down;
4361 aco_opcode op;
4362 if (bytes_needed <= 4) {
4363 op = buffer ? aco_opcode::s_buffer_load_dword : aco_opcode::s_load_dword;
4364 } else if (bytes_needed <= 8) {
4365 op = buffer ? aco_opcode::s_buffer_load_dwordx2 : aco_opcode::s_load_dwordx2;
4366 } else if (bytes_needed <= 16) {
4367 op = buffer ? aco_opcode::s_buffer_load_dwordx4 : aco_opcode::s_load_dwordx4;
4368 } else if (bytes_needed <= 32) {
4369 op = buffer ? aco_opcode::s_buffer_load_dwordx8 : aco_opcode::s_load_dwordx8;
4370 } else {
4371 assert(bytes_needed == 64);
4372 op = buffer ? aco_opcode::s_buffer_load_dwordx16 : aco_opcode::s_load_dwordx16;
4373 }
4375 aco_ptr<SMEM_instruction> load{create_instruction<SMEM_instruction>(op, Format::SMEM, 2, 1)};
4376 if (buffer) {
4377 if (const_offset)
4378 offset = bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.def(s1, scc), offset,
4379 Operand::c32(const_offset));
4380 load->operands[0] = Operand(info.resource);
4381 load->operands[1] = Operand(offset);
4382 } else {
4383 load->operands[0] = Operand(addr);
4384 if (offset.id() && const_offset)
4385 load->operands[1] = bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.def(s1, scc), offset,
4386 Operand::c32(const_offset));
4387 else if (offset.id())
4388 load->operands[1] = Operand(offset);
4390 load->operands[1] = Operand::c32(const_offset);
4391 }
4392 RegClass rc(RegType::sgpr, DIV_ROUND_UP(bytes_needed, 4u));
4393 Temp val = dst_hint.id() && dst_hint.regClass() == rc ? dst_hint : bld.tmp(rc);
4394 load->definitions[0] = Definition(val);
4395 load->glc = info.glc;
4396 load->dlc = info.glc && (bld.program->gfx_level == GFX10 || bld.program->gfx_level == GFX10_3);
4397 load->sync = info.sync;
4398 bld.insert(std::move(load));
4399 return val;
4400 }
4402 const EmitLoadParameters smem_load_params{smem_load_callback, true, false, 1024};
4404 Temp
4405 mubuf_load_callback(Builder& bld, const LoadEmitInfo& info, Temp offset, unsigned bytes_needed,
4406 unsigned align_, unsigned const_offset, Temp dst_hint)
4407 {
4408 Operand vaddr = offset.type() == RegType::vgpr ? Operand(offset) : Operand(v1);
4409 Operand soffset = offset.type() == RegType::sgpr ? Operand(offset) : Operand::c32(0);
4411 if (info.soffset.id()) {
4412 if (soffset.isTemp())
4413 vaddr = bld.copy(bld.def(v1), soffset);
4414 soffset = Operand(info.soffset);
4415 }
4417 unsigned bytes_size = 0;
4418 aco_opcode op;
4419 if (bytes_needed == 1 || align_ % 2) {
4420 bytes_size = 1;
4421 op = aco_opcode::buffer_load_ubyte;
4422 } else if (bytes_needed == 2 || align_ % 4) {
4423 bytes_size = 2;
4424 op = aco_opcode::buffer_load_ushort;
4425 } else if (bytes_needed <= 4) {
4426 bytes_size = 4;
4427 op = aco_opcode::buffer_load_dword;
4428 } else if (bytes_needed <= 8) {
4429 bytes_size = 8;
4430 op = aco_opcode::buffer_load_dwordx2;
4431 } else if (bytes_needed <= 12 && bld.program->gfx_level > GFX6) {
4432 bytes_size = 12;
4433 op = aco_opcode::buffer_load_dwordx3;
4434 } else {
4435 bytes_size = 16;
4436 op = aco_opcode::buffer_load_dwordx4;
4437 }
4438 aco_ptr<MUBUF_instruction> mubuf{create_instruction<MUBUF_instruction>(op, Format::MUBUF, 3, 1)};
4439 mubuf->operands[0] = Operand(info.resource);
4440 mubuf->operands[1] = vaddr;
4441 mubuf->operands[2] = soffset;
4442 mubuf->offen = (offset.type() == RegType::vgpr);
4443 mubuf->glc = info.glc;
4444 mubuf->dlc =
4445 info.glc && (bld.program->gfx_level == GFX10 || bld.program->gfx_level == GFX10_3);
4446 mubuf->slc = info.slc;
4447 mubuf->sync = info.sync;
4448 mubuf->offset = const_offset;
4449 mubuf->swizzled = info.swizzle_component_size != 0;
4450 RegClass rc = RegClass::get(RegType::vgpr, bytes_size);
4451 Temp val = dst_hint.id() && rc == dst_hint.regClass() ? dst_hint : bld.tmp(rc);
4452 mubuf->definitions[0] = Definition(val);
4453 bld.insert(std::move(mubuf));
4455 return val;
4456 }
4458 const EmitLoadParameters mubuf_load_params{mubuf_load_callback, true, true, 4096};
4460 Temp
4461 scratch_load_callback(Builder& bld, const LoadEmitInfo& info, Temp offset, unsigned bytes_needed,
4462 unsigned align_, unsigned const_offset, Temp dst_hint)
4463 {
4464 unsigned bytes_size = 0;
4465 aco_opcode op;
4466 if (bytes_needed == 1 || align_ % 2u) {
4467 bytes_size = 1;
4468 op = aco_opcode::scratch_load_ubyte;
4469 } else if (bytes_needed == 2 || align_ % 4u) {
4470 bytes_size = 2;
4471 op = aco_opcode::scratch_load_ushort;
4472 } else if (bytes_needed <= 4) {
4473 bytes_size = 4;
4474 op = aco_opcode::scratch_load_dword;
4475 } else if (bytes_needed <= 8) {
4476 bytes_size = 8;
4477 op = aco_opcode::scratch_load_dwordx2;
4478 } else if (bytes_needed <= 12) {
4479 bytes_size = 12;
4480 op = aco_opcode::scratch_load_dwordx3;
4481 } else {
4482 bytes_size = 16;
4483 op = aco_opcode::scratch_load_dwordx4;
4484 }
4485 RegClass rc = RegClass::get(RegType::vgpr, bytes_size);
4486 Temp val = dst_hint.id() && rc == dst_hint.regClass() ? dst_hint : bld.tmp(rc);
4487 aco_ptr<FLAT_instruction> flat{create_instruction<FLAT_instruction>(op, Format::SCRATCH, 2, 1)};
4488 flat->operands[0] = offset.regClass() == s1 ? Operand(v1) : Operand(offset);
4489 flat->operands[1] = offset.regClass() == s1 ? Operand(offset) : Operand(s1);
4490 flat->sync = info.sync;
4491 flat->offset = const_offset;
4492 flat->definitions[0] = Definition(val);
4493 bld.insert(std::move(flat));
4495 return val;
4496 }
4498 const EmitLoadParameters scratch_mubuf_load_params{mubuf_load_callback, false, true, 4096};
4499 const EmitLoadParameters scratch_flat_load_params{scratch_load_callback, false, true, 2048};
4501 Temp
4502 get_gfx6_global_rsrc(Builder& bld, Temp addr)
4503 {
4504 uint32_t rsrc_conf = S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
4505 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
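/* Builds a buffer resource spanning all of memory: dwords 0-1 hold the base
 * address (zero when the address is a VGPR and goes through vaddr via addr64
 * instead), num_records is set to -1 and the config dword selects a raw
 * 32-bit format. */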
4507 if (addr.type() == RegType::vgpr)
4508 return bld.pseudo(aco_opcode::p_create_vector, bld.def(s4), Operand::zero(), Operand::zero(),
4509 Operand::c32(-1u), Operand::c32(rsrc_conf));
4510 return bld.pseudo(aco_opcode::p_create_vector, bld.def(s4), addr, Operand::c32(-1u),
4511 Operand::c32(rsrc_conf));
4512 }
4514 Temp
4515 add64_32(Builder& bld, Temp src0, Temp src1)
4516 {
4517 Temp src00 = bld.tmp(src0.type(), 1);
4518 Temp src01 = bld.tmp(src0.type(), 1);
4519 bld.pseudo(aco_opcode::p_split_vector, Definition(src00), Definition(src01), src0);
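/* 64-bit += 32-bit: add src1 to the low dword and propagate the carry into
 * the high dword (vadd32 with carry-out for VGPRs, s_add_u32 with SCC for
 * SGPRs). */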
4521 if (src0.type() == RegType::vgpr || src1.type() == RegType::vgpr) {
4522 Temp dst0 = bld.tmp(v1);
4523 Temp carry = bld.vadd32(Definition(dst0), src00, src1, true).def(1).getTemp();
4524 Temp dst1 = bld.vadd32(bld.def(v1), src01, Operand::zero(), false, carry);
4525 return bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), dst0, dst1);
4526 } else {
4527 Temp carry = bld.tmp(s1);
4528 Temp dst0 =
4529 bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.scc(Definition(carry)), src00, src1);
4530 Temp dst1 = bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.def(s1, scc), src01, carry);
4531 return bld.pseudo(aco_opcode::p_create_vector, bld.def(s2), dst0, dst1);
4532 }
4533 }
4535 void
4536 lower_global_address(Builder& bld, uint32_t offset_in, Temp* address_inout,
4537 uint32_t* const_offset_inout, Temp* offset_inout)
4538 {
4539 Temp address = *address_inout;
4540 uint64_t const_offset = *const_offset_inout + offset_in;
4541 Temp offset = *offset_inout;
4543 uint64_t max_const_offset_plus_one =
4544 1; /* GFX7/8/9: FLAT loads do not support constant offsets */
4545 if (bld.program->gfx_level >= GFX9)
4546 max_const_offset_plus_one = bld.program->dev.scratch_global_offset_max;
4547 else if (bld.program->gfx_level == GFX6)
4548 max_const_offset_plus_one = 4096; /* MUBUF has a 12-bit unsigned offset field */
4549 uint64_t excess_offset = const_offset - (const_offset % max_const_offset_plus_one);
4550 const_offset %= max_const_offset_plus_one;
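/* Example, assuming a maximum of 4096: a const_offset of 5000 becomes
 * excess_offset = 4096 (folded into the address or offset below) and
 * const_offset = 904 (kept as the instruction's immediate). */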
4552 if (!offset.id()) {
4553 while (unlikely(excess_offset > UINT32_MAX)) {
4554 address = add64_32(bld, address, bld.copy(bld.def(s1), Operand::c32(UINT32_MAX)));
4555 excess_offset -= UINT32_MAX;
4556 }
4557 if (excess_offset)
4558 offset = bld.copy(bld.def(s1), Operand::c32(excess_offset));
4559 } else {
4560 /* If we add to "offset", we would transform the intended
4561 * "address + u2u64(offset) + u2u64(const_offset)" into
4562 * "address + u2u64(offset + const_offset)", so add to the address.
4563 * This could be more efficient if excess_offset>UINT32_MAX by doing a full 64-bit addition,
4564 * but that should be really rare.
4566 while (excess_offset) {
4567 uint32_t src2 = MIN2(excess_offset, UINT32_MAX);
4568 address = add64_32(bld, address, bld.copy(bld.def(s1), Operand::c32(src2)));
4569 excess_offset -= src2;
4570 }
4571 }
4573 if (bld.program->gfx_level == GFX6) {
4574 /* GFX6 (MUBUF): (SGPR address, SGPR offset) or (VGPR address, SGPR offset) */
4575 if (offset.type() != RegType::sgpr) {
4576 address = add64_32(bld, address, offset);
4577 offset = Temp();
4578 }
4579 offset = offset.id() ? offset : bld.copy(bld.def(s1), Operand::zero());
4580 } else if (bld.program->gfx_level <= GFX8) {
4581 /* GFX7,8 (FLAT): VGPR address */
4582 if (offset.id()) {
4583 address = add64_32(bld, address, offset);
4584 offset = Temp();
4585 }
4586 address = as_vgpr(bld, address);
4588 /* GFX9+ (GLOBAL): (VGPR address), or (SGPR address and VGPR offset) */
4589 if (address.type() == RegType::vgpr && offset.id()) {
4590 address = add64_32(bld, address, offset);
4592 } else if (address.type() == RegType::sgpr && offset.id()) {
4593 offset = as_vgpr(bld, offset);
4595 if (address.type() == RegType::sgpr && !offset.id())
4596 offset = bld.copy(bld.def(v1), bld.copy(bld.def(s1), Operand::zero()));
4599 *address_inout = address;
4600 *const_offset_inout = const_offset;
4601 *offset_inout = offset;
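/* Worked example: on GFX9+ with dev.scratch_global_offset_max == 4096 (an
 * assumed value for illustration), an incoming const_offset of 5000 keeps
 * 5000 % 4096 = 904 as the instruction immediate and moves the excess 4096
 * into the runtime offset/address, leaving the effective address
 * unchanged. On GFX7/8 the FLAT immediate must be 0
 * (max_const_offset_plus_one == 1), so everything is folded into the
 * 64-bit address.
 */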
4605 global_load_callback(Builder& bld, const LoadEmitInfo& info, Temp offset, unsigned bytes_needed,
4606 unsigned align_, unsigned const_offset, Temp dst_hint)
4608 Temp addr = info.resource;
4613 lower_global_address(bld, 0, &addr, &const_offset, &offset);
4615 unsigned bytes_size = 0;
4616 bool use_mubuf = bld.program->gfx_level == GFX6;
4617 bool global = bld.program->gfx_level >= GFX9;
4619 if (bytes_needed == 1 || align_ % 2u) {
4621 op = use_mubuf ? aco_opcode::buffer_load_ubyte
4622 : global ? aco_opcode::global_load_ubyte
4623 : aco_opcode::flat_load_ubyte;
4624 } else if (bytes_needed == 2 || align_ % 4u) {
4626 op = use_mubuf ? aco_opcode::buffer_load_ushort
4627 : global ? aco_opcode::global_load_ushort
4628 : aco_opcode::flat_load_ushort;
4629 } else if (bytes_needed <= 4) {
4631 op = use_mubuf ? aco_opcode::buffer_load_dword
4632 : global ? aco_opcode::global_load_dword
4633 : aco_opcode::flat_load_dword;
4634 } else if (bytes_needed <= 8 || (bytes_needed <= 12 && use_mubuf)) {
4636 op = use_mubuf ? aco_opcode::buffer_load_dwordx2
4637 : global ? aco_opcode::global_load_dwordx2
4638 : aco_opcode::flat_load_dwordx2;
4639 } else if (bytes_needed <= 12 && !use_mubuf) {
4641 op = global ? aco_opcode::global_load_dwordx3 : aco_opcode::flat_load_dwordx3;
4644 op = use_mubuf ? aco_opcode::buffer_load_dwordx4
4645 : global ? aco_opcode::global_load_dwordx4
4646 : aco_opcode::flat_load_dwordx4;
4648 RegClass rc = RegClass::get(RegType::vgpr, bytes_size);
4649 Temp val = dst_hint.id() && rc == dst_hint.regClass() ? dst_hint : bld.tmp(rc);
4651 aco_ptr<MUBUF_instruction> mubuf{
4652 create_instruction<MUBUF_instruction>(op, Format::MUBUF, 3, 1)};
4653 mubuf->operands[0] = Operand(get_gfx6_global_rsrc(bld, addr));
4654 mubuf->operands[1] = addr.type() == RegType::vgpr ? Operand(addr) : Operand(v1);
4655 mubuf->operands[2] = Operand(offset);
4656 mubuf->glc = info.glc;
4658 mubuf->offset = const_offset;
4659 mubuf->addr64 = addr.type() == RegType::vgpr;
4660 mubuf->disable_wqm = false;
4661 mubuf->sync = info.sync;
4662 mubuf->definitions[0] = Definition(val);
4663 bld.insert(std::move(mubuf));
4665 aco_ptr<FLAT_instruction> flat{
4666 create_instruction<FLAT_instruction>(op, global ? Format::GLOBAL : Format::FLAT, 2, 1)};
4667 if (addr.regClass() == s2) {
4668 assert(global && offset.id() && offset.type() == RegType::vgpr);
4669 flat->operands[0] = Operand(offset);
4670 flat->operands[1] = Operand(addr);
4672 assert(addr.type() == RegType::vgpr && !offset.id());
4673 flat->operands[0] = Operand(addr);
4674 flat->operands[1] = Operand(s1);
4676 flat->glc = info.glc;
4678 info.glc && (bld.program->gfx_level == GFX10 || bld.program->gfx_level == GFX10_3);
4679 flat->sync = info.sync;
4680 assert(global || !const_offset);
4681 flat->offset = const_offset;
4682 flat->definitions[0] = Definition(val);
4683 bld.insert(std::move(flat));
4689 const EmitLoadParameters global_load_params{global_load_callback, true, true, UINT32_MAX};
4692 load_lds(isel_context* ctx, unsigned elem_size_bytes, unsigned num_components, Temp dst,
4693 Temp address, unsigned base_offset, unsigned align)
4695 assert(util_is_power_of_two_nonzero(align));
4697 Builder bld(ctx->program, ctx->block);
4699 LoadEmitInfo info = {Operand(as_vgpr(ctx, address)), dst, num_components, elem_size_bytes};
4700 info.align_mul = align;
4701 info.align_offset = 0;
4702 info.sync = memory_sync_info(storage_shared);
4703 info.const_offset = base_offset;
4704 emit_load(ctx, bld, info, lds_load_params);
4710 split_store_data(isel_context* ctx, RegType dst_type, unsigned count, Temp* dst, unsigned* bytes,
4716 Builder bld(ctx->program, ctx->block);
4718 /* count == 1 fast path */
4720 if (dst_type == RegType::sgpr)
4721 dst[0] = bld.as_uniform(src);
4723 dst[0] = as_vgpr(ctx, src);
4727 /* elem_size_bytes is the largest power of two dividing every entry of bytes[] (the seed of 8 caps it at 8) */
4728 unsigned elem_size_bytes =
4729 1u << (ffs(std::accumulate(bytes, bytes + count, 8, std::bit_or<>{})) - 1);
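/* e.g. bytes = {4, 8, 2}: 8 | 4 | 8 | 2 == 0b1110, ffs(0b1110) == 2, so
 * elem_size_bytes == 1u << 1 == 2: the largest power of two dividing every
 * entry, with the seeded 8 capping the result at 8 bytes.
 */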
4731 ASSERTED bool is_subdword = elem_size_bytes < 4;
4732 assert(!is_subdword || dst_type == RegType::vgpr);
4734 for (unsigned i = 0; i < count; i++)
4735 dst[i] = bld.tmp(RegClass::get(dst_type, bytes[i]));
4737 std::vector<Temp> temps;
4738 /* use allocated_vec if possible */
4739 auto it = ctx->allocated_vec.find(src.id());
4740 if (it != ctx->allocated_vec.end()) {
4741 if (!it->second[0].id())
4743 unsigned elem_size = it->second[0].bytes();
4744 assert(src.bytes() % elem_size == 0);
4746 for (unsigned i = 0; i < src.bytes() / elem_size; i++) {
4747 if (!it->second[i].id())
4750 if (elem_size_bytes % elem_size)
4753 temps.insert(temps.end(), it->second.begin(), it->second.begin() + src.bytes() / elem_size);
4754 elem_size_bytes = elem_size;
4758 /* split src if necessary */
4759 if (temps.empty()) {
4760 if (is_subdword && src.type() == RegType::sgpr)
4761 src = as_vgpr(ctx, src);
4762 if (dst_type == RegType::sgpr)
4763 src = bld.as_uniform(src);
4765 unsigned num_elems = src.bytes() / elem_size_bytes;
4766 aco_ptr<Instruction> split{create_instruction<Pseudo_instruction>(
4767 aco_opcode::p_split_vector, Format::PSEUDO, 1, num_elems)};
4768 split->operands[0] = Operand(src);
4769 for (unsigned i = 0; i < num_elems; i++) {
4770 temps.emplace_back(bld.tmp(RegClass::get(dst_type, elem_size_bytes)));
4771 split->definitions[i] = Definition(temps.back());
4773 bld.insert(std::move(split));
4777 for (unsigned i = 0; i < count; i++) {
4778 unsigned op_count = dst[i].bytes() / elem_size_bytes;
4779 if (op_count == 1) {
4780 if (dst_type == RegType::sgpr)
4781 dst[i] = bld.as_uniform(temps[idx++]);
4783 dst[i] = as_vgpr(ctx, temps[idx++]);
4787 aco_ptr<Instruction> vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector,
4788 Format::PSEUDO, op_count, 1)};
4789 for (unsigned j = 0; j < op_count; j++) {
4790 Temp tmp = temps[idx++];
4791 if (dst_type == RegType::sgpr)
4792 tmp = bld.as_uniform(tmp);
4793 vec->operands[j] = Operand(tmp);
4795 vec->definitions[0] = Definition(dst[i]);
4796 bld.insert(std::move(vec));
4802 scan_write_mask(uint32_t mask, uint32_t todo_mask, int* start, int* count)
4804 unsigned start_elem = ffs(todo_mask) - 1;
4805 bool skip = !(mask & (1 << start_elem));
4807 mask = ~mask & todo_mask;
4811 u_bit_scan_consecutive_range(&mask, start, count);
4817 advance_write_mask(uint32_t* todo_mask, int start, int count)
4819 *todo_mask &= ~u_bit_consecutive(0, count) << start;
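/* Worked example of the scan/advance pair (mask values illustrative):
 * with mask = 0b0110 and todo_mask = 0b1111, the first call finds bit 0
 * clear in mask, inverts to 0b1001, scans the consecutive range
 * {start = 0, count = 1} and returns false (a hole to skip).
 * advance_write_mask() clears bit 0 from todo_mask, the second call scans
 * {start = 1, count = 2} and returns true (bytes to actually store), and
 * the third call handles the remaining hole at bit 3.
 */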
4823 store_lds(isel_context* ctx, unsigned elem_size_bytes, Temp data, uint32_t wrmask, Temp address,
4824 unsigned base_offset, unsigned align)
4826 assert(util_is_power_of_two_nonzero(align));
4827 assert(util_is_power_of_two_nonzero(elem_size_bytes) && elem_size_bytes <= 8);
4829 Builder bld(ctx->program, ctx->block);
4830 bool large_ds_write = ctx->options->gfx_level >= GFX7;
4831 bool usable_write2 = ctx->options->gfx_level >= GFX7;
4833 unsigned write_count = 0;
4834 Temp write_datas[32];
4835 unsigned offsets[32];
4837 aco_opcode opcodes[32];
4839 wrmask = util_widen_mask(wrmask, elem_size_bytes);
4841 const unsigned wrmask_bitcnt = util_bitcount(wrmask);
4842 uint32_t todo = u_bit_consecutive(0, data.bytes());
4844 if (u_bit_consecutive(0, wrmask_bitcnt) == wrmask)
4845 todo = MIN2(todo, wrmask);
4849 if (!scan_write_mask(wrmask, todo, &offset, &byte)) {
4850 offsets[write_count] = offset;
4851 bytes[write_count] = byte;
4852 opcodes[write_count] = aco_opcode::num_opcodes;
4854 advance_write_mask(&todo, offset, byte);
4858 bool aligned2 = offset % 2 == 0 && align % 2 == 0;
4859 bool aligned4 = offset % 4 == 0 && align % 4 == 0;
4860 bool aligned8 = offset % 8 == 0 && align % 8 == 0;
4861 bool aligned16 = offset % 16 == 0 && align % 16 == 0;
4863 // TODO: use ds_write_b8_d16_hi/ds_write_b16_d16_hi if beneficial
4864 aco_opcode op = aco_opcode::num_opcodes;
4865 if (byte >= 16 && aligned16 && large_ds_write) {
4866 op = aco_opcode::ds_write_b128;
4868 } else if (byte >= 12 && aligned16 && large_ds_write) {
4869 op = aco_opcode::ds_write_b96;
4871 } else if (byte >= 8 && aligned8) {
4872 op = aco_opcode::ds_write_b64;
4874 } else if (byte >= 4 && aligned4) {
4875 op = aco_opcode::ds_write_b32;
4877 } else if (byte >= 2 && aligned2) {
4878 op = aco_opcode::ds_write_b16;
4880 } else if (byte >= 1) {
4881 op = aco_opcode::ds_write_b8;
4887 offsets[write_count] = offset;
4888 bytes[write_count] = byte;
4889 opcodes[write_count] = op;
4891 advance_write_mask(&todo, offset, byte);
4894 Operand m = load_lds_size_m0(bld);
4896 split_store_data(ctx, RegType::vgpr, write_count, write_datas, bytes, data);
4898 for (unsigned i = 0; i < write_count; i++) {
4899 aco_opcode op = opcodes[i];
4900 if (op == aco_opcode::num_opcodes)
4903 Temp split_data = write_datas[i];
4905 unsigned second = write_count;
4906 if (usable_write2 && (op == aco_opcode::ds_write_b32 || op == aco_opcode::ds_write_b64)) {
4907 for (second = i + 1; second < write_count; second++) {
4908 if (opcodes[second] == op && (offsets[second] - offsets[i]) % split_data.bytes() == 0) {
4909 op = split_data.bytes() == 4 ? aco_opcode::ds_write2_b32 : aco_opcode::ds_write2_b64;
4910 opcodes[second] = aco_opcode::num_opcodes;
4916 bool write2 = op == aco_opcode::ds_write2_b32 || op == aco_opcode::ds_write2_b64;
4917 unsigned write2_off = (offsets[second] - offsets[i]) / split_data.bytes();
4919 unsigned inline_offset = base_offset + offsets[i];
4920 unsigned max_offset = write2 ? (255 - write2_off) * split_data.bytes() : 65535;
4921 Temp address_offset = address;
4922 if (inline_offset > max_offset) {
4923 address_offset = bld.vadd32(bld.def(v1), Operand::c32(base_offset), address_offset);
4924 inline_offset = offsets[i];
4927 /* offsets[i] shouldn't be large enough for this to happen */
4928 assert(inline_offset <= max_offset);
4932 Temp second_data = write_datas[second];
4933 inline_offset /= split_data.bytes();
4934 instr = bld.ds(op, address_offset, split_data, second_data, m, inline_offset,
4935 inline_offset + write2_off);
4937 instr = bld.ds(op, address_offset, split_data, m, inline_offset);
4939 instr->ds().sync = memory_sync_info(storage_shared);
4941 if (m.isUndefined())
4942 instr->operands.pop_back();
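/* Illustration of the write2 merge above: two ds_write_b32 at byte
 * offsets 0 and 8 fuse into one ds_write2_b32. Since write2 encodes its
 * two 8-bit offsets in units of the element size, inline_offset is divided
 * by split_data.bytes(), giving offset0 = 0 and offset1 = 2 here.
 */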
4947 get_buffer_store_op(unsigned bytes)
4950 case 1: return aco_opcode::buffer_store_byte;
4951 case 2: return aco_opcode::buffer_store_short;
4952 case 4: return aco_opcode::buffer_store_dword;
4953 case 8: return aco_opcode::buffer_store_dwordx2;
4954 case 12: return aco_opcode::buffer_store_dwordx3;
4955 case 16: return aco_opcode::buffer_store_dwordx4;
4957 unreachable("Unexpected store size");
4958 return aco_opcode::num_opcodes;
4962 split_buffer_store(isel_context* ctx, nir_intrinsic_instr* instr, bool smem, RegType dst_type,
4963 Temp data, unsigned writemask, int swizzle_element_size, unsigned* write_count,
4964 Temp* write_datas, unsigned* offsets)
4966 unsigned write_count_with_skips = 0;
4970 /* determine how to split the data */
4971 unsigned todo = u_bit_consecutive(0, data.bytes());
4974 skips[write_count_with_skips] = !scan_write_mask(writemask, todo, &offset, &byte);
4975 offsets[write_count_with_skips] = offset;
4976 if (skips[write_count_with_skips]) {
4977 bytes[write_count_with_skips] = byte;
4978 advance_write_mask(&todo, offset, byte);
4979 write_count_with_skips++;
4983 /* the only supported sizes are 1, 2, 4, 8, 12 and 16 bytes, and a store can't be
4984 * larger than swizzle_element_size */
4985 byte = MIN2(byte, swizzle_element_size);
4987 byte = byte > 4 ? byte & ~0x3 : MIN2(byte, 2);
4989 /* SMEM and GFX6 VMEM can't emit 12-byte stores */
4990 if ((ctx->program->gfx_level == GFX6 || smem) && byte == 12)
4993 /* dword or larger stores have to be dword-aligned */
4994 unsigned align_mul = instr ? nir_intrinsic_align_mul(instr) : 4;
4995 unsigned align_offset = (instr ? nir_intrinsic_align_offset(instr) : 0) + offset;
4996 bool dword_aligned = align_offset % 4 == 0 && align_mul % 4 == 0;
4998 byte = MIN2(byte, (align_offset % 2 == 0 && align_mul % 2 == 0) ? 2 : 1);
5000 bytes[write_count_with_skips] = byte;
5001 advance_write_mask(&todo, offset, byte);
5002 write_count_with_skips++;
5005 /* actually split data */
5006 split_store_data(ctx, dst_type, write_count_with_skips, write_datas, bytes, data);
5009 for (unsigned i = 0; i < write_count_with_skips; i++) {
5012 write_datas[*write_count] = write_datas[i];
5013 offsets[*write_count] = offsets[i];
5019 create_vec_from_array(isel_context* ctx, Temp arr[], unsigned cnt, RegType reg_type,
5020 unsigned elem_size_bytes, unsigned split_cnt = 0u, Temp dst = Temp())
5022 Builder bld(ctx->program, ctx->block);
5023 unsigned dword_size = elem_size_bytes / 4;
5026 dst = bld.tmp(RegClass(reg_type, cnt * dword_size));
5028 std::array<Temp, NIR_MAX_VEC_COMPONENTS> allocated_vec;
5029 aco_ptr<Pseudo_instruction> instr{
5030 create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, cnt, 1)};
5031 instr->definitions[0] = Definition(dst);
5033 for (unsigned i = 0; i < cnt; ++i) {
5035 assert(arr[i].size() == dword_size);
5036 allocated_vec[i] = arr[i];
5037 instr->operands[i] = Operand(arr[i]);
5039 Temp zero = bld.copy(bld.def(RegClass(reg_type, dword_size)),
5040 Operand::zero(dword_size == 2 ? 8 : 4));
5041 allocated_vec[i] = zero;
5042 instr->operands[i] = Operand(zero);
5046 bld.insert(std::move(instr));
5049 emit_split_vector(ctx, dst, split_cnt);
5051 ctx->allocated_vec.emplace(dst.id(), allocated_vec); /* emit_split_vector already does this */
5057 resolve_excess_vmem_const_offset(Builder& bld, Temp& voffset, unsigned const_offset)
5059 if (const_offset >= 4096) {
5060 unsigned excess_const_offset = const_offset / 4096u * 4096u;
5061 const_offset %= 4096u;
5064 voffset = bld.copy(bld.def(v1), Operand::c32(excess_const_offset));
5065 else if (unlikely(voffset.regClass() == s1))
5066 voffset = bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.def(s1, scc),
5067 Operand::c32(excess_const_offset), Operand(voffset));
5068 else if (likely(voffset.regClass() == v1))
5069 voffset = bld.vadd32(bld.def(v1), Operand(voffset), Operand::c32(excess_const_offset));
5071 unreachable("Unsupported register class of voffset");
5074 return const_offset;
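/* e.g. const_offset = 5000: the excess 4096 is added to voffset at runtime
 * and 5000 % 4096 = 904 is returned for the MUBUF immediate, which only
 * has 12 bits (0..4095).
 */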
5078 emit_single_mubuf_store(isel_context* ctx, Temp descriptor, Temp voffset, Temp soffset, Temp vdata,
5079 unsigned const_offset = 0u, memory_sync_info sync = memory_sync_info(),
5080 bool slc = false, bool swizzled = false)
5083 assert(vdata.size() != 3 || ctx->program->gfx_level != GFX6);
5084 assert(vdata.size() >= 1 && vdata.size() <= 4);
5086 Builder bld(ctx->program, ctx->block);
5087 aco_opcode op = get_buffer_store_op(vdata.bytes());
5088 const_offset = resolve_excess_vmem_const_offset(bld, voffset, const_offset);
5090 Operand voffset_op = voffset.id() ? Operand(as_vgpr(ctx, voffset)) : Operand(v1);
5091 Operand soffset_op = soffset.id() ? Operand(soffset) : Operand::zero();
5092 bool glc = ctx->program->gfx_level < GFX11;
5094 bld.mubuf(op, Operand(descriptor), voffset_op, soffset_op, Operand(vdata), const_offset,
5095 /* offen */ !voffset_op.isUndefined(), /* swizzled */ swizzled,
5096 /* idxen*/ false, /* addr64 */ false, /* disable_wqm */ false,
5097 /* glc */ glc, /* dlc*/ false, /* slc */ slc);
5099 r.instr->mubuf().sync = sync;
5103 store_vmem_mubuf(isel_context* ctx, Temp src, Temp descriptor, Temp voffset, Temp soffset,
5104 unsigned base_const_offset, unsigned elem_size_bytes, unsigned write_mask,
5105 bool allow_combining = true, memory_sync_info sync = memory_sync_info(),
5108 Builder bld(ctx->program, ctx->block);
5109 assert(elem_size_bytes == 1 || elem_size_bytes == 2 || elem_size_bytes == 4 || elem_size_bytes == 8);
5111 write_mask = util_widen_mask(write_mask, elem_size_bytes);
5113 unsigned write_count = 0;
5114 Temp write_datas[32];
5115 unsigned offsets[32];
5116 split_buffer_store(ctx, NULL, false, RegType::vgpr, src, write_mask, allow_combining ? 16 : 4,
5117 &write_count, write_datas, offsets);
5119 for (unsigned i = 0; i < write_count; i++) {
5120 unsigned const_offset = offsets[i] + base_const_offset;
5121 emit_single_mubuf_store(ctx, descriptor, voffset, soffset, write_datas[i], const_offset, sync,
5122 slc, !allow_combining);
5127 load_vmem_mubuf(isel_context* ctx, Temp dst, Temp descriptor, Temp voffset, Temp soffset,
5128 unsigned base_const_offset, unsigned elem_size_bytes, unsigned num_components,
5129 unsigned stride = 0u, bool allow_combining = true, bool allow_reorder = true,
5130 bool slc = false, memory_sync_info sync = memory_sync_info())
5132 assert(elem_size_bytes == 1 || elem_size_bytes == 2 || elem_size_bytes == 4 || elem_size_bytes == 8);
5133 assert((num_components * elem_size_bytes) == dst.bytes());
5134 assert(!!stride != allow_combining);
5136 Builder bld(ctx->program, ctx->block);
5138 LoadEmitInfo info = {Operand(voffset), dst, num_components, elem_size_bytes, descriptor};
5139 info.component_stride = allow_combining ? 0 : stride;
5142 info.swizzle_component_size = allow_combining ? 0 : 4;
5143 info.align_mul = MIN2(elem_size_bytes, 4);
5144 info.align_offset = 0;
5145 info.soffset = soffset;
5146 info.const_offset = base_const_offset;
5148 emit_load(ctx, bld, info, mubuf_load_params);
5152 wave_id_in_threadgroup(isel_context* ctx)
5154 Builder bld(ctx->program, ctx->block);
5155 return bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc),
5156 get_arg(ctx, ctx->args->ac.merged_wave_info), Operand::c32(24u | (4u << 16)));
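/* s_bfe_u32 packs the field offset into the low bits of src1 and the field
 * width at bit 16, so 24u | (4u << 16) extracts the 4-bit wave id stored
 * at bits [27:24] of merged_wave_info.
 */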
5160 thread_id_in_threadgroup(isel_context* ctx)
5162 /* tid_in_tg = wave_id * wave_size + tid_in_wave */
5164 Builder bld(ctx->program, ctx->block);
5165 Temp tid_in_wave = emit_mbcnt(ctx, bld.tmp(v1));
5167 if (ctx->program->workgroup_size <= ctx->program->wave_size)
5170 Temp wave_id_in_tg = wave_id_in_threadgroup(ctx);
5171 Temp num_pre_threads =
5172 bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.def(s1, scc), wave_id_in_tg,
5173 Operand::c32(ctx->program->wave_size == 64 ? 6u : 5u));
5174 return bld.vadd32(bld.def(v1), Operand(num_pre_threads), Operand(tid_in_wave));
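/* e.g. wave64 with wave id 2 and lane 5: num_pre_threads = 2 << 6 = 128
 * and tid_in_tg = 128 + 5 = 133; the mbcnt above provides the lane index
 * within the wave.
 */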
5178 store_output_to_temps(isel_context* ctx, nir_intrinsic_instr* instr)
5180 unsigned write_mask = nir_intrinsic_write_mask(instr);
5181 unsigned component = nir_intrinsic_component(instr);
5182 unsigned idx = nir_intrinsic_base(instr) * 4u + component;
5183 nir_src offset = *nir_get_io_offset_src(instr);
5185 if (!nir_src_is_const(offset) || nir_src_as_uint(offset))
5188 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
5190 if (instr->src[0].ssa->bit_size == 64)
5191 write_mask = util_widen_mask(write_mask, 2);
5193 RegClass rc = instr->src[0].ssa->bit_size == 16 ? v2b : v1;
5195 for (unsigned i = 0; i < 8; ++i) {
5196 if (write_mask & (1 << i)) {
5197 ctx->outputs.mask[idx / 4u] |= 1 << (idx % 4u);
5198 ctx->outputs.temps[idx] = emit_extract_vector(ctx, src, i, rc);
5203 if (ctx->stage == fragment_fs && ctx->program->info.ps.has_epilog) {
5204 unsigned index = nir_intrinsic_base(instr) - FRAG_RESULT_DATA0;
5206 if (nir_intrinsic_src_type(instr) == nir_type_float16) {
5207 ctx->output_color_types |= ACO_TYPE_FLOAT16 << (index * 2);
5208 } else if (nir_intrinsic_src_type(instr) == nir_type_int16) {
5209 ctx->output_color_types |= ACO_TYPE_INT16 << (index * 2);
5210 } else if (nir_intrinsic_src_type(instr) == nir_type_uint16) {
5211 ctx->output_color_types |= ACO_TYPE_UINT16 << (index * 2);
5219 load_input_from_temps(isel_context* ctx, nir_intrinsic_instr* instr, Temp dst)
5221 /* Only TCS per-vertex inputs are supported by this function.
5222 * Per-vertex inputs only match between the VS/TCS invocation id when the number of invocations is the same. */
5225 if (ctx->shader->info.stage != MESA_SHADER_TESS_CTRL || !ctx->tcs_in_out_eq)
5228 nir_src* off_src = nir_get_io_offset_src(instr);
5229 nir_src* vertex_index_src = nir_get_io_arrayed_index_src(instr);
5230 nir_instr* vertex_index_instr = vertex_index_src->ssa->parent_instr;
5231 bool can_use_temps =
5232 nir_src_is_const(*off_src) && vertex_index_instr->type == nir_instr_type_intrinsic &&
5233 nir_instr_as_intrinsic(vertex_index_instr)->intrinsic == nir_intrinsic_load_invocation_id;
5238 unsigned idx = nir_intrinsic_base(instr) * 4u + nir_intrinsic_component(instr) +
5239 4 * nir_src_as_uint(*off_src);
5240 Temp* src = &ctx->inputs.temps[idx];
5241 create_vec_from_array(ctx, src, dst.size(), dst.regClass().type(), 4u, 0, dst);
5246 static void export_vs_varying(isel_context* ctx, int slot, bool is_pos, int* next_pos);
5249 visit_store_output(isel_context* ctx, nir_intrinsic_instr* instr)
5251 if (ctx->stage == vertex_vs || ctx->stage == tess_eval_vs || ctx->stage == fragment_fs ||
5252 ctx->stage == vertex_ngg || ctx->stage == tess_eval_ngg || ctx->stage == mesh_ngg ||
5253 (ctx->stage == vertex_tess_control_hs && ctx->shader->info.stage == MESA_SHADER_VERTEX) ||
5254 ctx->shader->info.stage == MESA_SHADER_GEOMETRY) {
5255 bool stored_to_temps = store_output_to_temps(ctx, instr);
5256 if (!stored_to_temps) {
5257 isel_err(instr->src[1].ssa->parent_instr, "Unimplemented output offset instruction");
5261 unreachable("Shader stage not implemented");
5266 emit_interp_instr(isel_context* ctx, unsigned idx, unsigned component, Temp src, Temp dst,
5269 Temp coord1 = emit_extract_vector(ctx, src, 0, v1);
5270 Temp coord2 = emit_extract_vector(ctx, src, 1, v1);
5272 Builder bld(ctx->program, ctx->block);
5274 if (dst.regClass() == v2b) {
5275 if (ctx->program->dev.has_16bank_lds) {
5276 assert(ctx->options->gfx_level <= GFX8);
5277 Builder::Result interp_p1 =
5278 bld.vintrp(aco_opcode::v_interp_mov_f32, bld.def(v1), Operand::c32(2u) /* P0 */,
5279 bld.m0(prim_mask), idx, component);
5280 interp_p1 = bld.vintrp(aco_opcode::v_interp_p1lv_f16, bld.def(v2b), coord1,
5281 bld.m0(prim_mask), interp_p1, idx, component);
5282 bld.vintrp(aco_opcode::v_interp_p2_legacy_f16, Definition(dst), coord2, bld.m0(prim_mask),
5283 interp_p1, idx, component);
5285 aco_opcode interp_p2_op = aco_opcode::v_interp_p2_f16;
5287 if (ctx->options->gfx_level == GFX8)
5288 interp_p2_op = aco_opcode::v_interp_p2_legacy_f16;
5290 Builder::Result interp_p1 = bld.vintrp(aco_opcode::v_interp_p1ll_f16, bld.def(v1), coord1,
5291 bld.m0(prim_mask), idx, component);
5292 bld.vintrp(interp_p2_op, Definition(dst), coord2, bld.m0(prim_mask), interp_p1, idx,
5296 Builder::Result interp_p1 = bld.vintrp(aco_opcode::v_interp_p1_f32, bld.def(v1), coord1,
5297 bld.m0(prim_mask), idx, component);
5299 if (ctx->program->dev.has_16bank_lds)
5300 interp_p1.instr->operands[0].setLateKill(true);
5302 bld.vintrp(aco_opcode::v_interp_p2_f32, Definition(dst), coord2, bld.m0(prim_mask), interp_p1,
5308 emit_load_frag_coord(isel_context* ctx, Temp dst, unsigned num_components)
5310 Builder bld(ctx->program, ctx->block);
5312 aco_ptr<Pseudo_instruction> vec(create_instruction<Pseudo_instruction>(
5313 aco_opcode::p_create_vector, Format::PSEUDO, num_components, 1));
5314 for (unsigned i = 0; i < num_components; i++) {
5315 if (ctx->args->ac.frag_pos[i].used)
5316 vec->operands[i] = Operand(get_arg(ctx, ctx->args->ac.frag_pos[i]));
5318 vec->operands[i] = Operand(v1);
5320 if (G_0286CC_POS_W_FLOAT_ENA(ctx->program->config->spi_ps_input_ena)) {
5321 assert(num_components == 4);
5323 bld.vop1(aco_opcode::v_rcp_f32, bld.def(v1), get_arg(ctx, ctx->args->ac.frag_pos[3]));
5326 for (Operand& op : vec->operands)
5327 op = op.isUndefined() ? Operand::zero() : op;
5329 vec->definitions[0] = Definition(dst);
5330 ctx->block->instructions.emplace_back(std::move(vec));
5331 emit_split_vector(ctx, dst, num_components);
5336 emit_load_frag_shading_rate(isel_context* ctx, Temp dst)
5338 Builder bld(ctx->program, ctx->block);
5341 /* VRS Rate X = Ancillary[2:3]
5342 * VRS Rate Y = Ancillary[4:5]
5344 Temp x_rate = bld.vop3(aco_opcode::v_bfe_u32, bld.def(v1), get_arg(ctx, ctx->args->ac.ancillary),
5345 Operand::c32(2u), Operand::c32(2u));
5346 Temp y_rate = bld.vop3(aco_opcode::v_bfe_u32, bld.def(v1), get_arg(ctx, ctx->args->ac.ancillary),
5347 Operand::c32(4u), Operand::c32(2u));
5349 /* xRate = xRate == 0x1 ? Horizontal2Pixels : None. */
5350 cond = bld.vopc(aco_opcode::v_cmp_eq_i32, bld.def(bld.lm), Operand::c32(1u), Operand(x_rate));
5351 x_rate = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), bld.copy(bld.def(v1), Operand::zero()),
5352 bld.copy(bld.def(v1), Operand::c32(4u)), cond);
5354 /* yRate = yRate == 0x1 ? Vertical2Pixels : None. */
5355 cond = bld.vopc(aco_opcode::v_cmp_eq_i32, bld.def(bld.lm), Operand::c32(1u), Operand(y_rate));
5356 y_rate = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), bld.copy(bld.def(v1), Operand::zero()),
5357 bld.copy(bld.def(v1), Operand::c32(1u)), cond);
5359 bld.vop2(aco_opcode::v_or_b32, Definition(dst), Operand(x_rate), Operand(y_rate));
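/* Worked example (illustrative): ancillary bits [3:2] = 1 and bits
 * [5:4] = 0 give x_rate = 4 and y_rate = 0, so dst = 4, i.e. a 2x1 coarse
 * fragment. The magic values 4 and 1 appear to follow the SPIR-V
 * ShadingRate encoding (Horizontal2Pixels = 4, Vertical2Pixels = 1) named
 * in the comments above.
 */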
5363 visit_load_interpolated_input(isel_context* ctx, nir_intrinsic_instr* instr)
5365 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
5366 Temp coords = get_ssa_temp(ctx, instr->src[0].ssa);
5367 unsigned idx = nir_intrinsic_base(instr);
5368 unsigned component = nir_intrinsic_component(instr);
5369 Temp prim_mask = get_arg(ctx, ctx->args->ac.prim_mask);
5371 assert(nir_src_is_const(instr->src[1]) && !nir_src_as_uint(instr->src[1]));
5373 if (instr->dest.ssa.num_components == 1) {
5374 emit_interp_instr(ctx, idx, component, coords, dst, prim_mask);
5376 aco_ptr<Pseudo_instruction> vec(create_instruction<Pseudo_instruction>(
5377 aco_opcode::p_create_vector, Format::PSEUDO, instr->dest.ssa.num_components, 1));
5378 for (unsigned i = 0; i < instr->dest.ssa.num_components; i++) {
5379 Temp tmp = ctx->program->allocateTmp(instr->dest.ssa.bit_size == 16 ? v2b : v1);
5380 emit_interp_instr(ctx, idx, component + i, coords, tmp, prim_mask);
5381 vec->operands[i] = Operand(tmp);
5383 vec->definitions[0] = Definition(dst);
5384 ctx->block->instructions.emplace_back(std::move(vec));
5389 check_vertex_fetch_size(isel_context* ctx, const ac_vtx_format_info* vtx_info, unsigned offset,
5390 unsigned binding_align, unsigned channels)
5392 if (!(vtx_info->has_hw_format & BITFIELD_BIT(channels - 1)))
5395 /* Split typed vertex buffer loads on GFX6 and GFX10+ to avoid any
5396 * alignment issues that trigger memory violations and eventually a GPU
5397 * hang. This can happen if the stride (static or dynamic) is unaligned and
5398 * also if the VBO offset is aligned to a scalar (e.g. stride is 8 and VBO
5399 * offset is 2 for R16G16B16A16_SNORM).
5401 unsigned vertex_byte_size = vtx_info->chan_byte_size * channels;
5402 return (ctx->options->gfx_level >= GFX7 && ctx->options->gfx_level <= GFX9) ||
5403 (offset % vertex_byte_size == 0 && MAX2(binding_align, 1) % vertex_byte_size == 0);
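/* e.g. the R16G16B16A16_SNORM case from the comment above, fetched with
 * channels = 4: vertex_byte_size = 2 * 4 = 8, so a binding whose offset is
 * only 2-byte aligned (2 % 8 != 0) fails the check on GFX6 and GFX10+ and
 * the caller must pick a different channel count, while GFX7-9 accept the
 * fetch regardless of alignment.
 */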
5407 get_fetch_format(isel_context* ctx, const ac_vtx_format_info* vtx_info, unsigned offset,
5408 unsigned* channels, unsigned max_channels, unsigned binding_align)
5410 if (!vtx_info->chan_byte_size) {
5411 *channels = vtx_info->num_channels;
5412 return vtx_info->hw_format[0];
5415 unsigned num_channels = *channels;
5416 if (!check_vertex_fetch_size(ctx, vtx_info, offset, binding_align, *channels)) {
5417 unsigned new_channels = num_channels + 1;
5418 /* first, assume more loads is worse and try using a larger data format */
5419 while (new_channels <= max_channels &&
5420 !check_vertex_fetch_size(ctx, vtx_info, offset, binding_align, new_channels)) {
5424 if (new_channels > max_channels) {
5425 /* then try decreasing load size (at the cost of more loads) */
5426 new_channels = *channels;
5427 while (new_channels > 1 &&
5428 !check_vertex_fetch_size(ctx, vtx_info, offset, binding_align, new_channels))
5432 if (new_channels < *channels)
5433 *channels = new_channels;
5434 num_channels = new_channels;
5437 return vtx_info->hw_format[num_channels - 1];
5441 visit_load_input(isel_context* ctx, nir_intrinsic_instr* instr)
5443 Builder bld(ctx->program, ctx->block);
5444 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
5445 nir_src offset = *nir_get_io_offset_src(instr);
5447 if (ctx->shader->info.stage == MESA_SHADER_VERTEX && ctx->program->info.vs.dynamic_inputs) {
5448 if (!nir_src_is_const(offset) || nir_src_as_uint(offset))
5449 isel_err(offset.ssa->parent_instr,
5450 "Unimplemented non-zero nir_intrinsic_load_input offset");
5452 unsigned location = nir_intrinsic_base(instr) - VERT_ATTRIB_GENERIC0;
5453 unsigned bitsize = instr->dest.ssa.bit_size;
5454 unsigned component = nir_intrinsic_component(instr) >> (bitsize == 64 ? 1 : 0);
5455 unsigned num_components = instr->dest.ssa.num_components;
5457 aco_ptr<Instruction> vec{create_instruction<Pseudo_instruction>(
5458 aco_opcode::p_create_vector, Format::PSEUDO, num_components, 1)};
5459 std::array<Temp, NIR_MAX_VEC_COMPONENTS> elems;
5460 for (unsigned i = 0; i < num_components; i++) {
5461 if (bitsize == 64) {
5462 Temp input = get_arg(ctx, ctx->args->vs_inputs[location + (component + i) / 2]);
5463 elems[i] = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2),
5464 emit_extract_vector(ctx, input, (component + i) * 2 % 4, v1),
5465 emit_extract_vector(ctx, input, (component + i) * 2 % 4 + 1, v1));
5467 Temp input = get_arg(ctx, ctx->args->vs_inputs[location]);
5468 elems[i] = emit_extract_vector(ctx, input, component + i, v1);
5470 if (bitsize == 16) {
5471 if (nir_alu_type_get_base_type(nir_intrinsic_dest_type(instr)) == nir_type_float)
5472 elems[i] = bld.vop1(aco_opcode::v_cvt_f16_f32, bld.def(v2b), elems[i]);
5474 elems[i] = bld.pseudo(aco_opcode::p_extract_vector, bld.def(v2b), elems[i],
5477 vec->operands[i] = Operand(elems[i]);
5479 vec->definitions[0] = Definition(dst);
5480 ctx->block->instructions.emplace_back(std::move(vec));
5481 ctx->allocated_vec.emplace(dst.id(), elems);
5482 } else if (ctx->shader->info.stage == MESA_SHADER_VERTEX) {
5484 if (!nir_src_is_const(offset) || nir_src_as_uint(offset))
5485 isel_err(offset.ssa->parent_instr,
5486 "Unimplemented non-zero nir_intrinsic_load_input offset");
5488 Temp vertex_buffers =
5489 convert_pointer_to_64_bit(ctx, get_arg(ctx, ctx->args->ac.vertex_buffers));
5491 unsigned location = nir_intrinsic_base(instr) - VERT_ATTRIB_GENERIC0;
5492 unsigned bitsize = instr->dest.ssa.bit_size;
5493 unsigned component = nir_intrinsic_component(instr) >> (bitsize == 64 ? 1 : 0);
5494 unsigned attrib_binding = ctx->options->key.vs.vertex_attribute_bindings[location];
5495 uint32_t attrib_offset = ctx->options->key.vs.vertex_attribute_offsets[location];
5496 uint32_t attrib_stride = ctx->options->key.vs.vertex_attribute_strides[location];
5497 enum pipe_format attrib_format =
5498 (enum pipe_format)ctx->options->key.vs.vertex_attribute_formats[location];
5499 unsigned binding_align = ctx->options->key.vs.vertex_binding_align[attrib_binding];
5501 const struct ac_vtx_format_info* vtx_info =
5502 ac_get_vtx_format_info(GFX8, CHIP_POLARIS10, attrib_format);
5504 unsigned mask = nir_ssa_def_components_read(&instr->dest.ssa) << component;
5505 unsigned num_channels = MIN2(util_last_bit(mask), vtx_info->num_channels);
5507 unsigned desc_index =
5508 ctx->program->info.vs.use_per_attribute_vb_descs ? location : attrib_binding;
5509 desc_index = util_bitcount(ctx->program->info.vs.vb_desc_usage_mask &
5510 u_bit_consecutive(0, desc_index));
5511 Operand off = bld.copy(bld.def(s1), Operand::c32(desc_index * 16u));
5512 Temp list = bld.smem(aco_opcode::s_load_dwordx4, bld.def(s4), vertex_buffers, off);
5515 if (ctx->options->key.vs.instance_rate_inputs & (1u << location)) {
5516 uint32_t divisor = ctx->options->key.vs.instance_rate_divisors[location];
5517 Temp start_instance = get_arg(ctx, ctx->args->ac.start_instance);
5519 Temp instance_id = get_arg(ctx, ctx->args->ac.instance_id);
5521 Temp divided = bld.tmp(v1);
5522 emit_v_div_u32(ctx, divided, as_vgpr(ctx, instance_id), divisor);
5523 index = bld.vadd32(bld.def(v1), start_instance, divided);
5525 index = bld.vadd32(bld.def(v1), start_instance, instance_id);
5528 index = bld.copy(bld.def(v1), start_instance);
5531 index = bld.vadd32(bld.def(v1), get_arg(ctx, ctx->args->ac.base_vertex),
5532 get_arg(ctx, ctx->args->ac.vertex_id));
5535 Temp* const channels = (Temp*)alloca(num_channels * sizeof(Temp));
5536 unsigned channel_start = 0;
5537 bool direct_fetch = false;
5539 /* skip unused channels at the start */
5540 if (vtx_info->chan_byte_size) {
5541 channel_start = ffs(mask) - 1;
5542 for (unsigned i = 0; i < MIN2(channel_start, num_channels); i++)
5543 channels[i] = Temp(0, s1);
5547 while (channel_start < num_channels) {
5548 unsigned fetch_component = num_channels - channel_start;
5549 unsigned fetch_offset = attrib_offset + channel_start * vtx_info->chan_byte_size;
5551 /* prefer MUBUF when possible to avoid potential alignment issues */
5552 /* TODO: we could use SDWA to unpack 8/16-bit attributes without extra instructions */
5553 bool use_mubuf = vtx_info->chan_byte_size == 4 && bitsize != 16;
5554 unsigned fetch_fmt = V_008F0C_BUF_DATA_FORMAT_INVALID;
5556 fetch_fmt = get_fetch_format(ctx, vtx_info, fetch_offset, &fetch_component,
5557 vtx_info->num_channels - channel_start, binding_align);
5559 /* GFX6 only supports loading vec3 with MTBUF, so split into vec2 + scalar. */
5560 if (fetch_component == 3 && ctx->options->gfx_level == GFX6)
5561 fetch_component = 2;
5564 unsigned fetch_bytes = fetch_component * bitsize / 8;
5566 Temp fetch_index = index;
5567 if (attrib_stride != 0 && fetch_offset > attrib_stride) {
5569 bld.vadd32(bld.def(v1), Operand::c32(fetch_offset / attrib_stride), fetch_index);
5570 fetch_offset = fetch_offset % attrib_stride;
5573 Operand soffset = Operand::zero();
5574 if (fetch_offset >= 4096) {
5575 soffset = bld.copy(bld.def(s1), Operand::c32(fetch_offset / 4096 * 4096));
5576 fetch_offset %= 4096;
5580 switch (fetch_bytes) {
5582 assert(!use_mubuf && bitsize == 16);
5583 opcode = aco_opcode::tbuffer_load_format_d16_x;
5586 if (bitsize == 16) {
5588 opcode = aco_opcode::tbuffer_load_format_d16_xy;
5591 use_mubuf ? aco_opcode::buffer_load_dword : aco_opcode::tbuffer_load_format_x;
5595 assert(!use_mubuf && bitsize == 16);
5596 opcode = aco_opcode::tbuffer_load_format_d16_xyz;
5599 if (bitsize == 16) {
5601 opcode = aco_opcode::tbuffer_load_format_d16_xyzw;
5604 use_mubuf ? aco_opcode::buffer_load_dwordx2 : aco_opcode::tbuffer_load_format_xy;
5608 assert(ctx->options->gfx_level >= GFX7 ||
5609 (!use_mubuf && ctx->options->gfx_level == GFX6));
5611 use_mubuf ? aco_opcode::buffer_load_dwordx3 : aco_opcode::tbuffer_load_format_xyz;
5615 use_mubuf ? aco_opcode::buffer_load_dwordx4 : aco_opcode::tbuffer_load_format_xyzw;
5617 default: unreachable("Unimplemented load_input vector size");
5621 if (channel_start == 0 && fetch_bytes == dst.bytes()) {
5622 direct_fetch = true;
5625 fetch_dst = bld.tmp(RegClass::get(RegType::vgpr, fetch_bytes));
5629 Instruction* mubuf = bld.mubuf(opcode, Definition(fetch_dst), list, fetch_index,
5630 soffset, fetch_offset, false, false, true)
5632 mubuf->mubuf().vtx_binding = attrib_binding + 1;
5634 unsigned dfmt = fetch_fmt & 0xf;
5635 unsigned nfmt = fetch_fmt >> 4;
5636 Instruction* mtbuf = bld.mtbuf(opcode, Definition(fetch_dst), list, fetch_index,
5637 soffset, dfmt, nfmt, fetch_offset, false, true)
5639 mtbuf->mtbuf().vtx_binding = attrib_binding + 1;
5642 emit_split_vector(ctx, fetch_dst, fetch_dst.bytes() * 8 / bitsize);
5644 if (fetch_component == 1) {
5645 channels[channel_start] = fetch_dst;
5647 for (unsigned i = 0; i < MIN2(fetch_component, num_channels - channel_start); i++)
5648 channels[channel_start + i] = emit_extract_vector(
5649 ctx, fetch_dst, i, RegClass::get(RegType::vgpr, bitsize / 8u));
5652 channel_start += fetch_component;
5655 if (!direct_fetch) {
5657 nir_alu_type_get_base_type(nir_intrinsic_dest_type(instr)) == nir_type_float;
5659 unsigned num_components = instr->dest.ssa.num_components;
5661 aco_ptr<Instruction> vec{create_instruction<Pseudo_instruction>(
5662 aco_opcode::p_create_vector, Format::PSEUDO, num_components, 1)};
5663 std::array<Temp, NIR_MAX_VEC_COMPONENTS> elems;
5664 unsigned num_temp = 0;
5665 for (unsigned i = 0; i < num_components; i++) {
5666 unsigned idx = i + component;
5667 if (idx < num_channels && channels[idx].id()) {
5668 Temp channel = channels[idx];
5669 vec->operands[i] = Operand(channel);
5673 } else if (bitsize == 64) {
5674 /* 22.1.1. Attribute Location and Component Assignment of Vulkan 1.3 specification:
5675 * For 64-bit data types, no default attribute values are provided. Input variables
5676 * must not use more components than provided by the attribute.
5678 vec->operands[i] = Operand(v2);
5679 } else if (is_float && idx == 3) {
5680 vec->operands[i] = bitsize == 16 ? Operand::c16(0x3c00u) : Operand::c32(0x3f800000u);
5681 } else if (!is_float && idx == 3) {
5682 vec->operands[i] = Operand::get_const(ctx->options->gfx_level, 1u, bitsize / 8u);
5684 vec->operands[i] = Operand::zero(bitsize / 8u);
5687 vec->definitions[0] = Definition(dst);
5688 ctx->block->instructions.emplace_back(std::move(vec));
5689 emit_split_vector(ctx, dst, num_components);
5691 if (num_temp == num_components)
5692 ctx->allocated_vec.emplace(dst.id(), elems);
5694 } else if (ctx->shader->info.stage == MESA_SHADER_FRAGMENT) {
5695 if (!nir_src_is_const(offset) || nir_src_as_uint(offset))
5696 isel_err(offset.ssa->parent_instr,
5697 "Unimplemented non-zero nir_intrinsic_load_input offset");
5699 Temp prim_mask = get_arg(ctx, ctx->args->ac.prim_mask);
5701 unsigned idx = nir_intrinsic_base(instr);
5702 unsigned component = nir_intrinsic_component(instr);
5703 unsigned vertex_id = 2; /* P0 */
5705 if (instr->intrinsic == nir_intrinsic_load_input_vertex) {
5706 nir_const_value* src0 = nir_src_as_const_value(instr->src[0]);
5707 switch (src0->u32) {
5709 vertex_id = 2; /* P0 */
5712 vertex_id = 0; /* P10 */
5715 vertex_id = 1; /* P20 */
5717 default: unreachable("invalid vertex index");
5721 if (instr->dest.ssa.num_components == 1 &&
5722 instr->dest.ssa.bit_size != 64) {
5723 bld.vintrp(aco_opcode::v_interp_mov_f32, Definition(dst), Operand::c32(vertex_id),
5724 bld.m0(prim_mask), idx, component);
5726 unsigned num_components = instr->dest.ssa.num_components;
5727 if (instr->dest.ssa.bit_size == 64)
5728 num_components *= 2;
5729 aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(
5730 aco_opcode::p_create_vector, Format::PSEUDO, num_components, 1)};
5731 for (unsigned i = 0; i < num_components; i++) {
5732 unsigned chan_component = (component + i) % 4;
5733 unsigned chan_idx = idx + (component + i) / 4;
5734 vec->operands[i] = bld.vintrp(
5735 aco_opcode::v_interp_mov_f32, bld.def(instr->dest.ssa.bit_size == 16 ? v2b : v1),
5736 Operand::c32(vertex_id), bld.m0(prim_mask), chan_idx, chan_component);
5738 vec->definitions[0] = Definition(dst);
5739 bld.insert(std::move(vec));
5742 unreachable("Shader stage not implemented");
5747 visit_load_tcs_per_vertex_input(isel_context* ctx, nir_intrinsic_instr* instr)
5749 assert(ctx->shader->info.stage == MESA_SHADER_TESS_CTRL);
5751 Builder bld(ctx->program, ctx->block);
5752 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
5754 if (load_input_from_temps(ctx, instr, dst))
5757 unreachable("LDS-based TCS input should have been lowered in NIR.");
5761 visit_load_per_vertex_input(isel_context* ctx, nir_intrinsic_instr* instr)
5763 switch (ctx->shader->info.stage) {
5764 case MESA_SHADER_TESS_CTRL: visit_load_tcs_per_vertex_input(ctx, instr); break;
5765 default: unreachable("Unimplemented shader stage");
5770 visit_load_tess_coord(isel_context* ctx, nir_intrinsic_instr* instr)
5772 assert(ctx->shader->info.stage == MESA_SHADER_TESS_EVAL);
5774 Builder bld(ctx->program, ctx->block);
5775 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
5777 Operand tes_u(get_arg(ctx, ctx->args->ac.tes_u));
5778 Operand tes_v(get_arg(ctx, ctx->args->ac.tes_v));
5779 Operand tes_w = Operand::zero();
5781 if (ctx->shader->info.tess._primitive_mode == TESS_PRIMITIVE_TRIANGLES) {
5782 Temp tmp = bld.vop2(aco_opcode::v_add_f32, bld.def(v1), tes_u, tes_v);
5783 tmp = bld.vop2(aco_opcode::v_sub_f32, bld.def(v1), Operand::c32(0x3f800000u /* 1.0f */), tmp);
5784 tes_w = Operand(tmp);
5787 Temp tess_coord = bld.pseudo(aco_opcode::p_create_vector, Definition(dst), tes_u, tes_v, tes_w);
5788 emit_split_vector(ctx, tess_coord, 3);
5792 load_buffer(isel_context* ctx, unsigned num_components, unsigned component_size, Temp dst,
5793 Temp rsrc, Temp offset, unsigned align_mul, unsigned align_offset, bool glc = false,
5794 bool allow_smem = true, memory_sync_info sync = memory_sync_info())
5796 Builder bld(ctx->program, ctx->block);
5799 dst.type() != RegType::vgpr && (!glc || ctx->options->gfx_level >= GFX8) && allow_smem;
5801 offset = bld.as_uniform(offset);
5803 /* GFX6-7 are affected by a hw bug that prevents address clamping from
5804 * working correctly when the SGPR offset is used.
5806 if (offset.type() == RegType::sgpr && ctx->options->gfx_level < GFX8)
5807 offset = as_vgpr(ctx, offset);
5810 LoadEmitInfo info = {Operand(offset), dst, num_components, component_size, rsrc};
5813 info.align_mul = align_mul;
5814 info.align_offset = align_offset;
5816 emit_load(ctx, bld, info, smem_load_params);
5818 emit_load(ctx, bld, info, mubuf_load_params);
5822 visit_load_ubo(isel_context* ctx, nir_intrinsic_instr* instr)
5824 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
5825 Builder bld(ctx->program, ctx->block);
5826 Temp rsrc = bld.as_uniform(get_ssa_temp(ctx, instr->src[0].ssa));
5828 unsigned size = instr->dest.ssa.bit_size / 8;
5829 load_buffer(ctx, instr->num_components, size, dst, rsrc, get_ssa_temp(ctx, instr->src[1].ssa),
5830 nir_intrinsic_align_mul(instr), nir_intrinsic_align_offset(instr));
5834 visit_load_push_constant(isel_context* ctx, nir_intrinsic_instr* instr)
5836 Builder bld(ctx->program, ctx->block);
5837 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
5838 unsigned offset = nir_intrinsic_base(instr);
5839 unsigned count = instr->dest.ssa.num_components;
5840 nir_const_value* index_cv = nir_src_as_const_value(instr->src[0]);
5842 if (instr->dest.ssa.bit_size == 64)
5845 if (index_cv && instr->dest.ssa.bit_size >= 32) {
5846 unsigned start = (offset + index_cv->u32) / 4u;
5847 uint64_t mask = BITFIELD64_MASK(count) << start;
5848 if ((ctx->args->ac.inline_push_const_mask | mask) == ctx->args->ac.inline_push_const_mask &&
5849 start + count <= (sizeof(ctx->args->ac.inline_push_const_mask) * 8u)) {
5850 std::array<Temp, NIR_MAX_VEC_COMPONENTS> elems;
5851 aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(
5852 aco_opcode::p_create_vector, Format::PSEUDO, count, 1)};
5853 unsigned arg_index =
5854 util_bitcount64(ctx->args->ac.inline_push_const_mask & BITFIELD64_MASK(start));
5855 for (unsigned i = 0; i < count; ++i) {
5856 elems[i] = get_arg(ctx, ctx->args->ac.inline_push_consts[arg_index++]);
5857 vec->operands[i] = Operand{elems[i]};
5859 vec->definitions[0] = Definition(dst);
5860 ctx->block->instructions.emplace_back(std::move(vec));
5861 ctx->allocated_vec.emplace(dst.id(), elems);
5866 Temp index = bld.as_uniform(get_ssa_temp(ctx, instr->src[0].ssa));
5867 if (offset != 0) // TODO check if index != 0 as well
5868 index = bld.nuw().sop2(aco_opcode::s_add_i32, bld.def(s1), bld.def(s1, scc),
5869 Operand::c32(offset), index);
5870 Temp ptr = convert_pointer_to_64_bit(ctx, get_arg(ctx, ctx->args->ac.push_constants));
5873 bool aligned = true;
5875 if (instr->dest.ssa.bit_size == 8) {
5876 aligned = index_cv && (offset + index_cv->u32) % 4 == 0;
5877 bool fits_in_dword = count == 1 || (index_cv && ((offset + index_cv->u32) % 4 + count) <= 4);
5879 vec = fits_in_dword ? bld.tmp(s1) : bld.tmp(s2);
5880 } else if (instr->dest.ssa.bit_size == 16) {
5881 aligned = index_cv && (offset + index_cv->u32) % 4 == 0;
5883 vec = count == 4 ? bld.tmp(s4) : count > 1 ? bld.tmp(s2) : bld.tmp(s1);
5888 switch (vec.size()) {
5889 case 1: op = aco_opcode::s_load_dword; break;
5890 case 2: op = aco_opcode::s_load_dwordx2; break;
5895 case 4: op = aco_opcode::s_load_dwordx4; break;
5900 case 8: op = aco_opcode::s_load_dwordx8; break;
5901 default: unreachable("unimplemented or forbidden load_push_constant.");
5904 bld.smem(op, Definition(vec), ptr, index).instr->smem().prevent_overflow = true;
5907 Operand byte_offset = index_cv ? Operand::c32((offset + index_cv->u32) % 4) : Operand(index);
5908 byte_align_scalar(ctx, vec, byte_offset, dst);
5913 emit_split_vector(ctx, vec, 4);
5914 RegClass rc = dst.size() == 3 ? s1 : s2;
5915 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), emit_extract_vector(ctx, vec, 0, rc),
5916 emit_extract_vector(ctx, vec, 1, rc), emit_extract_vector(ctx, vec, 2, rc));
5918 emit_split_vector(ctx, dst, instr->dest.ssa.num_components);
5922 visit_load_constant(isel_context* ctx, nir_intrinsic_instr* instr)
5924 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
5926 Builder bld(ctx->program, ctx->block);
5928 uint32_t desc_type =
5929 S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) | S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
5930 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) | S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
5931 if (ctx->options->gfx_level >= GFX10) {
5932 desc_type |= S_008F0C_FORMAT(V_008F0C_GFX10_FORMAT_32_FLOAT) |
5933 S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) |
5934 S_008F0C_RESOURCE_LEVEL(ctx->options->gfx_level < GFX11);
5936 desc_type |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
5937 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
5940 unsigned base = nir_intrinsic_base(instr);
5941 unsigned range = nir_intrinsic_range(instr);
5943 Temp offset = get_ssa_temp(ctx, instr->src[0].ssa);
5944 if (base && offset.type() == RegType::sgpr)
5945 offset = bld.nuw().sop2(aco_opcode::s_add_u32, bld.def(s1), bld.def(s1, scc), offset,
5946 Operand::c32(base));
5947 else if (base && offset.type() == RegType::vgpr)
5948 offset = bld.vadd32(bld.def(v1), Operand::c32(base), offset);
5950 Temp rsrc = bld.pseudo(aco_opcode::p_create_vector, bld.def(s4),
5951 bld.pseudo(aco_opcode::p_constaddr, bld.def(s2), bld.def(s1, scc),
5952 Operand::c32(ctx->constant_data_offset)),
5953 Operand::c32(MIN2(base + range, ctx->shader->constant_data_size)),
5954 Operand::c32(desc_type));
5955 unsigned size = instr->dest.ssa.bit_size / 8;
5956 // TODO: get alignment information for subdword constants
5957 load_buffer(ctx, instr->num_components, size, dst, rsrc, offset, size, 0);
5960 /* Packs multiple Temps of different sizes into a vector of v1 Temps.
5961 * The byte count of each input Temp must be a multiple of 2.
5963 static std::vector<Temp>
5964 emit_pack_v1(isel_context* ctx, const std::vector<Temp>& unpacked)
5966 Builder bld(ctx->program, ctx->block);
5967 std::vector<Temp> packed;
5969 for (Temp tmp : unpacked) {
5970 assert(tmp.bytes() % 2 == 0);
5971 unsigned byte_idx = 0;
5972 while (byte_idx < tmp.bytes()) {
5973 if (low != Temp()) {
5974 Temp high = emit_extract_vector(ctx, tmp, byte_idx / 2, v2b);
5975 Temp dword = bld.pseudo(aco_opcode::p_create_vector, bld.def(v1), low, high);
5977 packed.push_back(dword);
5979 } else if (byte_idx % 4 == 0 && (byte_idx + 4) <= tmp.bytes()) {
5980 packed.emplace_back(emit_extract_vector(ctx, tmp, byte_idx / 4, v1));
5983 low = emit_extract_vector(ctx, tmp, byte_idx / 2, v2b);
5988 if (low != Temp()) {
5989 Temp dword = bld.pseudo(aco_opcode::p_create_vector, bld.def(v1), low, Operand(v2b));
5990 packed.push_back(dword);
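/* Worked example (illustrative): unpacked = {v2b a, v1 b, v2b c}. 'a'
 * becomes the pending low half, the two v2b halves of 'b' first complete
 * dword 0 and then start dword 1, and 'c' completes dword 1, so packed =
 * {pack(a, b.lo), pack(b.hi, c)}. A trailing lone half, handled above, is
 * padded with an undef v2b operand.
 */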
5996 should_declare_array(isel_context* ctx, enum glsl_sampler_dim sampler_dim, bool is_array)
5998 if (sampler_dim == GLSL_SAMPLER_DIM_BUF)
6000 ac_image_dim dim = ac_get_sampler_dim(ctx->options->gfx_level, sampler_dim, is_array);
6001 return dim == ac_image_cube || dim == ac_image_1darray || dim == ac_image_2darray ||
6002 dim == ac_image_2darraymsaa;
6006 image_type_to_components_count(enum glsl_sampler_dim dim, bool array)
6009 case GLSL_SAMPLER_DIM_BUF: return 1;
6010 case GLSL_SAMPLER_DIM_1D: return array ? 2 : 1;
6011 case GLSL_SAMPLER_DIM_2D: return array ? 3 : 2;
6012 case GLSL_SAMPLER_DIM_MS: return array ? 3 : 2;
6013 case GLSL_SAMPLER_DIM_3D:
6014 case GLSL_SAMPLER_DIM_CUBE: return 3;
6015 case GLSL_SAMPLER_DIM_RECT:
6016 case GLSL_SAMPLER_DIM_SUBPASS: return 2;
6017 case GLSL_SAMPLER_DIM_SUBPASS_MS: return 2;
6023 static MIMG_instruction*
6024 emit_mimg(Builder& bld, aco_opcode op, Definition dst, Temp rsrc, Operand samp,
6025 std::vector<Temp> coords, unsigned wqm_mask = 0, Operand vdata = Operand(v1))
6027 /* Limit NSA instructions to 3 dwords on GFX10 to avoid stability issues. */
6028 unsigned max_nsa_size = bld.program->gfx_level >= GFX10_3 ? 13 : 5;
6029 bool use_nsa = bld.program->gfx_level >= GFX10 && coords.size() <= max_nsa_size;
6032 Temp coord = coords[0];
6033 if (coords.size() > 1) {
6034 coord = bld.tmp(RegType::vgpr, coords.size());
6036 aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(
6037 aco_opcode::p_create_vector, Format::PSEUDO, coords.size(), 1)};
6038 for (unsigned i = 0; i < coords.size(); i++)
6039 vec->operands[i] = Operand(coords[i]);
6040 vec->definitions[0] = Definition(coord);
6041 bld.insert(std::move(vec));
6042 } else if (coord.type() == RegType::sgpr) {
6043 coord = bld.copy(bld.def(v1), coord);
6047 /* We don't need the bias, sample index, compare value or offset to be
6048 * computed in WQM, but if the p_create_vector copies the coordinates, then it
6049 * needs to be in WQM. */
6050 coord = emit_wqm(bld, coord, bld.tmp(coord.regClass()), true);
6056 for (unsigned i = 0; i < coords.size(); i++) {
6057 if (wqm_mask & (1u << i))
6058 coords[i] = emit_wqm(bld, coords[i], bld.tmp(coords[i].regClass()), true);
6061 for (Temp& coord : coords) {
6062 if (coord.type() == RegType::sgpr)
6063 coord = bld.copy(bld.def(v1), coord);
6067 aco_ptr<MIMG_instruction> mimg{
6068 create_instruction<MIMG_instruction>(op, Format::MIMG, 3 + coords.size(), dst.isTemp())};
6070 mimg->definitions[0] = dst;
6071 mimg->operands[0] = Operand(rsrc);
6072 mimg->operands[1] = samp;
6073 mimg->operands[2] = vdata;
6074 for (unsigned i = 0; i < coords.size(); i++)
6075 mimg->operands[3 + i] = Operand(coords[i]);
6077 MIMG_instruction* res = mimg.get();
6078 bld.insert(std::move(mimg));
6083 visit_bvh64_intersect_ray_amd(isel_context* ctx, nir_intrinsic_instr* instr)
6085 Builder bld(ctx->program, ctx->block);
6086 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
6087 Temp resource = get_ssa_temp(ctx, instr->src[0].ssa);
6088 Temp node = get_ssa_temp(ctx, instr->src[1].ssa);
6089 Temp tmax = get_ssa_temp(ctx, instr->src[2].ssa);
6090 Temp origin = get_ssa_temp(ctx, instr->src[3].ssa);
6091 Temp dir = get_ssa_temp(ctx, instr->src[4].ssa);
6092 Temp inv_dir = get_ssa_temp(ctx, instr->src[5].ssa);
6094 std::vector<Temp> args;
6095 args.push_back(emit_extract_vector(ctx, node, 0, v1));
6096 args.push_back(emit_extract_vector(ctx, node, 1, v1));
6097 args.push_back(as_vgpr(ctx, tmax));
6098 args.push_back(emit_extract_vector(ctx, origin, 0, v1));
6099 args.push_back(emit_extract_vector(ctx, origin, 1, v1));
6100 args.push_back(emit_extract_vector(ctx, origin, 2, v1));
6101 args.push_back(emit_extract_vector(ctx, dir, 0, v1));
6102 args.push_back(emit_extract_vector(ctx, dir, 1, v1));
6103 args.push_back(emit_extract_vector(ctx, dir, 2, v1));
6104 args.push_back(emit_extract_vector(ctx, inv_dir, 0, v1));
6105 args.push_back(emit_extract_vector(ctx, inv_dir, 1, v1));
6106 args.push_back(emit_extract_vector(ctx, inv_dir, 2, v1));
6108 MIMG_instruction* mimg = emit_mimg(bld, aco_opcode::image_bvh64_intersect_ray, Definition(dst),
6109 resource, Operand(s4), args);
6110 mimg->dim = ac_image_1d;
6116 static std::vector<Temp>
6117 get_image_coords(isel_context* ctx, const nir_intrinsic_instr* instr)
6120 Temp src0 = get_ssa_temp(ctx, instr->src[1].ssa);
6121 bool a16 = instr->src[1].ssa->bit_size == 16;
6122 RegClass rc = a16 ? v2b : v1;
6123 enum glsl_sampler_dim dim = nir_intrinsic_image_dim(instr);
6124 bool is_array = nir_intrinsic_image_array(instr);
6125 ASSERTED bool add_frag_pos =
6126 (dim == GLSL_SAMPLER_DIM_SUBPASS || dim == GLSL_SAMPLER_DIM_SUBPASS_MS);
6127 assert(!add_frag_pos && "Input attachments should be lowered.");
6128 bool is_ms = (dim == GLSL_SAMPLER_DIM_MS || dim == GLSL_SAMPLER_DIM_SUBPASS_MS);
6129 bool gfx9_1d = ctx->options->gfx_level == GFX9 && dim == GLSL_SAMPLER_DIM_1D;
6130 int count = image_type_to_components_count(dim, is_array);
6131 std::vector<Temp> coords;
6132 Builder bld(ctx->program, ctx->block);
6135 coords.emplace_back(emit_extract_vector(ctx, src0, 0, rc));
6136 coords.emplace_back(bld.copy(bld.def(rc), Operand::zero(a16 ? 2 : 4)));
6138 coords.emplace_back(emit_extract_vector(ctx, src0, 1, rc));
6140 for (int i = 0; i < count; i++)
6141 coords.emplace_back(emit_extract_vector(ctx, src0, i, rc));
6144 if (ctx->options->key.image_2d_view_of_3d &&
6145 dim == GLSL_SAMPLER_DIM_2D && !is_array) {
6146 /* The hw can't bind a slice of a 3D image as a 2D image, because it
6147 * ignores BASE_ARRAY if the target is 3D. The workaround is to read
6148 * BASE_ARRAY and set it as the 3rd address operand for all 2D images.
6150 assert(ctx->options->gfx_level == GFX9);
6151 Temp rsrc = bld.as_uniform(get_ssa_temp(ctx, instr->src[0].ssa));
6152 Temp rsrc_word5 = emit_extract_vector(ctx, rsrc, 5, v1);
6153 /* Extract the BASE_ARRAY field [0:12] from the descriptor. */
6155 bld.vop3(aco_opcode::v_bfe_u32, bld.def(v1), rsrc_word5,
6156 Operand::c32(0u), Operand::c32(13u));
6158 coords.emplace_back(emit_extract_vector(ctx, first_layer, 0, v2b));
6160 coords.emplace_back(first_layer);
6164 assert(instr->src[2].ssa->bit_size == (a16 ? 16 : 32));
6165 coords.emplace_back(get_ssa_temp_tex(ctx, instr->src[2].ssa, a16));
6168 if (instr->intrinsic == nir_intrinsic_bindless_image_load ||
6169 instr->intrinsic == nir_intrinsic_bindless_image_sparse_load ||
6170 instr->intrinsic == nir_intrinsic_bindless_image_store) {
6171 int lod_index = instr->intrinsic == nir_intrinsic_bindless_image_store ? 4 : 3;
6172 assert(instr->src[lod_index].ssa->bit_size == (a16 ? 16 : 32));
6174 nir_src_is_const(instr->src[lod_index]) && nir_src_as_uint(instr->src[lod_index]) == 0;
6177 coords.emplace_back(get_ssa_temp_tex(ctx, instr->src[lod_index].ssa, a16));
6180 return emit_pack_v1(ctx, coords);
6184 get_memory_sync_info(nir_intrinsic_instr* instr, storage_class storage, unsigned semantics)
6186 /* atomicrmw might not have NIR_INTRINSIC_ACCESS and there's nothing interesting there anyway */
6187 if (semantics & semantic_atomicrmw)
6188 return memory_sync_info(storage, semantics);
6190 unsigned access = nir_intrinsic_access(instr);
6192 if (access & ACCESS_VOLATILE)
6193 semantics |= semantic_volatile;
6194 if (access & ACCESS_CAN_REORDER)
6195 semantics |= semantic_can_reorder | semantic_private;
6197 return memory_sync_info(storage, semantics);
6201 emit_tfe_init(Builder& bld, Temp dst)
6203 Temp tmp = bld.tmp(dst.regClass());
6205 aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(
6206 aco_opcode::p_create_vector, Format::PSEUDO, dst.size(), 1)};
6207 for (unsigned i = 0; i < dst.size(); i++)
6208 vec->operands[i] = Operand::zero();
6209 vec->definitions[0] = Definition(tmp);
6210 /* Since this is fixed to an instruction's definition register, any CSE will
6211 * just create copies. Copying costs about the same as zero-initialization,
6212 * but these copies can break up clauses.
6214 vec->definitions[0].setNoCSE(true);
6215 bld.insert(std::move(vec));
6217 return Operand(tmp);
6221 visit_image_load(isel_context* ctx, nir_intrinsic_instr* instr)
6223 Builder bld(ctx->program, ctx->block);
6224 const enum glsl_sampler_dim dim = nir_intrinsic_image_dim(instr);
6225 bool is_array = nir_intrinsic_image_array(instr);
6226 bool is_sparse = instr->intrinsic == nir_intrinsic_bindless_image_sparse_load;
6227 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
6229 memory_sync_info sync = get_memory_sync_info(instr, storage_image, 0);
6230 unsigned access = nir_intrinsic_access(instr);
6232 unsigned result_size = instr->dest.ssa.num_components - is_sparse;
6233 unsigned expand_mask =
6234 nir_ssa_def_components_read(&instr->dest.ssa) & u_bit_consecutive(0, result_size);
6235 expand_mask = MAX2(expand_mask, 1); /* this can be zero in the case of sparse image loads */
6236 if (dim == GLSL_SAMPLER_DIM_BUF)
6237 expand_mask = (1u << util_last_bit(expand_mask)) - 1u;
6238 unsigned dmask = expand_mask;
6239 if (instr->dest.ssa.bit_size == 64) {
6241 /* only R64_UINT and R64_SINT supported. x is in xy of the result, w in zw */
6242 dmask = ((expand_mask & 0x1) ? 0x3 : 0) | ((expand_mask & 0x8) ? 0xc : 0);
6243 }
6244 if (is_sparse)
6245 expand_mask |= 1 << result_size;
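/* Worked example (annotation): a 64-bit load where NIR reads components x and
 * w gives expand_mask = 0b1001. Each 64-bit channel occupies two dwords, so
 * the widened dmask is (0x3 | 0xc) = 0xf, i.e. xy holds x and zw holds w.
 */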
6247 bool d16 = instr->dest.ssa.bit_size == 16;
6248 assert(!d16 || !is_sparse);
6250 unsigned num_bytes = util_bitcount(dmask) * (d16 ? 2 : 4) + is_sparse * 4;
6252 Temp tmp;
6253 if (num_bytes == dst.bytes() && dst.type() == RegType::vgpr)
6254 tmp = dst;
6255 else
6256 tmp = bld.tmp(RegClass::get(RegType::vgpr, num_bytes));
6258 Temp resource = bld.as_uniform(get_ssa_temp(ctx, instr->src[0].ssa));
6260 if (dim == GLSL_SAMPLER_DIM_BUF) {
6261 Temp vindex = emit_extract_vector(ctx, get_ssa_temp(ctx, instr->src[1].ssa), 0, v1);
6263 aco_opcode opcode;
6264 if (!d16) {
6265 switch (util_bitcount(dmask)) {
6266 case 1: opcode = aco_opcode::buffer_load_format_x; break;
6267 case 2: opcode = aco_opcode::buffer_load_format_xy; break;
6268 case 3: opcode = aco_opcode::buffer_load_format_xyz; break;
6269 case 4: opcode = aco_opcode::buffer_load_format_xyzw; break;
6270 default: unreachable(">4 channel buffer image load");
6271 }
6272 } else {
6273 switch (util_bitcount(dmask)) {
6274 case 1: opcode = aco_opcode::buffer_load_format_d16_x; break;
6275 case 2: opcode = aco_opcode::buffer_load_format_d16_xy; break;
6276 case 3: opcode = aco_opcode::buffer_load_format_d16_xyz; break;
6277 case 4: opcode = aco_opcode::buffer_load_format_d16_xyzw; break;
6278 default: unreachable(">4 channel buffer image load");
6279 }
6280 }
6281 aco_ptr<MUBUF_instruction> load{
6282 create_instruction<MUBUF_instruction>(opcode, Format::MUBUF, 3 + is_sparse, 1)};
6283 load->operands[0] = Operand(resource);
6284 load->operands[1] = Operand(vindex);
6285 load->operands[2] = Operand::c32(0);
6286 load->definitions[0] = Definition(tmp);
6287 load->idxen = true;
6288 load->glc = access & (ACCESS_VOLATILE | ACCESS_COHERENT);
6289 load->dlc =
6290 load->glc && (ctx->options->gfx_level == GFX10 || ctx->options->gfx_level == GFX10_3);
6291 load->sync = sync;
6292 load->tfe = is_sparse;
6293 if (load->tfe)
6294 load->operands[3] = emit_tfe_init(bld, tmp);
6295 ctx->block->instructions.emplace_back(std::move(load));
6296 } else {
6297 std::vector<Temp> coords = get_image_coords(ctx, instr);
6299 bool level_zero = nir_src_is_const(instr->src[3]) && nir_src_as_uint(instr->src[3]) == 0;
6300 aco_opcode opcode = level_zero ? aco_opcode::image_load : aco_opcode::image_load_mip;
6302 Operand vdata = is_sparse ? emit_tfe_init(bld, tmp) : Operand(v1);
6303 MIMG_instruction* load =
6304 emit_mimg(bld, opcode, Definition(tmp), resource, Operand(s4), coords, 0, vdata);
6305 load->glc = access & (ACCESS_VOLATILE | ACCESS_COHERENT) ? 1 : 0;
6306 load->dlc =
6307 load->glc && (ctx->options->gfx_level == GFX10 || ctx->options->gfx_level == GFX10_3);
6308 load->dim = ac_get_image_dim(ctx->options->gfx_level, dim, is_array);
6309 load->a16 = instr->src[1].ssa->bit_size == 16;
6310 load->d16 = d16;
6311 load->dmask = dmask;
6312 load->unrm = true;
6313 load->da = should_declare_array(ctx, dim, is_array);
6314 load->sync = sync;
6315 load->tfe = is_sparse;
6316 }
6318 if (is_sparse && instr->dest.ssa.bit_size == 64) {
6319 /* The result components are 64-bit but the sparse residency code is
6320 * 32-bit. So add a zero to the end so expand_vector() works correctly.
6321 */
6322 tmp = bld.pseudo(aco_opcode::p_create_vector, bld.def(RegType::vgpr, tmp.size() + 1), tmp,
6323 Operand::zero());
6324 }
6326 expand_vector(ctx, tmp, dst, instr->dest.ssa.num_components, expand_mask,
6327 instr->dest.ssa.bit_size == 64);
6328 }
6330 void
6331 visit_image_store(isel_context* ctx, nir_intrinsic_instr* instr)
6332 {
6333 Builder bld(ctx->program, ctx->block);
6334 const enum glsl_sampler_dim dim = nir_intrinsic_image_dim(instr);
6335 bool is_array = nir_intrinsic_image_array(instr);
6336 Temp data = get_ssa_temp(ctx, instr->src[3].ssa);
6337 bool d16 = instr->src[3].ssa->bit_size == 16;
6339 /* only R64_UINT and R64_SINT supported */
6340 if (instr->src[3].ssa->bit_size == 64 && data.bytes() > 8)
6341 data = emit_extract_vector(ctx, data, 0, RegClass(data.type(), 2));
6342 data = as_vgpr(ctx, data);
6344 uint32_t num_components = d16 ? instr->src[3].ssa->num_components : data.size();
6346 memory_sync_info sync = get_memory_sync_info(instr, storage_image, 0);
6347 unsigned access = nir_intrinsic_access(instr);
6348 bool glc = ctx->options->gfx_level == GFX6 ||
6349 ((access & (ACCESS_VOLATILE | ACCESS_COHERENT | ACCESS_NON_READABLE)) &&
6350 ctx->program->gfx_level < GFX11);
6352 if (dim == GLSL_SAMPLER_DIM_BUF) {
6353 Temp rsrc = bld.as_uniform(get_ssa_temp(ctx, instr->src[0].ssa));
6354 Temp vindex = emit_extract_vector(ctx, get_ssa_temp(ctx, instr->src[1].ssa), 0, v1);
6355 aco_opcode opcode;
6356 if (!d16) {
6357 switch (num_components) {
6358 case 1: opcode = aco_opcode::buffer_store_format_x; break;
6359 case 2: opcode = aco_opcode::buffer_store_format_xy; break;
6360 case 3: opcode = aco_opcode::buffer_store_format_xyz; break;
6361 case 4: opcode = aco_opcode::buffer_store_format_xyzw; break;
6362 default: unreachable(">4 channel buffer image store");
6363 }
6364 } else {
6365 switch (num_components) {
6366 case 1: opcode = aco_opcode::buffer_store_format_d16_x; break;
6367 case 2: opcode = aco_opcode::buffer_store_format_d16_xy; break;
6368 case 3: opcode = aco_opcode::buffer_store_format_d16_xyz; break;
6369 case 4: opcode = aco_opcode::buffer_store_format_d16_xyzw; break;
6370 default: unreachable(">4 channel buffer image store");
6371 }
6372 }
6373 aco_ptr<MUBUF_instruction> store{
6374 create_instruction<MUBUF_instruction>(opcode, Format::MUBUF, 4, 0)};
6375 store->operands[0] = Operand(rsrc);
6376 store->operands[1] = Operand(vindex);
6377 store->operands[2] = Operand::c32(0);
6378 store->operands[3] = Operand(data);
6379 store->idxen = true;
6380 store->glc = glc;
6381 store->dlc = false;
6382 store->disable_wqm = true;
6383 store->sync = sync;
6384 ctx->program->needs_exact = true;
6385 ctx->block->instructions.emplace_back(std::move(store));
6386 return;
6387 }
6389 assert(data.type() == RegType::vgpr);
6390 std::vector<Temp> coords = get_image_coords(ctx, instr);
6391 Temp resource = bld.as_uniform(get_ssa_temp(ctx, instr->src[0].ssa));
6393 bool level_zero = nir_src_is_const(instr->src[4]) && nir_src_as_uint(instr->src[4]) == 0;
6394 aco_opcode opcode = level_zero ? aco_opcode::image_store : aco_opcode::image_store_mip;
6396 uint32_t dmask = BITFIELD_MASK(num_components);
6397 /* remove zero/undef elements from data, components which aren't in dmask
6398 * are zeroed anyway
6399 */
6400 if (instr->src[3].ssa->bit_size == 32 || instr->src[3].ssa->bit_size == 16) {
6401 for (uint32_t i = 0; i < instr->num_components; i++) {
6402 nir_ssa_scalar comp = nir_ssa_scalar_resolved(instr->src[3].ssa, i);
6403 if ((nir_ssa_scalar_is_const(comp) && nir_ssa_scalar_as_uint(comp) == 0) ||
6404 nir_ssa_scalar_is_undef(comp))
6405 dmask &= ~BITFIELD_BIT(i);
6406 }
6408 /* dmask cannot be 0, at least one vgpr is always read */
6409 if (dmask == 0)
6410 dmask = 1;
6412 if (dmask != BITFIELD_MASK(num_components)) {
6413 uint32_t dmask_count = util_bitcount(dmask);
6414 RegClass rc = d16 ? v2b : v1;
6415 if (dmask_count == 1) {
6416 data = emit_extract_vector(ctx, data, ffs(dmask) - 1, rc);
6417 } else {
6418 aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(
6419 aco_opcode::p_create_vector, Format::PSEUDO, dmask_count, 1)};
6420 uint32_t index = 0;
6421 u_foreach_bit(bit, dmask) {
6422 vec->operands[index++] = Operand(emit_extract_vector(ctx, data, bit, rc));
6423 }
6424 data = bld.tmp(RegClass::get(RegType::vgpr, dmask_count * rc.bytes()));
6425 vec->definitions[0] = Definition(data);
6426 bld.insert(std::move(vec));
6427 }
6428 }
6429 }
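/* Example (annotation): storing vec4(r, 0, b, a) yields dmask = 0b1101; the
 * repacking above builds a 3-component vector (r, b, a), so only three VGPRs
 * are read by the image store instead of four.
 */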
6431 MIMG_instruction* store =
6432 emit_mimg(bld, opcode, Definition(), resource, Operand(s4), coords, 0, Operand(data));
6433 store->glc = glc;
6434 store->dlc = false;
6435 store->dim = ac_get_image_dim(ctx->options->gfx_level, dim, is_array);
6436 store->a16 = instr->src[1].ssa->bit_size == 16;
6437 store->d16 = d16;
6438 store->dmask = dmask;
6439 store->unrm = true;
6440 store->da = should_declare_array(ctx, dim, is_array);
6441 store->disable_wqm = true;
6442 store->sync = sync;
6443 ctx->program->needs_exact = true;
6444 }
6446 void
6448 visit_image_atomic(isel_context* ctx, nir_intrinsic_instr* instr)
6449 {
6450 bool return_previous = !nir_ssa_def_is_unused(&instr->dest.ssa);
6451 const enum glsl_sampler_dim dim = nir_intrinsic_image_dim(instr);
6452 bool is_array = nir_intrinsic_image_array(instr);
6453 Builder bld(ctx->program, ctx->block);
6455 Temp data = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[3].ssa));
6456 bool cmpswap = instr->intrinsic == nir_intrinsic_bindless_image_atomic_comp_swap;
6457 bool is_64bit = data.bytes() == 8;
6458 assert((data.bytes() == 4 || data.bytes() == 8) && "only 32/64-bit image atomics implemented.");
6460 if (cmpswap)
6461 data = bld.pseudo(aco_opcode::p_create_vector, bld.def(is_64bit ? v4 : v2),
6462 get_ssa_temp(ctx, instr->src[4].ssa), data);
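/* Annotation: for comp_swap the hardware reads the compare/swap pair from one
 * contiguous vdata vector, so the second value operand (src[4]) is packed
 * together with the data above, doubling its size (v2, or v4 for 64-bit).
 */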
6464 aco_opcode buf_op, buf_op64, image_op;
6465 switch (instr->intrinsic) {
6466 case nir_intrinsic_bindless_image_atomic_add:
6467 buf_op = aco_opcode::buffer_atomic_add;
6468 buf_op64 = aco_opcode::buffer_atomic_add_x2;
6469 image_op = aco_opcode::image_atomic_add;
6470 break;
6471 case nir_intrinsic_bindless_image_atomic_umin:
6472 buf_op = aco_opcode::buffer_atomic_umin;
6473 buf_op64 = aco_opcode::buffer_atomic_umin_x2;
6474 image_op = aco_opcode::image_atomic_umin;
6475 break;
6476 case nir_intrinsic_bindless_image_atomic_imin:
6477 buf_op = aco_opcode::buffer_atomic_smin;
6478 buf_op64 = aco_opcode::buffer_atomic_smin_x2;
6479 image_op = aco_opcode::image_atomic_smin;
6480 break;
6481 case nir_intrinsic_bindless_image_atomic_umax:
6482 buf_op = aco_opcode::buffer_atomic_umax;
6483 buf_op64 = aco_opcode::buffer_atomic_umax_x2;
6484 image_op = aco_opcode::image_atomic_umax;
6485 break;
6486 case nir_intrinsic_bindless_image_atomic_imax:
6487 buf_op = aco_opcode::buffer_atomic_smax;
6488 buf_op64 = aco_opcode::buffer_atomic_smax_x2;
6489 image_op = aco_opcode::image_atomic_smax;
6490 break;
6491 case nir_intrinsic_bindless_image_atomic_and:
6492 buf_op = aco_opcode::buffer_atomic_and;
6493 buf_op64 = aco_opcode::buffer_atomic_and_x2;
6494 image_op = aco_opcode::image_atomic_and;
6495 break;
6496 case nir_intrinsic_bindless_image_atomic_or:
6497 buf_op = aco_opcode::buffer_atomic_or;
6498 buf_op64 = aco_opcode::buffer_atomic_or_x2;
6499 image_op = aco_opcode::image_atomic_or;
6500 break;
6501 case nir_intrinsic_bindless_image_atomic_xor:
6502 buf_op = aco_opcode::buffer_atomic_xor;
6503 buf_op64 = aco_opcode::buffer_atomic_xor_x2;
6504 image_op = aco_opcode::image_atomic_xor;
6505 break;
6506 case nir_intrinsic_bindless_image_atomic_exchange:
6507 buf_op = aco_opcode::buffer_atomic_swap;
6508 buf_op64 = aco_opcode::buffer_atomic_swap_x2;
6509 image_op = aco_opcode::image_atomic_swap;
6510 break;
6511 case nir_intrinsic_bindless_image_atomic_comp_swap:
6512 buf_op = aco_opcode::buffer_atomic_cmpswap;
6513 buf_op64 = aco_opcode::buffer_atomic_cmpswap_x2;
6514 image_op = aco_opcode::image_atomic_cmpswap;
6515 break;
6516 case nir_intrinsic_bindless_image_atomic_fmin:
6517 buf_op = aco_opcode::buffer_atomic_fmin;
6518 buf_op64 = aco_opcode::buffer_atomic_fmin_x2;
6519 image_op = aco_opcode::image_atomic_fmin;
6520 break;
6521 case nir_intrinsic_bindless_image_atomic_fmax:
6522 buf_op = aco_opcode::buffer_atomic_fmax;
6523 buf_op64 = aco_opcode::buffer_atomic_fmax_x2;
6524 image_op = aco_opcode::image_atomic_fmax;
6525 break;
6526 default:
6527 unreachable("visit_image_atomic should only be called with "
6528 "nir_intrinsic_bindless_image_atomic_* instructions.");
6529 }
6531 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
6532 memory_sync_info sync = get_memory_sync_info(instr, storage_image, semantic_atomicrmw);
6534 if (dim == GLSL_SAMPLER_DIM_BUF) {
6535 Temp vindex = emit_extract_vector(ctx, get_ssa_temp(ctx, instr->src[1].ssa), 0, v1);
6536 Temp resource = bld.as_uniform(get_ssa_temp(ctx, instr->src[0].ssa));
6537 // assert(ctx->options->gfx_level < GFX9 && "GFX9 stride size workaround not yet
6538 // implemented.");
6539 aco_ptr<MUBUF_instruction> mubuf{create_instruction<MUBUF_instruction>(
6540 is_64bit ? buf_op64 : buf_op, Format::MUBUF, 4, return_previous ? 1 : 0)};
6541 mubuf->operands[0] = Operand(resource);
6542 mubuf->operands[1] = Operand(vindex);
6543 mubuf->operands[2] = Operand::c32(0);
6544 mubuf->operands[3] = Operand(data);
6545 Definition def =
6546 return_previous ? (cmpswap ? bld.def(data.regClass()) : Definition(dst)) : Definition();
6547 if (return_previous)
6548 mubuf->definitions[0] = def;
6550 mubuf->idxen = true;
6551 mubuf->glc = return_previous;
6552 mubuf->dlc = false; /* Not needed for atomics */
6553 mubuf->disable_wqm = true;
6555 ctx->program->needs_exact = true;
6556 ctx->block->instructions.emplace_back(std::move(mubuf));
6557 if (return_previous && cmpswap)
6558 bld.pseudo(aco_opcode::p_extract_vector, Definition(dst), def.getTemp(), Operand::zero());
6559 return;
6560 }
6562 std::vector<Temp> coords = get_image_coords(ctx, instr);
6563 Temp resource = bld.as_uniform(get_ssa_temp(ctx, instr->src[0].ssa));
6564 Definition def =
6565 return_previous ? (cmpswap ? bld.def(data.regClass()) : Definition(dst)) : Definition();
6566 MIMG_instruction* mimg =
6567 emit_mimg(bld, image_op, def, resource, Operand(s4), coords, 0, Operand(data));
6568 mimg->glc = return_previous;
6569 mimg->dlc = false; /* Not needed for atomics */
6570 mimg->dim = ac_get_image_dim(ctx->options->gfx_level, dim, is_array);
6571 mimg->dmask = (1 << data.size()) - 1;
6572 mimg->a16 = instr->src[1].ssa->bit_size == 16;
6573 mimg->unrm = true;
6574 mimg->da = should_declare_array(ctx, dim, is_array);
6575 mimg->disable_wqm = true;
6576 mimg->sync = sync;
6577 ctx->program->needs_exact = true;
6578 if (return_previous && cmpswap)
6579 bld.pseudo(aco_opcode::p_extract_vector, Definition(dst), def.getTemp(), Operand::zero());
6580 }
6582 void
6584 visit_load_ssbo(isel_context* ctx, nir_intrinsic_instr* instr)
6585 {
6586 Builder bld(ctx->program, ctx->block);
6587 unsigned num_components = instr->num_components;
6589 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
6590 Temp rsrc = bld.as_uniform(get_ssa_temp(ctx, instr->src[0].ssa));
6592 unsigned access = nir_intrinsic_access(instr);
6593 bool glc = access & (ACCESS_VOLATILE | ACCESS_COHERENT);
6594 unsigned size = instr->dest.ssa.bit_size / 8;
6596 bool allow_smem = access & ACCESS_CAN_REORDER;
6598 load_buffer(ctx, num_components, size, dst, rsrc, get_ssa_temp(ctx, instr->src[1].ssa),
6599 nir_intrinsic_align_mul(instr), nir_intrinsic_align_offset(instr), glc, allow_smem,
6600 get_memory_sync_info(instr, storage_buffer, 0));
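/* Note (added annotation): allow_smem mirrors the NIR ACCESS_CAN_REORDER
 * flag; loads that may not be reordered (e.g. volatile or coherent ones) have
 * to stay on the VMEM path, since SMEM goes through a separate scalar cache
 * that is not kept coherent with VMEM writes.
 */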
6601 }
6603 void
6604 visit_store_ssbo(isel_context* ctx, nir_intrinsic_instr* instr)
6605 {
6606 Builder bld(ctx->program, ctx->block);
6607 Temp data = get_ssa_temp(ctx, instr->src[0].ssa);
6608 unsigned elem_size_bytes = instr->src[0].ssa->bit_size / 8;
6609 unsigned writemask = util_widen_mask(nir_intrinsic_write_mask(instr), elem_size_bytes);
6610 Temp offset = get_ssa_temp(ctx, instr->src[2].ssa);
6612 Temp rsrc = bld.as_uniform(get_ssa_temp(ctx, instr->src[1].ssa));
6614 memory_sync_info sync = get_memory_sync_info(instr, storage_buffer, 0);
6615 bool glc =
6616 (nir_intrinsic_access(instr) & (ACCESS_VOLATILE | ACCESS_COHERENT | ACCESS_NON_READABLE)) &&
6617 ctx->program->gfx_level < GFX11;
6619 unsigned write_count = 0;
6620 Temp write_datas[32];
6621 unsigned offsets[32];
6622 split_buffer_store(ctx, instr, false, RegType::vgpr, data, writemask, 16, &write_count,
6623 write_datas, offsets);
6625 /* GFX6-7 are affected by a hw bug that prevents address clamping to work
6626 * correctly when the SGPR offset is used.
6627 */
6628 if (offset.type() == RegType::sgpr && ctx->options->gfx_level < GFX8)
6629 offset = as_vgpr(ctx, offset);
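/* Annotation: the copy above trades the SGPR soffset operand for a VGPR
 * voffset operand on GFX6-7, so address clamping behaves correctly; the store
 * emitted below then sets offen instead of passing an SGPR offset.
 */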
6631 for (unsigned i = 0; i < write_count; i++) {
6632 aco_opcode op = get_buffer_store_op(write_datas[i].bytes());
6634 aco_ptr<MUBUF_instruction> store{
6635 create_instruction<MUBUF_instruction>(op, Format::MUBUF, 4, 0)};
6636 store->operands[0] = Operand(rsrc);
6637 store->operands[1] = offset.type() == RegType::vgpr ? Operand(offset) : Operand(v1);
6638 store->operands[2] = offset.type() == RegType::sgpr ? Operand(offset) : Operand::c32(0);
6639 store->operands[3] = Operand(write_datas[i]);
6640 store->offset = offsets[i];
6641 store->offen = (offset.type() == RegType::vgpr);
6642 store->glc = glc;
6643 store->dlc = false;
6644 store->disable_wqm = true;
6645 store->sync = sync;
6646 ctx->program->needs_exact = true;
6647 ctx->block->instructions.emplace_back(std::move(store));
6648 }
6649 }
6651 void
6652 visit_atomic_ssbo(isel_context* ctx, nir_intrinsic_instr* instr)
6653 {
6654 Builder bld(ctx->program, ctx->block);
6655 bool return_previous = !nir_ssa_def_is_unused(&instr->dest.ssa);
6656 Temp data = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[2].ssa));
6657 bool cmpswap = instr->intrinsic == nir_intrinsic_ssbo_atomic_comp_swap;
6659 if (cmpswap)
6660 data = bld.pseudo(aco_opcode::p_create_vector, bld.def(RegType::vgpr, data.size() * 2),
6661 get_ssa_temp(ctx, instr->src[3].ssa), data);
6663 Temp offset = get_ssa_temp(ctx, instr->src[1].ssa);
6664 Temp rsrc = bld.as_uniform(get_ssa_temp(ctx, instr->src[0].ssa));
6666 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
6668 aco_opcode op32, op64;
6669 switch (instr->intrinsic) {
6670 case nir_intrinsic_ssbo_atomic_add:
6671 op32 = aco_opcode::buffer_atomic_add;
6672 op64 = aco_opcode::buffer_atomic_add_x2;
6673 break;
6674 case nir_intrinsic_ssbo_atomic_imin:
6675 op32 = aco_opcode::buffer_atomic_smin;
6676 op64 = aco_opcode::buffer_atomic_smin_x2;
6677 break;
6678 case nir_intrinsic_ssbo_atomic_umin:
6679 op32 = aco_opcode::buffer_atomic_umin;
6680 op64 = aco_opcode::buffer_atomic_umin_x2;
6681 break;
6682 case nir_intrinsic_ssbo_atomic_imax:
6683 op32 = aco_opcode::buffer_atomic_smax;
6684 op64 = aco_opcode::buffer_atomic_smax_x2;
6685 break;
6686 case nir_intrinsic_ssbo_atomic_umax:
6687 op32 = aco_opcode::buffer_atomic_umax;
6688 op64 = aco_opcode::buffer_atomic_umax_x2;
6689 break;
6690 case nir_intrinsic_ssbo_atomic_and:
6691 op32 = aco_opcode::buffer_atomic_and;
6692 op64 = aco_opcode::buffer_atomic_and_x2;
6693 break;
6694 case nir_intrinsic_ssbo_atomic_or:
6695 op32 = aco_opcode::buffer_atomic_or;
6696 op64 = aco_opcode::buffer_atomic_or_x2;
6697 break;
6698 case nir_intrinsic_ssbo_atomic_xor:
6699 op32 = aco_opcode::buffer_atomic_xor;
6700 op64 = aco_opcode::buffer_atomic_xor_x2;
6701 break;
6702 case nir_intrinsic_ssbo_atomic_exchange:
6703 op32 = aco_opcode::buffer_atomic_swap;
6704 op64 = aco_opcode::buffer_atomic_swap_x2;
6705 break;
6706 case nir_intrinsic_ssbo_atomic_comp_swap:
6707 op32 = aco_opcode::buffer_atomic_cmpswap;
6708 op64 = aco_opcode::buffer_atomic_cmpswap_x2;
6709 break;
6710 case nir_intrinsic_ssbo_atomic_fmin:
6711 op32 = aco_opcode::buffer_atomic_fmin;
6712 op64 = aco_opcode::buffer_atomic_fmin_x2;
6713 break;
6714 case nir_intrinsic_ssbo_atomic_fmax:
6715 op32 = aco_opcode::buffer_atomic_fmax;
6716 op64 = aco_opcode::buffer_atomic_fmax_x2;
6717 break;
6718 default:
6719 unreachable(
6720 "visit_atomic_ssbo should only be called with nir_intrinsic_ssbo_atomic_* instructions.");
6721 }
6722 aco_opcode op = instr->dest.ssa.bit_size == 32 ? op32 : op64;
6723 aco_ptr<MUBUF_instruction> mubuf{
6724 create_instruction<MUBUF_instruction>(op, Format::MUBUF, 4, return_previous ? 1 : 0)};
6725 mubuf->operands[0] = Operand(rsrc);
6726 mubuf->operands[1] = offset.type() == RegType::vgpr ? Operand(offset) : Operand(v1);
6727 mubuf->operands[2] = offset.type() == RegType::sgpr ? Operand(offset) : Operand::c32(0);
6728 mubuf->operands[3] = Operand(data);
6729 Definition def =
6730 return_previous ? (cmpswap ? bld.def(data.regClass()) : Definition(dst)) : Definition();
6731 if (return_previous)
6732 mubuf->definitions[0] = def;
6734 mubuf->offen = (offset.type() == RegType::vgpr);
6735 mubuf->glc = return_previous;
6736 mubuf->dlc = false; /* Not needed for atomics */
6737 mubuf->disable_wqm = true;
6738 mubuf->sync = get_memory_sync_info(instr, storage_buffer, semantic_atomicrmw);
6739 ctx->program->needs_exact = true;
6740 ctx->block->instructions.emplace_back(std::move(mubuf));
6741 if (return_previous && cmpswap)
6742 bld.pseudo(aco_opcode::p_extract_vector, Definition(dst), def.getTemp(), Operand::zero());
6743 }
6745 void
6746 parse_global(isel_context* ctx, nir_intrinsic_instr* intrin, Temp* address, uint32_t* const_offset,
6747 Temp* offset)
6748 {
6749 bool is_store = intrin->intrinsic == nir_intrinsic_store_global_amd;
6750 *address = get_ssa_temp(ctx, intrin->src[is_store ? 1 : 0].ssa);
6752 *const_offset = nir_intrinsic_base(intrin);
6754 unsigned num_src = nir_intrinsic_infos[intrin->intrinsic].num_srcs;
6755 nir_src offset_src = intrin->src[num_src - 1];
6756 if (!nir_src_is_const(offset_src) || nir_src_as_uint(offset_src))
6757 *offset = get_ssa_temp(ctx, offset_src.ssa);
6758 else
6759 *offset = Temp();
6760 }
6762 void
6763 visit_load_global(isel_context* ctx, nir_intrinsic_instr* instr)
6764 {
6765 Builder bld(ctx->program, ctx->block);
6766 unsigned num_components = instr->num_components;
6767 unsigned component_size = instr->dest.ssa.bit_size / 8;
6769 Temp addr, offset;
6770 uint32_t const_offset;
6771 parse_global(ctx, instr, &addr, &const_offset, &offset);
6773 LoadEmitInfo info = {Operand(addr), get_ssa_temp(ctx, &instr->dest.ssa), num_components,
6774 component_size};
6775 if (offset.id()) {
6776 info.resource = addr;
6777 info.offset = Operand(offset);
6778 }
6779 info.const_offset = const_offset;
6780 info.glc = nir_intrinsic_access(instr) & (ACCESS_VOLATILE | ACCESS_COHERENT);
6781 info.align_mul = nir_intrinsic_align_mul(instr);
6782 info.align_offset = nir_intrinsic_align_offset(instr);
6783 info.sync = get_memory_sync_info(instr, storage_buffer, 0);
6785 /* Don't expand global loads when they use MUBUF or SMEM.
6786 * Global loads don't have the bounds checking that buffer loads have that
6787 * makes this safe.
6788 */
6789 unsigned align = nir_intrinsic_align(instr);
6790 bool byte_align_for_smem_mubuf =
6791 can_use_byte_align_for_global_load(num_components, component_size, align, false);
6793 /* VMEM stores don't update the SMEM cache and it's difficult to prove that
6794 * it's safe to use SMEM */
6795 bool can_use_smem =
6796 (nir_intrinsic_access(instr) & ACCESS_NON_WRITEABLE) && byte_align_for_smem_mubuf;
6797 if (info.dst.type() == RegType::vgpr || (info.glc && ctx->options->gfx_level < GFX8) ||
6798 !can_use_smem) {
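/* Annotation: in effect SMEM is only considered for loads that are provably
 * read-only for the whole dispatch; e.g. an SSBO declared `readonly` carries
 * ACCESS_NON_WRITEABLE and, with suitable alignment, may become an s_load_*.
 */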
6799 EmitLoadParameters params = global_load_params;
6800 params.byte_align_loads = ctx->options->gfx_level > GFX6 || byte_align_for_smem_mubuf;
6801 emit_load(ctx, bld, info, params);
6802 } else {
6803 if (info.resource.id())
6804 info.resource = bld.as_uniform(info.resource);
6805 info.offset = Operand(bld.as_uniform(info.offset));
6806 emit_load(ctx, bld, info, smem_load_params);
6807 }
6808 }
6810 void
6811 visit_store_global(isel_context* ctx, nir_intrinsic_instr* instr)
6812 {
6813 Builder bld(ctx->program, ctx->block);
6814 unsigned elem_size_bytes = instr->src[0].ssa->bit_size / 8;
6815 unsigned writemask = util_widen_mask(nir_intrinsic_write_mask(instr), elem_size_bytes);
6817 Temp data = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[0].ssa));
6818 memory_sync_info sync = get_memory_sync_info(instr, storage_buffer, 0);
6819 bool glc =
6820 (nir_intrinsic_access(instr) & (ACCESS_VOLATILE | ACCESS_COHERENT | ACCESS_NON_READABLE)) &&
6821 ctx->program->gfx_level < GFX11;
6823 unsigned write_count = 0;
6824 Temp write_datas[32];
6825 unsigned offsets[32];
6826 split_buffer_store(ctx, instr, false, RegType::vgpr, data, writemask, 16, &write_count,
6827 write_datas, offsets);
6829 Temp addr, offset;
6830 uint32_t const_offset;
6831 parse_global(ctx, instr, &addr, &const_offset, &offset);
6833 for (unsigned i = 0; i < write_count; i++) {
6834 Temp write_address = addr;
6835 uint32_t write_const_offset = const_offset;
6836 Temp write_offset = offset;
6837 lower_global_address(bld, offsets[i], &write_address, &write_const_offset, &write_offset);
6839 if (ctx->options->gfx_level >= GFX7) {
6840 bool global = ctx->options->gfx_level >= GFX9;
6841 aco_opcode op;
6842 switch (write_datas[i].bytes()) {
6843 case 1: op = global ? aco_opcode::global_store_byte : aco_opcode::flat_store_byte; break;
6844 case 2: op = global ? aco_opcode::global_store_short : aco_opcode::flat_store_short; break;
6845 case 4: op = global ? aco_opcode::global_store_dword : aco_opcode::flat_store_dword; break;
6846 case 8:
6847 op = global ? aco_opcode::global_store_dwordx2 : aco_opcode::flat_store_dwordx2;
6848 break;
6849 case 12:
6850 op = global ? aco_opcode::global_store_dwordx3 : aco_opcode::flat_store_dwordx3;
6851 break;
6852 case 16:
6853 op = global ? aco_opcode::global_store_dwordx4 : aco_opcode::flat_store_dwordx4;
6854 break;
6855 default: unreachable("store_global not implemented for this size.");
6856 }
6858 aco_ptr<FLAT_instruction> flat{
6859 create_instruction<FLAT_instruction>(op, global ? Format::GLOBAL : Format::FLAT, 3, 0)};
6860 if (write_address.regClass() == s2) {
6861 assert(global && write_offset.id() && write_offset.type() == RegType::vgpr);
6862 flat->operands[0] = Operand(write_offset);
6863 flat->operands[1] = Operand(write_address);
6864 } else {
6865 assert(write_address.type() == RegType::vgpr && !write_offset.id());
6866 flat->operands[0] = Operand(write_address);
6867 flat->operands[1] = Operand(s1);
6868 }
6869 flat->operands[2] = Operand(write_datas[i]);
6870 flat->glc = glc;
6871 flat->dlc = false;
6872 assert(global || !write_const_offset);
6873 flat->offset = write_const_offset;
6874 flat->disable_wqm = true;
6875 flat->sync = sync;
6876 ctx->program->needs_exact = true;
6877 ctx->block->instructions.emplace_back(std::move(flat));
6878 } else {
6879 assert(ctx->options->gfx_level == GFX6);
6881 aco_opcode op = get_buffer_store_op(write_datas[i].bytes());
6883 Temp rsrc = get_gfx6_global_rsrc(bld, write_address);
6885 aco_ptr<MUBUF_instruction> mubuf{
6886 create_instruction<MUBUF_instruction>(op, Format::MUBUF, 4, 0)};
6887 mubuf->operands[0] = Operand(rsrc);
6888 mubuf->operands[1] =
6889 write_address.type() == RegType::vgpr ? Operand(write_address) : Operand(v1);
6890 mubuf->operands[2] = Operand(write_offset);
6891 mubuf->operands[3] = Operand(write_datas[i]);
6892 mubuf->glc = glc;
6893 mubuf->dlc = false;
6894 mubuf->offset = write_const_offset;
6895 mubuf->addr64 = write_address.type() == RegType::vgpr;
6896 mubuf->disable_wqm = true;
6897 mubuf->sync = sync;
6898 ctx->program->needs_exact = true;
6899 ctx->block->instructions.emplace_back(std::move(mubuf));
6900 }
6901 }
6902 }
6904 void
6905 visit_global_atomic(isel_context* ctx, nir_intrinsic_instr* instr)
6906 {
6907 Builder bld(ctx->program, ctx->block);
6908 bool return_previous = !nir_ssa_def_is_unused(&instr->dest.ssa);
6909 Temp data = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[1].ssa));
6910 bool cmpswap = instr->intrinsic == nir_intrinsic_global_atomic_comp_swap_amd;
6912 if (cmpswap)
6913 data = bld.pseudo(aco_opcode::p_create_vector, bld.def(RegType::vgpr, data.size() * 2),
6914 get_ssa_temp(ctx, instr->src[2].ssa), data);
6916 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
6918 aco_opcode op32, op64;
6920 Temp addr, offset;
6921 uint32_t const_offset;
6922 parse_global(ctx, instr, &addr, &const_offset, &offset);
6923 lower_global_address(bld, 0, &addr, &const_offset, &offset);
6925 if (ctx->options->gfx_level >= GFX7) {
6926 bool global = ctx->options->gfx_level >= GFX9;
6927 switch (instr->intrinsic) {
6928 case nir_intrinsic_global_atomic_add_amd:
6929 op32 = global ? aco_opcode::global_atomic_add : aco_opcode::flat_atomic_add;
6930 op64 = global ? aco_opcode::global_atomic_add_x2 : aco_opcode::flat_atomic_add_x2;
6931 break;
6932 case nir_intrinsic_global_atomic_imin_amd:
6933 op32 = global ? aco_opcode::global_atomic_smin : aco_opcode::flat_atomic_smin;
6934 op64 = global ? aco_opcode::global_atomic_smin_x2 : aco_opcode::flat_atomic_smin_x2;
6935 break;
6936 case nir_intrinsic_global_atomic_umin_amd:
6937 op32 = global ? aco_opcode::global_atomic_umin : aco_opcode::flat_atomic_umin;
6938 op64 = global ? aco_opcode::global_atomic_umin_x2 : aco_opcode::flat_atomic_umin_x2;
6939 break;
6940 case nir_intrinsic_global_atomic_imax_amd:
6941 op32 = global ? aco_opcode::global_atomic_smax : aco_opcode::flat_atomic_smax;
6942 op64 = global ? aco_opcode::global_atomic_smax_x2 : aco_opcode::flat_atomic_smax_x2;
6943 break;
6944 case nir_intrinsic_global_atomic_umax_amd:
6945 op32 = global ? aco_opcode::global_atomic_umax : aco_opcode::flat_atomic_umax;
6946 op64 = global ? aco_opcode::global_atomic_umax_x2 : aco_opcode::flat_atomic_umax_x2;
6947 break;
6948 case nir_intrinsic_global_atomic_and_amd:
6949 op32 = global ? aco_opcode::global_atomic_and : aco_opcode::flat_atomic_and;
6950 op64 = global ? aco_opcode::global_atomic_and_x2 : aco_opcode::flat_atomic_and_x2;
6951 break;
6952 case nir_intrinsic_global_atomic_or_amd:
6953 op32 = global ? aco_opcode::global_atomic_or : aco_opcode::flat_atomic_or;
6954 op64 = global ? aco_opcode::global_atomic_or_x2 : aco_opcode::flat_atomic_or_x2;
6955 break;
6956 case nir_intrinsic_global_atomic_xor_amd:
6957 op32 = global ? aco_opcode::global_atomic_xor : aco_opcode::flat_atomic_xor;
6958 op64 = global ? aco_opcode::global_atomic_xor_x2 : aco_opcode::flat_atomic_xor_x2;
6959 break;
6960 case nir_intrinsic_global_atomic_exchange_amd:
6961 op32 = global ? aco_opcode::global_atomic_swap : aco_opcode::flat_atomic_swap;
6962 op64 = global ? aco_opcode::global_atomic_swap_x2 : aco_opcode::flat_atomic_swap_x2;
6963 break;
6964 case nir_intrinsic_global_atomic_comp_swap_amd:
6965 op32 = global ? aco_opcode::global_atomic_cmpswap : aco_opcode::flat_atomic_cmpswap;
6966 op64 = global ? aco_opcode::global_atomic_cmpswap_x2 : aco_opcode::flat_atomic_cmpswap_x2;
6967 break;
6968 case nir_intrinsic_global_atomic_fmin_amd:
6969 op32 = global ? aco_opcode::global_atomic_fmin : aco_opcode::flat_atomic_fmin;
6970 op64 = global ? aco_opcode::global_atomic_fmin_x2 : aco_opcode::flat_atomic_fmin_x2;
6971 break;
6972 case nir_intrinsic_global_atomic_fmax_amd:
6973 op32 = global ? aco_opcode::global_atomic_fmax : aco_opcode::flat_atomic_fmax;
6974 op64 = global ? aco_opcode::global_atomic_fmax_x2 : aco_opcode::flat_atomic_fmax_x2;
6975 break;
6976 default:
6977 unreachable("visit_atomic_global should only be called with nir_intrinsic_global_atomic_* "
6978 "instructions.");
6979 }
6981 aco_opcode op = instr->dest.ssa.bit_size == 32 ? op32 : op64;
6982 aco_ptr<FLAT_instruction> flat{create_instruction<FLAT_instruction>(
6983 op, global ? Format::GLOBAL : Format::FLAT, 3, return_previous ? 1 : 0)};
6984 if (addr.regClass() == s2) {
6985 assert(global && offset.id() && offset.type() == RegType::vgpr);
6986 flat->operands[0] = Operand(offset);
6987 flat->operands[1] = Operand(addr);
6988 } else {
6989 assert(addr.type() == RegType::vgpr && !offset.id());
6990 flat->operands[0] = Operand(addr);
6991 flat->operands[1] = Operand(s1);
6992 }
6993 flat->operands[2] = Operand(data);
6994 if (return_previous)
6995 flat->definitions[0] = Definition(dst);
6996 flat->glc = return_previous;
6997 flat->dlc = false; /* Not needed for atomics */
6998 assert(global || !const_offset);
6999 flat->offset = const_offset;
7000 flat->disable_wqm = true;
7001 flat->sync = get_memory_sync_info(instr, storage_buffer, semantic_atomicrmw);
7002 ctx->program->needs_exact = true;
7003 ctx->block->instructions.emplace_back(std::move(flat));
7004 } else {
7005 assert(ctx->options->gfx_level == GFX6);
7007 switch (instr->intrinsic) {
7008 case nir_intrinsic_global_atomic_add_amd:
7009 op32 = aco_opcode::buffer_atomic_add;
7010 op64 = aco_opcode::buffer_atomic_add_x2;
7011 break;
7012 case nir_intrinsic_global_atomic_imin_amd:
7013 op32 = aco_opcode::buffer_atomic_smin;
7014 op64 = aco_opcode::buffer_atomic_smin_x2;
7015 break;
7016 case nir_intrinsic_global_atomic_umin_amd:
7017 op32 = aco_opcode::buffer_atomic_umin;
7018 op64 = aco_opcode::buffer_atomic_umin_x2;
7019 break;
7020 case nir_intrinsic_global_atomic_imax_amd:
7021 op32 = aco_opcode::buffer_atomic_smax;
7022 op64 = aco_opcode::buffer_atomic_smax_x2;
7023 break;
7024 case nir_intrinsic_global_atomic_umax_amd:
7025 op32 = aco_opcode::buffer_atomic_umax;
7026 op64 = aco_opcode::buffer_atomic_umax_x2;
7027 break;
7028 case nir_intrinsic_global_atomic_and_amd:
7029 op32 = aco_opcode::buffer_atomic_and;
7030 op64 = aco_opcode::buffer_atomic_and_x2;
7031 break;
7032 case nir_intrinsic_global_atomic_or_amd:
7033 op32 = aco_opcode::buffer_atomic_or;
7034 op64 = aco_opcode::buffer_atomic_or_x2;
7035 break;
7036 case nir_intrinsic_global_atomic_xor_amd:
7037 op32 = aco_opcode::buffer_atomic_xor;
7038 op64 = aco_opcode::buffer_atomic_xor_x2;
7039 break;
7040 case nir_intrinsic_global_atomic_exchange_amd:
7041 op32 = aco_opcode::buffer_atomic_swap;
7042 op64 = aco_opcode::buffer_atomic_swap_x2;
7043 break;
7044 case nir_intrinsic_global_atomic_comp_swap_amd:
7045 op32 = aco_opcode::buffer_atomic_cmpswap;
7046 op64 = aco_opcode::buffer_atomic_cmpswap_x2;
7047 break;
7048 case nir_intrinsic_global_atomic_fmin_amd:
7049 op32 = aco_opcode::buffer_atomic_fmin;
7050 op64 = aco_opcode::buffer_atomic_fmin_x2;
7051 break;
7052 case nir_intrinsic_global_atomic_fmax_amd:
7053 op32 = aco_opcode::buffer_atomic_fmax;
7054 op64 = aco_opcode::buffer_atomic_fmax_x2;
7055 break;
7056 default:
7057 unreachable("visit_atomic_global should only be called with nir_intrinsic_global_atomic_* "
7058 "instructions.");
7059 }
7061 Temp rsrc = get_gfx6_global_rsrc(bld, addr);
7063 aco_opcode op = instr->dest.ssa.bit_size == 32 ? op32 : op64;
7065 aco_ptr<MUBUF_instruction> mubuf{
7066 create_instruction<MUBUF_instruction>(op, Format::MUBUF, 4, return_previous ? 1 : 0)};
7067 mubuf->operands[0] = Operand(rsrc);
7068 mubuf->operands[1] = addr.type() == RegType::vgpr ? Operand(addr) : Operand(v1);
7069 mubuf->operands[2] = Operand(offset);
7070 mubuf->operands[3] = Operand(data);
7071 Definition def =
7072 return_previous ? (cmpswap ? bld.def(data.regClass()) : Definition(dst)) : Definition();
7073 if (return_previous)
7074 mubuf->definitions[0] = def;
7075 mubuf->glc = return_previous;
7076 mubuf->dlc = false;
7077 mubuf->offset = const_offset;
7078 mubuf->addr64 = addr.type() == RegType::vgpr;
7079 mubuf->disable_wqm = true;
7080 mubuf->sync = get_memory_sync_info(instr, storage_buffer, semantic_atomicrmw);
7081 ctx->program->needs_exact = true;
7082 ctx->block->instructions.emplace_back(std::move(mubuf));
7083 if (return_previous && cmpswap)
7084 bld.pseudo(aco_opcode::p_extract_vector, Definition(dst), def.getTemp(), Operand::zero());
7085 }
7086 }
7088 storage_class
7089 aco_storage_mode_from_nir_mem_mode(unsigned mem_mode)
7090 {
7091 unsigned storage = storage_none;
7093 if (mem_mode & nir_var_shader_out)
7094 storage |= storage_vmem_output;
7095 if ((mem_mode & nir_var_mem_ssbo) || (mem_mode & nir_var_mem_global))
7096 storage |= storage_buffer;
7097 if (mem_mode & nir_var_mem_task_payload)
7098 storage |= storage_task_payload;
7099 if (mem_mode & nir_var_mem_shared)
7100 storage |= storage_shared;
7101 if (mem_mode & nir_var_image)
7102 storage |= storage_image;
7104 return (storage_class)storage;
7105 }
7107 void
7108 visit_load_buffer(isel_context* ctx, nir_intrinsic_instr* intrin)
7109 {
7110 Builder bld(ctx->program, ctx->block);
7112 Temp dst = get_ssa_temp(ctx, &intrin->dest.ssa);
7113 Temp descriptor = bld.as_uniform(get_ssa_temp(ctx, intrin->src[0].ssa));
7114 Temp v_offset = as_vgpr(ctx, get_ssa_temp(ctx, intrin->src[1].ssa));
7115 Temp s_offset = bld.as_uniform(get_ssa_temp(ctx, intrin->src[2].ssa));
7117 bool swizzled = nir_intrinsic_is_swizzled(intrin);
7118 bool reorder = nir_intrinsic_can_reorder(intrin);
7119 bool slc = nir_intrinsic_slc_amd(intrin);
7121 unsigned const_offset = nir_intrinsic_base(intrin);
7122 unsigned elem_size_bytes = intrin->dest.ssa.bit_size / 8u;
7123 unsigned num_components = intrin->dest.ssa.num_components;
7124 unsigned swizzle_element_size = swizzled ? (ctx->program->gfx_level <= GFX8 ? 4 : 16) : 0;
7126 nir_variable_mode mem_mode = nir_intrinsic_memory_modes(intrin);
7127 memory_sync_info sync(aco_storage_mode_from_nir_mem_mode(mem_mode));
7129 load_vmem_mubuf(ctx, dst, descriptor, v_offset, s_offset, const_offset, elem_size_bytes,
7130 num_components, swizzle_element_size, !swizzled, reorder, slc, sync);
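/* Note (added annotation): for swizzled buffers the element size baked into
 * the descriptor limits how many bytes one MUBUF op may access per element —
 * 4 bytes up to GFX8, 16 bytes afterwards — while unswizzled loads pass 0,
 * i.e. no splitting constraint.
 */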
7131 }
7133 void
7134 visit_store_buffer(isel_context* ctx, nir_intrinsic_instr* intrin)
7135 {
7136 Temp store_src = get_ssa_temp(ctx, intrin->src[0].ssa);
7137 Temp descriptor = get_ssa_temp(ctx, intrin->src[1].ssa);
7138 Temp v_offset = get_ssa_temp(ctx, intrin->src[2].ssa);
7139 Temp s_offset = get_ssa_temp(ctx, intrin->src[3].ssa);
7141 bool swizzled = nir_intrinsic_is_swizzled(intrin);
7142 bool slc = nir_intrinsic_slc_amd(intrin);
7144 unsigned const_offset = nir_intrinsic_base(intrin);
7145 unsigned write_mask = nir_intrinsic_write_mask(intrin);
7146 unsigned elem_size_bytes = intrin->src[0].ssa->bit_size / 8u;
7148 nir_variable_mode mem_mode = nir_intrinsic_memory_modes(intrin);
7149 memory_sync_info sync(aco_storage_mode_from_nir_mem_mode(mem_mode));
7151 store_vmem_mubuf(ctx, store_src, descriptor, v_offset, s_offset, const_offset, elem_size_bytes,
7152 write_mask, !swizzled, sync, slc);
7153 }
7155 void
7156 visit_load_smem(isel_context* ctx, nir_intrinsic_instr* instr)
7157 {
7158 Builder bld(ctx->program, ctx->block);
7159 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7160 Temp base = bld.as_uniform(get_ssa_temp(ctx, instr->src[0].ssa));
7161 Temp offset = bld.as_uniform(get_ssa_temp(ctx, instr->src[1].ssa));
7163 aco_opcode opcode = aco_opcode::s_load_dword;
7164 unsigned size = 1;
7166 assert(dst.bytes() <= 64);
7168 if (dst.bytes() > 32) {
7169 opcode = aco_opcode::s_load_dwordx16;
7170 size = 16;
7171 } else if (dst.bytes() > 16) {
7172 opcode = aco_opcode::s_load_dwordx8;
7173 size = 8;
7174 } else if (dst.bytes() > 8) {
7175 opcode = aco_opcode::s_load_dwordx4;
7176 size = 4;
7177 } else if (dst.bytes() > 4) {
7178 opcode = aco_opcode::s_load_dwordx2;
7179 size = 2;
7180 }
7182 if (dst.size() != size) {
7183 bld.pseudo(aco_opcode::p_extract_vector, Definition(dst),
7184 bld.smem(opcode, bld.def(RegType::sgpr, size), base, offset), Operand::c32(0u));
7185 } else {
7186 bld.smem(opcode, Definition(dst), base, offset);
7187 }
7188 emit_split_vector(ctx, dst, instr->dest.ssa.num_components);
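/* Example (annotation): a 12-byte (3-dword) destination takes the
 * s_load_dwordx4 path above, then p_extract_vector trims the result back to
 * dst.size() == 3, since there is no s_load_dwordx3 encoding.
 */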
7189 }
7191 sync_scope
7192 translate_nir_scope(nir_scope scope)
7193 {
7194 switch (scope) {
7195 case NIR_SCOPE_NONE:
7196 case NIR_SCOPE_INVOCATION: return scope_invocation;
7197 case NIR_SCOPE_SUBGROUP: return scope_subgroup;
7198 case NIR_SCOPE_WORKGROUP: return scope_workgroup;
7199 case NIR_SCOPE_QUEUE_FAMILY: return scope_queuefamily;
7200 case NIR_SCOPE_DEVICE: return scope_device;
7201 case NIR_SCOPE_SHADER_CALL: return scope_invocation;
7202 }
7203 unreachable("invalid scope");
7204 }
7206 void
7207 emit_scoped_barrier(isel_context* ctx, nir_intrinsic_instr* instr)
7208 {
7209 Builder bld(ctx->program, ctx->block);
7211 unsigned storage_allowed = storage_buffer | storage_image;
7212 unsigned semantics = 0;
7213 sync_scope mem_scope = translate_nir_scope(nir_intrinsic_memory_scope(instr));
7214 sync_scope exec_scope = translate_nir_scope(nir_intrinsic_execution_scope(instr));
7216 /* We use shared storage for the following:
7217 * - compute shaders expose it in their API
7218 * - when tessellation is used, TCS and VS I/O is lowered to shared memory
7219 * - when GS is used on GFX9+, VS->GS and TES->GS I/O is lowered to shared memory
7220 * - additionally, when NGG is used on GFX10+, shared memory is used for certain features
7221 */
7222 bool shared_storage_used = ctx->stage.hw == HWStage::CS || ctx->stage.hw == HWStage::LS ||
7223 ctx->stage.hw == HWStage::HS ||
7224 (ctx->stage.hw == HWStage::GS && ctx->program->gfx_level >= GFX9) ||
7225 ctx->stage.hw == HWStage::NGG;
7227 if (shared_storage_used)
7228 storage_allowed |= storage_shared;
7230 /* Task payload: Task Shader output, Mesh Shader input */
7231 if (ctx->stage.has(SWStage::MS) || ctx->stage.has(SWStage::TS))
7232 storage_allowed |= storage_task_payload;
7234 /* Allow VMEM output for all stages that can have outputs. */
7235 if (ctx->stage.hw != HWStage::CS && ctx->stage.hw != HWStage::FS)
7236 storage_allowed |= storage_vmem_output;
7238 /* Workgroup barriers can hang merged shaders that can potentially have 0 threads in either half.
7239 * They are allowed in CS, TCS, and in any NGG shader.
7240 */
7241 ASSERTED bool workgroup_scope_allowed =
7242 ctx->stage.hw == HWStage::CS || ctx->stage.hw == HWStage::HS || ctx->stage.hw == HWStage::NGG;
7244 unsigned nir_storage = nir_intrinsic_memory_modes(instr);
7245 unsigned storage = aco_storage_mode_from_nir_mem_mode(nir_storage);
7246 storage &= storage_allowed;
7248 unsigned nir_semantics = nir_intrinsic_memory_semantics(instr);
7249 if (nir_semantics & NIR_MEMORY_ACQUIRE)
7250 semantics |= semantic_acquire | semantic_release;
7251 if (nir_semantics & NIR_MEMORY_RELEASE)
7252 semantics |= semantic_acquire | semantic_release;
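/* Annotation: both ACQUIRE and RELEASE are conservatively mapped to
 * acquire|release here, so a NIR barrier with only one direction still
 * becomes a full two-way barrier in ACO's memory_sync_info.
 */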
7254 assert(!(nir_semantics & (NIR_MEMORY_MAKE_AVAILABLE | NIR_MEMORY_MAKE_VISIBLE)));
7255 assert(exec_scope != scope_workgroup || workgroup_scope_allowed);
7257 bld.barrier(aco_opcode::p_barrier,
7258 memory_sync_info((storage_class)storage, (memory_semantics)semantics, mem_scope),
7259 exec_scope);
7260 }
7262 void
7263 visit_load_shared(isel_context* ctx, nir_intrinsic_instr* instr)
7264 {
7265 // TODO: implement sparse reads using ds_read2_b32 and nir_ssa_def_components_read()
7266 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7267 Temp address = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[0].ssa));
7268 Builder bld(ctx->program, ctx->block);
7270 unsigned elem_size_bytes = instr->dest.ssa.bit_size / 8;
7271 unsigned num_components = instr->dest.ssa.num_components;
7272 unsigned align = nir_intrinsic_align_mul(instr) ? nir_intrinsic_align(instr) : elem_size_bytes;
7273 load_lds(ctx, elem_size_bytes, num_components, dst, address, nir_intrinsic_base(instr), align);
7274 }
7276 void
7277 visit_store_shared(isel_context* ctx, nir_intrinsic_instr* instr)
7278 {
7279 unsigned writemask = nir_intrinsic_write_mask(instr);
7280 Temp data = get_ssa_temp(ctx, instr->src[0].ssa);
7281 Temp address = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[1].ssa));
7282 unsigned elem_size_bytes = instr->src[0].ssa->bit_size / 8;
7284 unsigned align = nir_intrinsic_align_mul(instr) ? nir_intrinsic_align(instr) : elem_size_bytes;
7285 store_lds(ctx, elem_size_bytes, data, writemask, address, nir_intrinsic_base(instr), align);
7286 }
7288 void
7289 visit_shared_atomic(isel_context* ctx, nir_intrinsic_instr* instr)
7290 {
7291 unsigned offset = nir_intrinsic_base(instr);
7292 Builder bld(ctx->program, ctx->block);
7293 Operand m = load_lds_size_m0(bld);
7294 Temp data = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[1].ssa));
7295 Temp address = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[0].ssa));
7297 unsigned num_operands = 3;
7298 aco_opcode op32, op64, op32_rtn, op64_rtn;
7299 switch (instr->intrinsic) {
7300 case nir_intrinsic_shared_atomic_add:
7301 op32 = aco_opcode::ds_add_u32;
7302 op64 = aco_opcode::ds_add_u64;
7303 op32_rtn = aco_opcode::ds_add_rtn_u32;
7304 op64_rtn = aco_opcode::ds_add_rtn_u64;
7305 break;
7306 case nir_intrinsic_shared_atomic_imin:
7307 op32 = aco_opcode::ds_min_i32;
7308 op64 = aco_opcode::ds_min_i64;
7309 op32_rtn = aco_opcode::ds_min_rtn_i32;
7310 op64_rtn = aco_opcode::ds_min_rtn_i64;
7311 break;
7312 case nir_intrinsic_shared_atomic_umin:
7313 op32 = aco_opcode::ds_min_u32;
7314 op64 = aco_opcode::ds_min_u64;
7315 op32_rtn = aco_opcode::ds_min_rtn_u32;
7316 op64_rtn = aco_opcode::ds_min_rtn_u64;
7317 break;
7318 case nir_intrinsic_shared_atomic_imax:
7319 op32 = aco_opcode::ds_max_i32;
7320 op64 = aco_opcode::ds_max_i64;
7321 op32_rtn = aco_opcode::ds_max_rtn_i32;
7322 op64_rtn = aco_opcode::ds_max_rtn_i64;
7323 break;
7324 case nir_intrinsic_shared_atomic_umax:
7325 op32 = aco_opcode::ds_max_u32;
7326 op64 = aco_opcode::ds_max_u64;
7327 op32_rtn = aco_opcode::ds_max_rtn_u32;
7328 op64_rtn = aco_opcode::ds_max_rtn_u64;
7329 break;
7330 case nir_intrinsic_shared_atomic_and:
7331 op32 = aco_opcode::ds_and_b32;
7332 op64 = aco_opcode::ds_and_b64;
7333 op32_rtn = aco_opcode::ds_and_rtn_b32;
7334 op64_rtn = aco_opcode::ds_and_rtn_b64;
7335 break;
7336 case nir_intrinsic_shared_atomic_or:
7337 op32 = aco_opcode::ds_or_b32;
7338 op64 = aco_opcode::ds_or_b64;
7339 op32_rtn = aco_opcode::ds_or_rtn_b32;
7340 op64_rtn = aco_opcode::ds_or_rtn_b64;
7341 break;
7342 case nir_intrinsic_shared_atomic_xor:
7343 op32 = aco_opcode::ds_xor_b32;
7344 op64 = aco_opcode::ds_xor_b64;
7345 op32_rtn = aco_opcode::ds_xor_rtn_b32;
7346 op64_rtn = aco_opcode::ds_xor_rtn_b64;
7347 break;
7348 case nir_intrinsic_shared_atomic_exchange:
7349 op32 = aco_opcode::ds_write_b32;
7350 op64 = aco_opcode::ds_write_b64;
7351 op32_rtn = aco_opcode::ds_wrxchg_rtn_b32;
7352 op64_rtn = aco_opcode::ds_wrxchg_rtn_b64;
7353 break;
7354 case nir_intrinsic_shared_atomic_comp_swap:
7355 op32 = aco_opcode::ds_cmpst_b32;
7356 op64 = aco_opcode::ds_cmpst_b64;
7357 op32_rtn = aco_opcode::ds_cmpst_rtn_b32;
7358 op64_rtn = aco_opcode::ds_cmpst_rtn_b64;
7359 num_operands = 4;
7360 break;
7361 case nir_intrinsic_shared_atomic_fadd:
7362 op32 = aco_opcode::ds_add_f32;
7363 op32_rtn = aco_opcode::ds_add_rtn_f32;
7364 op64 = aco_opcode::num_opcodes;
7365 op64_rtn = aco_opcode::num_opcodes;
7366 break;
7367 case nir_intrinsic_shared_atomic_fmin:
7368 op32 = aco_opcode::ds_min_f32;
7369 op32_rtn = aco_opcode::ds_min_rtn_f32;
7370 op64 = aco_opcode::ds_min_f64;
7371 op64_rtn = aco_opcode::ds_min_rtn_f64;
7372 break;
7373 case nir_intrinsic_shared_atomic_fmax:
7374 op32 = aco_opcode::ds_max_f32;
7375 op32_rtn = aco_opcode::ds_max_rtn_f32;
7376 op64 = aco_opcode::ds_max_f64;
7377 op64_rtn = aco_opcode::ds_max_rtn_f64;
7378 break;
7379 default: unreachable("Unhandled shared atomic intrinsic");
7380 }
7382 bool return_previous = !nir_ssa_def_is_unused(&instr->dest.ssa);
7384 aco_opcode op;
7385 if (data.size() == 1) {
7386 assert(instr->dest.ssa.bit_size == 32);
7387 op = return_previous ? op32_rtn : op32;
7388 } else {
7389 assert(instr->dest.ssa.bit_size == 64);
7390 op = return_previous ? op64_rtn : op64;
7391 }
7393 if (offset > 65535) {
7394 address = bld.vadd32(bld.def(v1), Operand::c32(offset), address);
7395 offset = 0;
7396 }
7398 aco_ptr<DS_instruction> ds;
7399 ds.reset(
7400 create_instruction<DS_instruction>(op, Format::DS, num_operands, return_previous ? 1 : 0));
7401 ds->operands[0] = Operand(address);
7402 ds->operands[1] = Operand(data);
7403 if (num_operands == 4) {
7404 Temp data2 = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[2].ssa));
7405 ds->operands[2] = Operand(data2);
7406 }
7407 ds->operands[num_operands - 1] = m;
7408 ds->offset0 = offset;
7409 if (return_previous)
7410 ds->definitions[0] = Definition(get_ssa_temp(ctx, &instr->dest.ssa));
7411 ds->sync = memory_sync_info(storage_shared, semantic_atomicrmw);
7413 if (m.isUndefined())
7414 ds->operands.pop_back();
7416 ctx->block->instructions.emplace_back(std::move(ds));
7417 }
7419 void
7420 visit_access_shared2_amd(isel_context* ctx, nir_intrinsic_instr* instr)
7421 {
7422 bool is_store = instr->intrinsic == nir_intrinsic_store_shared2_amd;
7423 Temp address = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[is_store].ssa));
7424 Builder bld(ctx->program, ctx->block);
7426 assert(bld.program->gfx_level >= GFX7);
7428 bool is64bit = (is_store ? instr->src[0].ssa->bit_size : instr->dest.ssa.bit_size) == 64;
7429 uint8_t offset0 = nir_intrinsic_offset0(instr);
7430 uint8_t offset1 = nir_intrinsic_offset1(instr);
7431 bool st64 = nir_intrinsic_st64(instr);
7433 Operand m = load_lds_size_m0(bld);
7434 Instruction* ds;
7435 if (is_store) {
7436 aco_opcode op = st64
7437 ? (is64bit ? aco_opcode::ds_write2st64_b64 : aco_opcode::ds_write2st64_b32)
7438 : (is64bit ? aco_opcode::ds_write2_b64 : aco_opcode::ds_write2_b32);
7439 Temp data = get_ssa_temp(ctx, instr->src[0].ssa);
7440 RegClass comp_rc = is64bit ? v2 : v1;
7441 Temp data0 = emit_extract_vector(ctx, data, 0, comp_rc);
7442 Temp data1 = emit_extract_vector(ctx, data, 1, comp_rc);
7443 ds = bld.ds(op, address, data0, data1, m, offset0, offset1);
7444 } else {
7445 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7446 Definition tmp_dst(dst.type() == RegType::vgpr ? dst : bld.tmp(is64bit ? v4 : v2));
7447 aco_opcode op = st64 ? (is64bit ? aco_opcode::ds_read2st64_b64 : aco_opcode::ds_read2st64_b32)
7448 : (is64bit ? aco_opcode::ds_read2_b64 : aco_opcode::ds_read2_b32);
7449 ds = bld.ds(op, tmp_dst, address, m, offset0, offset1);
7450 }
7451 ds->ds().sync = memory_sync_info(storage_shared);
7452 if (m.isUndefined())
7453 ds->operands.pop_back();
7455 if (!is_store) {
7456 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7457 if (dst.type() == RegType::sgpr) {
7458 emit_split_vector(ctx, ds->definitions[0].getTemp(), dst.size());
7459 Temp comp[4];
7460 /* Use scalar v_readfirstlane_b32 for better 32-bit copy propagation */
7461 for (unsigned i = 0; i < dst.size(); i++)
7462 comp[i] = bld.as_uniform(emit_extract_vector(ctx, ds->definitions[0].getTemp(), i, v1));
7463 if (is64bit) {
7464 Temp comp0 = bld.pseudo(aco_opcode::p_create_vector, bld.def(s2), comp[0], comp[1]);
7465 Temp comp1 = bld.pseudo(aco_opcode::p_create_vector, bld.def(s2), comp[2], comp[3]);
7466 ctx->allocated_vec[comp0.id()] = {comp[0], comp[1]};
7467 ctx->allocated_vec[comp1.id()] = {comp[2], comp[3]};
7468 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), comp0, comp1);
7469 ctx->allocated_vec[dst.id()] = {comp0, comp1};
7470 } else {
7471 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), comp[0], comp[1]);
7472 ctx->allocated_vec[dst.id()] = {comp[0], comp[1]};
7473 }
7474 }
7475 emit_split_vector(ctx, dst, 2);
7476 }
7477 }
7479 Temp
7480 get_scratch_resource(isel_context* ctx)
7481 {
7482 Builder bld(ctx->program, ctx->block);
7483 Temp scratch_addr = ctx->program->private_segment_buffer;
7484 if (ctx->stage.hw != HWStage::CS)
7485 scratch_addr =
7486 bld.smem(aco_opcode::s_load_dwordx2, bld.def(s2), scratch_addr, Operand::zero());
7488 uint32_t rsrc_conf =
7489 S_008F0C_ADD_TID_ENABLE(1) | S_008F0C_INDEX_STRIDE(ctx->program->wave_size == 64 ? 3 : 2);
7491 if (ctx->program->gfx_level >= GFX10) {
7492 rsrc_conf |= S_008F0C_FORMAT(V_008F0C_GFX10_FORMAT_32_FLOAT) |
7493 S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) |
7494 S_008F0C_RESOURCE_LEVEL(ctx->program->gfx_level < GFX11);
7495 } else if (ctx->program->gfx_level <=
7496 GFX7) { /* dfmt modifies stride on GFX8/GFX9 when ADD_TID_EN=1 */
7497 rsrc_conf |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
7498 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
7499 }
7501 /* older generations need element size = 4 bytes. element size removed in GFX9 */
7502 if (ctx->program->gfx_level <= GFX8)
7503 rsrc_conf |= S_008F0C_ELEMENT_SIZE(1);
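/* Annotation: the resulting descriptor uses the private segment buffer
 * address as base, 0xffffffff as num_records, and rsrc_conf enables
 * per-thread swizzling (ADD_TID_ENABLE with an index stride of wave_size),
 * so the scratch slots of different lanes interleave instead of overlapping.
 */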
7505 return bld.pseudo(aco_opcode::p_create_vector, bld.def(s4), scratch_addr, Operand::c32(-1u),
7506 Operand::c32(rsrc_conf));
7507 }
7509 void
7510 visit_load_scratch(isel_context* ctx, nir_intrinsic_instr* instr)
7511 {
7512 Builder bld(ctx->program, ctx->block);
7513 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7515 LoadEmitInfo info = {Operand(v1), dst, instr->dest.ssa.num_components,
7516 instr->dest.ssa.bit_size / 8u};
7517 info.align_mul = nir_intrinsic_align_mul(instr);
7518 info.align_offset = nir_intrinsic_align_offset(instr);
7519 info.swizzle_component_size = ctx->program->gfx_level <= GFX8 ? 4 : 0;
7520 info.sync = memory_sync_info(storage_scratch, semantic_private);
7521 if (ctx->program->gfx_level >= GFX9) {
7522 if (nir_src_is_const(instr->src[0])) {
7523 uint32_t max = ctx->program->dev.scratch_global_offset_max + 1;
7524 info.offset =
7525 bld.copy(bld.def(s1), Operand::c32(ROUND_DOWN_TO(nir_src_as_uint(instr->src[0]), max)));
7526 info.const_offset = nir_src_as_uint(instr->src[0]) % max;
7527 } else {
7528 info.offset = Operand(get_ssa_temp(ctx, instr->src[0].ssa));
7529 }
7530 EmitLoadParameters params = scratch_flat_load_params;
7531 params.max_const_offset_plus_one = ctx->program->dev.scratch_global_offset_max + 1;
7532 emit_load(ctx, bld, info, params);
7533 } else {
7534 info.resource = get_scratch_resource(ctx);
7535 info.offset = Operand(as_vgpr(ctx, get_ssa_temp(ctx, instr->src[0].ssa)));
7536 info.soffset = ctx->program->scratch_offset;
7537 emit_load(ctx, bld, info, scratch_mubuf_load_params);
7538 }
7539 }
7541 void
7542 visit_store_scratch(isel_context* ctx, nir_intrinsic_instr* instr)
7543 {
7544 Builder bld(ctx->program, ctx->block);
7545 Temp data = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[0].ssa));
7546 Temp offset = get_ssa_temp(ctx, instr->src[1].ssa);
7548 unsigned elem_size_bytes = instr->src[0].ssa->bit_size / 8;
7549 unsigned writemask = util_widen_mask(nir_intrinsic_write_mask(instr), elem_size_bytes);
7551 unsigned write_count = 0;
7552 Temp write_datas[32];
7553 unsigned offsets[32];
7554 unsigned swizzle_component_size = ctx->program->gfx_level <= GFX8 ? 4 : 16;
7555 split_buffer_store(ctx, instr, false, RegType::vgpr, data, writemask, swizzle_component_size,
7556 &write_count, write_datas, offsets);
7558 if (ctx->program->gfx_level >= GFX9) {
7559 uint32_t max = ctx->program->dev.scratch_global_offset_max + 1;
7560 offset = nir_src_is_const(instr->src[1]) ? Temp(0, s1) : offset;
7561 uint32_t base_const_offset =
7562 nir_src_is_const(instr->src[1]) ? nir_src_as_uint(instr->src[1]) : 0;
7564 for (unsigned i = 0; i < write_count; i++) {
7565 aco_opcode op;
7566 switch (write_datas[i].bytes()) {
7567 case 1: op = aco_opcode::scratch_store_byte; break;
7568 case 2: op = aco_opcode::scratch_store_short; break;
7569 case 4: op = aco_opcode::scratch_store_dword; break;
7570 case 8: op = aco_opcode::scratch_store_dwordx2; break;
7571 case 12: op = aco_opcode::scratch_store_dwordx3; break;
7572 case 16: op = aco_opcode::scratch_store_dwordx4; break;
7573 default: unreachable("Unexpected store size");
7574 }
7576 uint32_t const_offset = base_const_offset + offsets[i];
7577 assert(const_offset < max || offset.id() == 0);
7579 Operand addr = offset.regClass() == s1 ? Operand(v1) : Operand(offset);
7580 Operand saddr = offset.regClass() == s1 ? Operand(offset) : Operand(s1);
7581 if (offset.id() == 0)
7582 saddr = bld.copy(bld.def(s1), Operand::c32(ROUND_DOWN_TO(const_offset, max)));
7584 bld.scratch(op, addr, saddr, write_datas[i], const_offset % max,
7585 memory_sync_info(storage_scratch, semantic_private));
7586 }
7587 } else {
7588 Temp rsrc = get_scratch_resource(ctx);
7589 offset = as_vgpr(ctx, offset);
7590 for (unsigned i = 0; i < write_count; i++) {
7591 aco_opcode op = get_buffer_store_op(write_datas[i].bytes());
7592 Instruction* mubuf = bld.mubuf(op, rsrc, offset, ctx->program->scratch_offset,
7593 write_datas[i], offsets[i], true, true);
7594 mubuf->mubuf().sync = memory_sync_info(storage_scratch, semantic_private);
7595 }
7596 }
7597 }
7599 void
7600 visit_emit_vertex_with_counter(isel_context* ctx, nir_intrinsic_instr* instr)
7601 {
7602 Builder bld(ctx->program, ctx->block);
7604 unsigned stream = nir_intrinsic_stream_id(instr);
7605 Temp next_vertex = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[0].ssa));
7606 next_vertex = bld.v_mul_imm(bld.def(v1), next_vertex, 4u);
7607 nir_const_value* next_vertex_cv = nir_src_as_const_value(instr->src[0]);
7610 Temp gsvs_ring =
7611 bld.smem(aco_opcode::s_load_dwordx4, bld.def(s4), ctx->program->private_segment_buffer,
7612 Operand::c32(RING_GSVS_GS * 16u));
7614 unsigned num_components = ctx->program->info.gs.num_stream_output_components[stream];
7616 unsigned stride = 4u * num_components * ctx->shader->info.gs.vertices_out;
7617 unsigned stream_offset = 0;
7618 for (unsigned i = 0; i < stream; i++) {
7619 unsigned prev_stride = 4u * ctx->program->info.gs.num_stream_output_components[i] *
7620 ctx->shader->info.gs.vertices_out;
7621 stream_offset += prev_stride * ctx->program->wave_size;
7622 }
7624 /* Limit on the stride field for <= GFX7. */
7625 assert(stride < (1 << 14));
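/* Worked example (annotation): a stream writing 4 components for 3 emitted
 * vertices gives stride = 4 * 4 * 3 = 48 bytes, comfortably below the 2^14
 * stride-field limit asserted above.
 */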
7627 Temp gsvs_dwords[4];
7628 for (unsigned i = 0; i < 4; i++)
7629 gsvs_dwords[i] = bld.tmp(s1);
7630 bld.pseudo(aco_opcode::p_split_vector, Definition(gsvs_dwords[0]), Definition(gsvs_dwords[1]),
7631 Definition(gsvs_dwords[2]), Definition(gsvs_dwords[3]), gsvs_ring);
7633 if (stream_offset) {
7634 Temp stream_offset_tmp = bld.copy(bld.def(s1), Operand::c32(stream_offset));
7636 Temp carry = bld.tmp(s1);
7637 gsvs_dwords[0] = bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.scc(Definition(carry)),
7638 gsvs_dwords[0], stream_offset_tmp);
7639 gsvs_dwords[1] = bld.sop2(aco_opcode::s_addc_u32, bld.def(s1), bld.def(s1, scc),
7640 gsvs_dwords[1], Operand::zero(), bld.scc(carry));
7641 }
7643 gsvs_dwords[1] = bld.sop2(aco_opcode::s_or_b32, bld.def(s1), bld.def(s1, scc), gsvs_dwords[1],
7644 Operand::c32(S_008F04_STRIDE(stride)));
7645 gsvs_dwords[2] = bld.copy(bld.def(s1), Operand::c32(ctx->program->wave_size));
7647 gsvs_ring = bld.pseudo(aco_opcode::p_create_vector, bld.def(s4), gsvs_dwords[0], gsvs_dwords[1],
7648 gsvs_dwords[2], gsvs_dwords[3]);
7650 unsigned offset = 0;
7651 for (unsigned i = 0; i <= VARYING_SLOT_VAR31; i++) {
7652 if (ctx->program->info.gs.output_streams[i] != stream)
7653 continue;
7655 for (unsigned j = 0; j < 4; j++) {
7656 if (!(ctx->program->info.gs.output_usage_mask[i] & (1 << j)))
7657 continue;
7659 if (ctx->outputs.mask[i] & (1 << j)) {
7660 Operand vaddr_offset = next_vertex_cv ? Operand(v1) : Operand(next_vertex);
7661 unsigned const_offset = (offset + (next_vertex_cv ? next_vertex_cv->u32 : 0u)) * 4u;
7662 if (const_offset >= 4096u) {
7663 if (vaddr_offset.isUndefined())
7664 vaddr_offset = bld.copy(bld.def(v1), Operand::c32(const_offset / 4096u * 4096u));
7665 else
7666 vaddr_offset = bld.vadd32(bld.def(v1), Operand::c32(const_offset / 4096u * 4096u),
7667 vaddr_offset);
7668 const_offset %= 4096u;
7669 }
7671 aco_ptr<MTBUF_instruction> mtbuf{create_instruction<MTBUF_instruction>(
7672 aco_opcode::tbuffer_store_format_x, Format::MTBUF, 4, 0)};
7673 mtbuf->operands[0] = Operand(gsvs_ring);
7674 mtbuf->operands[1] = vaddr_offset;
7675 mtbuf->operands[2] = Operand(get_arg(ctx, ctx->args->ac.gs2vs_offset));
7676 mtbuf->operands[3] = Operand(ctx->outputs.temps[i * 4u + j]);
7677 mtbuf->offen = !vaddr_offset.isUndefined();
7678 mtbuf->dfmt = V_008F0C_BUF_DATA_FORMAT_32;
7679 mtbuf->nfmt = V_008F0C_BUF_NUM_FORMAT_UINT;
7680 mtbuf->offset = const_offset;
7681 mtbuf->glc = ctx->program->gfx_level < GFX11;
7682 mtbuf->slc = true;
7683 mtbuf->sync = memory_sync_info(storage_vmem_output, semantic_can_reorder);
7684 bld.insert(std::move(mtbuf));
7685 }
7687 offset += ctx->shader->info.gs.vertices_out;
7688 }
7690 /* outputs for the next vertex are undefined and keeping them around can
7691 * create invalid IR with control flow */
7692 ctx->outputs.mask[i] = 0;
7693 }
7695 bld.sopp(aco_opcode::s_sendmsg, bld.m0(ctx->gs_wave_id), -1, sendmsg_gs(false, true, stream));
7696 }
7698 Temp
7699 emit_boolean_reduce(isel_context* ctx, nir_op op, unsigned cluster_size, Temp src)
7700 {
7701 Builder bld(ctx->program, ctx->block);
7703 if (cluster_size == 1) {
7704 return src;
7705 }
7706 if (op == nir_op_iand && cluster_size == 4) {
7707 /* subgroupClusteredAnd(val, 4) -> ~wqm(exec & ~val) */
7708 Temp tmp =
7709 bld.sop2(Builder::s_andn2, bld.def(bld.lm), bld.def(s1, scc), Operand(exec, bld.lm), src);
7710 return bld.sop1(Builder::s_not, bld.def(bld.lm), bld.def(s1, scc),
7711 bld.sop1(Builder::s_wqm, bld.def(bld.lm), bld.def(s1, scc), tmp));
7712 } else if (op == nir_op_ior && cluster_size == 4) {
7713 /* subgroupClusteredOr(val, 4) -> wqm(val & exec) */
7714 return bld.sop1(
7715 Builder::s_wqm, bld.def(bld.lm), bld.def(s1, scc),
7716 bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm)));
7717 } else if (op == nir_op_iand && cluster_size == ctx->program->wave_size) {
7718 /* subgroupAnd(val) -> (exec & ~val) == 0 */
7719 Temp tmp =
7720 bld.sop2(Builder::s_andn2, bld.def(bld.lm), bld.def(s1, scc), Operand(exec, bld.lm), src)
7721 .def(1)
7722 .getTemp();
7723 Temp cond = bool_to_vector_condition(ctx, emit_wqm(bld, tmp));
7724 return bld.sop1(Builder::s_not, bld.def(bld.lm), bld.def(s1, scc), cond);
7725 } else if (op == nir_op_ior && cluster_size == ctx->program->wave_size) {
7726 /* subgroupOr(val) -> (val & exec) != 0 */
7727 Temp tmp =
7728 bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm))
7729 .def(1)
7730 .getTemp();
7731 return bool_to_vector_condition(ctx, tmp);
7732 } else if (op == nir_op_ixor && cluster_size == ctx->program->wave_size) {
7733 /* subgroupXor(val) -> s_bcnt1_i32_b64(val & exec) & 1 */
7734 Temp tmp =
7735 bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm));
7736 tmp = bld.sop1(Builder::s_bcnt1_i32, bld.def(s1), bld.def(s1, scc), tmp);
7737 tmp = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), tmp, Operand::c32(1u))
7738 .def(1)
7739 .getTemp();
7740 return bool_to_vector_condition(ctx, tmp);
7741 } else {
7742 /* subgroupClustered{And,Or,Xor}(val, n):
7743 * lane_id = v_mbcnt_hi_u32_b32(-1, v_mbcnt_lo_u32_b32(-1, 0)) (just v_mbcnt_lo on wave32)
7744 * cluster_offset = ~(n - 1) & lane_id; cluster_mask = ((1 << n) - 1)
7745 * subgroupClusteredAnd():
7746 * return ((val | ~exec) >> cluster_offset) & cluster_mask == cluster_mask
7747 * subgroupClusteredOr():
7748 * return ((val & exec) >> cluster_offset) & cluster_mask != 0
7749 * subgroupClusteredXor():
7750 * return v_bcnt_u32_b32(((val & exec) >> cluster_offset) & cluster_mask, 0) & 1 != 0
7751 */
7752 Temp lane_id = emit_mbcnt(ctx, bld.tmp(v1));
7753 Temp cluster_offset = bld.vop2(aco_opcode::v_and_b32, bld.def(v1),
7754 Operand::c32(~uint32_t(cluster_size - 1)), lane_id);
7756 Temp tmp;
7757 if (op == nir_op_iand)
7758 tmp = bld.sop2(Builder::s_orn2, bld.def(bld.lm), bld.def(s1, scc), src,
7759 Operand(exec, bld.lm));
7760 else
7761 tmp =
7762 bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm));
7764 uint32_t cluster_mask = cluster_size == 32 ? -1 : (1u << cluster_size) - 1u;
7766 if (ctx->program->gfx_level <= GFX7)
7767 tmp = bld.vop3(aco_opcode::v_lshr_b64, bld.def(v2), tmp, cluster_offset);
7768 else if (ctx->program->wave_size == 64)
7769 tmp = bld.vop3(aco_opcode::v_lshrrev_b64, bld.def(v2), cluster_offset, tmp);
7770 else
7771 tmp = bld.vop2_e64(aco_opcode::v_lshrrev_b32, bld.def(v1), cluster_offset, tmp);
7772 tmp = emit_extract_vector(ctx, tmp, 0, v1);
7773 if (cluster_mask != 0xffffffff)
7774 tmp = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand::c32(cluster_mask), tmp);
7776 if (op == nir_op_iand) {
7777 return bld.vopc(aco_opcode::v_cmp_eq_u32, bld.def(bld.lm), Operand::c32(cluster_mask),
7778 tmp);
7779 } else if (op == nir_op_ior) {
7780 return bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(bld.lm), Operand::zero(), tmp);
7781 } else if (op == nir_op_ixor) {
7782 tmp = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand::c32(1u),
7783 bld.vop3(aco_opcode::v_bcnt_u32_b32, bld.def(v1), tmp, Operand::zero()));
7784 return bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(bld.lm), Operand::zero(), tmp);
7785 }
7786 assert(false);
7787 }
7788 return Temp();
7789 }
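/* Boolean exclusive scans need only the per-lane count of set bits below the
 * lane (mbcnt) of the masked value; the identities are spelled out in the
 * comment inside the function. */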
7791 Temp
7792 emit_boolean_exclusive_scan(isel_context* ctx, nir_op op, Temp src)
7793 {
7794 Builder bld(ctx->program, ctx->block);
7795 assert(src.regClass() == bld.lm);
7797 /* subgroupExclusiveAnd(val) -> mbcnt(exec & ~val) == 0
7798 * subgroupExclusiveOr(val) -> mbcnt(val & exec) != 0
7799 * subgroupExclusiveXor(val) -> mbcnt(val & exec) & 1 != 0
7800 */
7801 Temp tmp;
7802 if (op == nir_op_iand)
7803 tmp =
7804 bld.sop2(Builder::s_andn2, bld.def(bld.lm), bld.def(s1, scc), Operand(exec, bld.lm), src);
7805 else
7806 tmp = bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm));
7808 Temp mbcnt = emit_mbcnt(ctx, bld.tmp(v1), Operand(tmp));
7810 if (op == nir_op_iand)
7811 return bld.vopc(aco_opcode::v_cmp_eq_u32, bld.def(bld.lm), Operand::zero(), mbcnt);
7812 else if (op == nir_op_ior)
7813 return bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(bld.lm), Operand::zero(), mbcnt);
7814 else if (op == nir_op_ixor)
7815 return bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(bld.lm), Operand::zero(),
7816 bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand::c32(1u), mbcnt));
7818 assert(false);
7819 return Temp();
7820 }
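/* A boolean inclusive scan is the exclusive scan combined with the lane's
 * own value using the same operation. */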
7822 Temp
7823 emit_boolean_inclusive_scan(isel_context* ctx, nir_op op, Temp src)
7824 {
7825 Builder bld(ctx->program, ctx->block);
7827 /* subgroupInclusiveAnd(val) -> subgroupExclusiveAnd(val) && val
7828 * subgroupInclusiveOr(val) -> subgroupExclusiveOr(val) || val
7829 * subgroupInclusiveXor(val) -> subgroupExclusiveXor(val) ^^ val
7830 */
7831 Temp tmp = emit_boolean_exclusive_scan(ctx, op, src);
7832 if (op == nir_op_iand)
7833 return bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), tmp, src);
7834 else if (op == nir_op_ior)
7835 return bld.sop2(Builder::s_or, bld.def(bld.lm), bld.def(s1, scc), tmp, src);
7836 else if (op == nir_op_ixor)
7837 return bld.sop2(Builder::s_xor, bld.def(bld.lm), bld.def(s1, scc), tmp, src);
7839 assert(false);
7840 return Temp();
7841 }
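/* Map a NIR reduction opcode and bit size onto the backend ReduceOp enum.
 * CASEI selects the 8/16/32/64-bit integer variant, CASEF the 16/32/64-bit
 * float variant (float reductions have no 8-bit form). */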
7843 ReduceOp
7844 get_reduce_op(nir_op op, unsigned bit_size)
7845 {
7846 switch (op) {
7847 #define CASEI(name) \
7848 case nir_op_##name: \
7849 return (bit_size == 32) ? name##32 \
7850 : (bit_size == 16) ? name##16 \
7851 : (bit_size == 8) ? name##8 \
7852 : name##64;
7853 #define CASEF(name) \
7854 case nir_op_##name: return (bit_size == 32) ? name##32 : (bit_size == 16) ? name##16 : name##64;
7855 CASEI(iadd)
7856 CASEI(imul)
7857 CASEI(imin)
7858 CASEI(umin)
7859 CASEI(imax)
7860 CASEI(umax)
7861 CASEI(iand)
7862 CASEI(ior)
7863 CASEI(ixor)
7864 CASEF(fadd)
7865 CASEF(fmul)
7866 CASEF(fmin)
7867 CASEF(fmax)
7868 default: unreachable("unknown reduction op");
7869 #undef CASEI
7870 #undef CASEF
7871 }
7872 }
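/* A subgroup operation on a uniform source is the source itself; at most a
 * VGPR->SGPR copy is needed when divergence analysis placed it in a VGPR. */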
7874 void
7875 emit_uniform_subgroup(isel_context* ctx, nir_intrinsic_instr* instr, Temp src)
7876 {
7877 Builder bld(ctx->program, ctx->block);
7878 Definition dst(get_ssa_temp(ctx, &instr->dest.ssa));
7879 assert(dst.regClass().type() != RegType::vgpr);
7880 if (src.regClass().type() == RegType::vgpr)
7881 bld.pseudo(aco_opcode::p_as_uniform, dst, src);
7882 else
7883 bld.copy(dst, src);
7884 }
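/* Uniform-source iadd/ixor/fadd reductions and scans become a multiply:
 * the result is src * count, where count is popcount(exec) for reductions
 * or the lane index for scans (for ixor only the low bit of count matters). */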
7886 void
7887 emit_addition_uniform_reduce(isel_context* ctx, nir_op op, Definition dst, nir_src src, Temp count)
7888 {
7889 Builder bld(ctx->program, ctx->block);
7890 Temp src_tmp = get_ssa_temp(ctx, src.ssa);
7892 if (op == nir_op_fadd) {
7893 src_tmp = as_vgpr(ctx, src_tmp);
7894 Temp tmp = dst.regClass() == s1 ? bld.tmp(RegClass::get(RegType::vgpr, src.ssa->bit_size / 8))
7895 : dst.getTemp();
7897 if (src.ssa->bit_size == 16) {
7898 count = bld.vop1(aco_opcode::v_cvt_f16_u16, bld.def(v2b), count);
7899 bld.vop2(aco_opcode::v_mul_f16, Definition(tmp), count, src_tmp);
7900 } else {
7901 assert(src.ssa->bit_size == 32);
7902 count = bld.vop1(aco_opcode::v_cvt_f32_u32, bld.def(v1), count);
7903 bld.vop2(aco_opcode::v_mul_f32, Definition(tmp), count, src_tmp);
7904 }
7906 if (tmp != dst.getTemp())
7907 bld.pseudo(aco_opcode::p_as_uniform, dst, tmp);
7909 return;
7910 }
7912 if (dst.regClass() == s1)
7913 src_tmp = bld.as_uniform(src_tmp);
7915 if (op == nir_op_ixor && count.type() == RegType::sgpr)
7916 count =
7917 bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), count, Operand::c32(1u));
7918 else if (op == nir_op_ixor)
7919 count = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand::c32(1u), count);
7921 assert(dst.getTemp().type() == count.type());
7923 if (nir_src_is_const(src)) {
7924 if (nir_src_as_uint(src) == 1 && dst.bytes() <= 2)
7925 bld.pseudo(aco_opcode::p_extract_vector, dst, count, Operand::zero());
7926 else if (nir_src_as_uint(src) == 1)
7927 bld.copy(dst, count);
7928 else if (nir_src_as_uint(src) == 0)
7929 bld.copy(dst, Operand::zero(dst.bytes()));
7930 else if (count.type() == RegType::vgpr)
7931 bld.v_mul_imm(dst, count, nir_src_as_uint(src));
7932 else
7933 bld.sop2(aco_opcode::s_mul_i32, dst, src_tmp, count);
7934 } else if (dst.bytes() <= 2 && ctx->program->gfx_level >= GFX10) {
7935 bld.vop3(aco_opcode::v_mul_lo_u16_e64, dst, src_tmp, count);
7936 } else if (dst.bytes() <= 2 && ctx->program->gfx_level >= GFX8) {
7937 bld.vop2(aco_opcode::v_mul_lo_u16, dst, src_tmp, count);
7938 } else if (dst.getTemp().type() == RegType::vgpr) {
7939 bld.vop3(aco_opcode::v_mul_lo_u32, dst, src_tmp, count);
7940 } else {
7941 bld.sop2(aco_opcode::s_mul_i32, dst, src_tmp, count);
7942 }
7943 }
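/* Try to lower a reduce of a dynamically uniform source without the generic
 * reduction machinery. Returns true on success; imul/fmul are rejected since
 * they would need src raised to the popcount of exec. */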
7945 bool
7946 emit_uniform_reduce(isel_context* ctx, nir_intrinsic_instr* instr)
7947 {
7948 nir_op op = (nir_op)nir_intrinsic_reduction_op(instr);
7949 if (op == nir_op_imul || op == nir_op_fmul)
7950 return false;
7952 if (op == nir_op_iadd || op == nir_op_ixor || op == nir_op_fadd) {
7953 Builder bld(ctx->program, ctx->block);
7954 Definition dst(get_ssa_temp(ctx, &instr->dest.ssa));
7955 unsigned bit_size = instr->src[0].ssa->bit_size;
7956 if (bit_size > 32)
7957 return false;
7959 Temp thread_count =
7960 bld.sop1(Builder::s_bcnt1_i32, bld.def(s1), bld.def(s1, scc), Operand(exec, bld.lm));
7962 emit_addition_uniform_reduce(ctx, op, dst, instr->src[0], thread_count);
7963 return true;
7964 }
7965 emit_uniform_subgroup(ctx, instr, get_ssa_temp(ctx, instr->src[0].ssa));
7966 return true;
7967 }
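/* Same idea for scans of uniform sources: additive scans multiply by the
 * (inclusive or exclusive) lane index; idempotent ops (min/max/and/or) are
 * handled by broadcasting the source or writing the identity to lane 0. */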
7970 bool
7971 emit_uniform_scan(isel_context* ctx, nir_intrinsic_instr* instr)
7972 {
7973 Builder bld(ctx->program, ctx->block);
7974 Definition dst(get_ssa_temp(ctx, &instr->dest.ssa));
7975 nir_op op = (nir_op)nir_intrinsic_reduction_op(instr);
7976 bool inc = instr->intrinsic == nir_intrinsic_inclusive_scan;
7978 if (op == nir_op_imul || op == nir_op_fmul)
7979 return false;
7981 if (op == nir_op_iadd || op == nir_op_ixor || op == nir_op_fadd) {
7982 if (instr->src[0].ssa->bit_size > 32)
7983 return false;
7985 Temp packed_tid;
7986 if (inc)
7987 packed_tid = emit_mbcnt(ctx, bld.tmp(v1), Operand(exec, bld.lm), Operand::c32(1u));
7988 else
7989 packed_tid = emit_mbcnt(ctx, bld.tmp(v1), Operand(exec, bld.lm));
7991 emit_addition_uniform_reduce(ctx, op, dst, instr->src[0], packed_tid);
7992 return true;
7993 }
7995 assert(op == nir_op_imin || op == nir_op_umin || op == nir_op_imax || op == nir_op_umax ||
7996 op == nir_op_iand || op == nir_op_ior || op == nir_op_fmin || op == nir_op_fmax);
7998 if (inc) {
7999 emit_uniform_subgroup(ctx, instr, get_ssa_temp(ctx, instr->src[0].ssa));
8000 return true;
8001 }
8003 /* Copy the source and write the reduction operation identity to the first lane. */
8004 Temp lane = bld.sop1(Builder::s_ff1_i32, bld.def(s1), Operand(exec, bld.lm));
8005 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
8006 ReduceOp reduce_op = get_reduce_op(op, instr->src[0].ssa->bit_size);
8007 if (dst.bytes() == 8) {
8008 Temp lo = bld.tmp(v1), hi = bld.tmp(v1);
8009 bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), src);
8010 uint32_t identity_lo = get_reduction_identity(reduce_op, 0);
8011 uint32_t identity_hi = get_reduction_identity(reduce_op, 1);
8013 lo =
8014 bld.writelane(bld.def(v1), bld.copy(bld.def(s1, m0), Operand::c32(identity_lo)), lane, lo);
8015 hi =
8016 bld.writelane(bld.def(v1), bld.copy(bld.def(s1, m0), Operand::c32(identity_hi)), lane, hi);
8017 bld.pseudo(aco_opcode::p_create_vector, dst, lo, hi);
8018 } else {
8019 uint32_t identity = get_reduction_identity(reduce_op, 0);
8020 bld.writelane(dst, bld.copy(bld.def(s1, m0), Operand::c32(identity)), lane,
8021 as_vgpr(ctx, src));
8022 }
8024 return dst.getTemp();
8025 }
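/* Build the p_reduce/p_inclusive_scan/p_exclusive_scan pseudo instruction.
 * Besides the result it defines an exec-save temporary, optionally a scalar
 * identity temporary, scc and (for some ops/gfx levels) vcc; the two linear
 * VGPR operands are placeholders that setup_reduce_temp() fills in before
 * the pseudo is expanded to the real DPP/permute sequence. */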
8027 Temp
8028 emit_reduction_instr(isel_context* ctx, aco_opcode aco_op, ReduceOp op, unsigned cluster_size,
8029 Definition dst, Temp src)
8030 {
8031 assert(src.bytes() <= 8);
8032 assert(src.type() == RegType::vgpr);
8034 Builder bld(ctx->program, ctx->block);
8036 unsigned num_defs = 0;
8037 Definition defs[5];
8038 defs[num_defs++] = dst;
8039 defs[num_defs++] = bld.def(bld.lm); /* used internally to save/restore exec */
8041 /* scalar identity temporary */
8042 bool need_sitmp = (ctx->program->gfx_level <= GFX7 || ctx->program->gfx_level >= GFX10) &&
8043 aco_op != aco_opcode::p_reduce;
8044 if (aco_op == aco_opcode::p_exclusive_scan) {
8045 need_sitmp |= (op == imin8 || op == imin16 || op == imin32 || op == imin64 || op == imax8 ||
8046 op == imax16 || op == imax32 || op == imax64 || op == fmin16 || op == fmin32 ||
8047 op == fmin64 || op == fmax16 || op == fmax32 || op == fmax64 || op == fmul16 ||
8048 op == fmul64);
8049 }
8050 if (need_sitmp)
8051 defs[num_defs++] = bld.def(RegType::sgpr, dst.size());
8053 /* scc clobber */
8054 defs[num_defs++] = bld.def(s1, scc);
8056 /* vcc clobber */
8057 bool clobber_vcc = false;
8058 if ((op == iadd32 || op == imul64) && ctx->program->gfx_level < GFX9)
8059 clobber_vcc = true;
8060 if ((op == iadd8 || op == iadd16) && ctx->program->gfx_level < GFX8)
8061 clobber_vcc = true;
8062 if (op == iadd64 || op == umin64 || op == umax64 || op == imin64 || op == imax64)
8063 clobber_vcc = true;
8065 if (clobber_vcc)
8066 defs[num_defs++] = bld.def(bld.lm, vcc);
8068 Pseudo_reduction_instruction* reduce = create_instruction<Pseudo_reduction_instruction>(
8069 aco_op, Format::PSEUDO_REDUCTION, 3, num_defs);
8070 reduce->operands[0] = Operand(src);
8071 /* setup_reduce_temp will update these undef operands if needed */
8072 reduce->operands[1] = Operand(RegClass(RegType::vgpr, dst.size()).as_linear());
8073 reduce->operands[2] = Operand(v1.as_linear());
8074 std::copy(defs, defs + num_defs, reduce->definitions.begin());
8076 reduce->reduce_op = op;
8077 reduce->cluster_size = cluster_size;
8078 bld.insert(std::move(reduce));
8080 return dst.getTemp();
8081 }
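/* Evaluate barycentrics at an offset (pos1, pos2) from the pixel center:
 * broadcast the top-left value of each quad, build per-quad derivatives with
 * DPP (ds_swizzle before GFX8), then compute res = p + ddx * pos1 + ddy * pos2
 * in WQM. Used for interpolateAtOffset/interpolateAtSample. */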
8083 void
8084 emit_interp_center(isel_context* ctx, Temp dst, Temp bary, Temp pos1, Temp pos2)
8085 {
8086 Builder bld(ctx->program, ctx->block);
8087 Temp p1 = emit_extract_vector(ctx, bary, 0, v1);
8088 Temp p2 = emit_extract_vector(ctx, bary, 1, v1);
8090 Temp ddx_1, ddx_2, ddy_1, ddy_2;
8091 uint32_t dpp_ctrl0 = dpp_quad_perm(0, 0, 0, 0);
8092 uint32_t dpp_ctrl1 = dpp_quad_perm(1, 1, 1, 1);
8093 uint32_t dpp_ctrl2 = dpp_quad_perm(2, 2, 2, 2);
8096 if (ctx->program->gfx_level >= GFX8) {
8097 Temp tl_1 = bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), p1, dpp_ctrl0);
8098 ddx_1 = bld.vop2_dpp(aco_opcode::v_sub_f32, bld.def(v1), p1, tl_1, dpp_ctrl1);
8099 ddy_1 = bld.vop2_dpp(aco_opcode::v_sub_f32, bld.def(v1), p1, tl_1, dpp_ctrl2);
8100 Temp tl_2 = bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), p2, dpp_ctrl0);
8101 ddx_2 = bld.vop2_dpp(aco_opcode::v_sub_f32, bld.def(v1), p2, tl_2, dpp_ctrl1);
8102 ddy_2 = bld.vop2_dpp(aco_opcode::v_sub_f32, bld.def(v1), p2, tl_2, dpp_ctrl2);
8103 } else {
8104 Temp tl_1 = bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), p1, (1 << 15) | dpp_ctrl0);
8105 ddx_1 = bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), p1, (1 << 15) | dpp_ctrl1);
8106 ddx_1 = bld.vop2(aco_opcode::v_sub_f32, bld.def(v1), ddx_1, tl_1);
8107 ddy_1 = bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), p1, (1 << 15) | dpp_ctrl2);
8108 ddy_1 = bld.vop2(aco_opcode::v_sub_f32, bld.def(v1), ddy_1, tl_1);
8110 Temp tl_2 = bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), p2, (1 << 15) | dpp_ctrl0);
8111 ddx_2 = bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), p2, (1 << 15) | dpp_ctrl1);
8112 ddx_2 = bld.vop2(aco_opcode::v_sub_f32, bld.def(v1), ddx_2, tl_2);
8113 ddy_2 = bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), p2, (1 << 15) | dpp_ctrl2);
8114 ddy_2 = bld.vop2(aco_opcode::v_sub_f32, bld.def(v1), ddy_2, tl_2);
8115 }
8117 /* res_k = p_k + ddx_k * pos1 + ddy_k * pos2 */
8118 aco_opcode mad =
8119 ctx->program->gfx_level >= GFX10_3 ? aco_opcode::v_fma_f32 : aco_opcode::v_mad_f32;
8120 Temp tmp1 = bld.vop3(mad, bld.def(v1), ddx_1, pos1, p1);
8121 Temp tmp2 = bld.vop3(mad, bld.def(v1), ddx_2, pos1, p2);
8122 tmp1 = bld.vop3(mad, bld.def(v1), ddy_1, pos2, tmp1);
8123 tmp2 = bld.vop3(mad, bld.def(v1), ddy_2, pos2, tmp2);
8124 Temp wqm1 = bld.tmp(v1);
8125 emit_wqm(bld, tmp1, wqm1, true);
8126 Temp wqm2 = bld.tmp(v1);
8127 emit_wqm(bld, tmp2, wqm2, true);
8128 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), wqm1, wqm2);
8129 }
8132 Temp merged_wave_info_to_mask(isel_context* ctx, unsigned i);
8133 void ngg_emit_sendmsg_gs_alloc_req(isel_context* ctx, Temp vtx_cnt, Temp prm_cnt);
8134 static void create_primitive_exports(isel_context *ctx, Temp prim_ch1);
8135 static void create_vs_exports(isel_context* ctx);
8137 Temp
8138 get_interp_param(isel_context* ctx, nir_intrinsic_op intrin,
8139 enum glsl_interp_mode interp)
8140 {
8141 bool linear = interp == INTERP_MODE_NOPERSPECTIVE;
8142 if (intrin == nir_intrinsic_load_barycentric_pixel ||
8143 intrin == nir_intrinsic_load_barycentric_at_sample ||
8144 intrin == nir_intrinsic_load_barycentric_at_offset) {
8145 return get_arg(ctx, linear ? ctx->args->ac.linear_center : ctx->args->ac.persp_center);
8146 } else if (intrin == nir_intrinsic_load_barycentric_centroid) {
8147 return linear ? ctx->linear_centroid : ctx->persp_centroid;
8148 } else {
8149 assert(intrin == nir_intrinsic_load_barycentric_sample);
8150 return get_arg(ctx, linear ? ctx->args->ac.linear_sample : ctx->args->ac.persp_sample);
8151 }
8152 }
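/* Central NIR intrinsic dispatcher: one case per intrinsic, each either
 * emitting a short builder sequence inline or forwarding to a visit_*()
 * helper defined elsewhere in this file. */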
8154 void
8155 visit_intrinsic(isel_context* ctx, nir_intrinsic_instr* instr)
8156 {
8157 Builder bld(ctx->program, ctx->block);
8158 switch (instr->intrinsic) {
8159 case nir_intrinsic_load_barycentric_sample:
8160 case nir_intrinsic_load_barycentric_pixel:
8161 case nir_intrinsic_load_barycentric_centroid: {
8162 glsl_interp_mode mode = (glsl_interp_mode)nir_intrinsic_interp_mode(instr);
8163 Temp bary = get_interp_param(ctx, instr->intrinsic, mode);
8164 assert(bary.size() == 2);
8165 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8166 bld.copy(Definition(dst), bary);
8167 emit_split_vector(ctx, dst, 2);
8168 break;
8169 }
8170 case nir_intrinsic_load_barycentric_model: {
8171 Temp model = get_arg(ctx, ctx->args->ac.pull_model);
8172 assert(model.size() == 3);
8173 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8174 bld.copy(Definition(dst), model);
8175 emit_split_vector(ctx, dst, 3);
8176 break;
8177 }
8178 case nir_intrinsic_load_barycentric_at_sample: {
8179 Temp bary = get_interp_param(ctx, instr->intrinsic, (glsl_interp_mode)nir_intrinsic_interp_mode(instr));
8180 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8181 uint32_t sample_pos_offset = RING_PS_SAMPLE_POSITIONS * 16;
8182 if (ctx->options->key.ps.num_samples == 2) {
8183 sample_pos_offset += 1 << 3;
8184 } else if (ctx->options->key.ps.num_samples == 4) {
8185 sample_pos_offset += 3 << 3;
8186 } else if (ctx->options->key.ps.num_samples == 8) {
8187 sample_pos_offset += 7 << 3;
8188 } else {
8189 assert(ctx->options->key.ps.num_samples == 0);
8190 bld.copy(Definition(dst), bary);
8191 emit_split_vector(ctx, dst, 2);
8192 break;
8193 }
8195 Temp sample_pos;
8196 Temp addr = get_ssa_temp(ctx, instr->src[0].ssa);
8197 nir_const_value* const_addr = nir_src_as_const_value(instr->src[0]);
8198 Temp private_segment_buffer = ctx->program->private_segment_buffer;
8199 // TODO: bounds checking?
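/* The sample positions are read from a driver-internal buffer addressed
 * relative to private_segment_buffer at RING_PS_SAMPLE_POSITIONS; the
 * branches below only differ in how the address is formed per gfx level. */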
8200 if (addr.type() == RegType::sgpr) {
8201 Operand offset;
8202 if (const_addr) {
8203 sample_pos_offset += const_addr->u32 << 3;
8204 offset = Operand::c32(sample_pos_offset);
8205 } else if (ctx->options->gfx_level >= GFX9) {
8206 offset = bld.sop2(aco_opcode::s_lshl3_add_u32, bld.def(s1), bld.def(s1, scc), addr,
8207 Operand::c32(sample_pos_offset));
8208 } else {
8209 offset = bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.def(s1, scc), addr,
8210 Operand::c32(3u));
8211 offset = bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.def(s1, scc), offset,
8212 Operand::c32(sample_pos_offset));
8213 }
8215 Operand off = bld.copy(bld.def(s1), Operand(offset));
8216 sample_pos =
8217 bld.smem(aco_opcode::s_load_dwordx2, bld.def(s2), private_segment_buffer, off);
8219 } else if (ctx->options->gfx_level >= GFX9) {
8220 addr = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand::c32(3u), addr);
8221 sample_pos = bld.global(aco_opcode::global_load_dwordx2, bld.def(v2), addr,
8222 private_segment_buffer, sample_pos_offset);
8223 } else if (ctx->options->gfx_level >= GFX7) {
8224 /* addr += private_segment_buffer + sample_pos_offset */
8225 Temp tmp0 = bld.tmp(s1);
8226 Temp tmp1 = bld.tmp(s1);
8227 bld.pseudo(aco_opcode::p_split_vector, Definition(tmp0), Definition(tmp1),
8228 private_segment_buffer);
8229 Definition scc_tmp = bld.def(s1, scc);
8230 tmp0 = bld.sop2(aco_opcode::s_add_u32, bld.def(s1), scc_tmp, tmp0,
8231 Operand::c32(sample_pos_offset));
8232 tmp1 = bld.sop2(aco_opcode::s_addc_u32, bld.def(s1), bld.def(s1, scc), tmp1,
8233 Operand::zero(), bld.scc(scc_tmp.getTemp()));
8234 addr = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand::c32(3u), addr);
8235 Temp pck0 = bld.tmp(v1);
8236 Temp carry = bld.vadd32(Definition(pck0), tmp0, addr, true).def(1).getTemp();
8237 tmp1 = as_vgpr(ctx, tmp1);
8238 Temp pck1 = bld.vop2_e64(aco_opcode::v_addc_co_u32, bld.def(v1), bld.def(bld.lm), tmp1,
8239 Operand::zero(), carry);
8240 addr = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), pck0, pck1);
8242 /* sample_pos = flat_load_dwordx2 addr */
8243 sample_pos = bld.flat(aco_opcode::flat_load_dwordx2, bld.def(v2), addr, Operand(s1));
8244 } else {
8245 assert(ctx->options->gfx_level == GFX6);
8247 uint32_t rsrc_conf = S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
8248 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
8249 Temp rsrc = bld.pseudo(aco_opcode::p_create_vector, bld.def(s4), private_segment_buffer,
8250 Operand::zero(), Operand::c32(rsrc_conf));
8252 addr = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand::c32(3u), addr);
8253 addr = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), addr, Operand::zero());
8255 sample_pos = bld.tmp(v2);
8257 aco_ptr<MUBUF_instruction> load{create_instruction<MUBUF_instruction>(
8258 aco_opcode::buffer_load_dwordx2, Format::MUBUF, 3, 1)};
8259 load->definitions[0] = Definition(sample_pos);
8260 load->operands[0] = Operand(rsrc);
8261 load->operands[1] = Operand(addr);
8262 load->operands[2] = Operand::zero();
8263 load->offset = sample_pos_offset;
8264 load->offen = false;
8265 load->addr64 = true;
8266 load->glc = false;
8267 load->dlc = false;
8268 load->disable_wqm = false;
8269 ctx->block->instructions.emplace_back(std::move(load));
8270 }
8272 /* sample_pos -= 0.5 */
8273 Temp pos1 = bld.tmp(RegClass(sample_pos.type(), 1));
8274 Temp pos2 = bld.tmp(RegClass(sample_pos.type(), 1));
8275 bld.pseudo(aco_opcode::p_split_vector, Definition(pos1), Definition(pos2), sample_pos);
8276 pos1 = bld.vop2_e64(aco_opcode::v_sub_f32, bld.def(v1), pos1, Operand::c32(0x3f000000u));
8277 pos2 = bld.vop2_e64(aco_opcode::v_sub_f32, bld.def(v1), pos2, Operand::c32(0x3f000000u));
8279 emit_interp_center(ctx, dst, bary, pos1, pos2);
8280 break;
8281 }
8282 case nir_intrinsic_load_barycentric_at_offset: {
8283 Temp offset = get_ssa_temp(ctx, instr->src[0].ssa);
8284 RegClass rc = RegClass(offset.type(), 1);
8285 Temp pos1 = bld.tmp(rc), pos2 = bld.tmp(rc);
8286 bld.pseudo(aco_opcode::p_split_vector, Definition(pos1), Definition(pos2), offset);
8287 Temp bary = get_interp_param(ctx, instr->intrinsic, (glsl_interp_mode)nir_intrinsic_interp_mode(instr));
8288 emit_interp_center(ctx, get_ssa_temp(ctx, &instr->dest.ssa), bary, pos1, pos2);
8289 break;
8290 }
8291 case nir_intrinsic_load_front_face: {
8292 bld.vopc(aco_opcode::v_cmp_lg_u32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)),
8293 Operand::zero(), get_arg(ctx, ctx->args->ac.front_face));
8294 break;
8295 }
8296 case nir_intrinsic_load_view_index: {
8297 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8298 bld.copy(Definition(dst), Operand(get_arg(ctx, ctx->args->ac.view_index)));
8299 break;
8300 }
8301 case nir_intrinsic_load_frag_coord: {
8302 emit_load_frag_coord(ctx, get_ssa_temp(ctx, &instr->dest.ssa), 4);
8303 break;
8304 }
8305 case nir_intrinsic_load_frag_shading_rate:
8306 emit_load_frag_shading_rate(ctx, get_ssa_temp(ctx, &instr->dest.ssa));
8307 break;
8308 case nir_intrinsic_load_sample_pos: {
8309 Temp posx = get_arg(ctx, ctx->args->ac.frag_pos[0]);
8310 Temp posy = get_arg(ctx, ctx->args->ac.frag_pos[1]);
8311 bld.pseudo(
8312 aco_opcode::p_create_vector, Definition(get_ssa_temp(ctx, &instr->dest.ssa)),
8313 posx.id() ? bld.vop1(aco_opcode::v_fract_f32, bld.def(v1), posx) : Operand::zero(),
8314 posy.id() ? bld.vop1(aco_opcode::v_fract_f32, bld.def(v1), posy) : Operand::zero());
8315 break;
8316 }
8317 case nir_intrinsic_load_tess_coord: visit_load_tess_coord(ctx, instr); break;
8318 case nir_intrinsic_load_interpolated_input: visit_load_interpolated_input(ctx, instr); break;
8319 case nir_intrinsic_store_output: visit_store_output(ctx, instr); break;
8320 case nir_intrinsic_load_input:
8321 case nir_intrinsic_load_input_vertex: visit_load_input(ctx, instr); break;
8322 case nir_intrinsic_load_per_vertex_input: visit_load_per_vertex_input(ctx, instr); break;
8323 case nir_intrinsic_load_ubo: visit_load_ubo(ctx, instr); break;
8324 case nir_intrinsic_load_push_constant: visit_load_push_constant(ctx, instr); break;
8325 case nir_intrinsic_load_constant: visit_load_constant(ctx, instr); break;
8326 case nir_intrinsic_load_shared: visit_load_shared(ctx, instr); break;
8327 case nir_intrinsic_store_shared: visit_store_shared(ctx, instr); break;
8328 case nir_intrinsic_shared_atomic_add:
8329 case nir_intrinsic_shared_atomic_imin:
8330 case nir_intrinsic_shared_atomic_umin:
8331 case nir_intrinsic_shared_atomic_imax:
8332 case nir_intrinsic_shared_atomic_umax:
8333 case nir_intrinsic_shared_atomic_and:
8334 case nir_intrinsic_shared_atomic_or:
8335 case nir_intrinsic_shared_atomic_xor:
8336 case nir_intrinsic_shared_atomic_exchange:
8337 case nir_intrinsic_shared_atomic_comp_swap:
8338 case nir_intrinsic_shared_atomic_fadd:
8339 case nir_intrinsic_shared_atomic_fmin:
8340 case nir_intrinsic_shared_atomic_fmax: visit_shared_atomic(ctx, instr); break;
8341 case nir_intrinsic_load_shared2_amd:
8342 case nir_intrinsic_store_shared2_amd: visit_access_shared2_amd(ctx, instr); break;
8343 case nir_intrinsic_bindless_image_load:
8344 case nir_intrinsic_bindless_image_sparse_load: visit_image_load(ctx, instr); break;
8345 case nir_intrinsic_bindless_image_store: visit_image_store(ctx, instr); break;
8346 case nir_intrinsic_bindless_image_atomic_add:
8347 case nir_intrinsic_bindless_image_atomic_umin:
8348 case nir_intrinsic_bindless_image_atomic_imin:
8349 case nir_intrinsic_bindless_image_atomic_umax:
8350 case nir_intrinsic_bindless_image_atomic_imax:
8351 case nir_intrinsic_bindless_image_atomic_and:
8352 case nir_intrinsic_bindless_image_atomic_or:
8353 case nir_intrinsic_bindless_image_atomic_xor:
8354 case nir_intrinsic_bindless_image_atomic_exchange:
8355 case nir_intrinsic_bindless_image_atomic_comp_swap:
8356 case nir_intrinsic_bindless_image_atomic_fmin:
8357 case nir_intrinsic_bindless_image_atomic_fmax: visit_image_atomic(ctx, instr); break;
8358 case nir_intrinsic_load_ssbo: visit_load_ssbo(ctx, instr); break;
8359 case nir_intrinsic_store_ssbo: visit_store_ssbo(ctx, instr); break;
8360 case nir_intrinsic_load_buffer_amd: visit_load_buffer(ctx, instr); break;
8361 case nir_intrinsic_store_buffer_amd: visit_store_buffer(ctx, instr); break;
8362 case nir_intrinsic_load_smem_amd: visit_load_smem(ctx, instr); break;
8363 case nir_intrinsic_load_global_amd: visit_load_global(ctx, instr); break;
8364 case nir_intrinsic_store_global_amd: visit_store_global(ctx, instr); break;
8365 case nir_intrinsic_global_atomic_add_amd:
8366 case nir_intrinsic_global_atomic_imin_amd:
8367 case nir_intrinsic_global_atomic_umin_amd:
8368 case nir_intrinsic_global_atomic_imax_amd:
8369 case nir_intrinsic_global_atomic_umax_amd:
8370 case nir_intrinsic_global_atomic_and_amd:
8371 case nir_intrinsic_global_atomic_or_amd:
8372 case nir_intrinsic_global_atomic_xor_amd:
8373 case nir_intrinsic_global_atomic_exchange_amd:
8374 case nir_intrinsic_global_atomic_comp_swap_amd:
8375 case nir_intrinsic_global_atomic_fmin_amd:
8376 case nir_intrinsic_global_atomic_fmax_amd: visit_global_atomic(ctx, instr); break;
8377 case nir_intrinsic_ssbo_atomic_add:
8378 case nir_intrinsic_ssbo_atomic_imin:
8379 case nir_intrinsic_ssbo_atomic_umin:
8380 case nir_intrinsic_ssbo_atomic_imax:
8381 case nir_intrinsic_ssbo_atomic_umax:
8382 case nir_intrinsic_ssbo_atomic_and:
8383 case nir_intrinsic_ssbo_atomic_or:
8384 case nir_intrinsic_ssbo_atomic_xor:
8385 case nir_intrinsic_ssbo_atomic_exchange:
8386 case nir_intrinsic_ssbo_atomic_comp_swap:
8387 case nir_intrinsic_ssbo_atomic_fmin:
8388 case nir_intrinsic_ssbo_atomic_fmax: visit_atomic_ssbo(ctx, instr); break;
8389 case nir_intrinsic_load_scratch: visit_load_scratch(ctx, instr); break;
8390 case nir_intrinsic_store_scratch: visit_store_scratch(ctx, instr); break;
8391 case nir_intrinsic_scoped_barrier: emit_scoped_barrier(ctx, instr); break;
8392 case nir_intrinsic_load_num_workgroups: {
8393 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8394 if (ctx->args->load_grid_size_from_user_sgpr) {
8395 bld.copy(Definition(dst), get_arg(ctx, ctx->args->ac.num_work_groups));
8396 } else {
8397 Temp addr = get_arg(ctx, ctx->args->ac.num_work_groups);
8398 assert(addr.regClass() == s2);
8399 bld.pseudo(aco_opcode::p_create_vector, Definition(dst),
8400 bld.smem(aco_opcode::s_load_dwordx2, bld.def(s2), addr, Operand::zero()),
8401 bld.smem(aco_opcode::s_load_dword, bld.def(s1), addr, Operand::c32(8)));
8402 }
8403 emit_split_vector(ctx, dst, 3);
8404 break;
8405 }
8406 case nir_intrinsic_load_ray_launch_size_addr_amd: {
8407 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8408 Temp addr = get_arg(ctx, ctx->args->ac.ray_launch_size_addr);
8409 assert(addr.regClass() == s2);
8410 bld.copy(Definition(dst), Operand(addr));
8411 break;
8412 }
8413 case nir_intrinsic_load_local_invocation_id: {
8414 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8415 if (ctx->options->gfx_level >= GFX11) {
8416 Temp local_ids[3];
8418 /* Thread IDs are packed in VGPR0, 10 bits per component. */
8419 for (uint32_t i = 0; i < 3; i++) {
8420 local_ids[i] = bld.vop3(aco_opcode::v_bfe_u32, bld.def(v1),
8421 get_arg(ctx, ctx->args->ac.local_invocation_ids),
8422 Operand::c32(i * 10u), Operand::c32(10u));
8423 }
8425 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), local_ids[0], local_ids[1],
8426 local_ids[2]);
8427 } else {
8428 bld.copy(Definition(dst), Operand(get_arg(ctx, ctx->args->ac.local_invocation_ids)));
8429 }
8430 emit_split_vector(ctx, dst, 3);
8431 break;
8432 }
8433 case nir_intrinsic_load_workgroup_id: {
8434 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8435 if (ctx->stage.hw == HWStage::CS) {
8436 const struct ac_arg* ids = ctx->args->ac.workgroup_ids;
8437 bld.pseudo(aco_opcode::p_create_vector, Definition(dst),
8438 ids[0].used ? Operand(get_arg(ctx, ids[0])) : Operand::zero(),
8439 ids[1].used ? Operand(get_arg(ctx, ids[1])) : Operand::zero(),
8440 ids[2].used ? Operand(get_arg(ctx, ids[2])) : Operand::zero());
8441 emit_split_vector(ctx, dst, 3);
8442 } else {
8443 isel_err(&instr->instr, "Unsupported stage for load_workgroup_id");
8444 }
8445 break;
8446 }
8447 case nir_intrinsic_load_local_invocation_index: {
8448 if (ctx->stage.hw == HWStage::LS || ctx->stage.hw == HWStage::HS) {
8449 if (ctx->options->gfx_level >= GFX11) {
8450 /* On GFX11, RelAutoIndex is WaveID * WaveSize + ThreadID. */
8451 Temp wave_id =
8452 bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc),
8453 get_arg(ctx, ctx->args->ac.tcs_wave_id), Operand::c32(0u | (5u << 16)));
8455 Temp temp = bld.sop2(aco_opcode::s_mul_i32, bld.def(s1), wave_id,
8456 Operand::c32(ctx->program->wave_size));
8457 Temp thread_id = emit_mbcnt(ctx, bld.tmp(v1));
8459 bld.vadd32(Definition(get_ssa_temp(ctx, &instr->dest.ssa)), temp, thread_id);
8460 } else {
8461 bld.copy(Definition(get_ssa_temp(ctx, &instr->dest.ssa)),
8462 get_arg(ctx, ctx->args->ac.vs_rel_patch_id));
8463 }
8464 break;
8465 } else if (ctx->stage.hw == HWStage::GS || ctx->stage.hw == HWStage::NGG) {
8466 bld.copy(Definition(get_ssa_temp(ctx, &instr->dest.ssa)), thread_id_in_threadgroup(ctx));
8467 break;
8468 } else if (ctx->program->workgroup_size <= ctx->program->wave_size) {
8469 emit_mbcnt(ctx, get_ssa_temp(ctx, &instr->dest.ssa));
8470 break;
8471 }
8473 Temp id = emit_mbcnt(ctx, bld.tmp(v1));
8475 /* The tg_size bits [6:11] contain the subgroup id,
8476 * we need this multiplied by the wave size, and then OR the thread id to it.
8477 */
8478 if (ctx->program->wave_size == 64) {
8479 /* After the s_and the bits are already multiplied by 64 (left shifted by 6) so we can just
8480 * feed that to v_or */
8481 Temp tg_num = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc),
8482 Operand::c32(0xfc0u), get_arg(ctx, ctx->args->ac.tg_size));
8483 bld.vop2(aco_opcode::v_or_b32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)), tg_num,
8484 id);
8485 } else {
8486 /* Extract the bit field and multiply the result by 32 (left shift by 5), then do the OR */
8487 Temp tg_num =
8488 bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc),
8489 get_arg(ctx, ctx->args->ac.tg_size), Operand::c32(0x6u | (0x6u << 16)));
8490 bld.vop3(aco_opcode::v_lshl_or_b32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)),
8491 tg_num, Operand::c32(0x5u), id);
8492 }
8493 break;
8494 }
8495 case nir_intrinsic_load_subgroup_id: {
8496 if (ctx->stage.hw == HWStage::CS) {
8497 bld.sop2(aco_opcode::s_bfe_u32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)),
8498 bld.def(s1, scc), get_arg(ctx, ctx->args->ac.tg_size),
8499 Operand::c32(0x6u | (0x6u << 16)));
8500 } else if (ctx->stage.hw == HWStage::NGG) {
8501 /* Get the id of the current wave within the threadgroup (workgroup) */
8502 bld.sop2(aco_opcode::s_bfe_u32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)),
8503 bld.def(s1, scc), get_arg(ctx, ctx->args->ac.merged_wave_info),
8504 Operand::c32(24u | (4u << 16)));
8505 } else {
8506 bld.copy(Definition(get_ssa_temp(ctx, &instr->dest.ssa)), Operand::zero());
8507 }
8508 break;
8509 }
8510 case nir_intrinsic_load_subgroup_invocation: {
8511 emit_mbcnt(ctx, get_ssa_temp(ctx, &instr->dest.ssa));
8512 break;
8513 }
8514 case nir_intrinsic_load_num_subgroups: {
8515 if (ctx->stage.hw == HWStage::CS)
8516 bld.sop2(aco_opcode::s_and_b32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)),
8517 bld.def(s1, scc), Operand::c32(0x3fu), get_arg(ctx, ctx->args->ac.tg_size));
8518 else if (ctx->stage.hw == HWStage::NGG)
8519 bld.sop2(aco_opcode::s_bfe_u32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)),
8520 bld.def(s1, scc), get_arg(ctx, ctx->args->ac.merged_wave_info),
8521 Operand::c32(28u | (4u << 16)));
8523 bld.copy(Definition(get_ssa_temp(ctx, &instr->dest.ssa)), Operand::c32(0x1u));
8524 break;
8525 }
8526 case nir_intrinsic_ballot: {
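/* Return a mask of the active lanes where the source is true: 1-bit sources
 * already are lane masks, 32/64-bit sources are first compared against zero. */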
8527 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
8528 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8530 if (instr->src[0].ssa->bit_size == 1) {
8531 assert(src.regClass() == bld.lm);
8532 } else if (instr->src[0].ssa->bit_size == 32 && src.regClass() == v1) {
8533 src = bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(bld.lm), Operand::zero(), src);
8534 } else if (instr->src[0].ssa->bit_size == 64 && src.regClass() == v2) {
8535 src = bld.vopc(aco_opcode::v_cmp_lg_u64, bld.def(bld.lm), Operand::zero(), src);
8536 } else {
8537 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
8538 }
8540 /* Make sure that all inactive lanes return zero.
8541 * Value-numbering might remove the comparison above */
8542 src = bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm));
8543 if (dst.size() != bld.lm.size()) {
8544 /* Wave32 with ballot size set to 64 */
8545 src =
8546 bld.pseudo(aco_opcode::p_create_vector, bld.def(dst.regClass()), src, Operand::zero());
8547 }
8549 emit_wqm(bld, src, dst);
8550 break;
8551 }
8552 case nir_intrinsic_shuffle:
8553 case nir_intrinsic_read_invocation: {
8554 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
8555 if (!nir_src_is_divergent(instr->src[0])) {
8556 emit_uniform_subgroup(ctx, instr, src);
8557 } else {
8558 Temp tid = get_ssa_temp(ctx, instr->src[1].ssa);
8559 if (instr->intrinsic == nir_intrinsic_read_invocation ||
8560 !nir_src_is_divergent(instr->src[1]))
8561 tid = bld.as_uniform(tid);
8562 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8564 if (instr->dest.ssa.bit_size != 1)
8565 src = as_vgpr(ctx, src);
8567 if (src.regClass() == v1b || src.regClass() == v2b) {
8568 Temp tmp = bld.tmp(v1);
8569 tmp = emit_wqm(bld, emit_bpermute(ctx, bld, tid, src), tmp);
8570 if (dst.type() == RegType::vgpr)
8571 bld.pseudo(aco_opcode::p_split_vector, Definition(dst),
8572 bld.def(src.regClass() == v1b ? v3b : v2b), tmp);
8573 else
8574 bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), tmp);
8575 } else if (src.regClass() == v1) {
8576 emit_wqm(bld, emit_bpermute(ctx, bld, tid, src), dst);
8577 } else if (src.regClass() == v2) {
8578 Temp lo = bld.tmp(v1), hi = bld.tmp(v1);
8579 bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), src);
8580 lo = emit_wqm(bld, emit_bpermute(ctx, bld, tid, lo));
8581 hi = emit_wqm(bld, emit_bpermute(ctx, bld, tid, hi));
8582 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, hi);
8583 emit_split_vector(ctx, dst, 2);
8584 } else if (instr->dest.ssa.bit_size == 1 && tid.regClass() == s1) {
8585 assert(src.regClass() == bld.lm);
8586 Temp tmp = bld.sopc(Builder::s_bitcmp1, bld.def(s1, scc), src, tid);
8587 bool_to_vector_condition(ctx, emit_wqm(bld, tmp), dst);
8588 } else if (instr->dest.ssa.bit_size == 1 && tid.regClass() == v1) {
8589 assert(src.regClass() == bld.lm);
8590 Temp tmp;
8591 if (ctx->program->gfx_level <= GFX7)
8592 tmp = bld.vop3(aco_opcode::v_lshr_b64, bld.def(v2), src, tid);
8593 else if (ctx->program->wave_size == 64)
8594 tmp = bld.vop3(aco_opcode::v_lshrrev_b64, bld.def(v2), tid, src);
8595 else
8596 tmp = bld.vop2_e64(aco_opcode::v_lshrrev_b32, bld.def(v1), tid, src);
8597 tmp = emit_extract_vector(ctx, tmp, 0, v1);
8598 tmp = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand::c32(1u), tmp);
8599 emit_wqm(bld, bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(bld.lm), Operand::zero(), tmp),
8600 dst);
8601 } else {
8602 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
8603 }
8604 }
8605 break;
8606 }
8607 case nir_intrinsic_load_sample_id: {
8608 bld.vop3(aco_opcode::v_bfe_u32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)),
8609 get_arg(ctx, ctx->args->ac.ancillary), Operand::c32(8u), Operand::c32(4u));
8610 break;
8611 }
8612 case nir_intrinsic_read_first_invocation: {
8613 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
8614 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8615 if (src.regClass() == v1b || src.regClass() == v2b || src.regClass() == v1) {
8616 emit_wqm(bld, bld.vop1(aco_opcode::v_readfirstlane_b32, bld.def(s1), src), dst);
8617 } else if (src.regClass() == v2) {
8618 Temp lo = bld.tmp(v1), hi = bld.tmp(v1);
8619 bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), src);
8620 lo = emit_wqm(bld, bld.vop1(aco_opcode::v_readfirstlane_b32, bld.def(s1), lo));
8621 hi = emit_wqm(bld, bld.vop1(aco_opcode::v_readfirstlane_b32, bld.def(s1), hi));
8622 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, hi);
8623 emit_split_vector(ctx, dst, 2);
8624 } else if (instr->dest.ssa.bit_size == 1) {
8625 assert(src.regClass() == bld.lm);
8626 Temp tmp = bld.sopc(Builder::s_bitcmp1, bld.def(s1, scc), src,
8627 bld.sop1(Builder::s_ff1_i32, bld.def(s1), Operand(exec, bld.lm)));
8628 bool_to_vector_condition(ctx, emit_wqm(bld, tmp), dst);
8629 } else {
8630 bld.copy(Definition(dst), src);
8631 }
8632 break;
8633 }
8634 case nir_intrinsic_vote_all: {
8635 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
8636 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8637 assert(src.regClass() == bld.lm);
8638 assert(dst.regClass() == bld.lm);
8640 Temp tmp =
8641 bld.sop2(Builder::s_andn2, bld.def(bld.lm), bld.def(s1, scc), Operand(exec, bld.lm), src)
8642 .def(1)
8643 .getTemp();
8644 Temp cond = bool_to_vector_condition(ctx, emit_wqm(bld, tmp));
8645 bld.sop1(Builder::s_not, Definition(dst), bld.def(s1, scc), cond);
8646 break;
8647 }
8648 case nir_intrinsic_vote_any: {
8649 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
8650 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8651 assert(src.regClass() == bld.lm);
8652 assert(dst.regClass() == bld.lm);
8654 Temp tmp = bool_to_scalar_condition(ctx, src);
8655 bool_to_vector_condition(ctx, emit_wqm(bld, tmp), dst);
8656 break;
8657 }
8658 case nir_intrinsic_reduce:
8659 case nir_intrinsic_inclusive_scan:
8660 case nir_intrinsic_exclusive_scan: {
8661 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
8662 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8663 nir_op op = (nir_op)nir_intrinsic_reduction_op(instr);
8664 unsigned cluster_size =
8665 instr->intrinsic == nir_intrinsic_reduce ? nir_intrinsic_cluster_size(instr) : 0;
8666 cluster_size = util_next_power_of_two(
8667 MIN2(cluster_size ? cluster_size : ctx->program->wave_size, ctx->program->wave_size));
8669 if (!nir_src_is_divergent(instr->src[0]) && cluster_size == ctx->program->wave_size &&
8670 instr->dest.ssa.bit_size != 1) {
8671 /* We use divergence analysis to assign the regclass, so check if it's
8672 * working as expected */
8673 ASSERTED bool expected_divergent = instr->intrinsic == nir_intrinsic_exclusive_scan;
8674 if (instr->intrinsic == nir_intrinsic_inclusive_scan)
8675 expected_divergent = op == nir_op_iadd || op == nir_op_fadd || op == nir_op_ixor;
8676 assert(nir_dest_is_divergent(instr->dest) == expected_divergent);
8678 if (instr->intrinsic == nir_intrinsic_reduce) {
8679 if (emit_uniform_reduce(ctx, instr))
8680 break;
8681 } else if (emit_uniform_scan(ctx, instr)) {
8682 break;
8683 }
8684 }
8686 if (instr->dest.ssa.bit_size == 1) {
8687 if (op == nir_op_imul || op == nir_op_umin || op == nir_op_imin)
8688 op = nir_op_iand;
8689 else if (op == nir_op_iadd)
8690 op = nir_op_ixor;
8691 else if (op == nir_op_umax || op == nir_op_imax)
8692 op = nir_op_ior;
8693 assert(op == nir_op_iand || op == nir_op_ior || op == nir_op_ixor);
8695 switch (instr->intrinsic) {
8696 case nir_intrinsic_reduce:
8697 emit_wqm(bld, emit_boolean_reduce(ctx, op, cluster_size, src), dst);
8698 break;
8699 case nir_intrinsic_exclusive_scan:
8700 emit_wqm(bld, emit_boolean_exclusive_scan(ctx, op, src), dst);
8701 break;
8702 case nir_intrinsic_inclusive_scan:
8703 emit_wqm(bld, emit_boolean_inclusive_scan(ctx, op, src), dst);
8704 break;
8705 default: assert(false);
8706 }
8707 } else if (cluster_size == 1) {
8708 bld.copy(Definition(dst), src);
8709 } else {
8710 unsigned bit_size = instr->src[0].ssa->bit_size;
8712 src = emit_extract_vector(ctx, src, 0, RegClass::get(RegType::vgpr, bit_size / 8));
8714 ReduceOp reduce_op = get_reduce_op(op, bit_size);
8716 aco_opcode aco_op;
8717 switch (instr->intrinsic) {
8718 case nir_intrinsic_reduce: aco_op = aco_opcode::p_reduce; break;
8719 case nir_intrinsic_inclusive_scan: aco_op = aco_opcode::p_inclusive_scan; break;
8720 case nir_intrinsic_exclusive_scan: aco_op = aco_opcode::p_exclusive_scan; break;
8721 default: unreachable("unknown reduce intrinsic");
8722 }
8724 Temp tmp_dst = emit_reduction_instr(ctx, aco_op, reduce_op, cluster_size,
8725 bld.def(dst.regClass()), src);
8726 emit_wqm(bld, tmp_dst, dst);
8727 }
8728 break;
8729 }
8730 case nir_intrinsic_quad_broadcast:
8731 case nir_intrinsic_quad_swap_horizontal:
8732 case nir_intrinsic_quad_swap_vertical:
8733 case nir_intrinsic_quad_swap_diagonal:
8734 case nir_intrinsic_quad_swizzle_amd: {
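/* All quad operations are permutations within groups of four lanes and map
 * to a single DPP quad_perm (or ds_swizzle before GFX8) per 32-bit half. */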
8735 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
8737 if (!nir_dest_is_divergent(instr->dest)) {
8738 emit_uniform_subgroup(ctx, instr, src);
8739 break;
8740 }
8742 /* Quad broadcast lane. */
8743 unsigned lane = 0;
8744 /* Use VALU for the bool instructions that don't have a SALU-only special case. */
8745 bool bool_use_valu = instr->dest.ssa.bit_size == 1;
8747 uint16_t dpp_ctrl = 0;
8749 switch (instr->intrinsic) {
8750 case nir_intrinsic_quad_swap_horizontal: dpp_ctrl = dpp_quad_perm(1, 0, 3, 2); break;
8751 case nir_intrinsic_quad_swap_vertical: dpp_ctrl = dpp_quad_perm(2, 3, 0, 1); break;
8752 case nir_intrinsic_quad_swap_diagonal: dpp_ctrl = dpp_quad_perm(3, 2, 1, 0); break;
8753 case nir_intrinsic_quad_swizzle_amd: dpp_ctrl = nir_intrinsic_swizzle_mask(instr); break;
8754 case nir_intrinsic_quad_broadcast:
8755 lane = nir_src_as_const_value(instr->src[1])->u32;
8756 dpp_ctrl = dpp_quad_perm(lane, lane, lane, lane);
8757 bool_use_valu = false;
8758 break;
8759 default: break;
8760 }
8762 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8763 Temp tmp(dst);
8765 /* Setup source. */
8766 if (bool_use_valu)
8767 src = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), Operand::zero(),
8768 Operand::c32(-1), src);
8769 else if (instr->dest.ssa.bit_size != 1)
8770 src = as_vgpr(ctx, src);
8772 /* Setup temporary destination. */
8773 if (bool_use_valu)
8774 tmp = bld.tmp(v1);
8775 else if (ctx->program->stage == fragment_fs)
8776 tmp = bld.tmp(dst.regClass());
8778 if (instr->dest.ssa.bit_size == 1 && instr->intrinsic == nir_intrinsic_quad_broadcast) {
8779 /* Special case for quad broadcast using SALU only. */
8780 assert(src.regClass() == bld.lm && tmp.regClass() == bld.lm);
8782 uint32_t half_mask = 0x11111111u << lane;
8783 Operand mask_tmp = bld.lm.bytes() == 4
8784 ? Operand::c32(half_mask)
8785 : bld.pseudo(aco_opcode::p_create_vector, bld.def(bld.lm),
8786 Operand::c32(half_mask), Operand::c32(half_mask));
8788 src =
8789 bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm));
8790 src = bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), mask_tmp, src);
8791 bld.sop1(Builder::s_wqm, Definition(tmp), src);
8792 } else if (instr->dest.ssa.bit_size <= 32 || bool_use_valu) {
8793 unsigned excess_bytes = bool_use_valu ? 0 : 4 - instr->dest.ssa.bit_size / 8;
8794 Definition def = excess_bytes ? bld.def(v1) : Definition(tmp);
8796 if (ctx->program->gfx_level >= GFX8)
8797 bld.vop1_dpp(aco_opcode::v_mov_b32, def, src, dpp_ctrl);
8798 else
8799 bld.ds(aco_opcode::ds_swizzle_b32, def, src, (1 << 15) | dpp_ctrl);
8801 if (excess_bytes)
8802 bld.pseudo(aco_opcode::p_split_vector, Definition(tmp),
8803 bld.def(RegClass::get(tmp.type(), excess_bytes)), def.getTemp());
8804 } else if (instr->dest.ssa.bit_size == 64) {
8805 Temp lo = bld.tmp(v1), hi = bld.tmp(v1);
8806 bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), src);
8808 if (ctx->program->gfx_level >= GFX8) {
8809 lo = bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), lo, dpp_ctrl);
8810 hi = bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), hi, dpp_ctrl);
8811 } else {
8812 lo = bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), lo, (1 << 15) | dpp_ctrl);
8813 hi = bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), hi, (1 << 15) | dpp_ctrl);
8814 }
8816 bld.pseudo(aco_opcode::p_create_vector, Definition(tmp), lo, hi);
8817 emit_split_vector(ctx, tmp, 2);
8818 } else {
8819 isel_err(&instr->instr, "Unimplemented NIR quad group instruction bit size.");
8820 }
8822 if (tmp.id() != dst.id()) {
8823 if (bool_use_valu)
8824 tmp = bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(bld.lm), Operand::zero(), tmp);
8826 /* Vulkan spec 9.25: Helper invocations must be active for quad group instructions. */
8827 emit_wqm(bld, tmp, dst, true);
8828 }
8830 break;
8831 }
8832 case nir_intrinsic_masked_swizzle_amd: {
8833 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
8834 if (!nir_dest_is_divergent(instr->dest)) {
8835 emit_uniform_subgroup(ctx, instr, src);
8836 break;
8837 }
8838 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8839 uint32_t mask = nir_intrinsic_swizzle_mask(instr);
8841 if (instr->dest.ssa.bit_size != 1)
8842 src = as_vgpr(ctx, src);
8844 if (instr->dest.ssa.bit_size == 1) {
8845 assert(src.regClass() == bld.lm);
8846 src = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), Operand::zero(),
8847 Operand::c32(-1), src);
8848 src = emit_masked_swizzle(ctx, bld, src, mask);
8849 Temp tmp = bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(bld.lm), Operand::zero(), src);
8850 emit_wqm(bld, tmp, dst);
8851 } else if (dst.regClass() == v1b) {
8852 Temp tmp = emit_wqm(bld, emit_masked_swizzle(ctx, bld, src, mask));
8853 emit_extract_vector(ctx, tmp, 0, dst);
8854 } else if (dst.regClass() == v2b) {
8855 Temp tmp = emit_wqm(bld, emit_masked_swizzle(ctx, bld, src, mask));
8856 emit_extract_vector(ctx, tmp, 0, dst);
8857 } else if (dst.regClass() == v1) {
8858 emit_wqm(bld, emit_masked_swizzle(ctx, bld, src, mask), dst);
8859 } else if (dst.regClass() == v2) {
8860 Temp lo = bld.tmp(v1), hi = bld.tmp(v1);
8861 bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), src);
8862 lo = emit_wqm(bld, emit_masked_swizzle(ctx, bld, lo, mask));
8863 hi = emit_wqm(bld, emit_masked_swizzle(ctx, bld, hi, mask));
8864 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, hi);
8865 emit_split_vector(ctx, dst, 2);
8866 } else {
8867 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
8868 }
8869 break;
8870 }
8871 case nir_intrinsic_write_invocation_amd: {
8872 Temp src = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[0].ssa));
8873 Temp val = bld.as_uniform(get_ssa_temp(ctx, instr->src[1].ssa));
8874 Temp lane = bld.as_uniform(get_ssa_temp(ctx, instr->src[2].ssa));
8875 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8876 if (dst.regClass() == v1) {
8877 /* src2 is ignored for writelane. RA assigns the same reg for dst */
8878 emit_wqm(bld, bld.writelane(bld.def(v1), val, lane, src), dst);
8879 } else if (dst.regClass() == v2) {
8880 Temp src_lo = bld.tmp(v1), src_hi = bld.tmp(v1);
8881 Temp val_lo = bld.tmp(s1), val_hi = bld.tmp(s1);
8882 bld.pseudo(aco_opcode::p_split_vector, Definition(src_lo), Definition(src_hi), src);
8883 bld.pseudo(aco_opcode::p_split_vector, Definition(val_lo), Definition(val_hi), val);
8884 Temp lo = emit_wqm(bld, bld.writelane(bld.def(v1), val_lo, lane, src_lo));
8885 Temp hi = emit_wqm(bld, bld.writelane(bld.def(v1), val_hi, lane, src_hi));
8886 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, hi);
8887 emit_split_vector(ctx, dst, 2);
8888 } else {
8889 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
8890 }
8891 break;
8892 }
8893 case nir_intrinsic_mbcnt_amd: {
8894 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
8895 Temp add_src = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[1].ssa));
8896 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8897 /* Fit 64-bit mask for wave32 */
8898 src = emit_extract_vector(ctx, src, 0, RegClass(src.type(), bld.lm.size()));
8899 Temp wqm_tmp = emit_mbcnt(ctx, bld.tmp(v1), Operand(src), Operand(add_src));
8900 emit_wqm(bld, wqm_tmp, dst);
8901 break;
8902 }
8903 case nir_intrinsic_byte_permute_amd: {
8904 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8905 assert(dst.regClass() == v1);
8906 assert(ctx->program->gfx_level >= GFX8);
8907 bld.vop3(aco_opcode::v_perm_b32, Definition(dst), get_ssa_temp(ctx, instr->src[0].ssa),
8908 as_vgpr(ctx, get_ssa_temp(ctx, instr->src[1].ssa)),
8909 as_vgpr(ctx, get_ssa_temp(ctx, instr->src[2].ssa)));
8910 break;
8911 }
8912 case nir_intrinsic_lane_permute_16_amd: {
8913 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
8914 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8915 assert(ctx->program->gfx_level >= GFX10);
8917 if (src.regClass() == s1) {
8918 bld.copy(Definition(dst), src);
8919 } else if (dst.regClass() == v1 && src.regClass() == v1) {
8920 bld.vop3(aco_opcode::v_permlane16_b32, Definition(dst), src,
8921 bld.as_uniform(get_ssa_temp(ctx, instr->src[1].ssa)),
8922 bld.as_uniform(get_ssa_temp(ctx, instr->src[2].ssa)));
8924 isel_err(&instr->instr, "Unimplemented lane_permute_16_amd");
8925 }
8926 break;
8927 }
8928 case nir_intrinsic_load_helper_invocation:
8929 case nir_intrinsic_is_helper_invocation: {
8930 /* load_helper() after demote() get lowered to is_helper().
8931 * Otherwise, these two behave the same. */
8932 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8933 bld.pseudo(aco_opcode::p_is_helper, Definition(dst), Operand(exec, bld.lm));
8934 ctx->block->kind |= block_kind_needs_lowering;
8935 ctx->program->needs_exact = true;
8936 break;
8937 }
8938 case nir_intrinsic_demote:
8939 bld.pseudo(aco_opcode::p_demote_to_helper, Operand::c32(-1u));
8941 if (ctx->block->loop_nest_depth || ctx->cf_info.parent_if.is_divergent)
8942 ctx->cf_info.exec_potentially_empty_discard = true;
8943 ctx->block->kind |= block_kind_uses_discard;
8944 ctx->program->needs_exact = true;
8945 break;
8946 case nir_intrinsic_demote_if: {
8947 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
8948 assert(src.regClass() == bld.lm);
8949 Operand cond =
8950 bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm));
8951 bld.pseudo(aco_opcode::p_demote_to_helper, cond);
8953 if (ctx->block->loop_nest_depth || ctx->cf_info.parent_if.is_divergent)
8954 ctx->cf_info.exec_potentially_empty_discard = true;
8955 ctx->block->kind |= block_kind_uses_discard;
8956 ctx->program->needs_exact = true;
8957 break;
8958 }
8959 case nir_intrinsic_terminate:
8960 case nir_intrinsic_terminate_if:
8961 case nir_intrinsic_discard:
8962 case nir_intrinsic_discard_if: {
8963 Operand cond = Operand::c32(-1u);
8964 if (instr->intrinsic == nir_intrinsic_discard_if ||
8965 instr->intrinsic == nir_intrinsic_terminate_if) {
8966 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
8967 assert(src.regClass() == bld.lm);
8968 cond =
8969 bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm));
8970 }
8972 bld.pseudo(aco_opcode::p_discard_if, cond);
8974 if (ctx->block->loop_nest_depth || ctx->cf_info.parent_if.is_divergent)
8975 ctx->cf_info.exec_potentially_empty_discard = true;
8976 ctx->block->kind |= block_kind_uses_discard;
8977 ctx->program->needs_exact = true;
8978 break;
8979 }
8980 case nir_intrinsic_first_invocation: {
8981 emit_wqm(bld, bld.sop1(Builder::s_ff1_i32, bld.def(s1), Operand(exec, bld.lm)),
8982 get_ssa_temp(ctx, &instr->dest.ssa));
8983 break;
8984 }
8985 case nir_intrinsic_last_invocation: {
8986 Temp flbit = bld.sop1(Builder::s_flbit_i32, bld.def(s1), Operand(exec, bld.lm));
8987 Temp last = bld.sop2(aco_opcode::s_sub_i32, bld.def(s1), bld.def(s1, scc),
8988 Operand::c32(ctx->program->wave_size - 1u), flbit);
8989 emit_wqm(bld, last, get_ssa_temp(ctx, &instr->dest.ssa));
8990 break;
8991 }
8992 case nir_intrinsic_elect: {
8993 /* p_elect is lowered in aco_insert_exec_mask.
8994 * Use exec as an operand so value numbering and the pre-RA optimizer won't recognize
8995 * two p_elect with different exec masks as the same.
8996 */
8997 Temp elected = bld.pseudo(aco_opcode::p_elect, bld.def(bld.lm), Operand(exec, bld.lm));
8998 emit_wqm(bld, elected, get_ssa_temp(ctx, &instr->dest.ssa));
8999 ctx->block->kind |= block_kind_needs_lowering;
9000 break;
9001 }
9002 case nir_intrinsic_shader_clock: {
9003 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
9004 if (nir_intrinsic_memory_scope(instr) == NIR_SCOPE_SUBGROUP &&
9005 ctx->options->gfx_level >= GFX10_3) {
9006 /* "((size - 1) << 11) | register" (SHADER_CYCLES is encoded as register 29) */
9007 Temp clock = bld.sopk(aco_opcode::s_getreg_b32, bld.def(s1), ((20 - 1) << 11) | 29);
9008 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), clock, Operand::zero());
9009 } else {
9010 aco_opcode opcode = nir_intrinsic_memory_scope(instr) == NIR_SCOPE_DEVICE
9011 ? aco_opcode::s_memrealtime
9012 : aco_opcode::s_memtime;
9013 bld.smem(opcode, Definition(dst), memory_sync_info(0, semantic_volatile));
9014 }
9015 emit_split_vector(ctx, dst, 2);
9016 break;
9017 }
9018 case nir_intrinsic_load_vertex_id_zero_base: {
9019 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
9020 bld.copy(Definition(dst), get_arg(ctx, ctx->args->ac.vertex_id));
9021 break;
9022 }
9023 case nir_intrinsic_load_first_vertex: {
9024 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
9025 bld.copy(Definition(dst), get_arg(ctx, ctx->args->ac.base_vertex));
9026 break;
9027 }
9028 case nir_intrinsic_load_base_instance: {
9029 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
9030 bld.copy(Definition(dst), get_arg(ctx, ctx->args->ac.start_instance));
9031 break;
9032 }
9033 case nir_intrinsic_load_instance_id: {
9034 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
9035 bld.copy(Definition(dst), get_arg(ctx, ctx->args->ac.instance_id));
9036 break;
9037 }
9038 case nir_intrinsic_load_draw_id: {
9039 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
9040 bld.copy(Definition(dst), get_arg(ctx, ctx->args->ac.draw_id));
9041 break;
9042 }
9043 case nir_intrinsic_load_invocation_id: {
9044 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
9046 if (ctx->shader->info.stage == MESA_SHADER_GEOMETRY) {
9047 if (ctx->options->gfx_level >= GFX10)
9048 bld.vop2_e64(aco_opcode::v_and_b32, Definition(dst), Operand::c32(127u),
9049 get_arg(ctx, ctx->args->ac.gs_invocation_id));
9050 else
9051 bld.copy(Definition(dst), get_arg(ctx, ctx->args->ac.gs_invocation_id));
9052 } else if (ctx->shader->info.stage == MESA_SHADER_TESS_CTRL) {
9053 bld.vop3(aco_opcode::v_bfe_u32, Definition(dst), get_arg(ctx, ctx->args->ac.tcs_rel_ids),
9054 Operand::c32(8u), Operand::c32(5u));
9055 } else {
9056 unreachable("Unsupported stage for load_invocation_id");
9057 }
9059 break;
9060 }
9061 case nir_intrinsic_load_primitive_id: {
9062 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
9064 switch (ctx->shader->info.stage) {
9065 case MESA_SHADER_GEOMETRY:
9066 bld.copy(Definition(dst), get_arg(ctx, ctx->args->ac.gs_prim_id));
9067 break;
9068 case MESA_SHADER_TESS_CTRL:
9069 bld.copy(Definition(dst), get_arg(ctx, ctx->args->ac.tcs_patch_id));
9070 break;
9071 case MESA_SHADER_TESS_EVAL:
9072 bld.copy(Definition(dst), get_arg(ctx, ctx->args->ac.tes_patch_id));
9073 break;
9074 default:
9075 if (ctx->stage.hw == HWStage::NGG && !ctx->stage.has(SWStage::GS)) {
9076 /* In case of NGG, the GS threads always have the primitive ID
9077 * even if there is no SW GS. */
9078 bld.copy(Definition(dst), get_arg(ctx, ctx->args->ac.gs_prim_id));
9079 break;
9080 } else if (ctx->shader->info.stage == MESA_SHADER_VERTEX) {
9081 bld.copy(Definition(dst), get_arg(ctx, ctx->args->ac.vs_prim_id));
9082 break;
9083 } else
9084 unreachable("Unimplemented shader stage for nir_intrinsic_load_primitive_id");
9085 }
9087 break;
9088 }
9089 case nir_intrinsic_emit_vertex_with_counter: {
9090 assert(ctx->stage.hw == HWStage::GS);
9091 visit_emit_vertex_with_counter(ctx, instr);
9092 break;
9093 }
9094 case nir_intrinsic_end_primitive_with_counter: {
9095 if (ctx->stage.hw != HWStage::NGG) {
9096 unsigned stream = nir_intrinsic_stream_id(instr);
9097 bld.sopp(aco_opcode::s_sendmsg, bld.m0(ctx->gs_wave_id), -1,
9098 sendmsg_gs(true, false, stream));
9099 }
9100 break;
9101 }
9102 case nir_intrinsic_set_vertex_and_primitive_count: {
9103 assert(ctx->stage.hw == HWStage::GS);
9104 /* unused in the legacy pipeline, the HW keeps track of this for us */
9105 break;
9106 }
9107 case nir_intrinsic_has_input_vertex_amd:
9108 case nir_intrinsic_has_input_primitive_amd: {
9109 assert(ctx->stage.hw == HWStage::NGG);
9110 unsigned i = instr->intrinsic == nir_intrinsic_has_input_vertex_amd ? 0 : 1;
9111 bld.copy(Definition(get_ssa_temp(ctx, &instr->dest.ssa)), merged_wave_info_to_mask(ctx, i));
9112 break;
9113 }
9114 case nir_intrinsic_export_vertex_amd: {
9115 ctx->block->kind |= block_kind_export_end;
9116 create_vs_exports(ctx);
9117 break;
9118 }
9119 case nir_intrinsic_export_primitive_amd: {
9120 Temp prim_ch1 = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[0].ssa));
9121 create_primitive_exports(ctx, prim_ch1);
9122 break;
9123 }
9124 case nir_intrinsic_alloc_vertices_and_primitives_amd: {
9125 assert(ctx->stage.hw == HWStage::NGG);
9126 Temp num_vertices = get_ssa_temp(ctx, instr->src[0].ssa);
9127 Temp num_primitives = get_ssa_temp(ctx, instr->src[1].ssa);
9128 ngg_emit_sendmsg_gs_alloc_req(ctx, num_vertices, num_primitives);
9129 break;
9130 }
9131 case nir_intrinsic_gds_atomic_add_amd: {
9132 Temp store_val = get_ssa_temp(ctx, instr->src[0].ssa);
9133 Temp gds_addr = get_ssa_temp(ctx, instr->src[1].ssa);
9134 Temp m0_val = get_ssa_temp(ctx, instr->src[2].ssa);
9135 Operand m = bld.m0((Temp)bld.copy(bld.def(s1, m0), bld.as_uniform(m0_val)));
9136 bld.ds(aco_opcode::ds_add_u32, as_vgpr(ctx, gds_addr), as_vgpr(ctx, store_val), m, 0u, 0u,
9137 true);
9138 break;
9139 }
9140 case nir_intrinsic_load_sbt_base_amd: {
9141 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
9142 Temp addr = get_arg(ctx, ctx->args->ac.sbt_descriptors);
9143 assert(addr.regClass() == s2);
9144 bld.copy(Definition(dst), Operand(addr));
9145 break;
9146 }
9147 case nir_intrinsic_bvh64_intersect_ray_amd: visit_bvh64_intersect_ray_amd(ctx, instr); break;
9148 case nir_intrinsic_overwrite_vs_arguments_amd: {
9149 ctx->arg_temps[ctx->args->ac.vertex_id.arg_index] = get_ssa_temp(ctx, instr->src[0].ssa);
9150 ctx->arg_temps[ctx->args->ac.instance_id.arg_index] = get_ssa_temp(ctx, instr->src[1].ssa);
9151 break;
9152 }
9153 case nir_intrinsic_overwrite_tes_arguments_amd: {
9154 ctx->arg_temps[ctx->args->ac.tes_u.arg_index] = get_ssa_temp(ctx, instr->src[0].ssa);
9155 ctx->arg_temps[ctx->args->ac.tes_v.arg_index] = get_ssa_temp(ctx, instr->src[1].ssa);
9156 ctx->arg_temps[ctx->args->ac.tes_rel_patch_id.arg_index] =
9157 get_ssa_temp(ctx, instr->src[2].ssa);
9158 ctx->arg_temps[ctx->args->ac.tes_patch_id.arg_index] = get_ssa_temp(ctx, instr->src[3].ssa);
9159 break;
9160 }
9161 case nir_intrinsic_load_force_vrs_rates_amd: {
9162 bld.copy(Definition(get_ssa_temp(ctx, &instr->dest.ssa)),
9163 get_arg(ctx, ctx->args->ac.force_vrs_rates));
9164 break;
9165 }
9166 case nir_intrinsic_load_scalar_arg_amd:
9167 case nir_intrinsic_load_vector_arg_amd: {
9168 assert(nir_intrinsic_base(instr) < ctx->args->ac.arg_count);
9169 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
9170 Temp src = ctx->arg_temps[nir_intrinsic_base(instr)];
9172 assert(src.type() == (instr->intrinsic == nir_intrinsic_load_scalar_arg_amd ? RegType::sgpr : RegType::vgpr));
9173 bld.copy(Definition(dst), src);
9174 emit_split_vector(ctx, dst, dst.size());
9178 isel_err(&instr->instr, "Unimplemented intrinsic instr");
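/* Selects the per-face derivative components for cube sampling: 'id' is the
 * cube face index (0-1: X major, 2-3: Y major, 4-5: Z major) and 'ma' the
 * major-axis coordinate. Writes the face-local derivatives of sc, tc and
 * |ma|, mirroring the derivative handling of ac_prepare_cube_coords(). */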
9186 build_cube_select(isel_context* ctx, Temp ma, Temp id, Temp deriv, Temp* out_ma, Temp* out_sc,
9187 Temp* out_tc)
9188 {
9189 Builder bld(ctx->program, ctx->block);
9191 Temp deriv_x = emit_extract_vector(ctx, deriv, 0, v1);
9192 Temp deriv_y = emit_extract_vector(ctx, deriv, 1, v1);
9193 Temp deriv_z = emit_extract_vector(ctx, deriv, 2, v1);
9195 Operand neg_one = Operand::c32(0xbf800000u);
9196 Operand one = Operand::c32(0x3f800000u);
9197 Operand two = Operand::c32(0x40000000u);
9198 Operand four = Operand::c32(0x40800000u);
9200 Temp is_ma_positive = bld.vopc(aco_opcode::v_cmp_le_f32, bld.def(bld.lm), Operand::zero(), ma);
9201 Temp sgn_ma = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), neg_one, one, is_ma_positive);
9202 Temp neg_sgn_ma = bld.vop2(aco_opcode::v_sub_f32, bld.def(v1), Operand::zero(), sgn_ma);
9204 Temp is_ma_z = bld.vopc(aco_opcode::v_cmp_le_f32, bld.def(bld.lm), four, id);
9205 Temp is_ma_y = bld.vopc(aco_opcode::v_cmp_le_f32, bld.def(bld.lm), two, id);
9206 is_ma_y = bld.sop2(Builder::s_andn2, bld.def(bld.lm), bld.def(s1, scc), is_ma_y, is_ma_z);
9207 Temp is_not_ma_x =
9208 bld.sop2(aco_opcode::s_or_b64, bld.def(bld.lm), bld.def(s1, scc), is_ma_z, is_ma_y);
9211 Temp tmp = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), deriv_z, deriv_x, is_not_ma_x);
9212 Temp sgn = bld.vop2_e64(
9213 aco_opcode::v_cndmask_b32, bld.def(v1),
9214 bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), neg_sgn_ma, sgn_ma, is_ma_z), one, is_ma_y);
9215 *out_sc = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), tmp, sgn);
9218 tmp = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), deriv_y, deriv_z, is_ma_y);
9219 sgn = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), neg_one, sgn_ma, is_ma_y);
9220 *out_tc = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), tmp, sgn);
9223 tmp = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1),
9224 bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), deriv_x, deriv_y, is_ma_y),
9225 deriv_z, is_ma_z);
9226 tmp = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand::c32(0x7fffffffu), tmp);
9227 *out_ma = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), two, tmp);
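/* Lowers 3D cube coordinates (plus optional array layer and derivatives) to
 * the <sc, tc, face-id> form the hardware samples with: sc and tc are divided
 * by the major axis |ma| and biased by 1.5, and for arrays the layer is folded
 * into the face id (id += 8.0 * layer). See ac_prepare_cube_coords(). */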
9231 prepare_cube_coords(isel_context* ctx, std::vector<Temp>& coords, Temp* ddx, Temp* ddy,
9232 bool is_deriv, bool is_array)
9234 Builder bld(ctx->program, ctx->block);
9235 Temp ma, tc, sc, id;
9236 aco_opcode madak =
9237 ctx->program->gfx_level >= GFX10_3 ? aco_opcode::v_fmaak_f32 : aco_opcode::v_madak_f32;
9238 aco_opcode madmk =
9239 ctx->program->gfx_level >= GFX10_3 ? aco_opcode::v_fmamk_f32 : aco_opcode::v_madmk_f32;
9241 /* see comment in ac_prepare_cube_coords() */
9242 if (is_array && ctx->options->gfx_level <= GFX8)
9243 coords[3] = bld.vop2(aco_opcode::v_max_f32, bld.def(v1), Operand::zero(), coords[3]);
9245 ma = bld.vop3(aco_opcode::v_cubema_f32, bld.def(v1), coords[0], coords[1], coords[2]);
9247 aco_ptr<VOP3_instruction> vop3a{
9248 create_instruction<VOP3_instruction>(aco_opcode::v_rcp_f32, asVOP3(Format::VOP1), 1, 1)};
9249 vop3a->operands[0] = Operand(ma);
9250 vop3a->abs[0] = true;
9251 Temp invma = bld.tmp(v1);
9252 vop3a->definitions[0] = Definition(invma);
9253 ctx->block->instructions.emplace_back(std::move(vop3a));
9255 sc = bld.vop3(aco_opcode::v_cubesc_f32, bld.def(v1), coords[0], coords[1], coords[2]);
9256 if (!is_deriv)
9257 sc = bld.vop2(madak, bld.def(v1), sc, invma, Operand::c32(0x3fc00000u /*1.5*/));
9259 tc = bld.vop3(aco_opcode::v_cubetc_f32, bld.def(v1), coords[0], coords[1], coords[2]);
9260 if (!is_deriv)
9261 tc = bld.vop2(madak, bld.def(v1), tc, invma, Operand::c32(0x3fc00000u /*1.5*/));
9263 id = bld.vop3(aco_opcode::v_cubeid_f32, bld.def(v1), coords[0], coords[1], coords[2]);
9265 if (is_deriv) {
9266 sc = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), sc, invma);
9267 tc = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), tc, invma);
9269 for (unsigned i = 0; i < 2; i++) {
9270 /* see comment in ac_prepare_cube_coords() */
9271 Temp deriv_ma;
9272 Temp deriv_sc, deriv_tc;
9273 build_cube_select(ctx, ma, id, i ? *ddy : *ddx, &deriv_ma, &deriv_sc, &deriv_tc);
9275 deriv_ma = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), deriv_ma, invma);
9277 Temp x = bld.vop2(aco_opcode::v_sub_f32, bld.def(v1),
9278 bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), deriv_sc, invma),
9279 bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), deriv_ma, sc));
9280 Temp y = bld.vop2(aco_opcode::v_sub_f32, bld.def(v1),
9281 bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), deriv_tc, invma),
9282 bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), deriv_ma, tc));
9283 *(i ? ddy : ddx) = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), x, y);
9286 sc = bld.vop2(aco_opcode::v_add_f32, bld.def(v1), Operand::c32(0x3fc00000u /*1.5*/), sc);
9287 tc = bld.vop2(aco_opcode::v_add_f32, bld.def(v1), Operand::c32(0x3fc00000u /*1.5*/), tc);
9290 if (is_array) {
9291 id = bld.vop2(madmk, bld.def(v1), coords[3], id, Operand::c32(0x41000000u /*8.0*/));
9292 coords.erase(coords.begin() + 3);
9293 }
9294 coords[0] = sc;
9295 coords[1] = tc;
9296 coords[2] = id;
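/* If 'vec' was produced by a nir vecN instruction, fills cv[] with each
 * component's constant value (NULL for non-constant components) and returns
 * true; otherwise returns false. */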
9300 get_const_vec(nir_ssa_def* vec, nir_const_value* cv[4])
9302 if (vec->parent_instr->type != nir_instr_type_alu)
9303 return false;
9304 nir_alu_instr* vec_instr = nir_instr_as_alu(vec->parent_instr);
9305 if (vec_instr->op != nir_op_vec(vec->num_components))
9306 return false;
9308 for (unsigned i = 0; i < vec->num_components; i++) {
9309 cv[i] =
9310 vec_instr->src[i].swizzle[0] == 0 ? nir_src_as_const_value(vec_instr->src[i].src) : NULL;
9311 }
9312 return true;
9315 visit_tex(isel_context* ctx, nir_tex_instr* instr)
9317 assert(instr->op != nir_texop_txf_ms && instr->op != nir_texop_samples_identical);
9319 Builder bld(ctx->program, ctx->block);
9320 bool has_bias = false, has_lod = false, level_zero = false, has_compare = false,
9321 has_offset = false, has_ddx = false, has_ddy = false, has_derivs = false,
9322 has_sample_index = false, has_clamped_lod = false;
9323 Temp resource, sampler, bias = Temp(), compare = Temp(), sample_index = Temp(), lod = Temp(),
9324 offset = Temp(), ddx = Temp(), ddy = Temp(), clamped_lod = Temp(),
9325 coord = Temp();
9326 std::vector<Temp> coords;
9327 std::vector<Temp> derivs;
9328 nir_const_value* const_offset[4] = {NULL, NULL, NULL, NULL};
9330 for (unsigned i = 0; i < instr->num_srcs; i++) {
9331 switch (instr->src[i].src_type) {
9332 case nir_tex_src_texture_handle:
9333 resource = bld.as_uniform(get_ssa_temp(ctx, instr->src[i].src.ssa));
9335 case nir_tex_src_sampler_handle:
9336 sampler = bld.as_uniform(get_ssa_temp(ctx, instr->src[i].src.ssa));
9342 bool tg4_integer_workarounds = ctx->options->gfx_level <= GFX8 && instr->op == nir_texop_tg4 &&
9343 (instr->dest_type & (nir_type_int | nir_type_uint));
9344 bool tg4_integer_cube_workaround =
9345 tg4_integer_workarounds && instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE;
9347 bool a16 = false, g16 = false;
9349 int coord_idx = nir_tex_instr_src_index(instr, nir_tex_src_coord);
9350 if (coord_idx > 0)
9351 a16 = instr->src[coord_idx].src.ssa->bit_size == 16;
9353 int ddx_idx = nir_tex_instr_src_index(instr, nir_tex_src_ddx);
9354 if (ddx_idx >= 0)
9355 g16 = instr->src[ddx_idx].src.ssa->bit_size == 16;
9357 for (unsigned i = 0; i < instr->num_srcs; i++) {
9358 switch (instr->src[i].src_type) {
9359 case nir_tex_src_coord: {
9360 assert(instr->src[i].src.ssa->bit_size == (a16 ? 16 : 32));
9361 coord = get_ssa_temp_tex(ctx, instr->src[i].src.ssa, a16);
9364 case nir_tex_src_bias:
9365 assert(instr->src[i].src.ssa->bit_size == (a16 ? 16 : 32));
9366 /* Doesn't need get_ssa_temp_tex because we pack it into its own dword anyway. */
9367 bias = get_ssa_temp(ctx, instr->src[i].src.ssa);
9370 case nir_tex_src_lod: {
9371 if (nir_src_is_const(instr->src[i].src) && nir_src_as_uint(instr->src[i].src) == 0) {
9372 level_zero = true;
9373 } else {
9374 assert(instr->src[i].src.ssa->bit_size == (a16 ? 16 : 32));
9375 lod = get_ssa_temp_tex(ctx, instr->src[i].src.ssa, a16);
9380 case nir_tex_src_min_lod:
9381 assert(instr->src[i].src.ssa->bit_size == (a16 ? 16 : 32));
9382 clamped_lod = get_ssa_temp_tex(ctx, instr->src[i].src.ssa, a16);
9383 has_clamped_lod = true;
9385 case nir_tex_src_comparator:
9386 if (instr->is_shadow) {
9387 assert(instr->src[i].src.ssa->bit_size == 32);
9388 compare = get_ssa_temp(ctx, instr->src[i].src.ssa);
9392 case nir_tex_src_offset:
9393 assert(instr->src[i].src.ssa->bit_size == 32);
9394 offset = get_ssa_temp(ctx, instr->src[i].src.ssa);
9395 get_const_vec(instr->src[i].src.ssa, const_offset);
9398 case nir_tex_src_ddx:
9399 assert(instr->src[i].src.ssa->bit_size == (g16 ? 16 : 32));
9400 ddx = get_ssa_temp_tex(ctx, instr->src[i].src.ssa, g16);
9403 case nir_tex_src_ddy:
9404 assert(instr->src[i].src.ssa->bit_size == (g16 ? 16 : 32));
9405 ddy = get_ssa_temp_tex(ctx, instr->src[i].src.ssa, g16);
9408 case nir_tex_src_ms_index:
9409 assert(instr->src[i].src.ssa->bit_size == (a16 ? 16 : 32));
9410 sample_index = get_ssa_temp_tex(ctx, instr->src[i].src.ssa, a16);
9411 has_sample_index = true;
9413 case nir_tex_src_texture_offset:
9414 case nir_tex_src_sampler_offset:
9420 assert(instr->op != nir_texop_txf);
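/* MIMG texel offsets are packed into one dword as 6-bit fields: x in bits
 * [5:0], y in bits [13:8], z in bits [21:16]. Constant components are folded
 * into pack_const; variable ones are masked and shifted below. */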
9422 aco_ptr<Instruction> tmp_instr;
9423 Temp acc, pack = Temp();
9425 uint32_t pack_const = 0;
9426 for (unsigned i = 0; i < offset.size(); i++) {
9427 if (!const_offset[i])
9428 continue;
9429 pack_const |= (const_offset[i]->u32 & 0x3Fu) << (8u * i);
9432 if (offset.type() == RegType::sgpr) {
9433 for (unsigned i = 0; i < offset.size(); i++) {
9434 if (const_offset[i])
9435 continue;
9437 acc = emit_extract_vector(ctx, offset, i, s1);
9438 acc = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), acc,
9439 Operand::c32(0x3Fu));
9442 acc = bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.def(s1, scc), acc,
9443 Operand::c32(8u * i));
9446 if (pack == Temp()) {
9447 pack = acc;
9448 } else {
9449 pack = bld.sop2(aco_opcode::s_or_b32, bld.def(s1), bld.def(s1, scc), pack, acc);
9453 if (pack_const && pack != Temp())
9454 pack = bld.sop2(aco_opcode::s_or_b32, bld.def(s1), bld.def(s1, scc),
9455 Operand::c32(pack_const), pack);
9456 } else {
9457 for (unsigned i = 0; i < offset.size(); i++) {
9458 if (const_offset[i])
9459 continue;
9461 acc = emit_extract_vector(ctx, offset, i, v1);
9462 acc = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand::c32(0x3Fu), acc);
9465 acc = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand::c32(8u * i), acc);
9468 if (pack == Temp()) {
9469 pack = acc;
9470 } else {
9471 pack = bld.vop2(aco_opcode::v_or_b32, bld.def(v1), pack, acc);
9475 if (pack_const && pack != Temp())
9476 pack = bld.vop2(aco_opcode::v_or_b32, bld.def(v1), Operand::c32(pack_const), pack);
9478 if (pack_const && pack == Temp())
9479 offset = bld.copy(bld.def(v1), Operand::c32(pack_const));
9480 else if (pack == Temp())
9481 has_offset = false;
9482 else
9483 offset = pack;
9486 unsigned wqm_coord_count = 0;
9487 std::vector<Temp> unpacked_coord;
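/* GFX9 addresses 1D textures as 2D, so a dummy Y coordinate has to be
 * inserted: 0 for txf's integer coordinates, 0.5 for normalized ones. */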
9488 if (ctx->options->gfx_level == GFX9 && instr->sampler_dim == GLSL_SAMPLER_DIM_1D &&
9489 instr->op != nir_texop_lod && instr->coord_components) {
9490 RegClass rc = a16 ? v2b : v1;
9491 for (unsigned i = 0; i < coord.bytes() / rc.bytes(); i++)
9492 unpacked_coord.emplace_back(emit_extract_vector(ctx, coord, i, rc));
9494 assert(unpacked_coord.size() > 0 && unpacked_coord.size() < 3);
9496 Operand coord2d;
9497 /* 0.5 for floating point coords, 0 for integer. */
9498 if (a16)
9499 coord2d = instr->op == nir_texop_txf ? Operand::c16(0) : Operand::c16(0x3800);
9500 else
9501 coord2d = instr->op == nir_texop_txf ? Operand::c32(0) : Operand::c32(0x3f000000);
9502 unpacked_coord.insert(std::next(unpacked_coord.begin()), bld.copy(bld.def(rc), coord2d));
9503 wqm_coord_count = a16 ? DIV_ROUND_UP(unpacked_coord.size(), 2) : unpacked_coord.size();
9504 } else if (coord != Temp()) {
9505 unpacked_coord.push_back(coord);
9506 wqm_coord_count = DIV_ROUND_UP(coord.bytes(), 4);
9509 if (has_sample_index)
9510 unpacked_coord.push_back(sample_index);
9511 if (has_lod)
9512 unpacked_coord.push_back(lod);
9513 if (has_clamped_lod)
9514 unpacked_coord.push_back(clamped_lod);
9516 coords = emit_pack_v1(ctx, unpacked_coord);
9518 assert(instr->sampler_dim != GLSL_SAMPLER_DIM_CUBE || !a16);
9519 if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE && instr->coord_components)
9520 prepare_cube_coords(ctx, coords, &ddx, &ddy, instr->op == nir_texop_txd,
9521 instr->is_array && instr->op != nir_texop_lod);
9523 /* pack derivatives */
9524 if (has_ddx || has_ddy) {
9525 RegClass rc = g16 ? v2b : v1;
9526 assert(a16 == g16 || ctx->options->gfx_level >= GFX10);
9527 std::array<Temp, 2> ddxddy = {ddx, ddy};
9528 for (Temp tmp : ddxddy) {
9529 if (tmp == Temp())
9530 continue;
9531 std::vector<Temp> unpacked = {tmp};
9532 if (instr->sampler_dim == GLSL_SAMPLER_DIM_1D && ctx->options->gfx_level == GFX9) {
9533 assert(has_ddx && has_ddy);
9534 Temp zero = bld.copy(bld.def(rc), Operand::zero(rc.bytes()));
9535 unpacked.push_back(zero);
9537 for (Temp derv : emit_pack_v1(ctx, unpacked))
9538 derivs.push_back(derv);
9543 bool da = should_declare_array(ctx, instr->sampler_dim, instr->is_array);
9545 /* Build tex instruction */
9546 unsigned dmask = nir_ssa_def_components_read(&instr->dest.ssa) & 0xf;
9547 if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF)
9548 dmask = u_bit_consecutive(0, util_last_bit(dmask));
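/* Sparse residency: load at least one data channel and reserve an extra
 * result channel for the TFE residency word (the extra bit is stripped with
 * "dmask & 0xf" before the instruction is emitted). */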
9549 if (instr->is_sparse)
9550 dmask = MAX2(dmask, 1) | 0x10;
9551 unsigned dim =
9552 ctx->options->gfx_level >= GFX10 && instr->sampler_dim != GLSL_SAMPLER_DIM_BUF
9553 ? ac_get_sampler_dim(ctx->options->gfx_level, instr->sampler_dim, instr->is_array)
9554 : 0;
9555 bool d16 = instr->dest.ssa.bit_size == 16;
9556 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
9559 /* gather4 selects the component by dmask and always returns vec4 (vec5 if sparse) */
9560 if (instr->op == nir_texop_tg4) {
9561 assert(instr->dest.ssa.num_components == (4 + instr->is_sparse));
9562 if (instr->is_shadow)
9563 dmask = 1;
9564 else
9565 dmask = 1 << instr->component;
9566 if (tg4_integer_cube_workaround || dst.type() == RegType::sgpr)
9567 tmp_dst = bld.tmp(instr->is_sparse ? v5 : (d16 ? v2 : v4));
9568 } else if (instr->op == nir_texop_fragment_mask_fetch_amd) {
9569 tmp_dst = bld.tmp(v1);
9570 } else if (util_bitcount(dmask) != instr->dest.ssa.num_components ||
9571 dst.type() == RegType::sgpr) {
9572 unsigned bytes = util_bitcount(dmask) * instr->dest.ssa.bit_size / 8;
9573 tmp_dst = bld.tmp(RegClass::get(RegType::vgpr, bytes));
9576 Temp tg4_compare_cube_wa64 = Temp();
9578 if (tg4_integer_workarounds) {
9579 Temp tg4_lod = bld.copy(bld.def(v1), Operand::zero());
9580 Temp size = bld.tmp(v2);
9581 MIMG_instruction* tex = emit_mimg(bld, aco_opcode::image_get_resinfo, Definition(size),
9582 resource, Operand(s4), std::vector<Temp>{tg4_lod});
9586 emit_split_vector(ctx, size, size.size());
9588 Temp half_texel[2];
9589 for (unsigned i = 0; i < 2; i++) {
9590 half_texel[i] = emit_extract_vector(ctx, size, i, v1);
9591 half_texel[i] = bld.vop1(aco_opcode::v_cvt_f32_i32, bld.def(v1), half_texel[i]);
9592 half_texel[i] = bld.vop1(aco_opcode::v_rcp_iflag_f32, bld.def(v1), half_texel[i]);
9593 half_texel[i] = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1),
9594 Operand::c32(0xbf000000 /*-0.5*/), half_texel[i]);
9597 if (instr->sampler_dim == GLSL_SAMPLER_DIM_2D && !instr->is_array) {
9598 /* In Vulkan, whether the sampler uses unnormalized
9599 * coordinates or not is a dynamic property of the
9600 * sampler. Hence, to figure out whether or not we
9601 * need to divide by the texture size, we need to test
9602 * the sampler at runtime. This tests the bit set by
9603 * radv_init_sampler().
9605 unsigned bit_idx = ffs(S_008F30_FORCE_UNNORMALIZED(1)) - 1;
9606 Temp not_needed =
9607 bld.sopc(aco_opcode::s_bitcmp0_b32, bld.def(s1, scc), sampler, Operand::c32(bit_idx));
9609 not_needed = bool_to_vector_condition(ctx, not_needed);
9610 half_texel[0] = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1),
9611 Operand::c32(0xbf000000 /*-0.5*/), half_texel[0], not_needed);
9612 half_texel[1] = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1),
9613 Operand::c32(0xbf000000 /*-0.5*/), half_texel[1], not_needed);
9616 Temp new_coords[2] = {bld.vop2(aco_opcode::v_add_f32, bld.def(v1), coords[0], half_texel[0]),
9617 bld.vop2(aco_opcode::v_add_f32, bld.def(v1), coords[1], half_texel[1])};
9619 if (tg4_integer_cube_workaround) {
9620 /* see comment in ac_nir_to_llvm.c's lower_gather4_integer() */
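/* The descriptor's NUM_FORMAT is patched from UINT/SINT to USCALED/SSCALED
 * for 8_8_8_8 data formats, and tg4_compare_cube_wa64 remembers the lanes
 * whose result has to be converted back to integer afterwards. */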
9621 Temp* const desc = (Temp*)alloca(resource.size() * sizeof(Temp));
9622 aco_ptr<Instruction> split{create_instruction<Pseudo_instruction>(
9623 aco_opcode::p_split_vector, Format::PSEUDO, 1, resource.size())};
9624 split->operands[0] = Operand(resource);
9625 for (unsigned i = 0; i < resource.size(); i++) {
9626 desc[i] = bld.tmp(s1);
9627 split->definitions[i] = Definition(desc[i]);
9629 ctx->block->instructions.emplace_back(std::move(split));
9631 Temp dfmt = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc), desc[1],
9632 Operand::c32(20u | (6u << 16)));
9633 Temp compare_cube_wa = bld.sopc(aco_opcode::s_cmp_eq_u32, bld.def(s1, scc), dfmt,
9634 Operand::c32(V_008F14_IMG_DATA_FORMAT_8_8_8_8));
9636 Temp nfmt;
9637 if (instr->dest_type & nir_type_uint) {
9638 nfmt = bld.sop2(aco_opcode::s_cselect_b32, bld.def(s1),
9639 Operand::c32(V_008F14_IMG_NUM_FORMAT_USCALED),
9640 Operand::c32(V_008F14_IMG_NUM_FORMAT_UINT), bld.scc(compare_cube_wa));
9642 nfmt = bld.sop2(aco_opcode::s_cselect_b32, bld.def(s1),
9643 Operand::c32(V_008F14_IMG_NUM_FORMAT_SSCALED),
9644 Operand::c32(V_008F14_IMG_NUM_FORMAT_SINT), bld.scc(compare_cube_wa));
9646 tg4_compare_cube_wa64 = bld.tmp(bld.lm);
9647 bool_to_vector_condition(ctx, compare_cube_wa, tg4_compare_cube_wa64);
9649 nfmt = bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.def(s1, scc), nfmt,
9650 Operand::c32(26u));
9652 desc[1] = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), desc[1],
9653 Operand::c32(C_008F14_NUM_FORMAT));
9654 desc[1] = bld.sop2(aco_opcode::s_or_b32, bld.def(s1), bld.def(s1, scc), desc[1], nfmt);
9656 aco_ptr<Instruction> vec{create_instruction<Pseudo_instruction>(
9657 aco_opcode::p_create_vector, Format::PSEUDO, resource.size(), 1)};
9658 for (unsigned i = 0; i < resource.size(); i++)
9659 vec->operands[i] = Operand(desc[i]);
9660 resource = bld.tmp(resource.regClass());
9661 vec->definitions[0] = Definition(resource);
9662 ctx->block->instructions.emplace_back(std::move(vec));
9664 new_coords[0] = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), new_coords[0], coords[0],
9665 tg4_compare_cube_wa64);
9666 new_coords[1] = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), new_coords[1], coords[1],
9667 tg4_compare_cube_wa64);
9669 coords[0] = new_coords[0];
9670 coords[1] = new_coords[1];
9673 if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF) {
9674 // FIXME: if (ctx->abi->gfx9_stride_size_workaround) return
9675 // ac_build_buffer_load_format_gfx9_safe()
9677 assert(coords.size() == 1);
9678 aco_opcode op;
9679 if (d16) {
9680 switch (util_last_bit(dmask & 0xf)) {
9681 case 1: op = aco_opcode::buffer_load_format_d16_x; break;
9682 case 2: op = aco_opcode::buffer_load_format_d16_xy; break;
9683 case 3: op = aco_opcode::buffer_load_format_d16_xyz; break;
9684 case 4: op = aco_opcode::buffer_load_format_d16_xyzw; break;
9685 default: unreachable("Tex instruction loads more than 4 components.");
9686 }
9687 } else {
9688 switch (util_last_bit(dmask & 0xf)) {
9689 case 1: op = aco_opcode::buffer_load_format_x; break;
9690 case 2: op = aco_opcode::buffer_load_format_xy; break;
9691 case 3: op = aco_opcode::buffer_load_format_xyz; break;
9692 case 4: op = aco_opcode::buffer_load_format_xyzw; break;
9693 default: unreachable("Tex instruction loads more than 4 components.");
9697 aco_ptr<MUBUF_instruction> mubuf{
9698 create_instruction<MUBUF_instruction>(op, Format::MUBUF, 3 + instr->is_sparse, 1)};
9699 mubuf->operands[0] = Operand(resource);
9700 mubuf->operands[1] = Operand(coords[0]);
9701 mubuf->operands[2] = Operand::c32(0);
9702 mubuf->definitions[0] = Definition(tmp_dst);
9703 mubuf->idxen = true;
9704 mubuf->tfe = instr->is_sparse;
9705 if (instr->is_sparse)
9706 mubuf->operands[3] = emit_tfe_init(bld, tmp_dst);
9707 ctx->block->instructions.emplace_back(std::move(mubuf));
9709 expand_vector(ctx, tmp_dst, dst, instr->dest.ssa.num_components, dmask);
9710 return;
9711 }
9713 /* gather MIMG address components */
9714 std::vector<Temp> args;
9715 unsigned wqm_mask = 0;
9716 if (has_offset) {
9717 wqm_mask |= u_bit_consecutive(args.size(), 1);
9718 args.emplace_back(offset);
9719 }
9720 if (has_bias)
9721 args.emplace_back(emit_pack_v1(ctx, {bias})[0]);
9722 if (has_compare)
9723 args.emplace_back(compare);
9724 if (has_derivs)
9725 args.insert(args.end(), derivs.begin(), derivs.end());
9727 wqm_mask |= u_bit_consecutive(args.size(), wqm_coord_count);
9728 args.insert(args.end(), coords.begin(), coords.end());
9730 if (instr->op == nir_texop_txf || instr->op == nir_texop_fragment_fetch_amd ||
9731 instr->op == nir_texop_fragment_mask_fetch_amd) {
9732 aco_opcode op = level_zero || instr->sampler_dim == GLSL_SAMPLER_DIM_MS ||
9733 instr->sampler_dim == GLSL_SAMPLER_DIM_SUBPASS_MS
9734 ? aco_opcode::image_load
9735 : aco_opcode::image_load_mip;
9736 Operand vdata = instr->is_sparse ? emit_tfe_init(bld, tmp_dst) : Operand(v1);
9737 MIMG_instruction* tex =
9738 emit_mimg(bld, op, Definition(tmp_dst), resource, Operand(s4), args, 0, vdata);
9739 if (instr->op == nir_texop_fragment_mask_fetch_amd)
9740 tex->dim = da ? ac_image_2darray : ac_image_2d;
9741 else
9742 tex->dim = dim;
9743 tex->dmask = dmask & 0xf;
9746 tex->tfe = instr->is_sparse;
9750 if (instr->op == nir_texop_fragment_mask_fetch_amd) {
9751 /* Use 0x76543210 if the image doesn't have FMASK. */
9752 assert(dmask == 1 && dst.bytes() == 4);
9753 assert(dst.id() != tmp_dst.id());
9755 if (dst.regClass() == s1) {
9756 Temp is_not_null = bld.sopc(aco_opcode::s_cmp_lg_u32, bld.def(s1, scc), Operand::zero(),
9757 emit_extract_vector(ctx, resource, 1, s1));
9758 bld.sop2(aco_opcode::s_cselect_b32, Definition(dst),
9759 bld.as_uniform(tmp_dst), Operand::c32(0x76543210),
9760 bld.scc(is_not_null));
9762 Temp is_not_null = bld.tmp(bld.lm);
9763 bld.vopc_e64(aco_opcode::v_cmp_lg_u32, Definition(is_not_null), Operand::zero(),
9764 emit_extract_vector(ctx, resource, 1, s1));
9765 bld.vop2(aco_opcode::v_cndmask_b32, Definition(dst),
9766 bld.copy(bld.def(v1), Operand::c32(0x76543210)), tmp_dst, is_not_null);
9769 expand_vector(ctx, tmp_dst, dst, instr->dest.ssa.num_components, dmask);
9770 return;
9771 }
9774 bool separate_g16 = ctx->options->gfx_level >= GFX10 && g16;
9776 // TODO: would be better to do this by adding offsets, but needs the opcodes ordered.
9777 aco_opcode opcode = aco_opcode::image_sample;
9778 if (has_offset) { /* image_sample_*_o */
9779 if (has_clamped_lod) {
9781 opcode = aco_opcode::image_sample_c_cl_o;
9783 opcode = aco_opcode::image_sample_c_d_cl_o_g16;
9784 else if (has_derivs)
9785 opcode = aco_opcode::image_sample_c_d_cl_o;
9787 opcode = aco_opcode::image_sample_c_b_cl_o;
9789 opcode = aco_opcode::image_sample_cl_o;
9791 opcode = aco_opcode::image_sample_d_cl_o_g16;
9792 else if (has_derivs)
9793 opcode = aco_opcode::image_sample_d_cl_o;
9795 opcode = aco_opcode::image_sample_b_cl_o;
9797 } else if (has_compare) {
9798 opcode = aco_opcode::image_sample_c_o;
9800 opcode = aco_opcode::image_sample_c_d_o_g16;
9801 else if (has_derivs)
9802 opcode = aco_opcode::image_sample_c_d_o;
9804 opcode = aco_opcode::image_sample_c_b_o;
9806 opcode = aco_opcode::image_sample_c_lz_o;
9808 opcode = aco_opcode::image_sample_c_l_o;
9810 opcode = aco_opcode::image_sample_o;
9812 opcode = aco_opcode::image_sample_d_o_g16;
9813 else if (has_derivs)
9814 opcode = aco_opcode::image_sample_d_o;
9816 opcode = aco_opcode::image_sample_b_o;
9818 opcode = aco_opcode::image_sample_lz_o;
9820 opcode = aco_opcode::image_sample_l_o;
9822 } else if (has_clamped_lod) { /* image_sample_*_cl */
9824 opcode = aco_opcode::image_sample_c_cl;
9826 opcode = aco_opcode::image_sample_c_d_cl_g16;
9827 else if (has_derivs)
9828 opcode = aco_opcode::image_sample_c_d_cl;
9830 opcode = aco_opcode::image_sample_c_b_cl;
9832 opcode = aco_opcode::image_sample_cl;
9834 opcode = aco_opcode::image_sample_d_cl_g16;
9835 else if (has_derivs)
9836 opcode = aco_opcode::image_sample_d_cl;
9838 opcode = aco_opcode::image_sample_b_cl;
9840 } else { /* no offset */
9842 opcode = aco_opcode::image_sample_c;
9844 opcode = aco_opcode::image_sample_c_d_g16;
9845 else if (has_derivs)
9846 opcode = aco_opcode::image_sample_c_d;
9848 opcode = aco_opcode::image_sample_c_b;
9850 opcode = aco_opcode::image_sample_c_lz;
9852 opcode = aco_opcode::image_sample_c_l;
9854 opcode = aco_opcode::image_sample;
9856 opcode = aco_opcode::image_sample_d_g16;
9857 else if (has_derivs)
9858 opcode = aco_opcode::image_sample_d;
9860 opcode = aco_opcode::image_sample_b;
9862 opcode = aco_opcode::image_sample_lz;
9864 opcode = aco_opcode::image_sample_l;
9868 if (instr->op == nir_texop_tg4) {
9869 if (has_offset) { /* image_gather4_*_o */
9871 opcode = aco_opcode::image_gather4_c_lz_o;
9873 opcode = aco_opcode::image_gather4_c_l_o;
9875 opcode = aco_opcode::image_gather4_c_b_o;
9877 opcode = aco_opcode::image_gather4_lz_o;
9879 opcode = aco_opcode::image_gather4_l_o;
9881 opcode = aco_opcode::image_gather4_b_o;
9885 opcode = aco_opcode::image_gather4_c_lz;
9887 opcode = aco_opcode::image_gather4_c_l;
9889 opcode = aco_opcode::image_gather4_c_b;
9891 opcode = aco_opcode::image_gather4_lz;
9893 opcode = aco_opcode::image_gather4_l;
9895 opcode = aco_opcode::image_gather4_b;
9898 } else if (instr->op == nir_texop_lod) {
9899 opcode = aco_opcode::image_get_lod;
9902 bool implicit_derivs = bld.program->stage == fragment_fs && !has_derivs && !has_lod &&
9903 !level_zero && instr->sampler_dim != GLSL_SAMPLER_DIM_MS &&
9904 instr->sampler_dim != GLSL_SAMPLER_DIM_SUBPASS_MS;
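/* Implicit derivatives are computed from neighbouring helper lanes, so the
 * address computation must happen in WQM; wqm_mask marks the affected
 * address components for emit_mimg(). */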
9906 Operand vdata = instr->is_sparse ? emit_tfe_init(bld, tmp_dst) : Operand(v1);
9907 MIMG_instruction* tex = emit_mimg(bld, opcode, Definition(tmp_dst), resource, Operand(sampler),
9908 args, implicit_derivs ? wqm_mask : 0, vdata);
9910 tex->dmask = dmask & 0xf;
9912 tex->tfe = instr->is_sparse;
9916 if (tg4_integer_cube_workaround) {
9917 assert(tmp_dst.id() != dst.id());
9918 assert(tmp_dst.size() == dst.size());
9920 emit_split_vector(ctx, tmp_dst, tmp_dst.size());
9921 Temp val[4];
9922 for (unsigned i = 0; i < 4; i++) {
9923 val[i] = emit_extract_vector(ctx, tmp_dst, i, v1);
9924 Temp cvt_val;
9925 if (instr->dest_type & nir_type_uint)
9926 cvt_val = bld.vop1(aco_opcode::v_cvt_u32_f32, bld.def(v1), val[i]);
9927 else
9928 cvt_val = bld.vop1(aco_opcode::v_cvt_i32_f32, bld.def(v1), val[i]);
9929 val[i] = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), val[i], cvt_val,
9930 tg4_compare_cube_wa64);
9933 Temp tmp = dst.regClass() == tmp_dst.regClass() ? dst : bld.tmp(tmp_dst.regClass());
9934 if (instr->is_sparse)
9935 tmp_dst = bld.pseudo(aco_opcode::p_create_vector, Definition(tmp), val[0], val[1], val[2],
9936 val[3], emit_extract_vector(ctx, tmp_dst, 4, v1));
9937 else
9938 tmp_dst = bld.pseudo(aco_opcode::p_create_vector, Definition(tmp), val[0], val[1], val[2],
9939 val[3]);
9941 unsigned mask = instr->op == nir_texop_tg4 ? (instr->is_sparse ? 0x1F : 0xF) : dmask;
9942 expand_vector(ctx, tmp_dst, dst, instr->dest.ssa.num_components, mask);
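/* Returns the operand for one phi source: undefs become undefined Operands,
 * and for logical boolean phis, constant booleans are materialized as full
 * lane masks (-1/0). */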
9946 get_phi_operand(isel_context* ctx, nir_ssa_def* ssa, RegClass rc, bool logical)
9948 Temp tmp = get_ssa_temp(ctx, ssa);
9949 if (ssa->parent_instr->type == nir_instr_type_ssa_undef) {
9950 return Operand(rc);
9951 } else if (logical && ssa->bit_size == 1 &&
9952 ssa->parent_instr->type == nir_instr_type_load_const) {
9953 if (ctx->program->wave_size == 64)
9954 return Operand::c64(nir_instr_as_load_const(ssa->parent_instr)->value[0].b ? UINT64_MAX
9955 : 0u);
9956 else
9957 return Operand::c32(nir_instr_as_load_const(ssa->parent_instr)->value[0].b ? UINT32_MAX
9958 : 0u);
9959 }
9960 return Operand(tmp);
9965 visit_phi(isel_context* ctx, nir_phi_instr* instr)
9967 aco_ptr<Pseudo_instruction> phi;
9968 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
9969 assert(instr->dest.ssa.bit_size != 1 || dst.regClass() == ctx->program->lane_mask);
9971 bool logical = !dst.is_linear() || nir_dest_is_divergent(instr->dest);
9972 logical |= (ctx->block->kind & block_kind_merge) != 0;
9973 aco_opcode opcode = logical ? aco_opcode::p_phi : aco_opcode::p_linear_phi;
9975 /* we want a sorted list of sources, since the predecessor list is also sorted */
9976 std::map<unsigned, nir_ssa_def*> phi_src;
9977 nir_foreach_phi_src (src, instr)
9978 phi_src[src->pred->index] = src->src.ssa;
9980 std::vector<unsigned>& preds = logical ? ctx->block->logical_preds : ctx->block->linear_preds;
9981 unsigned num_operands = 0;
9982 Operand* const operands = (Operand*)alloca(
9983 (std::max(exec_list_length(&instr->srcs), (unsigned)preds.size()) + 1) * sizeof(Operand));
9984 unsigned num_defined = 0;
9985 unsigned cur_pred_idx = 0;
9986 for (std::pair<unsigned, nir_ssa_def*> src : phi_src) {
9987 if (cur_pred_idx < preds.size()) {
9988 /* handle missing preds (IF merges with discard/break) and extra preds
9989 * (loop exit with discard) */
9990 unsigned block = ctx->cf_info.nir_to_aco[src.first];
9991 unsigned skipped = 0;
9992 while (cur_pred_idx + skipped < preds.size() && preds[cur_pred_idx + skipped] != block)
9993 skipped++;
9994 if (cur_pred_idx + skipped < preds.size()) {
9995 for (unsigned i = 0; i < skipped; i++)
9996 operands[num_operands++] = Operand(dst.regClass());
9997 cur_pred_idx += skipped;
10002 /* Handle missing predecessors at the end. This shouldn't happen with loop
10003 * headers and we can't ignore these sources for loop header phis. */
10004 if (!(ctx->block->kind & block_kind_loop_header) && cur_pred_idx >= preds.size())
10005 continue;
10006 cur_pred_idx++;
10007 Operand op = get_phi_operand(ctx, src.second, dst.regClass(), logical);
10008 operands[num_operands++] = op;
10009 num_defined += !op.isUndefined();
10011 /* handle block_kind_continue_or_break at loop exit blocks */
10012 while (cur_pred_idx++ < preds.size())
10013 operands[num_operands++] = Operand(dst.regClass());
10015 /* If the loop ends with a break, still add a linear continue edge in case
10016 * that break is divergent or continue_or_break is used. We'll either remove
10017 * this operand later in visit_loop() if it's not necessary or replace the
10018 * undef with something correct. */
10019 if (!logical && ctx->block->kind & block_kind_loop_header) {
10020 nir_loop* loop = nir_cf_node_as_loop(instr->instr.block->cf_node.parent);
10021 nir_block* last = nir_loop_last_block(loop);
10022 if (last->successors[0] != instr->instr.block)
10023 operands[num_operands++] = Operand(RegClass());
10026 /* we can use a linear phi in some cases if one src is undef */
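/* (a linear phi of {value, undef} placed in the invert block (for a then-side
 * value) or the endif block (for an else-side value) makes the value available
 * along all linear paths, so no phi over the logical preds is needed) */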
10027 if (dst.is_linear() && ctx->block->kind & block_kind_merge && num_defined == 1) {
10028 phi.reset(create_instruction<Pseudo_instruction>(aco_opcode::p_linear_phi, Format::PSEUDO,
10029 2, 1));
10031 Block* linear_else = &ctx->program->blocks[ctx->block->linear_preds[1]];
10032 Block* invert = &ctx->program->blocks[linear_else->linear_preds[0]];
10033 assert(invert->kind & block_kind_invert);
10035 unsigned then_block = invert->linear_preds[0];
10037 Block* insert_block = NULL;
10038 for (unsigned i = 0; i < num_operands; i++) {
10039 Operand op = operands[i];
10040 if (op.isUndefined())
10041 continue;
10042 insert_block = ctx->block->logical_preds[i] == then_block ? invert : ctx->block;
10043 phi->operands[0] = op;
10044 break;
10045 }
10046 assert(insert_block); /* should be handled by the "num_defined == 0" case above */
10047 phi->operands[1] = Operand(dst.regClass());
10048 phi->definitions[0] = Definition(dst);
10049 insert_block->instructions.emplace(insert_block->instructions.begin(), std::move(phi));
10053 phi.reset(create_instruction<Pseudo_instruction>(opcode, Format::PSEUDO, num_operands, 1));
10054 for (unsigned i = 0; i < num_operands; i++)
10055 phi->operands[i] = operands[i];
10056 phi->definitions[0] = Definition(dst);
10057 ctx->block->instructions.emplace(ctx->block->instructions.begin(), std::move(phi));
10061 visit_undef(isel_context* ctx, nir_ssa_undef_instr* instr)
10063 Temp dst = get_ssa_temp(ctx, &instr->def);
10065 assert(dst.type() == RegType::sgpr);
10067 if (dst.size() == 1) {
10068 Builder(ctx->program, ctx->block).copy(Definition(dst), Operand::zero());
10070 aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(
10071 aco_opcode::p_create_vector, Format::PSEUDO, dst.size(), 1)};
10072 for (unsigned i = 0; i < dst.size(); i++)
10073 vec->operands[i] = Operand::zero();
10074 vec->definitions[0] = Definition(dst);
10075 ctx->block->instructions.emplace_back(std::move(vec));
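/* Ends the current block as the loop preheader, creates the loop header
 * block and saves the surrounding control-flow state in 'lc' so that
 * end_loop() can restore it. */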
10080 begin_loop(isel_context* ctx, loop_context* lc)
10082 // TODO: we might want to wrap the loop around a branch if exec_potentially_empty=true
10083 append_logical_end(ctx->block);
10084 ctx->block->kind |= block_kind_loop_preheader | block_kind_uniform;
10085 Builder bld(ctx->program, ctx->block);
10086 bld.branch(aco_opcode::p_branch, bld.def(s2));
10087 unsigned loop_preheader_idx = ctx->block->index;
10089 lc->loop_exit.kind |= (block_kind_loop_exit | (ctx->block->kind & block_kind_top_level));
10091 ctx->program->next_loop_depth++;
10093 Block* loop_header = ctx->program->create_and_insert_block();
10094 loop_header->kind |= block_kind_loop_header;
10095 add_edge(loop_preheader_idx, loop_header);
10096 ctx->block = loop_header;
10098 append_logical_start(ctx->block);
10100 lc->header_idx_old = std::exchange(ctx->cf_info.parent_loop.header_idx, loop_header->index);
10101 lc->exit_old = std::exchange(ctx->cf_info.parent_loop.exit, &lc->loop_exit);
10102 lc->divergent_cont_old = std::exchange(ctx->cf_info.parent_loop.has_divergent_continue, false);
10103 lc->divergent_branch_old = std::exchange(ctx->cf_info.parent_loop.has_divergent_branch, false);
10104 lc->divergent_if_old = std::exchange(ctx->cf_info.parent_if.is_divergent, false);
10108 end_loop(isel_context* ctx, loop_context* lc)
10110 // TODO: what if a loop ends with an unconditional or uniformly branched continue
10111 // and this branch is never taken?
10112 if (!ctx->cf_info.has_branch) {
10113 unsigned loop_header_idx = ctx->cf_info.parent_loop.header_idx;
10114 Builder bld(ctx->program, ctx->block);
10115 append_logical_end(ctx->block);
10117 if (ctx->cf_info.exec_potentially_empty_discard ||
10118 ctx->cf_info.exec_potentially_empty_break) {
10119 /* Discards can result in code running with an empty exec mask.
10120 * This would result in divergent breaks not ever being taken. As a
10121 * workaround, break the loop when the loop mask is empty instead of
10122 * always continuing. */
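/* The resulting linear CFG, with helper blocks to avoid critical edges:
 *
 *     continue_or_break block
 *          /          \
 *   break_block      continue_block
 *        |                |
 *   loop_exit        loop_header
 */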
10123 ctx->block->kind |= (block_kind_continue_or_break | block_kind_uniform);
10124 unsigned block_idx = ctx->block->index;
10126 /* create helper blocks to avoid critical edges */
10127 Block* break_block = ctx->program->create_and_insert_block();
10128 break_block->kind = block_kind_uniform;
10129 bld.reset(break_block);
10130 bld.branch(aco_opcode::p_branch, bld.def(s2));
10131 add_linear_edge(block_idx, break_block);
10132 add_linear_edge(break_block->index, &lc->loop_exit);
10134 Block* continue_block = ctx->program->create_and_insert_block();
10135 continue_block->kind = block_kind_uniform;
10136 bld.reset(continue_block);
10137 bld.branch(aco_opcode::p_branch, bld.def(s2));
10138 add_linear_edge(block_idx, continue_block);
10139 add_linear_edge(continue_block->index, &ctx->program->blocks[loop_header_idx]);
10141 if (!ctx->cf_info.parent_loop.has_divergent_branch)
10142 add_logical_edge(block_idx, &ctx->program->blocks[loop_header_idx]);
10143 ctx->block = &ctx->program->blocks[block_idx];
10144 } else {
10145 ctx->block->kind |= (block_kind_continue | block_kind_uniform);
10146 if (!ctx->cf_info.parent_loop.has_divergent_branch)
10147 add_edge(ctx->block->index, &ctx->program->blocks[loop_header_idx]);
10148 else
10149 add_linear_edge(ctx->block->index, &ctx->program->blocks[loop_header_idx]);
10152 bld.reset(ctx->block);
10153 bld.branch(aco_opcode::p_branch, bld.def(s2));
10156 ctx->cf_info.has_branch = false;
10157 ctx->program->next_loop_depth--;
10159 // TODO: if the loop does not have a single exit, we must add one
10160 /* emit loop successor block */
10161 ctx->block = ctx->program->insert_block(std::move(lc->loop_exit));
10162 append_logical_start(ctx->block);
10164 #if 0
10165 // TODO: check if it is beneficial to not branch on continues
10166 /* trim linear phis in loop header */
10167 for (auto&& instr : loop_entry->instructions) {
10168 if (instr->opcode == aco_opcode::p_linear_phi) {
10169 aco_ptr<Pseudo_instruction> new_phi{create_instruction<Pseudo_instruction>(aco_opcode::p_linear_phi, Format::PSEUDO, loop_entry->linear_predecessors.size(), 1)};
10170 new_phi->definitions[0] = instr->definitions[0];
10171 for (unsigned i = 0; i < new_phi->operands.size(); i++)
10172 new_phi->operands[i] = instr->operands[i];
10173 /* check that the remaining operands are all the same */
10174 for (unsigned i = new_phi->operands.size(); i < instr->operands.size(); i++)
10175 assert(instr->operands[i].tempId() == instr->operands.back().tempId());
10176 instr.swap(new_phi);
10177 } else if (instr->opcode == aco_opcode::p_phi) {
10178 continue;
10179 } else {
10180 break;
10181 }
10182 }
10183 #endif
10185 ctx->cf_info.parent_loop.header_idx = lc->header_idx_old;
10186 ctx->cf_info.parent_loop.exit = lc->exit_old;
10187 ctx->cf_info.parent_loop.has_divergent_continue = lc->divergent_cont_old;
10188 ctx->cf_info.parent_loop.has_divergent_branch = lc->divergent_branch_old;
10189 ctx->cf_info.parent_if.is_divergent = lc->divergent_if_old;
10190 if (!ctx->block->loop_nest_depth && !ctx->cf_info.parent_if.is_divergent)
10191 ctx->cf_info.exec_potentially_empty_discard = false;
10195 emit_loop_jump(isel_context* ctx, bool is_break)
10197 Builder bld(ctx->program, ctx->block);
10198 Block* logical_target;
10199 append_logical_end(ctx->block);
10200 unsigned idx = ctx->block->index;
10202 if (is_break) {
10203 logical_target = ctx->cf_info.parent_loop.exit;
10204 add_logical_edge(idx, logical_target);
10205 ctx->block->kind |= block_kind_break;
10207 if (!ctx->cf_info.parent_if.is_divergent &&
10208 !ctx->cf_info.parent_loop.has_divergent_continue) {
10209 /* uniform break - directly jump out of the loop */
10210 ctx->block->kind |= block_kind_uniform;
10211 ctx->cf_info.has_branch = true;
10212 bld.branch(aco_opcode::p_branch, bld.def(s2));
10213 add_linear_edge(idx, logical_target);
10214 return;
10215 }
10216 ctx->cf_info.parent_loop.has_divergent_branch = true;
10217 } else {
10218 logical_target = &ctx->program->blocks[ctx->cf_info.parent_loop.header_idx];
10219 add_logical_edge(idx, logical_target);
10220 ctx->block->kind |= block_kind_continue;
10222 if (!ctx->cf_info.parent_if.is_divergent) {
10223 /* uniform continue - directly jump to the loop header */
10224 ctx->block->kind |= block_kind_uniform;
10225 ctx->cf_info.has_branch = true;
10226 bld.branch(aco_opcode::p_branch, bld.def(s2));
10227 add_linear_edge(idx, logical_target);
10228 return;
10229 }
10231 /* for potential uniform breaks after this continue,
10232 we must ensure that they are handled correctly */
10233 ctx->cf_info.parent_loop.has_divergent_continue = true;
10234 ctx->cf_info.parent_loop.has_divergent_branch = true;
10237 if (ctx->cf_info.parent_if.is_divergent && !ctx->cf_info.exec_potentially_empty_break) {
10238 ctx->cf_info.exec_potentially_empty_break = true;
10239 ctx->cf_info.exec_potentially_empty_break_depth = ctx->block->loop_nest_depth;
10242 /* remove critical edges from linear CFG */
10243 bld.branch(aco_opcode::p_branch, bld.def(s2));
10244 Block* break_block = ctx->program->create_and_insert_block();
10245 break_block->kind |= block_kind_uniform;
10246 add_linear_edge(idx, break_block);
10247 /* the loop_header pointer might be invalidated by this point */
10248 if (!is_break)
10249 logical_target = &ctx->program->blocks[ctx->cf_info.parent_loop.header_idx];
10250 add_linear_edge(break_block->index, logical_target);
10251 bld.reset(break_block);
10252 bld.branch(aco_opcode::p_branch, bld.def(s2));
10254 Block* continue_block = ctx->program->create_and_insert_block();
10255 add_linear_edge(idx, continue_block);
10256 append_logical_start(continue_block);
10257 ctx->block = continue_block;
10261 emit_loop_break(isel_context* ctx)
10263 emit_loop_jump(ctx, true);
10267 emit_loop_continue(isel_context* ctx)
10269 emit_loop_jump(ctx, false);
10273 visit_jump(isel_context* ctx, nir_jump_instr* instr)
10275 /* visit_block() would usually do this, but divergent jumps update ctx->block */
10276 ctx->cf_info.nir_to_aco[instr->instr.block->index] = ctx->block->index;
10278 switch (instr->type) {
10279 case nir_jump_break: emit_loop_break(ctx); break;
10280 case nir_jump_continue: emit_loop_continue(ctx); break;
10281 default: isel_err(&instr->instr, "Unknown NIR jump instr"); abort();
10286 visit_block(isel_context* ctx, nir_block* block)
10288 ctx->block->instructions.reserve(ctx->block->instructions.size() +
10289 exec_list_length(&block->instr_list) * 2);
10290 nir_foreach_instr (instr, block) {
10291 switch (instr->type) {
10292 case nir_instr_type_alu: visit_alu_instr(ctx, nir_instr_as_alu(instr)); break;
10293 case nir_instr_type_load_const: visit_load_const(ctx, nir_instr_as_load_const(instr)); break;
10294 case nir_instr_type_intrinsic: visit_intrinsic(ctx, nir_instr_as_intrinsic(instr)); break;
10295 case nir_instr_type_tex: visit_tex(ctx, nir_instr_as_tex(instr)); break;
10296 case nir_instr_type_phi: visit_phi(ctx, nir_instr_as_phi(instr)); break;
10297 case nir_instr_type_ssa_undef: visit_undef(ctx, nir_instr_as_ssa_undef(instr)); break;
10298 case nir_instr_type_deref: break;
10299 case nir_instr_type_jump: visit_jump(ctx, nir_instr_as_jump(instr)); break;
10300 default: isel_err(instr, "Unknown NIR instr type");
10304 if (!ctx->cf_info.parent_loop.has_divergent_branch)
10305 ctx->cf_info.nir_to_aco[block->index] = ctx->block->index;
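/* Computes the value a loop-header linear phi receives along the continue
 * edge for every block in [first, last], inserting linear phis where the
 * linear predecessors of a block disagree. 'vals' must have room for
 * last - first + 1 operands; the operand for the continue edge is returned. */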
10309 create_continue_phis(isel_context* ctx, unsigned first, unsigned last,
10310 aco_ptr<Instruction>& header_phi, Operand* vals)
10312 vals[0] = Operand(header_phi->definitions[0].getTemp());
10313 RegClass rc = vals[0].regClass();
10315 unsigned loop_nest_depth = ctx->program->blocks[first].loop_nest_depth;
10317 unsigned next_pred = 1;
10319 for (unsigned idx = first + 1; idx <= last; idx++) {
10320 Block& block = ctx->program->blocks[idx];
10321 if (block.loop_nest_depth != loop_nest_depth) {
10322 vals[idx - first] = vals[idx - 1 - first];
10323 continue;
10324 }
10326 if ((block.kind & block_kind_continue) && block.index != last) {
10327 vals[idx - first] = header_phi->operands[next_pred];
10328 next_pred++;
10329 continue;
10330 }
10332 bool all_same = true;
10333 for (unsigned i = 1; all_same && (i < block.linear_preds.size()); i++)
10334 all_same = vals[block.linear_preds[i] - first] == vals[block.linear_preds[0] - first];
10336 Operand val;
10337 if (all_same) {
10338 val = vals[block.linear_preds[0] - first];
10339 } else {
10340 aco_ptr<Instruction> phi(create_instruction<Pseudo_instruction>(
10341 aco_opcode::p_linear_phi, Format::PSEUDO, block.linear_preds.size(), 1));
10342 for (unsigned i = 0; i < block.linear_preds.size(); i++)
10343 phi->operands[i] = vals[block.linear_preds[i] - first];
10344 val = Operand(ctx->program->allocateTmp(rc));
10345 phi->definitions[0] = Definition(val.getTemp());
10346 block.instructions.emplace(block.instructions.begin(), std::move(phi));
10348 vals[idx - first] = val;
10351 return vals[last - first];
10354 static void begin_uniform_if_then(isel_context* ctx, if_context* ic, Temp cond);
10355 static void begin_uniform_if_else(isel_context* ctx, if_context* ic);
10356 static void end_uniform_if(isel_context* ctx, if_context* ic);
10359 visit_loop(isel_context* ctx, nir_loop* loop)
10362 begin_loop(ctx, &lc);
10364 /* NIR seems to allow this, and even though the loop exit has no predecessors, SSA defs from the
10365 * loop header are live. Handle this without complicating the ACO IR by creating a dummy break.
10367 if (nir_cf_node_cf_tree_next(&loop->cf_node)->predecessors->entries == 0) {
10368 Builder bld(ctx->program, ctx->block);
10369 Temp cond = bld.copy(bld.def(s1, scc), Operand::zero());
10370 if_context ic;
10371 begin_uniform_if_then(ctx, &ic, cond);
10372 emit_loop_break(ctx);
10373 begin_uniform_if_else(ctx, &ic);
10374 end_uniform_if(ctx, &ic);
10377 bool unreachable = visit_cf_list(ctx, &loop->body);
10379 unsigned loop_header_idx = ctx->cf_info.parent_loop.header_idx;
10381 /* Fixup phis in loop header from unreachable blocks.
10382 * has_branch/has_divergent_branch also indicate if the loop ends with a
10383 * break/continue instruction, but we don't emit those if unreachable=true. */
10384 if (unreachable) {
10385 assert(ctx->cf_info.has_branch || ctx->cf_info.parent_loop.has_divergent_branch);
10386 bool linear = ctx->cf_info.has_branch;
10387 bool logical = ctx->cf_info.has_branch || ctx->cf_info.parent_loop.has_divergent_branch;
10388 for (aco_ptr<Instruction>& instr : ctx->program->blocks[loop_header_idx].instructions) {
10389 if ((logical && instr->opcode == aco_opcode::p_phi) ||
10390 (linear && instr->opcode == aco_opcode::p_linear_phi)) {
10391 /* the last operand should be the one that needs to be removed */
10392 instr->operands.pop_back();
10393 } else if (!is_phi(instr)) {
10399 /* Fixup linear phis in loop header from expecting a continue. This fixup
10400 * and the previous one shouldn't both happen at once because a break in the
10401 * merge block would get CSE'd. */
10402 if (nir_loop_last_block(loop)->successors[0] != nir_loop_first_block(loop)) {
10403 unsigned num_vals = ctx->cf_info.has_branch ? 1 : (ctx->block->index - loop_header_idx + 1);
10404 Operand* const vals = (Operand*)alloca(num_vals * sizeof(Operand));
10405 for (aco_ptr<Instruction>& instr : ctx->program->blocks[loop_header_idx].instructions) {
10406 if (instr->opcode == aco_opcode::p_linear_phi) {
10407 if (ctx->cf_info.has_branch)
10408 instr->operands.pop_back();
10409 else
10410 instr->operands.back() =
10411 create_continue_phis(ctx, loop_header_idx, ctx->block->index, instr, vals);
10412 } else if (!is_phi(instr)) {
10418 end_loop(ctx, &lc);
10422 begin_divergent_if_then(isel_context* ctx, if_context* ic, Temp cond)
10426 append_logical_end(ctx->block);
10427 ctx->block->kind |= block_kind_branch;
10429 /* branch to linear then block */
10430 assert(cond.regClass() == ctx->program->lane_mask);
10431 aco_ptr<Pseudo_branch_instruction> branch;
10432 branch.reset(create_instruction<Pseudo_branch_instruction>(aco_opcode::p_cbranch_z,
10433 Format::PSEUDO_BRANCH, 1, 1));
10434 branch->definitions[0] = Definition(ctx->program->allocateTmp(s2));
10435 branch->operands[0] = Operand(cond);
10436 ctx->block->instructions.push_back(std::move(branch));
10438 ic->BB_if_idx = ctx->block->index;
10439 ic->BB_invert = Block();
10440 /* Invert blocks are intentionally not marked as top level because they
10441 * are not part of the logical cfg. */
10442 ic->BB_invert.kind |= block_kind_invert;
10443 ic->BB_endif = Block();
10444 ic->BB_endif.kind |= (block_kind_merge | (ctx->block->kind & block_kind_top_level));
10446 ic->exec_potentially_empty_discard_old = ctx->cf_info.exec_potentially_empty_discard;
10447 ic->exec_potentially_empty_break_old = ctx->cf_info.exec_potentially_empty_break;
10448 ic->exec_potentially_empty_break_depth_old = ctx->cf_info.exec_potentially_empty_break_depth;
10449 ic->divergent_old = ctx->cf_info.parent_if.is_divergent;
10450 ctx->cf_info.parent_if.is_divergent = true;
10452 /* divergent branches use cbranch_execz */
10453 ctx->cf_info.exec_potentially_empty_discard = false;
10454 ctx->cf_info.exec_potentially_empty_break = false;
10455 ctx->cf_info.exec_potentially_empty_break_depth = UINT16_MAX;
10457 /** emit logical then block */
10458 ctx->program->next_divergent_if_logical_depth++;
10459 Block* BB_then_logical = ctx->program->create_and_insert_block();
10460 add_edge(ic->BB_if_idx, BB_then_logical);
10461 ctx->block = BB_then_logical;
10462 append_logical_start(BB_then_logical);
10466 begin_divergent_if_else(isel_context* ctx, if_context* ic)
10468 Block* BB_then_logical = ctx->block;
10469 append_logical_end(BB_then_logical);
10470 /* branch from logical then block to invert block */
10471 aco_ptr<Pseudo_branch_instruction> branch;
10472 branch.reset(create_instruction<Pseudo_branch_instruction>(aco_opcode::p_branch,
10473 Format::PSEUDO_BRANCH, 0, 1));
10474 branch->definitions[0] = Definition(ctx->program->allocateTmp(s2));
10475 BB_then_logical->instructions.emplace_back(std::move(branch));
10476 add_linear_edge(BB_then_logical->index, &ic->BB_invert);
10477 if (!ctx->cf_info.parent_loop.has_divergent_branch)
10478 add_logical_edge(BB_then_logical->index, &ic->BB_endif);
10479 BB_then_logical->kind |= block_kind_uniform;
10480 assert(!ctx->cf_info.has_branch);
10481 ic->then_branch_divergent = ctx->cf_info.parent_loop.has_divergent_branch;
10482 ctx->cf_info.parent_loop.has_divergent_branch = false;
10483 ctx->program->next_divergent_if_logical_depth--;
10485 /** emit linear then block */
10486 Block* BB_then_linear = ctx->program->create_and_insert_block();
10487 BB_then_linear->kind |= block_kind_uniform;
10488 add_linear_edge(ic->BB_if_idx, BB_then_linear);
10489 /* branch from linear then block to invert block */
10490 branch.reset(create_instruction<Pseudo_branch_instruction>(aco_opcode::p_branch,
10491 Format::PSEUDO_BRANCH, 0, 1));
10492 branch->definitions[0] = Definition(ctx->program->allocateTmp(s2));
10493 BB_then_linear->instructions.emplace_back(std::move(branch));
10494 add_linear_edge(BB_then_linear->index, &ic->BB_invert);
10496 /** emit invert merge block */
10497 ctx->block = ctx->program->insert_block(std::move(ic->BB_invert));
10498 ic->invert_idx = ctx->block->index;
10500 /* branch to linear else block (skip else) */
10501 branch.reset(create_instruction<Pseudo_branch_instruction>(aco_opcode::p_branch,
10502 Format::PSEUDO_BRANCH, 0, 1));
10503 branch->definitions[0] = Definition(ctx->program->allocateTmp(s2));
10504 ctx->block->instructions.push_back(std::move(branch));
10506 ic->exec_potentially_empty_discard_old |= ctx->cf_info.exec_potentially_empty_discard;
10507 ic->exec_potentially_empty_break_old |= ctx->cf_info.exec_potentially_empty_break;
10508 ic->exec_potentially_empty_break_depth_old = std::min(
10509 ic->exec_potentially_empty_break_depth_old, ctx->cf_info.exec_potentially_empty_break_depth);
10510 /* divergent branches use cbranch_execz */
10511 ctx->cf_info.exec_potentially_empty_discard = false;
10512 ctx->cf_info.exec_potentially_empty_break = false;
10513 ctx->cf_info.exec_potentially_empty_break_depth = UINT16_MAX;
10515 /** emit logical else block */
10516 ctx->program->next_divergent_if_logical_depth++;
10517 Block* BB_else_logical = ctx->program->create_and_insert_block();
10518 add_logical_edge(ic->BB_if_idx, BB_else_logical);
10519 add_linear_edge(ic->invert_idx, BB_else_logical);
10520 ctx->block = BB_else_logical;
10521 append_logical_start(BB_else_logical);
10525 end_divergent_if(isel_context* ctx, if_context* ic)
10527 Block* BB_else_logical = ctx->block;
10528 append_logical_end(BB_else_logical);
10530 /* branch from logical else block to endif block */
10531 aco_ptr<Pseudo_branch_instruction> branch;
10532 branch.reset(create_instruction<Pseudo_branch_instruction>(aco_opcode::p_branch,
10533 Format::PSEUDO_BRANCH, 0, 1));
10534 branch->definitions[0] = Definition(ctx->program->allocateTmp(s2));
10535 BB_else_logical->instructions.emplace_back(std::move(branch));
10536 add_linear_edge(BB_else_logical->index, &ic->BB_endif);
10537 if (!ctx->cf_info.parent_loop.has_divergent_branch)
10538 add_logical_edge(BB_else_logical->index, &ic->BB_endif);
10539 BB_else_logical->kind |= block_kind_uniform;
10540 ctx->program->next_divergent_if_logical_depth--;
10542 assert(!ctx->cf_info.has_branch);
10543 ctx->cf_info.parent_loop.has_divergent_branch &= ic->then_branch_divergent;
10545 /** emit linear else block */
10546 Block* BB_else_linear = ctx->program->create_and_insert_block();
10547 BB_else_linear->kind |= block_kind_uniform;
10548 add_linear_edge(ic->invert_idx, BB_else_linear);
10550 /* branch from linear else block to endif block */
10551 branch.reset(create_instruction<Pseudo_branch_instruction>(aco_opcode::p_branch,
10552 Format::PSEUDO_BRANCH, 0, 1));
10553 branch->definitions[0] = Definition(ctx->program->allocateTmp(s2));
10554 BB_else_linear->instructions.emplace_back(std::move(branch));
10555 add_linear_edge(BB_else_linear->index, &ic->BB_endif);
10557 /** emit endif merge block */
10558 ctx->block = ctx->program->insert_block(std::move(ic->BB_endif));
10559 append_logical_start(ctx->block);
10561 ctx->cf_info.parent_if.is_divergent = ic->divergent_old;
10562 ctx->cf_info.exec_potentially_empty_discard |= ic->exec_potentially_empty_discard_old;
10563 ctx->cf_info.exec_potentially_empty_break |= ic->exec_potentially_empty_break_old;
10564 ctx->cf_info.exec_potentially_empty_break_depth = std::min(
10565 ic->exec_potentially_empty_break_depth_old, ctx->cf_info.exec_potentially_empty_break_depth);
10566 if (ctx->block->loop_nest_depth == ctx->cf_info.exec_potentially_empty_break_depth &&
10567 !ctx->cf_info.parent_if.is_divergent) {
10568 ctx->cf_info.exec_potentially_empty_break = false;
10569 ctx->cf_info.exec_potentially_empty_break_depth = UINT16_MAX;
10571 /* uniform control flow never has an empty exec-mask */
10572 if (!ctx->block->loop_nest_depth && !ctx->cf_info.parent_if.is_divergent) {
10573 ctx->cf_info.exec_potentially_empty_discard = false;
10574 ctx->cf_info.exec_potentially_empty_break = false;
10575 ctx->cf_info.exec_potentially_empty_break_depth = UINT16_MAX;
10580 begin_uniform_if_then(isel_context* ctx, if_context* ic, Temp cond)
10582 assert(cond.regClass() == s1);
10584 append_logical_end(ctx->block);
10585 ctx->block->kind |= block_kind_uniform;
10587 aco_ptr<Pseudo_branch_instruction> branch;
10588 aco_opcode branch_opcode = aco_opcode::p_cbranch_z;
10589 branch.reset(
10590 create_instruction<Pseudo_branch_instruction>(branch_opcode, Format::PSEUDO_BRANCH, 1, 1));
10591 branch->definitions[0] = Definition(ctx->program->allocateTmp(s2));
10592 branch->operands[0] = Operand(cond);
10593 branch->operands[0].setFixed(scc);
10594 ctx->block->instructions.emplace_back(std::move(branch));
10596 ic->BB_if_idx = ctx->block->index;
10597 ic->BB_endif = Block();
10598 ic->BB_endif.kind |= ctx->block->kind & block_kind_top_level;
10600 ctx->cf_info.has_branch = false;
10601 ctx->cf_info.parent_loop.has_divergent_branch = false;
10603 /** emit then block */
10604 ctx->program->next_uniform_if_depth++;
10605 Block* BB_then = ctx->program->create_and_insert_block();
10606 add_edge(ic->BB_if_idx, BB_then);
10607 append_logical_start(BB_then);
10608 ctx->block = BB_then;
10612 begin_uniform_if_else(isel_context* ctx, if_context* ic)
10614 Block* BB_then = ctx->block;
10616 ic->uniform_has_then_branch = ctx->cf_info.has_branch;
10617 ic->then_branch_divergent = ctx->cf_info.parent_loop.has_divergent_branch;
10619 if (!ic->uniform_has_then_branch) {
10620 append_logical_end(BB_then);
10621 /* branch from then block to endif block */
10622 aco_ptr<Pseudo_branch_instruction> branch;
10623 branch.reset(create_instruction<Pseudo_branch_instruction>(aco_opcode::p_branch,
10624 Format::PSEUDO_BRANCH, 0, 1));
10625 branch->definitions[0] = Definition(ctx->program->allocateTmp(s2));
10626 BB_then->instructions.emplace_back(std::move(branch));
10627 add_linear_edge(BB_then->index, &ic->BB_endif);
10628 if (!ic->then_branch_divergent)
10629 add_logical_edge(BB_then->index, &ic->BB_endif);
10630 BB_then->kind |= block_kind_uniform;
10633 ctx->cf_info.has_branch = false;
10634 ctx->cf_info.parent_loop.has_divergent_branch = false;
10636 /** emit else block */
10637 Block* BB_else = ctx->program->create_and_insert_block();
10638 add_edge(ic->BB_if_idx, BB_else);
10639 append_logical_start(BB_else);
10640 ctx->block = BB_else;
10644 end_uniform_if(isel_context* ctx, if_context* ic)
10646 Block* BB_else = ctx->block;
10648 if (!ctx->cf_info.has_branch) {
10649 append_logical_end(BB_else);
10650 /* branch from else block to endif block */
10651 aco_ptr<Pseudo_branch_instruction> branch;
10652 branch.reset(create_instruction<Pseudo_branch_instruction>(aco_opcode::p_branch,
10653 Format::PSEUDO_BRANCH, 0, 1));
10654 branch->definitions[0] = Definition(ctx->program->allocateTmp(s2));
10655 BB_else->instructions.emplace_back(std::move(branch));
10656 add_linear_edge(BB_else->index, &ic->BB_endif);
10657 if (!ctx->cf_info.parent_loop.has_divergent_branch)
10658 add_logical_edge(BB_else->index, &ic->BB_endif);
10659 BB_else->kind |= block_kind_uniform;
10662 ctx->cf_info.has_branch &= ic->uniform_has_then_branch;
10663 ctx->cf_info.parent_loop.has_divergent_branch &= ic->then_branch_divergent;
10665 /** emit endif merge block */
10666 ctx->program->next_uniform_if_depth--;
10667 if (!ctx->cf_info.has_branch) {
10668 ctx->block = ctx->program->insert_block(std::move(ic->BB_endif));
10669 append_logical_start(ctx->block);
10674 visit_if(isel_context* ctx, nir_if* if_stmt)
10676 Temp cond = get_ssa_temp(ctx, if_stmt->condition.ssa);
10677 Builder bld(ctx->program, ctx->block);
10678 aco_ptr<Pseudo_branch_instruction> branch;
10679 if_context ic;
10681 if (!nir_src_is_divergent(if_stmt->condition)) { /* uniform condition */
10682 /**
10683 * Uniform conditionals are represented in the following way*) :
10684 *
10685 * The linear and logical CFG:
10686 *                        BB_IF
10687 *                        /    \
10688 *       BB_THEN (logical)      BB_ELSE (logical)
10689 *                        \    /
10690 *                        BB_ENDIF
10691 *
10692 * *) Exceptions may be due to break and continue statements within loops
10693 * If a break/continue happens within uniform control flow, it branches
10694 * to the loop exit/entry block. Otherwise, it branches to the next
10695 * merge block.
10696 **/
10698 assert(cond.regClass() == ctx->program->lane_mask);
10699 cond = bool_to_scalar_condition(ctx, cond);
10701 begin_uniform_if_then(ctx, &ic, cond);
10702 visit_cf_list(ctx, &if_stmt->then_list);
10704 begin_uniform_if_else(ctx, &ic);
10705 visit_cf_list(ctx, &if_stmt->else_list);
10707 end_uniform_if(ctx, &ic);
10708 } else { /* non-uniform condition */
10710 * To maintain a logical and linear CFG without critical edges,
10711 * non-uniform conditionals are represented in the following way*) :
10716 * BB_THEN (logical) BB_THEN (linear)
10718 * BB_INVERT (linear)
10720 * BB_ELSE (logical) BB_ELSE (linear)
10727 * BB_THEN (logical) BB_ELSE (logical)
10731 * *) Exceptions may be due to break and continue statements within loops
10734 begin_divergent_if_then(ctx, &ic, cond);
10735 visit_cf_list(ctx, &if_stmt->then_list);
10737 begin_divergent_if_else(ctx, &ic);
10738 visit_cf_list(ctx, &if_stmt->else_list);
10740 end_divergent_if(ctx, &ic);
10743 return !ctx->cf_info.has_branch && !ctx->block->logical_preds.empty();
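
/* Note: visit_if() returns false when the code following the if is
 * unreachable, i.e. a uniform branch was taken or the merge block ended up
 * without logical predecessors (both sides left the CFG via break/continue);
 * visit_cf_list() below uses that to stop emitting the rest of the list.
 */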

static bool
visit_cf_list(isel_context* ctx, struct exec_list* list)
{
   foreach_list_typed (nir_cf_node, node, node, list) {
      switch (node->type) {
      case nir_cf_node_block: visit_block(ctx, nir_cf_node_as_block(node)); break;
      case nir_cf_node_if:
         if (!visit_if(ctx, nir_cf_node_as_if(node)))
            return true;
         break;
      case nir_cf_node_loop: visit_loop(ctx, nir_cf_node_as_loop(node)); break;
      default: unreachable("unimplemented cf list type");
      }
   }
   return false;
}

static void
export_vs_varying(isel_context* ctx, int slot, bool is_pos, int* next_pos)
{
   assert(ctx->stage.hw == HWStage::VS || ctx->stage.hw == HWStage::NGG);

   const uint8_t* vs_output_param_offset = ctx->program->info.outinfo.vs_output_param_offset;

   assert(vs_output_param_offset);

   int offset = vs_output_param_offset[slot];
   unsigned mask = ctx->outputs.mask[slot];
   if (!is_pos && !mask)
      return;
   if (!is_pos && offset == AC_EXP_PARAM_UNDEFINED)
      return;

   aco_ptr<Export_instruction> exp{
      create_instruction<Export_instruction>(aco_opcode::exp, Format::EXP, 4, 0)};
   exp->enabled_mask = mask;
   for (unsigned i = 0; i < 4; ++i) {
      if (mask & (1 << i))
         exp->operands[i] = Operand(ctx->outputs.temps[slot * 4u + i]);
      else
         exp->operands[i] = Operand(v1);
   }
   /* GFX10 (Navi1x) skip POS0 exports if EXEC=0 and DONE=0, causing a hang.
    * Setting valid_mask=1 prevents it and has no other effect.
    */
   exp->valid_mask = ctx->options->gfx_level == GFX10 && is_pos && *next_pos == 0;
   exp->done = false;
   exp->compressed = false;
   if (is_pos)
      exp->dest = V_008DFC_SQ_EXP_POS + (*next_pos)++;
   else
      exp->dest = V_008DFC_SQ_EXP_PARAM + offset;
   ctx->block->instructions.emplace_back(std::move(exp));
}

static void
export_vs_psiz_layer_viewport_vrs(isel_context* ctx, int* next_pos,
                                  const aco_vp_output_info* outinfo)
{
   aco_ptr<Export_instruction> exp{
      create_instruction<Export_instruction>(aco_opcode::exp, Format::EXP, 4, 0)};
   exp->enabled_mask = 0;
   for (unsigned i = 0; i < 4; ++i)
      exp->operands[i] = Operand(v1);
   if (ctx->outputs.mask[VARYING_SLOT_PSIZ]) {
      exp->operands[0] = Operand(ctx->outputs.temps[VARYING_SLOT_PSIZ * 4u]);
      exp->enabled_mask |= 0x1;
   }
   if (ctx->outputs.mask[VARYING_SLOT_LAYER] && !outinfo->writes_layer_per_primitive) {
      exp->operands[2] = Operand(ctx->outputs.temps[VARYING_SLOT_LAYER * 4u]);
      exp->enabled_mask |= 0x4;
   }
   if (ctx->outputs.mask[VARYING_SLOT_VIEWPORT] && !outinfo->writes_viewport_index_per_primitive) {
      if (ctx->options->gfx_level < GFX9) {
         exp->operands[3] = Operand(ctx->outputs.temps[VARYING_SLOT_VIEWPORT * 4u]);
         exp->enabled_mask |= 0x8;
      } else {
         Builder bld(ctx->program, ctx->block);

         Temp out = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand::c32(16u),
                             Operand(ctx->outputs.temps[VARYING_SLOT_VIEWPORT * 4u]));
         if (exp->operands[2].isTemp())
            out = bld.vop2(aco_opcode::v_or_b32, bld.def(v1), Operand(out), exp->operands[2]);

         exp->operands[2] = Operand(out);
         exp->enabled_mask |= 0x4;
      }
   }
   if (ctx->outputs.mask[VARYING_SLOT_PRIMITIVE_SHADING_RATE]) {
      exp->operands[1] = Operand(ctx->outputs.temps[VARYING_SLOT_PRIMITIVE_SHADING_RATE * 4u]);
      exp->enabled_mask |= 0x2;
   }

   exp->valid_mask = ctx->options->gfx_level == GFX10 && *next_pos == 0;
   exp->done = false;
   exp->compressed = false;
   exp->dest = V_008DFC_SQ_EXP_POS + (*next_pos)++;
   ctx->block->instructions.emplace_back(std::move(exp));
}
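
/* A note on the GFX9+ path above: the viewport index no longer gets its own
 * position-export channel there; it is packed into the high 16 bits of the
 * layer channel instead, which is what the v_lshlrev_b32/v_or_b32 pair
 * implements.
 */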

static void
create_vs_exports(isel_context* ctx)
{
   assert(ctx->stage.hw == HWStage::VS || ctx->stage.hw == HWStage::NGG);
   const aco_vp_output_info* outinfo = &ctx->program->info.outinfo;

   ctx->block->kind |= block_kind_export_end;

   /* Hardware requires position data to always be exported, even if the
    * application did not write gl_Position.
    */
   ctx->outputs.mask[VARYING_SLOT_POS] = 0xf;

   /* the order these position exports are created is important */
   int next_pos = 0;
   export_vs_varying(ctx, VARYING_SLOT_POS, true, &next_pos);

   if (outinfo->writes_pointsize || outinfo->writes_layer || outinfo->writes_viewport_index ||
       outinfo->writes_primitive_shading_rate) {
      export_vs_psiz_layer_viewport_vrs(ctx, &next_pos, outinfo);
   }
   if (ctx->num_clip_distances + ctx->num_cull_distances > 0)
      export_vs_varying(ctx, VARYING_SLOT_CLIP_DIST0, true, &next_pos);
   if (ctx->num_clip_distances + ctx->num_cull_distances > 4)
      export_vs_varying(ctx, VARYING_SLOT_CLIP_DIST1, true, &next_pos);

   if (ctx->export_clip_dists) {
      if (ctx->num_clip_distances + ctx->num_cull_distances > 0)
         export_vs_varying(ctx, VARYING_SLOT_CLIP_DIST0, false, &next_pos);
      if (ctx->num_clip_distances + ctx->num_cull_distances > 4)
         export_vs_varying(ctx, VARYING_SLOT_CLIP_DIST1, false, &next_pos);
   }

   for (unsigned i = 0; i <= VARYING_SLOT_VAR31; ++i) {
      if (i < VARYING_SLOT_VAR0 && i != VARYING_SLOT_LAYER && i != VARYING_SLOT_PRIMITIVE_ID &&
          i != VARYING_SLOT_VIEWPORT)
         continue;
      if (ctx->shader && ctx->shader->info.per_primitive_outputs & BITFIELD64_BIT(i))
         continue;

      export_vs_varying(ctx, i, false, NULL);
   }
}

static void
create_primitive_exports(isel_context *ctx, Temp prim_ch1)
{
   assert(ctx->stage.hw == HWStage::NGG);
   const aco_vp_output_info* outinfo = &ctx->program->info.outinfo;

   Builder bld(ctx->program, ctx->block);

   /* When layer, viewport etc. are per-primitive, they need to be encoded in
    * the primitive export instruction's second channel. The encoding is:
    * bits 31..30: VRS rate Y
    * bits 29..28: VRS rate X
    * bits 23..20: viewport
    * bits 19..17: layer
    */
   Temp ch2 = bld.copy(bld.def(v1), Operand::c32(0));
   unsigned en_mask = 1;

   if (outinfo->writes_layer_per_primitive) {
      en_mask |= 2;
      Temp tmp = ctx->outputs.temps[VARYING_SLOT_LAYER * 4u];
      ch2 = bld.vop3(aco_opcode::v_lshl_or_b32, bld.def(v1), tmp, Operand::c32(17), ch2);
   }
   if (outinfo->writes_viewport_index_per_primitive) {
      en_mask |= 2;
      Temp tmp = ctx->outputs.temps[VARYING_SLOT_VIEWPORT * 4u];
      ch2 = bld.vop3(aco_opcode::v_lshl_or_b32, bld.def(v1), tmp, Operand::c32(20), ch2);
   }
   if (outinfo->writes_primitive_shading_rate_per_primitive) {
      en_mask |= 2;
      Temp tmp = ctx->outputs.temps[VARYING_SLOT_PRIMITIVE_SHADING_RATE * 4u];
      ch2 = bld.vop2(aco_opcode::v_or_b32, bld.def(v1), tmp, ch2);
   }

   Operand prim_ch2 = (en_mask & 2) ? Operand(ch2) : Operand(v1);

   bld.exp(aco_opcode::exp, prim_ch1, prim_ch2, Operand(v1), Operand(v1),
           en_mask /* enabled mask */, V_008DFC_SQ_EXP_PRIM /* dest */, false /* compressed */,
           true /* done */, false /* valid mask */);

   /* Export generic per-primitive attributes. */
   for (unsigned i = 0; i <= VARYING_SLOT_VAR31; ++i) {
      if (!(ctx->shader->info.per_primitive_outputs & BITFIELD64_BIT(i)))
         continue;
      if (i == VARYING_SLOT_PRIMITIVE_SHADING_RATE)
         continue;

      export_vs_varying(ctx, i, false, NULL);
   }
}

static bool
export_fs_mrt_z(isel_context* ctx)
{
   Builder bld(ctx->program, ctx->block);
   unsigned enabled_channels = 0;
   bool compr = false;
   Operand values[4];

   for (unsigned i = 0; i < 4; ++i) {
      values[i] = Operand(v1);
   }

   /* Both stencil and sample mask only need 16-bits. */
   if (!ctx->program->info.ps.writes_z &&
       (ctx->program->info.ps.writes_stencil || ctx->program->info.ps.writes_sample_mask)) {
      compr = ctx->program->gfx_level < GFX11; /* COMPR flag */

      if (ctx->program->info.ps.writes_stencil) {
         /* Stencil should be in X[23:16]. */
         values[0] = Operand(ctx->outputs.temps[FRAG_RESULT_STENCIL * 4u]);
         values[0] = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand::c32(16u), values[0]);
         enabled_channels |= ctx->program->gfx_level >= GFX11 ? 0x1 : 0x3;
      }

      if (ctx->program->info.ps.writes_sample_mask) {
         /* SampleMask should be in Y[15:0]. */
         values[1] = Operand(ctx->outputs.temps[FRAG_RESULT_SAMPLE_MASK * 4u]);
         enabled_channels |= ctx->program->gfx_level >= GFX11 ? 0x2 : 0xc;
      }

      if (ctx->options->key.ps.alpha_to_coverage_via_mrtz &&
          (ctx->outputs.mask[FRAG_RESULT_DATA0] & 0x8)) {
         /* MRT0 alpha should be in Y[31:16] if alpha-to-coverage is enabled and MRTZ is present. */
         assert(ctx->program->gfx_level >= GFX11);
         Operand mrtz_alpha = Operand(ctx->outputs.temps[FRAG_RESULT_DATA0 + 3u]);
         mrtz_alpha =
            bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand::c32(16u), mrtz_alpha);
         if (ctx->program->info.ps.writes_sample_mask) {
            /* Ignore the high 16 bits of the sample mask. */
            values[1] = bld.vop3(aco_opcode::v_and_or_b32, bld.def(v1), values[1],
                                 Operand::c32(0x0000ffffu), mrtz_alpha);
         } else {
            values[1] = mrtz_alpha;
         }
         enabled_channels |= 0x2;
      }
   } else {
      if (ctx->program->info.ps.writes_z) {
         values[0] = Operand(ctx->outputs.temps[FRAG_RESULT_DEPTH * 4u]);
         enabled_channels |= 0x1;
      }

      if (ctx->program->info.ps.writes_stencil) {
         values[1] = Operand(ctx->outputs.temps[FRAG_RESULT_STENCIL * 4u]);
         enabled_channels |= 0x2;
      }

      if (ctx->program->info.ps.writes_sample_mask) {
         values[2] = Operand(ctx->outputs.temps[FRAG_RESULT_SAMPLE_MASK * 4u]);
         enabled_channels |= 0x4;
      }

      if (ctx->options->key.ps.alpha_to_coverage_via_mrtz &&
          (ctx->outputs.mask[FRAG_RESULT_DATA0] & 0x8)) {
         assert(ctx->program->gfx_level >= GFX11);
         values[3] = Operand(ctx->outputs.temps[FRAG_RESULT_DATA0 + 3u]);
         enabled_channels |= 0x8;
      }
   }

   /* GFX6 (except OLAND and HAINAN) has a bug that it only looks at the X
    * writemask component.
    */
   if (ctx->options->gfx_level == GFX6 && ctx->options->family != CHIP_OLAND &&
       ctx->options->family != CHIP_HAINAN) {
      enabled_channels |= 0x1;
   }

   bld.exp(aco_opcode::exp, values[0], values[1], values[2], values[3], enabled_channels,
           V_008DFC_SQ_EXP_MRTZ, compr);

   return true;
}
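
/* A hedged note on the 16-bit path above: when depth is not written, stencil
 * and the sample mask can use the compressed MRTZ export (COMPR) on pre-GFX11
 * parts, which is presumably why the enabled-channel masks differ between
 * GFX11 (0x1/0x2) and earlier generations (0x3/0xc) — compressed exports
 * enable channels in pairs.
 */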

struct mrt_color_export {
   int slot;
   unsigned write_mask;
   Operand values[4];
   uint8_t col_format;

   /* Fields below are only used for PS epilogs. */
   bool is_int8;
   bool is_int10;
   bool enable_mrt_output_nan_fixup;
};

static bool
export_fs_mrt_color(isel_context* ctx, const struct mrt_color_export *out, bool is_ps_epilog)
{
   Builder bld(ctx->program, ctx->block);
   Operand values[4];

   for (unsigned i = 0; i < 4; ++i) {
      values[i] = out->values[i];
   }

   unsigned target;
   unsigned enabled_channels = 0;
   aco_opcode compr_op = aco_opcode::num_opcodes;
   bool compr = false;

   target = V_008DFC_SQ_EXP_MRT + out->slot;

   /* Replace NaN by zero (only 32-bit) to fix game bugs if requested. */
   if (out->enable_mrt_output_nan_fixup &&
       (out->col_format == V_028714_SPI_SHADER_32_R ||
        out->col_format == V_028714_SPI_SHADER_32_GR ||
        out->col_format == V_028714_SPI_SHADER_32_AR ||
        out->col_format == V_028714_SPI_SHADER_32_ABGR ||
        out->col_format == V_028714_SPI_SHADER_FP16_ABGR)) {
      u_foreach_bit(i, out->write_mask) {
         Temp isnan = bld.vopc(aco_opcode::v_cmp_class_f32, bld.def(bld.lm), values[i],
                               bld.copy(bld.def(v1), Operand::c32(3u)));
         values[i] = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), values[i],
                              bld.copy(bld.def(v1), Operand::zero()), isnan);
      }
   }

   switch (out->col_format) {
   case V_028714_SPI_SHADER_32_R: enabled_channels = 1; break;

   case V_028714_SPI_SHADER_32_GR: enabled_channels = 0x3; break;

   case V_028714_SPI_SHADER_32_AR:
      if (ctx->options->gfx_level >= GFX10) {
         /* Special case: on GFX10, the outputs are different for 32_AR */
         enabled_channels = 0x3;
         values[1] = values[3];
         values[3] = Operand(v1);
      } else {
         enabled_channels = 0x9;
      }
      break;

   case V_028714_SPI_SHADER_FP16_ABGR:
      if (is_ps_epilog) {
         for (int i = 0; i < 2; i++) {
            bool enabled = (out->write_mask >> (i * 2)) & 0x3;
            if (enabled) {
               enabled_channels |= 0x3 << (i * 2);
               if (ctx->options->gfx_level == GFX8 || ctx->options->gfx_level == GFX9) {
                  values[i] =
                     bld.vop3(aco_opcode::v_cvt_pkrtz_f16_f32_e64, bld.def(v1),
                              values[i * 2].isUndefined() ? Operand::zero() : values[i * 2],
                              values[i * 2 + 1].isUndefined() ? Operand::zero() : values[i * 2 + 1]);
               } else {
                  values[i] =
                     bld.vop2(aco_opcode::v_cvt_pkrtz_f16_f32, bld.def(v1),
                              values[i * 2].isUndefined() ? values[i * 2 + 1] : values[i * 2],
                              values[i * 2 + 1].isUndefined() ? values[i * 2] : values[i * 2 + 1]);
               }
            } else {
               values[i] = Operand(v1);
            }
         }
         values[2] = Operand(v1);
         values[3] = Operand(v1);
      } else {
         enabled_channels = util_widen_mask(out->write_mask, 2);
      }
      compr = true;
      break;

   case V_028714_SPI_SHADER_UNORM16_ABGR:
      if (is_ps_epilog) {
         compr_op = aco_opcode::v_cvt_pknorm_u16_f32;
      } else {
         enabled_channels = util_widen_mask(out->write_mask, 2);
         compr = true;
      }
      break;

   case V_028714_SPI_SHADER_SNORM16_ABGR:
      if (is_ps_epilog) {
         compr_op = aco_opcode::v_cvt_pknorm_i16_f32;
      } else {
         enabled_channels = util_widen_mask(out->write_mask, 2);
         compr = true;
      }
      break;

   case V_028714_SPI_SHADER_UINT16_ABGR:
      if (is_ps_epilog) {
         compr_op = aco_opcode::v_cvt_pk_u16_u32;
         if (out->is_int8 || out->is_int10) {
            /* clamp */
            uint32_t max_rgb = out->is_int8 ? 255 : out->is_int10 ? 1023 : 0;

            u_foreach_bit(i, out->write_mask) {
               uint32_t max = i == 3 && out->is_int10 ? 3 : max_rgb;

               values[i] = bld.vop2(aco_opcode::v_min_u32, bld.def(v1), Operand::c32(max), values[i]);
            }
         }
      } else {
         enabled_channels = util_widen_mask(out->write_mask, 2);
         compr = true;
      }
      break;

   case V_028714_SPI_SHADER_SINT16_ABGR:
      if (is_ps_epilog) {
         compr_op = aco_opcode::v_cvt_pk_i16_i32;
         if (out->is_int8 || out->is_int10) {
            /* clamp */
            uint32_t max_rgb = out->is_int8 ? 127 : out->is_int10 ? 511 : 0;
            uint32_t min_rgb = out->is_int8 ? -128 : out->is_int10 ? -512 : 0;

            u_foreach_bit(i, out->write_mask) {
               uint32_t max = i == 3 && out->is_int10 ? 1 : max_rgb;
               uint32_t min = i == 3 && out->is_int10 ? -2u : min_rgb;

               values[i] = bld.vop2(aco_opcode::v_min_i32, bld.def(v1), Operand::c32(max), values[i]);
               values[i] = bld.vop2(aco_opcode::v_max_i32, bld.def(v1), Operand::c32(min), values[i]);
            }
         }
      } else {
         enabled_channels = util_widen_mask(out->write_mask, 2);
         compr = true;
      }
      break;

   case V_028714_SPI_SHADER_32_ABGR: enabled_channels = 0xF; break;

   case V_028714_SPI_SHADER_ZERO:
   default: return false;
   }

   if (compr_op != aco_opcode::num_opcodes) {
      for (int i = 0; i < 2; i++) {
         /* check if at least one of the values to be compressed is enabled */
         bool enabled = (out->write_mask >> (i * 2)) & 0x3;
         if (enabled) {
            enabled_channels |= 0x3 << (i * 2);
            values[i] = bld.vop3(
               compr_op, bld.def(v1), values[i * 2].isUndefined() ? Operand::zero() : values[i * 2],
               values[i * 2 + 1].isUndefined() ? Operand::zero() : values[i * 2 + 1]);
         } else {
            values[i] = Operand(v1);
         }
      }
      values[2] = Operand(v1);
      values[3] = Operand(v1);
      compr = true;
   } else if (!compr) {
      for (int i = 0; i < 4; i++)
         values[i] = enabled_channels & (1 << i) ? values[i] : Operand(v1);
   }

   if (ctx->program->gfx_level >= GFX11) {
      /* GFX11 doesn't use COMPR for exports, but the channel mask should be
       * 0x3 instead.
       */
      enabled_channels = compr ? 0x3 : enabled_channels;
      compr = false;
   }

   bld.exp(aco_opcode::exp, values[0], values[1], values[2], values[3], enabled_channels, target,
           compr);

   return true;
}

static void
create_fs_null_export(isel_context* ctx)
{
   /* FS must always have exports.
    * So when there are none, we need to add a null export.
    */

   Builder bld(ctx->program, ctx->block);
   /* GFX11 doesn't support NULL exports, and MRT0 should be exported instead. */
   unsigned dest = ctx->options->gfx_level >= GFX11 ? V_008DFC_SQ_EXP_MRT : V_008DFC_SQ_EXP_NULL;
   bld.exp(aco_opcode::exp, Operand(v1), Operand(v1), Operand(v1), Operand(v1),
           /* enabled_mask */ 0, dest, /* compr */ false, /* done */ true, /* vm */ true);
}

static void
create_fs_jump_to_epilog(isel_context* ctx)
{
   Builder bld(ctx->program, ctx->block);
   std::vector<Operand> color_exports;
   PhysReg exports_start(256); /* VGPR 0 */

   for (unsigned slot = FRAG_RESULT_DATA0; slot < FRAG_RESULT_DATA7 + 1; ++slot) {
      unsigned color_index = slot - FRAG_RESULT_DATA0;
      unsigned color_type = (ctx->output_color_types >> (color_index * 2)) & 0x3;
      unsigned write_mask = ctx->outputs.mask[slot];

      if (!write_mask)
         continue;

      PhysReg color_start(exports_start.reg() + color_index * 4);

      for (unsigned i = 0; i < 4; i++) {
         if (!(write_mask & BITFIELD_BIT(i))) {
            color_exports.emplace_back(Operand(v1));
            continue;
         }

         PhysReg chan_reg = color_start.advance(i * 4u);
         Operand chan(ctx->outputs.temps[slot * 4u + i]);

         if (color_type == ACO_TYPE_FLOAT16) {
            chan = bld.vop1(aco_opcode::v_cvt_f32_f16, bld.def(v1), chan);
         } else if (color_type == ACO_TYPE_INT16 || color_type == ACO_TYPE_UINT16) {
            bool sign_ext = color_type == ACO_TYPE_INT16;
            Temp tmp = convert_int(ctx, bld, chan.getTemp(), 16, 32, sign_ext);
            chan = Operand(tmp);
         }

         chan.setFixed(chan_reg);
         color_exports.emplace_back(chan);
      }
   }

   Temp continue_pc = convert_pointer_to_64_bit(ctx, get_arg(ctx, ctx->args->ps_epilog_pc));

   aco_ptr<Pseudo_instruction> jump{create_instruction<Pseudo_instruction>(
      aco_opcode::p_jump_to_epilog, Format::PSEUDO, 1 + color_exports.size(), 0)};
   jump->operands[0] = Operand(continue_pc);
   for (unsigned i = 0; i < color_exports.size(); i++) {
      jump->operands[i + 1] = color_exports[i];
   }
   ctx->block->instructions.emplace_back(std::move(jump));
}
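
/* The convention assumed here: the separately compiled PS epilog receives each
 * MRT's channels in four consecutive VGPRs starting at v0 (exports_start), so
 * p_jump_to_epilog only needs the epilog PC plus these pre-placed operands,
 * with no further linking information.
 */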

static void
create_fs_exports(isel_context* ctx)
{
   Builder bld(ctx->program, ctx->block);
   bool exported = false;

   /* Export depth, stencil and sample mask. */
   if (ctx->outputs.mask[FRAG_RESULT_DEPTH] || ctx->outputs.mask[FRAG_RESULT_STENCIL] ||
       ctx->outputs.mask[FRAG_RESULT_SAMPLE_MASK])
      exported |= export_fs_mrt_z(ctx);

   if (ctx->program->info.ps.has_epilog) {
      create_fs_jump_to_epilog(ctx);
   } else {
      /* Export all color render targets. */
      for (unsigned i = FRAG_RESULT_DATA0; i < FRAG_RESULT_DATA7 + 1; ++i) {
         if (!ctx->outputs.mask[i])
            continue;

         struct mrt_color_export out = {0};

         out.slot = i - FRAG_RESULT_DATA0;
         out.write_mask = ctx->outputs.mask[i];
         out.col_format = (ctx->options->key.ps.col_format >> (4 * out.slot)) & 0xf;

         for (unsigned c = 0; c < 4; ++c) {
            if (out.write_mask & (1 << c)) {
               out.values[c] = Operand(ctx->outputs.temps[i * 4u + c]);
            } else {
               out.values[c] = Operand(v1);
            }
         }

         exported |= export_fs_mrt_color(ctx, &out, false);
      }

      if (!exported)
         create_fs_null_export(ctx);
   }

   ctx->block->kind |= block_kind_export_end;
}

static void
emit_stream_output(isel_context* ctx, Temp const* so_buffers, Temp const* so_write_offset,
                   const struct aco_stream_output* output)
{
   assert(ctx->stage.hw == HWStage::VS);

   unsigned loc = output->location;
   unsigned buf = output->buffer;

   unsigned writemask = output->component_mask & ctx->outputs.mask[loc];
   while (writemask) {
      int start, count;
      u_bit_scan_consecutive_range(&writemask, &start, &count);
      if (count == 3 && ctx->options->gfx_level == GFX6) {
         /* GFX6 doesn't support storing vec3, split it. */
         writemask |= 1u << (start + 2);
         count = 2;
      }

      unsigned offset = output->offset + (start - (ffs(output->component_mask) - 1)) * 4;

      Temp write_data = ctx->program->allocateTmp(RegClass(RegType::vgpr, count));
      aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(
         aco_opcode::p_create_vector, Format::PSEUDO, count, 1)};
      for (int i = 0; i < count; ++i)
         vec->operands[i] = Operand(ctx->outputs.temps[loc * 4 + start + i]);
      vec->definitions[0] = Definition(write_data);
      ctx->block->instructions.emplace_back(std::move(vec));

      aco_opcode opcode = get_buffer_store_op(count * 4);
      aco_ptr<MUBUF_instruction> store{
         create_instruction<MUBUF_instruction>(opcode, Format::MUBUF, 4, 0)};
      store->operands[0] = Operand(so_buffers[buf]);
      store->operands[1] = Operand(so_write_offset[buf]);
      store->operands[2] = Operand::c32(0);
      store->operands[3] = Operand(write_data);
      if (offset > 4095) {
         /* Don't think this can happen in RADV, but maybe GL? It's easy to do this anyway. */
         Builder bld(ctx->program, ctx->block);
         store->operands[1] =
            bld.vadd32(bld.def(v1), Operand::c32(offset), Operand(so_write_offset[buf]));
      } else {
         store->offset = offset;
      }
      store->offen = true;
      store->glc = ctx->program->gfx_level < GFX11;
      store->dlc = false;
      ctx->block->instructions.emplace_back(std::move(store));
   }
}

static void
emit_streamout(isel_context* ctx, unsigned stream)
{
   Builder bld(ctx->program, ctx->block);

   Temp so_vtx_count =
      bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc),
               get_arg(ctx, ctx->args->ac.streamout_config), Operand::c32(0x70010u));
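
   /* The s_bfe SIMM16 above encodes (width << 16) | offset, so 0x70010
    * extracts 7 bits starting at bit 16 of streamout_config: the number of
    * vertices that still fit into the streamout buffers.
    */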

   Temp tid = emit_mbcnt(ctx, bld.tmp(v1));

   Temp can_emit = bld.vopc(aco_opcode::v_cmp_gt_i32, bld.def(bld.lm), so_vtx_count, tid);

   if_context ic;
   begin_divergent_if_then(ctx, &ic, can_emit);

   bld.reset(ctx->block);

   Temp so_write_index =
      bld.vadd32(bld.def(v1), get_arg(ctx, ctx->args->ac.streamout_write_index), tid);

   Temp so_buffers[4];
   Temp so_write_offset[4];
   Temp buf_ptr = convert_pointer_to_64_bit(ctx, get_arg(ctx, ctx->args->streamout_buffers));

   for (unsigned i = 0; i < 4; i++) {
      unsigned stride = ctx->program->info.so.strides[i];
      if (!stride)
         continue;

      so_buffers[i] = bld.smem(aco_opcode::s_load_dwordx4, bld.def(s4), buf_ptr,
                               bld.copy(bld.def(s1), Operand::c32(i * 16u)));

      if (stride == 1) {
         Temp offset = bld.sop2(aco_opcode::s_add_i32, bld.def(s1), bld.def(s1, scc),
                                get_arg(ctx, ctx->args->ac.streamout_write_index),
                                get_arg(ctx, ctx->args->ac.streamout_offset[i]));
         Temp new_offset = bld.vadd32(bld.def(v1), offset, tid);

         so_write_offset[i] =
            bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand::c32(2u), new_offset);
      } else {
         Temp offset = bld.v_mul_imm(bld.def(v1), so_write_index, stride * 4u);
         Temp offset2 = bld.sop2(aco_opcode::s_mul_i32, bld.def(s1), Operand::c32(4u),
                                 get_arg(ctx, ctx->args->ac.streamout_offset[i]));
         so_write_offset[i] = bld.vadd32(bld.def(v1), offset, offset2);
      }
   }

   for (unsigned i = 0; i < ctx->program->info.so.num_outputs; i++) {
      const struct aco_stream_output* output = &ctx->program->info.so.outputs[i];
      if (stream != output->stream)
         continue;

      emit_stream_output(ctx, so_buffers, so_write_offset, output);
   }

   begin_divergent_if_else(ctx, &ic);
   end_divergent_if(ctx, &ic);
}

Pseudo_instruction*
add_startpgm(struct isel_context* ctx)
{
   unsigned def_count = 0;
   for (unsigned i = 0; i < ctx->args->ac.arg_count; i++) {
      if (ctx->args->ac.args[i].skip)
         continue;
      unsigned align = MIN2(4, util_next_power_of_two(ctx->args->ac.args[i].size));
      if (ctx->args->ac.args[i].file == AC_ARG_SGPR && ctx->args->ac.args[i].offset % align)
         def_count += ctx->args->ac.args[i].size;
      else
         def_count++;
   }

   Pseudo_instruction* startpgm =
      create_instruction<Pseudo_instruction>(aco_opcode::p_startpgm, Format::PSEUDO, 0, def_count);
   ctx->block->instructions.emplace_back(startpgm);
   for (unsigned i = 0, arg = 0; i < ctx->args->ac.arg_count; i++) {
      if (ctx->args->ac.args[i].skip)
         continue;

      enum ac_arg_regfile file = ctx->args->ac.args[i].file;
      unsigned size = ctx->args->ac.args[i].size;
      unsigned reg = ctx->args->ac.args[i].offset;
      RegClass type = RegClass(file == AC_ARG_SGPR ? RegType::sgpr : RegType::vgpr, size);

      if (file == AC_ARG_SGPR && reg % MIN2(4, util_next_power_of_two(size))) {
         Temp elems[16];
         for (unsigned j = 0; j < size; j++) {
            elems[j] = ctx->program->allocateTmp(s1);
            startpgm->definitions[arg++] = Definition(elems[j].id(), PhysReg{reg + j}, s1);
         }
         ctx->arg_temps[i] = create_vec_from_array(ctx, elems, size, RegType::sgpr, 4);
      } else {
         Temp dst = ctx->program->allocateTmp(type);
         ctx->arg_temps[i] = dst;
         startpgm->definitions[arg] = Definition(dst);
         startpgm->definitions[arg].setFixed(PhysReg{file == AC_ARG_SGPR ? reg : reg + 256});
         arg++;
      }
   }

   /* Stash these in the program so that they can be accessed later when
    * handling spilling.
    */
   ctx->program->private_segment_buffer = get_arg(ctx, ctx->args->ring_offsets);
   if (ctx->program->gfx_level <= GFX10_3) {
      ctx->program->scratch_offset = get_arg(ctx, ctx->args->ac.scratch_offset);

      if (ctx->program->gfx_level >= GFX9) {
         Operand scratch_offset(ctx->program->scratch_offset);
         scratch_offset.setLateKill(true);
         Builder bld(ctx->program, ctx->block);
         bld.pseudo(aco_opcode::p_init_scratch, bld.def(s2), bld.def(s1, scc),
                    ctx->program->private_segment_buffer, scratch_offset);
      }
   }

   if (ctx->stage.has(SWStage::VS) && ctx->program->info.vs.dynamic_inputs) {
      unsigned num_attributes = util_last_bit(ctx->program->info.vs.input_slot_usage_mask);
      for (unsigned i = 0; i < num_attributes; i++) {
         Definition def(get_arg(ctx, ctx->args->vs_inputs[i]));

         unsigned idx = ctx->args->vs_inputs[i].arg_index;
         def.setFixed(PhysReg(256 + ctx->args->ac.args[idx].offset));

         ctx->program->vs_inputs.push_back(def);
      }
   }

   return startpgm;
}

void
fix_ls_vgpr_init_bug(isel_context* ctx, Pseudo_instruction* startpgm)
{
   assert(ctx->shader->info.stage == MESA_SHADER_VERTEX);
   Builder bld(ctx->program, ctx->block);
   constexpr unsigned hs_idx = 1u;
   Builder::Result hs_thread_count = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc),
                                              get_arg(ctx, ctx->args->ac.merged_wave_info),
                                              Operand::c32((8u << 16) | (hs_idx * 8u)));
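
   /* The s_bfe operand (8u << 16) | (hs_idx * 8u) selects 8 bits at offset 8,
    * i.e. byte 1 of merged_wave_info, which holds the HS wave's thread count;
    * the second (scc) definition is used below as the "count != 0" condition.
    */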
   Temp ls_has_nonzero_hs_threads = bool_to_vector_condition(ctx, hs_thread_count.def(1).getTemp());

   /* If there are no HS threads, SPI mistakenly loads the LS VGPRs starting at VGPR 0. */
   Temp instance_id =
      bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), get_arg(ctx, ctx->args->ac.vertex_id),
               get_arg(ctx, ctx->args->ac.instance_id), ls_has_nonzero_hs_threads);
   Temp vs_rel_patch_id =
      bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), get_arg(ctx, ctx->args->ac.tcs_rel_ids),
               get_arg(ctx, ctx->args->ac.vs_rel_patch_id), ls_has_nonzero_hs_threads);
   Temp vertex_id =
      bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), get_arg(ctx, ctx->args->ac.tcs_patch_id),
               get_arg(ctx, ctx->args->ac.vertex_id), ls_has_nonzero_hs_threads);

   ctx->arg_temps[ctx->args->ac.instance_id.arg_index] = instance_id;
   ctx->arg_temps[ctx->args->ac.vs_rel_patch_id.arg_index] = vs_rel_patch_id;
   ctx->arg_temps[ctx->args->ac.vertex_id.arg_index] = vertex_id;
}

void
split_arguments(isel_context* ctx, Pseudo_instruction* startpgm)
{
   /* Split all arguments except for the first (ring_offsets) and the last
    * (exec) so that the dead channels don't stay live throughout the program.
    */
   for (int i = 1; i < startpgm->definitions.size(); i++) {
      if (startpgm->definitions[i].regClass().size() > 1) {
         emit_split_vector(ctx, startpgm->definitions[i].getTemp(),
                           startpgm->definitions[i].regClass().size());
      }
   }
}

void
handle_bc_optimize(isel_context* ctx)
{
   /* needed when SPI_PS_IN_CONTROL.BC_OPTIMIZE_DISABLE is set to 0 */
   Builder bld(ctx->program, ctx->block);
   uint32_t spi_ps_input_ena = ctx->program->config->spi_ps_input_ena;
   bool uses_center =
      G_0286CC_PERSP_CENTER_ENA(spi_ps_input_ena) || G_0286CC_LINEAR_CENTER_ENA(spi_ps_input_ena);
   bool uses_persp_centroid = G_0286CC_PERSP_CENTROID_ENA(spi_ps_input_ena);
   bool uses_linear_centroid = G_0286CC_LINEAR_CENTROID_ENA(spi_ps_input_ena);

   if (uses_persp_centroid)
      ctx->persp_centroid = get_arg(ctx, ctx->args->ac.persp_centroid);
   if (uses_linear_centroid)
      ctx->linear_centroid = get_arg(ctx, ctx->args->ac.linear_centroid);

   if (uses_center && (uses_persp_centroid || uses_linear_centroid)) {
      Temp sel = bld.vopc_e64(aco_opcode::v_cmp_lt_i32, bld.def(bld.lm),
                              get_arg(ctx, ctx->args->ac.prim_mask), Operand::zero());

      if (uses_persp_centroid) {
         Temp new_coord[2];
         for (unsigned i = 0; i < 2; i++) {
            Temp persp_centroid =
               emit_extract_vector(ctx, get_arg(ctx, ctx->args->ac.persp_centroid), i, v1);
            Temp persp_center =
               emit_extract_vector(ctx, get_arg(ctx, ctx->args->ac.persp_center), i, v1);
            new_coord[i] =
               bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), persp_centroid, persp_center, sel);
         }
         ctx->persp_centroid = bld.tmp(v2);
         bld.pseudo(aco_opcode::p_create_vector, Definition(ctx->persp_centroid),
                    Operand(new_coord[0]), Operand(new_coord[1]));
         emit_split_vector(ctx, ctx->persp_centroid, 2);
      }

      if (uses_linear_centroid) {
         Temp new_coord[2];
         for (unsigned i = 0; i < 2; i++) {
            Temp linear_centroid =
               emit_extract_vector(ctx, get_arg(ctx, ctx->args->ac.linear_centroid), i, v1);
            Temp linear_center =
               emit_extract_vector(ctx, get_arg(ctx, ctx->args->ac.linear_center), i, v1);
            new_coord[i] = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), linear_centroid,
                                    linear_center, sel);
         }
         ctx->linear_centroid = bld.tmp(v2);
         bld.pseudo(aco_opcode::p_create_vector, Definition(ctx->linear_centroid),
                    Operand(new_coord[0]), Operand(new_coord[1]));
         emit_split_vector(ctx, ctx->linear_centroid, 2);
      }
   }
}

void
setup_fp_mode(isel_context* ctx, nir_shader* shader)
{
   Program* program = ctx->program;

   unsigned float_controls = shader->info.float_controls_execution_mode;

   program->next_fp_mode.preserve_signed_zero_inf_nan32 =
      float_controls & FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP32;
   program->next_fp_mode.preserve_signed_zero_inf_nan16_64 =
      float_controls & (FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP16 |
                        FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP64);

   program->next_fp_mode.must_flush_denorms32 =
      float_controls & FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP32;
   program->next_fp_mode.must_flush_denorms16_64 =
      float_controls &
      (FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP16 | FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP64);

   program->next_fp_mode.care_about_round32 =
      float_controls &
      (FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP32 | FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP32);

   program->next_fp_mode.care_about_round16_64 =
      float_controls &
      (FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP16 | FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP64 |
       FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP16 | FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP64);

   /* default to preserving fp16 and fp64 denorms, since it's free for fp64 and
    * the precision seems needed for Wolfenstein: Youngblood to render correctly */
   if (program->next_fp_mode.must_flush_denorms16_64)
      program->next_fp_mode.denorm16_64 = 0;
   else
      program->next_fp_mode.denorm16_64 = fp_denorm_keep;

   /* preserving fp32 denorms is expensive, so only do it if asked */
   if (float_controls & FLOAT_CONTROLS_DENORM_PRESERVE_FP32)
      program->next_fp_mode.denorm32 = fp_denorm_keep;
   else
      program->next_fp_mode.denorm32 = 0;

   if (float_controls & FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP32)
      program->next_fp_mode.round32 = fp_round_tz;
   else
      program->next_fp_mode.round32 = fp_round_ne;

   if (float_controls &
       (FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP16 | FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP64))
      program->next_fp_mode.round16_64 = fp_round_tz;
   else
      program->next_fp_mode.round16_64 = fp_round_ne;

   ctx->block->fp_mode = program->next_fp_mode;
}

void
cleanup_cfg(Program* program)
{
   /* create linear_succs/logical_succs */
   for (Block& BB : program->blocks) {
      for (unsigned idx : BB.linear_preds)
         program->blocks[idx].linear_succs.emplace_back(BB.index);
      for (unsigned idx : BB.logical_preds)
         program->blocks[idx].logical_succs.emplace_back(BB.index);
   }
}

Temp
lanecount_to_mask(isel_context* ctx, Temp count, bool allow64 = true)
{
   assert(count.regClass() == s1);

   Builder bld(ctx->program, ctx->block);
   Temp mask = bld.sop2(aco_opcode::s_bfm_b64, bld.def(s2), count, Operand::zero());
   Temp cond;

   if (ctx->program->wave_size == 64) {
      /* If we know that all 64 threads can't be active at a time, we just use the mask as-is */
      if (!allow64)
         return mask;

      /* Special case for 64 active invocations, because 64 doesn't work with s_bfm */
      Temp active_64 = bld.sopc(aco_opcode::s_bitcmp1_b32, bld.def(s1, scc), count,
                                Operand::c32(6u /* log2(64) */));
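
      /* s_bfm_b64 computes ((1ull << (count & 0x3f)) - 1), so a count of
       * exactly 64 wraps to an all-zero mask; the s_bitcmp1 of bit 6 above
       * detects that case so the cselect below can substitute all-ones.
       */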
      cond =
         bld.sop2(Builder::s_cselect, bld.def(bld.lm), Operand::c32(-1u), mask, bld.scc(active_64));
   } else {
      /* We use s_bfm_b64 (not _b32) which works with 32, but we need to extract the lower half of
       * the register */
      cond = emit_extract_vector(ctx, mask, 0, bld.lm);
   }

   return cond;
}

Temp
merged_wave_info_to_mask(isel_context* ctx, unsigned i)
{
   Builder bld(ctx->program, ctx->block);

   /* lanecount_to_mask() only cares about s0.u[6:0] so we don't need either s_bfe nor s_and here */
   Temp count = i == 0 ? get_arg(ctx, ctx->args->ac.merged_wave_info)
                       : bld.sop2(aco_opcode::s_lshr_b32, bld.def(s1), bld.def(s1, scc),
                                  get_arg(ctx, ctx->args->ac.merged_wave_info),
                                  Operand::c32(i * 8u));
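
   /* merged_wave_info packs the lane count of merged stage i into byte i,
    * hence the shift by i * 8 above.
    */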

   return lanecount_to_mask(ctx, count);
}

void
ngg_emit_sendmsg_gs_alloc_req(isel_context* ctx, Temp vtx_cnt, Temp prm_cnt)
{
   assert(vtx_cnt.id() && prm_cnt.id());

   Builder bld(ctx->program, ctx->block);
   Temp prm_cnt_0;

   if (ctx->program->gfx_level == GFX10 &&
       (ctx->stage.has(SWStage::GS) || ctx->program->info.has_ngg_culling)) {
      /* Navi 1x workaround: check whether the workgroup has no output.
       * If so, change the number of exported vertices and primitives to 1.
       */
      prm_cnt_0 = bld.sopc(aco_opcode::s_cmp_eq_u32, bld.def(s1, scc), prm_cnt, Operand::zero());
      prm_cnt = bld.sop2(aco_opcode::s_cselect_b32, bld.def(s1), Operand::c32(1u), prm_cnt,
                         bld.scc(prm_cnt_0));
      vtx_cnt = bld.sop2(aco_opcode::s_cselect_b32, bld.def(s1), Operand::c32(1u), vtx_cnt,
                         bld.scc(prm_cnt_0));
   }

   /* Put the number of vertices and primitives into m0 for the GS_ALLOC_REQ */
   Temp tmp =
      bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.def(s1, scc), prm_cnt, Operand::c32(12u));
   tmp = bld.sop2(aco_opcode::s_or_b32, bld.m0(bld.def(s1)), bld.def(s1, scc), tmp, vtx_cnt);
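
   /* The m0 value is therefore (prm_cnt << 12) | vtx_cnt: vertex count in the
    * low bits, primitive count starting at bit 12.
    */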

   /* Request the SPI to allocate space for the primitives and vertices
    * that will be exported by the threadgroup.
    */
   bld.sopp(aco_opcode::s_sendmsg, bld.m0(tmp), -1, sendmsg_gs_alloc_req);

   if (prm_cnt_0.id()) {
      /* Navi 1x workaround: export a triangle with NaN coordinates when NGG has no output.
       * It can't have all-zero positions because that would render an undesired pixel with
       * conservative rasterization.
       */
      Temp first_lane = bld.sop1(Builder::s_ff1_i32, bld.def(s1), Operand(exec, bld.lm));
      Temp cond = bld.sop2(Builder::s_lshl, bld.def(bld.lm), bld.def(s1, scc),
                           Operand::c32_or_c64(1u, ctx->program->wave_size == 64), first_lane);
      cond = bld.sop2(Builder::s_cselect, bld.def(bld.lm), cond,
                      Operand::zero(ctx->program->wave_size == 64 ? 8 : 4), bld.scc(prm_cnt_0));

      if_context ic_prim_0;
      begin_divergent_if_then(ctx, &ic_prim_0, cond);
      bld.reset(ctx->block);
      ctx->block->kind |= block_kind_export_end;

      /* Use zero: means that it's a triangle whose every vertex index is 0. */
      Temp zero = bld.copy(bld.def(v1), Operand::zero());
      /* Use NaN for the coordinates, so that the rasterizer always culls it. */
      Temp nan_coord = bld.copy(bld.def(v1), Operand::c32(-1u));

      bld.exp(aco_opcode::exp, zero, Operand(v1), Operand(v1), Operand(v1), 1 /* enabled mask */,
              V_008DFC_SQ_EXP_PRIM /* dest */, false /* compressed */, true /* done */,
              false /* valid mask */);
      bld.exp(aco_opcode::exp, nan_coord, nan_coord, nan_coord, nan_coord, 0xf /* enabled mask */,
              V_008DFC_SQ_EXP_POS /* dest */, false /* compressed */, true /* done */,
              true /* valid mask */);

      begin_divergent_if_else(ctx, &ic_prim_0);
      end_divergent_if(ctx, &ic_prim_0);
      bld.reset(ctx->block);
   }
}

} /* end namespace */

void
select_program(Program* program, unsigned shader_count, struct nir_shader* const* shaders,
               ac_shader_config* config, const struct aco_compiler_options* options,
               const struct aco_shader_info* info,
               const struct radv_shader_args* args)
{
   isel_context ctx =
      setup_isel_context(program, shader_count, shaders, config, options, info, args, false, false);
   if_context ic_merged_wave_info;
   bool ngg_gs = ctx.stage.hw == HWStage::NGG && ctx.stage.has(SWStage::GS);

   for (unsigned i = 0; i < shader_count; i++) {
      nir_shader* nir = shaders[i];
      init_context(&ctx, nir);

      setup_fp_mode(&ctx, nir);

      if (!i) {
         /* needs to be after init_context() for FS */
         Pseudo_instruction* startpgm = add_startpgm(&ctx);
         append_logical_start(ctx.block);

         if (unlikely(ctx.options->has_ls_vgpr_init_bug && ctx.stage == vertex_tess_control_hs))
            fix_ls_vgpr_init_bug(&ctx, startpgm);

         split_arguments(&ctx, startpgm);

         if (!info->vs.has_prolog &&
             (program->stage.has(SWStage::VS) || program->stage.has(SWStage::TES))) {
            Builder(ctx.program, ctx.block).sopp(aco_opcode::s_setprio, -1u, 0x3u);
         }
      }

      /* In a merged VS+TCS HS, the VS implementation can be completely empty. */
      nir_function_impl* func = nir_shader_get_entrypoint(nir);
      bool empty_shader =
         nir_cf_list_is_empty_block(&func->body) &&
         ((nir->info.stage == MESA_SHADER_VERTEX &&
           (ctx.stage == vertex_tess_control_hs || ctx.stage == vertex_geometry_gs)) ||
          (nir->info.stage == MESA_SHADER_TESS_EVAL && ctx.stage == tess_eval_geometry_gs));

      bool check_merged_wave_info =
         ctx.tcs_in_out_eq ? i == 0 : (shader_count >= 2 && !empty_shader && !(ngg_gs && i == 1));
      bool endif_merged_wave_info =
         ctx.tcs_in_out_eq ? i == 1 : (check_merged_wave_info && !(ngg_gs && i == 1));

      if (program->gfx_level == GFX10 && program->stage.hw == HWStage::NGG &&
          program->stage.num_sw_stages() == 1) {
         /* Workaround for Navi1x HW bug to ensure that all NGG waves launch before
          * s_sendmsg(GS_ALLOC_REQ). */
         Builder(ctx.program, ctx.block).sopp(aco_opcode::s_barrier, -1u, 0u);
      }

      if (check_merged_wave_info) {
         Temp cond = merged_wave_info_to_mask(&ctx, i);
         begin_divergent_if_then(&ctx, &ic_merged_wave_info, cond);
      }

      if (i) {
         Builder bld(ctx.program, ctx.block);

         /* Skip s_barrier from TCS when VS outputs are not stored in the LDS. */
         bool tcs_skip_barrier = ctx.stage == vertex_tess_control_hs &&
                                 ctx.tcs_temp_only_inputs == nir->info.inputs_read;

         if (!ngg_gs && !tcs_skip_barrier) {
            sync_scope scope =
               ctx.stage == vertex_tess_control_hs &&
                     program->wave_size % ctx.options->key.tcs.tess_input_vertices == 0 &&
                     ctx.options->key.tcs.tess_input_vertices == nir->info.tess.tcs_vertices_out
                  ? scope_subgroup
                  : scope_workgroup;
            bld.barrier(aco_opcode::p_barrier,
                        memory_sync_info(storage_shared, semantic_acqrel, scope), scope);
         }

         if (ctx.stage == vertex_geometry_gs || ctx.stage == tess_eval_geometry_gs) {
            ctx.gs_wave_id = bld.pseudo(aco_opcode::p_extract, bld.def(s1, m0), bld.def(s1, scc),
                                        get_arg(&ctx, args->ac.merged_wave_info), Operand::c32(2u),
                                        Operand::c32(8u), Operand::zero());
         }
      } else if (ctx.stage == geometry_gs)
         ctx.gs_wave_id = get_arg(&ctx, args->ac.gs_wave_id);

      if (ctx.stage == fragment_fs)
         handle_bc_optimize(&ctx);

      visit_cf_list(&ctx, &func->body);

      if (ctx.program->info.so.num_outputs && ctx.stage.hw == HWStage::VS)
         emit_streamout(&ctx, 0);

      if (ctx.stage.hw == HWStage::VS) {
         create_vs_exports(&ctx);
      } else if (nir->info.stage == MESA_SHADER_GEOMETRY && !ngg_gs) {
         Builder bld(ctx.program, ctx.block);
         bld.barrier(aco_opcode::p_barrier,
                     memory_sync_info(storage_vmem_output, semantic_release, scope_device));
         bld.sopp(aco_opcode::s_sendmsg, bld.m0(ctx.gs_wave_id), -1,
                  sendmsg_gs_done(false, false, 0));
      }

      if (ctx.stage == fragment_fs) {
         create_fs_exports(&ctx);
      }

      if (endif_merged_wave_info) {
         begin_divergent_if_else(&ctx, &ic_merged_wave_info);
         end_divergent_if(&ctx, &ic_merged_wave_info);
      }

      if (i == 0 && ctx.stage == vertex_tess_control_hs && ctx.tcs_in_out_eq) {
         /* Outputs of the previous stage are inputs to the next stage */
         ctx.inputs = ctx.outputs;
         ctx.outputs = shader_io_state();
      }

      cleanup_context(&ctx);
   }

   program->config->float_mode = program->blocks[0].fp_mode.val;

   append_logical_end(ctx.block);
   ctx.block->kind |= block_kind_uniform;
   Builder bld(ctx.program, ctx.block);
   bld.sopp(aco_opcode::s_endpgm);

   cleanup_cfg(program);
}

void
select_gs_copy_shader(Program* program, struct nir_shader* gs_shader, ac_shader_config* config,
                      const struct aco_compiler_options* options,
                      const struct aco_shader_info* info,
                      const struct radv_shader_args* args)
{
   isel_context ctx =
      setup_isel_context(program, 1, &gs_shader, config, options, info, args, true, false);

   ctx.block->fp_mode = program->next_fp_mode;

   add_startpgm(&ctx);
   append_logical_start(ctx.block);

   Builder bld(ctx.program, ctx.block);

   Temp gsvs_ring = bld.smem(aco_opcode::s_load_dwordx4, bld.def(s4),
                             program->private_segment_buffer, Operand::c32(RING_GSVS_VS * 16u));

   Operand stream_id = Operand::zero();
   if (program->info.so.num_outputs)
      stream_id = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc),
                           get_arg(&ctx, ctx.args->ac.streamout_config), Operand::c32(0x20018u));

   Temp vtx_offset = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand::c32(2u),
                              get_arg(&ctx, ctx.args->ac.vertex_id));

   std::stack<if_context, std::vector<if_context>> if_contexts;

   for (unsigned stream = 0; stream < 4; stream++) {
      if (stream_id.isConstant() && stream != stream_id.constantValue())
         continue;

      unsigned num_components = program->info.gs.num_stream_output_components[stream];
      if (stream > 0 && (!num_components || !program->info.so.num_outputs))
         continue;

      memset(ctx.outputs.mask, 0, sizeof(ctx.outputs.mask));

      if (!stream_id.isConstant()) {
         Temp cond =
            bld.sopc(aco_opcode::s_cmp_eq_u32, bld.def(s1, scc), stream_id, Operand::c32(stream));
         if_contexts.emplace();
         begin_uniform_if_then(&ctx, &if_contexts.top(), cond);
         bld.reset(ctx.block);
      }

      unsigned offset = 0;
      for (unsigned i = 0; i <= VARYING_SLOT_VAR31; ++i) {
         if (program->info.gs.output_streams[i] != stream)
            continue;

         unsigned output_usage_mask = program->info.gs.output_usage_mask[i];
         unsigned length = util_last_bit(output_usage_mask);
         for (unsigned j = 0; j < length; ++j) {
            if (!(output_usage_mask & (1 << j)))
               continue;

            Temp val = bld.tmp(v1);
            unsigned const_offset = offset * program->info.gs.vertices_out * 16 * 4;
            load_vmem_mubuf(&ctx, val, gsvs_ring, vtx_offset, Temp(), const_offset, 4, 1, 0u, true,
                            true, true);

            ctx.outputs.mask[i] |= 1 << j;
            ctx.outputs.temps[i * 4u + j] = val;

            offset++;
         }
      }

      if (program->info.so.num_outputs) {
         emit_streamout(&ctx, stream);
         bld.reset(ctx.block);
      }

      if (stream == 0) {
         create_vs_exports(&ctx);
      }

      if (!stream_id.isConstant()) {
         begin_uniform_if_else(&ctx, &if_contexts.top());
         bld.reset(ctx.block);
      }
   }

   while (!if_contexts.empty()) {
      end_uniform_if(&ctx, &if_contexts.top());
      if_contexts.pop();
   }

   program->config->float_mode = program->blocks[0].fp_mode.val;

   append_logical_end(ctx.block);
   ctx.block->kind |= block_kind_uniform;
   bld.reset(ctx.block);
   bld.sopp(aco_opcode::s_endpgm);

   cleanup_cfg(program);
}

void
select_trap_handler_shader(Program* program, struct nir_shader* shader, ac_shader_config* config,
                           const struct aco_compiler_options* options,
                           const struct aco_shader_info* info,
                           const struct radv_shader_args* args)
{
   assert(options->gfx_level == GFX8);

   init_program(program, compute_cs, info, options->gfx_level, options->family, options->wgp_mode,
                config);

   isel_context ctx = {};
   ctx.program = program;
   ctx.args = args;
   ctx.options = options;
   ctx.stage = program->stage;

   ctx.block = ctx.program->create_and_insert_block();
   ctx.block->kind = block_kind_top_level;

   program->workgroup_size = 1; /* XXX */

   add_startpgm(&ctx);
   append_logical_start(ctx.block);

   Builder bld(ctx.program, ctx.block);

   /* Load the buffer descriptor from TMA. */
   bld.smem(aco_opcode::s_load_dwordx4, Definition(PhysReg{ttmp4}, s4), Operand(PhysReg{tma}, s2),
            Operand::zero());

   /* Store TTMP0-TTMP1. */
   bld.smem(aco_opcode::s_buffer_store_dwordx2, Operand(PhysReg{ttmp4}, s4), Operand::zero(),
            Operand(PhysReg{ttmp0}, s2), memory_sync_info(), true);

   uint32_t hw_regs_idx[] = {
      2, /* HW_REG_STATUS */
      3, /* HW_REG_TRAP_STS */
      4, /* HW_REG_HW_ID */
      7, /* HW_REG_IB_STS */
   };

   /* Store some hardware registers. */
   for (unsigned i = 0; i < ARRAY_SIZE(hw_regs_idx); i++) {
      /* "((size - 1) << 11) | register" */
      bld.sopk(aco_opcode::s_getreg_b32, Definition(PhysReg{ttmp8}, s1),
               ((20 - 1) << 11) | hw_regs_idx[i]);

      bld.smem(aco_opcode::s_buffer_store_dword, Operand(PhysReg{ttmp4}, s4),
               Operand::c32(8u + i * 4), Operand(PhysReg{ttmp8}, s1), memory_sync_info(), true);
   }

   program->config->float_mode = program->blocks[0].fp_mode.val;

   append_logical_end(ctx.block);
   ctx.block->kind |= block_kind_uniform;
   bld.sopp(aco_opcode::s_endpgm);

   cleanup_cfg(program);
}

Operand
get_arg_fixed(const struct radv_shader_args* args, struct ac_arg arg)
{
   assert(arg.used);

   enum ac_arg_regfile file = args->ac.args[arg.arg_index].file;
   unsigned size = args->ac.args[arg.arg_index].size;
   unsigned reg = args->ac.args[arg.arg_index].offset;

   return Operand(PhysReg(file == AC_ARG_SGPR ? reg : reg + 256),
                  RegClass(file == AC_ARG_SGPR ? RegType::sgpr : RegType::vgpr, size));
}

unsigned
load_vb_descs(Builder& bld, PhysReg dest, Operand base, unsigned start, unsigned max)
{
   unsigned count = MIN2((bld.program->dev.sgpr_limit - dest.reg()) / 4u, max);

   unsigned num_loads = (count / 4u) + util_bitcount(count & 0x3);
   if (bld.program->gfx_level >= GFX10 && num_loads > 1)
      bld.sopp(aco_opcode::s_clause, -1, num_loads - 1);
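
   /* On GFX10+, the s_clause above marks the following back-to-back SMEM
    * loads as a single clause so the hardware doesn't interleave other memory
    * instructions between them.
    */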

   for (unsigned i = 0; i < count;) {
      unsigned size = 1u << util_logbase2(MIN2(count - i, 4));

      if (size == 4)
         bld.smem(aco_opcode::s_load_dwordx16, Definition(dest, s16), base,
                  Operand::c32((start + i) * 16u));
      else if (size == 2)
         bld.smem(aco_opcode::s_load_dwordx8, Definition(dest, s8), base,
                  Operand::c32((start + i) * 16u));
      else
         bld.smem(aco_opcode::s_load_dwordx4, Definition(dest, s4), base,
                  Operand::c32((start + i) * 16u));

      dest = dest.advance(size * 16u);
      i += size;
   }

   return count;
}

Operand
calc_nontrivial_instance_id(Builder& bld, const struct radv_shader_args* args, unsigned index,
                            Operand instance_id, Operand start_instance, PhysReg tmp_sgpr,
                            PhysReg tmp_vgpr0, PhysReg tmp_vgpr1)
{
   bld.smem(aco_opcode::s_load_dwordx2, Definition(tmp_sgpr, s2),
            get_arg_fixed(args, args->prolog_inputs), Operand::c32(8u + index * 8u));

   wait_imm lgkm_imm;
   lgkm_imm.lgkm = 0;
   bld.sopp(aco_opcode::s_waitcnt, -1, lgkm_imm.pack(bld.program->gfx_level));
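
   /* What follows is a fast division-by-constant sequence in the style of
    * util/fast_idiv_by_const: a pre-shift taken from div_info byte 0, an
    * increment by byte 1, a multiply-high with the magic multiplier loaded at
    * tmp_sgpr + 4, then a post-shift by byte 2; the SDWA byte selects below
    * read those packed fields directly.
    */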

   Definition fetch_index_def(tmp_vgpr0, v1);
   Operand fetch_index(tmp_vgpr0, v1);

   Operand div_info(tmp_sgpr, s1);
   if (bld.program->gfx_level >= GFX8 && bld.program->gfx_level < GFX11) {
      /* use SDWA */
      if (bld.program->gfx_level < GFX9) {
         bld.vop1(aco_opcode::v_mov_b32, Definition(tmp_vgpr1, v1), div_info);
         div_info = Operand(tmp_vgpr1, v1);
      }

      bld.vop2(aco_opcode::v_lshrrev_b32, fetch_index_def, div_info, instance_id);

      Instruction* instr;
      if (bld.program->gfx_level >= GFX9)
         instr = bld.vop2_sdwa(aco_opcode::v_add_u32, fetch_index_def, div_info, fetch_index).instr;
      else
         instr = bld.vop2_sdwa(aco_opcode::v_add_co_u32, fetch_index_def, Definition(vcc, bld.lm),
                               div_info, fetch_index)
                    .instr;
      instr->sdwa().sel[0] = SubdwordSel::ubyte1;

      bld.vop3(aco_opcode::v_mul_hi_u32, fetch_index_def, Operand(tmp_sgpr.advance(4), s1),
               fetch_index);

      instr =
         bld.vop2_sdwa(aco_opcode::v_lshrrev_b32, fetch_index_def, div_info, fetch_index).instr;
      instr->sdwa().sel[0] = SubdwordSel::ubyte2;
   } else {
      Operand tmp_op(tmp_vgpr1, v1);
      Definition tmp_def(tmp_vgpr1, v1);

      bld.vop2(aco_opcode::v_lshrrev_b32, fetch_index_def, div_info, instance_id);

      bld.vop3(aco_opcode::v_bfe_u32, tmp_def, div_info, Operand::c32(8u), Operand::c32(8u));
      bld.vadd32(fetch_index_def, tmp_op, fetch_index, false, Operand(s2), true);

      bld.vop3(aco_opcode::v_mul_hi_u32, fetch_index_def, fetch_index,
               Operand(tmp_sgpr.advance(4), s1));

      bld.vop3(aco_opcode::v_bfe_u32, tmp_def, div_info, Operand::c32(16u), Operand::c32(8u));
      bld.vop2(aco_opcode::v_lshrrev_b32, fetch_index_def, tmp_op, fetch_index);
   }

   bld.vadd32(fetch_index_def, start_instance, fetch_index, false, Operand(s2), true);

   return fetch_index;
}
12170 select_vs_prolog(Program* program, const struct aco_vs_prolog_key* key, ac_shader_config* config,
12171 const struct aco_compiler_options* options,
12172 const struct aco_shader_info* info,
12173 const struct radv_shader_args* args, unsigned* num_preserved_sgprs)
12175 assert(key->num_attributes > 0);
12177 /* This should be enough for any shader/stage. */
12178 unsigned max_user_sgprs = options->gfx_level >= GFX9 ? 32 : 16;
12179 *num_preserved_sgprs = max_user_sgprs + 14;
12181 init_program(program, compute_cs, info, options->gfx_level, options->family, options->wgp_mode,
12184 Block* block = program->create_and_insert_block();
12185 block->kind = block_kind_top_level;
12187 program->workgroup_size = 64;
12188 calc_min_waves(program);
12190 Builder bld(program, block);
12192 block->instructions.reserve(16 + key->num_attributes * 4);
12194 bld.sopp(aco_opcode::s_setprio, -1u, 0x3u);
12196 uint32_t attrib_mask = BITFIELD_MASK(key->num_attributes);
12197 bool has_nontrivial_divisors = key->state.nontrivial_divisors & attrib_mask;
12203 PhysReg vertex_buffers(align(*num_preserved_sgprs, 2));
12204 PhysReg prolog_input = vertex_buffers.advance(8);
12206 align((has_nontrivial_divisors ? prolog_input : vertex_buffers).advance(8).reg(), 4));
12208 Operand start_instance = get_arg_fixed(args, args->ac.start_instance);
12209 Operand instance_id = get_arg_fixed(args, args->ac.instance_id);
12211 PhysReg attributes_start(256 + args->ac.num_vgprs_used);
12212 /* choose vgprs that won't be used for anything else until the last attribute load */
12213 PhysReg vertex_index(attributes_start.reg() + key->num_attributes * 4 - 1);
12214 PhysReg instance_index(attributes_start.reg() + key->num_attributes * 4 - 2);
12215 PhysReg start_instance_vgpr(attributes_start.reg() + key->num_attributes * 4 - 3);
12216 PhysReg nontrivial_tmp_vgpr0(attributes_start.reg() + key->num_attributes * 4 - 4);
12217 PhysReg nontrivial_tmp_vgpr1(attributes_start.reg() + key->num_attributes * 4);
12219 bld.sop1(aco_opcode::s_mov_b32, Definition(vertex_buffers, s1),
12220 get_arg_fixed(args, args->ac.vertex_buffers));
12221 if (options->address32_hi >= 0xffff8000 || options->address32_hi <= 0x7fff) {
12222 bld.sopk(aco_opcode::s_movk_i32, Definition(vertex_buffers.advance(4), s1),
12223 options->address32_hi & 0xFFFF);
12225 bld.sop1(aco_opcode::s_mov_b32, Definition(vertex_buffers.advance(4), s1),
12226 Operand::c32((unsigned)options->address32_hi));
12229 /* calculate vgpr requirements */
12230 unsigned num_vgprs = attributes_start.reg() - 256;
12231 num_vgprs += key->num_attributes * 4;
12232 if (has_nontrivial_divisors && program->gfx_level <= GFX8)
12233 num_vgprs++; /* make space for nontrivial_tmp_vgpr1 */
12234 unsigned num_sgprs = 0;
12236 const struct ac_vtx_format_info* vtx_info_table =
12237 ac_get_vtx_format_info_table(GFX8, CHIP_POLARIS10);
12239 for (unsigned loc = 0; loc < key->num_attributes;) {
12240 unsigned num_descs =
12241 load_vb_descs(bld, desc, Operand(vertex_buffers, s2), loc, key->num_attributes - loc);
12242 num_sgprs = MAX2(num_sgprs, desc.advance(num_descs * 16u).reg());
12245 /* perform setup while we load the descriptors */
12246 if (key->is_ngg || key->next_stage != MESA_SHADER_VERTEX) {
12247 Operand count = get_arg_fixed(args, args->ac.merged_wave_info);
12248 bld.sop2(aco_opcode::s_bfm_b64, Definition(exec, s2), count, Operand::c32(0u));
12249 if (program->wave_size == 64) {
12250 bld.sopc(aco_opcode::s_bitcmp1_b32, Definition(scc, s1), count,
12251 Operand::c32(6u /* log2(64) */));
12252 bld.sop2(aco_opcode::s_cselect_b64, Definition(exec, s2), Operand::c64(UINT64_MAX),
12253 Operand(exec, s2), Operand(scc, s1));
12257 bool needs_instance_index = false;
12258 bool needs_start_instance = false;
12259 u_foreach_bit(i, key->state.instance_rate_inputs & attrib_mask)
12261 needs_instance_index |= key->state.divisors[i] == 1;
12262 needs_start_instance |= key->state.divisors[i] == 0;
12264 bool needs_vertex_index = ~key->state.instance_rate_inputs & attrib_mask;
12265 if (needs_vertex_index)
12266 bld.vadd32(Definition(vertex_index, v1), get_arg_fixed(args, args->ac.base_vertex),
12267 get_arg_fixed(args, args->ac.vertex_id), false, Operand(s2), true);
12268 if (needs_instance_index)
12269 bld.vadd32(Definition(instance_index, v1), start_instance, instance_id, false,
12270 Operand(s2), true);
12271 if (needs_start_instance)
12272 bld.vop1(aco_opcode::v_mov_b32, Definition(start_instance_vgpr, v1), start_instance);
12275 bld.sopp(aco_opcode::s_waitcnt, -1, lgkm_imm.pack(program->gfx_level));
12277 for (unsigned i = 0; i < num_descs;) {
12278 PhysReg dest(attributes_start.reg() + loc * 4u);
12280 /* calculate index */
12281 Operand fetch_index = Operand(vertex_index, v1);
12282 if (key->state.instance_rate_inputs & (1u << loc)) {
12283 uint32_t divisor = key->state.divisors[loc];
12285 fetch_index = instance_id;
12286 if (key->state.nontrivial_divisors & (1u << loc)) {
12288 util_bitcount(key->state.nontrivial_divisors & BITFIELD_MASK(loc));
12289 fetch_index = calc_nontrivial_instance_id(
12290 bld, args, index, instance_id, start_instance, prolog_input,
12291 nontrivial_tmp_vgpr0, nontrivial_tmp_vgpr1);
12293 fetch_index = Operand(instance_index, v1);
12296 fetch_index = Operand(start_instance_vgpr, v1);
12301 PhysReg cur_desc = desc.advance(i * 16);
12302 if ((key->misaligned_mask & (1u << loc))) {
12303 const struct ac_vtx_format_info* vtx_info = &vtx_info_table[key->state.formats[loc]];
12305 assert(vtx_info->has_hw_format & 0x1);
12306 unsigned dfmt = vtx_info->hw_format[0] & 0xf;
12307 unsigned nfmt = vtx_info->hw_format[0] >> 4;
12309 for (unsigned j = 0; j < vtx_info->num_channels; j++) {
12310 bool post_shuffle = key->state.post_shuffle & (1u << loc);
12311 unsigned offset = vtx_info->chan_byte_size * (post_shuffle && j < 3 ? 2 - j : j);
12313 /* Use MUBUF to workaround hangs for byte-aligned dword loads. The Vulkan spec
12314 * doesn't require this to work, but some GL CTS tests over Zink do this anyway.
12315 * MTBUF can hang, but MUBUF doesn't (probably gives garbage, but GL CTS doesn't
12318 if (dfmt == V_008F0C_BUF_DATA_FORMAT_32)
12319 bld.mubuf(aco_opcode::buffer_load_dword, Definition(dest.advance(j * 4u), v1),
12320 Operand(cur_desc, s4), fetch_index, Operand::c32(0u), offset, false,
12322 else if (vtx_info->chan_byte_size == 8)
12323 bld.mtbuf(aco_opcode::tbuffer_load_format_xy,
12324 Definition(dest.advance(j * 8u), v2), Operand(cur_desc, s4),
12325 fetch_index, Operand::c32(0u), dfmt, nfmt, offset, false, true);
12327 bld.mtbuf(aco_opcode::tbuffer_load_format_x, Definition(dest.advance(j * 4u), v1),
12328 Operand(cur_desc, s4), fetch_index, Operand::c32(0u), dfmt, nfmt,
12329 offset, false, true);
12332 nfmt == V_008F0C_BUF_NUM_FORMAT_UINT || nfmt == V_008F0C_BUF_NUM_FORMAT_SINT
12335 /* 22.1.1. Attribute Location and Component Assignment of Vulkan 1.3 specification:
12336 * For 64-bit data types, no default attribute values are provided. Input variables must
12337 * not use more components than provided by the attribute.
12339 for (unsigned j = vtx_info->num_channels; vtx_info->chan_byte_size != 8 && j < 4; j++) {
12340 bld.vop1(aco_opcode::v_mov_b32, Definition(dest.advance(j * 4u), v1),
12341 Operand::c32(j == 3 ? one : 0u));
12344 unsigned slots = vtx_info->chan_byte_size == 8 && vtx_info->num_channels > 2 ? 2 : 1;
            bld.mubuf(aco_opcode::buffer_load_format_xyzw, Definition(dest, v4),
                      Operand(cur_desc, s4), fetch_index, Operand::c32(0u), 0u, false, false, true);
            loc++;
            i++;
         }
      }
   }
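
   /* If any alpha channel needs adjustment, wait for the attribute loads to finish
    * (vmcnt(0)) before rewriting their results in place. */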
   if (key->state.alpha_adjust_lo | key->state.alpha_adjust_hi) {
      wait_imm vm_imm;
      vm_imm.vm = 0;
      bld.sopp(aco_opcode::s_waitcnt, -1, vm_imm.pack(program->gfx_level));
   }

   /* For 2_10_10_10 formats the alpha is handled as unsigned by pre-Vega HW,
    * so we may need to fix it up. */
   u_foreach_bit(loc, (key->state.alpha_adjust_lo | key->state.alpha_adjust_hi))
   {
      PhysReg alpha(attributes_start.reg() + loc * 4u + 3);

      unsigned alpha_adjust = (key->state.alpha_adjust_lo >> loc) & 0x1;
      alpha_adjust |= ((key->state.alpha_adjust_hi >> loc) & 0x1) << 1;
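
      /* SSCALED alpha is loaded as a float; convert it back to an integer first so the
       * sign extension below operates on the raw two-bit value. */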
      if (alpha_adjust == AC_ALPHA_ADJUST_SSCALED)
         bld.vop1(aco_opcode::v_cvt_u32_f32, Definition(alpha, v1), Operand(alpha, v1));

      /* For the integer-like cases, do a natural sign extension.
       *
       * For the SNORM case, the values are 0.0, 0.333, 0.666, 1.0
       * and happen to contain 0, 1, 2, 3 as the two LSBs of the
       * exponent.
       */
      unsigned offset = alpha_adjust == AC_ALPHA_ADJUST_SNORM ? 23u : 0u;
      bld.vop3(aco_opcode::v_bfe_i32, Definition(alpha, v1), Operand(alpha, v1),
               Operand::c32(offset), Operand::c32(2u));

      /* Convert back to the right type. */
      if (alpha_adjust == AC_ALPHA_ADJUST_SNORM) {
         bld.vop1(aco_opcode::v_cvt_f32_i32, Definition(alpha, v1), Operand(alpha, v1));
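         /* The sign-extended two-bit value is in [-2, 1]; clamp with max(x, -1.0f) so
          * -2 maps to -1.0. */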
         bld.vop2(aco_opcode::v_max_f32, Definition(alpha, v1), Operand::c32(0xbf800000u),
                  Operand(alpha, v1));
      } else if (alpha_adjust == AC_ALPHA_ADJUST_SSCALED) {
         bld.vop1(aco_opcode::v_cvt_f32_i32, Definition(alpha, v1), Operand(alpha, v1));
      }
   }

   block->kind |= block_kind_uniform;

   /* continue on to the main shader */
   Operand continue_pc = get_arg_fixed(args, args->prolog_inputs);
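
   /* With nontrivial divisors, the prolog_inputs argument is a pointer to the
    * continuation data, so load the main shader's PC from it; otherwise the
    * argument itself is the PC. */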
   if (has_nontrivial_divisors) {
      bld.smem(aco_opcode::s_load_dwordx2, Definition(prolog_input, s2),
               get_arg_fixed(args, args->prolog_inputs), Operand::c32(0u));
      bld.sopp(aco_opcode::s_waitcnt, -1, lgkm_imm.pack(program->gfx_level));
      continue_pc = Operand(prolog_input, s2);
   }

   bld.sop1(aco_opcode::s_setpc_b64, continue_pc);

   program->config->float_mode = program->blocks[0].fp_mode.val;
   /* addition on GFX6-8 requires a carry-out (we use VCC) */
   program->needs_vcc = program->gfx_level <= GFX8;
   program->config->num_vgprs = get_vgpr_alloc(program, num_vgprs);
   program->config->num_sgprs = get_sgpr_alloc(program, num_sgprs);
}

void
select_ps_epilog(Program* program, const struct aco_ps_epilog_key* key, ac_shader_config* config,
                 const struct aco_compiler_options* options,
                 const struct aco_shader_info* info,
                 const struct radv_shader_args* args)
{
   isel_context ctx =
      setup_isel_context(program, 0, NULL, config, options, info, args, false, true);

   ctx.block->fp_mode = program->next_fp_mode;

   add_startpgm(&ctx);
   append_logical_start(ctx.block);

   Builder bld(ctx.program, ctx.block);

   /* Export all color render targets */
   bool exported = false;

   for (unsigned i = 0; i < 8; i++) {
      unsigned col_format = (key->spi_shader_col_format >> (i * 4)) & 0xf;
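
      /* Each target has a 4-bit export format; SPI_SHADER_ZERO means this MRT is
       * not written. */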
      if (col_format == V_028714_SPI_SHADER_ZERO)
         continue;

      struct mrt_color_export out;

      out.slot = i;
      out.write_mask = 0xf;
      out.col_format = col_format;
      out.is_int8 = (key->color_is_int8 >> i) & 1;
      out.is_int10 = (key->color_is_int10 >> i) & 1;
      out.enable_mrt_output_nan_fixup = (key->enable_mrt_output_nan_fixup >> i) & 1;

      Temp inputs = get_arg(&ctx, ctx.args->ps_epilog_inputs[i]);
      for (unsigned c = 0; c < 4; ++c) {
         out.values[c] = Operand(emit_extract_vector(&ctx, inputs, c, v1));
      }

      exported |= export_fs_mrt_color(&ctx, &out, true);
   }
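
   /* A fragment shader must issue at least one export; emit a null export if no
    * color target was written. */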
   if (!exported)
      create_fs_null_export(&ctx);

   program->config->float_mode = program->blocks[0].fp_mode.val;

   append_logical_end(ctx.block);
   ctx.block->kind |= block_kind_export_end;
   bld.reset(ctx.block);
   bld.sopp(aco_opcode::s_endpgm);

   cleanup_cfg(program);
}