/*
 * Copyright © 2018 Valve Corporation
 * Copyright © 2018 Google
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
26 #include "aco_instruction_selection.h"
28 #include "aco_builder.h"
31 #include "common/ac_nir.h"
32 #include "common/sid.h"
34 #include "util/fast_idiv_by_const.h"
35 #include "util/memstream.h"
48 #define isel_err(...) _isel_err(ctx, __FILE__, __LINE__, __VA_ARGS__)
static void
_isel_err(isel_context* ctx, const char* file, unsigned line, const nir_instr* instr,
          const char* msg)
{
   char* out;
   size_t outsize;
   struct u_memstream mem;
   u_memstream_open(&mem, &out, &outsize);
   FILE* const memf = u_memstream_get(&mem);
   fprintf(memf, "%s: ", msg);
   nir_print_instr(instr, memf);
   u_memstream_close(&mem);
   _aco_err(ctx->program, file, line, out);
   free(out);
}
72 bool exec_potentially_empty_discard_old;
73 bool exec_potentially_empty_break_old;
74 uint16_t exec_potentially_empty_break_depth_old;
78 bool uniform_has_then_branch;
79 bool then_branch_divergent;
87 unsigned header_idx_old;
89 bool divergent_cont_old;
90 bool divergent_branch_old;
91 bool divergent_if_old;
94 static bool visit_cf_list(struct isel_context* ctx, struct exec_list* list);
97 add_logical_edge(unsigned pred_idx, Block* succ)
99 succ->logical_preds.emplace_back(pred_idx);
103 add_linear_edge(unsigned pred_idx, Block* succ)
105 succ->linear_preds.emplace_back(pred_idx);
109 add_edge(unsigned pred_idx, Block* succ)
111 add_logical_edge(pred_idx, succ);
112 add_linear_edge(pred_idx, succ);
116 append_logical_start(Block* b)
118 Builder(NULL, b).pseudo(aco_opcode::p_logical_start);
122 append_logical_end(Block* b)
124 Builder(NULL, b).pseudo(aco_opcode::p_logical_end);
128 get_ssa_temp(struct isel_context* ctx, nir_ssa_def* def)
130 uint32_t id = ctx->first_temp_id + def->index;
131 return Temp(id, ctx->program->temp_rc[id]);
135 emit_mbcnt(isel_context* ctx, Temp dst, Operand mask = Operand(), Operand base = Operand::zero())
137 Builder bld(ctx->program, ctx->block);
138 assert(mask.isUndefined() || mask.isTemp() || (mask.isFixed() && mask.physReg() == exec));
139 assert(mask.isUndefined() || mask.bytes() == bld.lm.bytes());
141 if (ctx->program->wave_size == 32) {
142 Operand mask_lo = mask.isUndefined() ? Operand::c32(-1u) : mask;
143 return bld.vop3(aco_opcode::v_mbcnt_lo_u32_b32, Definition(dst), mask_lo, base);
146 Operand mask_lo = Operand::c32(-1u);
147 Operand mask_hi = Operand::c32(-1u);
150 RegClass rc = RegClass(mask.regClass().type(), 1);
151 Builder::Result mask_split =
152 bld.pseudo(aco_opcode::p_split_vector, bld.def(rc), bld.def(rc), mask);
153 mask_lo = Operand(mask_split.def(0).getTemp());
154 mask_hi = Operand(mask_split.def(1).getTemp());
155 } else if (mask.physReg() == exec) {
156 mask_lo = Operand(exec_lo, s1);
157 mask_hi = Operand(exec_hi, s1);
160 Temp mbcnt_lo = bld.vop3(aco_opcode::v_mbcnt_lo_u32_b32, bld.def(v1), mask_lo, base);
162 if (ctx->program->chip_class <= GFX7)
163 return bld.vop2(aco_opcode::v_mbcnt_hi_u32_b32, Definition(dst), mask_hi, mbcnt_lo);
165 return bld.vop3(aco_opcode::v_mbcnt_hi_u32_b32_e64, Definition(dst), mask_hi, mbcnt_lo);
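   /* In effect, each lane receives base + popcount(mask & ((1ull << lane_id) - 1)):
    * v_mbcnt_lo counts the set mask bits below the lane within the low 32 lanes and
    * v_mbcnt_hi adds the ones from the high 32 lanes, so wave32 only needs the first
    * instruction. With the default all-ones mask this is simply the lane index; with
    * mask = exec it yields the number of active lanes below the current one, a common
    * building block for wave-wide prefix sums and compaction. */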
169 emit_wqm(Builder& bld, Temp src, Temp dst = Temp(0, s1), bool program_needs_wqm = false)
172 dst = bld.tmp(src.regClass());
174 assert(src.size() == dst.size());
176 if (bld.program->stage != fragment_fs) {
180 bld.copy(Definition(dst), src);
184 bld.pseudo(aco_opcode::p_wqm, Definition(dst), src);
185 bld.program->needs_wqm |= program_needs_wqm;
190 emit_bpermute(isel_context* ctx, Builder& bld, Temp index, Temp data)
192 if (index.regClass() == s1)
193 return bld.readlane(bld.def(s1), data, index);
195 if (ctx->options->chip_class <= GFX7) {
196 /* GFX6-7: there is no bpermute instruction */
197 Operand index_op(index);
198 Operand input_data(data);
199 index_op.setLateKill(true);
200 input_data.setLateKill(true);
202 return bld.pseudo(aco_opcode::p_bpermute, bld.def(v1), bld.def(bld.lm), bld.def(bld.lm, vcc),
203 index_op, input_data);
204 } else if (ctx->options->chip_class >= GFX10 && ctx->program->wave_size == 64) {
206 /* GFX10 wave64 mode: emulate full-wave bpermute */
      Temp index_is_lo =
         bld.vopc(aco_opcode::v_cmp_ge_u32, bld.def(bld.lm), Operand::c32(31u), index);
209 Builder::Result index_is_lo_split =
210 bld.pseudo(aco_opcode::p_split_vector, bld.def(s1), bld.def(s1), index_is_lo);
211 Temp index_is_lo_n1 = bld.sop1(aco_opcode::s_not_b32, bld.def(s1), bld.def(s1, scc),
212 index_is_lo_split.def(1).getTemp());
213 Operand same_half = bld.pseudo(aco_opcode::p_create_vector, bld.def(s2),
214 index_is_lo_split.def(0).getTemp(), index_is_lo_n1);
215 Operand index_x4 = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand::c32(2u), index);
216 Operand input_data(data);
218 index_x4.setLateKill(true);
219 input_data.setLateKill(true);
220 same_half.setLateKill(true);
      /* We need one pair of shared VGPRs:
       * note that these have twice the allocation granularity of normal VGPRs */
224 ctx->program->config->num_shared_vgprs = 2 * ctx->program->dev.vgpr_alloc_granule;
226 return bld.pseudo(aco_opcode::p_bpermute, bld.def(v1), bld.def(s2), bld.def(s1, scc),
227 index_x4, input_data, same_half);
229 /* GFX8-9 or GFX10 wave32: bpermute works normally */
230 Temp index_x4 = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand::c32(2u), index);
231 return bld.ds(aco_opcode::ds_bpermute_b32, bld.def(v1), index_x4, data);
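   /* Summary of the paths above: a uniform index becomes a single v_readlane; GFX6-7
    * have no bpermute at all, so the p_bpermute pseudo (with its extra lane-mask
    * definitions as scratch for the later lowering) is expanded after instruction
    * selection; GFX10 wave64 can only ds_bpermute within a 32-lane half, so crossing
    * halves needs the pseudo plus one pair of shared VGPRs, with same_half telling the
    * lowering which lanes may take the fast path. The index is shifted left by 2
    * because ds_bpermute_b32 takes a byte address and selects the source lane from it
    * divided by 4. */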
236 emit_masked_swizzle(isel_context* ctx, Builder& bld, Temp src, unsigned mask)
238 if (ctx->options->chip_class >= GFX8) {
239 unsigned and_mask = mask & 0x1f;
240 unsigned or_mask = (mask >> 5) & 0x1f;
241 unsigned xor_mask = (mask >> 10) & 0x1f;
243 uint16_t dpp_ctrl = 0xffff;
245 if (and_mask == 0x1f && or_mask < 4 && xor_mask < 4) {
246 unsigned res[4] = {0, 1, 2, 3};
247 for (unsigned i = 0; i < 4; i++)
248 res[i] = ((res[i] | or_mask) ^ xor_mask) & 0x3;
249 dpp_ctrl = dpp_quad_perm(res[0], res[1], res[2], res[3]);
250 } else if (and_mask == 0x1f && !or_mask && xor_mask == 8) {
251 dpp_ctrl = dpp_row_rr(8);
252 } else if (and_mask == 0x1f && !or_mask && xor_mask == 0xf) {
253 dpp_ctrl = dpp_row_mirror;
254 } else if (and_mask == 0x1f && !or_mask && xor_mask == 0x7) {
255 dpp_ctrl = dpp_row_half_mirror;
256 } else if (ctx->options->chip_class >= GFX10 && (and_mask & 0x18) == 0x18 && or_mask < 8 && xor_mask < 8) {
257 // DPP8 comes last, as it does not allow several modifiers like `abs` that are available with DPP16
258 Builder::Result ret = bld.vop1_dpp8(aco_opcode::v_mov_b32, bld.def(v1), src);
         for (unsigned i = 0; i < 8; i++) {
            ret.instr->dpp8().lane_sel[i] = (((i & and_mask) | or_mask) ^ xor_mask) & 0x7;
         }
         return ret;
265 if (dpp_ctrl != 0xffff)
266 return bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), src, dpp_ctrl);
269 return bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), src, mask, 0, false);
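   /* The 15-bit swizzle mask encodes, within each group of 32 lanes,
    *    new_lane = ((lane & and_mask) | or_mask) ^ xor_mask
    * with bits [4:0] = and_mask, [9:5] = or_mask and [14:10] = xor_mask. The special
    * cases above just recognize masks that map to cheaper DPP controls, e.g.
    * and=0x1f/or=0/xor=0xf is a row mirror and xor=0x7 a half-row mirror; everything
    * else falls back to ds_swizzle_b32 with the raw mask. */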
273 as_vgpr(Builder& bld, Temp val)
275 if (val.type() == RegType::sgpr)
276 return bld.copy(bld.def(RegType::vgpr, val.size()), val);
277 assert(val.type() == RegType::vgpr);
282 as_vgpr(isel_context* ctx, Temp val)
284 Builder bld(ctx->program, ctx->block);
285 return as_vgpr(bld, val);
288 // assumes a != 0xffffffff
290 emit_v_div_u32(isel_context* ctx, Temp dst, Temp a, uint32_t b)
293 Builder bld(ctx->program, ctx->block);
295 if (util_is_power_of_two_or_zero(b)) {
296 bld.vop2(aco_opcode::v_lshrrev_b32, Definition(dst), Operand::c32(util_logbase2(b)), a);
300 util_fast_udiv_info info = util_compute_fast_udiv_info(b, 32, 32);
302 assert(info.multiplier <= 0xffffffff);
304 bool pre_shift = info.pre_shift != 0;
305 bool increment = info.increment != 0;
306 bool multiply = true;
307 bool post_shift = info.post_shift != 0;
309 if (!pre_shift && !increment && !multiply && !post_shift) {
310 bld.copy(Definition(dst), a);
   Temp pre_shift_dst = a;
   if (pre_shift) {
      pre_shift_dst = (increment || multiply || post_shift) ? bld.tmp(v1) : dst;
      bld.vop2(aco_opcode::v_lshrrev_b32, Definition(pre_shift_dst), Operand::c32(info.pre_shift),
               a);
   }
   Temp increment_dst = pre_shift_dst;
   if (increment) {
      increment_dst = (post_shift || multiply) ? bld.tmp(v1) : dst;
      bld.vadd32(Definition(increment_dst), Operand::c32(info.increment), pre_shift_dst);
   }
   Temp multiply_dst = increment_dst;
   if (multiply) {
      multiply_dst = post_shift ? bld.tmp(v1) : dst;
      bld.vop3(aco_opcode::v_mul_hi_u32, Definition(multiply_dst), increment_dst,
               bld.copy(bld.def(v1), Operand::c32(info.multiplier)));
   }
   if (post_shift)
      bld.vop2(aco_opcode::v_lshrrev_b32, Definition(dst), Operand::c32(info.post_shift),
               multiply_dst);
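   /* The general path is the usual multiply-by-magic-number division:
    *    q = (((a >> pre_shift) + increment) * multiplier) >> 32 >> post_shift
    * with the constants chosen by util_compute_fast_udiv_info() so that q == a / b for
    * every 32-bit a (v_mul_hi_u32 supplies the ">> 32"). Intermediate temps are only
    * allocated when a later step still runs; otherwise a step writes dst directly. */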
341 emit_extract_vector(isel_context* ctx, Temp src, uint32_t idx, Temp dst)
343 Builder bld(ctx->program, ctx->block);
344 bld.pseudo(aco_opcode::p_extract_vector, Definition(dst), src, Operand::c32(idx));
348 emit_extract_vector(isel_context* ctx, Temp src, uint32_t idx, RegClass dst_rc)
350 /* no need to extract the whole vector */
351 if (src.regClass() == dst_rc) {
356 assert(src.bytes() > (idx * dst_rc.bytes()));
357 Builder bld(ctx->program, ctx->block);
358 auto it = ctx->allocated_vec.find(src.id());
359 if (it != ctx->allocated_vec.end() && dst_rc.bytes() == it->second[idx].regClass().bytes()) {
360 if (it->second[idx].regClass() == dst_rc) {
361 return it->second[idx];
363 assert(!dst_rc.is_subdword());
364 assert(dst_rc.type() == RegType::vgpr && it->second[idx].type() == RegType::sgpr);
365 return bld.copy(bld.def(dst_rc), it->second[idx]);
369 if (dst_rc.is_subdword())
370 src = as_vgpr(ctx, src);
372 if (src.bytes() == dst_rc.bytes()) {
374 return bld.copy(bld.def(dst_rc), src);
376 Temp dst = bld.tmp(dst_rc);
377 emit_extract_vector(ctx, src, idx, dst);
383 emit_split_vector(isel_context* ctx, Temp vec_src, unsigned num_components)
   if (num_components == 1)
      return;
   if (ctx->allocated_vec.find(vec_src.id()) != ctx->allocated_vec.end())
      return;
   RegClass rc;
   if (num_components > vec_src.size()) {
      if (vec_src.type() == RegType::sgpr) {
         /* should still help get_alu_src() */
         emit_split_vector(ctx, vec_src, vec_src.size());
         return;
      }
      /* sub-dword split */
      rc = RegClass(RegType::vgpr, vec_src.bytes() / num_components).as_subdword();
   } else {
      rc = RegClass(vec_src.type(), vec_src.size() / num_components);
   }
401 aco_ptr<Pseudo_instruction> split{create_instruction<Pseudo_instruction>(
402 aco_opcode::p_split_vector, Format::PSEUDO, 1, num_components)};
403 split->operands[0] = Operand(vec_src);
404 std::array<Temp, NIR_MAX_VEC_COMPONENTS> elems;
405 for (unsigned i = 0; i < num_components; i++) {
406 elems[i] = ctx->program->allocateTmp(rc);
407 split->definitions[i] = Definition(elems[i]);
409 ctx->block->instructions.emplace_back(std::move(split));
410 ctx->allocated_vec.emplace(vec_src.id(), elems);
413 /* This vector expansion uses a mask to determine which elements in the new vector
414 * come from the original vector. The other elements are undefined. */
416 expand_vector(isel_context* ctx, Temp vec_src, Temp dst, unsigned num_components, unsigned mask,
417 bool zero_padding = false)
419 assert(vec_src.type() == RegType::vgpr);
420 Builder bld(ctx->program, ctx->block);
422 if (dst.type() == RegType::sgpr && num_components > dst.size()) {
423 Temp tmp_dst = bld.tmp(RegClass::get(RegType::vgpr, 2 * num_components));
424 expand_vector(ctx, vec_src, tmp_dst, num_components, mask, zero_padding);
425 bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), tmp_dst);
426 ctx->allocated_vec[dst.id()] = ctx->allocated_vec[tmp_dst.id()];
430 emit_split_vector(ctx, vec_src, util_bitcount(mask));
435 if (num_components == 1) {
436 if (dst.type() == RegType::sgpr)
437 bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), vec_src);
439 bld.copy(Definition(dst), vec_src);
443 unsigned component_bytes = dst.bytes() / num_components;
444 RegClass src_rc = RegClass::get(RegType::vgpr, component_bytes);
445 RegClass dst_rc = RegClass::get(dst.type(), component_bytes);
446 assert(dst.type() == RegType::vgpr || !src_rc.is_subdword());
447 std::array<Temp, NIR_MAX_VEC_COMPONENTS> elems;
449 Temp padding = Temp(0, dst_rc);
451 padding = bld.copy(bld.def(dst_rc), Operand::zero(component_bytes));
453 aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(
454 aco_opcode::p_create_vector, Format::PSEUDO, num_components, 1)};
455 vec->definitions[0] = Definition(dst);
457 for (unsigned i = 0; i < num_components; i++) {
458 if (mask & (1 << i)) {
459 Temp src = emit_extract_vector(ctx, vec_src, k++, src_rc);
460 if (dst.type() == RegType::sgpr)
461 src = bld.as_uniform(src);
462 vec->operands[i] = Operand(src);
465 vec->operands[i] = Operand::zero(component_bytes);
469 ctx->block->instructions.emplace_back(std::move(vec));
470 ctx->allocated_vec.emplace(dst.id(), elems);
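   /* Example: expanding a 2-component source into a 4-component dst with mask = 0b0101
    * yields (src.x, pad, src.y, pad): every set mask bit consumes the next source
    * component in order, and the remaining components are padding (zero-initialized
    * when zero_padding is set, otherwise their contents are not meaningful). */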
473 /* adjust misaligned small bit size loads */
475 byte_align_scalar(isel_context* ctx, Temp vec, Operand offset, Temp dst)
477 Builder bld(ctx->program, ctx->block);
   Operand shift;
   Temp select = Temp();
   if (offset.isConstant()) {
      assert(offset.constantValue() && offset.constantValue() < 4);
      shift = Operand::c32(offset.constantValue() * 8);
   } else {
      /* bit_offset = 8 * (offset & 0x3) */
      Temp tmp =
         bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), offset, Operand::c32(3u));
      select = bld.tmp(s1);
      shift = bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.scc(Definition(select)), tmp,
                       Operand::c32(3u));
   }
492 if (vec.size() == 1) {
493 bld.sop2(aco_opcode::s_lshr_b32, Definition(dst), bld.def(s1, scc), vec, shift);
494 } else if (vec.size() == 2) {
495 Temp tmp = dst.size() == 2 ? dst : bld.tmp(s2);
496 bld.sop2(aco_opcode::s_lshr_b64, Definition(tmp), bld.def(s1, scc), vec, shift);
      if (dst.size() == 2)
         emit_split_vector(ctx, dst, 2);
      else
         emit_extract_vector(ctx, tmp, 0, dst);
501 } else if (vec.size() == 3 || vec.size() == 4) {
502 Temp lo = bld.tmp(s2), hi;
503 if (vec.size() == 3) {
504 /* this can happen if we use VMEM for a uniform load */
         hi = bld.tmp(s1);
         bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), vec);
      } else {
         hi = bld.tmp(s2);
         bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), vec);
         hi = bld.pseudo(aco_opcode::p_extract_vector, bld.def(s1), hi, Operand::zero());
      }
      if (select != Temp())
         hi = bld.sop2(aco_opcode::s_cselect_b32, bld.def(s1), hi, Operand::zero(),
                       bld.scc(select));
515 lo = bld.sop2(aco_opcode::s_lshr_b64, bld.def(s2), bld.def(s1, scc), lo, shift);
516 Temp mid = bld.tmp(s1);
517 lo = bld.pseudo(aco_opcode::p_split_vector, bld.def(s1), Definition(mid), lo);
518 hi = bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.def(s1, scc), hi, shift);
519 mid = bld.sop2(aco_opcode::s_or_b32, bld.def(s1), bld.def(s1, scc), hi, mid);
520 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, mid);
521 emit_split_vector(ctx, dst, 2);
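   /* All branches compute dst = vec >> (8 * (offset & 3)), i.e. they move the addressed
    * bytes of a dword-aligned scalar load down to bit 0. 'select' (the SCC result of
    * computing the shift amount) only exists for variable offsets and lets the 3/4-dword
    * path drop the third dword's contribution when the offset turns out to be aligned. */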
526 byte_align_vector(isel_context* ctx, Temp vec, Operand offset, Temp dst, unsigned component_size)
528 Builder bld(ctx->program, ctx->block);
529 if (offset.isTemp()) {
530 Temp tmp[4] = {vec, vec, vec, vec};
532 if (vec.size() == 4) {
533 tmp[0] = bld.tmp(v1), tmp[1] = bld.tmp(v1), tmp[2] = bld.tmp(v1), tmp[3] = bld.tmp(v1);
534 bld.pseudo(aco_opcode::p_split_vector, Definition(tmp[0]), Definition(tmp[1]),
535 Definition(tmp[2]), Definition(tmp[3]), vec);
536 } else if (vec.size() == 3) {
537 tmp[0] = bld.tmp(v1), tmp[1] = bld.tmp(v1), tmp[2] = bld.tmp(v1);
538 bld.pseudo(aco_opcode::p_split_vector, Definition(tmp[0]), Definition(tmp[1]),
539 Definition(tmp[2]), vec);
540 } else if (vec.size() == 2) {
541 tmp[0] = bld.tmp(v1), tmp[1] = bld.tmp(v1), tmp[2] = tmp[1];
542 bld.pseudo(aco_opcode::p_split_vector, Definition(tmp[0]), Definition(tmp[1]), vec);
544 for (unsigned i = 0; i < dst.size(); i++)
545 tmp[i] = bld.vop3(aco_opcode::v_alignbyte_b32, bld.def(v1), tmp[i + 1], tmp[i], offset);
549 vec = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), tmp[0], tmp[1]);
551 offset = Operand::zero();
554 unsigned num_components = vec.bytes() / component_size;
555 if (vec.regClass() == dst.regClass()) {
556 assert(offset.constantValue() == 0);
557 bld.copy(Definition(dst), vec);
558 emit_split_vector(ctx, dst, num_components);
562 emit_split_vector(ctx, vec, num_components);
563 std::array<Temp, NIR_MAX_VEC_COMPONENTS> elems;
564 RegClass rc = RegClass(RegType::vgpr, component_size).as_subdword();
566 assert(offset.constantValue() % component_size == 0);
567 unsigned skip = offset.constantValue() / component_size;
568 for (unsigned i = skip; i < num_components; i++)
569 elems[i - skip] = emit_extract_vector(ctx, vec, i, rc);
571 if (dst.type() == RegType::vgpr) {
572 /* if dst is vgpr - split the src and create a shrunk version according to the mask. */
573 num_components = dst.bytes() / component_size;
574 aco_ptr<Pseudo_instruction> create_vec{create_instruction<Pseudo_instruction>(
575 aco_opcode::p_create_vector, Format::PSEUDO, num_components, 1)};
576 for (unsigned i = 0; i < num_components; i++)
577 create_vec->operands[i] = Operand(elems[i]);
578 create_vec->definitions[0] = Definition(dst);
579 bld.insert(std::move(create_vec));
582 /* if dst is sgpr - split the src, but move the original to sgpr. */
583 vec = bld.pseudo(aco_opcode::p_as_uniform, bld.def(RegClass(RegType::sgpr, vec.size())), vec);
584 byte_align_scalar(ctx, vec, offset, dst);
586 assert(dst.size() == vec.size());
587 bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), vec);
590 ctx->allocated_vec.emplace(dst.id(), elems);
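   /* For a variable offset the alignment is done with v_alignbyte_b32, which returns 4
    * contiguous bytes of the 8-byte concatenation {tmp[i + 1], tmp[i]} starting at byte
    * 'offset' - effectively a per-dword funnel shift. Constant offsets instead skip whole
    * components via emit_extract_vector and rebuild the vector (or defer to
    * byte_align_scalar for SGPR destinations). */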
594 bool_to_vector_condition(isel_context* ctx, Temp val, Temp dst = Temp(0, s2))
596 Builder bld(ctx->program, ctx->block);
598 dst = bld.tmp(bld.lm);
600 assert(val.regClass() == s1);
601 assert(dst.regClass() == bld.lm);
603 return bld.sop2(Builder::s_cselect, Definition(dst), Operand::c32(-1), Operand::zero(),
608 bool_to_scalar_condition(isel_context* ctx, Temp val, Temp dst = Temp(0, s1))
610 Builder bld(ctx->program, ctx->block);
614 assert(val.regClass() == bld.lm);
615 assert(dst.regClass() == s1);
617 /* if we're currently in WQM mode, ensure that the source is also computed in WQM */
618 bld.sop2(Builder::s_and, bld.def(bld.lm), bld.scc(Definition(dst)), val, Operand(exec, bld.lm));
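   /* dst becomes the SCC result of (val & exec), i.e. 1 iff any active lane of the
    * boolean is set - which, for a uniform boolean, is simply its value. ANDing with
    * exec also ignores stale bits left in inactive lanes. */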
/* Copies the first src_bits of the input to the output Temp. Input bits at positions larger than
 * src_bits and dst_bits are truncated.
 *
 * Sign extension may be applied using the sign_extend parameter. The position of the input sign
 * bit is indicated by src_bits in this case.
 *
 * If dst.bytes() is larger than dst_bits/8, the value of the upper bits is undefined.
 */
632 convert_int(isel_context* ctx, Builder& bld, Temp src, unsigned src_bits, unsigned dst_bits,
633 bool sign_extend, Temp dst = Temp())
635 assert(!(sign_extend && dst_bits < src_bits) &&
636 "Shrinking integers is not supported for signed inputs");
639 if (dst_bits % 32 == 0 || src.type() == RegType::sgpr)
640 dst = bld.tmp(src.type(), DIV_ROUND_UP(dst_bits, 32u));
642 dst = bld.tmp(RegClass(RegType::vgpr, dst_bits / 8u).as_subdword());
645 assert(src.type() == RegType::sgpr || src_bits == src.bytes() * 8);
646 assert(dst.type() == RegType::sgpr || dst_bits == dst.bytes() * 8);
648 if (dst.bytes() == src.bytes() && dst_bits < src_bits) {
649 /* Copy the raw value, leaving an undefined value in the upper bits for
650 * the caller to handle appropriately */
651 return bld.copy(Definition(dst), src);
652 } else if (dst.bytes() < src.bytes()) {
653 return bld.pseudo(aco_opcode::p_extract_vector, Definition(dst), src, Operand::zero());
658 tmp = src_bits == 32 ? src : bld.tmp(src.type(), 1);
661 } else if (src.regClass() == s1) {
662 assert(src_bits < 32);
663 bld.pseudo(aco_opcode::p_extract, Definition(tmp), bld.def(s1, scc), src, Operand::zero(),
664 Operand::c32(src_bits), Operand::c32((unsigned)sign_extend));
666 assert(src_bits < 32);
667 bld.pseudo(aco_opcode::p_extract, Definition(tmp), src, Operand::zero(), Operand::c32(src_bits),
668 Operand::c32((unsigned)sign_extend));
671 if (dst_bits == 64) {
672 if (sign_extend && dst.regClass() == s2) {
         Temp high =
            bld.sop2(aco_opcode::s_ashr_i32, bld.def(s1), bld.def(s1, scc), tmp, Operand::c32(31u));
675 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), tmp, high);
676 } else if (sign_extend && dst.regClass() == v2) {
677 Temp high = bld.vop2(aco_opcode::v_ashrrev_i32, bld.def(v1), Operand::c32(31u), tmp);
678 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), tmp, high);
680 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), tmp, Operand::zero());
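   /* Example: convert_int() with src_bits=8, dst_bits=32 and sign_extend=true emits a
    * single p_extract that selects byte 0 with sign extension; going on to 64 bits adds
    * a high dword that is either the replicated sign bit (arithmetic shift by 31) or
    * zero for unsigned extension. */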
687 enum sgpr_extract_mode {
694 extract_8_16_bit_sgpr_element(isel_context* ctx, Temp dst, nir_alu_src* src, sgpr_extract_mode mode)
696 Temp vec = get_ssa_temp(ctx, src->src.ssa);
697 unsigned src_size = src->src.ssa->bit_size;
698 unsigned swizzle = src->swizzle[0];
700 if (vec.size() > 1) {
701 assert(src_size == 16);
702 vec = emit_extract_vector(ctx, vec, swizzle / 2, s1);
703 swizzle = swizzle & 1;
706 Builder bld(ctx->program, ctx->block);
707 Temp tmp = dst.regClass() == s2 ? bld.tmp(s1) : dst;
709 if (mode == sgpr_extract_undef && swizzle == 0)
710 bld.copy(Definition(tmp), vec);
712 bld.pseudo(aco_opcode::p_extract, Definition(tmp), bld.def(s1, scc), Operand(vec),
713 Operand::c32(swizzle), Operand::c32(src_size),
714 Operand::c32((mode == sgpr_extract_sext)));
716 if (dst.regClass() == s2)
717 convert_int(ctx, bld, tmp, 32, 64, mode == sgpr_extract_sext, dst);
723 get_alu_src(struct isel_context* ctx, nir_alu_src src, unsigned size = 1)
725 if (src.src.ssa->num_components == 1 && size == 1)
726 return get_ssa_temp(ctx, src.src.ssa);
728 Temp vec = get_ssa_temp(ctx, src.src.ssa);
729 unsigned elem_size = src.src.ssa->bit_size / 8u;
730 bool identity_swizzle = true;
732 for (unsigned i = 0; identity_swizzle && i < size; i++) {
733 if (src.swizzle[i] != i)
734 identity_swizzle = false;
736 if (identity_swizzle)
737 return emit_extract_vector(ctx, vec, 0, RegClass::get(vec.type(), elem_size * size));
739 assert(elem_size > 0);
740 assert(vec.bytes() % elem_size == 0);
742 if (elem_size < 4 && vec.type() == RegType::sgpr && size == 1) {
743 assert(src.src.ssa->bit_size == 8 || src.src.ssa->bit_size == 16);
744 return extract_8_16_bit_sgpr_element(ctx, ctx->program->allocateTmp(s1), &src,
748 bool as_uniform = elem_size < 4 && vec.type() == RegType::sgpr;
750 vec = as_vgpr(ctx, vec);
752 RegClass elem_rc = elem_size < 4 ? RegClass(vec.type(), elem_size).as_subdword()
753 : RegClass(vec.type(), elem_size / 4);
755 return emit_extract_vector(ctx, vec, src.swizzle[0], elem_rc);
758 std::array<Temp, NIR_MAX_VEC_COMPONENTS> elems;
759 aco_ptr<Pseudo_instruction> vec_instr{create_instruction<Pseudo_instruction>(
760 aco_opcode::p_create_vector, Format::PSEUDO, size, 1)};
761 for (unsigned i = 0; i < size; ++i) {
762 elems[i] = emit_extract_vector(ctx, vec, src.swizzle[i], elem_rc);
763 vec_instr->operands[i] = Operand{elems[i]};
765 Temp dst = ctx->program->allocateTmp(RegClass(vec.type(), elem_size * size / 4));
766 vec_instr->definitions[0] = Definition(dst);
767 ctx->block->instructions.emplace_back(std::move(vec_instr));
768 ctx->allocated_vec.emplace(dst.id(), elems);
769 return vec.type() == RegType::sgpr ? Builder(ctx->program, ctx->block).as_uniform(dst) : dst;
774 get_alu_src_vop3p(struct isel_context* ctx, nir_alu_src src)
   /* returns v2b or v1 for vop3p usage.
    * The source expects exactly 2 16-bit components
    * which are within the same dword.
    */
780 assert(src.src.ssa->bit_size == 16);
781 assert(src.swizzle[0] >> 1 == src.swizzle[1] >> 1);
783 Temp tmp = get_ssa_temp(ctx, src.src.ssa);
787 /* the size is larger than 1 dword: check the swizzle */
788 unsigned dword = src.swizzle[0] >> 1;
790 /* extract a full dword if possible */
791 if (tmp.bytes() >= (dword + 1) * 4) {
      /* if the source is split into components, use p_create_vector */
793 auto it = ctx->allocated_vec.find(tmp.id());
794 if (it != ctx->allocated_vec.end()) {
795 unsigned index = dword << 1;
796 Builder bld(ctx->program, ctx->block);
797 if (it->second[index].regClass() == v2b)
798 return bld.pseudo(aco_opcode::p_create_vector, bld.def(v1), it->second[index],
799 it->second[index + 1]);
801 return emit_extract_vector(ctx, tmp, dword, v1);
803 /* This must be a swizzled access to %a.zz where %a is v6b */
804 assert(((src.swizzle[0] | src.swizzle[1]) & 1) == 0);
805 assert(tmp.regClass() == v6b && dword == 1);
806 return emit_extract_vector(ctx, tmp, dword * 2, v2b);
811 get_alu_src_ub(isel_context* ctx, nir_alu_instr* instr, int src_idx)
813 nir_ssa_scalar scalar =
814 nir_ssa_scalar{instr->src[src_idx].src.ssa, instr->src[src_idx].swizzle[0]};
815 return nir_unsigned_upper_bound(ctx->shader, ctx->range_ht, scalar, &ctx->ub_config);
819 convert_pointer_to_64_bit(isel_context* ctx, Temp ptr, bool non_uniform = false)
823 Builder bld(ctx->program, ctx->block);
824 if (ptr.type() == RegType::vgpr && !non_uniform)
825 ptr = bld.as_uniform(ptr);
826 return bld.pseudo(aco_opcode::p_create_vector, bld.def(RegClass(ptr.type(), 2)), ptr,
827 Operand::c32((unsigned)ctx->options->address32_hi));
831 emit_sop2_instruction(isel_context* ctx, nir_alu_instr* instr, aco_opcode op, Temp dst,
832 bool writes_scc, uint8_t uses_ub = 0)
834 aco_ptr<SOP2_instruction> sop2{
835 create_instruction<SOP2_instruction>(op, Format::SOP2, 2, writes_scc ? 2 : 1)};
836 sop2->operands[0] = Operand(get_alu_src(ctx, instr->src[0]));
837 sop2->operands[1] = Operand(get_alu_src(ctx, instr->src[1]));
838 sop2->definitions[0] = Definition(dst);
839 if (instr->no_unsigned_wrap)
840 sop2->definitions[0].setNUW(true);
842 sop2->definitions[1] = Definition(ctx->program->allocateId(s1), scc, s1);
844 for (int i = 0; i < 2; i++) {
845 if (uses_ub & (1 << i)) {
846 uint32_t src_ub = get_alu_src_ub(ctx, instr, i);
847 if (src_ub <= 0xffff)
848 sop2->operands[i].set16bit(true);
849 else if (src_ub <= 0xffffff)
850 sop2->operands[i].set24bit(true);
854 ctx->block->instructions.emplace_back(std::move(sop2));
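   /* The set16bit()/set24bit() flags don't change the emitted SOP2; they record that
    * NIR's range analysis proved the operand fits in 16 or 24 bits so that later passes
    * may pick cheaper forms (e.g. 24-bit multiply-add) when combining instructions. */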
858 emit_vop2_instruction(isel_context* ctx, nir_alu_instr* instr, aco_opcode opc, Temp dst,
859 bool commutative, bool swap_srcs = false, bool flush_denorms = false,
860 bool nuw = false, uint8_t uses_ub = 0)
862 Builder bld(ctx->program, ctx->block);
863 bld.is_precise = instr->exact;
865 Temp src0 = get_alu_src(ctx, instr->src[swap_srcs ? 1 : 0]);
866 Temp src1 = get_alu_src(ctx, instr->src[swap_srcs ? 0 : 1]);
867 if (src1.type() == RegType::sgpr) {
868 if (commutative && src0.type() == RegType::vgpr) {
873 src1 = as_vgpr(ctx, src1);
877 Operand op[2] = {Operand(src0), Operand(src1)};
879 for (int i = 0; i < 2; i++) {
880 if (uses_ub & (1 << i)) {
881 uint32_t src_ub = get_alu_src_ub(ctx, instr, swap_srcs ? !i : i);
882 if (src_ub <= 0xffff)
883 op[i].set16bit(true);
884 else if (src_ub <= 0xffffff)
885 op[i].set24bit(true);
889 if (flush_denorms && ctx->program->chip_class < GFX9) {
890 assert(dst.size() == 1);
891 Temp tmp = bld.vop2(opc, bld.def(v1), op[0], op[1]);
892 bld.vop2(aco_opcode::v_mul_f32, Definition(dst), Operand::c32(0x3f800000u), tmp);
895 bld.nuw().vop2(opc, Definition(dst), op[0], op[1]);
897 bld.vop2(opc, Definition(dst), op[0], op[1]);
903 emit_vop2_instruction_logic64(isel_context* ctx, nir_alu_instr* instr, aco_opcode op, Temp dst)
905 Builder bld(ctx->program, ctx->block);
906 bld.is_precise = instr->exact;
908 Temp src0 = get_alu_src(ctx, instr->src[0]);
909 Temp src1 = get_alu_src(ctx, instr->src[1]);
911 if (src1.type() == RegType::sgpr) {
912 assert(src0.type() == RegType::vgpr);
913 std::swap(src0, src1);
916 Temp src00 = bld.tmp(src0.type(), 1);
917 Temp src01 = bld.tmp(src0.type(), 1);
918 bld.pseudo(aco_opcode::p_split_vector, Definition(src00), Definition(src01), src0);
919 Temp src10 = bld.tmp(v1);
920 Temp src11 = bld.tmp(v1);
921 bld.pseudo(aco_opcode::p_split_vector, Definition(src10), Definition(src11), src1);
922 Temp lo = bld.vop2(op, bld.def(v1), src00, src10);
923 Temp hi = bld.vop2(op, bld.def(v1), src01, src11);
924 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, hi);
928 emit_vop3a_instruction(isel_context* ctx, nir_alu_instr* instr, aco_opcode op, Temp dst,
929 bool flush_denorms = false, unsigned num_sources = 2, bool swap_srcs = false)
931 assert(num_sources == 2 || num_sources == 3);
932 Temp src[3] = {Temp(0, v1), Temp(0, v1), Temp(0, v1)};
933 bool has_sgpr = false;
934 for (unsigned i = 0; i < num_sources; i++) {
935 src[i] = get_alu_src(ctx, instr->src[swap_srcs ? 1 - i : i]);
937 src[i] = as_vgpr(ctx, src[i]);
939 has_sgpr = src[i].type() == RegType::sgpr;
942 Builder bld(ctx->program, ctx->block);
943 bld.is_precise = instr->exact;
944 if (flush_denorms && ctx->program->chip_class < GFX9) {
946 if (num_sources == 3)
947 tmp = bld.vop3(op, bld.def(dst.regClass()), src[0], src[1], src[2]);
949 tmp = bld.vop3(op, bld.def(dst.regClass()), src[0], src[1]);
951 bld.vop2(aco_opcode::v_mul_f32, Definition(dst), Operand::c32(0x3f800000u), tmp);
953 bld.vop3(aco_opcode::v_mul_f64, Definition(dst), Operand::c64(0x3FF0000000000000), tmp);
954 } else if (num_sources == 3) {
955 bld.vop3(op, Definition(dst), src[0], src[1], src[2]);
957 bld.vop3(op, Definition(dst), src[0], src[1]);
962 emit_vop3p_instruction(isel_context* ctx, nir_alu_instr* instr, aco_opcode op, Temp dst,
963 bool swap_srcs = false)
965 Temp src0 = get_alu_src_vop3p(ctx, instr->src[swap_srcs]);
966 Temp src1 = get_alu_src_vop3p(ctx, instr->src[!swap_srcs]);
967 if (src0.type() == RegType::sgpr && src1.type() == RegType::sgpr)
968 src1 = as_vgpr(ctx, src1);
969 assert(instr->dest.dest.ssa.num_components == 2);
971 /* swizzle to opsel: all swizzles are either 0 (x) or 1 (y) */
973 (instr->src[!swap_srcs].swizzle[0] & 1) << 1 | (instr->src[swap_srcs].swizzle[0] & 1);
975 (instr->src[!swap_srcs].swizzle[1] & 1) << 1 | (instr->src[swap_srcs].swizzle[1] & 1);
977 Builder bld(ctx->program, ctx->block);
978 bld.is_precise = instr->exact;
979 Builder::Result res = bld.vop3p(op, Definition(dst), src0, src1, opsel_lo, opsel_hi);
984 emit_idot_instruction(isel_context* ctx, nir_alu_instr* instr, aco_opcode op, Temp dst, bool clamp)
986 Temp src[3] = {Temp(0, v1), Temp(0, v1), Temp(0, v1)};
987 bool has_sgpr = false;
988 for (unsigned i = 0; i < 3; i++) {
989 src[i] = get_alu_src(ctx, instr->src[i]);
991 src[i] = as_vgpr(ctx, src[i]);
993 has_sgpr = src[i].type() == RegType::sgpr;
996 Builder bld(ctx->program, ctx->block);
997 bld.is_precise = instr->exact;
998 bld.vop3p(op, Definition(dst), src[0], src[1], src[2], 0x0, 0x7).instr->vop3p().clamp = clamp;
1002 emit_vop1_instruction(isel_context* ctx, nir_alu_instr* instr, aco_opcode op, Temp dst)
1004 Builder bld(ctx->program, ctx->block);
1005 bld.is_precise = instr->exact;
1006 if (dst.type() == RegType::sgpr)
1007 bld.pseudo(aco_opcode::p_as_uniform, Definition(dst),
1008 bld.vop1(op, bld.def(RegType::vgpr, dst.size()), get_alu_src(ctx, instr->src[0])));
1010 bld.vop1(op, Definition(dst), get_alu_src(ctx, instr->src[0]));
1014 emit_vopc_instruction(isel_context* ctx, nir_alu_instr* instr, aco_opcode op, Temp dst)
1016 Temp src0 = get_alu_src(ctx, instr->src[0]);
1017 Temp src1 = get_alu_src(ctx, instr->src[1]);
1018 assert(src0.size() == src1.size());
1020 aco_ptr<Instruction> vopc;
1021 if (src1.type() == RegType::sgpr) {
1022 if (src0.type() == RegType::vgpr) {
1023 /* to swap the operands, we might also have to change the opcode */
1025 case aco_opcode::v_cmp_lt_f16: op = aco_opcode::v_cmp_gt_f16; break;
1026 case aco_opcode::v_cmp_ge_f16: op = aco_opcode::v_cmp_le_f16; break;
1027 case aco_opcode::v_cmp_lt_i16: op = aco_opcode::v_cmp_gt_i16; break;
1028 case aco_opcode::v_cmp_ge_i16: op = aco_opcode::v_cmp_le_i16; break;
1029 case aco_opcode::v_cmp_lt_u16: op = aco_opcode::v_cmp_gt_u16; break;
1030 case aco_opcode::v_cmp_ge_u16: op = aco_opcode::v_cmp_le_u16; break;
1031 case aco_opcode::v_cmp_lt_f32: op = aco_opcode::v_cmp_gt_f32; break;
1032 case aco_opcode::v_cmp_ge_f32: op = aco_opcode::v_cmp_le_f32; break;
1033 case aco_opcode::v_cmp_lt_i32: op = aco_opcode::v_cmp_gt_i32; break;
1034 case aco_opcode::v_cmp_ge_i32: op = aco_opcode::v_cmp_le_i32; break;
1035 case aco_opcode::v_cmp_lt_u32: op = aco_opcode::v_cmp_gt_u32; break;
1036 case aco_opcode::v_cmp_ge_u32: op = aco_opcode::v_cmp_le_u32; break;
1037 case aco_opcode::v_cmp_lt_f64: op = aco_opcode::v_cmp_gt_f64; break;
1038 case aco_opcode::v_cmp_ge_f64: op = aco_opcode::v_cmp_le_f64; break;
1039 case aco_opcode::v_cmp_lt_i64: op = aco_opcode::v_cmp_gt_i64; break;
1040 case aco_opcode::v_cmp_ge_i64: op = aco_opcode::v_cmp_le_i64; break;
1041 case aco_opcode::v_cmp_lt_u64: op = aco_opcode::v_cmp_gt_u64; break;
1042 case aco_opcode::v_cmp_ge_u64: op = aco_opcode::v_cmp_le_u64; break;
1043 default: /* eq and ne are commutative */ break;
1049 src1 = as_vgpr(ctx, src1);
1053 Builder bld(ctx->program, ctx->block);
1054 bld.vopc(op, Definition(dst), src0, src1);
1058 emit_sopc_instruction(isel_context* ctx, nir_alu_instr* instr, aco_opcode op, Temp dst)
1060 Temp src0 = get_alu_src(ctx, instr->src[0]);
1061 Temp src1 = get_alu_src(ctx, instr->src[1]);
1062 Builder bld(ctx->program, ctx->block);
1064 assert(dst.regClass() == bld.lm);
1065 assert(src0.type() == RegType::sgpr);
1066 assert(src1.type() == RegType::sgpr);
1067 assert(src0.regClass() == src1.regClass());
1069 /* Emit the SALU comparison instruction */
1070 Temp cmp = bld.sopc(op, bld.scc(bld.def(s1)), src0, src1);
1071 /* Turn the result into a per-lane bool */
1072 bool_to_vector_condition(ctx, cmp, dst);
1076 emit_comparison(isel_context* ctx, nir_alu_instr* instr, Temp dst, aco_opcode v16_op,
1077 aco_opcode v32_op, aco_opcode v64_op, aco_opcode s32_op = aco_opcode::num_opcodes,
1078 aco_opcode s64_op = aco_opcode::num_opcodes)
1080 aco_opcode s_op = instr->src[0].src.ssa->bit_size == 64 ? s64_op
1081 : instr->src[0].src.ssa->bit_size == 32 ? s32_op
1082 : aco_opcode::num_opcodes;
1083 aco_opcode v_op = instr->src[0].src.ssa->bit_size == 64 ? v64_op
                     : instr->src[0].src.ssa->bit_size == 32 ? v32_op
                                                             : v16_op;
1086 bool use_valu = s_op == aco_opcode::num_opcodes || nir_dest_is_divergent(instr->dest.dest) ||
1087 get_ssa_temp(ctx, instr->src[0].src.ssa).type() == RegType::vgpr ||
1088 get_ssa_temp(ctx, instr->src[1].src.ssa).type() == RegType::vgpr;
1089 aco_opcode op = use_valu ? v_op : s_op;
1090 assert(op != aco_opcode::num_opcodes);
1091 assert(dst.regClass() == ctx->program->lane_mask);
   if (use_valu)
      emit_vopc_instruction(ctx, instr, op, dst);
   else
      emit_sopc_instruction(ctx, instr, op, dst);
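   /* A comparison only stays on the SALU when a scalar opcode exists for this bit size,
    * both sources are uniform (SGPRs) and the NIR destination is non-divergent; the SALU
    * path then widens the SCC result back to a lane mask via bool_to_vector_condition. */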
1100 emit_boolean_logic(isel_context* ctx, nir_alu_instr* instr, Builder::WaveSpecificOpcode op,
1103 Builder bld(ctx->program, ctx->block);
1104 Temp src0 = get_alu_src(ctx, instr->src[0]);
1105 Temp src1 = get_alu_src(ctx, instr->src[1]);
1107 assert(dst.regClass() == bld.lm);
1108 assert(src0.regClass() == bld.lm);
1109 assert(src1.regClass() == bld.lm);
1111 bld.sop2(op, Definition(dst), bld.def(s1, scc), src0, src1);
1115 emit_bcsel(isel_context* ctx, nir_alu_instr* instr, Temp dst)
1117 Builder bld(ctx->program, ctx->block);
1118 Temp cond = get_alu_src(ctx, instr->src[0]);
1119 Temp then = get_alu_src(ctx, instr->src[1]);
1120 Temp els = get_alu_src(ctx, instr->src[2]);
1122 assert(cond.regClass() == bld.lm);
1124 if (dst.type() == RegType::vgpr) {
1125 aco_ptr<Instruction> bcsel;
1126 if (dst.size() == 1) {
1127 then = as_vgpr(ctx, then);
1128 els = as_vgpr(ctx, els);
1130 bld.vop2(aco_opcode::v_cndmask_b32, Definition(dst), els, then, cond);
1131 } else if (dst.size() == 2) {
1132 Temp then_lo = bld.tmp(v1), then_hi = bld.tmp(v1);
1133 bld.pseudo(aco_opcode::p_split_vector, Definition(then_lo), Definition(then_hi), then);
1134 Temp else_lo = bld.tmp(v1), else_hi = bld.tmp(v1);
1135 bld.pseudo(aco_opcode::p_split_vector, Definition(else_lo), Definition(else_hi), els);
1137 Temp dst0 = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), else_lo, then_lo, cond);
1138 Temp dst1 = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), else_hi, then_hi, cond);
1140 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), dst0, dst1);
1142 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
1147 if (instr->dest.dest.ssa.bit_size == 1) {
1148 assert(dst.regClass() == bld.lm);
1149 assert(then.regClass() == bld.lm);
1150 assert(els.regClass() == bld.lm);
1153 if (!nir_src_is_divergent(instr->src[0].src)) { /* uniform condition and values in sgpr */
1154 if (dst.regClass() == s1 || dst.regClass() == s2) {
1155 assert((then.regClass() == s1 || then.regClass() == s2) &&
1156 els.regClass() == then.regClass());
1157 assert(dst.size() == then.size());
         aco_opcode op =
            dst.regClass() == s1 ? aco_opcode::s_cselect_b32 : aco_opcode::s_cselect_b64;
1160 bld.sop2(op, Definition(dst), then, els, bld.scc(bool_to_scalar_condition(ctx, cond)));
1162 isel_err(&instr->instr, "Unimplemented uniform bcsel bit size");
   /* divergent boolean bcsel
    * this implements bcsel on bools: dst = s0 ? s1 : s2
    * which is equivalent to: dst = (s0 & s1) | (~s0 & s2) */
1170 assert(instr->dest.dest.ssa.bit_size == 1);
1172 if (cond.id() != then.id())
1173 then = bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), cond, then);
1175 if (cond.id() == els.id())
1176 bld.copy(Definition(dst), then);
1178 bld.sop2(Builder::s_or, Definition(dst), bld.def(s1, scc), then,
1179 bld.sop2(Builder::s_andn2, bld.def(bld.lm), bld.def(s1, scc), els, cond));
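   /* Divergent boolean select is plain bit arithmetic on lane masks:
    *    dst = (cond & then) | (~cond & else)
    * The two id() comparisons just skip redundant operations when one source is the
    * condition itself (cond ? cond : x == cond | (~cond & x), cond ? x : cond == cond & x). */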
1183 emit_scaled_op(isel_context* ctx, Builder& bld, Definition dst, Temp val, aco_opcode op,
1186 /* multiply by 16777216 to handle denormals */
1187 Temp is_denormal = bld.vopc(aco_opcode::v_cmp_class_f32, bld.def(bld.lm), as_vgpr(ctx, val),
1188 bld.copy(bld.def(v1), Operand::c32((1u << 7) | (1u << 4))));
1189 Temp scaled = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), Operand::c32(0x4b800000u), val);
1190 scaled = bld.vop1(op, bld.def(v1), scaled);
1191 scaled = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), Operand::c32(undo), scaled);
1193 Temp not_scaled = bld.vop1(op, bld.def(v1), val);
1195 bld.vop2(aco_opcode::v_cndmask_b32, dst, not_scaled, scaled, is_denormal);
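   /* The f32 transcendentals don't handle denormal inputs, so when denormals are enabled
    * the input is pre-scaled by 2^24 (0x4b800000 = 16777216.0), the operation is applied,
    * and the opcode-specific 'undo' constant removes the scale again. The v_cmp_class
    * mask (1 << 4 | 1 << 7) selects exactly the +/- denormal classes, so the scaled
    * result is only used for denormal inputs. */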
1199 emit_rcp(isel_context* ctx, Builder& bld, Definition dst, Temp val)
1201 if (ctx->block->fp_mode.denorm32 == 0) {
1202 bld.vop1(aco_opcode::v_rcp_f32, dst, val);
1206 emit_scaled_op(ctx, bld, dst, val, aco_opcode::v_rcp_f32, 0x4b800000u);
1210 emit_rsq(isel_context* ctx, Builder& bld, Definition dst, Temp val)
1212 if (ctx->block->fp_mode.denorm32 == 0) {
1213 bld.vop1(aco_opcode::v_rsq_f32, dst, val);
1217 emit_scaled_op(ctx, bld, dst, val, aco_opcode::v_rsq_f32, 0x45800000u);
1221 emit_sqrt(isel_context* ctx, Builder& bld, Definition dst, Temp val)
1223 if (ctx->block->fp_mode.denorm32 == 0) {
1224 bld.vop1(aco_opcode::v_sqrt_f32, dst, val);
1228 emit_scaled_op(ctx, bld, dst, val, aco_opcode::v_sqrt_f32, 0x39800000u);
1232 emit_log2(isel_context* ctx, Builder& bld, Definition dst, Temp val)
1234 if (ctx->block->fp_mode.denorm32 == 0) {
1235 bld.vop1(aco_opcode::v_log_f32, dst, val);
1239 emit_scaled_op(ctx, bld, dst, val, aco_opcode::v_log_f32, 0xc1c00000u);
1243 emit_trunc_f64(isel_context* ctx, Builder& bld, Definition dst, Temp val)
1245 if (ctx->options->chip_class >= GFX7)
1246 return bld.vop1(aco_opcode::v_trunc_f64, Definition(dst), val);
1248 /* GFX6 doesn't support V_TRUNC_F64, lower it. */
1249 /* TODO: create more efficient code! */
1250 if (val.type() == RegType::sgpr)
1251 val = as_vgpr(ctx, val);
1253 /* Split the input value. */
1254 Temp val_lo = bld.tmp(v1), val_hi = bld.tmp(v1);
1255 bld.pseudo(aco_opcode::p_split_vector, Definition(val_lo), Definition(val_hi), val);
1257 /* Extract the exponent and compute the unbiased value. */
   Temp exponent =
      bld.vop3(aco_opcode::v_bfe_u32, bld.def(v1), val_hi, Operand::c32(20u), Operand::c32(11u));
1260 exponent = bld.vsub32(bld.def(v1), exponent, Operand::c32(1023u));
1262 /* Extract the fractional part. */
1263 Temp fract_mask = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), Operand::c32(-1u),
1264 Operand::c32(0x000fffffu));
1265 fract_mask = bld.vop3(aco_opcode::v_lshr_b64, bld.def(v2), fract_mask, exponent);
1267 Temp fract_mask_lo = bld.tmp(v1), fract_mask_hi = bld.tmp(v1);
1268 bld.pseudo(aco_opcode::p_split_vector, Definition(fract_mask_lo), Definition(fract_mask_hi),
1271 Temp fract_lo = bld.tmp(v1), fract_hi = bld.tmp(v1);
1272 Temp tmp = bld.vop1(aco_opcode::v_not_b32, bld.def(v1), fract_mask_lo);
1273 fract_lo = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), val_lo, tmp);
1274 tmp = bld.vop1(aco_opcode::v_not_b32, bld.def(v1), fract_mask_hi);
1275 fract_hi = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), val_hi, tmp);
1277 /* Get the sign bit. */
1278 Temp sign = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand::c32(0x80000000u), val_hi);
1280 /* Decide the operation to apply depending on the unbiased exponent. */
   Temp exp_lt0 =
      bld.vopc_e64(aco_opcode::v_cmp_lt_i32, bld.def(bld.lm), exponent, Operand::zero());
1283 Temp dst_lo = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), fract_lo,
1284 bld.copy(bld.def(v1), Operand::zero()), exp_lt0);
1285 Temp dst_hi = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), fract_hi, sign, exp_lt0);
1286 Temp exp_gt51 = bld.vopc_e64(aco_opcode::v_cmp_gt_i32, bld.def(s2), exponent, Operand::c32(51u));
1287 dst_lo = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), dst_lo, val_lo, exp_gt51);
1288 dst_hi = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), dst_hi, val_hi, exp_gt51);
1290 return bld.pseudo(aco_opcode::p_create_vector, Definition(dst), dst_lo, dst_hi);
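   /* Sketch of the GFX6 lowering above, with e = biased_exponent - 1023:
    *    e < 0  -> |val| < 1.0, so the result is +/-0 with the sign preserved,
    *    e > 51 -> the value is already integral, return it unchanged,
    *    else   -> clear the low (52 - e) fraction bits, which is what ANDing with the
    *              complement of 0x000fffffffffffff >> e achieves. */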
1294 emit_floor_f64(isel_context* ctx, Builder& bld, Definition dst, Temp val)
1296 if (ctx->options->chip_class >= GFX7)
1297 return bld.vop1(aco_opcode::v_floor_f64, Definition(dst), val);
1299 /* GFX6 doesn't support V_FLOOR_F64, lower it (note that it's actually
1300 * lowered at NIR level for precision reasons). */
1301 Temp src0 = as_vgpr(ctx, val);
1303 Temp mask = bld.copy(bld.def(s1), Operand::c32(3u)); /* isnan */
1304 Temp min_val = bld.pseudo(aco_opcode::p_create_vector, bld.def(s2), Operand::c32(-1u),
1305 Operand::c32(0x3fefffffu));
1307 Temp isnan = bld.vopc_e64(aco_opcode::v_cmp_class_f64, bld.def(bld.lm), src0, mask);
1308 Temp fract = bld.vop1(aco_opcode::v_fract_f64, bld.def(v2), src0);
1309 Temp min = bld.vop3(aco_opcode::v_min_f64, bld.def(v2), fract, min_val);
1311 Temp then_lo = bld.tmp(v1), then_hi = bld.tmp(v1);
1312 bld.pseudo(aco_opcode::p_split_vector, Definition(then_lo), Definition(then_hi), src0);
1313 Temp else_lo = bld.tmp(v1), else_hi = bld.tmp(v1);
1314 bld.pseudo(aco_opcode::p_split_vector, Definition(else_lo), Definition(else_hi), min);
1316 Temp dst0 = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), else_lo, then_lo, isnan);
1317 Temp dst1 = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), else_hi, then_hi, isnan);
1319 Temp v = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), dst0, dst1);
1321 Instruction* add = bld.vop3(aco_opcode::v_add_f64, Definition(dst), src0, v);
1322 add->vop3().neg[1] = true;
1324 return add->definitions[0].getTemp();
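   /* The GFX6 path computes floor(x) as x - min(fract(x), largest double below 1.0):
    * the min with 0x3fefffffffffffff guards against the fractional part rounding up to
    * 1.0 in corner cases, NaN inputs (v_cmp_class mask 3 = signaling/quiet NaN) select
    * the original value as the subtrahend so NaN propagates, and the final v_add_f64
    * with neg[1] set performs the subtraction. */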
1328 uadd32_sat(Builder& bld, Definition dst, Temp src0, Temp src1)
1330 if (bld.program->chip_class < GFX8) {
1331 Builder::Result add = bld.vadd32(bld.def(v1), src0, src1, true);
1332 return bld.vop2_e64(aco_opcode::v_cndmask_b32, dst, add.def(0).getTemp(), Operand::c32(-1),
1333 add.def(1).getTemp());
1336 Builder::Result add(NULL);
1337 if (bld.program->chip_class >= GFX9) {
1338 add = bld.vop2_e64(aco_opcode::v_add_u32, dst, src0, src1);
1340 add = bld.vop2_e64(aco_opcode::v_add_co_u32, dst, bld.def(bld.lm), src0, src1);
1342 add.instr->vop3().clamp = 1;
1343 return dst.getTemp();
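   /* Saturating unsigned add: GFX9+ sets the VOP3 clamp bit on a plain v_add_u32, GFX8
    * needs the carry-writing v_add_co_u32 plus clamp, and before GFX8 the carry-out of
    * vadd32 selects 0xffffffff through v_cndmask_b32 whenever the addition overflows. */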
1347 visit_alu_instr(isel_context* ctx, nir_alu_instr* instr)
1349 if (!instr->dest.dest.is_ssa) {
1350 isel_err(&instr->instr, "nir alu dst not in ssa");
1353 Builder bld(ctx->program, ctx->block);
1354 bld.is_precise = instr->exact;
1355 Temp dst = get_ssa_temp(ctx, &instr->dest.dest.ssa);
1356 switch (instr->op) {
1362 case nir_op_vec16: {
1363 std::array<Temp, NIR_MAX_VEC_COMPONENTS> elems;
1364 unsigned num = instr->dest.dest.ssa.num_components;
1365 for (unsigned i = 0; i < num; ++i)
1366 elems[i] = get_alu_src(ctx, instr->src[i]);
1368 if (instr->dest.dest.ssa.bit_size >= 32 || dst.type() == RegType::vgpr) {
1369 aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(
1370 aco_opcode::p_create_vector, Format::PSEUDO, instr->dest.dest.ssa.num_components, 1)};
1371 RegClass elem_rc = RegClass::get(RegType::vgpr, instr->dest.dest.ssa.bit_size / 8u);
1372 for (unsigned i = 0; i < num; ++i) {
1373 if (elems[i].type() == RegType::sgpr && elem_rc.is_subdword())
1374 elems[i] = emit_extract_vector(ctx, elems[i], 0, elem_rc);
1375 vec->operands[i] = Operand{elems[i]};
1377 vec->definitions[0] = Definition(dst);
1378 ctx->block->instructions.emplace_back(std::move(vec));
1379 ctx->allocated_vec.emplace(dst.id(), elems);
1381 bool use_s_pack = ctx->program->chip_class >= GFX9;
1382 Temp mask = bld.copy(bld.def(s1), Operand::c32((1u << instr->dest.dest.ssa.bit_size) - 1));
1384 std::array<Temp, NIR_MAX_VEC_COMPONENTS> packed;
1385 uint32_t const_vals[NIR_MAX_VEC_COMPONENTS] = {};
1386 for (unsigned i = 0; i < num; i++) {
1387 unsigned packed_size = use_s_pack ? 16 : 32;
1388 unsigned idx = i * instr->dest.dest.ssa.bit_size / packed_size;
1389 unsigned offset = i * instr->dest.dest.ssa.bit_size % packed_size;
1390 if (nir_src_is_const(instr->src[i].src)) {
1391 const_vals[idx] |= nir_src_as_uint(instr->src[i].src) << offset;
1395 if (offset != packed_size - instr->dest.dest.ssa.bit_size)
               elems[i] =
                  bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), elems[i], mask);
1400 elems[i] = bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.def(s1, scc), elems[i],
1401 Operand::c32(offset));
1403 if (packed[idx].id())
               packed[idx] = bld.sop2(aco_opcode::s_or_b32, bld.def(s1), bld.def(s1, scc), elems[i],
                                      packed[idx]);
            else
               packed[idx] = elems[i];
1411 for (unsigned i = 0; i < dst.size(); i++) {
1412 bool same = !!packed[i * 2].id() == !!packed[i * 2 + 1].id();
1414 if (packed[i * 2].id() && packed[i * 2 + 1].id())
                  packed[i] = bld.sop2(aco_opcode::s_pack_ll_b32_b16, bld.def(s1), packed[i * 2],
                                       packed[i * 2 + 1]);
1417 else if (packed[i * 2 + 1].id())
1418 packed[i] = bld.sop2(aco_opcode::s_pack_ll_b32_b16, bld.def(s1),
1419 Operand::c32(const_vals[i * 2]), packed[i * 2 + 1]);
1420 else if (packed[i * 2].id())
1421 packed[i] = bld.sop2(aco_opcode::s_pack_ll_b32_b16, bld.def(s1), packed[i * 2],
1422 Operand::c32(const_vals[i * 2 + 1]));
               if (same)
                  const_vals[i] = const_vals[i * 2] | (const_vals[i * 2 + 1] << 16);
1431 for (unsigned i = 0; i < dst.size(); i++) {
1432 if (const_vals[i] && packed[i].id())
1433 packed[i] = bld.sop2(aco_opcode::s_or_b32, bld.def(s1), bld.def(s1, scc),
1434 Operand::c32(const_vals[i]), packed[i]);
1435 else if (!packed[i].id())
1436 packed[i] = bld.copy(bld.def(s1), Operand::c32(const_vals[i]));
1439 if (dst.size() == 1)
1440 bld.copy(Definition(dst), packed[0]);
1441 else if (dst.size() == 2)
1442 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), packed[0], packed[1]);
1444 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), packed[0], packed[1],
1450 Temp src = get_alu_src(ctx, instr->src[0]);
1451 if (src.type() == RegType::vgpr && dst.type() == RegType::sgpr) {
1452 /* use size() instead of bytes() for 8/16-bit */
1453 assert(src.size() == dst.size() && "wrong src or dst register class for nir_op_mov");
1454 bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), src);
1456 assert(src.bytes() == dst.bytes() && "wrong src or dst register class for nir_op_mov");
1457 bld.copy(Definition(dst), src);
1462 Temp src = get_alu_src(ctx, instr->src[0]);
1463 if (dst.regClass() == v1 || dst.regClass() == v2b || dst.regClass() == v1b) {
1464 emit_vop1_instruction(ctx, instr, aco_opcode::v_not_b32, dst);
1465 } else if (dst.regClass() == v2) {
1466 Temp lo = bld.tmp(v1), hi = bld.tmp(v1);
1467 bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), src);
1468 lo = bld.vop1(aco_opcode::v_not_b32, bld.def(v1), lo);
1469 hi = bld.vop1(aco_opcode::v_not_b32, bld.def(v1), hi);
1470 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, hi);
1471 } else if (dst.type() == RegType::sgpr) {
1472 aco_opcode opcode = dst.size() == 1 ? aco_opcode::s_not_b32 : aco_opcode::s_not_b64;
1473 bld.sop1(opcode, Definition(dst), bld.def(s1, scc), src);
1475 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
1480 Temp src = get_alu_src(ctx, instr->src[0]);
1481 if (dst.regClass() == s1) {
1482 bld.sop1(aco_opcode::s_abs_i32, Definition(dst), bld.def(s1, scc), src);
1483 } else if (dst.regClass() == v1) {
1484 bld.vop2(aco_opcode::v_max_i32, Definition(dst), src,
1485 bld.vsub32(bld.def(v1), Operand::zero(), src));
1487 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
1491 case nir_op_isign: {
1492 Temp src = get_alu_src(ctx, instr->src[0]);
1493 if (dst.regClass() == s1) {
         Temp tmp =
            bld.sop2(aco_opcode::s_max_i32, bld.def(s1), bld.def(s1, scc), src, Operand::c32(-1));
1496 bld.sop2(aco_opcode::s_min_i32, Definition(dst), bld.def(s1, scc), tmp, Operand::c32(1u));
1497 } else if (dst.regClass() == s2) {
         Temp neg =
            bld.sop2(aco_opcode::s_ashr_i64, bld.def(s2), bld.def(s1, scc), src, Operand::c32(63u));
         Temp neqz;
         if (ctx->program->chip_class >= GFX8)
            neqz = bld.sopc(aco_opcode::s_cmp_lg_u64, bld.def(s1, scc), src, Operand::zero());
         else
            neqz = bld.sop2(aco_opcode::s_or_b64, bld.def(s2), bld.def(s1, scc), src, Operand::zero())
                      .def(1).getTemp();
1508 /* SCC gets zero-extended to 64 bit */
1509 bld.sop2(aco_opcode::s_or_b64, Definition(dst), bld.def(s1, scc), neg, bld.scc(neqz));
1510 } else if (dst.regClass() == v1) {
1511 bld.vop3(aco_opcode::v_med3_i32, Definition(dst), Operand::c32(-1), src, Operand::c32(1u));
1512 } else if (dst.regClass() == v2) {
1513 Temp upper = emit_extract_vector(ctx, src, 1, v1);
1514 Temp neg = bld.vop2(aco_opcode::v_ashrrev_i32, bld.def(v1), Operand::c32(31u), upper);
1515 Temp gtz = bld.vopc(aco_opcode::v_cmp_ge_i64, bld.def(bld.lm), Operand::zero(), src);
1516 Temp lower = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), Operand::c32(1u), neg, gtz);
1517 upper = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), Operand::zero(), neg, gtz);
1518 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper);
1520 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
1525 if (dst.regClass() == v2b && ctx->program->chip_class >= GFX10) {
1526 emit_vop3a_instruction(ctx, instr, aco_opcode::v_max_i16_e64, dst);
1527 } else if (dst.regClass() == v2b) {
1528 emit_vop2_instruction(ctx, instr, aco_opcode::v_max_i16, dst, true);
1529 } else if (dst.regClass() == v1 && instr->dest.dest.ssa.bit_size == 16) {
1530 emit_vop3p_instruction(ctx, instr, aco_opcode::v_pk_max_i16, dst);
1531 } else if (dst.regClass() == v1) {
1532 emit_vop2_instruction(ctx, instr, aco_opcode::v_max_i32, dst, true);
1533 } else if (dst.regClass() == s1) {
1534 emit_sop2_instruction(ctx, instr, aco_opcode::s_max_i32, dst, true);
1536 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
1541 if (dst.regClass() == v2b && ctx->program->chip_class >= GFX10) {
1542 emit_vop3a_instruction(ctx, instr, aco_opcode::v_max_u16_e64, dst);
1543 } else if (dst.regClass() == v2b) {
1544 emit_vop2_instruction(ctx, instr, aco_opcode::v_max_u16, dst, true);
1545 } else if (dst.regClass() == v1 && instr->dest.dest.ssa.bit_size == 16) {
1546 emit_vop3p_instruction(ctx, instr, aco_opcode::v_pk_max_u16, dst);
1547 } else if (dst.regClass() == v1) {
1548 emit_vop2_instruction(ctx, instr, aco_opcode::v_max_u32, dst, true);
1549 } else if (dst.regClass() == s1) {
1550 emit_sop2_instruction(ctx, instr, aco_opcode::s_max_u32, dst, true);
1552 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
1557 if (dst.regClass() == v2b && ctx->program->chip_class >= GFX10) {
1558 emit_vop3a_instruction(ctx, instr, aco_opcode::v_min_i16_e64, dst);
1559 } else if (dst.regClass() == v2b) {
1560 emit_vop2_instruction(ctx, instr, aco_opcode::v_min_i16, dst, true);
1561 } else if (dst.regClass() == v1 && instr->dest.dest.ssa.bit_size == 16) {
1562 emit_vop3p_instruction(ctx, instr, aco_opcode::v_pk_min_i16, dst);
1563 } else if (dst.regClass() == v1) {
1564 emit_vop2_instruction(ctx, instr, aco_opcode::v_min_i32, dst, true);
1565 } else if (dst.regClass() == s1) {
1566 emit_sop2_instruction(ctx, instr, aco_opcode::s_min_i32, dst, true);
1568 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
1573 if (dst.regClass() == v2b && ctx->program->chip_class >= GFX10) {
1574 emit_vop3a_instruction(ctx, instr, aco_opcode::v_min_u16_e64, dst);
1575 } else if (dst.regClass() == v2b) {
1576 emit_vop2_instruction(ctx, instr, aco_opcode::v_min_u16, dst, true);
1577 } else if (dst.regClass() == v1 && instr->dest.dest.ssa.bit_size == 16) {
1578 emit_vop3p_instruction(ctx, instr, aco_opcode::v_pk_min_u16, dst);
1579 } else if (dst.regClass() == v1) {
1580 emit_vop2_instruction(ctx, instr, aco_opcode::v_min_u32, dst, true);
1581 } else if (dst.regClass() == s1) {
1582 emit_sop2_instruction(ctx, instr, aco_opcode::s_min_u32, dst, true);
1584 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
1589 if (instr->dest.dest.ssa.bit_size == 1) {
1590 emit_boolean_logic(ctx, instr, Builder::s_or, dst);
1591 } else if (dst.regClass() == v1 || dst.regClass() == v2b || dst.regClass() == v1b) {
1592 emit_vop2_instruction(ctx, instr, aco_opcode::v_or_b32, dst, true);
1593 } else if (dst.regClass() == v2) {
1594 emit_vop2_instruction_logic64(ctx, instr, aco_opcode::v_or_b32, dst);
1595 } else if (dst.regClass() == s1) {
1596 emit_sop2_instruction(ctx, instr, aco_opcode::s_or_b32, dst, true);
1597 } else if (dst.regClass() == s2) {
1598 emit_sop2_instruction(ctx, instr, aco_opcode::s_or_b64, dst, true);
1600 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
1605 if (instr->dest.dest.ssa.bit_size == 1) {
1606 emit_boolean_logic(ctx, instr, Builder::s_and, dst);
1607 } else if (dst.regClass() == v1 || dst.regClass() == v2b || dst.regClass() == v1b) {
1608 emit_vop2_instruction(ctx, instr, aco_opcode::v_and_b32, dst, true);
1609 } else if (dst.regClass() == v2) {
1610 emit_vop2_instruction_logic64(ctx, instr, aco_opcode::v_and_b32, dst);
1611 } else if (dst.regClass() == s1) {
1612 emit_sop2_instruction(ctx, instr, aco_opcode::s_and_b32, dst, true);
1613 } else if (dst.regClass() == s2) {
1614 emit_sop2_instruction(ctx, instr, aco_opcode::s_and_b64, dst, true);
1616 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
1621 if (instr->dest.dest.ssa.bit_size == 1) {
1622 emit_boolean_logic(ctx, instr, Builder::s_xor, dst);
1623 } else if (dst.regClass() == v1 || dst.regClass() == v2b || dst.regClass() == v1b) {
1624 emit_vop2_instruction(ctx, instr, aco_opcode::v_xor_b32, dst, true);
1625 } else if (dst.regClass() == v2) {
1626 emit_vop2_instruction_logic64(ctx, instr, aco_opcode::v_xor_b32, dst);
1627 } else if (dst.regClass() == s1) {
1628 emit_sop2_instruction(ctx, instr, aco_opcode::s_xor_b32, dst, true);
1629 } else if (dst.regClass() == s2) {
1630 emit_sop2_instruction(ctx, instr, aco_opcode::s_xor_b64, dst, true);
1632 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
1637 if (dst.regClass() == v2b && ctx->program->chip_class >= GFX10) {
1638 emit_vop3a_instruction(ctx, instr, aco_opcode::v_lshrrev_b16_e64, dst, false, 2, true);
1639 } else if (dst.regClass() == v2b) {
1640 emit_vop2_instruction(ctx, instr, aco_opcode::v_lshrrev_b16, dst, false, true);
1641 } else if (dst.regClass() == v1 && instr->dest.dest.ssa.bit_size == 16) {
1642 emit_vop3p_instruction(ctx, instr, aco_opcode::v_pk_lshrrev_b16, dst, true);
1643 } else if (dst.regClass() == v1) {
1644 emit_vop2_instruction(ctx, instr, aco_opcode::v_lshrrev_b32, dst, false, true);
1645 } else if (dst.regClass() == v2 && ctx->program->chip_class >= GFX8) {
1646 bld.vop3(aco_opcode::v_lshrrev_b64, Definition(dst), get_alu_src(ctx, instr->src[1]),
1647 get_alu_src(ctx, instr->src[0]));
1648 } else if (dst.regClass() == v2) {
1649 emit_vop3a_instruction(ctx, instr, aco_opcode::v_lshr_b64, dst);
1650 } else if (dst.regClass() == s2) {
1651 emit_sop2_instruction(ctx, instr, aco_opcode::s_lshr_b64, dst, true);
1652 } else if (dst.regClass() == s1) {
1653 emit_sop2_instruction(ctx, instr, aco_opcode::s_lshr_b32, dst, true);
1655 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
1660 if (dst.regClass() == v2b && ctx->program->chip_class >= GFX10) {
1661 emit_vop3a_instruction(ctx, instr, aco_opcode::v_lshlrev_b16_e64, dst, false, 2, true);
1662 } else if (dst.regClass() == v2b) {
1663 emit_vop2_instruction(ctx, instr, aco_opcode::v_lshlrev_b16, dst, false, true);
1664 } else if (dst.regClass() == v1 && instr->dest.dest.ssa.bit_size == 16) {
1665 emit_vop3p_instruction(ctx, instr, aco_opcode::v_pk_lshlrev_b16, dst, true);
1666 } else if (dst.regClass() == v1) {
1667 emit_vop2_instruction(ctx, instr, aco_opcode::v_lshlrev_b32, dst, false, true, false,
1669 } else if (dst.regClass() == v2 && ctx->program->chip_class >= GFX8) {
1670 bld.vop3(aco_opcode::v_lshlrev_b64, Definition(dst), get_alu_src(ctx, instr->src[1]),
1671 get_alu_src(ctx, instr->src[0]));
1672 } else if (dst.regClass() == v2) {
1673 emit_vop3a_instruction(ctx, instr, aco_opcode::v_lshl_b64, dst);
1674 } else if (dst.regClass() == s1) {
1675 emit_sop2_instruction(ctx, instr, aco_opcode::s_lshl_b32, dst, true, 1);
1676 } else if (dst.regClass() == s2) {
1677 emit_sop2_instruction(ctx, instr, aco_opcode::s_lshl_b64, dst, true);
1679 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
1684 if (dst.regClass() == v2b && ctx->program->chip_class >= GFX10) {
1685 emit_vop3a_instruction(ctx, instr, aco_opcode::v_ashrrev_i16_e64, dst, false, 2, true);
1686 } else if (dst.regClass() == v2b) {
1687 emit_vop2_instruction(ctx, instr, aco_opcode::v_ashrrev_i16, dst, false, true);
1688 } else if (dst.regClass() == v1 && instr->dest.dest.ssa.bit_size == 16) {
1689 emit_vop3p_instruction(ctx, instr, aco_opcode::v_pk_ashrrev_i16, dst, true);
1690 } else if (dst.regClass() == v1) {
1691 emit_vop2_instruction(ctx, instr, aco_opcode::v_ashrrev_i32, dst, false, true);
1692 } else if (dst.regClass() == v2 && ctx->program->chip_class >= GFX8) {
1693 bld.vop3(aco_opcode::v_ashrrev_i64, Definition(dst), get_alu_src(ctx, instr->src[1]),
1694 get_alu_src(ctx, instr->src[0]));
1695 } else if (dst.regClass() == v2) {
1696 emit_vop3a_instruction(ctx, instr, aco_opcode::v_ashr_i64, dst);
1697 } else if (dst.regClass() == s1) {
1698 emit_sop2_instruction(ctx, instr, aco_opcode::s_ashr_i32, dst, true);
1699 } else if (dst.regClass() == s2) {
1700 emit_sop2_instruction(ctx, instr, aco_opcode::s_ashr_i64, dst, true);
1702 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
1706 case nir_op_find_lsb: {
1707 Temp src = get_alu_src(ctx, instr->src[0]);
1708 if (src.regClass() == s1) {
1709 bld.sop1(aco_opcode::s_ff1_i32_b32, Definition(dst), src);
1710 } else if (src.regClass() == v1) {
1711 emit_vop1_instruction(ctx, instr, aco_opcode::v_ffbl_b32, dst);
1712 } else if (src.regClass() == s2) {
1713 bld.sop1(aco_opcode::s_ff1_i32_b64, Definition(dst), src);
1715 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
1719 case nir_op_ufind_msb:
1720 case nir_op_ifind_msb: {
1721 Temp src = get_alu_src(ctx, instr->src[0]);
1722 if (src.regClass() == s1 || src.regClass() == s2) {
1723 aco_opcode op = src.regClass() == s2
1724 ? (instr->op == nir_op_ufind_msb ? aco_opcode::s_flbit_i32_b64
1725 : aco_opcode::s_flbit_i32_i64)
1726 : (instr->op == nir_op_ufind_msb ? aco_opcode::s_flbit_i32_b32
1727 : aco_opcode::s_flbit_i32);
1728 Temp msb_rev = bld.sop1(op, bld.def(s1), src);
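/* Note: s_flbit counts from the MSB, so the subtraction below converts it to an
 * LSB-relative index; a source of zero makes the subtraction underflow and SCC
 * then selects -1 ("not found"). */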
1730 Builder::Result sub = bld.sop2(aco_opcode::s_sub_u32, bld.def(s1), bld.def(s1, scc),
1731 Operand::c32(src.size() * 32u - 1u), msb_rev);
1732 Temp msb = sub.def(0).getTemp();
1733 Temp carry = sub.def(1).getTemp();
1735 bld.sop2(aco_opcode::s_cselect_b32, Definition(dst), Operand::c32(-1), msb,
1736 bld.scc(carry));
1737 } else if (src.regClass() == v1) {
1738 aco_opcode op =
1739 instr->op == nir_op_ufind_msb ? aco_opcode::v_ffbh_u32 : aco_opcode::v_ffbh_i32;
1740 Temp msb_rev = bld.tmp(v1);
1741 emit_vop1_instruction(ctx, instr, op, msb_rev);
1742 Temp msb = bld.tmp(v1);
1743 Temp carry =
1744 bld.vsub32(Definition(msb), Operand::c32(31u), Operand(msb_rev), true).def(1).getTemp();
1745 bld.vop2(aco_opcode::v_cndmask_b32, Definition(dst), msb, msb_rev, carry);
1746 } else if (src.regClass() == v2) {
1747 aco_opcode op =
1748 instr->op == nir_op_ufind_msb ? aco_opcode::v_ffbh_u32 : aco_opcode::v_ffbh_i32;
1750 Temp lo = bld.tmp(v1), hi = bld.tmp(v1);
1751 bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), src);
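/* Search each 32-bit half: the low half's from-MSB index needs a +32 offset (the
 * saturating add keeps the "not found" value of -1), and the high half's result
 * takes precedence below whenever it found a set bit. */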
1753 lo = uadd32_sat(bld, bld.def(v1), bld.copy(bld.def(s1), Operand::c32(32u)),
1754 bld.vop1(op, bld.def(v1), lo));
1755 hi = bld.vop1(op, bld.def(v1), hi);
1756 Temp found_hi = bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(bld.lm), Operand::c32(-1), hi);
1758 Temp msb_rev = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), lo, hi, found_hi);
1760 Temp msb = bld.tmp(v1);
1761 Temp carry =
1762 bld.vsub32(Definition(msb), Operand::c32(63u), Operand(msb_rev), true).def(1).getTemp();
1763 bld.vop2(aco_opcode::v_cndmask_b32, Definition(dst), msb, msb_rev, carry);
1765 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
1769 case nir_op_bitfield_reverse: {
1770 if (dst.regClass() == s1) {
1771 bld.sop1(aco_opcode::s_brev_b32, Definition(dst), get_alu_src(ctx, instr->src[0]));
1772 } else if (dst.regClass() == v1) {
1773 bld.vop1(aco_opcode::v_bfrev_b32, Definition(dst), get_alu_src(ctx, instr->src[0]));
1775 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
1780 if (dst.regClass() == s1) {
1781 emit_sop2_instruction(ctx, instr, aco_opcode::s_add_u32, dst, true);
1783 } else if (dst.bytes() <= 2 && ctx->program->chip_class >= GFX10) {
1784 emit_vop3a_instruction(ctx, instr, aco_opcode::v_add_u16_e64, dst);
1786 } else if (dst.bytes() <= 2 && ctx->program->chip_class >= GFX8) {
1787 emit_vop2_instruction(ctx, instr, aco_opcode::v_add_u16, dst, true);
1789 } else if (dst.regClass() == v1 && instr->dest.dest.ssa.bit_size == 16) {
1790 emit_vop3p_instruction(ctx, instr, aco_opcode::v_pk_add_u16, dst);
1794 Temp src0 = get_alu_src(ctx, instr->src[0]);
1795 Temp src1 = get_alu_src(ctx, instr->src[1]);
1796 if (dst.type() == RegType::vgpr && dst.bytes() <= 4) {
1797 bld.vadd32(Definition(dst), Operand(src0), Operand(src1));
1801 assert(src0.size() == 2 && src1.size() == 2);
1802 Temp src00 = bld.tmp(src0.type(), 1);
1803 Temp src01 = bld.tmp(dst.type(), 1);
1804 bld.pseudo(aco_opcode::p_split_vector, Definition(src00), Definition(src01), src0);
1805 Temp src10 = bld.tmp(src1.type(), 1);
1806 Temp src11 = bld.tmp(dst.type(), 1);
1807 bld.pseudo(aco_opcode::p_split_vector, Definition(src10), Definition(src11), src1);
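/* 64-bit addition: add the low halves first, then propagate the carry into the
 * high-half add below. */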
1809 if (dst.regClass() == s2) {
1810 Temp carry = bld.tmp(s1);
1811 Temp dst0 =
1812 bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.scc(Definition(carry)), src00, src10);
1813 Temp dst1 = bld.sop2(aco_opcode::s_addc_u32, bld.def(s1), bld.def(s1, scc), src01, src11,
1814 bld.scc(carry));
1815 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), dst0, dst1);
1816 } else if (dst.regClass() == v2) {
1817 Temp dst0 = bld.tmp(v1);
1818 Temp carry = bld.vadd32(Definition(dst0), src00, src10, true).def(1).getTemp();
1819 Temp dst1 = bld.vadd32(bld.def(v1), src01, src11, false, carry);
1820 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), dst0, dst1);
1822 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
1826 case nir_op_uadd_sat: {
1827 if (dst.regClass() == v1 && instr->dest.dest.ssa.bit_size == 16) {
1828 Instruction* add_instr =
1829 emit_vop3p_instruction(ctx, instr, aco_opcode::v_pk_add_u16, dst);
1830 add_instr->vop3p().clamp = 1;
1833 Temp src0 = get_alu_src(ctx, instr->src[0]);
1834 Temp src1 = get_alu_src(ctx, instr->src[1]);
1835 if (dst.regClass() == s1) {
1836 Temp tmp = bld.tmp(s1), carry = bld.tmp(s1);
1837 bld.sop2(aco_opcode::s_add_u32, Definition(tmp), bld.scc(Definition(carry)), src0, src1);
1838 bld.sop2(aco_opcode::s_cselect_b32, Definition(dst), Operand::c32(-1), tmp,
1839 bld.scc(carry));
1841 } else if (dst.regClass() == v2b) {
1842 Instruction* add_instr;
1843 if (ctx->program->chip_class >= GFX10) {
1844 add_instr = bld.vop3(aco_opcode::v_add_u16_e64, Definition(dst), src0, src1).instr;
1846 if (src1.type() == RegType::sgpr)
1847 std::swap(src0, src1);
1848 add_instr =
1849 bld.vop2_e64(aco_opcode::v_add_u16, Definition(dst), src0, as_vgpr(ctx, src1)).instr;
1851 add_instr->vop3().clamp = 1;
1853 } else if (dst.regClass() == v1) {
1854 uadd32_sat(bld, Definition(dst), src0, src1);
1858 assert(src0.size() == 2 && src1.size() == 2);
1860 Temp src00 = bld.tmp(src0.type(), 1);
1861 Temp src01 = bld.tmp(src0.type(), 1);
1862 bld.pseudo(aco_opcode::p_split_vector, Definition(src00), Definition(src01), src0);
1863 Temp src10 = bld.tmp(src1.type(), 1);
1864 Temp src11 = bld.tmp(src1.type(), 1);
1865 bld.pseudo(aco_opcode::p_split_vector, Definition(src10), Definition(src11), src1);
1867 if (dst.regClass() == s2) {
1868 Temp carry0 = bld.tmp(s1);
1869 Temp carry1 = bld.tmp(s1);
1871 Temp no_sat0 =
1872 bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.scc(Definition(carry0)), src00, src10);
1873 Temp no_sat1 = bld.sop2(aco_opcode::s_addc_u32, bld.def(s1), bld.scc(Definition(carry1)),
1874 src01, src11, bld.scc(carry0));
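/* If the high-half add carries out, the true sum does not fit in 64 bits, so select
 * all-ones (the saturated value) below instead of the wrapped result. */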
1876 Temp no_sat = bld.pseudo(aco_opcode::p_create_vector, bld.def(s2), no_sat0, no_sat1);
1878 bld.sop2(aco_opcode::s_cselect_b64, Definition(dst), Operand::c64(-1), no_sat,
1879 bld.scc(carry1));
1880 } else if (dst.regClass() == v2) {
1881 Temp no_sat0 = bld.tmp(v1);
1882 Temp dst0 = bld.tmp(v1);
1883 Temp dst1 = bld.tmp(v1);
1885 Temp carry0 = bld.vadd32(Definition(no_sat0), src00, src10, true).def(1).getTemp();
1888 if (ctx->program->chip_class >= GFX8) {
1889 carry1 = bld.tmp(bld.lm);
1890 bld.vop2_e64(aco_opcode::v_addc_co_u32, Definition(dst1), Definition(carry1),
1891 as_vgpr(ctx, src01), as_vgpr(ctx, src11), carry0)
1895 Temp no_sat1 = bld.tmp(v1);
1896 carry1 = bld.vadd32(Definition(no_sat1), src01, src11, true, carry0).def(1).getTemp();
1897 bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(dst1), no_sat1, Operand::c32(-1),
1901 bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(dst0), no_sat0, Operand::c32(-1),
1903 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), dst0, dst1);
1905 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
1909 case nir_op_iadd_sat: {
1910 if (dst.regClass() == v1 && instr->dest.dest.ssa.bit_size == 16) {
1911 Instruction* add_instr =
1912 emit_vop3p_instruction(ctx, instr, aco_opcode::v_pk_add_i16, dst);
1913 add_instr->vop3p().clamp = 1;
1916 Temp src0 = get_alu_src(ctx, instr->src[0]);
1917 Temp src1 = get_alu_src(ctx, instr->src[1]);
1918 if (dst.regClass() == s1) {
1919 Temp cond = bld.sopc(aco_opcode::s_cmp_lt_i32, bld.def(s1, scc), src1, Operand::zero());
1920 Temp bound = bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.scc(bld.def(s1, scc)),
1921 Operand::c32(INT32_MAX), cond);
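/* bound is INT32_MAX when src1 >= 0 and wraps to INT32_MIN when src1 < 0;
 * s_add_i32 sets SCC on signed overflow, in which case the bound is selected. */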
1922 Temp overflow = bld.tmp(s1);
1923 Temp add =
1924 bld.sop2(aco_opcode::s_add_i32, bld.def(s1), bld.scc(Definition(overflow)), src0, src1);
1925 bld.sop2(aco_opcode::s_cselect_b32, Definition(dst), bound, add, bld.scc(overflow));
1929 src1 = as_vgpr(ctx, src1);
1931 if (dst.regClass() == v2b) {
1932 Instruction* add_instr =
1933 bld.vop3(aco_opcode::v_add_i16, Definition(dst), src0, src1).instr;
1934 add_instr->vop3().clamp = 1;
1935 } else if (dst.regClass() == v1) {
1936 Instruction* add_instr =
1937 bld.vop3(aco_opcode::v_add_i32, Definition(dst), src0, src1).instr;
1938 add_instr->vop3().clamp = 1;
1940 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
1944 case nir_op_uadd_carry: {
1945 Temp src0 = get_alu_src(ctx, instr->src[0]);
1946 Temp src1 = get_alu_src(ctx, instr->src[1]);
1947 if (dst.regClass() == s1) {
1948 bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.scc(Definition(dst)), src0, src1);
1951 if (dst.regClass() == v1) {
1952 Temp carry = bld.vadd32(bld.def(v1), src0, src1, true).def(1).getTemp();
1953 bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(dst), Operand::zero(), Operand::c32(1u),
1954 carry);
1958 Temp src00 = bld.tmp(src0.type(), 1);
1959 Temp src01 = bld.tmp(dst.type(), 1);
1960 bld.pseudo(aco_opcode::p_split_vector, Definition(src00), Definition(src01), src0);
1961 Temp src10 = bld.tmp(src1.type(), 1);
1962 Temp src11 = bld.tmp(dst.type(), 1);
1963 bld.pseudo(aco_opcode::p_split_vector, Definition(src10), Definition(src11), src1);
1964 if (dst.regClass() == s2) {
1965 Temp carry = bld.tmp(s1);
1966 bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.scc(Definition(carry)), src00, src10);
1967 carry = bld.sop2(aco_opcode::s_addc_u32, bld.def(s1), bld.scc(bld.def(s1)), src01, src11,
1968 bld.scc(carry))
1969 .def(1)
1970 .getTemp();
1971 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), carry, Operand::zero());
1972 } else if (dst.regClass() == v2) {
1973 Temp carry = bld.vadd32(bld.def(v1), src00, src10, true).def(1).getTemp();
1974 carry = bld.vadd32(bld.def(v1), src01, src11, true, carry).def(1).getTemp();
1975 carry = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), Operand::zero(),
1976 Operand::c32(1u), carry);
1977 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), carry, Operand::zero());
1979 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
1984 if (dst.regClass() == s1) {
1985 emit_sop2_instruction(ctx, instr, aco_opcode::s_sub_i32, dst, true);
1987 } else if (dst.regClass() == v1 && instr->dest.dest.ssa.bit_size == 16) {
1988 emit_vop3p_instruction(ctx, instr, aco_opcode::v_pk_sub_u16, dst);
1992 Temp src0 = get_alu_src(ctx, instr->src[0]);
1993 Temp src1 = get_alu_src(ctx, instr->src[1]);
1994 if (dst.regClass() == v1) {
1995 bld.vsub32(Definition(dst), src0, src1);
1997 } else if (dst.bytes() <= 2) {
1998 if (ctx->program->chip_class >= GFX10)
1999 bld.vop3(aco_opcode::v_sub_u16_e64, Definition(dst), src0, src1);
2000 else if (src1.type() == RegType::sgpr)
2001 bld.vop2(aco_opcode::v_subrev_u16, Definition(dst), src1, as_vgpr(ctx, src0));
2002 else if (ctx->program->chip_class >= GFX8)
2003 bld.vop2(aco_opcode::v_sub_u16, Definition(dst), src0, as_vgpr(ctx, src1));
2005 bld.vsub32(Definition(dst), src0, src1);
2009 Temp src00 = bld.tmp(src0.type(), 1);
2010 Temp src01 = bld.tmp(dst.type(), 1);
2011 bld.pseudo(aco_opcode::p_split_vector, Definition(src00), Definition(src01), src0);
2012 Temp src10 = bld.tmp(src1.type(), 1);
2013 Temp src11 = bld.tmp(dst.type(), 1);
2014 bld.pseudo(aco_opcode::p_split_vector, Definition(src10), Definition(src11), src1);
2015 if (dst.regClass() == s2) {
2016 Temp borrow = bld.tmp(s1);
2017 Temp dst0 =
2018 bld.sop2(aco_opcode::s_sub_u32, bld.def(s1), bld.scc(Definition(borrow)), src00, src10);
2019 Temp dst1 = bld.sop2(aco_opcode::s_subb_u32, bld.def(s1), bld.def(s1, scc), src01, src11,
2020 bld.scc(borrow));
2021 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), dst0, dst1);
2022 } else if (dst.regClass() == v2) {
2023 Temp lower = bld.tmp(v1);
2024 Temp borrow = bld.vsub32(Definition(lower), src00, src10, true).def(1).getTemp();
2025 Temp upper = bld.vsub32(bld.def(v1), src01, src11, false, borrow);
2026 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper);
2028 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
2032 case nir_op_usub_borrow: {
2033 Temp src0 = get_alu_src(ctx, instr->src[0]);
2034 Temp src1 = get_alu_src(ctx, instr->src[1]);
2035 if (dst.regClass() == s1) {
2036 bld.sop2(aco_opcode::s_sub_u32, bld.def(s1), bld.scc(Definition(dst)), src0, src1);
2038 } else if (dst.regClass() == v1) {
2039 Temp borrow = bld.vsub32(bld.def(v1), src0, src1, true).def(1).getTemp();
2040 bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(dst), Operand::zero(), Operand::c32(1u),
2041 borrow);
2045 Temp src00 = bld.tmp(src0.type(), 1);
2046 Temp src01 = bld.tmp(dst.type(), 1);
2047 bld.pseudo(aco_opcode::p_split_vector, Definition(src00), Definition(src01), src0);
2048 Temp src10 = bld.tmp(src1.type(), 1);
2049 Temp src11 = bld.tmp(dst.type(), 1);
2050 bld.pseudo(aco_opcode::p_split_vector, Definition(src10), Definition(src11), src1);
2051 if (dst.regClass() == s2) {
2052 Temp borrow = bld.tmp(s1);
2053 bld.sop2(aco_opcode::s_sub_u32, bld.def(s1), bld.scc(Definition(borrow)), src00, src10);
2054 borrow = bld.sop2(aco_opcode::s_subb_u32, bld.def(s1), bld.scc(bld.def(s1)), src01, src11,
2055 bld.scc(borrow))
2056 .def(1)
2057 .getTemp();
2058 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), borrow, Operand::zero());
2059 } else if (dst.regClass() == v2) {
2060 Temp borrow = bld.vsub32(bld.def(v1), src00, src10, true).def(1).getTemp();
2061 borrow = bld.vsub32(bld.def(v1), src01, src11, true, Operand(borrow)).def(1).getTemp();
2062 borrow = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), Operand::zero(),
2063 Operand::c32(1u), borrow);
2064 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), borrow, Operand::zero());
2066 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
2071 if (dst.bytes() <= 2 && ctx->program->chip_class >= GFX10) {
2072 emit_vop3a_instruction(ctx, instr, aco_opcode::v_mul_lo_u16_e64, dst);
2073 } else if (dst.bytes() <= 2 && ctx->program->chip_class >= GFX8) {
2074 emit_vop2_instruction(ctx, instr, aco_opcode::v_mul_lo_u16, dst, true);
2075 } else if (dst.regClass() == v1 && instr->dest.dest.ssa.bit_size == 16) {
2076 emit_vop3p_instruction(ctx, instr, aco_opcode::v_pk_mul_lo_u16, dst);
2077 } else if (dst.type() == RegType::vgpr) {
2078 uint32_t src0_ub = get_alu_src_ub(ctx, instr, 0);
2079 uint32_t src1_ub = get_alu_src_ub(ctx, instr, 1);
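/* If both sources are provably below 2^24, v_mul_u32_u24 computes the same low
 * 32 bits as v_mul_lo_u32 but is a cheaper VOP2 encoding. */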
2081 if (src0_ub <= 0xffffff && src1_ub <= 0xffffff) {
2082 bool nuw_16bit = src0_ub <= 0xffff && src1_ub <= 0xffff && src0_ub * src1_ub <= 0xffff;
2083 emit_vop2_instruction(ctx, instr, aco_opcode::v_mul_u32_u24, dst,
2084 true /* commutative */, false, false, nuw_16bit);
2085 } else if (nir_src_is_const(instr->src[0].src)) {
2086 bld.v_mul_imm(Definition(dst), get_alu_src(ctx, instr->src[1]),
2087 nir_src_as_uint(instr->src[0].src), false);
2088 } else if (nir_src_is_const(instr->src[1].src)) {
2089 bld.v_mul_imm(Definition(dst), get_alu_src(ctx, instr->src[0]),
2090 nir_src_as_uint(instr->src[1].src), false);
2092 emit_vop3a_instruction(ctx, instr, aco_opcode::v_mul_lo_u32, dst);
2094 } else if (dst.regClass() == s1) {
2095 emit_sop2_instruction(ctx, instr, aco_opcode::s_mul_i32, dst, false);
2097 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
2101 case nir_op_umul_high: {
2102 if (dst.regClass() == s1 && ctx->options->chip_class >= GFX9) {
2103 emit_sop2_instruction(ctx, instr, aco_opcode::s_mul_hi_u32, dst, false);
2104 } else if (dst.bytes() == 4) {
2105 uint32_t src0_ub = get_alu_src_ub(ctx, instr, 0);
2106 uint32_t src1_ub = get_alu_src_ub(ctx, instr, 1);
2108 Temp tmp = dst.regClass() == s1 ? bld.tmp(v1) : dst;
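/* There is no scalar mul_hi before GFX9, so compute in a VGPR and copy the
 * (uniform) result back via p_as_uniform when an SGPR destination is needed. */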
2109 if (src0_ub <= 0xffffff && src1_ub <= 0xffffff) {
2110 emit_vop2_instruction(ctx, instr, aco_opcode::v_mul_hi_u32_u24, tmp, true);
2112 emit_vop3a_instruction(ctx, instr, aco_opcode::v_mul_hi_u32, tmp);
2115 if (dst.regClass() == s1)
2116 bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), tmp);
2118 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
2122 case nir_op_imul_high: {
2123 if (dst.regClass() == v1) {
2124 emit_vop3a_instruction(ctx, instr, aco_opcode::v_mul_hi_i32, dst);
2125 } else if (dst.regClass() == s1 && ctx->options->chip_class >= GFX9) {
2126 emit_sop2_instruction(ctx, instr, aco_opcode::s_mul_hi_i32, dst, false);
2127 } else if (dst.regClass() == s1) {
2128 Temp tmp = bld.vop3(aco_opcode::v_mul_hi_i32, bld.def(v1), get_alu_src(ctx, instr->src[0]),
2129 as_vgpr(ctx, get_alu_src(ctx, instr->src[1])));
2130 bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), tmp);
2132 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
2137 if (dst.regClass() == v2b) {
2138 emit_vop2_instruction(ctx, instr, aco_opcode::v_mul_f16, dst, true);
2139 } else if (dst.regClass() == v1 && instr->dest.dest.ssa.bit_size == 16) {
2140 emit_vop3p_instruction(ctx, instr, aco_opcode::v_pk_mul_f16, dst);
2141 } else if (dst.regClass() == v1) {
2142 emit_vop2_instruction(ctx, instr, aco_opcode::v_mul_f32, dst, true);
2143 } else if (dst.regClass() == v2) {
2144 emit_vop3a_instruction(ctx, instr, aco_opcode::v_mul_f64, dst);
2146 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
2150 case nir_op_fmulz: {
2151 if (dst.regClass() == v1) {
2152 emit_vop2_instruction(ctx, instr, aco_opcode::v_mul_legacy_f32, dst, true);
2154 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
2159 if (dst.regClass() == v2b) {
2160 emit_vop2_instruction(ctx, instr, aco_opcode::v_add_f16, dst, true);
2161 } else if (dst.regClass() == v1 && instr->dest.dest.ssa.bit_size == 16) {
2162 emit_vop3p_instruction(ctx, instr, aco_opcode::v_pk_add_f16, dst);
2163 } else if (dst.regClass() == v1) {
2164 emit_vop2_instruction(ctx, instr, aco_opcode::v_add_f32, dst, true);
2165 } else if (dst.regClass() == v2) {
2166 emit_vop3a_instruction(ctx, instr, aco_opcode::v_add_f64, dst);
2168 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
2173 if (dst.regClass() == v1 && instr->dest.dest.ssa.bit_size == 16) {
2174 Instruction* add = emit_vop3p_instruction(ctx, instr, aco_opcode::v_pk_add_f16, dst);
2175 VOP3P_instruction& sub = add->vop3p();
2176 sub.neg_lo[1] = true;
2177 sub.neg_hi[1] = true;
2181 Temp src0 = get_alu_src(ctx, instr->src[0]);
2182 Temp src1 = get_alu_src(ctx, instr->src[1]);
2183 if (dst.regClass() == v2b) {
2184 if (src1.type() == RegType::vgpr || src0.type() != RegType::vgpr)
2185 emit_vop2_instruction(ctx, instr, aco_opcode::v_sub_f16, dst, false);
2186 else
2187 emit_vop2_instruction(ctx, instr, aco_opcode::v_subrev_f16, dst, true);
2188 } else if (dst.regClass() == v1) {
2189 if (src1.type() == RegType::vgpr || src0.type() != RegType::vgpr)
2190 emit_vop2_instruction(ctx, instr, aco_opcode::v_sub_f32, dst, false);
2191 else
2192 emit_vop2_instruction(ctx, instr, aco_opcode::v_subrev_f32, dst, true);
2193 } else if (dst.regClass() == v2) {
2194 Instruction* add = bld.vop3(aco_opcode::v_add_f64, Definition(dst), as_vgpr(ctx, src0),
2195 as_vgpr(ctx, src1));
2196 add->vop3().neg[1] = true;
2198 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
2203 if (dst.regClass() == v2b) {
2204 emit_vop3a_instruction(ctx, instr, aco_opcode::v_fma_f16, dst, false, 3);
2205 } else if (dst.regClass() == v1 && instr->dest.dest.ssa.bit_size == 16) {
2206 assert(instr->dest.dest.ssa.num_components == 2);
2208 Temp src0 = as_vgpr(ctx, get_alu_src_vop3p(ctx, instr->src[0]));
2209 Temp src1 = as_vgpr(ctx, get_alu_src_vop3p(ctx, instr->src[1]));
2210 Temp src2 = as_vgpr(ctx, get_alu_src_vop3p(ctx, instr->src[2]));
2212 /* swizzle to opsel: all swizzles are either 0 (x) or 1 (y) */
2213 unsigned opsel_lo = 0, opsel_hi = 0;
2214 for (unsigned i = 0; i < 3; i++) {
2215 opsel_lo |= (instr->src[i].swizzle[0] & 1) << i;
2216 opsel_hi |= (instr->src[i].swizzle[1] & 1) << i;
2219 bld.vop3p(aco_opcode::v_pk_fma_f16, Definition(dst), src0, src1, src2, opsel_lo, opsel_hi);
2220 } else if (dst.regClass() == v1) {
2221 emit_vop3a_instruction(ctx, instr, aco_opcode::v_fma_f32, dst,
2222 ctx->block->fp_mode.must_flush_denorms32, 3);
2223 } else if (dst.regClass() == v2) {
2224 emit_vop3a_instruction(ctx, instr, aco_opcode::v_fma_f64, dst, false, 3);
2226 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
2230 case nir_op_ffmaz: {
2231 if (dst.regClass() == v1) {
2232 emit_vop3a_instruction(ctx, instr, aco_opcode::v_fma_legacy_f32, dst,
2233 ctx->block->fp_mode.must_flush_denorms32, 3);
2235 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
2240 if (dst.regClass() == v2b) {
2241 // TODO: check fp_mode.must_flush_denorms16_64
2242 emit_vop2_instruction(ctx, instr, aco_opcode::v_max_f16, dst, true);
2243 } else if (dst.regClass() == v1 && instr->dest.dest.ssa.bit_size == 16) {
2244 emit_vop3p_instruction(ctx, instr, aco_opcode::v_pk_max_f16, dst);
2245 } else if (dst.regClass() == v1) {
2246 emit_vop2_instruction(ctx, instr, aco_opcode::v_max_f32, dst, true, false,
2247 ctx->block->fp_mode.must_flush_denorms32);
2248 } else if (dst.regClass() == v2) {
2249 emit_vop3a_instruction(ctx, instr, aco_opcode::v_max_f64, dst,
2250 ctx->block->fp_mode.must_flush_denorms16_64);
2252 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
2257 if (dst.regClass() == v2b) {
2258 // TODO: check fp_mode.must_flush_denorms16_64
2259 emit_vop2_instruction(ctx, instr, aco_opcode::v_min_f16, dst, true);
2260 } else if (dst.regClass() == v1 && instr->dest.dest.ssa.bit_size == 16) {
2261 emit_vop3p_instruction(ctx, instr, aco_opcode::v_pk_min_f16, dst, true);
2262 } else if (dst.regClass() == v1) {
2263 emit_vop2_instruction(ctx, instr, aco_opcode::v_min_f32, dst, true, false,
2264 ctx->block->fp_mode.must_flush_denorms32);
2265 } else if (dst.regClass() == v2) {
2266 emit_vop3a_instruction(ctx, instr, aco_opcode::v_min_f64, dst,
2267 ctx->block->fp_mode.must_flush_denorms16_64);
2269 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
2273 case nir_op_sdot_4x8_iadd: {
2274 emit_idot_instruction(ctx, instr, aco_opcode::v_dot4_i32_i8, dst, false);
2277 case nir_op_sdot_4x8_iadd_sat: {
2278 emit_idot_instruction(ctx, instr, aco_opcode::v_dot4_i32_i8, dst, true);
2281 case nir_op_udot_4x8_uadd: {
2282 emit_idot_instruction(ctx, instr, aco_opcode::v_dot4_u32_u8, dst, false);
2285 case nir_op_udot_4x8_uadd_sat: {
2286 emit_idot_instruction(ctx, instr, aco_opcode::v_dot4_u32_u8, dst, true);
2289 case nir_op_sdot_2x16_iadd: {
2290 emit_idot_instruction(ctx, instr, aco_opcode::v_dot2_i32_i16, dst, false);
2293 case nir_op_sdot_2x16_iadd_sat: {
2294 emit_idot_instruction(ctx, instr, aco_opcode::v_dot2_i32_i16, dst, true);
2297 case nir_op_udot_2x16_uadd: {
2298 emit_idot_instruction(ctx, instr, aco_opcode::v_dot2_u32_u16, dst, false);
2301 case nir_op_udot_2x16_uadd_sat: {
2302 emit_idot_instruction(ctx, instr, aco_opcode::v_dot2_u32_u16, dst, true);
2305 case nir_op_cube_face_coord_amd: {
2306 Temp in = get_alu_src(ctx, instr->src[0], 3);
2307 Temp src[3] = {emit_extract_vector(ctx, in, 0, v1), emit_extract_vector(ctx, in, 1, v1),
2308 emit_extract_vector(ctx, in, 2, v1)};
2309 Temp ma = bld.vop3(aco_opcode::v_cubema_f32, bld.def(v1), src[0], src[1], src[2]);
2310 ma = bld.vop1(aco_opcode::v_rcp_f32, bld.def(v1), ma);
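/* v_cubema_f32 returns 2 * the major axis, so its reciprocal already folds in the
 * divide by two; sc/tc are then scaled and biased by 0.5 to land in [0, 1]. */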
2311 Temp sc = bld.vop3(aco_opcode::v_cubesc_f32, bld.def(v1), src[0], src[1], src[2]);
2312 Temp tc = bld.vop3(aco_opcode::v_cubetc_f32, bld.def(v1), src[0], src[1], src[2]);
2313 sc = bld.vop2(aco_opcode::v_add_f32, bld.def(v1), Operand::c32(0x3f000000u /*0.5*/),
2314 bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), sc, ma));
2315 tc = bld.vop2(aco_opcode::v_add_f32, bld.def(v1), Operand::c32(0x3f000000u /*0.5*/),
2316 bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), tc, ma));
2317 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), sc, tc);
2320 case nir_op_cube_face_index_amd: {
2321 Temp in = get_alu_src(ctx, instr->src[0], 3);
2322 Temp src[3] = {emit_extract_vector(ctx, in, 0, v1), emit_extract_vector(ctx, in, 1, v1),
2323 emit_extract_vector(ctx, in, 2, v1)};
2324 bld.vop3(aco_opcode::v_cubeid_f32, Definition(dst), src[0], src[1], src[2]);
2327 case nir_op_bcsel: {
2328 emit_bcsel(ctx, instr, dst);
2332 if (dst.regClass() == v2b) {
2333 emit_vop1_instruction(ctx, instr, aco_opcode::v_rsq_f16, dst);
2334 } else if (dst.regClass() == v1) {
2335 Temp src = get_alu_src(ctx, instr->src[0]);
2336 emit_rsq(ctx, bld, Definition(dst), src);
2337 } else if (dst.regClass() == v2) {
2338 /* Lowered at NIR level for precision reasons. */
2339 emit_vop1_instruction(ctx, instr, aco_opcode::v_rsq_f64, dst);
2341 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
2346 if (dst.regClass() == v1 && instr->dest.dest.ssa.bit_size == 16) {
2347 Temp src = get_alu_src_vop3p(ctx, instr->src[0]);
2348 Instruction* vop3p =
2349 bld.vop3p(aco_opcode::v_pk_mul_f16, Definition(dst), src, Operand::c16(0x3C00),
2350 instr->src[0].swizzle[0] & 1, instr->src[0].swizzle[1] & 1);
2351 vop3p->vop3p().neg_lo[0] = true;
2352 vop3p->vop3p().neg_hi[0] = true;
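/* Packed negate: multiply by +1.0 with the neg_lo/neg_hi modifiers set on the
 * source, which flips the sign of both f16 halves. */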
2355 Temp src = get_alu_src(ctx, instr->src[0]);
2356 if (dst.regClass() == v2b) {
2357 bld.vop2(aco_opcode::v_mul_f16, Definition(dst), Operand::c16(0xbc00u), as_vgpr(ctx, src));
2358 } else if (dst.regClass() == v1) {
2359 bld.vop2(aco_opcode::v_mul_f32, Definition(dst), Operand::c32(0xbf800000u),
2360 as_vgpr(ctx, src));
2361 } else if (dst.regClass() == v2) {
2362 if (ctx->block->fp_mode.must_flush_denorms16_64)
2363 src = bld.vop3(aco_opcode::v_mul_f64, bld.def(v2), Operand::c64(0x3FF0000000000000),
2364 as_vgpr(ctx, src));
2365 Temp upper = bld.tmp(v1), lower = bld.tmp(v1);
2366 bld.pseudo(aco_opcode::p_split_vector, Definition(lower), Definition(upper), src);
2367 upper = bld.vop2(aco_opcode::v_xor_b32, bld.def(v1), Operand::c32(0x80000000u), upper);
2368 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper);
2370 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
2375 Temp src = get_alu_src(ctx, instr->src[0]);
2376 if (dst.regClass() == v2b) {
2377 Instruction* mul = bld.vop2_e64(aco_opcode::v_mul_f16, Definition(dst),
2378 Operand::c16(0x3c00), as_vgpr(ctx, src))
2379 .instr;
2380 mul->vop3().abs[1] = true;
2381 } else if (dst.regClass() == v1) {
2382 Instruction* mul = bld.vop2_e64(aco_opcode::v_mul_f32, Definition(dst),
2383 Operand::c32(0x3f800000u), as_vgpr(ctx, src))
2384 .instr;
2385 mul->vop3().abs[1] = true;
2386 } else if (dst.regClass() == v2) {
2387 if (ctx->block->fp_mode.must_flush_denorms16_64)
2388 src = bld.vop3(aco_opcode::v_mul_f64, bld.def(v2), Operand::c64(0x3FF0000000000000),
2389 as_vgpr(ctx, src));
2390 Temp upper = bld.tmp(v1), lower = bld.tmp(v1);
2391 bld.pseudo(aco_opcode::p_split_vector, Definition(lower), Definition(upper), src);
2392 upper = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand::c32(0x7FFFFFFFu), upper);
2393 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper);
2395 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
2400 if (dst.regClass() == v1 && instr->dest.dest.ssa.bit_size == 16) {
2401 Temp src = get_alu_src_vop3p(ctx, instr->src[0]);
2402 Instruction* vop3p =
2403 bld.vop3p(aco_opcode::v_pk_mul_f16, Definition(dst), src, Operand::c16(0x3C00),
2404 instr->src[0].swizzle[0] & 1, instr->src[0].swizzle[1] & 1);
2405 vop3p->vop3p().clamp = true;
2408 Temp src = get_alu_src(ctx, instr->src[0]);
2409 if (dst.regClass() == v2b) {
2410 bld.vop3(aco_opcode::v_med3_f16, Definition(dst), Operand::c16(0u), Operand::c16(0x3c00),
2411 src);
2412 } else if (dst.regClass() == v1) {
2413 bld.vop3(aco_opcode::v_med3_f32, Definition(dst), Operand::zero(),
2414 Operand::c32(0x3f800000u), src);
2415 /* apparently, it is not necessary to flush denorms if this instruction is used with these
2416 * operands */
2417 // TODO: confirm that this holds under any circumstances
2418 } else if (dst.regClass() == v2) {
2419 Instruction* add = bld.vop3(aco_opcode::v_add_f64, Definition(dst), src, Operand::zero());
2420 add->vop3().clamp = true;
2422 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
2426 case nir_op_flog2: {
2427 if (dst.regClass() == v2b) {
2428 emit_vop1_instruction(ctx, instr, aco_opcode::v_log_f16, dst);
2429 } else if (dst.regClass() == v1) {
2430 Temp src = get_alu_src(ctx, instr->src[0]);
2431 emit_log2(ctx, bld, Definition(dst), src);
2433 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
2438 if (dst.regClass() == v2b) {
2439 emit_vop1_instruction(ctx, instr, aco_opcode::v_rcp_f16, dst);
2440 } else if (dst.regClass() == v1) {
2441 Temp src = get_alu_src(ctx, instr->src[0]);
2442 emit_rcp(ctx, bld, Definition(dst), src);
2443 } else if (dst.regClass() == v2) {
2444 /* Lowered at NIR level for precision reasons. */
2445 emit_vop1_instruction(ctx, instr, aco_opcode::v_rcp_f64, dst);
2447 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
2451 case nir_op_fexp2: {
2452 if (dst.regClass() == v2b) {
2453 emit_vop1_instruction(ctx, instr, aco_opcode::v_exp_f16, dst);
2454 } else if (dst.regClass() == v1) {
2455 emit_vop1_instruction(ctx, instr, aco_opcode::v_exp_f32, dst);
2457 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
2461 case nir_op_fsqrt: {
2462 if (dst.regClass() == v2b) {
2463 emit_vop1_instruction(ctx, instr, aco_opcode::v_sqrt_f16, dst);
2464 } else if (dst.regClass() == v1) {
2465 Temp src = get_alu_src(ctx, instr->src[0]);
2466 emit_sqrt(ctx, bld, Definition(dst), src);
2467 } else if (dst.regClass() == v2) {
2468 /* Lowered at NIR level for precision reasons. */
2469 emit_vop1_instruction(ctx, instr, aco_opcode::v_sqrt_f64, dst);
2471 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
2475 case nir_op_ffract: {
2476 if (dst.regClass() == v2b) {
2477 emit_vop1_instruction(ctx, instr, aco_opcode::v_fract_f16, dst);
2478 } else if (dst.regClass() == v1) {
2479 emit_vop1_instruction(ctx, instr, aco_opcode::v_fract_f32, dst);
2480 } else if (dst.regClass() == v2) {
2481 emit_vop1_instruction(ctx, instr, aco_opcode::v_fract_f64, dst);
2483 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
2487 case nir_op_ffloor: {
2488 if (dst.regClass() == v2b) {
2489 emit_vop1_instruction(ctx, instr, aco_opcode::v_floor_f16, dst);
2490 } else if (dst.regClass() == v1) {
2491 emit_vop1_instruction(ctx, instr, aco_opcode::v_floor_f32, dst);
2492 } else if (dst.regClass() == v2) {
2493 Temp src = get_alu_src(ctx, instr->src[0]);
2494 emit_floor_f64(ctx, bld, Definition(dst), src);
2496 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
2500 case nir_op_fceil: {
2501 if (dst.regClass() == v2b) {
2502 emit_vop1_instruction(ctx, instr, aco_opcode::v_ceil_f16, dst);
2503 } else if (dst.regClass() == v1) {
2504 emit_vop1_instruction(ctx, instr, aco_opcode::v_ceil_f32, dst);
2505 } else if (dst.regClass() == v2) {
2506 if (ctx->options->chip_class >= GFX7) {
2507 emit_vop1_instruction(ctx, instr, aco_opcode::v_ceil_f64, dst);
2509 /* GFX6 doesn't support V_CEIL_F64, lower it. */
2510 /* trunc = trunc(src0)
2511 * if (src0 > 0.0 && src0 != trunc)
2512 *    trunc += 1.0
2513 */
2514 Temp src0 = get_alu_src(ctx, instr->src[0]);
2515 Temp trunc = emit_trunc_f64(ctx, bld, bld.def(v2), src0);
2516 Temp tmp0 =
2517 bld.vopc_e64(aco_opcode::v_cmp_gt_f64, bld.def(bld.lm), src0, Operand::zero());
2518 Temp tmp1 = bld.vopc(aco_opcode::v_cmp_lg_f64, bld.def(bld.lm), src0, trunc);
2519 Temp cond = bld.sop2(aco_opcode::s_and_b64, bld.def(s2), bld.def(s1, scc), tmp0, tmp1);
2520 Temp add = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1),
2521 bld.copy(bld.def(v1), Operand::zero()),
2522 bld.copy(bld.def(v1), Operand::c32(0x3ff00000u)), cond);
2523 add = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2),
2524 bld.copy(bld.def(v1), Operand::zero()), add);
2525 bld.vop3(aco_opcode::v_add_f64, Definition(dst), trunc, add);
2528 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
2532 case nir_op_ftrunc: {
2533 if (dst.regClass() == v2b) {
2534 emit_vop1_instruction(ctx, instr, aco_opcode::v_trunc_f16, dst);
2535 } else if (dst.regClass() == v1) {
2536 emit_vop1_instruction(ctx, instr, aco_opcode::v_trunc_f32, dst);
2537 } else if (dst.regClass() == v2) {
2538 Temp src = get_alu_src(ctx, instr->src[0]);
2539 emit_trunc_f64(ctx, bld, Definition(dst), src);
2541 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
2545 case nir_op_fround_even: {
2546 if (dst.regClass() == v2b) {
2547 emit_vop1_instruction(ctx, instr, aco_opcode::v_rndne_f16, dst);
2548 } else if (dst.regClass() == v1) {
2549 emit_vop1_instruction(ctx, instr, aco_opcode::v_rndne_f32, dst);
2550 } else if (dst.regClass() == v2) {
2551 if (ctx->options->chip_class >= GFX7) {
2552 emit_vop1_instruction(ctx, instr, aco_opcode::v_rndne_f64, dst);
2554 /* GFX6 doesn't support V_RNDNE_F64, lower it. */
2555 Temp src0_lo = bld.tmp(v1), src0_hi = bld.tmp(v1);
2556 Temp src0 = get_alu_src(ctx, instr->src[0]);
2557 bld.pseudo(aco_opcode::p_split_vector, Definition(src0_lo), Definition(src0_hi), src0);
2559 Temp bitmask = bld.sop1(aco_opcode::s_brev_b32, bld.def(s1),
2560 bld.copy(bld.def(s1), Operand::c32(-2u)));
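/* brev(0xfffffffe) = 0x7fffffff; v_bfi below combines 0x43300000 (the high dword of
 * 2^52) with src0's sign bit, i.e. +/-2^52. Adding and then subtracting that constant
 * rounds to nearest-even; values already too large for the trick keep src0 instead. */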
2561 Temp bfi =
2562 bld.vop3(aco_opcode::v_bfi_b32, bld.def(v1), bitmask,
2563 bld.copy(bld.def(v1), Operand::c32(0x43300000u)), as_vgpr(ctx, src0_hi));
2564 Temp tmp =
2565 bld.vop3(aco_opcode::v_add_f64, bld.def(v2), src0,
2566 bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), Operand::zero(), bfi));
2567 Instruction* sub =
2568 bld.vop3(aco_opcode::v_add_f64, bld.def(v2), tmp,
2569 bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), Operand::zero(), bfi));
2570 sub->vop3().neg[1] = true;
2571 tmp = sub->definitions[0].getTemp();
2573 Temp v = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), Operand::c32(-1u),
2574 Operand::c32(0x432fffffu));
2575 Instruction* vop3 = bld.vopc_e64(aco_opcode::v_cmp_gt_f64, bld.def(bld.lm), src0, v);
2576 vop3->vop3().abs[0] = true;
2577 Temp cond = vop3->definitions[0].getTemp();
2579 Temp tmp_lo = bld.tmp(v1), tmp_hi = bld.tmp(v1);
2580 bld.pseudo(aco_opcode::p_split_vector, Definition(tmp_lo), Definition(tmp_hi), tmp);
2581 Temp dst0 = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), tmp_lo,
2582 as_vgpr(ctx, src0_lo), cond);
2583 Temp dst1 = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), tmp_hi,
2584 as_vgpr(ctx, src0_hi), cond);
2586 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), dst0, dst1);
2589 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
2595 Temp src = as_vgpr(ctx, get_alu_src(ctx, instr->src[0]));
2596 aco_ptr<Instruction> norm;
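/* The hardware sin/cos take the angle in turns rather than radians, so scale by
 * 1/(2*PI) first: 0x3118 is ~0.1592 in fp16, 0x3e22f983 is 1/(2*PI) in fp32. */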
2597 if (dst.regClass() == v2b) {
2598 Temp half_pi = bld.copy(bld.def(s1), Operand::c32(0x3118u));
2599 Temp tmp = bld.vop2(aco_opcode::v_mul_f16, bld.def(v2b), half_pi, src);
2600 aco_opcode opcode =
2601 instr->op == nir_op_fsin ? aco_opcode::v_sin_f16 : aco_opcode::v_cos_f16;
2602 bld.vop1(opcode, Definition(dst), tmp);
2603 } else if (dst.regClass() == v1) {
2604 Temp half_pi = bld.copy(bld.def(s1), Operand::c32(0x3e22f983u));
2605 Temp tmp = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), half_pi, src);
2607 /* before GFX9, v_sin_f32 and v_cos_f32 had a valid input domain of [-256, +256] */
2608 if (ctx->options->chip_class < GFX9)
2609 tmp = bld.vop1(aco_opcode::v_fract_f32, bld.def(v1), tmp);
2611 aco_opcode opcode =
2612 instr->op == nir_op_fsin ? aco_opcode::v_sin_f32 : aco_opcode::v_cos_f32;
2613 bld.vop1(opcode, Definition(dst), tmp);
2615 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
2619 case nir_op_ldexp: {
2620 if (dst.regClass() == v2b) {
2621 emit_vop2_instruction(ctx, instr, aco_opcode::v_ldexp_f16, dst, false);
2622 } else if (dst.regClass() == v1) {
2623 emit_vop3a_instruction(ctx, instr, aco_opcode::v_ldexp_f32, dst);
2624 } else if (dst.regClass() == v2) {
2625 emit_vop3a_instruction(ctx, instr, aco_opcode::v_ldexp_f64, dst);
2627 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
2631 case nir_op_frexp_sig: {
2632 if (dst.regClass() == v2b) {
2633 emit_vop1_instruction(ctx, instr, aco_opcode::v_frexp_mant_f16, dst);
2634 } else if (dst.regClass() == v1) {
2635 emit_vop1_instruction(ctx, instr, aco_opcode::v_frexp_mant_f32, dst);
2636 } else if (dst.regClass() == v2) {
2637 emit_vop1_instruction(ctx, instr, aco_opcode::v_frexp_mant_f64, dst);
2639 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
2643 case nir_op_frexp_exp: {
2644 if (instr->src[0].src.ssa->bit_size == 16) {
2645 Temp src = get_alu_src(ctx, instr->src[0]);
2646 Temp tmp = bld.vop1(aco_opcode::v_frexp_exp_i16_f16, bld.def(v1), src);
2647 tmp = bld.pseudo(aco_opcode::p_extract_vector, bld.def(v1b), tmp, Operand::zero());
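/* The f16 exponent fits in a signed byte, so extract the low byte of the i16 result
 * and sign-extend it to 32 bits. */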
2648 convert_int(ctx, bld, tmp, 8, 32, true, dst);
2649 } else if (instr->src[0].src.ssa->bit_size == 32) {
2650 emit_vop1_instruction(ctx, instr, aco_opcode::v_frexp_exp_i32_f32, dst);
2651 } else if (instr->src[0].src.ssa->bit_size == 64) {
2652 emit_vop1_instruction(ctx, instr, aco_opcode::v_frexp_exp_i32_f64, dst);
2654 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
2658 case nir_op_fsign: {
2659 Temp src = as_vgpr(ctx, get_alu_src(ctx, instr->src[0]));
2660 if (dst.regClass() == v2b) {
2661 assert(ctx->program->chip_class >= GFX9);
2662 /* replace negative zero with positive zero */
2663 src = bld.vop2(aco_opcode::v_add_f16, bld.def(v2b), Operand::zero(), src);
2664 src =
2665 bld.vop3(aco_opcode::v_med3_i16, bld.def(v2b), Operand::c16(-1), src, Operand::c16(1u));
2666 bld.vop1(aco_opcode::v_cvt_f16_i16, Definition(dst), src);
2667 } else if (dst.regClass() == v1) {
2668 src = bld.vop2(aco_opcode::v_add_f32, bld.def(v1), Operand::zero(), src);
2669 src =
2670 bld.vop3(aco_opcode::v_med3_i32, bld.def(v1), Operand::c32(-1), src, Operand::c32(1u));
2671 bld.vop1(aco_opcode::v_cvt_f32_i32, Definition(dst), src);
2672 } else if (dst.regClass() == v2) {
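/* For f64 only the high dword needs computing: it becomes 0x3FF00000 (+1.0) when
 * src > 0, 0xBFF00000 (-1.0) when src < 0, and keeps src's own high dword for
 * +/-0.0; the low dword of the result is always zero. */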
2673 Temp cond = bld.vopc(aco_opcode::v_cmp_nlt_f64, bld.def(bld.lm), Operand::zero(), src);
2674 Temp tmp = bld.copy(bld.def(v1), Operand::c32(0x3FF00000u));
2675 Temp upper = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), tmp,
2676 emit_extract_vector(ctx, src, 1, v1), cond);
2678 cond = bld.vopc(aco_opcode::v_cmp_le_f64, bld.def(bld.lm), Operand::zero(), src);
2679 tmp = bld.copy(bld.def(v1), Operand::c32(0xBFF00000u));
2680 upper = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), tmp, upper, cond);
2682 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), Operand::zero(), upper);
2684 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
2689 case nir_op_f2f16_rtne: {
2690 Temp src = get_alu_src(ctx, instr->src[0]);
2691 if (instr->src[0].src.ssa->bit_size == 64)
2692 src = bld.vop1(aco_opcode::v_cvt_f32_f64, bld.def(v1), src);
2693 if (instr->op == nir_op_f2f16_rtne && ctx->block->fp_mode.round16_64 != fp_round_ne)
2694 /* We emit s_round_mode/s_setreg_imm32 in lower_to_hw_instr to
2695 * keep value numbering and the scheduler simpler.
2696 */
2697 bld.vop1(aco_opcode::p_cvt_f16_f32_rtne, Definition(dst), src);
2698 else
2699 bld.vop1(aco_opcode::v_cvt_f16_f32, Definition(dst), src);
2702 case nir_op_f2f16_rtz: {
2703 Temp src = get_alu_src(ctx, instr->src[0]);
2704 if (instr->src[0].src.ssa->bit_size == 64)
2705 src = bld.vop1(aco_opcode::v_cvt_f32_f64, bld.def(v1), src);
2706 if (ctx->block->fp_mode.round16_64 == fp_round_tz)
2707 bld.vop1(aco_opcode::v_cvt_f16_f32, Definition(dst), src);
2708 else if (ctx->program->chip_class == GFX8 || ctx->program->chip_class == GFX9)
2709 bld.vop3(aco_opcode::v_cvt_pkrtz_f16_f32_e64, Definition(dst), src, Operand::zero());
2711 bld.vop2(aco_opcode::v_cvt_pkrtz_f16_f32, Definition(dst), src, as_vgpr(ctx, src));
2714 case nir_op_f2f32: {
2715 if (instr->src[0].src.ssa->bit_size == 16) {
2716 emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_f32_f16, dst);
2717 } else if (instr->src[0].src.ssa->bit_size == 64) {
2718 emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_f32_f64, dst);
2720 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
2724 case nir_op_f2f64: {
2725 Temp src = get_alu_src(ctx, instr->src[0]);
2726 if (instr->src[0].src.ssa->bit_size == 16)
2727 src = bld.vop1(aco_opcode::v_cvt_f32_f16, bld.def(v1), src);
2728 bld.vop1(aco_opcode::v_cvt_f64_f32, Definition(dst), src);
2731 case nir_op_i2f16: {
2732 assert(dst.regClass() == v2b);
2733 Temp src = get_alu_src(ctx, instr->src[0]);
2734 const unsigned input_size = instr->src[0].src.ssa->bit_size;
2735 if (input_size <= 16) {
2736 /* Expand integer to the size expected by the uint→float converter used below */
2737 unsigned target_size = (ctx->program->chip_class >= GFX8 ? 16 : 32);
2738 if (input_size != target_size) {
2739 src = convert_int(ctx, bld, src, input_size, target_size, true);
2741 } else if (input_size == 64) {
2742 /* Truncate down to 32 bits; if any of the upper bits are relevant,
2743 * the value does not fall into the single-precision float range
2744 * anyway. SPIR-V does not mandate any specific behavior for such
2745 * inputs. */
2747 src = convert_int(ctx, bld, src, 64, 32, false);
2750 if (ctx->program->chip_class >= GFX8 && input_size <= 16) {
2751 bld.vop1(aco_opcode::v_cvt_f16_i16, Definition(dst), src);
2753 /* Convert to f32 and then down to f16. This is needed to handle
2754 * inputs slightly outside the range [INT16_MIN, INT16_MAX],
2755 * which are representable via f16 but wouldn't be converted
2756 * correctly by v_cvt_f16_i16.
2758 * This is also the fallback-path taken on GFX7 and earlier, which
2759 * do not support direct f16⟷i16 conversions.
2760 */
2761 src = bld.vop1(aco_opcode::v_cvt_f32_i32, bld.def(v1), src);
2762 bld.vop1(aco_opcode::v_cvt_f16_f32, Definition(dst), src);
2766 case nir_op_i2f32: {
2767 assert(dst.size() == 1);
2768 Temp src = get_alu_src(ctx, instr->src[0]);
2769 const unsigned input_size = instr->src[0].src.ssa->bit_size;
2770 if (input_size <= 32) {
2771 if (input_size <= 16) {
2772 /* Sign-extend to 32-bits */
2773 src = convert_int(ctx, bld, src, input_size, 32, true);
2775 bld.vop1(aco_opcode::v_cvt_f32_i32, Definition(dst), src);
2777 assert(input_size == 64);
2778 RegClass rc = RegClass(src.type(), 1);
2779 Temp lower = bld.tmp(rc), upper = bld.tmp(rc);
2780 bld.pseudo(aco_opcode::p_split_vector, Definition(lower), Definition(upper), src);
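/* 64-bit integer source: convert each half to f64 (low as unsigned, high as signed),
 * scale the high half by 2^32 with ldexp, add them in double precision, and convert
 * the sum to f32. */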
2781 lower = bld.vop1(aco_opcode::v_cvt_f64_u32, bld.def(v2), lower);
2782 upper = bld.vop1(aco_opcode::v_cvt_f64_i32, bld.def(v2), upper);
2783 upper = bld.vop3(aco_opcode::v_ldexp_f64, bld.def(v2), upper, Operand::c32(32u));
2784 upper = bld.vop3(aco_opcode::v_add_f64, bld.def(v2), lower, upper);
2785 bld.vop1(aco_opcode::v_cvt_f32_f64, Definition(dst), upper);
2790 case nir_op_i2f64: {
2791 if (instr->src[0].src.ssa->bit_size <= 32) {
2792 Temp src = get_alu_src(ctx, instr->src[0]);
2793 if (instr->src[0].src.ssa->bit_size <= 16)
2794 src = convert_int(ctx, bld, src, instr->src[0].src.ssa->bit_size, 32, true);
2795 bld.vop1(aco_opcode::v_cvt_f64_i32, Definition(dst), src);
2796 } else if (instr->src[0].src.ssa->bit_size == 64) {
2797 Temp src = get_alu_src(ctx, instr->src[0]);
2798 RegClass rc = RegClass(src.type(), 1);
2799 Temp lower = bld.tmp(rc), upper = bld.tmp(rc);
2800 bld.pseudo(aco_opcode::p_split_vector, Definition(lower), Definition(upper), src);
2801 lower = bld.vop1(aco_opcode::v_cvt_f64_u32, bld.def(v2), lower);
2802 upper = bld.vop1(aco_opcode::v_cvt_f64_i32, bld.def(v2), upper);
2803 upper = bld.vop3(aco_opcode::v_ldexp_f64, bld.def(v2), upper, Operand::c32(32u));
2804 bld.vop3(aco_opcode::v_add_f64, Definition(dst), lower, upper);
2807 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
2811 case nir_op_u2f16: {
2812 assert(dst.regClass() == v2b);
2813 Temp src = get_alu_src(ctx, instr->src[0]);
2814 const unsigned input_size = instr->src[0].src.ssa->bit_size;
2815 if (input_size <= 16) {
2816 /* Expand integer to the size expected by the uint→float converter used below */
2817 unsigned target_size = (ctx->program->chip_class >= GFX8 ? 16 : 32);
2818 if (input_size != target_size) {
2819 src = convert_int(ctx, bld, src, input_size, target_size, false);
2821 } else if (input_size == 64) {
2822 /* Truncate down to 32 bits; if any of the upper bits are non-zero,
2823 * the value does not fall into the single-precision float range
2824 * anyway. SPIR-V does not mandate any specific behavior for such
2825 * inputs. */
2827 src = convert_int(ctx, bld, src, 64, 32, false);
2830 if (ctx->program->chip_class >= GFX8) {
2831 /* float16 has a range of [0, 65519]. Converting from larger
2832 * inputs is UB, so we just need to consider the lower 16 bits */
2833 bld.vop1(aco_opcode::v_cvt_f16_u16, Definition(dst), src);
2835 /* GFX7 and earlier do not support direct f16⟷u16 conversions */
2836 src = bld.vop1(aco_opcode::v_cvt_f32_u32, bld.def(v1), src);
2837 bld.vop1(aco_opcode::v_cvt_f16_f32, Definition(dst), src);
2841 case nir_op_u2f32: {
2842 assert(dst.size() == 1);
2843 Temp src = get_alu_src(ctx, instr->src[0]);
2844 const unsigned input_size = instr->src[0].src.ssa->bit_size;
2845 if (input_size == 8) {
2846 bld.vop1(aco_opcode::v_cvt_f32_ubyte0, Definition(dst), src);
2847 } else if (input_size <= 32) {
2848 if (input_size == 16)
2849 src = convert_int(ctx, bld, src, instr->src[0].src.ssa->bit_size, 32, false);
2850 bld.vop1(aco_opcode::v_cvt_f32_u32, Definition(dst), src);
2852 assert(input_size == 64);
2853 RegClass rc = RegClass(src.type(), 1);
2854 Temp lower = bld.tmp(rc), upper = bld.tmp(rc);
2855 bld.pseudo(aco_opcode::p_split_vector, Definition(lower), Definition(upper), src);
2856 lower = bld.vop1(aco_opcode::v_cvt_f64_u32, bld.def(v2), lower);
2857 upper = bld.vop1(aco_opcode::v_cvt_f64_u32, bld.def(v2), upper);
2858 upper = bld.vop3(aco_opcode::v_ldexp_f64, bld.def(v2), upper, Operand::c32(32u));
2859 upper = bld.vop3(aco_opcode::v_add_f64, bld.def(v2), lower, upper);
2860 bld.vop1(aco_opcode::v_cvt_f32_f64, Definition(dst), upper);
2864 case nir_op_u2f64: {
2865 if (instr->src[0].src.ssa->bit_size <= 32) {
2866 Temp src = get_alu_src(ctx, instr->src[0]);
2867 if (instr->src[0].src.ssa->bit_size <= 16)
2868 src = convert_int(ctx, bld, src, instr->src[0].src.ssa->bit_size, 32, false);
2869 bld.vop1(aco_opcode::v_cvt_f64_u32, Definition(dst), src);
2870 } else if (instr->src[0].src.ssa->bit_size == 64) {
2871 Temp src = get_alu_src(ctx, instr->src[0]);
2872 RegClass rc = RegClass(src.type(), 1);
2873 Temp lower = bld.tmp(rc), upper = bld.tmp(rc);
2874 bld.pseudo(aco_opcode::p_split_vector, Definition(lower), Definition(upper), src);
2875 lower = bld.vop1(aco_opcode::v_cvt_f64_u32, bld.def(v2), lower);
2876 upper = bld.vop1(aco_opcode::v_cvt_f64_u32, bld.def(v2), upper);
2877 upper = bld.vop3(aco_opcode::v_ldexp_f64, bld.def(v2), upper, Operand::c32(32u));
2878 bld.vop3(aco_opcode::v_add_f64, Definition(dst), lower, upper);
2880 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
2885 case nir_op_f2i16: {
2886 if (instr->src[0].src.ssa->bit_size == 16) {
2887 if (ctx->program->chip_class >= GFX8) {
2888 emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_i16_f16, dst);
2890 /* GFX7 and earlier do not support direct f16⟷i16 conversions */
2891 Temp tmp = bld.tmp(v1);
2892 emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_f32_f16, tmp);
2893 tmp = bld.vop1(aco_opcode::v_cvt_i32_f32, bld.def(v1), tmp);
2894 tmp = convert_int(ctx, bld, tmp, 32, instr->dest.dest.ssa.bit_size, false,
2895 (dst.type() == RegType::sgpr) ? Temp() : dst);
2896 if (dst.type() == RegType::sgpr) {
2897 bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), tmp);
2900 } else if (instr->src[0].src.ssa->bit_size == 32) {
2901 emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_i32_f32, dst);
2903 emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_i32_f64, dst);
2908 case nir_op_f2u16: {
2909 if (instr->src[0].src.ssa->bit_size == 16) {
2910 if (ctx->program->chip_class >= GFX8) {
2911 emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_u16_f16, dst);
2913 /* GFX7 and earlier do not support direct f16⟷u16 conversions */
2914 Temp tmp = bld.tmp(v1);
2915 emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_f32_f16, tmp);
2916 tmp = bld.vop1(aco_opcode::v_cvt_u32_f32, bld.def(v1), tmp);
2917 tmp = convert_int(ctx, bld, tmp, 32, instr->dest.dest.ssa.bit_size, false,
2918 (dst.type() == RegType::sgpr) ? Temp() : dst);
2919 if (dst.type() == RegType::sgpr) {
2920 bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), tmp);
2923 } else if (instr->src[0].src.ssa->bit_size == 32) {
2924 emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_u32_f32, dst);
2926 emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_u32_f64, dst);
2930 case nir_op_f2i32: {
2931 Temp src = get_alu_src(ctx, instr->src[0]);
2932 if (instr->src[0].src.ssa->bit_size == 16) {
2933 Temp tmp = bld.vop1(aco_opcode::v_cvt_f32_f16, bld.def(v1), src);
2934 if (dst.type() == RegType::vgpr) {
2935 bld.vop1(aco_opcode::v_cvt_i32_f32, Definition(dst), tmp);
2937 bld.pseudo(aco_opcode::p_as_uniform, Definition(dst),
2938 bld.vop1(aco_opcode::v_cvt_i32_f32, bld.def(v1), tmp));
2940 } else if (instr->src[0].src.ssa->bit_size == 32) {
2941 emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_i32_f32, dst);
2942 } else if (instr->src[0].src.ssa->bit_size == 64) {
2943 emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_i32_f64, dst);
2945 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
2949 case nir_op_f2u32: {
2950 Temp src = get_alu_src(ctx, instr->src[0]);
2951 if (instr->src[0].src.ssa->bit_size == 16) {
2952 Temp tmp = bld.vop1(aco_opcode::v_cvt_f32_f16, bld.def(v1), src);
2953 if (dst.type() == RegType::vgpr) {
2954 bld.vop1(aco_opcode::v_cvt_u32_f32, Definition(dst), tmp);
2956 bld.pseudo(aco_opcode::p_as_uniform, Definition(dst),
2957 bld.vop1(aco_opcode::v_cvt_u32_f32, bld.def(v1), tmp));
2959 } else if (instr->src[0].src.ssa->bit_size == 32) {
2960 emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_u32_f32, dst);
2961 } else if (instr->src[0].src.ssa->bit_size == 64) {
2962 emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_u32_f64, dst);
2964 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
2968 case nir_op_f2i64: {
2969 Temp src = get_alu_src(ctx, instr->src[0]);
2970 if (instr->src[0].src.ssa->bit_size == 16)
2971 src = bld.vop1(aco_opcode::v_cvt_f32_f16, bld.def(v1), src);
2973 if (instr->src[0].src.ssa->bit_size <= 32 && dst.type() == RegType::vgpr) {
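/* Software f32->i64: clamp the exponent to [0, 64], shift the implicit-1 mantissa
 * into place with a 64-bit shift, saturate the magnitude when 63 - exponent borrows
 * (out-of-range exponent), then apply the sign by xor + subtract (two's complement). */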
2974 Temp exponent = bld.vop1(aco_opcode::v_frexp_exp_i32_f32, bld.def(v1), src);
2975 exponent = bld.vop3(aco_opcode::v_med3_i32, bld.def(v1), Operand::zero(), exponent,
2976 Operand::c32(64u));
2977 Temp mantissa = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand::c32(0x7fffffu), src);
2978 Temp sign = bld.vop2(aco_opcode::v_ashrrev_i32, bld.def(v1), Operand::c32(31u), src);
2979 mantissa = bld.vop2(aco_opcode::v_or_b32, bld.def(v1), Operand::c32(0x800000u), mantissa);
2980 mantissa = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand::c32(7u), mantissa);
2981 mantissa = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), Operand::zero(), mantissa);
2982 Temp new_exponent = bld.tmp(v1);
2983 Temp borrow =
2984 bld.vsub32(Definition(new_exponent), Operand::c32(63u), exponent, true).def(1).getTemp();
2985 if (ctx->program->chip_class >= GFX8)
2986 mantissa = bld.vop3(aco_opcode::v_lshrrev_b64, bld.def(v2), new_exponent, mantissa);
2988 mantissa = bld.vop3(aco_opcode::v_lshr_b64, bld.def(v2), mantissa, new_exponent);
2989 Temp saturate = bld.vop1(aco_opcode::v_bfrev_b32, bld.def(v1), Operand::c32(0xfffffffeu));
2990 Temp lower = bld.tmp(v1), upper = bld.tmp(v1);
2991 bld.pseudo(aco_opcode::p_split_vector, Definition(lower), Definition(upper), mantissa);
2992 lower = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), lower,
2993 Operand::c32(0xffffffffu), borrow);
2994 upper = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), upper, saturate, borrow);
2995 lower = bld.vop2(aco_opcode::v_xor_b32, bld.def(v1), sign, lower);
2996 upper = bld.vop2(aco_opcode::v_xor_b32, bld.def(v1), sign, upper);
2997 Temp new_lower = bld.tmp(v1);
2998 borrow = bld.vsub32(Definition(new_lower), lower, sign, true).def(1).getTemp();
2999 Temp new_upper = bld.vsub32(bld.def(v1), upper, sign, false, borrow);
3000 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), new_lower, new_upper);
3002 } else if (instr->src[0].src.ssa->bit_size <= 32 && dst.type() == RegType::sgpr) {
3003 if (src.type() == RegType::vgpr)
3004 src = bld.as_uniform(src);
3005 Temp exponent = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc), src,
3006 Operand::c32(0x80017u));
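/* s_bfe_u32 with 0x80017 extracts 8 bits starting at bit 23, i.e. the IEEE-754
 * biased exponent; the scalar code below mirrors the VGPR lowering above. */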
3007 exponent = bld.sop2(aco_opcode::s_sub_i32, bld.def(s1), bld.def(s1, scc), exponent,
3008 Operand::c32(126u));
3009 exponent = bld.sop2(aco_opcode::s_max_i32, bld.def(s1), bld.def(s1, scc), Operand::zero(),
3010 exponent);
3011 exponent = bld.sop2(aco_opcode::s_min_i32, bld.def(s1), bld.def(s1, scc),
3012 Operand::c32(64u), exponent);
3013 Temp mantissa = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc),
3014 Operand::c32(0x7fffffu), src);
3015 Temp sign =
3016 bld.sop2(aco_opcode::s_ashr_i32, bld.def(s1), bld.def(s1, scc), src, Operand::c32(31u));
3017 mantissa = bld.sop2(aco_opcode::s_or_b32, bld.def(s1), bld.def(s1, scc),
3018 Operand::c32(0x800000u), mantissa);
3019 mantissa = bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.def(s1, scc), mantissa,
3021 mantissa = bld.pseudo(aco_opcode::p_create_vector, bld.def(s2), Operand::zero(), mantissa);
3022 exponent = bld.sop2(aco_opcode::s_sub_u32, bld.def(s1), bld.def(s1, scc),
3023 Operand::c32(63u), exponent);
3024 mantissa =
3025 bld.sop2(aco_opcode::s_lshr_b64, bld.def(s2), bld.def(s1, scc), mantissa, exponent);
3026 Temp cond = bld.sopc(aco_opcode::s_cmp_eq_u32, bld.def(s1, scc), exponent,
3027 Operand::c32(0xffffffffu)); // exp >= 64
3028 Temp saturate = bld.sop1(aco_opcode::s_brev_b64, bld.def(s2), Operand::c32(0xfffffffeu));
3029 mantissa = bld.sop2(aco_opcode::s_cselect_b64, bld.def(s2), saturate, mantissa, cond);
3030 Temp lower = bld.tmp(s1), upper = bld.tmp(s1);
3031 bld.pseudo(aco_opcode::p_split_vector, Definition(lower), Definition(upper), mantissa);
3032 lower = bld.sop2(aco_opcode::s_xor_b32, bld.def(s1), bld.def(s1, scc), sign, lower);
3033 upper = bld.sop2(aco_opcode::s_xor_b32, bld.def(s1), bld.def(s1, scc), sign, upper);
3034 Temp borrow = bld.tmp(s1);
3035 lower =
3036 bld.sop2(aco_opcode::s_sub_u32, bld.def(s1), bld.scc(Definition(borrow)), lower, sign);
3037 upper = bld.sop2(aco_opcode::s_subb_u32, bld.def(s1), bld.def(s1, scc), upper, sign,
3038 bld.scc(borrow));
3039 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper);
3041 } else if (instr->src[0].src.ssa->bit_size == 64) {
3042 Temp vec = bld.pseudo(aco_opcode::p_create_vector, bld.def(s2), Operand::zero(),
3043 Operand::c32(0x3df00000u));
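/* 0x3df00000 / 0xc1f00000 are the high dwords of 2^-32 and -2^32: the truncated value
 * is split into a high part, floor(x * 2^-32), and the remainder computed via fma;
 * these become the upper and lower 32-bit halves of the result. */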
3044 Temp trunc = emit_trunc_f64(ctx, bld, bld.def(v2), src);
3045 Temp mul = bld.vop3(aco_opcode::v_mul_f64, bld.def(v2), trunc, vec);
3046 vec = bld.pseudo(aco_opcode::p_create_vector, bld.def(s2), Operand::zero(),
3047 Operand::c32(0xc1f00000u));
3048 Temp floor = emit_floor_f64(ctx, bld, bld.def(v2), mul);
3049 Temp fma = bld.vop3(aco_opcode::v_fma_f64, bld.def(v2), floor, vec, trunc);
3050 Temp lower = bld.vop1(aco_opcode::v_cvt_u32_f64, bld.def(v1), fma);
3051 Temp upper = bld.vop1(aco_opcode::v_cvt_i32_f64, bld.def(v1), floor);
3052 if (dst.type() == RegType::sgpr) {
3053 lower = bld.as_uniform(lower);
3054 upper = bld.as_uniform(upper);
3056 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper);
3059 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
3063 case nir_op_f2u64: {
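/* Like the f2i64 lowering above: the 32-bit source is split into exponent and
 * mantissa (with the implicit leading one OR'd in), the mantissa is shifted by the
 * exponent to form the 64-bit result, and out-of-range exponents select a
 * saturated value instead. */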
3064 Temp src = get_alu_src(ctx, instr->src[0]);
3065 if (instr->src[0].src.ssa->bit_size == 16)
3066 src = bld.vop1(aco_opcode::v_cvt_f32_f16, bld.def(v1), src);
3068 if (instr->src[0].src.ssa->bit_size <= 32 && dst.type() == RegType::vgpr) {
3069 Temp exponent = bld.vop1(aco_opcode::v_frexp_exp_i32_f32, bld.def(v1), src);
3070 Temp exponent_in_range =
3071 bld.vopc(aco_opcode::v_cmp_ge_i32, bld.def(bld.lm), Operand::c32(64u), exponent);
3072 exponent = bld.vop2(aco_opcode::v_max_i32, bld.def(v1), Operand::zero(), exponent);
3073 Temp mantissa = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand::c32(0x7fffffu), src);
3074 mantissa = bld.vop2(aco_opcode::v_or_b32, bld.def(v1), Operand::c32(0x800000u), mantissa);
3075 Temp exponent_small = bld.vsub32(bld.def(v1), Operand::c32(24u), exponent);
3076 Temp small = bld.vop2(aco_opcode::v_lshrrev_b32, bld.def(v1), exponent_small, mantissa);
3077 mantissa = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), Operand::zero(), mantissa);
3078 Temp new_exponent = bld.tmp(v1);
3079 Temp cond_small =
3080 bld.vsub32(Definition(new_exponent), exponent, Operand::c32(24u), true).def(1).getTemp();
3081 if (ctx->program->chip_class >= GFX8)
3082 mantissa = bld.vop3(aco_opcode::v_lshlrev_b64, bld.def(v2), new_exponent, mantissa);
3083 else
3084 mantissa = bld.vop3(aco_opcode::v_lshl_b64, bld.def(v2), mantissa, new_exponent);
3085 Temp lower = bld.tmp(v1), upper = bld.tmp(v1);
3086 bld.pseudo(aco_opcode::p_split_vector, Definition(lower), Definition(upper), mantissa);
3087 lower = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), lower, small, cond_small);
3088 upper = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), upper, Operand::zero(),
3089 cond_small);
3090 lower = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), Operand::c32(0xffffffffu), lower,
3091 exponent_in_range);
3092 upper = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), Operand::c32(0xffffffffu), upper,
3093 exponent_in_range);
3094 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper);
3096 } else if (instr->src[0].src.ssa->bit_size <= 32 && dst.type() == RegType::sgpr) {
3097 if (src.type() == RegType::vgpr)
3098 src = bld.as_uniform(src);
3099 Temp exponent = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc), src,
3100 Operand::c32(0x80017u));
3101 exponent = bld.sop2(aco_opcode::s_sub_i32, bld.def(s1), bld.def(s1, scc), exponent,
3102 Operand::c32(126u));
3103 exponent = bld.sop2(aco_opcode::s_max_i32, bld.def(s1), bld.def(s1, scc), Operand::zero(),
3104 exponent);
3105 Temp mantissa = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc),
3106 Operand::c32(0x7fffffu), src);
3107 mantissa = bld.sop2(aco_opcode::s_or_b32, bld.def(s1), bld.def(s1, scc),
3108 Operand::c32(0x800000u), mantissa);
3109 Temp exponent_small = bld.sop2(aco_opcode::s_sub_u32, bld.def(s1), bld.def(s1, scc),
3110 Operand::c32(24u), exponent);
3111 Temp small = bld.sop2(aco_opcode::s_lshr_b32, bld.def(s1), bld.def(s1, scc), mantissa,
3112 exponent_small);
3113 mantissa = bld.pseudo(aco_opcode::p_create_vector, bld.def(s2), Operand::zero(), mantissa);
3114 Temp exponent_large = bld.sop2(aco_opcode::s_sub_u32, bld.def(s1), bld.def(s1, scc),
3115 exponent, Operand::c32(24u));
3116 mantissa = bld.sop2(aco_opcode::s_lshl_b64, bld.def(s2), bld.def(s1, scc), mantissa,
3117 exponent_large);
3118 Temp cond =
3119 bld.sopc(aco_opcode::s_cmp_ge_i32, bld.def(s1, scc), Operand::c32(64u), exponent);
3120 mantissa = bld.sop2(aco_opcode::s_cselect_b64, bld.def(s2), mantissa,
3121 Operand::c32(0xffffffffu), cond);
3122 Temp lower = bld.tmp(s1), upper = bld.tmp(s1);
3123 bld.pseudo(aco_opcode::p_split_vector, Definition(lower), Definition(upper), mantissa);
3124 Temp cond_small =
3125 bld.sopc(aco_opcode::s_cmp_le_i32, bld.def(s1, scc), exponent, Operand::c32(24u));
3126 lower = bld.sop2(aco_opcode::s_cselect_b32, bld.def(s1), small, lower, cond_small);
3127 upper =
3128 bld.sop2(aco_opcode::s_cselect_b32, bld.def(s1), Operand::zero(), upper, cond_small);
3129 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper);
3129 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper);
3131 } else if (instr->src[0].src.ssa->bit_size == 64) {
3132 Temp vec = bld.pseudo(aco_opcode::p_create_vector, bld.def(s2), Operand::zero(),
3133 Operand::c32(0x3df00000u));
3134 Temp trunc = emit_trunc_f64(ctx, bld, bld.def(v2), src);
3135 Temp mul = bld.vop3(aco_opcode::v_mul_f64, bld.def(v2), trunc, vec);
3136 vec = bld.pseudo(aco_opcode::p_create_vector, bld.def(s2), Operand::zero(),
3137 Operand::c32(0xc1f00000u));
3138 Temp floor = emit_floor_f64(ctx, bld, bld.def(v2), mul);
3139 Temp fma = bld.vop3(aco_opcode::v_fma_f64, bld.def(v2), floor, vec, trunc);
3140 Temp lower = bld.vop1(aco_opcode::v_cvt_u32_f64, bld.def(v1), fma);
3141 Temp upper = bld.vop1(aco_opcode::v_cvt_u32_f64, bld.def(v1), floor);
3142 if (dst.type() == RegType::sgpr) {
3143 lower = bld.as_uniform(lower);
3144 upper = bld.as_uniform(upper);
3146 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper);
3149 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
3153 case nir_op_b2f16: {
3154 Temp src = get_alu_src(ctx, instr->src[0]);
3155 assert(src.regClass() == bld.lm);
3157 if (dst.regClass() == s1) {
3158 src = bool_to_scalar_condition(ctx, src);
3159 bld.sop2(aco_opcode::s_mul_i32, Definition(dst), Operand::c32(0x3c00u), src);
3160 } else if (dst.regClass() == v2b) {
3161 Temp one = bld.copy(bld.def(v1), Operand::c32(0x3c00u));
3162 bld.vop2(aco_opcode::v_cndmask_b32, Definition(dst), Operand::zero(), one, src);
3164 unreachable("Wrong destination register class for nir_op_b2f16.");
3168 case nir_op_b2f32: {
3169 Temp src = get_alu_src(ctx, instr->src[0]);
3170 assert(src.regClass() == bld.lm);
3172 if (dst.regClass() == s1) {
3173 src = bool_to_scalar_condition(ctx, src);
3174 bld.sop2(aco_opcode::s_mul_i32, Definition(dst), Operand::c32(0x3f800000u), src);
3175 } else if (dst.regClass() == v1) {
3176 bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(dst), Operand::zero(),
3177 Operand::c32(0x3f800000u), src);
3179 unreachable("Wrong destination register class for nir_op_b2f32.");
3183 case nir_op_b2f64: {
3184 Temp src = get_alu_src(ctx, instr->src[0]);
3185 assert(src.regClass() == bld.lm);
3187 if (dst.regClass() == s2) {
3188 src = bool_to_scalar_condition(ctx, src);
3189 bld.sop2(aco_opcode::s_cselect_b64, Definition(dst), Operand::c32(0x3f800000u),
3190 Operand::zero(), bld.scc(src));
3191 } else if (dst.regClass() == v2) {
3192 Temp one = bld.copy(bld.def(v1), Operand::c32(0x3FF00000u));
3193 Temp upper =
3194 bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), Operand::zero(), one, src);
3195 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), Operand::zero(), upper);
3197 unreachable("Wrong destination register class for nir_op_b2f64.");
3204 case nir_op_i2i64: {
3205 if (dst.type() == RegType::sgpr && instr->src[0].src.ssa->bit_size < 32) {
3206 /* no need to do the extract in get_alu_src() */
3207 sgpr_extract_mode mode = instr->dest.dest.ssa.bit_size > instr->src[0].src.ssa->bit_size
3209 : sgpr_extract_undef;
3210 extract_8_16_bit_sgpr_element(ctx, dst, &instr->src[0], mode);
3212 const unsigned input_bitsize = instr->src[0].src.ssa->bit_size;
3213 const unsigned output_bitsize = instr->dest.dest.ssa.bit_size;
3214 convert_int(ctx, bld, get_alu_src(ctx, instr->src[0]), input_bitsize, output_bitsize,
3215 output_bitsize > input_bitsize, dst);
3222 case nir_op_u2u64: {
3223 if (dst.type() == RegType::sgpr && instr->src[0].src.ssa->bit_size < 32) {
3224 /* no need to do the extract in get_alu_src() */
3225 sgpr_extract_mode mode = instr->dest.dest.ssa.bit_size > instr->src[0].src.ssa->bit_size
3227 : sgpr_extract_undef;
3228 extract_8_16_bit_sgpr_element(ctx, dst, &instr->src[0], mode);
3230 convert_int(ctx, bld, get_alu_src(ctx, instr->src[0]), instr->src[0].src.ssa->bit_size,
3231 instr->dest.dest.ssa.bit_size, false, dst);
3239 case nir_op_b2i64: {
3240 Temp src = get_alu_src(ctx, instr->src[0]);
3241 assert(src.regClass() == bld.lm);
3243 Temp tmp = dst.bytes() == 8 ? bld.tmp(RegClass::get(dst.type(), 4)) : dst;
3244 if (tmp.regClass() == s1) {
3245 bool_to_scalar_condition(ctx, src, tmp);
3246 } else if (tmp.type() == RegType::vgpr) {
3247 bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(tmp), Operand::zero(), Operand::c32(1u),
3250 unreachable("Invalid register class for b2i32");
3253 if (tmp != dst)
3254 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), tmp, Operand::zero());
3259 Temp src = get_alu_src(ctx, instr->src[0]);
3260 assert(dst.regClass() == bld.lm);
3262 if (src.type() == RegType::vgpr) {
3263 assert(src.regClass() == v1 || src.regClass() == v2);
3264 assert(dst.regClass() == bld.lm);
3265 bld.vopc(src.size() == 2 ? aco_opcode::v_cmp_lg_u64 : aco_opcode::v_cmp_lg_u32,
3266 Definition(dst), Operand::zero(), src);
3268 assert(src.regClass() == s1 || src.regClass() == s2);
3270 if (src.regClass() == s2 && ctx->program->chip_class <= GFX7) {
3272 bld.sop2(aco_opcode::s_or_b64, bld.def(s2), bld.def(s1, scc), Operand::zero(), src)
3276 tmp = bld.sopc(src.size() == 2 ? aco_opcode::s_cmp_lg_u64 : aco_opcode::s_cmp_lg_u32,
3277 bld.scc(bld.def(s1)), Operand::zero(), src);
3279 bool_to_vector_condition(ctx, tmp, dst);
3283 case nir_op_unpack_64_2x32:
3284 case nir_op_unpack_32_2x16:
3285 case nir_op_unpack_64_4x16:
3286 bld.copy(Definition(dst), get_alu_src(ctx, instr->src[0]));
3287 emit_split_vector(ctx, dst, instr->op == nir_op_unpack_64_4x16 ? 4 : 2);
3289 case nir_op_pack_64_2x32_split: {
3290 Temp src0 = get_alu_src(ctx, instr->src[0]);
3291 Temp src1 = get_alu_src(ctx, instr->src[1]);
3293 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), src0, src1);
3296 case nir_op_unpack_64_2x32_split_x:
3297 bld.pseudo(aco_opcode::p_split_vector, Definition(dst), bld.def(dst.regClass()),
3298 get_alu_src(ctx, instr->src[0]));
3300 case nir_op_unpack_64_2x32_split_y:
3301 bld.pseudo(aco_opcode::p_split_vector, bld.def(dst.regClass()), Definition(dst),
3302 get_alu_src(ctx, instr->src[0]));
3304 case nir_op_unpack_32_2x16_split_x:
3305 if (dst.type() == RegType::vgpr) {
3306 bld.pseudo(aco_opcode::p_split_vector, Definition(dst), bld.def(dst.regClass()),
3307 get_alu_src(ctx, instr->src[0]));
3309 bld.copy(Definition(dst), get_alu_src(ctx, instr->src[0]));
3312 case nir_op_unpack_32_2x16_split_y:
3313 if (dst.type() == RegType::vgpr) {
3314 bld.pseudo(aco_opcode::p_split_vector, bld.def(dst.regClass()), Definition(dst),
3315 get_alu_src(ctx, instr->src[0]));
3317 bld.pseudo(aco_opcode::p_extract, Definition(dst), bld.def(s1, scc),
3318 get_alu_src(ctx, instr->src[0]), Operand::c32(1u), Operand::c32(16u),
3322 case nir_op_pack_32_2x16_split: {
3323 Temp src0 = get_alu_src(ctx, instr->src[0]);
3324 Temp src1 = get_alu_src(ctx, instr->src[1]);
3325 if (dst.regClass() == v1) {
3326 src0 = emit_extract_vector(ctx, src0, 0, v2b);
3327 src1 = emit_extract_vector(ctx, src1, 0, v2b);
3328 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), src0, src1);
3330 src0 = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), src0,
3331 Operand::c32(0xFFFFu));
3332 src1 = bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.def(s1, scc), src1,
3334 bld.sop2(aco_opcode::s_or_b32, Definition(dst), bld.def(s1, scc), src0, src1);
3338 case nir_op_pack_32_4x8: bld.copy(Definition(dst), get_alu_src(ctx, instr->src[0], 4)); break;
3339 case nir_op_pack_half_2x16_split: {
3340 if (dst.regClass() == v1) {
3341 if (ctx->program->chip_class == GFX8 || ctx->program->chip_class == GFX9)
3342 emit_vop3a_instruction(ctx, instr, aco_opcode::v_cvt_pkrtz_f16_f32_e64, dst);
3344 emit_vop2_instruction(ctx, instr, aco_opcode::v_cvt_pkrtz_f16_f32, dst, false);
3346 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
3350 case nir_op_pack_unorm_2x16:
3351 case nir_op_pack_snorm_2x16: {
3352 Temp src = get_alu_src(ctx, instr->src[0], 2);
3353 Temp src0 = emit_extract_vector(ctx, src, 0, v1);
3354 Temp src1 = emit_extract_vector(ctx, src, 1, v1);
3355 aco_opcode opcode = instr->op == nir_op_pack_unorm_2x16 ? aco_opcode::v_cvt_pknorm_u16_f32
3356 : aco_opcode::v_cvt_pknorm_i16_f32;
3357 bld.vop3(opcode, Definition(dst), src0, src1);
3360 case nir_op_pack_uint_2x16:
3361 case nir_op_pack_sint_2x16: {
3362 Temp src = get_alu_src(ctx, instr->src[0], 2);
3363 Temp src0 = emit_extract_vector(ctx, src, 0, v1);
3364 Temp src1 = emit_extract_vector(ctx, src, 1, v1);
3365 aco_opcode opcode = instr->op == nir_op_pack_uint_2x16 ? aco_opcode::v_cvt_pk_u16_u32
3366 : aco_opcode::v_cvt_pk_i16_i32;
3367 bld.vop3(opcode, Definition(dst), src0, src1);
3370 case nir_op_unpack_half_2x16_split_x_flush_to_zero:
3371 case nir_op_unpack_half_2x16_split_x: {
3372 Temp src = get_alu_src(ctx, instr->src[0]);
3373 if (src.regClass() == v1)
3374 src = bld.pseudo(aco_opcode::p_split_vector, bld.def(v2b), bld.def(v2b), src);
3375 if (dst.regClass() == v1) {
3376 assert(ctx->block->fp_mode.must_flush_denorms16_64 ==
3377 (instr->op == nir_op_unpack_half_2x16_split_x_flush_to_zero));
3378 bld.vop1(aco_opcode::v_cvt_f32_f16, Definition(dst), src);
3380 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
3384 case nir_op_unpack_half_2x16_split_y_flush_to_zero:
3385 case nir_op_unpack_half_2x16_split_y: {
3386 Temp src = get_alu_src(ctx, instr->src[0]);
3387 if (src.regClass() == s1)
3388 src = bld.pseudo(aco_opcode::p_extract, bld.def(s1), bld.def(s1, scc), src,
3389 Operand::c32(1u), Operand::c32(16u), Operand::zero());
3390 else
3391 src =
3392 bld.pseudo(aco_opcode::p_split_vector, bld.def(v2b), bld.def(v2b), src).def(1).getTemp();
3393 if (dst.regClass() == v1) {
3394 assert(ctx->block->fp_mode.must_flush_denorms16_64 ==
3395 (instr->op == nir_op_unpack_half_2x16_split_y_flush_to_zero));
3396 bld.vop1(aco_opcode::v_cvt_f32_f16, Definition(dst), src);
3398 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
3402 case nir_op_sad_u8x4: {
3403 assert(dst.regClass() == v1);
3404 emit_vop3a_instruction(ctx, instr, aco_opcode::v_sad_u8, dst, false, 3u, false);
3407 case nir_op_fquantize2f16: {
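/* Quantize by converting to f16 and back to f32. If the f16 result is a denormal
 * (checked with v_cmp_class_f16 on GFX8+, or by comparing against the smallest
 * normal half, 2^-14, on older chips), the result is flushed to zero, optionally
 * keeping the sign of the source. */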
3408 Temp src = get_alu_src(ctx, instr->src[0]);
3409 Temp f16 = bld.vop1(aco_opcode::v_cvt_f16_f32, bld.def(v2b), src);
3410 Temp f32, cmp_res;
3412 if (ctx->program->chip_class >= GFX8) {
3413 Temp mask = bld.copy(
3414 bld.def(s1), Operand::c32(0x36Fu)); /* value is NOT negative/positive denormal value */
3415 cmp_res = bld.vopc_e64(aco_opcode::v_cmp_class_f16, bld.def(bld.lm), f16, mask);
3416 f32 = bld.vop1(aco_opcode::v_cvt_f32_f16, bld.def(v1), f16);
3418 /* 0x38800000 is smallest half float value (2^-14) in 32-bit float,
3419 * so compare the result and flush to 0 if it's smaller.
3421 f32 = bld.vop1(aco_opcode::v_cvt_f32_f16, bld.def(v1), f16);
3422 Temp smallest = bld.copy(bld.def(s1), Operand::c32(0x38800000u));
3423 Instruction* tmp0 = bld.vopc_e64(aco_opcode::v_cmp_lt_f32, bld.def(bld.lm), f32, smallest);
3424 tmp0->vop3().abs[0] = true;
3425 Temp tmp1 = bld.vopc(aco_opcode::v_cmp_lg_f32, bld.def(bld.lm), Operand::zero(), f32);
3426 cmp_res = bld.sop2(aco_opcode::s_nand_b64, bld.def(s2), bld.def(s1, scc),
3427 tmp0->definitions[0].getTemp(), tmp1);
3430 if (ctx->block->fp_mode.preserve_signed_zero_inf_nan32) {
3431 Temp copysign_0 =
3432 bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), Operand::zero(), as_vgpr(ctx, src));
3433 bld.vop2(aco_opcode::v_cndmask_b32, Definition(dst), copysign_0, f32, cmp_res);
3434 } else {
3435 bld.vop2(aco_opcode::v_cndmask_b32, Definition(dst), Operand::zero(), f32, cmp_res);
3440 Temp bits = get_alu_src(ctx, instr->src[0]);
3441 Temp offset = get_alu_src(ctx, instr->src[1]);
3443 if (dst.regClass() == s1) {
3444 bld.sop2(aco_opcode::s_bfm_b32, Definition(dst), bits, offset);
3445 } else if (dst.regClass() == v1) {
3446 bld.vop3(aco_opcode::v_bfm_b32, Definition(dst), bits, offset);
3448 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
3452 case nir_op_bitfield_select: {
3454 /* dst = (insert & bitmask) | (base & ~bitmask) */
3455 if (dst.regClass() == s1) {
3456 Temp bitmask = get_alu_src(ctx, instr->src[0]);
3457 Temp insert = get_alu_src(ctx, instr->src[1]);
3458 Temp base = get_alu_src(ctx, instr->src[2]);
3459 aco_ptr<Instruction> sop2;
3460 nir_const_value* const_bitmask = nir_src_as_const_value(instr->src[0].src);
3461 nir_const_value* const_insert = nir_src_as_const_value(instr->src[1].src);
3463 if (const_insert && const_bitmask) {
3464 lhs = Operand::c32(const_insert->u32 & const_bitmask->u32);
3467 bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), insert, bitmask);
3468 lhs = Operand(insert);
3472 nir_const_value* const_base = nir_src_as_const_value(instr->src[2].src);
3473 if (const_base && const_bitmask) {
3474 rhs = Operand::c32(const_base->u32 & ~const_bitmask->u32);
3476 base = bld.sop2(aco_opcode::s_andn2_b32, bld.def(s1), bld.def(s1, scc), base, bitmask);
3477 rhs = Operand(base);
3480 bld.sop2(aco_opcode::s_or_b32, Definition(dst), bld.def(s1, scc), rhs, lhs);
3482 } else if (dst.regClass() == v1) {
3483 emit_vop3a_instruction(ctx, instr, aco_opcode::v_bfi_b32, dst, false, 3);
3485 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
3491 if (dst.bytes() != 4)
3492 unreachable("Unsupported BFE bit size");
3494 if (dst.type() == RegType::sgpr) {
3495 Temp base = get_alu_src(ctx, instr->src[0]);
3497 nir_const_value* const_offset = nir_src_as_const_value(instr->src[1].src);
3498 nir_const_value* const_bits = nir_src_as_const_value(instr->src[2].src);
3499 if (const_offset && const_bits) {
3500 uint32_t extract = (const_bits->u32 << 16) | (const_offset->u32 & 0x1f);
3501 aco_opcode opcode =
3502 instr->op == nir_op_ubfe ? aco_opcode::s_bfe_u32 : aco_opcode::s_bfe_i32;
3503 bld.sop2(opcode, Definition(dst), bld.def(s1, scc), base, Operand::c32(extract));
3507 Temp offset = get_alu_src(ctx, instr->src[1]);
3508 Temp bits = get_alu_src(ctx, instr->src[2]);
3509 if (instr->op == nir_op_ubfe) {
3510 Temp mask = bld.sop2(aco_opcode::s_bfm_b32, bld.def(s1), bits, offset);
3511 Temp masked =
3512 bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), base, mask);
3513 bld.sop2(aco_opcode::s_lshr_b32, Definition(dst), bld.def(s1, scc), masked, offset);
3515 Operand bits_op = const_bits ? Operand::c32(const_bits->u32 << 16)
3516 : bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1),
3517 bld.def(s1, scc), bits, Operand::c32(16u));
3518 Operand offset_op = const_offset
3519 ? Operand::c32(const_offset->u32 & 0x1fu)
3520 : bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc),
3521 offset, Operand::c32(0x1fu));
3523 Temp extract =
3524 bld.sop2(aco_opcode::s_or_b32, bld.def(s1), bld.def(s1, scc), bits_op, offset_op);
3525 bld.sop2(aco_opcode::s_bfe_i32, Definition(dst), bld.def(s1, scc), base, extract);
3529 aco_opcode opcode =
3530 instr->op == nir_op_ubfe ? aco_opcode::v_bfe_u32 : aco_opcode::v_bfe_i32;
3531 emit_vop3a_instruction(ctx, instr, opcode, dst, false, 3);
3535 case nir_op_extract_u8:
3536 case nir_op_extract_i8:
3537 case nir_op_extract_u16:
3538 case nir_op_extract_i16: {
3539 bool is_signed = instr->op == nir_op_extract_i16 || instr->op == nir_op_extract_i8;
3540 unsigned comp = instr->op == nir_op_extract_u8 || instr->op == nir_op_extract_i8 ? 4 : 2;
3541 uint32_t bits = comp == 4 ? 8 : 16;
3542 unsigned index = nir_src_as_uint(instr->src[1].src);
3543 if (bits >= instr->dest.dest.ssa.bit_size || index * bits >= instr->dest.dest.ssa.bit_size) {
3545 bld.copy(Definition(dst), get_alu_src(ctx, instr->src[0]));
3546 } else if (dst.regClass() == s1 && instr->dest.dest.ssa.bit_size == 16) {
3547 Temp vec = get_ssa_temp(ctx, instr->src[0].src.ssa);
3548 unsigned swizzle = instr->src[0].swizzle[0];
3549 if (vec.size() > 1) {
3550 vec = emit_extract_vector(ctx, vec, swizzle / 2, s1);
3551 swizzle = swizzle & 1;
3553 index += swizzle * instr->dest.dest.ssa.bit_size / bits;
3554 bld.pseudo(aco_opcode::p_extract, Definition(dst), bld.def(s1, scc), Operand(vec),
3555 Operand::c32(index), Operand::c32(bits), Operand::c32(is_signed));
3557 Temp src = get_alu_src(ctx, instr->src[0]);
3558 Definition def(dst);
3559 if (dst.bytes() == 8) {
3560 src = emit_extract_vector(ctx, src, index / comp, RegClass(src.type(), 1));
3562 def = bld.def(src.type(), 1);
3564 assert(def.bytes() <= 4);
3565 if (def.regClass() == s1) {
3566 bld.pseudo(aco_opcode::p_extract, def, bld.def(s1, scc), Operand(src),
3567 Operand::c32(index), Operand::c32(bits), Operand::c32(is_signed));
3569 src = emit_extract_vector(ctx, src, 0, def.regClass());
3570 bld.pseudo(aco_opcode::p_extract, def, Operand(src), Operand::c32(index),
3571 Operand::c32(bits), Operand::c32(is_signed));
3573 if (dst.size() == 2)
3574 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), def.getTemp(),
3579 case nir_op_insert_u8:
3580 case nir_op_insert_u16: {
3581 unsigned comp = instr->op == nir_op_insert_u8 ? 4 : 2;
3582 uint32_t bits = comp == 4 ? 8 : 16;
3583 unsigned index = nir_src_as_uint(instr->src[1].src);
3584 if (bits >= instr->dest.dest.ssa.bit_size || index * bits >= instr->dest.dest.ssa.bit_size) {
3586 bld.copy(Definition(dst), get_alu_src(ctx, instr->src[0]));
3588 Temp src = get_alu_src(ctx, instr->src[0]);
3589 Definition def(dst);
3591 if (dst.bytes() == 8) {
3592 src = emit_extract_vector(ctx, src, 0u, RegClass(src.type(), 1));
3593 swap = index >= comp;
3595 def = bld.def(src.type(), 1);
3597 if (def.regClass() == s1) {
3598 bld.pseudo(aco_opcode::p_insert, def, bld.def(s1, scc), Operand(src),
3599 Operand::c32(index), Operand::c32(bits));
3601 src = emit_extract_vector(ctx, src, 0, def.regClass());
3602 bld.pseudo(aco_opcode::p_insert, def, Operand(src), Operand::c32(index),
3603 Operand::c32(bits));
3605 if (dst.size() == 2 && swap)
3606 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), Operand::zero(),
3608 else if (dst.size() == 2)
3609 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), def.getTemp(),
3614 case nir_op_bit_count: {
3615 Temp src = get_alu_src(ctx, instr->src[0]);
3616 if (src.regClass() == s1) {
3617 bld.sop1(aco_opcode::s_bcnt1_i32_b32, Definition(dst), bld.def(s1, scc), src);
3618 } else if (src.regClass() == v1) {
3619 bld.vop3(aco_opcode::v_bcnt_u32_b32, Definition(dst), src, Operand::zero());
3620 } else if (src.regClass() == v2) {
3621 bld.vop3(aco_opcode::v_bcnt_u32_b32, Definition(dst), emit_extract_vector(ctx, src, 1, v1),
3622 bld.vop3(aco_opcode::v_bcnt_u32_b32, bld.def(v1),
3623 emit_extract_vector(ctx, src, 0, v1), Operand::zero()));
3624 } else if (src.regClass() == s2) {
3625 bld.sop1(aco_opcode::s_bcnt1_i32_b64, Definition(dst), bld.def(s1, scc), src);
3627 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
3632 emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_lt_f16, aco_opcode::v_cmp_lt_f32,
3633 aco_opcode::v_cmp_lt_f64);
3637 emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_ge_f16, aco_opcode::v_cmp_ge_f32,
3638 aco_opcode::v_cmp_ge_f64);
3642 emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_eq_f16, aco_opcode::v_cmp_eq_f32,
3643 aco_opcode::v_cmp_eq_f64);
3647 emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_neq_f16, aco_opcode::v_cmp_neq_f32,
3648 aco_opcode::v_cmp_neq_f64);
3652 emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_lt_i16, aco_opcode::v_cmp_lt_i32,
3653 aco_opcode::v_cmp_lt_i64, aco_opcode::s_cmp_lt_i32);
3657 emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_ge_i16, aco_opcode::v_cmp_ge_i32,
3658 aco_opcode::v_cmp_ge_i64, aco_opcode::s_cmp_ge_i32);
3662 if (instr->src[0].src.ssa->bit_size == 1)
3663 emit_boolean_logic(ctx, instr, Builder::s_xnor, dst);
3664 else
3665 emit_comparison(
3666 ctx, instr, dst, aco_opcode::v_cmp_eq_i16, aco_opcode::v_cmp_eq_i32,
3667 aco_opcode::v_cmp_eq_i64, aco_opcode::s_cmp_eq_i32,
3668 ctx->program->chip_class >= GFX8 ? aco_opcode::s_cmp_eq_u64 : aco_opcode::num_opcodes);
3672 if (instr->src[0].src.ssa->bit_size == 1)
3673 emit_boolean_logic(ctx, instr, Builder::s_xor, dst);
3674 else
3675 emit_comparison(
3676 ctx, instr, dst, aco_opcode::v_cmp_lg_i16, aco_opcode::v_cmp_lg_i32,
3677 aco_opcode::v_cmp_lg_i64, aco_opcode::s_cmp_lg_i32,
3678 ctx->program->chip_class >= GFX8 ? aco_opcode::s_cmp_lg_u64 : aco_opcode::num_opcodes);
3682 emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_lt_u16, aco_opcode::v_cmp_lt_u32,
3683 aco_opcode::v_cmp_lt_u64, aco_opcode::s_cmp_lt_u32);
3687 emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_ge_u16, aco_opcode::v_cmp_ge_u32,
3688 aco_opcode::v_cmp_ge_u64, aco_opcode::s_cmp_ge_u32);
3693 case nir_op_fddx_fine:
3694 case nir_op_fddy_fine:
3695 case nir_op_fddx_coarse:
3696 case nir_op_fddy_coarse: {
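/* Derivatives are computed per quad: dpp_quad_perm broadcasts the reference lane(s)
 * within each 2x2 quad and the neighbouring lane is subtracted. GFX8+ can use DPP
 * directly; older chips go through ds_swizzle_b32. */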
3697 if (!nir_src_is_divergent(instr->src[0].src)) {
3698 /* Source is the same in all lanes, so the derivative is zero.
3699 * This also avoids emitting invalid IR.
3701 bld.copy(Definition(dst), Operand::zero());
3705 Temp src = as_vgpr(ctx, get_alu_src(ctx, instr->src[0]));
3706 uint16_t dpp_ctrl1, dpp_ctrl2;
3707 if (instr->op == nir_op_fddx_fine) {
3708 dpp_ctrl1 = dpp_quad_perm(0, 0, 2, 2);
3709 dpp_ctrl2 = dpp_quad_perm(1, 1, 3, 3);
3710 } else if (instr->op == nir_op_fddy_fine) {
3711 dpp_ctrl1 = dpp_quad_perm(0, 1, 0, 1);
3712 dpp_ctrl2 = dpp_quad_perm(2, 3, 2, 3);
3713 } else {
3714 dpp_ctrl1 = dpp_quad_perm(0, 0, 0, 0);
3715 if (instr->op == nir_op_fddx || instr->op == nir_op_fddx_coarse)
3716 dpp_ctrl2 = dpp_quad_perm(1, 1, 1, 1);
3717 else
3718 dpp_ctrl2 = dpp_quad_perm(2, 2, 2, 2);
3721 Temp tmp;
3722 if (ctx->program->chip_class >= GFX8) {
3723 Temp tl = bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), src, dpp_ctrl1);
3724 tmp = bld.vop2_dpp(aco_opcode::v_sub_f32, bld.def(v1), src, tl, dpp_ctrl2);
3725 } else {
3726 Temp tl = bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), src, (1 << 15) | dpp_ctrl1);
3727 Temp tr = bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), src, (1 << 15) | dpp_ctrl2);
3728 tmp = bld.vop2(aco_opcode::v_sub_f32, bld.def(v1), tr, tl);
3730 emit_wqm(bld, tmp, dst, true);
3733 default: isel_err(&instr->instr, "Unknown NIR ALU instr");
3738 visit_load_const(isel_context* ctx, nir_load_const_instr* instr)
3740 Temp dst = get_ssa_temp(ctx, &instr->def);
3742 // TODO: we really want to have the resulting type as this would allow for 64bit literals
3743 // which would get truncated at the lsb if double and at the msb if int
3744 // for now, we only use s_mov_b64 with 64bit inline constants
3745 assert(instr->def.num_components == 1 && "Vector load_const should be lowered to scalar.");
3746 assert(dst.type() == RegType::sgpr);
3748 Builder bld(ctx->program, ctx->block);
3750 if (instr->def.bit_size == 1) {
3751 assert(dst.regClass() == bld.lm);
3752 int val = instr->value[0].b ? -1 : 0;
3753 Operand op = bld.lm.size() == 1 ? Operand::c32(val) : Operand::c64(val);
3754 bld.copy(Definition(dst), op);
3755 } else if (instr->def.bit_size == 8) {
3756 bld.copy(Definition(dst), Operand::c32(instr->value[0].u8));
3757 } else if (instr->def.bit_size == 16) {
3758 /* sign-extend to use s_movk_i32 instead of a literal */
3759 bld.copy(Definition(dst), Operand::c32(instr->value[0].i16));
3760 } else if (dst.size() == 1) {
3761 bld.copy(Definition(dst), Operand::c32(instr->value[0].u32));
3763 assert(dst.size() != 1);
3764 aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(
3765 aco_opcode::p_create_vector, Format::PSEUDO, dst.size(), 1)};
3766 if (instr->def.bit_size == 64)
3767 for (unsigned i = 0; i < dst.size(); i++)
3768 vec->operands[i] = Operand::c32(instr->value[0].u64 >> i * 32);
3770 for (unsigned i = 0; i < dst.size(); i++)
3771 vec->operands[i] = Operand::c32(instr->value[i].u32);
3773 vec->definitions[0] = Definition(dst);
3774 ctx->block->instructions.emplace_back(std::move(vec));
3779 can_use_byte_align_for_global_load(unsigned num_components, unsigned component_size,
3780 unsigned align_, bool support_12_byte)
3782 /* Only use byte-align for 8/16-bit loads if we won't have to increase its size and won't have
3783 * to use unsupported load sizes.
3785 assert(util_is_power_of_two_nonzero(align_));
3787 assert(component_size < 4);
3788 unsigned load_size = num_components * component_size;
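/* Starting the load at the dword-aligned base adds up to (4 - align_) leading bytes;
 * the byte-align path is only worthwhile if that doesn't grow the number of dwords
 * fetched, and 12-byte loads are avoided where the hardware can't emit them. */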
3789 int new_size = align(load_size + (4 - align_), 4);
3790 return new_size == align(load_size, 4) && (new_size != 12 || support_12_byte);
3795 struct LoadEmitInfo {
3798 unsigned num_components;
3799 unsigned component_size;
3800 Temp resource = Temp(0, s1); /* buffer resource or base 64-bit address */
3801 unsigned component_stride = 0;
3802 unsigned const_offset = 0;
3803 unsigned align_mul = 0;
3804 unsigned align_offset = 0;
3808 unsigned swizzle_component_size = 0;
3809 memory_sync_info sync;
3810 Temp soffset = Temp(0, s1);
3813 struct EmitLoadParameters {
3814 using Callback = Temp (*)(Builder& bld, const LoadEmitInfo& info, Temp offset,
3815 unsigned bytes_needed, unsigned align, unsigned const_offset,
3819 bool byte_align_loads;
3820 bool supports_8bit_16bit_loads;
3821 unsigned max_const_offset_plus_one;
3825 emit_load(isel_context* ctx, Builder& bld, const LoadEmitInfo& info,
3826 const EmitLoadParameters& params)
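/* Generic load emission: the loop below fetches the destination in chunks through
 * params.callback, over-fetching and shifting when the start is not naturally
 * aligned, and the tail of the function re-assembles the chunks into the
 * destination vector (inserting p_as_uniform where an SGPR result is wanted). */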
3828 unsigned load_size = info.num_components * info.component_size;
3829 unsigned component_size = info.component_size;
3831 unsigned num_vals = 0;
3832 Temp* const vals = (Temp*)alloca(info.dst.bytes() * sizeof(Temp));
3834 unsigned const_offset = info.const_offset;
3836 const unsigned align_mul = info.align_mul ? info.align_mul : component_size;
3837 unsigned align_offset = (info.align_offset + const_offset) % align_mul;
3839 unsigned bytes_read = 0;
3840 while (bytes_read < load_size) {
3841 unsigned bytes_needed = load_size - bytes_read;
3843 /* add buffer for unaligned loads */
3844 int byte_align = 0;
3845 if (params.byte_align_loads) {
3846 byte_align = align_mul % 4 == 0 ? align_offset % 4 : -1;
3850 if (bytes_needed > 2 || (bytes_needed == 2 && (align_mul % 2 || align_offset % 2)) ||
3851 !params.supports_8bit_16bit_loads) {
3852 if (info.component_stride) {
3853 assert(params.supports_8bit_16bit_loads && "unimplemented");
3854 bytes_needed = 2;
3855 byte_align = 0;
3856 } else {
3857 bytes_needed += byte_align == -1 ? 4 - info.align_mul : byte_align;
3858 bytes_needed = align(bytes_needed, 4);
3865 if (info.swizzle_component_size)
3866 bytes_needed = MIN2(bytes_needed, info.swizzle_component_size);
3867 if (info.component_stride)
3868 bytes_needed = MIN2(bytes_needed, info.component_size);
3870 bool need_to_align_offset = byte_align && (align_mul % 4 || align_offset % 4);
3872 /* reduce constant offset */
3873 Operand offset = info.offset;
3874 unsigned reduced_const_offset = const_offset;
3875 bool remove_const_offset_completely = need_to_align_offset;
3876 if (const_offset &&
3877 (remove_const_offset_completely || const_offset >= params.max_const_offset_plus_one)) {
3878 unsigned to_add = const_offset;
3879 if (remove_const_offset_completely) {
3880 reduced_const_offset = 0;
3881 } else {
3882 to_add =
3883 const_offset / params.max_const_offset_plus_one * params.max_const_offset_plus_one;
3884 reduced_const_offset %= params.max_const_offset_plus_one;
3886 Temp offset_tmp = offset.isTemp() ? offset.getTemp() : Temp();
3887 if (offset.isConstant()) {
3888 offset = Operand::c32(offset.constantValue() + to_add);
3889 } else if (offset_tmp.regClass() == s1) {
3890 offset = bld.sop2(aco_opcode::s_add_i32, bld.def(s1), bld.def(s1, scc), offset_tmp,
3891 Operand::c32(to_add));
3892 } else if (offset_tmp.regClass() == v1) {
3893 offset = bld.vadd32(bld.def(v1), offset_tmp, Operand::c32(to_add));
3895 Temp lo = bld.tmp(offset_tmp.type(), 1);
3896 Temp hi = bld.tmp(offset_tmp.type(), 1);
3897 bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), offset_tmp);
3899 if (offset_tmp.regClass() == s2) {
3900 Temp carry = bld.tmp(s1);
3901 lo = bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.scc(Definition(carry)), lo,
3902 Operand::c32(to_add));
3903 hi = bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.def(s1, scc), hi, carry);
3904 offset = bld.pseudo(aco_opcode::p_create_vector, bld.def(s2), lo, hi);
3906 Temp new_lo = bld.tmp(v1);
3907 Temp carry =
3908 bld.vadd32(Definition(new_lo), lo, Operand::c32(to_add), true).def(1).getTemp();
3909 hi = bld.vadd32(bld.def(v1), hi, Operand::zero(), false, carry);
3910 offset = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), new_lo, hi);
3915 /* align offset down if needed */
3916 Operand aligned_offset = offset;
3917 unsigned align = align_offset ? 1 << (ffs(align_offset) - 1) : align_mul;
3918 if (need_to_align_offset) {
3920 Temp offset_tmp = offset.isTemp() ? offset.getTemp() : Temp();
3921 if (offset.isConstant()) {
3922 aligned_offset = Operand::c32(offset.constantValue() & 0xfffffffcu);
3923 } else if (offset_tmp.regClass() == s1) {
3924 aligned_offset = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc),
3925 Operand::c32(0xfffffffcu), offset_tmp);
3926 } else if (offset_tmp.regClass() == s2) {
3927 aligned_offset = bld.sop2(aco_opcode::s_and_b64, bld.def(s2), bld.def(s1, scc),
3928 Operand::c64(0xfffffffffffffffcllu), offset_tmp);
3929 } else if (offset_tmp.regClass() == v1) {
3930 aligned_offset =
3931 bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand::c32(0xfffffffcu), offset_tmp);
3932 } else if (offset_tmp.regClass() == v2) {
3933 Temp hi = bld.tmp(v1), lo = bld.tmp(v1);
3934 bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), offset_tmp);
3935 lo = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand::c32(0xfffffffcu), lo);
3936 aligned_offset = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), lo, hi);
3939 Temp aligned_offset_tmp =
3940 aligned_offset.isTemp() ? aligned_offset.getTemp() : bld.copy(bld.def(s1), aligned_offset);
3942 Temp val = params.callback(bld, info, aligned_offset_tmp, bytes_needed, align,
3943 reduced_const_offset, byte_align ? Temp() : info.dst);
3945 /* the callback wrote directly to dst */
3946 if (val == info.dst) {
3947 assert(num_vals == 0);
3948 emit_split_vector(ctx, info.dst, info.num_components);
3949 return;
3950 }
3952 /* shift result right if needed */
3953 if (params.byte_align_loads && info.component_size < 4) {
3954 Operand byte_align_off = Operand::c32(byte_align);
3955 if (byte_align == -1) {
3956 if (offset.isConstant())
3957 byte_align_off = Operand::c32(offset.constantValue() % 4u);
3958 else if (offset.size() == 2)
3959 byte_align_off = Operand(emit_extract_vector(ctx, offset.getTemp(), 0,
3960 RegClass(offset.getTemp().type(), 1)));
3961 else
3962 byte_align_off = offset;
3965 assert(val.bytes() >= load_size && "unimplemented");
3966 if (val.type() == RegType::sgpr)
3967 byte_align_scalar(ctx, val, byte_align_off, info.dst);
3968 else
3969 byte_align_vector(ctx, val, byte_align_off, info.dst, component_size);
3970 return;
3973 /* add result to list and advance */
3974 if (info.component_stride) {
3975 assert(val.bytes() == info.component_size && "unimplemented");
3976 const_offset += info.component_stride;
3977 align_offset = (align_offset + info.component_stride) % align_mul;
3979 const_offset += val.bytes();
3980 align_offset = (align_offset + val.bytes()) % align_mul;
3982 bytes_read += val.bytes();
3983 vals[num_vals++] = val;
3986 /* create array of components */
3987 unsigned components_split = 0;
3988 std::array<Temp, NIR_MAX_VEC_COMPONENTS> allocated_vec;
3989 bool has_vgprs = false;
3990 for (unsigned i = 0; i < num_vals;) {
3991 Temp* const tmp = (Temp*)alloca(num_vals * sizeof(Temp));
3992 unsigned num_tmps = 0;
3993 unsigned tmp_size = 0;
3994 RegType reg_type = RegType::sgpr;
3995 while ((!tmp_size || (tmp_size % component_size)) && i < num_vals) {
3996 if (vals[i].type() == RegType::vgpr)
3997 reg_type = RegType::vgpr;
3998 tmp_size += vals[i].bytes();
3999 tmp[num_tmps++] = vals[i++];
4002 aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(
4003 aco_opcode::p_create_vector, Format::PSEUDO, num_tmps, 1)};
4004 for (unsigned j = 0; j < num_tmps; j++)
4005 vec->operands[j] = Operand(tmp[j]);
4006 tmp[0] = bld.tmp(RegClass::get(reg_type, tmp_size));
4007 vec->definitions[0] = Definition(tmp[0]);
4008 bld.insert(std::move(vec));
4011 if (tmp[0].bytes() % component_size) {
4013 assert(i == num_vals);
4015 RegClass::get(reg_type, tmp[0].bytes() / component_size * component_size);
4017 bld.pseudo(aco_opcode::p_extract_vector, bld.def(new_rc), tmp[0], Operand::zero());
4020 RegClass elem_rc = RegClass::get(reg_type, component_size);
4022 unsigned start = components_split;
4024 if (tmp_size == elem_rc.bytes()) {
4025 allocated_vec[components_split++] = tmp[0];
4027 assert(tmp_size % elem_rc.bytes() == 0);
4028 aco_ptr<Pseudo_instruction> split{create_instruction<Pseudo_instruction>(
4029 aco_opcode::p_split_vector, Format::PSEUDO, 1, tmp_size / elem_rc.bytes())};
4030 for (auto& def : split->definitions) {
4031 Temp component = bld.tmp(elem_rc);
4032 allocated_vec[components_split++] = component;
4033 def = Definition(component);
4035 split->operands[0] = Operand(tmp[0]);
4036 bld.insert(std::move(split));
4039 /* try to p_as_uniform early so we can create more optimizable code and
4040 * also update allocated_vec */
4041 for (unsigned j = start; j < components_split; j++) {
4042 if (allocated_vec[j].bytes() % 4 == 0 && info.dst.type() == RegType::sgpr)
4043 allocated_vec[j] = bld.as_uniform(allocated_vec[j]);
4044 has_vgprs |= allocated_vec[j].type() == RegType::vgpr;
4048 /* concatenate components and p_as_uniform() result if needed */
4049 if (info.dst.type() == RegType::vgpr || !has_vgprs)
4050 ctx->allocated_vec.emplace(info.dst.id(), allocated_vec);
4053 MAX2((int)info.dst.bytes() - int(allocated_vec[0].bytes() * info.num_components), 0);
4055 aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(
4056 aco_opcode::p_create_vector, Format::PSEUDO, info.num_components + !!padding_bytes, 1)};
4057 for (unsigned i = 0; i < info.num_components; i++)
4058 vec->operands[i] = Operand(allocated_vec[i]);
4060 vec->operands[info.num_components] = Operand(RegClass::get(RegType::vgpr, padding_bytes));
4061 if (info.dst.type() == RegType::sgpr && has_vgprs) {
4062 Temp tmp = bld.tmp(RegType::vgpr, info.dst.size());
4063 vec->definitions[0] = Definition(tmp);
4064 bld.insert(std::move(vec));
4065 bld.pseudo(aco_opcode::p_as_uniform, Definition(info.dst), tmp);
4067 vec->definitions[0] = Definition(info.dst);
4068 bld.insert(std::move(vec));
4073 load_lds_size_m0(Builder& bld)
4075 /* m0 does not need to be initialized on GFX9+ */
4076 if (bld.program->chip_class >= GFX9)
4079 return bld.m0((Temp)bld.copy(bld.def(s1, m0), Operand::c32(0xffffffffu)));
4083 lds_load_callback(Builder& bld, const LoadEmitInfo& info, Temp offset, unsigned bytes_needed,
4084 unsigned align, unsigned const_offset, Temp dst_hint)
4086 offset = offset.regClass() == s1 ? bld.copy(bld.def(v1), offset) : offset;
4088 Operand m = load_lds_size_m0(bld);
4090 bool large_ds_read = bld.program->chip_class >= GFX7;
4091 bool usable_read2 = bld.program->chip_class >= GFX7;
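/* Pick the widest ds_read that the remaining size, the alignment and the chip allow;
 * read2 variants load two elements at separate offsets, so their constant offset is
 * counted in elements rather than bytes below. */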
4096 if (bytes_needed >= 16 && align % 16 == 0 && large_ds_read) {
4098 op = aco_opcode::ds_read_b128;
4099 } else if (bytes_needed >= 16 && align % 8 == 0 && const_offset % 8 == 0 && usable_read2) {
4102 op = aco_opcode::ds_read2_b64;
4103 } else if (bytes_needed >= 12 && align % 16 == 0 && large_ds_read) {
4105 op = aco_opcode::ds_read_b96;
4106 } else if (bytes_needed >= 8 && align % 8 == 0) {
4108 op = aco_opcode::ds_read_b64;
4109 } else if (bytes_needed >= 8 && align % 4 == 0 && const_offset % 4 == 0 && usable_read2) {
4112 op = aco_opcode::ds_read2_b32;
4113 } else if (bytes_needed >= 4 && align % 4 == 0) {
4115 op = aco_opcode::ds_read_b32;
4116 } else if (bytes_needed >= 2 && align % 2 == 0) {
4118 op = bld.program->chip_class >= GFX9 ? aco_opcode::ds_read_u16_d16 : aco_opcode::ds_read_u16;
4121 op = bld.program->chip_class >= GFX9 ? aco_opcode::ds_read_u8_d16 : aco_opcode::ds_read_u8;
4124 unsigned const_offset_unit = read2 ? size / 2u : 1u;
4125 unsigned const_offset_range = read2 ? 255 * const_offset_unit : 65536;
4127 if (const_offset > (const_offset_range - const_offset_unit)) {
4128 unsigned excess = const_offset - (const_offset % const_offset_range);
4129 offset = bld.vadd32(bld.def(v1), offset, Operand::c32(excess));
4130 const_offset -= excess;
4133 const_offset /= const_offset_unit;
4135 RegClass rc = RegClass::get(RegType::vgpr, size);
4136 Temp val = rc == info.dst.regClass() && dst_hint.id() ? dst_hint : bld.tmp(rc);
4139 instr = bld.ds(op, Definition(val), offset, m, const_offset, const_offset + 1);
4141 instr = bld.ds(op, Definition(val), offset, m, const_offset);
4142 instr->ds().sync = info.sync;
4144 if (m.isUndefined())
4145 instr->operands.pop_back();
4150 const EmitLoadParameters lds_load_params{lds_load_callback, false, true, UINT32_MAX};
4153 smem_load_callback(Builder& bld, const LoadEmitInfo& info, Temp offset, unsigned bytes_needed,
4154 unsigned align, unsigned const_offset, Temp dst_hint)
4156 assert(align >= 4u);
4158 bool buffer = info.resource.id() && info.resource.bytes() == 16;
4159 Temp addr = info.resource;
4160 if (!buffer && !addr.id()) {
4165 bytes_needed = MIN2(bytes_needed, 64);
4166 unsigned needed_round_up = util_next_power_of_two(bytes_needed);
4167 unsigned needed_round_down = needed_round_up >> (needed_round_up != bytes_needed ? 1 : 0);
4168 /* Only round-up global loads if it's aligned so that it won't cross pages */
4169 bytes_needed = buffer || align % needed_round_up == 0 ? needed_round_up : needed_round_down;
4172 if (bytes_needed <= 4) {
4173 op = buffer ? aco_opcode::s_buffer_load_dword : aco_opcode::s_load_dword;
4174 } else if (bytes_needed <= 8) {
4175 op = buffer ? aco_opcode::s_buffer_load_dwordx2 : aco_opcode::s_load_dwordx2;
4176 } else if (bytes_needed <= 16) {
4177 op = buffer ? aco_opcode::s_buffer_load_dwordx4 : aco_opcode::s_load_dwordx4;
4178 } else if (bytes_needed <= 32) {
4179 op = buffer ? aco_opcode::s_buffer_load_dwordx8 : aco_opcode::s_load_dwordx8;
4181 assert(bytes_needed == 64);
4182 op = buffer ? aco_opcode::s_buffer_load_dwordx16 : aco_opcode::s_load_dwordx16;
4185 aco_ptr<SMEM_instruction> load{create_instruction<SMEM_instruction>(op, Format::SMEM, 2, 1)};
4186 if (buffer) {
4187 if (const_offset)
4188 offset = bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.def(s1, scc), offset,
4189 Operand::c32(const_offset));
4190 load->operands[0] = Operand(info.resource);
4191 load->operands[1] = Operand(offset);
4192 } else {
4193 load->operands[0] = Operand(addr);
4194 if (offset.id() && const_offset)
4195 load->operands[1] = bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.def(s1, scc), offset,
4196 Operand::c32(const_offset));
4197 else
4198 load->operands[1] = Operand::c32(const_offset);
4199 }
4200 RegClass rc(RegType::sgpr, DIV_ROUND_UP(bytes_needed, 4u));
4201 Temp val = dst_hint.id() && dst_hint.regClass() == rc ? dst_hint : bld.tmp(rc);
4202 load->definitions[0] = Definition(val);
4203 load->glc = info.glc;
4204 load->dlc = info.glc && bld.program->chip_class >= GFX10;
4205 load->sync = info.sync;
4206 bld.insert(std::move(load));
4210 const EmitLoadParameters smem_load_params{smem_load_callback, true, false, 1024};
4213 mubuf_load_callback(Builder& bld, const LoadEmitInfo& info, Temp offset, unsigned bytes_needed,
4214 unsigned align_, unsigned const_offset, Temp dst_hint)
4216 Operand vaddr = offset.type() == RegType::vgpr ? Operand(offset) : Operand(v1);
4217 Operand soffset = offset.type() == RegType::sgpr ? Operand(offset) : Operand::c32(0);
4219 if (info.soffset.id()) {
4220 if (soffset.isTemp())
4221 vaddr = bld.copy(bld.def(v1), soffset);
4222 soffset = Operand(info.soffset);
4225 unsigned bytes_size = 0;
4227 if (bytes_needed == 1 || align_ % 2) {
4229 op = aco_opcode::buffer_load_ubyte;
4230 } else if (bytes_needed == 2 || align_ % 4) {
4232 op = aco_opcode::buffer_load_ushort;
4233 } else if (bytes_needed <= 4) {
4235 op = aco_opcode::buffer_load_dword;
4236 } else if (bytes_needed <= 8) {
4238 op = aco_opcode::buffer_load_dwordx2;
4239 } else if (bytes_needed <= 12 && bld.program->chip_class > GFX6) {
4241 op = aco_opcode::buffer_load_dwordx3;
4244 op = aco_opcode::buffer_load_dwordx4;
4246 aco_ptr<MUBUF_instruction> mubuf{create_instruction<MUBUF_instruction>(op, Format::MUBUF, 3, 1)};
4247 mubuf->operands[0] = Operand(info.resource);
4248 mubuf->operands[1] = vaddr;
4249 mubuf->operands[2] = soffset;
4250 mubuf->offen = (offset.type() == RegType::vgpr);
4251 mubuf->glc = info.glc;
4252 mubuf->dlc = info.glc && bld.program->chip_class >= GFX10;
4253 mubuf->slc = info.slc;
4254 mubuf->sync = info.sync;
4255 mubuf->offset = const_offset;
4256 mubuf->swizzled = info.swizzle_component_size != 0;
4257 RegClass rc = RegClass::get(RegType::vgpr, bytes_size);
4258 Temp val = dst_hint.id() && rc == dst_hint.regClass() ? dst_hint : bld.tmp(rc);
4259 mubuf->definitions[0] = Definition(val);
4260 bld.insert(std::move(mubuf));
4265 const EmitLoadParameters mubuf_load_params{mubuf_load_callback, true, true, 4096};
4266 const EmitLoadParameters scratch_load_params{mubuf_load_callback, false, true, 4096};
4269 get_gfx6_global_rsrc(Builder& bld, Temp addr)
4271 uint32_t rsrc_conf = S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
4272 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
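/* Build a buffer resource spanning the whole address space (num_records = -1) so
 * that GFX6 can implement global access through MUBUF; with a VGPR address the base
 * is left at zero and the address is supplied through addr64 instead. */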
4274 if (addr.type() == RegType::vgpr)
4275 return bld.pseudo(aco_opcode::p_create_vector, bld.def(s4), Operand::zero(), Operand::zero(),
4276 Operand::c32(-1u), Operand::c32(rsrc_conf));
4277 return bld.pseudo(aco_opcode::p_create_vector, bld.def(s4), addr, Operand::c32(-1u),
4278 Operand::c32(rsrc_conf));
4282 add64_32(Builder& bld, Temp src0, Temp src1)
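/* Add a 32-bit value to a 64-bit address: the low half is added with carry-out and
 * the carry is propagated into the high half, using VALU or SALU adds depending on
 * where the operands live. */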
4284 Temp src00 = bld.tmp(src0.type(), 1);
4285 Temp src01 = bld.tmp(src0.type(), 1);
4286 bld.pseudo(aco_opcode::p_split_vector, Definition(src00), Definition(src01), src0);
4288 if (src0.type() == RegType::vgpr || src1.type() == RegType::vgpr) {
4289 Temp dst0 = bld.tmp(v1);
4290 Temp carry = bld.vadd32(Definition(dst0), src00, src1, true).def(1).getTemp();
4291 Temp dst1 = bld.vadd32(bld.def(v1), src01, Operand::zero(), false, carry);
4292 return bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), dst0, dst1);
4294 Temp carry = bld.tmp(s1);
4296 bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.scc(Definition(carry)), src00, src1);
4297 Temp dst1 = bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.def(s1, scc), src01, carry);
4298 return bld.pseudo(aco_opcode::p_create_vector, bld.def(s2), dst0, dst1);
4303 lower_global_address(Builder& bld, uint32_t offset_in, Temp* address_inout,
4304 uint32_t* const_offset_inout, Temp* offset_inout)
4306 Temp address = *address_inout;
4307 uint64_t const_offset = *const_offset_inout + offset_in;
4308 Temp offset = *offset_inout;
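/* Split the total constant offset into a part the instruction encoding can hold and
 * an excess that is folded into the address or the register offset, then legalize
 * the (address, offset) pair for the addressing mode of the current chip:
 * MUBUF on GFX6, FLAT on GFX7-8, GLOBAL on GFX9+. */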
4310 uint64_t max_const_offset_plus_one =
4311 1; /* GFX7/8/9: FLAT loads do not support constant offsets */
4312 if (bld.program->chip_class >= GFX10)
4313 max_const_offset_plus_one =
4314 2048; /* GLOBAL has an 11-bit unsigned offset field (12 bits if signed) */
4315 else if (bld.program->chip_class == GFX6 || bld.program->chip_class == GFX9)
4316 max_const_offset_plus_one =
4317 4096; /* MUBUF/GLOBAL has a 12-bit unsigned offset field (13 bits if signed for GLOBAL) */
4318 uint64_t excess_offset = const_offset - (const_offset % max_const_offset_plus_one);
4319 const_offset %= max_const_offset_plus_one;
4321 if (!offset.id()) {
4322 while (unlikely(excess_offset > UINT32_MAX)) {
4323 address = add64_32(bld, address, bld.copy(bld.def(s1), Operand::c32(UINT32_MAX)));
4324 excess_offset -= UINT32_MAX;
4325 }
4326 if (excess_offset)
4327 offset = bld.copy(bld.def(s1), Operand::c32(excess_offset));
4328 } else {
4329 /* If we add to "offset", we would transform the intended
4330 * "address + u2u64(offset) + u2u64(const_offset)" into
4331 * "address + u2u64(offset + const_offset)", so add to the address.
4332 * This could be more efficient if excess_offset>UINT32_MAX by doing a full 64-bit addition,
4333 * but that should be really rare.
4335 while (excess_offset) {
4336 uint32_t src2 = MIN2(excess_offset, UINT32_MAX);
4337 address = add64_32(bld, address, bld.copy(bld.def(s1), Operand::c32(src2)));
4338 excess_offset -= src2;
4342 if (bld.program->chip_class == GFX6) {
4343 /* GFX6 (MUBUF): (SGPR address, SGPR offset) or (VGPR address, SGPR offset) */
4344 if (offset.type() != RegType::sgpr) {
4345 address = add64_32(bld, address, offset);
4348 offset = offset.id() ? offset : bld.copy(bld.def(s1), Operand::zero());
4349 } else if (bld.program->chip_class <= GFX8) {
4350 /* GFX7,8 (FLAT): VGPR address */
4352 address = add64_32(bld, address, offset);
4355 address = as_vgpr(bld, address);
4357 /* GFX9+ (GLOBAL): (VGPR address), or (SGPR address and VGPR offset) */
4358 if (address.type() == RegType::vgpr && offset.id()) {
4359 address = add64_32(bld, address, offset);
4361 } else if (address.type() == RegType::sgpr && offset.id()) {
4362 offset = as_vgpr(bld, offset);
4364 if (address.type() == RegType::sgpr && !offset.id())
4365 offset = bld.copy(bld.def(v1), bld.copy(bld.def(s1), Operand::zero()));
4368 *address_inout = address;
4369 *const_offset_inout = const_offset;
4370 *offset_inout = offset;
4374 global_load_callback(Builder& bld, const LoadEmitInfo& info, Temp offset, unsigned bytes_needed,
4375 unsigned align_, unsigned const_offset, Temp dst_hint)
4377 Temp addr = info.resource;
4382 lower_global_address(bld, 0, &addr, &const_offset, &offset);
4384 unsigned bytes_size = 0;
4385 bool use_mubuf = bld.program->chip_class == GFX6;
4386 bool global = bld.program->chip_class >= GFX9;
4388 if (bytes_needed == 1 || align_ % 2u) {
4390 op = use_mubuf ? aco_opcode::buffer_load_ubyte
4391 : global ? aco_opcode::global_load_ubyte
4392 : aco_opcode::flat_load_ubyte;
4393 } else if (bytes_needed == 2 || align_ % 4u) {
4395 op = use_mubuf ? aco_opcode::buffer_load_ushort
4396 : global ? aco_opcode::global_load_ushort
4397 : aco_opcode::flat_load_ushort;
4398 } else if (bytes_needed <= 4) {
4400 op = use_mubuf ? aco_opcode::buffer_load_dword
4401 : global ? aco_opcode::global_load_dword
4402 : aco_opcode::flat_load_dword;
4403 } else if (bytes_needed <= 8 || (bytes_needed <= 12 && use_mubuf)) {
4405 op = use_mubuf ? aco_opcode::buffer_load_dwordx2
4406 : global ? aco_opcode::global_load_dwordx2
4407 : aco_opcode::flat_load_dwordx2;
4408 } else if (bytes_needed <= 12 && !use_mubuf) {
4410 op = global ? aco_opcode::global_load_dwordx3 : aco_opcode::flat_load_dwordx3;
4413 op = use_mubuf ? aco_opcode::buffer_load_dwordx4
4414 : global ? aco_opcode::global_load_dwordx4
4415 : aco_opcode::flat_load_dwordx4;
4417 RegClass rc = RegClass::get(RegType::vgpr, bytes_size);
4418 Temp val = dst_hint.id() && rc == dst_hint.regClass() ? dst_hint : bld.tmp(rc);
4420 aco_ptr<MUBUF_instruction> mubuf{
4421 create_instruction<MUBUF_instruction>(op, Format::MUBUF, 3, 1)};
4422 mubuf->operands[0] = Operand(get_gfx6_global_rsrc(bld, addr));
4423 mubuf->operands[1] = addr.type() == RegType::vgpr ? Operand(addr) : Operand(v1);
4424 mubuf->operands[2] = Operand(offset);
4425 mubuf->glc = info.glc;
4427 mubuf->offset = const_offset;
4428 mubuf->addr64 = addr.type() == RegType::vgpr;
4429 mubuf->disable_wqm = false;
4430 mubuf->sync = info.sync;
4431 mubuf->definitions[0] = Definition(val);
4432 bld.insert(std::move(mubuf));
4434 aco_ptr<FLAT_instruction> flat{
4435 create_instruction<FLAT_instruction>(op, global ? Format::GLOBAL : Format::FLAT, 2, 1)};
4436 if (addr.regClass() == s2) {
4437 assert(global && offset.id() && offset.type() == RegType::vgpr);
4438 flat->operands[0] = Operand(offset);
4439 flat->operands[1] = Operand(addr);
4441 assert(addr.type() == RegType::vgpr && !offset.id());
4442 flat->operands[0] = Operand(addr);
4443 flat->operands[1] = Operand(s1);
4445 flat->glc = info.glc;
4446 flat->dlc = info.glc && bld.program->chip_class >= GFX10;
4447 flat->sync = info.sync;
4448 assert(global || !const_offset);
4449 flat->offset = const_offset;
4450 flat->definitions[0] = Definition(val);
4451 bld.insert(std::move(flat));
4457 const EmitLoadParameters global_load_params{global_load_callback, true, true, UINT32_MAX};
4460 load_lds(isel_context* ctx, unsigned elem_size_bytes, unsigned num_components, Temp dst,
4461 Temp address, unsigned base_offset, unsigned align)
4463 assert(util_is_power_of_two_nonzero(align));
4465 Builder bld(ctx->program, ctx->block);
4467 LoadEmitInfo info = {Operand(as_vgpr(ctx, address)), dst, num_components, elem_size_bytes};
4468 info.align_mul = align;
4469 info.align_offset = 0;
4470 info.sync = memory_sync_info(storage_shared);
4471 info.const_offset = base_offset;
4472 emit_load(ctx, bld, info, lds_load_params);
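/* split_store_data() below splits the store payload "src" into "count" pieces of the
 * requested byte sizes, reusing components recorded in ctx->allocated_vec when they
 * line up, and converting each piece to the requested register type. */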
4478 split_store_data(isel_context* ctx, RegType dst_type, unsigned count, Temp* dst, unsigned* bytes,
4484 Builder bld(ctx->program, ctx->block);
4486 /* count == 1 fast path */
4488 if (dst_type == RegType::sgpr)
4489 dst[0] = bld.as_uniform(src);
4491 dst[0] = as_vgpr(ctx, src);
4495 /* elem_size_bytes is the greatest common divisor which is a power of 2 */
4496 unsigned elem_size_bytes =
4497 1u << (ffs(std::accumulate(bytes, bytes + count, 8, std::bit_or<>{})) - 1);
4499 ASSERTED bool is_subdword = elem_size_bytes < 4;
4500 assert(!is_subdword || dst_type == RegType::vgpr);
4502 for (unsigned i = 0; i < count; i++)
4503 dst[i] = bld.tmp(RegClass::get(dst_type, bytes[i]));
4505 std::vector<Temp> temps;
4506 /* use allocated_vec if possible */
4507 auto it = ctx->allocated_vec.find(src.id());
4508 if (it != ctx->allocated_vec.end()) {
4509 if (!it->second[0].id())
4511 unsigned elem_size = it->second[0].bytes();
4512 assert(src.bytes() % elem_size == 0);
4514 for (unsigned i = 0; i < src.bytes() / elem_size; i++) {
4515 if (!it->second[i].id())
4518 if (elem_size_bytes % elem_size)
4521 temps.insert(temps.end(), it->second.begin(), it->second.begin() + src.bytes() / elem_size);
4522 elem_size_bytes = elem_size;
4526 /* split src if necessary */
4527 if (temps.empty()) {
4528 if (is_subdword && src.type() == RegType::sgpr)
4529 src = as_vgpr(ctx, src);
4530 if (dst_type == RegType::sgpr)
4531 src = bld.as_uniform(src);
4533 unsigned num_elems = src.bytes() / elem_size_bytes;
4534 aco_ptr<Instruction> split{create_instruction<Pseudo_instruction>(
4535 aco_opcode::p_split_vector, Format::PSEUDO, 1, num_elems)};
4536 split->operands[0] = Operand(src);
4537 for (unsigned i = 0; i < num_elems; i++) {
4538 temps.emplace_back(bld.tmp(RegClass::get(dst_type, elem_size_bytes)));
4539 split->definitions[i] = Definition(temps.back());
4541 bld.insert(std::move(split));
4545 for (unsigned i = 0; i < count; i++) {
4546 unsigned op_count = dst[i].bytes() / elem_size_bytes;
4547 if (op_count == 1) {
4548 if (dst_type == RegType::sgpr)
4549 dst[i] = bld.as_uniform(temps[idx++]);
4551 dst[i] = as_vgpr(ctx, temps[idx++]);
4555 aco_ptr<Instruction> vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector,
4556 Format::PSEUDO, op_count, 1)};
4557 for (unsigned j = 0; j < op_count; j++) {
4558 Temp tmp = temps[idx++];
4559 if (dst_type == RegType::sgpr)
4560 tmp = bld.as_uniform(tmp);
4561 vec->operands[j] = Operand(tmp);
4563 vec->definitions[0] = Definition(dst[i]);
4564 bld.insert(std::move(vec));
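/* scan_write_mask() looks at the first outstanding bit of todo_mask and reports the
 * length of the consecutive written (or, when it returns false, skipped) range
 * starting there; advance_write_mask() then clears that range from todo_mask. */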
4570 scan_write_mask(uint32_t mask, uint32_t todo_mask, int* start, int* count)
4572 unsigned start_elem = ffs(todo_mask) - 1;
4573 bool skip = !(mask & (1 << start_elem));
4575 mask = ~mask & todo_mask;
4579 u_bit_scan_consecutive_range(&mask, start, count);
4585 advance_write_mask(uint32_t* todo_mask, int start, int count)
4587 *todo_mask &= ~u_bit_consecutive(0, count) << start;
4591 store_lds(isel_context* ctx, unsigned elem_size_bytes, Temp data, uint32_t wrmask, Temp address,
4592 unsigned base_offset, unsigned align)
4594 assert(util_is_power_of_two_nonzero(align));
4595 assert(util_is_power_of_two_nonzero(elem_size_bytes) && elem_size_bytes <= 8);
4597 Builder bld(ctx->program, ctx->block);
4598 bool large_ds_write = ctx->options->chip_class >= GFX7;
4599 bool usable_write2 = ctx->options->chip_class >= GFX7;
4601 unsigned write_count = 0;
4602 Temp write_datas[32];
4603 unsigned offsets[32];
4604 unsigned bytes[32];
4605 aco_opcode opcodes[32];
4607 wrmask = util_widen_mask(wrmask, elem_size_bytes);
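/* Two passes: first partition the (byte-widened) write mask into chunks that each map
 * to a single ds_write opcode permitted by the offset alignment, then split the data
 * accordingly and emit the writes, merging suitable b32/b64 pairs into ds_write2. */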
4609 uint32_t todo = u_bit_consecutive(0, data.bytes());
4610 while (todo) {
4611 int offset, byte;
4612 if (!scan_write_mask(wrmask, todo, &offset, &byte)) {
4613 offsets[write_count] = offset;
4614 bytes[write_count] = byte;
4615 opcodes[write_count] = aco_opcode::num_opcodes;
4616 write_count++;
4617 advance_write_mask(&todo, offset, byte);
4618 continue;
4619 }
4621 bool aligned2 = offset % 2 == 0 && align % 2 == 0;
4622 bool aligned4 = offset % 4 == 0 && align % 4 == 0;
4623 bool aligned8 = offset % 8 == 0 && align % 8 == 0;
4624 bool aligned16 = offset % 16 == 0 && align % 16 == 0;
4626 // TODO: use ds_write_b8_d16_hi/ds_write_b16_d16_hi if beneficial
4627 aco_opcode op = aco_opcode::num_opcodes;
4628 if (byte >= 16 && aligned16 && large_ds_write) {
4629 op = aco_opcode::ds_write_b128;
4631 } else if (byte >= 12 && aligned16 && large_ds_write) {
4632 op = aco_opcode::ds_write_b96;
4634 } else if (byte >= 8 && aligned8) {
4635 op = aco_opcode::ds_write_b64;
4637 } else if (byte >= 4 && aligned4) {
4638 op = aco_opcode::ds_write_b32;
4640 } else if (byte >= 2 && aligned2) {
4641 op = aco_opcode::ds_write_b16;
4643 } else if (byte >= 1) {
4644 op = aco_opcode::ds_write_b8;
4650 offsets[write_count] = offset;
4651 bytes[write_count] = byte;
4652 opcodes[write_count] = op;
4654 advance_write_mask(&todo, offset, byte);
4657 Operand m = load_lds_size_m0(bld);
4659 split_store_data(ctx, RegType::vgpr, write_count, write_datas, bytes, data);
4661 for (unsigned i = 0; i < write_count; i++) {
4662 aco_opcode op = opcodes[i];
4663 if (op == aco_opcode::num_opcodes)
4666 Temp split_data = write_datas[i];
4668 unsigned second = write_count;
4669 if (usable_write2 && (op == aco_opcode::ds_write_b32 || op == aco_opcode::ds_write_b64)) {
4670 for (second = i + 1; second < write_count; second++) {
4671 if (opcodes[second] == op && (offsets[second] - offsets[i]) % split_data.bytes() == 0) {
4672 op = split_data.bytes() == 4 ? aco_opcode::ds_write2_b32 : aco_opcode::ds_write2_b64;
4673 opcodes[second] = aco_opcode::num_opcodes;
4679 bool write2 = op == aco_opcode::ds_write2_b32 || op == aco_opcode::ds_write2_b64;
4680 unsigned write2_off = (offsets[second] - offsets[i]) / split_data.bytes();
4682 unsigned inline_offset = base_offset + offsets[i];
4683 unsigned max_offset = write2 ? (255 - write2_off) * split_data.bytes() : 65535;
4684 Temp address_offset = address;
4685 if (inline_offset > max_offset) {
4686 address_offset = bld.vadd32(bld.def(v1), Operand::c32(base_offset), address_offset);
4687 inline_offset = offsets[i];
4690 /* offsets[i] shouldn't be large enough for this to happen */
4691 assert(inline_offset <= max_offset);
4695 Temp second_data = write_datas[second];
4696 inline_offset /= split_data.bytes();
4697 instr = bld.ds(op, address_offset, split_data, second_data, m, inline_offset,
4698 inline_offset + write2_off);
4700 instr = bld.ds(op, address_offset, split_data, m, inline_offset);
4702 instr->ds().sync = memory_sync_info(storage_shared);
4704 if (m.isUndefined())
4705 instr->operands.pop_back();
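/* Map a store size in bytes to the matching MUBUF store opcode. */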
4710 get_buffer_store_op(unsigned bytes)
4713 case 1: return aco_opcode::buffer_store_byte;
4714 case 2: return aco_opcode::buffer_store_short;
4715 case 4: return aco_opcode::buffer_store_dword;
4716 case 8: return aco_opcode::buffer_store_dwordx2;
4717 case 12: return aco_opcode::buffer_store_dwordx3;
4718 case 16: return aco_opcode::buffer_store_dwordx4;
4720 unreachable("Unexpected store size");
4721 return aco_opcode::num_opcodes;
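/* Split a buffer store into contiguous runs of the write mask, clamping each
 * chunk to the swizzle element size, the alignment and the sizes the hardware
 * supports (e.g. no 12-byte stores for SMEM or GFX6 VMEM). Outputs the split
 * data, the per-chunk byte offsets and the resulting write count. */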
4725 split_buffer_store(isel_context* ctx, nir_intrinsic_instr* instr, bool smem, RegType dst_type,
4726 Temp data, unsigned writemask, int swizzle_element_size, unsigned* write_count,
4727 Temp* write_datas, unsigned* offsets)
4729 unsigned write_count_with_skips = 0;
4733 /* determine how to split the data */
4734 unsigned todo = u_bit_consecutive(0, data.bytes());
4737 skips[write_count_with_skips] = !scan_write_mask(writemask, todo, &offset, &byte);
4738 offsets[write_count_with_skips] = offset;
4739 if (skips[write_count_with_skips]) {
4740 bytes[write_count_with_skips] = byte;
4741 advance_write_mask(&todo, offset, byte);
4742 write_count_with_skips++;
4746 /* the only supported sizes are 1, 2, 4, 8, 12 and 16 bytes, and they can't be
4747 * larger than swizzle_element_size */
4748 byte = MIN2(byte, swizzle_element_size);
4750 byte = byte > 4 ? byte & ~0x3 : MIN2(byte, 2);
4752 /* SMEM and GFX6 VMEM can't emit 12-byte stores */
4753 if ((ctx->program->chip_class == GFX6 || smem) && byte == 12)
4756 /* dword or larger stores have to be dword-aligned */
4757 unsigned align_mul = instr ? nir_intrinsic_align_mul(instr) : 4;
4758 unsigned align_offset = (instr ? nir_intrinsic_align_offset(instr) : 0) + offset;
4759 bool dword_aligned = align_offset % 4 == 0 && align_mul % 4 == 0;
4761 byte = MIN2(byte, (align_offset % 2 == 0 && align_mul % 2 == 0) ? 2 : 1);
4763 bytes[write_count_with_skips] = byte;
4764 advance_write_mask(&todo, offset, byte);
4765 write_count_with_skips++;
4768 /* actually split data */
4769 split_store_data(ctx, dst_type, write_count_with_skips, write_datas, bytes, data);
4772 for (unsigned i = 0; i < write_count_with_skips; i++) {
4775 write_datas[*write_count] = write_datas[i];
4776 offsets[*write_count] = offsets[i];
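/* Create a vector from an array of element temporaries, substituting zero for
 * missing elements, and record the components in allocated_vec (directly or via
 * emit_split_vector). */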
4782 create_vec_from_array(isel_context* ctx, Temp arr[], unsigned cnt, RegType reg_type,
4783 unsigned elem_size_bytes, unsigned split_cnt = 0u, Temp dst = Temp())
4785 Builder bld(ctx->program, ctx->block);
4786 unsigned dword_size = elem_size_bytes / 4;
4789 dst = bld.tmp(RegClass(reg_type, cnt * dword_size));
4791 std::array<Temp, NIR_MAX_VEC_COMPONENTS> allocated_vec;
4792 aco_ptr<Pseudo_instruction> instr{
4793 create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, cnt, 1)};
4794 instr->definitions[0] = Definition(dst);
4796 for (unsigned i = 0; i < cnt; ++i) {
4798 assert(arr[i].size() == dword_size);
4799 allocated_vec[i] = arr[i];
4800 instr->operands[i] = Operand(arr[i]);
4802 Temp zero = bld.copy(bld.def(RegClass(reg_type, dword_size)),
4803 Operand::zero(dword_size == 2 ? 8 : 4));
4804 allocated_vec[i] = zero;
4805 instr->operands[i] = Operand(zero);
4809 bld.insert(std::move(instr));
4812 emit_split_vector(ctx, dst, split_cnt);
4814 ctx->allocated_vec.emplace(dst.id(), allocated_vec); /* emit_split_vector already does this */
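/* MUBUF immediate offsets are limited to 12 bits (0-4095), so fold any excess
 * multiple of 4096 into voffset and return the remaining constant offset. */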
4820 resolve_excess_vmem_const_offset(Builder& bld, Temp& voffset, unsigned const_offset)
4822 if (const_offset >= 4096) {
4823 unsigned excess_const_offset = const_offset / 4096u * 4096u;
4824 const_offset %= 4096u;
4827 voffset = bld.copy(bld.def(v1), Operand::c32(excess_const_offset));
4828 else if (unlikely(voffset.regClass() == s1))
4829 voffset = bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.def(s1, scc),
4830 Operand::c32(excess_const_offset), Operand(voffset));
4831 else if (likely(voffset.regClass() == v1))
4832 voffset = bld.vadd32(bld.def(v1), Operand(voffset), Operand::c32(excess_const_offset));
4834 unreachable("Unsupported register class of voffset");
4837 return const_offset;
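/* Emit one buffer_store_* of vdata at descriptor + voffset + soffset + const_offset.
 * GFX6 cannot emit 3-dword stores (see the assert below). */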
4841 emit_single_mubuf_store(isel_context* ctx, Temp descriptor, Temp voffset, Temp soffset, Temp vdata,
4842 unsigned const_offset = 0u, memory_sync_info sync = memory_sync_info(),
4843 bool slc = false, bool swizzled = false)
4846 assert(vdata.size() != 3 || ctx->program->chip_class != GFX6);
4847 assert(vdata.size() >= 1 && vdata.size() <= 4);
4849 Builder bld(ctx->program, ctx->block);
4850 aco_opcode op = get_buffer_store_op(vdata.bytes());
4851 const_offset = resolve_excess_vmem_const_offset(bld, voffset, const_offset);
4853 Operand voffset_op = voffset.id() ? Operand(as_vgpr(ctx, voffset)) : Operand(v1);
4854 Operand soffset_op = soffset.id() ? Operand(soffset) : Operand::zero();
4856 bld.mubuf(op, Operand(descriptor), voffset_op, soffset_op, Operand(vdata), const_offset,
4857 /* offen */ !voffset_op.isUndefined(), /* swizzled */ swizzled,
4858 /* idxen */ false, /* addr64 */ false, /* disable_wqm */ false, /* glc */ true,
4859 /* dlc */ false, /* slc */ slc);
4861 r.instr->mubuf().sync = sync;
4865 store_vmem_mubuf(isel_context* ctx, Temp src, Temp descriptor, Temp voffset, Temp soffset,
4866 unsigned base_const_offset, unsigned elem_size_bytes, unsigned write_mask,
4867 bool allow_combining = true, memory_sync_info sync = memory_sync_info(),
4870 Builder bld(ctx->program, ctx->block);
4871 assert(elem_size_bytes == 1 || elem_size_bytes == 2 || elem_size_bytes == 4 || elem_size_bytes == 8);
4873 write_mask = util_widen_mask(write_mask, elem_size_bytes);
4875 unsigned write_count = 0;
4876 Temp write_datas[32];
4877 unsigned offsets[32];
4878 split_buffer_store(ctx, NULL, false, RegType::vgpr, src, write_mask, allow_combining ? 16 : 4,
4879 &write_count, write_datas, offsets);
4881 for (unsigned i = 0; i < write_count; i++) {
4882 unsigned const_offset = offsets[i] + base_const_offset;
4883 emit_single_mubuf_store(ctx, descriptor, voffset, soffset, write_datas[i], const_offset, sync,
4884 slc, !allow_combining);
4889 load_vmem_mubuf(isel_context* ctx, Temp dst, Temp descriptor, Temp voffset, Temp soffset,
4890 unsigned base_const_offset, unsigned elem_size_bytes, unsigned num_components,
4891 unsigned stride = 0u, bool allow_combining = true, bool allow_reorder = true,
4892 bool slc = false, memory_sync_info sync = memory_sync_info())
4894 assert(elem_size_bytes == 1 || elem_size_bytes == 2 || elem_size_bytes == 4 || elem_size_bytes == 8);
4895 assert((num_components * elem_size_bytes) == dst.bytes());
4896 assert(!!stride != allow_combining);
4898 Builder bld(ctx->program, ctx->block);
4900 LoadEmitInfo info = {Operand(voffset), dst, num_components, elem_size_bytes, descriptor};
4901 info.component_stride = allow_combining ? 0 : stride;
4904 info.swizzle_component_size = allow_combining ? 0 : 4;
4905 info.align_mul = MIN2(elem_size_bytes, 4);
4906 info.align_offset = 0;
4907 info.soffset = soffset;
4908 info.const_offset = base_const_offset;
4910 emit_load(ctx, bld, info, mubuf_load_params);
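/* Extract the wave id within the workgroup from merged_wave_info: the s_bfe_u32
 * operand packs the bit offset in its low bits and the field width in bits [22:16],
 * so 24u | (4u << 16) reads the 4-bit field at bit 24. */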
4914 wave_id_in_threadgroup(isel_context* ctx)
4916 Builder bld(ctx->program, ctx->block);
4917 return bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc),
4918 get_arg(ctx, ctx->args->ac.merged_wave_info), Operand::c32(24u | (4u << 16)));
4922 thread_id_in_threadgroup(isel_context* ctx)
4924 /* tid_in_tg = wave_id * wave_size + tid_in_wave */
4926 Builder bld(ctx->program, ctx->block);
4927 Temp tid_in_wave = emit_mbcnt(ctx, bld.tmp(v1));
4929 if (ctx->program->workgroup_size <= ctx->program->wave_size)
4932 Temp wave_id_in_tg = wave_id_in_threadgroup(ctx);
4933 Temp num_pre_threads =
4934 bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.def(s1, scc), wave_id_in_tg,
4935 Operand::c32(ctx->program->wave_size == 64 ? 6u : 5u));
4936 return bld.vadd32(bld.def(v1), Operand(num_pre_threads), Operand(tid_in_wave));
4940 store_output_to_temps(isel_context* ctx, nir_intrinsic_instr* instr)
4942 unsigned write_mask = nir_intrinsic_write_mask(instr);
4943 unsigned component = nir_intrinsic_component(instr);
4944 unsigned idx = nir_intrinsic_base(instr) * 4u + component;
4945 nir_src offset = *nir_get_io_offset_src(instr);
4947 if (!nir_src_is_const(offset) || nir_src_as_uint(offset))
4950 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
4952 if (instr->src[0].ssa->bit_size == 64)
4953 write_mask = util_widen_mask(write_mask, 2);
4955 RegClass rc = instr->src[0].ssa->bit_size == 16 ? v2b : v1;
4957 for (unsigned i = 0; i < 8; ++i) {
4958 if (write_mask & (1 << i)) {
4959 ctx->outputs.mask[idx / 4u] |= 1 << (idx % 4u);
4960 ctx->outputs.temps[idx] = emit_extract_vector(ctx, src, i, rc);
4969 load_input_from_temps(isel_context* ctx, nir_intrinsic_instr* instr, Temp dst)
4971 /* Only TCS per-vertex inputs are supported by this function.
4972 * Per-vertex inputs only match between the VS/TCS invocation id when the number of invocations in both stages matches. */
4975 if (ctx->shader->info.stage != MESA_SHADER_TESS_CTRL || !ctx->tcs_in_out_eq)
4978 nir_src* off_src = nir_get_io_offset_src(instr);
4979 nir_src* vertex_index_src = nir_get_io_arrayed_index_src(instr);
4980 nir_instr* vertex_index_instr = vertex_index_src->ssa->parent_instr;
4981 bool can_use_temps =
4982 nir_src_is_const(*off_src) && vertex_index_instr->type == nir_instr_type_intrinsic &&
4983 nir_instr_as_intrinsic(vertex_index_instr)->intrinsic == nir_intrinsic_load_invocation_id;
4988 unsigned idx = nir_intrinsic_base(instr) * 4u + nir_intrinsic_component(instr) +
4989 4 * nir_src_as_uint(*off_src);
4990 Temp* src = &ctx->inputs.temps[idx];
4991 create_vec_from_array(ctx, src, dst.size(), dst.regClass().type(), 4u, 0, dst);
4996 static void export_vs_varying(isel_context* ctx, int slot, bool is_pos, int* next_pos);
4999 visit_store_output(isel_context* ctx, nir_intrinsic_instr* instr)
5001 if (ctx->stage == vertex_vs || ctx->stage == tess_eval_vs || ctx->stage == fragment_fs ||
5002 ctx->stage == vertex_ngg || ctx->stage == tess_eval_ngg || ctx->stage == mesh_ngg ||
5003 (ctx->stage == vertex_tess_control_hs && ctx->shader->info.stage == MESA_SHADER_VERTEX) ||
5004 ctx->shader->info.stage == MESA_SHADER_GEOMETRY) {
5005 bool stored_to_temps = store_output_to_temps(ctx, instr);
5006 if (!stored_to_temps) {
5007 isel_err(instr->src[1].ssa->parent_instr, "Unimplemented output offset instruction");
5011 unreachable("Shader stage not implemented");
5014 /* For NGG VS and TES shaders the primitive ID is exported after the other exports, so we
5015 * have to emit the exp for it manually here */
5016 if (ctx->stage.hw == HWStage::NGG &&
5017 (ctx->stage.has(SWStage::VS) || ctx->stage.has(SWStage::TES)) &&
5018 nir_intrinsic_io_semantics(instr).location == VARYING_SLOT_PRIMITIVE_ID)
5019 export_vs_varying(ctx, VARYING_SLOT_PRIMITIVE_ID, false, NULL);
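/* Emit two-pass barycentric interpolation (v_interp_p1/p2) for one attribute
 * component, with dedicated 16-bit paths and a workaround for chips with
 * 16-bank LDS. */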
5023 emit_interp_instr(isel_context* ctx, unsigned idx, unsigned component, Temp src, Temp dst,
5026 Temp coord1 = emit_extract_vector(ctx, src, 0, v1);
5027 Temp coord2 = emit_extract_vector(ctx, src, 1, v1);
5029 Builder bld(ctx->program, ctx->block);
5031 if (dst.regClass() == v2b) {
5032 if (ctx->program->dev.has_16bank_lds) {
5033 assert(ctx->options->chip_class <= GFX8);
5034 Builder::Result interp_p1 =
5035 bld.vintrp(aco_opcode::v_interp_mov_f32, bld.def(v1), Operand::c32(2u) /* P0 */,
5036 bld.m0(prim_mask), idx, component);
5037 interp_p1 = bld.vintrp(aco_opcode::v_interp_p1lv_f16, bld.def(v2b), coord1,
5038 bld.m0(prim_mask), interp_p1, idx, component);
5039 bld.vintrp(aco_opcode::v_interp_p2_legacy_f16, Definition(dst), coord2, bld.m0(prim_mask),
5040 interp_p1, idx, component);
5042 aco_opcode interp_p2_op = aco_opcode::v_interp_p2_f16;
5044 if (ctx->options->chip_class == GFX8)
5045 interp_p2_op = aco_opcode::v_interp_p2_legacy_f16;
5047 Builder::Result interp_p1 = bld.vintrp(aco_opcode::v_interp_p1ll_f16, bld.def(v1), coord1,
5048 bld.m0(prim_mask), idx, component);
5049 bld.vintrp(interp_p2_op, Definition(dst), coord2, bld.m0(prim_mask), interp_p1, idx,
5053 Builder::Result interp_p1 = bld.vintrp(aco_opcode::v_interp_p1_f32, bld.def(v1), coord1,
5054 bld.m0(prim_mask), idx, component);
5056 if (ctx->program->dev.has_16bank_lds)
5057 interp_p1.instr->operands[0].setLateKill(true);
5059 bld.vintrp(aco_opcode::v_interp_p2_f32, Definition(dst), coord2, bld.m0(prim_mask), interp_p1,
5065 emit_load_frag_coord(isel_context* ctx, Temp dst, unsigned num_components)
5067 Builder bld(ctx->program, ctx->block);
5069 aco_ptr<Pseudo_instruction> vec(create_instruction<Pseudo_instruction>(
5070 aco_opcode::p_create_vector, Format::PSEUDO, num_components, 1));
5071 for (unsigned i = 0; i < num_components; i++) {
5072 if (ctx->args->ac.frag_pos[i].used)
5073 vec->operands[i] = Operand(get_arg(ctx, ctx->args->ac.frag_pos[i]));
5075 vec->operands[i] = Operand(v1);
5077 if (G_0286CC_POS_W_FLOAT_ENA(ctx->program->config->spi_ps_input_ena)) {
5078 assert(num_components == 4);
5080 bld.vop1(aco_opcode::v_rcp_f32, bld.def(v1), get_arg(ctx, ctx->args->ac.frag_pos[3]));
5083 for (Operand& op : vec->operands)
5084 op = op.isUndefined() ? Operand::zero() : op;
5086 vec->definitions[0] = Definition(dst);
5087 ctx->block->instructions.emplace_back(std::move(vec));
5088 emit_split_vector(ctx, dst, num_components);
5093 emit_load_frag_shading_rate(isel_context* ctx, Temp dst)
5095 Builder bld(ctx->program, ctx->block);
5098 /* VRS Rate X = Ancillary[2:3]
5099 * VRS Rate Y = Ancillary[4:5]
5101 Temp x_rate = bld.vop3(aco_opcode::v_bfe_u32, bld.def(v1), get_arg(ctx, ctx->args->ac.ancillary),
5102 Operand::c32(2u), Operand::c32(2u));
5103 Temp y_rate = bld.vop3(aco_opcode::v_bfe_u32, bld.def(v1), get_arg(ctx, ctx->args->ac.ancillary),
5104 Operand::c32(4u), Operand::c32(2u));
5106 /* xRate = xRate == 0x1 ? Horizontal2Pixels : None. */
5107 cond = bld.vopc(aco_opcode::v_cmp_eq_i32, bld.def(bld.lm), Operand::c32(1u), Operand(x_rate));
5108 x_rate = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), bld.copy(bld.def(v1), Operand::zero()),
5109 bld.copy(bld.def(v1), Operand::c32(4u)), cond);
5111 /* yRate = yRate == 0x1 ? Vertical2Pixels : None. */
5112 cond = bld.vopc(aco_opcode::v_cmp_eq_i32, bld.def(bld.lm), Operand::c32(1u), Operand(y_rate));
5113 y_rate = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), bld.copy(bld.def(v1), Operand::zero()),
5114 bld.copy(bld.def(v1), Operand::c32(1u)), cond);
5116 bld.vop2(aco_opcode::v_or_b32, Definition(dst), Operand(x_rate), Operand(y_rate));
5120 visit_load_interpolated_input(isel_context* ctx, nir_intrinsic_instr* instr)
5122 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
5123 Temp coords = get_ssa_temp(ctx, instr->src[0].ssa);
5124 unsigned idx = nir_intrinsic_base(instr);
5125 unsigned component = nir_intrinsic_component(instr);
5126 Temp prim_mask = get_arg(ctx, ctx->args->ac.prim_mask);
5128 assert(nir_src_is_const(instr->src[1]) && !nir_src_as_uint(instr->src[1]));
5130 if (instr->dest.ssa.num_components == 1) {
5131 emit_interp_instr(ctx, idx, component, coords, dst, prim_mask);
5133 aco_ptr<Pseudo_instruction> vec(create_instruction<Pseudo_instruction>(
5134 aco_opcode::p_create_vector, Format::PSEUDO, instr->dest.ssa.num_components, 1));
5135 for (unsigned i = 0; i < instr->dest.ssa.num_components; i++) {
5136 Temp tmp = ctx->program->allocateTmp(instr->dest.ssa.bit_size == 16 ? v2b : v1);
5137 emit_interp_instr(ctx, idx, component + i, coords, tmp, prim_mask);
5138 vec->operands[i] = Operand(tmp);
5140 vec->definitions[0] = Definition(dst);
5141 ctx->block->instructions.emplace_back(std::move(vec));
5146 check_vertex_fetch_size(isel_context* ctx, const ac_data_format_info* vtx_info, unsigned offset,
5147 unsigned binding_align, unsigned channels)
5149 unsigned vertex_byte_size = vtx_info->chan_byte_size * channels;
5150 if (vtx_info->chan_byte_size != 4 && channels == 3)
5153 /* Split typed vertex buffer loads on GFX6 and GFX10+ to avoid any
5154 * alignment issues that trigger memory violations and eventually a GPU
5155 * hang. This can happen if the stride (static or dynamic) is unaligned and
5156 * also if the VBO offset is aligned to a scalar (e.g. stride is 8 and VBO
5157 * offset is 2 for R16G16B16A16_SNORM). */
5159 return (ctx->options->chip_class >= GFX7 && ctx->options->chip_class <= GFX9) ||
5160 (offset % vertex_byte_size == 0 && MAX2(binding_align, 1) % vertex_byte_size == 0);
5164 get_fetch_data_format(isel_context* ctx, const ac_data_format_info* vtx_info, unsigned offset,
5165 unsigned* channels, unsigned max_channels, unsigned binding_align)
5167 if (!vtx_info->chan_byte_size) {
5168 *channels = vtx_info->num_channels;
5169 return vtx_info->chan_format;
5172 unsigned num_channels = *channels;
5173 if (!check_vertex_fetch_size(ctx, vtx_info, offset, binding_align, *channels)) {
5174 unsigned new_channels = num_channels + 1;
5175 /* first, assume more loads is worse and try using a larger data format */
5176 while (new_channels <= max_channels &&
5177 !check_vertex_fetch_size(ctx, vtx_info, offset, binding_align, new_channels)) {
5181 if (new_channels > max_channels) {
5182 /* then try decreasing load size (at the cost of more loads) */
5183 new_channels = *channels;
5184 while (new_channels > 1 &&
5185 !check_vertex_fetch_size(ctx, vtx_info, offset, binding_align, new_channels))
5189 if (new_channels < *channels)
5190 *channels = new_channels;
5191 num_channels = new_channels;
5194 switch (vtx_info->chan_format) {
5195 case V_008F0C_BUF_DATA_FORMAT_8:
5196 return std::array<uint8_t, 4>{V_008F0C_BUF_DATA_FORMAT_8, V_008F0C_BUF_DATA_FORMAT_8_8,
5197 V_008F0C_BUF_DATA_FORMAT_INVALID,
5198 V_008F0C_BUF_DATA_FORMAT_8_8_8_8}[num_channels - 1];
5199 case V_008F0C_BUF_DATA_FORMAT_16:
5200 return std::array<uint8_t, 4>{V_008F0C_BUF_DATA_FORMAT_16, V_008F0C_BUF_DATA_FORMAT_16_16,
5201 V_008F0C_BUF_DATA_FORMAT_INVALID,
5202 V_008F0C_BUF_DATA_FORMAT_16_16_16_16}[num_channels - 1];
5203 case V_008F0C_BUF_DATA_FORMAT_32:
5204 return std::array<uint8_t, 4>{V_008F0C_BUF_DATA_FORMAT_32, V_008F0C_BUF_DATA_FORMAT_32_32,
5205 V_008F0C_BUF_DATA_FORMAT_32_32_32,
5206 V_008F0C_BUF_DATA_FORMAT_32_32_32_32}[num_channels - 1];
5208 unreachable("shouldn't reach here");
5209 return V_008F0C_BUF_DATA_FORMAT_INVALID;
5213 visit_load_input(isel_context* ctx, nir_intrinsic_instr* instr)
5215 Builder bld(ctx->program, ctx->block);
5216 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
5217 nir_src offset = *nir_get_io_offset_src(instr);
5219 if (ctx->shader->info.stage == MESA_SHADER_VERTEX && ctx->program->info.vs.dynamic_inputs) {
5220 if (!nir_src_is_const(offset) || nir_src_as_uint(offset))
5221 isel_err(offset.ssa->parent_instr,
5222 "Unimplemented non-zero nir_intrinsic_load_input offset");
5224 unsigned location = nir_intrinsic_base(instr) - VERT_ATTRIB_GENERIC0;
5225 unsigned component = nir_intrinsic_component(instr);
5226 unsigned bitsize = instr->dest.ssa.bit_size;
5227 unsigned num_components = instr->dest.ssa.num_components;
5229 Temp input = get_arg(ctx, ctx->args->vs_inputs[location]);
5231 aco_ptr<Instruction> vec{create_instruction<Pseudo_instruction>(
5232 aco_opcode::p_create_vector, Format::PSEUDO, num_components, 1)};
5233 std::array<Temp, NIR_MAX_VEC_COMPONENTS> elems;
5234 for (unsigned i = 0; i < num_components; i++) {
5235 elems[i] = emit_extract_vector(ctx, input, component + i, bitsize == 64 ? v2 : v1);
5236 if (bitsize == 16) {
5237 if (nir_alu_type_get_base_type(nir_intrinsic_dest_type(instr)) == nir_type_float)
5238 elems[i] = bld.vop1(aco_opcode::v_cvt_f16_f32, bld.def(v2b), elems[i]);
5240 elems[i] = bld.pseudo(aco_opcode::p_extract_vector, bld.def(v2b), elems[i],
5243 vec->operands[i] = Operand(elems[i]);
5245 vec->definitions[0] = Definition(dst);
5246 ctx->block->instructions.emplace_back(std::move(vec));
5247 ctx->allocated_vec.emplace(dst.id(), elems);
5248 } else if (ctx->shader->info.stage == MESA_SHADER_VERTEX) {
5250 if (!nir_src_is_const(offset) || nir_src_as_uint(offset))
5251 isel_err(offset.ssa->parent_instr,
5252 "Unimplemented non-zero nir_intrinsic_load_input offset");
5254 Temp vertex_buffers =
5255 convert_pointer_to_64_bit(ctx, get_arg(ctx, ctx->args->ac.vertex_buffers));
5257 unsigned location = nir_intrinsic_base(instr) - VERT_ATTRIB_GENERIC0;
5258 unsigned component = nir_intrinsic_component(instr);
5259 unsigned bitsize = instr->dest.ssa.bit_size;
5260 unsigned attrib_binding = ctx->options->key.vs.vertex_attribute_bindings[location];
5261 uint32_t attrib_offset = ctx->options->key.vs.vertex_attribute_offsets[location];
5262 uint32_t attrib_stride = ctx->options->key.vs.vertex_attribute_strides[location];
5263 unsigned attrib_format = ctx->options->key.vs.vertex_attribute_formats[location];
5264 unsigned binding_align = ctx->options->key.vs.vertex_binding_align[attrib_binding];
5266 unsigned dfmt = attrib_format & 0xf;
5267 unsigned nfmt = (attrib_format >> 4) & 0x7;
5268 const struct ac_data_format_info* vtx_info = ac_get_data_format_info(dfmt);
5270 unsigned mask = nir_ssa_def_components_read(&instr->dest.ssa) << component;
5271 unsigned num_channels = MIN2(util_last_bit(mask), vtx_info->num_channels);
5273 unsigned desc_index =
5274 ctx->program->info.vs.use_per_attribute_vb_descs ? location : attrib_binding;
5275 desc_index = util_bitcount(ctx->program->info.vs.vb_desc_usage_mask &
5276 u_bit_consecutive(0, desc_index));
5277 Operand off = bld.copy(bld.def(s1), Operand::c32(desc_index * 16u));
5278 Temp list = bld.smem(aco_opcode::s_load_dwordx4, bld.def(s4), vertex_buffers, off);
5281 if (ctx->options->key.vs.instance_rate_inputs & (1u << location)) {
5282 uint32_t divisor = ctx->options->key.vs.instance_rate_divisors[location];
5283 Temp start_instance = get_arg(ctx, ctx->args->ac.start_instance);
5285 Temp instance_id = get_arg(ctx, ctx->args->ac.instance_id);
5287 Temp divided = bld.tmp(v1);
5288 emit_v_div_u32(ctx, divided, as_vgpr(ctx, instance_id), divisor);
5289 index = bld.vadd32(bld.def(v1), start_instance, divided);
5291 index = bld.vadd32(bld.def(v1), start_instance, instance_id);
5294 index = bld.copy(bld.def(v1), start_instance);
5297 index = bld.vadd32(bld.def(v1), get_arg(ctx, ctx->args->ac.base_vertex),
5298 get_arg(ctx, ctx->args->ac.vertex_id));
5301 Temp* const channels = (Temp*)alloca(num_channels * sizeof(Temp));
5302 unsigned channel_start = 0;
5303 bool direct_fetch = false;
5305 /* skip unused channels at the start */
5306 if (vtx_info->chan_byte_size) {
5307 channel_start = ffs(mask) - 1;
5308 for (unsigned i = 0; i < MIN2(channel_start, num_channels); i++)
5309 channels[i] = Temp(0, s1);
5313 while (channel_start < num_channels) {
5314 unsigned fetch_component = num_channels - channel_start;
5315 unsigned fetch_offset = attrib_offset + channel_start * vtx_info->chan_byte_size;
5316 bool expanded = false;
5318 /* use MUBUF when possible to avoid potential alignment issues */
5319 /* TODO: we could use SDWA to unpack 8/16-bit attributes without extra instructions */
5321 (nfmt == V_008F0C_BUF_NUM_FORMAT_FLOAT || nfmt == V_008F0C_BUF_NUM_FORMAT_UINT ||
5322 nfmt == V_008F0C_BUF_NUM_FORMAT_SINT) &&
5323 vtx_info->chan_byte_size == 4;
5324 unsigned fetch_dfmt = V_008F0C_BUF_DATA_FORMAT_INVALID;
5327 get_fetch_data_format(ctx, vtx_info, fetch_offset, &fetch_component,
5328 vtx_info->num_channels - channel_start, binding_align);
5330 if (fetch_component == 3 && ctx->options->chip_class == GFX6) {
5331 /* GFX6 only supports loading vec3 with MTBUF, expand to vec4. */
5332 fetch_component = 4;
5337 unsigned fetch_bytes = fetch_component * bitsize / 8;
5339 Temp fetch_index = index;
5340 if (attrib_stride != 0 && fetch_offset > attrib_stride) {
5342 bld.vadd32(bld.def(v1), Operand::c32(fetch_offset / attrib_stride), fetch_index);
5343 fetch_offset = fetch_offset % attrib_stride;
5346 Operand soffset = Operand::zero();
5347 if (fetch_offset >= 4096) {
5348 soffset = bld.copy(bld.def(s1), Operand::c32(fetch_offset / 4096 * 4096));
5349 fetch_offset %= 4096;
5353 switch (fetch_bytes) {
5355 assert(!use_mubuf && bitsize == 16);
5356 opcode = aco_opcode::tbuffer_load_format_d16_x;
5359 if (bitsize == 16) {
5361 opcode = aco_opcode::tbuffer_load_format_d16_xy;
5364 use_mubuf ? aco_opcode::buffer_load_dword : aco_opcode::tbuffer_load_format_x;
5368 assert(!use_mubuf && bitsize == 16);
5369 opcode = aco_opcode::tbuffer_load_format_d16_xyz;
5372 if (bitsize == 16) {
5374 opcode = aco_opcode::tbuffer_load_format_d16_xyzw;
5377 use_mubuf ? aco_opcode::buffer_load_dwordx2 : aco_opcode::tbuffer_load_format_xy;
5381 assert(ctx->options->chip_class >= GFX7 ||
5382 (!use_mubuf && ctx->options->chip_class == GFX6));
5384 use_mubuf ? aco_opcode::buffer_load_dwordx3 : aco_opcode::tbuffer_load_format_xyz;
5388 use_mubuf ? aco_opcode::buffer_load_dwordx4 : aco_opcode::tbuffer_load_format_xyzw;
5390 default: unreachable("Unimplemented load_input vector size");
5394 if (channel_start == 0 && fetch_bytes == dst.bytes() && !expanded &&
5395 num_channels <= 3) {
5396 direct_fetch = true;
5399 fetch_dst = bld.tmp(RegClass::get(RegType::vgpr, fetch_bytes));
5403 Instruction* mubuf = bld.mubuf(opcode, Definition(fetch_dst), list, fetch_index,
5404 soffset, fetch_offset, false, false, true)
5406 mubuf->mubuf().vtx_binding = attrib_binding + 1;
5408 Instruction* mtbuf = bld.mtbuf(opcode, Definition(fetch_dst), list, fetch_index,
5409 soffset, fetch_dfmt, nfmt, fetch_offset, false, true)
5411 mtbuf->mtbuf().vtx_binding = attrib_binding + 1;
5414 emit_split_vector(ctx, fetch_dst, fetch_dst.size());
5416 if (fetch_component == 1) {
5417 channels[channel_start] = fetch_dst;
5419 for (unsigned i = 0; i < MIN2(fetch_component, num_channels - channel_start); i++)
5420 channels[channel_start + i] =
5421 emit_extract_vector(ctx, fetch_dst, i, bitsize == 16 ? v2b : v1);
5424 channel_start += fetch_component;
5427 if (!direct_fetch) {
5429 nfmt != V_008F0C_BUF_NUM_FORMAT_UINT && nfmt != V_008F0C_BUF_NUM_FORMAT_SINT;
5431 unsigned num_components = instr->dest.ssa.num_components;
5433 aco_ptr<Instruction> vec{create_instruction<Pseudo_instruction>(
5434 aco_opcode::p_create_vector, Format::PSEUDO, num_components, 1)};
5435 std::array<Temp, NIR_MAX_VEC_COMPONENTS> elems;
5436 unsigned num_temp = 0;
5437 for (unsigned i = 0; i < num_components; i++) {
5438 unsigned idx = i + component;
5439 if (idx < num_channels && channels[idx].id()) {
5440 Temp channel = channels[idx];
5441 vec->operands[i] = Operand(channel);
5445 } else if (is_float && idx == 3) {
5446 vec->operands[i] = Operand::c32(0x3f800000u);
5447 } else if (!is_float && idx == 3) {
5448 vec->operands[i] = Operand::c32(1u);
5450 vec->operands[i] = Operand::zero();
5453 vec->definitions[0] = Definition(dst);
5454 ctx->block->instructions.emplace_back(std::move(vec));
5455 emit_split_vector(ctx, dst, num_components);
5457 if (num_temp == num_components)
5458 ctx->allocated_vec.emplace(dst.id(), elems);
5460 } else if (ctx->shader->info.stage == MESA_SHADER_FRAGMENT) {
5461 if (!nir_src_is_const(offset) || nir_src_as_uint(offset))
5462 isel_err(offset.ssa->parent_instr,
5463 "Unimplemented non-zero nir_intrinsic_load_input offset");
5465 Temp prim_mask = get_arg(ctx, ctx->args->ac.prim_mask);
5467 unsigned idx = nir_intrinsic_base(instr);
5468 unsigned component = nir_intrinsic_component(instr);
5469 unsigned vertex_id = 2; /* P0 */
5471 if (instr->intrinsic == nir_intrinsic_load_input_vertex) {
5472 nir_const_value* src0 = nir_src_as_const_value(instr->src[0]);
5473 switch (src0->u32) {
5475 vertex_id = 2; /* P0 */
5478 vertex_id = 0; /* P10 */
5481 vertex_id = 1; /* P20 */
5483 default: unreachable("invalid vertex index");
5487 if (instr->dest.ssa.num_components == 1 &&
5488 instr->dest.ssa.bit_size != 64) {
5489 bld.vintrp(aco_opcode::v_interp_mov_f32, Definition(dst), Operand::c32(vertex_id),
5490 bld.m0(prim_mask), idx, component);
5492 unsigned num_components = instr->dest.ssa.num_components;
5493 if (instr->dest.ssa.bit_size == 64)
5494 num_components *= 2;
5495 aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(
5496 aco_opcode::p_create_vector, Format::PSEUDO, num_components, 1)};
5497 for (unsigned i = 0; i < num_components; i++) {
5498 unsigned chan_component = (component + i) % 4;
5499 unsigned chan_idx = idx + (component + i) / 4;
5500 vec->operands[i] = bld.vintrp(
5501 aco_opcode::v_interp_mov_f32, bld.def(instr->dest.ssa.bit_size == 16 ? v2b : v1),
5502 Operand::c32(vertex_id), bld.m0(prim_mask), chan_idx, chan_component);
5504 vec->definitions[0] = Definition(dst);
5505 bld.insert(std::move(vec));
5508 unreachable("Shader stage not implemented");
5513 visit_load_tcs_per_vertex_input(isel_context* ctx, nir_intrinsic_instr* instr)
5515 assert(ctx->shader->info.stage == MESA_SHADER_TESS_CTRL);
5517 Builder bld(ctx->program, ctx->block);
5518 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
5520 if (load_input_from_temps(ctx, instr, dst))
5523 unreachable("LDS-based TCS input should have been lowered in NIR.");
5527 visit_load_per_vertex_input(isel_context* ctx, nir_intrinsic_instr* instr)
5529 switch (ctx->shader->info.stage) {
5530 case MESA_SHADER_TESS_CTRL: visit_load_tcs_per_vertex_input(ctx, instr); break;
5531 default: unreachable("Unimplemented shader stage");
5536 visit_load_tess_coord(isel_context* ctx, nir_intrinsic_instr* instr)
5538 assert(ctx->shader->info.stage == MESA_SHADER_TESS_EVAL);
5540 Builder bld(ctx->program, ctx->block);
5541 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
5543 Operand tes_u(get_arg(ctx, ctx->args->ac.tes_u));
5544 Operand tes_v(get_arg(ctx, ctx->args->ac.tes_v));
5545 Operand tes_w = Operand::zero();
5547 if (ctx->shader->info.tess._primitive_mode == TESS_PRIMITIVE_TRIANGLES) {
5548 Temp tmp = bld.vop2(aco_opcode::v_add_f32, bld.def(v1), tes_u, tes_v);
5549 tmp = bld.vop2(aco_opcode::v_sub_f32, bld.def(v1), Operand::c32(0x3f800000u /* 1.0f */), tmp);
5550 tes_w = Operand(tmp);
5553 Temp tess_coord = bld.pseudo(aco_opcode::p_create_vector, Definition(dst), tes_u, tes_v, tes_w);
5554 emit_split_vector(ctx, tess_coord, 3);
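/* Load from a buffer resource, using SMEM when the destination is uniform and
 * the access allows it (glc loads only on GFX8+), and MUBUF otherwise. */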
5558 load_buffer(isel_context* ctx, unsigned num_components, unsigned component_size, Temp dst,
5559 Temp rsrc, Temp offset, unsigned align_mul, unsigned align_offset, bool glc = false,
5560 bool allow_smem = true, memory_sync_info sync = memory_sync_info())
5562 Builder bld(ctx->program, ctx->block);
5565 dst.type() != RegType::vgpr && (!glc || ctx->options->chip_class >= GFX8) && allow_smem;
5567 offset = bld.as_uniform(offset);
5569 /* GFX6-7 are affected by a hw bug that prevents address clamping from
5570 * working correctly when the SGPR offset is used. */
5572 if (offset.type() == RegType::sgpr && ctx->options->chip_class < GFX8)
5573 offset = as_vgpr(ctx, offset);
5576 LoadEmitInfo info = {Operand(offset), dst, num_components, component_size, rsrc};
5579 info.align_mul = align_mul;
5580 info.align_offset = align_offset;
5582 emit_load(ctx, bld, info, smem_load_params);
5584 emit_load(ctx, bld, info, mubuf_load_params);
5588 visit_load_ubo(isel_context* ctx, nir_intrinsic_instr* instr)
5590 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
5591 Builder bld(ctx->program, ctx->block);
5592 Temp rsrc = bld.as_uniform(get_ssa_temp(ctx, instr->src[0].ssa));
5594 unsigned size = instr->dest.ssa.bit_size / 8;
5595 load_buffer(ctx, instr->num_components, size, dst, rsrc, get_ssa_temp(ctx, instr->src[1].ssa),
5596 nir_intrinsic_align_mul(instr), nir_intrinsic_align_offset(instr));
5600 visit_load_sbt_amd(isel_context* ctx, nir_intrinsic_instr* instr)
5602 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
5603 unsigned binding = nir_intrinsic_binding(instr);
5605 Builder bld(ctx->program, ctx->block);
5606 Temp desc_base = convert_pointer_to_64_bit(ctx, get_arg(ctx, ctx->args->ac.sbt_descriptors));
5607 Operand desc_off = bld.copy(bld.def(s1), Operand::c32(binding * 16u));
5608 bld.smem(aco_opcode::s_load_dwordx4, Definition(dst), desc_base, desc_off);
5609 emit_split_vector(ctx, dst, 4);
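/* Load push constants: constants covered by inline_push_const_mask are taken
 * directly from SGPR arguments, everything else is loaded with SMEM from the
 * push constant pointer, with extra handling for 8/16-bit and 3-dword results. */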
5613 visit_load_push_constant(isel_context* ctx, nir_intrinsic_instr* instr)
5615 Builder bld(ctx->program, ctx->block);
5616 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
5617 unsigned offset = nir_intrinsic_base(instr);
5618 unsigned count = instr->dest.ssa.num_components;
5619 nir_const_value* index_cv = nir_src_as_const_value(instr->src[0]);
5621 if (instr->dest.ssa.bit_size == 64)
5624 if (index_cv && instr->dest.ssa.bit_size >= 32) {
5625 unsigned start = (offset + index_cv->u32) / 4u;
5626 uint64_t mask = BITFIELD64_MASK(count) << start;
5627 if ((ctx->args->ac.inline_push_const_mask | mask) == ctx->args->ac.inline_push_const_mask &&
5628 start + count <= (sizeof(ctx->args->ac.inline_push_const_mask) * 8u)) {
5629 std::array<Temp, NIR_MAX_VEC_COMPONENTS> elems;
5630 aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(
5631 aco_opcode::p_create_vector, Format::PSEUDO, count, 1)};
5632 unsigned arg_index =
5633 util_bitcount64(ctx->args->ac.inline_push_const_mask & BITFIELD64_MASK(start));
5634 for (unsigned i = 0; i < count; ++i) {
5635 elems[i] = get_arg(ctx, ctx->args->ac.inline_push_consts[arg_index++]);
5636 vec->operands[i] = Operand{elems[i]};
5638 vec->definitions[0] = Definition(dst);
5639 ctx->block->instructions.emplace_back(std::move(vec));
5640 ctx->allocated_vec.emplace(dst.id(), elems);
5645 Temp index = bld.as_uniform(get_ssa_temp(ctx, instr->src[0].ssa));
5646 if (offset != 0) // TODO check if index != 0 as well
5647 index = bld.nuw().sop2(aco_opcode::s_add_i32, bld.def(s1), bld.def(s1, scc),
5648 Operand::c32(offset), index);
5649 Temp ptr = convert_pointer_to_64_bit(ctx, get_arg(ctx, ctx->args->ac.push_constants));
5652 bool aligned = true;
5654 if (instr->dest.ssa.bit_size == 8) {
5655 aligned = index_cv && (offset + index_cv->u32) % 4 == 0;
5656 bool fits_in_dword = count == 1 || (index_cv && ((offset + index_cv->u32) % 4 + count) <= 4);
5658 vec = fits_in_dword ? bld.tmp(s1) : bld.tmp(s2);
5659 } else if (instr->dest.ssa.bit_size == 16) {
5660 aligned = index_cv && (offset + index_cv->u32) % 4 == 0;
5662 vec = count == 4 ? bld.tmp(s4) : count > 1 ? bld.tmp(s2) : bld.tmp(s1);
5667 switch (vec.size()) {
5668 case 1: op = aco_opcode::s_load_dword; break;
5669 case 2: op = aco_opcode::s_load_dwordx2; break;
5674 case 4: op = aco_opcode::s_load_dwordx4; break;
5679 case 8: op = aco_opcode::s_load_dwordx8; break;
5680 default: unreachable("unimplemented or forbidden load_push_constant.");
5683 bld.smem(op, Definition(vec), ptr, index).instr->smem().prevent_overflow = true;
5686 Operand byte_offset = index_cv ? Operand::c32((offset + index_cv->u32) % 4) : Operand(index);
5687 byte_align_scalar(ctx, vec, byte_offset, dst);
5692 emit_split_vector(ctx, vec, 4);
5693 RegClass rc = dst.size() == 3 ? s1 : s2;
5694 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), emit_extract_vector(ctx, vec, 0, rc),
5695 emit_extract_vector(ctx, vec, 1, rc), emit_extract_vector(ctx, vec, 2, rc));
5697 emit_split_vector(ctx, dst, instr->dest.ssa.num_components);
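/* Load from the shader's embedded constant data by building a buffer descriptor
 * over it (via p_constaddr) and going through the normal buffer load path. */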
5701 visit_load_constant(isel_context* ctx, nir_intrinsic_instr* instr)
5703 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
5705 Builder bld(ctx->program, ctx->block);
5707 uint32_t desc_type =
5708 S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) | S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
5709 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) | S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
5710 if (ctx->options->chip_class >= GFX10) {
5711 desc_type |= S_008F0C_FORMAT(V_008F0C_GFX10_FORMAT_32_FLOAT) |
5712 S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) | S_008F0C_RESOURCE_LEVEL(1);
5714 desc_type |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
5715 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
5718 unsigned base = nir_intrinsic_base(instr);
5719 unsigned range = nir_intrinsic_range(instr);
5721 Temp offset = get_ssa_temp(ctx, instr->src[0].ssa);
5722 if (base && offset.type() == RegType::sgpr)
5723 offset = bld.nuw().sop2(aco_opcode::s_add_u32, bld.def(s1), bld.def(s1, scc), offset,
5724 Operand::c32(base));
5725 else if (base && offset.type() == RegType::vgpr)
5726 offset = bld.vadd32(bld.def(v1), Operand::c32(base), offset);
5728 Temp rsrc = bld.pseudo(aco_opcode::p_create_vector, bld.def(s4),
5729 bld.pseudo(aco_opcode::p_constaddr, bld.def(s2), bld.def(s1, scc),
5730 Operand::c32(ctx->constant_data_offset)),
5731 Operand::c32(MIN2(base + range, ctx->shader->constant_data_size)),
5732 Operand::c32(desc_type));
5733 unsigned size = instr->dest.ssa.bit_size / 8;
5734 // TODO: get alignment information for subdword constants
5735 load_buffer(ctx, instr->num_components, size, dst, rsrc, offset, size, 0);
5739 should_declare_array(isel_context* ctx, enum glsl_sampler_dim sampler_dim, bool is_array)
5741 if (sampler_dim == GLSL_SAMPLER_DIM_BUF)
5743 ac_image_dim dim = ac_get_sampler_dim(ctx->options->chip_class, sampler_dim, is_array);
5744 return dim == ac_image_cube || dim == ac_image_1darray || dim == ac_image_2darray ||
5745 dim == ac_image_2darraymsaa;
5749 image_type_to_components_count(enum glsl_sampler_dim dim, bool array)
5752 case GLSL_SAMPLER_DIM_BUF: return 1;
5753 case GLSL_SAMPLER_DIM_1D: return array ? 2 : 1;
5754 case GLSL_SAMPLER_DIM_2D: return array ? 3 : 2;
5755 case GLSL_SAMPLER_DIM_MS: return array ? 4 : 3;
5756 case GLSL_SAMPLER_DIM_3D:
5757 case GLSL_SAMPLER_DIM_CUBE: return 3;
5758 case GLSL_SAMPLER_DIM_RECT:
5759 case GLSL_SAMPLER_DIM_SUBPASS: return 2;
5760 case GLSL_SAMPLER_DIM_SUBPASS_MS: return 3;
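/* Emit a MIMG instruction. On GFX10+ the coordinates can stay as separate VGPRs
 * (NSA encoding, limited in size to avoid stability issues); otherwise they are
 * packed into a single vector. Coordinates are moved to WQM where wqm_mask
 * requires it. */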
5766 static MIMG_instruction*
5767 emit_mimg(Builder& bld, aco_opcode op, Definition dst, Temp rsrc, Operand samp,
5768 std::vector<Temp> coords, unsigned wqm_mask = 0, Operand vdata = Operand(v1))
5770 /* Limit NSA instructions to 3 dwords on GFX10 to avoid stability issues. */
5771 unsigned max_nsa_size = bld.program->chip_class >= GFX10_3 ? 13 : 5;
5772 bool use_nsa = bld.program->chip_class >= GFX10 && coords.size() <= max_nsa_size;
5775 Temp coord = coords[0];
5776 if (coords.size() > 1) {
5777 coord = bld.tmp(RegType::vgpr, coords.size());
5779 aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(
5780 aco_opcode::p_create_vector, Format::PSEUDO, coords.size(), 1)};
5781 for (unsigned i = 0; i < coords.size(); i++)
5782 vec->operands[i] = Operand(coords[i]);
5783 vec->definitions[0] = Definition(coord);
5784 bld.insert(std::move(vec));
5785 } else if (coord.type() == RegType::sgpr) {
5786 coord = bld.copy(bld.def(v1), coord);
5790 /* We don't need the bias, sample index, compare value or offset to be
5791 * computed in WQM but if the p_create_vector copies the coordinates, then it
5792 * needs to be in WQM. */
5793 coord = emit_wqm(bld, coord, bld.tmp(coord.regClass()), true);
5799 for (unsigned i = 0; i < coords.size(); i++) {
5800 if (wqm_mask & (1u << i))
5801 coords[i] = emit_wqm(bld, coords[i], bld.tmp(coords[i].regClass()), true);
5804 for (Temp& coord : coords) {
5805 if (coord.type() == RegType::sgpr)
5806 coord = bld.copy(bld.def(v1), coord);
5810 aco_ptr<MIMG_instruction> mimg{
5811 create_instruction<MIMG_instruction>(op, Format::MIMG, 3 + coords.size(), dst.isTemp())};
5813 mimg->definitions[0] = dst;
5814 mimg->operands[0] = Operand(rsrc);
5815 mimg->operands[1] = samp;
5816 mimg->operands[2] = vdata;
5817 for (unsigned i = 0; i < coords.size(); i++)
5818 mimg->operands[3 + i] = Operand(coords[i]);
5820 MIMG_instruction* res = mimg.get();
5821 bld.insert(std::move(mimg));
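/* Lower the BVH ray intersection intrinsic to image_bvh64_intersect_ray, with
 * the 64-bit node pointer, tmax, origin, direction and inverse direction passed
 * as the address operands. */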
5826 visit_bvh64_intersect_ray_amd(isel_context* ctx, nir_intrinsic_instr* instr)
5828 Builder bld(ctx->program, ctx->block);
5829 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
5830 Temp resource = get_ssa_temp(ctx, instr->src[0].ssa);
5831 Temp node = get_ssa_temp(ctx, instr->src[1].ssa);
5832 Temp tmax = get_ssa_temp(ctx, instr->src[2].ssa);
5833 Temp origin = get_ssa_temp(ctx, instr->src[3].ssa);
5834 Temp dir = get_ssa_temp(ctx, instr->src[4].ssa);
5835 Temp inv_dir = get_ssa_temp(ctx, instr->src[5].ssa);
5837 std::vector<Temp> args;
5838 args.push_back(emit_extract_vector(ctx, node, 0, v1));
5839 args.push_back(emit_extract_vector(ctx, node, 1, v1));
5840 args.push_back(as_vgpr(ctx, tmax));
5841 args.push_back(emit_extract_vector(ctx, origin, 0, v1));
5842 args.push_back(emit_extract_vector(ctx, origin, 1, v1));
5843 args.push_back(emit_extract_vector(ctx, origin, 2, v1));
5844 args.push_back(emit_extract_vector(ctx, dir, 0, v1));
5845 args.push_back(emit_extract_vector(ctx, dir, 1, v1));
5846 args.push_back(emit_extract_vector(ctx, dir, 2, v1));
5847 args.push_back(emit_extract_vector(ctx, inv_dir, 0, v1));
5848 args.push_back(emit_extract_vector(ctx, inv_dir, 1, v1));
5849 args.push_back(emit_extract_vector(ctx, inv_dir, 2, v1));
5851 MIMG_instruction* mimg = emit_mimg(bld, aco_opcode::image_bvh64_intersect_ray, Definition(dst),
5852 resource, Operand(s4), args);
5853 mimg->dim = ac_image_1d;
5859 static std::vector<Temp>
5860 get_image_coords(isel_context* ctx, const nir_intrinsic_instr* instr)
5863 Temp src0 = get_ssa_temp(ctx, instr->src[1].ssa);
5864 enum glsl_sampler_dim dim = nir_intrinsic_image_dim(instr);
5865 bool is_array = nir_intrinsic_image_array(instr);
5866 ASSERTED bool add_frag_pos =
5867 (dim == GLSL_SAMPLER_DIM_SUBPASS || dim == GLSL_SAMPLER_DIM_SUBPASS_MS);
5868 assert(!add_frag_pos && "Input attachments should be lowered.");
5869 bool is_ms = (dim == GLSL_SAMPLER_DIM_MS || dim == GLSL_SAMPLER_DIM_SUBPASS_MS);
5870 bool gfx9_1d = ctx->options->chip_class == GFX9 && dim == GLSL_SAMPLER_DIM_1D;
5871 int count = image_type_to_components_count(dim, is_array);
5872 std::vector<Temp> coords(count);
5873 Builder bld(ctx->program, ctx->block);
5876 coords[--count] = emit_extract_vector(ctx, get_ssa_temp(ctx, instr->src[2].ssa), 0, v1);
5879 coords[0] = emit_extract_vector(ctx, src0, 0, v1);
5880 coords.resize(coords.size() + 1);
5881 coords[1] = bld.copy(bld.def(v1), Operand::zero());
5883 coords[2] = emit_extract_vector(ctx, src0, 1, v1);
5885 for (int i = 0; i < count; i++)
5886 coords[i] = emit_extract_vector(ctx, src0, i, v1);
5889 if (instr->intrinsic == nir_intrinsic_bindless_image_load ||
5890 instr->intrinsic == nir_intrinsic_bindless_image_sparse_load ||
5891 instr->intrinsic == nir_intrinsic_bindless_image_store) {
5892 int lod_index = instr->intrinsic == nir_intrinsic_bindless_image_store ? 4 : 3;
5894 nir_src_is_const(instr->src[lod_index]) && nir_src_as_uint(instr->src[lod_index]) == 0;
5897 coords.emplace_back(get_ssa_temp(ctx, instr->src[lod_index].ssa));
5904 get_memory_sync_info(nir_intrinsic_instr* instr, storage_class storage, unsigned semantics)
5906 /* atomicrmw might not have NIR_INTRINSIC_ACCESS and there's nothing interesting there anyway */
5907 if (semantics & semantic_atomicrmw)
5908 return memory_sync_info(storage, semantics);
5910 unsigned access = nir_intrinsic_access(instr);
5912 if (access & ACCESS_VOLATILE)
5913 semantics |= semantic_volatile;
5914 if (access & ACCESS_CAN_REORDER)
5915 semantics |= semantic_can_reorder | semantic_private;
5917 return memory_sync_info(storage, semantics);
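/* Create a zero-initialized temporary that TFE/sparse loads use to pre-initialize
 * their destination, since such loads may only partially write it. */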
5921 emit_tfe_init(Builder& bld, Temp dst)
5923 Temp tmp = bld.tmp(dst.regClass());
5925 aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(
5926 aco_opcode::p_create_vector, Format::PSEUDO, dst.size(), 1)};
5927 for (unsigned i = 0; i < dst.size(); i++)
5928 vec->operands[i] = Operand::zero();
5929 vec->definitions[0] = Definition(tmp);
5930 /* Since this is fixed to an instruction's definition register, any CSE will
5931 * just create copies. Copying costs about the same as zero-initialization,
5932 * but these copies can break up clauses.
5934 vec->definitions[0].setNoCSE(true);
5935 bld.insert(std::move(vec));
5937 return Operand(tmp);
5941 visit_image_load(isel_context* ctx, nir_intrinsic_instr* instr)
5943 Builder bld(ctx->program, ctx->block);
5944 const enum glsl_sampler_dim dim = nir_intrinsic_image_dim(instr);
5945 bool is_array = nir_intrinsic_image_array(instr);
5946 bool is_sparse = instr->intrinsic == nir_intrinsic_bindless_image_sparse_load;
5947 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
5949 memory_sync_info sync = get_memory_sync_info(instr, storage_image, 0);
5950 unsigned access = nir_intrinsic_access(instr);
5952 unsigned result_size = instr->dest.ssa.num_components - is_sparse;
5953 unsigned expand_mask =
5954 nir_ssa_def_components_read(&instr->dest.ssa) & u_bit_consecutive(0, result_size);
5955 expand_mask = MAX2(expand_mask, 1); /* this can be zero in the case of sparse image loads */
5956 if (dim == GLSL_SAMPLER_DIM_BUF)
5957 expand_mask = (1u << util_last_bit(expand_mask)) - 1u;
5958 unsigned dmask = expand_mask;
5959 if (instr->dest.ssa.bit_size == 64) {
5961 /* only R64_UINT and R64_SINT supported. x is in xy of the result, w in zw */
5962 dmask = ((expand_mask & 0x1) ? 0x3 : 0) | ((expand_mask & 0x8) ? 0xc : 0);
5965 expand_mask |= 1 << result_size;
5967 bool d16 = instr->dest.ssa.bit_size == 16;
5968 assert(!d16 || !is_sparse);
5970 unsigned num_bytes = util_bitcount(dmask) * (d16 ? 2 : 4) + is_sparse * 4;
5973 if (num_bytes == dst.bytes() && dst.type() == RegType::vgpr)
5976 tmp = bld.tmp(RegClass::get(RegType::vgpr, num_bytes));
5978 Temp resource = bld.as_uniform(get_ssa_temp(ctx, instr->src[0].ssa));
5980 if (dim == GLSL_SAMPLER_DIM_BUF) {
5981 Temp vindex = emit_extract_vector(ctx, get_ssa_temp(ctx, instr->src[1].ssa), 0, v1);
5985 switch (util_bitcount(dmask)) {
5986 case 1: opcode = aco_opcode::buffer_load_format_x; break;
5987 case 2: opcode = aco_opcode::buffer_load_format_xy; break;
5988 case 3: opcode = aco_opcode::buffer_load_format_xyz; break;
5989 case 4: opcode = aco_opcode::buffer_load_format_xyzw; break;
5990 default: unreachable(">4 channel buffer image load");
5993 switch (util_bitcount(dmask)) {
5994 case 1: opcode = aco_opcode::buffer_load_format_d16_x; break;
5995 case 2: opcode = aco_opcode::buffer_load_format_d16_xy; break;
5996 case 3: opcode = aco_opcode::buffer_load_format_d16_xyz; break;
5997 case 4: opcode = aco_opcode::buffer_load_format_d16_xyzw; break;
5998 default: unreachable(">4 channel buffer image load");
6001 aco_ptr<MUBUF_instruction> load{
6002 create_instruction<MUBUF_instruction>(opcode, Format::MUBUF, 3 + is_sparse, 1)};
6003 load->operands[0] = Operand(resource);
6004 load->operands[1] = Operand(vindex);
6005 load->operands[2] = Operand::c32(0);
6006 load->definitions[0] = Definition(tmp);
6008 load->glc = access & (ACCESS_VOLATILE | ACCESS_COHERENT);
6009 load->dlc = load->glc && ctx->options->chip_class >= GFX10;
6011 load->tfe = is_sparse;
6013 load->operands[3] = emit_tfe_init(bld, tmp);
6014 ctx->block->instructions.emplace_back(std::move(load));
6016 std::vector<Temp> coords = get_image_coords(ctx, instr);
6018 bool level_zero = nir_src_is_const(instr->src[3]) && nir_src_as_uint(instr->src[3]) == 0;
6019 aco_opcode opcode = level_zero ? aco_opcode::image_load : aco_opcode::image_load_mip;
6021 Operand vdata = is_sparse ? emit_tfe_init(bld, tmp) : Operand(v1);
6022 MIMG_instruction* load =
6023 emit_mimg(bld, opcode, Definition(tmp), resource, Operand(s4), coords, 0, vdata);
6024 load->glc = access & (ACCESS_VOLATILE | ACCESS_COHERENT) ? 1 : 0;
6025 load->dlc = load->glc && ctx->options->chip_class >= GFX10;
6026 load->dim = ac_get_image_dim(ctx->options->chip_class, dim, is_array);
6028 load->dmask = dmask;
6030 load->da = should_declare_array(ctx, dim, is_array);
6032 load->tfe = is_sparse;
6035 if (is_sparse && instr->dest.ssa.bit_size == 64) {
6036 /* The result components are 64-bit but the sparse residency code is
6037 * 32-bit. So add a zero to the end so expand_vector() works correctly.
6039 tmp = bld.pseudo(aco_opcode::p_create_vector, bld.def(RegType::vgpr, tmp.size() + 1), tmp,
6043 expand_vector(ctx, tmp, dst, instr->dest.ssa.num_components, expand_mask,
6044 instr->dest.ssa.bit_size == 64);
6048 visit_image_store(isel_context* ctx, nir_intrinsic_instr* instr)
6050 Builder bld(ctx->program, ctx->block);
6051 const enum glsl_sampler_dim dim = nir_intrinsic_image_dim(instr);
6052 bool is_array = nir_intrinsic_image_array(instr);
6053 Temp data = get_ssa_temp(ctx, instr->src[3].ssa);
6054 bool d16 = instr->src[3].ssa->bit_size == 16;
6056 /* only R64_UINT and R64_SINT supported */
6057 if (instr->src[3].ssa->bit_size == 64 && data.bytes() > 8)
6058 data = emit_extract_vector(ctx, data, 0, RegClass(data.type(), 2));
6059 data = as_vgpr(ctx, data);
6061 uint32_t num_components = d16 ? instr->src[3].ssa->num_components : data.size();
6063 memory_sync_info sync = get_memory_sync_info(instr, storage_image, 0);
6064 unsigned access = nir_intrinsic_access(instr);
6065 bool glc = ctx->options->chip_class == GFX6 ||
6066 access & (ACCESS_VOLATILE | ACCESS_COHERENT | ACCESS_NON_READABLE)
6070 if (dim == GLSL_SAMPLER_DIM_BUF) {
6071 Temp rsrc = bld.as_uniform(get_ssa_temp(ctx, instr->src[0].ssa));
6072 Temp vindex = emit_extract_vector(ctx, get_ssa_temp(ctx, instr->src[1].ssa), 0, v1);
6075 switch (num_components) {
6076 case 1: opcode = aco_opcode::buffer_store_format_x; break;
6077 case 2: opcode = aco_opcode::buffer_store_format_xy; break;
6078 case 3: opcode = aco_opcode::buffer_store_format_xyz; break;
6079 case 4: opcode = aco_opcode::buffer_store_format_xyzw; break;
6080 default: unreachable(">4 channel buffer image store");
6083 switch (num_components) {
6084 case 1: opcode = aco_opcode::buffer_store_format_d16_x; break;
6085 case 2: opcode = aco_opcode::buffer_store_format_d16_xy; break;
6086 case 3: opcode = aco_opcode::buffer_store_format_d16_xyz; break;
6087 case 4: opcode = aco_opcode::buffer_store_format_d16_xyzw; break;
6088 default: unreachable(">4 channel buffer image store");
6091 aco_ptr<MUBUF_instruction> store{
6092 create_instruction<MUBUF_instruction>(opcode, Format::MUBUF, 4, 0)};
6093 store->operands[0] = Operand(rsrc);
6094 store->operands[1] = Operand(vindex);
6095 store->operands[2] = Operand::c32(0);
6096 store->operands[3] = Operand(data);
6097 store->idxen = true;
6100 store->disable_wqm = true;
6102 ctx->program->needs_exact = true;
6103 ctx->block->instructions.emplace_back(std::move(store));
6107 assert(data.type() == RegType::vgpr);
6108 std::vector<Temp> coords = get_image_coords(ctx, instr);
6109 Temp resource = bld.as_uniform(get_ssa_temp(ctx, instr->src[0].ssa));
6111 bool level_zero = nir_src_is_const(instr->src[4]) && nir_src_as_uint(instr->src[4]) == 0;
6112 aco_opcode opcode = level_zero ? aco_opcode::image_store : aco_opcode::image_store_mip;
6114 uint32_t dmask = BITFIELD_MASK(num_components);
6115 /* remove zero/undef elements from data; components which aren't in dmask are zeroed anyway */
6118 if (instr->src[3].ssa->bit_size == 32 || instr->src[3].ssa->bit_size == 16) {
6119 for (uint32_t i = 0; i < instr->num_components; i++) {
6120 nir_ssa_scalar comp = nir_ssa_scalar_resolved(instr->src[3].ssa, i);
6121 if (comp.def->parent_instr->type == nir_instr_type_ssa_undef ||
6122 (nir_ssa_scalar_is_const(comp) && nir_ssa_scalar_as_uint(comp) == 0))
6123 dmask &= ~BITFIELD_BIT(i);
6126 /* dmask cannot be 0, at least one vgpr is always read */
6130 if (dmask != BITFIELD_MASK(num_components)) {
6131 uint32_t dmask_count = util_bitcount(dmask);
6132 RegClass rc = d16 ? v2b : v1;
6133 if (dmask_count == 1) {
6134 data = emit_extract_vector(ctx, data, ffs(dmask) - 1, rc);
6136 aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(
6137 aco_opcode::p_create_vector, Format::PSEUDO, dmask_count, 1)};
6139 u_foreach_bit(bit, dmask) {
6140 vec->operands[index++] = Operand(emit_extract_vector(ctx, data, bit, rc));
6142 data = bld.tmp(RegClass::get(RegType::vgpr, dmask_count * rc.bytes()));
6143 vec->definitions[0] = Definition(data);
6144 bld.insert(std::move(vec));
6149 MIMG_instruction* store =
6150 emit_mimg(bld, opcode, Definition(), resource, Operand(s4), coords, 0, Operand(data));
6153 store->dim = ac_get_image_dim(ctx->options->chip_class, dim, is_array);
6155 store->dmask = dmask;
6157 store->da = should_declare_array(ctx, dim, is_array);
6158 store->disable_wqm = true;
6160 ctx->program->needs_exact = true;
6165 visit_image_atomic(isel_context* ctx, nir_intrinsic_instr* instr)
6167 bool return_previous = !nir_ssa_def_is_unused(&instr->dest.ssa);
6168 const enum glsl_sampler_dim dim = nir_intrinsic_image_dim(instr);
6169 bool is_array = nir_intrinsic_image_array(instr);
6170 Builder bld(ctx->program, ctx->block);
6172 Temp data = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[3].ssa));
6173 bool cmpswap = instr->intrinsic == nir_intrinsic_bindless_image_atomic_comp_swap;
6174 bool is_64bit = data.bytes() == 8;
6175 assert((data.bytes() == 4 || data.bytes() == 8) && "only 32/64-bit image atomics implemented.");
6178 data = bld.pseudo(aco_opcode::p_create_vector, bld.def(is_64bit ? v4 : v2),
6179 get_ssa_temp(ctx, instr->src[4].ssa), data);
6181 aco_opcode buf_op, buf_op64, image_op;
6182 switch (instr->intrinsic) {
6183 case nir_intrinsic_bindless_image_atomic_add:
6184 buf_op = aco_opcode::buffer_atomic_add;
6185 buf_op64 = aco_opcode::buffer_atomic_add_x2;
6186 image_op = aco_opcode::image_atomic_add;
6187 break;
6188 case nir_intrinsic_bindless_image_atomic_umin:
6189 buf_op = aco_opcode::buffer_atomic_umin;
6190 buf_op64 = aco_opcode::buffer_atomic_umin_x2;
6191 image_op = aco_opcode::image_atomic_umin;
6192 break;
6193 case nir_intrinsic_bindless_image_atomic_imin:
6194 buf_op = aco_opcode::buffer_atomic_smin;
6195 buf_op64 = aco_opcode::buffer_atomic_smin_x2;
6196 image_op = aco_opcode::image_atomic_smin;
6197 break;
6198 case nir_intrinsic_bindless_image_atomic_umax:
6199 buf_op = aco_opcode::buffer_atomic_umax;
6200 buf_op64 = aco_opcode::buffer_atomic_umax_x2;
6201 image_op = aco_opcode::image_atomic_umax;
6202 break;
6203 case nir_intrinsic_bindless_image_atomic_imax:
6204 buf_op = aco_opcode::buffer_atomic_smax;
6205 buf_op64 = aco_opcode::buffer_atomic_smax_x2;
6206 image_op = aco_opcode::image_atomic_smax;
6207 break;
6208 case nir_intrinsic_bindless_image_atomic_and:
6209 buf_op = aco_opcode::buffer_atomic_and;
6210 buf_op64 = aco_opcode::buffer_atomic_and_x2;
6211 image_op = aco_opcode::image_atomic_and;
6212 break;
6213 case nir_intrinsic_bindless_image_atomic_or:
6214 buf_op = aco_opcode::buffer_atomic_or;
6215 buf_op64 = aco_opcode::buffer_atomic_or_x2;
6216 image_op = aco_opcode::image_atomic_or;
6217 break;
6218 case nir_intrinsic_bindless_image_atomic_xor:
6219 buf_op = aco_opcode::buffer_atomic_xor;
6220 buf_op64 = aco_opcode::buffer_atomic_xor_x2;
6221 image_op = aco_opcode::image_atomic_xor;
6222 break;
6223 case nir_intrinsic_bindless_image_atomic_exchange:
6224 buf_op = aco_opcode::buffer_atomic_swap;
6225 buf_op64 = aco_opcode::buffer_atomic_swap_x2;
6226 image_op = aco_opcode::image_atomic_swap;
6227 break;
6228 case nir_intrinsic_bindless_image_atomic_comp_swap:
6229 buf_op = aco_opcode::buffer_atomic_cmpswap;
6230 buf_op64 = aco_opcode::buffer_atomic_cmpswap_x2;
6231 image_op = aco_opcode::image_atomic_cmpswap;
6232 break;
6233 case nir_intrinsic_bindless_image_atomic_fmin:
6234 buf_op = aco_opcode::buffer_atomic_fmin;
6235 buf_op64 = aco_opcode::buffer_atomic_fmin_x2;
6236 image_op = aco_opcode::image_atomic_fmin;
6237 break;
6238 case nir_intrinsic_bindless_image_atomic_fmax:
6239 buf_op = aco_opcode::buffer_atomic_fmax;
6240 buf_op64 = aco_opcode::buffer_atomic_fmax_x2;
6241 image_op = aco_opcode::image_atomic_fmax;
6242 break;
6243 default:
6244 unreachable("visit_image_atomic should only be called with "
6245 "nir_intrinsic_bindless_image_atomic_* instructions.");
6246 }
6248 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
6249 memory_sync_info sync = get_memory_sync_info(instr, storage_image, semantic_atomicrmw);
6251 if (dim == GLSL_SAMPLER_DIM_BUF) {
6252 Temp vindex = emit_extract_vector(ctx, get_ssa_temp(ctx, instr->src[1].ssa), 0, v1);
6253 Temp resource = bld.as_uniform(get_ssa_temp(ctx, instr->src[0].ssa));
6254 // assert(ctx->options->chip_class < GFX9 && "GFX9 stride size workaround not yet
6255 // implemented.");
6256 aco_ptr<MUBUF_instruction> mubuf{create_instruction<MUBUF_instruction>(
6257 is_64bit ? buf_op64 : buf_op, Format::MUBUF, 4, return_previous ? 1 : 0)};
6258 mubuf->operands[0] = Operand(resource);
6259 mubuf->operands[1] = Operand(vindex);
6260 mubuf->operands[2] = Operand::c32(0);
6261 mubuf->operands[3] = Operand(data);
6262 Definition def =
6263 return_previous ? (cmpswap ? bld.def(data.regClass()) : Definition(dst)) : Definition();
6264 if (return_previous)
6265 mubuf->definitions[0] = def;
6267 mubuf->idxen = true;
6268 mubuf->glc = return_previous;
6269 mubuf->dlc = false; /* Not needed for atomics */
6270 mubuf->disable_wqm = true;
6272 ctx->program->needs_exact = true;
6273 ctx->block->instructions.emplace_back(std::move(mubuf));
6274 if (return_previous && cmpswap)
6275 bld.pseudo(aco_opcode::p_extract_vector, Definition(dst), def.getTemp(), Operand::zero());
6276 return;
6277 }
6279 std::vector<Temp> coords = get_image_coords(ctx, instr);
6280 Temp resource = bld.as_uniform(get_ssa_temp(ctx, instr->src[0].ssa));
6281 Definition def =
6282 return_previous ? (cmpswap ? bld.def(data.regClass()) : Definition(dst)) : Definition();
6283 MIMG_instruction* mimg =
6284 emit_mimg(bld, image_op, def, resource, Operand(s4), coords, 0, Operand(data));
6285 mimg->glc = return_previous;
6286 mimg->dlc = false; /* Not needed for atomics */
6287 mimg->dim = ac_get_image_dim(ctx->options->chip_class, dim, is_array);
6288 mimg->dmask = (1 << data.size()) - 1;
6290 mimg->da = should_declare_array(ctx, dim, is_array);
6291 mimg->disable_wqm = true;
6293 ctx->program->needs_exact = true;
6294 if (return_previous && cmpswap)
6295 bld.pseudo(aco_opcode::p_extract_vector, Definition(dst), def.getTemp(), Operand::zero());
6300 get_buffer_size(isel_context* ctx, Temp desc, Temp dst)
6302 if (ctx->options->chip_class == GFX8) {
6303 /* we only have to divide by 1, 2, 4, 8, 12 or 16 */
6304 Builder bld(ctx->program, ctx->block);
6306 Temp size = emit_extract_vector(ctx, desc, 2, s1);
6308 Temp size_div3 = bld.vop3(aco_opcode::v_mul_hi_u32, bld.def(v1),
6309 bld.copy(bld.def(v1), Operand::c32(0xaaaaaaabu)), size);
6310 size_div3 = bld.sop2(aco_opcode::s_lshr_b32, bld.def(s1), bld.def(s1, scc),
6311 bld.as_uniform(size_div3), Operand::c32(1u));
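/* The reciprocal multiply above computes size / 3 without an integer divide:
 * 0xaaaaaaab == ceil(2^33 / 3), so (size * 0xaaaaaaab) >> 33 == size / 3 for any
 * 32-bit size. v_mul_hi_u32 provides the >> 32 part and the following shift-right
 * by one supplies the remaining bit. For example, size == 12 gives
 * 12 * 0xaaaaaaab == 0x800000004, and shifting that right by 33 yields 4. */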
6313 Temp stride = emit_extract_vector(ctx, desc, 1, s1);
6314 stride = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc), stride,
6315 Operand::c32((5u << 16) | 16u));
6317 Temp is12 = bld.sopc(aco_opcode::s_cmp_eq_i32, bld.def(s1, scc), stride, Operand::c32(12u));
6318 size = bld.sop2(aco_opcode::s_cselect_b32, bld.def(s1), size_div3, size, bld.scc(is12));
6320 Temp shr_dst = dst.type() == RegType::vgpr ? bld.tmp(s1) : dst;
6321 bld.sop2(aco_opcode::s_lshr_b32, Definition(shr_dst), bld.def(s1, scc), size,
6322 bld.sop1(aco_opcode::s_ff1_i32_b32, bld.def(s1), stride));
6323 if (dst.type() == RegType::vgpr)
6324 bld.copy(Definition(dst), shr_dst);
6326 /* TODO: we can probably calculate this faster with v_skip when stride != 12 */
6328 emit_extract_vector(ctx, desc, 2, dst);
6333 visit_image_size(isel_context* ctx, nir_intrinsic_instr* instr)
6335 const enum glsl_sampler_dim dim = nir_intrinsic_image_dim(instr);
6336 bool is_array = nir_intrinsic_image_array(instr);
6337 Builder bld(ctx->program, ctx->block);
6339 if (dim == GLSL_SAMPLER_DIM_BUF) {
6340 Temp desc = bld.as_uniform(get_ssa_temp(ctx, instr->src[0].ssa));
6341 return get_buffer_size(ctx, desc, get_ssa_temp(ctx, &instr->dest.ssa));
6345 assert(nir_src_as_uint(instr->src[1]) == 0);
6346 std::vector<Temp> lod{bld.copy(bld.def(v1), Operand::zero())};
6349 Temp resource = bld.as_uniform(get_ssa_temp(ctx, instr->src[0].ssa));
6351 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
6353 MIMG_instruction* mimg =
6354 emit_mimg(bld, aco_opcode::image_get_resinfo, Definition(dst), resource, Operand(s4), lod);
6355 uint8_t& dmask = mimg->dmask;
6356 mimg->dim = ac_get_image_dim(ctx->options->chip_class, dim, is_array);
6357 mimg->dmask = (1 << instr->dest.ssa.num_components) - 1;
6358 mimg->da = is_array;
6360 if (ctx->options->chip_class == GFX9 && dim == GLSL_SAMPLER_DIM_1D && is_array) {
6361 assert(instr->dest.ssa.num_components == 2);
6362 dmask = 0x5;
6363 }
6365 emit_split_vector(ctx, dst, instr->dest.ssa.num_components);
6369 get_image_samples(isel_context* ctx, Definition dst, Temp resource)
6371 Builder bld(ctx->program, ctx->block);
6373 Temp dword3 = emit_extract_vector(ctx, resource, 3, s1);
6374 Temp samples_log2 = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc), dword3,
6375 Operand::c32(16u | 4u << 16));
6376 Temp samples = bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.def(s1, scc), Operand::c32(1u),
6377 samples_log2);
6378 Temp type = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc), dword3,
6379 Operand::c32(28u | 4u << 16 /* offset=28, width=4 */));
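/* s_bfe_u32 takes its bitfield description packed as (width << 16) | offset:
 * 16u | (4u << 16) reads 4 bits at offset 16 (log2 of the sample count for MSAA
 * images) and 28u | (4u << 16) reads the 4-bit resource type field. The
 * s_cmp_ge_u32 against 14 below accepts the MSAA resource types, which sit at the
 * top of that 4-bit range; all other types fall back to the single-sample default. */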
6381 Operand default_sample = Operand::c32(1u);
6382 if (ctx->options->robust_buffer_access) {
6383 /* Extract the second dword of the descriptor, if it's
6384 * all zero, then it's a null descriptor.
6385 */
6386 Temp dword1 = emit_extract_vector(ctx, resource, 1, s1);
6387 Temp is_non_null_descriptor =
6388 bld.sopc(aco_opcode::s_cmp_gt_u32, bld.def(s1, scc), dword1, Operand::zero());
6389 default_sample = Operand(is_non_null_descriptor);
6392 Temp is_msaa = bld.sopc(aco_opcode::s_cmp_ge_u32, bld.def(s1, scc), type, Operand::c32(14u));
6393 bld.sop2(aco_opcode::s_cselect_b32, dst, samples, default_sample, bld.scc(is_msaa));
6397 visit_image_samples(isel_context* ctx, nir_intrinsic_instr* instr)
6399 Builder bld(ctx->program, ctx->block);
6400 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
6401 Temp resource = bld.as_uniform(get_ssa_temp(ctx, instr->src[0].ssa));
6402 get_image_samples(ctx, Definition(dst), resource);
6406 visit_load_ssbo(isel_context* ctx, nir_intrinsic_instr* instr)
6408 Builder bld(ctx->program, ctx->block);
6409 unsigned num_components = instr->num_components;
6411 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
6412 Temp rsrc = bld.as_uniform(get_ssa_temp(ctx, instr->src[0].ssa));
6414 unsigned access = nir_intrinsic_access(instr);
6415 bool glc = access & (ACCESS_VOLATILE | ACCESS_COHERENT);
6416 unsigned size = instr->dest.ssa.bit_size / 8;
6418 bool allow_smem = access & ACCESS_CAN_REORDER;
6420 load_buffer(ctx, num_components, size, dst, rsrc, get_ssa_temp(ctx, instr->src[1].ssa),
6421 nir_intrinsic_align_mul(instr), nir_intrinsic_align_offset(instr), glc, allow_smem,
6422 get_memory_sync_info(instr, storage_buffer, 0));
6426 visit_store_ssbo(isel_context* ctx, nir_intrinsic_instr* instr)
6428 Builder bld(ctx->program, ctx->block);
6429 Temp data = get_ssa_temp(ctx, instr->src[0].ssa);
6430 unsigned elem_size_bytes = instr->src[0].ssa->bit_size / 8;
6431 unsigned writemask = util_widen_mask(nir_intrinsic_write_mask(instr), elem_size_bytes);
6432 Temp offset = get_ssa_temp(ctx, instr->src[2].ssa);
6434 Temp rsrc = bld.as_uniform(get_ssa_temp(ctx, instr->src[1].ssa));
6436 memory_sync_info sync = get_memory_sync_info(instr, storage_buffer, 0);
6437 bool glc =
6438 nir_intrinsic_access(instr) & (ACCESS_VOLATILE | ACCESS_COHERENT | ACCESS_NON_READABLE);
6440 unsigned write_count = 0;
6441 Temp write_datas[32];
6442 unsigned offsets[32];
6443 split_buffer_store(ctx, instr, false, RegType::vgpr, data, writemask, 16, &write_count,
6444 write_datas, offsets);
6446 /* GFX6-7 are affected by a hw bug that prevents address clamping to work
6447 * correctly when the SGPR offset is used.
6448 */
6449 if (offset.type() == RegType::sgpr && ctx->options->chip_class < GFX8)
6450 offset = as_vgpr(ctx, offset);
6452 for (unsigned i = 0; i < write_count; i++) {
6453 aco_opcode op = get_buffer_store_op(write_datas[i].bytes());
6455 aco_ptr<MUBUF_instruction> store{
6456 create_instruction<MUBUF_instruction>(op, Format::MUBUF, 4, 0)};
6457 store->operands[0] = Operand(rsrc);
6458 store->operands[1] = offset.type() == RegType::vgpr ? Operand(offset) : Operand(v1);
6459 store->operands[2] = offset.type() == RegType::sgpr ? Operand(offset) : Operand::c32(0);
6460 store->operands[3] = Operand(write_datas[i]);
6461 store->offset = offsets[i];
6462 store->offen = (offset.type() == RegType::vgpr);
6465 store->disable_wqm = true;
6467 ctx->program->needs_exact = true;
6468 ctx->block->instructions.emplace_back(std::move(store));
6473 visit_atomic_ssbo(isel_context* ctx, nir_intrinsic_instr* instr)
6475 Builder bld(ctx->program, ctx->block);
6476 bool return_previous = !nir_ssa_def_is_unused(&instr->dest.ssa);
6477 Temp data = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[2].ssa));
6478 bool cmpswap = instr->intrinsic == nir_intrinsic_ssbo_atomic_comp_swap;
6480 if (cmpswap)
6481 data = bld.pseudo(aco_opcode::p_create_vector, bld.def(RegType::vgpr, data.size() * 2),
6482 get_ssa_temp(ctx, instr->src[3].ssa), data);
6484 Temp offset = get_ssa_temp(ctx, instr->src[1].ssa);
6485 Temp rsrc = bld.as_uniform(get_ssa_temp(ctx, instr->src[0].ssa));
6487 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
6489 aco_opcode op32, op64;
6490 switch (instr->intrinsic) {
6491 case nir_intrinsic_ssbo_atomic_add:
6492 op32 = aco_opcode::buffer_atomic_add;
6493 op64 = aco_opcode::buffer_atomic_add_x2;
6494 break;
6495 case nir_intrinsic_ssbo_atomic_imin:
6496 op32 = aco_opcode::buffer_atomic_smin;
6497 op64 = aco_opcode::buffer_atomic_smin_x2;
6498 break;
6499 case nir_intrinsic_ssbo_atomic_umin:
6500 op32 = aco_opcode::buffer_atomic_umin;
6501 op64 = aco_opcode::buffer_atomic_umin_x2;
6502 break;
6503 case nir_intrinsic_ssbo_atomic_imax:
6504 op32 = aco_opcode::buffer_atomic_smax;
6505 op64 = aco_opcode::buffer_atomic_smax_x2;
6506 break;
6507 case nir_intrinsic_ssbo_atomic_umax:
6508 op32 = aco_opcode::buffer_atomic_umax;
6509 op64 = aco_opcode::buffer_atomic_umax_x2;
6510 break;
6511 case nir_intrinsic_ssbo_atomic_and:
6512 op32 = aco_opcode::buffer_atomic_and;
6513 op64 = aco_opcode::buffer_atomic_and_x2;
6514 break;
6515 case nir_intrinsic_ssbo_atomic_or:
6516 op32 = aco_opcode::buffer_atomic_or;
6517 op64 = aco_opcode::buffer_atomic_or_x2;
6518 break;
6519 case nir_intrinsic_ssbo_atomic_xor:
6520 op32 = aco_opcode::buffer_atomic_xor;
6521 op64 = aco_opcode::buffer_atomic_xor_x2;
6522 break;
6523 case nir_intrinsic_ssbo_atomic_exchange:
6524 op32 = aco_opcode::buffer_atomic_swap;
6525 op64 = aco_opcode::buffer_atomic_swap_x2;
6526 break;
6527 case nir_intrinsic_ssbo_atomic_comp_swap:
6528 op32 = aco_opcode::buffer_atomic_cmpswap;
6529 op64 = aco_opcode::buffer_atomic_cmpswap_x2;
6530 break;
6531 case nir_intrinsic_ssbo_atomic_fmin:
6532 op32 = aco_opcode::buffer_atomic_fmin;
6533 op64 = aco_opcode::buffer_atomic_fmin_x2;
6534 break;
6535 case nir_intrinsic_ssbo_atomic_fmax:
6536 op32 = aco_opcode::buffer_atomic_fmax;
6537 op64 = aco_opcode::buffer_atomic_fmax_x2;
6538 break;
6539 default:
6540 unreachable(
6541 "visit_atomic_ssbo should only be called with nir_intrinsic_ssbo_atomic_* instructions.");
6542 }
6543 aco_opcode op = instr->dest.ssa.bit_size == 32 ? op32 : op64;
6544 aco_ptr<MUBUF_instruction> mubuf{
6545 create_instruction<MUBUF_instruction>(op, Format::MUBUF, 4, return_previous ? 1 : 0)};
6546 mubuf->operands[0] = Operand(rsrc);
6547 mubuf->operands[1] = offset.type() == RegType::vgpr ? Operand(offset) : Operand(v1);
6548 mubuf->operands[2] = offset.type() == RegType::sgpr ? Operand(offset) : Operand::c32(0);
6549 mubuf->operands[3] = Operand(data);
6550 Definition def =
6551 return_previous ? (cmpswap ? bld.def(data.regClass()) : Definition(dst)) : Definition();
6552 if (return_previous)
6553 mubuf->definitions[0] = def;
6555 mubuf->offen = (offset.type() == RegType::vgpr);
6556 mubuf->glc = return_previous;
6557 mubuf->dlc = false; /* Not needed for atomics */
6558 mubuf->disable_wqm = true;
6559 mubuf->sync = get_memory_sync_info(instr, storage_buffer, semantic_atomicrmw);
6560 ctx->program->needs_exact = true;
6561 ctx->block->instructions.emplace_back(std::move(mubuf));
6562 if (return_previous && cmpswap)
6563 bld.pseudo(aco_opcode::p_extract_vector, Definition(dst), def.getTemp(), Operand::zero());
6567 parse_global(isel_context* ctx, nir_intrinsic_instr* intrin, Temp* address, uint32_t* const_offset,
6568 Temp* offset)
6569 {
6570 bool is_store = intrin->intrinsic == nir_intrinsic_store_global_amd;
6571 *address = get_ssa_temp(ctx, intrin->src[is_store ? 1 : 0].ssa);
6573 *const_offset = nir_intrinsic_base(intrin);
6575 unsigned num_src = nir_intrinsic_infos[intrin->intrinsic].num_srcs;
6576 nir_src offset_src = intrin->src[num_src - 1];
6577 if (!nir_src_is_const(offset_src) || nir_src_as_uint(offset_src))
6578 *offset = get_ssa_temp(ctx, offset_src.ssa);
6579 else
6580 *offset = Temp();
6581 }
6584 visit_load_global(isel_context* ctx, nir_intrinsic_instr* instr)
6586 Builder bld(ctx->program, ctx->block);
6587 unsigned num_components = instr->num_components;
6588 unsigned component_size = instr->dest.ssa.bit_size / 8;
6590 Temp addr, offset;
6591 uint32_t const_offset;
6592 parse_global(ctx, instr, &addr, &const_offset, &offset);
6594 LoadEmitInfo info = {Operand(addr), get_ssa_temp(ctx, &instr->dest.ssa), num_components,
6595 component_size};
6597 info.resource = addr;
6598 info.offset = Operand(offset);
6600 info.const_offset = const_offset;
6601 info.glc = nir_intrinsic_access(instr) & (ACCESS_VOLATILE | ACCESS_COHERENT);
6602 info.align_mul = nir_intrinsic_align_mul(instr);
6603 info.align_offset = nir_intrinsic_align_offset(instr);
6604 info.sync = get_memory_sync_info(instr, storage_buffer, 0);
6606 /* Don't expand global loads when they use MUBUF or SMEM.
6607 * Global loads don't have the bounds checking that buffer loads have that
6608 * makes this safe.
6609 */
6610 unsigned align = nir_intrinsic_align(instr);
6611 bool byte_align_for_smem_mubuf =
6612 can_use_byte_align_for_global_load(num_components, component_size, align, false);
6614 /* VMEM stores don't update the SMEM cache and it's difficult to prove that
6615 * it's safe to use SMEM */
6616 bool can_use_smem =
6617 (nir_intrinsic_access(instr) & ACCESS_NON_WRITEABLE) && byte_align_for_smem_mubuf;
6618 if (info.dst.type() == RegType::vgpr || (info.glc && ctx->options->chip_class < GFX8) ||
6619 !can_use_smem) {
6620 EmitLoadParameters params = global_load_params;
6621 params.byte_align_loads = ctx->options->chip_class > GFX6 || byte_align_for_smem_mubuf;
6622 emit_load(ctx, bld, info, params);
6623 } else {
6624 info.offset = Operand(bld.as_uniform(info.offset));
6625 emit_load(ctx, bld, info, smem_load_params);
6626 }
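/* In short: the scalar (SMEM) path is only taken when the result lives in SGPRs,
 * the access is marked non-writeable (so no VMEM store can invalidate the scalar
 * cache behind our back), the alignment allows it, and, before GFX8, the load is
 * not coherent (GLC); everything else goes through the VMEM-based
 * global_load_params path. */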
6630 visit_store_global(isel_context* ctx, nir_intrinsic_instr* instr)
6632 Builder bld(ctx->program, ctx->block);
6633 unsigned elem_size_bytes = instr->src[0].ssa->bit_size / 8;
6634 unsigned writemask = util_widen_mask(nir_intrinsic_write_mask(instr), elem_size_bytes);
6636 Temp data = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[0].ssa));
6637 memory_sync_info sync = get_memory_sync_info(instr, storage_buffer, 0);
6638 bool glc =
6639 nir_intrinsic_access(instr) & (ACCESS_VOLATILE | ACCESS_COHERENT | ACCESS_NON_READABLE);
6641 unsigned write_count = 0;
6642 Temp write_datas[32];
6643 unsigned offsets[32];
6644 split_buffer_store(ctx, instr, false, RegType::vgpr, data, writemask, 16, &write_count,
6645 write_datas, offsets);
6647 Temp addr, offset;
6648 uint32_t const_offset;
6649 parse_global(ctx, instr, &addr, &const_offset, &offset);
6651 for (unsigned i = 0; i < write_count; i++) {
6652 Temp write_address = addr;
6653 uint32_t write_const_offset = const_offset;
6654 Temp write_offset = offset;
6655 lower_global_address(bld, offsets[i], &write_address, &write_const_offset, &write_offset);
6657 if (ctx->options->chip_class >= GFX7) {
6658 bool global = ctx->options->chip_class >= GFX9;
6659 aco_opcode op;
6660 switch (write_datas[i].bytes()) {
6661 case 1: op = global ? aco_opcode::global_store_byte : aco_opcode::flat_store_byte; break;
6662 case 2: op = global ? aco_opcode::global_store_short : aco_opcode::flat_store_short; break;
6663 case 4: op = global ? aco_opcode::global_store_dword : aco_opcode::flat_store_dword; break;
6664 case 8:
6665 op = global ? aco_opcode::global_store_dwordx2 : aco_opcode::flat_store_dwordx2;
6666 break;
6667 case 12:
6668 op = global ? aco_opcode::global_store_dwordx3 : aco_opcode::flat_store_dwordx3;
6669 break;
6670 case 16:
6671 op = global ? aco_opcode::global_store_dwordx4 : aco_opcode::flat_store_dwordx4;
6672 break;
6673 default: unreachable("store_global not implemented for this size.");
6674 }
6676 aco_ptr<FLAT_instruction> flat{
6677 create_instruction<FLAT_instruction>(op, global ? Format::GLOBAL : Format::FLAT, 3, 0)};
6678 if (write_address.regClass() == s2) {
6679 assert(global && write_offset.id() && write_offset.type() == RegType::vgpr);
6680 flat->operands[0] = Operand(write_offset);
6681 flat->operands[1] = Operand(write_address);
6682 } else {
6683 assert(write_address.type() == RegType::vgpr && !write_offset.id());
6684 flat->operands[0] = Operand(write_address);
6685 flat->operands[1] = Operand(s1);
6686 }
6687 flat->operands[2] = Operand(write_datas[i]);
6690 assert(global || !write_const_offset);
6691 flat->offset = write_const_offset;
6692 flat->disable_wqm = true;
6694 ctx->program->needs_exact = true;
6695 ctx->block->instructions.emplace_back(std::move(flat));
6696 } else {
6697 assert(ctx->options->chip_class == GFX6);
6699 aco_opcode op = get_buffer_store_op(write_datas[i].bytes());
6701 Temp rsrc = get_gfx6_global_rsrc(bld, write_address);
6703 aco_ptr<MUBUF_instruction> mubuf{
6704 create_instruction<MUBUF_instruction>(op, Format::MUBUF, 4, 0)};
6705 mubuf->operands[0] = Operand(rsrc);
6706 mubuf->operands[1] =
6707 write_address.type() == RegType::vgpr ? Operand(write_address) : Operand(v1);
6708 mubuf->operands[2] = Operand(write_offset);
6709 mubuf->operands[3] = Operand(write_datas[i]);
6712 mubuf->offset = write_const_offset;
6713 mubuf->addr64 = write_address.type() == RegType::vgpr;
6714 mubuf->disable_wqm = true;
6716 ctx->program->needs_exact = true;
6717 ctx->block->instructions.emplace_back(std::move(mubuf));
6723 visit_global_atomic(isel_context* ctx, nir_intrinsic_instr* instr)
6725 Builder bld(ctx->program, ctx->block);
6726 bool return_previous = !nir_ssa_def_is_unused(&instr->dest.ssa);
6727 Temp data = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[1].ssa));
6728 bool cmpswap = instr->intrinsic == nir_intrinsic_global_atomic_comp_swap_amd;
6730 if (cmpswap)
6731 data = bld.pseudo(aco_opcode::p_create_vector, bld.def(RegType::vgpr, data.size() * 2),
6732 get_ssa_temp(ctx, instr->src[2].ssa), data);
6734 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
6736 aco_opcode op32, op64;
6738 Temp addr, offset;
6739 uint32_t const_offset;
6740 parse_global(ctx, instr, &addr, &const_offset, &offset);
6741 lower_global_address(bld, 0, &addr, &const_offset, &offset);
6743 if (ctx->options->chip_class >= GFX7) {
6744 bool global = ctx->options->chip_class >= GFX9;
6745 switch (instr->intrinsic) {
6746 case nir_intrinsic_global_atomic_add_amd:
6747 op32 = global ? aco_opcode::global_atomic_add : aco_opcode::flat_atomic_add;
6748 op64 = global ? aco_opcode::global_atomic_add_x2 : aco_opcode::flat_atomic_add_x2;
6749 break;
6750 case nir_intrinsic_global_atomic_imin_amd:
6751 op32 = global ? aco_opcode::global_atomic_smin : aco_opcode::flat_atomic_smin;
6752 op64 = global ? aco_opcode::global_atomic_smin_x2 : aco_opcode::flat_atomic_smin_x2;
6753 break;
6754 case nir_intrinsic_global_atomic_umin_amd:
6755 op32 = global ? aco_opcode::global_atomic_umin : aco_opcode::flat_atomic_umin;
6756 op64 = global ? aco_opcode::global_atomic_umin_x2 : aco_opcode::flat_atomic_umin_x2;
6757 break;
6758 case nir_intrinsic_global_atomic_imax_amd:
6759 op32 = global ? aco_opcode::global_atomic_smax : aco_opcode::flat_atomic_smax;
6760 op64 = global ? aco_opcode::global_atomic_smax_x2 : aco_opcode::flat_atomic_smax_x2;
6761 break;
6762 case nir_intrinsic_global_atomic_umax_amd:
6763 op32 = global ? aco_opcode::global_atomic_umax : aco_opcode::flat_atomic_umax;
6764 op64 = global ? aco_opcode::global_atomic_umax_x2 : aco_opcode::flat_atomic_umax_x2;
6765 break;
6766 case nir_intrinsic_global_atomic_and_amd:
6767 op32 = global ? aco_opcode::global_atomic_and : aco_opcode::flat_atomic_and;
6768 op64 = global ? aco_opcode::global_atomic_and_x2 : aco_opcode::flat_atomic_and_x2;
6769 break;
6770 case nir_intrinsic_global_atomic_or_amd:
6771 op32 = global ? aco_opcode::global_atomic_or : aco_opcode::flat_atomic_or;
6772 op64 = global ? aco_opcode::global_atomic_or_x2 : aco_opcode::flat_atomic_or_x2;
6773 break;
6774 case nir_intrinsic_global_atomic_xor_amd:
6775 op32 = global ? aco_opcode::global_atomic_xor : aco_opcode::flat_atomic_xor;
6776 op64 = global ? aco_opcode::global_atomic_xor_x2 : aco_opcode::flat_atomic_xor_x2;
6777 break;
6778 case nir_intrinsic_global_atomic_exchange_amd:
6779 op32 = global ? aco_opcode::global_atomic_swap : aco_opcode::flat_atomic_swap;
6780 op64 = global ? aco_opcode::global_atomic_swap_x2 : aco_opcode::flat_atomic_swap_x2;
6781 break;
6782 case nir_intrinsic_global_atomic_comp_swap_amd:
6783 op32 = global ? aco_opcode::global_atomic_cmpswap : aco_opcode::flat_atomic_cmpswap;
6784 op64 = global ? aco_opcode::global_atomic_cmpswap_x2 : aco_opcode::flat_atomic_cmpswap_x2;
6785 break;
6786 case nir_intrinsic_global_atomic_fmin_amd:
6787 op32 = global ? aco_opcode::global_atomic_fmin : aco_opcode::flat_atomic_fmin;
6788 op64 = global ? aco_opcode::global_atomic_fmin_x2 : aco_opcode::flat_atomic_fmin_x2;
6789 break;
6790 case nir_intrinsic_global_atomic_fmax_amd:
6791 op32 = global ? aco_opcode::global_atomic_fmax : aco_opcode::flat_atomic_fmax;
6792 op64 = global ? aco_opcode::global_atomic_fmax_x2 : aco_opcode::flat_atomic_fmax_x2;
6793 break;
6794 default:
6795 unreachable("visit_atomic_global should only be called with nir_intrinsic_global_atomic_* "
6796 "instructions.");
6797 }
6799 aco_opcode op = instr->dest.ssa.bit_size == 32 ? op32 : op64;
6800 aco_ptr<FLAT_instruction> flat{create_instruction<FLAT_instruction>(
6801 op, global ? Format::GLOBAL : Format::FLAT, 3, return_previous ? 1 : 0)};
6802 if (addr.regClass() == s2) {
6803 assert(global && offset.id() && offset.type() == RegType::vgpr);
6804 flat->operands[0] = Operand(offset);
6805 flat->operands[1] = Operand(addr);
6806 } else {
6807 assert(addr.type() == RegType::vgpr && !offset.id());
6808 flat->operands[0] = Operand(addr);
6809 flat->operands[1] = Operand(s1);
6810 }
6811 flat->operands[2] = Operand(data);
6812 if (return_previous)
6813 flat->definitions[0] = Definition(dst);
6814 flat->glc = return_previous;
6815 flat->dlc = false; /* Not needed for atomics */
6816 assert(global || !const_offset);
6817 flat->offset = const_offset;
6818 flat->disable_wqm = true;
6819 flat->sync = get_memory_sync_info(instr, storage_buffer, semantic_atomicrmw);
6820 ctx->program->needs_exact = true;
6821 ctx->block->instructions.emplace_back(std::move(flat));
6822 } else {
6823 assert(ctx->options->chip_class == GFX6);
6825 switch (instr->intrinsic) {
6826 case nir_intrinsic_global_atomic_add_amd:
6827 op32 = aco_opcode::buffer_atomic_add;
6828 op64 = aco_opcode::buffer_atomic_add_x2;
6829 break;
6830 case nir_intrinsic_global_atomic_imin_amd:
6831 op32 = aco_opcode::buffer_atomic_smin;
6832 op64 = aco_opcode::buffer_atomic_smin_x2;
6833 break;
6834 case nir_intrinsic_global_atomic_umin_amd:
6835 op32 = aco_opcode::buffer_atomic_umin;
6836 op64 = aco_opcode::buffer_atomic_umin_x2;
6837 break;
6838 case nir_intrinsic_global_atomic_imax_amd:
6839 op32 = aco_opcode::buffer_atomic_smax;
6840 op64 = aco_opcode::buffer_atomic_smax_x2;
6841 break;
6842 case nir_intrinsic_global_atomic_umax_amd:
6843 op32 = aco_opcode::buffer_atomic_umax;
6844 op64 = aco_opcode::buffer_atomic_umax_x2;
6845 break;
6846 case nir_intrinsic_global_atomic_and_amd:
6847 op32 = aco_opcode::buffer_atomic_and;
6848 op64 = aco_opcode::buffer_atomic_and_x2;
6849 break;
6850 case nir_intrinsic_global_atomic_or_amd:
6851 op32 = aco_opcode::buffer_atomic_or;
6852 op64 = aco_opcode::buffer_atomic_or_x2;
6853 break;
6854 case nir_intrinsic_global_atomic_xor_amd:
6855 op32 = aco_opcode::buffer_atomic_xor;
6856 op64 = aco_opcode::buffer_atomic_xor_x2;
6857 break;
6858 case nir_intrinsic_global_atomic_exchange_amd:
6859 op32 = aco_opcode::buffer_atomic_swap;
6860 op64 = aco_opcode::buffer_atomic_swap_x2;
6861 break;
6862 case nir_intrinsic_global_atomic_comp_swap_amd:
6863 op32 = aco_opcode::buffer_atomic_cmpswap;
6864 op64 = aco_opcode::buffer_atomic_cmpswap_x2;
6865 break;
6866 case nir_intrinsic_global_atomic_fmin_amd:
6867 op32 = aco_opcode::buffer_atomic_fmin;
6868 op64 = aco_opcode::buffer_atomic_fmin_x2;
6869 break;
6870 case nir_intrinsic_global_atomic_fmax_amd:
6871 op32 = aco_opcode::buffer_atomic_fmax;
6872 op64 = aco_opcode::buffer_atomic_fmax_x2;
6873 break;
6874 default:
6875 unreachable("visit_atomic_global should only be called with nir_intrinsic_global_atomic_* "
6876 "instructions.");
6877 }
6879 Temp rsrc = get_gfx6_global_rsrc(bld, addr);
6881 aco_opcode op = instr->dest.ssa.bit_size == 32 ? op32 : op64;
6883 aco_ptr<MUBUF_instruction> mubuf{
6884 create_instruction<MUBUF_instruction>(op, Format::MUBUF, 4, return_previous ? 1 : 0)};
6885 mubuf->operands[0] = Operand(rsrc);
6886 mubuf->operands[1] = addr.type() == RegType::vgpr ? Operand(addr) : Operand(v1);
6887 mubuf->operands[2] = Operand(offset);
6888 mubuf->operands[3] = Operand(data);
6889 Definition def =
6890 return_previous ? (cmpswap ? bld.def(data.regClass()) : Definition(dst)) : Definition();
6891 if (return_previous)
6892 mubuf->definitions[0] = def;
6893 mubuf->glc = return_previous;
6895 mubuf->offset = const_offset;
6896 mubuf->addr64 = addr.type() == RegType::vgpr;
6897 mubuf->disable_wqm = true;
6898 mubuf->sync = get_memory_sync_info(instr, storage_buffer, semantic_atomicrmw);
6899 ctx->program->needs_exact = true;
6900 ctx->block->instructions.emplace_back(std::move(mubuf));
6901 if (return_previous && cmpswap)
6902 bld.pseudo(aco_opcode::p_extract_vector, Definition(dst), def.getTemp(), Operand::zero());
6907 aco_storage_mode_from_nir_mem_mode(unsigned mem_mode)
6909 unsigned storage = storage_none;
6911 if (mem_mode & nir_var_shader_out)
6912 storage |= storage_vmem_output;
6913 if ((mem_mode & nir_var_mem_ssbo) || (mem_mode & nir_var_mem_global))
6914 storage |= storage_buffer;
6915 if (mem_mode & nir_var_mem_task_payload)
6916 storage |= storage_task_payload;
6917 if (mem_mode & nir_var_mem_shared)
6918 storage |= storage_shared;
6919 if (mem_mode & nir_var_image)
6920 storage |= storage_image;
6922 return storage;
6923 }
6926 visit_load_buffer(isel_context* ctx, nir_intrinsic_instr* intrin)
6928 Builder bld(ctx->program, ctx->block);
6930 Temp dst = get_ssa_temp(ctx, &intrin->dest.ssa);
6931 Temp descriptor = bld.as_uniform(get_ssa_temp(ctx, intrin->src[0].ssa));
6932 Temp v_offset = as_vgpr(ctx, get_ssa_temp(ctx, intrin->src[1].ssa));
6933 Temp s_offset = bld.as_uniform(get_ssa_temp(ctx, intrin->src[2].ssa));
6935 bool swizzled = nir_intrinsic_is_swizzled(intrin);
6936 bool reorder = nir_intrinsic_can_reorder(intrin);
6937 bool slc = nir_intrinsic_slc_amd(intrin);
6939 unsigned const_offset = nir_intrinsic_base(intrin);
6940 unsigned elem_size_bytes = intrin->dest.ssa.bit_size / 8u;
6941 unsigned num_components = intrin->dest.ssa.num_components;
6942 unsigned swizzle_element_size = swizzled ? (ctx->program->chip_class <= GFX8 ? 4 : 16) : 0;
6944 nir_variable_mode mem_mode = nir_intrinsic_memory_modes(intrin);
6945 memory_sync_info sync(aco_storage_mode_from_nir_mem_mode(mem_mode));
6947 load_vmem_mubuf(ctx, dst, descriptor, v_offset, s_offset, const_offset, elem_size_bytes,
6948 num_components, swizzle_element_size, !swizzled, reorder, slc, sync);
6952 visit_store_buffer(isel_context* ctx, nir_intrinsic_instr* intrin)
6954 Temp store_src = get_ssa_temp(ctx, intrin->src[0].ssa);
6955 Temp descriptor = get_ssa_temp(ctx, intrin->src[1].ssa);
6956 Temp v_offset = get_ssa_temp(ctx, intrin->src[2].ssa);
6957 Temp s_offset = get_ssa_temp(ctx, intrin->src[3].ssa);
6959 bool swizzled = nir_intrinsic_is_swizzled(intrin);
6960 bool slc = nir_intrinsic_slc_amd(intrin);
6962 unsigned const_offset = nir_intrinsic_base(intrin);
6963 unsigned write_mask = nir_intrinsic_write_mask(intrin);
6964 unsigned elem_size_bytes = intrin->src[0].ssa->bit_size / 8u;
6966 nir_variable_mode mem_mode = nir_intrinsic_memory_modes(intrin);
6967 memory_sync_info sync(aco_storage_mode_from_nir_mem_mode(mem_mode));
6969 store_vmem_mubuf(ctx, store_src, descriptor, v_offset, s_offset, const_offset, elem_size_bytes,
6970 write_mask, !swizzled, sync, slc);
6974 visit_load_smem(isel_context* ctx, nir_intrinsic_instr* instr)
6976 Builder bld(ctx->program, ctx->block);
6977 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
6978 Temp base = bld.as_uniform(get_ssa_temp(ctx, instr->src[0].ssa));
6979 Temp offset = bld.as_uniform(get_ssa_temp(ctx, instr->src[1].ssa));
6981 aco_opcode opcode = aco_opcode::s_load_dword;
6982 unsigned size = 1;
6984 assert(dst.bytes() <= 64);
6986 if (dst.bytes() > 32) {
6987 opcode = aco_opcode::s_load_dwordx16;
6988 size = 16;
6989 } else if (dst.bytes() > 16) {
6990 opcode = aco_opcode::s_load_dwordx8;
6991 size = 8;
6992 } else if (dst.bytes() > 8) {
6993 opcode = aco_opcode::s_load_dwordx4;
6994 size = 4;
6995 } else if (dst.bytes() > 4) {
6996 opcode = aco_opcode::s_load_dwordx2;
6997 size = 2;
6998 }
7000 if (dst.size() != size) {
7001 bld.pseudo(aco_opcode::p_extract_vector, Definition(dst),
7002 bld.smem(opcode, bld.def(RegType::sgpr, size), base, offset), Operand::c32(0u));
7003 } else {
7004 bld.smem(opcode, Definition(dst), base, offset);
7005 }
7006 emit_split_vector(ctx, dst, instr->dest.ssa.num_components);
7010 translate_nir_scope(nir_scope scope)
7012 switch (scope) {
7013 case NIR_SCOPE_NONE:
7014 case NIR_SCOPE_INVOCATION: return scope_invocation;
7015 case NIR_SCOPE_SUBGROUP: return scope_subgroup;
7016 case NIR_SCOPE_WORKGROUP: return scope_workgroup;
7017 case NIR_SCOPE_QUEUE_FAMILY: return scope_queuefamily;
7018 case NIR_SCOPE_DEVICE: return scope_device;
7019 case NIR_SCOPE_SHADER_CALL: return scope_invocation;
7020 }
7021 unreachable("invalid scope");
7025 emit_scoped_barrier(isel_context* ctx, nir_intrinsic_instr* instr)
7027 Builder bld(ctx->program, ctx->block);
7029 unsigned storage_allowed = storage_buffer | storage_image;
7030 unsigned semantics = 0;
7031 sync_scope mem_scope = translate_nir_scope(nir_intrinsic_memory_scope(instr));
7032 sync_scope exec_scope = translate_nir_scope(nir_intrinsic_execution_scope(instr));
7034 /* We use shared storage for the following:
7035 * - compute shaders expose it in their API
7036 * - when tessellation is used, TCS and VS I/O is lowered to shared memory
7037 * - when GS is used on GFX9+, VS->GS and TES->GS I/O is lowered to shared memory
7038 * - additionally, when NGG is used on GFX10+, shared memory is used for certain features
7039 */
7040 bool shared_storage_used = ctx->stage.hw == HWStage::CS || ctx->stage.hw == HWStage::LS ||
7041 ctx->stage.hw == HWStage::HS ||
7042 (ctx->stage.hw == HWStage::GS && ctx->program->chip_class >= GFX9) ||
7043 ctx->stage.hw == HWStage::NGG;
7045 if (shared_storage_used)
7046 storage_allowed |= storage_shared;
7048 /* Task payload: Task Shader output, Mesh Shader input */
7049 if (ctx->stage.has(SWStage::MS) || ctx->stage.has(SWStage::TS))
7050 storage_allowed |= storage_task_payload;
7052 /* Allow VMEM output for all stages that can have outputs. */
7053 if (ctx->stage.hw != HWStage::CS && ctx->stage.hw != HWStage::FS)
7054 storage_allowed |= storage_vmem_output;
7056 /* Workgroup barriers can hang merged shaders that can potentially have 0 threads in either half.
7057 * They are allowed in CS, TCS, and in any NGG shader.
7058 */
7059 ASSERTED bool workgroup_scope_allowed =
7060 ctx->stage.hw == HWStage::CS || ctx->stage.hw == HWStage::HS || ctx->stage.hw == HWStage::NGG;
7062 unsigned nir_storage = nir_intrinsic_memory_modes(instr);
7063 unsigned storage = aco_storage_mode_from_nir_mem_mode(nir_storage);
7064 storage &= storage_allowed;
7066 unsigned nir_semantics = nir_intrinsic_memory_semantics(instr);
7067 if (nir_semantics & NIR_MEMORY_ACQUIRE)
7068 semantics |= semantic_acquire | semantic_release;
7069 if (nir_semantics & NIR_MEMORY_RELEASE)
7070 semantics |= semantic_acquire | semantic_release;
7072 assert(!(nir_semantics & (NIR_MEMORY_MAKE_AVAILABLE | NIR_MEMORY_MAKE_VISIBLE)));
7073 assert(exec_scope != scope_workgroup || workgroup_scope_allowed);
7075 bld.barrier(aco_opcode::p_barrier,
7076 memory_sync_info((storage_class)storage, (memory_semantics)semantics, mem_scope),
7077 exec_scope);
7078 }
7081 visit_load_shared(isel_context* ctx, nir_intrinsic_instr* instr)
7083 // TODO: implement sparse reads using ds_read2_b32 and nir_ssa_def_components_read()
7084 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7085 Temp address = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[0].ssa));
7086 Builder bld(ctx->program, ctx->block);
7088 unsigned elem_size_bytes = instr->dest.ssa.bit_size / 8;
7089 unsigned num_components = instr->dest.ssa.num_components;
7090 unsigned align = nir_intrinsic_align_mul(instr) ? nir_intrinsic_align(instr) : elem_size_bytes;
7091 load_lds(ctx, elem_size_bytes, num_components, dst, address, nir_intrinsic_base(instr), align);
7095 visit_store_shared(isel_context* ctx, nir_intrinsic_instr* instr)
7097 unsigned writemask = nir_intrinsic_write_mask(instr);
7098 Temp data = get_ssa_temp(ctx, instr->src[0].ssa);
7099 Temp address = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[1].ssa));
7100 unsigned elem_size_bytes = instr->src[0].ssa->bit_size / 8;
7102 unsigned align = nir_intrinsic_align_mul(instr) ? nir_intrinsic_align(instr) : elem_size_bytes;
7103 store_lds(ctx, elem_size_bytes, data, writemask, address, nir_intrinsic_base(instr), align);
7107 visit_shared_atomic(isel_context* ctx, nir_intrinsic_instr* instr)
7109 unsigned offset = nir_intrinsic_base(instr);
7110 Builder bld(ctx->program, ctx->block);
7111 Operand m = load_lds_size_m0(bld);
7112 Temp data = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[1].ssa));
7113 Temp address = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[0].ssa));
7115 unsigned num_operands = 3;
7116 aco_opcode op32, op64, op32_rtn, op64_rtn;
7117 switch (instr->intrinsic) {
7118 case nir_intrinsic_shared_atomic_add:
7119 op32 = aco_opcode::ds_add_u32;
7120 op64 = aco_opcode::ds_add_u64;
7121 op32_rtn = aco_opcode::ds_add_rtn_u32;
7122 op64_rtn = aco_opcode::ds_add_rtn_u64;
7123 break;
7124 case nir_intrinsic_shared_atomic_imin:
7125 op32 = aco_opcode::ds_min_i32;
7126 op64 = aco_opcode::ds_min_i64;
7127 op32_rtn = aco_opcode::ds_min_rtn_i32;
7128 op64_rtn = aco_opcode::ds_min_rtn_i64;
7129 break;
7130 case nir_intrinsic_shared_atomic_umin:
7131 op32 = aco_opcode::ds_min_u32;
7132 op64 = aco_opcode::ds_min_u64;
7133 op32_rtn = aco_opcode::ds_min_rtn_u32;
7134 op64_rtn = aco_opcode::ds_min_rtn_u64;
7135 break;
7136 case nir_intrinsic_shared_atomic_imax:
7137 op32 = aco_opcode::ds_max_i32;
7138 op64 = aco_opcode::ds_max_i64;
7139 op32_rtn = aco_opcode::ds_max_rtn_i32;
7140 op64_rtn = aco_opcode::ds_max_rtn_i64;
7141 break;
7142 case nir_intrinsic_shared_atomic_umax:
7143 op32 = aco_opcode::ds_max_u32;
7144 op64 = aco_opcode::ds_max_u64;
7145 op32_rtn = aco_opcode::ds_max_rtn_u32;
7146 op64_rtn = aco_opcode::ds_max_rtn_u64;
7147 break;
7148 case nir_intrinsic_shared_atomic_and:
7149 op32 = aco_opcode::ds_and_b32;
7150 op64 = aco_opcode::ds_and_b64;
7151 op32_rtn = aco_opcode::ds_and_rtn_b32;
7152 op64_rtn = aco_opcode::ds_and_rtn_b64;
7153 break;
7154 case nir_intrinsic_shared_atomic_or:
7155 op32 = aco_opcode::ds_or_b32;
7156 op64 = aco_opcode::ds_or_b64;
7157 op32_rtn = aco_opcode::ds_or_rtn_b32;
7158 op64_rtn = aco_opcode::ds_or_rtn_b64;
7159 break;
7160 case nir_intrinsic_shared_atomic_xor:
7161 op32 = aco_opcode::ds_xor_b32;
7162 op64 = aco_opcode::ds_xor_b64;
7163 op32_rtn = aco_opcode::ds_xor_rtn_b32;
7164 op64_rtn = aco_opcode::ds_xor_rtn_b64;
7165 break;
7166 case nir_intrinsic_shared_atomic_exchange:
7167 op32 = aco_opcode::ds_write_b32;
7168 op64 = aco_opcode::ds_write_b64;
7169 op32_rtn = aco_opcode::ds_wrxchg_rtn_b32;
7170 op64_rtn = aco_opcode::ds_wrxchg_rtn_b64;
7171 break;
7172 case nir_intrinsic_shared_atomic_comp_swap:
7173 op32 = aco_opcode::ds_cmpst_b32;
7174 op64 = aco_opcode::ds_cmpst_b64;
7175 op32_rtn = aco_opcode::ds_cmpst_rtn_b32;
7176 op64_rtn = aco_opcode::ds_cmpst_rtn_b64;
7177 num_operands = 4;
7178 break;
7179 case nir_intrinsic_shared_atomic_fadd:
7180 op32 = aco_opcode::ds_add_f32;
7181 op32_rtn = aco_opcode::ds_add_rtn_f32;
7182 op64 = aco_opcode::num_opcodes;
7183 op64_rtn = aco_opcode::num_opcodes;
7184 break;
7185 case nir_intrinsic_shared_atomic_fmin:
7186 op32 = aco_opcode::ds_min_f32;
7187 op32_rtn = aco_opcode::ds_min_rtn_f32;
7188 op64 = aco_opcode::ds_min_f64;
7189 op64_rtn = aco_opcode::ds_min_rtn_f64;
7190 break;
7191 case nir_intrinsic_shared_atomic_fmax:
7192 op32 = aco_opcode::ds_max_f32;
7193 op32_rtn = aco_opcode::ds_max_rtn_f32;
7194 op64 = aco_opcode::ds_max_f64;
7195 op64_rtn = aco_opcode::ds_max_rtn_f64;
7196 break;
7197 default: unreachable("Unhandled shared atomic intrinsic");
7198 }
7200 bool return_previous = !nir_ssa_def_is_unused(&instr->dest.ssa);
7202 aco_opcode op;
7203 if (data.size() == 1) {
7204 assert(instr->dest.ssa.bit_size == 32);
7205 op = return_previous ? op32_rtn : op32;
7207 assert(instr->dest.ssa.bit_size == 64);
7208 op = return_previous ? op64_rtn : op64;
7209 }
7211 if (offset > 65535) {
7212 address = bld.vadd32(bld.def(v1), Operand::c32(offset), address);
7213 offset = 0;
7214 }
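/* The DS offset0 field is only 16 bits wide, so constant offsets above 65535 cannot
 * be encoded in the instruction; they are folded into the address with a VALU add
 * instead and the immediate offset is reset to 0 (e.g. a base of 70000 becomes
 * address += 70000 with offset0 == 0). */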
7216 aco_ptr<DS_instruction> ds;
7217 ds.reset(
7218 create_instruction<DS_instruction>(op, Format::DS, num_operands, return_previous ? 1 : 0));
7219 ds->operands[0] = Operand(address);
7220 ds->operands[1] = Operand(data);
7221 if (num_operands == 4) {
7222 Temp data2 = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[2].ssa));
7223 ds->operands[2] = Operand(data2);
7225 ds->operands[num_operands - 1] = m;
7226 ds->offset0 = offset;
7227 if (return_previous)
7228 ds->definitions[0] = Definition(get_ssa_temp(ctx, &instr->dest.ssa));
7229 ds->sync = memory_sync_info(storage_shared, semantic_atomicrmw);
7231 if (m.isUndefined())
7232 ds->operands.pop_back();
7234 ctx->block->instructions.emplace_back(std::move(ds));
7238 visit_access_shared2_amd(isel_context* ctx, nir_intrinsic_instr* instr)
7240 bool is_store = instr->intrinsic == nir_intrinsic_store_shared2_amd;
7241 Temp address = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[is_store].ssa));
7242 Builder bld(ctx->program, ctx->block);
7244 assert(bld.program->chip_class >= GFX7);
7246 bool is64bit = (is_store ? instr->src[0].ssa->bit_size : instr->dest.ssa.bit_size) == 64;
7247 uint8_t offset0 = nir_intrinsic_offset0(instr);
7248 uint8_t offset1 = nir_intrinsic_offset1(instr);
7249 bool st64 = nir_intrinsic_st64(instr);
7251 Operand m = load_lds_size_m0(bld);
7252 Instruction* ds;
7253 if (is_store) {
7254 aco_opcode op = st64
7255 ? (is64bit ? aco_opcode::ds_write2st64_b64 : aco_opcode::ds_write2st64_b32)
7256 : (is64bit ? aco_opcode::ds_write2_b64 : aco_opcode::ds_write2_b32);
7257 Temp data = get_ssa_temp(ctx, instr->src[0].ssa);
7258 RegClass comp_rc = is64bit ? v2 : v1;
7259 Temp data0 = emit_extract_vector(ctx, data, 0, comp_rc);
7260 Temp data1 = emit_extract_vector(ctx, data, 1, comp_rc);
7261 ds = bld.ds(op, address, data0, data1, m, offset0, offset1);
7263 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7264 Definition tmp_dst(dst.type() == RegType::vgpr ? dst : bld.tmp(is64bit ? v4 : v2));
7265 aco_opcode op = st64 ? (is64bit ? aco_opcode::ds_read2st64_b64 : aco_opcode::ds_read2st64_b32)
7266 : (is64bit ? aco_opcode::ds_read2_b64 : aco_opcode::ds_read2_b32);
7267 ds = bld.ds(op, tmp_dst, address, m, offset0, offset1);
7269 ds->ds().sync = memory_sync_info(storage_shared);
7270 if (m.isUndefined())
7271 ds->operands.pop_back();
7274 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7275 if (dst.type() == RegType::sgpr) {
7276 emit_split_vector(ctx, ds->definitions[0].getTemp(), dst.size());
7278 /* Use scalar v_readfirstlane_b32 for better 32-bit copy propagation */
7279 for (unsigned i = 0; i < dst.size(); i++)
7280 comp[i] = bld.as_uniform(emit_extract_vector(ctx, ds->definitions[0].getTemp(), i, v1));
7282 Temp comp0 = bld.pseudo(aco_opcode::p_create_vector, bld.def(s2), comp[0], comp[1]);
7283 Temp comp1 = bld.pseudo(aco_opcode::p_create_vector, bld.def(s2), comp[2], comp[3]);
7284 ctx->allocated_vec[comp0.id()] = {comp[0], comp[1]};
7285 ctx->allocated_vec[comp1.id()] = {comp[2], comp[3]};
7286 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), comp0, comp1);
7287 ctx->allocated_vec[dst.id()] = {comp0, comp1};
7289 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), comp[0], comp[1]);
7293 emit_split_vector(ctx, dst, 2);
7298 get_scratch_resource(isel_context* ctx)
7300 Builder bld(ctx->program, ctx->block);
7301 Temp scratch_addr = ctx->program->private_segment_buffer;
7302 if (ctx->stage != compute_cs)
7303 scratch_addr =
7304 bld.smem(aco_opcode::s_load_dwordx2, bld.def(s2), scratch_addr, Operand::zero());
7306 uint32_t rsrc_conf =
7307 S_008F0C_ADD_TID_ENABLE(1) | S_008F0C_INDEX_STRIDE(ctx->program->wave_size == 64 ? 3 : 2);
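/* ADD_TID_ENABLE switches the buffer into swizzled per-lane addressing, so every
 * lane of a wave gets its own interleaved scratch slice. INDEX_STRIDE is an enum
 * rather than a byte count; 2 and 3 presumably select strides of 32 and 64
 * elements, matching the wave32/wave64 choice above. */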
7309 if (ctx->program->chip_class >= GFX10) {
7310 rsrc_conf |= S_008F0C_FORMAT(V_008F0C_GFX10_FORMAT_32_FLOAT) |
7311 S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) | S_008F0C_RESOURCE_LEVEL(1);
7312 } else if (ctx->program->chip_class <=
7313 GFX7) { /* dfmt modifies stride on GFX8/GFX9 when ADD_TID_EN=1 */
7314 rsrc_conf |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
7315 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
7318 /* older generations need element size = 4 bytes. element size removed in GFX9 */
7319 if (ctx->program->chip_class <= GFX8)
7320 rsrc_conf |= S_008F0C_ELEMENT_SIZE(1);
7322 return bld.pseudo(aco_opcode::p_create_vector, bld.def(s4), scratch_addr, Operand::c32(-1u),
7323 Operand::c32(rsrc_conf));
7327 visit_load_scratch(isel_context* ctx, nir_intrinsic_instr* instr)
7329 Builder bld(ctx->program, ctx->block);
7330 Temp rsrc = get_scratch_resource(ctx);
7331 Temp offset = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[0].ssa));
7332 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7334 LoadEmitInfo info = {Operand(offset), dst, instr->dest.ssa.num_components,
7335 instr->dest.ssa.bit_size / 8u, rsrc};
7336 info.align_mul = nir_intrinsic_align_mul(instr);
7337 info.align_offset = nir_intrinsic_align_offset(instr);
7338 info.swizzle_component_size = ctx->program->chip_class <= GFX8 ? 4 : 0;
7339 info.sync = memory_sync_info(storage_scratch, semantic_private);
7340 info.soffset = ctx->program->scratch_offset;
7341 emit_load(ctx, bld, info, scratch_load_params);
7345 visit_store_scratch(isel_context* ctx, nir_intrinsic_instr* instr)
7347 Builder bld(ctx->program, ctx->block);
7348 Temp rsrc = get_scratch_resource(ctx);
7349 Temp data = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[0].ssa));
7350 Temp offset = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[1].ssa));
7352 unsigned elem_size_bytes = instr->src[0].ssa->bit_size / 8;
7353 unsigned writemask = util_widen_mask(nir_intrinsic_write_mask(instr), elem_size_bytes);
7355 unsigned write_count = 0;
7356 Temp write_datas[32];
7357 unsigned offsets[32];
7358 unsigned swizzle_component_size = ctx->program->chip_class <= GFX8 ? 4 : 16;
7359 split_buffer_store(ctx, instr, false, RegType::vgpr, data, writemask, swizzle_component_size,
7360 &write_count, write_datas, offsets);
7362 for (unsigned i = 0; i < write_count; i++) {
7363 aco_opcode op = get_buffer_store_op(write_datas[i].bytes());
7364 Instruction* mubuf = bld.mubuf(op, rsrc, offset, ctx->program->scratch_offset, write_datas[i],
7365 offsets[i], true, true);
7366 mubuf->mubuf().sync = memory_sync_info(storage_scratch, semantic_private);
7371 visit_emit_vertex_with_counter(isel_context* ctx, nir_intrinsic_instr* instr)
7373 Builder bld(ctx->program, ctx->block);
7375 unsigned stream = nir_intrinsic_stream_id(instr);
7376 Temp next_vertex = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[0].ssa));
7377 next_vertex = bld.v_mul_imm(bld.def(v1), next_vertex, 4u);
7378 nir_const_value* next_vertex_cv = nir_src_as_const_value(instr->src[0]);
7381 Temp gsvs_ring =
7382 bld.smem(aco_opcode::s_load_dwordx4, bld.def(s4), ctx->program->private_segment_buffer,
7383 Operand::c32(RING_GSVS_GS * 16u));
7385 unsigned num_components = ctx->program->info.gs.num_stream_output_components[stream];
7387 unsigned stride = 4u * num_components * ctx->shader->info.gs.vertices_out;
7388 unsigned stream_offset = 0;
7389 for (unsigned i = 0; i < stream; i++) {
7390 unsigned prev_stride = 4u * ctx->program->info.gs.num_stream_output_components[i] *
7391 ctx->shader->info.gs.vertices_out;
7392 stream_offset += prev_stride * ctx->program->wave_size;
7395 /* Limit on the stride field for <= GFX7. */
7396 assert(stride < (1 << 14));
7398 Temp gsvs_dwords[4];
7399 for (unsigned i = 0; i < 4; i++)
7400 gsvs_dwords[i] = bld.tmp(s1);
7401 bld.pseudo(aco_opcode::p_split_vector, Definition(gsvs_dwords[0]), Definition(gsvs_dwords[1]),
7402 Definition(gsvs_dwords[2]), Definition(gsvs_dwords[3]), gsvs_ring);
7404 if (stream_offset) {
7405 Temp stream_offset_tmp = bld.copy(bld.def(s1), Operand::c32(stream_offset));
7407 Temp carry = bld.tmp(s1);
7408 gsvs_dwords[0] = bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.scc(Definition(carry)),
7409 gsvs_dwords[0], stream_offset_tmp);
7410 gsvs_dwords[1] = bld.sop2(aco_opcode::s_addc_u32, bld.def(s1), bld.def(s1, scc),
7411 gsvs_dwords[1], Operand::zero(), bld.scc(carry));
7414 gsvs_dwords[1] = bld.sop2(aco_opcode::s_or_b32, bld.def(s1), bld.def(s1, scc), gsvs_dwords[1],
7415 Operand::c32(S_008F04_STRIDE(stride)));
7416 gsvs_dwords[2] = bld.copy(bld.def(s1), Operand::c32(ctx->program->wave_size));
7418 gsvs_ring = bld.pseudo(aco_opcode::p_create_vector, bld.def(s4), gsvs_dwords[0], gsvs_dwords[1],
7419 gsvs_dwords[2], gsvs_dwords[3]);
7421 unsigned offset = 0;
7422 for (unsigned i = 0; i <= VARYING_SLOT_VAR31; i++) {
7423 if (ctx->program->info.gs.output_streams[i] != stream)
7426 for (unsigned j = 0; j < 4; j++) {
7427 if (!(ctx->program->info.gs.output_usage_mask[i] & (1 << j)))
7430 if (ctx->outputs.mask[i] & (1 << j)) {
7431 Operand vaddr_offset = next_vertex_cv ? Operand(v1) : Operand(next_vertex);
7432 unsigned const_offset = (offset + (next_vertex_cv ? next_vertex_cv->u32 : 0u)) * 4u;
7433 if (const_offset >= 4096u) {
7434 if (vaddr_offset.isUndefined())
7435 vaddr_offset = bld.copy(bld.def(v1), Operand::c32(const_offset / 4096u * 4096u));
7436 else
7437 vaddr_offset = bld.vadd32(bld.def(v1), Operand::c32(const_offset / 4096u * 4096u),
7438 vaddr_offset);
7439 const_offset %= 4096u;
7440 }
7442 aco_ptr<MTBUF_instruction> mtbuf{create_instruction<MTBUF_instruction>(
7443 aco_opcode::tbuffer_store_format_x, Format::MTBUF, 4, 0)};
7444 mtbuf->operands[0] = Operand(gsvs_ring);
7445 mtbuf->operands[1] = vaddr_offset;
7446 mtbuf->operands[2] = Operand(get_arg(ctx, ctx->args->ac.gs2vs_offset));
7447 mtbuf->operands[3] = Operand(ctx->outputs.temps[i * 4u + j]);
7448 mtbuf->offen = !vaddr_offset.isUndefined();
7449 mtbuf->dfmt = V_008F0C_BUF_DATA_FORMAT_32;
7450 mtbuf->nfmt = V_008F0C_BUF_NUM_FORMAT_UINT;
7451 mtbuf->offset = const_offset;
7454 mtbuf->sync = memory_sync_info(storage_vmem_output, semantic_can_reorder);
7455 bld.insert(std::move(mtbuf));
7458 offset += ctx->shader->info.gs.vertices_out;
7461 /* outputs for the next vertex are undefined and keeping them around can
7462 * create invalid IR with control flow */
7463 ctx->outputs.mask[i] = 0;
7466 bld.sopp(aco_opcode::s_sendmsg, bld.m0(ctx->gs_wave_id), -1, sendmsg_gs(false, true, stream));
7470 emit_boolean_reduce(isel_context* ctx, nir_op op, unsigned cluster_size, Temp src)
7472 Builder bld(ctx->program, ctx->block);
7474 if (cluster_size == 1) {
7475 return src;
7476 }
7477 if (op == nir_op_iand && cluster_size == 4) {
7478 /* subgroupClusteredAnd(val, 4) -> ~wqm(exec & ~val) */
7479 Temp tmp =
7480 bld.sop2(Builder::s_andn2, bld.def(bld.lm), bld.def(s1, scc), Operand(exec, bld.lm), src);
7481 return bld.sop1(Builder::s_not, bld.def(bld.lm), bld.def(s1, scc),
7482 bld.sop1(Builder::s_wqm, bld.def(bld.lm), bld.def(s1, scc), tmp));
7483 } else if (op == nir_op_ior && cluster_size == 4) {
7484 /* subgroupClusteredOr(val, 4) -> wqm(val & exec) */
7485 return bld.sop1(
7486 Builder::s_wqm, bld.def(bld.lm), bld.def(s1, scc),
7487 bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm)));
7488 } else if (op == nir_op_iand && cluster_size == ctx->program->wave_size) {
7489 /* subgroupAnd(val) -> (exec & ~val) == 0 */
7490 Temp tmp =
7491 bld.sop2(Builder::s_andn2, bld.def(bld.lm), bld.def(s1, scc), Operand(exec, bld.lm), src)
7492 .def(1)
7493 .getTemp();
7494 Temp cond = bool_to_vector_condition(ctx, emit_wqm(bld, tmp));
7495 return bld.sop1(Builder::s_not, bld.def(bld.lm), bld.def(s1, scc), cond);
7496 } else if (op == nir_op_ior && cluster_size == ctx->program->wave_size) {
7497 /* subgroupOr(val) -> (val & exec) != 0 */
7498 Temp tmp =
7499 bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm))
7500 .def(1)
7501 .getTemp();
7502 return bool_to_vector_condition(ctx, tmp);
7503 } else if (op == nir_op_ixor && cluster_size == ctx->program->wave_size) {
7504 /* subgroupXor(val) -> s_bcnt1_i32_b64(val & exec) & 1 */
7505 Temp tmp =
7506 bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm));
7507 tmp = bld.sop1(Builder::s_bcnt1_i32, bld.def(s1), bld.def(s1, scc), tmp);
7508 tmp = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), tmp, Operand::c32(1u))
7509 .def(1)
7510 .getTemp();
7511 return bool_to_vector_condition(ctx, tmp);
7513 /* subgroupClustered{And,Or,Xor}(val, n):
7514 * lane_id = v_mbcnt_hi_u32_b32(-1, v_mbcnt_lo_u32_b32(-1, 0)) (just v_mbcnt_lo on wave32)
7515 * cluster_offset = ~(n - 1) & lane_id cluster_mask = ((1 << n) - 1)
7516 * subgroupClusteredAnd():
7517 * return ((val | ~exec) >> cluster_offset) & cluster_mask == cluster_mask
7518 * subgroupClusteredOr():
7519 * return ((val & exec) >> cluster_offset) & cluster_mask != 0
7520 * subgroupClusteredXor():
7521 * return v_bnt_u32_b32(((val & exec) >> cluster_offset) & cluster_mask, 0) & 1 != 0
7522 */
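/* Worked example for the formulas above, assuming wave64 and cluster_size == 4:
 * lane 13 gets cluster_offset = ~3 & 13 = 12 and cluster_mask = 0xf, so the shifted
 * mask isolates the bits belonging to lanes 12..15. ClusteredAnd then checks that
 * all four bits are set, ClusteredOr that any bit is set, and ClusteredXor counts
 * the bits with v_bcnt and tests the lowest bit of the popcount. */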
7523 Temp lane_id = emit_mbcnt(ctx, bld.tmp(v1));
7524 Temp cluster_offset = bld.vop2(aco_opcode::v_and_b32, bld.def(v1),
7525 Operand::c32(~uint32_t(cluster_size - 1)), lane_id);
7527 Temp tmp;
7528 if (op == nir_op_iand)
7529 tmp = bld.sop2(Builder::s_orn2, bld.def(bld.lm), bld.def(s1, scc), src,
7530 Operand(exec, bld.lm));
7531 else
7532 tmp =
7533 bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm));
7535 uint32_t cluster_mask = cluster_size == 32 ? -1 : (1u << cluster_size) - 1u;
7537 if (ctx->program->chip_class <= GFX7)
7538 tmp = bld.vop3(aco_opcode::v_lshr_b64, bld.def(v2), tmp, cluster_offset);
7539 else if (ctx->program->wave_size == 64)
7540 tmp = bld.vop3(aco_opcode::v_lshrrev_b64, bld.def(v2), cluster_offset, tmp);
7541 else
7542 tmp = bld.vop2_e64(aco_opcode::v_lshrrev_b32, bld.def(v1), cluster_offset, tmp);
7543 tmp = emit_extract_vector(ctx, tmp, 0, v1);
7544 if (cluster_mask != 0xffffffff)
7545 tmp = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand::c32(cluster_mask), tmp);
7547 if (op == nir_op_iand) {
7548 return bld.vopc(aco_opcode::v_cmp_eq_u32, bld.def(bld.lm), Operand::c32(cluster_mask),
7549 tmp);
7550 } else if (op == nir_op_ior) {
7551 return bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(bld.lm), Operand::zero(), tmp);
7552 } else if (op == nir_op_ixor) {
7553 tmp = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand::c32(1u),
7554 bld.vop3(aco_opcode::v_bcnt_u32_b32, bld.def(v1), tmp, Operand::zero()));
7555 return bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(bld.lm), Operand::zero(), tmp);
7563 emit_boolean_exclusive_scan(isel_context* ctx, nir_op op, Temp src)
7565 Builder bld(ctx->program, ctx->block);
7566 assert(src.regClass() == bld.lm);
7568 /* subgroupExclusiveAnd(val) -> mbcnt(exec & ~val) == 0
7569 * subgroupExclusiveOr(val) -> mbcnt(val & exec) != 0
7570 * subgroupExclusiveXor(val) -> mbcnt(val & exec) & 1 != 0
7571 */
7572 Temp tmp;
7573 if (op == nir_op_iand)
7574 tmp =
7575 bld.sop2(Builder::s_andn2, bld.def(bld.lm), bld.def(s1, scc), Operand(exec, bld.lm), src);
7576 else
7577 tmp = bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm));
7579 Temp mbcnt = emit_mbcnt(ctx, bld.tmp(v1), Operand(tmp));
7581 if (op == nir_op_iand)
7582 return bld.vopc(aco_opcode::v_cmp_eq_u32, bld.def(bld.lm), Operand::zero(), mbcnt);
7583 else if (op == nir_op_ior)
7584 return bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(bld.lm), Operand::zero(), mbcnt);
7585 else if (op == nir_op_ixor)
7586 return bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(bld.lm), Operand::zero(),
7587 bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand::c32(1u), mbcnt));
7594 emit_boolean_inclusive_scan(isel_context* ctx, nir_op op, Temp src)
7596 Builder bld(ctx->program, ctx->block);
7598 /* subgroupInclusiveAnd(val) -> subgroupExclusiveAnd(val) && val
7599 * subgroupInclusiveOr(val) -> subgroupExclusiveOr(val) || val
7600 * subgroupInclusiveXor(val) -> subgroupExclusiveXor(val) ^^ val
7601 */
7602 Temp tmp = emit_boolean_exclusive_scan(ctx, op, src);
7603 if (op == nir_op_iand)
7604 return bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), tmp, src);
7605 else if (op == nir_op_ior)
7606 return bld.sop2(Builder::s_or, bld.def(bld.lm), bld.def(s1, scc), tmp, src);
7607 else if (op == nir_op_ixor)
7608 return bld.sop2(Builder::s_xor, bld.def(bld.lm), bld.def(s1, scc), tmp, src);
7615 get_reduce_op(nir_op op, unsigned bit_size)
7618 #define CASEI(name) \
7619 case nir_op_##name: \
7620 return (bit_size == 32) ? name##32 \
7621 : (bit_size == 16) ? name##16 \
7622 : (bit_size == 8) ? name##8 \
7623 : name##64;
7624 #define CASEF(name) \
7625 case nir_op_##name: return (bit_size == 32) ? name##32 : (bit_size == 16) ? name##16 : name##64;
7639 default: unreachable("unknown reduction op");
7646 emit_uniform_subgroup(isel_context* ctx, nir_intrinsic_instr* instr, Temp src)
7648 Builder bld(ctx->program, ctx->block);
7649 Definition dst(get_ssa_temp(ctx, &instr->dest.ssa));
7650 assert(dst.regClass().type() != RegType::vgpr);
7651 if (src.regClass().type() == RegType::vgpr)
7652 bld.pseudo(aco_opcode::p_as_uniform, dst, src);
7653 else
7654 bld.copy(dst, src);
7655 }
7658 emit_addition_uniform_reduce(isel_context* ctx, nir_op op, Definition dst, nir_src src, Temp count)
7660 Builder bld(ctx->program, ctx->block);
7661 Temp src_tmp = get_ssa_temp(ctx, src.ssa);
7663 if (op == nir_op_fadd) {
7664 src_tmp = as_vgpr(ctx, src_tmp);
7665 Temp tmp = dst.regClass() == s1 ? bld.tmp(RegClass::get(RegType::vgpr, src.ssa->bit_size / 8))
7666 : dst.getTemp();
7668 if (src.ssa->bit_size == 16) {
7669 count = bld.vop1(aco_opcode::v_cvt_f16_u16, bld.def(v2b), count);
7670 bld.vop2(aco_opcode::v_mul_f16, Definition(tmp), count, src_tmp);
7672 assert(src.ssa->bit_size == 32);
7673 count = bld.vop1(aco_opcode::v_cvt_f32_u32, bld.def(v1), count);
7674 bld.vop2(aco_opcode::v_mul_f32, Definition(tmp), count, src_tmp);
7677 if (tmp != dst.getTemp())
7678 bld.pseudo(aco_opcode::p_as_uniform, dst, tmp);
7680 return;
7681 }
7683 if (dst.regClass() == s1)
7684 src_tmp = bld.as_uniform(src_tmp);
7686 if (op == nir_op_ixor && count.type() == RegType::sgpr)
7687 count =
7688 bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), count, Operand::c32(1u));
7689 else if (op == nir_op_ixor)
7690 count = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand::c32(1u), count);
7692 assert(dst.getTemp().type() == count.type());
7694 if (nir_src_is_const(src)) {
7695 if (nir_src_as_uint(src) == 1 && dst.bytes() <= 2)
7696 bld.pseudo(aco_opcode::p_extract_vector, dst, count, Operand::zero());
7697 else if (nir_src_as_uint(src) == 1)
7698 bld.copy(dst, count);
7699 else if (nir_src_as_uint(src) == 0 && dst.bytes() <= 2)
7700 bld.vop1(aco_opcode::v_mov_b32, dst, Operand::zero()); /* RA will use SDWA if possible */
7701 else if (nir_src_as_uint(src) == 0)
7702 bld.copy(dst, Operand::zero());
7703 else if (count.type() == RegType::vgpr)
7704 bld.v_mul_imm(dst, count, nir_src_as_uint(src));
7706 bld.sop2(aco_opcode::s_mul_i32, dst, src_tmp, count);
7707 } else if (dst.bytes() <= 2 && ctx->program->chip_class >= GFX10) {
7708 bld.vop3(aco_opcode::v_mul_lo_u16_e64, dst, src_tmp, count);
7709 } else if (dst.bytes() <= 2 && ctx->program->chip_class >= GFX8) {
7710 bld.vop2(aco_opcode::v_mul_lo_u16, dst, src_tmp, count);
7711 } else if (dst.getTemp().type() == RegType::vgpr) {
7712 bld.vop3(aco_opcode::v_mul_lo_u32, dst, src_tmp, count);
7714 bld.sop2(aco_opcode::s_mul_i32, dst, src_tmp, count);
7719 emit_uniform_reduce(isel_context* ctx, nir_intrinsic_instr* instr)
7721 nir_op op = (nir_op)nir_intrinsic_reduction_op(instr);
7722 if (op == nir_op_imul || op == nir_op_fmul)
7723 return false;
7725 if (op == nir_op_iadd || op == nir_op_ixor || op == nir_op_fadd) {
7726 Builder bld(ctx->program, ctx->block);
7727 Definition dst(get_ssa_temp(ctx, &instr->dest.ssa));
7728 unsigned bit_size = instr->src[0].ssa->bit_size;
7729 if (bit_size > 32)
7730 return false;
7732 Temp thread_count =
7733 bld.sop1(Builder::s_bcnt1_i32, bld.def(s1), bld.def(s1, scc), Operand(exec, bld.lm));
7735 emit_addition_uniform_reduce(ctx, op, dst, instr->src[0], thread_count);
7736 } else {
7737 emit_uniform_subgroup(ctx, instr, get_ssa_temp(ctx, instr->src[0].ssa));
7738 }
7740 return true;
7744 emit_uniform_scan(isel_context* ctx, nir_intrinsic_instr* instr)
7746 Builder bld(ctx->program, ctx->block);
7747 Definition dst(get_ssa_temp(ctx, &instr->dest.ssa));
7748 nir_op op = (nir_op)nir_intrinsic_reduction_op(instr);
7749 bool inc = instr->intrinsic == nir_intrinsic_inclusive_scan;
7751 if (op == nir_op_imul || op == nir_op_fmul)
7752 return false;
7754 if (op == nir_op_iadd || op == nir_op_ixor || op == nir_op_fadd) {
7755 if (instr->src[0].ssa->bit_size > 32)
7756 return false;
7758 Temp packed_tid;
7759 if (inc)
7760 packed_tid = emit_mbcnt(ctx, bld.tmp(v1), Operand(exec, bld.lm), Operand::c32(1u));
7761 else
7762 packed_tid = emit_mbcnt(ctx, bld.tmp(v1), Operand(exec, bld.lm));
7764 emit_addition_uniform_reduce(ctx, op, dst, instr->src[0], packed_tid);
7765 return true;
7766 }
7768 assert(op == nir_op_imin || op == nir_op_umin || op == nir_op_imax || op == nir_op_umax ||
7769 op == nir_op_iand || op == nir_op_ior || op == nir_op_fmin || op == nir_op_fmax);
7772 emit_uniform_subgroup(ctx, instr, get_ssa_temp(ctx, instr->src[0].ssa));
7776 /* Copy the source and write the reduction operation identity to the first lane. */
7777 Temp lane = bld.sop1(Builder::s_ff1_i32, bld.def(s1), Operand(exec, bld.lm));
7778 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
7779 ReduceOp reduce_op = get_reduce_op(op, instr->src[0].ssa->bit_size);
7780 if (dst.bytes() == 8) {
7781 Temp lo = bld.tmp(v1), hi = bld.tmp(v1);
7782 bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), src);
7783 uint32_t identity_lo = get_reduction_identity(reduce_op, 0);
7784 uint32_t identity_hi = get_reduction_identity(reduce_op, 1);
7787 bld.writelane(bld.def(v1), bld.copy(bld.def(s1, m0), Operand::c32(identity_lo)), lane, lo);
7789 bld.writelane(bld.def(v1), bld.copy(bld.def(s1, m0), Operand::c32(identity_hi)), lane, hi);
7790 bld.pseudo(aco_opcode::p_create_vector, dst, lo, hi);
7792 uint32_t identity = get_reduction_identity(reduce_op, 0);
7793 bld.writelane(dst, bld.copy(bld.def(s1, m0), Operand::c32(identity)), lane, as_vgpr(ctx, src));
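/* Emit the p_reduce/p_inclusive_scan/p_exclusive_scan pseudo instruction. Besides the
 * result it carries a definition used to save/restore exec, an optional SGPR identity
 * temporary for chips/ops that need one, and scc/vcc clobber definitions where the
 * lowering uses them; operands 1 and 2 are linear-VGPR temporaries that
 * setup_reduce_temp fills in later. */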
7801 emit_reduction_instr(isel_context* ctx, aco_opcode aco_op, ReduceOp op, unsigned cluster_size,
7802 Definition dst, Temp src)
7804 assert(src.bytes() <= 8);
7805 assert(src.type() == RegType::vgpr);
7807 Builder bld(ctx->program, ctx->block);
7809 unsigned num_defs = 0;
7811 defs[num_defs++] = dst;
7812 defs[num_defs++] = bld.def(bld.lm); /* used internally to save/restore exec */
7814 /* scalar identity temporary */
7815 bool need_sitmp = (ctx->program->chip_class <= GFX7 || ctx->program->chip_class >= GFX10) &&
7816 aco_op != aco_opcode::p_reduce;
7817 if (aco_op == aco_opcode::p_exclusive_scan) {
7818 need_sitmp |= (op == imin8 || op == imin16 || op == imin32 || op == imin64 || op == imax8 ||
7819 op == imax16 || op == imax32 || op == imax64 || op == fmin16 || op == fmin32 ||
7820 op == fmin64 || op == fmax16 || op == fmax32 || op == fmax64 || op == fmul16 ||
7824 defs[num_defs++] = bld.def(RegType::sgpr, dst.size());
7827 defs[num_defs++] = bld.def(s1, scc);
7830 bool clobber_vcc = false;
7831 if ((op == iadd32 || op == imul64) && ctx->program->chip_class < GFX9)
7833 if ((op == iadd8 || op == iadd16) && ctx->program->chip_class < GFX8)
7835 if (op == iadd64 || op == umin64 || op == umax64 || op == imin64 || op == imax64)
7839 defs[num_defs++] = bld.def(bld.lm, vcc);
7841 Pseudo_reduction_instruction* reduce = create_instruction<Pseudo_reduction_instruction>(
7842 aco_op, Format::PSEUDO_REDUCTION, 3, num_defs);
7843 reduce->operands[0] = Operand(src);
7844 /* setup_reduce_temp will update these undef operands if needed */
7845 reduce->operands[1] = Operand(RegClass(RegType::vgpr, dst.size()).as_linear());
7846 reduce->operands[2] = Operand(v1.as_linear());
7847 std::copy(defs, defs + num_defs, reduce->definitions.begin());
7849 reduce->reduce_op = op;
7850 reduce->cluster_size = cluster_size;
7851 bld.insert(std::move(reduce));
7853 return dst.getTemp();
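/* Interpolate at an offset (pos1, pos2) from the pixel center: compute the quad-local
 * derivatives of the i/j barycentrics with DPP quad permutes (ds_swizzle on GFX7 and
 * older; dpp_quad_perm(0, 0, 0, 0) broadcasts lane 0 of each quad) and evaluate
 * res_k = p_k + ddx_k * pos1 + ddy_k * pos2 in WQM so helper lanes hold valid values. */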
7857 emit_interp_center(isel_context* ctx, Temp dst, Temp bary, Temp pos1, Temp pos2)
7859 Builder bld(ctx->program, ctx->block);
7860 Temp p1 = emit_extract_vector(ctx, bary, 0, v1);
7861 Temp p2 = emit_extract_vector(ctx, bary, 1, v1);
7863 Temp ddx_1, ddx_2, ddy_1, ddy_2;
7864 uint32_t dpp_ctrl0 = dpp_quad_perm(0, 0, 0, 0);
7865 uint32_t dpp_ctrl1 = dpp_quad_perm(1, 1, 1, 1);
7866 uint32_t dpp_ctrl2 = dpp_quad_perm(2, 2, 2, 2);
7869 if (ctx->program->chip_class >= GFX8) {
7870 Temp tl_1 = bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), p1, dpp_ctrl0);
7871 ddx_1 = bld.vop2_dpp(aco_opcode::v_sub_f32, bld.def(v1), p1, tl_1, dpp_ctrl1);
7872 ddy_1 = bld.vop2_dpp(aco_opcode::v_sub_f32, bld.def(v1), p1, tl_1, dpp_ctrl2);
7873 Temp tl_2 = bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), p2, dpp_ctrl0);
7874 ddx_2 = bld.vop2_dpp(aco_opcode::v_sub_f32, bld.def(v1), p2, tl_2, dpp_ctrl1);
7875 ddy_2 = bld.vop2_dpp(aco_opcode::v_sub_f32, bld.def(v1), p2, tl_2, dpp_ctrl2);
7877 Temp tl_1 = bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), p1, (1 << 15) | dpp_ctrl0);
7878 ddx_1 = bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), p1, (1 << 15) | dpp_ctrl1);
7879 ddx_1 = bld.vop2(aco_opcode::v_sub_f32, bld.def(v1), ddx_1, tl_1);
7880 ddy_1 = bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), p1, (1 << 15) | dpp_ctrl2);
7881 ddy_1 = bld.vop2(aco_opcode::v_sub_f32, bld.def(v1), ddy_1, tl_1);
7883 Temp tl_2 = bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), p2, (1 << 15) | dpp_ctrl0);
7884 ddx_2 = bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), p2, (1 << 15) | dpp_ctrl1);
7885 ddx_2 = bld.vop2(aco_opcode::v_sub_f32, bld.def(v1), ddx_2, tl_2);
7886 ddy_2 = bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), p2, (1 << 15) | dpp_ctrl2);
7887 ddy_2 = bld.vop2(aco_opcode::v_sub_f32, bld.def(v1), ddy_2, tl_2);
7890 /* res_k = p_k + ddx_k * pos1 + ddy_k * pos2 */
7892 aco_opcode mad = ctx->program->chip_class >= GFX10_3 ? aco_opcode::v_fma_f32 : aco_opcode::v_mad_f32;
7893 Temp tmp1 = bld.vop3(mad, bld.def(v1), ddx_1, pos1, p1);
7894 Temp tmp2 = bld.vop3(mad, bld.def(v1), ddx_2, pos1, p2);
7895 tmp1 = bld.vop3(mad, bld.def(v1), ddy_1, pos2, tmp1);
7896 tmp2 = bld.vop3(mad, bld.def(v1), ddy_2, pos2, tmp2);
7897 Temp wqm1 = bld.tmp(v1);
7898 emit_wqm(bld, tmp1, wqm1, true);
7899 Temp wqm2 = bld.tmp(v1);
7900 emit_wqm(bld, tmp2, wqm2, true);
7901 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), wqm1, wqm2);
7905 Temp merged_wave_info_to_mask(isel_context* ctx, unsigned i);
7906 void ngg_emit_sendmsg_gs_alloc_req(isel_context* ctx, Temp vtx_cnt, Temp prm_cnt);
7907 static void create_primitive_exports(isel_context *ctx, Temp prim_ch1);
7908 static void create_vs_exports(isel_context* ctx);
7911 get_interp_param(isel_context* ctx, nir_intrinsic_op intrin,
7912 enum glsl_interp_mode interp)
7914 bool linear = interp == INTERP_MODE_NOPERSPECTIVE;
7915 if (intrin == nir_intrinsic_load_barycentric_pixel ||
7916 intrin == nir_intrinsic_load_barycentric_at_sample ||
7917 intrin == nir_intrinsic_load_barycentric_at_offset) {
7918 return get_arg(ctx, linear ? ctx->args->ac.linear_center : ctx->args->ac.persp_center);
7919 } else if (intrin == nir_intrinsic_load_barycentric_centroid) {
7920 return linear ? ctx->linear_centroid : ctx->persp_centroid;
7922 assert(intrin == nir_intrinsic_load_barycentric_sample);
7923 return get_arg(ctx, linear ? ctx->args->ac.linear_sample : ctx->args->ac.persp_sample);
7928 visit_intrinsic(isel_context* ctx, nir_intrinsic_instr* instr)
7930 Builder bld(ctx->program, ctx->block);
7931 switch (instr->intrinsic) {
7932 case nir_intrinsic_load_barycentric_sample:
7933 case nir_intrinsic_load_barycentric_pixel:
7934 case nir_intrinsic_load_barycentric_centroid: {
7935 glsl_interp_mode mode = (glsl_interp_mode)nir_intrinsic_interp_mode(instr);
7936 Temp bary = get_interp_param(ctx, instr->intrinsic, mode);
7937 assert(bary.size() == 2);
7938 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7939 bld.copy(Definition(dst), bary);
7940 emit_split_vector(ctx, dst, 2);
7943 case nir_intrinsic_load_barycentric_model: {
7944 Temp model = get_arg(ctx, ctx->args->ac.pull_model);
7945 assert(model.size() == 3);
7946 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7947 bld.copy(Definition(dst), model);
7948 emit_split_vector(ctx, dst, 3);
7951 case nir_intrinsic_load_barycentric_at_sample: {
7952 uint32_t sample_pos_offset = RING_PS_SAMPLE_POSITIONS * 16;
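/* The sample positions ring presumably stores the 1x, 2x, 4x and 8x patterns back to
 * back, 8 bytes per position, so the pattern for N samples starts after the
 * 1 + 2 + ... preceding entries (hence the extra 1, 3 and 7 entries below). */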
7953 switch (ctx->options->key.ps.num_samples) {
7954 case 2: sample_pos_offset += 1 << 3; break;
7955 case 4: sample_pos_offset += 3 << 3; break;
7956 case 8: sample_pos_offset += 7 << 3; break;
7960 Temp addr = get_ssa_temp(ctx, instr->src[0].ssa);
7961 nir_const_value* const_addr = nir_src_as_const_value(instr->src[0]);
7962 Temp private_segment_buffer = ctx->program->private_segment_buffer;
7963 // TODO: bounds checking?
7964 if (addr.type() == RegType::sgpr) {
7967 sample_pos_offset += const_addr->u32 << 3;
7968 offset = Operand::c32(sample_pos_offset);
7969 } else if (ctx->options->chip_class >= GFX9) {
7970 offset = bld.sop2(aco_opcode::s_lshl3_add_u32, bld.def(s1), bld.def(s1, scc), addr,
7971 Operand::c32(sample_pos_offset));
7973 offset = bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.def(s1, scc), addr,
7975 offset = bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.def(s1, scc), offset,
7976 Operand::c32(sample_pos_offset));
7979 Operand off = bld.copy(bld.def(s1), Operand(offset));
7981 sample_pos = bld.smem(aco_opcode::s_load_dwordx2, bld.def(s2), private_segment_buffer, off);
7983 } else if (ctx->options->chip_class >= GFX9) {
7984 addr = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand::c32(3u), addr);
7985 sample_pos = bld.global(aco_opcode::global_load_dwordx2, bld.def(v2), addr,
7986 private_segment_buffer, sample_pos_offset);
7987 } else if (ctx->options->chip_class >= GFX7) {
7988 /* addr += private_segment_buffer + sample_pos_offset */
7989 Temp tmp0 = bld.tmp(s1);
7990 Temp tmp1 = bld.tmp(s1);
7991 bld.pseudo(aco_opcode::p_split_vector, Definition(tmp0), Definition(tmp1),
7992 private_segment_buffer);
7993 Definition scc_tmp = bld.def(s1, scc);
7994 tmp0 = bld.sop2(aco_opcode::s_add_u32, bld.def(s1), scc_tmp, tmp0,
7995 Operand::c32(sample_pos_offset));
7996 tmp1 = bld.sop2(aco_opcode::s_addc_u32, bld.def(s1), bld.def(s1, scc), tmp1,
7997 Operand::zero(), bld.scc(scc_tmp.getTemp()));
7998 addr = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand::c32(3u), addr);
7999 Temp pck0 = bld.tmp(v1);
8000 Temp carry = bld.vadd32(Definition(pck0), tmp0, addr, true).def(1).getTemp();
8001 tmp1 = as_vgpr(ctx, tmp1);
8002 Temp pck1 = bld.vop2_e64(aco_opcode::v_addc_co_u32, bld.def(v1), bld.def(bld.lm), tmp1,
8003 Operand::zero(), carry);
8004 addr = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), pck0, pck1);
8006 /* sample_pos = flat_load_dwordx2 addr */
8007 sample_pos = bld.flat(aco_opcode::flat_load_dwordx2, bld.def(v2), addr, Operand(s1));
8009 assert(ctx->options->chip_class == GFX6);
8011 uint32_t rsrc_conf = S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
8012 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
8013 Temp rsrc = bld.pseudo(aco_opcode::p_create_vector, bld.def(s4), private_segment_buffer,
8014 Operand::zero(), Operand::c32(rsrc_conf));
8016 addr = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand::c32(3u), addr);
8017 addr = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), addr, Operand::zero());
8019 sample_pos = bld.tmp(v2);
8021 aco_ptr<MUBUF_instruction> load{create_instruction<MUBUF_instruction>(
8022 aco_opcode::buffer_load_dwordx2, Format::MUBUF, 3, 1)};
8023 load->definitions[0] = Definition(sample_pos);
8024 load->operands[0] = Operand(rsrc);
8025 load->operands[1] = Operand(addr);
8026 load->operands[2] = Operand::zero();
8027 load->offset = sample_pos_offset;
8029 load->addr64 = true;
8032 load->disable_wqm = false;
8033 ctx->block->instructions.emplace_back(std::move(load));
8036 /* sample_pos -= 0.5 */
8037 Temp pos1 = bld.tmp(RegClass(sample_pos.type(), 1));
8038 Temp pos2 = bld.tmp(RegClass(sample_pos.type(), 1));
8039 bld.pseudo(aco_opcode::p_split_vector, Definition(pos1), Definition(pos2), sample_pos);
8040 pos1 = bld.vop2_e64(aco_opcode::v_sub_f32, bld.def(v1), pos1, Operand::c32(0x3f000000u));
8041 pos2 = bld.vop2_e64(aco_opcode::v_sub_f32, bld.def(v1), pos2, Operand::c32(0x3f000000u));
8043 Temp bary = get_interp_param(ctx, instr->intrinsic, (glsl_interp_mode)nir_intrinsic_interp_mode(instr));
8044 emit_interp_center(ctx, get_ssa_temp(ctx, &instr->dest.ssa), bary, pos1, pos2);
8047 case nir_intrinsic_load_barycentric_at_offset: {
8048 Temp offset = get_ssa_temp(ctx, instr->src[0].ssa);
8049 RegClass rc = RegClass(offset.type(), 1);
8050 Temp pos1 = bld.tmp(rc), pos2 = bld.tmp(rc);
8051 bld.pseudo(aco_opcode::p_split_vector, Definition(pos1), Definition(pos2), offset);
8052 Temp bary = get_interp_param(ctx, instr->intrinsic, (glsl_interp_mode)nir_intrinsic_interp_mode(instr));
8053 emit_interp_center(ctx, get_ssa_temp(ctx, &instr->dest.ssa), bary, pos1, pos2);
8056 case nir_intrinsic_load_front_face: {
8057 bld.vopc(aco_opcode::v_cmp_lg_u32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)),
8058 Operand::zero(), get_arg(ctx, ctx->args->ac.front_face));
8061 case nir_intrinsic_load_view_index: {
8062 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8063 bld.copy(Definition(dst), Operand(get_arg(ctx, ctx->args->ac.view_index)));
8066 case nir_intrinsic_load_frag_coord: {
8067 emit_load_frag_coord(ctx, get_ssa_temp(ctx, &instr->dest.ssa), 4);
8070 case nir_intrinsic_load_frag_shading_rate:
8071 emit_load_frag_shading_rate(ctx, get_ssa_temp(ctx, &instr->dest.ssa));
8073 case nir_intrinsic_load_sample_pos: {
8074 Temp posx = get_arg(ctx, ctx->args->ac.frag_pos[0]);
8075 Temp posy = get_arg(ctx, ctx->args->ac.frag_pos[1]);
8077 bld.pseudo(aco_opcode::p_create_vector, Definition(get_ssa_temp(ctx, &instr->dest.ssa)),
8078 posx.id() ? bld.vop1(aco_opcode::v_fract_f32, bld.def(v1), posx) : Operand::zero(),
8079 posy.id() ? bld.vop1(aco_opcode::v_fract_f32, bld.def(v1), posy) : Operand::zero());
8082 case nir_intrinsic_load_tess_coord: visit_load_tess_coord(ctx, instr); break;
8083 case nir_intrinsic_load_interpolated_input: visit_load_interpolated_input(ctx, instr); break;
8084 case nir_intrinsic_store_output: visit_store_output(ctx, instr); break;
8085 case nir_intrinsic_load_input:
8086 case nir_intrinsic_load_input_vertex: visit_load_input(ctx, instr); break;
8087 case nir_intrinsic_load_per_vertex_input: visit_load_per_vertex_input(ctx, instr); break;
8088 case nir_intrinsic_load_ubo: visit_load_ubo(ctx, instr); break;
8089 case nir_intrinsic_load_push_constant: visit_load_push_constant(ctx, instr); break;
8090 case nir_intrinsic_load_constant: visit_load_constant(ctx, instr); break;
8091 case nir_intrinsic_load_shared: visit_load_shared(ctx, instr); break;
8092 case nir_intrinsic_store_shared: visit_store_shared(ctx, instr); break;
8093 case nir_intrinsic_shared_atomic_add:
8094 case nir_intrinsic_shared_atomic_imin:
8095 case nir_intrinsic_shared_atomic_umin:
8096 case nir_intrinsic_shared_atomic_imax:
8097 case nir_intrinsic_shared_atomic_umax:
8098 case nir_intrinsic_shared_atomic_and:
8099 case nir_intrinsic_shared_atomic_or:
8100 case nir_intrinsic_shared_atomic_xor:
8101 case nir_intrinsic_shared_atomic_exchange:
8102 case nir_intrinsic_shared_atomic_comp_swap:
8103 case nir_intrinsic_shared_atomic_fadd:
8104 case nir_intrinsic_shared_atomic_fmin:
8105 case nir_intrinsic_shared_atomic_fmax: visit_shared_atomic(ctx, instr); break;
8106 case nir_intrinsic_load_shared2_amd:
8107 case nir_intrinsic_store_shared2_amd: visit_access_shared2_amd(ctx, instr); break;
8108 case nir_intrinsic_bindless_image_load:
8109 case nir_intrinsic_bindless_image_sparse_load: visit_image_load(ctx, instr); break;
8110 case nir_intrinsic_bindless_image_store: visit_image_store(ctx, instr); break;
8111 case nir_intrinsic_bindless_image_atomic_add:
8112 case nir_intrinsic_bindless_image_atomic_umin:
8113 case nir_intrinsic_bindless_image_atomic_imin:
8114 case nir_intrinsic_bindless_image_atomic_umax:
8115 case nir_intrinsic_bindless_image_atomic_imax:
8116 case nir_intrinsic_bindless_image_atomic_and:
8117 case nir_intrinsic_bindless_image_atomic_or:
8118 case nir_intrinsic_bindless_image_atomic_xor:
8119 case nir_intrinsic_bindless_image_atomic_exchange:
8120 case nir_intrinsic_bindless_image_atomic_comp_swap:
8121 case nir_intrinsic_bindless_image_atomic_fmin:
8122 case nir_intrinsic_bindless_image_atomic_fmax: visit_image_atomic(ctx, instr); break;
8123 case nir_intrinsic_bindless_image_size: visit_image_size(ctx, instr); break;
8124 case nir_intrinsic_bindless_image_samples: visit_image_samples(ctx, instr); break;
8125 case nir_intrinsic_load_ssbo: visit_load_ssbo(ctx, instr); break;
8126 case nir_intrinsic_store_ssbo: visit_store_ssbo(ctx, instr); break;
8127 case nir_intrinsic_load_buffer_amd: visit_load_buffer(ctx, instr); break;
8128 case nir_intrinsic_store_buffer_amd: visit_store_buffer(ctx, instr); break;
8129 case nir_intrinsic_load_smem_amd: visit_load_smem(ctx, instr); break;
8130 case nir_intrinsic_load_global_amd: visit_load_global(ctx, instr); break;
8131 case nir_intrinsic_store_global_amd: visit_store_global(ctx, instr); break;
8132 case nir_intrinsic_global_atomic_add_amd:
8133 case nir_intrinsic_global_atomic_imin_amd:
8134 case nir_intrinsic_global_atomic_umin_amd:
8135 case nir_intrinsic_global_atomic_imax_amd:
8136 case nir_intrinsic_global_atomic_umax_amd:
8137 case nir_intrinsic_global_atomic_and_amd:
8138 case nir_intrinsic_global_atomic_or_amd:
8139 case nir_intrinsic_global_atomic_xor_amd:
8140 case nir_intrinsic_global_atomic_exchange_amd:
8141 case nir_intrinsic_global_atomic_comp_swap_amd:
8142 case nir_intrinsic_global_atomic_fmin_amd:
8143 case nir_intrinsic_global_atomic_fmax_amd: visit_global_atomic(ctx, instr); break;
8144 case nir_intrinsic_ssbo_atomic_add:
8145 case nir_intrinsic_ssbo_atomic_imin:
8146 case nir_intrinsic_ssbo_atomic_umin:
8147 case nir_intrinsic_ssbo_atomic_imax:
8148 case nir_intrinsic_ssbo_atomic_umax:
8149 case nir_intrinsic_ssbo_atomic_and:
8150 case nir_intrinsic_ssbo_atomic_or:
8151 case nir_intrinsic_ssbo_atomic_xor:
8152 case nir_intrinsic_ssbo_atomic_exchange:
8153 case nir_intrinsic_ssbo_atomic_comp_swap:
8154 case nir_intrinsic_ssbo_atomic_fmin:
8155 case nir_intrinsic_ssbo_atomic_fmax: visit_atomic_ssbo(ctx, instr); break;
8156 case nir_intrinsic_load_scratch: visit_load_scratch(ctx, instr); break;
8157 case nir_intrinsic_store_scratch: visit_store_scratch(ctx, instr); break;
8158 case nir_intrinsic_scoped_barrier: emit_scoped_barrier(ctx, instr); break;
8159 case nir_intrinsic_load_num_workgroups: {
8160 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8161 if (ctx->args->load_grid_size_from_user_sgpr) {
8162 bld.copy(Definition(dst), get_arg(ctx, ctx->args->ac.num_work_groups));
8164 Temp addr = get_arg(ctx, ctx->args->ac.num_work_groups);
8165 assert(addr.regClass() == s2);
8166 bld.pseudo(aco_opcode::p_create_vector, Definition(dst),
8167 bld.smem(aco_opcode::s_load_dwordx2, bld.def(s2), addr, Operand::zero()),
8168 bld.smem(aco_opcode::s_load_dword, bld.def(s1), addr, Operand::c32(8)));
8170 emit_split_vector(ctx, dst, 3);
8173 case nir_intrinsic_load_ray_launch_size_addr_amd: {
8174 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8175 Temp addr = convert_pointer_to_64_bit(ctx, get_arg(ctx, ctx->args->ac.ray_launch_size_addr));
8176 bld.copy(Definition(dst), Operand(addr));
8179 case nir_intrinsic_load_local_invocation_id: {
8180 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8181 if (ctx->options->chip_class >= GFX11) {
8184 /* Thread IDs are packed in VGPR0, 10 bits per component. */
8185 for (uint32_t i = 0; i < 3; i++) {
8186 local_ids[i] = bld.vop3(aco_opcode::v_bfe_u32, bld.def(v1),
8187 get_arg(ctx, ctx->args->ac.local_invocation_ids),
8188 Operand::c32(i * 10u), Operand::c32(10u));
8191 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), local_ids[0], local_ids[1],
8194 bld.copy(Definition(dst), Operand(get_arg(ctx, ctx->args->ac.local_invocation_ids)));
8196 emit_split_vector(ctx, dst, 3);
8199 case nir_intrinsic_load_workgroup_id: {
8200 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8201 if (ctx->stage.hw == HWStage::CS) {
8202 const struct ac_arg* ids = ctx->args->ac.workgroup_ids;
8203 bld.pseudo(aco_opcode::p_create_vector, Definition(dst),
8204 ids[0].used ? Operand(get_arg(ctx, ids[0])) : Operand::zero(),
8205 ids[1].used ? Operand(get_arg(ctx, ids[1])) : Operand::zero(),
8206 ids[2].used ? Operand(get_arg(ctx, ids[2])) : Operand::zero());
8207 emit_split_vector(ctx, dst, 3);
8209 isel_err(&instr->instr, "Unsupported stage for load_workgroup_id");
8213 case nir_intrinsic_load_local_invocation_index: {
8214 if (ctx->stage.hw == HWStage::LS || ctx->stage.hw == HWStage::HS) {
8215 if (ctx->options->chip_class >= GFX11) {
8216 /* On GFX11, RelAutoIndex is WaveID * WaveSize + ThreadID. */
8218 Temp wave_id = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc),
8219 get_arg(ctx, ctx->args->ac.tcs_wave_id), Operand::c32(0u | (5u << 16)));
8221 Temp temp = bld.sop2(aco_opcode::s_mul_i32, bld.def(s1), wave_id,
8222 Operand::c32(ctx->program->wave_size));
8223 Temp thread_id = emit_mbcnt(ctx, bld.tmp(v1));
8225 bld.vadd32(Definition(get_ssa_temp(ctx, &instr->dest.ssa)), temp, thread_id);
8227 bld.copy(Definition(get_ssa_temp(ctx, &instr->dest.ssa)),
8228 get_arg(ctx, ctx->args->ac.vs_rel_patch_id));
8231 } else if (ctx->stage.hw == HWStage::GS || ctx->stage.hw == HWStage::NGG) {
8232 bld.copy(Definition(get_ssa_temp(ctx, &instr->dest.ssa)), thread_id_in_threadgroup(ctx));
8234 } else if (ctx->program->workgroup_size <= ctx->program->wave_size) {
8235 emit_mbcnt(ctx, get_ssa_temp(ctx, &instr->dest.ssa));
8239 Temp id = emit_mbcnt(ctx, bld.tmp(v1));
8241 /* The tg_size bits [6:11] contain the subgroup id,
8242 * we need this multiplied by the wave size, and then OR the thread id to it.
8244 if (ctx->program->wave_size == 64) {
8245 /* After the s_and the bits are already multiplied by 64 (left shifted by 6) so we can just
8246 * feed that to v_or */
8247 Temp tg_num = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc),
8248 Operand::c32(0xfc0u), get_arg(ctx, ctx->args->ac.tg_size));
8249 bld.vop2(aco_opcode::v_or_b32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)), tg_num,
8252 /* Extract the bit field and multiply the result by 32 (left shift by 5), then do the OR */
8254 Temp tg_num = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc),
8255 get_arg(ctx, ctx->args->ac.tg_size), Operand::c32(0x6u | (0x6u << 16)));
8256 bld.vop3(aco_opcode::v_lshl_or_b32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)),
8257 tg_num, Operand::c32(0x5u), id);
8261 case nir_intrinsic_load_subgroup_id: {
8262 if (ctx->stage == compute_cs) {
8263 bld.sop2(aco_opcode::s_bfe_u32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)),
8264 bld.def(s1, scc), get_arg(ctx, ctx->args->ac.tg_size),
8265 Operand::c32(0x6u | (0x6u << 16)));
8266 } else if (ctx->stage.hw == HWStage::NGG) {
8267 /* Get the id of the current wave within the threadgroup (workgroup) */
8268 bld.sop2(aco_opcode::s_bfe_u32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)),
8269 bld.def(s1, scc), get_arg(ctx, ctx->args->ac.merged_wave_info),
8270 Operand::c32(24u | (4u << 16)));
8272 bld.copy(Definition(get_ssa_temp(ctx, &instr->dest.ssa)), Operand::zero());
8276 case nir_intrinsic_load_subgroup_invocation: {
8277 emit_mbcnt(ctx, get_ssa_temp(ctx, &instr->dest.ssa));
8280 case nir_intrinsic_load_num_subgroups: {
8281 if (ctx->stage == compute_cs)
8282 bld.sop2(aco_opcode::s_and_b32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)),
8283 bld.def(s1, scc), Operand::c32(0x3fu), get_arg(ctx, ctx->args->ac.tg_size));
8284 else if (ctx->stage.hw == HWStage::NGG)
8285 bld.sop2(aco_opcode::s_bfe_u32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)),
8286 bld.def(s1, scc), get_arg(ctx, ctx->args->ac.merged_wave_info),
8287 Operand::c32(28u | (4u << 16)));
8289 bld.copy(Definition(get_ssa_temp(ctx, &instr->dest.ssa)), Operand::c32(0x1u));
8292 case nir_intrinsic_ballot: {
8293 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
8294 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8296 if (instr->src[0].ssa->bit_size == 1) {
8297 assert(src.regClass() == bld.lm);
8298 } else if (instr->src[0].ssa->bit_size == 32 && src.regClass() == v1) {
8299 src = bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(bld.lm), Operand::zero(), src);
8300 } else if (instr->src[0].ssa->bit_size == 64 && src.regClass() == v2) {
8301 src = bld.vopc(aco_opcode::v_cmp_lg_u64, bld.def(bld.lm), Operand::zero(), src);
8303 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
8306 /* Make sure that all inactive lanes return zero.
8307 * Value-numbering might remove the comparison above */
8308 src = bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm));
8309 if (dst.size() != bld.lm.size()) {
8310 /* Wave32 with ballot size set to 64 */
8312 src = bld.pseudo(aco_opcode::p_create_vector, bld.def(dst.regClass()), src, Operand::zero());
8315 emit_wqm(bld, src, dst);
8318 case nir_intrinsic_shuffle:
8319 case nir_intrinsic_read_invocation: {
8320 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
8321 if (!nir_src_is_divergent(instr->src[0])) {
8322 emit_uniform_subgroup(ctx, instr, src);
8324 Temp tid = get_ssa_temp(ctx, instr->src[1].ssa);
8325 if (instr->intrinsic == nir_intrinsic_read_invocation ||
8326 !nir_src_is_divergent(instr->src[1]))
8327 tid = bld.as_uniform(tid);
8328 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8330 if (instr->dest.ssa.bit_size != 1)
8331 src = as_vgpr(ctx, src);
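/* The actual lane read: sub-dword and 64-bit sources go through one or two 32-bit
 * bpermutes (emit_bpermute), while 1-bit booleans are handled by testing the selected
 * bit of the mask directly, on the SALU for a uniform lane index or with a VALU shift
 * otherwise. */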
8333 if (src.regClass() == v1b || src.regClass() == v2b) {
8334 Temp tmp = bld.tmp(v1);
8335 tmp = emit_wqm(bld, emit_bpermute(ctx, bld, tid, src), tmp);
8336 if (dst.type() == RegType::vgpr)
8337 bld.pseudo(aco_opcode::p_split_vector, Definition(dst),
8338 bld.def(src.regClass() == v1b ? v3b : v2b), tmp);
8340 bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), tmp);
8341 } else if (src.regClass() == v1) {
8342 emit_wqm(bld, emit_bpermute(ctx, bld, tid, src), dst);
8343 } else if (src.regClass() == v2) {
8344 Temp lo = bld.tmp(v1), hi = bld.tmp(v1);
8345 bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), src);
8346 lo = emit_wqm(bld, emit_bpermute(ctx, bld, tid, lo));
8347 hi = emit_wqm(bld, emit_bpermute(ctx, bld, tid, hi));
8348 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, hi);
8349 emit_split_vector(ctx, dst, 2);
8350 } else if (instr->dest.ssa.bit_size == 1 && tid.regClass() == s1) {
8351 assert(src.regClass() == bld.lm);
8352 Temp tmp = bld.sopc(Builder::s_bitcmp1, bld.def(s1, scc), src, tid);
8353 bool_to_vector_condition(ctx, emit_wqm(bld, tmp), dst);
8354 } else if (instr->dest.ssa.bit_size == 1 && tid.regClass() == v1) {
8355 assert(src.regClass() == bld.lm);
8357 if (ctx->program->chip_class <= GFX7)
8358 tmp = bld.vop3(aco_opcode::v_lshr_b64, bld.def(v2), src, tid);
8359 else if (ctx->program->wave_size == 64)
8360 tmp = bld.vop3(aco_opcode::v_lshrrev_b64, bld.def(v2), tid, src);
8362 tmp = bld.vop2_e64(aco_opcode::v_lshrrev_b32, bld.def(v1), tid, src);
8363 tmp = emit_extract_vector(ctx, tmp, 0, v1);
8364 tmp = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand::c32(1u), tmp);
8365 emit_wqm(bld, bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(bld.lm), Operand::zero(), tmp),
8368 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
8373 case nir_intrinsic_load_sample_id: {
8374 bld.vop3(aco_opcode::v_bfe_u32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)),
8375 get_arg(ctx, ctx->args->ac.ancillary), Operand::c32(8u), Operand::c32(4u));
8378 case nir_intrinsic_read_first_invocation: {
8379 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
8380 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8381 if (src.regClass() == v1b || src.regClass() == v2b || src.regClass() == v1) {
8382 emit_wqm(bld, bld.vop1(aco_opcode::v_readfirstlane_b32, bld.def(s1), src), dst);
8383 } else if (src.regClass() == v2) {
8384 Temp lo = bld.tmp(v1), hi = bld.tmp(v1);
8385 bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), src);
8386 lo = emit_wqm(bld, bld.vop1(aco_opcode::v_readfirstlane_b32, bld.def(s1), lo));
8387 hi = emit_wqm(bld, bld.vop1(aco_opcode::v_readfirstlane_b32, bld.def(s1), hi));
8388 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, hi);
8389 emit_split_vector(ctx, dst, 2);
8390 } else if (instr->dest.ssa.bit_size == 1) {
8391 assert(src.regClass() == bld.lm);
8392 Temp tmp = bld.sopc(Builder::s_bitcmp1, bld.def(s1, scc), src,
8393 bld.sop1(Builder::s_ff1_i32, bld.def(s1), Operand(exec, bld.lm)));
8394 bool_to_vector_condition(ctx, emit_wqm(bld, tmp), dst);
8396 bld.copy(Definition(dst), src);
8400 case nir_intrinsic_vote_all: {
8401 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
8402 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8403 assert(src.regClass() == bld.lm);
8404 assert(dst.regClass() == bld.lm);
8407 bld.sop2(Builder::s_andn2, bld.def(bld.lm), bld.def(s1, scc), Operand(exec, bld.lm), src)
8410 Temp cond = bool_to_vector_condition(ctx, emit_wqm(bld, tmp));
8411 bld.sop1(Builder::s_not, Definition(dst), bld.def(s1, scc), cond);
8414 case nir_intrinsic_vote_any: {
8415 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
8416 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8417 assert(src.regClass() == bld.lm);
8418 assert(dst.regClass() == bld.lm);
8420 Temp tmp = bool_to_scalar_condition(ctx, src);
8421 bool_to_vector_condition(ctx, emit_wqm(bld, tmp), dst);
8424 case nir_intrinsic_reduce:
8425 case nir_intrinsic_inclusive_scan:
8426 case nir_intrinsic_exclusive_scan: {
8427 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
8428 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8429 nir_op op = (nir_op)nir_intrinsic_reduction_op(instr);
8430 unsigned cluster_size =
8431 instr->intrinsic == nir_intrinsic_reduce ? nir_intrinsic_cluster_size(instr) : 0;
8432 cluster_size = util_next_power_of_two(
8433 MIN2(cluster_size ? cluster_size : ctx->program->wave_size, ctx->program->wave_size));
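/* cluster_size == 0 means the whole subgroup; clamp it to the wave size and round up
 * to a power of two, which is the shape the reduction lowering expects. */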
8435 if (!nir_src_is_divergent(instr->src[0]) && cluster_size == ctx->program->wave_size &&
8436 instr->dest.ssa.bit_size != 1) {
8437 /* We use divergence analysis to assign the regclass, so check if it's
8438 * working as expected */
8439 ASSERTED bool expected_divergent = instr->intrinsic == nir_intrinsic_exclusive_scan;
8440 if (instr->intrinsic == nir_intrinsic_inclusive_scan)
8441 expected_divergent = op == nir_op_iadd || op == nir_op_fadd || op == nir_op_ixor;
8442 assert(nir_dest_is_divergent(instr->dest) == expected_divergent);
8444 if (instr->intrinsic == nir_intrinsic_reduce) {
8445 if (emit_uniform_reduce(ctx, instr))
8447 } else if (emit_uniform_scan(ctx, instr)) {
8452 if (instr->dest.ssa.bit_size == 1) {
8453 if (op == nir_op_imul || op == nir_op_umin || op == nir_op_imin)
8455 else if (op == nir_op_iadd)
8457 else if (op == nir_op_umax || op == nir_op_imax)
8459 assert(op == nir_op_iand || op == nir_op_ior || op == nir_op_ixor);
8461 switch (instr->intrinsic) {
8462 case nir_intrinsic_reduce:
8463 emit_wqm(bld, emit_boolean_reduce(ctx, op, cluster_size, src), dst);
8465 case nir_intrinsic_exclusive_scan:
8466 emit_wqm(bld, emit_boolean_exclusive_scan(ctx, op, src), dst);
8468 case nir_intrinsic_inclusive_scan:
8469 emit_wqm(bld, emit_boolean_inclusive_scan(ctx, op, src), dst);
8471 default: assert(false);
8473 } else if (cluster_size == 1) {
8474 bld.copy(Definition(dst), src);
8476 unsigned bit_size = instr->src[0].ssa->bit_size;
8478 src = emit_extract_vector(ctx, src, 0, RegClass::get(RegType::vgpr, bit_size / 8));
8480 ReduceOp reduce_op = get_reduce_op(op, bit_size);
8483 switch (instr->intrinsic) {
8484 case nir_intrinsic_reduce: aco_op = aco_opcode::p_reduce; break;
8485 case nir_intrinsic_inclusive_scan: aco_op = aco_opcode::p_inclusive_scan; break;
8486 case nir_intrinsic_exclusive_scan: aco_op = aco_opcode::p_exclusive_scan; break;
8487 default: unreachable("unknown reduce intrinsic");
8490 Temp tmp_dst = emit_reduction_instr(ctx, aco_op, reduce_op, cluster_size,
8491 bld.def(dst.regClass()), src);
8492 emit_wqm(bld, tmp_dst, dst);
8496 case nir_intrinsic_quad_broadcast:
8497 case nir_intrinsic_quad_swap_horizontal:
8498 case nir_intrinsic_quad_swap_vertical:
8499 case nir_intrinsic_quad_swap_diagonal:
8500 case nir_intrinsic_quad_swizzle_amd: {
8501 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
8503 if (!nir_dest_is_divergent(instr->dest)) {
8504 emit_uniform_subgroup(ctx, instr, src);
8508 /* Quad broadcast lane. */
8510 /* Use VALU for the bool instructions that don't have a SALU-only special case. */
8511 bool bool_use_valu = instr->dest.ssa.bit_size == 1;
8513 uint16_t dpp_ctrl = 0;
8515 switch (instr->intrinsic) {
8516 case nir_intrinsic_quad_swap_horizontal: dpp_ctrl = dpp_quad_perm(1, 0, 3, 2); break;
8517 case nir_intrinsic_quad_swap_vertical: dpp_ctrl = dpp_quad_perm(2, 3, 0, 1); break;
8518 case nir_intrinsic_quad_swap_diagonal: dpp_ctrl = dpp_quad_perm(3, 2, 1, 0); break;
8519 case nir_intrinsic_quad_swizzle_amd: dpp_ctrl = nir_intrinsic_swizzle_mask(instr); break;
8520 case nir_intrinsic_quad_broadcast:
8521 lane = nir_src_as_const_value(instr->src[1])->u32;
8522 dpp_ctrl = dpp_quad_perm(lane, lane, lane, lane);
8523 bool_use_valu = false;
8528 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8533 src = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), Operand::zero(),
8534 Operand::c32(-1), src);
8535 else if (instr->dest.ssa.bit_size != 1)
8536 src = as_vgpr(ctx, src);
8538 /* Setup temporary destination. */
8541 else if (ctx->program->stage == fragment_fs)
8542 tmp = bld.tmp(dst.regClass());
8544 if (instr->dest.ssa.bit_size == 1 && instr->intrinsic == nir_intrinsic_quad_broadcast) {
8545 /* Special case for quad broadcast using SALU only. */
8546 assert(src.regClass() == bld.lm && tmp.regClass() == bld.lm);
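/* 0x11111111 repeats every four lanes, so (0x11111111 << lane) keeps exactly bit `lane`
 * of each quad. After masking the bool with exec and with this pattern, s_wqm sets all
 * four bits of any quad whose selected bit was set, i.e. a per-quad broadcast of the
 * boolean from lane `lane`. */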
8548 uint32_t half_mask = 0x11111111u << lane;
8549 Operand mask_tmp = bld.lm.bytes() == 4
8550 ? Operand::c32(half_mask)
8551 : bld.pseudo(aco_opcode::p_create_vector, bld.def(bld.lm),
8552 Operand::c32(half_mask), Operand::c32(half_mask));
8555 src = bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm));
8556 src = bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), mask_tmp, src);
8557 bld.sop1(Builder::s_wqm, Definition(tmp), src);
8558 } else if (instr->dest.ssa.bit_size <= 32 || bool_use_valu) {
8559 unsigned excess_bytes = bool_use_valu ? 0 : 4 - instr->dest.ssa.bit_size / 8;
8560 Definition def = excess_bytes ? bld.def(v1) : Definition(tmp);
8562 if (ctx->program->chip_class >= GFX8)
8563 bld.vop1_dpp(aco_opcode::v_mov_b32, def, src, dpp_ctrl);
8565 bld.ds(aco_opcode::ds_swizzle_b32, def, src, (1 << 15) | dpp_ctrl);
8568 bld.pseudo(aco_opcode::p_split_vector, Definition(tmp),
8569 bld.def(RegClass::get(tmp.type(), excess_bytes)), def.getTemp());
8570 } else if (instr->dest.ssa.bit_size == 64) {
8571 Temp lo = bld.tmp(v1), hi = bld.tmp(v1);
8572 bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), src);
8574 if (ctx->program->chip_class >= GFX8) {
8575 lo = bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), lo, dpp_ctrl);
8576 hi = bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), hi, dpp_ctrl);
8578 lo = bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), lo, (1 << 15) | dpp_ctrl);
8579 hi = bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), hi, (1 << 15) | dpp_ctrl);
8582 bld.pseudo(aco_opcode::p_create_vector, Definition(tmp), lo, hi);
8583 emit_split_vector(ctx, tmp, 2);
8585 isel_err(&instr->instr, "Unimplemented NIR quad group instruction bit size.");
8588 if (tmp.id() != dst.id()) {
8590 tmp = bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(bld.lm), Operand::zero(), tmp);
8592 /* Vulkan spec 9.25: Helper invocations must be active for quad group instructions. */
8593 emit_wqm(bld, tmp, dst, true);
8598 case nir_intrinsic_masked_swizzle_amd: {
8599 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
8600 if (!nir_dest_is_divergent(instr->dest)) {
8601 emit_uniform_subgroup(ctx, instr, src);
8604 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8605 uint32_t mask = nir_intrinsic_swizzle_mask(instr);
8607 if (instr->dest.ssa.bit_size != 1)
8608 src = as_vgpr(ctx, src);
8610 if (instr->dest.ssa.bit_size == 1) {
8611 assert(src.regClass() == bld.lm);
8612 src = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), Operand::zero(),
8613 Operand::c32(-1), src);
8614 src = emit_masked_swizzle(ctx, bld, src, mask);
8615 Temp tmp = bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(bld.lm), Operand::zero(), src);
8616 emit_wqm(bld, tmp, dst);
8617 } else if (dst.regClass() == v1b) {
8618 Temp tmp = emit_wqm(bld, emit_masked_swizzle(ctx, bld, src, mask));
8619 emit_extract_vector(ctx, tmp, 0, dst);
8620 } else if (dst.regClass() == v2b) {
8621 Temp tmp = emit_wqm(bld, emit_masked_swizzle(ctx, bld, src, mask));
8622 emit_extract_vector(ctx, tmp, 0, dst);
8623 } else if (dst.regClass() == v1) {
8624 emit_wqm(bld, emit_masked_swizzle(ctx, bld, src, mask), dst);
8625 } else if (dst.regClass() == v2) {
8626 Temp lo = bld.tmp(v1), hi = bld.tmp(v1);
8627 bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), src);
8628 lo = emit_wqm(bld, emit_masked_swizzle(ctx, bld, lo, mask));
8629 hi = emit_wqm(bld, emit_masked_swizzle(ctx, bld, hi, mask));
8630 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, hi);
8631 emit_split_vector(ctx, dst, 2);
8633 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
8637 case nir_intrinsic_write_invocation_amd: {
8638 Temp src = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[0].ssa));
8639 Temp val = bld.as_uniform(get_ssa_temp(ctx, instr->src[1].ssa));
8640 Temp lane = bld.as_uniform(get_ssa_temp(ctx, instr->src[2].ssa));
8641 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8642 if (dst.regClass() == v1) {
8643 /* src2 is ignored for writelane. RA assigns the same reg for dst */
8644 emit_wqm(bld, bld.writelane(bld.def(v1), val, lane, src), dst);
8645 } else if (dst.regClass() == v2) {
8646 Temp src_lo = bld.tmp(v1), src_hi = bld.tmp(v1);
8647 Temp val_lo = bld.tmp(s1), val_hi = bld.tmp(s1);
8648 bld.pseudo(aco_opcode::p_split_vector, Definition(src_lo), Definition(src_hi), src);
8649 bld.pseudo(aco_opcode::p_split_vector, Definition(val_lo), Definition(val_hi), val);
8650 Temp lo = emit_wqm(bld, bld.writelane(bld.def(v1), val_lo, lane, src_lo));
8651 Temp hi = emit_wqm(bld, bld.writelane(bld.def(v1), val_hi, lane, src_hi));
8652 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, hi);
8653 emit_split_vector(ctx, dst, 2);
8655 isel_err(&instr->instr, "Unimplemented NIR instr bit size");
8659 case nir_intrinsic_mbcnt_amd: {
8660 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
8661 Temp add_src = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[1].ssa));
8662 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8663 /* Fit 64-bit mask for wave32 */
8664 src = emit_extract_vector(ctx, src, 0, RegClass(src.type(), bld.lm.size()));
8665 Temp wqm_tmp = emit_mbcnt(ctx, bld.tmp(v1), Operand(src), Operand(add_src));
8666 emit_wqm(bld, wqm_tmp, dst);
8669 case nir_intrinsic_byte_permute_amd: {
8670 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8671 assert(dst.regClass() == v1);
8672 assert(ctx->program->chip_class >= GFX8);
8673 bld.vop3(aco_opcode::v_perm_b32, Definition(dst), get_ssa_temp(ctx, instr->src[0].ssa),
8674 as_vgpr(ctx, get_ssa_temp(ctx, instr->src[1].ssa)),
8675 as_vgpr(ctx, get_ssa_temp(ctx, instr->src[2].ssa)));
8678 case nir_intrinsic_lane_permute_16_amd: {
8679 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
8680 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8681 assert(ctx->program->chip_class >= GFX10);
8683 if (src.regClass() == s1) {
8684 bld.copy(Definition(dst), src);
8685 } else if (dst.regClass() == v1 && src.regClass() == v1) {
8686 bld.vop3(aco_opcode::v_permlane16_b32, Definition(dst), src,
8687 bld.as_uniform(get_ssa_temp(ctx, instr->src[1].ssa)),
8688 bld.as_uniform(get_ssa_temp(ctx, instr->src[2].ssa)));
8690 isel_err(&instr->instr, "Unimplemented lane_permute_16_amd");
8694 case nir_intrinsic_load_helper_invocation:
8695 case nir_intrinsic_is_helper_invocation: {
8696 /* load_helper() after demote() gets lowered to is_helper().
8697 * Otherwise, these two behave the same. */
8698 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8699 bld.pseudo(aco_opcode::p_is_helper, Definition(dst), Operand(exec, bld.lm));
8700 ctx->block->kind |= block_kind_needs_lowering;
8701 ctx->program->needs_exact = true;
8704 case nir_intrinsic_demote:
8705 bld.pseudo(aco_opcode::p_demote_to_helper, Operand::c32(-1u));
8707 if (ctx->block->loop_nest_depth || ctx->cf_info.parent_if.is_divergent)
8708 ctx->cf_info.exec_potentially_empty_discard = true;
8709 ctx->block->kind |= block_kind_uses_discard;
8710 ctx->program->needs_exact = true;
8712 case nir_intrinsic_demote_if: {
8713 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
8714 assert(src.regClass() == bld.lm);
8716 Operand cond = bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm));
8717 bld.pseudo(aco_opcode::p_demote_to_helper, cond);
8719 if (ctx->block->loop_nest_depth || ctx->cf_info.parent_if.is_divergent)
8720 ctx->cf_info.exec_potentially_empty_discard = true;
8721 ctx->block->kind |= block_kind_uses_discard;
8722 ctx->program->needs_exact = true;
8725 case nir_intrinsic_terminate:
8726 case nir_intrinsic_terminate_if:
8727 case nir_intrinsic_discard:
8728 case nir_intrinsic_discard_if: {
8729 Operand cond = Operand::c32(-1u);
8730 if (instr->intrinsic == nir_intrinsic_discard_if ||
8731 instr->intrinsic == nir_intrinsic_terminate_if) {
8732 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
8733 assert(src.regClass() == bld.lm);
8735 cond = bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm));
8738 bld.pseudo(aco_opcode::p_discard_if, cond);
8740 if (ctx->block->loop_nest_depth || ctx->cf_info.parent_if.is_divergent)
8741 ctx->cf_info.exec_potentially_empty_discard = true;
8742 ctx->block->kind |= block_kind_uses_discard;
8743 ctx->program->needs_exact = true;
8746 case nir_intrinsic_first_invocation: {
8747 emit_wqm(bld, bld.sop1(Builder::s_ff1_i32, bld.def(s1), Operand(exec, bld.lm)),
8748 get_ssa_temp(ctx, &instr->dest.ssa));
8751 case nir_intrinsic_last_invocation: {
8752 Temp flbit = bld.sop1(Builder::s_flbit_i32, bld.def(s1), Operand(exec, bld.lm));
8753 Temp last = bld.sop2(aco_opcode::s_sub_i32, bld.def(s1), bld.def(s1, scc),
8754 Operand::c32(ctx->program->wave_size - 1u), flbit);
8755 emit_wqm(bld, last, get_ssa_temp(ctx, &instr->dest.ssa));
8758 case nir_intrinsic_elect: {
8759 /* p_elect is lowered in aco_insert_exec_mask.
8760 * Use exec as an operand so value numbering and the pre-RA optimizer won't recognize
8761 * two p_elect with different exec masks as the same.
8763 Temp elected = bld.pseudo(aco_opcode::p_elect, bld.def(bld.lm), Operand(exec, bld.lm));
8764 emit_wqm(bld, elected, get_ssa_temp(ctx, &instr->dest.ssa));
8765 ctx->block->kind |= block_kind_needs_lowering;
8768 case nir_intrinsic_shader_clock: {
8769 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8770 if (nir_intrinsic_memory_scope(instr) == NIR_SCOPE_SUBGROUP &&
8771 ctx->options->chip_class >= GFX10_3) {
8772 /* "((size - 1) << 11) | register" (SHADER_CYCLES is encoded as register 29) */
8773 Temp clock = bld.sopk(aco_opcode::s_getreg_b32, bld.def(s1), ((20 - 1) << 11) | 29);
8774 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), clock, Operand::zero());
8776 aco_opcode opcode = nir_intrinsic_memory_scope(instr) == NIR_SCOPE_DEVICE
8777 ? aco_opcode::s_memrealtime
8778 : aco_opcode::s_memtime;
8779 bld.smem(opcode, Definition(dst), memory_sync_info(0, semantic_volatile));
8781 emit_split_vector(ctx, dst, 2);
8784 case nir_intrinsic_load_vertex_id_zero_base: {
8785 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8786 bld.copy(Definition(dst), get_arg(ctx, ctx->args->ac.vertex_id));
8789 case nir_intrinsic_load_first_vertex: {
8790 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8791 bld.copy(Definition(dst), get_arg(ctx, ctx->args->ac.base_vertex));
8794 case nir_intrinsic_load_base_instance: {
8795 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8796 bld.copy(Definition(dst), get_arg(ctx, ctx->args->ac.start_instance));
8799 case nir_intrinsic_load_instance_id: {
8800 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8801 bld.copy(Definition(dst), get_arg(ctx, ctx->args->ac.instance_id));
8804 case nir_intrinsic_load_draw_id: {
8805 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8806 bld.copy(Definition(dst), get_arg(ctx, ctx->args->ac.draw_id));
8809 case nir_intrinsic_load_invocation_id: {
8810 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8812 if (ctx->shader->info.stage == MESA_SHADER_GEOMETRY) {
8813 if (ctx->options->chip_class >= GFX10)
8814 bld.vop2_e64(aco_opcode::v_and_b32, Definition(dst), Operand::c32(127u),
8815 get_arg(ctx, ctx->args->ac.gs_invocation_id));
8817 bld.copy(Definition(dst), get_arg(ctx, ctx->args->ac.gs_invocation_id));
8818 } else if (ctx->shader->info.stage == MESA_SHADER_TESS_CTRL) {
8819 bld.vop3(aco_opcode::v_bfe_u32, Definition(dst), get_arg(ctx, ctx->args->ac.tcs_rel_ids),
8820 Operand::c32(8u), Operand::c32(5u));
8822 unreachable("Unsupported stage for load_invocation_id");
8827 case nir_intrinsic_load_primitive_id: {
8828 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8830 switch (ctx->shader->info.stage) {
8831 case MESA_SHADER_GEOMETRY:
8832 bld.copy(Definition(dst), get_arg(ctx, ctx->args->ac.gs_prim_id));
8834 case MESA_SHADER_TESS_CTRL:
8835 bld.copy(Definition(dst), get_arg(ctx, ctx->args->ac.tcs_patch_id));
8837 case MESA_SHADER_TESS_EVAL:
8838 bld.copy(Definition(dst), get_arg(ctx, ctx->args->ac.tes_patch_id));
8841 if (ctx->stage.hw == HWStage::NGG && !ctx->stage.has(SWStage::GS)) {
8842 /* In case of NGG, the GS threads always have the primitive ID
8843 * even if there is no SW GS. */
8844 bld.copy(Definition(dst), get_arg(ctx, ctx->args->ac.gs_prim_id));
8847 unreachable("Unimplemented shader stage for nir_intrinsic_load_primitive_id");
8852 case nir_intrinsic_emit_vertex_with_counter: {
8853 assert(ctx->stage.hw == HWStage::GS);
8854 visit_emit_vertex_with_counter(ctx, instr);
8857 case nir_intrinsic_end_primitive_with_counter: {
8858 if (ctx->stage.hw != HWStage::NGG) {
8859 unsigned stream = nir_intrinsic_stream_id(instr);
8860 bld.sopp(aco_opcode::s_sendmsg, bld.m0(ctx->gs_wave_id), -1,
8861 sendmsg_gs(true, false, stream));
8865 case nir_intrinsic_set_vertex_and_primitive_count: {
8866 assert(ctx->stage.hw == HWStage::GS);
8867 /* unused in the legacy pipeline, the HW keeps track of this for us */
8870 case nir_intrinsic_has_input_vertex_amd:
8871 case nir_intrinsic_has_input_primitive_amd: {
8872 assert(ctx->stage.hw == HWStage::NGG);
8873 unsigned i = instr->intrinsic == nir_intrinsic_has_input_vertex_amd ? 0 : 1;
8874 bld.copy(Definition(get_ssa_temp(ctx, &instr->dest.ssa)), merged_wave_info_to_mask(ctx, i));
8877 case nir_intrinsic_export_vertex_amd: {
8878 ctx->block->kind |= block_kind_export_end;
8879 create_vs_exports(ctx);
8882 case nir_intrinsic_export_primitive_amd: {
8883 Temp prim_ch1 = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[0].ssa));
8884 create_primitive_exports(ctx, prim_ch1);
8887 case nir_intrinsic_alloc_vertices_and_primitives_amd: {
8888 assert(ctx->stage.hw == HWStage::NGG);
8889 Temp num_vertices = get_ssa_temp(ctx, instr->src[0].ssa);
8890 Temp num_primitives = get_ssa_temp(ctx, instr->src[1].ssa);
8891 ngg_emit_sendmsg_gs_alloc_req(ctx, num_vertices, num_primitives);
8894 case nir_intrinsic_gds_atomic_add_amd: {
8895 Temp store_val = get_ssa_temp(ctx, instr->src[0].ssa);
8896 Temp gds_addr = get_ssa_temp(ctx, instr->src[1].ssa);
8897 Temp m0_val = get_ssa_temp(ctx, instr->src[2].ssa);
8898 Operand m = bld.m0((Temp)bld.copy(bld.def(s1, m0), bld.as_uniform(m0_val)));
8899 bld.ds(aco_opcode::ds_add_u32, as_vgpr(ctx, gds_addr), as_vgpr(ctx, store_val), m, 0u, 0u,
8903 case nir_intrinsic_load_sbt_amd: visit_load_sbt_amd(ctx, instr); break;
8904 case nir_intrinsic_bvh64_intersect_ray_amd: visit_bvh64_intersect_ray_amd(ctx, instr); break;
8905 case nir_intrinsic_overwrite_vs_arguments_amd: {
8906 ctx->arg_temps[ctx->args->ac.vertex_id.arg_index] = get_ssa_temp(ctx, instr->src[0].ssa);
8907 ctx->arg_temps[ctx->args->ac.instance_id.arg_index] = get_ssa_temp(ctx, instr->src[1].ssa);
8910 case nir_intrinsic_overwrite_tes_arguments_amd: {
8911 ctx->arg_temps[ctx->args->ac.tes_u.arg_index] = get_ssa_temp(ctx, instr->src[0].ssa);
8912 ctx->arg_temps[ctx->args->ac.tes_v.arg_index] = get_ssa_temp(ctx, instr->src[1].ssa);
8913 ctx->arg_temps[ctx->args->ac.tes_rel_patch_id.arg_index] =
8914 get_ssa_temp(ctx, instr->src[2].ssa);
8915 ctx->arg_temps[ctx->args->ac.tes_patch_id.arg_index] = get_ssa_temp(ctx, instr->src[3].ssa);
8918 case nir_intrinsic_load_force_vrs_rates_amd: {
8919 bld.copy(Definition(get_ssa_temp(ctx, &instr->dest.ssa)),
8920 get_arg(ctx, ctx->args->ac.force_vrs_rates));
8923 case nir_intrinsic_load_scalar_arg_amd:
8924 case nir_intrinsic_load_vector_arg_amd: {
8925 assert(nir_intrinsic_base(instr) < ctx->args->ac.arg_count);
8926 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8927 Temp src = ctx->arg_temps[nir_intrinsic_base(instr)];
8929 assert(src.type() == (instr->intrinsic == nir_intrinsic_load_scalar_arg_amd ? RegType::sgpr : RegType::vgpr));
8930 bld.copy(Definition(dst), src);
8931 emit_split_vector(ctx, dst, dst.size());
8935 isel_err(&instr->instr, "Unimplemented intrinsic instr");
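/* Select the derivative components matching the active cube face: `id` is the face index
 * from v_cubeid_f32 (0/1 = ±X, 2/3 = ±Y, 4/5 = ±Z, hence the compares against 2.0 and
 * 4.0 below) and `ma` the signed major-axis value. The outputs are the face-local sc/tc
 * derivatives and twice the absolute major-axis derivative. */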
8943 build_cube_select(isel_context* ctx, Temp ma, Temp id, Temp deriv, Temp* out_ma, Temp* out_sc,
8946 Builder bld(ctx->program, ctx->block);
8948 Temp deriv_x = emit_extract_vector(ctx, deriv, 0, v1);
8949 Temp deriv_y = emit_extract_vector(ctx, deriv, 1, v1);
8950 Temp deriv_z = emit_extract_vector(ctx, deriv, 2, v1);
8952 Operand neg_one = Operand::c32(0xbf800000u);
8953 Operand one = Operand::c32(0x3f800000u);
8954 Operand two = Operand::c32(0x40000000u);
8955 Operand four = Operand::c32(0x40800000u);
8957 Temp is_ma_positive = bld.vopc(aco_opcode::v_cmp_le_f32, bld.def(bld.lm), Operand::zero(), ma);
8958 Temp sgn_ma = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), neg_one, one, is_ma_positive);
8959 Temp neg_sgn_ma = bld.vop2(aco_opcode::v_sub_f32, bld.def(v1), Operand::zero(), sgn_ma);
8961 Temp is_ma_z = bld.vopc(aco_opcode::v_cmp_le_f32, bld.def(bld.lm), four, id);
8962 Temp is_ma_y = bld.vopc(aco_opcode::v_cmp_le_f32, bld.def(bld.lm), two, id);
8963 is_ma_y = bld.sop2(Builder::s_andn2, bld.def(bld.lm), is_ma_y, is_ma_z);
8965 Temp is_not_ma_x = bld.sop2(aco_opcode::s_or_b64, bld.def(bld.lm), bld.def(s1, scc), is_ma_z, is_ma_y);
8968 Temp tmp = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), deriv_z, deriv_x, is_not_ma_x);
8969 Temp sgn = bld.vop2_e64(
8970 aco_opcode::v_cndmask_b32, bld.def(v1),
8971 bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), neg_sgn_ma, sgn_ma, is_ma_z), one, is_ma_y);
8972 *out_sc = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), tmp, sgn);
8975 tmp = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), deriv_y, deriv_z, is_ma_y);
8976 sgn = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), neg_one, sgn_ma, is_ma_y);
8977 *out_tc = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), tmp, sgn);
8980 tmp = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1),
8981 bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), deriv_x, deriv_y, is_ma_y),
8983 tmp = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand::c32(0x7fffffffu), tmp);
8984 *out_ma = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), two, tmp);
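/* Convert (x, y, z[, layer]) cube coordinates into the (sc, tc, id[, layer]) form the
 * image instructions expect: compute the major axis with v_cubema, scale sc/tc by
 * 1/|ma| and add the 1.5 texel bias, take the face index from v_cubeid, and for txd
 * rewrite the user-supplied derivatives through build_cube_select. See also the
 * comments referencing ac_prepare_cube_coords() below. */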
8988 prepare_cube_coords(isel_context* ctx, std::vector<Temp>& coords, Temp* ddx, Temp* ddy,
8989 bool is_deriv, bool is_array)
8991 Builder bld(ctx->program, ctx->block);
8992 Temp ma, tc, sc, id;
8994 ctx->program->chip_class >= GFX10_3 ? aco_opcode::v_fmaak_f32 : aco_opcode::v_madak_f32;
8996 ctx->program->chip_class >= GFX10_3 ? aco_opcode::v_fmamk_f32 : aco_opcode::v_madmk_f32;
8999 coords[3] = bld.vop1(aco_opcode::v_rndne_f32, bld.def(v1), coords[3]);
9001 /* see comment in ac_prepare_cube_coords() */
9002 if (ctx->options->chip_class <= GFX8)
9003 coords[3] = bld.vop2(aco_opcode::v_max_f32, bld.def(v1), Operand::zero(), coords[3]);
9006 ma = bld.vop3(aco_opcode::v_cubema_f32, bld.def(v1), coords[0], coords[1], coords[2]);
9008 aco_ptr<VOP3_instruction> vop3a{
9009 create_instruction<VOP3_instruction>(aco_opcode::v_rcp_f32, asVOP3(Format::VOP1), 1, 1)};
9010 vop3a->operands[0] = Operand(ma);
9011 vop3a->abs[0] = true;
9012 Temp invma = bld.tmp(v1);
9013 vop3a->definitions[0] = Definition(invma);
9014 ctx->block->instructions.emplace_back(std::move(vop3a));
9016 sc = bld.vop3(aco_opcode::v_cubesc_f32, bld.def(v1), coords[0], coords[1], coords[2]);
9018 sc = bld.vop2(madak, bld.def(v1), sc, invma, Operand::c32(0x3fc00000u /*1.5*/));
9020 tc = bld.vop3(aco_opcode::v_cubetc_f32, bld.def(v1), coords[0], coords[1], coords[2]);
9022 tc = bld.vop2(madak, bld.def(v1), tc, invma, Operand::c32(0x3fc00000u /*1.5*/));
9024 id = bld.vop3(aco_opcode::v_cubeid_f32, bld.def(v1), coords[0], coords[1], coords[2]);
9027 sc = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), sc, invma);
9028 tc = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), tc, invma);
9030 for (unsigned i = 0; i < 2; i++) {
9031 /* see comment in ac_prepare_cube_coords() */
9033 Temp deriv_sc, deriv_tc;
9034 build_cube_select(ctx, ma, id, i ? *ddy : *ddx, &deriv_ma, &deriv_sc, &deriv_tc);
9036 deriv_ma = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), deriv_ma, invma);
9038 Temp x = bld.vop2(aco_opcode::v_sub_f32, bld.def(v1),
9039 bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), deriv_sc, invma),
9040 bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), deriv_ma, sc));
9041 Temp y = bld.vop2(aco_opcode::v_sub_f32, bld.def(v1),
9042 bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), deriv_tc, invma),
9043 bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), deriv_ma, tc));
9044 *(i ? ddy : ddx) = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), x, y);
9047 sc = bld.vop2(aco_opcode::v_add_f32, bld.def(v1), Operand::c32(0x3fc00000u /*1.5*/), sc);
9048 tc = bld.vop2(aco_opcode::v_add_f32, bld.def(v1), Operand::c32(0x3fc00000u /*1.5*/), tc);
9052 id = bld.vop2(madmk, bld.def(v1), coords[3], id, Operand::c32(0x41000000u /*8.0*/));
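/* If `vec` is a nir vecN, collect the constant value of each component into cv[] (NULL
 * for non-constant or swizzled components). Used below to fold constant texel offsets
 * into an immediate. */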
9060 get_const_vec(nir_ssa_def* vec, nir_const_value* cv[4])
9062 if (vec->parent_instr->type != nir_instr_type_alu)
9064 nir_alu_instr* vec_instr = nir_instr_as_alu(vec->parent_instr);
9065 if (vec_instr->op != nir_op_vec(vec->num_components))
9068 for (unsigned i = 0; i < vec->num_components; i++) {
9070 cv[i] = vec_instr->src[i].swizzle[0] == 0 ? nir_src_as_const_value(vec_instr->src[i].src) : NULL;
9075 visit_tex(isel_context* ctx, nir_tex_instr* instr)
9077 assert(instr->op != nir_texop_txf_ms && instr->op != nir_texop_samples_identical);
9079 Builder bld(ctx->program, ctx->block);
9080 bool has_bias = false, has_lod = false, level_zero = false, has_compare = false,
9081 has_offset = false, has_ddx = false, has_ddy = false, has_derivs = false,
9082 has_sample_index = false, has_clamped_lod = false;
9083 Temp resource, sampler, bias = Temp(), compare = Temp(), sample_index = Temp(), lod = Temp(),
9084 offset = Temp(), ddx = Temp(), ddy = Temp(), clamped_lod = Temp();
9085 std::vector<Temp> coords;
9086 std::vector<Temp> derivs;
9087 nir_const_value* const_offset[4] = {NULL, NULL, NULL, NULL};
9089 for (unsigned i = 0; i < instr->num_srcs; i++) {
9090 switch (instr->src[i].src_type) {
9091 case nir_tex_src_texture_handle:
9092 resource = bld.as_uniform(get_ssa_temp(ctx, instr->src[i].src.ssa));
9094 case nir_tex_src_sampler_handle:
9095 sampler = bld.as_uniform(get_ssa_temp(ctx, instr->src[i].src.ssa));
9101 bool tg4_integer_workarounds = ctx->options->chip_class <= GFX8 && instr->op == nir_texop_tg4 &&
9102 (instr->dest_type & (nir_type_int | nir_type_uint));
9103 bool tg4_integer_cube_workaround =
9104 tg4_integer_workarounds && instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE;
9106 for (unsigned i = 0; i < instr->num_srcs; i++) {
9107 switch (instr->src[i].src_type) {
9108 case nir_tex_src_coord: {
9109 Temp coord = get_ssa_temp(ctx, instr->src[i].src.ssa);
9110 for (unsigned j = 0; j < coord.size(); j++)
9111 coords.emplace_back(emit_extract_vector(ctx, coord, j, v1));
9114 case nir_tex_src_bias:
9115 bias = get_ssa_temp(ctx, instr->src[i].src.ssa);
9118 case nir_tex_src_lod: {
9119 if (nir_src_is_const(instr->src[i].src) && nir_src_as_uint(instr->src[i].src) == 0) {
9122 lod = get_ssa_temp(ctx, instr->src[i].src.ssa);
9127 case nir_tex_src_min_lod:
9128 clamped_lod = get_ssa_temp(ctx, instr->src[i].src.ssa);
9129 has_clamped_lod = true;
9131 case nir_tex_src_comparator:
9132 if (instr->is_shadow) {
9133 compare = get_ssa_temp(ctx, instr->src[i].src.ssa);
9137 case nir_tex_src_offset:
9138 offset = get_ssa_temp(ctx, instr->src[i].src.ssa);
9139 get_const_vec(instr->src[i].src.ssa, const_offset);
9142 case nir_tex_src_ddx:
9143 ddx = get_ssa_temp(ctx, instr->src[i].src.ssa);
9146 case nir_tex_src_ddy:
9147 ddy = get_ssa_temp(ctx, instr->src[i].src.ssa);
9150 case nir_tex_src_ms_index:
9151 sample_index = get_ssa_temp(ctx, instr->src[i].src.ssa);
9152 has_sample_index = true;
9154 case nir_tex_src_texture_offset:
9155 case nir_tex_src_sampler_offset:
9160 if (instr->op == nir_texop_txs && instr->sampler_dim == GLSL_SAMPLER_DIM_BUF)
9161 return get_buffer_size(ctx, resource, get_ssa_temp(ctx, &instr->dest.ssa));
9163 if (instr->op == nir_texop_texture_samples) {
9164 get_image_samples(ctx, Definition(get_ssa_temp(ctx, &instr->dest.ssa)), resource);
9168 if (has_offset && instr->op != nir_texop_txf) {
9169 aco_ptr<Instruction> tmp_instr;
9170 Temp acc, pack = Temp();
9172 uint32_t pack_const = 0;
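/* Pack the texel offset into one dword, 6 bits per component at byte offsets 0/8/16:
 * constant components are folded into pack_const, variable ones are masked and shifted
 * at runtime on the SALU or VALU depending on where the offset vector lives. */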
9173 for (unsigned i = 0; i < offset.size(); i++) {
9174 if (!const_offset[i])
9176 pack_const |= (const_offset[i]->u32 & 0x3Fu) << (8u * i);
9179 if (offset.type() == RegType::sgpr) {
9180 for (unsigned i = 0; i < offset.size(); i++) {
9181 if (const_offset[i])
9184 acc = emit_extract_vector(ctx, offset, i, s1);
9185 acc = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), acc,
9186 Operand::c32(0x3Fu));
9189 acc = bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.def(s1, scc), acc,
9190 Operand::c32(8u * i));
9193 if (pack == Temp()) {
9196 pack = bld.sop2(aco_opcode::s_or_b32, bld.def(s1), bld.def(s1, scc), pack, acc);
9200 if (pack_const && pack != Temp())
9201 pack = bld.sop2(aco_opcode::s_or_b32, bld.def(s1), bld.def(s1, scc),
9202 Operand::c32(pack_const), pack);
9204 for (unsigned i = 0; i < offset.size(); i++) {
9205 if (const_offset[i])
9208 acc = emit_extract_vector(ctx, offset, i, v1);
9209 acc = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand::c32(0x3Fu), acc);
9212 acc = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand::c32(8u * i), acc);
9215 if (pack == Temp()) {
9218 pack = bld.vop2(aco_opcode::v_or_b32, bld.def(v1), pack, acc);
9222 if (pack_const && pack != Temp())
9223 pack = bld.vop2(aco_opcode::v_or_b32, bld.def(v1), Operand::c32(pack_const), pack);
9225 if (pack_const && pack == Temp())
9226 offset = bld.copy(bld.def(v1), Operand::c32(pack_const));
9227 else if (pack == Temp())
9233 if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE && instr->coord_components)
9234 prepare_cube_coords(ctx, coords, &ddx, &ddy, instr->op == nir_texop_txd,
9235 instr->is_array && instr->op != nir_texop_lod);
9237 /* pack derivatives */
9238 if (has_ddx || has_ddy) {
9239 if (instr->sampler_dim == GLSL_SAMPLER_DIM_1D && ctx->options->chip_class == GFX9) {
9240 assert(has_ddx && has_ddy && ddx.size() == 1 && ddy.size() == 1);
9241 Temp zero = bld.copy(bld.def(v1), Operand::zero());
9242 derivs = {ddx, zero, ddy, zero};
9244 for (unsigned i = 0; has_ddx && i < ddx.size(); i++)
9245 derivs.emplace_back(emit_extract_vector(ctx, ddx, i, v1));
9246 for (unsigned i = 0; has_ddy && i < ddy.size(); i++)
9247 derivs.emplace_back(emit_extract_vector(ctx, ddy, i, v1));
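/* Note that on GFX9, 1D textures are laid out as 2D, so the branch above
 * pads each of ddx/ddy with a zero derivative for the extra coordinate. */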
9252 if (instr->coord_components > 1 && instr->sampler_dim == GLSL_SAMPLER_DIM_1D &&
9253 instr->is_array && instr->op != nir_texop_txf)
9254 coords[1] = bld.vop1(aco_opcode::v_rndne_f32, bld.def(v1), coords[1]);
9256 if (instr->coord_components > 2 &&
9257 (instr->sampler_dim == GLSL_SAMPLER_DIM_2D || instr->sampler_dim == GLSL_SAMPLER_DIM_MS ||
9258 instr->sampler_dim == GLSL_SAMPLER_DIM_SUBPASS ||
9259 instr->sampler_dim == GLSL_SAMPLER_DIM_SUBPASS_MS) &&
9260 instr->is_array && instr->op != nir_texop_txf && instr->op != nir_texop_fragment_fetch_amd &&
9261 instr->op != nir_texop_fragment_mask_fetch_amd)
9262 coords[2] = bld.vop1(aco_opcode::v_rndne_f32, bld.def(v1), coords[2]);
9264 if (ctx->options->chip_class == GFX9 && instr->sampler_dim == GLSL_SAMPLER_DIM_1D &&
9265 instr->op != nir_texop_lod && instr->coord_components) {
9266 assert(coords.size() > 0 && coords.size() < 3);
9268 coords.insert(std::next(coords.begin()),
9269 bld.copy(bld.def(v1), instr->op == nir_texop_txf ? Operand::c32(0)
9270 : Operand::c32(0x3f000000)));
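/* GFX9 stores 1D textures as 2D, so an extra coordinate is inserted after X:
 * 0 for txf (integer texel coordinates) and 0.5 (0x3f000000) for sampled
 * operations, so that filtering samples the middle of the single row. */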
9273 bool da = should_declare_array(ctx, instr->sampler_dim, instr->is_array);
9275 if (has_offset && instr->op == nir_texop_txf) {
9276 for (unsigned i = 0; i < std::min(offset.size(), instr->coord_components); i++) {
9277 Temp off = emit_extract_vector(ctx, offset, i, v1);
9278 coords[i] = bld.vadd32(bld.def(v1), coords[i], off);
9283 /* Build tex instruction */
9284 unsigned dmask = nir_ssa_def_components_read(&instr->dest.ssa) & 0xf;
9285 if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF)
9286 dmask = u_bit_consecutive(0, util_last_bit(dmask));
9287 if (instr->is_sparse)
9288 dmask = MAX2(dmask, 1) | 0x10;
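/* For sparse loads, bit 4 marks the residency dword so expand_vector() below
 * routes it to the destination; the MIMG/MUBUF dmask itself is masked back
 * to the low four bits when the instruction is emitted, and the residency
 * data is requested via the TFE bit instead. */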
9290 ctx->options->chip_class >= GFX10 && instr->sampler_dim != GLSL_SAMPLER_DIM_BUF
9291 ? ac_get_sampler_dim(ctx->options->chip_class, instr->sampler_dim, instr->is_array)
9293 bool d16 = instr->dest.ssa.bit_size == 16;
9294 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
9297 /* gather4 selects the component by dmask and always returns vec4 (vec5 if sparse) */
9298 if (instr->op == nir_texop_tg4) {
9299 assert(instr->dest.ssa.num_components == (4 + instr->is_sparse));
9300 if (instr->is_shadow)
9303 dmask = 1 << instr->component;
9304 if (tg4_integer_cube_workaround || dst.type() == RegType::sgpr)
9305 tmp_dst = bld.tmp(instr->is_sparse ? v5 : (d16 ? v2 : v4));
9306 } else if (instr->op == nir_texop_fragment_mask_fetch_amd) {
9307 tmp_dst = bld.tmp(v1);
9308 } else if (util_bitcount(dmask) != instr->dest.ssa.num_components ||
9309 dst.type() == RegType::sgpr) {
9310 unsigned bytes = util_bitcount(dmask) * instr->dest.ssa.bit_size / 8;
9311 tmp_dst = bld.tmp(RegClass::get(RegType::vgpr, bytes));
9314 if (instr->op == nir_texop_txs || instr->op == nir_texop_query_levels) {
9316 lod = bld.copy(bld.def(v1), Operand::zero());
9318 MIMG_instruction* tex = emit_mimg(bld, aco_opcode::image_get_resinfo, Definition(tmp_dst),
9319 resource, Operand(s4), std::vector<Temp>{lod});
9320 if (ctx->options->chip_class == GFX9 && instr->op == nir_texop_txs &&
9321 instr->sampler_dim == GLSL_SAMPLER_DIM_1D && instr->is_array) {
9322 tex->dmask = (dmask & 0x1) | ((dmask & 0x2) << 1);
9323 } else if (instr->op == nir_texop_query_levels) {
9324 tex->dmask = 1 << 3;
9331 expand_vector(ctx, tmp_dst, dst, instr->dest.ssa.num_components, dmask);
9335 Temp tg4_compare_cube_wa64 = Temp();
9337 if (tg4_integer_workarounds) {
9338 Temp tg4_lod = bld.copy(bld.def(v1), Operand::zero());
9339 Temp size = bld.tmp(v2);
9340 MIMG_instruction* tex = emit_mimg(bld, aco_opcode::image_get_resinfo, Definition(size),
9341 resource, Operand(s4), std::vector<Temp>{tg4_lod});
9345 emit_split_vector(ctx, size, size.size());
9348 for (unsigned i = 0; i < 2; i++) {
9349 half_texel[i] = emit_extract_vector(ctx, size, i, v1);
9350 half_texel[i] = bld.vop1(aco_opcode::v_cvt_f32_i32, bld.def(v1), half_texel[i]);
9351 half_texel[i] = bld.vop1(aco_opcode::v_rcp_iflag_f32, bld.def(v1), half_texel[i]);
9352 half_texel[i] = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1),
9353 Operand::c32(0xbf000000 /*-0.5*/), half_texel[i]);
9356 if (instr->sampler_dim == GLSL_SAMPLER_DIM_2D && !instr->is_array) {
9357 /* In Vulkan, whether the sampler uses unnormalized
9358 * coordinates is a dynamic property of the sampler.
9359 * Hence, to figure out whether we need to divide by the
9360 * texture size, the sampler must be tested at runtime.
9361 * This tests the bit set by
9362 * radv_init_sampler().
9364 unsigned bit_idx = ffs(S_008F30_FORCE_UNNORMALIZED(1)) - 1;
9366 bld.sopc(aco_opcode::s_bitcmp0_b32, bld.def(s1, scc), sampler, Operand::c32(bit_idx));
9368 not_needed = bool_to_vector_condition(ctx, not_needed);
9369 half_texel[0] = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1),
9370 Operand::c32(0xbf000000 /*-0.5*/), half_texel[0], not_needed);
9371 half_texel[1] = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1),
9372 Operand::c32(0xbf000000 /*-0.5*/), half_texel[1], not_needed);
9375 Temp new_coords[2] = {bld.vop2(aco_opcode::v_add_f32, bld.def(v1), coords[0], half_texel[0]),
9376 bld.vop2(aco_opcode::v_add_f32, bld.def(v1), coords[1], half_texel[1])};
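/* half_texel holds -0.5/size per axis, so new_coords shifts the gather
 * position back by half a texel. This compensates for how GFX8 and older
 * parts select texels for integer-format gathers (see the
 * lower_gather4_integer() reference below for the original workaround). */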
9378 if (tg4_integer_cube_workaround) {
9379 /* see comment in ac_nir_to_llvm.c's lower_gather4_integer() */
9380 Temp* const desc = (Temp*)alloca(resource.size() * sizeof(Temp));
9381 aco_ptr<Instruction> split{create_instruction<Pseudo_instruction>(
9382 aco_opcode::p_split_vector, Format::PSEUDO, 1, resource.size())};
9383 split->operands[0] = Operand(resource);
9384 for (unsigned i = 0; i < resource.size(); i++) {
9385 desc[i] = bld.tmp(s1);
9386 split->definitions[i] = Definition(desc[i]);
9388 ctx->block->instructions.emplace_back(std::move(split));
9390 Temp dfmt = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc), desc[1],
9391 Operand::c32(20u | (6u << 16)));
9392 Temp compare_cube_wa = bld.sopc(aco_opcode::s_cmp_eq_u32, bld.def(s1, scc), dfmt,
9393 Operand::c32(V_008F14_IMG_DATA_FORMAT_8_8_8_8));
9396 if (instr->dest_type & nir_type_uint) {
9397 nfmt = bld.sop2(aco_opcode::s_cselect_b32, bld.def(s1),
9398 Operand::c32(V_008F14_IMG_NUM_FORMAT_USCALED),
9399 Operand::c32(V_008F14_IMG_NUM_FORMAT_UINT), bld.scc(compare_cube_wa));
9401 nfmt = bld.sop2(aco_opcode::s_cselect_b32, bld.def(s1),
9402 Operand::c32(V_008F14_IMG_NUM_FORMAT_SSCALED),
9403 Operand::c32(V_008F14_IMG_NUM_FORMAT_SINT), bld.scc(compare_cube_wa));
9405 tg4_compare_cube_wa64 = bld.tmp(bld.lm);
9406 bool_to_vector_condition(ctx, compare_cube_wa, tg4_compare_cube_wa64);
9408 nfmt = bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.def(s1, scc), nfmt,
9411 desc[1] = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), desc[1],
9412 Operand::c32(C_008F14_NUM_FORMAT));
9413 desc[1] = bld.sop2(aco_opcode::s_or_b32, bld.def(s1), bld.def(s1, scc), desc[1], nfmt);
9415 aco_ptr<Instruction> vec{create_instruction<Pseudo_instruction>(
9416 aco_opcode::p_create_vector, Format::PSEUDO, resource.size(), 1)};
9417 for (unsigned i = 0; i < resource.size(); i++)
9418 vec->operands[i] = Operand(desc[i]);
9419 resource = bld.tmp(resource.regClass());
9420 vec->definitions[0] = Definition(resource);
9421 ctx->block->instructions.emplace_back(std::move(vec));
9423 new_coords[0] = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), new_coords[0], coords[0],
9424 tg4_compare_cube_wa64);
9425 new_coords[1] = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), new_coords[1], coords[1],
9426 tg4_compare_cube_wa64);
9428 coords[0] = new_coords[0];
9429 coords[1] = new_coords[1];
9432 if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF) {
9433 // FIXME: if (ctx->abi->gfx9_stride_size_workaround) return
9434 // ac_build_buffer_load_format_gfx9_safe()
9436 assert(coords.size() == 1);
9439 switch (util_last_bit(dmask & 0xf)) {
9440 case 1: op = aco_opcode::buffer_load_format_d16_x; break;
9441 case 2: op = aco_opcode::buffer_load_format_d16_xy; break;
9442 case 3: op = aco_opcode::buffer_load_format_d16_xyz; break;
9443 case 4: op = aco_opcode::buffer_load_format_d16_xyzw; break;
9444 default: unreachable("Tex instruction loads more than 4 components.");
9447 switch (util_last_bit(dmask & 0xf)) {
9448 case 1: op = aco_opcode::buffer_load_format_x; break;
9449 case 2: op = aco_opcode::buffer_load_format_xy; break;
9450 case 3: op = aco_opcode::buffer_load_format_xyz; break;
9451 case 4: op = aco_opcode::buffer_load_format_xyzw; break;
9452 default: unreachable("Tex instruction loads more than 4 components.");
9456 aco_ptr<MUBUF_instruction> mubuf{
9457 create_instruction<MUBUF_instruction>(op, Format::MUBUF, 3 + instr->is_sparse, 1)};
9458 mubuf->operands[0] = Operand(resource);
9459 mubuf->operands[1] = Operand(coords[0]);
9460 mubuf->operands[2] = Operand::c32(0);
9461 mubuf->definitions[0] = Definition(tmp_dst);
9462 mubuf->idxen = true;
9463 mubuf->tfe = instr->is_sparse;
9465 mubuf->operands[3] = emit_tfe_init(bld, tmp_dst);
9466 ctx->block->instructions.emplace_back(std::move(mubuf));
9468 expand_vector(ctx, tmp_dst, dst, instr->dest.ssa.num_components, dmask);
9472 /* gather MIMG address components */
9473 std::vector<Temp> args;
9474 unsigned wqm_mask = 0;
9476 wqm_mask |= u_bit_consecutive(args.size(), 1);
9477 args.emplace_back(offset);
9480 args.emplace_back(bias);
9482 args.emplace_back(compare);
9484 args.insert(args.end(), derivs.begin(), derivs.end());
9486 wqm_mask |= u_bit_consecutive(args.size(), coords.size());
9487 args.insert(args.end(), coords.begin(), coords.end());
9489 if (has_sample_index)
9490 args.emplace_back(sample_index);
9492 args.emplace_back(lod);
9493 if (has_clamped_lod)
9494 args.emplace_back(clamped_lod);
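/* args now holds the VADDR operands in hardware order: offset, bias,
 * compare, packed derivatives, coordinates, sample index, then lod or
 * clamped lod. wqm_mask marks the offset and coordinate entries, which may
 * need to be computed in WQM when the instruction uses implicit
 * derivatives. */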
9496 if (instr->op == nir_texop_txf || instr->op == nir_texop_fragment_fetch_amd ||
9497 instr->op == nir_texop_fragment_mask_fetch_amd) {
9498 aco_opcode op = level_zero || instr->sampler_dim == GLSL_SAMPLER_DIM_MS ||
9499 instr->sampler_dim == GLSL_SAMPLER_DIM_SUBPASS_MS
9500 ? aco_opcode::image_load
9501 : aco_opcode::image_load_mip;
9502 Operand vdata = instr->is_sparse ? emit_tfe_init(bld, tmp_dst) : Operand(v1);
9503 MIMG_instruction* tex =
9504 emit_mimg(bld, op, Definition(tmp_dst), resource, Operand(s4), args, 0, vdata);
9505 if (instr->op == nir_texop_fragment_mask_fetch_amd)
9506 tex->dim = da ? ac_image_2darray : ac_image_2d;
9509 tex->dmask = dmask & 0xf;
9512 tex->tfe = instr->is_sparse;
9515 if (instr->op == nir_texop_fragment_mask_fetch_amd) {
9516 /* Use 0x76543210 if the image doesn't have FMASK. */
9517 assert(dmask == 1 && dst.bytes() == 4);
9518 assert(dst.id() != tmp_dst.id());
9520 if (dst.regClass() == s1) {
9521 Temp is_not_null = bld.sopc(aco_opcode::s_cmp_lg_u32, bld.def(s1, scc), Operand::zero(),
9522 emit_extract_vector(ctx, resource, 1, s1));
9523 bld.sop2(aco_opcode::s_cselect_b32, Definition(dst),
9524 bld.as_uniform(tmp_dst), Operand::c32(0x76543210),
9525 bld.scc(is_not_null));
9527 Temp is_not_null = bld.tmp(bld.lm);
9528 bld.vopc_e64(aco_opcode::v_cmp_lg_u32, Definition(is_not_null), Operand::zero(),
9529 emit_extract_vector(ctx, resource, 1, s1));
9530 bld.vop2(aco_opcode::v_cndmask_b32, Definition(dst),
9531 bld.copy(bld.def(v1), Operand::c32(0x76543210)), tmp_dst, is_not_null);
9534 expand_vector(ctx, tmp_dst, dst, instr->dest.ssa.num_components, dmask);
9539 // TODO: it would be better to do this by adding offsets, but that needs the opcodes ordered.
9540 aco_opcode opcode = aco_opcode::image_sample;
9541 if (has_offset) { /* image_sample_*_o */
9542 if (has_clamped_lod) {
9544 opcode = aco_opcode::image_sample_c_cl_o;
9546 opcode = aco_opcode::image_sample_c_d_cl_o;
9548 opcode = aco_opcode::image_sample_c_b_cl_o;
9550 opcode = aco_opcode::image_sample_cl_o;
9552 opcode = aco_opcode::image_sample_d_cl_o;
9554 opcode = aco_opcode::image_sample_b_cl_o;
9556 } else if (has_compare) {
9557 opcode = aco_opcode::image_sample_c_o;
9559 opcode = aco_opcode::image_sample_c_d_o;
9561 opcode = aco_opcode::image_sample_c_b_o;
9563 opcode = aco_opcode::image_sample_c_lz_o;
9565 opcode = aco_opcode::image_sample_c_l_o;
9567 opcode = aco_opcode::image_sample_o;
9569 opcode = aco_opcode::image_sample_d_o;
9571 opcode = aco_opcode::image_sample_b_o;
9573 opcode = aco_opcode::image_sample_lz_o;
9575 opcode = aco_opcode::image_sample_l_o;
9577 } else if (has_clamped_lod) { /* image_sample_*_cl */
9579 opcode = aco_opcode::image_sample_c_cl;
9581 opcode = aco_opcode::image_sample_c_d_cl;
9583 opcode = aco_opcode::image_sample_c_b_cl;
9585 opcode = aco_opcode::image_sample_cl;
9587 opcode = aco_opcode::image_sample_d_cl;
9589 opcode = aco_opcode::image_sample_b_cl;
9591 } else { /* no offset */
9593 opcode = aco_opcode::image_sample_c;
9595 opcode = aco_opcode::image_sample_c_d;
9597 opcode = aco_opcode::image_sample_c_b;
9599 opcode = aco_opcode::image_sample_c_lz;
9601 opcode = aco_opcode::image_sample_c_l;
9603 opcode = aco_opcode::image_sample;
9605 opcode = aco_opcode::image_sample_d;
9607 opcode = aco_opcode::image_sample_b;
9609 opcode = aco_opcode::image_sample_lz;
9611 opcode = aco_opcode::image_sample_l;
9615 if (instr->op == nir_texop_tg4) {
9616 if (has_offset) { /* image_gather4_*_o */
9618 opcode = aco_opcode::image_gather4_c_lz_o;
9620 opcode = aco_opcode::image_gather4_c_l_o;
9622 opcode = aco_opcode::image_gather4_c_b_o;
9624 opcode = aco_opcode::image_gather4_lz_o;
9626 opcode = aco_opcode::image_gather4_l_o;
9628 opcode = aco_opcode::image_gather4_b_o;
9632 opcode = aco_opcode::image_gather4_c_lz;
9634 opcode = aco_opcode::image_gather4_c_l;
9636 opcode = aco_opcode::image_gather4_c_b;
9638 opcode = aco_opcode::image_gather4_lz;
9640 opcode = aco_opcode::image_gather4_l;
9642 opcode = aco_opcode::image_gather4_b;
9645 } else if (instr->op == nir_texop_lod) {
9646 opcode = aco_opcode::image_get_lod;
9649 bool implicit_derivs = bld.program->stage == fragment_fs && !has_derivs && !has_lod &&
9650 !level_zero && instr->sampler_dim != GLSL_SAMPLER_DIM_MS &&
9651 instr->sampler_dim != GLSL_SAMPLER_DIM_SUBPASS_MS;
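/* Implicit derivatives only exist in fragment shaders and only when no
 * explicit derivatives, LOD or level-zero variant is used. In that case
 * wqm_mask is passed on so the marked operands are produced in whole-quad
 * mode and helper invocations hold valid coordinate values. */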
9653 Operand vdata = instr->is_sparse ? emit_tfe_init(bld, tmp_dst) : Operand(v1);
9654 MIMG_instruction* tex = emit_mimg(bld, opcode, Definition(tmp_dst), resource, Operand(sampler),
9655 args, implicit_derivs ? wqm_mask : 0, vdata);
9657 tex->dmask = dmask & 0xf;
9659 tex->tfe = instr->is_sparse;
9662 if (tg4_integer_cube_workaround) {
9663 assert(tmp_dst.id() != dst.id());
9664 assert(tmp_dst.size() == dst.size());
9666 emit_split_vector(ctx, tmp_dst, tmp_dst.size());
9668 for (unsigned i = 0; i < 4; i++) {
9669 val[i] = emit_extract_vector(ctx, tmp_dst, i, v1);
9671 if (instr->dest_type & nir_type_uint)
9672 cvt_val = bld.vop1(aco_opcode::v_cvt_u32_f32, bld.def(v1), val[i]);
9674 cvt_val = bld.vop1(aco_opcode::v_cvt_i32_f32, bld.def(v1), val[i]);
9675 val[i] = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), val[i], cvt_val,
9676 tg4_compare_cube_wa64);
9679 Temp tmp = dst.regClass() == tmp_dst.regClass() ? dst : bld.tmp(tmp_dst.regClass());
9680 if (instr->is_sparse)
9681 tmp_dst = bld.pseudo(aco_opcode::p_create_vector, Definition(tmp), val[0], val[1], val[2],
9682 val[3], emit_extract_vector(ctx, tmp_dst, 4, v1));
9684 tmp_dst = bld.pseudo(aco_opcode::p_create_vector, Definition(tmp), val[0], val[1], val[2],
9687 unsigned mask = instr->op == nir_texop_tg4 ? (instr->is_sparse ? 0x1F : 0xF) : dmask;
9688 expand_vector(ctx, tmp_dst, dst, instr->dest.ssa.num_components, mask);
9692 get_phi_operand(isel_context* ctx, nir_ssa_def* ssa, RegClass rc, bool logical)
9694 Temp tmp = get_ssa_temp(ctx, ssa);
9695 if (ssa->parent_instr->type == nir_instr_type_ssa_undef) {
9697 } else if (logical && ssa->bit_size == 1 &&
9698 ssa->parent_instr->type == nir_instr_type_load_const) {
9699 if (ctx->program->wave_size == 64)
9700 return Operand::c64(nir_instr_as_load_const(ssa->parent_instr)->value[0].b ? UINT64_MAX
9703 return Operand::c32(nir_instr_as_load_const(ssa->parent_instr)->value[0].b ? UINT32_MAX
9706 return Operand(tmp);
9711 visit_phi(isel_context* ctx, nir_phi_instr* instr)
9713 aco_ptr<Pseudo_instruction> phi;
9714 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
9715 assert(instr->dest.ssa.bit_size != 1 || dst.regClass() == ctx->program->lane_mask);
9717 bool logical = !dst.is_linear() || nir_dest_is_divergent(instr->dest);
9718 logical |= (ctx->block->kind & block_kind_merge) != 0;
9719 aco_opcode opcode = logical ? aco_opcode::p_phi : aco_opcode::p_linear_phi;
9721 /* we want a sorted list of sources, since the predecessor list is also sorted */
9722 std::map<unsigned, nir_ssa_def*> phi_src;
9723 nir_foreach_phi_src (src, instr)
9724 phi_src[src->pred->index] = src->src.ssa;
9726 std::vector<unsigned>& preds = logical ? ctx->block->logical_preds : ctx->block->linear_preds;
9727 unsigned num_operands = 0;
9728 Operand* const operands = (Operand*)alloca(
9729 (std::max(exec_list_length(&instr->srcs), (unsigned)preds.size()) + 1) * sizeof(Operand));
9730 unsigned num_defined = 0;
9731 unsigned cur_pred_idx = 0;
9732 for (std::pair<unsigned, nir_ssa_def*> src : phi_src) {
9733 if (cur_pred_idx < preds.size()) {
9734 /* handle missing preds (IF merges with discard/break) and extra preds
9735 * (loop exit with discard) */
9736 unsigned block = ctx->cf_info.nir_to_aco[src.first];
9737 unsigned skipped = 0;
9738 while (cur_pred_idx + skipped < preds.size() && preds[cur_pred_idx + skipped] != block)
9740 if (cur_pred_idx + skipped < preds.size()) {
9741 for (unsigned i = 0; i < skipped; i++)
9742 operands[num_operands++] = Operand(dst.regClass());
9743 cur_pred_idx += skipped;
9748 /* Handle missing predecessors at the end. This shouldn't happen with loop
9749 * headers and we can't ignore these sources for loop header phis. */
9750 if (!(ctx->block->kind & block_kind_loop_header) && cur_pred_idx >= preds.size())
9753 Operand op = get_phi_operand(ctx, src.second, dst.regClass(), logical);
9754 operands[num_operands++] = op;
9755 num_defined += !op.isUndefined();
9757 /* handle block_kind_continue_or_break at loop exit blocks */
9758 while (cur_pred_idx++ < preds.size())
9759 operands[num_operands++] = Operand(dst.regClass());
9761 /* If the loop ends with a break, still add a linear continue edge in case
9762 * that break is divergent or continue_or_break is used. We'll either remove
9763 * this operand later in visit_loop() if it's not necessary or replace the
9764 * undef with something correct. */
9765 if (!logical && ctx->block->kind & block_kind_loop_header) {
9766 nir_loop* loop = nir_cf_node_as_loop(instr->instr.block->cf_node.parent);
9767 nir_block* last = nir_loop_last_block(loop);
9768 if (last->successors[0] != instr->instr.block)
9769 operands[num_operands++] = Operand(RegClass());
9772 /* we can use a linear phi in some cases if one src is undef */
9773 if (dst.is_linear() && ctx->block->kind & block_kind_merge && num_defined == 1) {
9774 phi.reset(create_instruction<Pseudo_instruction>(aco_opcode::p_linear_phi, Format::PSEUDO,
9777 Block* linear_else = &ctx->program->blocks[ctx->block->linear_preds[1]];
9778 Block* invert = &ctx->program->blocks[linear_else->linear_preds[0]];
9779 assert(invert->kind & block_kind_invert);
9781 unsigned then_block = invert->linear_preds[0];
9783 Block* insert_block = NULL;
9784 for (unsigned i = 0; i < num_operands; i++) {
9785 Operand op = operands[i];
9786 if (op.isUndefined())
9788 insert_block = ctx->block->logical_preds[i] == then_block ? invert : ctx->block;
9789 phi->operands[0] = op;
9792 assert(insert_block); /* should be handled by the "num_defined == 0" case above */
9793 phi->operands[1] = Operand(dst.regClass());
9794 phi->definitions[0] = Definition(dst);
9795 insert_block->instructions.emplace(insert_block->instructions.begin(), std::move(phi));
9799 phi.reset(create_instruction<Pseudo_instruction>(opcode, Format::PSEUDO, num_operands, 1));
9800 for (unsigned i = 0; i < num_operands; i++)
9801 phi->operands[i] = operands[i];
9802 phi->definitions[0] = Definition(dst);
9803 ctx->block->instructions.emplace(ctx->block->instructions.begin(), std::move(phi));
9807 visit_undef(isel_context* ctx, nir_ssa_undef_instr* instr)
9809 Temp dst = get_ssa_temp(ctx, &instr->def);
9811 assert(dst.type() == RegType::sgpr);
9813 if (dst.size() == 1) {
9814 Builder(ctx->program, ctx->block).copy(Definition(dst), Operand::zero());
9816 aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(
9817 aco_opcode::p_create_vector, Format::PSEUDO, dst.size(), 1)};
9818 for (unsigned i = 0; i < dst.size(); i++)
9819 vec->operands[i] = Operand::zero();
9820 vec->definitions[0] = Definition(dst);
9821 ctx->block->instructions.emplace_back(std::move(vec));
9826 begin_loop(isel_context* ctx, loop_context* lc)
9828 // TODO: we might want to wrap the loop around a branch if exec_potentially_empty=true
9829 append_logical_end(ctx->block);
9830 ctx->block->kind |= block_kind_loop_preheader | block_kind_uniform;
9831 Builder bld(ctx->program, ctx->block);
9832 bld.branch(aco_opcode::p_branch, bld.def(s2));
9833 unsigned loop_preheader_idx = ctx->block->index;
9835 lc->loop_exit.kind |= (block_kind_loop_exit | (ctx->block->kind & block_kind_top_level));
9837 ctx->program->next_loop_depth++;
9839 Block* loop_header = ctx->program->create_and_insert_block();
9840 loop_header->kind |= block_kind_loop_header;
9841 add_edge(loop_preheader_idx, loop_header);
9842 ctx->block = loop_header;
9844 append_logical_start(ctx->block);
9846 lc->header_idx_old = std::exchange(ctx->cf_info.parent_loop.header_idx, loop_header->index);
9847 lc->exit_old = std::exchange(ctx->cf_info.parent_loop.exit, &lc->loop_exit);
9848 lc->divergent_cont_old = std::exchange(ctx->cf_info.parent_loop.has_divergent_continue, false);
9849 lc->divergent_branch_old = std::exchange(ctx->cf_info.parent_loop.has_divergent_branch, false);
9850 lc->divergent_if_old = std::exchange(ctx->cf_info.parent_if.is_divergent, false);
9854 end_loop(isel_context* ctx, loop_context* lc)
9856 // TODO: what if a loop ends with an unconditional or uniformly branched continue
9857 // and this branch is never taken?
9858 if (!ctx->cf_info.has_branch) {
9859 unsigned loop_header_idx = ctx->cf_info.parent_loop.header_idx;
9860 Builder bld(ctx->program, ctx->block);
9861 append_logical_end(ctx->block);
9863 if (ctx->cf_info.exec_potentially_empty_discard ||
9864 ctx->cf_info.exec_potentially_empty_break) {
9865 /* Discards can result in code running with an empty exec mask.
9866 * This would result in divergent breaks never being taken. As a
9867 * workaround, break the loop when the loop mask is empty instead of
9868 * always continuing. */
9869 ctx->block->kind |= (block_kind_continue_or_break | block_kind_uniform);
9870 unsigned block_idx = ctx->block->index;
9872 /* create helper blocks to avoid critical edges */
9873 Block* break_block = ctx->program->create_and_insert_block();
9874 break_block->kind = block_kind_uniform;
9875 bld.reset(break_block);
9876 bld.branch(aco_opcode::p_branch, bld.def(s2));
9877 add_linear_edge(block_idx, break_block);
9878 add_linear_edge(break_block->index, &lc->loop_exit);
9880 Block* continue_block = ctx->program->create_and_insert_block();
9881 continue_block->kind = block_kind_uniform;
9882 bld.reset(continue_block);
9883 bld.branch(aco_opcode::p_branch, bld.def(s2));
9884 add_linear_edge(block_idx, continue_block);
9885 add_linear_edge(continue_block->index, &ctx->program->blocks[loop_header_idx]);
9887 if (!ctx->cf_info.parent_loop.has_divergent_branch)
9888 add_logical_edge(block_idx, &ctx->program->blocks[loop_header_idx]);
9889 ctx->block = &ctx->program->blocks[block_idx];
9891 ctx->block->kind |= (block_kind_continue | block_kind_uniform);
9892 if (!ctx->cf_info.parent_loop.has_divergent_branch)
9893 add_edge(ctx->block->index, &ctx->program->blocks[loop_header_idx]);
9895 add_linear_edge(ctx->block->index, &ctx->program->blocks[loop_header_idx]);
9898 bld.reset(ctx->block);
9899 bld.branch(aco_opcode::p_branch, bld.def(s2));
9902 ctx->cf_info.has_branch = false;
9903 ctx->program->next_loop_depth--;
9905 // TODO: if the loop does not have a single exit, we must add one
9906 /* emit loop successor block */
9907 ctx->block = ctx->program->insert_block(std::move(lc->loop_exit));
9908 append_logical_start(ctx->block);
9911 // TODO: check if it is beneficial to not branch on continues
9912 /* trim linear phis in loop header */
9913 for (auto&& instr : loop_entry->instructions) {
9914 if (instr->opcode == aco_opcode::p_linear_phi) {
9915 aco_ptr<Pseudo_instruction> new_phi{create_instruction<Pseudo_instruction>(aco_opcode::p_linear_phi, Format::PSEUDO, loop_entry->linear_preds.size(), 1)};
9916 new_phi->definitions[0] = instr->definitions[0];
9917 for (unsigned i = 0; i < new_phi->operands.size(); i++)
9918 new_phi->operands[i] = instr->operands[i];
9919 /* check that the remaining operands are all the same */
9920 for (unsigned i = new_phi->operands.size(); i < instr->operands.size(); i++)
9921 assert(instr->operands[i].tempId() == instr->operands.back().tempId());
9922 instr.swap(new_phi);
9923 } else if (instr->opcode == aco_opcode::p_phi) {
9931 ctx->cf_info.parent_loop.header_idx = lc->header_idx_old;
9932 ctx->cf_info.parent_loop.exit = lc->exit_old;
9933 ctx->cf_info.parent_loop.has_divergent_continue = lc->divergent_cont_old;
9934 ctx->cf_info.parent_loop.has_divergent_branch = lc->divergent_branch_old;
9935 ctx->cf_info.parent_if.is_divergent = lc->divergent_if_old;
9936 if (!ctx->block->loop_nest_depth && !ctx->cf_info.parent_if.is_divergent)
9937 ctx->cf_info.exec_potentially_empty_discard = false;
9941 emit_loop_jump(isel_context* ctx, bool is_break)
9943 Builder bld(ctx->program, ctx->block);
9944 Block* logical_target;
9945 append_logical_end(ctx->block);
9946 unsigned idx = ctx->block->index;
9949 logical_target = ctx->cf_info.parent_loop.exit;
9950 add_logical_edge(idx, logical_target);
9951 ctx->block->kind |= block_kind_break;
9953 if (!ctx->cf_info.parent_if.is_divergent &&
9954 !ctx->cf_info.parent_loop.has_divergent_continue) {
9955 /* uniform break - directly jump out of the loop */
9956 ctx->block->kind |= block_kind_uniform;
9957 ctx->cf_info.has_branch = true;
9958 bld.branch(aco_opcode::p_branch, bld.def(s2));
9959 add_linear_edge(idx, logical_target);
9962 ctx->cf_info.parent_loop.has_divergent_branch = true;
9964 logical_target = &ctx->program->blocks[ctx->cf_info.parent_loop.header_idx];
9965 add_logical_edge(idx, logical_target);
9966 ctx->block->kind |= block_kind_continue;
9968 if (!ctx->cf_info.parent_if.is_divergent) {
9969 /* uniform continue - directly jump to the loop header */
9970 ctx->block->kind |= block_kind_uniform;
9971 ctx->cf_info.has_branch = true;
9972 bld.branch(aco_opcode::p_branch, bld.def(s2));
9973 add_linear_edge(idx, logical_target);
9977 /* for potential uniform breaks after this continue,
9978 we must ensure that they are handled correctly */
9979 ctx->cf_info.parent_loop.has_divergent_continue = true;
9980 ctx->cf_info.parent_loop.has_divergent_branch = true;
9983 if (ctx->cf_info.parent_if.is_divergent && !ctx->cf_info.exec_potentially_empty_break) {
9984 ctx->cf_info.exec_potentially_empty_break = true;
9985 ctx->cf_info.exec_potentially_empty_break_depth = ctx->block->loop_nest_depth;
9988 /* remove critical edges from linear CFG */
9989 bld.branch(aco_opcode::p_branch, bld.def(s2));
9990 Block* break_block = ctx->program->create_and_insert_block();
9991 break_block->kind |= block_kind_uniform;
9992 add_linear_edge(idx, break_block);
9993 /* the loop_header pointer might be invalidated by this point */
9995 logical_target = &ctx->program->blocks[ctx->cf_info.parent_loop.header_idx];
9996 add_linear_edge(break_block->index, logical_target);
9997 bld.reset(break_block);
9998 bld.branch(aco_opcode::p_branch, bld.def(s2));
10000 Block* continue_block = ctx->program->create_and_insert_block();
10001 add_linear_edge(idx, continue_block);
10002 append_logical_start(continue_block);
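/* A divergent jump thus ends the current block with two linear successors:
 * the helper block above, which branches to the jump target (loop exit or
 * loop header), and this continue_block, where emission resumes for the
 * lanes that did not take the jump. */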
10003 ctx->block = continue_block;
10007 emit_loop_break(isel_context* ctx)
10009 emit_loop_jump(ctx, true);
10013 emit_loop_continue(isel_context* ctx)
10015 emit_loop_jump(ctx, false);
10019 visit_jump(isel_context* ctx, nir_jump_instr* instr)
10021 /* visit_block() would usually do this but divergent jumps update ctx->block */
10022 ctx->cf_info.nir_to_aco[instr->instr.block->index] = ctx->block->index;
10024 switch (instr->type) {
10025 case nir_jump_break: emit_loop_break(ctx); break;
10026 case nir_jump_continue: emit_loop_continue(ctx); break;
10027 default: isel_err(&instr->instr, "Unknown NIR jump instr"); abort();
10032 visit_block(isel_context* ctx, nir_block* block)
10034 nir_foreach_instr (instr, block) {
10035 switch (instr->type) {
10036 case nir_instr_type_alu: visit_alu_instr(ctx, nir_instr_as_alu(instr)); break;
10037 case nir_instr_type_load_const: visit_load_const(ctx, nir_instr_as_load_const(instr)); break;
10038 case nir_instr_type_intrinsic: visit_intrinsic(ctx, nir_instr_as_intrinsic(instr)); break;
10039 case nir_instr_type_tex: visit_tex(ctx, nir_instr_as_tex(instr)); break;
10040 case nir_instr_type_phi: visit_phi(ctx, nir_instr_as_phi(instr)); break;
10041 case nir_instr_type_ssa_undef: visit_undef(ctx, nir_instr_as_ssa_undef(instr)); break;
10042 case nir_instr_type_deref: break;
10043 case nir_instr_type_jump: visit_jump(ctx, nir_instr_as_jump(instr)); break;
10044 default: isel_err(instr, "Unknown NIR instr type");
10048 if (!ctx->cf_info.parent_loop.has_divergent_branch)
10049 ctx->cf_info.nir_to_aco[block->index] = ctx->block->index;
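/* For a loop-header linear phi, compute the value of its continue operand in
 * every block from the loop header ('first') to the continue block ('last').
 * Where linear predecessors disagree, a new linear phi is inserted; the
 * returned operand is the one to use for the continue edge. */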
10053 create_continue_phis(isel_context* ctx, unsigned first, unsigned last,
10054 aco_ptr<Instruction>& header_phi, Operand* vals)
10056 vals[0] = Operand(header_phi->definitions[0].getTemp());
10057 RegClass rc = vals[0].regClass();
10059 unsigned loop_nest_depth = ctx->program->blocks[first].loop_nest_depth;
10061 unsigned next_pred = 1;
10063 for (unsigned idx = first + 1; idx <= last; idx++) {
10064 Block& block = ctx->program->blocks[idx];
10065 if (block.loop_nest_depth != loop_nest_depth) {
10066 vals[idx - first] = vals[idx - 1 - first];
10070 if ((block.kind & block_kind_continue) && block.index != last) {
10071 vals[idx - first] = header_phi->operands[next_pred];
10076 bool all_same = true;
10077 for (unsigned i = 1; all_same && (i < block.linear_preds.size()); i++)
10078 all_same = vals[block.linear_preds[i] - first] == vals[block.linear_preds[0] - first];
10082 val = vals[block.linear_preds[0] - first];
10084 aco_ptr<Instruction> phi(create_instruction<Pseudo_instruction>(
10085 aco_opcode::p_linear_phi, Format::PSEUDO, block.linear_preds.size(), 1));
10086 for (unsigned i = 0; i < block.linear_preds.size(); i++)
10087 phi->operands[i] = vals[block.linear_preds[i] - first];
10088 val = Operand(ctx->program->allocateTmp(rc));
10089 phi->definitions[0] = Definition(val.getTemp());
10090 block.instructions.emplace(block.instructions.begin(), std::move(phi));
10092 vals[idx - first] = val;
10095 return vals[last - first];
10098 static void begin_uniform_if_then(isel_context* ctx, if_context* ic, Temp cond);
10099 static void begin_uniform_if_else(isel_context* ctx, if_context* ic);
10100 static void end_uniform_if(isel_context* ctx, if_context* ic);
10103 visit_loop(isel_context* ctx, nir_loop* loop)
10106 begin_loop(ctx, &lc);
10108 /* NIR seems to allow this, and even though the loop exit has no predecessors, SSA defs from the
10109 * loop header are live. Handle this without complicating the ACO IR by creating a dummy break.
10111 if (nir_cf_node_cf_tree_next(&loop->cf_node)->predecessors->entries == 0) {
10112 Builder bld(ctx->program, ctx->block);
10113 Temp cond = bld.copy(bld.def(s1, scc), Operand::zero());
10115 begin_uniform_if_then(ctx, &ic, cond);
10116 emit_loop_break(ctx);
10117 begin_uniform_if_else(ctx, &ic);
10118 end_uniform_if(ctx, &ic);
10121 bool unreachable = visit_cf_list(ctx, &loop->body);
10123 unsigned loop_header_idx = ctx->cf_info.parent_loop.header_idx;
10125 /* Fix up phis in the loop header whose operands come from unreachable blocks.
10126 * has_branch/has_divergent_branch also indicates if the loop ends with a
10127 * break/continue instruction, but we don't emit those if unreachable=true */
10129 assert(ctx->cf_info.has_branch || ctx->cf_info.parent_loop.has_divergent_branch);
10130 bool linear = ctx->cf_info.has_branch;
10131 bool logical = ctx->cf_info.has_branch || ctx->cf_info.parent_loop.has_divergent_branch;
10132 for (aco_ptr<Instruction>& instr : ctx->program->blocks[loop_header_idx].instructions) {
10133 if ((logical && instr->opcode == aco_opcode::p_phi) ||
10134 (linear && instr->opcode == aco_opcode::p_linear_phi)) {
10135 /* the last operand should be the one that needs to be removed */
10136 instr->operands.pop_back();
10137 } else if (!is_phi(instr)) {
10143 /* Fix up linear phis in the loop header that expect a continue. This fixup
10144 * and the previous one shouldn't happen at once because a break in the
10145 * merge block would get CSE'd */
10146 if (nir_loop_last_block(loop)->successors[0] != nir_loop_first_block(loop)) {
10147 unsigned num_vals = ctx->cf_info.has_branch ? 1 : (ctx->block->index - loop_header_idx + 1);
10148 Operand* const vals = (Operand*)alloca(num_vals * sizeof(Operand));
10149 for (aco_ptr<Instruction>& instr : ctx->program->blocks[loop_header_idx].instructions) {
10150 if (instr->opcode == aco_opcode::p_linear_phi) {
10151 if (ctx->cf_info.has_branch)
10152 instr->operands.pop_back();
10154 instr->operands.back() =
10155 create_continue_phis(ctx, loop_header_idx, ctx->block->index, instr, vals);
10156 } else if (!is_phi(instr)) {
10162 end_loop(ctx, &lc);
10166 begin_divergent_if_then(isel_context* ctx, if_context* ic, Temp cond)
10170 append_logical_end(ctx->block);
10171 ctx->block->kind |= block_kind_branch;
10173 /* branch to linear then block */
10174 assert(cond.regClass() == ctx->program->lane_mask);
10175 aco_ptr<Pseudo_branch_instruction> branch;
10176 branch.reset(create_instruction<Pseudo_branch_instruction>(aco_opcode::p_cbranch_z,
10177 Format::PSEUDO_BRANCH, 1, 1));
10178 branch->definitions[0] = Definition(ctx->program->allocateTmp(s2));
10179 branch->operands[0] = Operand(cond);
10180 ctx->block->instructions.push_back(std::move(branch));
10182 ic->BB_if_idx = ctx->block->index;
10183 ic->BB_invert = Block();
10184 /* Invert blocks are intentionally not marked as top level because they
10185 * are not part of the logical cfg. */
10186 ic->BB_invert.kind |= block_kind_invert;
10187 ic->BB_endif = Block();
10188 ic->BB_endif.kind |= (block_kind_merge | (ctx->block->kind & block_kind_top_level));
10190 ic->exec_potentially_empty_discard_old = ctx->cf_info.exec_potentially_empty_discard;
10191 ic->exec_potentially_empty_break_old = ctx->cf_info.exec_potentially_empty_break;
10192 ic->exec_potentially_empty_break_depth_old = ctx->cf_info.exec_potentially_empty_break_depth;
10193 ic->divergent_old = ctx->cf_info.parent_if.is_divergent;
10194 ctx->cf_info.parent_if.is_divergent = true;
10196 /* divergent branches use cbranch_execz */
10197 ctx->cf_info.exec_potentially_empty_discard = false;
10198 ctx->cf_info.exec_potentially_empty_break = false;
10199 ctx->cf_info.exec_potentially_empty_break_depth = UINT16_MAX;
10201 /** emit logical then block */
10202 ctx->program->next_divergent_if_logical_depth++;
10203 Block* BB_then_logical = ctx->program->create_and_insert_block();
10204 add_edge(ic->BB_if_idx, BB_then_logical);
10205 ctx->block = BB_then_logical;
10206 append_logical_start(BB_then_logical);
10210 begin_divergent_if_else(isel_context* ctx, if_context* ic)
10212 Block* BB_then_logical = ctx->block;
10213 append_logical_end(BB_then_logical);
10214 /* branch from logical then block to invert block */
10215 aco_ptr<Pseudo_branch_instruction> branch;
10216 branch.reset(create_instruction<Pseudo_branch_instruction>(aco_opcode::p_branch,
10217 Format::PSEUDO_BRANCH, 0, 1));
10218 branch->definitions[0] = Definition(ctx->program->allocateTmp(s2));
10219 BB_then_logical->instructions.emplace_back(std::move(branch));
10220 add_linear_edge(BB_then_logical->index, &ic->BB_invert);
10221 if (!ctx->cf_info.parent_loop.has_divergent_branch)
10222 add_logical_edge(BB_then_logical->index, &ic->BB_endif);
10223 BB_then_logical->kind |= block_kind_uniform;
10224 assert(!ctx->cf_info.has_branch);
10225 ic->then_branch_divergent = ctx->cf_info.parent_loop.has_divergent_branch;
10226 ctx->cf_info.parent_loop.has_divergent_branch = false;
10227 ctx->program->next_divergent_if_logical_depth--;
10229 /** emit linear then block */
10230 Block* BB_then_linear = ctx->program->create_and_insert_block();
10231 BB_then_linear->kind |= block_kind_uniform;
10232 add_linear_edge(ic->BB_if_idx, BB_then_linear);
10233 /* branch from linear then block to invert block */
10234 branch.reset(create_instruction<Pseudo_branch_instruction>(aco_opcode::p_branch,
10235 Format::PSEUDO_BRANCH, 0, 1));
10236 branch->definitions[0] = Definition(ctx->program->allocateTmp(s2));
10237 BB_then_linear->instructions.emplace_back(std::move(branch));
10238 add_linear_edge(BB_then_linear->index, &ic->BB_invert);
10240 /** emit invert merge block */
10241 ctx->block = ctx->program->insert_block(std::move(ic->BB_invert));
10242 ic->invert_idx = ctx->block->index;
10244 /* branch to linear else block (skip else) */
10245 branch.reset(create_instruction<Pseudo_branch_instruction>(aco_opcode::p_branch,
10246 Format::PSEUDO_BRANCH, 0, 1));
10247 branch->definitions[0] = Definition(ctx->program->allocateTmp(s2));
10248 ctx->block->instructions.push_back(std::move(branch));
10250 ic->exec_potentially_empty_discard_old |= ctx->cf_info.exec_potentially_empty_discard;
10251 ic->exec_potentially_empty_break_old |= ctx->cf_info.exec_potentially_empty_break;
10252 ic->exec_potentially_empty_break_depth_old = std::min(
10253 ic->exec_potentially_empty_break_depth_old, ctx->cf_info.exec_potentially_empty_break_depth);
10254 /* divergent branches use cbranch_execz */
10255 ctx->cf_info.exec_potentially_empty_discard = false;
10256 ctx->cf_info.exec_potentially_empty_break = false;
10257 ctx->cf_info.exec_potentially_empty_break_depth = UINT16_MAX;
10259 /** emit logical else block */
10260 ctx->program->next_divergent_if_logical_depth++;
10261 Block* BB_else_logical = ctx->program->create_and_insert_block();
10262 add_logical_edge(ic->BB_if_idx, BB_else_logical);
10263 add_linear_edge(ic->invert_idx, BB_else_logical);
10264 ctx->block = BB_else_logical;
10265 append_logical_start(BB_else_logical);
10269 end_divergent_if(isel_context* ctx, if_context* ic)
10271 Block* BB_else_logical = ctx->block;
10272 append_logical_end(BB_else_logical);
10274 /* branch from logical else block to endif block */
10275 aco_ptr<Pseudo_branch_instruction> branch;
10276 branch.reset(create_instruction<Pseudo_branch_instruction>(aco_opcode::p_branch,
10277 Format::PSEUDO_BRANCH, 0, 1));
10278 branch->definitions[0] = Definition(ctx->program->allocateTmp(s2));
10279 BB_else_logical->instructions.emplace_back(std::move(branch));
10280 add_linear_edge(BB_else_logical->index, &ic->BB_endif);
10281 if (!ctx->cf_info.parent_loop.has_divergent_branch)
10282 add_logical_edge(BB_else_logical->index, &ic->BB_endif);
10283 BB_else_logical->kind |= block_kind_uniform;
10284 ctx->program->next_divergent_if_logical_depth--;
10286 assert(!ctx->cf_info.has_branch);
10287 ctx->cf_info.parent_loop.has_divergent_branch &= ic->then_branch_divergent;
10289 /** emit linear else block */
10290 Block* BB_else_linear = ctx->program->create_and_insert_block();
10291 BB_else_linear->kind |= block_kind_uniform;
10292 add_linear_edge(ic->invert_idx, BB_else_linear);
10294 /* branch from linear else block to endif block */
10295 branch.reset(create_instruction<Pseudo_branch_instruction>(aco_opcode::p_branch,
10296 Format::PSEUDO_BRANCH, 0, 1));
10297 branch->definitions[0] = Definition(ctx->program->allocateTmp(s2));
10298 BB_else_linear->instructions.emplace_back(std::move(branch));
10299 add_linear_edge(BB_else_linear->index, &ic->BB_endif);
10301 /** emit endif merge block */
10302 ctx->block = ctx->program->insert_block(std::move(ic->BB_endif));
10303 append_logical_start(ctx->block);
10305 ctx->cf_info.parent_if.is_divergent = ic->divergent_old;
10306 ctx->cf_info.exec_potentially_empty_discard |= ic->exec_potentially_empty_discard_old;
10307 ctx->cf_info.exec_potentially_empty_break |= ic->exec_potentially_empty_break_old;
10308 ctx->cf_info.exec_potentially_empty_break_depth = std::min(
10309 ic->exec_potentially_empty_break_depth_old, ctx->cf_info.exec_potentially_empty_break_depth);
10310 if (ctx->block->loop_nest_depth == ctx->cf_info.exec_potentially_empty_break_depth &&
10311 !ctx->cf_info.parent_if.is_divergent) {
10312 ctx->cf_info.exec_potentially_empty_break = false;
10313 ctx->cf_info.exec_potentially_empty_break_depth = UINT16_MAX;
10315 /* uniform control flow never has an empty exec-mask */
10316 if (!ctx->block->loop_nest_depth && !ctx->cf_info.parent_if.is_divergent) {
10317 ctx->cf_info.exec_potentially_empty_discard = false;
10318 ctx->cf_info.exec_potentially_empty_break = false;
10319 ctx->cf_info.exec_potentially_empty_break_depth = UINT16_MAX;
10324 begin_uniform_if_then(isel_context* ctx, if_context* ic, Temp cond)
10326 assert(cond.regClass() == s1);
10328 append_logical_end(ctx->block);
10329 ctx->block->kind |= block_kind_uniform;
10331 aco_ptr<Pseudo_branch_instruction> branch;
10332 aco_opcode branch_opcode = aco_opcode::p_cbranch_z;
10334 create_instruction<Pseudo_branch_instruction>(branch_opcode, Format::PSEUDO_BRANCH, 1, 1));
10335 branch->definitions[0] = Definition(ctx->program->allocateTmp(s2));
10336 branch->operands[0] = Operand(cond);
10337 branch->operands[0].setFixed(scc);
10338 ctx->block->instructions.emplace_back(std::move(branch));
10340 ic->BB_if_idx = ctx->block->index;
10341 ic->BB_endif = Block();
10342 ic->BB_endif.kind |= ctx->block->kind & block_kind_top_level;
10344 ctx->cf_info.has_branch = false;
10345 ctx->cf_info.parent_loop.has_divergent_branch = false;
10347 /** emit then block */
10348 ctx->program->next_uniform_if_depth++;
10349 Block* BB_then = ctx->program->create_and_insert_block();
10350 add_edge(ic->BB_if_idx, BB_then);
10351 append_logical_start(BB_then);
10352 ctx->block = BB_then;
10356 begin_uniform_if_else(isel_context* ctx, if_context* ic)
10358 Block* BB_then = ctx->block;
10360 ic->uniform_has_then_branch = ctx->cf_info.has_branch;
10361 ic->then_branch_divergent = ctx->cf_info.parent_loop.has_divergent_branch;
10363 if (!ic->uniform_has_then_branch) {
10364 append_logical_end(BB_then);
10365 /* branch from then block to endif block */
10366 aco_ptr<Pseudo_branch_instruction> branch;
10367 branch.reset(create_instruction<Pseudo_branch_instruction>(aco_opcode::p_branch,
10368 Format::PSEUDO_BRANCH, 0, 1));
10369 branch->definitions[0] = Definition(ctx->program->allocateTmp(s2));
10370 BB_then->instructions.emplace_back(std::move(branch));
10371 add_linear_edge(BB_then->index, &ic->BB_endif);
10372 if (!ic->then_branch_divergent)
10373 add_logical_edge(BB_then->index, &ic->BB_endif);
10374 BB_then->kind |= block_kind_uniform;
10377 ctx->cf_info.has_branch = false;
10378 ctx->cf_info.parent_loop.has_divergent_branch = false;
10380 /** emit else block */
10381 Block* BB_else = ctx->program->create_and_insert_block();
10382 add_edge(ic->BB_if_idx, BB_else);
10383 append_logical_start(BB_else);
10384 ctx->block = BB_else;
10388 end_uniform_if(isel_context* ctx, if_context* ic)
10390 Block* BB_else = ctx->block;
10392 if (!ctx->cf_info.has_branch) {
10393 append_logical_end(BB_else);
10394 /* branch from then block to endif block */
10395 aco_ptr<Pseudo_branch_instruction> branch;
10396 branch.reset(create_instruction<Pseudo_branch_instruction>(aco_opcode::p_branch,
10397 Format::PSEUDO_BRANCH, 0, 1));
10398 branch->definitions[0] = Definition(ctx->program->allocateTmp(s2));
10399 BB_else->instructions.emplace_back(std::move(branch));
10400 add_linear_edge(BB_else->index, &ic->BB_endif);
10401 if (!ctx->cf_info.parent_loop.has_divergent_branch)
10402 add_logical_edge(BB_else->index, &ic->BB_endif);
10403 BB_else->kind |= block_kind_uniform;
10406 ctx->cf_info.has_branch &= ic->uniform_has_then_branch;
10407 ctx->cf_info.parent_loop.has_divergent_branch &= ic->then_branch_divergent;
10409 /** emit endif merge block */
10410 ctx->program->next_uniform_if_depth--;
10411 if (!ctx->cf_info.has_branch) {
10412 ctx->block = ctx->program->insert_block(std::move(ic->BB_endif));
10413 append_logical_start(ctx->block);
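/* Returns true iff the code after the if is reachable, i.e. neither side
 * ended in a uniform branch and the merge block still has logical
 * predecessors; visit_cf_list() uses a false return to stop emitting the
 * remainder of the list. */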
10418 visit_if(isel_context* ctx, nir_if* if_stmt)
10420 Temp cond = get_ssa_temp(ctx, if_stmt->condition.ssa);
10421 Builder bld(ctx->program, ctx->block);
10422 aco_ptr<Pseudo_branch_instruction> branch;
10425 if (!nir_src_is_divergent(if_stmt->condition)) { /* uniform condition */
10427 * Uniform conditionals are represented in the following way*) :
10429 * The linear and logical CFG:
10432 * BB_THEN (logical) BB_ELSE (logical)
10436 * *) Exceptions may be due to break and continue statements within loops
10437 * If a break/continue happens within uniform control flow, it branches
10438 * to the loop exit/entry block. Otherwise, it branches to the next
10442 assert(cond.regClass() == ctx->program->lane_mask);
10443 cond = bool_to_scalar_condition(ctx, cond);
10445 begin_uniform_if_then(ctx, &ic, cond);
10446 visit_cf_list(ctx, &if_stmt->then_list);
10448 begin_uniform_if_else(ctx, &ic);
10449 visit_cf_list(ctx, &if_stmt->else_list);
10451 end_uniform_if(ctx, &ic);
10452 } else { /* non-uniform condition */
10454 * To maintain a logical and linear CFG without critical edges,
10455 * non-uniform conditionals are represented in the following way*) :
10460 * BB_THEN (logical) BB_THEN (linear)
10462 * BB_INVERT (linear)
10464 * BB_ELSE (logical) BB_ELSE (linear)
10471 * BB_THEN (logical) BB_ELSE (logical)
10475 * *) Exceptions may be due to break and continue statements within loops
10478 begin_divergent_if_then(ctx, &ic, cond);
10479 visit_cf_list(ctx, &if_stmt->then_list);
10481 begin_divergent_if_else(ctx, &ic);
10482 visit_cf_list(ctx, &if_stmt->else_list);
10484 end_divergent_if(ctx, &ic);
10487 return !ctx->cf_info.has_branch && !ctx->block->logical_preds.empty();
10491 visit_cf_list(isel_context* ctx, struct exec_list* list)
10493 foreach_list_typed (nir_cf_node, node, node, list) {
10494 switch (node->type) {
10495 case nir_cf_node_block: visit_block(ctx, nir_cf_node_as_block(node)); break;
10496 case nir_cf_node_if:
10497 if (!visit_if(ctx, nir_cf_node_as_if(node)))
10500 case nir_cf_node_loop: visit_loop(ctx, nir_cf_node_as_loop(node)); break;
10501 default: unreachable("unimplemented cf list type");
10508 export_vs_varying(isel_context* ctx, int slot, bool is_pos, int* next_pos)
10510 assert(ctx->stage.hw == HWStage::VS || ctx->stage.hw == HWStage::NGG);
10512 const uint8_t *vs_output_param_offset =
10513 ctx->stage.has(SWStage::GS) ? ctx->program->info.vs.outinfo.vs_output_param_offset :
10514 ctx->stage.has(SWStage::TES) ? ctx->program->info.tes.outinfo.vs_output_param_offset :
10515 ctx->stage.has(SWStage::MS) ? ctx->program->info.ms.outinfo.vs_output_param_offset :
10516 ctx->program->info.vs.outinfo.vs_output_param_offset;
10518 assert(vs_output_param_offset);
10520 int offset = vs_output_param_offset[slot];
10521 unsigned mask = ctx->outputs.mask[slot];
10522 if (!is_pos && !mask)
10524 if (!is_pos && offset == AC_EXP_PARAM_UNDEFINED)
10526 aco_ptr<Export_instruction> exp{
10527 create_instruction<Export_instruction>(aco_opcode::exp, Format::EXP, 4, 0)};
10528 exp->enabled_mask = mask;
10529 for (unsigned i = 0; i < 4; ++i) {
10530 if (mask & (1 << i))
10531 exp->operands[i] = Operand(ctx->outputs.temps[slot * 4u + i]);
10533 exp->operands[i] = Operand(v1);
10535 /* GFX10 (Navi1x) skips POS0 exports if EXEC=0 and DONE=0, causing a hang.
10536 * Setting valid_mask=1 prevents it and has no other effect.
10538 exp->valid_mask = ctx->options->chip_class == GFX10 && is_pos && *next_pos == 0;
10540 exp->compressed = false;
10542 exp->dest = V_008DFC_SQ_EXP_POS + (*next_pos)++;
10544 exp->dest = V_008DFC_SQ_EXP_PARAM + offset;
10545 ctx->block->instructions.emplace_back(std::move(exp));
10549 export_vs_psiz_layer_viewport_vrs(isel_context* ctx, int* next_pos,
10550 const aco_vp_output_info* outinfo)
10552 aco_ptr<Export_instruction> exp{
10553 create_instruction<Export_instruction>(aco_opcode::exp, Format::EXP, 4, 0)};
10554 exp->enabled_mask = 0;
10555 for (unsigned i = 0; i < 4; ++i)
10556 exp->operands[i] = Operand(v1);
10557 if (ctx->outputs.mask[VARYING_SLOT_PSIZ]) {
10558 exp->operands[0] = Operand(ctx->outputs.temps[VARYING_SLOT_PSIZ * 4u]);
10559 exp->enabled_mask |= 0x1;
10561 if (ctx->outputs.mask[VARYING_SLOT_LAYER] && !outinfo->writes_layer_per_primitive) {
10562 exp->operands[2] = Operand(ctx->outputs.temps[VARYING_SLOT_LAYER * 4u]);
10563 exp->enabled_mask |= 0x4;
10565 if (ctx->outputs.mask[VARYING_SLOT_VIEWPORT] && !outinfo->writes_viewport_index_per_primitive) {
10566 if (ctx->options->chip_class < GFX9) {
10567 exp->operands[3] = Operand(ctx->outputs.temps[VARYING_SLOT_VIEWPORT * 4u]);
10568 exp->enabled_mask |= 0x8;
10570 Builder bld(ctx->program, ctx->block);
10572 Temp out = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand::c32(16u),
10573 Operand(ctx->outputs.temps[VARYING_SLOT_VIEWPORT * 4u]));
10574 if (exp->operands[2].isTemp())
10575 out = bld.vop2(aco_opcode::v_or_b32, bld.def(v1), Operand(out), exp->operands[2]);
10577 exp->operands[2] = Operand(out);
10578 exp->enabled_mask |= 0x4;
10581 if (ctx->outputs.mask[VARYING_SLOT_PRIMITIVE_SHADING_RATE]) {
10582 exp->operands[1] = Operand(ctx->outputs.temps[VARYING_SLOT_PRIMITIVE_SHADING_RATE * 4u]);
10583 exp->enabled_mask |= 0x2;
10586 exp->valid_mask = ctx->options->chip_class == GFX10 && *next_pos == 0;
10588 exp->compressed = false;
10589 exp->dest = V_008DFC_SQ_EXP_POS + (*next_pos)++;
10590 ctx->block->instructions.emplace_back(std::move(exp));
10594 create_vs_exports(isel_context* ctx)
10596 assert(ctx->stage.hw == HWStage::VS || ctx->stage.hw == HWStage::NGG);
10597 const aco_vp_output_info* outinfo =
10598 ctx->stage.has(SWStage::GS) ? &ctx->program->info.vs.outinfo :
10599 ctx->stage.has(SWStage::TES) ? &ctx->program->info.tes.outinfo :
10600 ctx->stage.has(SWStage::MS) ? &ctx->program->info.ms.outinfo :
10601 &ctx->program->info.vs.outinfo;
10604 ctx->block->kind |= block_kind_export_end;
10606 if (outinfo->export_prim_id && ctx->stage.hw != HWStage::NGG) {
10607 ctx->outputs.mask[VARYING_SLOT_PRIMITIVE_ID] |= 0x1;
10608 if (ctx->stage.has(SWStage::TES))
10609 ctx->outputs.temps[VARYING_SLOT_PRIMITIVE_ID * 4u] =
10610 get_arg(ctx, ctx->args->ac.tes_patch_id);
10612 ctx->outputs.temps[VARYING_SLOT_PRIMITIVE_ID * 4u] =
10613 get_arg(ctx, ctx->args->ac.vs_prim_id);
10616 /* Hardware requires position data to always be exported, even if the
10617 * application did not write gl_Position.
10619 ctx->outputs.mask[VARYING_SLOT_POS] = 0xf;
10621 /* the order in which these position exports are created is important */
10623 export_vs_varying(ctx, VARYING_SLOT_POS, true, &next_pos);
10625 if (outinfo->writes_pointsize || outinfo->writes_layer || outinfo->writes_viewport_index ||
10626 outinfo->writes_primitive_shading_rate) {
10627 export_vs_psiz_layer_viewport_vrs(ctx, &next_pos, outinfo);
10629 if (ctx->num_clip_distances + ctx->num_cull_distances > 0)
10630 export_vs_varying(ctx, VARYING_SLOT_CLIP_DIST0, true, &next_pos);
10631 if (ctx->num_clip_distances + ctx->num_cull_distances > 4)
10632 export_vs_varying(ctx, VARYING_SLOT_CLIP_DIST1, true, &next_pos);
10634 if (ctx->export_clip_dists) {
10635 if (ctx->num_clip_distances + ctx->num_cull_distances > 0)
10636 export_vs_varying(ctx, VARYING_SLOT_CLIP_DIST0, false, &next_pos);
10637 if (ctx->num_clip_distances + ctx->num_cull_distances > 4)
10638 export_vs_varying(ctx, VARYING_SLOT_CLIP_DIST1, false, &next_pos);
10641 for (unsigned i = 0; i <= VARYING_SLOT_VAR31; ++i) {
10642 if (i < VARYING_SLOT_VAR0 && i != VARYING_SLOT_LAYER && i != VARYING_SLOT_PRIMITIVE_ID &&
10643 i != VARYING_SLOT_VIEWPORT)
10645 if (ctx->shader && ctx->shader->info.per_primitive_outputs & BITFIELD64_BIT(i))
10648 export_vs_varying(ctx, i, false, NULL);
10653 create_primitive_exports(isel_context *ctx, Temp prim_ch1)
10655 assert(ctx->stage.hw == HWStage::NGG);
10656 const aco_vp_output_info* outinfo =
10657 ctx->stage.has(SWStage::GS) ? &ctx->program->info.vs.outinfo :
10658 ctx->stage.has(SWStage::TES) ? &ctx->program->info.tes.outinfo :
10659 ctx->stage.has(SWStage::MS) ? &ctx->program->info.ms.outinfo :
10660 &ctx->program->info.vs.outinfo;
10662 Builder bld(ctx->program, ctx->block);
10664 /* Use zeroes if the shader doesn't write these but they are needed by e.g. the PS. */
10665 if (outinfo->writes_layer_per_primitive && !ctx->outputs.mask[VARYING_SLOT_LAYER])
10666 ctx->outputs.temps[VARYING_SLOT_LAYER * 4u] = bld.copy(bld.def(v1), Operand::c32(0));
10667 if (outinfo->writes_viewport_index_per_primitive && !ctx->outputs.mask[VARYING_SLOT_VIEWPORT])
10668 ctx->outputs.temps[VARYING_SLOT_VIEWPORT * 4u] = bld.copy(bld.def(v1), Operand::c32(0));
10669 if (outinfo->export_prim_id_per_primitive && !ctx->outputs.mask[VARYING_SLOT_PRIMITIVE_ID])
10670 ctx->outputs.temps[VARYING_SLOT_PRIMITIVE_ID * 4u] = bld.copy(bld.def(v1), Operand::c32(0));
10672 /* When layer, viewport etc. are per-primitive, they need to be encoded in
10673 * the primitive export instruction's second channel. The encoding is:
10674 * bits 31..30: VRS rate Y
10675 * bits 29..28: VRS rate X
10676 * bits 23..20: viewport
10677 * bits 19..17: layer
10679 Temp ch2 = bld.copy(bld.def(v1), Operand::c32(0));
10682 if (outinfo->writes_layer_per_primitive) {
10684 Temp tmp = ctx->outputs.temps[VARYING_SLOT_LAYER * 4u];
10685 ch2 = bld.vop3(aco_opcode::v_lshl_or_b32, bld.def(v1), tmp, Operand::c32(17), ch2);
10687 if (outinfo->writes_viewport_index_per_primitive) {
10689 Temp tmp = ctx->outputs.temps[VARYING_SLOT_VIEWPORT * 4u];
10690 ch2 = bld.vop3(aco_opcode::v_lshl_or_b32, bld.def(v1), tmp, Operand::c32(20), ch2);
10692 if (outinfo->writes_primitive_shading_rate_per_primitive) {
10694 Temp tmp = ctx->outputs.temps[VARYING_SLOT_PRIMITIVE_SHADING_RATE * 4u];
10695 ch2 = bld.vop2(aco_opcode::v_or_b32, bld.def(v1), tmp, ch2);
10698 Operand prim_ch2 = (en_mask & 2) ? Operand(ch2) : Operand(v1);
10700 bld.exp(aco_opcode::exp, prim_ch1, prim_ch2, Operand(v1), Operand(v1),
10701 en_mask /* enabled mask */, V_008DFC_SQ_EXP_PRIM /* dest */, false /* compressed */,
10702 true /* done */, false /* valid mask */);
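/* Example of the second-channel composition: with layer L, viewport V and
 * the VRS rate bits R already positioned at 31..28, ch2 ends up as
 * R | (V << 20) | (L << 17), matching the encoding described above. */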
10704 /* Export generic per-primitive attributes. */
10705 for (unsigned i = 0; i <= VARYING_SLOT_VAR31; ++i) {
10706 if (!(ctx->shader->info.per_primitive_outputs & BITFIELD64_BIT(i)))
10707 continue;
10708 if (i == VARYING_SLOT_PRIMITIVE_SHADING_RATE)
10709 continue;
10711 export_vs_varying(ctx, i, false, NULL);
10712 }
10716 export_fs_mrt_z(isel_context* ctx)
10718 Builder bld(ctx->program, ctx->block);
10719 unsigned enabled_channels = 0;
10720 bool compr = false;
10721 Operand values[4];
10723 for (unsigned i = 0; i < 4; ++i) {
10724 values[i] = Operand(v1);
10725 }
10727 /* Both stencil and sample mask only need 16-bits. */
10728 if (!ctx->program->info.ps.writes_z &&
10729 (ctx->program->info.ps.writes_stencil || ctx->program->info.ps.writes_sample_mask)) {
10730 compr = true; /* COMPR flag */
10732 if (ctx->program->info.ps.writes_stencil) {
10733 /* Stencil should be in X[23:16]. */
10734 values[0] = Operand(ctx->outputs.temps[FRAG_RESULT_STENCIL * 4u]);
10735 values[0] = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand::c32(16u), values[0]);
10736 enabled_channels |= 0x3;
10739 if (ctx->program->info.ps.writes_sample_mask) {
10740 /* SampleMask should be in Y[15:0]. */
10741 values[1] = Operand(ctx->outputs.temps[FRAG_RESULT_SAMPLE_MASK * 4u]);
10742 enabled_channels |= 0xc;
10743 }
10744 } else {
10745 if (ctx->program->info.ps.writes_z) {
10746 values[0] = Operand(ctx->outputs.temps[FRAG_RESULT_DEPTH * 4u]);
10747 enabled_channels |= 0x1;
10750 if (ctx->program->info.ps.writes_stencil) {
10751 values[1] = Operand(ctx->outputs.temps[FRAG_RESULT_STENCIL * 4u]);
10752 enabled_channels |= 0x2;
10755 if (ctx->program->info.ps.writes_sample_mask) {
10756 values[2] = Operand(ctx->outputs.temps[FRAG_RESULT_SAMPLE_MASK * 4u]);
10757 enabled_channels |= 0x4;
10758 }
10759 }
10761 /* GFX6 (except OLAND and HAINAN) has a bug that it only looks at the X
10762 * writemask component.
10763 */
10764 if (ctx->options->chip_class == GFX6 && ctx->options->family != CHIP_OLAND &&
10765 ctx->options->family != CHIP_HAINAN) {
10766 enabled_channels |= 0x1;
10767 }
10769 bld.exp(aco_opcode::exp, values[0], values[1], values[2], values[3], enabled_channels,
10770 V_008DFC_SQ_EXP_MRTZ, compr);
10772 return true;
10773 }
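/* Summary of the MRTZ layout used above (explanatory note, not from the original source):
 * in the uncompressed form, depth goes to channel X, stencil to Y and the sample mask to Z,
 * each as a full 32-bit value. When only stencil and/or sample mask are written, the COMPR
 * form packs them as 16-bit values (stencil in X[23:16], sample mask in Y[15:0]), which is
 * why enabled_channels then uses two bits per value (0x3 / 0xc) instead of one. */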
10776 export_fs_mrt_color(isel_context* ctx, int slot)
10778 Builder bld(ctx->program, ctx->block);
10779 unsigned write_mask = ctx->outputs.mask[slot];
10781 Operand values[4];
10782 for (unsigned i = 0; i < 4; ++i) {
10783 if (write_mask & (1 << i)) {
10784 values[i] = Operand(ctx->outputs.temps[slot * 4u + i]);
10785 } else {
10786 values[i] = Operand(v1);
10787 }
10788 }
10790 unsigned target, col_format;
10791 unsigned enabled_channels = 0;
10792 bool compr = false;
10794 slot -= FRAG_RESULT_DATA0;
10795 target = V_008DFC_SQ_EXP_MRT + slot;
10796 col_format = (ctx->options->key.ps.col_format >> (4 * slot)) & 0xf;
10798 switch (col_format) {
10799 case V_028714_SPI_SHADER_32_R: enabled_channels = 1; break;
10801 case V_028714_SPI_SHADER_32_GR: enabled_channels = 0x3; break;
10803 case V_028714_SPI_SHADER_32_AR:
10804 if (ctx->options->chip_class >= GFX10) {
10805 /* Special case: on GFX10, the outputs are different for 32_AR */
10806 enabled_channels = 0x3;
10807 values[1] = values[3];
10808 values[3] = Operand(v1);
10809 } else {
10810 enabled_channels = 0x9;
10811 }
10812 break;
10814 case V_028714_SPI_SHADER_FP16_ABGR:
10815 case V_028714_SPI_SHADER_UNORM16_ABGR:
10816 case V_028714_SPI_SHADER_SNORM16_ABGR:
10817 case V_028714_SPI_SHADER_UINT16_ABGR:
10818 case V_028714_SPI_SHADER_SINT16_ABGR:
10819 enabled_channels = util_widen_mask(write_mask, 2);
10823 case V_028714_SPI_SHADER_32_ABGR: enabled_channels = 0xF; break;
10825 case V_028714_SPI_SHADER_ZERO:
10826 default: return false;
10827 }
10830 for (int i = 0; i < 4; i++)
10831 values[i] = enabled_channels & (1 << i) ? values[i] : Operand(v1);
10834 bld.exp(aco_opcode::exp, values[0], values[1], values[2], values[3], enabled_channels, target,
10835 compr);
10837 return true;
10838 }
10840 create_fs_null_export(isel_context* ctx)
10842 /* FS must always have exports.
10843 * So when there are none, we need to add a null export.
10846 Builder bld(ctx->program, ctx->block);
10847 /* GFX11 doesn't support NULL exports, and MRT0 should be exported instead. */
10848 unsigned dest = ctx->options->chip_class >= GFX11 ? V_008DFC_SQ_EXP_MRT : V_008DFC_SQ_EXP_NULL;
10849 bld.exp(aco_opcode::exp, Operand(v1), Operand(v1), Operand(v1), Operand(v1),
10850 /* enabled_mask */ 0, dest, /* compr */ false, /* done */ true, /* vm */ true);
10854 create_fs_exports(isel_context* ctx)
10856 bool exported = false;
10858 /* Export depth, stencil and sample mask. */
10859 if (ctx->outputs.mask[FRAG_RESULT_DEPTH] || ctx->outputs.mask[FRAG_RESULT_STENCIL] ||
10860 ctx->outputs.mask[FRAG_RESULT_SAMPLE_MASK])
10861 exported |= export_fs_mrt_z(ctx);
10863 /* Export all color render targets. */
10864 for (unsigned i = FRAG_RESULT_DATA0; i < FRAG_RESULT_DATA7 + 1; ++i)
10865 if (ctx->outputs.mask[i])
10866 exported |= export_fs_mrt_color(ctx, i);
10868 if (!exported)
10869 create_fs_null_export(ctx);
10871 ctx->block->kind |= block_kind_export_end;
10875 emit_stream_output(isel_context* ctx, Temp const* so_buffers, Temp const* so_write_offset,
10876 const struct aco_stream_output* output)
10878 assert(ctx->stage.hw == HWStage::VS);
10880 unsigned loc = output->location;
10881 unsigned buf = output->buffer;
10883 unsigned writemask = output->component_mask & ctx->outputs.mask[loc];
10884 while (writemask) {
10885 int start, count;
10886 u_bit_scan_consecutive_range(&writemask, &start, &count);
10887 if (count == 3 && ctx->options->chip_class == GFX6) {
10888 /* GFX6 doesn't support storing vec3, split it. */
10889 writemask |= 1u << (start + 2);
10890 count = 2;
10891 }
10893 unsigned offset = output->offset + (start - (ffs(output->component_mask) - 1)) * 4;
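/* Worked example of the offset math above (added for clarity): with component_mask = 0xe
 * (components 1..3 enabled) and a consecutive range starting at component 2,
 * ffs(0xe) - 1 = 1, so the store lands at output->offset + (2 - 1) * 4 bytes, i.e. the
 * buffer offset is always relative to the first component the output actually occupies. */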
10895 Temp write_data = ctx->program->allocateTmp(RegClass(RegType::vgpr, count));
10896 aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(
10897 aco_opcode::p_create_vector, Format::PSEUDO, count, 1)};
10898 for (int i = 0; i < count; ++i)
10899 vec->operands[i] = Operand(ctx->outputs.temps[loc * 4 + start + i]);
10900 vec->definitions[0] = Definition(write_data);
10901 ctx->block->instructions.emplace_back(std::move(vec));
10903 aco_opcode opcode = get_buffer_store_op(count * 4);
10904 aco_ptr<MUBUF_instruction> store{
10905 create_instruction<MUBUF_instruction>(opcode, Format::MUBUF, 4, 0)};
10906 store->operands[0] = Operand(so_buffers[buf]);
10907 store->operands[1] = Operand(so_write_offset[buf]);
10908 store->operands[2] = Operand::c32(0);
10909 store->operands[3] = Operand(write_data);
10910 if (offset > 4095) {
10911 /* Don't think this can happen in RADV, but maybe GL? It's easy to do this anyway. */
10912 Builder bld(ctx->program, ctx->block);
10913 store->operands[1] =
10914 bld.vadd32(bld.def(v1), Operand::c32(offset), Operand(so_write_offset[buf]));
10915 } else {
10916 store->offset = offset;
10917 }
10918 store->offen = true;
10920 store->dlc = false;
10922 ctx->block->instructions.emplace_back(std::move(store));
10927 emit_streamout(isel_context* ctx, unsigned stream)
10929 Builder bld(ctx->program, ctx->block);
10931 Temp so_vtx_count =
10932 bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc),
10933 get_arg(ctx, ctx->args->ac.streamout_config), Operand::c32(0x70010u));
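/* The s_bfe source operand above packs {width, offset} as (width << 16) | offset, so
 * 0x70010 extracts a 7-bit field starting at bit 16 of streamout_config, i.e. the vertex
 * count used just below to decide which lanes are allowed to emit. (Explanatory note.) */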
10935 Temp tid = emit_mbcnt(ctx, bld.tmp(v1));
10937 Temp can_emit = bld.vopc(aco_opcode::v_cmp_gt_i32, bld.def(bld.lm), so_vtx_count, tid);
10939 if_context ic;
10940 begin_divergent_if_then(ctx, &ic, can_emit);
10942 bld.reset(ctx->block);
10944 Temp so_write_index =
10945 bld.vadd32(bld.def(v1), get_arg(ctx, ctx->args->ac.streamout_write_index), tid);
10947 Temp so_buffers[4];
10948 Temp so_write_offset[4];
10949 Temp buf_ptr = convert_pointer_to_64_bit(ctx, get_arg(ctx, ctx->args->streamout_buffers));
10951 for (unsigned i = 0; i < 4; i++) {
10952 unsigned stride = ctx->program->info.so.strides[i];
10953 if (!stride)
10954 continue;
10956 so_buffers[i] = bld.smem(aco_opcode::s_load_dwordx4, bld.def(s4), buf_ptr,
10957 bld.copy(bld.def(s1), Operand::c32(i * 16u)));
10959 if (stride == 1) {
10960 Temp offset = bld.sop2(aco_opcode::s_add_i32, bld.def(s1), bld.def(s1, scc),
10961 get_arg(ctx, ctx->args->ac.streamout_write_index),
10962 get_arg(ctx, ctx->args->ac.streamout_offset[i]));
10963 Temp new_offset = bld.vadd32(bld.def(v1), offset, tid);
10965 so_write_offset[i] =
10966 bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand::c32(2u), new_offset);
10967 } else {
10968 Temp offset = bld.v_mul_imm(bld.def(v1), so_write_index, stride * 4u);
10969 Temp offset2 = bld.sop2(aco_opcode::s_mul_i32, bld.def(s1), Operand::c32(4u),
10970 get_arg(ctx, ctx->args->ac.streamout_offset[i]));
10971 so_write_offset[i] = bld.vadd32(bld.def(v1), offset, offset2);
10972 }
10973 }
10975 for (unsigned i = 0; i < ctx->program->info.so.num_outputs; i++) {
10976 const struct aco_stream_output* output = &ctx->program->info.so.outputs[i];
10977 if (stream != output->stream)
10980 emit_stream_output(ctx, so_buffers, so_write_offset, output);
10983 begin_divergent_if_else(ctx, &ic);
10984 end_divergent_if(ctx, &ic);
10987 Pseudo_instruction*
10988 add_startpgm(struct isel_context* ctx)
10990 unsigned def_count = 0;
10991 for (unsigned i = 0; i < ctx->args->ac.arg_count; i++) {
10992 if (ctx->args->ac.args[i].skip)
10993 continue;
10994 unsigned align = MIN2(4, util_next_power_of_two(ctx->args->ac.args[i].size));
10995 if (ctx->args->ac.args[i].file == AC_ARG_SGPR && ctx->args->ac.args[i].offset % align)
10996 def_count += ctx->args->ac.args[i].size;
10997 else
10998 def_count++;
10999 }
11001 Pseudo_instruction* startpgm =
11002 create_instruction<Pseudo_instruction>(aco_opcode::p_startpgm, Format::PSEUDO, 0, def_count);
11003 ctx->block->instructions.emplace_back(startpgm);
11004 for (unsigned i = 0, arg = 0; i < ctx->args->ac.arg_count; i++) {
11005 if (ctx->args->ac.args[i].skip)
11006 continue;
11008 enum ac_arg_regfile file = ctx->args->ac.args[i].file;
11009 unsigned size = ctx->args->ac.args[i].size;
11010 unsigned reg = ctx->args->ac.args[i].offset;
11011 RegClass type = RegClass(file == AC_ARG_SGPR ? RegType::sgpr : RegType::vgpr, size);
11013 if (file == AC_ARG_SGPR && reg % MIN2(4, util_next_power_of_two(size))) {
11014 Temp elems[16];
11015 for (unsigned j = 0; j < size; j++) {
11016 elems[j] = ctx->program->allocateTmp(s1);
11017 startpgm->definitions[arg++] = Definition(elems[j].id(), PhysReg{reg + j}, s1);
11018 }
11019 ctx->arg_temps[i] = create_vec_from_array(ctx, elems, size, RegType::sgpr, 4);
11020 } else {
11021 Temp dst = ctx->program->allocateTmp(type);
11022 ctx->arg_temps[i] = dst;
11023 startpgm->definitions[arg] = Definition(dst);
11024 startpgm->definitions[arg].setFixed(PhysReg{file == AC_ARG_SGPR ? reg : reg + 256});
11025 arg++;
11026 }
11027 }
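/* Presumably the reason for the split path above: a multi-dword SGPR definition has to be
 * fixed to a naturally aligned physical register, so an argument that starts at an
 * unaligned SGPR is defined one dword at a time and re-assembled into a vector temporary
 * with create_vec_from_array(). */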
11029 /* Stash these in the program so that they can be accessed later when
11030 * handling spilling.
11031 */
11032 ctx->program->private_segment_buffer = get_arg(ctx, ctx->args->ring_offsets);
11033 ctx->program->scratch_offset = get_arg(ctx, ctx->args->ac.scratch_offset);
11035 if (ctx->stage.has(SWStage::VS) && ctx->program->info.vs.dynamic_inputs) {
11036 unsigned num_attributes = util_last_bit(ctx->program->info.vs.vb_desc_usage_mask);
11037 for (unsigned i = 0; i < num_attributes; i++) {
11038 Definition def(get_arg(ctx, ctx->args->vs_inputs[i]));
11040 unsigned idx = ctx->args->vs_inputs[i].arg_index;
11041 def.setFixed(PhysReg(256 + ctx->args->ac.args[idx].offset));
11043 ctx->program->vs_inputs.push_back(def);
11051 fix_ls_vgpr_init_bug(isel_context* ctx, Pseudo_instruction* startpgm)
11053 assert(ctx->shader->info.stage == MESA_SHADER_VERTEX);
11054 Builder bld(ctx->program, ctx->block);
11055 constexpr unsigned hs_idx = 1u;
11056 Builder::Result hs_thread_count = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc),
11057 get_arg(ctx, ctx->args->ac.merged_wave_info),
11058 Operand::c32((8u << 16) | (hs_idx * 8u)));
11059 Temp ls_has_nonzero_hs_threads = bool_to_vector_condition(ctx, hs_thread_count.def(1).getTemp());
11061 /* If there are no HS threads, SPI mistakenly loads the LS VGPRs starting at VGPR 0. */
11063 Temp instance_id =
11064 bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), get_arg(ctx, ctx->args->ac.vertex_id),
11065 get_arg(ctx, ctx->args->ac.instance_id), ls_has_nonzero_hs_threads);
11066 Temp vs_rel_patch_id =
11067 bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), get_arg(ctx, ctx->args->ac.tcs_rel_ids),
11068 get_arg(ctx, ctx->args->ac.vs_rel_patch_id), ls_has_nonzero_hs_threads);
11069 Temp vertex_id =
11070 bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), get_arg(ctx, ctx->args->ac.tcs_patch_id),
11071 get_arg(ctx, ctx->args->ac.vertex_id), ls_has_nonzero_hs_threads);
11073 ctx->arg_temps[ctx->args->ac.instance_id.arg_index] = instance_id;
11074 ctx->arg_temps[ctx->args->ac.vs_rel_patch_id.arg_index] = vs_rel_patch_id;
11075 ctx->arg_temps[ctx->args->ac.vertex_id.arg_index] = vertex_id;
11079 split_arguments(isel_context* ctx, Pseudo_instruction* startpgm)
11081 /* Split all arguments except for the first (ring_offsets) and the last
11082 * (exec) so that the dead channels don't stay live throughout the program.
11083 */
11084 for (int i = 1; i < startpgm->definitions.size(); i++) {
11085 if (startpgm->definitions[i].regClass().size() > 1) {
11086 emit_split_vector(ctx, startpgm->definitions[i].getTemp(),
11087 startpgm->definitions[i].regClass().size());
11093 handle_bc_optimize(isel_context* ctx)
11095 /* needed when SPI_PS_IN_CONTROL.BC_OPTIMIZE_DISABLE is set to 0 */
11096 Builder bld(ctx->program, ctx->block);
11097 uint32_t spi_ps_input_ena = ctx->program->config->spi_ps_input_ena;
11098 bool uses_center =
11099 G_0286CC_PERSP_CENTER_ENA(spi_ps_input_ena) || G_0286CC_LINEAR_CENTER_ENA(spi_ps_input_ena);
11100 bool uses_persp_centroid = G_0286CC_PERSP_CENTROID_ENA(spi_ps_input_ena);
11101 bool uses_linear_centroid = G_0286CC_LINEAR_CENTROID_ENA(spi_ps_input_ena);
11103 if (uses_persp_centroid)
11104 ctx->persp_centroid = get_arg(ctx, ctx->args->ac.persp_centroid);
11105 if (uses_linear_centroid)
11106 ctx->linear_centroid = get_arg(ctx, ctx->args->ac.linear_centroid);
11108 if (uses_center && (uses_persp_centroid || uses_linear_centroid)) {
11109 Temp sel = bld.vopc_e64(aco_opcode::v_cmp_lt_i32, bld.def(bld.lm),
11110 get_arg(ctx, ctx->args->ac.prim_mask), Operand::zero());
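/* With BC_OPTIMIZE enabled, the hardware can skip the centroid computation when all samples
 * of a pixel are covered and flags this by setting bit 31 of prim_mask; for those pixels
 * the center barycentrics are substituted for the centroid ones below (sel = prim_mask < 0).
 * (Explanatory note based on the surrounding code.) */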
11112 if (uses_persp_centroid) {
11113 Temp new_coord[2];
11114 for (unsigned i = 0; i < 2; i++) {
11115 Temp persp_centroid =
11116 emit_extract_vector(ctx, get_arg(ctx, ctx->args->ac.persp_centroid), i, v1);
11117 Temp persp_center =
11118 emit_extract_vector(ctx, get_arg(ctx, ctx->args->ac.persp_center), i, v1);
11119 new_coord[i] =
11120 bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), persp_centroid, persp_center, sel);
11121 }
11122 ctx->persp_centroid = bld.tmp(v2);
11123 bld.pseudo(aco_opcode::p_create_vector, Definition(ctx->persp_centroid),
11124 Operand(new_coord[0]), Operand(new_coord[1]));
11125 emit_split_vector(ctx, ctx->persp_centroid, 2);
11128 if (uses_linear_centroid) {
11129 Temp new_coord[2];
11130 for (unsigned i = 0; i < 2; i++) {
11131 Temp linear_centroid =
11132 emit_extract_vector(ctx, get_arg(ctx, ctx->args->ac.linear_centroid), i, v1);
11133 Temp linear_center =
11134 emit_extract_vector(ctx, get_arg(ctx, ctx->args->ac.linear_center), i, v1);
11135 new_coord[i] = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), linear_centroid,
11136 linear_center, sel);
11138 ctx->linear_centroid = bld.tmp(v2);
11139 bld.pseudo(aco_opcode::p_create_vector, Definition(ctx->linear_centroid),
11140 Operand(new_coord[0]), Operand(new_coord[1]));
11141 emit_split_vector(ctx, ctx->linear_centroid, 2);
11147 setup_fp_mode(isel_context* ctx, nir_shader* shader)
11149 Program* program = ctx->program;
11151 unsigned float_controls = shader->info.float_controls_execution_mode;
11153 program->next_fp_mode.preserve_signed_zero_inf_nan32 =
11154 float_controls & FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP32;
11155 program->next_fp_mode.preserve_signed_zero_inf_nan16_64 =
11156 float_controls & (FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP16 |
11157 FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP64);
11159 program->next_fp_mode.must_flush_denorms32 =
11160 float_controls & FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP32;
11161 program->next_fp_mode.must_flush_denorms16_64 =
11162 float_controls &
11163 (FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP16 | FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP64);
11165 program->next_fp_mode.care_about_round32 =
11166 float_controls &
11167 (FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP32 | FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP32);
11169 program->next_fp_mode.care_about_round16_64 =
11170 float_controls &
11171 (FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP16 | FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP64 |
11172 FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP16 | FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP64);
11174 /* default to preserving fp16 and fp64 denorms, since it's free for fp64 and
11175 * the precision seems needed for Wolfenstein: Youngblood to render correctly */
11176 if (program->next_fp_mode.must_flush_denorms16_64)
11177 program->next_fp_mode.denorm16_64 = 0;
11178 else
11179 program->next_fp_mode.denorm16_64 = fp_denorm_keep;
11181 /* preserving fp32 denorms is expensive, so only do it if asked */
11182 if (float_controls & FLOAT_CONTROLS_DENORM_PRESERVE_FP32)
11183 program->next_fp_mode.denorm32 = fp_denorm_keep;
11184 else
11185 program->next_fp_mode.denorm32 = 0;
11187 if (float_controls & FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP32)
11188 program->next_fp_mode.round32 = fp_round_tz;
11189 else
11190 program->next_fp_mode.round32 = fp_round_ne;
11192 if (float_controls &
11193 (FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP16 | FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP64))
11194 program->next_fp_mode.round16_64 = fp_round_tz;
11195 else
11196 program->next_fp_mode.round16_64 = fp_round_ne;
11198 ctx->block->fp_mode = program->next_fp_mode;
11202 cleanup_cfg(Program* program)
11204 /* create linear_succs/logical_succs */
11205 for (Block& BB : program->blocks) {
11206 for (unsigned idx : BB.linear_preds)
11207 program->blocks[idx].linear_succs.emplace_back(BB.index);
11208 for (unsigned idx : BB.logical_preds)
11209 program->blocks[idx].logical_succs.emplace_back(BB.index);
11214 lanecount_to_mask(isel_context* ctx, Temp count, bool allow64 = true)
11216 assert(count.regClass() == s1);
11218 Builder bld(ctx->program, ctx->block);
11219 Temp mask = bld.sop2(aco_opcode::s_bfm_b64, bld.def(s2), count, Operand::zero());
11220 Temp cond;
11222 if (ctx->program->wave_size == 64) {
11223 /* If we know that all 64 threads can't be active at a time, we just use the mask as-is */
11224 if (!allow64)
11225 return mask;
11227 /* Special case for 64 active invocations, because 64 doesn't work with s_bfm */
11228 Temp active_64 = bld.sopc(aco_opcode::s_bitcmp1_b32, bld.def(s1, scc), count,
11229 Operand::c32(6u /* log2(64) */));
11230 cond =
11231 bld.sop2(Builder::s_cselect, bld.def(bld.lm), Operand::c32(-1u), mask, bld.scc(active_64));
11232 } else {
11233 /* We use s_bfm_b64 (not _b32) which works with 32, but we need to extract the lower half of
11234 * the register */
11235 cond = emit_extract_vector(ctx, mask, 0, bld.lm);
11236 }
11238 return cond;
11239 }
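/* s_bfm builds the mask ((1 << count) - 1), e.g. count = 5 -> 0x1f, so it converts a
 * live-lane count directly into an exec-style mask. The shift amount wraps at the register
 * width, so count = 64 would yield 0 instead of all ones; that is what the
 * s_bitcmp1/s_cselect special case above handles for wave64. (Explanatory note.) */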
11242 merged_wave_info_to_mask(isel_context* ctx, unsigned i)
11244 Builder bld(ctx->program, ctx->block);
11246 /* lanecount_to_mask() only cares about s0.u[6:0] so we don't need either s_bfe nor s_and here */
11247 Temp count = i == 0
11248 ? get_arg(ctx, ctx->args->ac.merged_wave_info)
11249 : bld.sop2(aco_opcode::s_lshr_b32, bld.def(s1), bld.def(s1, scc),
11250 get_arg(ctx, ctx->args->ac.merged_wave_info), Operand::c32(i * 8u));
11252 return lanecount_to_mask(ctx, count);
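/* merged_wave_info packs one byte per merged stage: byte i holds the live-lane count of
 * stage i (illustration: 0x0520 means 32 lanes for stage 0 and 5 lanes for stage 1).
 * Stage 0 therefore needs no shift, and for i > 0 a right shift by i*8 is enough because
 * lanecount_to_mask() only looks at the low 7 bits. */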
11256 ngg_emit_sendmsg_gs_alloc_req(isel_context* ctx, Temp vtx_cnt, Temp prm_cnt)
11258 assert(vtx_cnt.id() && prm_cnt.id());
11260 Builder bld(ctx->program, ctx->block);
11262 Temp prm_cnt_0;
11263 if (ctx->program->chip_class == GFX10 &&
11264 (ctx->stage.has(SWStage::GS) || ctx->program->info.has_ngg_culling)) {
11265 /* Navi 1x workaround: check whether the workgroup has no output.
11266 * If so, change the number of exported vertices and primitives to 1.
11267 */
11268 prm_cnt_0 = bld.sopc(aco_opcode::s_cmp_eq_u32, bld.def(s1, scc), prm_cnt, Operand::zero());
11269 prm_cnt = bld.sop2(aco_opcode::s_cselect_b32, bld.def(s1), Operand::c32(1u), prm_cnt,
11270 bld.scc(prm_cnt_0));
11271 vtx_cnt = bld.sop2(aco_opcode::s_cselect_b32, bld.def(s1), Operand::c32(1u), vtx_cnt,
11272 bld.scc(prm_cnt_0));
11275 /* Put the number of vertices and primitives into m0 for the GS_ALLOC_REQ */
11276 Temp tmp =
11277 bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.def(s1, scc), prm_cnt, Operand::c32(12u));
11278 tmp = bld.sop2(aco_opcode::s_or_b32, bld.m0(bld.def(s1)), bld.def(s1, scc), tmp, vtx_cnt);
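/* m0 layout expected by the GS_ALLOC_REQ message: the vertex count sits in the low bits and
 * the primitive count starts at bit 12. Example (added for clarity): vtx_cnt = 9 and
 * prm_cnt = 3 give m0 = (3 << 12) | 9 = 0x3009. */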
11280 /* Request the SPI to allocate space for the primitives and vertices
11281 * that will be exported by the threadgroup.
11283 bld.sopp(aco_opcode::s_sendmsg, bld.m0(tmp), -1, sendmsg_gs_alloc_req);
11285 if (prm_cnt_0.id()) {
11286 /* Navi 1x workaround: export a triangle with NaN coordinates when NGG has no output.
11287 * It can't have all-zero positions because that would render an undesired pixel with
11288 * conservative rasterization.
11289 */
11290 Temp first_lane = bld.sop1(Builder::s_ff1_i32, bld.def(s1), Operand(exec, bld.lm));
11291 Temp cond = bld.sop2(Builder::s_lshl, bld.def(bld.lm), bld.def(s1, scc),
11292 Operand::c32_or_c64(1u, ctx->program->wave_size == 64), first_lane);
11293 cond = bld.sop2(Builder::s_cselect, bld.def(bld.lm), cond,
11294 Operand::zero(ctx->program->wave_size == 64 ? 8 : 4), bld.scc(prm_cnt_0));
11296 if_context ic_prim_0;
11297 begin_divergent_if_then(ctx, &ic_prim_0, cond);
11298 bld.reset(ctx->block);
11299 ctx->block->kind |= block_kind_export_end;
11301 /* Use zero: means that it's a triangle whose every vertex index is 0. */
11302 Temp zero = bld.copy(bld.def(v1), Operand::zero());
11303 /* Use NaN for the coordinates, so that the rasterizer allways culls it. */
11304 Temp nan_coord = bld.copy(bld.def(v1), Operand::c32(-1u));
11306 bld.exp(aco_opcode::exp, zero, Operand(v1), Operand(v1), Operand(v1), 1 /* enabled mask */,
11307 V_008DFC_SQ_EXP_PRIM /* dest */, false /* compressed */, true /* done */,
11308 false /* valid mask */);
11309 bld.exp(aco_opcode::exp, nan_coord, nan_coord, nan_coord, nan_coord, 0xf /* enabled mask */,
11310 V_008DFC_SQ_EXP_POS /* dest */, false /* compressed */, true /* done */,
11311 true /* valid mask */);
11313 begin_divergent_if_else(ctx, &ic_prim_0);
11314 end_divergent_if(ctx, &ic_prim_0);
11315 bld.reset(ctx->block);
11319 } /* end namespace */
11322 select_program(Program* program, unsigned shader_count, struct nir_shader* const* shaders,
11323 ac_shader_config* config, const struct radv_nir_compiler_options* options,
11324 const struct aco_shader_info* info,
11325 const struct radv_shader_args* args)
11327 isel_context ctx = setup_isel_context(program, shader_count, shaders, config, options, info, args, false);
11328 if_context ic_merged_wave_info;
11329 bool ngg_gs = ctx.stage.hw == HWStage::NGG && ctx.stage.has(SWStage::GS);
11331 for (unsigned i = 0; i < shader_count; i++) {
11332 nir_shader* nir = shaders[i];
11333 init_context(&ctx, nir);
11335 setup_fp_mode(&ctx, nir);
11338 /* needs to be after init_context() for FS */
11339 Pseudo_instruction* startpgm = add_startpgm(&ctx);
11340 append_logical_start(ctx.block);
11342 if (unlikely(ctx.options->has_ls_vgpr_init_bug && ctx.stage == vertex_tess_control_hs))
11343 fix_ls_vgpr_init_bug(&ctx, startpgm);
11345 split_arguments(&ctx, startpgm);
11347 if (!info->vs.has_prolog &&
11348 (program->stage.has(SWStage::VS) || program->stage.has(SWStage::TES))) {
11349 Builder(ctx.program, ctx.block).sopp(aco_opcode::s_setprio, -1u, 0x3u);
11353 /* In a merged VS+TCS HS, the VS implementation can be completely empty. */
11354 nir_function_impl* func = nir_shader_get_entrypoint(nir);
11355 bool empty_shader =
11356 nir_cf_list_is_empty_block(&func->body) &&
11357 ((nir->info.stage == MESA_SHADER_VERTEX &&
11358 (ctx.stage == vertex_tess_control_hs || ctx.stage == vertex_geometry_gs)) ||
11359 (nir->info.stage == MESA_SHADER_TESS_EVAL && ctx.stage == tess_eval_geometry_gs));
11361 bool check_merged_wave_info =
11362 ctx.tcs_in_out_eq ? i == 0 : (shader_count >= 2 && !empty_shader && !(ngg_gs && i == 1));
11363 bool endif_merged_wave_info =
11364 ctx.tcs_in_out_eq ? i == 1 : (check_merged_wave_info && !(ngg_gs && i == 1));
11366 if (program->chip_class == GFX10 && program->stage.hw == HWStage::NGG &&
11367 program->stage.num_sw_stages() == 1) {
11368 /* Workaround for Navi1x HW bug to ensure that all NGG waves launch before
11369 * s_sendmsg(GS_ALLOC_REQ). */
11370 Builder(ctx.program, ctx.block).sopp(aco_opcode::s_barrier, -1u, 0u);
11373 if (check_merged_wave_info) {
11374 Temp cond = merged_wave_info_to_mask(&ctx, i);
11375 begin_divergent_if_then(&ctx, &ic_merged_wave_info, cond);
11379 Builder bld(ctx.program, ctx.block);
11381 /* Skip s_barrier from TCS when VS outputs are not stored in the LDS. */
11382 bool tcs_skip_barrier = ctx.stage == vertex_tess_control_hs &&
11383 ctx.tcs_temp_only_inputs == nir->info.inputs_read;
11385 if (!ngg_gs && !tcs_skip_barrier) {
11386 sync_scope scope =
11387 ctx.stage == vertex_tess_control_hs &&
11388 program->wave_size % ctx.options->key.tcs.tess_input_vertices == 0
11389 ? scope_subgroup
11390 : scope_workgroup;
11391 bld.barrier(aco_opcode::p_barrier,
11392 memory_sync_info(storage_shared, semantic_acqrel, scope), scope);
11393 }
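/* Scope choice above: when the wave size is a multiple of tess_input_vertices, all input
 * vertices of a patch are produced by lanes of the same wave, so a subgroup-scope barrier
 * is enough to order the LDS stores; otherwise a patch can span waves and a workgroup
 * barrier is required. (Explanatory note.) */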
11395 if (ctx.stage == vertex_geometry_gs || ctx.stage == tess_eval_geometry_gs) {
11396 ctx.gs_wave_id = bld.pseudo(aco_opcode::p_extract, bld.def(s1, m0), bld.def(s1, scc),
11397 get_arg(&ctx, args->ac.merged_wave_info), Operand::c32(2u),
11398 Operand::c32(8u), Operand::zero());
11400 } else if (ctx.stage == geometry_gs)
11401 ctx.gs_wave_id = get_arg(&ctx, args->ac.gs_wave_id);
11403 if (ctx.stage == fragment_fs)
11404 handle_bc_optimize(&ctx);
11406 visit_cf_list(&ctx, &func->body);
11408 if (ctx.program->info.so.num_outputs && ctx.stage.hw == HWStage::VS)
11409 emit_streamout(&ctx, 0);
11411 if (ctx.stage.hw == HWStage::VS) {
11412 create_vs_exports(&ctx);
11413 } else if (nir->info.stage == MESA_SHADER_GEOMETRY && !ngg_gs) {
11414 Builder bld(ctx.program, ctx.block);
11415 bld.barrier(aco_opcode::p_barrier,
11416 memory_sync_info(storage_vmem_output, semantic_release, scope_device));
11417 bld.sopp(aco_opcode::s_sendmsg, bld.m0(ctx.gs_wave_id), -1,
11418 sendmsg_gs_done(false, false, 0));
11421 if (ctx.stage == fragment_fs) {
11422 create_fs_exports(&ctx);
11425 if (endif_merged_wave_info) {
11426 begin_divergent_if_else(&ctx, &ic_merged_wave_info);
11427 end_divergent_if(&ctx, &ic_merged_wave_info);
11430 if (i == 0 && ctx.stage == vertex_tess_control_hs && ctx.tcs_in_out_eq) {
11431 /* Outputs of the previous stage are inputs to the next stage */
11432 ctx.inputs = ctx.outputs;
11433 ctx.outputs = shader_io_state();
11436 cleanup_context(&ctx);
11437 }
11439 program->config->float_mode = program->blocks[0].fp_mode.val;
11441 append_logical_end(ctx.block);
11442 ctx.block->kind |= block_kind_uniform;
11443 Builder bld(ctx.program, ctx.block);
11444 bld.sopp(aco_opcode::s_endpgm);
11446 cleanup_cfg(program);
11450 select_gs_copy_shader(Program* program, struct nir_shader* gs_shader, ac_shader_config* config,
11451 const struct radv_nir_compiler_options* options,
11452 const struct aco_shader_info* info,
11453 const struct radv_shader_args* args)
11455 isel_context ctx = setup_isel_context(program, 1, &gs_shader, config, options, info, args, true);
11457 ctx.block->fp_mode = program->next_fp_mode;
11459 add_startpgm(&ctx);
11460 append_logical_start(ctx.block);
11462 Builder bld(ctx.program, ctx.block);
11464 Temp gsvs_ring = bld.smem(aco_opcode::s_load_dwordx4, bld.def(s4),
11465 program->private_segment_buffer, Operand::c32(RING_GSVS_VS * 16u));
11467 Operand stream_id = Operand::zero();
11468 if (program->info.so.num_outputs)
11469 stream_id = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc),
11470 get_arg(&ctx, ctx.args->ac.streamout_config), Operand::c32(0x20018u));
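/* 0x20018 encodes {width = 2, offset = 24} for s_bfe, so stream_id is the 2-bit stream
 * selector stored at bit 24 of streamout_config; without streamout it stays the constant
 * zero set above and the copy shader only emits stream 0. (Explanatory note.) */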
11472 Temp vtx_offset = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand::c32(2u),
11473 get_arg(&ctx, ctx.args->ac.vertex_id));
11475 std::stack<if_context, std::vector<if_context>> if_contexts;
11477 for (unsigned stream = 0; stream < 4; stream++) {
11478 if (stream_id.isConstant() && stream != stream_id.constantValue())
11479 continue;
11481 unsigned num_components = program->info.gs.num_stream_output_components[stream];
11482 if (stream > 0 && (!num_components || !program->info.so.num_outputs))
11483 continue;
11485 memset(ctx.outputs.mask, 0, sizeof(ctx.outputs.mask));
11487 if (!stream_id.isConstant()) {
11488 Temp cond =
11489 bld.sopc(aco_opcode::s_cmp_eq_u32, bld.def(s1, scc), stream_id, Operand::c32(stream));
11490 if_contexts.emplace();
11491 begin_uniform_if_then(&ctx, &if_contexts.top(), cond);
11492 bld.reset(ctx.block);
11495 unsigned offset = 0;
11496 for (unsigned i = 0; i <= VARYING_SLOT_VAR31; ++i) {
11497 if (program->info.gs.output_streams[i] != stream)
11498 continue;
11500 unsigned output_usage_mask = program->info.gs.output_usage_mask[i];
11501 unsigned length = util_last_bit(output_usage_mask);
11502 for (unsigned j = 0; j < length; ++j) {
11503 if (!(output_usage_mask & (1 << j)))
11504 continue;
11506 Temp val = bld.tmp(v1);
11507 unsigned const_offset = offset * program->info.gs.vertices_out * 16 * 4;
11508 load_vmem_mubuf(&ctx, val, gsvs_ring, vtx_offset, Temp(), const_offset, 4, 1, 0u, true,
11511 ctx.outputs.mask[i] |= 1 << j;
11512 ctx.outputs.temps[i * 4u + j] = val;
11514 offset++;
11515 }
11516 }
11518 if (program->info.so.num_outputs) {
11519 emit_streamout(&ctx, stream);
11520 bld.reset(ctx.block);
11524 create_vs_exports(&ctx);
11527 if (!stream_id.isConstant()) {
11528 begin_uniform_if_else(&ctx, &if_contexts.top());
11529 bld.reset(ctx.block);
11533 while (!if_contexts.empty()) {
11534 end_uniform_if(&ctx, &if_contexts.top());
11535 if_contexts.pop();
11536 }
11538 program->config->float_mode = program->blocks[0].fp_mode.val;
11540 append_logical_end(ctx.block);
11541 ctx.block->kind |= block_kind_uniform;
11542 bld.reset(ctx.block);
11543 bld.sopp(aco_opcode::s_endpgm);
11545 cleanup_cfg(program);
11549 select_trap_handler_shader(Program* program, struct nir_shader* shader, ac_shader_config* config,
11550 const struct radv_nir_compiler_options* options,
11551 const struct aco_shader_info* info,
11552 const struct radv_shader_args* args)
11554 assert(options->chip_class == GFX8);
11556 init_program(program, compute_cs, info, options->chip_class,
11557 options->family, options->wgp_mode, config);
11559 isel_context ctx = {};
11560 ctx.program = program;
11562 ctx.options = options;
11563 ctx.stage = program->stage;
11565 ctx.block = ctx.program->create_and_insert_block();
11566 ctx.block->kind = block_kind_top_level;
11568 program->workgroup_size = 1; /* XXX */
11570 add_startpgm(&ctx);
11571 append_logical_start(ctx.block);
11573 Builder bld(ctx.program, ctx.block);
11575 /* Load the buffer descriptor from TMA. */
11576 bld.smem(aco_opcode::s_load_dwordx4, Definition(PhysReg{ttmp4}, s4), Operand(PhysReg{tma}, s2),
11577 Operand::zero());
11579 /* Store TTMP0-TTMP1. */
11580 bld.smem(aco_opcode::s_buffer_store_dwordx2, Operand(PhysReg{ttmp4}, s4), Operand::zero(),
11581 Operand(PhysReg{ttmp0}, s2), memory_sync_info(), true);
11583 uint32_t hw_regs_idx[] = {
11584 2, /* HW_REG_STATUS */
11585 3, /* HW_REG_TRAP_STS */
11586 4, /* HW_REG_HW_ID */
11587 7, /* HW_REG_IB_STS */
11590 /* Store some hardware registers. */
11591 for (unsigned i = 0; i < ARRAY_SIZE(hw_regs_idx); i++) {
11592 /* "((size - 1) << 11) | register" */
11593 bld.sopk(aco_opcode::s_getreg_b32, Definition(PhysReg{ttmp8}, s1),
11594 ((20 - 1) << 11) | hw_regs_idx[i]);
11596 bld.smem(aco_opcode::s_buffer_store_dword, Operand(PhysReg{ttmp4}, s4),
11597 Operand::c32(8u + i * 4), Operand(PhysReg{ttmp8}, s1), memory_sync_info(), true);
11600 program->config->float_mode = program->blocks[0].fp_mode.val;
11602 append_logical_end(ctx.block);
11603 ctx.block->kind |= block_kind_uniform;
11604 bld.sopp(aco_opcode::s_endpgm);
11606 cleanup_cfg(program);
11610 get_arg_fixed(const struct radv_shader_args* args, struct ac_arg arg)
11614 enum ac_arg_regfile file = args->ac.args[arg.arg_index].file;
11615 unsigned size = args->ac.args[arg.arg_index].size;
11616 unsigned reg = args->ac.args[arg.arg_index].offset;
11618 return Operand(PhysReg(file == AC_ARG_SGPR ? reg : reg + 256),
11619 RegClass(file == AC_ARG_SGPR ? RegType::sgpr : RegType::vgpr, size));
11623 load_vb_descs(Builder& bld, PhysReg dest, Operand base, unsigned start, unsigned max)
11625 unsigned count = MIN2((bld.program->dev.sgpr_limit - dest.reg()) / 4u, max);
11627 unsigned num_loads = (count / 4u) + util_bitcount(count & 0x3);
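/* Descriptors are 16 bytes each and are fetched in power-of-two groups of at most four,
 * using s_load_dwordx16/x8/x4. Hence count / 4 full 16-dword loads plus one extra load per
 * set bit in (count & 3); e.g. count = 7 descriptors -> 1 + popcount(3) = 3 loads.
 * (Worked example added for clarity.) */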
11628 if (bld.program->chip_class >= GFX10 && num_loads > 1)
11629 bld.sopp(aco_opcode::s_clause, -1, num_loads - 1);
11631 for (unsigned i = 0; i < count;) {
11632 unsigned size = 1u << util_logbase2(MIN2(count - i, 4));
11634 if (size == 4)
11635 bld.smem(aco_opcode::s_load_dwordx16, Definition(dest, s16), base,
11636 Operand::c32((start + i) * 16u));
11637 else if (size == 2)
11638 bld.smem(aco_opcode::s_load_dwordx8, Definition(dest, s8), base,
11639 Operand::c32((start + i) * 16u));
11641 bld.smem(aco_opcode::s_load_dwordx4, Definition(dest, s4), base,
11642 Operand::c32((start + i) * 16u));
11644 dest = dest.advance(size * 16u);
11645 i += size;
11646 }
11648 return count;
11652 calc_nontrivial_instance_id(Builder& bld, const struct radv_shader_args* args, unsigned index,
11653 Operand instance_id, Operand start_instance, PhysReg tmp_sgpr,
11654 PhysReg tmp_vgpr0, PhysReg tmp_vgpr1)
11656 bld.smem(aco_opcode::s_load_dwordx2, Definition(tmp_sgpr, s2),
11657 get_arg_fixed(args, args->prolog_inputs), Operand::c32(8u + index * 8u));
11659 wait_imm lgkm_imm;
11660 lgkm_imm.lgkm = 0;
11661 bld.sopp(aco_opcode::s_waitcnt, -1, lgkm_imm.pack(bld.program->chip_class));
11663 Definition fetch_index_def(tmp_vgpr0, v1);
11664 Operand fetch_index(tmp_vgpr0, v1);
11666 Operand div_info(tmp_sgpr, s1);
11667 if (bld.program->chip_class >= GFX8) {
11669 if (bld.program->chip_class < GFX9) {
11670 bld.vop1(aco_opcode::v_mov_b32, Definition(tmp_vgpr1, v1), div_info);
11671 div_info = Operand(tmp_vgpr1, v1);
11674 bld.vop2(aco_opcode::v_lshrrev_b32, fetch_index_def, div_info, instance_id);
11676 Instruction* instr;
11677 if (bld.program->chip_class >= GFX9)
11678 instr = bld.vop2_sdwa(aco_opcode::v_add_u32, fetch_index_def, div_info, fetch_index).instr;
11679 else
11680 instr = bld.vop2_sdwa(aco_opcode::v_add_co_u32, fetch_index_def, Definition(vcc, bld.lm),
11681 div_info, fetch_index)
11682 .instr;
11683 instr->sdwa().sel[0] = SubdwordSel::ubyte1;
11685 bld.vop3(aco_opcode::v_mul_hi_u32, fetch_index_def, Operand(tmp_sgpr.advance(4), s1),
11686 fetch_index);
11688 instr =
11689 bld.vop2_sdwa(aco_opcode::v_lshrrev_b32, fetch_index_def, div_info, fetch_index).instr;
11690 instr->sdwa().sel[0] = SubdwordSel::ubyte2;
11691 } else {
11692 Operand tmp_op(tmp_vgpr1, v1);
11693 Definition tmp_def(tmp_vgpr1, v1);
11695 bld.vop2(aco_opcode::v_lshrrev_b32, fetch_index_def, div_info, instance_id);
11697 bld.vop3(aco_opcode::v_bfe_u32, tmp_def, div_info, Operand::c32(8u), Operand::c32(8u));
11698 bld.vadd32(fetch_index_def, tmp_op, fetch_index, false, Operand(s2), true);
11700 bld.vop3(aco_opcode::v_mul_hi_u32, fetch_index_def, fetch_index,
11701 Operand(tmp_sgpr.advance(4), s1));
11703 bld.vop3(aco_opcode::v_bfe_u32, tmp_def, div_info, Operand::c32(16u), Operand::c32(8u));
11704 bld.vop2(aco_opcode::v_lshrrev_b32, fetch_index_def, tmp_op, fetch_index);
11707 bld.vadd32(fetch_index_def, start_instance, fetch_index, false, Operand(s2), true);
11709 return fetch_index;
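/* Rough sketch of the computation above (summary, not from the original source): for each
 * attribute with a non-trivial divisor, the prolog input buffer provides two dwords,
 *   div_info   (byte 0: pre-shift, byte 1: increment, byte 2: post-shift)
 *   multiplier (32-bit magic value)
 * and the per-attribute index is computed roughly as
 *   (((instance_id >> pre_shift) + increment) * multiplier >> 32) >> post_shift,
 * i.e. the usual multiply-high division-by-constant scheme, followed by adding
 * start_instance. On GFX8+ the byte extractions use SDWA selects instead of v_bfe_u32. */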
11713 select_vs_prolog(Program* program, const struct aco_vs_prolog_key* key, ac_shader_config* config,
11714 const struct radv_nir_compiler_options* options,
11715 const struct aco_shader_info* info,
11716 const struct radv_shader_args* args, unsigned* num_preserved_sgprs)
11718 assert(key->num_attributes > 0);
11720 /* This should be enough for any shader/stage. */
11721 unsigned max_user_sgprs = options->chip_class >= GFX9 ? 32 : 16;
11722 *num_preserved_sgprs = max_user_sgprs + 14;
11724 init_program(program, compute_cs, info, options->chip_class,
11725 options->family, options->wgp_mode, config);
11727 Block* block = program->create_and_insert_block();
11728 block->kind = block_kind_top_level;
11730 program->workgroup_size = 64;
11731 calc_min_waves(program);
11733 Builder bld(program, block);
11735 block->instructions.reserve(16 + key->num_attributes * 4);
11737 bld.sopp(aco_opcode::s_setprio, -1u, 0x3u);
11739 uint32_t attrib_mask = BITFIELD_MASK(key->num_attributes);
11740 bool has_nontrivial_divisors = key->state.nontrivial_divisors & attrib_mask;
11742 wait_imm lgkm_imm;
11743 lgkm_imm.lgkm = 0;
11746 PhysReg vertex_buffers(align(*num_preserved_sgprs, 2));
11747 PhysReg prolog_input = vertex_buffers.advance(8);
11748 PhysReg desc(
11749 align((has_nontrivial_divisors ? prolog_input : vertex_buffers).advance(8).reg(), 4));
11751 Operand start_instance = get_arg_fixed(args, args->ac.start_instance);
11752 Operand instance_id = get_arg_fixed(args, args->ac.instance_id);
11754 PhysReg attributes_start(256 + args->ac.num_vgprs_used);
11755 /* choose vgprs that won't be used for anything else until the last attribute load */
11756 PhysReg vertex_index(attributes_start.reg() + key->num_attributes * 4 - 1);
11757 PhysReg instance_index(attributes_start.reg() + key->num_attributes * 4 - 2);
11758 PhysReg start_instance_vgpr(attributes_start.reg() + key->num_attributes * 4 - 3);
11759 PhysReg nontrivial_tmp_vgpr0(attributes_start.reg() + key->num_attributes * 4 - 4);
11760 PhysReg nontrivial_tmp_vgpr1(attributes_start.reg() + key->num_attributes * 4);
11762 bld.sop1(aco_opcode::s_mov_b32, Definition(vertex_buffers, s1),
11763 get_arg_fixed(args, args->ac.vertex_buffers));
11764 if (options->address32_hi >= 0xffff8000 || options->address32_hi <= 0x7fff) {
11765 bld.sopk(aco_opcode::s_movk_i32, Definition(vertex_buffers.advance(4), s1),
11766 options->address32_hi & 0xFFFF);
11767 } else {
11768 bld.sop1(aco_opcode::s_mov_b32, Definition(vertex_buffers.advance(4), s1),
11769 Operand::c32((unsigned)options->address32_hi));
11770 }
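/* The vertex_buffers user SGPR only carries the low 32 bits of the descriptor-array
 * address; the high half is the compile-time constant address32_hi. s_movk_i32 is enough
 * when that constant survives sign-extension from 16 bits (hence the 0xffff8000/0x7fff
 * range check above), otherwise a full s_mov_b32 is emitted. (Explanatory note.) */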
11772 /* calculate vgpr requirements */
11773 unsigned num_vgprs = attributes_start.reg() - 256;
11774 num_vgprs += key->num_attributes * 4;
11775 if (has_nontrivial_divisors && program->chip_class <= GFX8)
11776 num_vgprs++; /* make space for nontrivial_tmp_vgpr1 */
11777 unsigned num_sgprs = 0;
11779 for (unsigned loc = 0; loc < key->num_attributes;) {
11780 unsigned num_descs =
11781 load_vb_descs(bld, desc, Operand(vertex_buffers, s2), loc, key->num_attributes - loc);
11782 num_sgprs = MAX2(num_sgprs, desc.advance(num_descs * 16u).reg());
11785 /* perform setup while we load the descriptors */
11786 if (key->is_ngg || key->next_stage != MESA_SHADER_VERTEX) {
11787 Operand count = get_arg_fixed(args, args->ac.merged_wave_info);
11788 bld.sop2(aco_opcode::s_bfm_b64, Definition(exec, s2), count, Operand::c32(0u));
11789 if (program->wave_size == 64) {
11790 bld.sopc(aco_opcode::s_bitcmp1_b32, Definition(scc, s1), count,
11791 Operand::c32(6u /* log2(64) */));
11792 bld.sop2(aco_opcode::s_cselect_b64, Definition(exec, s2), Operand::c64(UINT64_MAX),
11793 Operand(exec, s2), Operand(scc, s1));
11797 bool needs_instance_index = false;
11798 bool needs_start_instance = false;
11799 u_foreach_bit(i, key->state.instance_rate_inputs & attrib_mask)
11801 needs_instance_index |= key->state.divisors[i] == 1;
11802 needs_start_instance |= key->state.divisors[i] == 0;
11804 bool needs_vertex_index = ~key->state.instance_rate_inputs & attrib_mask;
11805 if (needs_vertex_index)
11806 bld.vadd32(Definition(vertex_index, v1), get_arg_fixed(args, args->ac.base_vertex),
11807 get_arg_fixed(args, args->ac.vertex_id), false, Operand(s2), true);
11808 if (needs_instance_index)
11809 bld.vadd32(Definition(instance_index, v1), start_instance, instance_id, false,
11810 Operand(s2), true);
11811 if (needs_start_instance)
11812 bld.vop1(aco_opcode::v_mov_b32, Definition(start_instance_vgpr, v1), start_instance);
11815 bld.sopp(aco_opcode::s_waitcnt, -1, lgkm_imm.pack(program->chip_class));
11817 for (unsigned i = 0; i < num_descs; i++, loc++) {
11818 PhysReg dest(attributes_start.reg() + loc * 4u);
11820 /* calculate index */
11821 Operand fetch_index = Operand(vertex_index, v1);
11822 if (key->state.instance_rate_inputs & (1u << loc)) {
11823 uint32_t divisor = key->state.divisors[loc];
11824 if (divisor) {
11825 fetch_index = instance_id;
11826 if (key->state.nontrivial_divisors & (1u << loc)) {
11827 unsigned index =
11828 util_bitcount(key->state.nontrivial_divisors & BITFIELD_MASK(loc));
11829 fetch_index = calc_nontrivial_instance_id(
11830 bld, args, index, instance_id, start_instance, prolog_input,
11831 nontrivial_tmp_vgpr0, nontrivial_tmp_vgpr1);
11832 } else {
11833 fetch_index = Operand(instance_index, v1);
11834 }
11835 } else {
11836 fetch_index = Operand(start_instance_vgpr, v1);
11837 }
11838 }
11841 PhysReg cur_desc = desc.advance(i * 16);
11842 if ((key->misaligned_mask & (1u << loc))) {
11843 unsigned dfmt = key->state.formats[loc] & 0xf;
11844 unsigned nfmt = key->state.formats[loc] >> 4;
11845 const struct ac_data_format_info* vtx_info = ac_get_data_format_info(dfmt);
11846 for (unsigned j = 0; j < vtx_info->num_channels; j++) {
11847 bool post_shuffle = key->state.post_shuffle & (1u << loc);
11848 unsigned offset = vtx_info->chan_byte_size * (post_shuffle && j < 3 ? 2 - j : j);
11850 /* Use MUBUF to workaround hangs for byte-aligned dword loads. The Vulkan spec
11851 * doesn't require this to work, but some GL CTS tests over Zink do this anyway.
11852 * MTBUF can hang, but MUBUF doesn't (probably gives garbage, but GL CTS doesn't
11853 * care). */
11855 if (vtx_info->chan_format == V_008F0C_BUF_DATA_FORMAT_32)
11856 bld.mubuf(aco_opcode::buffer_load_dword, Definition(dest.advance(j * 4u), v1),
11857 Operand(cur_desc, s4), fetch_index, Operand::c32(0u), offset, false,
11860 bld.mtbuf(aco_opcode::tbuffer_load_format_x, Definition(dest.advance(j * 4u), v1),
11861 Operand(cur_desc, s4), fetch_index, Operand::c32(0u),
11862 vtx_info->chan_format, nfmt, offset, false, true);
11864 uint32_t one =
11865 nfmt == V_008F0C_BUF_NUM_FORMAT_UINT || nfmt == V_008F0C_BUF_NUM_FORMAT_SINT
11866 ? 1u
11867 : 0x3f800000u;
11868 for (unsigned j = vtx_info->num_channels; j < 4; j++) {
11869 bld.vop1(aco_opcode::v_mov_b32, Definition(dest.advance(j * 4u), v1),
11870 Operand::c32(j == 3 ? one : 0u));
11871 }
11872 } else {
11873 bld.mubuf(aco_opcode::buffer_load_format_xyzw, Definition(dest, v4),
11874 Operand(cur_desc, s4), fetch_index, Operand::c32(0u), 0u, false, false, true);
11879 if (key->state.alpha_adjust_lo | key->state.alpha_adjust_hi) {
11880 wait_imm vm_imm;
11881 vm_imm.vm = 0;
11882 bld.sopp(aco_opcode::s_waitcnt, -1, vm_imm.pack(program->chip_class));
11885 /* For 2_10_10_10 formats the alpha is handled as unsigned by pre-vega HW.
11886 * so we may need to fix it up. */
11887 u_foreach_bit(loc, (key->state.alpha_adjust_lo | key->state.alpha_adjust_hi))
11889 PhysReg alpha(attributes_start.reg() + loc * 4u + 3);
11891 unsigned alpha_adjust = (key->state.alpha_adjust_lo >> loc) & 0x1;
11892 alpha_adjust |= ((key->state.alpha_adjust_hi >> loc) & 0x1) << 1;
11894 if (alpha_adjust == ALPHA_ADJUST_SSCALED)
11895 bld.vop1(aco_opcode::v_cvt_u32_f32, Definition(alpha, v1), Operand(alpha, v1));
11897 /* For the integer-like cases, do a natural sign extension.
11899 * For the SNORM case, the values are 0.0, 0.333, 0.666, 1.0
11900 * and happen to contain 0, 1, 2, 3 as the two LSBs of the
11901 * significand.
11902 */
11903 unsigned offset = alpha_adjust == ALPHA_ADJUST_SNORM ? 23u : 0u;
11904 bld.vop3(aco_opcode::v_bfe_i32, Definition(alpha, v1), Operand(alpha, v1),
11905 Operand::c32(offset), Operand::c32(2u));
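/* Worked example for the SNORM trick above (added for clarity): an encoded alpha of 0b10 is
 * returned by the hardware as 2/3 = 0x3f2aaaab, whose bits [24:23] are 0b10 again;
 * v_bfe_i32 at offset 23, width 2 sign-extends that to -2, and the following
 * v_cvt_f32_i32 + v_max_f32(-1.0) sequence turns it into the clamped SNORM value -1.0. */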
11907 /* Convert back to the right type. */
11908 if (alpha_adjust == ALPHA_ADJUST_SNORM) {
11909 bld.vop1(aco_opcode::v_cvt_f32_i32, Definition(alpha, v1), Operand(alpha, v1));
11910 bld.vop2(aco_opcode::v_max_f32, Definition(alpha, v1), Operand::c32(0xbf800000u),
11911 Operand(alpha, v1));
11912 } else if (alpha_adjust == ALPHA_ADJUST_SSCALED) {
11913 bld.vop1(aco_opcode::v_cvt_f32_i32, Definition(alpha, v1), Operand(alpha, v1));
11917 block->kind |= block_kind_uniform;
11919 /* continue on to the main shader */
11920 Operand continue_pc = get_arg_fixed(args, args->prolog_inputs);
11921 if (has_nontrivial_divisors) {
11922 bld.smem(aco_opcode::s_load_dwordx2, Definition(prolog_input, s2),
11923 get_arg_fixed(args, args->prolog_inputs), Operand::c32(0u));
11924 bld.sopp(aco_opcode::s_waitcnt, -1, lgkm_imm.pack(program->chip_class));
11925 continue_pc = Operand(prolog_input, s2);
11928 bld.sop1(aco_opcode::s_setpc_b64, continue_pc);
11930 program->config->float_mode = program->blocks[0].fp_mode.val;
11931 /* addition on GFX6-8 requires a carry-out (we use VCC) */
11932 program->needs_vcc = program->chip_class <= GFX8;
11933 program->config->num_vgprs = get_vgpr_alloc(program, num_vgprs);
11934 program->config->num_sgprs = get_sgpr_alloc(program, num_sgprs);