defm V_ALIGNBIT_B32 : VOP3Inst <"v_alignbit_b32", VOP3_Profile<VOP_I32_I32_I32_I32>, fshr>;
defm V_ALIGNBYTE_B32 : VOP3Inst <"v_alignbyte_b32", VOP3_Profile<VOP_I32_I32_I32_I32>, int_amdgcn_alignbyte>;
-let mayRaiseFPException = 0 in { // XXX - Seems suspect but manual doesn't say it does
-defm V_MIN3_F32 : VOP3Inst <"v_min3_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, AMDGPUfmin3>;
-defm V_MIN3_I32 : VOP3Inst <"v_min3_i32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUsmin3>;
-defm V_MIN3_U32 : VOP3Inst <"v_min3_u32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUumin3>;
-defm V_MAX3_F32 : VOP3Inst <"v_max3_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, AMDGPUfmax3>;
-defm V_MAX3_I32 : VOP3Inst <"v_max3_i32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUsmax3>;
-defm V_MAX3_U32 : VOP3Inst <"v_max3_u32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUumax3>;
-defm V_MED3_F32 : VOP3Inst <"v_med3_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, AMDGPUfmed3>;
-defm V_MED3_I32 : VOP3Inst <"v_med3_i32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUsmed3>;
-defm V_MED3_U32 : VOP3Inst <"v_med3_u32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUumed3>;
-} // End mayRaiseFPException = 0
+// XXX - The absence of mayRaiseFPException here seems suspect, but the manual doesn't say these raise FP exceptions
+let mayRaiseFPException = 0, isCommutable = 1 in {
+ defm V_MIN3_I32 : VOP3Inst <"v_min3_i32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUsmin3>;
+ defm V_MIN3_U32 : VOP3Inst <"v_min3_u32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUumin3>;
+ defm V_MIN3_F32 : VOP3Inst <"v_min3_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, AMDGPUfmin3>;
+ defm V_MAX3_I32 : VOP3Inst <"v_max3_i32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUsmax3>;
+ defm V_MAX3_U32 : VOP3Inst <"v_max3_u32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUumax3>;
+ defm V_MAX3_F32 : VOP3Inst <"v_max3_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, AMDGPUfmax3>;
+ defm V_MED3_I32 : VOP3Inst <"v_med3_i32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUsmed3>;
+ defm V_MED3_U32 : VOP3Inst <"v_med3_u32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUumed3>;
+ defm V_MED3_F32 : VOP3Inst <"v_med3_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, AMDGPUfmed3>;
+} // End mayRaiseFPException = 0, isCommutable = 1
-defm V_SAD_U8 : VOP3Inst <"v_sad_u8", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_CLAMP>>;
-defm V_SAD_HI_U8 : VOP3Inst <"v_sad_hi_u8", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_CLAMP>>;
-defm V_SAD_U16 : VOP3Inst <"v_sad_u16", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_CLAMP>>;
-defm V_SAD_U32 : VOP3Inst <"v_sad_u32", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_CLAMP>>;
+let isCommutable = 1 in {
+ defm V_SAD_U8 : VOP3Inst <"v_sad_u8", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_CLAMP>>;
+ defm V_SAD_HI_U8 : VOP3Inst <"v_sad_hi_u8", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_CLAMP>>;
+ defm V_SAD_U16 : VOP3Inst <"v_sad_u16", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_CLAMP>>;
+ defm V_SAD_U32 : VOP3Inst <"v_sad_u32", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_CLAMP>>;
+} // End isCommutable = 1
defm V_CVT_PK_U8_F32 : VOP3Inst<"v_cvt_pk_u8_f32", VOP3_Profile<VOP_I32_F32_I32_I32>, int_amdgcn_cvt_pk_u8_f32>;
defm V_DIV_FIXUP_F32 : VOP3Inst <"v_div_fixup_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, AMDGPUdiv_fixup>;
}
let SubtargetPredicate = isGFX9Plus in {
+let isCommutable = 1 in {
+ defm V_ADD3_U32 : VOP3Inst <"v_add3_u32", VOP3_Profile<VOP_I32_I32_I32_I32>>;
+ defm V_AND_OR_B32 : VOP3Inst <"v_and_or_b32", VOP3_Profile<VOP_I32_I32_I32_I32>>;
+ defm V_OR3_B32 : VOP3Inst <"v_or3_b32", VOP3_Profile<VOP_I32_I32_I32_I32>>;
+ defm V_XAD_U32 : VOP3Inst <"v_xad_u32", VOP3_Profile<VOP_I32_I32_I32_I32>>;
+ defm V_MED3_I16 : VOP3Inst <"v_med3_i16", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_OPSEL>, AMDGPUsmed3>;
+ defm V_MED3_U16 : VOP3Inst <"v_med3_u16", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_OPSEL>, AMDGPUumed3>;
+ defm V_MED3_F16 : VOP3Inst <"v_med3_f16", VOP3_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL>, AMDGPUfmed3>;
+ defm V_MIN3_I16 : VOP3Inst <"v_min3_i16", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_OPSEL>, AMDGPUsmin3>;
+ defm V_MIN3_U16 : VOP3Inst <"v_min3_u16", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_OPSEL>, AMDGPUumin3>;
+ defm V_MIN3_F16 : VOP3Inst <"v_min3_f16", VOP3_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL>, AMDGPUfmin3>;
+ defm V_MAX3_I16 : VOP3Inst <"v_max3_i16", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_OPSEL>, AMDGPUsmax3>;
+ defm V_MAX3_U16 : VOP3Inst <"v_max3_u16", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_OPSEL>, AMDGPUumax3>;
+ defm V_MAX3_F16 : VOP3Inst <"v_max3_f16", VOP3_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL>, AMDGPUfmax3>;
+ defm V_ADD_I16 : VOP3Inst <"v_add_i16", VOP3_Profile<VOP_I16_I16_I16, VOP3_OPSEL>>;
+ defm V_MAD_U32_U16 : VOP3Inst <"v_mad_u32_u16", VOP3_Profile<VOP_I32_I16_I16_I32, VOP3_OPSEL>>;
+ defm V_MAD_I32_I16 : VOP3Inst <"v_mad_i32_i16", VOP3_Profile<VOP_I32_I16_I16_I32, VOP3_OPSEL>>;
+ defm V_ADD_I32 : VOP3Inst <"v_add_i32", VOP3_Profile<VOP_I32_I32_I32_ARITH>>;
+ defm V_ADD_LSHL_U32 : VOP3Inst <"v_add_lshl_u32", VOP3_Profile<VOP_I32_I32_I32_I32>>;
+} // End isCommutable = 1
+defm V_SUB_I16 : VOP3Inst <"v_sub_i16", VOP3_Profile<VOP_I16_I16_I16, VOP3_OPSEL>>;
+defm V_SUB_I32 : VOP3Inst <"v_sub_i32", VOP3_Profile<VOP_I32_I32_I32_ARITH>>;
defm V_PACK_B32_F16 : VOP3Inst <"v_pack_b32_f16", VOP3_Profile<VOP_B32_F16_F16, VOP3_OPSEL>>;
defm V_LSHL_ADD_U32 : VOP3Inst <"v_lshl_add_u32", VOP3_Profile<VOP_I32_I32_I32_I32>>;
-defm V_ADD_LSHL_U32 : VOP3Inst <"v_add_lshl_u32", VOP3_Profile<VOP_I32_I32_I32_I32>>;
-defm V_ADD3_U32 : VOP3Inst <"v_add3_u32", VOP3_Profile<VOP_I32_I32_I32_I32>>;
defm V_LSHL_OR_B32 : VOP3Inst <"v_lshl_or_b32", VOP3_Profile<VOP_I32_I32_I32_I32>>;
-defm V_AND_OR_B32 : VOP3Inst <"v_and_or_b32", VOP3_Profile<VOP_I32_I32_I32_I32>>;
-defm V_OR3_B32 : VOP3Inst <"v_or3_b32", VOP3_Profile<VOP_I32_I32_I32_I32>>;
-
-defm V_XAD_U32 : VOP3Inst <"v_xad_u32", VOP3_Profile<VOP_I32_I32_I32_I32>>;
-defm V_MED3_F16 : VOP3Inst <"v_med3_f16", VOP3_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL>, AMDGPUfmed3>;
-defm V_MED3_I16 : VOP3Inst <"v_med3_i16", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_OPSEL>, AMDGPUsmed3>;
-defm V_MED3_U16 : VOP3Inst <"v_med3_u16", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_OPSEL>, AMDGPUumed3>;
-
-defm V_MIN3_F16 : VOP3Inst <"v_min3_f16", VOP3_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL>, AMDGPUfmin3>;
-defm V_MIN3_I16 : VOP3Inst <"v_min3_i16", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_OPSEL>, AMDGPUsmin3>;
-defm V_MIN3_U16 : VOP3Inst <"v_min3_u16", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_OPSEL>, AMDGPUumin3>;
-
-defm V_MAX3_F16 : VOP3Inst <"v_max3_f16", VOP3_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL>, AMDGPUfmax3>;
-defm V_MAX3_I16 : VOP3Inst <"v_max3_i16", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_OPSEL>, AMDGPUsmax3>;
-defm V_MAX3_U16 : VOP3Inst <"v_max3_u16", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_OPSEL>, AMDGPUumax3>;
-
-defm V_ADD_I16 : VOP3Inst <"v_add_i16", VOP3_Profile<VOP_I16_I16_I16, VOP3_OPSEL>>;
-defm V_SUB_I16 : VOP3Inst <"v_sub_i16", VOP3_Profile<VOP_I16_I16_I16, VOP3_OPSEL>>;
-
-defm V_MAD_U32_U16 : VOP3Inst <"v_mad_u32_u16", VOP3_Profile<VOP_I32_I16_I16_I32, VOP3_OPSEL>>;
-defm V_MAD_I32_I16 : VOP3Inst <"v_mad_i32_i16", VOP3_Profile<VOP_I32_I16_I16_I32, VOP3_OPSEL>>;
defm V_CVT_PKNORM_I16_F16 : VOP3Inst <"v_cvt_pknorm_i16_f16", VOP3_Profile<VOP_B32_F16_F16, VOP3_OPSEL>>;
defm V_CVT_PKNORM_U16_F16 : VOP3Inst <"v_cvt_pknorm_u16_f16", VOP3_Profile<VOP_B32_F16_F16, VOP3_OPSEL>>;
-defm V_ADD_I32 : VOP3Inst <"v_add_i32", VOP3_Profile<VOP_I32_I32_I32_ARITH>>;
-defm V_SUB_I32 : VOP3Inst <"v_sub_i32", VOP3_Profile<VOP_I32_I32_I32_ARITH>>;
-
-
class ThreeOp_i32_Pats <SDPatternOperator op1, SDPatternOperator op2, Instruction inst> : GCNPat <
// This matches (op2 (op1 i32:$src0, i32:$src1), i32:$src2) with conditions.
(ThreeOpFrag<op1, op2> i32:$src0, i32:$src1, i32:$src2),
let SubtargetPredicate = isGFX10Plus in {
- defm V_XOR3_B32 : VOP3Inst <"v_xor3_b32", VOP3_Profile<VOP_I32_I32_I32_I32>>;
+ let isCommutable = 1 in {
+ defm V_XOR3_B32 : VOP3Inst <"v_xor3_b32", VOP3_Profile<VOP_I32_I32_I32_I32>>;
+ } // End isCommutable = 1
def : ThreeOp_i32_Pats<xor, xor, V_XOR3_B32_e64>;
let Constraints = "$vdst = $vdst_in", DisableEncoding="$vdst_in" in {
;
; GFX10-LABEL: add_shl_vgpr_const_inline_const:
; GFX10: ; %bb.0:
-; GFX10-NEXT: v_add_lshl_u32 v0, v0, 0x3f4, 9
+; GFX10-NEXT: v_add_lshl_u32 v0, 0x3f4, v0, 9
; GFX10-NEXT: ; return to shader part epilog
%x = add i32 %a, 1012
%result = shl i32 %x, 9
; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, 16, v3
; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; GFX10-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc_lo
-; GFX10-NEXT: v_and_or_b32 v2, v3, 0xffff, v2
+; GFX10-NEXT: v_and_or_b32 v2, 0xffff, v3, v2
; GFX10-NEXT: v_pk_sub_i16 v3, 16, v2 op_sel_hi:[0,1]
; GFX10-NEXT: v_pk_lshlrev_b16 v0, v2, v0
; GFX10-NEXT: v_pk_lshrrev_b16 v1, v3, v1
; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, 16, v3
; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; GFX10-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc_lo
-; GFX10-NEXT: v_and_or_b32 v2, v3, 0xffff, v2
+; GFX10-NEXT: v_and_or_b32 v2, 0xffff, v3, v2
; GFX10-NEXT: v_pk_sub_i16 v3, 16, v2 op_sel_hi:[0,1]
; GFX10-NEXT: v_pk_lshrrev_b16 v1, v2, v1
; GFX10-NEXT: v_pk_lshlrev_b16 v0, v3, v0
; GFX10-NEXT: s_mov_b32 s1, s3
; GFX10-NEXT: s_mov_b32 s2, s4
; GFX10-NEXT: s_mov_b32 s3, s5
-; GFX10-NEXT: v_and_or_b32 v1, v1, 0xffff, v2
+; GFX10-NEXT: v_and_or_b32 v1, 0xffff, v1, v2
; GFX10-NEXT: s_mov_b32 s4, s6
; GFX10-NEXT: s_mov_b32 s5, s7
; GFX10-NEXT: s_mov_b32 s6, s8
; GFX10-NEXT: s_mov_b32 s1, s3
; GFX10-NEXT: s_mov_b32 s2, s4
; GFX10-NEXT: s_mov_b32 s3, s5
-; GFX10-NEXT: v_and_or_b32 v1, v1, 0xffff, v2
+; GFX10-NEXT: v_and_or_b32 v1, 0xffff, v1, v2
; GFX10-NEXT: s_mov_b32 s4, s6
; GFX10-NEXT: s_mov_b32 s5, s7
; GFX10-NEXT: s_mov_b32 s6, s8
; GFX10-NEXT: s_mov_b32 s1, s3
; GFX10-NEXT: s_mov_b32 s2, s4
; GFX10-NEXT: s_mov_b32 s3, s5
-; GFX10-NEXT: v_and_or_b32 v2, v2, 0xffff, v3
+; GFX10-NEXT: v_and_or_b32 v2, 0xffff, v2, v3
; GFX10-NEXT: s_mov_b32 s4, s6
; GFX10-NEXT: s_mov_b32 s5, s7
; GFX10-NEXT: s_mov_b32 s6, s8
; GFX10-NEXT: s_mov_b32 s1, s3
; GFX10-NEXT: s_mov_b32 s2, s4
; GFX10-NEXT: s_mov_b32 s3, s5
-; GFX10-NEXT: v_and_or_b32 v2, v2, 0xffff, v3
+; GFX10-NEXT: v_and_or_b32 v2, 0xffff, v2, v3
; GFX10-NEXT: s_mov_b32 s4, s6
; GFX10-NEXT: s_mov_b32 s5, s7
; GFX10-NEXT: s_mov_b32 s6, s8
; GFX10NSA-NEXT: s_mov_b32 s9, s11
; GFX10NSA-NEXT: s_mov_b32 s10, s12
; GFX10NSA-NEXT: s_mov_b32 s11, s13
-; GFX10NSA-NEXT: v_and_or_b32 v0, v0, 0xffff, v1
+; GFX10NSA-NEXT: v_and_or_b32 v0, 0xffff, v0, v1
; GFX10NSA-NEXT: s_and_b32 exec_lo, exec_lo, s28
; GFX10NSA-NEXT: image_gather4 v[0:3], v0, s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D a16
; GFX10NSA-NEXT: s_waitcnt vmcnt(0)
; GFX10NSA-NEXT: s_mov_b32 s9, s11
; GFX10NSA-NEXT: s_mov_b32 s10, s12
; GFX10NSA-NEXT: s_mov_b32 s11, s13
-; GFX10NSA-NEXT: v_and_or_b32 v1, v1, 0xffff, v2
+; GFX10NSA-NEXT: v_and_or_b32 v1, 0xffff, v1, v2
; GFX10NSA-NEXT: s_and_b32 exec_lo, exec_lo, s28
; GFX10NSA-NEXT: image_gather4_c v[0:3], v[0:1], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D a16
; GFX10NSA-NEXT: s_waitcnt vmcnt(0)
; GFX10NSA-NEXT: s_mov_b32 s9, s11
; GFX10NSA-NEXT: s_mov_b32 s10, s12
; GFX10NSA-NEXT: s_mov_b32 s11, s13
-; GFX10NSA-NEXT: v_and_or_b32 v1, v1, 0xffff, v2
+; GFX10NSA-NEXT: v_and_or_b32 v1, 0xffff, v1, v2
; GFX10NSA-NEXT: s_and_b32 exec_lo, exec_lo, s28
; GFX10NSA-NEXT: image_gather4_b v[0:3], v[0:1], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D a16
; GFX10NSA-NEXT: s_waitcnt vmcnt(0)
; GFX10NSA-NEXT: s_mov_b32 s9, s11
; GFX10NSA-NEXT: s_mov_b32 s10, s12
; GFX10NSA-NEXT: s_mov_b32 s11, s13
-; GFX10NSA-NEXT: v_and_or_b32 v2, v2, 0xffff, v3
+; GFX10NSA-NEXT: v_and_or_b32 v2, 0xffff, v2, v3
; GFX10NSA-NEXT: s_and_b32 exec_lo, exec_lo, s28
; GFX10NSA-NEXT: image_gather4_c_b v[0:3], v[0:2], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D a16
; GFX10NSA-NEXT: s_waitcnt vmcnt(0)
; GFX10NSA-NEXT: s_mov_b32 s1, s3
; GFX10NSA-NEXT: s_mov_b32 s2, s4
; GFX10NSA-NEXT: s_mov_b32 s3, s5
-; GFX10NSA-NEXT: v_and_or_b32 v0, v0, 0xffff, v1
+; GFX10NSA-NEXT: v_and_or_b32 v0, 0xffff, v0, v1
; GFX10NSA-NEXT: s_mov_b32 s4, s6
; GFX10NSA-NEXT: s_mov_b32 s5, s7
; GFX10NSA-NEXT: s_mov_b32 s6, s8
; GFX10NSA-NEXT: s_mov_b32 s1, s3
; GFX10NSA-NEXT: s_mov_b32 s2, s4
; GFX10NSA-NEXT: s_mov_b32 s3, s5
-; GFX10NSA-NEXT: v_and_or_b32 v1, v1, 0xffff, v2
+; GFX10NSA-NEXT: v_and_or_b32 v1, 0xffff, v1, v2
; GFX10NSA-NEXT: s_mov_b32 s4, s6
; GFX10NSA-NEXT: s_mov_b32 s5, s7
; GFX10NSA-NEXT: s_mov_b32 s6, s8
; GFX10-NEXT: s_mov_b32 s1, s3
; GFX10-NEXT: s_mov_b32 s2, s4
; GFX10-NEXT: s_mov_b32 s3, s5
-; GFX10-NEXT: v_and_or_b32 v0, v0, 0xffff, v1
+; GFX10-NEXT: v_and_or_b32 v0, 0xffff, v0, v1
; GFX10-NEXT: s_mov_b32 s4, s6
; GFX10-NEXT: s_mov_b32 s5, s7
; GFX10-NEXT: s_mov_b32 s6, s8
; GFX10-NEXT: s_mov_b32 s10, s12
; GFX10-NEXT: s_lshl_b32 s12, s0, 16
; GFX10-NEXT: s_mov_b32 s1, s3
-; GFX10-NEXT: v_and_or_b32 v1, v1, 0xffff, s12
+; GFX10-NEXT: v_and_or_b32 v1, 0xffff, v1, s12
; GFX10-NEXT: s_mov_b32 s3, s5
; GFX10-NEXT: s_mov_b32 s5, s7
; GFX10-NEXT: s_mov_b32 s7, s9
; GFX10-NEXT: s_mov_b32 s1, s3
; GFX10-NEXT: s_mov_b32 s2, s4
; GFX10-NEXT: s_mov_b32 s3, s5
-; GFX10-NEXT: v_and_or_b32 v1, v1, 0xffff, v2
+; GFX10-NEXT: v_and_or_b32 v1, 0xffff, v1, v2
; GFX10-NEXT: s_mov_b32 s4, s6
; GFX10-NEXT: s_mov_b32 s5, s7
; GFX10-NEXT: s_mov_b32 s6, s8
; GFX10-NEXT: s_mov_b32 s10, s12
; GFX10-NEXT: s_lshl_b32 s12, s0, 16
; GFX10-NEXT: s_mov_b32 s1, s3
-; GFX10-NEXT: v_and_or_b32 v1, v1, 0xffff, s12
+; GFX10-NEXT: v_and_or_b32 v1, 0xffff, v1, s12
; GFX10-NEXT: s_mov_b32 s3, s5
; GFX10-NEXT: s_mov_b32 s5, s7
; GFX10-NEXT: s_mov_b32 s7, s9
; GFX10-NEXT: s_mov_b32 s1, s3
; GFX10-NEXT: s_mov_b32 s2, s4
; GFX10-NEXT: s_mov_b32 s3, s5
-; GFX10-NEXT: v_and_or_b32 v1, v1, 0xffff, v2
+; GFX10-NEXT: v_and_or_b32 v1, 0xffff, v1, v2
; GFX10-NEXT: s_mov_b32 s4, s6
; GFX10-NEXT: s_mov_b32 s5, s7
; GFX10-NEXT: s_mov_b32 s6, s8
; GFX10-NEXT: s_mov_b32 s10, s12
; GFX10-NEXT: s_lshl_b32 s12, s0, 16
; GFX10-NEXT: s_mov_b32 s1, s3
-; GFX10-NEXT: v_and_or_b32 v2, v2, 0xffff, s12
+; GFX10-NEXT: v_and_or_b32 v2, 0xffff, v2, s12
; GFX10-NEXT: s_mov_b32 s3, s5
; GFX10-NEXT: s_mov_b32 s5, s7
; GFX10-NEXT: s_mov_b32 s7, s9
; GFX10-NEXT: s_mov_b32 s1, s3
; GFX10-NEXT: s_mov_b32 s2, s4
; GFX10-NEXT: s_mov_b32 s3, s5
-; GFX10-NEXT: v_and_or_b32 v2, v2, 0xffff, v3
+; GFX10-NEXT: v_and_or_b32 v2, 0xffff, v2, v3
; GFX10-NEXT: s_mov_b32 s4, s6
; GFX10-NEXT: s_mov_b32 s5, s7
; GFX10-NEXT: s_mov_b32 s6, s8
; GFX10-NEXT: s_mov_b32 s1, s3
; GFX10-NEXT: s_mov_b32 s2, s4
; GFX10-NEXT: s_mov_b32 s3, s5
-; GFX10-NEXT: v_and_or_b32 v0, v0, 0xffff, v1
+; GFX10-NEXT: v_and_or_b32 v0, 0xffff, v0, v1
; GFX10-NEXT: s_mov_b32 s4, s6
; GFX10-NEXT: s_mov_b32 s5, s7
; GFX10-NEXT: s_mov_b32 s6, s8
; GFX10-NEXT: s_mov_b32 s1, s3
; GFX10-NEXT: s_mov_b32 s2, s4
; GFX10-NEXT: s_mov_b32 s3, s5
-; GFX10-NEXT: v_and_or_b32 v1, v1, 0xffff, v2
+; GFX10-NEXT: v_and_or_b32 v1, 0xffff, v1, v2
; GFX10-NEXT: s_mov_b32 s4, s6
; GFX10-NEXT: s_mov_b32 s5, s7
; GFX10-NEXT: s_mov_b32 s6, s8
; GFX10-NEXT: s_mov_b32 s1, s3
; GFX10-NEXT: s_mov_b32 s2, s4
; GFX10-NEXT: s_mov_b32 s3, s5
-; GFX10-NEXT: v_and_or_b32 v1, v1, 0xffff, v2
+; GFX10-NEXT: v_and_or_b32 v1, 0xffff, v1, v2
; GFX10-NEXT: s_mov_b32 s4, s6
; GFX10-NEXT: s_mov_b32 s5, s7
; GFX10-NEXT: s_mov_b32 s6, s8
; GFX10-NEXT: s_mov_b32 s1, s3
; GFX10-NEXT: s_mov_b32 s2, s4
; GFX10-NEXT: s_mov_b32 s3, s5
-; GFX10-NEXT: v_and_or_b32 v2, v2, 0xffff, v3
+; GFX10-NEXT: v_and_or_b32 v2, 0xffff, v2, v3
; GFX10-NEXT: s_mov_b32 s4, s6
; GFX10-NEXT: s_mov_b32 s5, s7
; GFX10-NEXT: s_mov_b32 s6, s8
--- /dev/null
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -run-pass=machine-cse -march=amdgcn -mcpu=gfx900 -verify-machineinstrs -o - %s | FileCheck -check-prefix=GFX9 %s
+# RUN: llc -run-pass=machine-cse -march=amdgcn -mcpu=gfx1010 -verify-machineinstrs -o - %s | FileCheck -check-prefix=GFX10 %s
+
+---
+
+name: commute_vop3
+tracksRegLiveness: true
+
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1, $vgpr2
+ ; GFX9-LABEL: name: commute_vop3
+ ; GFX9: liveins: $vgpr0, $vgpr1, $vgpr2
+ ; GFX9: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX9: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX9: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX9: [[V_XOR3_B32_e64_:%[0-9]+]]:vgpr_32 = V_XOR3_B32_e64 [[COPY]], [[COPY1]], [[COPY2]], implicit $exec
+ ; GFX9: [[V_MED3_F16_e64_:%[0-9]+]]:vgpr_32 = V_MED3_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
+ ; GFX9: [[V_MED3_F16_e64_1:%[0-9]+]]:vgpr_32 = V_MED3_F16_e64 0, [[COPY1]], 0, [[COPY]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
+ ; GFX9: [[V_MED3_I16_e64_:%[0-9]+]]:vgpr_32 = V_MED3_I16_e64 0, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $exec
+ ; GFX9: [[V_MAX3_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAX3_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
+ ; GFX9: [[V_SAD_HI_U8_e64_:%[0-9]+]]:vgpr_32 = V_SAD_HI_U8_e64 [[COPY]], [[COPY1]], [[COPY2]], 0, implicit $exec
+ ; GFX9: [[V_SUB_I32_e64_:%[0-9]+]]:vgpr_32 = V_SUB_I32_e64 [[COPY]], [[COPY1]], 0, implicit $exec
+ ; GFX9: [[V_SUB_I32_e64_1:%[0-9]+]]:vgpr_32 = V_SUB_I32_e64 [[COPY1]], [[COPY]], 0, implicit $exec
+ ; GFX9: [[V_MAX3_F32_e64_1:%[0-9]+]]:vgpr_32 = V_MAX3_F32_e64 0, [[COPY]], 2, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
+ ; GFX9: [[V_MAX3_F32_e64_2:%[0-9]+]]:vgpr_32 = V_MAX3_F32_e64 1, [[COPY1]], 0, [[COPY]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
+ ; GFX9: [[V_FMA_F32_e64_:%[0-9]+]]:vgpr_32 = V_FMA_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
+ ; GFX9: [[V_FMA_F32_e64_1:%[0-9]+]]:vgpr_32 = V_FMA_F32_e64 1, [[COPY1]], 0, [[COPY]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
+ ; GFX9: [[V_MIN3_F32_e64_:%[0-9]+]]:vgpr_32 = V_MIN3_F32_e64 8, [[COPY]], 4, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
+ ; GFX10-LABEL: name: commute_vop3
+ ; GFX10: liveins: $vgpr0, $vgpr1, $vgpr2
+ ; GFX10: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX10: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX10: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX10: [[V_XOR3_B32_e64_:%[0-9]+]]:vgpr_32 = V_XOR3_B32_e64 [[COPY]], [[COPY1]], [[COPY2]], implicit $exec
+ ; GFX10: [[V_MED3_F16_e64_:%[0-9]+]]:vgpr_32 = V_MED3_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
+ ; GFX10: [[V_MED3_F16_e64_1:%[0-9]+]]:vgpr_32 = V_MED3_F16_e64 0, [[COPY1]], 0, [[COPY]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
+ ; GFX10: [[V_MED3_I16_e64_:%[0-9]+]]:vgpr_32 = V_MED3_I16_e64 0, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $exec
+ ; GFX10: [[V_MAX3_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAX3_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
+ ; GFX10: [[V_SAD_HI_U8_e64_:%[0-9]+]]:vgpr_32 = V_SAD_HI_U8_e64 [[COPY]], [[COPY1]], [[COPY2]], 0, implicit $exec
+ ; GFX10: [[V_SUB_I32_e64_:%[0-9]+]]:vgpr_32 = V_SUB_I32_e64 [[COPY]], [[COPY1]], 0, implicit $exec
+ ; GFX10: [[V_SUB_I32_e64_1:%[0-9]+]]:vgpr_32 = V_SUB_I32_e64 [[COPY1]], [[COPY]], 0, implicit $exec
+ ; GFX10: [[V_MAX3_F32_e64_1:%[0-9]+]]:vgpr_32 = V_MAX3_F32_e64 0, [[COPY]], 2, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
+ ; GFX10: [[V_MAX3_F32_e64_2:%[0-9]+]]:vgpr_32 = V_MAX3_F32_e64 1, [[COPY1]], 0, [[COPY]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
+ ; GFX10: [[V_FMA_F32_e64_:%[0-9]+]]:vgpr_32 = V_FMA_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
+ ; GFX10: [[V_FMA_F32_e64_1:%[0-9]+]]:vgpr_32 = V_FMA_F32_e64 1, [[COPY1]], 0, [[COPY]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
+ ; GFX10: [[V_MIN3_F32_e64_:%[0-9]+]]:vgpr_32 = V_MIN3_F32_e64 8, [[COPY]], 4, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
+ %0:vgpr_32 = COPY $vgpr0
+ %1:vgpr_32 = COPY $vgpr1
+ %2:vgpr_32 = COPY $vgpr2
+ %3:vgpr_32 = V_XOR3_B32_e64 %0, %1, %2, implicit $exec
+ %4:vgpr_32 = V_XOR3_B32_e64 %1, %0, %2, implicit $exec
+    ; Instructions with mayRaiseFPException set do not get CSE'd
+ %5:vgpr_32 = V_MED3_F16_e64 0, %0, 0, %1, 0, %2, 0, 0, implicit $mode, implicit $exec
+ %6:vgpr_32 = V_MED3_F16_e64 0, %1, 0, %0, 0, %2, 0, 0, implicit $mode, implicit $exec
+ %7:vgpr_32 = V_MED3_I16_e64 0, %0, 0, %1, 0, %2, 0, 0, implicit $exec
+ %8:vgpr_32 = V_MED3_I16_e64 0, %1, 0, %0, 0, %2, 0, 0, implicit $exec
+ %9:vgpr_32 = V_MAX3_F32_e64 0, %0, 0, %1, 0, %2, 0, 0, implicit $mode, implicit $exec
+ %10:vgpr_32 = V_MAX3_F32_e64 0, %1, 0, %0, 0, %2, 0, 0, implicit $mode, implicit $exec
+ %11:vgpr_32 = V_SAD_HI_U8_e64 %0, %1, %2, 0, implicit $exec
+ %12:vgpr_32 = V_SAD_HI_U8_e64 %1, %0, %2, 0, implicit $exec
+ ; Sub should not be commuted
+ %13:vgpr_32 = V_SUB_I32_e64 %0, %1, 0, implicit $exec
+ %14:vgpr_32 = V_SUB_I32_e64 %1, %0, 0, implicit $exec
+    ; Non-matching modifiers mean no commute and no CSE
+ %15:vgpr_32 = V_MAX3_F32_e64 0, %0, 2, %1, 0, %2, 0, 0, implicit $mode, implicit $exec
+ %16:vgpr_32 = V_MAX3_F32_e64 1, %1, 0, %0, 0, %2, 0, 0, implicit $mode, implicit $exec
+ %17:vgpr_32 = V_FMA_F32_e64 0, %0, 0, %1, 0, %2, 0, 0, implicit $mode, implicit $exec
+ %18:vgpr_32 = V_FMA_F32_e64 1, %1, 0, %0, 0, %2, 0, 0, implicit $mode, implicit $exec
+    ; Matching modifiers can be commuted and CSE'd
+ %19:vgpr_32 = V_MIN3_F32_e64 8, %0, 4, %1, 0, %2, 0, 0, implicit $mode, implicit $exec
+ %20:vgpr_32 = V_MIN3_F32_e64 4, %1, 8, %0, 0, %2, 0, 0, implicit $mode, implicit $exec
+
+...
+