BUILTIN(__builtin_vsx_xvabssp, "V4fV4f", "")
BUILTIN(__builtin_vsx_xvabsdp, "V2dV2d", "")
+BUILTIN(__builtin_vsx_xxgenpcvbm, "V16UcV16Uci", "")
+BUILTIN(__builtin_vsx_xxgenpcvhm, "V8UsV8Usi", "")
+BUILTIN(__builtin_vsx_xxgenpcvwm, "V4UiV4Uii", "")
+BUILTIN(__builtin_vsx_xxgenpcvdm, "V2ULLiV2ULLii", "")
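In Builtins.def prototype strings the first type is the return type and the rest are parameters, so "V16UcV16Uci" corresponds roughly to the C-level signature below (a sketch of the implied prototype, not text from the patch); the other three builtins follow the same shape for 8 x unsigned short, 4 x unsigned int, and 2 x unsigned long long elements.

  vector unsigned char __builtin_vsx_xxgenpcvbm(vector unsigned char, int);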
+
// vector Insert/Extract exponent/significand builtins
BUILTIN(__builtin_vsx_xviexpdp, "V2dV2ULLiV2ULLi", "")
BUILTIN(__builtin_vsx_xviexpsp, "V4fV4UiV4Ui", "")
return __builtin_altivec_vpextd(__a, __b);
}
+/* vec_genpcvm */
+
+#ifdef __VSX__
+#define vec_genpcvm(__a, __imm) \
+ _Generic((__a), vector unsigned char \
+ : __builtin_vsx_xxgenpcvbm((__a), (int)(__imm)), \
+ vector unsigned short \
+ : __builtin_vsx_xxgenpcvhm((__a), (int)(__imm)), \
+ vector unsigned int \
+ : __builtin_vsx_xxgenpcvwm((__a), (int)(__imm)), \
+ vector unsigned long long \
+ : __builtin_vsx_xxgenpcvdm((__a), (int)(__imm)))
+#endif /* __VSX__ */
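A minimal usage sketch (assuming a Power10 target with VSX enabled and <altivec.h> included; variable names are illustrative): the _Generic selection dispatches on the element type of the first argument, and the second argument is the immediate mode operand passed through to the corresponding xxgenpcv* builtin.

  vector unsigned char vuc;
  vector unsigned long long vull;
  vector unsigned char pcvb = vec_genpcvm(vuc, 0);   /* selects __builtin_vsx_xxgenpcvbm(vuc, 0) */
  vector unsigned long long pcvd = vec_genpcvm(vull, 0); /* selects __builtin_vsx_xxgenpcvdm(vull, 0) */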
+
/* vec_clrl */
static __inline__ vector signed char __ATTRS_o_ai
vector signed char vsca;
vector unsigned char vuca;
+vector unsigned short vusa;
+vector unsigned int vuia;
vector unsigned long long vulla, vullb;
unsigned int uia;
return vec_pext(vulla, vullb);
}
+vector unsigned char test_xxgenpcvbm(void) {
+ // CHECK: @llvm.ppc.vsx.xxgenpcvbm(<16 x i8> %{{.+}}, i32
+ // CHECK-NEXT: ret <16 x i8>
+ return vec_genpcvm(vuca, 0);
+}
+
+vector unsigned short test_xxgenpcvhm(void) {
+ // CHECK: @llvm.ppc.vsx.xxgenpcvhm(<8 x i16> %{{.+}}, i32
+ // CHECK-NEXT: ret <8 x i16>
+ return vec_genpcvm(vusa, 0);
+}
+
+vector unsigned int test_xxgenpcvwm(void) {
+ // CHECK: @llvm.ppc.vsx.xxgenpcvwm(<4 x i32> %{{.+}}, i32
+ // CHECK-NEXT: ret <4 x i32>
+ return vec_genpcvm(vuia, 0);
+}
+
+vector unsigned long long test_xxgenpcvdm(void) {
+ // CHECK: @llvm.ppc.vsx.xxgenpcvdm(<2 x i64> %{{.+}}, i32
+ // CHECK-NEXT: ret <2 x i64>
+ return vec_genpcvm(vulla, 0);
+}
+
vector signed char test_vec_vclrl_sc(void) {
// CHECK-BE: @llvm.ppc.altivec.vclrlb(<16 x i8>
// CHECK-BE-NEXT: ret <16 x i8>
PowerPC_VSX_Intrinsic<"xxinsertw",[llvm_v4i32_ty],
[llvm_v4i32_ty,llvm_v2i64_ty,llvm_i32_ty],
[IntrNoMem]>;
+def int_ppc_vsx_xxgenpcvbm :
+ PowerPC_VSX_Intrinsic<"xxgenpcvbm", [llvm_v16i8_ty],
+ [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_ppc_vsx_xxgenpcvhm :
+ PowerPC_VSX_Intrinsic<"xxgenpcvhm", [llvm_v8i16_ty],
+ [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_ppc_vsx_xxgenpcvwm :
+ PowerPC_VSX_Intrinsic<"xxgenpcvwm", [llvm_v4i32_ty],
+ [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_ppc_vsx_xxgenpcvdm :
+ PowerPC_VSX_Intrinsic<"xxgenpcvdm", [llvm_v2i64_ty],
+ [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
}
//===----------------------------------------------------------------------===//
let Inst{48-63} = D_RA{15-0}; // d1
}
+// X-Form: [PO T IMM VRB XO TX]
+class XForm_XT6_IMM5_VB5<bits<6> opcode, bits<10> xo, dag OOL, dag IOL,
+ string asmstr, InstrItinClass itin, list<dag> pattern>
+ : I<opcode, OOL, IOL, asmstr, itin> {
+ bits<6> XT;
+ bits<5> VRB;
+ bits<5> IMM;
+
+ let Pattern = pattern;
+ let Inst{6-10} = XT{4-0};
+ let Inst{11-15} = IMM;
+ let Inst{16-20} = VRB;
+ let Inst{21-30} = xo;
+ let Inst{31} = XT{5};
+}
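As a concrete check of the field layout (consistent with the MC tests further down): for "xxgenpcvbm 0, 1, 2" the fields are PO=60, XT=0, IMM=2, VRB=1, XO=916, TX=0, which assembles to the big-endian word

  0b111100 00000 00010 00001 1110010100 0  =  0xf0020f28

i.e. the bytes [0xf0,0x02,0x0f,0x28] expected by the CHECK-BE line in the assembler test below.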
+
multiclass MLS_DForm_R_SI34_RTA5_MEM_p<bits<6> opcode, dag OOL, dag IOL,
dag PCRel_IOL, string asmstr,
InstrItinClass itin> {
def PEXTD : XForm_6<31, 188, (outs g8rc:$rA), (ins g8rc:$rS, g8rc:$rB),
"pextd $rA, $rS, $rB", IIC_IntGeneral,
[(set i64:$rA, (int_ppc_pextd i64:$rS, i64:$rB))]>;
+ def XXGENPCVBM :
+ XForm_XT6_IMM5_VB5<60, 916, (outs vsrc:$XT), (ins vrrc:$VRB, s5imm:$IMM),
+ "xxgenpcvbm $XT, $VRB, $IMM", IIC_VecGeneral, []>;
+ def XXGENPCVHM :
+ XForm_XT6_IMM5_VB5<60, 917, (outs vsrc:$XT), (ins vrrc:$VRB, s5imm:$IMM),
+ "xxgenpcvhm $XT, $VRB, $IMM", IIC_VecGeneral, []>;
+ def XXGENPCVWM :
+ XForm_XT6_IMM5_VB5<60, 948, (outs vsrc:$XT), (ins vrrc:$VRB, s5imm:$IMM),
+ "xxgenpcvwm $XT, $VRB, $IMM", IIC_VecGeneral, []>;
+ def XXGENPCVDM :
+ XForm_XT6_IMM5_VB5<60, 949, (outs vsrc:$XT), (ins vrrc:$VRB, s5imm:$IMM),
+ "xxgenpcvdm $XT, $VRB, $IMM", IIC_VecGeneral, []>;
def VCLRLB : VXForm_1<397, (outs vrrc:$vD), (ins vrrc:$vA, gprc:$rB),
"vclrlb $vD, $vA, $rB", IIC_VecGeneral,
[(set v16i8:$vD,
[(set v16i8:$vD,
(int_ppc_altivec_vclrrb v16i8:$vA, i32:$rB))]>;
}
+
+//---------------------------- Anonymous Patterns ----------------------------//
+let Predicates = [IsISA3_1] in {
+ def : Pat<(v16i8 (int_ppc_vsx_xxgenpcvbm v16i8:$VRB, imm:$IMM)),
+ (v16i8 (COPY_TO_REGCLASS (XXGENPCVBM $VRB, imm:$IMM), VRRC))>;
+ def : Pat<(v8i16 (int_ppc_vsx_xxgenpcvhm v8i16:$VRB, imm:$IMM)),
+ (v8i16 (COPY_TO_REGCLASS (XXGENPCVHM $VRB, imm:$IMM), VRRC))>;
+ def : Pat<(v4i32 (int_ppc_vsx_xxgenpcvwm v4i32:$VRB, imm:$IMM)),
+ (v4i32 (COPY_TO_REGCLASS (XXGENPCVWM $VRB, imm:$IMM), VRRC))>;
+ def : Pat<(v2i64 (int_ppc_vsx_xxgenpcvdm v2i64:$VRB, imm:$IMM)),
+ (v2i64 (COPY_TO_REGCLASS (XXGENPCVDM $VRB, imm:$IMM), VRRC))>;
+}
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu \
+; RUN: -mcpu=pwr10 -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr < %s | \
+; RUN: FileCheck %s
+
+; These test cases aim to test the VSX PCV Generate Operations on Power10.
+
+declare <16 x i8> @llvm.ppc.vsx.xxgenpcvbm(<16 x i8>, i32)
+declare <8 x i16> @llvm.ppc.vsx.xxgenpcvhm(<8 x i16>, i32)
+declare <4 x i32> @llvm.ppc.vsx.xxgenpcvwm(<4 x i32>, i32)
+declare <2 x i64> @llvm.ppc.vsx.xxgenpcvdm(<2 x i64>, i32)
+
+define <16 x i8> @test_xxgenpcvbm(<16 x i8> %a) {
+; CHECK-LABEL: test_xxgenpcvbm:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xxgenpcvbm v2, v2, 1
+; CHECK-NEXT: blr
+entry:
+ %gen = tail call <16 x i8> @llvm.ppc.vsx.xxgenpcvbm(<16 x i8> %a, i32 1)
+ ret <16 x i8> %gen
+}
+
+define <8 x i16> @test_xxgenpcvhm(<8 x i16> %a) {
+; CHECK-LABEL: test_xxgenpcvhm:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xxgenpcvhm v2, v2, 1
+; CHECK-NEXT: blr
+entry:
+ %gen = tail call <8 x i16> @llvm.ppc.vsx.xxgenpcvhm(<8 x i16> %a, i32 1)
+ ret <8 x i16> %gen
+}
+
+define <4 x i32> @test_xxgenpcvwm(<4 x i32> %a) {
+; CHECK-LABEL: test_xxgenpcvwm:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xxgenpcvwm v2, v2, 1
+; CHECK-NEXT: blr
+entry:
+ %gen = tail call <4 x i32> @llvm.ppc.vsx.xxgenpcvwm(<4 x i32> %a, i32 1)
+ ret <4 x i32> %gen
+}
+
+define <2 x i64> @test_xxgenpcvdm(<2 x i64> %a) {
+; CHECK-LABEL: test_xxgenpcvdm:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xxgenpcvdm v2, v2, 1
+; CHECK-NEXT: blr
+entry:
+ %gen = tail call <2 x i64> @llvm.ppc.vsx.xxgenpcvdm(<2 x i64> %a, i32 1)
+ ret <2 x i64> %gen
+}
# CHECK: pextd 1, 2, 4
0x7c 0x41 0x21 0x78
+# CHECK: xxgenpcvbm 0, 1, 2
+0xf0 0x02 0x0f 0x28
+
+# CHECK: xxgenpcvhm 0, 1, 2
+0xf0 0x02 0x0f 0x2a
+
+# CHECK: xxgenpcvwm 0, 1, 2
+0xf0 0x02 0x0f 0x68
+
+# CHECK: xxgenpcvdm 0, 1, 2
+0xf0 0x02 0x0f 0x6a
+
# CHECK: vclrlb 1, 4, 3
0x10 0x24 0x19 0x8d
# CHECK-BE: pextd 1, 2, 4 # encoding: [0x7c,0x41,0x21,0x78]
# CHECK-LE: pextd 1, 2, 4 # encoding: [0x78,0x21,0x41,0x7c]
pextd 1, 2, 4
+# CHECK-BE: xxgenpcvbm 0, 1, 2 # encoding: [0xf0,0x02,0x0f,0x28]
+# CHECK-LE: xxgenpcvbm 0, 1, 2 # encoding: [0x28,0x0f,0x02,0xf0]
+ xxgenpcvbm 0, 1, 2
+# CHECK-BE: xxgenpcvhm 0, 1, 2 # encoding: [0xf0,0x02,0x0f,0x2a]
+# CHECK-LE: xxgenpcvhm 0, 1, 2 # encoding: [0x2a,0x0f,0x02,0xf0]
+ xxgenpcvhm 0, 1, 2
+# CHECK-BE: xxgenpcvwm 0, 1, 2 # encoding: [0xf0,0x02,0x0f,0x68]
+# CHECK-LE: xxgenpcvwm 0, 1, 2 # encoding: [0x68,0x0f,0x02,0xf0]
+ xxgenpcvwm 0, 1, 2
+# CHECK-BE: xxgenpcvdm 0, 1, 2 # encoding: [0xf0,0x02,0x0f,0x6a]
+# CHECK-LE: xxgenpcvdm 0, 1, 2 # encoding: [0x6a,0x0f,0x02,0xf0]
+ xxgenpcvdm 0, 1, 2
# CHECK-BE: vclrlb 1, 4, 3 # encoding: [0x10,0x24,0x19,0x8d]
# CHECK-LE: vclrlb 1, 4, 3 # encoding: [0x8d,0x19,0x24,0x10]
vclrlb 1, 4, 3