TARGET_BUILTIN(__builtin_wasm_narrow_s_i16x8_i32x4, "V8sV4iV4i", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_narrow_u_i16x8_i32x4, "V8UsV4UiV4Ui", "nc", "simd128")
+// Prototype builtins for i64x2.widen_{low,high}_i32x4_{s,u}; remove once
+// i32x4-to-i64x2 widening is merged to the SIMD proposal.
+TARGET_BUILTIN(__builtin_wasm_widen_low_s_i32x4_i64x2, "V2LLiV4i", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_widen_high_s_i32x4_i64x2, "V2LLiV4i", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_widen_low_u_i32x4_i64x2, "V2LLUiV4Ui", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_widen_high_u_i32x4_i64x2, "V2LLUiV4Ui", "nc", "simd128")
+
TARGET_BUILTIN(__builtin_wasm_load32_zero, "V4ii*", "n", "simd128")
TARGET_BUILTIN(__builtin_wasm_load64_zero, "V2LLiLLi*", "n", "simd128")
CGM.getIntrinsic(IntNo, {ConvertType(E->getType()), Low->getType()});
return Builder.CreateCall(Callee, {Low, High});
}
+ case WebAssembly::BI__builtin_wasm_widen_low_s_i32x4_i64x2:
+ case WebAssembly::BI__builtin_wasm_widen_high_s_i32x4_i64x2:
+ case WebAssembly::BI__builtin_wasm_widen_low_u_i32x4_i64x2:
+ case WebAssembly::BI__builtin_wasm_widen_high_u_i32x4_i64x2: {
+ // Map each widening builtin onto its (non-overloaded) LLVM intrinsic and
+ // emit a direct call on the single vector operand.
+ Value *Vec = EmitScalarExpr(E->getArg(0));
+ unsigned IntNo;
+ switch (BuiltinID) {
+ case WebAssembly::BI__builtin_wasm_widen_low_s_i32x4_i64x2:
+ IntNo = Intrinsic::wasm_widen_low_signed;
+ break;
+ case WebAssembly::BI__builtin_wasm_widen_high_s_i32x4_i64x2:
+ IntNo = Intrinsic::wasm_widen_high_signed;
+ break;
+ case WebAssembly::BI__builtin_wasm_widen_low_u_i32x4_i64x2:
+ IntNo = Intrinsic::wasm_widen_low_unsigned;
+ break;
+ case WebAssembly::BI__builtin_wasm_widen_high_u_i32x4_i64x2:
+ IntNo = Intrinsic::wasm_widen_high_unsigned;
+ break;
+ default:
+ // Keeps IntNo provably initialized; the outer case labels cover all IDs.
+ llvm_unreachable("unexpected builtin ID");
+ }
+ Function *Callee = CGM.getIntrinsic(IntNo);
+ return Builder.CreateCall(Callee, Vec);
+ }
case WebAssembly::BI__builtin_wasm_load32_zero: {
Value *Ptr = EmitScalarExpr(E->getArg(0));
Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_load32_zero);
// WEBASSEMBLY: ret
}
+// Prototype i64x2 widening builtins: each must lower to a single call of the
+// corresponding @llvm.wasm.widen.* intrinsic.
+i64x2 widen_low_s_i32x4_i64x2(i32x4 x) {
+ return __builtin_wasm_widen_low_s_i32x4_i64x2(x);
+ // WEBASSEMBLY: call <2 x i64> @llvm.wasm.widen.low.signed(<4 x i32> %x)
+ // WEBASSEMBLY: ret
+}
+
+i64x2 widen_high_s_i32x4_i64x2(i32x4 x) {
+ return __builtin_wasm_widen_high_s_i32x4_i64x2(x);
+ // WEBASSEMBLY: call <2 x i64> @llvm.wasm.widen.high.signed(<4 x i32> %x)
+ // WEBASSEMBLY: ret
+}
+
+u64x2 widen_low_u_i32x4_i64x2(u32x4 x) {
+ return __builtin_wasm_widen_low_u_i32x4_i64x2(x);
+ // WEBASSEMBLY: call <2 x i64> @llvm.wasm.widen.low.unsigned(<4 x i32> %x)
+ // WEBASSEMBLY: ret
+}
+
+u64x2 widen_high_u_i32x4_i64x2(u32x4 x) {
+ return __builtin_wasm_widen_high_u_i32x4_i64x2(x);
+ // WEBASSEMBLY: call <2 x i64> @llvm.wasm.widen.high.unsigned(<4 x i32> %x)
+ // WEBASSEMBLY: ret
+}
+
i32x4 load32_zero(int *p) {
return __builtin_wasm_load32_zero(p);
// WEBASSEMBLY: call <4 x i32> @llvm.wasm.load32.zero(i32* %p)
Intrinsic<[llvm_v4i32_ty],
[llvm_v8i16_ty, llvm_v8i16_ty],
[IntrNoMem, IntrSpeculatable]>;
+
def int_wasm_narrow_signed :
Intrinsic<[llvm_anyvector_ty],
[llvm_anyvector_ty, LLVMMatchType<1>],
Intrinsic<[llvm_anyvector_ty],
[llvm_anyvector_ty, LLVMMatchType<1>],
[IntrNoMem, IntrSpeculatable]>;
+
+// TODO: Replace these intrinsics with normal ISel patterns once i32x4 to i64x2
+// widening is merged to the proposal. Results and operands are fixed
+// (v2i64 <- v4i32), so these are deliberately not overloaded.
+def int_wasm_widen_low_signed :
+  Intrinsic<[llvm_v2i64_ty],
+            [llvm_v4i32_ty],
+            [IntrNoMem, IntrSpeculatable]>;
+def int_wasm_widen_high_signed :
+  Intrinsic<[llvm_v2i64_ty],
+            [llvm_v4i32_ty],
+            [IntrNoMem, IntrSpeculatable]>;
+def int_wasm_widen_low_unsigned :
+  Intrinsic<[llvm_v2i64_ty],
+            [llvm_v4i32_ty],
+            [IntrNoMem, IntrSpeculatable]>;
+def int_wasm_widen_high_unsigned :
+  Intrinsic<[llvm_v2i64_ty],
+            [llvm_v4i32_ty],
+            [IntrNoMem, IntrSpeculatable]>;
+
def int_wasm_q15mulr_saturate_signed :
Intrinsic<[llvm_v8i16_ty],
[llvm_v8i16_ty, llvm_v8i16_ty],
defm "" : SIMDWiden<v8i16, "i16x8", v16i8, "i8x16", 135>;
defm "" : SIMDWiden<v4i32, "i32x4", v8i16, "i16x8", 167>;
+// Widening operations (prototype; i32x4-to-i64x2 widening is not yet part of
+// the SIMD proposal)
+defm "" : SIMDConvert<v2i64, v4i32, int_wasm_widen_low_signed,
+ "i64x2.widen_low_i32x4_s", 199>;
+defm "" : SIMDConvert<v2i64, v4i32, int_wasm_widen_high_signed,
+ "i64x2.widen_high_i32x4_s", 200>;
+defm "" : SIMDConvert<v2i64, v4i32, int_wasm_widen_low_unsigned,
+ "i64x2.widen_low_i32x4_u", 201>;
+defm "" : SIMDConvert<v2i64, v4i32, int_wasm_widen_high_unsigned,
+ "i64x2.widen_high_i32x4_u", 202>;
+
// Narrowing operations
multiclass SIMDNarrow<ValueType vec_t, string vec, ValueType arg_t, string arg,
bits<32> baseInst> {
; ==============================================================================
; 2 x i64
; ==============================================================================
+; Prototype i64x2 widening: each @llvm.wasm.widen.* intrinsic call must select
+; to the single corresponding i64x2.widen_*_i32x4_* instruction.
+; CHECK-LABEL: widen_low_s_v2i64:
+; SIMD128-NEXT: .functype widen_low_s_v2i64 (v128) -> (v128){{$}}
+; SIMD128-NEXT: i64x2.widen_low_i32x4_s $push[[R:[0-9]+]]=, $0{{$}}
+; SIMD128-NEXT: return $pop[[R]]{{$}}
+declare <2 x i64> @llvm.wasm.widen.low.signed(<4 x i32>)
+define <2 x i64> @widen_low_s_v2i64(<4 x i32> %x) {
+ %a = call <2 x i64> @llvm.wasm.widen.low.signed(<4 x i32> %x)
+ ret <2 x i64> %a
+}
+
+; CHECK-LABEL: widen_high_s_v2i64:
+; SIMD128-NEXT: .functype widen_high_s_v2i64 (v128) -> (v128){{$}}
+; SIMD128-NEXT: i64x2.widen_high_i32x4_s $push[[R:[0-9]+]]=, $0{{$}}
+; SIMD128-NEXT: return $pop[[R]]{{$}}
+declare <2 x i64> @llvm.wasm.widen.high.signed(<4 x i32>)
+define <2 x i64> @widen_high_s_v2i64(<4 x i32> %x) {
+ %a = call <2 x i64> @llvm.wasm.widen.high.signed(<4 x i32> %x)
+ ret <2 x i64> %a
+}
+
+; CHECK-LABEL: widen_low_u_v2i64:
+; SIMD128-NEXT: .functype widen_low_u_v2i64 (v128) -> (v128){{$}}
+; SIMD128-NEXT: i64x2.widen_low_i32x4_u $push[[R:[0-9]+]]=, $0{{$}}
+; SIMD128-NEXT: return $pop[[R]]{{$}}
+declare <2 x i64> @llvm.wasm.widen.low.unsigned(<4 x i32>)
+define <2 x i64> @widen_low_u_v2i64(<4 x i32> %x) {
+ %a = call <2 x i64> @llvm.wasm.widen.low.unsigned(<4 x i32> %x)
+ ret <2 x i64> %a
+}
+
+; CHECK-LABEL: widen_high_u_v2i64:
+; SIMD128-NEXT: .functype widen_high_u_v2i64 (v128) -> (v128){{$}}
+; SIMD128-NEXT: i64x2.widen_high_i32x4_u $push[[R:[0-9]+]]=, $0{{$}}
+; SIMD128-NEXT: return $pop[[R]]{{$}}
+declare <2 x i64> @llvm.wasm.widen.high.unsigned(<4 x i32>)
+define <2 x i64> @widen_high_u_v2i64(<4 x i32> %x) {
+ %a = call <2 x i64> @llvm.wasm.widen.high.unsigned(<4 x i32> %x)
+ ret <2 x i64> %a
+}
+
; CHECK-LABEL: extmul_low_s_v2i64:
; SIMD128-NEXT: .functype extmul_low_s_v2i64 (v128, v128) -> (v128){{$}}
; SIMD128-NEXT: i64x2.extmul_low_i32x4_s $push[[R:[0-9]+]]=, $0, $1{{$}}
# CHECK: i64x2.all_true # encoding: [0xfd,0xc3,0x01]
i64x2.all_true
+ # Prototype widening instructions: opcodes 199-202, i.e. LEB128 0xc7..0xca
+ # 0x01 after the 0xfd SIMD prefix (matches WebAssemblyInstrSIMD.td).
+ # CHECK: i64x2.widen_low_i32x4_s # encoding: [0xfd,0xc7,0x01]
+ i64x2.widen_low_i32x4_s
+
+ # CHECK: i64x2.widen_high_i32x4_s # encoding: [0xfd,0xc8,0x01]
+ i64x2.widen_high_i32x4_s
+
+ # CHECK: i64x2.widen_low_i32x4_u # encoding: [0xfd,0xc9,0x01]
+ i64x2.widen_low_i32x4_u
+
+ # CHECK: i64x2.widen_high_i32x4_u # encoding: [0xfd,0xca,0x01]
+ i64x2.widen_high_i32x4_u
+
# CHECK: i64x2.shl # encoding: [0xfd,0xcb,0x01]
i64x2.shl