TARGET_BUILTIN(__builtin_wasm_relaxed_trunc_s_zero_i32x4_f64x2, "V4iV2d", "nc", "relaxed-simd")
TARGET_BUILTIN(__builtin_wasm_relaxed_trunc_u_zero_i32x4_f64x2, "V4UiV2d", "nc", "relaxed-simd")
+// Relaxed rounding Q15 multiplication: (<8 x i16>, <8 x i16>) -> <8 x i16>.
+TARGET_BUILTIN(__builtin_wasm_relaxed_q15mulr_s_i16x8, "V8sV8sV8s", "nc", "relaxed-simd")
+
+// Relaxed integer dot products: (<16 x i8>, <16 x i8>) -> <8 x i16>, and the
+// accumulating form (<16 x i8>, <16 x i8>, <4 x i32>) -> <4 x i32>.
+TARGET_BUILTIN(__builtin_wasm_dot_i8x16_i7x16_s_i16x8, "V8sV16ScV16Sc", "nc", "relaxed-simd")
+TARGET_BUILTIN(__builtin_wasm_dot_i8x16_i7x16_add_s_i32x4, "V4iV16ScV16ScV4i", "nc", "relaxed-simd")
+
#undef BUILTIN
#undef TARGET_BUILTIN
Function *Callee = CGM.getIntrinsic(IntNo);
return Builder.CreateCall(Callee, {Vec});
}
+  case WebAssembly::BI__builtin_wasm_relaxed_q15mulr_s_i16x8: {
+    // Lower the builtin straight to @llvm.wasm.relaxed.q15mulr.signed with
+    // the two vector operands.
+    Value *A = EmitScalarExpr(E->getArg(0));
+    Value *B = EmitScalarExpr(E->getArg(1));
+    return Builder.CreateCall(
+        CGM.getIntrinsic(Intrinsic::wasm_relaxed_q15mulr_signed), {A, B});
+  }
+  case WebAssembly::BI__builtin_wasm_dot_i8x16_i7x16_s_i16x8: {
+    // Lower the builtin straight to @llvm.wasm.dot.i8x16.i7x16.signed with
+    // the two vector operands.
+    Value *A = EmitScalarExpr(E->getArg(0));
+    Value *B = EmitScalarExpr(E->getArg(1));
+    return Builder.CreateCall(
+        CGM.getIntrinsic(Intrinsic::wasm_dot_i8x16_i7x16_signed), {A, B});
+  }
+  case WebAssembly::BI__builtin_wasm_dot_i8x16_i7x16_add_s_i32x4: {
+    // Three-operand form: dot product of the first two vectors plus the
+    // third (accumulator) operand. Braced init preserves the original
+    // left-to-right argument evaluation order.
+    Value *Ops[] = {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1)),
+                    EmitScalarExpr(E->getArg(2))};
+    return Builder.CreateCall(
+        CGM.getIntrinsic(Intrinsic::wasm_dot_i8x16_i7x16_add_signed), Ops);
+  }
default:
return nullptr;
}
// WEBASSEMBLY: call <4 x i32> @llvm.wasm.relaxed.trunc.unsigned.zero(<2 x double> %x)
// WEBASSEMBLY-NEXT: ret
}
+
+// Check that the q15mulr builtin lowers to the matching LLVM intrinsic; the
+// WEBASSEMBLY lines below are FileCheck directives verifying the emitted IR.
+i16x8 relaxed_q15mulr_s_i16x8(i16x8 a, i16x8 b) {
+  return __builtin_wasm_relaxed_q15mulr_s_i16x8(a, b);
+  // WEBASSEMBLY: call <8 x i16> @llvm.wasm.relaxed.q15mulr.signed(
+  // WEBASSEMBLY-SAME: <8 x i16> %a, <8 x i16> %b)
+  // WEBASSEMBLY-NEXT: ret
+}
+
+// Check that the two-operand dot-product builtin lowers to the matching LLVM
+// intrinsic (FileCheck directives below verify the emitted IR).
+i16x8 dot_i8x16_i7x16_s_i16x8(i8x16 a, i8x16 b) {
+  return __builtin_wasm_dot_i8x16_i7x16_s_i16x8(a, b);
+  // WEBASSEMBLY: call <8 x i16> @llvm.wasm.dot.i8x16.i7x16.signed(
+  // WEBASSEMBLY-SAME: <16 x i8> %a, <16 x i8> %b)
+  // WEBASSEMBLY-NEXT: ret
+}
+
+// Check that the accumulating dot-product builtin lowers to the matching LLVM
+// intrinsic, passing the accumulator as the third argument.
+i32x4 dot_i8x16_i7x16_add_s_i32x4(i8x16 a, i8x16 b, i32x4 c) {
+  return __builtin_wasm_dot_i8x16_i7x16_add_s_i32x4(a, b, c);
+  // WEBASSEMBLY: call <4 x i32> @llvm.wasm.dot.i8x16.i7x16.add.signed(
+  // WEBASSEMBLY-SAME: <16 x i8> %a, <16 x i8> %b, <4 x i32> %c)
+  // WEBASSEMBLY-NEXT: ret
+}
[llvm_v2f64_ty],
[IntrNoMem, IntrSpeculatable]>;
+// Relaxed rounding Q15 multiplication: (v8i16, v8i16) -> v8i16.
+def int_wasm_relaxed_q15mulr_signed:
+  Intrinsic<[llvm_v8i16_ty],
+            [llvm_v8i16_ty, llvm_v8i16_ty],
+            [IntrNoMem, IntrSpeculatable]>;
+
+// Relaxed integer dot product: (v16i8, v16i8) -> v8i16.
+def int_wasm_dot_i8x16_i7x16_signed:
+  Intrinsic<[llvm_v8i16_ty],
+            [llvm_v16i8_ty, llvm_v16i8_ty],
+            [IntrNoMem, IntrSpeculatable]>;
+
+// Relaxed integer dot product with accumulation: the trailing v4i32 operand
+// is the accumulator added into the result.
+def int_wasm_dot_i8x16_i7x16_add_signed:
+  Intrinsic<[llvm_v4i32_ty],
+            [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v4i32_ty],
+            [IntrNoMem, IntrSpeculatable]>;
//===----------------------------------------------------------------------===//
// Thread-local storage intrinsics
defm "" : SIMDLANESELECT<I32x4, 0x10b>;
defm "" : SIMDLANESELECT<I64x2, 0x10c>;
-
//===----------------------------------------------------------------------===//
// Relaxed floating-point min and max.
//===----------------------------------------------------------------------===//
RelaxedBinary<F64x2, int_wasm_relaxed_min, "relaxed_min", 0x10f>;
defm SIMD_RELAXED_FMAX :
RelaxedBinary<F64x2, int_wasm_relaxed_max, "relaxed_max", 0x110>;
+
+//===----------------------------------------------------------------------===//
+// Relaxed rounding q15 multiplication
+//===----------------------------------------------------------------------===//
+
+// i16x8.relaxed_q15mulr_s, opcode 0x111, selected from
+// int_wasm_relaxed_q15mulr_signed via the RelaxedBinary helper.
+defm RELAXED_Q15MULR_S :
+  RelaxedBinary<I16x8, int_wasm_relaxed_q15mulr_signed, "relaxed_q15mulr_s",
+                0x111>;
+
+//===----------------------------------------------------------------------===//
+// Relaxed integer dot product
+//===----------------------------------------------------------------------===//
+
+// i16x8.dot_i8x16_i7x16_s, opcode 0x112: v16i8 x v16i8 -> v8i16, matched
+// from int_wasm_dot_i8x16_i7x16_signed by the pattern below.
+defm RELAXED_DOT :
+  RELAXED_I<(outs V128:$dst), (ins V128:$lhs, V128:$rhs), (outs), (ins),
+            [(set (v8i16 V128:$dst), (int_wasm_dot_i8x16_i7x16_signed
+               (v16i8 V128:$lhs), (v16i8 V128:$rhs)))],
+            "i16x8.dot_i8x16_i7x16_s\t$dst, $lhs, $rhs",
+            "i16x8.dot_i8x16_i7x16_s", 0x112>;
+
+// i32x4.dot_i8x16_i7x16_add_s, opcode 0x113: the $acc operand carries the
+// v4i32 accumulator through to int_wasm_dot_i8x16_i7x16_add_signed.
+defm RELAXED_DOT_ADD :
+  RELAXED_I<(outs V128:$dst), (ins V128:$lhs, V128:$rhs, V128:$acc),
+            (outs), (ins),
+            [(set (v4i32 V128:$dst), (int_wasm_dot_i8x16_i7x16_add_signed
+               (v16i8 V128:$lhs), (v16i8 V128:$rhs), (v4i32 V128:$acc)))],
+            "i32x4.dot_i8x16_i7x16_add_s\t$dst, $lhs, $rhs, $acc",
+            "i32x4.dot_i8x16_i7x16_add_s", 0x113>;
ret <8 x i16> %v
}
+; Verify that @llvm.wasm.relaxed.q15mulr.signed selects to a single
+; i16x8.relaxed_q15mulr_s instruction.
+; CHECK-LABEL: relaxed_q15mulr_s_i16x8:
+; CHECK-NEXT:  .functype relaxed_q15mulr_s_i16x8 (v128, v128) -> (v128){{$}}
+; CHECK-NEXT:  i16x8.relaxed_q15mulr_s $push[[R:[0-9]+]]=, $0, $1{{$}}
+; CHECK-NEXT:  return $pop[[R]]{{$}}
+declare <8 x i16> @llvm.wasm.relaxed.q15mulr.signed(<8 x i16>, <8 x i16>)
+define <8 x i16> @relaxed_q15mulr_s_i16x8(<8 x i16> %a, <8 x i16> %b) {
+  %v = call <8 x i16> @llvm.wasm.relaxed.q15mulr.signed(
+    <8 x i16> %a, <8 x i16> %b
+  )
+  ret <8 x i16> %v
+}
+
+; Verify that @llvm.wasm.dot.i8x16.i7x16.signed selects to a single
+; i16x8.dot_i8x16_i7x16_s instruction.
+; CHECK-LABEL: dot_i8x16_i7x16_s_i16x8:
+; CHECK-NEXT:  .functype dot_i8x16_i7x16_s_i16x8 (v128, v128) -> (v128){{$}}
+; CHECK-NEXT:  i16x8.dot_i8x16_i7x16_s $push[[R:[0-9]+]]=, $0, $1{{$}}
+; CHECK-NEXT:  return $pop[[R]]{{$}}
+declare <8 x i16> @llvm.wasm.dot.i8x16.i7x16.signed(<16 x i8>, <16 x i8>)
+define <8 x i16> @dot_i8x16_i7x16_s_i16x8(<16 x i8> %a, <16 x i8> %b) {
+  %v = call <8 x i16> @llvm.wasm.dot.i8x16.i7x16.signed(
+    <16 x i8> %a, <16 x i8> %b
+  )
+  ret <8 x i16> %v
+}
+
; ==============================================================================
; 4 x i32
; ==============================================================================
ret <4 x i32> %a
}
+; Verify that @llvm.wasm.dot.i8x16.i7x16.add.signed selects to a single
+; i32x4.dot_i8x16_i7x16_add_s instruction taking the accumulator last.
+; CHECK-LABEL: dot_i8x16_i7x16_add_s_i32x4:
+; CHECK-NEXT:  .functype dot_i8x16_i7x16_add_s_i32x4 (v128, v128, v128) -> (v128){{$}}
+; CHECK-NEXT:  i32x4.dot_i8x16_i7x16_add_s $push[[R:[0-9]+]]=, $0, $1, $2{{$}}
+; CHECK-NEXT:  return $pop[[R]]{{$}}
+declare <4 x i32> @llvm.wasm.dot.i8x16.i7x16.add.signed(<16 x i8>, <16 x i8>,
+                                                        <4 x i32>)
+define <4 x i32> @dot_i8x16_i7x16_add_s_i32x4(<16 x i8> %a, <16 x i8> %b,
+                                              <4 x i32> %c) {
+  %v = call <4 x i32> @llvm.wasm.dot.i8x16.i7x16.add.signed(
+    <16 x i8> %a, <16 x i8> %b, <4 x i32> %c
+  )
+  ret <4 x i32> %v
+}
+
; ==============================================================================
; 2 x i64
; ==============================================================================
# CHECK: f64x2.relaxed_max # encoding: [0xfd,0x90,0x02]
f64x2.relaxed_max
- # TODO: i16x8.relaxed_q15mulr_s # encoding: [0xfd,0x91,0x02]
- # TODO: i16x8.dot_i8x16_i7x16_s # encoding: [0xfd,0x92,0x02]
- # TODO: i32x4.dot_i8x16_i7x16_add_s # encoding: [0xfd,0x93,0x02]
+ # CHECK: i16x8.relaxed_q15mulr_s # encoding: [0xfd,0x91,0x02]
+ i16x8.relaxed_q15mulr_s
+
+ # CHECK: i16x8.dot_i8x16_i7x16_s # encoding: [0xfd,0x92,0x02]
+ i16x8.dot_i8x16_i7x16_s
+
+ # CHECK: i32x4.dot_i8x16_i7x16_add_s # encoding: [0xfd,0x93,0x02]
+ i32x4.dot_i8x16_i7x16_add_s
end_function