defm Q15MULR_SAT_S :
SIMDBinary<I16x8, int_wasm_q15mulr_saturate_signed, "q15mulr_sat_s", 156>;
+
+//===----------------------------------------------------------------------===//
+// Experimental prefetch instructions: prefetch.t, prefetch.nt
+//===----------------------------------------------------------------------===//
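+// prefetch.t hints that the addressed memory is likely to be reused soon
+// (temporal locality); prefetch.nt hints that it is not. Both consume an
+// address and push no results, and are reachable only via the
+// @llvm.wasm.prefetch.t and @llvm.wasm.prefetch.nt intrinsics matched below.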
+
+let mayLoad = true, UseNamedOperandTable = true in {
+defm PREFETCH_T_A32 :
+ SIMD_I<(outs), (ins P2Align:$p2align, offset32_op:$off, I32:$addr),
+ (outs), (ins P2Align:$p2align, offset32_op:$off), [],
+ "prefetch.t\t${off}(${addr})$p2align",
+ "prefetch.t\t$off$p2align", 0xc5>;
+defm PREFETCH_T_A64 :
+ SIMD_I<(outs), (ins P2Align:$p2align, offset64_op:$off, I64:$addr),
+ (outs), (ins P2Align:$p2align, offset64_op:$off), [],
+ "prefetch.t\t${off}(${addr})$p2align",
+ "prefetch.t\t$off$p2align", 0xc5>;
+defm PREFETCH_NT_A32 :
+ SIMD_I<(outs), (ins P2Align:$p2align, offset32_op:$off, I32:$addr),
+ (outs), (ins P2Align:$p2align, offset32_op:$off), [],
+ "prefetch.nt\t${off}(${addr})$p2align",
+ "prefetch.nt\t$off$p2align", 0xc6>;
+defm PREFETCH_NT_A64 :
+ SIMD_I<(outs), (ins P2Align:$p2align, offset64_op:$off, I64:$addr),
+ (outs), (ins P2Align:$p2align, offset64_op:$off), [],
+ "prefetch.nt\t${off}(${addr})$p2align",
+ "prefetch.nt\t$off$p2align", 0xc6>;
+} // mayLoad, UseNamedOperandTable
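+
+// Each variant takes the usual memarg immediates (p2align and offset) plus an
+// address operand and produces nothing. A sketch of the expected assembly,
+// assuming the zero immediates that the patterns below always supply:
+//
+//   local.get 0    # address
+//   prefetch.t 0   # p2align/offset immediates both zero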
+
+multiclass PrefetchPatNoOffset<SDPatternOperator kind, string inst> {
+ def : Pat<(kind I32:$addr), (!cast<NI>(inst # "_A32") 0, 0, $addr)>,
+ Requires<[HasAddr32]>;
+ def : Pat<(kind I64:$addr), (!cast<NI>(inst # "_A64") 0, 0, $addr)>,
+ Requires<[HasAddr64]>;
+}
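+
+// As a sketch, the PREFETCH_T instantiation below expands to roughly:
+//
+//   def : Pat<(int_wasm_prefetch_t I32:$addr), (PREFETCH_T_A32 0, 0, $addr)>;
+//   def : Pat<(int_wasm_prefetch_t I64:$addr), (PREFETCH_T_A64 0, 0, $addr)>;
+//
+// i.e. the instruction matching the active address width, with p2align and
+// offset immediates both zero.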
+
+foreach inst = [["PREFETCH_T", "int_wasm_prefetch_t"],
+ ["PREFETCH_NT", "int_wasm_prefetch_nt"]] in {
+defvar node = !cast<Intrinsic>(inst[1]);
+defm : PrefetchPatNoOffset<node, inst[0]>;
+}
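+
+// With these patterns, a call such as
+//
+//   call void @llvm.wasm.prefetch.t(i8* %p)
+//
+// selects to "prefetch.t 0" with %p left on the operand stack, as exercised
+// by the test below.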
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -verify-machineinstrs -mattr=+simd128 | FileCheck %s
+
+; Test experimental prefetch instructions
+
+target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
+target triple = "wasm32-unknown-unknown"
+
+declare void @llvm.wasm.prefetch.t(i8*)
+declare void @llvm.wasm.prefetch.nt(i8*)
+@gv = global i8 0
+
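+; Note: only the no-offset patterns are defined in the .td change, so even the
+; "folded" cases below expect constant offsets to be materialized with an
+; explicit i32.add rather than folded into the prefetch memarg immediate.
+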
+;===----------------------------------------------------------------------------
+; prefetch.t
+;===----------------------------------------------------------------------------
+
+define void @prefetch_t_no_offset(i8* %p) {
+; CHECK-LABEL: prefetch_t_no_offset:
+; CHECK: .functype prefetch_t_no_offset (i32) -> ()
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: prefetch.t 0
+; CHECK-NEXT: # fallthrough-return
+ tail call void @llvm.wasm.prefetch.t(i8* %p)
+ ret void
+}
+
+define void @prefetch_t_with_folded_offset(i8* %p) {
+; CHECK-LABEL: prefetch_t_with_folded_offset:
+; CHECK: .functype prefetch_t_with_folded_offset (i32) -> ()
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: i32.const 24
+; CHECK-NEXT: i32.add
+; CHECK-NEXT: prefetch.t 0
+; CHECK-NEXT: # fallthrough-return
+ %q = ptrtoint i8* %p to i32
+ %r = add nuw i32 %q, 24
+ %s = inttoptr i32 %r to i8*
+ tail call void @llvm.wasm.prefetch.t(i8* %s)
+ ret void
+}
+
+define void @prefetch_t_with_folded_gep_offset(i8* %p) {
+; CHECK-LABEL: prefetch_t_with_folded_gep_offset:
+; CHECK: .functype prefetch_t_with_folded_gep_offset (i32) -> ()
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: i32.const 6
+; CHECK-NEXT: i32.add
+; CHECK-NEXT: prefetch.t 0
+; CHECK-NEXT: # fallthrough-return
+ %s = getelementptr inbounds i8, i8* %p, i32 6
+ tail call void @llvm.wasm.prefetch.t(i8* %s)
+ ret void
+}
+
+define void @prefetch_t_with_unfolded_gep_negative_offset(i8* %p) {
+; CHECK-LABEL: prefetch_t_with_unfolded_gep_negative_offset:
+; CHECK: .functype prefetch_t_with_unfolded_gep_negative_offset (i32) -> ()
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: i32.const -6
+; CHECK-NEXT: i32.add
+; CHECK-NEXT: prefetch.t 0
+; CHECK-NEXT: # fallthrough-return
+ %s = getelementptr inbounds i8, i8* %p, i32 -6
+ tail call void @llvm.wasm.prefetch.t(i8* %s)
+ ret void
+}
+
+define void @prefetch_t_with_unfolded_offset(i8* %p) {
+; CHECK-LABEL: prefetch_t_with_unfolded_offset:
+; CHECK: .functype prefetch_t_with_unfolded_offset (i32) -> ()
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: i32.const 24
+; CHECK-NEXT: i32.add
+; CHECK-NEXT: prefetch.t 0
+; CHECK-NEXT: # fallthrough-return
+ %q = ptrtoint i8* %p to i32
+ %r = add nsw i32 %q, 24
+ %s = inttoptr i32 %r to i8*
+ tail call void @llvm.wasm.prefetch.t(i8* %s)
+ ret void
+}
+
+define void @prefetch_t_with_unfolded_gep_offset(i8* %p) {
+; CHECK-LABEL: prefetch_t_with_unfolded_gep_offset:
+; CHECK: .functype prefetch_t_with_unfolded_gep_offset (i32) -> ()
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: i32.const 6
+; CHECK-NEXT: i32.add
+; CHECK-NEXT: prefetch.t 0
+; CHECK-NEXT: # fallthrough-return
+ %s = getelementptr i8, i8* %p, i32 6
+ tail call void @llvm.wasm.prefetch.t(i8* %s)
+ ret void
+}
+
+define void @prefetch_t_from_numeric_address() {
+; CHECK-LABEL: prefetch_t_from_numeric_address:
+; CHECK: .functype prefetch_t_from_numeric_address () -> ()
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: i32.const 42
+; CHECK-NEXT: prefetch.t 0
+; CHECK-NEXT: # fallthrough-return
+ %s = inttoptr i32 42 to i8*
+ tail call void @llvm.wasm.prefetch.t(i8* %s)
+ ret void
+}
+
+define void @prefetch_t_from_global_address() {
+; CHECK-LABEL: prefetch_t_from_global_address:
+; CHECK: .functype prefetch_t_from_global_address () -> ()
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: i32.const gv
+; CHECK-NEXT: prefetch.t 0
+; CHECK-NEXT: # fallthrough-return
+ tail call void @llvm.wasm.prefetch.t(i8* @gv)
+ ret void
+}
+
+;===----------------------------------------------------------------------------
+; prefetch.nt
+;===----------------------------------------------------------------------------
+
+define void @prefetch_nt_no_offset(i8* %p) {
+; CHECK-LABEL: prefetch_nt_no_offset:
+; CHECK: .functype prefetch_nt_no_offset (i32) -> ()
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: prefetch.nt 0
+; CHECK-NEXT: # fallthrough-return
+ tail call void @llvm.wasm.prefetch.nt(i8* %p)
+ ret void
+}
+
+define void @prefetch_nt_with_folded_offset(i8* %p) {
+; CHECK-LABEL: prefetch_nt_with_folded_offset:
+; CHECK: .functype prefetch_nt_with_folded_offset (i32) -> ()
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: i32.const 24
+; CHECK-NEXT: i32.add
+; CHECK-NEXT: prefetch.nt 0
+; CHECK-NEXT: # fallthrough-return
+ %q = ptrtoint i8* %p to i32
+ %r = add nuw i32 %q, 24
+ %s = inttoptr i32 %r to i8*
+ tail call void @llvm.wasm.prefetch.nt(i8* %s)
+ ret void
+}
+
+define void @prefetch_nt_with_folded_gep_offset(i8* %p) {
+; CHECK-LABEL: prefetch_nt_with_folded_gep_offset:
+; CHECK: .functype prefetch_nt_with_folded_gep_offset (i32) -> ()
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: i32.const 6
+; CHECK-NEXT: i32.add
+; CHECK-NEXT: prefetch.nt 0
+; CHECK-NEXT: # fallthrough-return
+ %s = getelementptr inbounds i8, i8* %p, i64 6
+ tail call void @llvm.wasm.prefetch.nt(i8* %s)
+ ret void
+}
+
+define void @prefetch_nt_with_unfolded_gep_negative_offset(i8* %p) {
+; CHECK-LABEL: prefetch_nt_with_unfolded_gep_negative_offset:
+; CHECK: .functype prefetch_nt_with_unfolded_gep_negative_offset (i32) -> ()
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: i32.const -6
+; CHECK-NEXT: i32.add
+; CHECK-NEXT: prefetch.nt 0
+; CHECK-NEXT: # fallthrough-return
+ %s = getelementptr inbounds i8, i8* %p, i64 -6
+ tail call void @llvm.wasm.prefetch.nt(i8* %s)
+ ret void
+}
+
+define void @prefetch_nt_with_unfolded_offset(i8* %p) {
+; CHECK-LABEL: prefetch_nt_with_unfolded_offset:
+; CHECK: .functype prefetch_nt_with_unfolded_offset (i32) -> ()
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: i32.const 24
+; CHECK-NEXT: i32.add
+; CHECK-NEXT: prefetch.nt 0
+; CHECK-NEXT: # fallthrough-return
+ %q = ptrtoint i8* %p to i32
+ %r = add nsw i32 %q, 24
+ %s = inttoptr i32 %r to i8*
+ tail call void @llvm.wasm.prefetch.nt(i8* %s)
+ ret void
+}
+
+define void @prefetch_nt_with_unfolded_gep_offset(i8* %p) {
+; CHECK-LABEL: prefetch_nt_with_unfolded_gep_offset:
+; CHECK: .functype prefetch_nt_with_unfolded_gep_offset (i32) -> ()
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: i32.const 6
+; CHECK-NEXT: i32.add
+; CHECK-NEXT: prefetch.nt 0
+; CHECK-NEXT: # fallthrough-return
+ %s = getelementptr i8, i8* %p, i64 6
+ tail call void @llvm.wasm.prefetch.nt(i8* %s)
+ ret void
+}
+
+define void @prefetch_nt_from_numeric_address() {
+; CHECK-LABEL: prefetch_nt_from_numeric_address:
+; CHECK: .functype prefetch_nt_from_numeric_address () -> ()
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: i32.const 42
+; CHECK-NEXT: prefetch.nt 0
+; CHECK-NEXT: # fallthrough-return
+ %s = inttoptr i32 42 to i8*
+ tail call void @llvm.wasm.prefetch.nt(i8* %s)
+ ret void
+}
+
+define void @prefetch_nt_from_global_address() {
+; CHECK-LABEL: prefetch_nt_from_global_address:
+; CHECK: .functype prefetch_nt_from_global_address () -> ()
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: i32.const gv
+; CHECK-NEXT: prefetch.nt 0
+; CHECK-NEXT: # fallthrough-return
+ tail call void @llvm.wasm.prefetch.nt(i8* @gv)
+ ret void
+}