From 275b2e5243639268033d6f0183b9b3b1a30574be Mon Sep 17 00:00:00 2001
From: Philip Reames
Date: Fri, 10 Jun 2022 12:45:54 -0700
Subject: [PATCH] [RISCV] Add cost model coverage for scalable scatter/gather

---
 .../Analysis/CostModel/RISCV/scalable-gather.ll  | 204 +++++++++++++++++++++
 .../Analysis/CostModel/RISCV/scalable-scatter.ll | 204 +++++++++++++++++++++
 2 files changed, 408 insertions(+)
 create mode 100644 llvm/test/Analysis/CostModel/RISCV/scalable-gather.ll
 create mode 100644 llvm/test/Analysis/CostModel/RISCV/scalable-scatter.ll

diff --git a/llvm/test/Analysis/CostModel/RISCV/scalable-gather.ll b/llvm/test/Analysis/CostModel/RISCV/scalable-gather.ll
new file mode 100644
index 0000000..b87120b
--- /dev/null
+++ b/llvm/test/Analysis/CostModel/RISCV/scalable-gather.ll
@@ -0,0 +1,204 @@
+; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
+; RUN: opt -passes='print<cost-model>' 2>&1 -disable-output -mtriple=riscv64 -mattr=+v,+f,+d,+zfh,+experimental-zvfh < %s | FileCheck %s
+; RUN: opt -passes='print<cost-model>' 2>&1 -disable-output -mtriple=riscv64 -mattr=+v,+f,+d,+zfh,+experimental-zvfh -riscv-v-vector-bits-max=256 < %s | FileCheck %s
+; RUN: opt -passes='print<cost-model>' 2>&1 -disable-output -mtriple=riscv64 -mattr=+v,+f,+d,+zfh,+experimental-zvfh -riscv-v-vector-bits-min=256 -riscv-v-vector-bits-max=256 < %s | FileCheck %s
+; RUN: opt -passes='print<cost-model>' 2>&1 -disable-output -mtriple=riscv64 < %s | FileCheck %s
+
+define i32 @masked_gather() {
+; CHECK-LABEL: 'masked_gather'
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V8F64 = call @llvm.masked.gather.nxv8f64.nxv8p0f64( undef, i32 8, undef, undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V4F64 = call @llvm.masked.gather.nxv4f64.nxv4p0f64( undef, i32 8, undef, undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V2F64 = call @llvm.masked.gather.nxv2f64.nxv2p0f64( undef, i32 8, undef, undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V1F64 = call @llvm.masked.gather.nxv1f64.nxv1p0f64( undef, i32 8, undef, undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V16F32 = call @llvm.masked.gather.nxv16f32.nxv16p0f32( undef, i32 4, undef, undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V8F32 = call @llvm.masked.gather.nxv8f32.nxv8p0f32( undef, i32 4, undef, undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V4F32 = call @llvm.masked.gather.nxv4f32.nxv4p0f32( undef, i32 4, undef, undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V2F32 = call @llvm.masked.gather.nxv2f32.nxv2p0f32( undef, i32 4, undef, undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V1F32 = call @llvm.masked.gather.nxv1f32.nxv1p0f32( undef, i32 4, undef, undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V32F16 = call @llvm.masked.gather.nxv32f16.nxv32p0f16( undef, i32 2, undef, undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V16F16 = call @llvm.masked.gather.nxv16f16.nxv16p0f16( undef, i32 2, undef, undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V8F16 = call @llvm.masked.gather.nxv8f16.nxv8p0f16( undef, i32 2, undef, undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V4F16 = call @llvm.masked.gather.nxv4f16.nxv4p0f16( undef, i32 2, undef, undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V2F16 = call @llvm.masked.gather.nxv2f16.nxv2p0f16( undef, i32 2, undef, undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V1F16 = call @llvm.masked.gather.nxv1f16.nxv1p0f16(
undef, i32 2, undef, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V8I64 = call @llvm.masked.gather.nxv8i64.nxv8p0i64( undef, i32 8, undef, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V4I64 = call @llvm.masked.gather.nxv4i64.nxv4p0i64( undef, i32 8, undef, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V2I64 = call @llvm.masked.gather.nxv2i64.nxv2p0i64( undef, i32 8, undef, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V1I64 = call @llvm.masked.gather.nxv1i64.nxv1p0i64( undef, i32 8, undef, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V16I32 = call @llvm.masked.gather.nxv16i32.nxv16p0i32( undef, i32 4, undef, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V8I32 = call @llvm.masked.gather.nxv8i32.nxv8p0i32( undef, i32 4, undef, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V4I32 = call @llvm.masked.gather.nxv4i32.nxv4p0i32( undef, i32 4, undef, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V2I32 = call @llvm.masked.gather.nxv2i32.nxv2p0i32( undef, i32 4, undef, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V1I32 = call @llvm.masked.gather.nxv1i32.nxv1p0i32( undef, i32 4, undef, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V32I16 = call @llvm.masked.gather.nxv32i16.nxv32p0i16( undef, i32 2, undef, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V16I16 = call @llvm.masked.gather.nxv16i16.nxv16p0i16( undef, i32 2, undef, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V8I16 = call @llvm.masked.gather.nxv8i16.nxv8p0i16( undef, i32 2, undef, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V4I16 = call @llvm.masked.gather.nxv4i16.nxv4p0i16( undef, i32 2, undef, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V2I16 = call @llvm.masked.gather.nxv2i16.nxv2p0i16( undef, i32 2, undef, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V1I16 = call @llvm.masked.gather.nxv1i16.nxv1p0i16( undef, i32 2, undef, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V64I8 = call @llvm.masked.gather.nxv64i8.nxv64p0i8( undef, i32 1, undef, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V32I8 = call @llvm.masked.gather.nxv32i8.nxv32p0i8( undef, i32 1, undef, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V16I8 = call @llvm.masked.gather.nxv16i8.nxv16p0i8( undef, i32 1, undef, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V8I8 = call @llvm.masked.gather.nxv8i8.nxv8p0i8( undef, i32 1, undef, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V4I8 = call @llvm.masked.gather.nxv4i8.nxv4p0i8( undef, i32 1, undef, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V2I8 = call @llvm.masked.gather.nxv2i8.nxv2p0i8( undef, i32 1, undef, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V1I8 = call @llvm.masked.gather.nxv1i8.nxv1p0i8( undef, i32 1, undef, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V8F64.u = call @llvm.masked.gather.nxv8f64.nxv8p0f64( undef, i32 2, undef, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V4F64.u = call @llvm.masked.gather.nxv4f64.nxv4p0f64( undef, i32 2, undef, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V2F64.u = call @llvm.masked.gather.nxv2f64.nxv2p0f64( undef, i32 2, undef, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V1F64.u = call 
@llvm.masked.gather.nxv1f64.nxv1p0f64( undef, i32 2, undef, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V16F32.u = call @llvm.masked.gather.nxv16f32.nxv16p0f32( undef, i32 2, undef, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V8F32.u = call @llvm.masked.gather.nxv8f32.nxv8p0f32( undef, i32 2, undef, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V4F32.u = call @llvm.masked.gather.nxv4f32.nxv4p0f32( undef, i32 2, undef, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V2F32.u = call @llvm.masked.gather.nxv2f32.nxv2p0f32( undef, i32 2, undef, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V1F32.u = call @llvm.masked.gather.nxv1f32.nxv1p0f32( undef, i32 2, undef, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V32F16.u = call @llvm.masked.gather.nxv32f16.nxv32p0f16( undef, i32 1, undef, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V16F16.u = call @llvm.masked.gather.nxv16f16.nxv16p0f16( undef, i32 1, undef, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V8F16.u = call @llvm.masked.gather.nxv8f16.nxv8p0f16( undef, i32 1, undef, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V4F16.u = call @llvm.masked.gather.nxv4f16.nxv4p0f16( undef, i32 1, undef, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V2F16.u = call @llvm.masked.gather.nxv2f16.nxv2p0f16( undef, i32 1, undef, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V1F16.u = call @llvm.masked.gather.nxv1f16.nxv1p0f16( undef, i32 1, undef, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V8I64.u = call @llvm.masked.gather.nxv8i64.nxv8p0i64( undef, i32 4, undef, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V4I64.u = call @llvm.masked.gather.nxv4i64.nxv4p0i64( undef, i32 4, undef, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V2I64.u = call @llvm.masked.gather.nxv2i64.nxv2p0i64( undef, i32 4, undef, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V1I64.u = call @llvm.masked.gather.nxv1i64.nxv1p0i64( undef, i32 4, undef, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V16I32.u = call @llvm.masked.gather.nxv16i32.nxv16p0i32( undef, i32 1, undef, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V8I32.u = call @llvm.masked.gather.nxv8i32.nxv8p0i32( undef, i32 1, undef, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V4I32.u = call @llvm.masked.gather.nxv4i32.nxv4p0i32( undef, i32 1, undef, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V2I32.u = call @llvm.masked.gather.nxv2i32.nxv2p0i32( undef, i32 1, undef, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V1I32.u = call @llvm.masked.gather.nxv1i32.nxv1p0i32( undef, i32 1, undef, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V32I16.u = call @llvm.masked.gather.nxv32i16.nxv32p0i16( undef, i32 1, undef, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V16I16.u = call @llvm.masked.gather.nxv16i16.nxv16p0i16( undef, i32 1, undef, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V8I16.u = call @llvm.masked.gather.nxv8i16.nxv8p0i16( undef, i32 1, undef, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V4I16.u = call @llvm.masked.gather.nxv4i16.nxv4p0i16( undef, i32 1, undef, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V2I16.u = call 
@llvm.masked.gather.nxv2i16.nxv2p0i16( undef, i32 1, undef, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V1I16.u = call @llvm.masked.gather.nxv1i16.nxv1p0i16( undef, i32 1, undef, undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 0 +; + %V8F64 = call @llvm.masked.gather.nxv8f64.nxv8p0f64( undef, i32 8, undef, undef) + %V4F64 = call @llvm.masked.gather.nxv4f64.nxv4p0f64( undef, i32 8, undef, undef) + %V2F64 = call @llvm.masked.gather.nxv2f64.nxv2p0f64( undef, i32 8, undef, undef) + %V1F64 = call @llvm.masked.gather.nxv1f64.nxv1p0f64( undef, i32 8, undef, undef) + + %V16F32 = call @llvm.masked.gather.nxv16f32.nxv16p0f32( undef, i32 4, undef, undef) + %V8F32 = call @llvm.masked.gather.nxv8f32.nxv8p0f32( undef, i32 4, undef, undef) + %V4F32 = call @llvm.masked.gather.nxv4f32.nxv4p0f32( undef, i32 4, undef, undef) + %V2F32 = call @llvm.masked.gather.nxv2f32.nxv2p0f32( undef, i32 4, undef, undef) + %V1F32 = call @llvm.masked.gather.nxv1f32.nxv1p0f32( undef, i32 4, undef, undef) + + %V32F16 = call @llvm.masked.gather.nxv32f16.nxv32p0f16( undef, i32 2, undef, undef) + %V16F16 = call @llvm.masked.gather.nxv16f16.nxv16p0f16( undef, i32 2, undef, undef) + %V8F16 = call @llvm.masked.gather.nxv8f16.nxv8p0f16( undef, i32 2, undef, undef) + %V4F16 = call @llvm.masked.gather.nxv4f16.nxv4p0f16( undef, i32 2, undef, undef) + %V2F16 = call @llvm.masked.gather.nxv2f16.nxv2p0f16( undef, i32 2, undef, undef) + %V1F16 = call @llvm.masked.gather.nxv1f16.nxv1p0f16( undef, i32 2, undef, undef) + + %V8I64 = call @llvm.masked.gather.nxv8i64.nxv8p0i64( undef, i32 8, undef, undef) + %V4I64 = call @llvm.masked.gather.nxv4i64.nxv4p0i64( undef, i32 8, undef, undef) + %V2I64 = call @llvm.masked.gather.nxv2i64.nxv2p0i64( undef, i32 8, undef, undef) + %V1I64 = call @llvm.masked.gather.nxv1i64.nxv1p0i64( undef, i32 8, undef, undef) + + %V16I32 = call @llvm.masked.gather.nxv16i32.nxv16p0i32( undef, i32 4, undef, undef) + %V8I32 = call @llvm.masked.gather.nxv8i32.nxv8p0i32( undef, i32 4, undef, undef) + %V4I32 = call @llvm.masked.gather.nxv4i32.nxv4p0i32( undef, i32 4, undef, undef) + %V2I32 = call @llvm.masked.gather.nxv2i32.nxv2p0i32( undef, i32 4, undef, undef) + %V1I32 = call @llvm.masked.gather.nxv1i32.nxv1p0i32( undef, i32 4, undef, undef) + + %V32I16 = call @llvm.masked.gather.nxv32i16.nxv32p0i16( undef, i32 2, undef, undef) + %V16I16 = call @llvm.masked.gather.nxv16i16.nxv16p0i16( undef, i32 2, undef, undef) + %V8I16 = call @llvm.masked.gather.nxv8i16.nxv8p0i16( undef, i32 2, undef, undef) + %V4I16 = call @llvm.masked.gather.nxv4i16.nxv4p0i16( undef, i32 2, undef, undef) + %V2I16 = call @llvm.masked.gather.nxv2i16.nxv2p0i16( undef, i32 2, undef, undef) + %V1I16 = call @llvm.masked.gather.nxv1i16.nxv1p0i16( undef, i32 2, undef, undef) + + %V64I8 = call @llvm.masked.gather.nxv64i8.nxv64p0i8( undef, i32 1, undef, undef) + %V32I8 = call @llvm.masked.gather.nxv32i8.nxv32p0i8( undef, i32 1, undef, undef) + %V16I8 = call @llvm.masked.gather.nxv16i8.nxv16p0i8( undef, i32 1, undef, undef) + %V8I8 = call @llvm.masked.gather.nxv8i8.nxv8p0i8( undef, i32 1, undef, undef) + %V4I8 = call @llvm.masked.gather.nxv4i8.nxv4p0i8( undef, i32 1, undef, undef) + %V2I8 = call @llvm.masked.gather.nxv2i8.nxv2p0i8( undef, i32 1, undef, undef) + %V1I8 = call @llvm.masked.gather.nxv1i8.nxv1p0i8( undef, i32 1, undef, undef) + + ; Test unaligned gathers + %V8F64.u = call @llvm.masked.gather.nxv8f64.nxv8p0f64( undef, i32 2, undef, undef) + %V4F64.u = call @llvm.masked.gather.nxv4f64.nxv4p0f64( 
undef, i32 2, undef, undef) + %V2F64.u = call @llvm.masked.gather.nxv2f64.nxv2p0f64( undef, i32 2, undef, undef) + %V1F64.u = call @llvm.masked.gather.nxv1f64.nxv1p0f64( undef, i32 2, undef, undef) + + %V16F32.u = call @llvm.masked.gather.nxv16f32.nxv16p0f32( undef, i32 2, undef, undef) + %V8F32.u = call @llvm.masked.gather.nxv8f32.nxv8p0f32( undef, i32 2, undef, undef) + %V4F32.u = call @llvm.masked.gather.nxv4f32.nxv4p0f32( undef, i32 2, undef, undef) + %V2F32.u = call @llvm.masked.gather.nxv2f32.nxv2p0f32( undef, i32 2, undef, undef) + %V1F32.u = call @llvm.masked.gather.nxv1f32.nxv1p0f32( undef, i32 2, undef, undef) + + %V32F16.u = call @llvm.masked.gather.nxv32f16.nxv32p0f16( undef, i32 1, undef, undef) + %V16F16.u = call @llvm.masked.gather.nxv16f16.nxv16p0f16( undef, i32 1, undef, undef) + %V8F16.u = call @llvm.masked.gather.nxv8f16.nxv8p0f16( undef, i32 1, undef, undef) + %V4F16.u = call @llvm.masked.gather.nxv4f16.nxv4p0f16( undef, i32 1, undef, undef) + %V2F16.u = call @llvm.masked.gather.nxv2f16.nxv2p0f16( undef, i32 1, undef, undef) + %V1F16.u = call @llvm.masked.gather.nxv1f16.nxv1p0f16( undef, i32 1, undef, undef) + + %V8I64.u = call @llvm.masked.gather.nxv8i64.nxv8p0i64( undef, i32 4, undef, undef) + %V4I64.u = call @llvm.masked.gather.nxv4i64.nxv4p0i64( undef, i32 4, undef, undef) + %V2I64.u = call @llvm.masked.gather.nxv2i64.nxv2p0i64( undef, i32 4, undef, undef) + %V1I64.u = call @llvm.masked.gather.nxv1i64.nxv1p0i64( undef, i32 4, undef, undef) + + %V16I32.u = call @llvm.masked.gather.nxv16i32.nxv16p0i32( undef, i32 1, undef, undef) + %V8I32.u = call @llvm.masked.gather.nxv8i32.nxv8p0i32( undef, i32 1, undef, undef) + %V4I32.u = call @llvm.masked.gather.nxv4i32.nxv4p0i32( undef, i32 1, undef, undef) + %V2I32.u = call @llvm.masked.gather.nxv2i32.nxv2p0i32( undef, i32 1, undef, undef) + %V1I32.u = call @llvm.masked.gather.nxv1i32.nxv1p0i32( undef, i32 1, undef, undef) + + %V32I16.u = call @llvm.masked.gather.nxv32i16.nxv32p0i16( undef, i32 1, undef, undef) + %V16I16.u = call @llvm.masked.gather.nxv16i16.nxv16p0i16( undef, i32 1, undef, undef) + %V8I16.u = call @llvm.masked.gather.nxv8i16.nxv8p0i16( undef, i32 1, undef, undef) + %V4I16.u = call @llvm.masked.gather.nxv4i16.nxv4p0i16( undef, i32 1, undef, undef) + %V2I16.u = call @llvm.masked.gather.nxv2i16.nxv2p0i16( undef, i32 1, undef, undef) + %V1I16.u = call @llvm.masked.gather.nxv1i16.nxv1p0i16( undef, i32 1, undef, undef) + + ret i32 0 +} + +declare @llvm.masked.gather.nxv8f64.nxv8p0f64(, i32, , ) +declare @llvm.masked.gather.nxv4f64.nxv4p0f64(, i32, , ) +declare @llvm.masked.gather.nxv2f64.nxv2p0f64(, i32, , ) +declare @llvm.masked.gather.nxv1f64.nxv1p0f64(, i32, , ) + +declare @llvm.masked.gather.nxv16f32.nxv16p0f32(, i32, , ) +declare @llvm.masked.gather.nxv8f32.nxv8p0f32(, i32, , ) +declare @llvm.masked.gather.nxv4f32.nxv4p0f32(, i32, , ) +declare @llvm.masked.gather.nxv2f32.nxv2p0f32(, i32, , ) +declare @llvm.masked.gather.nxv1f32.nxv1p0f32(, i32, , ) + +declare @llvm.masked.gather.nxv32f16.nxv32p0f16(, i32, , ) +declare @llvm.masked.gather.nxv16f16.nxv16p0f16(, i32, , ) +declare @llvm.masked.gather.nxv8f16.nxv8p0f16(, i32, , ) +declare @llvm.masked.gather.nxv4f16.nxv4p0f16(, i32, , ) +declare @llvm.masked.gather.nxv2f16.nxv2p0f16(, i32, , ) +declare @llvm.masked.gather.nxv1f16.nxv1p0f16(, i32, , ) + +declare @llvm.masked.gather.nxv8i64.nxv8p0i64(, i32, , ) +declare @llvm.masked.gather.nxv4i64.nxv4p0i64(, i32, , ) +declare @llvm.masked.gather.nxv2i64.nxv2p0i64(, i32, , ) +declare 
@llvm.masked.gather.nxv1i64.nxv1p0i64(, i32, , )
+
+declare @llvm.masked.gather.nxv16i32.nxv16p0i32(, i32, , )
+declare @llvm.masked.gather.nxv8i32.nxv8p0i32(, i32, , )
+declare @llvm.masked.gather.nxv4i32.nxv4p0i32(, i32, , )
+declare @llvm.masked.gather.nxv2i32.nxv2p0i32(, i32, , )
+declare @llvm.masked.gather.nxv1i32.nxv1p0i32(, i32, , )
+
+declare @llvm.masked.gather.nxv32i16.nxv32p0i16(, i32, , )
+declare @llvm.masked.gather.nxv16i16.nxv16p0i16(, i32, , )
+declare @llvm.masked.gather.nxv8i16.nxv8p0i16(, i32, , )
+declare @llvm.masked.gather.nxv4i16.nxv4p0i16(, i32, , )
+declare @llvm.masked.gather.nxv2i16.nxv2p0i16(, i32, , )
+declare @llvm.masked.gather.nxv1i16.nxv1p0i16(, i32, , )
+
+declare @llvm.masked.gather.nxv64i8.nxv64p0i8(, i32, , )
+declare @llvm.masked.gather.nxv32i8.nxv32p0i8(, i32, , )
+declare @llvm.masked.gather.nxv16i8.nxv16p0i8(, i32, , )
+declare @llvm.masked.gather.nxv8i8.nxv8p0i8(, i32, , )
+declare @llvm.masked.gather.nxv4i8.nxv4p0i8(, i32, , )
+declare @llvm.masked.gather.nxv2i8.nxv2p0i8(, i32, , )
+declare @llvm.masked.gather.nxv1i8.nxv1p0i8(, i32, , )
diff --git a/llvm/test/Analysis/CostModel/RISCV/scalable-scatter.ll b/llvm/test/Analysis/CostModel/RISCV/scalable-scatter.ll
new file mode 100644
index 0000000..07f5f91
--- /dev/null
+++ b/llvm/test/Analysis/CostModel/RISCV/scalable-scatter.ll
@@ -0,0 +1,204 @@
+; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
+; RUN: opt -passes='print<cost-model>' 2>&1 -disable-output -mtriple=riscv64 -mattr=+v,+f,+d,+zfh,+experimental-zvfh < %s | FileCheck %s
+; RUN: opt -passes='print<cost-model>' 2>&1 -disable-output -mtriple=riscv64 -mattr=+v,+f,+d,+zfh,+experimental-zvfh -riscv-v-vector-bits-max=256 < %s | FileCheck %s
+; RUN: opt -passes='print<cost-model>' 2>&1 -disable-output -mtriple=riscv64 -mattr=+v,+f,+d,+zfh,+experimental-zvfh -riscv-v-vector-bits-min=256 -riscv-v-vector-bits-max=256 < %s | FileCheck %s
+; RUN: opt -passes='print<cost-model>' 2>&1 -disable-output -mtriple=riscv64 < %s | FileCheck %s
+
+define i32 @masked_scatter() {
+; CHECK-LABEL: 'masked_scatter'
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv8f64.nxv8p0f64( undef, undef, i32 8, undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv4f64.nxv4p0f64( undef, undef, i32 8, undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv2f64.nxv2p0f64( undef, undef, i32 8, undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv1f64.nxv1p0f64( undef, undef, i32 8, undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv16f32.nxv16p0f32( undef, undef, i32 4, undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv8f32.nxv8p0f32( undef, undef, i32 4, undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv4f32.nxv4p0f32( undef, undef, i32 4, undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv2f32.nxv2p0f32( undef, undef, i32 4, undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv1f32.nxv1p0f32( undef, undef, i32 4, undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv32f16.nxv32p0f16( undef, undef, i32 2, undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv16f16.nxv16p0f16( undef, undef, i32 2, undef)
+;
CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv8f16.nxv8p0f16( undef, undef, i32 2, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv4f16.nxv4p0f16( undef, undef, i32 2, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv2f16.nxv2p0f16( undef, undef, i32 2, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv1f16.nxv1p0f16( undef, undef, i32 2, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv8i64.nxv8p0i64( undef, undef, i32 8, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv4i64.nxv4p0i64( undef, undef, i32 8, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv2i64.nxv2p0i64( undef, undef, i32 8, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv1i64.nxv1p0i64( undef, undef, i32 8, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv16i32.nxv16p0i32( undef, undef, i32 4, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv8i32.nxv8p0i32( undef, undef, i32 4, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv4i32.nxv4p0i32( undef, undef, i32 4, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv2i32.nxv2p0i32( undef, undef, i32 4, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv1i32.nxv1p0i32( undef, undef, i32 4, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv32i16.nxv32p0i16( undef, undef, i32 2, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv16i16.nxv16p0i16( undef, undef, i32 2, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv8i16.nxv8p0i16( undef, undef, i32 2, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv4i16.nxv4p0i16( undef, undef, i32 2, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv2i16.nxv2p0i16( undef, undef, i32 2, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv1i16.nxv1p0i16( undef, undef, i32 2, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv64i8.nxv64p0i8( undef, undef, i32 1, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv32i8.nxv32p0i8( undef, undef, i32 1, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv16i8.nxv16p0i8( undef, undef, i32 1, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv8i8.nxv8p0i8( undef, undef, i32 1, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv4i8.nxv4p0i8( undef, undef, i32 1, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv2i8.nxv2p0i8( undef, undef, i32 1, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv1i8.nxv1p0i8( undef, undef, i32 1, undef) +; CHECK-NEXT: Cost Model: Invalid cost for 
instruction: call void @llvm.masked.scatter.nxv8f64.nxv8p0f64( undef, undef, i32 2, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv4f64.nxv4p0f64( undef, undef, i32 2, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv2f64.nxv2p0f64( undef, undef, i32 2, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv1f64.nxv1p0f64( undef, undef, i32 2, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv16f32.nxv16p0f32( undef, undef, i32 2, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv8f32.nxv8p0f32( undef, undef, i32 2, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv4f32.nxv4p0f32( undef, undef, i32 2, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv2f32.nxv2p0f32( undef, undef, i32 2, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv1f32.nxv1p0f32( undef, undef, i32 2, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv32f16.nxv32p0f16( undef, undef, i32 1, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv16f16.nxv16p0f16( undef, undef, i32 1, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv8f16.nxv8p0f16( undef, undef, i32 1, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv4f16.nxv4p0f16( undef, undef, i32 1, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv2f16.nxv2p0f16( undef, undef, i32 1, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv1f16.nxv1p0f16( undef, undef, i32 1, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv8i64.nxv8p0i64( undef, undef, i32 1, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv4i64.nxv4p0i64( undef, undef, i32 1, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv2i64.nxv2p0i64( undef, undef, i32 1, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv1i64.nxv1p0i64( undef, undef, i32 1, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv16i32.nxv16p0i32( undef, undef, i32 1, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv8i32.nxv8p0i32( undef, undef, i32 1, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv4i32.nxv4p0i32( undef, undef, i32 1, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv2i32.nxv2p0i32( undef, undef, i32 1, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv1i32.nxv1p0i32( undef, undef, i32 1, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv32i16.nxv32p0i16( undef, undef, i32 1, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv16i16.nxv16p0i16( undef, undef, i32 1, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void 
@llvm.masked.scatter.nxv8i16.nxv8p0i16( undef, undef, i32 1, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv4i16.nxv4p0i16( undef, undef, i32 1, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv2i16.nxv2p0i16( undef, undef, i32 1, undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv1i16.nxv1p0i16( undef, undef, i32 1, undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 0 +; + call void @llvm.masked.scatter.nxv8f64.nxv8p0f64( undef, undef, i32 8, undef) + call void @llvm.masked.scatter.nxv4f64.nxv4p0f64( undef, undef, i32 8, undef) + call void @llvm.masked.scatter.nxv2f64.nxv2p0f64( undef, undef, i32 8, undef) + call void @llvm.masked.scatter.nxv1f64.nxv1p0f64( undef, undef, i32 8, undef) + + call void @llvm.masked.scatter.nxv16f32.nxv16p0f32( undef, undef, i32 4, undef) + call void @llvm.masked.scatter.nxv8f32.nxv8p0f32( undef, undef, i32 4, undef) + call void @llvm.masked.scatter.nxv4f32.nxv4p0f32( undef, undef, i32 4, undef) + call void @llvm.masked.scatter.nxv2f32.nxv2p0f32( undef, undef, i32 4, undef) + call void @llvm.masked.scatter.nxv1f32.nxv1p0f32( undef, undef, i32 4, undef) + + call void @llvm.masked.scatter.nxv32f16.nxv32p0f16( undef, undef, i32 2, undef) + call void @llvm.masked.scatter.nxv16f16.nxv16p0f16( undef, undef, i32 2, undef) + call void @llvm.masked.scatter.nxv8f16.nxv8p0f16( undef, undef, i32 2, undef) + call void @llvm.masked.scatter.nxv4f16.nxv4p0f16( undef, undef, i32 2, undef) + call void @llvm.masked.scatter.nxv2f16.nxv2p0f16( undef, undef, i32 2, undef) + call void @llvm.masked.scatter.nxv1f16.nxv1p0f16( undef, undef, i32 2, undef) + + call void @llvm.masked.scatter.nxv8i64.nxv8p0i64( undef, undef, i32 8, undef) + call void @llvm.masked.scatter.nxv4i64.nxv4p0i64( undef, undef, i32 8, undef) + call void @llvm.masked.scatter.nxv2i64.nxv2p0i64( undef, undef, i32 8, undef) + call void @llvm.masked.scatter.nxv1i64.nxv1p0i64( undef, undef, i32 8, undef) + + call void @llvm.masked.scatter.nxv16i32.nxv16p0i32( undef, undef, i32 4, undef) + call void @llvm.masked.scatter.nxv8i32.nxv8p0i32( undef, undef, i32 4, undef) + call void @llvm.masked.scatter.nxv4i32.nxv4p0i32( undef, undef, i32 4, undef) + call void @llvm.masked.scatter.nxv2i32.nxv2p0i32( undef, undef, i32 4, undef) + call void @llvm.masked.scatter.nxv1i32.nxv1p0i32( undef, undef, i32 4, undef) + + call void @llvm.masked.scatter.nxv32i16.nxv32p0i16( undef, undef, i32 2, undef) + call void @llvm.masked.scatter.nxv16i16.nxv16p0i16( undef, undef, i32 2, undef) + call void @llvm.masked.scatter.nxv8i16.nxv8p0i16( undef, undef, i32 2, undef) + call void @llvm.masked.scatter.nxv4i16.nxv4p0i16( undef, undef, i32 2, undef) + call void @llvm.masked.scatter.nxv2i16.nxv2p0i16( undef, undef, i32 2, undef) + call void @llvm.masked.scatter.nxv1i16.nxv1p0i16( undef, undef, i32 2, undef) + + call void @llvm.masked.scatter.nxv64i8.nxv64p0i8( undef, undef, i32 1, undef) + call void @llvm.masked.scatter.nxv32i8.nxv32p0i8( undef, undef, i32 1, undef) + call void @llvm.masked.scatter.nxv16i8.nxv16p0i8( undef, undef, i32 1, undef) + call void @llvm.masked.scatter.nxv8i8.nxv8p0i8( undef, undef, i32 1, undef) + call void @llvm.masked.scatter.nxv4i8.nxv4p0i8( undef, undef, i32 1, undef) + call void @llvm.masked.scatter.nxv2i8.nxv2p0i8( undef, undef, i32 1, undef) + call void @llvm.masked.scatter.nxv1i8.nxv1p0i8( undef, undef, i32 1, undef) + + ; Test 
unaligned scatters + call void @llvm.masked.scatter.nxv8f64.nxv8p0f64( undef, undef, i32 2, undef) + call void @llvm.masked.scatter.nxv4f64.nxv4p0f64( undef, undef, i32 2, undef) + call void @llvm.masked.scatter.nxv2f64.nxv2p0f64( undef, undef, i32 2, undef) + call void @llvm.masked.scatter.nxv1f64.nxv1p0f64( undef, undef, i32 2, undef) + + call void @llvm.masked.scatter.nxv16f32.nxv16p0f32( undef, undef, i32 2, undef) + call void @llvm.masked.scatter.nxv8f32.nxv8p0f32( undef, undef, i32 2, undef) + call void @llvm.masked.scatter.nxv4f32.nxv4p0f32( undef, undef, i32 2, undef) + call void @llvm.masked.scatter.nxv2f32.nxv2p0f32( undef, undef, i32 2, undef) + call void @llvm.masked.scatter.nxv1f32.nxv1p0f32( undef, undef, i32 2, undef) + + call void @llvm.masked.scatter.nxv32f16.nxv32p0f16( undef, undef, i32 1, undef) + call void @llvm.masked.scatter.nxv16f16.nxv16p0f16( undef, undef, i32 1, undef) + call void @llvm.masked.scatter.nxv8f16.nxv8p0f16( undef, undef, i32 1, undef) + call void @llvm.masked.scatter.nxv4f16.nxv4p0f16( undef, undef, i32 1, undef) + call void @llvm.masked.scatter.nxv2f16.nxv2p0f16( undef, undef, i32 1, undef) + call void @llvm.masked.scatter.nxv1f16.nxv1p0f16( undef, undef, i32 1, undef) + + call void @llvm.masked.scatter.nxv8i64.nxv8p0i64( undef, undef, i32 1, undef) + call void @llvm.masked.scatter.nxv4i64.nxv4p0i64( undef, undef, i32 1, undef) + call void @llvm.masked.scatter.nxv2i64.nxv2p0i64( undef, undef, i32 1, undef) + call void @llvm.masked.scatter.nxv1i64.nxv1p0i64( undef, undef, i32 1, undef) + + call void @llvm.masked.scatter.nxv16i32.nxv16p0i32( undef, undef, i32 1, undef) + call void @llvm.masked.scatter.nxv8i32.nxv8p0i32( undef, undef, i32 1, undef) + call void @llvm.masked.scatter.nxv4i32.nxv4p0i32( undef, undef, i32 1, undef) + call void @llvm.masked.scatter.nxv2i32.nxv2p0i32( undef, undef, i32 1, undef) + call void @llvm.masked.scatter.nxv1i32.nxv1p0i32( undef, undef, i32 1, undef) + + call void @llvm.masked.scatter.nxv32i16.nxv32p0i16( undef, undef, i32 1, undef) + call void @llvm.masked.scatter.nxv16i16.nxv16p0i16( undef, undef, i32 1, undef) + call void @llvm.masked.scatter.nxv8i16.nxv8p0i16( undef, undef, i32 1, undef) + call void @llvm.masked.scatter.nxv4i16.nxv4p0i16( undef, undef, i32 1, undef) + call void @llvm.masked.scatter.nxv2i16.nxv2p0i16( undef, undef, i32 1, undef) + call void @llvm.masked.scatter.nxv1i16.nxv1p0i16( undef, undef, i32 1, undef) + + ret i32 0 +} + +declare void @llvm.masked.scatter.nxv8f64.nxv8p0f64(, , i32, ) +declare void @llvm.masked.scatter.nxv4f64.nxv4p0f64(, , i32, ) +declare void @llvm.masked.scatter.nxv2f64.nxv2p0f64(, , i32, ) +declare void @llvm.masked.scatter.nxv1f64.nxv1p0f64(, , i32, ) + +declare void @llvm.masked.scatter.nxv16f32.nxv16p0f32(, , i32, ) +declare void @llvm.masked.scatter.nxv8f32.nxv8p0f32(, , i32, ) +declare void @llvm.masked.scatter.nxv4f32.nxv4p0f32(, , i32, ) +declare void @llvm.masked.scatter.nxv2f32.nxv2p0f32(, , i32, ) +declare void @llvm.masked.scatter.nxv1f32.nxv1p0f32(, , i32, ) + +declare void @llvm.masked.scatter.nxv32f16.nxv32p0f16(, , i32, ) +declare void @llvm.masked.scatter.nxv16f16.nxv16p0f16(, , i32, ) +declare void @llvm.masked.scatter.nxv8f16.nxv8p0f16(, , i32, ) +declare void @llvm.masked.scatter.nxv4f16.nxv4p0f16(, , i32, ) +declare void @llvm.masked.scatter.nxv2f16.nxv2p0f16(, , i32, ) +declare void @llvm.masked.scatter.nxv1f16.nxv1p0f16(, , i32, ) + +declare void @llvm.masked.scatter.nxv8i64.nxv8p0i64(, , i32, ) +declare void @llvm.masked.scatter.nxv4i64.nxv4p0i64(, , 
i32, ) +declare void @llvm.masked.scatter.nxv2i64.nxv2p0i64(, , i32, ) +declare void @llvm.masked.scatter.nxv1i64.nxv1p0i64(, , i32, ) + +declare void @llvm.masked.scatter.nxv16i32.nxv16p0i32(, , i32, ) +declare void @llvm.masked.scatter.nxv8i32.nxv8p0i32(, , i32, ) +declare void @llvm.masked.scatter.nxv4i32.nxv4p0i32(, , i32, ) +declare void @llvm.masked.scatter.nxv2i32.nxv2p0i32(, , i32, ) +declare void @llvm.masked.scatter.nxv1i32.nxv1p0i32(, , i32, ) + +declare void @llvm.masked.scatter.nxv32i16.nxv32p0i16(, , i32, ) +declare void @llvm.masked.scatter.nxv16i16.nxv16p0i16(, , i32, ) +declare void @llvm.masked.scatter.nxv8i16.nxv8p0i16(, , i32, ) +declare void @llvm.masked.scatter.nxv4i16.nxv4p0i16(, , i32, ) +declare void @llvm.masked.scatter.nxv2i16.nxv2p0i16(, , i32, ) +declare void @llvm.masked.scatter.nxv1i16.nxv1p0i16(, , i32, ) + +declare void @llvm.masked.scatter.nxv64i8.nxv64p0i8(, , i32, ) +declare void @llvm.masked.scatter.nxv32i8.nxv32p0i8(, , i32, ) +declare void @llvm.masked.scatter.nxv16i8.nxv16p0i8(, , i32, ) +declare void @llvm.masked.scatter.nxv8i8.nxv8p0i8(, , i32, ) +declare void @llvm.masked.scatter.nxv4i8.nxv4p0i8(, , i32, ) +declare void @llvm.masked.scatter.nxv2i8.nxv2p0i8(, , i32, ) +declare void @llvm.masked.scatter.nxv1i8.nxv1p0i8(, , i32, ) -- 2.7.4