#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
+#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
case Intrinsic::x86_sse42_crc32_64_64:
Known.Zero.setBitsFrom(32);
break;
+ case Intrinsic::riscv_vsetvli:
+ case Intrinsic::riscv_vsetvlimax:
+ // Assume that VL output is positive and would fit in an int32_t.
+ // TODO: VLEN might be capped at 16 bits in a future V spec update.
+ if (BitWidth >= 32)
+ Known.Zero.setBitsFrom(31);
+ break;
}
}
break;
// We assume VLENB is at least 16 bytes.
Known.Zero.setLowBits(4);
break;
+ case ISD::INTRINSIC_W_CHAIN: {
+ unsigned IntNo = Op.getConstantOperandVal(1);
+ switch (IntNo) {
+ default:
+ // We can't do anything for most intrinsics.
+ break;
+ case Intrinsic::riscv_vsetvli:
+ case Intrinsic::riscv_vsetvlimax:
+ // Assume that VL output is positive and would fit in an int32_t.
+ // TODO: VLEN might be capped at 16 bits in a future V spec update.
+ if (BitWidth >= 32)
+ Known.Zero.setBitsFrom(31);
+ break;
+ }
+ break;
+ }
}
}
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=riscv64 -mattr=+experimental-v | FileCheck %s
+
+declare i64 @llvm.riscv.vsetvli(
+ i64, i64, i64);
+
+define signext i32 @vsetvl_sext() {
+; CHECK-LABEL: vsetvl_sext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli a0, 1, e16,m2,ta,mu
+; CHECK-NEXT: ret
+ ; Known bits prove the VL result is non-negative and fits in 31 bits, so
+ ; the trunc plus the signext return-attribute extension fold away: the
+ ; CHECK lines show only vsetivli followed by ret, with no sext instruction.
+ %a = call i64 @llvm.riscv.vsetvli(i64 1, i64 1, i64 1)
+ %b = trunc i64 %a to i32
+ ret i32 %b
+}
+
+define zeroext i32 @vsetvl_zext() {
+; CHECK-LABEL: vsetvl_zext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli a0, 1, e16,m2,ta,mu
+; CHECK-NEXT: ret
+ ; Known bits prove bits 31 and above of the VL result are zero, so the
+ ; trunc plus the zeroext return-attribute extension fold away: the CHECK
+ ; lines show only vsetivli followed by ret, with no zero-extension code.
+ %a = call i64 @llvm.riscv.vsetvli(i64 1, i64 1, i64 1)
+ %b = trunc i64 %a to i32
+ ret i32 %b
+}
--- /dev/null
+if not 'RISCV' in config.root.targets:
+ config.unsupported = True
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+declare i32 @llvm.riscv.vsetvli.i32(i32, i32, i32)
+declare i64 @llvm.riscv.vsetvli.i64(i64, i64, i64)
+
+define i32 @vsetvli_i32() nounwind {
+; CHECK-LABEL: @vsetvli_i32(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vsetvli.i32(i32 1, i32 1, i32 1)
+; CHECK-NEXT: ret i32 [[TMP0]]
+;
+entry:
+ ; The mask 2147483647 clears only bit 31, which computeKnownBits already
+ ; proves is zero for vsetvli, so instcombine deletes the 'and' entirely
+ ; (the CHECK lines contain no 'and').
+ %0 = call i32 @llvm.riscv.vsetvli.i32(i32 1, i32 1, i32 1)
+ %1 = and i32 %0, 2147483647
+ ret i32 %1
+}
+
+define i64 @vsetvli_sext_i64() nounwind {
+; CHECK-LABEL: @vsetvli_sext_i64(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 1, i64 1, i64 1)
+; CHECK-NEXT: ret i64 [[TMP0]]
+;
+entry:
+ ; Known bits show the result is non-negative and fits in an i32, so the
+ ; trunc/sext round trip is a no-op and instcombine returns the call's
+ ; value directly (no trunc or sext in the CHECK lines).
+ %0 = call i64 @llvm.riscv.vsetvli.i64(i64 1, i64 1, i64 1)
+ %1 = trunc i64 %0 to i32
+ %2 = sext i32 %1 to i64
+ ret i64 %2
+}
+
+define i64 @vsetvli_zext_i64() nounwind {
+; CHECK-LABEL: @vsetvli_zext_i64(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 1, i64 1, i64 1)
+; CHECK-NEXT: ret i64 [[TMP0]]
+;
+entry:
+ ; Known bits show bits 31 and above are already zero, so the trunc/zext
+ ; round trip is a no-op and instcombine returns the call's value directly
+ ; (no trunc or zext in the CHECK lines).
+ %0 = call i64 @llvm.riscv.vsetvli.i64(i64 1, i64 1, i64 1)
+ %1 = trunc i64 %0 to i32
+ %2 = zext i32 %1 to i64
+ ret i64 %2
+}