From e0ce87509b18957fc82dd5b1aa5ad50e81412294 Mon Sep 17 00:00:00 2001
From: Noah Goldstein <goldstein.w.n@gmail.com>
Date: Sat, 18 Feb 2023 13:36:24 -0600
Subject: [PATCH] [ValueTracking] Add tests for additional `isKnownNonZero` cases; NFC

Add cases for the following ops:
    - 0-X
    - bitreverse(X)
    - bswap(X)
    - ctpop(X)
    - abs(X)
    - uadd_sat(X, Y)

Reviewed By: nikic

Differential Revision: https://reviews.llvm.org/D142827
---
 llvm/test/Analysis/ValueTracking/known-non-zero.ll | 183 +++++++++++++++++++++
 1 file changed, 183 insertions(+)
 create mode 100644 llvm/test/Analysis/ValueTracking/known-non-zero.ll

diff --git a/llvm/test/Analysis/ValueTracking/known-non-zero.ll b/llvm/test/Analysis/ValueTracking/known-non-zero.ll
new file mode 100644
index 0000000..013aa07
--- /dev/null
+++ b/llvm/test/Analysis/ValueTracking/known-non-zero.ll
@@ -0,0 +1,183 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -passes=instsimplify < %s -S | FileCheck %s
+
+declare void @llvm.assume(i1)
+declare i8 @llvm.abs.i8(i8, i1)
+declare i8 @llvm.bitreverse.i8(i8)
+declare i16 @llvm.bswap.i16(i16)
+declare i8 @llvm.ctpop.i8(i8)
+declare <2 x i8> @llvm.uadd.sat.2xi8(<2 x i8>, <2 x i8>)
+declare i8 @llvm.uadd.sat.i8(i8, i8)
+
+;; Throughout, use X > Y || Y == 0, which folds to X > Y iff X is known
+;; non-zero. We do this because many of these expressions already have
+;; hardcoded folds of the form Foo(X) == 0 -> X == 0, and we want to
+;; explicitly test that `isKnownNonZero` handles each case.
+
+define i1 @check_neg(i8 %x, i8 %y) {
+; CHECK-LABEL: @check_neg(
+; CHECK-NEXT:    [[NE:%.*]] = icmp ne i8 [[X:%.*]], 0
+; CHECK-NEXT:    call void @llvm.assume(i1 [[NE]])
+; CHECK-NEXT:    [[Z:%.*]] = sub i8 0, [[X]]
+; CHECK-NEXT:    [[CMP0:%.*]] = icmp ugt i8 [[Z]], [[Y:%.*]]
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp eq i8 [[Y]], 0
+; CHECK-NEXT:    [[R:%.*]] = or i1 [[CMP0]], [[CMP1]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %ne = icmp ne i8 %x, 0
+  call void @llvm.assume(i1 %ne)
+  %z = sub i8 0, %x
+  %cmp0 = icmp ugt i8 %z, %y
+  %cmp1 = icmp eq i8 %y, 0
+  %r = or i1 %cmp0, %cmp1
+  ret i1 %r
+}
+
+define i1 @check_abs(i8 %x, i8 %y) {
+; CHECK-LABEL: @check_abs(
+; CHECK-NEXT:    [[NE:%.*]] = icmp ne i8 [[X:%.*]], 0
+; CHECK-NEXT:    br i1 [[NE]], label [[TRUE:%.*]], label [[FALSE:%.*]]
+; CHECK:       true:
+; CHECK-NEXT:    [[Z:%.*]] = call i8 @llvm.abs.i8(i8 [[X]], i1 true)
+; CHECK-NEXT:    [[CMP0:%.*]] = icmp ugt i8 [[Z]], [[Y:%.*]]
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp eq i8 [[Y]], 0
+; CHECK-NEXT:    [[R:%.*]] = or i1 [[CMP0]], [[CMP1]]
+; CHECK-NEXT:    ret i1 [[R]]
+; CHECK:       false:
+; CHECK-NEXT:    ret i1 [[NE]]
+;
+  %ne = icmp ne i8 %x, 0
+  br i1 %ne, label %true, label %false
+true:
+  %z = call i8 @llvm.abs.i8(i8 %x, i1 true)
+  %cmp0 = icmp ugt i8 %z, %y
+  %cmp1 = icmp eq i8 %y, 0
+  %r = or i1 %cmp0, %cmp1
+  ret i1 %r
+false:
+  ret i1 %ne
+}
+
+define i1 @check_abs_failish(i8 %x, i8 %y) {
+; CHECK-LABEL: @check_abs_failish(
+; CHECK-NEXT:    [[NE:%.*]] = icmp ne i8 [[X:%.*]], 0
+; CHECK-NEXT:    br i1 [[NE]], label [[TRUE:%.*]], label [[FALSE:%.*]]
+; CHECK:       false:
+; CHECK-NEXT:    [[Z:%.*]] = call i8 @llvm.abs.i8(i8 [[X]], i1 true)
+; CHECK-NEXT:    [[CMP0:%.*]] = icmp ugt i8 [[Z]], [[Y:%.*]]
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp eq i8 [[Y]], 0
+; CHECK-NEXT:    [[R:%.*]] = or i1 [[CMP0]], [[CMP1]]
+; CHECK-NEXT:    ret i1 [[R]]
+; CHECK:       true:
+; CHECK-NEXT:    ret i1 [[NE]]
+;
+  %ne = icmp ne i8 %x, 0
+  br i1 %ne, label %true, label %false
+false:
+  %z = call i8 @llvm.abs.i8(i8 %x, i1 true)
+  %cmp0 = icmp ugt i8 %z, %y
+  %cmp1 = icmp eq i8 %y, 0
+  %r = or i1 %cmp0, %cmp1
+  ret i1 %r
+true:
+  ret i1 %ne
+}
+
+define i1 @check_bitreverse(i8 %x, i8 %y) {
+; CHECK-LABEL: @check_bitreverse(
+; CHECK-NEXT:    [[NE:%.*]] = icmp ne i8 [[X:%.*]], 0
+; CHECK-NEXT:    call void @llvm.assume(i1 [[NE]])
+; CHECK-NEXT:    [[Z:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[X]])
+; CHECK-NEXT:    [[CMP0:%.*]] = icmp ugt i8 [[Z]], [[Y:%.*]]
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp eq i8 [[Y]], 0
+; CHECK-NEXT:    [[R:%.*]] = or i1 [[CMP0]], [[CMP1]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %ne = icmp ne i8 %x, 0
+  call void @llvm.assume(i1 %ne)
+  %z = call i8 @llvm.bitreverse.i8(i8 %x)
+  %cmp0 = icmp ugt i8 %z, %y
+  %cmp1 = icmp eq i8 %y, 0
+  %r = or i1 %cmp0, %cmp1
+  ret i1 %r
+}
+
+define i1 @check_bswap(i16 %x, i16 %y) {
+; CHECK-LABEL: @check_bswap(
+; CHECK-NEXT:    [[NE:%.*]] = icmp ne i16 [[X:%.*]], 0
+; CHECK-NEXT:    call void @llvm.assume(i1 [[NE]])
+; CHECK-NEXT:    [[Z:%.*]] = call i16 @llvm.bswap.i16(i16 [[X]])
+; CHECK-NEXT:    [[CMP0:%.*]] = icmp ugt i16 [[Z]], [[Y:%.*]]
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp eq i16 [[Y]], 0
+; CHECK-NEXT:    [[R:%.*]] = or i1 [[CMP0]], [[CMP1]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %ne = icmp ne i16 %x, 0
+  call void @llvm.assume(i1 %ne)
+  %z = call i16 @llvm.bswap.i16(i16 %x)
+  %cmp0 = icmp ugt i16 %z, %y
+  %cmp1 = icmp eq i16 %y, 0
+  %r = or i1 %cmp0, %cmp1
+  ret i1 %r
+}
+
+define i1 @check_ctpop(i8 %x, i8 %y) {
+; CHECK-LABEL: @check_ctpop(
+; CHECK-NEXT:    [[NE:%.*]] = icmp eq i8 [[X:%.*]], 0
+; CHECK-NEXT:    br i1 [[NE]], label [[TRUE:%.*]], label [[FALSE:%.*]]
+; CHECK:       true:
+; CHECK-NEXT:    ret i1 [[NE]]
+; CHECK:       false:
+; CHECK-NEXT:    [[Z:%.*]] = call i8 @llvm.ctpop.i8(i8 [[X]])
+; CHECK-NEXT:    [[CMP0:%.*]] = icmp ugt i8 [[Z]], [[Y:%.*]]
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp eq i8 [[Y]], 0
+; CHECK-NEXT:    [[R:%.*]] = or i1 [[CMP0]], [[CMP1]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %ne = icmp eq i8 %x, 0
+  br i1 %ne, label %true, label %false
+true:
+  ret i1 %ne
+false:
+  %z = call i8 @llvm.ctpop.i8(i8 %x)
+  %cmp0 = icmp ugt i8 %z, %y
+  %cmp1 = icmp eq i8 %y, 0
+  %r = or i1 %cmp0, %cmp1
+  ret i1 %r
+}
+
+define i1 @check_add_sat(i8 %x, i8 %y, i8 %w) {
+; CHECK-LABEL: @check_add_sat(
+; CHECK-NEXT:    [[NE:%.*]] = icmp ne i8 [[X:%.*]], 0
+; CHECK-NEXT:    call void @llvm.assume(i1 [[NE]])
+; CHECK-NEXT:    [[Z:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[X]], i8 [[Y:%.*]])
+; CHECK-NEXT:    [[CMP0:%.*]] = icmp ugt i8 [[Z]], [[W:%.*]]
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp eq i8 [[W]], 0
+; CHECK-NEXT:    [[R:%.*]] = or i1 [[CMP0]], [[CMP1]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %ne = icmp ne i8 %x, 0
+  call void @llvm.assume(i1 %ne)
+  %z = call i8 @llvm.uadd.sat.i8(i8 %x, i8 %y)
+  %cmp0 = icmp ugt i8 %z, %w
+  %cmp1 = icmp eq i8 %w, 0
+  %r = or i1 %cmp0, %cmp1
+  ret i1 %r
+}
+
+define <2 x i1> @check_add_sat_vec(<2 x i8> %x, <2 x i8> %y, <2 x i8> %w) {
+; CHECK-LABEL: @check_add_sat_vec(
+; CHECK-NEXT:    [[YNZ:%.*]] = or <2 x i8> [[Y:%.*]], <i8 1, i8 1>
+; CHECK-NEXT:    [[Z:%.*]] = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> [[X:%.*]], <2 x i8> [[YNZ]])
+; CHECK-NEXT:    [[CMP0:%.*]] = icmp ugt <2 x i8> [[Z]], [[W:%.*]]
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp eq <2 x i8> [[W]], zeroinitializer
+; CHECK-NEXT:    [[R:%.*]] = or <2 x i1> [[CMP0]], [[CMP1]]
+; CHECK-NEXT:    ret <2 x i1> [[R]]
+;
+  %ynz = or <2 x i8> %y, <i8 1, i8 1>
+  %z = call <2 x i8> @llvm.uadd.sat.2xi8(<2 x i8> %x, <2 x i8> %ynz)
+  %cmp0 = icmp ugt <2 x i8> %z, %w
+  %cmp1 = icmp eq <2 x i8> %w, zeroinitializer
+  %r = or <2 x i1> %cmp0, %cmp1
+  ret <2 x i1> %r
+}
--
2.7.4
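
For context: the point of these tests is that once `isKnownNonZero` learns
these operators, instsimplify can drop the `Y == 0` arm of the `or` entirely,
since for a known non-zero Z, Y == 0 already implies Z u> Y. Below is a
hand-written sketch of the expected post-fold IR for the `0 - X` case; the
function name `@check_neg_expected` and the exact output are illustrative
assumptions about the intended follow-up, not autogenerated output from this
patch:

declare void @llvm.assume(i1)

; Assuming a follow-up teaches isKnownNonZero that `sub 0, %x` is non-zero
; exactly when %x is: with %x != 0 assumed, %z is known non-zero, so
; `icmp ugt %z, %y || icmp eq %y, 0` simplifies to the single ugt compare.
define i1 @check_neg_expected(i8 %x, i8 %y) {
  %ne = icmp ne i8 %x, 0
  call void @llvm.assume(i1 %ne)
  %z = sub i8 0, %x
  ; %cmp1 and the or are gone; only the ugt compare remains.
  %r = icmp ugt i8 %z, %y
  ret i1 %r
}

The same reasoning covers the intrinsics under test: bitreverse, bswap, and
ctpop are non-zero iff their operand is, abs(X) is non-zero iff X is, and
uadd.sat(X, Y) is non-zero if either operand is, since saturating unsigned
addition never wraps back to zero.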