From: Paul Walker Date: Mon, 21 Nov 2022 16:56:55 +0000 (+0000) Subject: [InstCombine] Increase test coverage of vector.reverse ready for follow on work. X-Git-Tag: upstream/17.0.6~25259 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=03b3017aef14efa705d67767d229752b1c6d0b2d;p=platform%2Fupstream%2Fllvm.git [InstCombine] Increase test coverage of vector.reverse ready for follow on work. --- diff --git a/llvm/test/Transforms/InstCombine/vector-reverse.ll b/llvm/test/Transforms/InstCombine/vector-reverse.ll index 309c9124..11d4c02 100644 --- a/llvm/test/Transforms/InstCombine/vector-reverse.ll +++ b/llvm/test/Transforms/InstCombine/vector-reverse.ll @@ -1,62 +1,707 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py ; RUN: opt < %s -passes=instcombine -S | FileCheck %s -; Test that the reverse is eliminated if the output and all the inputs -; of the instruction are calls to reverse. +; Tests to ensure operand reversals are bubbled to the result when there is no +; increase in the total number of reversals. Often this results in back-to-back +; reversals that can be eliminated entirely. This outcome is tested separately. + define @binop_reverse( %a, %b) { ; CHECK-LABEL: @binop_reverse( -; CHECK-NEXT: [[ADD1:%.*]] = add [[A:%.*]], [[B:%.*]] -; CHECK-NEXT: ret [[ADD1]] +; CHECK-NEXT: [[A_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i32( [[A:%.*]]) +; CHECK-NEXT: [[B_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i32( [[B:%.*]]) +; CHECK-NEXT: [[ADD:%.*]] = add [[A_REV]], [[B_REV]] +; CHECK-NEXT: ret [[ADD]] +; + %a.rev = tail call @llvm.experimental.vector.reverse.nxv4i32( %a) + %b.rev = tail call @llvm.experimental.vector.reverse.nxv4i32( %b) + %add = add %a.rev, %b.rev + ret %add +} + +; %a.rev has multiple uses +define @binop_reverse_1( %a, %b) { +; CHECK-LABEL: @binop_reverse_1( +; CHECK-NEXT: [[A_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i32( [[A:%.*]]) +; CHECK-NEXT: [[B_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i32( [[B:%.*]]) +; CHECK-NEXT: call void @use_nxv4i32( [[A_REV]]) +; CHECK-NEXT: [[ADD:%.*]] = add [[A_REV]], [[B_REV]] +; CHECK-NEXT: ret [[ADD]] ; - %reva = tail call @llvm.experimental.vector.reverse.nxv4i32( %a) - %revb = tail call @llvm.experimental.vector.reverse.nxv4i32( %b) - %add = add %reva, %revb - %revadd = tail call @llvm.experimental.vector.reverse.nxv4i32( %add) - ret %revadd + %a.rev = tail call @llvm.experimental.vector.reverse.nxv4i32( %a) + %b.rev = tail call @llvm.experimental.vector.reverse.nxv4i32( %b) + call void @use_nxv4i32( %a.rev) + %add = add %a.rev, %b.rev + ret %add +} + +; %b.rev has multiple uses +define @binop_reverse_2( %a, %b) { +; CHECK-LABEL: @binop_reverse_2( +; CHECK-NEXT: [[A_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i32( [[A:%.*]]) +; CHECK-NEXT: [[B_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i32( [[B:%.*]]) +; CHECK-NEXT: call void @use_nxv4i32( [[B_REV]]) +; CHECK-NEXT: [[ADD:%.*]] = add [[A_REV]], [[B_REV]] +; CHECK-NEXT: ret [[ADD]] +; + %a.rev = tail call @llvm.experimental.vector.reverse.nxv4i32( %a) + %b.rev = tail call @llvm.experimental.vector.reverse.nxv4i32( %b) + call void @use_nxv4i32( %b.rev) + %add = add %a.rev, %b.rev + ret %add +} + +; %a.rev and %b.rev have multiple uses +define @binop_reverse_3( %a, %b) { +; CHECK-LABEL: @binop_reverse_3( +; CHECK-NEXT: [[A_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i32( [[A:%.*]]) +; CHECK-NEXT: [[B_REV:%.*]] = tail call 
@llvm.experimental.vector.reverse.nxv4i32( [[B:%.*]]) +; CHECK-NEXT: call void @use_nxv4i32( [[A_REV]]) +; CHECK-NEXT: call void @use_nxv4i32( [[B_REV]]) +; CHECK-NEXT: [[ADD:%.*]] = add [[A_REV]], [[B_REV]] +; CHECK-NEXT: ret [[ADD]] +; + %a.rev = tail call @llvm.experimental.vector.reverse.nxv4i32( %a) + %b.rev = tail call @llvm.experimental.vector.reverse.nxv4i32( %b) + call void @use_nxv4i32( %a.rev) + call void @use_nxv4i32( %b.rev) + %add = add %a.rev, %b.rev + ret %add +} + +; %a.rev used as both operands +define @binop_reverse_4( %a) { +; CHECK-LABEL: @binop_reverse_4( +; CHECK-NEXT: [[A_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i32( [[A:%.*]]) +; CHECK-NEXT: [[MUL:%.*]] = mul [[A_REV]], [[A_REV]] +; CHECK-NEXT: ret [[MUL]] +; + %a.rev = tail call @llvm.experimental.vector.reverse.nxv4i32( %a) + %mul = mul %a.rev, %a.rev + ret %mul } define @binop_reverse_splat_RHS( %a, i32 %b) { ; CHECK-LABEL: @binop_reverse_splat_RHS( -; CHECK-NEXT: [[SPLAT_INSERT:%.*]] = insertelement poison, i32 [[B:%.*]], i64 0 -; CHECK-NEXT: [[SPLAT:%.*]] = shufflevector [[SPLAT_INSERT]], poison, zeroinitializer -; CHECK-NEXT: [[UDIV1:%.*]] = udiv [[A:%.*]], [[SPLAT]] -; CHECK-NEXT: ret [[UDIV1]] +; CHECK-NEXT: [[A_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i32( [[A:%.*]]) +; CHECK-NEXT: [[B_INSERT:%.*]] = insertelement poison, i32 [[B:%.*]], i64 0 +; CHECK-NEXT: [[B_SPLAT:%.*]] = shufflevector [[B_INSERT]], poison, zeroinitializer +; CHECK-NEXT: [[DIV:%.*]] = udiv [[A_REV]], [[B_SPLAT]] +; CHECK-NEXT: ret [[DIV]] +; + %a.rev = tail call @llvm.experimental.vector.reverse.nxv4i32( %a) + %b.insert = insertelement poison, i32 %b, i32 0 + %b.splat = shufflevector %b.insert, poison, zeroinitializer + %div = udiv %a.rev, %b.splat + ret %div +} + +; %a.rev has multiple uses +define @binop_reverse_splat_RHS_1( %a, i32 %b) { +; CHECK-LABEL: @binop_reverse_splat_RHS_1( +; CHECK-NEXT: [[A_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i32( [[A:%.*]]) +; CHECK-NEXT: [[B_INSERT:%.*]] = insertelement poison, i32 [[B:%.*]], i64 0 +; CHECK-NEXT: [[B_SPLAT:%.*]] = shufflevector [[B_INSERT]], poison, zeroinitializer +; CHECK-NEXT: call void @use_nxv4i32( [[A_REV]]) +; CHECK-NEXT: [[DIV:%.*]] = udiv [[A_REV]], [[B_SPLAT]] +; CHECK-NEXT: ret [[DIV]] ; - %reva = tail call @llvm.experimental.vector.reverse.nxv4i32( %a) - %splat_insert = insertelement poison, i32 %b, i32 0 - %splat = shufflevector %splat_insert, poison, zeroinitializer - %udiv = udiv %reva, %splat - %revadd = tail call @llvm.experimental.vector.reverse.nxv4i32( %udiv) - ret %revadd + %a.rev = tail call @llvm.experimental.vector.reverse.nxv4i32( %a) + %b.insert = insertelement poison, i32 %b, i32 0 + %b.splat = shufflevector %b.insert, poison, zeroinitializer + call void @use_nxv4i32( %a.rev) + %div = udiv %a.rev, %b.splat + ret %div } define @binop_reverse_splat_LHS( %a, i32 %b) { ; CHECK-LABEL: @binop_reverse_splat_LHS( -; CHECK-NEXT: [[SPLAT_INSERT:%.*]] = insertelement poison, i32 [[B:%.*]], i64 0 -; CHECK-NEXT: [[SPLAT:%.*]] = shufflevector [[SPLAT_INSERT]], poison, zeroinitializer -; CHECK-NEXT: [[UDIV1:%.*]] = udiv [[SPLAT]], [[A:%.*]] -; CHECK-NEXT: ret [[UDIV1]] +; CHECK-NEXT: [[A_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i32( [[A:%.*]]) +; CHECK-NEXT: [[B_INSERT:%.*]] = insertelement poison, i32 [[B:%.*]], i64 0 +; CHECK-NEXT: [[B_SPLAT:%.*]] = shufflevector [[B_INSERT]], poison, zeroinitializer +; CHECK-NEXT: [[DIV:%.*]] = udiv [[B_SPLAT]], [[A_REV]] +; CHECK-NEXT: ret [[DIV]] +; + %a.rev = 
tail call @llvm.experimental.vector.reverse.nxv4i32( %a) + %b.insert = insertelement poison, i32 %b, i32 0 + %b.splat = shufflevector %b.insert, poison, zeroinitializer + %div = udiv %b.splat, %a.rev + ret %div +} + +; %a.rev has multiple uses +define @binop_reverse_splat_LHS_1( %a, i32 %b) { +; CHECK-LABEL: @binop_reverse_splat_LHS_1( +; CHECK-NEXT: [[A_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i32( [[A:%.*]]) +; CHECK-NEXT: [[B_INSERT:%.*]] = insertelement poison, i32 [[B:%.*]], i64 0 +; CHECK-NEXT: [[B_SPLAT:%.*]] = shufflevector [[B_INSERT]], poison, zeroinitializer +; CHECK-NEXT: call void @use_nxv4i32( [[A_REV]]) +; CHECK-NEXT: [[DIV:%.*]] = udiv [[B_SPLAT]], [[A_REV]] +; CHECK-NEXT: ret [[DIV]] ; - %reva = tail call @llvm.experimental.vector.reverse.nxv4i32( %a) - %splat_insert = insertelement poison, i32 %b, i32 0 - %splat = shufflevector %splat_insert, poison, zeroinitializer - %udiv = udiv %splat, %reva - %revadd = tail call @llvm.experimental.vector.reverse.nxv4i32( %udiv) - ret %revadd + %a.rev = tail call @llvm.experimental.vector.reverse.nxv4i32( %a) + %b.insert = insertelement poison, i32 %b, i32 0 + %b.splat = shufflevector %b.insert, poison, zeroinitializer + call void @use_nxv4i32( %a.rev) + %div = udiv %b.splat, %a.rev + ret %div } define @unop_reverse( %a) { ; CHECK-LABEL: @unop_reverse( -; CHECK-NEXT: [[NEG1:%.*]] = fneg fast [[A:%.*]] +; CHECK-NEXT: [[A_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4f32( [[A:%.*]]) +; CHECK-NEXT: [[NEG:%.*]] = fneg fast [[A_REV]] +; CHECK-NEXT: ret [[NEG]] +; + %a.rev = tail call @llvm.experimental.vector.reverse.nxv4f32( %a) + %neg = fneg fast %a.rev + ret %neg +} + +; %a.rev has multiple uses +define @unop_reverse_1( %a) { +; CHECK-LABEL: @unop_reverse_1( +; CHECK-NEXT: [[A_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4f32( [[A:%.*]]) +; CHECK-NEXT: call void @use_nxv4f32( [[A_REV]]) +; CHECK-NEXT: [[NEG:%.*]] = fneg fast [[A_REV]] +; CHECK-NEXT: ret [[NEG]] +; + %a.rev = tail call @llvm.experimental.vector.reverse.nxv4f32( %a) + call void @use_nxv4f32( %a.rev) + %neg = fneg fast %a.rev + ret %neg +} + +define @icmp_reverse( %a, %b) { +; CHECK-LABEL: @icmp_reverse( +; CHECK-NEXT: [[A_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i32( [[A:%.*]]) +; CHECK-NEXT: [[B_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i32( [[B:%.*]]) +; CHECK-NEXT: [[CMP:%.*]] = icmp eq [[A_REV]], [[B_REV]] +; CHECK-NEXT: ret [[CMP]] +; + %a.rev = tail call @llvm.experimental.vector.reverse.nxv4i32( %a) + %b.rev = tail call @llvm.experimental.vector.reverse.nxv4i32( %b) + %cmp = icmp eq %a.rev, %b.rev + ret %cmp +} + +; %a.rev has multiple uses +define @icmp_reverse_1( %a, %b) { +; CHECK-LABEL: @icmp_reverse_1( +; CHECK-NEXT: [[A_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i32( [[A:%.*]]) +; CHECK-NEXT: [[B_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i32( [[B:%.*]]) +; CHECK-NEXT: call void @use_nxv4i32( [[A_REV]]) +; CHECK-NEXT: [[CMP:%.*]] = icmp eq [[A_REV]], [[B_REV]] +; CHECK-NEXT: ret [[CMP]] +; + %a.rev = tail call @llvm.experimental.vector.reverse.nxv4i32( %a) + %b.rev = tail call @llvm.experimental.vector.reverse.nxv4i32( %b) + call void @use_nxv4i32( %a.rev) + %cmp = icmp eq %a.rev, %b.rev + ret %cmp +} + +; %b.rev has multiple uses +define @icmp_reverse_2( %a, %b) { +; CHECK-LABEL: @icmp_reverse_2( +; CHECK-NEXT: [[A_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i32( [[A:%.*]]) +; CHECK-NEXT: [[B_REV:%.*]] = tail call 
@llvm.experimental.vector.reverse.nxv4i32( [[B:%.*]]) +; CHECK-NEXT: call void @use_nxv4i32( [[B_REV]]) +; CHECK-NEXT: [[CMP:%.*]] = icmp eq [[A_REV]], [[B_REV]] +; CHECK-NEXT: ret [[CMP]] +; + %a.rev = tail call @llvm.experimental.vector.reverse.nxv4i32( %a) + %b.rev = tail call @llvm.experimental.vector.reverse.nxv4i32( %b) + call void @use_nxv4i32( %b.rev) + %cmp = icmp eq %a.rev, %b.rev + ret %cmp +} + +; %a.rev and %b.rev have multiple uses +define @icmp_reverse_3( %a, %b) { +; CHECK-LABEL: @icmp_reverse_3( +; CHECK-NEXT: [[A_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i32( [[A:%.*]]) +; CHECK-NEXT: [[B_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i32( [[B:%.*]]) +; CHECK-NEXT: call void @use_nxv4i32( [[A_REV]]) +; CHECK-NEXT: call void @use_nxv4i32( [[B_REV]]) +; CHECK-NEXT: [[CMP:%.*]] = icmp eq [[A_REV]], [[B_REV]] +; CHECK-NEXT: ret [[CMP]] +; + %a.rev = tail call @llvm.experimental.vector.reverse.nxv4i32( %a) + %b.rev = tail call @llvm.experimental.vector.reverse.nxv4i32( %b) + call void @use_nxv4i32( %a.rev) + call void @use_nxv4i32( %b.rev) + %cmp = icmp eq %a.rev, %b.rev + ret %cmp +} + +define @icmp_reverse_splat_RHS( %a, i32 %b) { +; CHECK-LABEL: @icmp_reverse_splat_RHS( +; CHECK-NEXT: [[A_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i32( [[A:%.*]]) +; CHECK-NEXT: [[B_INSERT:%.*]] = insertelement poison, i32 [[B:%.*]], i64 0 +; CHECK-NEXT: [[B_SPLAT:%.*]] = shufflevector [[B_INSERT]], poison, zeroinitializer +; CHECK-NEXT: [[CMP:%.*]] = icmp sgt [[A_REV]], [[B_SPLAT]] +; CHECK-NEXT: ret [[CMP]] +; + %a.rev = tail call @llvm.experimental.vector.reverse.nxv4i32( %a) + %b.insert = insertelement poison, i32 %b, i32 0 + %b.splat = shufflevector %b.insert, poison, zeroinitializer + %cmp = icmp sgt %a.rev, %b.splat + ret %cmp +} + +; %a.rev has multiple uses +define @icmp_reverse_splat_RHS_1( %a, i32 %b) { +; CHECK-LABEL: @icmp_reverse_splat_RHS_1( +; CHECK-NEXT: [[A_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i32( [[A:%.*]]) +; CHECK-NEXT: [[B_INSERT:%.*]] = insertelement poison, i32 [[B:%.*]], i64 0 +; CHECK-NEXT: [[B_SPLAT:%.*]] = shufflevector [[B_INSERT]], poison, zeroinitializer +; CHECK-NEXT: call void @use_nxv4i32( [[A_REV]]) +; CHECK-NEXT: [[CMP:%.*]] = icmp sgt [[A_REV]], [[B_SPLAT]] +; CHECK-NEXT: ret [[CMP]] +; + %a.rev = tail call @llvm.experimental.vector.reverse.nxv4i32( %a) + %b.insert = insertelement poison, i32 %b, i32 0 + %b.splat = shufflevector %b.insert, poison, zeroinitializer + call void @use_nxv4i32( %a.rev) + %cmp = icmp sgt %a.rev, %b.splat + ret %cmp +} + +define @icmp_reverse_splat_LHS( %a, i32 %b) { +; CHECK-LABEL: @icmp_reverse_splat_LHS( +; CHECK-NEXT: [[A_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i32( [[A:%.*]]) +; CHECK-NEXT: [[B_INSERT:%.*]] = insertelement poison, i32 [[B:%.*]], i64 0 +; CHECK-NEXT: [[B_SPLAT:%.*]] = shufflevector [[B_INSERT]], poison, zeroinitializer +; CHECK-NEXT: [[CMP:%.*]] = icmp ult [[B_SPLAT]], [[A_REV]] +; CHECK-NEXT: ret [[CMP]] +; + %a.rev = tail call @llvm.experimental.vector.reverse.nxv4i32( %a) + %b.insert = insertelement poison, i32 %b, i32 0 + %b.splat = shufflevector %b.insert, poison, zeroinitializer + %cmp = icmp ult %b.splat, %a.rev + ret %cmp +} + +; %a.rev has multiple uses +define @icmp_reverse_splat_LHS_1( %a, i32 %b) { +; CHECK-LABEL: @icmp_reverse_splat_LHS_1( +; CHECK-NEXT: [[A_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i32( [[A:%.*]]) +; CHECK-NEXT: [[B_INSERT:%.*]] = insertelement poison, i32 [[B:%.*]], i64 
0 +; CHECK-NEXT: [[B_SPLAT:%.*]] = shufflevector [[B_INSERT]], poison, zeroinitializer +; CHECK-NEXT: call void @use_nxv4i32( [[A_REV]]) +; CHECK-NEXT: [[CMP:%.*]] = icmp ult [[B_SPLAT]], [[A_REV]] +; CHECK-NEXT: ret [[CMP]] +; + %a.rev = tail call @llvm.experimental.vector.reverse.nxv4i32( %a) + %b.insert = insertelement poison, i32 %b, i32 0 + %b.splat = shufflevector %b.insert, poison, zeroinitializer + call void @use_nxv4i32( %a.rev) + %cmp = icmp ult %b.splat, %a.rev + ret %cmp +} + +define @select_reverse( %a, %b, %c) { +; CHECK-LABEL: @select_reverse( +; CHECK-NEXT: [[A_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i1( [[A:%.*]]) +; CHECK-NEXT: [[B_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i32( [[B:%.*]]) +; CHECK-NEXT: [[C_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i32( [[C:%.*]]) +; CHECK-NEXT: [[SELECT:%.*]] = select [[A_REV]], [[B_REV]], [[C_REV]] +; CHECK-NEXT: ret [[SELECT]] +; + %a.rev = tail call @llvm.experimental.vector.reverse.nxv4i1( %a) + %b.rev = tail call @llvm.experimental.vector.reverse.nxv4i32( %b) + %c.rev = tail call @llvm.experimental.vector.reverse.nxv4i32( %c) + %select = select %a.rev, %b.rev, %c.rev + ret %select +} + +; %a.rev has multiple uses +define @select_reverse_1( %a, %b, %c) { +; CHECK-LABEL: @select_reverse_1( +; CHECK-NEXT: [[A_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i1( [[A:%.*]]) +; CHECK-NEXT: [[B_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i32( [[B:%.*]]) +; CHECK-NEXT: [[C_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i32( [[C:%.*]]) +; CHECK-NEXT: call void @use_nxv4i1( [[A_REV]]) +; CHECK-NEXT: [[SELECT:%.*]] = select [[A_REV]], [[B_REV]], [[C_REV]] +; CHECK-NEXT: ret [[SELECT]] +; + %a.rev = tail call @llvm.experimental.vector.reverse.nxv4i1( %a) + %b.rev = tail call @llvm.experimental.vector.reverse.nxv4i32( %b) + %c.rev = tail call @llvm.experimental.vector.reverse.nxv4i32( %c) + call void @use_nxv4i1( %a.rev) + %select = select %a.rev, %b.rev, %c.rev + ret %select +} + +; %b.rev has multiple uses +define @select_reverse_2( %a, %b, %c) { +; CHECK-LABEL: @select_reverse_2( +; CHECK-NEXT: [[A_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i1( [[A:%.*]]) +; CHECK-NEXT: [[B_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i32( [[B:%.*]]) +; CHECK-NEXT: [[C_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i32( [[C:%.*]]) +; CHECK-NEXT: call void @use_nxv4i32( [[B_REV]]) +; CHECK-NEXT: [[SELECT:%.*]] = select [[A_REV]], [[B_REV]], [[C_REV]] +; CHECK-NEXT: ret [[SELECT]] +; + %a.rev = tail call @llvm.experimental.vector.reverse.nxv4i1( %a) + %b.rev = tail call @llvm.experimental.vector.reverse.nxv4i32( %b) + %c.rev = tail call @llvm.experimental.vector.reverse.nxv4i32( %c) + call void @use_nxv4i32( %b.rev) + %select = select %a.rev, %b.rev, %c.rev + ret %select +} + +; %c.rev has multiple uses +define @select_reverse_3( %a, %b, %c) { +; CHECK-LABEL: @select_reverse_3( +; CHECK-NEXT: [[A_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i1( [[A:%.*]]) +; CHECK-NEXT: [[B_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i32( [[B:%.*]]) +; CHECK-NEXT: [[C_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i32( [[C:%.*]]) +; CHECK-NEXT: call void @use_nxv4i32( [[C_REV]]) +; CHECK-NEXT: [[SELECT:%.*]] = select [[A_REV]], [[B_REV]], [[C_REV]] +; CHECK-NEXT: ret [[SELECT]] +; + %a.rev = tail call @llvm.experimental.vector.reverse.nxv4i1( %a) + %b.rev = tail call 
@llvm.experimental.vector.reverse.nxv4i32( %b) + %c.rev = tail call @llvm.experimental.vector.reverse.nxv4i32( %c) + call void @use_nxv4i32( %c.rev) + %select = select %a.rev, %b.rev, %c.rev + ret %select +} + +; %a.rev and %b.rev have multiple uses +define @select_reverse_4( %a, %b, %c) { +; CHECK-LABEL: @select_reverse_4( +; CHECK-NEXT: [[A_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i1( [[A:%.*]]) +; CHECK-NEXT: [[B_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i32( [[B:%.*]]) +; CHECK-NEXT: [[C_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i32( [[C:%.*]]) +; CHECK-NEXT: call void @use_nxv4i1( [[A_REV]]) +; CHECK-NEXT: call void @use_nxv4i32( [[B_REV]]) +; CHECK-NEXT: [[SELECT:%.*]] = select [[A_REV]], [[B_REV]], [[C_REV]] +; CHECK-NEXT: ret [[SELECT]] +; + %a.rev = tail call @llvm.experimental.vector.reverse.nxv4i1( %a) + %b.rev = tail call @llvm.experimental.vector.reverse.nxv4i32( %b) + %c.rev = tail call @llvm.experimental.vector.reverse.nxv4i32( %c) + call void @use_nxv4i1( %a.rev) + call void @use_nxv4i32( %b.rev) + %select = select %a.rev, %b.rev, %c.rev + ret %select +} + +; %a.rev and %c.rev have multiple uses +define @select_reverse_5( %a, %b, %c) { +; CHECK-LABEL: @select_reverse_5( +; CHECK-NEXT: [[A_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i1( [[A:%.*]]) +; CHECK-NEXT: [[B_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i32( [[B:%.*]]) +; CHECK-NEXT: [[C_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i32( [[C:%.*]]) +; CHECK-NEXT: call void @use_nxv4i1( [[A_REV]]) +; CHECK-NEXT: call void @use_nxv4i32( [[C_REV]]) +; CHECK-NEXT: [[SELECT:%.*]] = select [[A_REV]], [[B_REV]], [[C_REV]] +; CHECK-NEXT: ret [[SELECT]] +; + %a.rev = tail call @llvm.experimental.vector.reverse.nxv4i1( %a) + %b.rev = tail call @llvm.experimental.vector.reverse.nxv4i32( %b) + %c.rev = tail call @llvm.experimental.vector.reverse.nxv4i32( %c) + call void @use_nxv4i1( %a.rev) + call void @use_nxv4i32( %c.rev) + %select = select %a.rev, %b.rev, %c.rev + ret %select +} + +; %b.rev and %c.rev have multiple uses +define @select_reverse_6( %a, %b, %c) { +; CHECK-LABEL: @select_reverse_6( +; CHECK-NEXT: [[A_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i1( [[A:%.*]]) +; CHECK-NEXT: [[B_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i32( [[B:%.*]]) +; CHECK-NEXT: [[C_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i32( [[C:%.*]]) +; CHECK-NEXT: call void @use_nxv4i32( [[B_REV]]) +; CHECK-NEXT: call void @use_nxv4i32( [[C_REV]]) +; CHECK-NEXT: [[SELECT:%.*]] = select [[A_REV]], [[B_REV]], [[C_REV]] +; CHECK-NEXT: ret [[SELECT]] +; + %a.rev = tail call @llvm.experimental.vector.reverse.nxv4i1( %a) + %b.rev = tail call @llvm.experimental.vector.reverse.nxv4i32( %b) + %c.rev = tail call @llvm.experimental.vector.reverse.nxv4i32( %c) + call void @use_nxv4i32( %b.rev) + call void @use_nxv4i32( %c.rev) + %select = select %a.rev, %b.rev, %c.rev + ret %select +} + +; %a.rev, %b.rev and %c.rev have multiple uses +define @select_reverse_7( %a, %b, %c) { +; CHECK-LABEL: @select_reverse_7( +; CHECK-NEXT: [[A_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i1( [[A:%.*]]) +; CHECK-NEXT: [[B_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i32( [[B:%.*]]) +; CHECK-NEXT: [[C_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i32( [[C:%.*]]) +; CHECK-NEXT: call void @use_nxv4i1( [[A_REV]]) +; CHECK-NEXT: call void @use_nxv4i32( [[B_REV]]) +; CHECK-NEXT: call void 
@use_nxv4i32( [[C_REV]]) +; CHECK-NEXT: [[SELECT:%.*]] = select [[A_REV]], [[B_REV]], [[C_REV]] +; CHECK-NEXT: ret [[SELECT]] +; + %a.rev = tail call @llvm.experimental.vector.reverse.nxv4i1( %a) + %b.rev = tail call @llvm.experimental.vector.reverse.nxv4i32( %b) + %c.rev = tail call @llvm.experimental.vector.reverse.nxv4i32( %c) + call void @use_nxv4i1( %a.rev) + call void @use_nxv4i32( %b.rev) + call void @use_nxv4i32( %c.rev) + %select = select %a.rev, %b.rev, %c.rev + ret %select +} + +define @select_reverse_splat_false( %a, %b, i32 %c) { +; CHECK-LABEL: @select_reverse_splat_false( +; CHECK-NEXT: [[A_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i1( [[A:%.*]]) +; CHECK-NEXT: [[B_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i32( [[B:%.*]]) +; CHECK-NEXT: [[C_INSERT:%.*]] = insertelement poison, i32 [[C:%.*]], i64 0 +; CHECK-NEXT: [[C_SPLAT:%.*]] = shufflevector [[C_INSERT]], poison, zeroinitializer +; CHECK-NEXT: [[SELECT:%.*]] = select [[A_REV]], [[B_REV]], [[C_SPLAT]] +; CHECK-NEXT: ret [[SELECT]] +; + %a.rev = tail call @llvm.experimental.vector.reverse.nxv4i1( %a) + %b.rev = tail call @llvm.experimental.vector.reverse.nxv4i32( %b) + %c.insert = insertelement poison, i32 %c, i32 0 + %c.splat = shufflevector %c.insert, poison, zeroinitializer + %select = select %a.rev, %b.rev, %c.splat + ret %select +} + +; %a.rev has multiple uses +define @select_reverse_splat_false_1( %a, %b, i32 %c) { +; CHECK-LABEL: @select_reverse_splat_false_1( +; CHECK-NEXT: [[A_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i1( [[A:%.*]]) +; CHECK-NEXT: [[B_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i32( [[B:%.*]]) +; CHECK-NEXT: [[C_INSERT:%.*]] = insertelement poison, i32 [[C:%.*]], i64 0 +; CHECK-NEXT: [[C_SPLAT:%.*]] = shufflevector [[C_INSERT]], poison, zeroinitializer +; CHECK-NEXT: call void @use_nxv4i1( [[A_REV]]) +; CHECK-NEXT: [[SELECT:%.*]] = select [[A_REV]], [[B_REV]], [[C_SPLAT]] +; CHECK-NEXT: ret [[SELECT]] +; + %a.rev = tail call @llvm.experimental.vector.reverse.nxv4i1( %a) + %b.rev = tail call @llvm.experimental.vector.reverse.nxv4i32( %b) + %c.insert = insertelement poison, i32 %c, i32 0 + %c.splat = shufflevector %c.insert, poison, zeroinitializer + call void @use_nxv4i1( %a.rev) + %select = select %a.rev, %b.rev, %c.splat + ret %select +} + +; %b.rev has multiple uses +define @select_reverse_splat_false_2( %a, %b, i32 %c) { +; CHECK-LABEL: @select_reverse_splat_false_2( +; CHECK-NEXT: [[A_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i1( [[A:%.*]]) +; CHECK-NEXT: [[B_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i32( [[B:%.*]]) +; CHECK-NEXT: [[C_INSERT:%.*]] = insertelement poison, i32 [[C:%.*]], i64 0 +; CHECK-NEXT: [[C_SPLAT:%.*]] = shufflevector [[C_INSERT]], poison, zeroinitializer +; CHECK-NEXT: call void @use_nxv4i32( [[B_REV]]) +; CHECK-NEXT: [[SELECT:%.*]] = select [[A_REV]], [[B_REV]], [[C_SPLAT]] +; CHECK-NEXT: ret [[SELECT]] +; + %a.rev = tail call @llvm.experimental.vector.reverse.nxv4i1( %a) + %b.rev = tail call @llvm.experimental.vector.reverse.nxv4i32( %b) + %c.insert = insertelement poison, i32 %c, i32 0 + %c.splat = shufflevector %c.insert, poison, zeroinitializer + call void @use_nxv4i32( %b.rev) + %select = select %a.rev, %b.rev, %c.splat + ret %select +} + +; %a.rev and %b.rev have multiple uses +define @select_reverse_splat_false_3( %a, %b, i32 %c) { +; CHECK-LABEL: @select_reverse_splat_false_3( +; CHECK-NEXT: [[A_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i1( 
[[A:%.*]]) +; CHECK-NEXT: [[B_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i32( [[B:%.*]]) +; CHECK-NEXT: [[C_INSERT:%.*]] = insertelement poison, i32 [[C:%.*]], i64 0 +; CHECK-NEXT: [[C_SPLAT:%.*]] = shufflevector [[C_INSERT]], poison, zeroinitializer +; CHECK-NEXT: call void @use_nxv4i1( [[A_REV]]) +; CHECK-NEXT: call void @use_nxv4i32( [[B_REV]]) +; CHECK-NEXT: [[SELECT:%.*]] = select [[A_REV]], [[B_REV]], [[C_SPLAT]] +; CHECK-NEXT: ret [[SELECT]] +; + %a.rev = tail call @llvm.experimental.vector.reverse.nxv4i1( %a) + %b.rev = tail call @llvm.experimental.vector.reverse.nxv4i32( %b) + %c.insert = insertelement poison, i32 %c, i32 0 + %c.splat = shufflevector %c.insert, poison, zeroinitializer + call void @use_nxv4i1( %a.rev) + call void @use_nxv4i32( %b.rev) + %select = select %a.rev, %b.rev, %c.splat + ret %select +} + +define @select_reverse_splat_true( %a, %b, i32 %c) { +; CHECK-LABEL: @select_reverse_splat_true( +; CHECK-NEXT: [[A_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i1( [[A:%.*]]) +; CHECK-NEXT: [[B_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i32( [[B:%.*]]) +; CHECK-NEXT: [[C_INSERT:%.*]] = insertelement poison, i32 [[C:%.*]], i64 0 +; CHECK-NEXT: [[C_SPLAT:%.*]] = shufflevector [[C_INSERT]], poison, zeroinitializer +; CHECK-NEXT: [[SELECT:%.*]] = select [[A_REV]], [[C_SPLAT]], [[B_REV]] +; CHECK-NEXT: ret [[SELECT]] +; + %a.rev = tail call @llvm.experimental.vector.reverse.nxv4i1( %a) + %b.rev = tail call @llvm.experimental.vector.reverse.nxv4i32( %b) + %c.insert = insertelement poison, i32 %c, i32 0 + %c.splat = shufflevector %c.insert, poison, zeroinitializer + %select = select %a.rev, %c.splat, %b.rev + ret %select +} + +; %a.rev has multiple uses +define @select_reverse_splat_true_1( %a, %b, i32 %c) { +; CHECK-LABEL: @select_reverse_splat_true_1( +; CHECK-NEXT: [[A_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i1( [[A:%.*]]) +; CHECK-NEXT: [[B_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i32( [[B:%.*]]) +; CHECK-NEXT: [[C_INSERT:%.*]] = insertelement poison, i32 [[C:%.*]], i64 0 +; CHECK-NEXT: [[C_SPLAT:%.*]] = shufflevector [[C_INSERT]], poison, zeroinitializer +; CHECK-NEXT: call void @use_nxv4i1( [[A_REV]]) +; CHECK-NEXT: [[SELECT:%.*]] = select [[A_REV]], [[C_SPLAT]], [[B_REV]] +; CHECK-NEXT: ret [[SELECT]] +; + %a.rev = tail call @llvm.experimental.vector.reverse.nxv4i1( %a) + %b.rev = tail call @llvm.experimental.vector.reverse.nxv4i32( %b) + %c.insert = insertelement poison, i32 %c, i32 0 + %c.splat = shufflevector %c.insert, poison, zeroinitializer + call void @use_nxv4i1( %a.rev) + %select = select %a.rev, %c.splat, %b.rev + ret %select +} + +; %b.rev has multiple uses +define @select_reverse_splat_true_2( %a, %b, i32 %c) { +; CHECK-LABEL: @select_reverse_splat_true_2( +; CHECK-NEXT: [[A_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i1( [[A:%.*]]) +; CHECK-NEXT: [[B_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i32( [[B:%.*]]) +; CHECK-NEXT: [[C_INSERT:%.*]] = insertelement poison, i32 [[C:%.*]], i64 0 +; CHECK-NEXT: [[C_SPLAT:%.*]] = shufflevector [[C_INSERT]], poison, zeroinitializer +; CHECK-NEXT: call void @use_nxv4i32( [[B_REV]]) +; CHECK-NEXT: [[SELECT:%.*]] = select [[A_REV]], [[C_SPLAT]], [[B_REV]] +; CHECK-NEXT: ret [[SELECT]] +; + %a.rev = tail call @llvm.experimental.vector.reverse.nxv4i1( %a) + %b.rev = tail call @llvm.experimental.vector.reverse.nxv4i32( %b) + %c.insert = insertelement poison, i32 %c, i32 0 + %c.splat = shufflevector %c.insert, 
poison, zeroinitializer + call void @use_nxv4i32( %b.rev) + %select = select %a.rev, %c.splat, %b.rev + ret %select +} + +; %a.rev and %b.rev have multiple uses +define @select_reverse_splat_true_3( %a, %b, i32 %c) { +; CHECK-LABEL: @select_reverse_splat_true_3( +; CHECK-NEXT: [[A_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i1( [[A:%.*]]) +; CHECK-NEXT: [[B_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i32( [[B:%.*]]) +; CHECK-NEXT: [[C_INSERT:%.*]] = insertelement poison, i32 [[C:%.*]], i64 0 +; CHECK-NEXT: [[C_SPLAT:%.*]] = shufflevector [[C_INSERT]], poison, zeroinitializer +; CHECK-NEXT: call void @use_nxv4i1( [[A_REV]]) +; CHECK-NEXT: call void @use_nxv4i32( [[B_REV]]) +; CHECK-NEXT: [[SELECT:%.*]] = select [[A_REV]], [[C_SPLAT]], [[B_REV]] +; CHECK-NEXT: ret [[SELECT]] +; + %a.rev = tail call @llvm.experimental.vector.reverse.nxv4i1( %a) + %b.rev = tail call @llvm.experimental.vector.reverse.nxv4i32( %b) + %c.insert = insertelement poison, i32 %c, i32 0 + %c.splat = shufflevector %c.insert, poison, zeroinitializer + call void @use_nxv4i1( %a.rev) + call void @use_nxv4i32( %b.rev) + %select = select %a.rev, %c.splat, %b.rev + ret %select +} + +; Tests to ensure no reversals exist when all operands are reversed and the +; result is also reversed. + +define @reverse_binop_reverse( %a, %b) { +; CHECK-LABEL: @reverse_binop_reverse( +; CHECK-NEXT: [[ADD1:%.*]] = fadd [[A:%.*]], [[B:%.*]] +; CHECK-NEXT: ret [[ADD1]] +; + %a.rev = tail call @llvm.experimental.vector.reverse.nxv4f32( %a) + %b.rev = tail call @llvm.experimental.vector.reverse.nxv4f32( %b) + %add = fadd %a.rev, %b.rev + %add.rev = tail call @llvm.experimental.vector.reverse.nxv4f32( %add) + ret %add.rev +} + +define @reverse_binop_reverse_splat_RHS( %a, float %b) { +; CHECK-LABEL: @reverse_binop_reverse_splat_RHS( +; CHECK-NEXT: [[B_INSERT:%.*]] = insertelement poison, float [[B:%.*]], i64 0 +; CHECK-NEXT: [[B_SPLAT:%.*]] = shufflevector [[B_INSERT]], poison, zeroinitializer +; CHECK-NEXT: [[DIV1:%.*]] = fdiv [[A:%.*]], [[B_SPLAT]] +; CHECK-NEXT: ret [[DIV1]] +; + %a.rev = tail call @llvm.experimental.vector.reverse.nxv4f32( %a) + %b.insert = insertelement poison, float %b, i32 0 + %b.splat = shufflevector %b.insert, poison, zeroinitializer + %div = fdiv %a.rev, %b.splat + %div.rev = tail call @llvm.experimental.vector.reverse.nxv4f32( %div) + ret %div.rev +} + +define @reverse_binop_reverse_splat_LHS( %a, float %b) { +; CHECK-LABEL: @reverse_binop_reverse_splat_LHS( +; CHECK-NEXT: [[B_INSERT:%.*]] = insertelement poison, float [[B:%.*]], i64 0 +; CHECK-NEXT: [[B_SPLAT:%.*]] = shufflevector [[B_INSERT]], poison, zeroinitializer +; CHECK-NEXT: [[DIV1:%.*]] = fdiv [[B_SPLAT]], [[A:%.*]] +; CHECK-NEXT: ret [[DIV1]] +; + %a.rev = tail call @llvm.experimental.vector.reverse.nxv4f32( %a) + %b.insert = insertelement poison, float %b, i32 0 + %b.splat = shufflevector %b.insert, poison, zeroinitializer + %div = fdiv %b.splat, %a.rev + %div.rev = tail call @llvm.experimental.vector.reverse.nxv4f32( %div) + ret %div.rev +} + +define @reverse_fcmp_reverse( %a, %b) { +; CHECK-LABEL: @reverse_fcmp_reverse( +; CHECK-NEXT: [[A_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4f32( [[A:%.*]]) +; CHECK-NEXT: [[B_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4f32( [[B:%.*]]) +; CHECK-NEXT: [[CMP:%.*]] = fcmp olt [[A_REV]], [[B_REV]] +; CHECK-NEXT: [[CMP_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i1( [[CMP]]) +; CHECK-NEXT: ret [[CMP_REV]] +; + %a.rev = tail call 
@llvm.experimental.vector.reverse.nxv4f32( %a) + %b.rev = tail call @llvm.experimental.vector.reverse.nxv4f32( %b) + %cmp = fcmp olt %a.rev, %b.rev + %cmp.rev = tail call @llvm.experimental.vector.reverse.nxv4i1( %cmp) + ret %cmp.rev +} + +define @reverse_select_reverse( %a, %b, %c) { +; CHECK-LABEL: @reverse_select_reverse( +; CHECK-NEXT: [[A_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4i1( [[A:%.*]]) +; CHECK-NEXT: [[B_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4f32( [[B:%.*]]) +; CHECK-NEXT: [[C_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4f32( [[C:%.*]]) +; CHECK-NEXT: [[SELECT:%.*]] = select [[A_REV]], [[B_REV]], [[C_REV]] +; CHECK-NEXT: [[SELECT_REV:%.*]] = tail call @llvm.experimental.vector.reverse.nxv4f32( [[SELECT]]) +; CHECK-NEXT: ret [[SELECT_REV]] +; + %a.rev = tail call @llvm.experimental.vector.reverse.nxv4i1( %a) + %b.rev = tail call @llvm.experimental.vector.reverse.nxv4f32( %b) + %c.rev = tail call @llvm.experimental.vector.reverse.nxv4f32( %c) + %select = select %a.rev, %b.rev, %c.rev + %select.rev = tail call @llvm.experimental.vector.reverse.nxv4f32( %select) + ret %select.rev +} + +define @reverse_unop_reverse( %a) { +; CHECK-LABEL: @reverse_unop_reverse( +; CHECK-NEXT: [[NEG1:%.*]] = fneg [[A:%.*]] ; CHECK-NEXT: ret [[NEG1]] ; - %reva = tail call @llvm.experimental.vector.reverse.nxv4f32( %a) - %neg = fneg fast %reva - %revneg = tail call @llvm.experimental.vector.reverse.nxv4f32( %neg) - ret %revneg + %a.rev = tail call @llvm.experimental.vector.reverse.nxv4f32( %a) + %neg = fneg %a.rev + %neg.rev = tail call @llvm.experimental.vector.reverse.nxv4f32( %neg) + ret %neg.rev } -declare @llvm.experimental.vector.reverse.nxv4f32() -declare @llvm.experimental.vector.reverse.nxv4i32() +declare void @use_nxv4i1() +declare void @use_nxv4i32() +declare void @use_nxv4f32() +declare @llvm.experimental.vector.reverse.nxv4i1() +declare @llvm.experimental.vector.reverse.nxv4i32() +declare @llvm.experimental.vector.reverse.nxv4f32()
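
For reference, the fold this coverage prepares for bubbles the operands' reversals through to the result whenever that does not increase the total number of reverse calls. A minimal before/after sketch in LLVM IR, under that assumption (the value names %add.inner and %add.rev are illustrative, and the scalable vector types abbreviated by the .nxv4i32 intrinsic suffix above are written out in full):

  ; Before: each operand of the lanewise add is reversed individually.
  %a.rev = tail call <vscale x 4 x i32> @llvm.experimental.vector.reverse.nxv4i32(<vscale x 4 x i32> %a)
  %b.rev = tail call <vscale x 4 x i32> @llvm.experimental.vector.reverse.nxv4i32(<vscale x 4 x i32> %b)
  %add = add <vscale x 4 x i32> %a.rev, %b.rev

  ; After: a single reversal of the result. This is sound because add acts
  ; independently on each lane and reverse is a pure permutation, so
  ; reverse(a) + reverse(b) == reverse(a + b).
  %add.inner = add <vscale x 4 x i32> %a, %b
  %add.rev = tail call <vscale x 4 x i32> @llvm.experimental.vector.reverse.nxv4i32(<vscale x 4 x i32> %add.inner)

When the result itself feeds another reverse, as in the reverse_binop_reverse style tests above, the resulting back-to-back reversals cancel and no reverse calls remain.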