From cd1a0b92710e567c00f6d2b932b197e9a1773f7d Mon Sep 17 00:00:00 2001
From: Oliver Cruickshank
Date: Mon, 16 Sep 2019 15:19:49 +0000
Subject: [PATCH] [ARM] Add patterns for CTLZ on MVE

CTLZ intrinsic can use the VCLZ instruction on MVE, which produces
better results than expanding.

llvm-svn: 371999
---
 llvm/lib/Target/ARM/ARMISelLowering.cpp |   1 +
 llvm/lib/Target/ARM/ARMInstrMVE.td      |   9 ++
 llvm/test/CodeGen/Thumb2/mve-ctlz.ll    | 140 ++++++++++++++++++++++++++++++++
 3 files changed, 150 insertions(+)
 create mode 100644 llvm/test/CodeGen/Thumb2/mve-ctlz.ll

diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 0ffb931..f7d5496 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -261,6 +261,7 @@ void ARMTargetLowering::addMVEVectorTypes(bool HasMVEFP) {
     setOperationAction(ISD::SETCC, VT, Custom);
     setOperationAction(ISD::MLOAD, VT, Custom);
     setOperationAction(ISD::MSTORE, VT, Legal);
+    setOperationAction(ISD::CTLZ, VT, Legal);
 
     // No native support for these.
     setOperationAction(ISD::UDIV, VT, Expand);
diff --git a/llvm/lib/Target/ARM/ARMInstrMVE.td b/llvm/lib/Target/ARM/ARMInstrMVE.td
index f996024..aa12d50 100644
--- a/llvm/lib/Target/ARM/ARMInstrMVE.td
+++ b/llvm/lib/Target/ARM/ARMInstrMVE.td
@@ -1756,6 +1756,15 @@ def MVE_VCLZs8 : MVE_VCLSCLZ<"vclz", "i8", 0b00, 0b1>;
 def MVE_VCLZs16 : MVE_VCLSCLZ<"vclz", "i16", 0b01, 0b1>;
 def MVE_VCLZs32 : MVE_VCLSCLZ<"vclz", "i32", 0b10, 0b1>;
 
+let Predicates = [HasMVEInt] in {
+  def : Pat<(v16i8 ( ctlz (v16i8 MQPR:$val1))),
+            (v16i8 ( MVE_VCLZs8 (v16i8 MQPR:$val1)))>;
+  def : Pat<(v4i32 ( ctlz (v4i32 MQPR:$val1))),
+            (v4i32 ( MVE_VCLZs32 (v4i32 MQPR:$val1)))>;
+  def : Pat<(v8i16 ( ctlz (v8i16 MQPR:$val1))),
+            (v8i16 ( MVE_VCLZs16 (v8i16 MQPR:$val1)))>;
+}
+
 class MVE_VABSNEG_int<string iname, string suffix, bits<2> size, bit negate,
                       list<dag> pattern=[]>
   : MVEIntSingleSrc<iname, suffix, size, pattern> {
diff --git a/llvm/test/CodeGen/Thumb2/mve-ctlz.ll b/llvm/test/CodeGen/Thumb2/mve-ctlz.ll
new file mode 100644
index 0000000..0645fad
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/mve-ctlz.ll
@@ -0,0 +1,140 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv8.1m.main-arm-none-eabi -verify-machineinstrs -mattr=+mve %s -o - | FileCheck %s
+
+define arm_aapcs_vfpcc <2 x i64> @ctlz_2i64_0_t(<2 x i64> %src){
+; CHECK-LABEL: ctlz_2i64_0_t:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov r0, s3
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    cset r1, ne
+; CHECK-NEXT:    lsls r1, r1, #31
+; CHECK-NEXT:    vmov r1, s2
+; CHECK-NEXT:    clz r1, r1
+; CHECK-NEXT:    add.w r1, r1, #32
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    clzne r1, r0
+; CHECK-NEXT:    vmov r0, s1
+; CHECK-NEXT:    vmov s6, r1
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    cset r1, ne
+; CHECK-NEXT:    lsls r1, r1, #31
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    clz r1, r1
+; CHECK-NEXT:    add.w r1, r1, #32
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    clzne r1, r0
+; CHECK-NEXT:    vmov s4, r1
+; CHECK-NEXT:    vldr s5, .LCPI0_0
+; CHECK-NEXT:    vmov.f32 s7, s5
+; CHECK-NEXT:    vmov q0, q1
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI0_0:
+; CHECK-NEXT:    .long 0 @ float 0
+entry:
+  %0 = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %src, i1 0)
+  ret <2 x i64> %0
+}
+
+define arm_aapcs_vfpcc <4 x i32> @ctlz_4i32_0_t(<4 x i32> %src){
+; CHECK-LABEL: ctlz_4i32_0_t:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vclz.i32 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %src, i1 0)
+  ret <4 x i32> %0
+}
+
+define arm_aapcs_vfpcc <8 x i16> @ctlz_8i16_0_t(<8 x i16> %src){
+; CHECK-LABEL: ctlz_8i16_0_t:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vclz.i16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %src, i1 0)
+  ret <8 x i16> %0
+}
+
+define arm_aapcs_vfpcc <16 x i8> @ctlz_16i8_0_t(<16 x i8> %src){
+; CHECK-LABEL: ctlz_16i8_0_t:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vclz.i8 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %src, i1 0)
+  ret <16 x i8> %0
+}
+
+define arm_aapcs_vfpcc <2 x i64> @ctlz_2i64_1_t(<2 x i64> %src){
+; CHECK-LABEL: ctlz_2i64_1_t:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov r0, s3
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    cset r1, ne
+; CHECK-NEXT:    lsls r1, r1, #31
+; CHECK-NEXT:    vmov r1, s2
+; CHECK-NEXT:    clz r1, r1
+; CHECK-NEXT:    add.w r1, r1, #32
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    clzne r1, r0
+; CHECK-NEXT:    vmov r0, s1
+; CHECK-NEXT:    vmov s6, r1
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    cset r1, ne
+; CHECK-NEXT:    lsls r1, r1, #31
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    clz r1, r1
+; CHECK-NEXT:    add.w r1, r1, #32
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    clzne r1, r0
+; CHECK-NEXT:    vmov s4, r1
+; CHECK-NEXT:    vldr s5, .LCPI4_0
+; CHECK-NEXT:    vmov.f32 s7, s5
+; CHECK-NEXT:    vmov q0, q1
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI4_0:
+; CHECK-NEXT:    .long 0 @ float 0
+entry:
+  %0 = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %src, i1 1)
+  ret <2 x i64> %0
+}
+
+define arm_aapcs_vfpcc <4 x i32> @ctlz_4i32_1_t(<4 x i32> %src){
+; CHECK-LABEL: ctlz_4i32_1_t:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vclz.i32 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %src, i1 1)
+  ret <4 x i32> %0
+}
+
+define arm_aapcs_vfpcc <8 x i16> @ctlz_8i16_1_t(<8 x i16> %src){
+; CHECK-LABEL: ctlz_8i16_1_t:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vclz.i16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %src, i1 1)
+  ret <8 x i16> %0
+}
+
+define arm_aapcs_vfpcc <16 x i8> @ctlz_16i8_1_t(<16 x i8> %src){
+; CHECK-LABEL: ctlz_16i8_1_t:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vclz.i8 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %src, i1 1)
+  ret <16 x i8> %0
+}
+
+
+declare <2 x i64> @llvm.ctlz.v2i64(<2 x i64>, i1)
+declare <4 x i32> @llvm.ctlz.v4i32(<4 x i32>, i1)
+declare <8 x i16> @llvm.ctlz.v8i16(<8 x i16>, i1)
+declare <16 x i8> @llvm.ctlz.v16i8(<16 x i8>, i1)
-- 
2.7.4
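
For reference, a minimal C sketch of source that can reach this lowering. It is
not part of the patch; the function name, the compiler flags, and the claim that
the loop vectorizes are assumptions for illustration, not something the patch
guarantees.

  // Hypothetical example: built with clang -O2 --target=arm-none-eabi
  // -march=armv8.1-m.main+mve, this loop may be vectorized to
  // llvm.ctlz.v4i32, which the new patterns select as a single vclz.i32
  // instead of an expanded bit-twiddling sequence.
  #include <stdint.h>

  void clz32(uint32_t *restrict dst, const uint32_t *restrict src, int n) {
    for (int i = 0; i < n; i++)
      // The guard keeps __builtin_clz(0) (undefined) out of the source;
      // instcombine folds the idiom to ctlz with zero defined as 32.
      dst[i] = src[i] ? (uint32_t)__builtin_clz(src[i]) : 32u;
  }

Note that <2 x i64> still goes through the generic expansion, as the patterns
only cover the 8-, 16- and 32-bit element types; the ctlz_2i64 tests above show
the resulting scalar clz sequence.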