From 14bc85e0ebb6c00c1672158ab6a692bfbb11e1cc Mon Sep 17 00:00:00 2001 From: David Sherwood Date: Tue, 14 Jul 2020 16:20:00 +0100 Subject: [PATCH] [SVE] Don't use LocalStackAllocation for SVE objects I have introduced a new TargetFrameLowering query function: isStackIdSafeForLocalArea that queries whether or not it is safe for objects of a given stack id to be bundled into the local area. The default behaviour is to always bundle regardless of the stack id, however for AArch64 this is overridden so that it's only safe for fixed-size stack objects. There is future work here to extend this algorithm for multiple local areas so that SVE stack objects can be bundled together and accessed from their own virtual base-pointer. Differential Revision: https://reviews.llvm.org/D83859 --- llvm/include/llvm/CodeGen/TargetFrameLowering.h | 6 +++ llvm/lib/CodeGen/LocalStackSlotAllocation.cpp | 4 ++ llvm/lib/Target/AArch64/AArch64FrameLowering.h | 6 +++ llvm/test/CodeGen/AArch64/sve-localstackalloc.mir | 61 +++++++++++++++++++++++ 4 files changed, 77 insertions(+) create mode 100644 llvm/test/CodeGen/AArch64/sve-localstackalloc.mir diff --git a/llvm/include/llvm/CodeGen/TargetFrameLowering.h b/llvm/include/llvm/CodeGen/TargetFrameLowering.h index c3a11b1..d658043 100644 --- a/llvm/include/llvm/CodeGen/TargetFrameLowering.h +++ b/llvm/include/llvm/CodeGen/TargetFrameLowering.h @@ -134,6 +134,12 @@ public: /// was called). virtual unsigned getStackAlignmentSkew(const MachineFunction &MF) const; + /// This method returns whether or not it is safe for an object with the + /// given stack id to be bundled into the local area. + virtual bool isStackIdSafeForLocalArea(unsigned StackId) const { + return true; + } + /// getOffsetOfLocalArea - This method returns the offset of the local area /// from the stack pointer on entrance to a function. 
/// diff --git a/llvm/lib/CodeGen/LocalStackSlotAllocation.cpp b/llvm/lib/CodeGen/LocalStackSlotAllocation.cpp index 6c5ef02..204fb55 100644 --- a/llvm/lib/CodeGen/LocalStackSlotAllocation.cpp +++ b/llvm/lib/CodeGen/LocalStackSlotAllocation.cpp @@ -220,6 +220,8 @@ void LocalStackSlotPass::calculateFrameObjectOffsets(MachineFunction &Fn) { continue; if (StackProtectorFI == (int)i) continue; + if (!TFI.isStackIdSafeForLocalArea(MFI.getStackID(i))) + continue; switch (MFI.getObjectSSPLayout(i)) { case MachineFrameInfo::SSPLK_None: @@ -254,6 +256,8 @@ void LocalStackSlotPass::calculateFrameObjectOffsets(MachineFunction &Fn) { continue; if (ProtectedObjs.count(i)) continue; + if (!TFI.isStackIdSafeForLocalArea(MFI.getStackID(i))) + continue; AdjustStackOffset(MFI, i, Offset, StackGrowsDown, MaxAlign); } diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.h b/llvm/lib/Target/AArch64/AArch64FrameLowering.h index 9d0a6d9..444740c 100644 --- a/llvm/lib/Target/AArch64/AArch64FrameLowering.h +++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.h @@ -105,6 +105,12 @@ public: } } + bool isStackIdSafeForLocalArea(unsigned StackId) const override { + // We don't support putting SVE objects into the pre-allocated local + // frame block at the moment. 
+ return StackId != TargetStackID::SVEVector; + } + private: bool shouldCombineCSRLocalStackBump(MachineFunction &MF, uint64_t StackBumpBytes) const; diff --git a/llvm/test/CodeGen/AArch64/sve-localstackalloc.mir b/llvm/test/CodeGen/AArch64/sve-localstackalloc.mir new file mode 100644 index 0000000..c20846c --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-localstackalloc.mir @@ -0,0 +1,61 @@ +# RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve -run-pass=localstackalloc -o - %s | FileCheck %s + +--- | + ; ModuleID = '' + source_filename = "" + target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128" + target triple = "aarch64-unknown-linux-gnu" + + define <vscale x 32 x i8> @insert_32i8_idx(<vscale x 32 x i8> %a, i8 %elt, i64 %idx) #0 { + %ins = insertelement <vscale x 32 x i8> %a, i8 %elt, i64 %idx + ret <vscale x 32 x i8> %ins + } + + attributes #0 = { "target-features"="+sve" } + +... +--- +name: insert_32i8_idx +alignment: 4 +tracksRegLiveness: true +registers: + - { id: 0, class: zpr, preferred-register: '' } + - { id: 1, class: zpr, preferred-register: '' } + - { id: 2, class: gpr32, preferred-register: '' } + - { id: 3, class: gpr64, preferred-register: '' } + - { id: 5, class: ppr_3b, preferred-register: '' } + - { id: 6, class: gpr64sp, preferred-register: '' } + - { id: 7, class: zpr, preferred-register: '' } + - { id: 8, class: zpr, preferred-register: '' } +liveins: + - { reg: '$z0', virtual-reg: '%0' } + - { reg: '$z1', virtual-reg: '%1' } + - { reg: '$w0', virtual-reg: '%2' } +frameInfo: + maxAlignment: 1 + maxCallFrameSize: 0 +# CHECK-LABEL: name: insert_32i8_idx +# CHECK: localFrameSize: 0 +stack: + - { id: 0, name: '', type: default, offset: 0, size: 32, alignment: 16, + stack-id: sve-vec, callee-saved-register: '', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } +machineFunctionInfo: {} +body: | + bb.0 (%ir-block.0): + liveins: $z0, $z1, $w0 + + %2:gpr32 = COPY $w0 + %1:zpr = COPY $z1 + %0:zpr = COPY $z0 + %5:ppr_3b = PTRUE_B 31 + %6:gpr64sp = 
ADDXri %stack.0, 0, 0 + ST1B_IMM %1, %5, %6, 1 :: (store unknown-size, align 16) + ST1B_IMM %0, %5, %stack.0, 0 :: (store unknown-size into %stack.0, align 16) + %7:zpr = LD1B_IMM %5, %6, 1 :: (load unknown-size from %stack.0 + 16, align 16) + %8:zpr = LD1B_IMM %5, %stack.0, 0 :: (load unknown-size from %stack.0, align 16) + $z0 = COPY %8 + $z1 = COPY %7 + RET_ReallyLR implicit $z0, implicit $z1 + +... -- 2.7.4