From 4a5ccf4e93421e406b638ddfa18445a9a5a1211f Mon Sep 17 00:00:00 2001
From: David Sherwood
Date: Thu, 1 Dec 2022 09:53:13 +0000
Subject: [PATCH] Revert "[AArch64][SME]: Generate streaming-compatible code
 for ld2-alloca."

This reverts commit 279c0a83aa22cd35d4b7c7c52b85d2a86f2528a7.
---
 llvm/lib/Target/AArch64/AArch64ISelLowering.cpp    |  13 +-
 .../sve-streaming-mode-fixed-length-ld2-alloca.ll  | 162 +++++++--------------
 2 files changed, 59 insertions(+), 116 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 3dd5ccf..1d25d6a 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -13952,10 +13952,6 @@ bool AArch64TargetLowering::isLegalInterleavedAccessType(
 
   UseScalable = false;
 
-  // Ensure that the predicate for this elemnts num is available.
-  if (Subtarget->hasSVE() && !getSVEPredPatternFromNumElements(NumElements))
-    return false;
-
   // Ensure the number of vector elements is greater than 1.
   if (NumElements < 2)
     return false;
@@ -13964,11 +13960,10 @@ bool AArch64TargetLowering::isLegalInterleavedAccessType(
   if (ElSize != 8 && ElSize != 16 && ElSize != 32 && ElSize != 64)
     return false;
 
-  if (Subtarget->forceStreamingCompatibleSVE() ||
-      (Subtarget->useSVEForFixedLengthVectors() &&
-       (VecSize % Subtarget->getMinSVEVectorSizeInBits() == 0 ||
-        (VecSize < Subtarget->getMinSVEVectorSizeInBits() &&
-         isPowerOf2_32(NumElements) && VecSize > 128)))) {
+  if (Subtarget->useSVEForFixedLengthVectors() &&
+      (VecSize % Subtarget->getMinSVEVectorSizeInBits() == 0 ||
+       (VecSize < Subtarget->getMinSVEVectorSizeInBits() &&
+        isPowerOf2_32(NumElements) && VecSize > 128))) {
     UseScalable = true;
     return true;
   }
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ld2-alloca.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ld2-alloca.ll
index f7d04c9..8edab93 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ld2-alloca.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ld2-alloca.ll
@@ -13,144 +13,92 @@ define void @alloc_v4i8(ptr %st_ptr) #0 {
 ; CHECK-NEXT:    mov x19, x0
 ; CHECK-NEXT:    add x0, sp, #12
 ; CHECK-NEXT:    bl def
-; CHECK-NEXT:    add x8, sp, #12
-; CHECK-NEXT:    ptrue p0.b, vl2
-; CHECK-NEXT:    ld2b { z0.b, z1.b }, p0/z, [x8]
-; CHECK-NEXT:    ptrue p0.s, vl2
-; CHECK-NEXT:    mov z2.b, z0.b[1]
+; CHECK-NEXT:    ldr s0, [sp, #12]
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    uunpklo z0.h, z0.b
+; CHECK-NEXT:    fmov w8, s0
+; CHECK-NEXT:    mov z1.h, z0.h[3]
+; CHECK-NEXT:    mov z2.h, z0.h[1]
+; CHECK-NEXT:    mov z0.h, z0.h[2]
+; CHECK-NEXT:    fmov w9, s1
+; CHECK-NEXT:    fmov w10, s2
+; CHECK-NEXT:    strh w8, [sp]
 ; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    fmov w9, s2
-; CHECK-NEXT:    stp w8, w9, [sp]
+; CHECK-NEXT:    strh w9, [sp, #6]
+; CHECK-NEXT:    strh w10, [sp, #4]
+; CHECK-NEXT:    strh w8, [sp, #2]
 ; CHECK-NEXT:    ldr d0, [sp]
-; CHECK-NEXT:    st1b { z0.s }, p0, [x19]
+; CHECK-NEXT:    st1b { z0.h }, p0, [x19]
 ; CHECK-NEXT:    ldp x30, x19, [sp, #16] // 16-byte Folded Reload
 ; CHECK-NEXT:    add sp, sp, #32
 ; CHECK-NEXT:    ret
   %alloc = alloca [4 x i8]
   call void @def(ptr %alloc)
   %load = load <4 x i8>, ptr %alloc
-  %strided.vec = shufflevector <4 x i8> %load, <4 x i8> poison, <2 x i32> <i32 0, i32 2>
-  store <2 x i8> %strided.vec, ptr %st_ptr
+  %strided.vec = shufflevector <4 x i8> %load, <4 x i8> poison, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+  store <4 x i8> %strided.vec, ptr %st_ptr
   ret void
 }
 
 define void @alloc_v6i8(ptr %st_ptr) #0 {
 ; CHECK-LABEL: alloc_v6i8:
 ; CHECK: // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #48
-; CHECK-NEXT:    stp x30, x19, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT:    sub sp, sp, #32
+; CHECK-NEXT:    stp x30, x19, [sp, #16] // 16-byte Folded Spill
 ; CHECK-NEXT:    mov x19, x0
-; CHECK-NEXT:    add x0, sp, #24
+; CHECK-NEXT:    add x0, sp, #8
 ; CHECK-NEXT:    bl def
-; CHECK-NEXT:    add x8, sp, #24
-; CHECK-NEXT:    ptrue p0.b, vl3
-; CHECK-NEXT:    ld2b { z0.b, z1.b }, p0/z, [x8]
-; CHECK-NEXT:    ptrue p0.h, vl4
-; CHECK-NEXT:    fmov w8, s1
-; CHECK-NEXT:    mov z2.b, z1.b[3]
-; CHECK-NEXT:    mov z3.b, z1.b[2]
-; CHECK-NEXT:    mov z0.b, z1.b[1]
-; CHECK-NEXT:    fmov w9, s2
-; CHECK-NEXT:    fmov w10, s3
-; CHECK-NEXT:    strh w8, [sp, #8]
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    strh w9, [sp, #14]
-; CHECK-NEXT:    strh w10, [sp, #12]
-; CHECK-NEXT:    strh w8, [sp, #10]
-; CHECK-NEXT:    add x8, sp, #20
 ; CHECK-NEXT:    ldr d0, [sp, #8]
-; CHECK-NEXT:    st1b { z0.h }, p0, [x8]
-; CHECK-NEXT:    ldrh w8, [sp, #20]
-; CHECK-NEXT:    strb w10, [x19, #2]
-; CHECK-NEXT:    strh w8, [x19]
-; CHECK-NEXT:    ldp x30, x19, [sp, #32] // 16-byte Folded Reload
-; CHECK-NEXT:    add sp, sp, #48
-; CHECK-NEXT:    ret
-  %alloc = alloca [6 x i8]
-  call void @def(ptr %alloc)
-  %load = load <6 x i8>, ptr %alloc
-  %strided.vec = shufflevector <6 x i8> %load, <6 x i8> poison, <3 x i32> <i32 1, i32 3, i32 5>
-  store <3 x i8> %strided.vec, ptr %st_ptr
-  ret void
-}
-
-define void @alloc_v32i8(ptr %st_ptr) #0 {
-; CHECK-LABEL: alloc_v32i8:
-; CHECK: // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #64
-; CHECK-NEXT:    stp x30, x19, [sp, #48] // 16-byte Folded Spill
-; CHECK-NEXT:    mov x19, x0
-; CHECK-NEXT:    add x0, sp, #16
-; CHECK-NEXT:    bl def
-; CHECK-NEXT:    ldp q0, q1, [sp, #16]
-; CHECK-NEXT:    mov z2.b, z0.b[14]
-; CHECK-NEXT:    mov z3.b, z0.b[12]
+; CHECK-NEXT:    mov z1.b, z0.b[4]
+; CHECK-NEXT:    mov z2.b, z0.b[5]
 ; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    fmov w9, s2
-; CHECK-NEXT:    fmov w10, s3
-; CHECK-NEXT:    mov z4.b, z0.b[10]
-; CHECK-NEXT:    mov z5.b, z0.b[8]
-; CHECK-NEXT:    mov z6.b, z0.b[6]
-; CHECK-NEXT:    strb w8, [sp]
-; CHECK-NEXT:    fmov w8, s4
-; CHECK-NEXT:    strb w9, [sp, #7]
-; CHECK-NEXT:    fmov w9, s5
-; CHECK-NEXT:    strb w10, [sp, #6]
-; CHECK-NEXT:    fmov w10, s6
-; CHECK-NEXT:    mov z7.b, z0.b[4]
+; CHECK-NEXT:    fmov w9, s1
+; CHECK-NEXT:    fmov w10, s2
+; CHECK-NEXT:    mov z3.b, z0.b[3]
+; CHECK-NEXT:    mov z4.b, z0.b[1]
 ; CHECK-NEXT:    mov z0.b, z0.b[2]
-; CHECK-NEXT:    strb w8, [sp, #5]
-; CHECK-NEXT:    fmov w8, s7
-; CHECK-NEXT:    strb w9, [sp, #4]
-; CHECK-NEXT:    fmov w9, s0
-; CHECK-NEXT:    strb w10, [sp, #3]
-; CHECK-NEXT:    fmov w10, s1
-; CHECK-NEXT:    strb w8, [sp, #2]
-; CHECK-NEXT:    strb w9, [sp, #1]
-; CHECK-NEXT:    strb w10, [x19, #8]
-; CHECK-NEXT:    ldr q0, [sp]
-; CHECK-NEXT:    fmov x8, d0
-; CHECK-NEXT:    str x8, [x19]
-; CHECK-NEXT:    ldp x30, x19, [sp, #48] // 16-byte Folded Reload
-; CHECK-NEXT:    add sp, sp, #64
+; CHECK-NEXT:    strb w8, [sp]
+; CHECK-NEXT:    fmov w8, s3
+; CHECK-NEXT:    strb w9, [sp, #5]
+; CHECK-NEXT:    fmov w9, s4
+; CHECK-NEXT:    strb w10, [sp, #4]
+; CHECK-NEXT:    fmov w10, s0
+; CHECK-NEXT:    strb w8, [sp, #3]
+; CHECK-NEXT:    strb w9, [sp, #2]
+; CHECK-NEXT:    strb w10, [sp, #1]
+; CHECK-NEXT:    ldr d0, [sp]
+; CHECK-NEXT:    mov z1.h, z0.h[2]
+; CHECK-NEXT:    fmov w8, s0
+; CHECK-NEXT:    fmov w9, s1
+; CHECK-NEXT:    str w8, [x19]
+; CHECK-NEXT:    strh w9, [x19, #4]
+; CHECK-NEXT:    ldp x30, x19, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT:    add sp, sp, #32
 ; CHECK-NEXT:    ret
-  %alloc = alloca [32 x i8]
+  %alloc = alloca [6 x i8]
   call void @def(ptr %alloc)
-  %load = load <32 x i8>, ptr %alloc
-  %strided.vec = shufflevector <32 x i8> %load, <32 x i8> poison, <9 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16>
-  store <9 x i8> %strided.vec, ptr %st_ptr
+  %load = load <6 x i8>, ptr %alloc
+  %strided.vec = shufflevector <6 x i8> %load, <6 x i8> poison, <6 x i32> <i32 0, i32 2, i32 1, i32 3, i32 5, i32 4>
+  store <6 x i8> %strided.vec, ptr %st_ptr
   ret void
 }
-
 
 define void @alloc_v8f64(ptr %st_ptr) #0 {
 ; CHECK-LABEL: alloc_v8f64:
 ; CHECK: // %bb.0:
-; CHECK-NEXT:    str x29, [sp, #-32]! // 8-byte Folded Spill
-; CHECK-NEXT:    stp x30, x19, [sp, #16] // 16-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    sub sp, sp, #64
+; CHECK-NEXT:    sub sp, sp, #96
+; CHECK-NEXT:    stp x20, x19, [sp, #80] // 16-byte Folded Spill
 ; CHECK-NEXT:    mov x19, x0
 ; CHECK-NEXT:    mov x0, sp
+; CHECK-NEXT:    str x30, [sp, #64] // 8-byte Folded Spill
+; CHECK-NEXT:    mov x20, sp
 ; CHECK-NEXT:    bl def
-; CHECK-NEXT:    cntd x8
-; CHECK-NEXT:    ptrue p0.d, vl4
-; CHECK-NEXT:    sub x8, x8, #2
-; CHECK-NEXT:    ld2d { z0.d, z1.d }, p0/z, [sp]
-; CHECK-NEXT:    mov w9, #2
-; CHECK-NEXT:    cmp x8, #2
-; CHECK-NEXT:    csel x8, x8, x9, lo
-; CHECK-NEXT:    add x10, sp, #64
-; CHECK-NEXT:    lsl x8, x8, #3
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    add x9, sp, #64
-; CHECK-NEXT:    st1d { z0.d }, p0, [x10]
-; CHECK-NEXT:    ldr q2, [x9, x8]
+; CHECK-NEXT:    ld2 { v0.2d, v1.2d }, [x20], #32
+; CHECK-NEXT:    ldr x30, [sp, #64] // 8-byte Folded Reload
+; CHECK-NEXT:    ld2 { v2.2d, v3.2d }, [x20]
 ; CHECK-NEXT:    stp q0, q2, [x19]
-; CHECK-NEXT:    addvl sp, sp, #1
-; CHECK-NEXT:    add sp, sp, #64
-; CHECK-NEXT:    ldp x30, x19, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr x29, [sp], #32 // 8-byte Folded Reload
+; CHECK-NEXT:    ldp x20, x19, [sp, #80] // 16-byte Folded Reload
+; CHECK-NEXT:    add sp, sp, #96
 ; CHECK-NEXT:    ret
   %alloc = alloca [8 x double]
   call void @def(ptr %alloc)
-- 
2.7.4