From 56440b974539dea36431e1b2a0da33d462367a09 Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Mon, 2 Jul 2018 17:01:54 +0000 Subject: [PATCH] [X86] Don't use aligned load/store instructions for fp128 if the load/store isn't aligned. Similarly, don't fold fp128 loads into SSE instructions if the load isn't aligned. Unless we're targeting an AMD CPU that doesn't check alignment on arithmetic instructions. Should fix PR38001 llvm-svn: 336121 --- llvm/lib/Target/X86/X86InstrInfo.td | 9 +++++++++ llvm/lib/Target/X86/X86InstrSSE.td | 14 +++++++++----- llvm/test/CodeGen/X86/extract-store.ll | 4 ++-- llvm/test/CodeGen/X86/fp128-select.ll | 2 +- 4 files changed, 21 insertions(+), 8 deletions(-) diff --git a/llvm/lib/Target/X86/X86InstrInfo.td b/llvm/lib/Target/X86/X86InstrInfo.td index 81ab1e0..25a41a6 100644 --- a/llvm/lib/Target/X86/X86InstrInfo.td +++ b/llvm/lib/Target/X86/X86InstrInfo.td @@ -1093,6 +1093,15 @@ def loadf32 : PatFrag<(ops node:$ptr), (f32 (load node:$ptr))>; def loadf64 : PatFrag<(ops node:$ptr), (f64 (load node:$ptr))>; def loadf80 : PatFrag<(ops node:$ptr), (f80 (load node:$ptr))>; def loadf128 : PatFrag<(ops node:$ptr), (f128 (load node:$ptr))>; +def alignedloadf128 : PatFrag<(ops node:$ptr), (f128 (load node:$ptr)), [{ + LoadSDNode *Ld = cast<LoadSDNode>(N); + return Ld->getAlignment() >= Ld->getMemoryVT().getStoreSize(); +}]>; +def memopf128 : PatFrag<(ops node:$ptr), (f128 (load node:$ptr)), [{ + LoadSDNode *Ld = cast<LoadSDNode>(N); + return Subtarget->hasSSEUnalignedMem() || + Ld->getAlignment() >= Ld->getMemoryVT().getStoreSize(); +}]>; def sextloadi16i8 : PatFrag<(ops node:$ptr), (i16 (sextloadi8 node:$ptr))>; def sextloadi32i8 : PatFrag<(ops node:$ptr), (i32 (sextloadi8 node:$ptr))>; diff --git a/llvm/lib/Target/X86/X86InstrSSE.td b/llvm/lib/Target/X86/X86InstrSSE.td index 71265b8..75c7e32 100644 --- a/llvm/lib/Target/X86/X86InstrSSE.td +++ b/llvm/lib/Target/X86/X86InstrSSE.td @@ -8228,14 +8228,18 @@ let Predicates = [UseAVX2] in { // Extra selection patterns for 
FR128, f128, f128mem // movaps is shorter than movdqa. movaps is in SSE and movdqa is in SSE2. -def : Pat<(store (f128 FR128:$src), addr:$dst), +def : Pat<(alignedstore (f128 FR128:$src), addr:$dst), (MOVAPSmr addr:$dst, (COPY_TO_REGCLASS (f128 FR128:$src), VR128))>; +def : Pat<(store (f128 FR128:$src), addr:$dst), + (MOVUPSmr addr:$dst, (COPY_TO_REGCLASS (f128 FR128:$src), VR128))>; -def : Pat<(loadf128 addr:$src), +def : Pat<(alignedloadf128 addr:$src), (COPY_TO_REGCLASS (MOVAPSrm addr:$src), FR128)>; +def : Pat<(loadf128 addr:$src), + (COPY_TO_REGCLASS (MOVUPSrm addr:$src), FR128)>; // andps is shorter than andpd or pand. andps is SSE and andpd/pand are in SSE2 -def : Pat<(X86fand FR128:$src1, (loadf128 addr:$src2)), +def : Pat<(X86fand FR128:$src1, (memopf128 addr:$src2)), (COPY_TO_REGCLASS (ANDPSrm (COPY_TO_REGCLASS FR128:$src1, VR128), f128mem:$src2), FR128)>; @@ -8250,7 +8254,7 @@ def : Pat<(and FR128:$src1, FR128:$src2), (ANDPSrr (COPY_TO_REGCLASS FR128:$src1, VR128), (COPY_TO_REGCLASS FR128:$src2, VR128)), FR128)>; -def : Pat<(X86for FR128:$src1, (loadf128 addr:$src2)), +def : Pat<(X86for FR128:$src1, (memopf128 addr:$src2)), (COPY_TO_REGCLASS (ORPSrm (COPY_TO_REGCLASS FR128:$src1, VR128), f128mem:$src2), FR128)>; @@ -8265,7 +8269,7 @@ def : Pat<(or FR128:$src1, FR128:$src2), (ORPSrr (COPY_TO_REGCLASS FR128:$src1, VR128), (COPY_TO_REGCLASS FR128:$src2, VR128)), FR128)>; -def : Pat<(X86fxor FR128:$src1, (loadf128 addr:$src2)), +def : Pat<(X86fxor FR128:$src1, (memopf128 addr:$src2)), (COPY_TO_REGCLASS (XORPSrm (COPY_TO_REGCLASS FR128:$src1, VR128), f128mem:$src2), FR128)>; diff --git a/llvm/test/CodeGen/X86/extract-store.ll b/llvm/test/CodeGen/X86/extract-store.ll index 871d66d..41c2f5c 100644 --- a/llvm/test/CodeGen/X86/extract-store.ll +++ b/llvm/test/CodeGen/X86/extract-store.ll @@ -554,7 +554,7 @@ define void @extract_f128_0(fp128* nocapture %dst, <2 x fp128> %foo) nounwind { ; ; SSE-F128-LABEL: extract_f128_0: ; SSE-F128: # %bb.0: -; SSE-F128-NEXT: 
movaps %xmm0, (%rdi) +; SSE-F128-NEXT: movups %xmm0, (%rdi) ; SSE-F128-NEXT: retq %vecext = extractelement <2 x fp128> %foo, i32 0 store fp128 %vecext, fp128* %dst, align 1 @@ -606,7 +606,7 @@ define void @extract_f128_1(fp128* nocapture %dst, <2 x fp128> %foo) nounwind { ; ; SSE-F128-LABEL: extract_f128_1: ; SSE-F128: # %bb.0: -; SSE-F128-NEXT: movaps %xmm1, (%rdi) +; SSE-F128-NEXT: movups %xmm1, (%rdi) ; SSE-F128-NEXT: retq %vecext = extractelement <2 x fp128> %foo, i32 1 store fp128 %vecext, fp128* %dst, align 1 diff --git a/llvm/test/CodeGen/X86/fp128-select.ll b/llvm/test/CodeGen/X86/fp128-select.ll index 85f7d97..503c7a9 100644 --- a/llvm/test/CodeGen/X86/fp128-select.ll +++ b/llvm/test/CodeGen/X86/fp128-select.ll @@ -18,7 +18,7 @@ define void @test_select(fp128* %p, fp128* %q, i1 zeroext %c) { ; MMX-NEXT: movaps %xmm0, (%rsi) ; MMX-NEXT: retq ; MMX-NEXT: .LBB0_1: -; MMX-NEXT: movaps (%rdi), %xmm0 +; MMX-NEXT: movups (%rdi), %xmm0 ; MMX-NEXT: movaps %xmm0, (%rsi) ; MMX-NEXT: retq ; -- 2.7.4