// This pattern helps select MOVLPS on SSE1 only targets. With SSE2 we'll
// end up with a movsd or blend instead of shufp.
- def : Pat<(X86Shufp (memopv4f32 addr:$src2), VR128:$src1, (i8 -28)),
+ // No need for an aligned load, we're only loading 64 bits.
+ def : Pat<(X86Shufp (loadv4f32 addr:$src2), VR128:$src1, (i8 -28)),
(MOVLPSrm VR128:$src1, addr:$src2)>;
}
// This pattern helps select MOVHPS on SSE1 only targets. With SSE2 we'll
// end up with a movsd or blend instead of shufp.
- def : Pat<(X86Movlhps VR128:$src1, (memopv4f32 addr:$src2)),
+ // No need for an aligned load, we're only loading 64 bits.
+ def : Pat<(X86Movlhps VR128:$src1, (loadv4f32 addr:$src2)),
(MOVHPSrm VR128:$src1, addr:$src2)>;
}
; SSE1: # %bb.0:
; SSE1-NEXT: movhps {{.*#+}} xmm0 = xmm0[0,1],mem[0,1]
; SSE1-NEXT: retq
- %b = load <4 x float>, <4 x float>* %pb, align 16
+ %b = load <4 x float>, <4 x float>* %pb, align 1
%shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
ret <4 x float> %shuffle
}
; SSE1: # %bb.0:
; SSE1-NEXT: movlps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
; SSE1-NEXT: retq
- %b = load <4 x float>, <4 x float>* %pb, align 16
+ %b = load <4 x float>, <4 x float>* %pb, align 1
%shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4, i32 5, i32 2, i32 3>
ret <4 x float> %shuffle
}