if (DAG.getDataLayout().isBigEndian())
PtrOff = (BitWidth + 7 - NewBW) / 8 - PtrOff;
+ bool IsFast = false;
Align NewAlign = commonAlignment(LD->getAlign(), PtrOff);
- Type *NewVTTy = NewVT.getTypeForEVT(*DAG.getContext());
- if (NewAlign < DAG.getDataLayout().getABITypeAlign(NewVTTy))
+ if (!TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), NewVT,
+ LD->getAddressSpace(), NewAlign,
+ LD->getMemOperand()->getFlags(), &IsFast) ||
+ !IsFast)
return SDValue();
SDValue NewPtr =
store i64 %r12, i64* %ptr, align 8
ret void
}
+
+define void @foo_noalign(i64* %ptr) {
+; CHECK-LABEL: foo_noalign:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: orb $16, (%rdi)
+; CHECK-NEXT: retq
+ %r11 = load i64, i64* %ptr, align 1
+ %r12 = or i64 16, %r11
+ store i64 %r12, i64* %ptr, align 1
+ ret void
+}
%1 = or i32 %0, 65536
store i32 %1, i32* bitcast (i16* getelementptr (%struct.bf, %struct.bf* @bfi, i32 0, i32 1) to i32*), align 8
ret void
+}
+define dso_local void @t1_noalign() nounwind optsize ssp {
+; CHECK-LABEL: t1_noalign:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: orb $1, bfi+10(%rip)
+; CHECK-NEXT: retq
+entry:
+ %0 = load i32, i32* bitcast (i16* getelementptr (%struct.bf, %struct.bf* @bfi, i32 0, i32 1) to i32*), align 1
+ %1 = or i32 %0, 65536
+ store i32 %1, i32* bitcast (i16* getelementptr (%struct.bf, %struct.bf* @bfi, i32 0, i32 1) to i32*), align 1
+ ret void
}
define dso_local void @t2() nounwind optsize ssp {
%1 = or i32 %0, 16842752
store i32 %1, i32* bitcast (i16* getelementptr (%struct.bf, %struct.bf* @bfi, i32 0, i32 1) to i32*), align 8
ret void
-
}