QualType Ty = E->getType();
llvm::Type *RealResTy = ConvertType(Ty);
- llvm::Type *PtrTy = llvm::IntegerType::get(
- getLLVMContext(), getContext().getTypeSize(Ty))->getPointerTo();
+ llvm::Type *IntTy =
+ llvm::IntegerType::get(getLLVMContext(), getContext().getTypeSize(Ty));
+ llvm::Type *PtrTy = IntTy->getPointerTo();
LoadAddr = Builder.CreateBitCast(LoadAddr, PtrTy);
Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex
? Intrinsic::aarch64_ldaxr
: Intrinsic::aarch64_ldxr,
PtrTy);
- Value *Val = Builder.CreateCall(F, LoadAddr, "ldxr");
+ CallInst *Val = Builder.CreateCall(F, LoadAddr, "ldxr");
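+ // Annotate the pointer argument with the loaded element type so later
+ // passes and the backend can recover the access size; e.g. the call becomes
+ //   call i64 @llvm.aarch64.ldxr.p0i32(i32* elementtype(i32) %addr)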
+ Val->addParamAttr(
+ 0, Attribute::get(getLLVMContext(), Attribute::ElementType, IntTy));
if (RealResTy->isPointerTy())
return Builder.CreateIntToPtr(Val, RealResTy);
llvm::Type *IntResTy = llvm::IntegerType::get(
getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy));
- Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
- return Builder.CreateBitCast(Val, RealResTy);
+ return Builder.CreateBitCast(Builder.CreateTruncOrBitCast(Val, IntResTy),
+ RealResTy);
}
if ((BuiltinID == AArch64::BI__builtin_arm_strex ||
? Intrinsic::aarch64_stlxr
: Intrinsic::aarch64_stxr,
StoreAddr->getType());
- return Builder.CreateCall(F, {StoreVal, StoreAddr}, "stxr");
+ CallInst *CI = Builder.CreateCall(F, {StoreVal, StoreAddr}, "stxr");
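+ // The store-exclusive intrinsics take the pointer as their second operand,
+ // so the elementtype attribute goes on parameter index 1 here.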
+ CI->addParamAttr(
+ 1, Attribute::get(getLLVMContext(), Attribute::ElementType, StoreTy));
+ return CI;
}
if (BuiltinID == AArch64::BI__getReg) {
// AArch64-NEXT: [[TMP0:%.*]] = bitcast i8* [[P:%.*]] to i32*
// AArch64-NEXT: br label [[DO_BODY_I:%.*]]
// AArch64: do.body.i:
-// AArch64-NEXT: [[LDXR_I:%.*]] = call i64 @llvm.aarch64.ldxr.p0i32(i32* [[TMP0]]) [[ATTR3]]
+// AArch64-NEXT: [[LDXR_I:%.*]] = call i64 @llvm.aarch64.ldxr.p0i32(i32* elementtype(i32) [[TMP0]]) [[ATTR3]]
// AArch64-NEXT: [[TMP1:%.*]] = trunc i64 [[LDXR_I]] to i32
// AArch64-NEXT: [[TMP2:%.*]] = zext i32 [[X:%.*]] to i64
-// AArch64-NEXT: [[STXR_I:%.*]] = call i32 @llvm.aarch64.stxr.p0i32(i64 [[TMP2]], i32* [[TMP0]]) [[ATTR3]]
+// AArch64-NEXT: [[STXR_I:%.*]] = call i32 @llvm.aarch64.stxr.p0i32(i64 [[TMP2]], i32* elementtype(i32) [[TMP0]]) [[ATTR3]]
// AArch64-NEXT: [[TOBOOL_I:%.*]] = icmp ne i32 [[STXR_I]], 0
// AArch64-NEXT: br i1 [[TOBOOL_I]], label [[DO_BODY_I]], label [[__SWP_EXIT:%.*]], [[LOOP6:!llvm.loop !.*]]
// AArch64: __swp.exit:
// CHECK: [[INTRES:%.*]] = call i32 @llvm.arm.ldrex.p0i8(i8* %addr)
// CHECK: trunc i32 [[INTRES]] to i8
-// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldxr.p0i8(i8* %addr)
+// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldxr.p0i8(i8* elementtype(i8) %addr)
// CHECK-ARM64: trunc i64 [[INTRES]] to i8
sum += __builtin_arm_ldrex((short *)addr);
// CHECK: trunc i32 [[INTRES]] to i16
// CHECK-ARM64: [[ADDR16:%.*]] = bitcast i8* %addr to i16*
-// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldxr.p0i16(i16* [[ADDR16]])
+// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldxr.p0i16(i16* elementtype(i16) [[ADDR16]])
// CHECK-ARM64: trunc i64 [[INTRES]] to i16
sum += __builtin_arm_ldrex((int *)addr);
// CHECK: call i32 @llvm.arm.ldrex.p0i32(i32* [[ADDR32]])
// CHECK-ARM64: [[ADDR32:%.*]] = bitcast i8* %addr to i32*
-// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldxr.p0i32(i32* [[ADDR32]])
+// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldxr.p0i32(i32* elementtype(i32) [[ADDR32]])
// CHECK-ARM64: trunc i64 [[INTRES]] to i32
sum += __builtin_arm_ldrex((long long *)addr);
// CHECK: call { i32, i32 } @llvm.arm.ldrexd(i8* [[TMP5]])
// CHECK-ARM64: [[ADDR64:%.*]] = bitcast i8* %addr to i64*
-// CHECK-ARM64: call i64 @llvm.aarch64.ldxr.p0i64(i64* [[ADDR64]])
+// CHECK-ARM64: call i64 @llvm.aarch64.ldxr.p0i64(i64* elementtype(i64) [[ADDR64]])
sum += __builtin_arm_ldrex(addr64);
// CHECK: [[ADDR64_AS8:%.*]] = bitcast i64* %addr64 to i8*
// CHECK: call { i32, i32 } @llvm.arm.ldrexd(i8* [[ADDR64_AS8]])
-// CHECK-ARM64: call i64 @llvm.aarch64.ldxr.p0i64(i64* %addr64)
+// CHECK-ARM64: call i64 @llvm.aarch64.ldxr.p0i64(i64* elementtype(i64) %addr64)
sum += __builtin_arm_ldrex(addrfloat);
// CHECK: [[INTADDR:%.*]] = bitcast float* %addrfloat to i32*
// CHECK: bitcast i32 [[INTRES]] to float
// CHECK-ARM64: [[INTADDR:%.*]] = bitcast float* %addrfloat to i32*
-// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldxr.p0i32(i32* [[INTADDR]])
+// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldxr.p0i32(i32* elementtype(i32) [[INTADDR]])
// CHECK-ARM64: [[TRUNCRES:%.*]] = trunc i64 [[INTRES]] to i32
// CHECK-ARM64: bitcast i32 [[TRUNCRES]] to float
// CHECK-ARM64: [[TMP4:%.*]] = bitcast i8* %addr to double*
// CHECK-ARM64: [[TMP5:%.*]] = bitcast double* [[TMP4]] to i64*
-// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldxr.p0i64(i64* [[TMP5]])
+// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldxr.p0i64(i64* elementtype(i64) [[TMP5]])
// CHECK-ARM64: bitcast i64 [[INTRES]] to double
sum += *__builtin_arm_ldrex((int **)addr);
// CHECK-ARM64: [[TMP4:%.*]] = bitcast i8* %addr to i32**
// CHECK-ARM64: [[TMP5:%.*]] = bitcast i32** [[TMP4]] to i64*
-// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldxr.p0i64(i64* [[TMP5]])
+// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldxr.p0i64(i64* elementtype(i64) [[TMP5]])
// CHECK-ARM64: inttoptr i64 [[INTRES]] to i32*
sum += __builtin_arm_ldrex((struct Simple **)addr)->a;
// CHECK-ARM64: [[TMP4:%.*]] = bitcast i8* %addr to %struct.Simple**
// CHECK-ARM64: [[TMP5:%.*]] = bitcast %struct.Simple** [[TMP4]] to i64*
-// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldxr.p0i64(i64* [[TMP5]])
+// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldxr.p0i64(i64* elementtype(i64) [[TMP5]])
// CHECK-ARM64: inttoptr i64 [[INTRES]] to %struct.Simple*
return sum;
}
// CHECK: [[INTRES:%.*]] = call i32 @llvm.arm.ldaex.p0i8(i8* %addr)
// CHECK: trunc i32 [[INTRES]] to i8
-// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldaxr.p0i8(i8* %addr)
+// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldaxr.p0i8(i8* elementtype(i8) %addr)
// CHECK-ARM64: trunc i64 [[INTRES]] to i8
sum += __builtin_arm_ldaex((short *)addr);
// CHECK: trunc i32 [[INTRES]] to i16
// CHECK-ARM64: [[ADDR16:%.*]] = bitcast i8* %addr to i16*
-// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldaxr.p0i16(i16* [[ADDR16]])
+// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldaxr.p0i16(i16* elementtype(i16) [[ADDR16]])
// CHECK-ARM64: [[TRUNCRES:%.*]] = trunc i64 [[INTRES]] to i16
sum += __builtin_arm_ldaex((int *)addr);
// CHECK: call i32 @llvm.arm.ldaex.p0i32(i32* [[ADDR32]])
// CHECK-ARM64: [[ADDR32:%.*]] = bitcast i8* %addr to i32*
-// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldaxr.p0i32(i32* [[ADDR32]])
+// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldaxr.p0i32(i32* elementtype(i32) [[ADDR32]])
// CHECK-ARM64: trunc i64 [[INTRES]] to i32
sum += __builtin_arm_ldaex((long long *)addr);
// CHECK: call { i32, i32 } @llvm.arm.ldaexd(i8* [[TMP5]])
// CHECK-ARM64: [[ADDR64:%.*]] = bitcast i8* %addr to i64*
-// CHECK-ARM64: call i64 @llvm.aarch64.ldaxr.p0i64(i64* [[ADDR64]])
+// CHECK-ARM64: call i64 @llvm.aarch64.ldaxr.p0i64(i64* elementtype(i64) [[ADDR64]])
sum += __builtin_arm_ldaex(addr64);
// CHECK: [[ADDR64_AS8:%.*]] = bitcast i64* %addr64 to i8*
// CHECK: call { i32, i32 } @llvm.arm.ldaexd(i8* [[ADDR64_AS8]])
-// CHECK-ARM64: call i64 @llvm.aarch64.ldaxr.p0i64(i64* %addr64)
+// CHECK-ARM64: call i64 @llvm.aarch64.ldaxr.p0i64(i64* elementtype(i64) %addr64)
sum += __builtin_arm_ldaex(addrfloat);
// CHECK: [[INTADDR:%.*]] = bitcast float* %addrfloat to i32*
// CHECK: bitcast i32 [[INTRES]] to float
// CHECK-ARM64: [[INTADDR:%.*]] = bitcast float* %addrfloat to i32*
-// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldaxr.p0i32(i32* [[INTADDR]])
+// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldaxr.p0i32(i32* elementtype(i32) [[INTADDR]])
// CHECK-ARM64: [[TRUNCRES:%.*]] = trunc i64 [[INTRES]] to i32
// CHECK-ARM64: bitcast i32 [[TRUNCRES]] to float
// CHECK-ARM64: [[TMP4:%.*]] = bitcast i8* %addr to double*
// CHECK-ARM64: [[TMP5:%.*]] = bitcast double* [[TMP4]] to i64*
-// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldaxr.p0i64(i64* [[TMP5]])
+// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldaxr.p0i64(i64* elementtype(i64) [[TMP5]])
// CHECK-ARM64: bitcast i64 [[INTRES]] to double
sum += *__builtin_arm_ldaex((int **)addr);
// CHECK-ARM64: [[TMP4:%.*]] = bitcast i8* %addr to i32**
// CHECK-ARM64: [[TMP5:%.*]] = bitcast i32** [[TMP4]] to i64*
-// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldaxr.p0i64(i64* [[TMP5]])
+// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldaxr.p0i64(i64* elementtype(i64) [[TMP5]])
// CHECK-ARM64: inttoptr i64 [[INTRES]] to i32*
sum += __builtin_arm_ldaex((struct Simple **)addr)->a;
// CHECK-ARM64: [[TMP4:%.*]] = bitcast i8* %addr to %struct.Simple**
// CHECK-ARM64: [[TMP5:%.*]] = bitcast %struct.Simple** [[TMP4]] to i64*
-// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldaxr.p0i64(i64* [[TMP5]])
+// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldaxr.p0i64(i64* elementtype(i64) [[TMP5]])
// CHECK-ARM64: inttoptr i64 [[INTRES]] to %struct.Simple*
return sum;
}
res |= __builtin_arm_strex(4, addr);
// CHECK: call i32 @llvm.arm.strex.p0i8(i32 4, i8* %addr)
-// CHECK-ARM64: call i32 @llvm.aarch64.stxr.p0i8(i64 4, i8* %addr)
+// CHECK-ARM64: call i32 @llvm.aarch64.stxr.p0i8(i64 4, i8* elementtype(i8) %addr)
res |= __builtin_arm_strex(42, (short *)addr);
// CHECK: [[ADDR16:%.*]] = bitcast i8* %addr to i16*
// CHECK: call i32 @llvm.arm.strex.p0i16(i32 42, i16* [[ADDR16]])
// CHECK-ARM64: [[ADDR16:%.*]] = bitcast i8* %addr to i16*
-// CHECK-ARM64: call i32 @llvm.aarch64.stxr.p0i16(i64 42, i16* [[ADDR16]])
+// CHECK-ARM64: call i32 @llvm.aarch64.stxr.p0i16(i64 42, i16* elementtype(i16) [[ADDR16]])
res |= __builtin_arm_strex(42, (int *)addr);
// CHECK: [[ADDR32:%.*]] = bitcast i8* %addr to i32*
// CHECK: call i32 @llvm.arm.strex.p0i32(i32 42, i32* [[ADDR32]])
// CHECK-ARM64: [[ADDR32:%.*]] = bitcast i8* %addr to i32*
-// CHECK-ARM64: call i32 @llvm.aarch64.stxr.p0i32(i64 42, i32* [[ADDR32]])
+// CHECK-ARM64: call i32 @llvm.aarch64.stxr.p0i32(i64 42, i32* elementtype(i32) [[ADDR32]])
res |= __builtin_arm_strex(42, (long long *)addr);
// CHECK: store i64 42, i64* [[TMP:%.*]], align 8
// CHECK: call i32 @llvm.arm.strexd(i32 [[LO]], i32 [[HI]], i8* [[TMP5]])
// CHECK-ARM64: [[ADDR64:%.*]] = bitcast i8* %addr to i64*
-// CHECK-ARM64: call i32 @llvm.aarch64.stxr.p0i64(i64 42, i64* [[ADDR64]])
+// CHECK-ARM64: call i32 @llvm.aarch64.stxr.p0i64(i64 42, i64* elementtype(i64) [[ADDR64]])
res |= __builtin_arm_strex(2.71828f, (float *)addr);
// CHECK: [[TMP4:%.*]] = bitcast i8* %addr to float*
// CHECK-ARM64: [[TMP4:%.*]] = bitcast i8* %addr to float*
// CHECK-ARM64: [[TMP5:%.*]] = bitcast float* [[TMP4]] to i32*
-// CHECK-ARM64: call i32 @llvm.aarch64.stxr.p0i32(i64 1076754509, i32* [[TMP5]])
+// CHECK-ARM64: call i32 @llvm.aarch64.stxr.p0i32(i64 1076754509, i32* elementtype(i32) [[TMP5]])
res |= __builtin_arm_strex(3.14159, (double *)addr);
// CHECK: store double 3.141590e+00, double* [[TMP:%.*]], align 8
// CHECK-ARM64: [[TMP4:%.*]] = bitcast i8* %addr to double*
// CHECK-ARM64: [[TMP5:%.*]] = bitcast double* [[TMP4]] to i64*
-// CHECK-ARM64: call i32 @llvm.aarch64.stxr.p0i64(i64 4614256650576692846, i64* [[TMP5]])
+// CHECK-ARM64: call i32 @llvm.aarch64.stxr.p0i64(i64 4614256650576692846, i64* elementtype(i64) [[TMP5]])
res |= __builtin_arm_strex(&var, (struct Simple **)addr);
// CHECK: [[TMP4:%.*]] = bitcast i8* %addr to %struct.Simple**
// CHECK-ARM64: [[TMP4:%.*]] = bitcast i8* %addr to %struct.Simple**
// CHECK-ARM64: [[TMP5:%.*]] = bitcast %struct.Simple** [[TMP4]] to i64*
// CHECK-ARM64: [[INTVAL:%.*]] = ptrtoint %struct.Simple* %var to i64
-// CHECK-ARM64: call i32 @llvm.aarch64.stxr.p0i64(i64 [[INTVAL]], i64* [[TMP5]])
+// CHECK-ARM64: call i32 @llvm.aarch64.stxr.p0i64(i64 [[INTVAL]], i64* elementtype(i64) [[TMP5]])
return res;
}
res |= __builtin_arm_stlex(4, addr);
// CHECK: call i32 @llvm.arm.stlex.p0i8(i32 4, i8* %addr)
-// CHECK-ARM64: call i32 @llvm.aarch64.stlxr.p0i8(i64 4, i8* %addr)
+// CHECK-ARM64: call i32 @llvm.aarch64.stlxr.p0i8(i64 4, i8* elementtype(i8) %addr)
res |= __builtin_arm_stlex(42, (short *)addr);
// CHECK: [[ADDR16:%.*]] = bitcast i8* %addr to i16*
// CHECK: call i32 @llvm.arm.stlex.p0i16(i32 42, i16* [[ADDR16]])
// CHECK-ARM64: [[ADDR16:%.*]] = bitcast i8* %addr to i16*
-// CHECK-ARM64: call i32 @llvm.aarch64.stlxr.p0i16(i64 42, i16* [[ADDR16]])
+// CHECK-ARM64: call i32 @llvm.aarch64.stlxr.p0i16(i64 42, i16* elementtype(i16) [[ADDR16]])
res |= __builtin_arm_stlex(42, (int *)addr);
// CHECK: [[ADDR32:%.*]] = bitcast i8* %addr to i32*
// CHECK: call i32 @llvm.arm.stlex.p0i32(i32 42, i32* [[ADDR32]])
// CHECK-ARM64: [[ADDR32:%.*]] = bitcast i8* %addr to i32*
-// CHECK-ARM64: call i32 @llvm.aarch64.stlxr.p0i32(i64 42, i32* [[ADDR32]])
+// CHECK-ARM64: call i32 @llvm.aarch64.stlxr.p0i32(i64 42, i32* elementtype(i32) [[ADDR32]])
res |= __builtin_arm_stlex(42, (long long *)addr);
// CHECK: store i64 42, i64* [[TMP:%.*]], align 8
// CHECK: call i32 @llvm.arm.stlexd(i32 [[LO]], i32 [[HI]], i8* [[TMP5]])
// CHECK-ARM64: [[ADDR64:%.*]] = bitcast i8* %addr to i64*
-// CHECK-ARM64: call i32 @llvm.aarch64.stlxr.p0i64(i64 42, i64* [[ADDR64]])
+// CHECK-ARM64: call i32 @llvm.aarch64.stlxr.p0i64(i64 42, i64* elementtype(i64) [[ADDR64]])
res |= __builtin_arm_stlex(2.71828f, (float *)addr);
// CHECK: [[TMP4:%.*]] = bitcast i8* %addr to float*
// CHECK-ARM64: [[TMP4:%.*]] = bitcast i8* %addr to float*
// CHECK-ARM64: [[TMP5:%.*]] = bitcast float* [[TMP4]] to i32*
-// CHECK-ARM64: call i32 @llvm.aarch64.stlxr.p0i32(i64 1076754509, i32* [[TMP5]])
+// CHECK-ARM64: call i32 @llvm.aarch64.stlxr.p0i32(i64 1076754509, i32* elementtype(i32) [[TMP5]])
res |= __builtin_arm_stlex(3.14159, (double *)addr);
// CHECK: store double 3.141590e+00, double* [[TMP:%.*]], align 8
// CHECK-ARM64: [[TMP4:%.*]] = bitcast i8* %addr to double*
// CHECK-ARM64: [[TMP5:%.*]] = bitcast double* [[TMP4]] to i64*
-// CHECK-ARM64: call i32 @llvm.aarch64.stlxr.p0i64(i64 4614256650576692846, i64* [[TMP5]])
+// CHECK-ARM64: call i32 @llvm.aarch64.stlxr.p0i64(i64 4614256650576692846, i64* elementtype(i64) [[TMP5]])
res |= __builtin_arm_stlex(&var, (struct Simple **)addr);
// CHECK: [[TMP4:%.*]] = bitcast i8* %addr to %struct.Simple**
// CHECK-ARM64: [[TMP4:%.*]] = bitcast i8* %addr to %struct.Simple**
// CHECK-ARM64: [[TMP5:%.*]] = bitcast %struct.Simple** [[TMP4]] to i64*
// CHECK-ARM64: [[INTVAL:%.*]] = ptrtoint %struct.Simple* %var to i64
-// CHECK-ARM64: call i32 @llvm.aarch64.stlxr.p0i64(i64 [[INTVAL]], i64* [[TMP5]])
+// CHECK-ARM64: call i32 @llvm.aarch64.stlxr.p0i64(i64 [[INTVAL]], i64* elementtype(i64) [[TMP5]])
return res;
}
// CHECK: call i32 @llvm.arm.ldrex.p0i8(i8* @b)
// CHECK-ARM64-LABEL: @_Z10test_ldrexv()
-// CHECK-ARM64: call i64 @llvm.aarch64.ldxr.p0i8(i8* @b)
+// CHECK-ARM64: call i64 @llvm.aarch64.ldxr.p0i8(i8* elementtype(i8) @b)
void test_ldrex() {
b = __builtin_arm_ldrex(&b);
// CHECK: %{{.*}} = call i32 @llvm.arm.strex.p0i8(i32 1, i8* @b)
// CHECK-ARM64-LABEL: @_Z10tset_strexv()
-// CHECK-ARM64: %{{.*}} = call i32 @llvm.aarch64.stxr.p0i8(i64 1, i8* @b)
+// CHECK-ARM64: %{{.*}} = call i32 @llvm.aarch64.stxr.p0i8(i64 1, i8* elementtype(i8) @b)
void tset_strex() {
__builtin_arm_strex(true, &b);
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
switch (CB->getIntrinsicID()) {
case Intrinsic::preserve_array_access_index:
case Intrinsic::preserve_struct_access_index:
- if (!Attrs.getParamElementType(0)) {
- Type *ElTy = getPtrElementTypeByID(ArgTyIDs[0]);
+ case Intrinsic::aarch64_ldaxr:
+ case Intrinsic::aarch64_ldxr:
+ case Intrinsic::aarch64_stlxr:
+ case Intrinsic::aarch64_stxr: {
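+ // Upgrade old bitcode by attaching an elementtype attribute recovered from
+ // the recorded pointee type; the ld[a]xr intrinsics take the pointer as
+ // operand 0, st[l]xr as operand 1.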
+ unsigned ArgNo = CB->getIntrinsicID() == Intrinsic::aarch64_stlxr ||
+ CB->getIntrinsicID() == Intrinsic::aarch64_stxr
+ ? 1
+ : 0;
+ if (!Attrs.getParamElementType(ArgNo)) {
+ Type *ElTy = getPtrElementTypeByID(ArgTyIDs[ArgNo]);
if (!ElTy)
return error("Missing element type for elementtype upgrade");
Attribute NewAttr = Attribute::get(Context, Attribute::ElementType, ElTy);
- Attrs = Attrs.addParamAttribute(Context, 0, NewAttr);
+ Attrs = Attrs.addParamAttribute(Context, ArgNo, NewAttr);
}
break;
+ }
default:
break;
}
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
break;
}
case Intrinsic::preserve_array_access_index:
- case Intrinsic::preserve_struct_access_index: {
+ case Intrinsic::preserve_struct_access_index:
+ case Intrinsic::aarch64_ldaxr:
+ case Intrinsic::aarch64_ldxr: {
Type *ElemTy = Call.getParamElementType(0);
Assert(ElemTy,
"Intrinsic requires elementtype attribute on first argument.",
&Call);
break;
}
+ case Intrinsic::aarch64_stlxr:
+ case Intrinsic::aarch64_stxr: {
+ Type *ElemTy = Call.getAttributes().getParamElementType(1);
+ Assert(ElemTy,
+ "Intrinsic requires elementtype attribute on second argument.",
+ &Call);
+ break;
+ }
};
}
}
case Intrinsic::aarch64_ldaxr:
case Intrinsic::aarch64_ldxr: {
- PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
+ Type *ValTy = I.getParamElementType(0);
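+ // The accessed type now comes from the elementtype attribute rather than
+ // from the pointer operand's pointee type.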
Info.opc = ISD::INTRINSIC_W_CHAIN;
- Info.memVT = MVT::getVT(PtrTy->getPointerElementType());
+ Info.memVT = MVT::getVT(ValTy);
Info.ptrVal = I.getArgOperand(0);
Info.offset = 0;
- Info.align = DL.getABITypeAlign(PtrTy->getPointerElementType());
+ Info.align = DL.getABITypeAlign(ValTy);
Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile;
return true;
}
case Intrinsic::aarch64_stlxr:
case Intrinsic::aarch64_stxr: {
- PointerType *PtrTy = cast<PointerType>(I.getArgOperand(1)->getType());
+ Type *ValTy = I.getParamElementType(1);
Info.opc = ISD::INTRINSIC_W_CHAIN;
- Info.memVT = MVT::getVT(PtrTy->getPointerElementType());
+ Info.memVT = MVT::getVT(ValTy);
Info.ptrVal = I.getArgOperand(1);
Info.offset = 0;
- Info.align = DL.getABITypeAlign(PtrTy->getPointerElementType());
+ Info.align = DL.getABITypeAlign(ValTy);
Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile;
return true;
}
const DataLayout &DL = M->getDataLayout();
IntegerType *IntEltTy = Builder.getIntNTy(DL.getTypeSizeInBits(ValueTy));
- Value *Trunc = Builder.CreateTrunc(Builder.CreateCall(Ldxr, Addr), IntEltTy);
+ CallInst *CI = Builder.CreateCall(Ldxr, Addr);
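+ // The verifier now requires elementtype on these intrinsic calls, so the
+ // LL/SC expansion has to annotate the pointer operand as well.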
+ CI->addParamAttr(
+ 0, Attribute::get(Builder.getContext(), Attribute::ElementType, ValueTy));
+ Value *Trunc = Builder.CreateTrunc(CI, IntEltTy);
return Builder.CreateBitCast(Trunc, ValueTy);
}
IntegerType *IntValTy = Builder.getIntNTy(DL.getTypeSizeInBits(Val->getType()));
Val = Builder.CreateBitCast(Val, IntValTy);
- return Builder.CreateCall(Stxr,
- {Builder.CreateZExtOrBitCast(
- Val, Stxr->getFunctionType()->getParamType(0)),
- Addr});
+ CallInst *CI = Builder.CreateCall(
+ Stxr, {Builder.CreateZExtOrBitCast(
+ Val, Stxr->getFunctionType()->getParamType(0)),
+ Addr});
+ CI->addParamAttr(1, Attribute::get(Builder.getContext(),
+ Attribute::ElementType, Val->getType()));
+ return CI;
}
bool AArch64TargetLowering::functionArgumentNeedsConsecutiveRegisters(
--- /dev/null
+; RUN: llvm-dis < %S/upgrade-aarch64-ldstxr.bc | FileCheck %s
+
+define void @f(i32* %p) {
+; CHECK: call i64 @llvm.aarch64.ldxr.p0i32(i32* elementtype(i32)
+ %a = call i64 @llvm.aarch64.ldxr.p0i32(i32* %p)
+; CHECK: call i32 @llvm.aarch64.stxr.p0i32(i64 0, i32* elementtype(i32)
+ %c = call i32 @llvm.aarch64.stxr.p0i32(i64 0, i32* %p)
+
+; CHECK: call i64 @llvm.aarch64.ldaxr.p0i32(i32* elementtype(i32)
+ %a2 = call i64 @llvm.aarch64.ldaxr.p0i32(i32* %p)
+; CHECK: call i32 @llvm.aarch64.stlxr.p0i32(i64 0, i32* elementtype(i32)
+ %c2 = call i32 @llvm.aarch64.stlxr.p0i32(i64 0, i32* %p)
+ ret void
+}
+
+declare i64 @llvm.aarch64.ldxr.p0i32(i32*)
+declare i64 @llvm.aarch64.ldaxr.p0i32(i32*)
+declare i32 @llvm.aarch64.stxr.p0i32(i64, i32*)
+declare i32 @llvm.aarch64.stlxr.p0i32(i64, i32*)
; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[VAL:%[0-9]+]]:_(s64) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.ldxr), [[ADDR]](p0) :: (volatile load (s32) from %ir.addr)
; CHECK: G_TRUNC [[VAL]](s64)
- %val = call i64 @llvm.aarch64.ldxr.p0i32(i32* %addr)
+ %val = call i64 @llvm.aarch64.ldxr.p0i32(i32* elementtype(i32) %addr)
%trunc = trunc i64 %val to i32
ret i32 %trunc
}
; GISEL: ldxrb w[[LOADVAL:[0-9]+]], [x0]
; GISEL-NOT: uxtb
; GISEL: str x[[LOADVAL]], [{{x[0-9]+}}, :lo12:var]
- %val = call i64 @llvm.aarch64.ldxr.p0i8(i8* %addr)
+ %val = call i64 @llvm.aarch64.ldxr.p0i8(i8* elementtype(i8) %addr)
%shortval = trunc i64 %val to i8
%extval = zext i8 %shortval to i64
store i64 %extval, i64* @var, align 8
; GISEL: ldxrh w[[LOADVAL:[0-9]+]], [x0]
; GISEL-NOT: uxtb
; GISEL: str x[[LOADVAL]], [{{x[0-9]+}}, :lo12:var]
- %val = call i64 @llvm.aarch64.ldxr.p0i16(i16* %addr)
+ %val = call i64 @llvm.aarch64.ldxr.p0i16(i16* elementtype(i16) %addr)
%shortval = trunc i64 %val to i16
%extval = zext i16 %shortval to i64
store i64 %extval, i64* @var, align 8
; GISEL: ldxr w[[LOADVAL:[0-9]+]], [x0]
; GISEL-NOT: uxtb
; GISEL: str x[[LOADVAL]], [{{x[0-9]+}}, :lo12:var]
- %val = call i64 @llvm.aarch64.ldxr.p0i32(i32* %addr)
+ %val = call i64 @llvm.aarch64.ldxr.p0i32(i32* elementtype(i32) %addr)
%shortval = trunc i64 %val to i32
%extval = zext i32 %shortval to i64
store i64 %extval, i64* @var, align 8
; GISEL: ldxr x[[LOADVAL:[0-9]+]], [x0]
; GISEL-NOT: uxtb
; GISEL: str x[[LOADVAL]], [{{x[0-9]+}}, :lo12:var]
- %val = call i64 @llvm.aarch64.ldxr.p0i64(i64* %addr)
+ %val = call i64 @llvm.aarch64.ldxr.p0i64(i64* elementtype(i64) %addr)
store i64 %val, i64* @var, align 8
ret void
}
; GISEL-NOT: and
; GISEL: stxrb w0, w1, [x2]
%extval = zext i8 %val to i64
- %res = call i32 @llvm.aarch64.stxr.p0i8(i64 %extval, i8* %addr)
+ %res = call i32 @llvm.aarch64.stxr.p0i8(i64 %extval, i8* elementtype(i8) %addr)
ret i32 %res
}
; GISEL-NOT: and
; GISEL: stxrh w0, w1, [x2]
%extval = zext i16 %val to i64
- %res = call i32 @llvm.aarch64.stxr.p0i16(i64 %extval, i16* %addr)
+ %res = call i32 @llvm.aarch64.stxr.p0i16(i64 %extval, i16* elementtype(i16) %addr)
ret i32 %res
}
; GISEL-NOT: and
; GISEL: stxr w0, w1, [x2]
%extval = zext i32 %val to i64
- %res = call i32 @llvm.aarch64.stxr.p0i32(i64 %extval, i32* %addr)
+ %res = call i32 @llvm.aarch64.stxr.p0i32(i64 %extval, i32* elementtype(i32) %addr)
ret i32 %res
}
; CHECK: stxr w0, x1, [x2]
; GISEL-LABEL: test_store_i64:
; GISEL: stxr w0, x1, [x2]
- %res = call i32 @llvm.aarch64.stxr.p0i64(i64 %val, i64* %addr)
+ %res = call i32 @llvm.aarch64.stxr.p0i64(i64 %val, i64* elementtype(i64) %addr)
ret i32 %res
}
; GISEL-LABEL: test_load_acquire_i8:
; GISEL: ldaxrb w[[LOADVAL:[0-9]+]], [x0]
; GISEL-DAG: str x[[LOADVAL]], [{{x[0-9]+}}, :lo12:var]
- %val = call i64 @llvm.aarch64.ldaxr.p0i8(i8* %addr)
+ %val = call i64 @llvm.aarch64.ldaxr.p0i8(i8* elementtype(i8) %addr)
%shortval = trunc i64 %val to i8
%extval = zext i8 %shortval to i64
store i64 %extval, i64* @var, align 8
; GISEL-LABEL: test_load_acquire_i16:
; GISEL: ldaxrh w[[LOADVAL:[0-9]+]], [x0]
; GISEL: str x[[LOADVAL]], [{{x[0-9]+}}, :lo12:var]
- %val = call i64 @llvm.aarch64.ldaxr.p0i16(i16* %addr)
+ %val = call i64 @llvm.aarch64.ldaxr.p0i16(i16* elementtype(i16) %addr)
%shortval = trunc i64 %val to i16
%extval = zext i16 %shortval to i64
store i64 %extval, i64* @var, align 8
; GISEL-LABEL: test_load_acquire_i32:
; GISEL: ldaxr w[[LOADVAL:[0-9]+]], [x0]
; GISEL: str x[[LOADVAL]], [{{x[0-9]+}}, :lo12:var]
- %val = call i64 @llvm.aarch64.ldaxr.p0i32(i32* %addr)
+ %val = call i64 @llvm.aarch64.ldaxr.p0i32(i32* elementtype(i32) %addr)
%shortval = trunc i64 %val to i32
%extval = zext i32 %shortval to i64
store i64 %extval, i64* @var, align 8
; GISEL-LABEL: test_load_acquire_i64:
; GISEL: ldaxr x[[LOADVAL:[0-9]+]], [x0]
; GISEL: str x[[LOADVAL]], [{{x[0-9]+}}, :lo12:var]
- %val = call i64 @llvm.aarch64.ldaxr.p0i64(i64* %addr)
+ %val = call i64 @llvm.aarch64.ldaxr.p0i64(i64* elementtype(i64) %addr)
store i64 %val, i64* @var, align 8
ret void
}
; GISEL-NOT: and
; GISEL: stlxrb w0, w1, [x2]
%extval = zext i8 %val to i64
- %res = call i32 @llvm.aarch64.stlxr.p0i8(i64 %extval, i8* %addr)
+ %res = call i32 @llvm.aarch64.stlxr.p0i8(i64 %extval, i8* elementtype(i8) %addr)
ret i32 %res
}
; GISEL-NOT: and
; GISEL: stlxrh w0, w1, [x2]
%extval = zext i16 %val to i64
- %res = call i32 @llvm.aarch64.stlxr.p0i16(i64 %extval, i16* %addr)
+ %res = call i32 @llvm.aarch64.stlxr.p0i16(i64 %extval, i16* elementtype(i16) %addr)
ret i32 %res
}
; GISEL-NOT: and
; GISEL: stlxr w0, w1, [x2]
%extval = zext i32 %val to i64
- %res = call i32 @llvm.aarch64.stlxr.p0i32(i64 %extval, i32* %addr)
+ %res = call i32 @llvm.aarch64.stlxr.p0i32(i64 %extval, i32* elementtype(i32) %addr)
ret i32 %res
}
; CHECK: stlxr w0, x1, [x2]
; GISEL-LABEL: test_store_release_i64:
; GISEL: stlxr w0, x1, [x2]
- %res = call i32 @llvm.aarch64.stlxr.p0i64(i64 %val, i64* %addr)
+ %res = call i32 @llvm.aarch64.stlxr.p0i64(i64 %val, i64* elementtype(i64) %addr)
ret i32 %res
}
; CHECK-LABEL: test_ldxr_8:
; CHECK: ldxrb w0, [x0]
- %val = call i64 @llvm.aarch64.ldxr.p0i8(i8* %addr)
+ %val = call i64 @llvm.aarch64.ldxr.p0i8(i8* elementtype(i8) %addr)
%val8 = trunc i64 %val to i8
ret i8 %val8
}
; CHECK-LABEL: test_ldxr_16:
; CHECK: ldxrh w0, [x0]
- %val = call i64 @llvm.aarch64.ldxr.p0i16(i16* %addr)
+ %val = call i64 @llvm.aarch64.ldxr.p0i16(i16* elementtype(i16) %addr)
%val16 = trunc i64 %val to i16
ret i16 %val16
}
; CHECK-LABEL: test_ldxr_32:
; CHECK: ldxr w0, [x0]
- %val = call i64 @llvm.aarch64.ldxr.p0i32(i32* %addr)
+ %val = call i64 @llvm.aarch64.ldxr.p0i32(i32* elementtype(i32) %addr)
%val32 = trunc i64 %val to i32
ret i32 %val32
}
; CHECK-LABEL: test_ldxr_64:
; CHECK: ldxr x0, [x0]
- %val = call i64 @llvm.aarch64.ldxr.p0i64(i64* %addr)
+ %val = call i64 @llvm.aarch64.ldxr.p0i64(i64* elementtype(i64) %addr)
ret i64 %val
}
; CHECK-LABEL: test_ldaxr_8:
; CHECK: ldaxrb w0, [x0]
- %val = call i64 @llvm.aarch64.ldaxr.p0i8(i8* %addr)
+ %val = call i64 @llvm.aarch64.ldaxr.p0i8(i8* elementtype(i8) %addr)
%val8 = trunc i64 %val to i8
ret i8 %val8
}
; CHECK-LABEL: test_ldaxr_16:
; CHECK: ldaxrh w0, [x0]
- %val = call i64 @llvm.aarch64.ldaxr.p0i16(i16* %addr)
+ %val = call i64 @llvm.aarch64.ldaxr.p0i16(i16* elementtype(i16) %addr)
%val16 = trunc i64 %val to i16
ret i16 %val16
}
; CHECK-LABEL: test_ldaxr_32:
; CHECK: ldaxr w0, [x0]
- %val = call i64 @llvm.aarch64.ldaxr.p0i32(i32* %addr)
+ %val = call i64 @llvm.aarch64.ldaxr.p0i32(i32* elementtype(i32) %addr)
%val32 = trunc i64 %val to i32
ret i32 %val32
}
; CHECK-LABEL: test_ldaxr_64:
; CHECK: ldaxr x0, [x0]
- %val = call i64 @llvm.aarch64.ldaxr.p0i64(i64* %addr)
+ %val = call i64 @llvm.aarch64.ldaxr.p0i64(i64* elementtype(i64) %addr)
ret i64 %val
}
; CHECK: mov w0, [[TMP]]
%extval = zext i8 %val to i64
- %success = call i32 @llvm.aarch64.stxr.p0i8(i64 %extval, i8* %addr)
+ %success = call i32 @llvm.aarch64.stxr.p0i8(i64 %extval, i8* elementtype(i8) %addr)
ret i32 %success
}
; CHECK: mov w0, [[TMP]]
%extval = zext i16 %val to i64
- %success = call i32 @llvm.aarch64.stxr.p0i16(i64 %extval, i16* %addr)
+ %success = call i32 @llvm.aarch64.stxr.p0i16(i64 %extval, i16* elementtype(i16) %addr)
ret i32 %success
}
; CHECK: mov w0, [[TMP]]
%extval = zext i32 %val to i64
- %success = call i32 @llvm.aarch64.stxr.p0i32(i64 %extval, i32* %addr)
+ %success = call i32 @llvm.aarch64.stxr.p0i32(i64 %extval, i32* elementtype(i32) %addr)
ret i32 %success
}
; CHECK: stxr [[TMP:w[0-9]+]], x1, [x0]
; CHECK: mov w0, [[TMP]]
- %success = call i32 @llvm.aarch64.stxr.p0i64(i64 %val, i64* %addr)
+ %success = call i32 @llvm.aarch64.stxr.p0i64(i64 %val, i64* elementtype(i64) %addr)
ret i32 %success
}
; CHECK: mov w0, [[TMP]]
%extval = zext i8 %val to i64
- %success = call i32 @llvm.aarch64.stlxr.p0i8(i64 %extval, i8* %addr)
+ %success = call i32 @llvm.aarch64.stlxr.p0i8(i64 %extval, i8* elementtype(i8) %addr)
ret i32 %success
}
; CHECK: mov w0, [[TMP]]
%extval = zext i16 %val to i64
- %success = call i32 @llvm.aarch64.stlxr.p0i16(i64 %extval, i16* %addr)
+ %success = call i32 @llvm.aarch64.stlxr.p0i16(i64 %extval, i16* elementtype(i16) %addr)
ret i32 %success
}
; CHECK: mov w0, [[TMP]]
%extval = zext i32 %val to i64
- %success = call i32 @llvm.aarch64.stlxr.p0i32(i64 %extval, i32* %addr)
+ %success = call i32 @llvm.aarch64.stlxr.p0i32(i64 %extval, i32* elementtype(i32) %addr)
ret i32 %success
}
; CHECK: stlxr [[TMP:w[0-9]+]], x1, [x0]
; CHECK: mov w0, [[TMP]]
- %success = call i32 @llvm.aarch64.stlxr.p0i64(i64 %val, i64* %addr)
+ %success = call i32 @llvm.aarch64.stlxr.p0i64(i64 %val, i64* elementtype(i64) %addr)
ret i32 %success
}
; CHECK-NEXT: [[TMP2:%.*]] = bitcast half [[VAL:%.*]] to i16
; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
; CHECK: atomicrmw.start:
-; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.aarch64.ldaxr.p0i16(i16* [[TMP1]])
+; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.aarch64.ldaxr.p0i16(i16* elementtype(i16) [[TMP1]])
; CHECK-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i16
; CHECK-NEXT: [[TMP5:%.*]] = zext i16 [[TMP2]] to i64
-; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.aarch64.stxr.p0i16(i64 [[TMP5]], i16* [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.aarch64.stxr.p0i16(i64 [[TMP5]], i16* elementtype(i16) [[TMP1]])
; CHECK-NEXT: [[TRYAGAIN:%.*]] = icmp ne i32 [[TMP6]], 0
; CHECK-NEXT: br i1 [[TRYAGAIN]], label [[ATOMICRMW_START]], label [[ATOMICRMW_END:%.*]]
; CHECK: atomicrmw.end:
; CHECK-NEXT: [[TMP2:%.*]] = bitcast float [[VAL:%.*]] to i32
; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
; CHECK: atomicrmw.start:
-; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.aarch64.ldaxr.p0i32(i32* [[TMP1]])
+; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.aarch64.ldaxr.p0i32(i32* elementtype(i32) [[TMP1]])
; CHECK-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; CHECK-NEXT: [[TMP5:%.*]] = zext i32 [[TMP2]] to i64
-; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.aarch64.stxr.p0i32(i64 [[TMP5]], i32* [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.aarch64.stxr.p0i32(i64 [[TMP5]], i32* elementtype(i32) [[TMP1]])
; CHECK-NEXT: [[TRYAGAIN:%.*]] = icmp ne i32 [[TMP6]], 0
; CHECK-NEXT: br i1 [[TRYAGAIN]], label [[ATOMICRMW_START]], label [[ATOMICRMW_END:%.*]]
; CHECK: atomicrmw.end:
; CHECK-NEXT: [[TMP2:%.*]] = bitcast double [[VAL:%.*]] to i64
; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
; CHECK: atomicrmw.start:
-; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.aarch64.ldaxr.p0i64(i64* [[TMP1]])
-; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.aarch64.stxr.p0i64(i64 [[TMP2]], i64* [[TMP1]])
+; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.aarch64.ldaxr.p0i64(i64* elementtype(i64) [[TMP1]])
+; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.aarch64.stxr.p0i64(i64 [[TMP2]], i64* elementtype(i64) [[TMP1]])
; CHECK-NEXT: [[TRYAGAIN:%.*]] = icmp ne i32 [[TMP4]], 0
; CHECK-NEXT: br i1 [[TRYAGAIN]], label [[ATOMICRMW_START]], label [[ATOMICRMW_END:%.*]]
; CHECK: atomicrmw.end:
; CHECK-NEXT: [[CONST:%.*]] = bitcast i64 -9223372036317904832 to i64
; CHECK-NEXT: [[PTR_0:%.*]] = getelementptr i64, i64* [[PTR:%.*]], i64 0
; CHECK-NEXT: [[CONST_MAT:%.*]] = add i64 [[CONST]], -64
-; CHECK-NEXT: [[BAR_0:%.*]] = call i32 @llvm.aarch64.stxr.p0i64(i64 [[CONST_MAT]], i64* [[PTR_0]])
+; CHECK-NEXT: [[BAR_0:%.*]] = call i32 @llvm.aarch64.stxr.p0i64(i64 [[CONST_MAT]], i64* elementtype(i64) [[PTR_0]])
; CHECK-NEXT: [[PTR_1:%.*]] = getelementptr i64, i64* [[PTR]], i64 1
-; CHECK-NEXT: [[BAR_1:%.*]] = call i32 @llvm.aarch64.stxr.p0i64(i64 [[CONST]], i64* [[PTR_1]])
+; CHECK-NEXT: [[BAR_1:%.*]] = call i32 @llvm.aarch64.stxr.p0i64(i64 [[CONST]], i64* elementtype(i64) [[PTR_1]])
; CHECK-NEXT: [[PTR_2:%.*]] = getelementptr i64, i64* [[PTR]], i64 2
; CHECK-NEXT: [[CONST_MAT1:%.*]] = add i64 [[CONST]], 64
-; CHECK-NEXT: [[BAR_2:%.*]] = call i32 @llvm.aarch64.stxr.p0i64(i64 [[CONST_MAT1]], i64* [[PTR_2]])
+; CHECK-NEXT: [[BAR_2:%.*]] = call i32 @llvm.aarch64.stxr.p0i64(i64 [[CONST_MAT1]], i64* elementtype(i64) [[PTR_2]])
; CHECK-NEXT: [[PTR_3:%.*]] = getelementptr i64, i64* [[PTR]], i64 3
; CHECK-NEXT: [[CONST_MAT2:%.*]] = add i64 [[CONST]], 128
-; CHECK-NEXT: [[BAR_3:%.*]] = call i32 @llvm.aarch64.stxr.p0i64(i64 [[CONST_MAT2]], i64* [[PTR_3]])
+; CHECK-NEXT: [[BAR_3:%.*]] = call i32 @llvm.aarch64.stxr.p0i64(i64 [[CONST_MAT2]], i64* elementtype(i64) [[PTR_3]])
; CHECK-NEXT: ret void
;
entry:
%ptr.0 = getelementptr i64, i64* %ptr, i64 0
- %bar.0 = call i32 @llvm.aarch64.stxr.p0i64(i64 -9223372036317904896, i64* %ptr.0)
+ %bar.0 = call i32 @llvm.aarch64.stxr.p0i64(i64 -9223372036317904896, i64* elementtype(i64) %ptr.0)
%ptr.1 = getelementptr i64, i64* %ptr, i64 1
- %bar.1 = call i32 @llvm.aarch64.stxr.p0i64(i64 -9223372036317904832, i64* %ptr.1)
+ %bar.1 = call i32 @llvm.aarch64.stxr.p0i64(i64 -9223372036317904832, i64* elementtype(i64) %ptr.1)
%ptr.2 = getelementptr i64, i64* %ptr, i64 2
- %bar.2 = call i32 @llvm.aarch64.stxr.p0i64(i64 -9223372036317904768, i64* %ptr.2)
+ %bar.2 = call i32 @llvm.aarch64.stxr.p0i64(i64 -9223372036317904768, i64* elementtype(i64) %ptr.2)
%ptr.3 = getelementptr i64, i64* %ptr, i64 3
- %bar.3 = call i32 @llvm.aarch64.stxr.p0i64(i64 -9223372036317904704, i64* %ptr.3)
+ %bar.3 = call i32 @llvm.aarch64.stxr.p0i64(i64 -9223372036317904704, i64* elementtype(i64) %ptr.3)
ret void
}
--- /dev/null
+; RUN: not opt -passes=verify -S < %s 2>&1 | FileCheck %s
+
+define void @f(i32* %p) {
+; CHECK: Intrinsic requires elementtype attribute on first argument
+ %a = call i64 @llvm.aarch64.ldxr.p0i32(i32* %p)
+; CHECK: Intrinsic requires elementtype attribute on second argument
+ %c = call i32 @llvm.aarch64.stxr.p0i32(i64 0, i32* %p)
+
+; CHECK: Intrinsic requires elementtype attribute on first argument
+ %a2 = call i64 @llvm.aarch64.ldaxr.p0i32(i32* %p)
+; CHECK: Intrinsic requires elementtype attribute on second argument
+ %c2 = call i32 @llvm.aarch64.stlxr.p0i32(i64 0, i32* %p)
+ ret void
+}
+
+declare i64 @llvm.aarch64.ldxr.p0i32(i32*)
+declare i64 @llvm.aarch64.ldaxr.p0i32(i32*)
+declare i32 @llvm.aarch64.stxr.p0i32(i64, i32*)
+declare i32 @llvm.aarch64.stlxr.p0i32(i64, i32*)