assert isAny, "LLVMAnyType.VT should have isOverloaded";
}
-class LLVMQualPointerType<LLVMType elty, int addrspace>
- : LLVMType<iPTR>{
- LLVMType ElTy = elty;
+class LLVMQualPointerType<int addrspace>
+ : LLVMType<iPTR> {
assert !and(!le(0, addrspace), !le(addrspace, 255)),
"Address space exceeds 255";
- // D63507: LLVMPointerType<llvm_any_ty>
- let isAny = elty.isAny;
-
- let Sig = !listconcat(
+ let Sig =
!if(addrspace, [
IIT_ANYPTR.Number,
addrspace,
], [
IIT_PTR.Number,
- ]),
- ElTy.Sig);
+ ]);
}
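// Illustrative usage (editorial sketch, not part of this patch): with element
// types gone, a qualified pointer type is declared by address space alone; a
// hypothetical "def llvm_local_ptr_ty : LLVMQualPointerType<3>;" corresponds
// to "ptr addrspace(3)" in IR. Address space 0 encodes as the single byte
// IIT_PTR; any other space encodes as the pair [IIT_ANYPTR, addrspace].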
-class LLVMPointerType<LLVMType elty>
- : LLVMQualPointerType<elty, 0>;
-
-class LLVMAnyPointerType<LLVMType elty>
- : LLVMAnyType<iPTRAny> {
- LLVMType ElTy = elty;
-
+class LLVMAnyPointerType : LLVMAnyType<iPTRAny> {
assert isAny, "iPTRAny should have isOverloaded";
}
def llvm_f80_ty : LLVMType<f80>;
def llvm_f128_ty : LLVMType<f128>;
def llvm_ppcf128_ty : LLVMType<ppcf128>;
-def llvm_ptr_ty : LLVMPointerType<llvm_i8_ty>; // i8*
-def llvm_ptrptr_ty : LLVMPointerType<llvm_ptr_ty>; // i8**
-def llvm_anyptr_ty : LLVMAnyPointerType<llvm_i8_ty>; // (space)i8*
-def llvm_empty_ty : LLVMType<OtherVT>; // { }
-def llvm_descriptor_ty : LLVMPointerType<llvm_empty_ty>; // { }*
-def llvm_metadata_ty : LLVMType<MetadataVT>; // !{...}
-def llvm_token_ty : LLVMType<token>; // token
+def llvm_ptr_ty : LLVMQualPointerType<0>; // ptr
+def llvm_anyptr_ty : LLVMAnyPointerType; // ptr addrspace(N)
+def llvm_empty_ty : LLVMType<OtherVT>; // { }
+def llvm_metadata_ty : LLVMType<MetadataVT>; // !{...}
+def llvm_token_ty : LLVMType<token>; // token
def llvm_x86mmx_ty : LLVMType<x86mmx>;
-def llvm_ptrx86mmx_ty : LLVMPointerType<llvm_x86mmx_ty>; // <1 x i64>*
def llvm_aarch64_svcount_ty : LLVMType<aarch64svcount>;
//===------------------- Garbage Collection Intrinsics --------------------===//
//
def int_gcroot : Intrinsic<[],
- [llvm_ptrptr_ty, llvm_ptr_ty]>;
+ [llvm_ptr_ty, llvm_ptr_ty]>;
def int_gcread : Intrinsic<[llvm_ptr_ty],
- [llvm_ptr_ty, llvm_ptrptr_ty],
+ [llvm_ptr_ty, llvm_ptr_ty],
[IntrReadMem, IntrArgMemOnly]>;
def int_gcwrite : Intrinsic<[],
- [llvm_ptr_ty, llvm_ptr_ty, llvm_ptrptr_ty],
+ [llvm_ptr_ty, llvm_ptr_ty, llvm_ptr_ty],
[IntrArgMemOnly, NoCapture<ArgIndex<1>>,
NoCapture<ArgIndex<2>>]>;
def int_objc_autoreleaseReturnValue : Intrinsic<[llvm_ptr_ty],
[llvm_ptr_ty]>;
def int_objc_copyWeak : Intrinsic<[],
- [llvm_ptrptr_ty,
- llvm_ptrptr_ty]>;
-def int_objc_destroyWeak : Intrinsic<[], [llvm_ptrptr_ty]>;
+ [llvm_ptr_ty,
+ llvm_ptr_ty]>;
+def int_objc_destroyWeak : Intrinsic<[], [llvm_ptr_ty]>;
def int_objc_initWeak : Intrinsic<[llvm_ptr_ty],
- [llvm_ptrptr_ty,
+ [llvm_ptr_ty,
llvm_ptr_ty]>;
def int_objc_loadWeak : Intrinsic<[llvm_ptr_ty],
- [llvm_ptrptr_ty]>;
+ [llvm_ptr_ty]>;
def int_objc_loadWeakRetained : Intrinsic<[llvm_ptr_ty],
- [llvm_ptrptr_ty]>;
+ [llvm_ptr_ty]>;
def int_objc_moveWeak : Intrinsic<[],
- [llvm_ptrptr_ty,
- llvm_ptrptr_ty]>;
+ [llvm_ptr_ty,
+ llvm_ptr_ty]>;
def int_objc_release : Intrinsic<[], [llvm_ptr_ty]>;
def int_objc_retain : Intrinsic<[llvm_ptr_ty],
[llvm_ptr_ty]>;
def int_objc_retainBlock : Intrinsic<[llvm_ptr_ty],
[llvm_ptr_ty]>;
def int_objc_storeStrong : Intrinsic<[],
- [llvm_ptrptr_ty,
+ [llvm_ptr_ty,
llvm_ptr_ty]>;
def int_objc_storeWeak : Intrinsic<[llvm_ptr_ty],
- [llvm_ptrptr_ty,
+ [llvm_ptr_ty,
llvm_ptr_ty]>;
def int_objc_clang_arc_use : Intrinsic<[],
[llvm_vararg_ty]>;
def int_objc_sync_exit : Intrinsic<[llvm_i32_ty],
[llvm_ptr_ty]>;
def int_objc_arc_annotation_topdown_bbstart : Intrinsic<[],
- [llvm_ptrptr_ty,
- llvm_ptrptr_ty]>;
+ [llvm_ptr_ty,
+ llvm_ptr_ty]>;
def int_objc_arc_annotation_topdown_bbend : Intrinsic<[],
- [llvm_ptrptr_ty,
- llvm_ptrptr_ty]>;
+ [llvm_ptr_ty,
+ llvm_ptr_ty]>;
def int_objc_arc_annotation_bottomup_bbstart : Intrinsic<[],
- [llvm_ptrptr_ty,
- llvm_ptrptr_ty]>;
+ [llvm_ptr_ty,
+ llvm_ptr_ty]>;
def int_objc_arc_annotation_bottomup_bbend : Intrinsic<[],
- [llvm_ptrptr_ty,
- llvm_ptrptr_ty]>;
+ [llvm_ptr_ty,
+ llvm_ptr_ty]>;
//===--------------- Swift asynchronous context intrinsics ----------------===//
// Returns the location of the Swift asynchronous context (usually stored just
// before the frame pointer), and triggers the creation of a null context if it
// would otherwise be unneeded.
-def int_swift_async_context_addr : Intrinsic<[llvm_ptrptr_ty], [], []>;
+def int_swift_async_context_addr : Intrinsic<[llvm_ptr_ty], [], []>;
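// Illustrative declaration after this change (editorial sketch):
//   declare ptr @llvm.swift.async.context.addr()
// The caller receives an opaque pointer to the context slot instead of an i8**.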
//===--------------------- Code Generator Intrinsics ----------------------===//
//
// Stack Protector Intrinsic - The stackprotector intrinsic writes the stack
// guard to the correct place on the stack frame.
-def int_stackprotector : DefaultAttrsIntrinsic<[], [llvm_ptr_ty, llvm_ptrptr_ty], []>;
+def int_stackprotector : DefaultAttrsIntrinsic<[], [llvm_ptr_ty, llvm_ptr_ty], []>;
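// Illustrative declaration after this change (editorial sketch); both the
// guard value and the stack slot are opaque pointers:
//   declare void @llvm.stackprotector(ptr %guard, ptr %slot)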
def int_stackguard : DefaultAttrsIntrinsic<[llvm_ptr_ty], [], []>;
// A cover for instrumentation-based profiling.
[IntrInaccessibleMemOnly], "llvm.var.annotation">;
def int_ptr_annotation : DefaultAttrsIntrinsic<
- [LLVMAnyPointerType<llvm_anyint_ty>],
+ [llvm_anyptr_ty],
[LLVMMatchType<0>, llvm_anyptr_ty, LLVMMatchType<1>, llvm_i32_ty, LLVMMatchType<1>],
[IntrInaccessibleMemOnly], "llvm.ptr.annotation">;
[IntrArgMemOnly, IntrWillReturn,
NoCapture<ArgIndex<1>>,
ImmArg<ArgIndex<0>>]>;
-def int_invariant_start : DefaultAttrsIntrinsic<[llvm_descriptor_ty],
+def int_invariant_start : DefaultAttrsIntrinsic<[llvm_ptr_ty],
[llvm_i64_ty, llvm_anyptr_ty],
[IntrArgMemOnly, IntrWillReturn,
NoCapture<ArgIndex<1>>,
ImmArg<ArgIndex<0>>]>;
def int_invariant_end : DefaultAttrsIntrinsic<[],
- [llvm_descriptor_ty, llvm_i64_ty,
+ [llvm_ptr_ty, llvm_i64_ty,
llvm_anyptr_ty],
[IntrArgMemOnly, IntrWillReturn,
NoCapture<ArgIndex<2>>,
// Memory Intrinsics
def int_vp_store : DefaultAttrsIntrinsic<[],
[ llvm_anyvector_ty,
- LLVMAnyPointerType<LLVMMatchType<0>>,
+ llvm_anyptr_ty,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
llvm_i32_ty],
[ NoCapture<ArgIndex<1>>, IntrNoSync, IntrWriteMem, IntrArgMemOnly, IntrWillReturn ]>;
def int_vp_load : DefaultAttrsIntrinsic<[ llvm_anyvector_ty],
- [ LLVMAnyPointerType<LLVMMatchType<0>>,
+ [ llvm_anyptr_ty,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
llvm_i32_ty],
[ NoCapture<ArgIndex<0>>, IntrNoSync, IntrReadMem, IntrWillReturn, IntrArgMemOnly ]>;
// Experimental strided memory accesses
def int_experimental_vp_strided_store : DefaultAttrsIntrinsic<[],
[ llvm_anyvector_ty,
- LLVMAnyPointerToElt<0>,
+ llvm_anyptr_ty,
llvm_anyint_ty, // Stride in bytes
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
llvm_i32_ty],
[ NoCapture<ArgIndex<1>>, IntrNoSync, IntrWriteMem, IntrArgMemOnly, IntrWillReturn ]>;
def int_experimental_vp_strided_load : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
- [ LLVMAnyPointerToElt<0>,
+ [ llvm_anyptr_ty,
llvm_anyint_ty, // Stride in bytes
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
llvm_i32_ty],
//
def int_masked_load:
DefaultAttrsIntrinsic<[llvm_anyvector_ty],
- [LLVMAnyPointerType<LLVMMatchType<0>>, llvm_i32_ty,
+ [llvm_anyptr_ty, llvm_i32_ty,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<0>],
[IntrReadMem, IntrArgMemOnly, IntrWillReturn, ImmArg<ArgIndex<1>>,
NoCapture<ArgIndex<0>>]>;
def int_masked_store:
DefaultAttrsIntrinsic<[],
- [llvm_anyvector_ty, LLVMAnyPointerType<LLVMMatchType<0>>,
+ [llvm_anyvector_ty, llvm_anyptr_ty,
llvm_i32_ty, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
[IntrWriteMem, IntrArgMemOnly, IntrWillReturn,
ImmArg<ArgIndex<2>>, NoCapture<ArgIndex<1>>]>;
let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
class AdvSIMD_1Vec_Load_Intrinsic
- : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [LLVMAnyPointerType<LLVMMatchType<0>>],
+ : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [llvm_anyptr_ty],
[IntrReadMem, IntrArgMemOnly]>;
class AdvSIMD_1Vec_Store_Lane_Intrinsic
: DefaultAttrsIntrinsic<[], [llvm_anyvector_ty, llvm_i64_ty, llvm_anyptr_ty],
class AdvSIMD_2Vec_Load_Intrinsic
: DefaultAttrsIntrinsic<[LLVMMatchType<0>, llvm_anyvector_ty],
- [LLVMAnyPointerType<LLVMMatchType<0>>],
+ [llvm_anyptr_ty],
[IntrReadMem, IntrArgMemOnly]>;
class AdvSIMD_2Vec_Load_Lane_Intrinsic
: DefaultAttrsIntrinsic<[LLVMMatchType<0>, LLVMMatchType<0>],
[IntrReadMem, IntrArgMemOnly]>;
class AdvSIMD_2Vec_Store_Intrinsic
: DefaultAttrsIntrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
- LLVMAnyPointerType<LLVMMatchType<0>>],
+ llvm_anyptr_ty],
[IntrArgMemOnly, NoCapture<ArgIndex<2>>]>;
class AdvSIMD_2Vec_Store_Lane_Intrinsic
: DefaultAttrsIntrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
class AdvSIMD_3Vec_Load_Intrinsic
: DefaultAttrsIntrinsic<[LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyvector_ty],
- [LLVMAnyPointerType<LLVMMatchType<0>>],
+ [llvm_anyptr_ty],
[IntrReadMem, IntrArgMemOnly]>;
class AdvSIMD_3Vec_Load_Lane_Intrinsic
: DefaultAttrsIntrinsic<[LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
[IntrReadMem, IntrArgMemOnly]>;
class AdvSIMD_3Vec_Store_Intrinsic
: DefaultAttrsIntrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
- LLVMMatchType<0>, LLVMAnyPointerType<LLVMMatchType<0>>],
+ LLVMMatchType<0>, llvm_anyptr_ty],
[IntrArgMemOnly, NoCapture<ArgIndex<3>>]>;
class AdvSIMD_3Vec_Store_Lane_Intrinsic
: DefaultAttrsIntrinsic<[], [llvm_anyvector_ty,
class AdvSIMD_4Vec_Load_Intrinsic
: DefaultAttrsIntrinsic<[LLVMMatchType<0>, LLVMMatchType<0>,
LLVMMatchType<0>, llvm_anyvector_ty],
- [LLVMAnyPointerType<LLVMMatchType<0>>],
+ [llvm_anyptr_ty],
[IntrReadMem, IntrArgMemOnly]>;
class AdvSIMD_4Vec_Load_Lane_Intrinsic
: DefaultAttrsIntrinsic<[LLVMMatchType<0>, LLVMMatchType<0>,
class AdvSIMD_4Vec_Store_Intrinsic
: DefaultAttrsIntrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
LLVMMatchType<0>, LLVMMatchType<0>,
- LLVMAnyPointerType<LLVMMatchType<0>>],
+ llvm_anyptr_ty],
[IntrArgMemOnly, NoCapture<ArgIndex<4>>]>;
class AdvSIMD_4Vec_Store_Lane_Intrinsic
: DefaultAttrsIntrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
class SVE2_CONFLICT_DETECT_Intrinsic
: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
- [LLVMAnyPointerType<llvm_any_ty>,
- LLVMMatchType<1>],
+ [llvm_anyptr_ty, LLVMMatchType<1>],
[IntrNoMem]>;
class SVE2_3VectorArg_Indexed_Intrinsic
// AS 7 is PARAM_I_ADDRESS, used for kernel arguments
def int_r600_implicitarg_ptr :
ClangBuiltin<"__builtin_r600_implicitarg_ptr">,
- DefaultAttrsIntrinsic<[LLVMQualPointerType<llvm_i8_ty, 7>], [],
+ DefaultAttrsIntrinsic<[LLVMQualPointerType<7>], [],
[IntrNoMem, IntrSpeculatable]>;
def int_r600_rat_store_typed :
<"__builtin_amdgcn_workgroup_id">;
def int_amdgcn_dispatch_ptr :
- DefaultAttrsIntrinsic<[LLVMQualPointerType<llvm_i8_ty, 4>], [],
+ DefaultAttrsIntrinsic<[LLVMQualPointerType<4>], [],
[Align<RetIndex, 4>, IntrNoMem, IntrSpeculatable]>;
def int_amdgcn_queue_ptr :
ClangBuiltin<"__builtin_amdgcn_queue_ptr">,
- DefaultAttrsIntrinsic<[LLVMQualPointerType<llvm_i8_ty, 4>], [],
+ DefaultAttrsIntrinsic<[LLVMQualPointerType<4>], [],
[Align<RetIndex, 4>, IntrNoMem, IntrSpeculatable]>;
def int_amdgcn_kernarg_segment_ptr :
ClangBuiltin<"__builtin_amdgcn_kernarg_segment_ptr">,
- DefaultAttrsIntrinsic<[LLVMQualPointerType<llvm_i8_ty, 4>], [],
+ DefaultAttrsIntrinsic<[LLVMQualPointerType<4>], [],
[Align<RetIndex, 4>, IntrNoMem, IntrSpeculatable]>;
def int_amdgcn_implicitarg_ptr :
ClangBuiltin<"__builtin_amdgcn_implicitarg_ptr">,
- DefaultAttrsIntrinsic<[LLVMQualPointerType<llvm_i8_ty, 4>], [],
+ DefaultAttrsIntrinsic<[LLVMQualPointerType<4>], [],
[Align<RetIndex, 4>, IntrNoMem, IntrSpeculatable]>;
def int_amdgcn_groupstaticsize :
def int_amdgcn_implicit_buffer_ptr :
ClangBuiltin<"__builtin_amdgcn_implicit_buffer_ptr">,
- DefaultAttrsIntrinsic<[LLVMQualPointerType<llvm_i8_ty, 4>], [],
+ DefaultAttrsIntrinsic<[LLVMQualPointerType<4>], [],
[Align<RetIndex, 4>, IntrNoMem, IntrSpeculatable]>;
// Set EXEC to the 64-bit value given.
class AMDGPULDSIntrin :
Intrinsic<[llvm_any_ty],
- [LLVMQualPointerType<LLVMMatchType<0>, 3>,
+ [LLVMQualPointerType<3>,
LLVMMatchType<0>,
llvm_i32_ty, // ordering
llvm_i32_ty, // scope
[llvm_i32_ty],
// M0 = {hi16:address, lo16:waveID}. Allow passing M0 as a pointer, so that
// the bit packing can be optimized at the IR level.
- [LLVMQualPointerType<llvm_i32_ty, 2>, // IntToPtr(M0)
+ [LLVMQualPointerType<2>, // IntToPtr(M0)
llvm_i32_ty, // value to add or swap
llvm_i32_ty, // ordering
llvm_i32_ty, // scope
// Data type for buffer resources (V#). Maybe, in the future, we can create a
// similar one for textures (T#).
-class AMDGPUBufferRsrcTy<LLVMType data_ty = llvm_any_ty>
- : LLVMQualPointerType<data_ty, 8>;
+def AMDGPUBufferRsrcTy : LLVMQualPointerType<8>;
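// Buffer resources are thus modeled as "ptr addrspace(8)" values; the data
// type the old parameter carried is now conveyed solely by each intrinsic's
// overloaded vdata operand.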
let TargetPrefix = "amdgcn" in {
def int_amdgcn_make_buffer_rsrc : DefaultAttrsIntrinsic <
- [AMDGPUBufferRsrcTy<llvm_i8_ty>],
+ [AMDGPUBufferRsrcTy],
[llvm_anyptr_ty, // base
llvm_i16_ty, // stride (and swizzle control)
llvm_i32_ty, // NumRecords / extent
class AMDGPURawPtrBufferLoad<LLVMType data_ty = llvm_any_ty> : DefaultAttrsIntrinsic <
[data_ty],
- [AMDGPUBufferRsrcTy<LLVMMatchType<0>>, // rsrc(SGPR)
+ [AMDGPUBufferRsrcTy, // rsrc(SGPR)
llvm_i32_ty, // offset(VGPR/imm, included in bounds checking and swizzling)
llvm_i32_ty, // soffset(SGPR/imm, excluded from bounds checking and swizzling)
llvm_i32_ty], // auxiliary data (imm, cachepolicy (bit 0 = glc,
class AMDGPUStructPtrBufferLoad<LLVMType data_ty = llvm_any_ty> : DefaultAttrsIntrinsic <
[data_ty],
- [AMDGPUBufferRsrcTy<LLVMMatchType<0>>, // rsrc(SGPR)
+ [AMDGPUBufferRsrcTy, // rsrc(SGPR)
llvm_i32_ty, // vindex(VGPR)
llvm_i32_ty, // offset(VGPR/imm, included in bounds checking and swizzling)
llvm_i32_ty, // soffset(SGPR/imm, excluded from bounds checking and swizzling)
class AMDGPURawPtrBufferStore<LLVMType data_ty = llvm_any_ty> : DefaultAttrsIntrinsic <
[],
[data_ty, // vdata(VGPR)
- AMDGPUBufferRsrcTy<LLVMMatchType<0>>, // rsrc(SGPR)
+ AMDGPUBufferRsrcTy, // rsrc(SGPR)
llvm_i32_ty, // offset(VGPR/imm, included in bounds checking and swizzling)
llvm_i32_ty, // soffset(SGPR/imm, excluded from bounds checking and swizzling)
llvm_i32_ty], // auxiliary data (imm, cachepolicy (bit 0 = glc,
class AMDGPUStructPtrBufferStore<LLVMType data_ty = llvm_any_ty> : DefaultAttrsIntrinsic <
[],
[data_ty, // vdata(VGPR)
- AMDGPUBufferRsrcTy<LLVMMatchType<0>>, // rsrc(SGPR)
+ AMDGPUBufferRsrcTy, // rsrc(SGPR)
llvm_i32_ty, // vindex(VGPR)
llvm_i32_ty, // offset(VGPR/imm, included in bounds checking and swizzling)
llvm_i32_ty, // soffset(SGPR/imm, excluded from bounds checking and swizzling)
class AMDGPURawPtrBufferAtomic<LLVMType data_ty = llvm_any_ty, bit NoRtn = false> : Intrinsic <
!if(NoRtn, [], [data_ty]),
[!if(NoRtn, data_ty, LLVMMatchType<0>), // vdata(VGPR)
- AMDGPUBufferRsrcTy<LLVMMatchType<0>>, // rsrc(SGPR)
+ AMDGPUBufferRsrcTy, // rsrc(SGPR)
llvm_i32_ty, // offset(VGPR/imm, included in bounds checking and swizzling)
llvm_i32_ty, // soffset(SGPR/imm, excluded from bounds checking and swizzling)
llvm_i32_ty], // cachepolicy(imm; bit 1 = slc)
[llvm_anyint_ty],
[LLVMMatchType<0>, // src(VGPR)
LLVMMatchType<0>, // cmp(VGPR)
- AMDGPUBufferRsrcTy<LLVMMatchType<0>>, // rsrc(SGPR)
+ AMDGPUBufferRsrcTy, // rsrc(SGPR)
llvm_i32_ty, // offset(VGPR/imm, included in bounds checking and swizzling)
llvm_i32_ty, // soffset(SGPR/imm, excluded from bounds checking and swizzling)
llvm_i32_ty], // cachepolicy(imm; bit 1 = slc)
class AMDGPUStructPtrBufferAtomic<LLVMType data_ty = llvm_any_ty, bit NoRtn = false> : Intrinsic <
!if(NoRtn, [], [data_ty]),
[!if(NoRtn, data_ty, LLVMMatchType<0>), // vdata(VGPR)
- AMDGPUBufferRsrcTy<LLVMMatchType<0>>, // rsrc(SGPR)
+ AMDGPUBufferRsrcTy, // rsrc(SGPR)
llvm_i32_ty, // vindex(VGPR)
llvm_i32_ty, // offset(VGPR/imm, included in bounds checking and swizzling)
llvm_i32_ty, // soffset(SGPR/imm, excluded from bounds checking and swizzling)
[llvm_anyint_ty],
[LLVMMatchType<0>, // src(VGPR)
LLVMMatchType<0>, // cmp(VGPR)
- AMDGPUBufferRsrcTy<LLVMMatchType<0>>, // rsrc(SGPR)
+ AMDGPUBufferRsrcTy, // rsrc(SGPR)
llvm_i32_ty, // vindex(VGPR)
llvm_i32_ty, // offset(VGPR/imm, included in bounds checking and swizzling)
llvm_i32_ty, // soffset(SGPR/imm, excluded from bounds checking and swizzling)
def int_amdgcn_raw_ptr_tbuffer_load : DefaultAttrsIntrinsic <
[llvm_any_ty], // overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
- [AMDGPUBufferRsrcTy<LLVMMatchType<0>>, // rsrc(SGPR)
+ [AMDGPUBufferRsrcTy, // rsrc(SGPR)
llvm_i32_ty, // offset(VGPR/imm, included in bounds checking and swizzling)
llvm_i32_ty, // soffset(SGPR/imm, excluded from bounds checking and swizzling)
llvm_i32_ty, // format(imm; bits 3..0 = dfmt, bits 6..4 = nfmt)
def int_amdgcn_raw_ptr_tbuffer_store : DefaultAttrsIntrinsic <
[],
[llvm_any_ty, // vdata(VGPR), overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
- AMDGPUBufferRsrcTy<LLVMMatchType<0>>, // rsrc(SGPR)
+ AMDGPUBufferRsrcTy, // rsrc(SGPR)
llvm_i32_ty, // offset(VGPR/imm, included in bounds checking and swizzling)
llvm_i32_ty, // soffset(SGPR/imm, excluded from bounds checking and swizzling)
llvm_i32_ty, // format(imm; bits 3..0 = dfmt, bits 6..4 = nfmt)
def int_amdgcn_struct_ptr_tbuffer_load : DefaultAttrsIntrinsic <
[llvm_any_ty], // overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
- [AMDGPUBufferRsrcTy<LLVMMatchType<0>>, // rsrc(SGPR)
+ [AMDGPUBufferRsrcTy, // rsrc(SGPR)
llvm_i32_ty, // vindex(VGPR)
llvm_i32_ty, // offset(VGPR/imm, included in bounds checking and swizzling)
llvm_i32_ty, // soffset(SGPR/imm, excluded from bounds checking and swizzling)
def int_amdgcn_struct_ptr_tbuffer_store : DefaultAttrsIntrinsic <
[],
[llvm_any_ty, // vdata(VGPR), overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
- AMDGPUBufferRsrcTy<LLVMMatchType<0>>, // rsrc(SGPR)
+ AMDGPUBufferRsrcTy, // rsrc(SGPR)
llvm_i32_ty, // vindex(VGPR)
llvm_i32_ty, // offset(VGPR/imm, included in bounds checking and swizzling)
llvm_i32_ty, // soffset(SGPR/imm, excluded from bounds checking and swizzling)
class AMDGPURawBufferLoadLDS : Intrinsic <
[],
[llvm_v4i32_ty, // rsrc(SGPR)
- LLVMQualPointerType<llvm_i8_ty, 3>, // LDS base offset
+ LLVMQualPointerType<3>, // LDS base offset
llvm_i32_ty, // Data byte size: 1/2/4
llvm_i32_ty, // voffset(VGPR, included in bounds checking and swizzling)
llvm_i32_ty, // soffset(SGPR/imm, excluded from bounds checking and swizzling)
class AMDGPURawPtrBufferLoadLDS : Intrinsic <
[],
- [AMDGPUBufferRsrcTy<llvm_i8_ty>, // rsrc(SGPR)
- LLVMQualPointerType<llvm_i8_ty, 3>, // LDS base offset
+ [AMDGPUBufferRsrcTy, // rsrc(SGPR)
+ LLVMQualPointerType<3>, // LDS base offset
llvm_i32_ty, // Data byte size: 1/2/4
llvm_i32_ty, // voffset(VGPR, included in bounds checking and swizzling)
llvm_i32_ty, // soffset(SGPR/imm, excluded from bounds checking and swizzling)
class AMDGPUStructBufferLoadLDS : Intrinsic <
[],
[llvm_v4i32_ty, // rsrc(SGPR)
- LLVMQualPointerType<llvm_i8_ty, 3>, // LDS base offset
+ LLVMQualPointerType<3>, // LDS base offset
llvm_i32_ty, // Data byte size: 1/2/4
llvm_i32_ty, // vindex(VGPR)
llvm_i32_ty, // voffset(VGPR, included in bounds checking and swizzling)
class AMDGPUStructPtrBufferLoadLDS : Intrinsic <
[],
- [AMDGPUBufferRsrcTy<llvm_i8_ty>, // rsrc(SGPR)
- LLVMQualPointerType<llvm_i8_ty, 3>, // LDS base offset
+ [AMDGPUBufferRsrcTy, // rsrc(SGPR)
+ LLVMQualPointerType<3> , // LDS base offset
llvm_i32_ty, // Data byte size: 1/2/4
llvm_i32_ty, // vindex(VGPR)
llvm_i32_ty, // voffset(VGPR, included in bounds checking and swizzling)
class AMDGPUGlobalLoadLDS : Intrinsic <
[],
- [LLVMQualPointerType<llvm_i8_ty, 1>, // Base global pointer to load from
- LLVMQualPointerType<llvm_i8_ty, 3>, // LDS base pointer to store to
+ [LLVMQualPointerType<1>, // Base global pointer to load from
+ LLVMQualPointerType<3>, // LDS base pointer to store to
llvm_i32_ty, // Data byte size: 1/2/4
llvm_i32_ty, // imm offset (applied to both global and LDS address)
llvm_i32_ty], // auxiliary data (imm, cachepolicy (bit 0 = glc/sc0,
def int_amdgcn_flat_atomic_fadd_v2bf16 : AMDGPUGlobalAtomicRtn<llvm_v2i16_ty>;
def int_amdgcn_ds_fadd_v2bf16 : DefaultAttrsIntrinsic<
[llvm_v2i16_ty],
- [LLVMQualPointerType<llvm_v2i16_ty, 3>, llvm_v2i16_ty],
+ [LLVMQualPointerType<3>, llvm_v2i16_ty],
[IntrArgMemOnly, NoCapture<ArgIndex<0>>]>,
ClangBuiltin<"__builtin_amdgcn_ds_atomic_fadd_v2bf16">;
def int_arm_neon_vld1x2 : DefaultAttrsIntrinsic<
[llvm_anyvector_ty, LLVMMatchType<0>],
- [LLVMAnyPointerType<LLVMMatchType<0>>], [IntrReadMem, IntrArgMemOnly]>;
+ [llvm_anyptr_ty], [IntrReadMem, IntrArgMemOnly]>;
def int_arm_neon_vld1x3 : DefaultAttrsIntrinsic<
[llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>],
- [LLVMAnyPointerType<LLVMMatchType<0>>], [IntrReadMem, IntrArgMemOnly]>;
+ [llvm_anyptr_ty], [IntrReadMem, IntrArgMemOnly]>;
def int_arm_neon_vld1x4 : DefaultAttrsIntrinsic<
[llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
- [LLVMAnyPointerType<LLVMMatchType<0>>], [IntrReadMem, IntrArgMemOnly]>;
+ [llvm_anyptr_ty], [IntrReadMem, IntrArgMemOnly]>;
// Vector load N-element structure to one lane.
// Source operands are: the address, the N input vectors (since only one
def int_hexagon_prefetch :
Hexagon_Intrinsic<"HEXAGON_prefetch", [], [llvm_ptr_ty], []>;
-def llvm_ptr32_ty : LLVMPointerType<llvm_i32_ty>;
-def llvm_ptr64_ty : LLVMPointerType<llvm_i64_ty>;
-
// Mark locked loads as read/write to prevent any accidental reordering.
// These don't use Hexagon_Intrinsic, because they are not nosync, and as such
// cannot use default attributes.
let TargetPrefix = "hexagon" in {
def int_hexagon_L2_loadw_locked :
ClangBuiltin<"__builtin_HEXAGON_L2_loadw_locked">,
- Intrinsic<[llvm_i32_ty], [llvm_ptr32_ty],
+ Intrinsic<[llvm_i32_ty], [llvm_ptr_ty],
[IntrArgMemOnly, NoCapture<ArgIndex<0>>]>;
def int_hexagon_L4_loadd_locked :
ClangBuiltin<"__builtin__HEXAGON_L4_loadd_locked">,
- Intrinsic<[llvm_i64_ty], [llvm_ptr64_ty],
+ Intrinsic<[llvm_i64_ty], [llvm_ptr_ty],
[IntrArgMemOnly, NoCapture<ArgIndex<0>>]>;
def int_hexagon_S2_storew_locked :
ClangBuiltin<"__builtin_HEXAGON_S2_storew_locked">,
Intrinsic<[llvm_i32_ty],
- [llvm_ptr32_ty, llvm_i32_ty], [IntrArgMemOnly, NoCapture<ArgIndex<0>>]>;
+ [llvm_ptr_ty, llvm_i32_ty], [IntrArgMemOnly, NoCapture<ArgIndex<0>>]>;
def int_hexagon_S4_stored_locked :
ClangBuiltin<"__builtin_HEXAGON_S4_stored_locked">,
Intrinsic<[llvm_i32_ty],
- [llvm_ptr64_ty, llvm_i64_ty], [IntrArgMemOnly, NoCapture<ArgIndex<0>>]>;
+ [llvm_ptr_ty, llvm_i64_ty], [IntrArgMemOnly, NoCapture<ArgIndex<0>>]>;
}
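// Illustrative declarations after this change (editorial sketch): both widths
// share the one opaque pointer type, so the dedicated ptr32/ptr64 defs above
// are no longer needed:
//   declare i32 @llvm.hexagon.L2.loadw.locked(ptr)
//   declare i64 @llvm.hexagon.L4.loadd.locked(ptr)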
def int_hexagon_vmemcpy : Hexagon_Intrinsic<"hexagon_vmemcpy",
class Hexagon_pred_vload_imm<LLVMType ValTy>
: Hexagon_NonGCC_Intrinsic<
[ValTy],
- [llvm_i1_ty, LLVMPointerType<ValTy>, llvm_i32_ty],
+ [llvm_i1_ty, llvm_ptr_ty, llvm_i32_ty],
[IntrReadMem, IntrArgMemOnly, NoCapture<ArgIndex<1>>,
ImmArg<ArgIndex<2>>]>;
class Hexagom_pred_vload_upd<LLVMType ValTy, bit TakesImm>
: Hexagon_NonGCC_Intrinsic<
- [ValTy, LLVMPointerType<ValTy>],
- [llvm_i1_ty, LLVMPointerType<ValTy>, llvm_i32_ty],
+ [ValTy, llvm_ptr_ty],
+ [llvm_i1_ty, llvm_ptr_ty, llvm_i32_ty],
!if(TakesImm,
[IntrReadMem, IntrArgMemOnly, NoCapture<ArgIndex<1>>,
ImmArg<ArgIndex<2>>],
class Hexagon_pred_vstore_imm<LLVMType ValTy>
: Hexagon_NonGCC_Intrinsic<
[],
- [llvm_i1_ty, LLVMPointerType<ValTy>, llvm_i32_ty, ValTy],
+ [llvm_i1_ty, llvm_ptr_ty, llvm_i32_ty, ValTy],
[IntrWriteMem, IntrArgMemOnly, NoCapture<ArgIndex<1>>,
ImmArg<ArgIndex<2>>]>;
class Hexagon_pred_vstore_upd<LLVMType ValTy, bit TakesImm>
: Hexagon_NonGCC_Intrinsic<
- [LLVMPointerType<ValTy>],
- [llvm_i1_ty, LLVMPointerType<ValTy>, llvm_i32_ty, ValTy],
+ [llvm_ptr_ty],
+ [llvm_i1_ty, llvm_ptr_ty, llvm_i32_ty, ValTy],
!if(TakesImm,
[IntrWriteMem, IntrArgMemOnly, NoCapture<ArgIndex<1>>,
ImmArg<ArgIndex<2>>],
// * llvm.nvvm.max.ull --> ibid.
// * llvm.nvvm.h2f --> llvm.convert.to.fp16.f32
-def llvm_global_i8ptr_ty : LLVMQualPointerType<llvm_i8_ty, 1>; // (global)i8*
-def llvm_shared_i8ptr_ty : LLVMQualPointerType<llvm_i8_ty, 3>; // (shared)i8*
-def llvm_i64ptr_ty : LLVMPointerType<llvm_i64_ty>; // i64*
-def llvm_any_i64ptr_ty : LLVMAnyPointerType<llvm_i64_ty>; // (space)i64*
-def llvm_shared_i64ptr_ty : LLVMQualPointerType<llvm_i64_ty, 3>; // (shared)i64*
+def llvm_global_ptr_ty : LLVMQualPointerType<1>; // (global)ptr
+def llvm_shared_ptr_ty : LLVMQualPointerType<3>; // (shared)ptr
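// These correspond to "ptr addrspace(1)" (global) and "ptr addrspace(3)"
// (shared) in IR; the pointee widths the old names encoded (i8 vs. i64) are
// meaningless with opaque pointers, so one def per address space suffices.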
//
// MISC
// Atomics not available as llvm intrinsics.
def int_nvvm_atomic_load_inc_32 : Intrinsic<[llvm_i32_ty],
- [LLVMAnyPointerType<llvm_i32_ty>, llvm_i32_ty],
+ [llvm_anyptr_ty, llvm_i32_ty],
[IntrArgMemOnly, IntrNoCallback, NoCapture<ArgIndex<0>>]>;
def int_nvvm_atomic_load_dec_32 : Intrinsic<[llvm_i32_ty],
- [LLVMAnyPointerType<llvm_i32_ty>, llvm_i32_ty],
+ [llvm_anyptr_ty, llvm_i32_ty],
[IntrArgMemOnly, IntrNoCallback, NoCapture<ArgIndex<0>>]>;
class SCOPED_ATOMIC2_impl<LLVMType elty>
: Intrinsic<[elty],
- [LLVMAnyPointerType<LLVMMatchType<0>>, LLVMMatchType<0>],
+ [llvm_anyptr_ty, LLVMMatchType<0>],
[IntrArgMemOnly, IntrNoCallback, NoCapture<ArgIndex<0>>]>;
class SCOPED_ATOMIC3_impl<LLVMType elty>
: Intrinsic<[elty],
- [LLVMAnyPointerType<LLVMMatchType<0>>, LLVMMatchType<0>,
+ [llvm_anyptr_ty, LLVMMatchType<0>,
LLVMMatchType<0>],
[IntrArgMemOnly, IntrNoCallback, NoCapture<ArgIndex<0>>]>;
// Async Copy
def int_nvvm_cp_async_mbarrier_arrive :
ClangBuiltin<"__nvvm_cp_async_mbarrier_arrive">,
- Intrinsic<[],[llvm_i64ptr_ty],[IntrConvergent, IntrNoCallback]>;
+ Intrinsic<[],[llvm_ptr_ty],[IntrConvergent, IntrNoCallback]>;
def int_nvvm_cp_async_mbarrier_arrive_shared :
ClangBuiltin<"__nvvm_cp_async_mbarrier_arrive_shared">,
- Intrinsic<[],[llvm_shared_i64ptr_ty],[IntrConvergent, IntrNoCallback]>;
+ Intrinsic<[],[llvm_shared_ptr_ty],[IntrConvergent, IntrNoCallback]>;
def int_nvvm_cp_async_mbarrier_arrive_noinc :
ClangBuiltin<"__nvvm_cp_async_mbarrier_arrive_noinc">,
- Intrinsic<[],[llvm_i64ptr_ty],[IntrConvergent, IntrNoCallback]>;
+ Intrinsic<[],[llvm_ptr_ty],[IntrConvergent, IntrNoCallback]>;
def int_nvvm_cp_async_mbarrier_arrive_noinc_shared :
ClangBuiltin<"__nvvm_cp_async_mbarrier_arrive_noinc_shared">,
- Intrinsic<[],[llvm_shared_i64ptr_ty],[IntrConvergent, IntrNoCallback]>;
+ Intrinsic<[],[llvm_shared_ptr_ty],[IntrConvergent, IntrNoCallback]>;
multiclass CP_ASYNC_SHARED_GLOBAL<string n, string cc> {
- def NAME: Intrinsic<[],[llvm_shared_i8ptr_ty, llvm_global_i8ptr_ty],
+ def NAME: Intrinsic<[],[llvm_shared_ptr_ty, llvm_global_ptr_ty],
[IntrArgMemOnly, IntrNoCallback, NoAlias<ArgIndex<0>>, NoAlias<ArgIndex<1>>,
WriteOnly<ArgIndex<0>>, ReadOnly<ArgIndex<1>>],
"llvm.nvvm.cp.async." # cc # ".shared.global." # n>;
- def _s: Intrinsic<[],[llvm_shared_i8ptr_ty, llvm_global_i8ptr_ty, llvm_i32_ty],
+ def _s: Intrinsic<[],[llvm_shared_ptr_ty, llvm_global_ptr_ty, llvm_i32_ty],
[IntrArgMemOnly, IntrNoCallback, NoAlias<ArgIndex<0>>, NoAlias<ArgIndex<1>>,
WriteOnly<ArgIndex<0>>, ReadOnly<ArgIndex<1>>],
"llvm.nvvm.cp.async." # cc # ".shared.global." # n # ".s">;
// mbarrier
def int_nvvm_mbarrier_init : ClangBuiltin<"__nvvm_mbarrier_init">,
- Intrinsic<[],[llvm_i64ptr_ty, llvm_i32_ty],[IntrConvergent, IntrNoCallback]>;
+ Intrinsic<[],[llvm_ptr_ty, llvm_i32_ty],[IntrConvergent, IntrNoCallback]>;
def int_nvvm_mbarrier_init_shared :
ClangBuiltin<"__nvvm_mbarrier_init_shared">,
- Intrinsic<[],[llvm_shared_i64ptr_ty, llvm_i32_ty],[IntrConvergent, IntrNoCallback]>;
+ Intrinsic<[],[llvm_shared_ptr_ty, llvm_i32_ty],[IntrConvergent, IntrNoCallback]>;
def int_nvvm_mbarrier_inval : ClangBuiltin<"__nvvm_mbarrier_inval">,
- Intrinsic<[],[llvm_i64ptr_ty],
+ Intrinsic<[],[llvm_ptr_ty],
[IntrConvergent, IntrWriteMem, IntrArgMemOnly, IntrNoCallback,
WriteOnly<ArgIndex<0>>, NoCapture<ArgIndex<0>>]>;
def int_nvvm_mbarrier_inval_shared :
ClangBuiltin<"__nvvm_mbarrier_inval_shared">,
- Intrinsic<[],[llvm_shared_i64ptr_ty],
+ Intrinsic<[],[llvm_shared_ptr_ty],
[IntrConvergent, IntrWriteMem, IntrArgMemOnly, IntrNoCallback,
WriteOnly<ArgIndex<0>>, NoCapture<ArgIndex<0>>]>;
def int_nvvm_mbarrier_arrive : ClangBuiltin<"__nvvm_mbarrier_arrive">,
- Intrinsic<[llvm_i64_ty],[llvm_i64ptr_ty],[IntrConvergent, IntrNoCallback]>;
+ Intrinsic<[llvm_i64_ty],[llvm_ptr_ty],[IntrConvergent, IntrNoCallback]>;
def int_nvvm_mbarrier_arrive_shared :
ClangBuiltin<"__nvvm_mbarrier_arrive_shared">,
- Intrinsic<[llvm_i64_ty],[llvm_shared_i64ptr_ty],[IntrConvergent, IntrNoCallback]>;
+ Intrinsic<[llvm_i64_ty],[llvm_shared_ptr_ty],[IntrConvergent, IntrNoCallback]>;
def int_nvvm_mbarrier_arrive_noComplete :
ClangBuiltin<"__nvvm_mbarrier_arrive_noComplete">,
- Intrinsic<[llvm_i64_ty],[llvm_i64ptr_ty, llvm_i32_ty],[IntrConvergent, IntrNoCallback]>;
+ Intrinsic<[llvm_i64_ty],[llvm_ptr_ty, llvm_i32_ty],[IntrConvergent, IntrNoCallback]>;
def int_nvvm_mbarrier_arrive_noComplete_shared :
ClangBuiltin<"__nvvm_mbarrier_arrive_noComplete_shared">,
- Intrinsic<[llvm_i64_ty],[llvm_shared_i64ptr_ty,
+ Intrinsic<[llvm_i64_ty],[llvm_shared_ptr_ty,
llvm_i32_ty],[IntrConvergent, IntrNoCallback]>;
def int_nvvm_mbarrier_arrive_drop :
ClangBuiltin<"__nvvm_mbarrier_arrive_drop">,
- Intrinsic<[llvm_i64_ty],[llvm_i64ptr_ty],[IntrConvergent, IntrNoCallback]>;
+ Intrinsic<[llvm_i64_ty],[llvm_ptr_ty],[IntrConvergent, IntrNoCallback]>;
def int_nvvm_mbarrier_arrive_drop_shared :
ClangBuiltin<"__nvvm_mbarrier_arrive_drop_shared">,
- Intrinsic<[llvm_i64_ty],[llvm_shared_i64ptr_ty],[IntrConvergent, IntrNoCallback]>;
+ Intrinsic<[llvm_i64_ty],[llvm_shared_ptr_ty],[IntrConvergent, IntrNoCallback]>;
def int_nvvm_mbarrier_arrive_drop_noComplete :
ClangBuiltin<"__nvvm_mbarrier_arrive_drop_noComplete">,
- Intrinsic<[llvm_i64_ty],[llvm_i64ptr_ty, llvm_i32_ty],[IntrConvergent, IntrNoCallback]>;
+ Intrinsic<[llvm_i64_ty],[llvm_ptr_ty, llvm_i32_ty],[IntrConvergent, IntrNoCallback]>;
def int_nvvm_mbarrier_arrive_drop_noComplete_shared :
ClangBuiltin<"__nvvm_mbarrier_arrive_drop_noComplete_shared">,
- Intrinsic<[llvm_i64_ty],[llvm_shared_i64ptr_ty,
+ Intrinsic<[llvm_i64_ty],[llvm_shared_ptr_ty,
llvm_i32_ty],[IntrConvergent, IntrNoCallback]>;
def int_nvvm_mbarrier_test_wait :
ClangBuiltin<"__nvvm_mbarrier_test_wait">,
- Intrinsic<[llvm_i1_ty],[llvm_i64ptr_ty, llvm_i64_ty],[IntrConvergent, IntrNoCallback]>;
+ Intrinsic<[llvm_i1_ty],[llvm_ptr_ty, llvm_i64_ty],[IntrConvergent, IntrNoCallback]>;
def int_nvvm_mbarrier_test_wait_shared :
ClangBuiltin<"__nvvm_mbarrier_test_wait_shared">,
- Intrinsic<[llvm_i1_ty],[llvm_shared_i64ptr_ty, llvm_i64_ty],[IntrConvergent, IntrNoCallback]>;
+ Intrinsic<[llvm_i1_ty],[llvm_shared_ptr_ty, llvm_i64_ty],[IntrConvergent, IntrNoCallback]>;
def int_nvvm_mbarrier_pending_count :
ClangBuiltin<"__nvvm_mbarrier_pending_count">,
// Generated within nvvm. Use for ldu on sm_20 or later. Second arg is the
// pointer's alignment.
def int_nvvm_ldu_global_i : Intrinsic<[llvm_anyint_ty],
- [LLVMAnyPointerType<LLVMMatchType<0>>, llvm_i32_ty],
+ [llvm_anyptr_ty, llvm_i32_ty],
[IntrReadMem, IntrArgMemOnly, IntrNoCallback, NoCapture<ArgIndex<0>>],
"llvm.nvvm.ldu.global.i">;
def int_nvvm_ldu_global_f : Intrinsic<[llvm_anyfloat_ty],
- [LLVMAnyPointerType<LLVMMatchType<0>>, llvm_i32_ty],
+ [llvm_anyptr_ty, llvm_i32_ty],
[IntrReadMem, IntrArgMemOnly, IntrNoCallback, NoCapture<ArgIndex<0>>],
"llvm.nvvm.ldu.global.f">;
def int_nvvm_ldu_global_p : Intrinsic<[llvm_anyptr_ty],
- [LLVMAnyPointerType<LLVMMatchType<0>>, llvm_i32_ty],
+ [llvm_anyptr_ty, llvm_i32_ty],
[IntrReadMem, IntrArgMemOnly, IntrNoCallback, NoCapture<ArgIndex<0>>],
"llvm.nvvm.ldu.global.p">;
// Generated within nvvm. Use for ldg on sm_35 or later. Second arg is the
// pointer's alignment.
def int_nvvm_ldg_global_i : Intrinsic<[llvm_anyint_ty],
- [LLVMAnyPointerType<LLVMMatchType<0>>, llvm_i32_ty],
+ [llvm_anyptr_ty, llvm_i32_ty],
[IntrReadMem, IntrArgMemOnly, IntrNoCallback, NoCapture<ArgIndex<0>>],
"llvm.nvvm.ldg.global.i">;
def int_nvvm_ldg_global_f : Intrinsic<[llvm_anyfloat_ty],
- [LLVMAnyPointerType<LLVMMatchType<0>>, llvm_i32_ty],
+ [llvm_anyptr_ty, llvm_i32_ty],
[IntrReadMem, IntrArgMemOnly, IntrNoCallback, NoCapture<ArgIndex<0>>],
"llvm.nvvm.ldg.global.f">;
def int_nvvm_ldg_global_p : Intrinsic<[llvm_anyptr_ty],
- [LLVMAnyPointerType<LLVMMatchType<0>>, llvm_i32_ty],
+ [llvm_anyptr_ty, llvm_i32_ty],
[IntrReadMem, IntrArgMemOnly, IntrNoCallback, NoCapture<ArgIndex<0>>],
"llvm.nvvm.ldg.global.p">;
// For getting the handle from a texture or surface variable
def int_nvvm_texsurf_handle
- : Intrinsic<[llvm_i64_ty], [llvm_metadata_ty, llvm_any_i64ptr_ty],
+ : Intrinsic<[llvm_i64_ty], [llvm_metadata_ty, llvm_anyptr_ty],
[IntrNoMem], "llvm.nvvm.texsurf.handle">;
def int_nvvm_texsurf_handle_internal
: Intrinsic<[llvm_i64_ty], [llvm_anyptr_ty],
[IntrNoMem, IntrSpeculatable, NoCapture<ArgIndex<0>>],
"llvm.nvvm.mapa">;
def int_nvvm_mapa_shared_cluster
- : DefaultAttrsIntrinsic<[llvm_shared_i8ptr_ty], [llvm_shared_i8ptr_ty, llvm_i32_ty],
+ : DefaultAttrsIntrinsic<[llvm_shared_ptr_ty], [llvm_shared_ptr_ty, llvm_i32_ty],
[IntrNoMem, IntrSpeculatable, NoCapture<ArgIndex<0>>],
"llvm.nvvm.mapa.shared.cluster">;
def int_nvvm_getctarank
[IntrNoMem, IntrSpeculatable, NoCapture<ArgIndex<0>>],
"llvm.nvvm.getctarank">;
def int_nvvm_getctarank_shared_cluster
- : DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_shared_i8ptr_ty],
+ : DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_shared_ptr_ty],
[IntrNoMem, IntrSpeculatable, NoCapture<ArgIndex<0>>],
"llvm.nvvm.getctarank.shared.cluster">;
def int_nvvm_is_explicit_cluster
// Input: (pointer, vl)
class RISCVUSMLoad
: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
- [LLVMPointerType<LLVMMatchType<0>>,
- llvm_anyint_ty],
+ [llvm_ptr_ty, llvm_anyint_ty],
[NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic {
let VLOperand = 1;
}
// Input: (passthru, pointer, vl)
class RISCVUSLoad
: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>,
- LLVMPointerType<LLVMMatchType<0>>,
- llvm_anyint_ty],
+ [LLVMMatchType<0>, llvm_ptr_ty, llvm_anyint_ty],
[NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic {
let VLOperand = 2;
}
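// Illustrative declaration after this change (editorial sketch): the pointer
// no longer adds an overload suffix of its own, e.g. for a unit-stride load:
//   declare <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32.i64(<vscale x 2 x i32>, ptr, i64)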
// VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
class RISCVUSLoadFF
: DefaultAttrsIntrinsic<[llvm_anyvector_ty, llvm_anyint_ty],
- [LLVMMatchType<0>,
- LLVMPointerType<LLVMMatchType<0>>, LLVMMatchType<1>],
+ [LLVMMatchType<0>, llvm_ptr_ty, LLVMMatchType<1>],
[NoCapture<ArgIndex<1>>]>,
RISCVVIntrinsic {
let VLOperand = 2;
// Input: (maskedoff, pointer, mask, vl, policy)
class RISCVUSLoadMasked
: DefaultAttrsIntrinsic<[llvm_anyvector_ty ],
- [LLVMMatchType<0>,
- LLVMPointerType<LLVMMatchType<0>>,
+ [LLVMMatchType<0>, llvm_ptr_ty,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
llvm_anyint_ty, LLVMMatchType<1>],
[NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<4>>, IntrReadMem]>,
// VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
class RISCVUSLoadFFMasked
: DefaultAttrsIntrinsic<[llvm_anyvector_ty, llvm_anyint_ty],
- [LLVMMatchType<0>,
- LLVMPointerType<LLVMMatchType<0>>,
+ [LLVMMatchType<0>, llvm_ptr_ty,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
LLVMMatchType<1>, LLVMMatchType<1>],
[NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<4>>]>, RISCVVIntrinsic {
// Input: (passthru, pointer, stride, vl)
class RISCVSLoad
: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>,
- LLVMPointerType<LLVMMatchType<0>>,
+ [LLVMMatchType<0>, llvm_ptr_ty,
llvm_anyint_ty, LLVMMatchType<1>],
[NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic {
let VLOperand = 3;
// Input: (maskedoff, pointer, stride, mask, vl, policy)
class RISCVSLoadMasked
: DefaultAttrsIntrinsic<[llvm_anyvector_ty ],
- [LLVMMatchType<0>,
- LLVMPointerType<LLVMMatchType<0>>, llvm_anyint_ty,
+ [LLVMMatchType<0>, llvm_ptr_ty, llvm_anyint_ty,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>,
LLVMMatchType<1>],
[NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<5>>, IntrReadMem]>,
// Input: (passthru, pointer, index, vl)
class RISCVILoad
: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>,
- LLVMPointerType<LLVMMatchType<0>>,
+ [LLVMMatchType<0>, llvm_ptr_ty,
llvm_anyvector_ty, llvm_anyint_ty],
[NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic {
let VLOperand = 3;
// Input: (maskedoff, pointer, index, mask, vl, policy)
class RISCVILoadMasked
: DefaultAttrsIntrinsic<[llvm_anyvector_ty ],
- [LLVMMatchType<0>,
- LLVMPointerType<LLVMMatchType<0>>, llvm_anyvector_ty,
+ [LLVMMatchType<0>, llvm_ptr_ty, llvm_anyvector_ty,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
LLVMMatchType<2>],
[NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<5>>, IntrReadMem]>,
// Input: (vector_in, pointer, vl)
class RISCVUSStore
: DefaultAttrsIntrinsic<[],
- [llvm_anyvector_ty,
- LLVMPointerType<LLVMMatchType<0>>,
- llvm_anyint_ty],
+ [llvm_anyvector_ty, llvm_ptr_ty, llvm_anyint_ty],
[NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
let VLOperand = 2;
}
// Input: (vector_in, pointer, mask, vl)
class RISCVUSStoreMasked
: DefaultAttrsIntrinsic<[],
- [llvm_anyvector_ty,
- LLVMPointerType<LLVMMatchType<0>>,
+ [llvm_anyvector_ty, llvm_ptr_ty,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
llvm_anyint_ty],
[NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
// Input: (vector_in, pointer, stride, vl)
class RISCVSStore
: DefaultAttrsIntrinsic<[],
- [llvm_anyvector_ty,
- LLVMPointerType<LLVMMatchType<0>>,
+ [llvm_anyvector_ty, llvm_ptr_ty,
llvm_anyint_ty, LLVMMatchType<1>],
[NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
let VLOperand = 3;
// Input: (vector_in, pointer, stride, mask, vl)
class RISCVSStoreMasked
: DefaultAttrsIntrinsic<[],
- [llvm_anyvector_ty,
- LLVMPointerType<LLVMMatchType<0>>, llvm_anyint_ty,
+ [llvm_anyvector_ty, llvm_ptr_ty, llvm_anyint_ty,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>],
[NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
let VLOperand = 4;
// Input: (vector_in, pointer, index, vl)
class RISCVIStore
: DefaultAttrsIntrinsic<[],
- [llvm_anyvector_ty,
- LLVMPointerType<LLVMMatchType<0>>,
+ [llvm_anyvector_ty, llvm_ptr_ty,
llvm_anyint_ty, llvm_anyint_ty],
[NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
let VLOperand = 3;
// Input: (vector_in, pointer, index, mask, vl)
class RISCVIStoreMasked
: DefaultAttrsIntrinsic<[],
- [llvm_anyvector_ty,
- LLVMPointerType<LLVMMatchType<0>>, llvm_anyvector_ty,
+ [llvm_anyvector_ty, llvm_ptr_ty, llvm_anyvector_ty,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
[NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
let VLOperand = 4;
def int_s390_etnd : ClangBuiltin<"__builtin_tx_nesting_depth">,
Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>;
- def int_s390_ntstg : Intrinsic<[], [llvm_i64_ty, llvm_ptr64_ty],
+ def int_s390_ntstg : Intrinsic<[], [llvm_i64_ty, llvm_ptr_ty],
[IntrArgMemOnly, IntrWriteMem]>;
def int_s390_ppa_txassist : ClangBuiltin<"__builtin_tx_assist">,
//===----------------------------------------------------------------------===//
// Type definition for a table in an intrinsic
-def llvm_table_ty : LLVMQualPointerType<llvm_i8_ty, 1>;
+def llvm_table_ty : LLVMQualPointerType<1>;
let TargetPrefix = "wasm" in { // All intrinsics start with "llvm.wasm.".
// These don't use default attributes, because they are not nosync.
def int_wasm_memory_atomic_wait32 :
Intrinsic<[llvm_i32_ty],
- [LLVMPointerType<llvm_i32_ty>, llvm_i32_ty, llvm_i64_ty],
+ [llvm_ptr_ty, llvm_i32_ty, llvm_i64_ty],
[IntrInaccessibleMemOrArgMemOnly, ReadOnly<ArgIndex<0>>,
NoCapture<ArgIndex<0>>, IntrHasSideEffects],
"", [SDNPMemOperand]>;
def int_wasm_memory_atomic_wait64 :
Intrinsic<[llvm_i32_ty],
- [LLVMPointerType<llvm_i64_ty>, llvm_i64_ty, llvm_i64_ty],
+ [llvm_ptr_ty, llvm_i64_ty, llvm_i64_ty],
[IntrInaccessibleMemOrArgMemOnly, ReadOnly<ArgIndex<0>>,
NoCapture<ArgIndex<0>>, IntrHasSideEffects],
"", [SDNPMemOperand]>;
def int_wasm_memory_atomic_notify:
- Intrinsic<[llvm_i32_ty], [LLVMPointerType<llvm_i32_ty>, llvm_i32_ty],
+ Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty],
[IntrInaccessibleMemOnly, NoCapture<ArgIndex<0>>,
IntrHasSideEffects],
"", [SDNPMemOperand]>;
DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_mmx_movnt_dq : ClangBuiltin<"__builtin_ia32_movntq">,
- Intrinsic<[], [llvm_ptrx86mmx_ty, llvm_x86mmx_ty], []>;
+ Intrinsic<[], [llvm_ptr_ty, llvm_x86mmx_ty], []>;
def int_x86_mmx_palignr_b : ClangBuiltin<"__builtin_ia32_palignr">,
DefaultAttrsIntrinsic<[llvm_x86mmx_ty],
return;
case IIT_EXTERNREF:
OutputTable.push_back(IITDescriptor::get(IITDescriptor::Pointer, 10));
- OutputTable.push_back(IITDescriptor::get(IITDescriptor::Struct, 0));
return;
case IIT_FUNCREF:
OutputTable.push_back(IITDescriptor::get(IITDescriptor::Pointer, 20));
- OutputTable.push_back(IITDescriptor::get(IITDescriptor::Integer, 8));
return;
case IIT_PTR:
OutputTable.push_back(IITDescriptor::get(IITDescriptor::Pointer, 0));
- DecodeIITType(NextElt, Infos, Info, OutputTable);
return;
- case IIT_ANYPTR: { // [ANYPTR addrspace, subtype]
+ case IIT_ANYPTR: // [ANYPTR addrspace]
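+    // The address space is the only payload left; with opaque pointers there
+    // is no element type to decode.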
OutputTable.push_back(IITDescriptor::get(IITDescriptor::Pointer,
Infos[NextElt++]));
- DecodeIITType(NextElt, Infos, Info, OutputTable);
return;
- }
case IIT_ARG: {
unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
OutputTable.push_back(IITDescriptor::get(IITDescriptor::Argument, ArgInfo));
return VectorType::get(DecodeFixedType(Infos, Tys, Context),
D.Vector_Width);
case IITDescriptor::Pointer:
- return PointerType::get(DecodeFixedType(Infos, Tys, Context),
- D.Pointer_AddressSpace);
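+    // Opaque pointers are identified by address space alone; no element type
+    // is materialized.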
+ return PointerType::get(Context, D.Pointer_AddressSpace);
case IITDescriptor::Struct: {
SmallVector<Type *, 8> Elts;
for (unsigned i = 0, e = D.Struct_NumElements; i != e; ++i)
}
case IITDescriptor::Pointer: {
PointerType *PT = dyn_cast<PointerType>(Ty);
- if (!PT || PT->getAddressSpace() != D.Pointer_AddressSpace)
- return true;
- if (!PT->isOpaque()) {
- /* Manually consume a pointer to empty struct descriptor, which is
- * used for externref. We don't want to enforce that the struct is
- * anonymous in this case. (This renders externref intrinsics
- * non-unique, but this will go away with opaque pointers anyway.) */
- if (Infos.front().Kind == IITDescriptor::Struct &&
- Infos.front().Struct_NumElements == 0) {
- Infos = Infos.slice(1);
- return false;
- }
- return matchIntrinsicType(PT->getNonOpaquePointerElementType(), Infos,
- ArgTys, DeferredChecks, IsDeferredCheck);
- }
- // Consume IIT descriptors relating to the pointer element type.
- // FIXME: Intrinsic type matching of nested single value types or even
- // aggregates doesn't work properly with opaque pointers but hopefully
- // doesn't happen in practice.
- while (Infos.front().Kind == IITDescriptor::Pointer ||
- Infos.front().Kind == IITDescriptor::Vector)
- Infos = Infos.slice(1);
- assert((Infos.front().Kind != IITDescriptor::Argument ||
- Infos.front().getArgumentKind() == IITDescriptor::AK_MatchType) &&
- "Unsupported polymorphic pointer type with opaque pointer");
- Infos = Infos.slice(1);
- return false;
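+    // With opaque pointers, matching a pointer argument reduces to an
+    // address-space check.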
+ return !PT || PT->getAddressSpace() != D.Pointer_AddressSpace;
}
case IITDescriptor::Struct: {
+++ /dev/null
-// RUN: llvm-tblgen -gen-intrinsic-impl -I %p/../../include %s -DTEST_INTRINSICS_SUPPRESS_DEFS | FileCheck %s
-
-// This test is validating that an Intrinsic with an LLVMPointerType to
-// llvm_any_ty still works properly after r363233. That patch rewrote the
-// substitution handling code in the Intrinsic Emitter, and didn't consider this
-// case, so TableGen would hit an assertion in EncodeFixedType that was checking
-// to ensure that the substitution being processed was correctly replaced.
-
-include "llvm/IR/Intrinsics.td"
-
-def int_has_ptr_to_any : Intrinsic<[LLVMPointerType<llvm_any_ty>, llvm_i8_ty]>;
-// CHECK: /* 0 */ 21, 14, 15, 0, 2, 0