// else is not lossless. Conservatively assume we can't losslessly convert
// between pointers with different address spaces.
if (auto *PTy = dyn_cast<PointerType>(this)) {
- if (auto *OtherPTy = dyn_cast<PointerType>(Ty)) {
- // Don't bitcast "load <256 x i32>, <256 x i32>*" to
- // "load x86_amx, x86_amx*", because we don't have a corresponding
- // instruction to load x86_amx. Doing the transform causes trouble
- // to lower "load x86_amx" instruction in backend.
- if (OtherPTy->getElementType()->isX86_AMXTy())
- return false;
+ if (auto *OtherPTy = dyn_cast<PointerType>(Ty))
return PTy->getAddressSpace() == OtherPTy->getAddressSpace();
- }
return false;
}
return false; // Other types have no identity values
bool ArrayType::isValidElementType(Type *ElemTy) {
// NOTE(review): the '-'/'+' prefixes below are unified-diff markers, not C++.
// The change adds isX86_AMXTy() to the exclusion list: per the hunk earlier in
// this patch, there is no instruction to load/store an x86_amx value, so an
// array whose elements are x86_amx could never be materialized — reject it
// alongside void/label/metadata/function/token and scalable vectors.
return !ElemTy->isVoidTy() && !ElemTy->isLabelTy() &&
!ElemTy->isMetadataTy() && !ElemTy->isFunctionTy() &&
- !ElemTy->isTokenTy() && !isa<ScalableVectorType>(ElemTy);
+ !ElemTy->isTokenTy() && !ElemTy->isX86_AMXTy() &&
+ !isa<ScalableVectorType>(ElemTy);
}
//===----------------------------------------------------------------------===//
bool PointerType::isValidElementType(Type *ElemTy) {
// NOTE(review): the '-'/'+' prefixes below are unified-diff markers, not C++.
// The change forbids x86_amx as a pointer element type. This is consistent
// with the canLosslesslyBitCastTo hunk earlier in this patch: "load x86_amx"
// cannot be lowered by the backend, so a pointer-to-x86_amx would be a type
// that can never be dereferenced.
return !ElemTy->isVoidTy() && !ElemTy->isLabelTy() &&
- !ElemTy->isMetadataTy() && !ElemTy->isTokenTy();
+ !ElemTy->isMetadataTy() && !ElemTy->isTokenTy() &&
+ !ElemTy->isX86_AMXTy();
}
bool PointerType::isLoadableOrStorableType(Type *ElemTy) {
"Function takes metadata but isn't an intrinsic", &Arg, &F);
Assert(!Arg.getType()->isTokenTy(),
"Function takes token but isn't an intrinsic", &Arg, &F);
+ Assert(!Arg.getType()->isX86_AMXTy(),
+ "Function takes x86_amx but isn't an intrinsic", &Arg, &F);
}
// Check that swifterror argument is only used by loads and stores.
++i;
}
- if (!isLLVMdotName)
+ if (!isLLVMdotName) {
Assert(!F.getReturnType()->isTokenTy(),
- "Functions returns a token but isn't an intrinsic", &F);
+ "Function returns a token but isn't an intrinsic", &F);
+ Assert(!F.getReturnType()->isX86_AMXTy(),
+ "Function returns a x86_amx but isn't an intrinsic", &F);
+ }
// Get the function metadata attachments.
SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
}
// Verify that indirect calls don't return tokens.
- if (!Call.getCalledFunction())
+ if (!Call.getCalledFunction()) {
Assert(!FTy->getReturnType()->isTokenTy(),
"Return type cannot be token for indirect call!");
+ Assert(!FTy->getReturnType()->isX86_AMXTy(),
+ "Return type cannot be x86_amx for indirect call!");
+ }
if (Function *F = Call.getCalledFunction())
if (Intrinsic::ID ID = (Intrinsic::ID)F->getIntrinsicID())
// If the intrinsic takes MDNode arguments, verify that they are either global
// or are local to *this* function.
- for (Value *V : Call.args())
+ for (Value *V : Call.args()) {
if (auto *MD = dyn_cast<MetadataAsValue>(V))
visitMetadataAsValue(*MD, Call.getCaller());
+ if (auto *Const = dyn_cast<Constant>(V))
+ Assert(!Const->getType()->isX86_AMXTy(),
+ "const x86_amx is not allowed in argument!");
+ }
switch (ID) {
default:
--- /dev/null
+; RUN: not llc %s -o /dev/null 2>&1 | FileCheck %s
+
+; Verifier regression test for the x86_amx restrictions added in this patch:
+; passing a constant expression of type x86_amx (a bitcast of a literal
+; <256 x i32> vector) as a call argument must be rejected by the verifier,
+; so llc is expected to fail ("not llc") and emit the diagnostic matched by
+; the CHECK line below.
+@buf = dso_local global [1024 x i8] zeroinitializer, align 16
+
+define dso_local void @test_tile_init(i16 signext %row, i16 signext %col) {
+entry:
+ tail call void @llvm.x86.tilestored64.internal(i16 %row, i16 %col, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @buf, i64 0, i64 0), i64 64, x86_amx bitcast (<256 x i32> <i32 1, i32 2, i32 3, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0> to x86_amx))
+ ret void
+}
+; CHECK: const x86_amx is not allowed in argument!
+
+declare void @llvm.x86.tilestored64.internal(i16, i16, i8*, i64, x86_amx)