// RUN: %clang_cc1 -fenable-matrix -triple x86_64-apple-darwin %s -emit-llvm -disable-llvm-passes -o - | FileCheck %s
+// Also check that we do not crash when running some middle-end passes. Most
+// importantly, this includes the IR verifier, to ensure we emit valid IR.
+// RUN: %clang_cc1 -fenable-matrix -emit-llvm -triple x86_64-apple-darwin %s -o %t
+
// Tests for the matrix type builtins.
typedef double dx5x5_t __attribute__((matrix_type(5, 5)));
void column_major_load_with_const_stride_double(double *Ptr) {
// CHECK-LABEL: define void @column_major_load_with_const_stride_double(double* %Ptr)
// CHECK: [[PTR:%.*]] = load double*, double** %Ptr.addr, align 8
- // CHECK-NEXT: call <25 x double> @llvm.matrix.column.major.load.v25f64.p0f64(double* align 8 [[PTR]], i64 5, i1 false, i32 5, i32 5)
+ // CHECK-NEXT: call <25 x double> @llvm.matrix.column.major.load.v25f64(double* align 8 [[PTR]], i64 5, i1 false, i32 5, i32 5)
dx5x5_t m_a1 = __builtin_matrix_column_major_load(Ptr, 5, 5, 5);
}
void column_major_load_with_const_stride2_double(double *Ptr) {
// CHECK-LABEL: define void @column_major_load_with_const_stride2_double(double* %Ptr)
// CHECK: [[PTR:%.*]] = load double*, double** %Ptr.addr, align 8
- // CHECK-NEXT: call <25 x double> @llvm.matrix.column.major.load.v25f64.p0f64(double* align 8 [[PTR]], i64 15, i1 false, i32 5, i32 5)
+ // CHECK-NEXT: call <25 x double> @llvm.matrix.column.major.load.v25f64(double* align 8 [[PTR]], i64 15, i1 false, i32 5, i32 5)
dx5x5_t m_a2 = __builtin_matrix_column_major_load(Ptr, 5, 5, 2 * 3 + 9);
}
// CHECK-LABEL: define void @column_major_load_with_variable_stride_ull_float(float* %Ptr, i64 %S)
// CHECK: [[S:%.*]] = load i64, i64* %S.addr, align 8
// CHECK-NEXT: [[PTR:%.*]] = load float*, float** %Ptr.addr, align 8
- // CHECK-NEXT: call <6 x float> @llvm.matrix.column.major.load.v6f32.p0f32(float* align 4 [[PTR]], i64 [[S]], i1 false, i32 2, i32 3)
+ // CHECK-NEXT: call <6 x float> @llvm.matrix.column.major.load.v6f32(float* align 4 [[PTR]], i64 [[S]], i1 false, i32 2, i32 3)
fx2x3_t m_b = __builtin_matrix_column_major_load(Ptr, 2, 3, S);
}
// CHECK-NEXT: [[STRIDE:%.*]] = add nsw i32 [[S]], 32
// CHECK-NEXT: [[STRIDE_EXT:%.*]] = sext i32 [[STRIDE]] to i64
// CHECK-NEXT: [[PTR:%.*]] = load i32*, i32** %Ptr.addr, align 8
- // CHECK-NEXT: call <80 x i32> @llvm.matrix.column.major.load.v80i32.p0i32(i32* align 4 [[PTR]], i64 [[STRIDE_EXT]], i1 false, i32 4, i32 20)
+ // CHECK-NEXT: call <80 x i32> @llvm.matrix.column.major.load.v80i32(i32* align 4 [[PTR]], i64 [[STRIDE_EXT]], i1 false, i32 4, i32 20)
ix4x20_t m_c = __builtin_matrix_column_major_load(Ptr, 4, 20, S + 32);
}
// CHECK-NEXT: [[STRIDE:%.*]] = add nsw i32 [[S_EXT]], 32
// CHECK-NEXT: [[STRIDE_EXT:%.*]] = sext i32 [[STRIDE]] to i64
// CHECK-NEXT: [[PTR:%.*]] = load i32*, i32** %Ptr.addr, align 8
- // CHECK-NEXT: %matrix = call <80 x i32> @llvm.matrix.column.major.load.v80i32.p0i32(i32* align 4 [[PTR]], i64 [[STRIDE_EXT]], i1 false, i32 4, i32 20)
+ // CHECK-NEXT: %matrix = call <80 x i32> @llvm.matrix.column.major.load.v80i32(i32* align 4 [[PTR]], i64 [[STRIDE_EXT]], i1 false, i32 4, i32 20)
ix4x20_t m_c = __builtin_matrix_column_major_load(Ptr, 4, 20, S + 32);
}
void column_major_load_array1(double Ptr[25]) {
// CHECK-LABEL: define void @column_major_load_array1(double* %Ptr)
// CHECK: [[ADDR:%.*]] = load double*, double** %Ptr.addr, align 8
- // CHECK-NEXT: call <25 x double> @llvm.matrix.column.major.load.v25f64.p0f64(double* align 8 [[ADDR]], i64 5, i1 false, i32 5, i32 5)
+ // CHECK-NEXT: call <25 x double> @llvm.matrix.column.major.load.v25f64(double* align 8 [[ADDR]], i64 5, i1 false, i32 5, i32 5)
dx5x5_t m = __builtin_matrix_column_major_load(Ptr, 5, 5, 5);
}
// CHECK-NEXT: entry:
// CHECK-NEXT: [[PTR:%.*]] = alloca [25 x double], align 16
// CHECK: [[ARRAY_DEC:%.*]] = getelementptr inbounds [25 x double], [25 x double]* [[PTR]], i64 0, i64 0
- // CHECK-NEXT: call <25 x double> @llvm.matrix.column.major.load.v25f64.p0f64(double* align 16 [[ARRAY_DEC]], i64 5, i1 false, i32 5, i32 5)
+ // CHECK-NEXT: call <25 x double> @llvm.matrix.column.major.load.v25f64(double* align 16 [[ARRAY_DEC]], i64 5, i1 false, i32 5, i32 5)
double Ptr[25];
dx5x5_t m = __builtin_matrix_column_major_load(Ptr, 5, 5, 5);
void column_major_load_const(const double *Ptr) {
// CHECK-LABEL: define void @column_major_load_const(double* %Ptr)
// CHECK: [[PTR:%.*]] = load double*, double** %Ptr.addr, align 8
- // CHECK-NEXT: call <25 x double> @llvm.matrix.column.major.load.v25f64.p0f64(double* align 8 [[PTR]], i64 5, i1 false, i32 5, i32 5)
+ // CHECK-NEXT: call <25 x double> @llvm.matrix.column.major.load.v25f64(double* align 8 [[PTR]], i64 5, i1 false, i32 5, i32 5)
dx5x5_t m_a1 = __builtin_matrix_column_major_load(Ptr, 5, 5, 5);
}
void column_major_load_volatile(volatile double *Ptr) {
// CHECK-LABEL: define void @column_major_load_volatile(double* %Ptr)
// CHECK: [[PTR:%.*]] = load double*, double** %Ptr.addr, align 8
- // CHECK-NEXT: call <25 x double> @llvm.matrix.column.major.load.v25f64.p0f64(double* align 8 [[PTR]], i64 5, i1 true, i32 5, i32 5)
+ // CHECK-NEXT: call <25 x double> @llvm.matrix.column.major.load.v25f64(double* align 8 [[PTR]], i64 5, i1 true, i32 5, i32 5)
dx5x5_t m_a1 = __builtin_matrix_column_major_load(Ptr, 5, 5, 5);
}
// CHECK-LABEL: define void @column_major_store_with_const_stride_double(double* %Ptr)
// CHECK: [[M:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8
// CHECK-NEXT: [[PTR:%.*]] = load double*, double** %Ptr.addr, align 8
- // CHECK-NEXT: call void @llvm.matrix.column.major.store.v25f64.p0f64(<25 x double> [[M]], double* align 8 [[PTR]], i64 5, i1 false, i32 5, i32 5)
+ // CHECK-NEXT: call void @llvm.matrix.column.major.store.v25f64(<25 x double> [[M]], double* align 8 [[PTR]], i64 5, i1 false, i32 5, i32 5)
dx5x5_t m;
__builtin_matrix_column_major_store(m, Ptr, 5);
// CHECK-LABEL: define void @column_major_store_with_const_stride2_double(double* %Ptr)
// CHECK: [[M:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8
// CHECK-NEXT: [[PTR:%.*]] = load double*, double** %Ptr.addr, align 8
- // CHECK-NEXT: call void @llvm.matrix.column.major.store.v25f64.p0f64(<25 x double> [[M]], double* align 8 [[PTR]], i64 15, i1 false, i32 5, i32 5)
+ // CHECK-NEXT: call void @llvm.matrix.column.major.store.v25f64(<25 x double> [[M]], double* align 8 [[PTR]], i64 15, i1 false, i32 5, i32 5)
dx5x5_t m;
__builtin_matrix_column_major_store(m, Ptr, 2 * 3 + 9);
// CHECK-NEXT: [[S:%.*]] = load i32, i32* %S.addr, align 4
// CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[S]], 32
// CHECK-NEXT: [[IDX:%.*]] = sext i32 [[ADD]] to i64
- // CHECK-NEXT: call void @llvm.matrix.column.major.store.v80i32.p0i32(<80 x i32> [[M]], i32* align 4 [[PTR]], i64 [[IDX]], i1 false, i32 4, i32 20)
+ // CHECK-NEXT: call void @llvm.matrix.column.major.store.v80i32(<80 x i32> [[M]], i32* align 4 [[PTR]], i64 [[IDX]], i1 false, i32 4, i32 20)
ix4x20_t m;
__builtin_matrix_column_major_store(m, Ptr, S + 32);
// CHECK-NEXT: [[EXT:%.*]] = sext i16 [[S]] to i32
// CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[EXT]], 2
// CHECK-NEXT: [[IDX:%.*]] = sext i32 [[ADD]] to i64
- // CHECK-NEXT: call void @llvm.matrix.column.major.store.v80i32.p0i32(<80 x i32> [[M]], i32* align 4 [[PTR]], i64 [[IDX]], i1 false, i32 4, i32 20)
+ // CHECK-NEXT: call void @llvm.matrix.column.major.store.v80i32(<80 x i32> [[M]], i32* align 4 [[PTR]], i64 [[IDX]], i1 false, i32 4, i32 20)
ix4x20_t m;
__builtin_matrix_column_major_store(m, Ptr, S + 2);
// CHECK-LABEL: define void @column_major_store_array1(double* %Ptr)
// CHECK: [[M:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8
// CHECK-NEXT: [[PTR:%.*]] = load double*, double** %Ptr.addr, align 8
- // CHECK-NEXT: call void @llvm.matrix.column.major.store.v25f64.p0f64(<25 x double> [[M]], double* align 8 [[PTR]], i64 5, i1 false, i32 5, i32 5)
+ // CHECK-NEXT: call void @llvm.matrix.column.major.store.v25f64(<25 x double> [[M]], double* align 8 [[PTR]], i64 5, i1 false, i32 5, i32 5)
dx5x5_t m;
__builtin_matrix_column_major_store(m, Ptr, 5);
// CHECK-LABEL: define void @column_major_store_array2()
// CHECK: [[M:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8
// CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [25 x double], [25 x double]* %Ptr, i64 0, i64 0
- // CHECK-NEXT: call void @llvm.matrix.column.major.store.v25f64.p0f64(<25 x double> [[M]], double* align 16 [[PTR]], i64 5, i1 false, i32 5, i32 5)
+ // CHECK-NEXT: call void @llvm.matrix.column.major.store.v25f64(<25 x double> [[M]], double* align 16 [[PTR]], i64 5, i1 false, i32 5, i32 5)
double Ptr[25];
dx5x5_t m;
// CHECK-LABEL: define void @column_major_store_volatile(double* %Ptr) #0 {
// CHECK: [[M:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8
// CHECK-NEXT: [[PTR:%.*]] = load double*, double** %Ptr.addr, align 8
- // CHECK-NEXT: call void @llvm.matrix.column.major.store.v25f64.p0f64(<25 x double> [[M]], double* align 8 [[PTR]], i64 5, i1 true, i32 5, i32 5)
+ // CHECK-NEXT: call void @llvm.matrix.column.major.store.v25f64(<25 x double> [[M]], double* align 8 [[PTR]], i64 5, i1 true, i32 5, i32 5)
dx5x5_t m;
__builtin_matrix_column_major_store(m, Ptr, 5);
// CHECK-LABEL: define linkonce_odr <40 x double> @_Z29column_major_load_with_strideIdLj10ELj4ELj15EEU11matrix_typeXT0_EXT1_ET_PS0_(double* %Ptr)
// CHECK: [[PTR:%.*]] = load double*, double** %Ptr.addr, align 8
- // CHECK-NEXT: call <40 x double> @llvm.matrix.column.major.load.v40f64.p0f64(double* align 8 [[PTR]], i64 15, i1 false, i32 10, i32 4)
+ // CHECK-NEXT: call <40 x double> @llvm.matrix.column.major.load.v40f64(double* align 8 [[PTR]], i64 15, i1 false, i32 10, i32 4)
matrix_t<double, 10, 4> M1 = column_major_load_with_stride<double, 10, 4, 15>(Ptr);
}
// CHECK-LABEL: define linkonce_odr <6 x i32> @_Z29column_major_load_with_strideIiLj3ELj2ELj12EEU11matrix_typeXT0_EXT1_ET_PS0_(i32* %Ptr)
// CHECK: [[PTR:%.*]] = load i32*, i32** %Ptr.addr, align 8
- // CHECK-NEXT: call <6 x i32> @llvm.matrix.column.major.load.v6i32.p0i32(i32* align 4 [[PTR]], i64 12, i1 false, i32 3, i32 2)
+ // CHECK-NEXT: call <6 x i32> @llvm.matrix.column.major.load.v6i32(i32* align 4 [[PTR]], i64 12, i1 false, i32 3, i32 2)
matrix_t<int, 3, 2> M1 = column_major_load_with_stride<int, 3, 2, 12>(Ptr);
}
// CHECK-NEXT: [[STRIDE:%.*]] = call i32 @_ZN15UnsignedWrappercvjEv(%struct.UnsignedWrapper* [[W]])
// CHECK-NEXT: [[STRIDE_EXT:%.*]] = zext i32 [[STRIDE]] to i64
// CHECK-NEXT: [[PTR:%.*]] = load i32*, i32** %Ptr.addr, align 8
- // CHECK-NEXT: call <4 x i32> @llvm.matrix.column.major.load.v4i32.p0i32(i32* align 4 [[PTR]], i64 [[STRIDE_EXT]], i1 false, i32 2, i32 2)
+ // CHECK-NEXT: call <4 x i32> @llvm.matrix.column.major.load.v4i32(i32* align 4 [[PTR]], i64 [[STRIDE_EXT]], i1 false, i32 2, i32 2)
matrix_t<int, 2, 2> M1 = __builtin_matrix_column_major_load(Ptr, 2, 2, W);
}
void test_column_major_load_constexpr_num_rows(int *Ptr) {
// CHECK-LABEL: define void @_Z41test_column_major_load_constexpr_num_rowsPi(i32* %Ptr)
// CHECK: [[PTR:%.*]] = load i32*, i32** %Ptr.addr, align 8
- // CHECK-NEXT: call <6 x i32> @llvm.matrix.column.major.load.v6i32.p0i32(i32* align 4 [[PTR]], i64 3, i1 false, i32 3, i32 2)
+ // CHECK-NEXT: call <6 x i32> @llvm.matrix.column.major.load.v6i32(i32* align 4 [[PTR]], i64 3, i1 false, i32 3, i32 2)
matrix_t<int, 3, 2> M1 = __builtin_matrix_column_major_load(Ptr, constexpr3(), 2, 3);
}
void test_column_major_load_constexpr_num_columns(int *Ptr) {
// CHECK-LABEL: define void @_Z44test_column_major_load_constexpr_num_columnsPi(i32* %Ptr)
// CHECK: [[PTR:%.*]] = load i32*, i32** %Ptr.addr, align 8
- // CHECK-NEXT: call <2 x i32> @llvm.matrix.column.major.load.v2i32.p0i32(i32* align 4 [[PTR]], i64 3, i1 false, i32 2, i32 1)
+ // CHECK-NEXT: call <2 x i32> @llvm.matrix.column.major.load.v2i32(i32* align 4 [[PTR]], i64 3, i1 false, i32 2, i32 1)
matrix_t<int, 2, 1> M1 = __builtin_matrix_column_major_load(Ptr, 2, constexpr1(), 3);
}
void test_column_major_load_constexpr_num_columns_temp(int *Ptr) {
// CHECK-LABEL: define void @_Z49test_column_major_load_constexpr_num_columns_tempPi(i32* %Ptr)
// CHECK: [[PTR:%.*]] = load i32*, i32** %Ptr.addr, align 8
- // CHECK-NEXT: call <10 x i32> @llvm.matrix.column.major.load.v10i32.p0i32(i32* align 4 [[PTR]], i64 3, i1 false, i32 2, i32 5)
+ // CHECK-NEXT: call <10 x i32> @llvm.matrix.column.major.load.v10i32(i32* align 4 [[PTR]], i64 3, i1 false, i32 2, i32 5)
matrix_t<int, 2, 5> M1 = __builtin_matrix_column_major_load(Ptr, 2, constexpr_plus1<4>(), 3);
}
// CHECK: [[STRIDE:%.*]] = call i32 @_Z10constexpr3v()
// CHECK-NEXT: [[STRIDE_EXT:%.*]] = sext i32 [[STRIDE]] to i64
// CHECK-NEXT: [[PTR:%.*]] = load i32*, i32** %Ptr.addr, align 8
- // CHECK-NEXT: call <4 x i32> @llvm.matrix.column.major.load.v4i32.p0i32(i32* align 4 [[PTR]], i64 [[STRIDE_EXT]], i1 false, i32 2, i32 2)
+ // CHECK-NEXT: call <4 x i32> @llvm.matrix.column.major.load.v4i32(i32* align 4 [[PTR]], i64 [[STRIDE_EXT]], i1 false, i32 2, i32 2)
matrix_t<int, 2, 2> M1 = __builtin_matrix_column_major_load(Ptr, 2, 2, constexpr3());
}
// CHECK-LABEL: define linkonce_odr void @_Z30column_major_store_with_strideIdLj10ELj4ELj15EEvRU11matrix_typeXT0_EXT1_ET_PS0_([40 x double]* nonnull align 8 dereferenceable(320) %m, double* %Ptr)
// CHECK: [[M:%.*]] = load <40 x double>, <40 x double>* {{.*}}, align 8
// CHECK-NEXT: [[PTR:%.*]] = load double*, double** %Ptr.addr, align 8
- // CHECK-NEXT: call void @llvm.matrix.column.major.store.v40f64.p0f64(<40 x double> [[M]], double* align 8 [[PTR]], i64 15, i1 false, i32 10, i32 4)
+ // CHECK-NEXT: call void @llvm.matrix.column.major.store.v40f64(<40 x double> [[M]], double* align 8 [[PTR]], i64 15, i1 false, i32 10, i32 4)
matrix_t<double, 10, 4> M1;
column_major_store_with_stride<double, 10, 4, 15>(M1, Ptr);
// CHECK-LABEL: define linkonce_odr void @_Z30column_major_store_with_strideIiLj3ELj2ELj3EEvRU11matrix_typeXT0_EXT1_ET_PS0_([6 x i32]* nonnull align 4 dereferenceable(24) %m, i32* %Ptr)
// CHECK: [[M:%.*]] = load <6 x i32>, <6 x i32>* {{.*}}, align 4
// CHECK-NEXT: [[PTR:%.*]] = load i32*, i32** %Ptr.addr, align 8
- // CHECK-NEXT: call void @llvm.matrix.column.major.store.v6i32.p0i32(<6 x i32> [[M]], i32* align 4 [[PTR]], i64 3, i1 false, i32 3, i32 2)
+ // CHECK-NEXT: call void @llvm.matrix.column.major.store.v6i32(<6 x i32> [[M]], i32* align 4 [[PTR]], i64 3, i1 false, i32 3, i32 2)
matrix_t<int, 3, 2> M1;
column_major_store_with_stride<int, 3, 2, 3>(M1, Ptr);
// CHECK-NEXT: [[W:%.*]] = load %struct.UnsignedWrapper*, %struct.UnsignedWrapper** %W.addr, align 8
// CHECK-NEXT: [[IDX:%.*]] = call i32 @_ZN15UnsignedWrappercvjEv(%struct.UnsignedWrapper* [[W]])
// CHECK-NEXT: [[IDX_EXT:%.*]] = zext i32 [[IDX]] to i64
- // CHECK-NEXT: call void @llvm.matrix.column.major.store.v4i32.p0i32(<4 x i32> [[M]], i32* align 4 [[PTR]], i64 [[IDX_EXT]], i1 false, i32 2, i32 2)
+ // CHECK-NEXT: call void @llvm.matrix.column.major.store.v4i32(<4 x i32> [[M]], i32* align 4 [[PTR]], i64 [[IDX_EXT]], i1 false, i32 2, i32 2)
matrix_t<int, 2, 2> M1;
__builtin_matrix_column_major_store(M1, Ptr, W);
// CHECK-NEXT: [[PTR:%.*]] = load i32*, i32** %Ptr.addr, align 8
// CHECK-NEXT: [[IDX:%.*]] = call i32 @_Z10constexpr3v()
// CHECK-NEXT: [[IDX_EXT:%.*]] = sext i32 [[IDX]] to i64
- // CHECK-NEXT: call void @llvm.matrix.column.major.store.v4i32.p0i32(<4 x i32> [[M]], i32* align 4 [[PTR]], i64 [[IDX_EXT]], i1 false, i32 2, i32 2)
+ // CHECK-NEXT: call void @llvm.matrix.column.major.store.v4i32(<4 x i32> [[M]], i32* align 4 [[PTR]], i64 [[IDX_EXT]], i1 false, i32 2, i32 2)
matrix_t<int, 2, 2> M;
__builtin_matrix_column_major_store(M, Ptr, constexpr3());
// CHECK: [[STRIDE:%.*]] = call i32 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i32 (i8*, i8*)*)
// CHECK-NEXT: [[STRIDE_EXT:%.*]] = sext i32 [[STRIDE]] to i64
// CHECK: [[PTR:%.*]] = call i32* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i32* (i8*, i8*)*)
- // CHECK-NEXT: call <12 x i32> @llvm.matrix.column.major.load.v12i32.p0i32(i32* align 4 [[PTR]], i64 [[STRIDE_EXT]], i1 false, i32 3, i32 4)
+ // CHECK-NEXT: call <12 x i32> @llvm.matrix.column.major.load.v12i32(i32* align 4 [[PTR]], i64 [[STRIDE_EXT]], i1 false, i32 3, i32 4)
u3x4 m = __builtin_matrix_column_major_load(Ptr.value, 3, 4, Stride.value);
}
// CHECK: [[PTR:%.*]] = call i32* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i32* (i8*, i8*)*)
// CHECK: [[IDX:%.*]] = call i32 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i32 (i8*, i8*)*)
// CHECK-NEXT: [[IDX_EXT:%.*]] = sext i32 [[IDX]] to i64
- // CHECK-NEXT: call void @llvm.matrix.column.major.store.v12i32.p0i32(<12 x i32> [[M]], i32* align 4 [[PTR]], i64 [[IDX_EXT]], i1 false, i32 3, i32 4)
+ // CHECK-NEXT: call void @llvm.matrix.column.major.store.v12i32(<12 x i32> [[M]], i32* align 4 [[PTR]], i64 [[IDX_EXT]], i1 false, i32 3, i32 4)
__builtin_matrix_column_major_store(M.value, Ptr.value, Stride.value);
}
Value *Ops[] = {DataPtr, Stride, B.getInt1(IsVolatile), B.getInt32(Rows),
B.getInt32(Columns)};
- Type *OverloadedTypes[] = {RetType, PtrTy};
+ Type *OverloadedTypes[] = {RetType};
Function *TheFn = Intrinsic::getDeclaration(
getModule(), Intrinsic::matrix_column_major_load, OverloadedTypes);
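This hunk is the crux of the change: only the vector return type is left in the overload list handed to Intrinsic::getDeclaration, so the pointer type no longer participates in the intrinsic's name mangling, and the ".p0f64"-style suffix disappears from every call site updated above. A minimal sketch of the effect on the mangled name, assuming the Intrinsic::getName overload of this era that takes a list of overload types:

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include <cassert>

// Sketch: the mangled suffix is derived purely from the overloaded type list.
void mangledNameSketch(llvm::LLVMContext &Ctx) {
  llvm::Type *VecTy =
      llvm::FixedVectorType::get(llvm::Type::getDoubleTy(Ctx), 25);
  // Overloading only on the vector type yields a single name suffix.
  std::string Name = llvm::Intrinsic::getName(
      llvm::Intrinsic::matrix_column_major_load, {VecTy});
  assert(Name == "llvm.matrix.column.major.load.v25f64");
  // With {VecTy, PtrTy}, as before this change, the name would instead be
  // "llvm.matrix.column.major.load.v25f64.p0f64".
}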
Value *Ops[] = {Matrix, Ptr,
Stride, B.getInt1(IsVolatile),
B.getInt32(Rows), B.getInt32(Columns)};
- Type *OverloadedTypes[] = {Matrix->getType(), Ptr->getType()};
+ Type *OverloadedTypes[] = {Matrix->getType()};
Function *TheFn = Intrinsic::getDeclaration(
getModule(), Intrinsic::matrix_column_major_store, OverloadedTypes);
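The store side mirrors the load: with the pointer dropped from the overload list, the intrinsic's pointer parameter is fixed by the vector's element type instead (visible in the updated declarations at the end of this diff, e.g. "float* nocapture writeonly" for the v48f32 store). A short sketch under the same API assumptions:

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include <cassert>

void declarationSketch() {
  llvm::LLVMContext Ctx;
  llvm::Module M("sketch", Ctx);
  llvm::Type *VecTy =
      llvm::FixedVectorType::get(llvm::Type::getDoubleTy(Ctx), 25);
  // Request the store declaration overloaded only on the vector type.
  llvm::Function *Fn = llvm::Intrinsic::getDeclaration(
      &M, llvm::Intrinsic::matrix_column_major_store, {VecTy});
  assert(Fn->getName() == "llvm.matrix.column.major.store.v25f64");
  // The pointer operand (argument 1) is typed as pointer-to-element, i.e.
  // double* here, rather than being an independently overloaded any-pointer.
  assert(Fn->getArg(1)->getType() ==
         llvm::Type::getDoubleTy(Ctx)->getPointerTo());
}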
// CHECK: call <48 x float> @llvm.matrix.transpose.v48f32(<48 x float> %1, i32 3, i32 16)
%D = llvm.intr.matrix.transpose %B { rows = 3: i32, columns = 16: i32} :
!llvm<"<48 x float>"> into !llvm<"<48 x float>">
- // CHECK: call <48 x float> @llvm.matrix.column.major.load.v48f32.p0f32(float* align 4 %2, i64 %3, i1 false, i32 3, i32 16)
+ // CHECK: call <48 x float> @llvm.matrix.column.major.load.v48f32(float* align 4 %2, i64 %3, i1 false, i32 3, i32 16)
%E = llvm.intr.matrix.column.major.load %ptr, <stride=%stride>
{ isVolatile = 0: i1, rows = 3: i32, columns = 16: i32} :
!llvm<"<48 x float>"> from !llvm<"float*"> stride !llvm.i64
- // CHECK: call void @llvm.matrix.column.major.store.v48f32.p0f32(<48 x float> %7, float* align 4 %2, i64 %3, i1 false, i32 3, i32 16)
+ // CHECK: call void @llvm.matrix.column.major.store.v48f32(<48 x float> %7, float* align 4 %2, i64 %3, i1 false, i32 3, i32 16)
llvm.intr.matrix.column.major.store %E, %ptr, <stride=%stride>
{ isVolatile = 0: i1, rows = 3: i32, columns = 16: i32} :
!llvm<"<48 x float>"> to !llvm<"float*"> stride !llvm.i64
// CHECK-DAG: declare float @llvm.copysign.f32(float, float)
// CHECK-DAG: declare <12 x float> @llvm.matrix.multiply.v12f32.v64f32.v48f32(<64 x float>, <48 x float>, i32 immarg, i32 immarg, i32 immarg)
// CHECK-DAG: declare <48 x float> @llvm.matrix.transpose.v48f32(<48 x float>, i32 immarg, i32 immarg)
-// CHECK-DAG: declare <48 x float> @llvm.matrix.column.major.load.v48f32.p0f32(float* nocapture, i64, i1 immarg, i32 immarg, i32 immarg)
-// CHECK-DAG: declare void @llvm.matrix.column.major.store.v48f32.p0f32(<48 x float>, float* nocapture writeonly, i64, i1 immarg, i32 immarg, i32 immarg)
+// CHECK-DAG: declare <48 x float> @llvm.matrix.column.major.load.v48f32(float* nocapture, i64, i1 immarg, i32 immarg, i32 immarg)
+// CHECK-DAG: declare void @llvm.matrix.column.major.store.v48f32(<48 x float>, float* nocapture writeonly, i64, i1 immarg, i32 immarg, i32 immarg)
// CHECK-DAG: declare <7 x float> @llvm.masked.load.v7f32.p0v7f32(<7 x float>*, i32 immarg, <7 x i1>, <7 x float>)
// CHECK-DAG: declare void @llvm.masked.store.v7f32.p0v7f32(<7 x float>, <7 x float>*, i32 immarg, <7 x i1>)
// CHECK-DAG: declare void @llvm.memcpy.p0i8.p0i8.i32(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i32, i1 immarg)