The special-case parsing of bare `func` operations is being removed from MLIR, so every function must now be spelled with its fully qualified `func.func` form from the `func` dialect. The hunks below mechanically apply that rename across the SparseTensor dialect tests; CHECK lines, encodings, and test bodies are otherwise unchanged.
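As a minimal sketch of the pattern every hunk follows (using a hypothetical `@example` function, in the same diff style as the tests below):

-func @example(%arg0: f64) -> f64 {
+func.func @example(%arg0: f64) -> f64 {
return %arg0 : f64
}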
// CHECK: %[[C:.*]] = arith.constant 0 : index
// CHECK: %[[D:.*]] = call @sparseDimSize(%[[A]], %[[C]])
// CHECK: return %[[D]] : index
-func @sparse_dim1d(%arg0: tensor<?xf64, #SparseVector>) -> index {
+func.func @sparse_dim1d(%arg0: tensor<?xf64, #SparseVector>) -> index {
%c = arith.constant 0 : index
%0 = tensor.dim %arg0, %c : tensor<?xf64, #SparseVector>
return %0 : index
// CHECK: %[[C:.*]] = arith.constant 2 : index
// CHECK: %[[D:.*]] = call @sparseDimSize(%[[A]], %[[C]])
// CHECK: return %[[D]] : index
-func @sparse_dim3d(%arg0: tensor<?x?x?xf64, #SparseTensor>) -> index {
+func.func @sparse_dim3d(%arg0: tensor<?x?x?xf64, #SparseTensor>) -> index {
// Querying for dimension 1 in the tensor type needs to be
// permuted into querying for dimension 2 in the stored sparse
// tensor scheme, since the latter honors the dimOrdering.
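The `#SparseTensor` encoding definition is elided from these hunks; the permutation described above assumes an encoding along these lines (an assumption for illustration, not quoted from the patch):

#SparseTensor = #sparse_tensor.encoding<{
  dimLevelType = ["dense", "compressed", "compressed"],
  dimOrdering = affine_map<(i,j,k) -> (k,i,j)>
}>

Under this map, tensor dimension 1 (j) lands at storage position 2, which is why the lowered code calls @sparseDimSize with the constant 2.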
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
// CHECK: %[[C:.*]] = arith.constant 20 : index
// CHECK: return %[[C]] : index
-func @sparse_dim3d_const(%arg0: tensor<10x20x30xf64, #SparseTensor>) -> index {
+func.func @sparse_dim3d_const(%arg0: tensor<10x20x30xf64, #SparseTensor>) -> index {
// Querying for dimension 1 in the tensor type can be directly
// folded into the right value (even though it corresponds
// to dimension 2 in the stored sparse tensor scheme).
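By contrast, no runtime call is needed here: `tensor.dim` with a constant index on a statically sized dimension folds in the tensor type domain, before any storage permutation applies. A small sketch of the fold (hypothetical values, same encoding assumption as above):

%c1 = arith.constant 1 : index
%d = tensor.dim %t, %c1 : tensor<10x20x30xf64, #SparseTensor>
// After folding: %d = arith.constant 20 : index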
// CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<1xindex> to memref<?xindex>
// CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[FromFile]], %[[A]])
// CHECK: return %[[T]] : !llvm.ptr<i8>
-func @sparse_new1d(%arg0: !llvm.ptr<i8>) -> tensor<128xf64, #SparseVector> {
+func.func @sparse_new1d(%arg0: !llvm.ptr<i8>) -> tensor<128xf64, #SparseVector> {
%0 = sparse_tensor.new %arg0 : !llvm.ptr<i8> to tensor<128xf64, #SparseVector>
return %0 : tensor<128xf64, #SparseVector>
}
// CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<2xindex> to memref<?xindex>
// CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[FromFile]], %[[A]])
// CHECK: return %[[T]] : !llvm.ptr<i8>
-func @sparse_new2d(%arg0: !llvm.ptr<i8>) -> tensor<?x?xf32, #SparseMatrix> {
+func.func @sparse_new2d(%arg0: !llvm.ptr<i8>) -> tensor<?x?xf32, #SparseMatrix> {
%0 = sparse_tensor.new %arg0 : !llvm.ptr<i8> to tensor<?x?xf32, #SparseMatrix>
return %0 : tensor<?x?xf32, #SparseMatrix>
}
// CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<3xindex> to memref<?xindex>
// CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[FromFile]], %[[A]])
// CHECK: return %[[T]] : !llvm.ptr<i8>
-func @sparse_new3d(%arg0: !llvm.ptr<i8>) -> tensor<?x?x?xf32, #SparseTensor> {
+func.func @sparse_new3d(%arg0: !llvm.ptr<i8>) -> tensor<?x?x?xf32, #SparseTensor> {
%0 = sparse_tensor.new %arg0 : !llvm.ptr<i8> to tensor<?x?x?xf32, #SparseTensor>
return %0 : tensor<?x?x?xf32, #SparseTensor>
}
// CHECK: %[[NP:.*]] = llvm.mlir.null : !llvm.ptr<i8>
// CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[Empty]], %[[NP]])
// CHECK: return %[[T]] : !llvm.ptr<i8>
-func @sparse_init(%arg0: index, %arg1: index) -> tensor<?x?xf64, #SparseMatrix> {
+func.func @sparse_init(%arg0: index, %arg1: index) -> tensor<?x?xf64, #SparseMatrix> {
%0 = sparse_tensor.init [%arg0, %arg1] : tensor<?x?xf64, #SparseMatrix>
return %0 : tensor<?x?xf64, #SparseMatrix>
}
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
// CHECK: call @delSparseTensor(%[[A]]) : (!llvm.ptr<i8>) -> ()
// CHECK: return
-func @sparse_release(%arg0: tensor<128xf64, #SparseVector>) {
+func.func @sparse_release(%arg0: tensor<128xf64, #SparseVector>) {
sparse_tensor.release %arg0 : tensor<128xf64, #SparseVector>
return
}
// CHECK-LABEL: func @sparse_nop_convert(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> !llvm.ptr<i8>
// CHECK: return %[[A]] : !llvm.ptr<i8>
-func @sparse_nop_convert(%arg0: tensor<64xf32, #SparseVector>) -> tensor<64xf32, #SparseVector> {
+func.func @sparse_nop_convert(%arg0: tensor<64xf32, #SparseVector>) -> tensor<64xf32, #SparseVector> {
%0 = sparse_tensor.convert %arg0 : tensor<64xf32, #SparseVector> to tensor<64xf32, #SparseVector>
return %0 : tensor<64xf32, #SparseVector>
}
// CHECK-LABEL: func @sparse_hidden_nop_cast(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> !llvm.ptr<i8>
// CHECK: return %[[A]] : !llvm.ptr<i8>
-func @sparse_hidden_nop_cast(%arg0: tensor<32xf32, #SparseVector>) -> tensor<?xf32, #SparseVector> {
+func.func @sparse_hidden_nop_cast(%arg0: tensor<32xf32, #SparseVector>) -> tensor<?xf32, #SparseVector> {
%0 = sparse_tensor.convert %arg0 : tensor<32xf32, #SparseVector> to tensor<?xf32, #SparseVector>
return %0 : tensor<?xf32, #SparseVector>
}
// CHECK-LABEL: func @sparse_nop_cast(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> !llvm.ptr<i8>
// CHECK: return %[[A]] : !llvm.ptr<i8>
-func @sparse_nop_cast(%arg0: tensor<64xf32, #SparseVector>) -> tensor<?xf32, #SparseVector> {
+func.func @sparse_nop_cast(%arg0: tensor<64xf32, #SparseVector>) -> tensor<?xf32, #SparseVector> {
%0 = tensor.cast %arg0 : tensor<64xf32, #SparseVector> to tensor<?xf32, #SparseVector>
return %0 : tensor<?xf32, #SparseVector>
}
// CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[FromCOO]], %[[C]])
// CHECK: call @delSparseTensorCOOI32(%[[C]])
// CHECK: return %[[T]] : !llvm.ptr<i8>
-func @sparse_convert_1d(%arg0: tensor<?xi32>) -> tensor<?xi32, #SparseVector> {
+func.func @sparse_convert_1d(%arg0: tensor<?xi32>) -> tensor<?xi32, #SparseVector> {
%0 = sparse_tensor.convert %arg0 : tensor<?xi32> to tensor<?xi32, #SparseVector>
return %0 : tensor<?xi32, #SparseVector>
}
// CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[FromCOO]], %[[C]])
// CHECK: call @delSparseTensorCOOF32(%[[C]])
// CHECK: return %[[T]] : !llvm.ptr<i8>
-func @sparse_convert_1d_ss(%arg0: tensor<?xf32, #SparseVector64>) -> tensor<?xf32, #SparseVector32> {
+func.func @sparse_convert_1d_ss(%arg0: tensor<?xf32, #SparseVector64>) -> tensor<?xf32, #SparseVector32> {
%0 = sparse_tensor.convert %arg0 : tensor<?xf32, #SparseVector64> to tensor<?xf32, #SparseVector32>
return %0 : tensor<?xf32, #SparseVector32>
}
// CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[FromCOO]], %[[C]])
// CHECK: call @delSparseTensorCOOF64(%[[C]])
// CHECK: return %[[T]] : !llvm.ptr<i8>
-func @sparse_convert_2d(%arg0: tensor<2x4xf64>) -> tensor<2x4xf64, #SparseMatrix> {
+func.func @sparse_convert_2d(%arg0: tensor<2x4xf64>) -> tensor<2x4xf64, #SparseMatrix> {
%0 = sparse_tensor.convert %arg0 : tensor<2x4xf64> to tensor<2x4xf64, #SparseMatrix>
return %0 : tensor<2x4xf64, #SparseMatrix>
}
// CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[FromCOO]], %[[C]])
// CHECK: call @delSparseTensorCOOF32(%[[C]])
// CHECK: return %[[T]] : !llvm.ptr<i8>
-func @sparse_constant() -> tensor<8x7xf32, #SparseMatrix>{
+func.func @sparse_constant() -> tensor<8x7xf32, #SparseMatrix>{
// Initialize a tensor.
%0 = arith.constant sparse<[[0, 0], [1, 6]], [1.0, 5.0]> : tensor<8x7xf32>
// Convert the tensor to a sparse tensor.
%1 = sparse_tensor.convert %0 : tensor<8x7xf32> to tensor<8x7xf32, #SparseMatrix>
return %1 : tensor<8x7xf32, #SparseMatrix>
}
// CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[FromCOO]], %[[C]])
// CHECK: call @delSparseTensorCOOF64(%[[C]])
// CHECK: return %[[T]] : !llvm.ptr<i8>
-func @sparse_convert_3d(%arg0: tensor<?x?x?xf64>) -> tensor<?x?x?xf64, #SparseTensor> {
+func.func @sparse_convert_3d(%arg0: tensor<?x?x?xf64>) -> tensor<?x?x?xf64, #SparseTensor> {
%0 = sparse_tensor.convert %arg0 : tensor<?x?x?xf64> to tensor<?x?x?xf64, #SparseTensor>
return %0 : tensor<?x?x?xf64, #SparseTensor>
}
// CHECK: %[[C:.*]] = arith.constant 0 : index
// CHECK: %[[T:.*]] = call @sparsePointers(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
// CHECK: return %[[T]] : memref<?xindex>
-func @sparse_pointers(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
+func.func @sparse_pointers(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
%c = arith.constant 0 : index
%0 = sparse_tensor.pointers %arg0, %c : tensor<128xf64, #SparseVector> to memref<?xindex>
return %0 : memref<?xindex>
// CHECK: %[[C:.*]] = arith.constant 0 : index
// CHECK: %[[T:.*]] = call @sparsePointers64(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xi64>
// CHECK: return %[[T]] : memref<?xi64>
-func @sparse_pointers64(%arg0: tensor<128xf64, #SparseVector64>) -> memref<?xi64> {
+func.func @sparse_pointers64(%arg0: tensor<128xf64, #SparseVector64>) -> memref<?xi64> {
%c = arith.constant 0 : index
%0 = sparse_tensor.pointers %arg0, %c : tensor<128xf64, #SparseVector64> to memref<?xi64>
return %0 : memref<?xi64>
// CHECK: %[[C:.*]] = arith.constant 0 : index
// CHECK: %[[T:.*]] = call @sparsePointers32(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xi32>
// CHECK: return %[[T]] : memref<?xi32>
-func @sparse_pointers32(%arg0: tensor<128xf64, #SparseVector32>) -> memref<?xi32> {
+func.func @sparse_pointers32(%arg0: tensor<128xf64, #SparseVector32>) -> memref<?xi32> {
%c = arith.constant 0 : index
%0 = sparse_tensor.pointers %arg0, %c : tensor<128xf64, #SparseVector32> to memref<?xi32>
return %0 : memref<?xi32>
// CHECK: %[[C:.*]] = arith.constant 0 : index
// CHECK: %[[T:.*]] = call @sparseIndices(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
// CHECK: return %[[T]] : memref<?xindex>
-func @sparse_indices(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
+func.func @sparse_indices(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
%c = arith.constant 0 : index
%0 = sparse_tensor.indices %arg0, %c : tensor<128xf64, #SparseVector> to memref<?xindex>
return %0 : memref<?xindex>
// CHECK: %[[C:.*]] = arith.constant 0 : index
// CHECK: %[[T:.*]] = call @sparseIndices64(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xi64>
// CHECK: return %[[T]] : memref<?xi64>
-func @sparse_indices64(%arg0: tensor<128xf64, #SparseVector64>) -> memref<?xi64> {
+func.func @sparse_indices64(%arg0: tensor<128xf64, #SparseVector64>) -> memref<?xi64> {
%c = arith.constant 0 : index
%0 = sparse_tensor.indices %arg0, %c : tensor<128xf64, #SparseVector64> to memref<?xi64>
return %0 : memref<?xi64>
// CHECK: %[[C:.*]] = arith.constant 0 : index
// CHECK: %[[T:.*]] = call @sparseIndices32(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xi32>
// CHECK: return %[[T]] : memref<?xi32>
-func @sparse_indices32(%arg0: tensor<128xf64, #SparseVector32>) -> memref<?xi32> {
+func.func @sparse_indices32(%arg0: tensor<128xf64, #SparseVector32>) -> memref<?xi32> {
%c = arith.constant 0 : index
%0 = sparse_tensor.indices %arg0, %c : tensor<128xf64, #SparseVector32> to memref<?xi32>
return %0 : memref<?xi32>
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
// CHECK: %[[T:.*]] = call @sparseValuesF64(%[[A]]) : (!llvm.ptr<i8>) -> memref<?xf64>
// CHECK: return %[[T]] : memref<?xf64>
-func @sparse_valuesf64(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xf64> {
+func.func @sparse_valuesf64(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xf64> {
%0 = sparse_tensor.values %arg0 : tensor<128xf64, #SparseVector> to memref<?xf64>
return %0 : memref<?xf64>
}
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
// CHECK: %[[T:.*]] = call @sparseValuesF32(%[[A]]) : (!llvm.ptr<i8>) -> memref<?xf32>
// CHECK: return %[[T]] : memref<?xf32>
-func @sparse_valuesf32(%arg0: tensor<128xf32, #SparseVector>) -> memref<?xf32> {
+func.func @sparse_valuesf32(%arg0: tensor<128xf32, #SparseVector>) -> memref<?xf32> {
%0 = sparse_tensor.values %arg0: tensor<128xf32, #SparseVector> to memref<?xf32>
return %0 : memref<?xf32>
}
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
// CHECK: %[[T:.*]] = call @sparseValuesI32(%[[A]]) : (!llvm.ptr<i8>) -> memref<?xi32>
// CHECK: return %[[T]] : memref<?xi32>
-func @sparse_valuesi32(%arg0: tensor<128xi32, #SparseVector>) -> memref<?xi32> {
+func.func @sparse_valuesi32(%arg0: tensor<128xi32, #SparseVector>) -> memref<?xi32> {
%0 = sparse_tensor.values %arg0: tensor<128xi32, #SparseVector> to memref<?xi32>
return %0 : memref<?xi32>
}
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
// CHECK: %[[T:.*]] = call @sparseValuesI16(%[[A]]) : (!llvm.ptr<i8>) -> memref<?xi16>
// CHECK: return %[[T]] : memref<?xi16>
-func @sparse_valuesi16(%arg0: tensor<128xi16, #SparseVector>) -> memref<?xi16> {
+func.func @sparse_valuesi16(%arg0: tensor<128xi16, #SparseVector>) -> memref<?xi16> {
%0 = sparse_tensor.values %arg0: tensor<128xi16, #SparseVector> to memref<?xi16>
return %0 : memref<?xi16>
}
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
// CHECK: %[[T:.*]] = call @sparseValuesI8(%[[A]]) : (!llvm.ptr<i8>) -> memref<?xi8>
// CHECK: return %[[T]] : memref<?xi8>
-func @sparse_valuesi8(%arg0: tensor<128xi8, #SparseVector>) -> memref<?xi8> {
+func.func @sparse_valuesi8(%arg0: tensor<128xi8, #SparseVector>) -> memref<?xi8> {
%0 = sparse_tensor.values %arg0: tensor<128xi8, #SparseVector> to memref<?xi8>
return %0 : memref<?xi8>
}
// CHECK-LABEL: func @sparse_reconstruct(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>
// CHECK: return %[[A]] : !llvm.ptr<i8>
-func @sparse_reconstruct(%arg0: tensor<128xf32, #SparseVector>) -> tensor<128xf32, #SparseVector> {
+func.func @sparse_reconstruct(%arg0: tensor<128xf32, #SparseVector>) -> tensor<128xf32, #SparseVector> {
%0 = sparse_tensor.load %arg0 : tensor<128xf32, #SparseVector>
return %0 : tensor<128xf32, #SparseVector>
}
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>
// CHECK: call @endInsert(%[[A]]) : (!llvm.ptr<i8>) -> ()
// CHECK: return %[[A]] : !llvm.ptr<i8>
-func @sparse_reconstruct_ins(%arg0: tensor<128xf32, #SparseVector>) -> tensor<128xf32, #SparseVector> {
+func.func @sparse_reconstruct_ins(%arg0: tensor<128xf32, #SparseVector>) -> tensor<128xf32, #SparseVector> {
%0 = sparse_tensor.load %arg0 hasInserts : tensor<128xf32, #SparseVector>
return %0 : tensor<128xf32, #SparseVector>
}
// CHECK-SAME: %[[C:.*]]: f32) {
// CHECK: call @lexInsertF32(%[[A]], %[[B]], %[[C]]) : (!llvm.ptr<i8>, memref<?xindex>, f32) -> ()
// CHECK: return
-func @sparse_insert(%arg0: tensor<128xf32, #SparseVector>,
+func.func @sparse_insert(%arg0: tensor<128xf32, #SparseVector>,
%arg1: memref<?xindex>,
%arg2: f32) {
sparse_tensor.lex_insert %arg0, %arg1, %arg2 : tensor<128xf32, #SparseVector>, memref<?xindex>, f32
// CHECK-DAG: linalg.fill ins(%{{.*}} : f64) outs(%[[A]] : memref<?xf64>)
// CHECK-DAG: linalg.fill ins(%{{.*}} : i1) outs(%[[B]] : memref<?xi1>)
// CHECK: return %[[C]] : memref<?xindex>
-func @sparse_expansion() -> memref<?xindex> {
+func.func @sparse_expansion() -> memref<?xindex> {
%c = arith.constant 8 : index
%0 = sparse_tensor.init [%c, %c] : tensor<8x8xf64, #SparseMatrix>
%values, %filled, %added, %count = sparse_tensor.expand %0
// CHECK-DAG: memref.dealloc %[[D]] : memref<?xi1>
// CHECK-DAG: memref.dealloc %[[E]] : memref<?xindex>
// CHECK: return
-func @sparse_compression(%arg0: tensor<8x8xf64, #SparseMatrix>,
+func.func @sparse_compression(%arg0: tensor<8x8xf64, #SparseMatrix>,
%arg1: memref<?xindex>, %arg2: memref<?xf64>, %arg3: memref<?xi1>,
%arg4: memref<?xindex>, %arg5: index) {
sparse_tensor.compress %arg0, %arg1, %arg2, %arg3, %arg4, %arg5
// CHECK: call @outSparseTensorF64(%[[COO]], %[[B]], %[[Sort]]) : (!llvm.ptr<i8>, !llvm.ptr<i8>, i1) -> ()
// CHECK: call @delSparseTensorCOOF64(%[[COO]])
// CHECK: return
-func @sparse_out1(%arg0: tensor<?x?xf64, #SparseMatrix>, %arg1: !llvm.ptr<i8>) {
+func.func @sparse_out1(%arg0: tensor<?x?xf64, #SparseMatrix>, %arg1: !llvm.ptr<i8>) {
sparse_tensor.out %arg0, %arg1 : tensor<?x?xf64, #SparseMatrix>, !llvm.ptr<i8>
return
}
// CHECK: call @outSparseTensorF32(%[[COO]], %[[B]], %[[Sort]]) : (!llvm.ptr<i8>, !llvm.ptr<i8>, i1) -> ()
// CHECK: call @delSparseTensorCOOF32(%[[COO]])
// CHECK: return
-func @sparse_out2(%arg0: tensor<?x?x?xf32, #SparseTensor>, %arg1: !llvm.ptr<i8>) {
+func.func @sparse_out2(%arg0: tensor<?x?x?xf32, #SparseTensor>, %arg1: !llvm.ptr<i8>) {
sparse_tensor.out %arg0, %arg1 : tensor<?x?x?xf32, #SparseTensor>, !llvm.ptr<i8>
return
}
// CHECK: }
// CHECK: %[[T:.*]] = bufferization.to_tensor %[[M]] : memref<13xi32>
// CHECK: return %[[T]] : tensor<13xi32>
-func @sparse_convert_1d(%arg0: tensor<13xi32, #SparseVector>) -> tensor<13xi32> {
+func.func @sparse_convert_1d(%arg0: tensor<13xi32, #SparseVector>) -> tensor<13xi32> {
%0 = sparse_tensor.convert %arg0 : tensor<13xi32, #SparseVector> to tensor<13xi32>
return %0 : tensor<13xi32>
}
// CHECK: }
// CHECK: %[[T:.*]] = bufferization.to_tensor %[[M]] : memref<?xi32>
// CHECK: return %[[T]] : tensor<?xi32>
-func @sparse_convert_1d_dyn(%arg0: tensor<?xi32, #SparseVector>) -> tensor<?xi32> {
+func.func @sparse_convert_1d_dyn(%arg0: tensor<?xi32, #SparseVector>) -> tensor<?xi32> {
%0 = sparse_tensor.convert %arg0 : tensor<?xi32, #SparseVector> to tensor<?xi32>
return %0 : tensor<?xi32>
}
// CHECK: }
// CHECK: %[[T:.*]] = bufferization.to_tensor %[[M]] : memref<2x4xf64>
// CHECK: return %[[T]] : tensor<2x4xf64>
-func @sparse_convert_2d(%arg0: tensor<2x4xf64, #SparseMatrix>) -> tensor<2x4xf64> {
+func.func @sparse_convert_2d(%arg0: tensor<2x4xf64, #SparseMatrix>) -> tensor<2x4xf64> {
%0 = sparse_tensor.convert %arg0 : tensor<2x4xf64, #SparseMatrix> to tensor<2x4xf64>
return %0 : tensor<2x4xf64>
}
// CHECK: }
// CHECK: %[[T:.*]] = bufferization.to_tensor %[[M]] : memref<?x4xf64>
// CHECK: return %[[T]] : tensor<?x4xf64>
-func @sparse_convert_2d_dyn0(%arg0: tensor<?x4xf64, #SparseMatrix>) -> tensor<?x4xf64> {
+func.func @sparse_convert_2d_dyn0(%arg0: tensor<?x4xf64, #SparseMatrix>) -> tensor<?x4xf64> {
%0 = sparse_tensor.convert %arg0 : tensor<?x4xf64, #SparseMatrix> to tensor<?x4xf64>
return %0 : tensor<?x4xf64>
}
// CHECK: }
// CHECK: %[[T:.*]] = bufferization.to_tensor %[[M]] : memref<2x?xf64>
// CHECK: return %[[T]] : tensor<2x?xf64>
-func @sparse_convert_2d_dyn1(%arg0: tensor<2x?xf64, #SparseMatrix>) -> tensor<2x?xf64> {
+func.func @sparse_convert_2d_dyn1(%arg0: tensor<2x?xf64, #SparseMatrix>) -> tensor<2x?xf64> {
%0 = sparse_tensor.convert %arg0 : tensor<2x?xf64, #SparseMatrix> to tensor<2x?xf64>
return %0 : tensor<2x?xf64>
}
// CHECK: }
// CHECK: %[[T:.*]] = bufferization.to_tensor %[[M]] : memref<?x?xf64>
// CHECK: return %[[T]] : tensor<?x?xf64>
-func @sparse_convert_2d_dyn2(%arg0: tensor<?x?xf64, #SparseMatrix>) -> tensor<?x?xf64> {
+func.func @sparse_convert_2d_dyn2(%arg0: tensor<?x?xf64, #SparseMatrix>) -> tensor<?x?xf64> {
%0 = sparse_tensor.convert %arg0 : tensor<?x?xf64, #SparseMatrix> to tensor<?x?xf64>
return %0 : tensor<?x?xf64>
}
// CHECK: }
// CHECK: %[[T:.*]] = bufferization.to_tensor %[[M]] : memref<2x3x4xf64>
// CHECK: return %[[T]] : tensor<2x3x4xf64>
-func @sparse_convert_3d(%arg0: tensor<2x3x4xf64, #SparseTensor>) -> tensor<2x3x4xf64> {
+func.func @sparse_convert_3d(%arg0: tensor<2x3x4xf64, #SparseTensor>) -> tensor<2x3x4xf64> {
%0 = sparse_tensor.convert %arg0 : tensor<2x3x4xf64, #SparseTensor> to tensor<2x3x4xf64>
return %0 : tensor<2x3x4xf64>
}
// CHECK: %[[VAL_16:.*]] = bufferization.to_tensor %[[VAL_9]] : memref<32x16xf32>
// CHECK: return %[[VAL_16]] : tensor<32x16xf32>
// CHECK: }
-func @dense1(%arga: tensor<32x16xf32, #DenseMatrix>,
+func.func @dense1(%arga: tensor<32x16xf32, #DenseMatrix>,
%argx: tensor<32x16xf32> {linalg.inplaceable = false})
-> tensor<32x16xf32> {
%c = arith.constant 1.0 : f32
// CHECK: %[[VAL_15:.*]] = bufferization.to_tensor %[[VAL_8]] : memref<32x16xf32>
// CHECK: return %[[VAL_15]] : tensor<32x16xf32>
// CHECK: }
-func @dense2(%arga: tensor<32x16xf32, #DenseMatrix>,
+func.func @dense2(%arga: tensor<32x16xf32, #DenseMatrix>,
%argx: tensor<32x16xf32> {linalg.inplaceable = true})
-> tensor<32x16xf32> {
%c = arith.constant 1.0 : f32
// CHECK: %[[VAL_15:.*]] = sparse_tensor.load %[[VAL_1]] : tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>>
// CHECK: return %[[VAL_15]] : tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>>
// CHECK: }
-func @dense3(%arga: tensor<32x16xf32>,
+func.func @dense3(%arga: tensor<32x16xf32>,
%argx: tensor<32x16xf32, #DenseMatrix> {linalg.inplaceable = true})
-> tensor<32x16xf32, #DenseMatrix> {
%c = arith.constant 1.0 : f32
// CHECK: %[[VAL_20:.*]] = sparse_tensor.load %[[VAL_1]] : tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>>
// CHECK: return %[[VAL_20]] : tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>>
// CHECK: }
-func @dense4(%arga: tensor<32x16x8xf32>,
+func.func @dense4(%arga: tensor<32x16x8xf32>,
%argx: tensor<32x16xf32, #DenseMatrix> {linalg.inplaceable = true})
-> tensor<32x16xf32, #DenseMatrix> {
%0 = linalg.generic #trait_3d
// CHECK-SAME: %[[A:.*]]: tensor<64xf32, #sparse_tensor.encoding<{{{.*}}}>>)
// CHECK-NOT: sparse_tensor.convert
// CHECK: return %[[A]] : tensor<64xf32, #sparse_tensor.encoding<{{{.*}}}>>
-func @sparse_nop_convert(%arg0: tensor<64xf32, #SparseVector>) -> tensor<64xf32, #SparseVector> {
+func.func @sparse_nop_convert(%arg0: tensor<64xf32, #SparseVector>) -> tensor<64xf32, #SparseVector> {
%0 = sparse_tensor.convert %arg0 : tensor<64xf32, #SparseVector> to tensor<64xf32, #SparseVector>
return %0 : tensor<64xf32, #SparseVector>
}
// CHECK-SAME: %[[A:.*]]: tensor<64xf32>)
// CHECK-NOT: sparse_tensor.convert
// CHECK: return
-func @sparse_dce_convert(%arg0: tensor<64xf32>) {
+func.func @sparse_dce_convert(%arg0: tensor<64xf32>) {
%0 = sparse_tensor.convert %arg0 : tensor<64xf32> to tensor<64xf32, #SparseVector>
return
}
// CHECK-NOT: sparse_tensor.indices
// CHECK-NOT: sparse_tensor.values
// CHECK: return
-func @sparse_dce_getters(%arg0: tensor<64xf32, #SparseVector>) {
+func.func @sparse_dce_getters(%arg0: tensor<64xf32, #SparseVector>) {
%c = arith.constant 0 : index
%0 = sparse_tensor.pointers %arg0, %c : tensor<64xf32, #SparseVector> to memref<?xindex>
%1 = sparse_tensor.indices %arg0, %c : tensor<64xf32, #SparseVector> to memref<?xindex>
// RUN: mlir-opt %s -split-input-file -verify-diagnostics
-func @invalid_new_dense(%arg0: !llvm.ptr<i8>) -> tensor<32xf32> {
+func.func @invalid_new_dense(%arg0: !llvm.ptr<i8>) -> tensor<32xf32> {
// expected-error@+1 {{expected a sparse tensor result}}
%0 = sparse_tensor.new %arg0 : !llvm.ptr<i8> to tensor<32xf32>
return %0 : tensor<32xf32>
// -----
-func @invalid_release_dense(%arg0: tensor<4xi32>) {
+func.func @invalid_release_dense(%arg0: tensor<4xi32>) {
// expected-error@+1 {{expected a sparse tensor to release}}
sparse_tensor.release %arg0 : tensor<4xi32>
return
// -----
-func @invalid_init_dense(%arg0: index, %arg1: index) -> tensor<?x?xf32> {
+func.func @invalid_init_dense(%arg0: index, %arg1: index) -> tensor<?x?xf32> {
// expected-error@+1 {{expected a sparse tensor result}}
%0 = sparse_tensor.init [%arg0, %arg1] : tensor<?x?xf32>
return %0 : tensor<?x?xf32>
#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>
-func @invalid_init_rank(%arg0: index) -> tensor<?xf32, #SparseVector> {
+func.func @invalid_init_rank(%arg0: index) -> tensor<?xf32, #SparseVector> {
// expected-error@+1 {{unexpected mismatch between tensor rank and sizes: 1 vs. 2}}
%0 = sparse_tensor.init [%arg0, %arg0] : tensor<?xf32, #SparseVector>
return %0 : tensor<?xf32, #SparseVector>
#SparseMatrix = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed"]}>
-func @invalid_init_size() -> tensor<?x10xf32, #SparseMatrix> {
+func.func @invalid_init_size() -> tensor<?x10xf32, #SparseMatrix> {
%c10 = arith.constant 10 : index
%c20 = arith.constant 20 : index
// expected-error@+1 {{unexpected mismatch with static dimension size 10}}
// -----
-func @invalid_pointers_dense(%arg0: tensor<128xf64>) -> memref<?xindex> {
+func.func @invalid_pointers_dense(%arg0: tensor<128xf64>) -> memref<?xindex> {
%c = arith.constant 0 : index
// expected-error@+1 {{expected a sparse tensor to get pointers}}
%0 = sparse_tensor.pointers %arg0, %c : tensor<128xf64> to memref<?xindex>
// -----
-func @invalid_pointers_unranked(%arg0: tensor<*xf64>) -> memref<?xindex> {
+func.func @invalid_pointers_unranked(%arg0: tensor<*xf64>) -> memref<?xindex> {
%c = arith.constant 0 : index
// expected-error@+1 {{expected a sparse tensor to get pointers}}
%0 = sparse_tensor.pointers %arg0, %c : tensor<*xf64> to memref<?xindex>
#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"], pointerBitWidth=32}>
-func @mismatch_pointers_types(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
+func.func @mismatch_pointers_types(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
%c = arith.constant 0 : index
// expected-error@+1 {{unexpected type for pointers}}
%0 = sparse_tensor.pointers %arg0, %c : tensor<128xf64, #SparseVector> to memref<?xindex>
#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>
-func @pointers_oob(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
+func.func @pointers_oob(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
%c = arith.constant 1 : index
// expected-error@+1 {{requested pointers dimension out of bounds}}
%0 = sparse_tensor.pointers %arg0, %c : tensor<128xf64, #SparseVector> to memref<?xindex>
// -----
-func @invalid_indices_dense(%arg0: tensor<10x10xi32>) -> memref<?xindex> {
+func.func @invalid_indices_dense(%arg0: tensor<10x10xi32>) -> memref<?xindex> {
%c = arith.constant 1 : index
// expected-error@+1 {{expected a sparse tensor to get indices}}
%0 = sparse_tensor.indices %arg0, %c : tensor<10x10xi32> to memref<?xindex>
// -----
-func @invalid_indices_unranked(%arg0: tensor<*xf64>) -> memref<?xindex> {
+func.func @invalid_indices_unranked(%arg0: tensor<*xf64>) -> memref<?xindex> {
%c = arith.constant 0 : index
// expected-error@+1 {{expected a sparse tensor to get indices}}
%0 = sparse_tensor.indices %arg0, %c : tensor<*xf64> to memref<?xindex>
#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>
-func @mismatch_indices_types(%arg0: tensor<?xf64, #SparseVector>) -> memref<?xi32> {
+func.func @mismatch_indices_types(%arg0: tensor<?xf64, #SparseVector>) -> memref<?xi32> {
%c = arith.constant 0 : index
// expected-error@+1 {{unexpected type for indices}}
%0 = sparse_tensor.indices %arg0, %c : tensor<?xf64, #SparseVector> to memref<?xi32>
#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>
-func @indices_oob(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
+func.func @indices_oob(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
%c = arith.constant 1 : index
// expected-error@+1 {{requested indices dimension out of bounds}}
%0 = sparse_tensor.indices %arg0, %c : tensor<128xf64, #SparseVector> to memref<?xindex>
// -----
-func @invalid_values_dense(%arg0: tensor<1024xf32>) -> memref<?xf32> {
+func.func @invalid_values_dense(%arg0: tensor<1024xf32>) -> memref<?xf32> {
// expected-error@+1 {{expected a sparse tensor to get values}}
%0 = sparse_tensor.values %arg0 : tensor<1024xf32> to memref<?xf32>
return %0 : memref<?xf32>
#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>
-func @mismatch_values_types(%arg0: tensor<?xf64, #SparseVector>) -> memref<?xf32> {
+func.func @mismatch_values_types(%arg0: tensor<?xf64, #SparseVector>) -> memref<?xf32> {
// expected-error@+1 {{unexpected mismatch in element types}}
%0 = sparse_tensor.values %arg0 : tensor<?xf64, #SparseVector> to memref<?xf32>
return %0 : memref<?xf32>
// -----
-func @sparse_unannotated_load(%arg0: tensor<16x32xf64>) -> tensor<16x32xf64> {
+func.func @sparse_unannotated_load(%arg0: tensor<16x32xf64>) -> tensor<16x32xf64> {
// expected-error@+1 {{expected a sparse tensor to materialize}}
%0 = sparse_tensor.load %arg0 : tensor<16x32xf64>
return %0 : tensor<16x32xf64>
// -----
-func @sparse_unannotated_insert(%arg0: tensor<128xf64>, %arg1: memref<?xindex>, %arg2: f64) {
+func.func @sparse_unannotated_insert(%arg0: tensor<128xf64>, %arg1: memref<?xindex>, %arg2: f64) {
// expected-error@+1 {{expected a sparse tensor for insertion}}
sparse_tensor.lex_insert %arg0, %arg1, %arg2 : tensor<128xf64>, memref<?xindex>, f64
return
// -----
-func @sparse_unannotated_expansion(%arg0: tensor<128xf64>) {
+func.func @sparse_unannotated_expansion(%arg0: tensor<128xf64>) {
// expected-error@+1 {{expected a sparse tensor for expansion}}
%values, %filled, %added, %count = sparse_tensor.expand %arg0
: tensor<128xf64> to memref<?xf64>, memref<?xi1>, memref<?xindex>, index
// -----
-func @sparse_unannotated_compression(%arg0: tensor<128xf64>, %arg1: memref<?xindex>,
+func.func @sparse_unannotated_compression(%arg0: tensor<128xf64>, %arg1: memref<?xindex>,
%arg2: memref<?xf64>, %arg3: memref<?xi1>,
%arg4: memref<?xindex>, %arg5: index) {
// expected-error@+1 {{expected a sparse tensor for compression}}
// -----
-func @sparse_convert_unranked(%arg0: tensor<*xf32>) -> tensor<10xf32> {
+func.func @sparse_convert_unranked(%arg0: tensor<*xf32>) -> tensor<10xf32> {
// expected-error@+1 {{unexpected type in convert}}
%0 = sparse_tensor.convert %arg0 : tensor<*xf32> to tensor<10xf32>
return %0 : tensor<10xf32>
#DCSR = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed"]}>
-func @sparse_convert_rank_mismatch(%arg0: tensor<10x10xf64, #DCSR>) -> tensor<?xf64> {
+func.func @sparse_convert_rank_mismatch(%arg0: tensor<10x10xf64, #DCSR>) -> tensor<?xf64> {
// expected-error@+1 {{unexpected conversion mismatch in rank}}
%0 = sparse_tensor.convert %arg0 : tensor<10x10xf64, #DCSR> to tensor<?xf64>
return %0 : tensor<?xf64>
#CSR = #sparse_tensor.encoding<{dimLevelType = ["dense", "compressed"]}>
-func @sparse_convert_dim_mismatch(%arg0: tensor<10x?xf32>) -> tensor<10x10xf32, #CSR> {
+func.func @sparse_convert_dim_mismatch(%arg0: tensor<10x?xf32>) -> tensor<10x10xf32, #CSR> {
// expected-error@+1 {{unexpected conversion mismatch in dimension 1}}
%0 = sparse_tensor.convert %arg0 : tensor<10x?xf32> to tensor<10x10xf32, #CSR>
return %0 : tensor<10x10xf32, #CSR>
// -----
-func @invalid_out_dense(%arg0: tensor<10xf64>, %arg1: !llvm.ptr<i8>) {
+func.func @invalid_out_dense(%arg0: tensor<10xf64>, %arg1: !llvm.ptr<i8>) {
// expected-error@+1 {{expected a sparse tensor for output}}
sparse_tensor.out %arg0, %arg1 : tensor<10xf64>, !llvm.ptr<i8>
return
// -----
-func @invalid_binary_num_args_mismatch_overlap(%arg0: f64, %arg1: f64) -> f64 {
+func.func @invalid_binary_num_args_mismatch_overlap(%arg0: f64, %arg1: f64) -> f64 {
// expected-error@+1 {{overlap region must have exactly 2 arguments}}
%r = sparse_tensor.binary %arg0, %arg1 : f64, f64 to f64
overlap={
// -----
-func @invalid_binary_num_args_mismatch_right(%arg0: f64, %arg1: f64) -> f64 {
+func.func @invalid_binary_num_args_mismatch_right(%arg0: f64, %arg1: f64) -> f64 {
// expected-error@+1 {{right region must have exactly 1 arguments}}
%r = sparse_tensor.binary %arg0, %arg1 : f64, f64 to f64
overlap={}
// -----
-func @invalid_binary_argtype_mismatch(%arg0: f64, %arg1: f64) -> f64 {
+func.func @invalid_binary_argtype_mismatch(%arg0: f64, %arg1: f64) -> f64 {
// expected-error@+1 {{overlap region argument 2 type mismatch}}
%r = sparse_tensor.binary %arg0, %arg1 : f64, f64 to f64
overlap={
// -----
-func @invalid_binary_wrong_return_type(%arg0: f64, %arg1: f64) -> f64 {
+func.func @invalid_binary_wrong_return_type(%arg0: f64, %arg1: f64) -> f64 {
// expected-error@+1 {{left region yield type mismatch}}
%0 = sparse_tensor.binary %arg0, %arg1 : f64, f64 to f64
overlap={}
// -----
-func @invalid_binary_wrong_identity_type(%arg0: i64, %arg1: f64) -> f64 {
+func.func @invalid_binary_wrong_identity_type(%arg0: i64, %arg1: f64) -> f64 {
// expected-error@+1 {{left=identity requires first argument to have the same type as the output}}
%0 = sparse_tensor.binary %arg0, %arg1 : i64, f64 to f64
overlap={}
// -----
-func @invalid_unary_argtype_mismatch(%arg0: f64) -> f64 {
+func.func @invalid_unary_argtype_mismatch(%arg0: f64) -> f64 {
// expected-error@+1 {{present region argument 1 type mismatch}}
%r = sparse_tensor.unary %arg0 : f64 to f64
present={
// -----
-func @invalid_unary_num_args_mismatch(%arg0: f64) -> f64 {
+func.func @invalid_unary_num_args_mismatch(%arg0: f64) -> f64 {
// expected-error@+1 {{absent region must have exactly 0 arguments}}
%r = sparse_tensor.unary %arg0 : f64 to f64
present={}
// -----
-func @invalid_unary_wrong_return_type(%arg0: f64) -> f64 {
+func.func @invalid_unary_wrong_return_type(%arg0: f64) -> f64 {
// expected-error@+1 {{present region yield type mismatch}}
%0 = sparse_tensor.unary %arg0 : f64 to f64
present={
// RUN: mlir-opt %s -split-input-file -verify-diagnostics
#a = #sparse_tensor.encoding<{dimLevelType = []}>
-func private @scalar(%arg0: tensor<f64, #a>) -> () // expected-error {{expected non-scalar sparse tensor}}
+func.func private @scalar(%arg0: tensor<f64, #a>) -> () // expected-error {{expected non-scalar sparse tensor}}
// -----
#a = #sparse_tensor.encoding<{dimLevelType = ["dense", "compressed"]}>
-func private @tensor_size_mismatch(%arg0: tensor<8xi32, #a>) -> () // expected-error {{expected an array of size 1 for dimension level types}}
+func.func private @tensor_size_mismatch(%arg0: tensor<8xi32, #a>) -> () // expected-error {{expected an array of size 1 for dimension level types}}
// -----
#a = #sparse_tensor.encoding<{dimLevelType = ["dense", "compressed"], dimOrdering = affine_map<(i) -> (i)>}> // expected-error {{unexpected mismatch in ordering and dimension level types size}}
-func private @tensor_sizes_mismatch(%arg0: tensor<8xi32, #a>) -> ()
+func.func private @tensor_sizes_mismatch(%arg0: tensor<8xi32, #a>) -> ()
// -----
#a = #sparse_tensor.encoding<{dimLevelType = [1]}> // expected-error {{expected a string value in dimension level types}}
-func private @tensor_type_mismatch(%arg0: tensor<8xi32, #a>) -> ()
+func.func private @tensor_type_mismatch(%arg0: tensor<8xi32, #a>) -> ()
// -----
#a = #sparse_tensor.encoding<{dimLevelType = ["strange"]}> // expected-error {{unexpected dimension level type: strange}}
-func private @tensor_value_mismatch(%arg0: tensor<8xi32, #a>) -> ()
+func.func private @tensor_value_mismatch(%arg0: tensor<8xi32, #a>) -> ()
// -----
#a = #sparse_tensor.encoding<{dimOrdering = "wrong"}> // expected-error {{expected an affine map for dimension ordering}}
-func private @tensor_order_mismatch(%arg0: tensor<8xi32, #a>) -> ()
+func.func private @tensor_order_mismatch(%arg0: tensor<8xi32, #a>) -> ()
// -----
#a = #sparse_tensor.encoding<{dimOrdering = affine_map<(i,j) -> (i,i)>}> // expected-error {{expected a permutation affine map for dimension ordering}}
-func private @tensor_no_permutation(%arg0: tensor<16x32xf32, #a>) -> ()
+func.func private @tensor_no_permutation(%arg0: tensor<16x32xf32, #a>) -> ()
// -----
#a = #sparse_tensor.encoding<{pointerBitWidth = "x"}> // expected-error {{expected an integral pointer bitwidth}}
-func private @tensor_no_int_ptr(%arg0: tensor<16x32xf32, #a>) -> ()
+func.func private @tensor_no_int_ptr(%arg0: tensor<16x32xf32, #a>) -> ()
// -----
#a = #sparse_tensor.encoding<{pointerBitWidth = 42}> // expected-error {{unexpected pointer bitwidth: 42}}
-func private @tensor_invalid_int_ptr(%arg0: tensor<16x32xf32, #a>) -> ()
+func.func private @tensor_invalid_int_ptr(%arg0: tensor<16x32xf32, #a>) -> ()
// -----
#a = #sparse_tensor.encoding<{indexBitWidth = "not really"}> // expected-error {{expected an integral index bitwidth}}
-func private @tensor_no_int_index(%arg0: tensor<16x32xf32, #a>) -> ()
+func.func private @tensor_no_int_index(%arg0: tensor<16x32xf32, #a>) -> ()
// -----
#a = #sparse_tensor.encoding<{indexBitWidth = 128}> // expected-error {{unexpected index bitwidth: 128}}
-func private @tensor_invalid_int_index(%arg0: tensor<16x32xf32, #a>) -> ()
+func.func private @tensor_invalid_int_index(%arg0: tensor<16x32xf32, #a>) -> ()
// -----
#a = #sparse_tensor.encoding<{key = 1}> // expected-error {{unexpected key: key}}
-func private @tensor_invalid_key(%arg0: tensor<16x32xf32, #a>) -> ()
+func.func private @tensor_invalid_key(%arg0: tensor<16x32xf32, #a>) -> ()
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
// CHECK: %[[T:.*]] = sparse_tensor.new %[[A]] : !llvm.ptr<i8> to tensor<128xf64, #{{.*}}>
// CHECK: return %[[T]] : tensor<128xf64, #{{.*}}>
-func @sparse_new(%arg0: !llvm.ptr<i8>) -> tensor<128xf64, #SparseVector> {
+func.func @sparse_new(%arg0: !llvm.ptr<i8>) -> tensor<128xf64, #SparseVector> {
%0 = sparse_tensor.new %arg0 : !llvm.ptr<i8> to tensor<128xf64, #SparseVector>
return %0 : tensor<128xf64, #SparseVector>
}
// CHECK-DAG: %[[C32:.*]] = arith.constant 32 : index
// CHECK: %[[T:.*]] = sparse_tensor.init[%[[C16]], %[[C32]]] : tensor<?x32xf64, #{{.*}}>
// CHECK: return %[[T]] : tensor<?x32xf64, #{{.*}}>
-func @sparse_init() -> tensor<?x32xf64, #SparseMatrix> {
+func.func @sparse_init() -> tensor<?x32xf64, #SparseMatrix> {
%d1 = arith.constant 16 : index
%d2 = arith.constant 32 : index
%0 = sparse_tensor.init [%d1, %d2] : tensor<?x32xf64, #SparseMatrix>
// CHECK-SAME: %[[A:.*]]: tensor<128xf64, #{{.*}}>
// CHECK: sparse_tensor.release %[[A]] : tensor<128xf64, #{{.*}}>
// CHECK: return
-func @sparse_release(%arg0: tensor<128xf64, #SparseVector>) {
+func.func @sparse_release(%arg0: tensor<128xf64, #SparseVector>) {
sparse_tensor.release %arg0 : tensor<128xf64, #SparseVector>
return
}
// CHECK-SAME: %[[A:.*]]: tensor<64xf32>)
// CHECK: %[[T:.*]] = sparse_tensor.convert %[[A]] : tensor<64xf32> to tensor<64xf32, #{{.*}}>
// CHECK: return %[[T]] : tensor<64xf32, #{{.*}}>
-func @sparse_convert_1d_to_sparse(%arg0: tensor<64xf32>) -> tensor<64xf32, #SparseVector> {
+func.func @sparse_convert_1d_to_sparse(%arg0: tensor<64xf32>) -> tensor<64xf32, #SparseVector> {
%0 = sparse_tensor.convert %arg0 : tensor<64xf32> to tensor<64xf32, #SparseVector>
return %0 : tensor<64xf32, #SparseVector>
}
// CHECK-SAME: %[[A:.*]]: tensor<8x8x8xf64, #{{.*}}>)
// CHECK: %[[T:.*]] = sparse_tensor.convert %[[A]] : tensor<8x8x8xf64, #{{.*}}> to tensor<8x8x8xf64>
// CHECK: return %[[T]] : tensor<8x8x8xf64>
-func @sparse_convert_3d_from_sparse(%arg0: tensor<8x8x8xf64, #SparseTensor>) -> tensor<8x8x8xf64> {
+func.func @sparse_convert_3d_from_sparse(%arg0: tensor<8x8x8xf64, #SparseTensor>) -> tensor<8x8x8xf64> {
%0 = sparse_tensor.convert %arg0 : tensor<8x8x8xf64, #SparseTensor> to tensor<8x8x8xf64>
return %0 : tensor<8x8x8xf64>
}
// CHECK: %[[C:.*]] = arith.constant 0 : index
// CHECK: %[[T:.*]] = sparse_tensor.pointers %[[A]], %[[C]] : tensor<128xf64, #{{.*}}> to memref<?xindex>
// CHECK: return %[[T]] : memref<?xindex>
-func @sparse_pointers(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
+func.func @sparse_pointers(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
%c = arith.constant 0 : index
%0 = sparse_tensor.pointers %arg0, %c : tensor<128xf64, #SparseVector> to memref<?xindex>
return %0 : memref<?xindex>
// CHECK: %[[C:.*]] = arith.constant 0 : index
// CHECK: %[[T:.*]] = sparse_tensor.indices %[[A]], %[[C]] : tensor<128xf64, #{{.*}}> to memref<?xindex>
// CHECK: return %[[T]] : memref<?xindex>
-func @sparse_indices(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
+func.func @sparse_indices(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
%c = arith.constant 0 : index
%0 = sparse_tensor.indices %arg0, %c : tensor<128xf64, #SparseVector> to memref<?xindex>
return %0 : memref<?xindex>
// CHECK-SAME: %[[A:.*]]: tensor<128xf64, #{{.*}}>)
// CHECK: %[[T:.*]] = sparse_tensor.values %[[A]] : tensor<128xf64, #{{.*}}> to memref<?xf64>
// CHECK: return %[[T]] : memref<?xf64>
-func @sparse_values(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xf64> {
+func.func @sparse_values(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xf64> {
%0 = sparse_tensor.values %arg0 : tensor<128xf64, #SparseVector> to memref<?xf64>
return %0 : memref<?xf64>
}
// CHECK-SAME: %[[A:.*]]: tensor<16x32xf64, #{{.*}}>)
// CHECK: %[[T:.*]] = sparse_tensor.load %[[A]] : tensor<16x32xf64, #{{.*}}>
// CHECK: return %[[T]] : tensor<16x32xf64, #{{.*}}>
-func @sparse_load(%arg0: tensor<16x32xf64, #DenseMatrix>) -> tensor<16x32xf64, #DenseMatrix> {
+func.func @sparse_load(%arg0: tensor<16x32xf64, #DenseMatrix>) -> tensor<16x32xf64, #DenseMatrix> {
%0 = sparse_tensor.load %arg0 : tensor<16x32xf64, #DenseMatrix>
return %0 : tensor<16x32xf64, #DenseMatrix>
}
// CHECK-SAME: %[[A:.*]]: tensor<16x32xf64, #{{.*}}>)
// CHECK: %[[T:.*]] = sparse_tensor.load %[[A]] hasInserts : tensor<16x32xf64, #{{.*}}>
// CHECK: return %[[T]] : tensor<16x32xf64, #{{.*}}>
-func @sparse_load_ins(%arg0: tensor<16x32xf64, #DenseMatrix>) -> tensor<16x32xf64, #DenseMatrix> {
+func.func @sparse_load_ins(%arg0: tensor<16x32xf64, #DenseMatrix>) -> tensor<16x32xf64, #DenseMatrix> {
%0 = sparse_tensor.load %arg0 hasInserts : tensor<16x32xf64, #DenseMatrix>
return %0 : tensor<16x32xf64, #DenseMatrix>
}
// CHECK-SAME: %[[C:.*]]: f64) {
// CHECK: sparse_tensor.lex_insert %[[A]], %[[B]], %[[C]] : tensor<128xf64, #{{.*}}>, memref<?xindex>, f64
// CHECK: return
-func @sparse_insert(%arg0: tensor<128xf64, #SparseVector>, %arg1: memref<?xindex>, %arg2: f64) {
+func.func @sparse_insert(%arg0: tensor<128xf64, #SparseVector>, %arg1: memref<?xindex>, %arg2: f64) {
sparse_tensor.lex_insert %arg0, %arg1, %arg2 : tensor<128xf64, #SparseVector>, memref<?xindex>, f64
return
}
// CHECK-SAME: %[[A:.*]]: tensor<8x8xf64, #sparse_tensor.encoding<{{.*}}>>)
// CHECK: sparse_tensor.expand %[[A]]
// CHECK: return
-func @sparse_expansion(%arg0: tensor<8x8xf64, #SparseMatrix>) {
+func.func @sparse_expansion(%arg0: tensor<8x8xf64, #SparseMatrix>) {
%values, %filled, %added, %count = sparse_tensor.expand %arg0
: tensor<8x8xf64, #SparseMatrix> to memref<?xf64>, memref<?xi1>, memref<?xindex>, index
return
// CHECK-SAME: %[[A:.*]]: tensor<8x8xf64, #sparse_tensor.encoding<{{.*}}>>,
// CHECK: sparse_tensor.compress %[[A]]
// CHECK: return
-func @sparse_compression(%arg0: tensor<8x8xf64, #SparseMatrix>,
+func.func @sparse_compression(%arg0: tensor<8x8xf64, #SparseMatrix>,
%arg1: memref<?xindex>, %arg2: memref<?xf64>, %arg3: memref<?xi1>,
%arg4: memref<?xindex>, %arg5: index) {
sparse_tensor.compress %arg0, %arg1, %arg2, %arg3, %arg4, %arg5
// CHECK-SAME: %[[B:.*]]: !llvm.ptr<i8>)
// CHECK: sparse_tensor.out %[[A]], %[[B]] : tensor<?x?xf64, #sparse_tensor.encoding<{{.*}}>>, !llvm.ptr<i8>
// CHECK: return
-func @sparse_out(%arg0: tensor<?x?xf64, #SparseMatrix>, %arg1: !llvm.ptr<i8>) {
+func.func @sparse_out(%arg0: tensor<?x?xf64, #SparseMatrix>, %arg1: !llvm.ptr<i8>) {
sparse_tensor.out %arg0, %arg1 : tensor<?x?xf64, #SparseMatrix>, !llvm.ptr<i8>
return
}
// CHECK: }
// CHECK: return %[[C1]] : f64
// CHECK: }
-func @sparse_binary(%arg0: f64, %arg1: i64) -> f64 {
+func.func @sparse_binary(%arg0: f64, %arg1: i64) -> f64 {
%cf0 = arith.constant 0.0 : f64
%r = sparse_tensor.binary %arg0, %arg1 : f64, i64 to f64
overlap={
// CHECK: }
// CHECK: return %[[C1]] : f64
// CHECK: }
-func @sparse_unary(%arg0: f64) -> f64 {
+func.func @sparse_unary(%arg0: f64) -> f64 {
%r = sparse_tensor.unary %arg0 : f64 to f64
present={
^bb0(%x: f64):
// CHECK: }
// CHECK: return %[[C1]] : i64
// CHECK: }
-func @sparse_unary(%arg0: f64) -> i64 {
+func.func @sparse_unary(%arg0: f64) -> i64 {
%r = sparse_tensor.unary %arg0 : f64 to i64
present={
^bb0(%x: f64):
// CHECK-LABEL: func private @sparse_1d_tensor(
// CHECK-SAME: tensor<32xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ], pointerBitWidth = 0, indexBitWidth = 0 }>>)
-func private @sparse_1d_tensor(tensor<32xf64, #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>>)
+func.func private @sparse_1d_tensor(tensor<32xf64, #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>>)
// -----
// CHECK-LABEL: func private @sparse_2d_tensor(
// CHECK-SAME: tensor<?x?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d0, d1)>, pointerBitWidth = 64, indexBitWidth = 64 }>>)
-func private @sparse_2d_tensor(tensor<?x?xf32, #CSR>)
+func.func private @sparse_2d_tensor(tensor<?x?xf32, #CSR>)
// CHECK: %[[VAL_12:.*]] = bufferization.to_tensor %[[VAL_8]] : memref<32xf32>
// CHECK: return %[[VAL_12]] : tensor<32xf32>
// CHECK: }
-func @add_d(%arga: tensor<32xf32, #DV>, %argb: f32, %argx: tensor<32xf32>) -> tensor<32xf32> {
+func.func @add_d(%arga: tensor<32xf32, #DV>, %argb: f32, %argx: tensor<32xf32>) -> tensor<32xf32> {
%0 = linalg.generic #trait1
ins(%arga: tensor<32xf32, #DV>)
outs(%argx: tensor<32xf32>) {
// CHECK: %[[VAL_11:.*]] = bufferization.to_tensor %[[VAL_7]] : memref<32xf32>
// CHECK: return %[[VAL_11]] : tensor<32xf32>
// CHECK: }
-func @add_d_init(%arga: tensor<32xf32, #DV>, %argb: f32) -> tensor<32xf32> {
+func.func @add_d_init(%arga: tensor<32xf32, #DV>, %argb: f32) -> tensor<32xf32> {
%u = linalg.init_tensor [32] : tensor<32xf32>
%0 = linalg.generic #trait1
ins(%arga: tensor<32xf32, #DV>)
// CHECK: %[[VAL_12:.*]] = bufferization.to_tensor %[[VAL_8]] : memref<32xf32>
// CHECK: return %[[VAL_12]] : tensor<32xf32>
// CHECK: }
-func @mul_d(%arga: tensor<32xf32, #DV>, %argb: f32, %argx: tensor<32xf32>) -> tensor<32xf32> {
+func.func @mul_d(%arga: tensor<32xf32, #DV>, %argb: f32, %argx: tensor<32xf32>) -> tensor<32xf32> {
%0 = linalg.generic #trait1
ins(%arga: tensor<32xf32, #DV>)
outs(%argx: tensor<32xf32>) {
// CHECK: %[[VAL_30:.*]] = bufferization.to_tensor %[[VAL_11]] : memref<32xf32>
// CHECK: return %[[VAL_30]] : tensor<32xf32>
// CHECK: }
-func @add_s(%arga: tensor<32xf32, #SV>, %argb: f32, %argx: tensor<32xf32>) -> tensor<32xf32> {
+func.func @add_s(%arga: tensor<32xf32, #SV>, %argb: f32, %argx: tensor<32xf32>) -> tensor<32xf32> {
%0 = linalg.generic #trait1
ins(%arga: tensor<32xf32, #SV>)
outs(%argx: tensor<32xf32>) {
// CHECK: %[[VAL_20:.*]] = bufferization.to_tensor %[[VAL_8]] : memref<32xf32>
// CHECK: return %[[VAL_20]] : tensor<32xf32>
// CHECK: }
-func @repeated_add_s(%arga: tensor<32xf32, #SV>, %argx: tensor<32xf32>) -> tensor<32xf32> {
+func.func @repeated_add_s(%arga: tensor<32xf32, #SV>, %argx: tensor<32xf32>) -> tensor<32xf32> {
%0 = linalg.generic #trait1
ins(%arga: tensor<32xf32, #SV>)
outs(%argx: tensor<32xf32>) {
// CHECK: %[[VAL_16:.*]] = bufferization.to_tensor %[[VAL_9]] : memref<32xf32>
// CHECK: return %[[VAL_16]] : tensor<32xf32>
// CHECK: }
-func @mul_s(%arga: tensor<32xf32, #SV>, %argb: f32, %argx: tensor<32xf32>) -> tensor<32xf32> {
+func.func @mul_s(%arga: tensor<32xf32, #SV>, %argb: f32, %argx: tensor<32xf32>) -> tensor<32xf32> {
%0 = linalg.generic #trait1
ins(%arga: tensor<32xf32, #SV>)
outs(%argx: tensor<32xf32>) {
// CHECK: %[[VAL_14:.*]] = bufferization.to_tensor %[[VAL_9]] : memref<32xf32>
// CHECK: return %[[VAL_14]] : tensor<32xf32>
// CHECK: }
-func @add_dd(%arga: tensor<32xf32, #DV>, %argb: tensor<32xf32>, %argx: tensor<32xf32>) -> tensor<32xf32> {
+func.func @add_dd(%arga: tensor<32xf32, #DV>, %argb: tensor<32xf32>, %argx: tensor<32xf32>) -> tensor<32xf32> {
%0 = linalg.generic #trait2
ins(%arga, %argb: tensor<32xf32, #DV>, tensor<32xf32>)
outs(%argx: tensor<32xf32>) {
// CHECK: %[[VAL_14:.*]] = bufferization.to_tensor %[[VAL_9]] : memref<32xf32>
// CHECK: return %[[VAL_14]] : tensor<32xf32>
// CHECK: }
-func @mul_dd(%arga: tensor<32xf32, #DV>, %argb: tensor<32xf32>, %argx: tensor<32xf32>) -> tensor<32xf32> {
+func.func @mul_dd(%arga: tensor<32xf32, #DV>, %argb: tensor<32xf32>, %argx: tensor<32xf32>) -> tensor<32xf32> {
%0 = linalg.generic #trait2
ins(%arga, %argb: tensor<32xf32, #DV>, tensor<32xf32>)
outs(%argx: tensor<32xf32>) {
// CHECK: %[[VAL_34:.*]] = bufferization.to_tensor %[[VAL_12]] : memref<32xf32>
// CHECK: return %[[VAL_34]] : tensor<32xf32>
// CHECK: }
-func @add_ds(%arga: tensor<32xf32>, %argb: tensor<32xf32, #SV>, %argx: tensor<32xf32>) -> tensor<32xf32> {
+func.func @add_ds(%arga: tensor<32xf32>, %argb: tensor<32xf32, #SV>, %argx: tensor<32xf32>) -> tensor<32xf32> {
%0 = linalg.generic #trait2
ins(%arga, %argb: tensor<32xf32>, tensor<32xf32, #SV>)
outs(%argx: tensor<32xf32>) {
// CHECK: %[[VAL_18:.*]] = bufferization.to_tensor %[[VAL_10]] : memref<32xf32>
// CHECK: return %[[VAL_18]] : tensor<32xf32>
// CHECK: }
-func @mul_ds(%arga: tensor<32xf32>, %argb: tensor<32xf32, #SV>, %argx: tensor<32xf32>) -> tensor<32xf32> {
+func.func @mul_ds(%arga: tensor<32xf32>, %argb: tensor<32xf32, #SV>, %argx: tensor<32xf32>) -> tensor<32xf32> {
%0 = linalg.generic #trait2
ins(%arga, %argb: tensor<32xf32>, tensor<32xf32, #SV>)
outs(%argx: tensor<32xf32>) {
// CHECK: %[[VAL_34:.*]] = bufferization.to_tensor %[[VAL_12]] : memref<32xf32>
// CHECK: return %[[VAL_34]] : tensor<32xf32>
// CHECK: }
-func @add_sd(%arga: tensor<32xf32, #SV>, %argb: tensor<32xf32>, %argx: tensor<32xf32>) -> tensor<32xf32> {
+func.func @add_sd(%arga: tensor<32xf32, #SV>, %argb: tensor<32xf32>, %argx: tensor<32xf32>) -> tensor<32xf32> {
%0 = linalg.generic #trait2
ins(%arga, %argb: tensor<32xf32, #SV>, tensor<32xf32>)
outs(%argx: tensor<32xf32>) {
// CHECK: %[[VAL_18:.*]] = bufferization.to_tensor %[[VAL_10]] : memref<32xf32>
// CHECK: return %[[VAL_18]] : tensor<32xf32>
// CHECK: }
-func @mul_sd(%arga: tensor<32xf32, #SV>, %argb: tensor<32xf32>, %argx: tensor<32xf32>) -> tensor<32xf32> {
+func.func @mul_sd(%arga: tensor<32xf32, #SV>, %argb: tensor<32xf32>, %argx: tensor<32xf32>) -> tensor<32xf32> {
%0 = linalg.generic #trait2
ins(%arga, %argb: tensor<32xf32, #SV>, tensor<32xf32>)
outs(%argx: tensor<32xf32>) {
// CHECK: %[[VAL_53:.*]] = bufferization.to_tensor %[[VAL_12]] : memref<32xf32>
// CHECK: return %[[VAL_53]] : tensor<32xf32>
// CHECK: }
-func @add_ss(%arga: tensor<32xf32, #SV>, %argb: tensor<32xf32, #SV>, %argx: tensor<32xf32>) -> tensor<32xf32> {
+func.func @add_ss(%arga: tensor<32xf32, #SV>, %argb: tensor<32xf32, #SV>, %argx: tensor<32xf32>) -> tensor<32xf32> {
%0 = linalg.generic #trait2
ins(%arga, %argb: tensor<32xf32, #SV>, tensor<32xf32, #SV>)
outs(%argx: tensor<32xf32>) {
// CHECK: %[[VAL_41:.*]] = bufferization.to_tensor %[[VAL_12]] : memref<32xf32>
// CHECK: return %[[VAL_41]] : tensor<32xf32>
// CHECK: }
-func @mul_ss(%arga: tensor<32xf32, #SV>, %argb: tensor<32xf32, #SV>, %argx: tensor<32xf32>) -> tensor<32xf32> {
+func.func @mul_ss(%arga: tensor<32xf32, #SV>, %argb: tensor<32xf32, #SV>, %argx: tensor<32xf32>) -> tensor<32xf32> {
%0 = linalg.generic #trait2
ins(%arga, %argb: tensor<32xf32, #SV>, tensor<32xf32, #SV>)
outs(%argx: tensor<32xf32>) {
// CHECK: %[[VAL_60:.*]] = bufferization.to_tensor %[[VAL_13]] : memref<16xf32>
// CHECK: return %[[VAL_60]] : tensor<16xf32>
// CHECK: }
-func @two_way_inv(%arga: tensor<16xf32, #SV>, %argb: tensor<16xf32, #SV>, %argc: f32, %argx: tensor<16xf32>) -> tensor<16xf32> {
+func.func @two_way_inv(%arga: tensor<16xf32, #SV>, %argb: tensor<16xf32, #SV>, %argc: f32, %argx: tensor<16xf32>) -> tensor<16xf32> {
// Kernel "x(i) = a(i) * c + b(i) * c".
%0 = linalg.generic #trait2
ins(%arga, %argb: tensor<16xf32, #SV>, tensor<16xf32, #SV>)
// CHECK: %[[VAL_59:.*]] = bufferization.to_tensor %[[VAL_13]] : memref<16xf32>
// CHECK: return %[[VAL_59]] : tensor<16xf32>
// CHECK: }
-func @two_way_inv_alt(%arga: tensor<16xf32, #SV>,
+func.func @two_way_inv_alt(%arga: tensor<16xf32, #SV>,
%argb: tensor<16xf32, #SV>, %argc: f32, %argx: tensor<16xf32>) -> tensor<16xf32> {
// Same kernel, but now expressed as "x(i) = (a(i) + b(i)) * c".
%0 = linalg.generic #trait2
// CHECK: %[[VAL_17:.*]] = bufferization.to_tensor %[[VAL_7]] : memref<f32>
// CHECK: return %[[VAL_17]] : tensor<f32>
// CHECK: }
-func @sum_reduction(%arga: tensor<?xf32, #SV>, %argx: tensor<f32>) -> tensor<f32> {
+func.func @sum_reduction(%arga: tensor<?xf32, #SV>, %argx: tensor<f32>) -> tensor<f32> {
%0 = linalg.generic #trait_sum_reduction
ins(%arga: tensor<?xf32, #SV>)
outs(%argx: tensor<f32>) {
// CHECK: %[[VAL_71:.*]] = bufferization.to_tensor %[[VAL_12]] : memref<f32>
// CHECK: return %[[VAL_71]] : tensor<f32>
// CHECK: }
-func @sum_reduction_ss(%arga: tensor<16xf32, #SV>,
+func.func @sum_reduction_ss(%arga: tensor<16xf32, #SV>,
%argb: tensor<16xf32, #SV>,
%argx: tensor<f32>) -> tensor<f32> {
// Just for testing. This case would be better expressed
// CHECK: %[[VAL_77:.*]] = bufferization.to_tensor %[[VAL_14]] : memref<f32>
// CHECK: return %[[VAL_77]] : tensor<f32>
// CHECK: }
-func @sum_reduction_inv(%arga: tensor<16xf32, #SV>,
+func.func @sum_reduction_inv(%arga: tensor<16xf32, #SV>,
%argb: tensor<f32>,
%argc: tensor<16xf32, #SV>,
%argx: tensor<f32>) -> tensor<f32> {
// CHECK: %[[VAL_115:.*]] = bufferization.to_tensor %[[VAL_18]] : memref<?xf64>
// CHECK: return %[[VAL_115]] : tensor<?xf64>
// CHECK: }
-func @four_tensors_op(%arga: tensor<?xf64>,
+func.func @four_tensors_op(%arga: tensor<?xf64>,
%argb: tensor<?xf64, #SV>,
%argc: tensor<?xf64>,
%argd: tensor<?xf64, #SV>,
// CHECK: %[[VAL_252:.*]] = bufferization.to_tensor %[[VAL_16]] : memref<f64>
// CHECK: return %[[VAL_252]] : tensor<f64>
// CHECK: }
-func @red3s(%arga: tensor<?xf64, #SV>,
+func.func @red3s(%arga: tensor<?xf64, #SV>,
%argb: tensor<?xf64, #SV>,
%argc: tensor<?xf64, #SV>, %argx: tensor<f64>) ->tensor<f64>{
%0 = linalg.generic #trait_red3s
// CHECK: %[[VAL_18:.*]] = bufferization.to_tensor %[[VAL_10]] : memref<32x16xf32>
// CHECK: return %[[VAL_18]] : tensor<32x16xf32>
// CHECK: }
-func @add_dd(%arga: tensor<32x16xf32, #Tdd>, %argb: tensor<32x16xf32>, %argx: tensor<32x16xf32>) -> tensor<32x16xf32> {
+func.func @add_dd(%arga: tensor<32x16xf32, #Tdd>, %argb: tensor<32x16xf32>, %argx: tensor<32x16xf32>) -> tensor<32x16xf32> {
%0 = linalg.generic #trait2
ins(%arga, %argb: tensor<32x16xf32, #Tdd>, tensor<32x16xf32>)
outs(%argx: tensor<32x16xf32>) {
// CHECK: %[[VAL_18:.*]] = bufferization.to_tensor %[[VAL_10]] : memref<32x16xf32>
// CHECK: return %[[VAL_18]] : tensor<32x16xf32>
// CHECK: }
-func @mul_dd(%arga: tensor<32x16xf32, #Tdd>, %argb: tensor<32x16xf32>, %argx: tensor<32x16xf32>) -> tensor<32x16xf32> {
+func.func @mul_dd(%arga: tensor<32x16xf32, #Tdd>, %argb: tensor<32x16xf32>, %argx: tensor<32x16xf32>) -> tensor<32x16xf32> {
%0 = linalg.generic #trait2
ins(%arga, %argb: tensor<32x16xf32, #Tdd>, tensor<32x16xf32>)
outs(%argx: tensor<32x16xf32>) {
// CHECK: %[[VAL_37:.*]] = bufferization.to_tensor %[[VAL_13]] : memref<32x16xf32>
// CHECK: return %[[VAL_37]] : tensor<32x16xf32>
// CHECK: }
-func @add_ds(%arga: tensor<32x16xf32, #Tds>, %argb: tensor<32x16xf32>, %argx: tensor<32x16xf32>) -> tensor<32x16xf32> {
+func.func @add_ds(%arga: tensor<32x16xf32, #Tds>, %argb: tensor<32x16xf32>, %argx: tensor<32x16xf32>) -> tensor<32x16xf32> {
%0 = linalg.generic #trait2
ins(%arga, %argb: tensor<32x16xf32, #Tds>, tensor<32x16xf32>)
outs(%argx: tensor<32x16xf32>) {
// CHECK: %[[VAL_21:.*]] = bufferization.to_tensor %[[VAL_11]] : memref<32x16xf32>
// CHECK: return %[[VAL_21]] : tensor<32x16xf32>
// CHECK: }
-func @mul_ds(%arga: tensor<32x16xf32, #Tds>, %argb: tensor<32x16xf32>, %argx: tensor<32x16xf32>) -> tensor<32x16xf32> {
+func.func @mul_ds(%arga: tensor<32x16xf32, #Tds>, %argb: tensor<32x16xf32>, %argx: tensor<32x16xf32>) -> tensor<32x16xf32> {
%0 = linalg.generic #trait2
ins(%arga, %argb: tensor<32x16xf32, #Tds>, tensor<32x16xf32>)
outs(%argx: tensor<32x16xf32>) {
// CHECK: %[[VAL_40:.*]] = bufferization.to_tensor %[[VAL_13]] : memref<32x16xf32>
// CHECK: return %[[VAL_40]] : tensor<32x16xf32>
// CHECK: }
-func @add_sd(%arga: tensor<32x16xf32, #Tsd>, %argb: tensor<32x16xf32>, %argx: tensor<32x16xf32>) -> tensor<32x16xf32> {
+func.func @add_sd(%arga: tensor<32x16xf32, #Tsd>, %argb: tensor<32x16xf32>, %argx: tensor<32x16xf32>) -> tensor<32x16xf32> {
%0 = linalg.generic #trait2
ins(%arga, %argb: tensor<32x16xf32, #Tsd>, tensor<32x16xf32>)
outs(%argx: tensor<32x16xf32>) {
// CHECK: %[[VAL_22:.*]] = bufferization.to_tensor %[[VAL_11]] : memref<32x16xf32>
// CHECK: return %[[VAL_22]] : tensor<32x16xf32>
// CHECK: }
-func @mul_sd(%arga: tensor<32x16xf32, #Tsd>, %argb: tensor<32x16xf32>, %argx: tensor<32x16xf32>) -> tensor<32x16xf32> {
+func.func @mul_sd(%arga: tensor<32x16xf32, #Tsd>, %argb: tensor<32x16xf32>, %argx: tensor<32x16xf32>) -> tensor<32x16xf32> {
%0 = linalg.generic #trait2
ins(%arga, %argb: tensor<32x16xf32, #Tsd>, tensor<32x16xf32>)
outs(%argx: tensor<32x16xf32>) {
// CHECK: %[[VAL_58:.*]] = bufferization.to_tensor %[[VAL_15]] : memref<32x16xf32>
// CHECK: return %[[VAL_58]] : tensor<32x16xf32>
// CHECK: }
-func @add_ss(%arga: tensor<32x16xf32, #Tss>, %argb: tensor<32x16xf32>, %argx: tensor<32x16xf32>) -> tensor<32x16xf32> {
+func.func @add_ss(%arga: tensor<32x16xf32, #Tss>, %argb: tensor<32x16xf32>, %argx: tensor<32x16xf32>) -> tensor<32x16xf32> {
%0 = linalg.generic #trait2
ins(%arga, %argb: tensor<32x16xf32, #Tss>, tensor<32x16xf32>)
outs(%argx: tensor<32x16xf32>) {
// CHECK: %[[VAL_25:.*]] = bufferization.to_tensor %[[VAL_12]] : memref<32x16xf32>
// CHECK: return %[[VAL_25]] : tensor<32x16xf32>
// CHECK: }
-func @mul_ss(%arga: tensor<32x16xf32, #Tss>, %argb: tensor<32x16xf32>, %argx: tensor<32x16xf32>) -> tensor<32x16xf32> {
+func.func @mul_ss(%arga: tensor<32x16xf32, #Tss>, %argb: tensor<32x16xf32>, %argx: tensor<32x16xf32>) -> tensor<32x16xf32> {
%0 = linalg.generic #trait2
ins(%arga, %argb: tensor<32x16xf32, #Tss>, tensor<32x16xf32>)
outs(%argx: tensor<32x16xf32>) {
// CHECK: %[[VAL_116:.*]] = bufferization.to_tensor %[[VAL_16]] : memref<32x16xf32>
// CHECK: return %[[VAL_116]] : tensor<32x16xf32>
// CHECK: }
-func @add_ss_ss(%arga: tensor<32x16xf32, #Tss>, %argb: tensor<32x16xf32, #Tss>, %argx: tensor<32x16xf32>) -> tensor<32x16xf32> {
+func.func @add_ss_ss(%arga: tensor<32x16xf32, #Tss>, %argb: tensor<32x16xf32, #Tss>, %argx: tensor<32x16xf32>) -> tensor<32x16xf32> {
%0 = linalg.generic #trait2
ins(%arga, %argb: tensor<32x16xf32, #Tss>, tensor<32x16xf32, #Tss>)
outs(%argx: tensor<32x16xf32>) {
// CHECK: %[[VAL_72:.*]] = bufferization.to_tensor %[[VAL_16]] : memref<32x16xf32>
// CHECK: return %[[VAL_72]] : tensor<32x16xf32>
// CHECK: }
-func @mul_ss_ss(%arga: tensor<32x16xf32, #Tss>, %argb: tensor<32x16xf32, #Tss>, %argx: tensor<32x16xf32>) -> tensor<32x16xf32> {
+func.func @mul_ss_ss(%arga: tensor<32x16xf32, #Tss>, %argb: tensor<32x16xf32, #Tss>, %argx: tensor<32x16xf32>) -> tensor<32x16xf32> {
%0 = linalg.generic #trait2
ins(%arga, %argb: tensor<32x16xf32, #Tss>, tensor<32x16xf32, #Tss>)
outs(%argx: tensor<32x16xf32>) {
// CHECK: %[[VAL_70:.*]] = bufferization.to_tensor %[[VAL_15]] : memref<32x16xf32>
// CHECK: return %[[VAL_70]] : tensor<32x16xf32>
// CHECK: }
-func @add_sd_ds(%arga: tensor<32x16xf32, #Tsd>, %argb: tensor<32x16xf32, #Tds>, %argx: tensor<32x16xf32>) -> tensor<32x16xf32> {
+func.func @add_sd_ds(%arga: tensor<32x16xf32, #Tsd>, %argb: tensor<32x16xf32, #Tds>, %argx: tensor<32x16xf32>) -> tensor<32x16xf32> {
%0 = linalg.generic #trait2
ins(%arga, %argb: tensor<32x16xf32, #Tsd>, tensor<32x16xf32, #Tds>)
outs(%argx: tensor<32x16xf32>) {
// CHECK: %[[VAL_28:.*]] = bufferization.to_tensor %[[VAL_13]] : memref<32x16xf32>
// CHECK: return %[[VAL_28]] : tensor<32x16xf32>
// CHECK: }
-func @mul_sd_ds(%arga: tensor<32x16xf32, #Tsd>, %argb: tensor<32x16xf32, #Tds>, %argx: tensor<32x16xf32>) -> tensor<32x16xf32> {
+func.func @mul_sd_ds(%arga: tensor<32x16xf32, #Tsd>, %argb: tensor<32x16xf32, #Tds>, %argx: tensor<32x16xf32>) -> tensor<32x16xf32> {
%0 = linalg.generic #trait2
ins(%arga, %argb: tensor<32x16xf32, #Tsd>, tensor<32x16xf32, #Tds>)
outs(%argx: tensor<32x16xf32>) {
// CHECK: %[[VAL_26:.*]] = bufferization.to_tensor %[[VAL_11]] : memref<16xf32>
// CHECK: return %[[VAL_26]] : tensor<16xf32>
// CHECK: }
-func @matvec(%argA: tensor<16x32xf32, #Tds>, %argb: tensor<32xf32>, %argx: tensor<16xf32>) -> tensor<16xf32> {
+func.func @matvec(%argA: tensor<16x32xf32, #Tds>, %argb: tensor<32xf32>, %argx: tensor<16xf32>) -> tensor<16xf32> {
%0 = linalg.generic #trait_matvec
ins(%argA, %argb: tensor<16x32xf32, #Tds>, tensor<32xf32>)
outs(%argx: tensor<16xf32>) {
// CHECK: %[[VAL_23:.*]] = bufferization.to_tensor %[[VAL_8]] : memref<f32>
// CHECK: return %[[VAL_23]] : tensor<f32>
// CHECK: }
-func @sum_reduction(%arga: tensor<10x20xf32, #Tds>, %argx: tensor<f32>) -> tensor<f32> {
+func.func @sum_reduction(%arga: tensor<10x20xf32, #Tds>, %argx: tensor<f32>) -> tensor<f32> {
%0 = linalg.generic #trait_sum_reduction
ins(%arga: tensor<10x20xf32, #Tds>)
outs(%argx: tensor<f32>) {
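The reduction kernels instead fold the sparse input into a 0-d accumulator; a sketch of the elided region (names illustrative):
  ^bb(%a: f32, %x: f32):
    %s = arith.addf %x, %a : f32
    linalg.yield %s : f32
} -> tensor<f32>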
// CHECK: %[[VAL_20:.*]] = bufferization.to_tensor %[[VAL_11]] : memref<?x?xf64>
// CHECK: return %[[VAL_20]] : tensor<?x?xf64>
// CHECK: }
-func @scale(%arga: tensor<?x?xf64, #Tds>, %argx: tensor<?x?xf64>) -> tensor<?x?xf64> {
+func.func @scale(%arga: tensor<?x?xf64, #Tds>, %argx: tensor<?x?xf64>) -> tensor<?x?xf64> {
%0 = arith.constant 2.0 : f64
%1 = linalg.generic #trait_scale
ins(%arga: tensor<?x?xf64, #Tds>)
// CHECK: %[[VAL_38:.*]] = bufferization.to_tensor %[[VAL_17]] : memref<?x?xf32>
// CHECK: return %[[VAL_38]] : tensor<?x?xf32>
// CHECK: }
-func @sampled_dense_dense(%args: tensor<?x?xf32, #Tss>,
+func.func @sampled_dense_dense(%args: tensor<?x?xf32, #Tss>,
%arga: tensor<?x?xf32>,
%argb: tensor<?x?xf32>,
%argx: tensor<?x?xf32>) -> tensor<?x?xf32> {
// CHECK: %[[VAL_174:.*]] = bufferization.to_tensor %[[VAL_24]] : memref<?xf32>
// CHECK: return %[[VAL_174]] : tensor<?xf32>
// CHECK: }
-func @sum_kernel_with_inv(%arga: tensor<?x?xf32, #Tss>,
+func.func @sum_kernel_with_inv(%arga: tensor<?x?xf32, #Tss>,
%argb: tensor<?x?xf32, #Tds>,
%argc: tensor<?x?xf32, #Tds>,
%argd: tensor<?xf32>,
// CHECK: %[[VAL_22:.*]] = bufferization.to_tensor %[[VAL_11]] : memref<32x16x8xf32>
// CHECK: return %[[VAL_22]] : tensor<32x16x8xf32>
// CHECK: }
-func @add_ddd(%arga: tensor<32x16x8xf32, #Tddd>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
+func.func @add_ddd(%arga: tensor<32x16x8xf32, #Tddd>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
%0 = linalg.generic #trait3
ins(%arga, %argb: tensor<32x16x8xf32, #Tddd>, tensor<32x16x8xf32>)
outs(%argx: tensor<32x16x8xf32>) {
// CHECK: %[[VAL_22:.*]] = bufferization.to_tensor %[[VAL_11]] : memref<32x16x8xf32>
// CHECK: return %[[VAL_22]] : tensor<32x16x8xf32>
// CHECK: }
-func @mul_ddd(%arga: tensor<32x16x8xf32, #Tddd>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
+func.func @mul_ddd(%arga: tensor<32x16x8xf32, #Tddd>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
%0 = linalg.generic #trait3
ins(%arga, %argb: tensor<32x16x8xf32, #Tddd>, tensor<32x16x8xf32>)
outs(%argx: tensor<32x16x8xf32>) {
// CHECK: %[[VAL_42:.*]] = bufferization.to_tensor %[[VAL_15]] : memref<32x16x8xf32>
// CHECK: return %[[VAL_42]] : tensor<32x16x8xf32>
// CHECK: }
-func @add_dds(%arga: tensor<32x16x8xf32, #Tdds>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
+func.func @add_dds(%arga: tensor<32x16x8xf32, #Tdds>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
%0 = linalg.generic #trait3
ins(%arga, %argb: tensor<32x16x8xf32, #Tdds>, tensor<32x16x8xf32>)
outs(%argx: tensor<32x16x8xf32>) {
// CHECK: %[[VAL_26:.*]] = bufferization.to_tensor %[[VAL_13]] : memref<32x16x8xf32>
// CHECK: return %[[VAL_26]] : tensor<32x16x8xf32>
// CHECK: }
-func @mul_dds(%arga: tensor<32x16x8xf32, #Tdds>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
+func.func @mul_dds(%arga: tensor<32x16x8xf32, #Tdds>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
%0 = linalg.generic #trait3
ins(%arga, %argb: tensor<32x16x8xf32, #Tdds>, tensor<32x16x8xf32>)
outs(%argx: tensor<32x16x8xf32>) {
// CHECK: %[[VAL_43:.*]] = bufferization.to_tensor %[[VAL_14]] : memref<32x16x8xf32>
// CHECK: return %[[VAL_43]] : tensor<32x16x8xf32>
// CHECK: }
-func @add_dsd(%arga: tensor<32x16x8xf32, #Tdsd>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
+func.func @add_dsd(%arga: tensor<32x16x8xf32, #Tdsd>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
%0 = linalg.generic #trait3
ins(%arga, %argb: tensor<32x16x8xf32, #Tdsd>, tensor<32x16x8xf32>)
outs(%argx: tensor<32x16x8xf32>) {
// CHECK: %[[VAL_25:.*]] = bufferization.to_tensor %[[VAL_12]] : memref<32x16x8xf32>
// CHECK: return %[[VAL_25]] : tensor<32x16x8xf32>
// CHECK: }
-func @mul_dsd(%arga: tensor<32x16x8xf32, #Tdsd>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
+func.func @mul_dsd(%arga: tensor<32x16x8xf32, #Tdsd>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
%0 = linalg.generic #trait3
ins(%arga, %argb: tensor<32x16x8xf32, #Tdsd>, tensor<32x16x8xf32>)
outs(%argx: tensor<32x16x8xf32>) {
// CHECK: %[[VAL_62:.*]] = bufferization.to_tensor %[[VAL_17]] : memref<32x16x8xf32>
// CHECK: return %[[VAL_62]] : tensor<32x16x8xf32>
// CHECK: }
-func @add_dss(%arga: tensor<32x16x8xf32, #Tdss>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
+func.func @add_dss(%arga: tensor<32x16x8xf32, #Tdss>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
%0 = linalg.generic #trait3
ins(%arga, %argb: tensor<32x16x8xf32, #Tdss>, tensor<32x16x8xf32>)
outs(%argx: tensor<32x16x8xf32>) {
// CHECK: %[[VAL_29:.*]] = bufferization.to_tensor %[[VAL_14]] : memref<32x16x8xf32>
// CHECK: return %[[VAL_29]] : tensor<32x16x8xf32>
// CHECK: }
-func @mul_dss(%arga: tensor<32x16x8xf32, #Tdss>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
+func.func @mul_dss(%arga: tensor<32x16x8xf32, #Tdss>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
%0 = linalg.generic #trait3
ins(%arga, %argb: tensor<32x16x8xf32, #Tdss>, tensor<32x16x8xf32>)
outs(%argx: tensor<32x16x8xf32>) {
// CHECK: %[[VAL_46:.*]] = bufferization.to_tensor %[[VAL_14]] : memref<32x16x8xf32>
// CHECK: return %[[VAL_46]] : tensor<32x16x8xf32>
// CHECK: }
-func @add_sdd(%arga: tensor<32x16x8xf32, #Tsdd>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
+func.func @add_sdd(%arga: tensor<32x16x8xf32, #Tsdd>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
%0 = linalg.generic #trait3
ins(%arga, %argb: tensor<32x16x8xf32, #Tsdd>, tensor<32x16x8xf32>)
outs(%argx: tensor<32x16x8xf32>) {
// CHECK: %[[VAL_26:.*]] = bufferization.to_tensor %[[VAL_12]] : memref<32x16x8xf32>
// CHECK: return %[[VAL_26]] : tensor<32x16x8xf32>
// CHECK: }
-func @mul_sdd(%arga: tensor<32x16x8xf32, #Tsdd>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
+func.func @mul_sdd(%arga: tensor<32x16x8xf32, #Tsdd>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
%0 = linalg.generic #trait3
ins(%arga, %argb: tensor<32x16x8xf32, #Tsdd>, tensor<32x16x8xf32>)
outs(%argx: tensor<32x16x8xf32>) {
// CHECK: %[[VAL_65:.*]] = bufferization.to_tensor %[[VAL_17]] : memref<32x16x8xf32>
// CHECK: return %[[VAL_65]] : tensor<32x16x8xf32>
// CHECK: }
-func @add_sds(%arga: tensor<32x16x8xf32, #Tsds>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
+func.func @add_sds(%arga: tensor<32x16x8xf32, #Tsds>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
%0 = linalg.generic #trait3
ins(%arga, %argb: tensor<32x16x8xf32, #Tsds>, tensor<32x16x8xf32>)
outs(%argx: tensor<32x16x8xf32>) {
// CHECK: %[[VAL_30:.*]] = bufferization.to_tensor %[[VAL_14]] : memref<32x16x8xf32>
// CHECK: return %[[VAL_30]] : tensor<32x16x8xf32>
// CHECK: }
-func @mul_sds(%arga: tensor<32x16x8xf32, #Tsds>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
+func.func @mul_sds(%arga: tensor<32x16x8xf32, #Tsds>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
%0 = linalg.generic #trait3
ins(%arga, %argb: tensor<32x16x8xf32, #Tsds>, tensor<32x16x8xf32>)
outs(%argx: tensor<32x16x8xf32>) {
// CHECK: %[[VAL_66:.*]] = bufferization.to_tensor %[[VAL_16]] : memref<32x16x8xf32>
// CHECK: return %[[VAL_66]] : tensor<32x16x8xf32>
// CHECK: }
-func @add_ssd(%arga: tensor<32x16x8xf32, #Tssd>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
+func.func @add_ssd(%arga: tensor<32x16x8xf32, #Tssd>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
%0 = linalg.generic #trait3
ins(%arga, %argb: tensor<32x16x8xf32, #Tssd>, tensor<32x16x8xf32>)
outs(%argx: tensor<32x16x8xf32>) {
// CHECK: %[[VAL_29:.*]] = bufferization.to_tensor %[[VAL_13]] : memref<32x16x8xf32>
// CHECK: return %[[VAL_29]] : tensor<32x16x8xf32>
// CHECK: }
-func @mul_ssd(%arga: tensor<32x16x8xf32, #Tssd>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
+func.func @mul_ssd(%arga: tensor<32x16x8xf32, #Tssd>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
%0 = linalg.generic #trait3
ins(%arga, %argb: tensor<32x16x8xf32, #Tssd>, tensor<32x16x8xf32>)
outs(%argx: tensor<32x16x8xf32>) {
// CHECK: %[[VAL_85:.*]] = bufferization.to_tensor %[[VAL_19]] : memref<32x16x8xf32>
// CHECK: return %[[VAL_85]] : tensor<32x16x8xf32>
// CHECK: }
-func @add_sss(%arga: tensor<32x16x8xf32, #Tsss>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
+func.func @add_sss(%arga: tensor<32x16x8xf32, #Tsss>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
%0 = linalg.generic #trait3
ins(%arga, %argb: tensor<32x16x8xf32, #Tsss>, tensor<32x16x8xf32>)
outs(%argx: tensor<32x16x8xf32>) {
// CHECK: %[[VAL_33:.*]] = bufferization.to_tensor %[[VAL_15]] : memref<32x16x8xf32>
// CHECK: return %[[VAL_33]] : tensor<32x16x8xf32>
// CHECK: }
-func @mul_sss(%arga: tensor<32x16x8xf32, #Tsss>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
+func.func @mul_sss(%arga: tensor<32x16x8xf32, #Tsss>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
%0 = linalg.generic #trait3
ins(%arga, %argb: tensor<32x16x8xf32, #Tsss>, tensor<32x16x8xf32>)
outs(%argx: tensor<32x16x8xf32>) {
// CHECK: %[[VAL_34:.*]] = bufferization.to_tensor %[[VAL_16]] : memref<?x?xf32>
// CHECK: return %[[VAL_34]] : tensor<?x?xf32>
// CHECK: }
-func @kernel_3d(%arga: tensor<?x?xf32>,
+func.func @kernel_3d(%arga: tensor<?x?xf32>,
%argb: tensor<?x?x?xf32, #Tdds>,
%argc: tensor<?x?xf32>,
%argd: tensor<?x?xf32>) -> tensor<?x?xf32> {
// CHECK: %[[VAL_34:.*]] = bufferization.to_tensor %[[VAL_10]] : memref<f32>
// CHECK: return %[[VAL_34]] : tensor<f32>
// CHECK: }
-func @sum_reduction(%arga: tensor<10x20x30xf32, #Tsss>, %argx: tensor<f32>) -> tensor<f32> {
+func.func @sum_reduction(%arga: tensor<10x20x30xf32, #Tsss>, %argx: tensor<f32>) -> tensor<f32> {
%0 = linalg.generic #trait_sum_reduction
ins(%arga: tensor<10x20x30xf32, #Tsss>)
outs(%argx: tensor<f32>) {
// CHECK: %[[VAL_30:.*]] = bufferization.to_tensor %[[VAL_12]] : memref<f32>
// CHECK: return %[[VAL_30]] : tensor<f32>
// CHECK: }
-func @sum_reduction_inv(%arga: tensor<?x?x?xf32>,
+func.func @sum_reduction_inv(%arga: tensor<?x?x?xf32>,
%argb: tensor<?xf32, #Td>,
%argx: tensor<f32>) -> tensor<f32> {
%0 = linalg.generic #trait_sum_reduction_inv
// CHECK: %[[VAL_22:.*]] = bufferization.to_tensor %[[VAL_13]] : memref<10x20x30xf32>
// CHECK: return %[[VAL_22]] : tensor<10x20x30xf32>
// CHECK: }
-func @invariants(%arga: tensor<10xf32, #Td>,
+func.func @invariants(%arga: tensor<10xf32, #Td>,
%argb: tensor<20xf32>,
%argc: tensor<30xf32>,
%argx: tensor<10x20x30xf32>) -> tensor<10x20x30xf32> {
// CHECK: %[[VAL_21:.*]] = bufferization.to_tensor %[[VAL_11]] : memref<32xf32>
// CHECK: return %[[VAL_21]] : tensor<32xf32>
// CHECK: }
-func @mul_inv_dense1d(%arga: tensor<32xf32, #SpVec>,
+func.func @mul_inv_dense1d(%arga: tensor<32xf32, #SpVec>,
%argb: tensor<4xf32>,
%argx: tensor<32xf32>) -> tensor<32xf32> {
%0 = linalg.generic #trait1
// CHECK: %[[VAL_20:.*]] = bufferization.to_tensor %[[VAL_11]] : memref<32xi32>
// CHECK: return %[[VAL_20]] : tensor<32xi32>
// CHECK: }
-func @and_affine_dense1d(%arga: tensor<32xi32, #SpVec>,
+func.func @and_affine_dense1d(%arga: tensor<32xi32, #SpVec>,
%argb: tensor<34xi32>,
%argx: tensor<32xi32>) -> tensor<32xi32> {
%0 = linalg.generic #trait2
// CHECK: %[[VAL_27:.*]] = bufferization.to_tensor %[[VAL_13]] : memref<32x16xf64>
// CHECK: return %[[VAL_27]] : tensor<32x16xf64>
// CHECK: }
-func @mul_affine_dense2d(%arga: tensor<32x16xf64, #CSR>,
+func.func @mul_affine_dense2d(%arga: tensor<32x16xf64, #CSR>,
%argb: tensor<34x19xf64>,
%argx: tensor<32x16xf64>) -> tensor<32x16xf64> {
%0 = linalg.generic #trait3
// CHECK-CONVERT: memref.dealloc %[[C]] : memref<?xindex>
// CHECK-CONVERT: call @endInsert
//
-func @kernel(%arga: tensor<?x?xf64, #DCSC>) -> tensor<?xf64, #SV> {
+func.func @kernel(%arga: tensor<?x?xf64, #DCSC>) -> tensor<?xf64, #SV> {
%c0 = arith.constant 0 : index
%n = tensor.dim %arga, %c0 : tensor<?x?xf64, #DCSC>
%v = sparse_tensor.init [%n] : tensor<?xf64, #SV>
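Kernels with a sparse result, such as @kernel here, allocate the output with sparse_tensor.init and fill it through insertions; the lowering then finalizes the storage, which is what the endInsert call and the dealloc of the temporary index buffer checked above verify. A hedged sketch of the elided tail (the linalg trait is omitted):
  %0 = linalg.generic ... outs(%v: tensor<?xf64, #SV>) { ... }
  return %0 : tensor<?xf64, #SV>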
// CHECK: }
// CHECK: %[[VAL_14:.*]] = bufferization.to_tensor %[[VAL_7]] : memref<32xf64>
// CHECK: return %[[VAL_14]] : tensor<32xf64>
-func @abs(%arga: tensor<32xf64, #SV>,
+func.func @abs(%arga: tensor<32xf64, #SV>,
%argx: tensor<32xf64> {linalg.inplaceable = true}) -> tensor<32xf64> {
%0 = linalg.generic #trait1
ins(%arga: tensor<32xf64, #SV>)
// CHECK: %[[VAL_14:.*]] = bufferization.to_tensor %[[VAL_7]] : memref<32xf64>
// CHECK: return %[[VAL_14]] : tensor<32xf64>
// CHECK: }
-func @ceil(%arga: tensor<32xf64, #SV>,
+func.func @ceil(%arga: tensor<32xf64, #SV>,
%argx: tensor<32xf64> {linalg.inplaceable = true}) -> tensor<32xf64> {
%0 = linalg.generic #trait1
ins(%arga: tensor<32xf64, #SV>)
// CHECK: %[[VAL_14:.*]] = bufferization.to_tensor %[[VAL_7]] : memref<32xf64>
// CHECK: return %[[VAL_14]] : tensor<32xf64>
// CHECK: }
-func @floor(%arga: tensor<32xf64, #SV>,
+func.func @floor(%arga: tensor<32xf64, #SV>,
%argx: tensor<32xf64> {linalg.inplaceable = true}) -> tensor<32xf64> {
%0 = linalg.generic #trait1
ins(%arga: tensor<32xf64, #SV>)
// CHECK: %[[VAL_14:.*]] = bufferization.to_tensor %[[VAL_7]] : memref<32xf64>
// CHECK: return %[[VAL_14]] : tensor<32xf64>
// CHECK: }
-func @neg(%arga: tensor<32xf64, #SV>,
+func.func @neg(%arga: tensor<32xf64, #SV>,
%argx: tensor<32xf64> {linalg.inplaceable = true}) -> tensor<32xf64> {
%0 = linalg.generic #trait1
ins(%arga: tensor<32xf64, #SV>)
// CHECK: %[[VAL_33:.*]] = bufferization.to_tensor %[[VAL_11]] : memref<32xf64>
// CHECK: return %[[VAL_33]] : tensor<32xf64>
// CHECK: }
-func @add(%arga: tensor<32xf64, #SV>,
+func.func @add(%arga: tensor<32xf64, #SV>,
%argb: tensor<32xf64>,
%argx: tensor<32xf64> {linalg.inplaceable = true}) -> tensor<32xf64> {
%0 = linalg.generic #trait2
// CHECK: %[[VAL_35:.*]] = bufferization.to_tensor %[[VAL_11]] : memref<32xf64>
// CHECK: return %[[VAL_35]] : tensor<32xf64>
// CHECK: }
-func @sub(%arga: tensor<32xf64, #SV>,
+func.func @sub(%arga: tensor<32xf64, #SV>,
%argb: tensor<32xf64>,
%argx: tensor<32xf64> {linalg.inplaceable = true}) -> tensor<32xf64> {
%0 = linalg.generic #trait2
// CHECK: %[[VAL_17:.*]] = bufferization.to_tensor %[[VAL_9]] : memref<32xf64>
// CHECK: return %[[VAL_17]] : tensor<32xf64>
// CHECK: }
-func @mul(%arga: tensor<32xf64, #SV>,
+func.func @mul(%arga: tensor<32xf64, #SV>,
%argb: tensor<32xf64>,
%argx: tensor<32xf64> {linalg.inplaceable = true}) -> tensor<32xf64> {
%0 = linalg.generic #trait2
// CHECK: %[[VAL_15:.*]] = bufferization.to_tensor %[[VAL_8]] : memref<32xf64>
// CHECK: return %[[VAL_15]] : tensor<32xf64>
// CHECK: }
-func @divbyc(%arga: tensor<32xf64, #SV>,
+func.func @divbyc(%arga: tensor<32xf64, #SV>,
%argx: tensor<32xf64> {linalg.inplaceable = true}) -> tensor<32xf64> {
%c = arith.constant 2.0 : f64
%0 = linalg.generic #traitc
// CHECK: %[[VAL_21:.*]] = sparse_tensor.load %[[VAL_5]] : tensor<?x?xi64, #sparse_tensor.encoding
// CHECK: return %[[VAL_21]] : tensor<?x?xi64, #sparse_tensor.encoding
// CHECK: }
-func @dense_index(%arga: tensor<?x?xi64, #DenseMatrix>)
+func.func @dense_index(%arga: tensor<?x?xi64, #DenseMatrix>)
-> tensor<?x?xi64, #DenseMatrix> {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
// CHECK: %[[VAL_27:.*]] = sparse_tensor.load %[[VAL_6]] hasInserts : tensor<?x?xi64, #sparse_tensor.encoding
// CHECK: return %[[VAL_27]] : tensor<?x?xi64, #sparse_tensor.encoding
// CHECK: }
-func @sparse_index(%arga: tensor<?x?xi64, #SparseMatrix>)
+func.func @sparse_index(%arga: tensor<?x?xi64, #SparseMatrix>)
-> tensor<?x?xi64, #SparseMatrix> {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
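Both index kernels build each output entry from the iteration indices; a hedged sketch of the elided region, with %a standing for the loaded element (names illustrative):
  %i = linalg.index 0 : index
  %j = linalg.index 1 : index
  %ii = arith.index_cast %i : index to i64
  %jj = arith.index_cast %j : index to i64
  %m = arith.muli %ii, %a : i64
  %r = arith.muli %jj, %m : i64
  linalg.yield %r : i64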
// CHECK: %[[VAL_33:.*]] = bufferization.to_tensor %[[VAL_11]] : memref<32xi64>
// CHECK: return %[[VAL_33]] : tensor<32xi64>
// CHECK: }
-func @add(%arga: tensor<32xi64, #SV>,
+func.func @add(%arga: tensor<32xi64, #SV>,
%argb: tensor<32xi64>,
%argx: tensor<32xi64> {linalg.inplaceable = true}) -> tensor<32xi64> {
%0 = linalg.generic #trait2
// CHECK: %[[VAL_36:.*]] = bufferization.to_tensor %[[VAL_12]] : memref<32xi64>
// CHECK: return %[[VAL_36]] : tensor<32xi64>
// CHECK: }
-func @sub(%arga: tensor<32xi64, #SV>,
+func.func @sub(%arga: tensor<32xi64, #SV>,
%argb: tensor<32xi64>,
%argx: tensor<32xi64> {linalg.inplaceable = true}) -> tensor<32xi64> {
%0 = linalg.generic #trait2
// CHECK: %[[VAL_17:.*]] = bufferization.to_tensor %[[VAL_9]] : memref<32xi64>
// CHECK: return %[[VAL_17]] : tensor<32xi64>
// CHECK: }
-func @mul(%arga: tensor<32xi64, #SV>,
+func.func @mul(%arga: tensor<32xi64, #SV>,
%argb: tensor<32xi64>,
%argx: tensor<32xi64> {linalg.inplaceable = true}) -> tensor<32xi64> {
%0 = linalg.generic #trait2
// CHECK: %[[VAL_15:.*]] = bufferization.to_tensor %[[VAL_8]] : memref<32xi64>
// CHECK: return %[[VAL_15]] : tensor<32xi64>
// CHECK: }
-func @divsbyc(%arga: tensor<32xi64, #SV>,
+func.func @divsbyc(%arga: tensor<32xi64, #SV>,
%argx: tensor<32xi64> {linalg.inplaceable = true}) -> tensor<32xi64> {
%c = arith.constant 2 : i64
%0 = linalg.generic #traitc
// CHECK: %[[VAL_15:.*]] = bufferization.to_tensor %[[VAL_8]] : memref<32xi64>
// CHECK: return %[[VAL_15]] : tensor<32xi64>
// CHECK: }
-func @divubyc(%arga: tensor<32xi64, #SV>,
+func.func @divubyc(%arga: tensor<32xi64, #SV>,
%argx: tensor<32xi64> {linalg.inplaceable = true}) -> tensor<32xi64> {
%c = arith.constant 2 : i64
%0 = linalg.generic #traitc
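The two division-by-constant kernels differ only in the op inside the region: arith.divsi for the signed @divsbyc and arith.divui for the unsigned @divubyc, e.g. (illustrative):
  %d = arith.divui %a, %c : i64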
// CHECK: %[[VAL_17:.*]] = bufferization.to_tensor %[[VAL_9]] : memref<32xi64>
// CHECK: return %[[VAL_17]] : tensor<32xi64>
// CHECK: }
-func @and(%arga: tensor<32xi64, #SV>,
+func.func @and(%arga: tensor<32xi64, #SV>,
%argb: tensor<32xi64>,
%argx: tensor<32xi64> {linalg.inplaceable = true}) -> tensor<32xi64> {
%0 = linalg.generic #trait2
// CHECK: %[[VAL_33:.*]] = bufferization.to_tensor %[[VAL_11]] : memref<32xi64>
// CHECK: return %[[VAL_33]] : tensor<32xi64>
// CHECK: }
-func @or(%arga: tensor<32xi64, #SV>,
+func.func @or(%arga: tensor<32xi64, #SV>,
%argb: tensor<32xi64>,
%argx: tensor<32xi64> {linalg.inplaceable = true}) -> tensor<32xi64> {
%0 = linalg.generic #trait2
// CHECK: %[[VAL_33:.*]] = bufferization.to_tensor %[[VAL_11]] : memref<32xi64>
// CHECK: return %[[VAL_33]] : tensor<32xi64>
// CHECK: }
-func @xor(%arga: tensor<32xi64, #SV>,
+func.func @xor(%arga: tensor<32xi64, #SV>,
%argb: tensor<32xi64>,
%argx: tensor<32xi64> {linalg.inplaceable = true}) -> tensor<32xi64> {
%0 = linalg.generic #trait2
// CHECK: %[[VAL_15:.*]] = bufferization.to_tensor %[[VAL_8]] : memref<32xi64>
// CHECK: return %[[VAL_15]] : tensor<32xi64>
// CHECK: }
-func @ashrbyc(%arga: tensor<32xi64, #SV>,
+func.func @ashrbyc(%arga: tensor<32xi64, #SV>,
%argx: tensor<32xi64> {linalg.inplaceable = true}) -> tensor<32xi64> {
%c = arith.constant 2 : i64
%0 = linalg.generic #traitc
// CHECK: %[[VAL_15:.*]] = bufferization.to_tensor %[[VAL_8]] : memref<32xi64>
// CHECK: return %[[VAL_15]] : tensor<32xi64>
// CHECK: }
-func @lsrbyc(%arga: tensor<32xi64, #SV>,
+func.func @lsrbyc(%arga: tensor<32xi64, #SV>,
%argx: tensor<32xi64> {linalg.inplaceable = true}) -> tensor<32xi64> {
%c = arith.constant 2 : i64
%0 = linalg.generic #traitc
// CHECK: %[[VAL_15:.*]] = bufferization.to_tensor %[[VAL_8]] : memref<32xi64>
// CHECK: return %[[VAL_15]] : tensor<32xi64>
// CHECK: }
-func @lslbyc(%arga: tensor<32xi64, #SV>,
+func.func @lslbyc(%arga: tensor<32xi64, #SV>,
%argx: tensor<32xi64> {linalg.inplaceable = true}) -> tensor<32xi64> {
%c = arith.constant 2 : i64
%0 = linalg.generic #traitc
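Similarly, the shift-by-constant kernels differ only in the shift op: arith.shrsi for @ashrbyc, arith.shrui for @lsrbyc, and arith.shli for @lslbyc, e.g. (illustrative):
  %s = arith.shli %a, %c : i64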
// CHECK: %[[VAL_29:.*]] = bufferization.to_tensor %[[VAL_13]] : memref<10x30xf32>
// CHECK: return %[[VAL_29]] : tensor<10x30xf32>
// CHECK: }
-func @matmul1(%a: tensor<10x20xf32, #DCSR>,
+func.func @matmul1(%a: tensor<10x20xf32, #DCSR>,
%b: tensor<20x30xf32>,
%c: tensor<10x30xf32>) -> tensor<10x30xf32> {
%0 = linalg.matmul
// CHECK: %[[VAL_77:.*]] = sparse_tensor.load %[[VAL_8]] hasInserts : tensor<4x4xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: return %[[VAL_77]] : tensor<4x4xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: }
-func @matmul2(%A: tensor<4x8xf64, #DCSR>,
+func.func @matmul2(%A: tensor<4x8xf64, #DCSR>,
%B: tensor<8x4xf64, #DCSR>) -> tensor<4x4xf64, #DCSR> {
%c4 = arith.constant 4 : index
%C = sparse_tensor.init [%c4, %c4] : tensor<4x4xf64, #DCSR>
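A hedged sketch of the elided remainder of @matmul2, following the pattern of the other sparse-output kernels in this patch (the sparse_tensor.load hasInserts checked above is introduced by the lowering, not written in the source):
  %D = linalg.matmul
    ins(%A, %B: tensor<4x8xf64, #DCSR>, tensor<8x4xf64, #DCSR>)
    outs(%C: tensor<4x4xf64, #DCSR>) -> tensor<4x4xf64, #DCSR>
  return %D : tensor<4x4xf64, #DCSR>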
// CHECK: %[[VAL_32:.*]] = bufferization.to_tensor %[[VAL_13]] : memref<6x6xi32>
// CHECK: return %[[VAL_32]] : tensor<6x6xi32>
// CHECK: }
-func @conv2d(%input: tensor<8x8xi32>,
+func.func @conv2d(%input: tensor<8x8xi32>,
%filter: tensor<3x3xi32, #DCSR>,
%output: tensor<6x6xi32>) -> tensor<6x6xi32> {
%0 = linalg.conv_2d
// CHECK: %[[VAL_33:.*]] = bufferization.to_tensor %[[VAL_14]] : memref<5x6xi64>
// CHECK: return %[[VAL_33]] : tensor<5x6xi64>
// CHECK: }
-func @quantized_matmul(%input1: tensor<5x3xi8>,
+func.func @quantized_matmul(%input1: tensor<5x3xi8>,
%input2: tensor<3x6xi8, #DCSR>,
%output: tensor<5x6xi64>) -> tensor<5x6xi64> {
%c0 = arith.constant 0 : i32
// CHECK: %[[VAL_48:.*]] = bufferization.to_tensor %[[VAL_11]] : memref<f32>
// CHECK: return %[[VAL_48]] : tensor<f32>
// CHECK: }
-func @sparse_dot(%a: tensor<1024xf32, #SparseVector>,
+func.func @sparse_dot(%a: tensor<1024xf32, #SparseVector>,
%b: tensor<1024xf32, #SparseVector>,
%x: tensor<f32>) -> tensor<f32> {
%dot = linalg.dot ins(%a, %b: tensor<1024xf32, #SparseVector>,
// CHECK-LIR: return %[[VAL_9]] : memref<32xf64>
// CHECK-LIR: }
-func @matvec(%arga: tensor<32x64xf64, #CSR>,
+func.func @matvec(%arga: tensor<32x64xf64, #CSR>,
%argb: tensor<64xf64>,
%argx: tensor<32xf64>) -> tensor<32xf64> {
%0 = linalg.generic #trait_matvec
// CHECK-LIR: return %[[VAL_10]] : memref<32xf64>
// CHECK-LIR: }
-func @matvec(%arga: tensor<32x64xf64, #CSC>,
+func.func @matvec(%arga: tensor<32x64xf64, #CSC>,
%argb: tensor<64xf64>,
%argx: tensor<32xf64>) -> tensor<32xf64> {
%0 = linalg.generic #trait_matvec
// CHECK-LIR: return %[[VAL_2]] : memref<32xf64>
// CHECK-LIR: }
-func @matvec(%arga: tensor<32x64xf64, #CSR>,
+func.func @matvec(%arga: tensor<32x64xf64, #CSR>,
%argb: tensor<64xf64>,
%argx: tensor<32xf64> {linalg.inplaceable = true}) -> tensor<32xf64> {
%0 = linalg.generic #trait_matvec
// CHECK: %[[VAL_50:.*]] = bufferization.to_tensor %[[VAL_20]] : memref<10x20x30x40x50x60x70x80xf32>
// CHECK: return %[[VAL_50]] : tensor<10x20x30x40x50x60x70x80xf32>
// CHECK: }
-func @mul(%arga: tensor<10x20x30x40x50x60x70x80xf32>,
+func.func @mul(%arga: tensor<10x20x30x40x50x60x70x80xf32>,
%argb: tensor<80x70x60x50x40x30x20x10xf32, #SparseTensor>,
%argx: tensor<10x20x30x40x50x60x70x80xf32>)
-> tensor<10x20x30x40x50x60x70x80xf32> {
// CHECK: %[[VAL_18:.*]] = sparse_tensor.load %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>>
// CHECK: return %[[VAL_18]] : tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>>
// CHECK: }
-func @sparse_simply_dynamic1(%argx: tensor<32x16xf32, #DCSR> {linalg.inplaceable = true}) -> tensor<32x16xf32, #DCSR> {
+func.func @sparse_simply_dynamic1(%argx: tensor<32x16xf32, #DCSR> {linalg.inplaceable = true}) -> tensor<32x16xf32, #DCSR> {
%c = arith.constant 2.0 : f32
%0 = linalg.generic #trait_scale_inpl
outs(%argx: tensor<32x16xf32, #DCSR>) {
// CHECK: %[[VAL_16:.*]] = sparse_tensor.load %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>>
// CHECK: return %[[VAL_16]] : tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>>
// CHECK: }
-func @sparse_simply_dynamic2(%argx: tensor<32x16xf32, #DCSR> {linalg.inplaceable = true}) -> tensor<32x16xf32, #DCSR> {
+func.func @sparse_simply_dynamic2(%argx: tensor<32x16xf32, #DCSR> {linalg.inplaceable = true}) -> tensor<32x16xf32, #DCSR> {
%0 = linalg.generic #trait_scale_inpl
outs(%argx: tensor<32x16xf32, #DCSR>) {
^bb(%x: f32):
// CHECK: %[[VAL_20:.*]] = sparse_tensor.load %[[VAL_7]] hasInserts : tensor<10x20xf32, #sparse_tensor.encoding<{{.*}}>>
// CHECK: return %[[VAL_20]] : tensor<10x20xf32, #sparse_tensor.encoding<{
// CHECK: }
-func @sparse_truly_dynamic(%arga: tensor<10x20xf32, #CSR>) -> tensor<10x20xf32, #DCSR> {
+func.func @sparse_truly_dynamic(%arga: tensor<10x20xf32, #CSR>) -> tensor<10x20xf32, #DCSR> {
%s = arith.constant 2.0 : f32
%d10 = arith.constant 10 : index
%d20 = arith.constant 20 : index
// CHECK: %[[VAL_112:.*]] = sparse_tensor.load %[[VAL_8]] hasInserts : tensor<?x?xi32, #{{.*}}>
// CHECK: return %[[VAL_112]] : tensor<?x?xi32, #{{.*}}>
// CHECK: }
-func @sumred(%arga: tensor<?x?x?xi32, #SparseTensor>,
+func.func @sumred(%arga: tensor<?x?x?xi32, #SparseTensor>,
%argb: tensor<?x?x?xi32, #SparseTensor>) -> tensor<?x?xi32, #DCSR> {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
// CHECK: %[[VAL_78:.*]] = sparse_tensor.load %[[VAL_9]] hasInserts : tensor<?x?xf32, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: return %[[VAL_78]] : tensor<?x?xf32, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: }
-func @matmat(%arga: tensor<?x?xf32, #DCSR>,
+func.func @matmat(%arga: tensor<?x?xf32, #DCSR>,
%argb: tensor<?x?xf32, #DCSR>) -> tensor<?x?xf32, #DCSR> {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
// CHECK-PAR4: scf.parallel
// CHECK-PAR4: return
//
-func @scale_dd(%scale: f32,
+func.func @scale_dd(%scale: f32,
%arga: tensor<?x?xf32, #DenseMatrix>,
%argx: tensor<?x?xf32>) -> tensor<?x?xf32> {
%0 = linalg.generic #trait_dd
// CHECK-PAR4: scf.parallel
// CHECK-PAR4: return
//
-func @scale_ss(%scale: f32,
+func.func @scale_ss(%scale: f32,
%arga: tensor<?x?xf32, #SparseMatrix>,
%argx: tensor<?x?xf32>) -> tensor<?x?xf32> {
%0 = linalg.generic #trait_ss
// CHECK-PAR4: scf.for
// CHECK-PAR4: return
//
-func @matvec(%arga: tensor<16x32xf32, #CSR>,
+func.func @matvec(%arga: tensor<16x32xf32, #CSR>,
%argb: tensor<32xf32>,
%argx: tensor<16xf32>) -> tensor<16xf32> {
%0 = linalg.generic #trait_matvec
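Note the asymmetry the CHECK-PAR4 lines encode: the fully parallel scale kernels lower to scf.parallel even in their sparse form, while the reduction dimension of @matvec stays a sequential scf.for regardless of the parallelization strategy.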
// CHECK: %[[VAL_18:.*]] = bufferization.to_tensor %[[VAL_9]] : memref<20x30x10xf32>
// CHECK: return %[[VAL_18]] : tensor<20x30x10xf32>
// CHECK: }
-func @sparse_static_dims(%arga: tensor<10x20x30xf32, #X>,
+func.func @sparse_static_dims(%arga: tensor<10x20x30xf32, #X>,
%argx: tensor<20x30x10xf32>) -> tensor<20x30x10xf32> {
%0 = linalg.generic #trait
ins(%arga: tensor<10x20x30xf32, #X>)
// CHECK: %[[VAL_19:.*]] = bufferization.to_tensor %[[VAL_10]] : memref<?x?x?xf32>
// CHECK: return %[[VAL_19]] : tensor<?x?x?xf32>
// CHECK: }
-func @sparse_dynamic_dims(%arga: tensor<?x?x?xf32, #X>,
+func.func @sparse_dynamic_dims(%arga: tensor<?x?x?xf32, #X>,
%argx: tensor<?x?x?xf32>) -> tensor<?x?x?xf32> {
%0 = linalg.generic #trait
ins(%arga: tensor<?x?x?xf32, #X>)
// CHECK-MIR: %[[VAL_30:.*]] = bufferization.to_tensor %[[VAL_10]] : memref<f32>
// CHECK-MIR: return %[[VAL_30]] : tensor<f32>
// CHECK-MIR: }
-func @sparse_dynamic_dims(%arga: tensor<?x?x?xf32, #X>,
+func.func @sparse_dynamic_dims(%arga: tensor<?x?x?xf32, #X>,
%argx: tensor<f32>) -> tensor<f32> {
%0 = linalg.generic #trait
ins(%arga: tensor<?x?x?xf32, #X>)
// CHECK: %[[VAL_34:.*]] = bufferization.to_tensor %[[VAL_15]] : memref<32x16xf32>
// CHECK: return %[[VAL_34]] : tensor<32x16xf32>
// CHECK: }
-func @mul(%arga: tensor<32x16xf32, #SparseMatrix>,
+func.func @mul(%arga: tensor<32x16xf32, #SparseMatrix>,
%argp: tensor<f32>,
%argq: f32,
%argr: f32,
// CHECK: %[[MUL:.*]] = arith.mulf %[[VAL0]], %[[VAL1]] : f64
// CHECK: store %[[MUL]], %{{.*}}[%[[INDC]]] : memref<32xf64>
// CHECK: }
-func @mul64(%arga: tensor<32xf64, #SparseVector64>, %argb: tensor<32xf64>, %argx: tensor<32xf64>) -> tensor<32xf64> {
+func.func @mul64(%arga: tensor<32xf64, #SparseVector64>, %argb: tensor<32xf64>, %argx: tensor<32xf64>) -> tensor<32xf64> {
%0 = linalg.generic #trait_mul
ins(%arga, %argb: tensor<32xf64, #SparseVector64>, tensor<32xf64>)
outs(%argx: tensor<32xf64>) {
// CHECK: %[[MUL:.*]] = arith.mulf %[[VAL0]], %[[VAL1]] : f64
// CHECK: store %[[MUL]], %{{.*}}[%[[INDC]]] : memref<32xf64>
// CHECK: }
-func @mul32(%arga: tensor<32xf64, #SparseVector32>, %argb: tensor<32xf64>, %argx: tensor<32xf64>) -> tensor<32xf64> {
+func.func @mul32(%arga: tensor<32xf64, #SparseVector32>, %argb: tensor<32xf64>, %argx: tensor<32xf64>) -> tensor<32xf64> {
%0 = linalg.generic #trait_mul
ins(%arga, %argb: tensor<32xf64, #SparseVector32>, tensor<32xf64>)
outs(%argx: tensor<32xf64>) {
// CHECK-VEC4: }
// CHECK-VEC4: return
//
-func @scale_d(%arga: tensor<1024xf32, #DenseVector>, %b: f32, %argx: tensor<1024xf32>) -> tensor<1024xf32> {
+func.func @scale_d(%arga: tensor<1024xf32, #DenseVector>, %b: f32, %argx: tensor<1024xf32>) -> tensor<1024xf32> {
%0 = linalg.generic #trait_scale_d
ins(%arga: tensor<1024xf32, #DenseVector>)
outs(%argx: tensor<1024xf32>) {
// CHECK-VEC4: }
// CHECK-VEC4: return
//
-func @mul_s(%arga: tensor<1024xf32, #SparseVector>, %argb: tensor<1024xf32>, %argx: tensor<1024xf32>) -> tensor<1024xf32> {
+func.func @mul_s(%arga: tensor<1024xf32, #SparseVector>, %argb: tensor<1024xf32>, %argx: tensor<1024xf32>) -> tensor<1024xf32> {
%0 = linalg.generic #trait_mul_s
ins(%arga, %argb: tensor<1024xf32, #SparseVector>, tensor<1024xf32>)
outs(%argx: tensor<1024xf32>) {
// CHECK-VEC4: %{{.*}} = vector.reduction <add>, %[[red]] : vector<[4]xf32> into f32
// CHECK-VEC4: return
//
-func @reduction_d(%arga: tensor<1024xf32, #DenseVector>, %argb: tensor<1024xf32>, %argx: tensor<f32>) -> tensor<f32> {
+func.func @reduction_d(%arga: tensor<1024xf32, #DenseVector>, %argb: tensor<1024xf32>, %argx: tensor<f32>) -> tensor<f32> {
%0 = linalg.generic #trait_reduction_d
ins(%arga, %argb: tensor<1024xf32, #DenseVector>, tensor<1024xf32>)
outs(%argx: tensor<f32>) {
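Under CHECK-VEC4 the reduction loop of @reduction_d is vectorized with scalable vectors; the vector accumulator is folded back to a scalar exactly as the check line above states:
  %r = vector.reduction <add>, %acc : vector<[4]xf32> into f32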
// CHECK-VEC4: }
// CHECK-VEC4: return
//
-func @mul_ds(%arga: tensor<512x1024xf32, #SparseMatrix>, %argb: tensor<512x1024xf32>, %argx: tensor<512x1024xf32>) -> tensor<512x1024xf32> {
+func.func @mul_ds(%arga: tensor<512x1024xf32, #SparseMatrix>, %argb: tensor<512x1024xf32>, %argx: tensor<512x1024xf32>) -> tensor<512x1024xf32> {
%0 = linalg.generic #trait_mul_ds
ins(%arga, %argb: tensor<512x1024xf32, #SparseMatrix>, tensor<512x1024xf32>)
outs(%argx: tensor<512x1024xf32>) {
// CHECK-VEC4: }
// CHECK-VEC4: return
//
-func @add_dense(%arga: tensor<32x64xf64, #SparseMatrix>,
+func.func @add_dense(%arga: tensor<32x64xf64, #SparseMatrix>,
%argx: tensor<33x64xf64> {linalg.inplaceable = true}) -> tensor<33x64xf64> {
%0 = linalg.generic #trait_affine
ins(%arga: tensor<32x64xf64, #SparseMatrix>)
// CHECK: %[[VAL_87:.*]] = bufferization.to_tensor %[[VAL_15]] : memref<f64>
// CHECK: return %[[VAL_87]] : tensor<f64>
// CHECK: }
-func @sparse_matrix_sum(%argx: tensor<f64> {linalg.inplaceable = true},
+func.func @sparse_matrix_sum(%argx: tensor<f64> {linalg.inplaceable = true},
%arga: tensor<64x32xf64, #SparseMatrix>,
%argb: tensor<64x32xf64, #SparseMatrix>) -> tensor<f64> {
%0 = linalg.generic #trait
// CHECK: %[[VAL_20:.*]] = bufferization.to_tensor %[[VAL_10]] : memref<8xi64>
// CHECK: return %[[VAL_20]] : tensor<8xi64>
// CHECK: }
-func @sparse_index_1d_conj(%arga: tensor<8xi64, #SparseVector>) -> tensor<8xi64> {
+func.func @sparse_index_1d_conj(%arga: tensor<8xi64, #SparseVector>) -> tensor<8xi64> {
%init = linalg.init_tensor [8] : tensor<8xi64>
%r = linalg.generic #trait_1d
ins(%arga: tensor<8xi64, #SparseVector>)
// CHECK: %[[VAL_35:.*]] = bufferization.to_tensor %[[VAL_9]] : memref<8xi64>
// CHECK: return %[[VAL_35]] : tensor<8xi64>
// CHECK: }
-func @sparse_index_1d_disj(%arga: tensor<8xi64, #SparseVector>) -> tensor<8xi64> {
+func.func @sparse_index_1d_disj(%arga: tensor<8xi64, #SparseVector>) -> tensor<8xi64> {
%init = linalg.init_tensor [8] : tensor<8xi64>
%r = linalg.generic #trait_1d
ins(%arga: tensor<8xi64, #SparseVector>)
// CHECK: }
// CHECK: return
//
-func @mul_s(%arga: tensor<1024xf32, #SparseVector>, %argb: tensor<1024xf32>, %argx: tensor<1024xf32>) -> tensor<1024xf32> {
+func.func @mul_s(%arga: tensor<1024xf32, #SparseVector>, %argb: tensor<1024xf32>, %argx: tensor<1024xf32>) -> tensor<1024xf32> {
%0 = linalg.generic #trait_mul_s
ins(%arga, %argb: tensor<1024xf32, #SparseVector>, tensor<1024xf32>)
outs(%argx: tensor<1024xf32>) {