StridedMemRefRankOf<[Index],[1]>:$added,
Index:$count,
AnySparseTensor:$tensor,
Variadic<Index>:$indices)>,
Results<(outs AnySparseTensor:$result)> {
string summary = "Compressed an access pattern for insertion";
string description = [{
}];
let assemblyFormat = "$values `,` $filled `,` $added `,` $count"
" `into` $tensor `[` $indices `]` attr-dict"
" `:` type($values) `,` type($filled) `,` type($added)"
" `,` type($tensor)";
let hasVerifier = 1;
}
%v2 = arith.constant sparse<
[ [0], [3], [5], [11], [13], [17], [18], [21], [31] ],
[ -2147483648, -2147483647, -1000, -1, 0,
1, 1000, 2147483646, 2147483647
]
> : tensor<32xi32>
%sv1 = sparse_tensor.convert %v1
: tensor<32xf64> to tensor<?xf64, #SparseVector>
//
func.func @kernel_flatten(%arga: tensor<7x3x3x3x3x3x5x3xf64, #SparseTensor>,
%argx: tensor<7x3xf64>)
-> tensor<7x3xf64> {
%0 = linalg.generic #trait_flatten
ins(%arga: tensor<7x3x3x3x3x3x5x3xf64, #SparseTensor>)
outs(%argx: tensor<7x3xf64>) {
func.func @kernel_matvec(%arga: tensor<?x?xi32, #SparseMatrix>,
%argb: tensor<?xi32>,
%argx: tensor<?xi32>)
-> tensor<?xi32> {
%0 = linalg.generic #matvec
ins(%arga, %argb: tensor<?x?xi32, #SparseMatrix>, tensor<?xi32>)
outs(%argx: tensor<?xi32>) {
%argc: tensor<?x?xf64>,
%argd: tensor<?x?xf64>,
%arga: tensor<?x?xf64>)
-> tensor<?x?xf64> {
%0 = linalg.generic #mttkrp
ins(%argb, %argc, %argd:
tensor<?x?x?xf64, #SparseTensor>, tensor<?x?xf64>, tensor<?x?xf64>)
module {
func.func @redsum(%arga: tensor<?x?x?xi32, #SparseTensor>,
%argb: tensor<?x?x?xi32, #SparseTensor>)
-> tensor<?x?xi32, #SparseMatrix> {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%d0 = tensor.dim %arga, %c0 : tensor<?x?x?xi32, #SparseTensor>
%0 = call @quantized_matmul(%input1, %sparse_input2, %output)
: (tensor<5x3xi8>,
tensor<3x6xi8, #DCSR>,
tensor<5x6xi32>) -> tensor<5x6xi32>
//
// Verify the output.