From: wren romano <2998727+wrengr@users.noreply.github.com>
Date: Tue, 30 May 2023 20:53:27 +0000 (-0700)
Subject: [mlir][sparse] Updating STEA parser/printer to use the name "dimSlices"
X-Git-Tag: upstream/17.0.6~6744
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=540d5e0ce66cefb072ab8f22df62468357c9ed0f;p=platform%2Fupstream%2Fllvm.git

[mlir][sparse] Updating STEA parser/printer to use the name "dimSlices"

The C++ side of SparseTensorEncodingAttr (STEA) names this parameter
`dimSlices`, but the parser/printer used the key `slice` instead. This
updates the parser, printer, and affected tests to use `dimSlices`
consistently, and removes the corresponding FIXME.

Depends On D151505

Reviewed By: Peiming

Differential Revision: https://reviews.llvm.org/D151513
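For illustration only (not part of the patch itself): after this change, the
documentation example for a sliced CSR encoding is written as below, where
each triple gives (offset, size, stride) for one dimension and `?` marks a
dynamic value.

    #CSR_SLICE = #sparse_tensor.encoding<{
      lvlTypes = [ "dense", "compressed" ],
      // Previously spelled: slice = [ (0, 4, 1), (0, 8, ?) ]
      dimSlices = [ (0, 4, 1), (0, 8, ?) ]
    }>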
---

diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
index e49d7be..f0a502e 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
@@ -244,7 +244,7 @@ def SparseTensorEncodingAttr : SparseTensor_Attr<"SparseTensorEncoding",
     // offset = 0, size = 8, and a dynamic stride on the second dimension).
     #CSR_SLICE = #sparse_tensor.encoding<{
       lvlTypes = [ "dense", "compressed" ],
-      slice = [ (0, 4, 1), (0, 8, ?) ]
+      dimSlices = [ (0, 4, 1), (0, 8, ?) ]
     }>
     ... tensor<?x?xf64, #CSR_SLICE> ...
@@ -266,9 +266,6 @@ def SparseTensorEncodingAttr : SparseTensor_Attr<"SparseTensorEncoding",
     // The required bitwidth for coordinate storage.
     "unsigned":$crdWidth,
     // A slice attribute for each dimension of the tensor type.
-    // FIXME: The name used here is `dimSlices`, however the
-    // parser/printer uses the name `slice` instead. Therefore
-    // the parser/printer need to be updated to match.
     ArrayRefParameter<
       "::mlir::sparse_tensor::SparseTensorDimSliceAttr",
       "per dimension slice metadata"
diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
index ae4198f..962e0ac21 100644
--- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
@@ -408,7 +408,7 @@ Attribute SparseTensorEncodingAttr::parse(AsmParser &parser, Type type) {
   // Process the data from the parsed dictionary value into struct-like data.
   SmallVector<DimLevelType> lvlTypes;
-  SmallVector<SparseTensorDimSliceAttr> slices;
+  SmallVector<SparseTensorDimSliceAttr> dimSlices;
   AffineMap dimToLvl = {};
   unsigned posWidth = 0;
   unsigned crdWidth = 0;
@@ -416,7 +416,7 @@ Attribute SparseTensorEncodingAttr::parse(AsmParser &parser, Type type) {
   StringRef attrName;
   // Exactly 6 keys.
   SmallVector<StringRef> keys = {"lvlTypes", "dimToLvl", "posWidth",
-                                 "crdWidth", "slice"};
+                                 "crdWidth", "dimSlices"};
   while (succeeded(parser.parseOptionalKeyword(&attrName))) {
     if (!llvm::is_contained(keys, attrName)) {
       parser.emitError(parser.getNameLoc(), "unexpected key: ") << attrName;
@@ -464,13 +464,13 @@ Attribute SparseTensorEncodingAttr::parse(AsmParser &parser, Type type) {
       auto intAttr = llvm::dyn_cast<IntegerAttr>(attr);
       ERROR_IF(!intAttr, "expected an integral index bitwidth")
       crdWidth = intAttr.getInt();
-    } else if (attrName == "slice") {
+    } else if (attrName == "dimSlices") {
       RETURN_ON_FAIL(parser.parseLSquare())
       // Dispatches to DimSliceAttr to skip mnemonic
       bool finished = false;
       while (auto attr = SparseTensorDimSliceAttr::parse(parser, nullptr)) {
         auto sliceAttr = llvm::cast<SparseTensorDimSliceAttr>(attr);
-        slices.push_back(sliceAttr);
+        dimSlices.push_back(sliceAttr);
         if (parser.parseOptionalComma().failed()) {
           finished = true;
           break;
@@ -494,7 +494,7 @@ Attribute SparseTensorEncodingAttr::parse(AsmParser &parser, Type type) {
   // Construct struct-like storage for attribute.
   return parser.getChecked<SparseTensorEncodingAttr>(
-      parser.getContext(), lvlTypes, dimToLvl, posWidth, crdWidth, slices);
+      parser.getContext(), lvlTypes, dimToLvl, posWidth, crdWidth, dimSlices);
 }
 
 void SparseTensorEncodingAttr::print(AsmPrinter &printer) const {
@@ -512,7 +512,7 @@ void SparseTensorEncodingAttr::print(AsmPrinter &printer) const {
   if (getCrdWidth())
     printer << ", crdWidth = " << getCrdWidth();
   if (!getDimSlices().empty()) {
-    printer << ", slice = [ ";
+    printer << ", dimSlices = [ ";
     llvm::interleaveComma(getDimSlices(), printer,
                           [&](SparseTensorDimSliceAttr attr) {
                             // Calls SparseTensorDimSliceAttr::print directly to
diff --git a/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir b/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir
index 26f41e1..fd612d5 100644
--- a/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir
+++ b/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir
@@ -41,7 +41,7 @@
 
 #COOSlice = #sparse_tensor.encoding<{
   lvlTypes = [ "compressed-nu", "singleton" ],
-  slice = [ (2, 2, 1), (12, 13, 1) ]
+  dimSlices = [ (2, 2, 1), (12, 13, 1) ]
 }>
 
 // CHECK-LABEL: func @sparse_nop_convert(
diff --git a/mlir/test/Dialect/SparseTensor/invalid.mlir b/mlir/test/Dialect/SparseTensor/invalid.mlir
index c1e8afd..7a6c482 100644
--- a/mlir/test/Dialect/SparseTensor/invalid.mlir
+++ b/mlir/test/Dialect/SparseTensor/invalid.mlir
@@ -202,7 +202,7 @@ func.func @mismatch_values_types(%arg0: tensor) -> memref<
 
 #CSR_SLICE = #sparse_tensor.encoding<{
   lvlTypes = [ "dense", "compressed" ],
-  slice = [ (1, 4, 1), (1, 4, 2) ]
+  dimSlices = [ (1, 4, 1), (1, 4, 2) ]
 }>
 
 func.func @sparse_slice_offset(%arg0: tensor<2x8xf64, #CSR_SLICE>) -> index {
@@ -215,7 +215,7 @@
 
 #CSR_SLICE = #sparse_tensor.encoding<{
   lvlTypes = [ "dense", "compressed" ],
-  slice = [ (1, 4, 1), (1, 4, 2) ]
+  dimSlices = [ (1, 4, 1), (1, 4, 2) ]
 }>
 
 func.func @sparse_slice_stride(%arg0: tensor<2x8xf64, #CSR_SLICE>) -> index {
@@ -401,7 +401,7 @@ func.func @invalid_out_dense(%arg0: tensor<10xf64>, %arg1: !llvm.ptr) {
 
 #CSR = #sparse_tensor.encoding<{
   lvlTypes = ["dense", "compressed"],
-  slice = [ (1, 4, 1), (1, 4, 2) ]
+  dimSlices = [ (1, 4, 1), (1, 4, 2) ]
 }>
 
 func.func @sparse_convert_to_slice(%arg0: tensor<10x?xf32>) -> tensor<10x10xf32, #CSR> {
diff --git a/mlir/test/Dialect/SparseTensor/invalid_encoding.mlir b/mlir/test/Dialect/SparseTensor/invalid_encoding.mlir
index 91c3ef7..e76df65 100644
--- a/mlir/test/Dialect/SparseTensor/invalid_encoding.mlir
+++ b/mlir/test/Dialect/SparseTensor/invalid_encoding.mlir
@@ -66,6 +66,6 @@ func.func private @tensor_invalid_key(%arg0: tensor<16x32xf32, #a>) -> ()
 
 #CSR_SLICE = #sparse_tensor.encoding<{
   lvlTypes = [ "dense", "compressed" ],
-  slice = [ (-1, ?, 1), (?, 4, 2) ] // expected-error{{expect positive value or ? for slice offset/size/stride}}
+  dimSlices = [ (-1, ?, 1), (?, 4, 2) ] // expected-error{{expect positive value or ? for slice offset/size/stride}}
 }>
 func.func private @sparse_slice(tensor)
diff --git a/mlir/test/Dialect/SparseTensor/pre_rewriting.mlir b/mlir/test/Dialect/SparseTensor/pre_rewriting.mlir
index d35296b..8aed1d6 100644
--- a/mlir/test/Dialect/SparseTensor/pre_rewriting.mlir
+++ b/mlir/test/Dialect/SparseTensor/pre_rewriting.mlir
@@ -10,7 +10,7 @@
 
 #Slice = #sparse_tensor.encoding<{
   lvlTypes = [ "compressed-nu", "singleton" ],
-  slice = [ (?, 1, 1), (?, 3, 1) ]
+  dimSlices = [ (?, 1, 1), (?, 3, 1) ]
 }>
 
 // CHECK-LABEL: func @sparse_nop_cast(
diff --git a/mlir/test/Dialect/SparseTensor/roundtrip.mlir b/mlir/test/Dialect/SparseTensor/roundtrip.mlir
index 57dff1e..43429f4 100644
--- a/mlir/test/Dialect/SparseTensor/roundtrip.mlir
+++ b/mlir/test/Dialect/SparseTensor/roundtrip.mlir
@@ -144,7 +144,7 @@ func.func @sparse_values(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xf64>
 
 #CSR_SLICE = #sparse_tensor.encoding<{
   lvlTypes = [ "dense", "compressed" ],
-  slice = [ (1, 4, 1), (1, 4, 2) ]
+  dimSlices = [ (1, 4, 1), (1, 4, 2) ]
 }>
 
 // CHECK-LABEL: func @sparse_slice_offset(
@@ -160,7 +160,7 @@ func.func @sparse_slice_offset(%arg0: tensor<2x8xf64, #CSR_SLICE>) -> index {
 
 #CSR_SLICE = #sparse_tensor.encoding<{
   lvlTypes = [ "dense", "compressed" ],
-  slice = [ (1, 4, 1), (1, 4, 2) ]
+  dimSlices = [ (1, 4, 1), (1, 4, 2) ]
 }>
 
 // CHECK-LABEL: func @sparse_slice_stride(
@@ -189,7 +189,7 @@ func.func @sparse_metadata_init() -> !sparse_tensor.storage_specifier<#SparseVector>
 
 #SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}>
 #SparseVector_Slice = #sparse_tensor.encoding<{
   lvlTypes = ["compressed"],
-  slice = [ (?, ?, ?) ]
+  dimSlices = [ (?, ?, ?) ]
 }>
 
 // CHECK-LABEL: func @sparse_metadata_init(
@@ -221,7 +221,7 @@ func.func @sparse_get_md(%arg0: !sparse_tensor.storage_specifier<#SparseVector>)
 
 #SparseVector_Slice = #sparse_tensor.encoding<{
   lvlTypes = ["compressed"],
-  slice = [ (?, ?, ?) ]
+  dimSlices = [ (?, ?, ?) ]
 }>
 
 // CHECK-LABEL: func @sparse_get_md(
@@ -238,7 +238,7 @@ func.func @sparse_get_md(%arg0: !sparse_tensor.storage_specifier<#SparseVector_Slice>)
 
 #SparseVector = #sparse_tensor.encoding<{
   lvlTypes = ["compressed"],
-  slice = [ (?, ?, ?) ]
+  dimSlices = [ (?, ?, ?) ]
 }>
 
 // CHECK-LABEL: func @sparse_get_md(
diff --git a/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir b/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir
index 4a7cd76..75f8d07 100644
--- a/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir
+++ b/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir
@@ -100,31 +100,31 @@ func.func private @sparse_ell(tensor)
 
 #CSR_SLICE = #sparse_tensor.encoding<{
   lvlTypes = [ "dense", "compressed" ],
-  slice = [ (1, 4, 1), (1, 4, 2) ]
+  dimSlices = [ (1, 4, 1), (1, 4, 2) ]
 }>
 
 // CHECK-LABEL: func private @sparse_slice(
-// CHECK-SAME: tensor>
+// CHECK-SAME: tensor>
 func.func private @sparse_slice(tensor)
 
 // -----
 
 #CSR_SLICE = #sparse_tensor.encoding<{
   lvlTypes = [ "dense", "compressed" ],
-  slice = [ (1, 4, 1), (1, 4, 2) ]
+  dimSlices = [ (1, 4, 1), (1, 4, 2) ]
 }>
 
 // CHECK-LABEL: func private @sparse_slice(
-// CHECK-SAME: tensor>
+// CHECK-SAME: tensor>
 func.func private @sparse_slice(tensor)
 
 // -----
 
 #CSR_SLICE = #sparse_tensor.encoding<{
   lvlTypes = [ "dense", "compressed" ],
-  slice = [ (1, ?, 1), (?, 4, 2) ]
+  dimSlices = [ (1, ?, 1), (?, 4, 2) ]
 }>
 
 // CHECK-LABEL: func private @sparse_slice(
-// CHECK-SAME: tensor>
+// CHECK-SAME: tensor>
 func.func private @sparse_slice(tensor)
diff --git a/mlir/test/Dialect/SparseTensor/sparse_extract_slice.mlir b/mlir/test/Dialect/SparseTensor/sparse_extract_slice.mlir
index 8cf8c6c..efb920b 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_extract_slice.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_extract_slice.mlir
@@ -6,7 +6,7 @@
 
 #CSR_SLICE = #sparse_tensor.encoding<{
   lvlTypes = [ "dense", "compressed" ],
-  slice = [ (0, 4, 1), (0, 8, 1) ]
+  dimSlices = [ (0, 4, 1), (0, 8, 1) ]
 }>
 
 // CHECK-LABEL: func.func @sparse_slice(
diff --git a/mlir/test/Dialect/SparseTensor/sparse_foreach.mlir b/mlir/test/Dialect/SparseTensor/sparse_foreach.mlir
index 8038e14..339c94d 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_foreach.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_foreach.mlir
@@ -30,12 +30,12 @@ func.func @sparse_foreach_constant() -> () {
 
 #CSR_SLICE = #sparse_tensor.encoding<{
   lvlTypes = [ "compressed", "compressed" ],
-  slice = [ (0, 4, 1), (2, 4, 1) ]
+  dimSlices = [ (0, 4, 1), (2, 4, 1) ]
 }>
 
 #CSR_SLICE_DYN = #sparse_tensor.encoding<{
   lvlTypes = [ "compressed", "compressed" ],
-  slice = [ (?, ?, ?), (?, ?, ?) ]
+  dimSlices = [ (?, ?, ?), (?, ?, ?) ]
 }>
 
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir
index 43b75f8..fc259b2 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir
@@ -16,12 +16,12 @@
 
 #CSR_SLICE = #sparse_tensor.encoding<{
   lvlTypes = [ "dense", "compressed" ],
-  slice = [ (1, 4, 1), (1, 4, 2) ]
+  dimSlices = [ (1, 4, 1), (1, 4, 2) ]
 }>
 
 #CSR_SLICE_DYN = #sparse_tensor.encoding<{
   lvlTypes = [ "dense", "compressed" ],
-  slice = [ (?, ?, ?), (?, ?, ?) ]
+  dimSlices = [ (?, ?, ?), (?, ?, ?) ]
 }>
 
 #COO = #sparse_tensor.encoding<{
@@ -30,12 +30,12 @@
 
 #COO_SLICE = #sparse_tensor.encoding<{
   lvlTypes = [ "compressed-nu", "singleton" ],
-  slice = [ (1, 4, 1), (1, 4, 2) ]
+  dimSlices = [ (1, 4, 1), (1, 4, 2) ]
 }>
 
 #COO_SLICE_DYN = #sparse_tensor.encoding<{
   lvlTypes = [ "compressed-nu", "singleton" ],
-  slice = [ (?, ?, ?), (?, ?, ?) ]
+  dimSlices = [ (?, ?, ?), (?, ?, ?) ]
 }>
 
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir
index c5d6032..c972307 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir
@@ -16,7 +16,7 @@
 
 #DCSR_SLICE = #sparse_tensor.encoding<{
   lvlTypes = [ "compressed", "compressed" ],
-  slice = [ (0, 4, 1), (0, 8, 1) ]
+  dimSlices = [ (0, 4, 1), (0, 8, 1) ]
 }>
 
 #CSR = #sparse_tensor.encoding<{
@@ -25,7 +25,7 @@
 
 #CSR_SLICE = #sparse_tensor.encoding<{
   lvlTypes = [ "dense", "compressed" ],
-  slice = [ (0, 4, 1), (0, 8, 1) ]
+  dimSlices = [ (0, 4, 1), (0, 8, 1) ]
 }>
 
 #COO = #sparse_tensor.encoding<{
@@ -34,32 +34,32 @@
 
 #CSR_SLICE_1 = #sparse_tensor.encoding<{
   lvlTypes = [ "dense", "compressed" ],
-  slice = [ (0, 4, 2), (0, 4, 1) ]
+  dimSlices = [ (0, 4, 2), (0, 4, 1) ]
 }>
 
 #DCSR_SLICE_1 = #sparse_tensor.encoding<{
   lvlTypes = [ "compressed", "compressed" ],
-  slice = [ (0, 4, 2), (1, 4, 1) ]
+  dimSlices = [ (0, 4, 2), (1, 4, 1) ]
 }>
 
 #COO_SLICE_1 = #sparse_tensor.encoding<{
   lvlTypes = [ "compressed-nu", "singleton" ],
-  slice = [ (0, 4, 2), (0, 4, 1) ]
+  dimSlices = [ (0, 4, 2), (0, 4, 1) ]
 }>
 
 #COO_SLICE_2 = #sparse_tensor.encoding<{
   lvlTypes = [ "compressed-nu", "singleton" ],
-  slice = [ (0, 4, 2), (1, 4, 1) ]
+  dimSlices = [ (0, 4, 2), (1, 4, 1) ]
 }>
 
 #CSR_SLICE_dyn = #sparse_tensor.encoding<{
   lvlTypes = [ "dense", "compressed" ],
-  slice = [ (?, 4, ?), (?, 4, ?) ]
+  dimSlices = [ (?, 4, ?), (?, 4, ?) ]
 }>
 
 #DCSR_SLICE_dyn = #sparse_tensor.encoding<{
   lvlTypes = [ "compressed", "compressed" ],
-  slice = [ (?, 4, ?), (?, 4, ?) ]
+  dimSlices = [ (?, 4, ?), (?, 4, ?) ]
 }>
 
 module {
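Usage sketch (assembled from the test patterns above, not taken verbatim from
any one file): a slice encoding with the renamed key, and a query of the
slice's static offset in dimension 1, as in the @sparse_slice_offset tests.

    #CSR_SLICE = #sparse_tensor.encoding<{
      lvlTypes = [ "dense", "compressed" ],
      dimSlices = [ (1, 4, 1), (1, 4, 2) ]  // (offset, size, stride) per dimension
    }>

    func.func @sparse_slice_offset(%arg0: tensor<2x8xf64, #CSR_SLICE>) -> index {
      // Returns the offset (here 1) of the slice in dimension 1.
      %0 = sparse_tensor.slice.offset %arg0 at 1 : tensor<2x8xf64, #CSR_SLICE>
      return %0 : index
    }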