// offset = 0, size = 8, and a dynamic stride on the second dimension).
#CSR_SLICE = #sparse_tensor.encoding<{
lvlTypes = [ "dense", "compressed" ],
- slice = [ (0, 4, 1), (0, 8, ?) ]
+ dimSlices = [ (0, 4, 1), (0, 8, ?) ]
}>
... tensor<?x?xf64, #CSR_SLICE> ...
// The required bitwidth for coordinate storage.
"unsigned":$crdWidth,
// A slice attribute for each dimension of the tensor type.
- // FIXME: The name used here is `dimSlices`, however the
- // parser/printer uses the name `slice` instead. Therefore
- // the parser/printer need to be updated to match.
ArrayRefParameter<
"::mlir::sparse_tensor::SparseTensorDimSliceAttr",
"per dimension slice metadata"
// Process the data from the parsed dictionary value into struct-like data.
SmallVector<DimLevelType> lvlTypes;
- SmallVector<SparseTensorDimSliceAttr> slices;
+ SmallVector<SparseTensorDimSliceAttr> dimSlices;
AffineMap dimToLvl = {};
unsigned posWidth = 0;
unsigned crdWidth = 0;
StringRef attrName;
// Exactly 5 keys.
SmallVector<StringRef, 6> keys = {"lvlTypes", "dimToLvl", "posWidth",
- "crdWidth", "slice"};
+ "crdWidth", "dimSlices"};
while (succeeded(parser.parseOptionalKeyword(&attrName))) {
if (!llvm::is_contained(keys, attrName)) {
parser.emitError(parser.getNameLoc(), "unexpected key: ") << attrName;
auto intAttr = llvm::dyn_cast<IntegerAttr>(attr);
ERROR_IF(!intAttr, "expected an integral index bitwidth")
crdWidth = intAttr.getInt();
- } else if (attrName == "slice") {
+ } else if (attrName == "dimSlices") {
RETURN_ON_FAIL(parser.parseLSquare())
// Dispatches to DimSliceAttr to skip mnemonic
bool finished = false;
while (auto attr = SparseTensorDimSliceAttr::parse(parser, nullptr)) {
auto sliceAttr = llvm::cast<SparseTensorDimSliceAttr>(attr);
- slices.push_back(sliceAttr);
+ dimSlices.push_back(sliceAttr);
if (parser.parseOptionalComma().failed()) {
finished = true;
break;
// Construct struct-like storage for attribute.
return parser.getChecked<SparseTensorEncodingAttr>(
- parser.getContext(), lvlTypes, dimToLvl, posWidth, crdWidth, slices);
+ parser.getContext(), lvlTypes, dimToLvl, posWidth, crdWidth, dimSlices);
}
void SparseTensorEncodingAttr::print(AsmPrinter &printer) const {
if (getCrdWidth())
printer << ", crdWidth = " << getCrdWidth();
if (!getDimSlices().empty()) {
- printer << ", slice = [ ";
+ printer << ", dimSlices = [ ";
llvm::interleaveComma(getDimSlices(), printer,
[&](SparseTensorDimSliceAttr attr) {
// Calls SparseTensorDimSliceAttr::print directly to
#COOSlice = #sparse_tensor.encoding<{
lvlTypes = [ "compressed-nu", "singleton" ],
- slice = [ (2, 2, 1), (12, 13, 1) ]
+ dimSlices = [ (2, 2, 1), (12, 13, 1) ]
}>
// CHECK-LABEL: func @sparse_nop_convert(
#CSR_SLICE = #sparse_tensor.encoding<{
lvlTypes = [ "dense", "compressed" ],
- slice = [ (1, 4, 1), (1, 4, 2) ]
+ dimSlices = [ (1, 4, 1), (1, 4, 2) ]
}>
func.func @sparse_slice_offset(%arg0: tensor<2x8xf64, #CSR_SLICE>) -> index {
#CSR_SLICE = #sparse_tensor.encoding<{
lvlTypes = [ "dense", "compressed" ],
- slice = [ (1, 4, 1), (1, 4, 2) ]
+ dimSlices = [ (1, 4, 1), (1, 4, 2) ]
}>
func.func @sparse_slice_stride(%arg0: tensor<2x8xf64, #CSR_SLICE>) -> index {
#CSR = #sparse_tensor.encoding<{
lvlTypes = ["dense", "compressed"],
- slice = [ (1, 4, 1), (1, 4, 2) ]
+ dimSlices = [ (1, 4, 1), (1, 4, 2) ]
}>
func.func @sparse_convert_to_slice(%arg0: tensor<10x?xf32>) -> tensor<10x10xf32, #CSR> {
#CSR_SLICE = #sparse_tensor.encoding<{
lvlTypes = [ "dense", "compressed" ],
- slice = [ (-1, ?, 1), (?, 4, 2) ] // expected-error{{expect positive value or ? for slice offset/size/stride}}
+ dimSlices = [ (-1, ?, 1), (?, 4, 2) ] // expected-error{{expect positive value or ? for slice offset/size/stride}}
}>
func.func private @sparse_slice(tensor<?x?xf64, #CSR_SLICE>)
#Slice = #sparse_tensor.encoding<{
lvlTypes = [ "compressed-nu", "singleton" ],
- slice = [ (?, 1, 1), (?, 3, 1) ]
+ dimSlices = [ (?, 1, 1), (?, 3, 1) ]
}>
// CHECK-LABEL: func @sparse_nop_cast(
#CSR_SLICE = #sparse_tensor.encoding<{
lvlTypes = [ "dense", "compressed" ],
- slice = [ (1, 4, 1), (1, 4, 2) ]
+ dimSlices = [ (1, 4, 1), (1, 4, 2) ]
}>
// CHECK-LABEL: func @sparse_slice_offset(
#CSR_SLICE = #sparse_tensor.encoding<{
lvlTypes = [ "dense", "compressed" ],
- slice = [ (1, 4, 1), (1, 4, 2) ]
+ dimSlices = [ (1, 4, 1), (1, 4, 2) ]
}>
// CHECK-LABEL: func @sparse_slice_stride(
#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}>
#SparseVector_Slice = #sparse_tensor.encoding<{
lvlTypes = ["compressed"],
- slice = [ (?, ?, ?) ]
+ dimSlices = [ (?, ?, ?) ]
}>
// CHECK-LABEL: func @sparse_metadata_init(
#SparseVector_Slice = #sparse_tensor.encoding<{
lvlTypes = ["compressed"],
- slice = [ (?, ?, ?) ]
+ dimSlices = [ (?, ?, ?) ]
}>
// CHECK-LABEL: func @sparse_get_md(
#SparseVector = #sparse_tensor.encoding<{
lvlTypes = ["compressed"],
- slice = [ (?, ?, ?) ]
+ dimSlices = [ (?, ?, ?) ]
}>
// CHECK-LABEL: func @sparse_get_md(
#CSR_SLICE = #sparse_tensor.encoding<{
lvlTypes = [ "dense", "compressed" ],
- slice = [ (1, 4, 1), (1, 4, 2) ]
+ dimSlices = [ (1, 4, 1), (1, 4, 2) ]
}>
// CHECK-LABEL: func private @sparse_slice(
-// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], slice = [ (1, 4, 1), (1, 4, 2) ] }>>
+// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], dimSlices = [ (1, 4, 1), (1, 4, 2) ] }>>
func.func private @sparse_slice(tensor<?x?xf64, #CSR_SLICE>)
// -----
#CSR_SLICE = #sparse_tensor.encoding<{
lvlTypes = [ "dense", "compressed" ],
- slice = [ (1, 4, 1), (1, 4, 2) ]
+ dimSlices = [ (1, 4, 1), (1, 4, 2) ]
}>
// CHECK-LABEL: func private @sparse_slice(
-// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], slice = [ (1, 4, 1), (1, 4, 2) ] }>>
+// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], dimSlices = [ (1, 4, 1), (1, 4, 2) ] }>>
func.func private @sparse_slice(tensor<?x?xf64, #CSR_SLICE>)
// -----
#CSR_SLICE = #sparse_tensor.encoding<{
lvlTypes = [ "dense", "compressed" ],
- slice = [ (1, ?, 1), (?, 4, 2) ]
+ dimSlices = [ (1, ?, 1), (?, 4, 2) ]
}>
// CHECK-LABEL: func private @sparse_slice(
-// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], slice = [ (1, ?, 1), (?, 4, 2) ] }>>
+// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], dimSlices = [ (1, ?, 1), (?, 4, 2) ] }>>
func.func private @sparse_slice(tensor<?x?xf64, #CSR_SLICE>)
#CSR_SLICE = #sparse_tensor.encoding<{
lvlTypes = [ "dense", "compressed" ],
- slice = [ (0, 4, 1), (0, 8, 1) ]
+ dimSlices = [ (0, 4, 1), (0, 8, 1) ]
}>
// CHECK-LABEL: func.func @sparse_slice(
#CSR_SLICE = #sparse_tensor.encoding<{
lvlTypes = [ "compressed", "compressed" ],
- slice = [ (0, 4, 1), (2, 4, 1) ]
+ dimSlices = [ (0, 4, 1), (2, 4, 1) ]
}>
#CSR_SLICE_DYN = #sparse_tensor.encoding<{
lvlTypes = [ "compressed", "compressed" ],
- slice = [ (?, ?, ?), (?, ?, ?) ]
+ dimSlices = [ (?, ?, ?), (?, ?, ?) ]
}>
#CSR_SLICE = #sparse_tensor.encoding<{
lvlTypes = [ "dense", "compressed" ],
- slice = [ (1, 4, 1), (1, 4, 2) ]
+ dimSlices = [ (1, 4, 1), (1, 4, 2) ]
}>
#CSR_SLICE_DYN = #sparse_tensor.encoding<{
lvlTypes = [ "dense", "compressed" ],
- slice = [ (?, ?, ?), (?, ?, ?) ]
+ dimSlices = [ (?, ?, ?), (?, ?, ?) ]
}>
#COO = #sparse_tensor.encoding<{
#COO_SLICE = #sparse_tensor.encoding<{
lvlTypes = [ "compressed-nu", "singleton" ],
- slice = [ (1, 4, 1), (1, 4, 2) ]
+ dimSlices = [ (1, 4, 1), (1, 4, 2) ]
}>
#COO_SLICE_DYN = #sparse_tensor.encoding<{
lvlTypes = [ "compressed-nu", "singleton" ],
- slice = [ (?, ?, ?), (?, ?, ?) ]
+ dimSlices = [ (?, ?, ?), (?, ?, ?) ]
}>
#DCSR_SLICE = #sparse_tensor.encoding<{
lvlTypes = [ "compressed", "compressed" ],
- slice = [ (0, 4, 1), (0, 8, 1) ]
+ dimSlices = [ (0, 4, 1), (0, 8, 1) ]
}>
#CSR = #sparse_tensor.encoding<{
#CSR_SLICE = #sparse_tensor.encoding<{
lvlTypes = [ "dense", "compressed" ],
- slice = [ (0, 4, 1), (0, 8, 1) ]
+ dimSlices = [ (0, 4, 1), (0, 8, 1) ]
}>
#COO = #sparse_tensor.encoding<{
#CSR_SLICE_1 = #sparse_tensor.encoding<{
lvlTypes = [ "dense", "compressed" ],
- slice = [ (0, 4, 2), (0, 4, 1) ]
+ dimSlices = [ (0, 4, 2), (0, 4, 1) ]
}>
#DCSR_SLICE_1 = #sparse_tensor.encoding<{
lvlTypes = [ "compressed", "compressed" ],
- slice = [ (0, 4, 2), (1, 4, 1) ]
+ dimSlices = [ (0, 4, 2), (1, 4, 1) ]
}>
#COO_SLICE_1 = #sparse_tensor.encoding<{
lvlTypes = [ "compressed-nu", "singleton" ],
- slice = [ (0, 4, 2), (0, 4, 1) ]
+ dimSlices = [ (0, 4, 2), (0, 4, 1) ]
}>
#COO_SLICE_2 = #sparse_tensor.encoding<{
lvlTypes = [ "compressed-nu", "singleton" ],
- slice = [ (0, 4, 2), (1, 4, 1) ]
+ dimSlices = [ (0, 4, 2), (1, 4, 1) ]
}>
#CSR_SLICE_dyn = #sparse_tensor.encoding<{
lvlTypes = [ "dense", "compressed" ],
- slice = [ (?, 4, ?), (?, 4, ?) ]
+ dimSlices = [ (?, 4, ?), (?, 4, ?) ]
}>
#DCSR_SLICE_dyn = #sparse_tensor.encoding<{
lvlTypes = [ "compressed", "compressed" ],
- slice = [ (?, 4, ?), (?, 4, ?) ]
+ dimSlices = [ (?, 4, ?), (?, 4, ?) ]
}>
module {