// sparse tensor codegen to progressively lower sparse tensors.
//===----------------------------------------------------------------------===//
-def SparseTensor_StorageNewOp : SparseTensor_Op<"storage", []>,
+def SparseTensor_StorageOp : SparseTensor_Op<"storage", []>,
Arguments<(ins Variadic<AnyType>:$inputs)>,
Results<(outs AnyTuple:$result)> {
let summary = "Pack a list of value into one sparse tensor storage value";
// Sparse Tensor Storage Operation.
//===----------------------------------------------------------------------===//
-LogicalResult StorageNewOp::verify() {
+LogicalResult StorageOp::verify() {
auto retTypes = getResult().getType().getTypes();
if (retTypes.size() != getInputs().size())
return emitError("The number of inputs is inconsistent with output tuple");
/// Creates a tuple value by packing `values` into a single sparse-tensor
/// storage value of the given tuple `type` via a sparse_tensor.storage op.
/// (Resolves leftover diff markers: the removed `StorageNewOp` line and the
/// `+` prefix made this fragment uncompilable; the post-rename `StorageOp`
/// matches the rename applied consistently elsewhere in this patch.)
static Value createTupleMake(OpBuilder &builder, Location loc, Type type,
                             ValueRange values) {
  return builder.create<StorageOp>(loc, type, values);
}
/// Create allocation operation.
ConversionTarget target(*ctx);
// Almost everything in the sparse dialect must go!
target.addIllegalDialect<SparseTensorDialect>();
- target.addLegalOp<StorageGetOp, StorageSetOp, StorageNewOp>();
+ target.addLegalOp<StorageGetOp, StorageSetOp, StorageOp>();
// All dynamic rules below accept new function, call, return, and various
// tensor and bufferization operations as legal output of the rewriting
// provided that all sparse tensor types have been fully rewritten.