add_dependencies(MLIRLinalgStructuredOpsIncGen LinalgOdsGen)
add_dependencies(mlir-headers MLIRLinalgStructuredOpsIncGen)
+set(LLVM_TARGET_DEFINITIONS LinalgSparseOps.td)
+mlir_tablegen(LinalgSparseOps.h.inc -gen-op-decls)
+mlir_tablegen(LinalgSparseOps.cpp.inc -gen-op-defs)
+add_public_tablegen_target(MLIRLinalgSparseOpsIncGen)
+add_dependencies(mlir-headers MLIRLinalgSparseOpsIncGen)
+
set(LLVM_TARGET_DEFINITIONS LinalgInterfaces.td)
mlir_tablegen(LinalgInterfaces.h.inc -gen-op-interface-decls)
mlir_tablegen(LinalgInterfaces.cpp.inc -gen-op-interface-defs)
#define GET_OP_CLASSES
#include "mlir/Dialect/Linalg/IR/LinalgStructuredOps.h.inc"
+#define GET_OP_CLASSES
+#include "mlir/Dialect/Linalg/IR/LinalgSparseOps.h.inc"
+
#endif // MLIR_DIALECT_LINALG_LINALGOPS_H_
--- /dev/null
+//===- LinalgSparseOps.td - Linalg dialect sparse ops ------*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// The following operations bootstrap working with sparse tensors solely
+// within the Linalg dialect. They provide temporary bridges between a
+// future SparseTensorType (for now an opaque pointer), the actual TensorType,
+// and MemRef arrays underlying an actual sparse storage scheme in memory.
+//
+// Lacking a proper sparse tensor type, the 'sparse_tensor' operation
+// provides a bridge between an opaque pointer and a regular tensor type
+// just to simplify feeding the value into a Linalg op. The operation
+// simply disappears during lowering.
+//
+// The other operations form the bridge between the opaque pointer and
+// the actual storage of pointers, indices, and values. These operations
+// resemble 'tensor_to_memref' in the sense that they map tensors to
+// their bufferized memrefs, but they lower into actual library calls,
+// since sparse storage does not bufferize into a single memref the way
+// dense tensors do, but into a hierarchical storage scheme in which
+// pointer memrefs index into index memrefs and eventually into the
+// values memref.
+//
+// TODO: introduce SparseTensorType as a first-class citizen in MLIR
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LINALG_SPARSE_OPS
+#define LINALG_SPARSE_OPS
+
+include "mlir/Dialect/Linalg/IR/LinalgBase.td"
+
+// Base class.
+class Linalg_SparseOp<string mnemonic> : Op<Linalg_Dialect, mnemonic, []> {
+ let printer = [{ return ::print(p, *this); }];
+ let verifier = ?;
+ let parser = [{ return ::parse$cppClass(parser, result); }];
+}
+
+def Linalg_SparseTensorFromPointerOp :
+ Linalg_SparseOp<"sparse_tensor">,
+ Arguments<(ins AnyType:$ptr)>,
+ Results<(outs AnyTensor:$result)> {
+ let summary = "Views an opaque sparse tensor pointer as a tensor";
+ let description = [{
+ Lacking a first class citizen type for sparse tensors, this operation
+ forms the glue between a sparse storage scheme (behind an opaque
+ pointer) and the (dense) tensors used in the kernel definitions.
+ This operation merely provides a way to assign a proper tensor
+ type and shape to the incoming opaque pointer. It disappears
+ completely during lowering.
+
+ Example:
+
+ ```mlir
+ !SparseTensor = type !llvm.ptr<i8>
+
+ %0 = linalg.sparse_tensor %arg0 : !SparseTensor to tensor<64x64xf64>
+ ```
+ }];
+ let assemblyFormat = "$ptr attr-dict `:` type($ptr) `to` type($result)";
+}
+
+def Linalg_SparseTensorToPointersMemRefOp :
+ Linalg_SparseOp<"sparse_pointers">,
+ Arguments<(ins AnyTensor:$tensor, Index:$dim)>,
+ Results<(outs AnyStridedMemRefOfRank<1>:$result)> {
+ let summary = "Extract pointers array at given dimension from a tensor";
+ let description = [{
+ Returns the pointers array of the sparse storage scheme at the
+ given dimension for the given tensor. This is similar to the
+ `tensor_to_memref` operation in the sense that it provides a bridge
+ between a tensor world view and a bufferized world view. Unlike the
+ `tensor_to_memref` operation, however, this sparse operation actually
+ lowers into a call into a support library to obtain access to the
+ pointers array.
+
+ Example:
+
+ ```mlir
+ %1 = linalg.sparse_pointers %0, %c1 : tensor<64x64xf64> to memref<?xindex>
+ ```
+ }];
+ let assemblyFormat = "$tensor `,` $dim attr-dict `:` type($tensor)"
+ " `to` type($result)";
+}
+
+def Linalg_SparseTensorToIndicesMemRefOp :
+ Linalg_SparseOp<"sparse_indices">,
+ Arguments<(ins AnyTensor:$tensor, Index:$dim)>,
+ Results<(outs AnyStridedMemRefOfRank<1>:$result)> {
+ let summary = "Extract indices array at given dimension from a tensor";
+ let description = [{
+ Returns the indices array of the sparse storage scheme at the
+ given dimension for the given tensor. This is similar to the
+ `tensor_to_memref` operation in the sense that it provides a bridge
+ between a tensor world view and a bufferized world view. Unlike the
+ `tensor_to_memref` operation, however, this sparse operation actually
+ lowers into a call into a support library to obtain access to the
+ indices array.
+
+ Example:
+
+ ```mlir
+ %1 = linalg.sparse_indices %0, %c1 : tensor<64x64xf64> to memref<?xindex>
+ ```
+ }];
+ let assemblyFormat = "$tensor `,` $dim attr-dict `:` type($tensor)"
+ " `to` type($result)";
+}
+
+def Linalg_SparseTensorToValuesMemRefOp :
+ Linalg_SparseOp<"sparse_values">,
+ Arguments<(ins AnyTensor:$tensor)>,
+ Results<(outs AnyStridedMemRefOfRank<1>:$result)> {
+ let summary = "Extract numerical values array from a tensor";
+ let description = [{
+ Returns the values array of the sparse storage scheme for the given
+ tensor, independent of the actual dimension. This is similar to the
+ `tensor_to_memref` operation in the sense that it provides a bridge
+ between a tensor world view and a bufferized world view. Unlike the
+ `tensor_to_memref` operation, however, this sparse operation actually
+ lowers into a call into a support library to obtain access to the
+ values array.
+
+ Example:
+
+ ```mlir
+ %1 = linalg.sparse_values %0 : tensor<64x64xf64> to memref<?xf64>
+ ```
+ }];
+ let assemblyFormat = "$tensor attr-dict `:` type($tensor) `to` type($result)";
+}
+
+#endif // LINALG_SPARSE_OPS
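For orientation, here is a minimal C++ sketch (not part of this patch) of how the TableGen-generated classes for these ops might be created from a rewriter; the helper name and the choice of index/f64 element types are illustrative only, and the builder calls mirror the ones used later in Sparsification.cpp.

```cpp
// Sketch: materializing the pointer and value storage of dimension 0 of
// `tensor` during rewriting. Assumes the usual MLIR includes and that the
// mlir namespace is in scope.
static std::pair<Value, Value> getDim0Storage(PatternRewriter &rewriter,
                                              Location loc, Value tensor) {
  auto dynShape = {ShapedType::kDynamicSize};
  auto ptrTp = MemRefType::get(dynShape, rewriter.getIndexType());
  auto valTp = MemRefType::get(dynShape, rewriter.getF64Type());
  Value dim = rewriter.create<ConstantIndexOp>(loc, 0);
  Value pointers = rewriter.create<linalg::SparseTensorToPointersMemRefOp>(
      loc, ptrTp, tensor, dim);
  Value values = rewriter.create<linalg::SparseTensorToValuesMemRefOp>(
      loc, valTp, tensor);
  return {pointers, values};
}
```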
struct SparsificationOptions {
SparsificationOptions(SparseParallelizationStrategy p,
SparseVectorizationStrategy v, unsigned vl,
- SparseIntType pt, SparseIntType it)
+ SparseIntType pt, SparseIntType it, bool fo)
: parallelizationStrategy(p), vectorizationStrategy(v), vectorLength(vl),
- ptrType(pt), indType(it) {
+ ptrType(pt), indType(it), fastOutput(fo) {
// TODO: remove restriction when vectors with index elements are supported
assert((v != SparseVectorizationStrategy::kAnyStorageInnerLoop ||
(ptrType != SparseIntType::kNative &&
SparsificationOptions()
: SparsificationOptions(SparseParallelizationStrategy::kNone,
SparseVectorizationStrategy::kNone, 1u,
- SparseIntType::kNative, SparseIntType::kNative) {}
+ SparseIntType::kNative, SparseIntType::kNative,
+ false) {}
SparseParallelizationStrategy parallelizationStrategy;
SparseVectorizationStrategy vectorizationStrategy;
unsigned vectorLength;
SparseIntType ptrType;
SparseIntType indType;
+ bool fastOutput; // experimental: fast output buffers
};
-/// Set up sparsification rewriting rules with the given options.
+/// Sets up sparsification rewriting rules with the given options.
void populateSparsificationPatterns(
MLIRContext *context, OwningRewritePatternList &patterns,
const SparsificationOptions &options = SparsificationOptions());
+/// Sets up sparsification conversion rules with the given options.
+void populateSparsificationConversionPatterns(
+ MLIRContext *context, OwningRewritePatternList &patterns);
+
} // namespace linalg
} // namespace mlir
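A hedged sketch of how a client pass might combine the two populate functions declared above; the wrapper name is made up here, and the strategy choices (including the experimental fastOutput flag) are purely illustrative.

```cpp
// Sketch: fill a pattern list with both the sparsification rewriting rules
// and the conversion rules, enabling the experimental fast-output buffers.
// Assumes the mlir namespace is in scope.
static void addSparsePatterns(MLIRContext *ctx,
                              OwningRewritePatternList &patterns) {
  linalg::SparsificationOptions options(
      linalg::SparseParallelizationStrategy::kNone,
      linalg::SparseVectorizationStrategy::kNone, /*vectorLength=*/1,
      linalg::SparseIntType::kNative, linalg::SparseIntType::kNative,
      /*fastOutput=*/true);
  linalg::populateSparsificationPatterns(ctx, patterns, options);
  linalg::populateSparsificationConversionPatterns(ctx, patterns);
}
```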
--- /dev/null
+// RUN: mlir-opt %s \
+// RUN: --test-sparsification="lower" \
+// RUN: --convert-linalg-to-loops \
+// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize --finalizing-bufferize \
+// RUN: --convert-scf-to-std --convert-vector-to-llvm --convert-std-to-llvm | \
+// RUN: TENSOR0="%mlir_integration_test_dir/data/test.mtx" \
+// RUN: mlir-cpu-runner \
+// RUN: -e entry -entry-point-result=void \
+// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
+// RUN: FileCheck %s
+
+//
+// Use descriptive names for opaque pointers.
+//
+!Filename = type !llvm.ptr<i8>
+!SparseTensor = type !llvm.ptr<i8>
+
+#trait_sum_reduce = {
+ indexing_maps = [
+ affine_map<(i,j) -> (i,j)>, // A
+ affine_map<(i,j) -> ()> // x (out)
+ ],
+ sparse = [
+ [ "S", "S" ], // A
+ [ ] // x
+ ],
+ iterator_types = ["reduction", "reduction"],
+ doc = "x += A(i,j)"
+}
+
+//
+// Integration test that lowers a kernel annotated as sparse to
+// actual sparse code, initializes a matching sparse storage scheme
+// from file, and runs the resulting code with the JIT compiler.
+//
+module {
+ //
+ // The kernel expressed as an annotated Linalg op. The kernel
+ // sum reduces a matrix to a single scalar.
+ //
+ func @kernel_sum_reduce(%argA: !SparseTensor,
+ %argx: tensor<f64>) -> tensor<f64> {
+ %arga = linalg.sparse_tensor %argA : !SparseTensor to tensor<?x?xf64>
+ %0 = linalg.generic #trait_sum_reduce
+ ins(%arga: tensor<?x?xf64>)
+ outs(%argx: tensor<f64>) {
+ ^bb(%a: f64, %x: f64):
+ %0 = addf %x, %a : f64
+ linalg.yield %0 : f64
+ } -> tensor<f64>
+ return %0 : tensor<f64>
+ }
+
+ //
+ // Runtime support library that is called directly from here.
+ //
+ func private @getTensorFilename(index) -> (!Filename)
+ func private @newSparseTensor(!Filename, memref<?xi1>) -> (!SparseTensor)
+ func private @delSparseTensor(!SparseTensor) -> ()
+ func private @print_memref_f64(%ptr : tensor<*xf64>)
+
+ //
+ // Main driver that reads matrix from file and calls the sparse kernel.
+ //
+ func @entry() {
+ %d0 = constant 0.0 : f64
+ %c0 = constant 0 : index
+ %c1 = constant 1 : index
+ %c2 = constant 2 : index
+
+ // Mark both dimensions of the matrix as sparse
+ // (this must match the annotation in the trait).
+ %annotations = alloc(%c2) : memref<?xi1>
+ %sparse = constant true
+ store %sparse, %annotations[%c0] : memref<?xi1>
+ store %sparse, %annotations[%c1] : memref<?xi1>
+
+    // Set up memory for a single reduction scalar,
+ // initialized to zero.
+ %xdata = alloc() : memref<f64>
+ store %d0, %xdata[] : memref<f64>
+ %x = tensor_load %xdata : memref<f64>
+
+ // Read the sparse matrix from file, construct sparse storage
+ // according to <sparse,sparse> in memory, and call the kernel.
+ %fileName = call @getTensorFilename(%c0) : (index) -> (!Filename)
+ %a = call @newSparseTensor(%fileName, %annotations)
+ : (!Filename, memref<?xi1>) -> (!SparseTensor)
+ %0 = call @kernel_sum_reduce(%a, %x)
+ : (!SparseTensor, tensor<f64>) -> tensor<f64>
+
+ // Print the result for verification.
+ //
+ // CHECK: 28.2
+ //
+ %m = tensor_to_memref %0 : memref<f64>
+ %v = load %m[] : memref<f64>
+ vector.print %v : f64
+
+ // Release the resources.
+ call @delSparseTensor(%a) : (!SparseTensor) -> ()
+ dealloc %xdata : memref<f64>
+
+ return
+ }
+}
#define GET_OP_CLASSES
#include "mlir/Dialect/Linalg/IR/LinalgStructuredOps.cpp.inc"
+#define GET_OP_CLASSES
+#include "mlir/Dialect/Linalg/IR/LinalgSparseOps.cpp.inc"
+
/// Return the dims that are `iteratorTypeName` loops in the LinalgOp `op`.
/// Assumes `op` is a LinalgOp.
void mlir::linalg::getDimsOfType(Operation *op, StringRef iteratorTypeName,
#define GET_OP_LIST
#include "mlir/Dialect/Linalg/IR/LinalgStructuredOps.cpp.inc"
>();
+ addOperations<
+#define GET_OP_LIST
+#include "mlir/Dialect/Linalg/IR/LinalgSparseOps.cpp.inc"
+ >();
addInterfaces<LinalgInlinerInterface>();
}
Interchange.cpp
Loops.cpp
Promotion.cpp
+ SparseLowering.cpp
Sparsification.cpp
Tiling.cpp
Transforms.cpp
--- /dev/null
+//===- SparseLowering.cpp - Lowers sparse primitives to library calls. ---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir/Dialect/LLVMIR/LLVMTypes.h"
+#include "mlir/Dialect/Linalg/IR/LinalgOps.h"
+#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
+
+using namespace mlir;
+
+namespace {
+
+/// Returns a function reference (on first use, the function is also inserted
+/// into the module).
+static FlatSymbolRefAttr getFunc(Operation *op, StringRef name, Type result,
+ ValueRange operands) {
+ MLIRContext *context = op->getContext();
+ auto module = op->getParentOfType<ModuleOp>();
+ auto func = module.lookupSymbol<FuncOp>(name);
+ if (!func) {
+ OpBuilder moduleBuilder(module.getBodyRegion());
+ moduleBuilder
+ .create<FuncOp>(op->getLoc(), name,
+ FunctionType::get(context, operands.getTypes(), result))
+ .setPrivate();
+ }
+ return SymbolRefAttr::get(context, name);
+}
+
+/// Sparse conversion rule to remove opaque pointer cast.
+class TensorFromPointerConverter
+ : public OpConversionPattern<linalg::SparseTensorFromPointerOp> {
+ using OpConversionPattern::OpConversionPattern;
+ LogicalResult
+ matchAndRewrite(linalg::SparseTensorFromPointerOp op,
+ ArrayRef<Value> operands,
+ ConversionPatternRewriter &rewriter) const override {
+ rewriter.replaceOp(op, operands[0]);
+ return success();
+ }
+};
+
+/// Sparse conversion rule for dimension accesses.
+class TensorToDimSizeConverter : public OpConversionPattern<DimOp> {
+public:
+ using OpConversionPattern::OpConversionPattern;
+ LogicalResult
+ matchAndRewrite(DimOp op, ArrayRef<Value> operands,
+ ConversionPatternRewriter &rewriter) const override {
+ if (!operands[0].getType().isa<LLVM::LLVMPointerType>())
+ return failure();
+ Type resType = op.getType();
+ StringRef name = "sparseDimSize";
+ rewriter.replaceOpWithNewOp<CallOp>(
+ op, resType, getFunc(op, name, resType, operands), operands);
+ return success();
+ }
+};
+
+/// Sparse conversion rule for pointer accesses.
+class TensorToPointersConverter
+ : public OpConversionPattern<linalg::SparseTensorToPointersMemRefOp> {
+public:
+ using OpConversionPattern::OpConversionPattern;
+ LogicalResult
+ matchAndRewrite(linalg::SparseTensorToPointersMemRefOp op,
+ ArrayRef<Value> operands,
+ ConversionPatternRewriter &rewriter) const override {
+ Type resType = op.getType();
+ Type eltType = resType.cast<ShapedType>().getElementType();
+ StringRef name;
+ if (eltType.isIndex() || eltType.isInteger(64))
+ name = "sparsePtrsI64";
+ else
+ return failure();
+ rewriter.replaceOpWithNewOp<CallOp>(
+ op, resType, getFunc(op, name, resType, operands), operands);
+ return success();
+ }
+};
+
+/// Sparse conversion rule for index accesses.
+class TensorToIndicesConverter
+ : public OpConversionPattern<linalg::SparseTensorToIndicesMemRefOp> {
+public:
+ using OpConversionPattern::OpConversionPattern;
+ LogicalResult
+ matchAndRewrite(linalg::SparseTensorToIndicesMemRefOp op,
+ ArrayRef<Value> operands,
+ ConversionPatternRewriter &rewriter) const override {
+ Type resType = op.getType();
+ Type eltType = resType.cast<ShapedType>().getElementType();
+ StringRef name;
+ if (eltType.isIndex() || eltType.isInteger(64))
+ name = "sparseIndxsI64";
+ else
+ return failure();
+ rewriter.replaceOpWithNewOp<CallOp>(
+ op, resType, getFunc(op, name, resType, operands), operands);
+ return success();
+ }
+};
+
+/// Sparse conversion rule for value accesses.
+class TensorToValuesConverter
+ : public OpConversionPattern<linalg::SparseTensorToValuesMemRefOp> {
+public:
+ using OpConversionPattern::OpConversionPattern;
+ LogicalResult
+ matchAndRewrite(linalg::SparseTensorToValuesMemRefOp op,
+ ArrayRef<Value> operands,
+ ConversionPatternRewriter &rewriter) const override {
+ Type resType = op.getType();
+ Type eltType = resType.cast<ShapedType>().getElementType();
+ StringRef name;
+ if (eltType.isF64())
+ name = "sparseValsF64";
+ else
+ return failure();
+ rewriter.replaceOpWithNewOp<CallOp>(
+ op, resType, getFunc(op, name, resType, operands), operands);
+ return success();
+ }
+};
+
+} // namespace
+
+/// Populates the given patterns list with conversion rules required for
+/// the sparsification of linear algebra operations.
+void linalg::populateSparsificationConversionPatterns(
+ MLIRContext *context, OwningRewritePatternList &patterns) {
+ patterns.insert<TensorFromPointerConverter, TensorToDimSizeConverter,
+ TensorToPointersConverter, TensorToIndicesConverter,
+ TensorToValuesConverter>(context);
+}
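For context, a rough sketch of how a test pass might drive these conversion rules through the dialect-conversion framework; the legality setup below is illustrative and may differ from the actual in-tree test pass.

```cpp
// Sketch: apply the sparse conversion rules, making the bridging ops illegal
// so that they must be rewritten into library calls. Assumes the mlir
// namespace is in scope.
static void runSparseConversion(FuncOp func) {
  MLIRContext *ctx = func.getContext();
  OwningRewritePatternList patterns;
  linalg::populateSparsificationConversionPatterns(ctx, patterns);
  ConversionTarget target(*ctx);
  target.addIllegalOp<linalg::SparseTensorFromPointerOp,
                      linalg::SparseTensorToPointersMemRefOp,
                      linalg::SparseTensorToIndicesMemRefOp,
                      linalg::SparseTensorToValuesMemRefOp>();
  target.addLegalOp<CallOp, FuncOp>();
  if (failed(applyPartialConversion(func, target, std::move(patterns))))
    func.emitError("sparse conversion failed");
}
```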
llvm_unreachable("unexpected SparseIntType");
}
+/// Returns true if the tensor was set up with a sparse storage scheme.
+static bool linkedSparse(linalg::GenericOp op, unsigned tensor) {
+ if (tensor < op.getNumInputs())
+ return isa_and_nonnull<linalg::SparseTensorFromPointerOp>(
+ op.getInput(tensor).getDefiningOp());
+ return false;
+}
+
+/// Generates buffer for the output tensor.
+static Value genOutputBuffer(CodeGen &codegen, PatternRewriter &rewriter,
+ linalg::GenericOp op, MemRefType denseTp,
+ ArrayRef<Value> args) {
+ Location loc = op.getLoc();
+ Value tensor = op.getOutput(0);
+  // The output tensor could simply materialize from the buffer that will
+  // be generated for the tensor present in the outs() clause. This has
+ // the major advantage that the sparse kernel only updates the nonzero
+ // positions for the output tensor. Currently this results in functional,
+ // but slightly imprecise IR, so it is put under an experimental option.
+ if (codegen.options.fastOutput)
+ return rewriter.create<TensorToMemrefOp>(loc, denseTp, tensor);
+ // By default, a new buffer is allocated which is initialized to the
+ // tensor defined in the outs() clause. This is always correct but
+ // introduces a dense initialization component that may negatively
+ // impact the running complexity of the sparse kernel.
+ Value init = rewriter.create<TensorToMemrefOp>(loc, denseTp, tensor);
+ Value alloc = rewriter.create<AllocOp>(loc, denseTp, args);
+ rewriter.create<linalg::CopyOp>(loc, init, alloc);
+ return alloc;
+}
+
/// Local bufferization of all dense and sparse data structures.
/// This code enables testing the first prototype sparse compiler.
// TODO: replace this with a proliferated bufferization strategy
unsigned numTensors = op.getNumShapedOperands();
unsigned numInputs = op.getNumInputs();
assert(numTensors == numInputs + 1);
-
- // For now, set all unknown dimensions to 999.
- // TODO: compute these values (using sparsity or by reading tensor)
- Value unknown = rewriter.create<ConstantIndexOp>(loc, 999);
-
// For every tensor, find lower and upper bound on dimensions, set the
- // same bounds on loop indices, and allocate dense or sparse buffer(s).
+ // same bounds on loop indices, and obtain dense or sparse buffer(s).
SmallVector<Value, 4> args;
for (unsigned t = 0; t < numTensors; t++) {
+ Value tensor = t < numInputs ? op.getInput(t) : op.getOutput(0);
auto tensorType = op.getShapedType(t);
auto shape = tensorType.getShape();
auto map = op.getIndexingMap(t);
// Scan all dimensions of current tensor.
- bool allDense = true;
+ bool dense = !linkedSparse(op, t);
args.clear();
for (unsigned d = 0, rank = shape.size(); d < rank; d++) {
unsigned i = map.getDimPosition(d);
// Handle sparse storage schemes.
if (merger.isDim(t, i, Dim::kSparse)) {
- allDense = false;
+ dense = false;
auto dynShape = {ShapedType::kDynamicSize};
auto ptrTp = MemRefType::get(
dynShape, genIntType(rewriter, codegen.options.ptrType));
auto indTp = MemRefType::get(
dynShape, genIntType(rewriter, codegen.options.indType));
- codegen.pointers[t][i] = rewriter.create<AllocaOp>(loc, ptrTp, unknown);
- codegen.indices[t][i] = rewriter.create<AllocaOp>(loc, indTp, unknown);
+ Value dim = rewriter.create<ConstantIndexOp>(loc, d);
+        // Generate sparse primitives to obtain pointers and indices.
+ codegen.pointers[t][i] =
+ rewriter.create<linalg::SparseTensorToPointersMemRefOp>(
+ loc, ptrTp, tensor, dim);
+ codegen.indices[t][i] =
+ rewriter.create<linalg::SparseTensorToIndicesMemRefOp>(loc, indTp,
+ tensor, dim);
}
// Find lower and upper bound in current dimension.
Value up;
if (shape[d] == TensorType::kDynamicSize) {
- Value arg = t < numInputs ? op.getInput(t) : op.getOutput(0);
- up = rewriter.create<DimOp>(loc, arg, d);
+ up = rewriter.create<DimOp>(loc, tensor, d);
args.push_back(up);
} else {
up = rewriter.create<ConstantIndexOp>(loc, shape[d]);
}
codegen.sizes[i] = codegen.highs[t][i] = up;
}
- // Allocate dense or sparse buffer for numerical values.
- if (allDense) {
+ // Perform the required bufferization. All dense inputs materialize
+ // from the input tensor. The dense output tensor needs special
+ // handling. Sparse inputs use a sparse primitive to obtain the values.
+ if (dense) {
auto denseTp = MemRefType::get(shape, tensorType.getElementType());
- codegen.buffers[t] = rewriter.create<AllocaOp>(loc, denseTp, args);
+ if (t < numInputs)
+ codegen.buffers[t] =
+ rewriter.create<TensorToMemrefOp>(loc, denseTp, tensor);
+ else
+ codegen.buffers[t] =
+ genOutputBuffer(codegen, rewriter, op, denseTp, args);
} else {
- auto sparseTp = MemRefType::get({ShapedType::kDynamicSize},
- tensorType.getElementType());
- codegen.buffers[t] = rewriter.create<AllocaOp>(loc, sparseTp, unknown);
+ auto dynShape = {ShapedType::kDynamicSize};
+ auto sparseTp = MemRefType::get(dynShape, tensorType.getElementType());
+ codegen.buffers[t] =
+ rewriter.create<linalg::SparseTensorToValuesMemRefOp>(loc, sparseTp,
+ tensor);
}
}
}
SmallVector<Value, 4> args;
unsigned tensor = merger.exp(exp).e0;
auto map = op.getIndexingMap(tensor);
- bool sparse = false;
+ bool sparse = linkedSparse(op, tensor);
for (unsigned i = 0, m = map.getNumResults(); i < m; ++i) {
unsigned idx = map.getDimPosition(i);
args.push_back(codegen.loops[idx]); // universal dense index
return false;
}
+public:
std::vector<uint64_t> sizes; // per-rank dimension sizes
std::vector<Element> elements;
uint64_t pos;
};
+/// A memory-resident sparse tensor using a storage scheme based on per-rank
+/// dense/sparse annotations. This data structure provides a bufferized
+/// form of an imaginary SparseTensorType, until such a type becomes a
+/// first-class citizen of MLIR. In contrast to generating setup methods for
+/// each differently annotated sparse tensor, this class provides a convenient
+/// "one-size-fits-all" solution that simply takes an input tensor and the
+/// annotations and performs all required setup in a general manner.
+template <typename P, typename I, typename V>
+class SparseTensorStorage {
+public:
+ /// Constructs sparse tensor storage scheme following the given
+ /// per-rank dimension dense/sparse annotations.
+ SparseTensorStorage(SparseTensor *tensor, bool *sparsity)
+ : sizes(tensor->sizes), positions(sizes.size()), indices(sizes.size()) {
+ // Provide hints on capacity.
+ // TODO: needs fine-tuning based on sparsity
+ values.reserve(tensor->elements.size());
+ for (uint64_t d = 0, s = 1, rank = sizes.size(); d < rank; d++) {
+ s *= tensor->sizes[d];
+ if (sparsity[d]) {
+ positions[d].reserve(s + 1);
+ indices[d].reserve(s);
+ s = 1;
+ }
+ }
+    // Then set up the tensor.
+ traverse(tensor, sparsity, 0, tensor->elements.size(), 0);
+ }
+
+private:
+ /// Initializes sparse tensor storage scheme from a memory-resident
+ /// representation of an external sparse tensor. This method prepares
+ /// the pointers and indices arrays under the given per-rank dimension
+ /// dense/sparse annotations.
+ void traverse(SparseTensor *tensor, bool *sparsity, uint64_t lo, uint64_t hi,
+ uint64_t d) {
+ const std::vector<Element> &elements = tensor->elements;
+ // Once dimensions are exhausted, insert the numerical values.
+ if (d == sizes.size()) {
+ values.push_back(lo < hi ? elements[lo].value : 0.0);
+ return;
+ }
+ // Prepare a sparse pointer structure at this dimension.
+ if (sparsity[d] && positions[d].empty())
+ positions[d].push_back(0);
+ // Visit all elements in this interval.
+ uint64_t full = 0;
+ while (lo < hi) {
+ // Find segment in interval with same index elements in this dimension.
+ unsigned idx = elements[lo].indices[d];
+ unsigned seg = lo + 1;
+ while (seg < hi && elements[seg].indices[d] == idx)
+ seg++;
+ // Handle segment in interval for sparse or dense dimension.
+ if (sparsity[d]) {
+ indices[d].push_back(idx);
+ } else {
+ for (; full < idx; full++)
+ traverse(tensor, sparsity, 0, 0, d + 1); // pass empty
+ full++;
+ }
+ traverse(tensor, sparsity, lo, seg, d + 1);
+ // And move on to next segment in interval.
+ lo = seg;
+ }
+ // Finalize the sparse pointer structure at this dimension.
+ if (sparsity[d]) {
+ positions[d].push_back(indices[d].size());
+ } else {
+ for (uint64_t sz = tensor->sizes[d]; full < sz; full++)
+ traverse(tensor, sparsity, 0, 0, d + 1); // pass empty
+ }
+ }
+
+public:
+ std::vector<uint64_t> sizes; // per-rank dimension sizes
+ std::vector<std::vector<P>> positions;
+ std::vector<std::vector<I>> indices;
+ std::vector<V> values;
+};
+
+typedef SparseTensorStorage<uint64_t, uint64_t, double>
+ SparseTensorStorageU64U64F64;
+
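To make the traversal above concrete, here is a small standalone worked example (not part of the runtime library) that builds the same <sparse, sparse> storage by hand for a tiny 3x4 matrix and prints the resulting arrays.

```cpp
// Standalone illustration: builds the storage that SparseTensorStorage
// produces for a 3x4 matrix with nonzeros A(0,0)=1, A(0,3)=2, A(2,1)=3
// under annotations <sparse, sparse>, and prints positions/indices/values.
#include <cstdint>
#include <cstdio>
#include <vector>

struct Coord { uint64_t i, j; double v; };

int main() {
  // Coordinates must be sorted lexicographically, as the file reader ensures.
  std::vector<Coord> elems = {{0, 0, 1.0}, {0, 3, 2.0}, {2, 1, 3.0}};
  std::vector<uint64_t> posRow{0}, idxRow, posCol{0}, idxCol;
  std::vector<double> values;
  for (uint64_t lo = 0; lo < elems.size();) {
    uint64_t row = elems[lo].i, hi = lo;
    while (hi < elems.size() && elems[hi].i == row)
      hi++; // segment [lo, hi) shares the same row coordinate
    idxRow.push_back(row);
    for (uint64_t k = lo; k < hi; k++) {
      idxCol.push_back(elems[k].j);
      values.push_back(elems[k].v);
    }
    posCol.push_back(idxCol.size());
    lo = hi;
  }
  posRow.push_back(idxRow.size());
  // Expected output:
  //   positions[0] = 0 2      indices[0] = 0 2
  //   positions[1] = 0 2 3    indices[1] = 0 3 1
  //   values       = 1 2 3
  auto dump = [](const char *name, const std::vector<uint64_t> &v) {
    printf("%s =", name);
    for (uint64_t x : v)
      printf(" %llu", (unsigned long long)x);
    printf("\n");
  };
  dump("positions[0]", posRow);
  dump("indices[0]  ", idxRow);
  dump("positions[1]", posCol);
  dump("indices[1]  ", idxCol);
  printf("values       =");
  for (double x : values)
    printf(" %g", x);
  printf("\n");
  return 0;
}
```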
/// Helper to convert string to lower case.
static char *toLower(char *token) {
for (char *c = token; *c; c++)
//
//
// Note that input parameters in the "MLIRized" version of a function mimic
-// the data layout of a MemRef<?xT>:
-//
-// struct MemRef {
-// T *base;
-// T *data;
-// int64_t off;
-// int64_t sizes[1];
-// int64_t strides[1];
-// }
+// the data layout of a MemRef<?xT>, expanded into individual fields since a
+// direct struct cannot be used for input parameters. The output parameter,
+// in contrast, uses a direct struct.
//
//===----------------------------------------------------------------------===//
+extern "C" {
+
+/// Cannot use templates with C linkage.
+
+struct MemRef1DU64 {
+ const uint64_t *base;
+ const uint64_t *data;
+ uint64_t off;
+ uint64_t sizes[1];
+ uint64_t strides[1];
+};
+
+struct MemRef1DF64 {
+ const double *base;
+ const double *data;
+ uint64_t off;
+ uint64_t sizes[1];
+ uint64_t strides[1];
+};
+
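As an illustration of the calling convention just described, the annotated signature below spells out (as an assumption drawn from this file, not new API) how the `memref<?xi1>` argument of the `newSparseTensor` declaration used in the integration test expands into the scalar parameters of the C entry point defined further down.

```cpp
// MLIR-level declaration from the integration test:
//   func private @newSparseTensor(!Filename, memref<?xi1>) -> (!SparseTensor)
// maps onto the C entry point below: the 1-D memref is expanded into
// (base, data, offset, size, stride), and the opaque pointers become char*
// and void*.
void *newSparseTensor(char *filename,    // !Filename (opaque pointer)
                      bool *abase,       // memref<?xi1> allocated pointer
                      bool *adata,       // memref<?xi1> aligned pointer
                      uint64_t aoff,     // memref<?xi1> offset
                      uint64_t asize,    // memref<?xi1> sizes[0]
                      uint64_t astride); // memref<?xi1> strides[0]
```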
/// Reads in a sparse tensor with the given filename. The call yields a
/// pointer to an opaque memory-resident sparse tensor object that is only
/// understood by other methods in the sparse runtime support library. An
/// array parameter is used to pass the rank, the number of nonzero elements,
/// and the dimension sizes (one per rank).
-extern "C" void *openTensorC(char *filename, uint64_t *idata) {
+void *openTensorC(char *filename, uint64_t *idata) {
// Open the file.
FILE *file = fopen(filename, "r");
if (!file) {
}
/// "MLIRized" version.
-extern "C" void *openTensor(char *filename, uint64_t *ibase, uint64_t *idata,
- uint64_t ioff, uint64_t isize, uint64_t istride) {
+void *openTensor(char *filename, uint64_t *ibase, uint64_t *idata,
+ uint64_t ioff, uint64_t isize, uint64_t istride) {
assert(istride == 1);
return openTensorC(filename, idata + ioff);
}
/// Yields the next element from the given opaque sparse tensor object.
-extern "C" void readTensorItemC(void *tensor, uint64_t *idata, double *ddata) {
+void readTensorItemC(void *tensor, uint64_t *idata, double *ddata) {
const Element &e = static_cast<SparseTensor *>(tensor)->next();
for (uint64_t r = 0, rank = e.indices.size(); r < rank; r++)
idata[r] = e.indices[r];
}
/// "MLIRized" version.
-extern "C" void readTensorItem(void *tensor, uint64_t *ibase, uint64_t *idata,
- uint64_t ioff, uint64_t isize, uint64_t istride,
- double *dbase, double *ddata, uint64_t doff,
- uint64_t dsize, uint64_t dstride) {
+void readTensorItem(void *tensor, uint64_t *ibase, uint64_t *idata,
+ uint64_t ioff, uint64_t isize, uint64_t istride,
+ double *dbase, double *ddata, uint64_t doff, uint64_t dsize,
+ uint64_t dstride) {
assert(istride == 1 && dstride == 1);
readTensorItemC(tensor, idata + ioff, ddata + doff);
}
/// Closes the given opaque sparse tensor object, releasing its memory
-/// resources. After this call, the opague object cannot be used anymore.
-extern "C" void closeTensor(void *tensor) {
- delete static_cast<SparseTensor *>(tensor);
-}
+/// resources. After this call, the opaque object cannot be used anymore.
+void closeTensor(void *tensor) { delete static_cast<SparseTensor *>(tensor); }
/// Helper method to read a sparse tensor filename from the environment,
/// defined with the naming convention ${TENSOR0}, ${TENSOR1}, etc.
-extern "C" char *getTensorFilename(uint64_t id) {
+char *getTensorFilename(uint64_t id) {
char var[80];
sprintf(var, "TENSOR%" PRIu64, id);
char *env = getenv(var);
return env;
}
+///
+/// Sparse primitives that support an opaque implementation of a bufferized
+/// SparseTensor in MLIR. This could be replaced by actual codegen in MLIR.
+///
+
+void *newSparseTensorC(char *filename, bool *annotations) {
+ uint64_t idata[64];
+ SparseTensor *t = static_cast<SparseTensor *>(openTensorC(filename, idata));
+ SparseTensorStorageU64U64F64 *tensor =
+ new SparseTensorStorageU64U64F64(t, annotations);
+ delete t;
+ return tensor;
+}
+
+/// "MLIRized" version.
+void *newSparseTensor(char *filename, bool *abase, bool *adata, uint64_t aoff,
+ uint64_t asize, uint64_t astride) {
+ assert(astride == 1);
+ return newSparseTensorC(filename, abase + aoff);
+}
+
+uint64_t sparseDimSize(void *tensor, uint64_t d) {
+ return static_cast<SparseTensorStorageU64U64F64 *>(tensor)->sizes[d];
+}
+
+MemRef1DU64 sparsePtrsI64(void *tensor, uint64_t d) {
+ const std::vector<uint64_t> &v =
+ static_cast<SparseTensorStorageU64U64F64 *>(tensor)->positions[d];
+ return {v.data(), v.data(), 0, {v.size()}, {1}};
+}
+
+MemRef1DU64 sparseIndxsI64(void *tensor, uint64_t d) {
+ const std::vector<uint64_t> &v =
+ static_cast<SparseTensorStorageU64U64F64 *>(tensor)->indices[d];
+ return {v.data(), v.data(), 0, {v.size()}, {1}};
+}
+
+MemRef1DF64 sparseValsF64(void *tensor) {
+ const std::vector<double> &v =
+ static_cast<SparseTensorStorageU64U64F64 *>(tensor)->values;
+ return {v.data(), v.data(), 0, {v.size()}, {1}};
+}
+
+void delSparseTensor(void *tensor) {
+ delete static_cast<SparseTensorStorageU64U64F64 *>(tensor);
+}
+
+} // extern "C"
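Finally, a hedged sketch of a host-side C++ program that exercises these entry points directly (normally they are reached from JIT-compiled MLIR). It assumes the ${TENSOR0} environment variable points at a rank-2 matrix file, and the local extern "C" declarations simply restate the definitions above.

```cpp
#include <cassert>
#include <cinttypes>
#include <cstdint>
#include <cstdio>

// Restated declarations; in practice these come from linking against the
// runner-utils library that provides the definitions above.
extern "C" {
struct MemRef1DU64 { const uint64_t *base, *data; uint64_t off, sizes[1], strides[1]; };
struct MemRef1DF64 { const double *base, *data; uint64_t off, sizes[1], strides[1]; };
char *getTensorFilename(uint64_t id);
void *newSparseTensorC(char *filename, bool *annotations);
uint64_t sparseDimSize(void *tensor, uint64_t d);
MemRef1DU64 sparsePtrsI64(void *tensor, uint64_t d);
MemRef1DF64 sparseValsF64(void *tensor);
void delSparseTensor(void *tensor);
}

int main() {
  // Read ${TENSOR0}, build <sparse, sparse> storage for a rank-2 tensor,
  // and inspect the resulting arrays.
  bool annotations[2] = {true, true};
  char *filename = getTensorFilename(0);
  void *tensor = newSparseTensorC(filename, annotations);
  printf("dims: %" PRIu64 " x %" PRIu64 "\n", sparseDimSize(tensor, 0),
         sparseDimSize(tensor, 1));
  MemRef1DU64 ptrs = sparsePtrsI64(tensor, 1); // innermost pointers
  MemRef1DF64 vals = sparseValsF64(tensor);    // nonzero values
  assert(ptrs.strides[0] == 1 && vals.strides[0] == 1);
  printf("nnz: %" PRIu64 "\n", vals.sizes[0]);
  delSparseTensor(tensor);
  return 0;
}
```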
+
#endif // MLIR_CRUNNERUTILS_DEFINE_FUNCTIONS
// CHECK: %[[VAL_3:.*]] = constant 32 : index
// CHECK: %[[VAL_4:.*]] = constant 0 : index
// CHECK: %[[VAL_5:.*]] = constant 1 : index
-// CHECK: %[[VAL_6:.*]] = alloca() : memref<32xf32>
-// CHECK: %[[VAL_7:.*]] = alloca() : memref<32xf32>
-// CHECK: scf.for %[[VAL_8:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] {
-// CHECK: %[[VAL_9:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_8]]] : memref<32xf32>
-// CHECK: %[[VAL_10:.*]] = addf %[[VAL_9]], %[[VAL_1]] : f32
-// CHECK: store %[[VAL_10]], %[[VAL_7]]{{\[}}%[[VAL_8]]] : memref<32xf32>
+// CHECK: %[[VAL_6:.*]] = tensor_to_memref %[[VAL_0]] : memref<32xf32>
+// CHECK: %[[VAL_7:.*]] = tensor_to_memref %[[VAL_2]] : memref<32xf32>
+// CHECK: %[[VAL_8:.*]] = alloc() : memref<32xf32>
+// CHECK: linalg.copy(%[[VAL_7]], %[[VAL_8]]) : memref<32xf32>, memref<32xf32>
+// CHECK: scf.for %[[VAL_9:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] {
+// CHECK: %[[VAL_10:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_9]]] : memref<32xf32>
+// CHECK: %[[VAL_11:.*]] = addf %[[VAL_10]], %[[VAL_1]] : f32
+// CHECK: store %[[VAL_11]], %[[VAL_8]]{{\[}}%[[VAL_9]]] : memref<32xf32>
// CHECK: }
-// CHECK: %[[VAL_11:.*]] = tensor_load %[[VAL_7]] : memref<32xf32>
-// CHECK: return %[[VAL_11]] : tensor<32xf32>
+// CHECK: %[[VAL_12:.*]] = tensor_load %[[VAL_8]] : memref<32xf32>
+// CHECK: return %[[VAL_12]] : tensor<32xf32>
// CHECK: }
func @add_d(%arga: tensor<32xf32>, %argb: f32, %argx: tensor<32xf32>) -> tensor<32xf32> {
%0 = linalg.generic #trait_d
// CHECK: %[[VAL_3:.*]] = constant 32 : index
// CHECK: %[[VAL_4:.*]] = constant 0 : index
// CHECK: %[[VAL_5:.*]] = constant 1 : index
-// CHECK: %[[VAL_6:.*]] = alloca() : memref<32xf32>
-// CHECK: %[[VAL_7:.*]] = alloca() : memref<32xf32>
-// CHECK: scf.for %[[VAL_8:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] {
-// CHECK: %[[VAL_9:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_8]]] : memref<32xf32>
-// CHECK: %[[VAL_10:.*]] = mulf %[[VAL_9]], %[[VAL_1]] : f32
-// CHECK: store %[[VAL_10]], %[[VAL_7]]{{\[}}%[[VAL_8]]] : memref<32xf32>
+// CHECK: %[[VAL_6:.*]] = tensor_to_memref %[[VAL_0]] : memref<32xf32>
+// CHECK: %[[VAL_7:.*]] = tensor_to_memref %[[VAL_2]] : memref<32xf32>
+// CHECK: %[[VAL_8:.*]] = alloc() : memref<32xf32>
+// CHECK: linalg.copy(%[[VAL_7]], %[[VAL_8]]) : memref<32xf32>, memref<32xf32>
+// CHECK: scf.for %[[VAL_9:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] {
+// CHECK: %[[VAL_10:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_9]]] : memref<32xf32>
+// CHECK: %[[VAL_11:.*]] = mulf %[[VAL_10]], %[[VAL_1]] : f32
+// CHECK: store %[[VAL_11]], %[[VAL_8]]{{\[}}%[[VAL_9]]] : memref<32xf32>
// CHECK: }
-// CHECK: %[[VAL_11:.*]] = tensor_load %[[VAL_7]] : memref<32xf32>
-// CHECK: return %[[VAL_11]] : tensor<32xf32>
+// CHECK: %[[VAL_12:.*]] = tensor_load %[[VAL_8]] : memref<32xf32>
+// CHECK: return %[[VAL_12]] : tensor<32xf32>
// CHECK: }
func @mul_d(%arga: tensor<32xf32>, %argb: f32, %argx: tensor<32xf32>) -> tensor<32xf32> {
%0 = linalg.generic #trait_d
// CHECK-SAME: %[[VAL_0:.*]]: tensor<32xf32>,
// CHECK-SAME: %[[VAL_1:.*]]: f32,
// CHECK-SAME: %[[VAL_2:.*]]: tensor<32xf32>) -> tensor<32xf32> {
-// CHECK: %[[VAL_3:.*]] = constant 999 : index
-// CHECK: %[[VAL_4:.*]] = constant 32 : index
-// CHECK: %[[VAL_5:.*]] = constant 0 : index
-// CHECK: %[[VAL_6:.*]] = constant true
-// CHECK: %[[VAL_7:.*]] = constant 1 : index
-// CHECK: %[[VAL_8:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = alloca(%[[VAL_3]]) : memref<?xf32>
-// CHECK: %[[VAL_11:.*]] = alloca() : memref<32xf32>
-// CHECK: %[[VAL_12:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_5]]] : memref<?xindex>
-// CHECK: %[[VAL_13:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_7]]] : memref<?xindex>
-// CHECK: %[[VAL_14:.*]]:2 = scf.while (%[[VAL_15:.*]] = %[[VAL_12]], %[[VAL_16:.*]] = %[[VAL_5]]) : (index, index) -> (index, index) {
+// CHECK: %[[VAL_3:.*]] = constant 32 : index
+// CHECK: %[[VAL_4:.*]] = constant 0 : index
+// CHECK: %[[VAL_5:.*]] = constant true
+// CHECK: %[[VAL_6:.*]] = constant 1 : index
+// CHECK: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<32xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<32xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32xf32> to memref<?xf32>
+// CHECK: %[[VAL_10:.*]] = tensor_to_memref %[[VAL_2]] : memref<32xf32>
+// CHECK: %[[VAL_11:.*]] = alloc() : memref<32xf32>
+// CHECK: linalg.copy(%[[VAL_10]], %[[VAL_11]]) : memref<32xf32>, memref<32xf32>
+// CHECK: %[[VAL_12:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_4]]] : memref<?xindex>
+// CHECK: %[[VAL_13:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_6]]] : memref<?xindex>
+// CHECK: %[[VAL_14:.*]]:2 = scf.while (%[[VAL_15:.*]] = %[[VAL_12]], %[[VAL_16:.*]] = %[[VAL_4]]) : (index, index) -> (index, index) {
// CHECK: %[[VAL_17:.*]] = cmpi ult, %[[VAL_15]], %[[VAL_13]] : index
// CHECK: scf.condition(%[[VAL_17]]) %[[VAL_15]], %[[VAL_16]] : index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_18:.*]]: index, %[[VAL_19:.*]]: index):
-// CHECK: %[[VAL_20:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_18]]] : memref<?xindex>
+// CHECK: %[[VAL_20:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_18]]] : memref<?xindex>
// CHECK: %[[VAL_21:.*]] = cmpi eq, %[[VAL_20]], %[[VAL_19]] : index
// CHECK: scf.if %[[VAL_21]] {
-// CHECK: %[[VAL_22:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_18]]] : memref<?xf32>
+// CHECK: %[[VAL_22:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_18]]] : memref<?xf32>
// CHECK: %[[VAL_23:.*]] = addf %[[VAL_22]], %[[VAL_1]] : f32
// CHECK: store %[[VAL_23]], %[[VAL_11]]{{\[}}%[[VAL_19]]] : memref<32xf32>
// CHECK: } else {
-// CHECK: scf.if %[[VAL_6]] {
+// CHECK: scf.if %[[VAL_5]] {
// CHECK: store %[[VAL_1]], %[[VAL_11]]{{\[}}%[[VAL_19]]] : memref<32xf32>
// CHECK: } else {
// CHECK: }
// CHECK: }
// CHECK: %[[VAL_24:.*]] = cmpi eq, %[[VAL_20]], %[[VAL_19]] : index
-// CHECK: %[[VAL_25:.*]] = addi %[[VAL_18]], %[[VAL_7]] : index
+// CHECK: %[[VAL_25:.*]] = addi %[[VAL_18]], %[[VAL_6]] : index
// CHECK: %[[VAL_26:.*]] = select %[[VAL_24]], %[[VAL_25]], %[[VAL_18]] : index
-// CHECK: %[[VAL_27:.*]] = addi %[[VAL_19]], %[[VAL_7]] : index
+// CHECK: %[[VAL_27:.*]] = addi %[[VAL_19]], %[[VAL_6]] : index
// CHECK: scf.yield %[[VAL_26]], %[[VAL_27]] : index, index
// CHECK: }
-// CHECK: scf.for %[[VAL_28:.*]] = %[[VAL_29:.*]]#1 to %[[VAL_4]] step %[[VAL_7]] {
+// CHECK: scf.for %[[VAL_28:.*]] = %[[VAL_29:.*]]#1 to %[[VAL_3]] step %[[VAL_6]] {
// CHECK: store %[[VAL_1]], %[[VAL_11]]{{\[}}%[[VAL_28]]] : memref<32xf32>
// CHECK: }
// CHECK: %[[VAL_30:.*]] = tensor_load %[[VAL_11]] : memref<32xf32>
// CHECK-LABEL: func @repeated_add_s(
// CHECK-SAME: %[[VAL_0:.*]]: tensor<32xf32>,
// CHECK-SAME: %[[VAL_1:.*]]: tensor<32xf32>) -> tensor<32xf32> {
-// CHECK: %[[VAL_2:.*]] = constant 999 : index
-// CHECK: %[[VAL_3:.*]] = constant 0 : index
-// CHECK: %[[VAL_4:.*]] = constant 1 : index
-// CHECK: %[[VAL_5:.*]] = alloca(%[[VAL_2]]) : memref<?xindex>
-// CHECK: %[[VAL_6:.*]] = alloca(%[[VAL_2]]) : memref<?xindex>
-// CHECK: %[[VAL_7:.*]] = alloca(%[[VAL_2]]) : memref<?xf32>
-// CHECK: %[[VAL_8:.*]] = alloca() : memref<32xf32>
-// CHECK: %[[VAL_9:.*]] = load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref<?xindex>
-// CHECK: scf.for %[[VAL_11:.*]] = %[[VAL_9]] to %[[VAL_10]] step %[[VAL_4]] {
-// CHECK: %[[VAL_12:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_11]]] : memref<?xindex>
-// CHECK: %[[VAL_13:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_11]]] : memref<?xf32>
-// CHECK: %[[VAL_14:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_11]]] : memref<?xf32>
+// CHECK: %[[VAL_2:.*]] = constant 0 : index
+// CHECK: %[[VAL_3:.*]] = constant 1 : index
+// CHECK: %[[VAL_4:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_2]] : tensor<32xf32> to memref<?xindex>
+// CHECK: %[[VAL_5:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_2]] : tensor<32xf32> to memref<?xindex>
+// CHECK: %[[VAL_6:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32xf32> to memref<?xf32>
+// CHECK: %[[VAL_7:.*]] = tensor_to_memref %[[VAL_1]] : memref<32xf32>
+// CHECK: %[[VAL_8:.*]] = alloc() : memref<32xf32>
+// CHECK: linalg.copy(%[[VAL_7]], %[[VAL_8]]) : memref<32xf32>, memref<32xf32>
+// CHECK: %[[VAL_9:.*]] = load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref<?xindex>
+// CHECK: %[[VAL_10:.*]] = load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref<?xindex>
+// CHECK: scf.for %[[VAL_11:.*]] = %[[VAL_9]] to %[[VAL_10]] step %[[VAL_3]] {
+// CHECK: %[[VAL_12:.*]] = load %[[VAL_5]]{{\[}}%[[VAL_11]]] : memref<?xindex>
+// CHECK: %[[VAL_13:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_11]]] : memref<?xf32>
+// CHECK: %[[VAL_14:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_11]]] : memref<?xf32>
// CHECK: %[[VAL_15:.*]] = addf %[[VAL_13]], %[[VAL_14]] : f32
-// CHECK: %[[VAL_16:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_11]]] : memref<?xf32>
-// CHECK: %[[VAL_17:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_11]]] : memref<?xf32>
+// CHECK: %[[VAL_16:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_11]]] : memref<?xf32>
+// CHECK: %[[VAL_17:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_11]]] : memref<?xf32>
// CHECK: %[[VAL_18:.*]] = addf %[[VAL_16]], %[[VAL_17]] : f32
// CHECK: %[[VAL_19:.*]] = addf %[[VAL_15]], %[[VAL_18]] : f32
// CHECK: store %[[VAL_19]], %[[VAL_8]]{{\[}}%[[VAL_12]]] : memref<32xf32>
// CHECK-SAME: %[[VAL_0:.*]]: tensor<32xf32>,
// CHECK-SAME: %[[VAL_1:.*]]: f32,
// CHECK-SAME: %[[VAL_2:.*]]: tensor<32xf32>) -> tensor<32xf32> {
-// CHECK: %[[VAL_3:.*]] = constant 999 : index
-// CHECK: %[[VAL_4:.*]] = constant 0 : index
-// CHECK: %[[VAL_5:.*]] = constant 1 : index
-// CHECK: %[[VAL_6:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_7:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = alloca(%[[VAL_3]]) : memref<?xf32>
-// CHECK: %[[VAL_9:.*]] = alloca() : memref<32xf32>
-// CHECK: %[[VAL_10:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref<?xindex>
-// CHECK: scf.for %[[VAL_12:.*]] = %[[VAL_10]] to %[[VAL_11]] step %[[VAL_5]] {
-// CHECK: %[[VAL_13:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_12]]] : memref<?xindex>
-// CHECK: %[[VAL_14:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_12]]] : memref<?xf32>
+// CHECK: %[[VAL_3:.*]] = constant 0 : index
+// CHECK: %[[VAL_4:.*]] = constant 1 : index
+// CHECK: %[[VAL_5:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref<?xindex>
+// CHECK: %[[VAL_6:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32xf32> to memref<?xf32>
+// CHECK: %[[VAL_8:.*]] = tensor_to_memref %[[VAL_2]] : memref<32xf32>
+// CHECK: %[[VAL_9:.*]] = alloc() : memref<32xf32>
+// CHECK: linalg.copy(%[[VAL_8]], %[[VAL_9]]) : memref<32xf32>, memref<32xf32>
+// CHECK: %[[VAL_10:.*]] = load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref<?xindex>
+// CHECK: scf.for %[[VAL_12:.*]] = %[[VAL_10]] to %[[VAL_11]] step %[[VAL_4]] {
+// CHECK: %[[VAL_13:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_12]]] : memref<?xindex>
+// CHECK: %[[VAL_14:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_12]]] : memref<?xf32>
// CHECK: %[[VAL_15:.*]] = mulf %[[VAL_14]], %[[VAL_1]] : f32
// CHECK: store %[[VAL_15]], %[[VAL_9]]{{\[}}%[[VAL_13]]] : memref<32xf32>
// CHECK: }
// CHECK: %[[VAL_3:.*]] = constant 32 : index
// CHECK: %[[VAL_4:.*]] = constant 0 : index
// CHECK: %[[VAL_5:.*]] = constant 1 : index
-// CHECK: %[[VAL_6:.*]] = alloca() : memref<32xf32>
-// CHECK: %[[VAL_7:.*]] = alloca() : memref<32xf32>
-// CHECK: %[[VAL_8:.*]] = alloca() : memref<32xf32>
-// CHECK: scf.for %[[VAL_9:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] {
-// CHECK: %[[VAL_10:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_9]]] : memref<32xf32>
-// CHECK: %[[VAL_11:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_9]]] : memref<32xf32>
-// CHECK: %[[VAL_12:.*]] = addf %[[VAL_10]], %[[VAL_11]] : f32
-// CHECK: store %[[VAL_12]], %[[VAL_8]]{{\[}}%[[VAL_9]]] : memref<32xf32>
+// CHECK: %[[VAL_6:.*]] = tensor_to_memref %[[VAL_0]] : memref<32xf32>
+// CHECK: %[[VAL_7:.*]] = tensor_to_memref %[[VAL_1]] : memref<32xf32>
+// CHECK: %[[VAL_8:.*]] = tensor_to_memref %[[VAL_2]] : memref<32xf32>
+// CHECK: %[[VAL_9:.*]] = alloc() : memref<32xf32>
+// CHECK: linalg.copy(%[[VAL_8]], %[[VAL_9]]) : memref<32xf32>, memref<32xf32>
+// CHECK: scf.for %[[VAL_10:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] {
+// CHECK: %[[VAL_11:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_10]]] : memref<32xf32>
+// CHECK: %[[VAL_12:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_10]]] : memref<32xf32>
+// CHECK: %[[VAL_13:.*]] = addf %[[VAL_11]], %[[VAL_12]] : f32
+// CHECK: store %[[VAL_13]], %[[VAL_9]]{{\[}}%[[VAL_10]]] : memref<32xf32>
// CHECK: }
-// CHECK: %[[VAL_13:.*]] = tensor_load %[[VAL_8]] : memref<32xf32>
-// CHECK: return %[[VAL_13]] : tensor<32xf32>
+// CHECK: %[[VAL_14:.*]] = tensor_load %[[VAL_9]] : memref<32xf32>
+// CHECK: return %[[VAL_14]] : tensor<32xf32>
// CHECK: }
func @add_dd(%arga: tensor<32xf32>, %argb: tensor<32xf32>, %argx: tensor<32xf32>) -> tensor<32xf32> {
%0 = linalg.generic #trait_dd
// CHECK: %[[VAL_3:.*]] = constant 32 : index
// CHECK: %[[VAL_4:.*]] = constant 0 : index
// CHECK: %[[VAL_5:.*]] = constant 1 : index
-// CHECK: %[[VAL_6:.*]] = alloca() : memref<32xf32>
-// CHECK: %[[VAL_7:.*]] = alloca() : memref<32xf32>
-// CHECK: %[[VAL_8:.*]] = alloca() : memref<32xf32>
-// CHECK: scf.for %[[VAL_9:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] {
-// CHECK: %[[VAL_10:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_9]]] : memref<32xf32>
-// CHECK: %[[VAL_11:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_9]]] : memref<32xf32>
-// CHECK: %[[VAL_12:.*]] = mulf %[[VAL_10]], %[[VAL_11]] : f32
-// CHECK: store %[[VAL_12]], %[[VAL_8]]{{\[}}%[[VAL_9]]] : memref<32xf32>
+// CHECK: %[[VAL_6:.*]] = tensor_to_memref %[[VAL_0]] : memref<32xf32>
+// CHECK: %[[VAL_7:.*]] = tensor_to_memref %[[VAL_1]] : memref<32xf32>
+// CHECK: %[[VAL_8:.*]] = tensor_to_memref %[[VAL_2]] : memref<32xf32>
+// CHECK: %[[VAL_9:.*]] = alloc() : memref<32xf32>
+// CHECK: linalg.copy(%[[VAL_8]], %[[VAL_9]]) : memref<32xf32>, memref<32xf32>
+// CHECK: scf.for %[[VAL_10:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] {
+// CHECK: %[[VAL_11:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_10]]] : memref<32xf32>
+// CHECK: %[[VAL_12:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_10]]] : memref<32xf32>
+// CHECK: %[[VAL_13:.*]] = mulf %[[VAL_11]], %[[VAL_12]] : f32
+// CHECK: store %[[VAL_13]], %[[VAL_9]]{{\[}}%[[VAL_10]]] : memref<32xf32>
// CHECK: }
-// CHECK: %[[VAL_13:.*]] = tensor_load %[[VAL_8]] : memref<32xf32>
-// CHECK: return %[[VAL_13]] : tensor<32xf32>
+// CHECK: %[[VAL_14:.*]] = tensor_load %[[VAL_9]] : memref<32xf32>
+// CHECK: return %[[VAL_14]] : tensor<32xf32>
// CHECK: }
func @mul_dd(%arga: tensor<32xf32>, %argb: tensor<32xf32>, %argx: tensor<32xf32>) -> tensor<32xf32> {
%0 = linalg.generic #trait_dd
// CHECK-SAME: %[[VAL_0:.*0]]: tensor<32xf32>,
// CHECK-SAME: %[[VAL_1:.*1]]: tensor<32xf32>,
// CHECK-SAME: %[[VAL_2:.*2]]: tensor<32xf32>) -> tensor<32xf32> {
-// CHECK: %[[VAL_3:.*]] = constant 999 : index
-// CHECK: %[[VAL_4:.*]] = constant 32 : index
-// CHECK: %[[VAL_5:.*]] = constant 0 : index
-// CHECK: %[[VAL_6:.*]] = constant true
-// CHECK: %[[VAL_7:.*]] = constant 1 : index
-// CHECK: %[[VAL_8:.*]] = alloca() : memref<32xf32>
-// CHECK: %[[VAL_9:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = alloca(%[[VAL_3]]) : memref<?xf32>
-// CHECK: %[[VAL_12:.*]] = alloca() : memref<32xf32>
-// CHECK: %[[VAL_13:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_5]]] : memref<?xindex>
-// CHECK: %[[VAL_14:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_7]]] : memref<?xindex>
-// CHECK: %[[VAL_15:.*]]:2 = scf.while (%[[VAL_16:.*]] = %[[VAL_13]], %[[VAL_17:.*]] = %[[VAL_5]]) : (index, index) -> (index, index) {
+// CHECK: %[[VAL_3:.*]] = constant 32 : index
+// CHECK: %[[VAL_4:.*]] = constant 0 : index
+// CHECK: %[[VAL_5:.*]] = constant true
+// CHECK: %[[VAL_6:.*]] = constant 1 : index
+// CHECK: %[[VAL_7:.*]] = tensor_to_memref %[[VAL_0]] : memref<32xf32>
+// CHECK: %[[VAL_8:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_4]] : tensor<32xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_4]] : tensor<32xf32> to memref<?xindex>
+// CHECK: %[[VAL_10:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<32xf32> to memref<?xf32>
+// CHECK: %[[VAL_11:.*]] = tensor_to_memref %[[VAL_2]] : memref<32xf32>
+// CHECK: %[[VAL_12:.*]] = alloc() : memref<32xf32>
+// CHECK: linalg.copy(%[[VAL_11]], %[[VAL_12]]) : memref<32xf32>, memref<32xf32>
+// CHECK: %[[VAL_13:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_4]]] : memref<?xindex>
+// CHECK: %[[VAL_14:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_6]]] : memref<?xindex>
+// CHECK: %[[VAL_15:.*]]:2 = scf.while (%[[VAL_16:.*]] = %[[VAL_13]], %[[VAL_17:.*]] = %[[VAL_4]]) : (index, index) -> (index, index) {
// CHECK: %[[VAL_18:.*]] = cmpi ult, %[[VAL_16]], %[[VAL_14]] : index
// CHECK: scf.condition(%[[VAL_18]]) %[[VAL_16]], %[[VAL_17]] : index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_19:.*]]: index, %[[VAL_20:.*]]: index):
-// CHECK: %[[VAL_21:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_19]]] : memref<?xindex>
+// CHECK: %[[VAL_21:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_19]]] : memref<?xindex>
// CHECK: %[[VAL_22:.*]] = cmpi eq, %[[VAL_21]], %[[VAL_20]] : index
// CHECK: scf.if %[[VAL_22]] {
-// CHECK: %[[VAL_23:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_20]]] : memref<32xf32>
-// CHECK: %[[VAL_24:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_19]]] : memref<?xf32>
+// CHECK: %[[VAL_23:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_20]]] : memref<32xf32>
+// CHECK: %[[VAL_24:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_19]]] : memref<?xf32>
// CHECK: %[[VAL_25:.*]] = addf %[[VAL_23]], %[[VAL_24]] : f32
// CHECK: store %[[VAL_25]], %[[VAL_12]]{{\[}}%[[VAL_20]]] : memref<32xf32>
// CHECK: } else {
-// CHECK: scf.if %[[VAL_6]] {
-// CHECK: %[[VAL_26:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_20]]] : memref<32xf32>
+// CHECK: scf.if %[[VAL_5]] {
+// CHECK: %[[VAL_26:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_20]]] : memref<32xf32>
// CHECK: store %[[VAL_26]], %[[VAL_12]]{{\[}}%[[VAL_20]]] : memref<32xf32>
// CHECK: } else {
// CHECK: }
// CHECK: }
// CHECK: %[[VAL_27:.*]] = cmpi eq, %[[VAL_21]], %[[VAL_20]] : index
-// CHECK: %[[VAL_28:.*]] = addi %[[VAL_19]], %[[VAL_7]] : index
+// CHECK: %[[VAL_28:.*]] = addi %[[VAL_19]], %[[VAL_6]] : index
// CHECK: %[[VAL_29:.*]] = select %[[VAL_27]], %[[VAL_28]], %[[VAL_19]] : index
-// CHECK: %[[VAL_30:.*]] = addi %[[VAL_20]], %[[VAL_7]] : index
+// CHECK: %[[VAL_30:.*]] = addi %[[VAL_20]], %[[VAL_6]] : index
// CHECK: scf.yield %[[VAL_29]], %[[VAL_30]] : index, index
// CHECK: }
-// CHECK: scf.for %[[VAL_31:.*]] = %[[VAL_32:.*]]#1 to %[[VAL_4]] step %[[VAL_7]] {
-// CHECK: %[[VAL_33:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_31]]] : memref<32xf32>
+// CHECK: scf.for %[[VAL_31:.*]] = %[[VAL_32:.*]]#1 to %[[VAL_3]] step %[[VAL_6]] {
+// CHECK: %[[VAL_33:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_31]]] : memref<32xf32>
// CHECK: store %[[VAL_33]], %[[VAL_12]]{{\[}}%[[VAL_31]]] : memref<32xf32>
// CHECK: }
// CHECK: %[[VAL_34:.*]] = tensor_load %[[VAL_12]] : memref<32xf32>
// CHECK-SAME: %[[VAL_0:.*0]]: tensor<32xf32>,
// CHECK-SAME: %[[VAL_1:.*1]]: tensor<32xf32>,
// CHECK-SAME: %[[VAL_2:.*2]]: tensor<32xf32>) -> tensor<32xf32> {
-// CHECK: %[[VAL_3:.*]] = constant 999 : index
-// CHECK: %[[VAL_4:.*]] = constant 0 : index
-// CHECK: %[[VAL_5:.*]] = constant 1 : index
-// CHECK: %[[VAL_6:.*]] = alloca() : memref<32xf32>
-// CHECK: %[[VAL_7:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = alloca(%[[VAL_3]]) : memref<?xf32>
-// CHECK: %[[VAL_10:.*]] = alloca() : memref<32xf32>
-// CHECK: %[[VAL_11:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_4]]] : memref<?xindex>
-// CHECK: %[[VAL_12:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_5]]] : memref<?xindex>
-// CHECK: scf.for %[[VAL_13:.*]] = %[[VAL_11]] to %[[VAL_12]] step %[[VAL_5]] {
-// CHECK: %[[VAL_14:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_13]]] : memref<?xindex>
-// CHECK: %[[VAL_15:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_14]]] : memref<32xf32>
-// CHECK: %[[VAL_16:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_13]]] : memref<?xf32>
+// CHECK: %[[VAL_3:.*]] = constant 0 : index
+// CHECK: %[[VAL_4:.*]] = constant 1 : index
+// CHECK: %[[VAL_5:.*]] = tensor_to_memref %[[VAL_0]] : memref<32xf32>
+// CHECK: %[[VAL_6:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_3]] : tensor<32xf32> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_3]] : tensor<32xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<32xf32> to memref<?xf32>
+// CHECK: %[[VAL_9:.*]] = tensor_to_memref %[[VAL_2]] : memref<32xf32>
+// CHECK: %[[VAL_10:.*]] = alloc() : memref<32xf32>
+// CHECK: linalg.copy(%[[VAL_9]], %[[VAL_10]]) : memref<32xf32>, memref<32xf32>
+// CHECK: %[[VAL_11:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_3]]] : memref<?xindex>
+// CHECK: %[[VAL_12:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref<?xindex>
+// CHECK: scf.for %[[VAL_13:.*]] = %[[VAL_11]] to %[[VAL_12]] step %[[VAL_4]] {
+// CHECK: %[[VAL_14:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_13]]] : memref<?xindex>
+// CHECK: %[[VAL_15:.*]] = load %[[VAL_5]]{{\[}}%[[VAL_14]]] : memref<32xf32>
+// CHECK: %[[VAL_16:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_13]]] : memref<?xf32>
// CHECK: %[[VAL_17:.*]] = mulf %[[VAL_15]], %[[VAL_16]] : f32
// CHECK: store %[[VAL_17]], %[[VAL_10]]{{\[}}%[[VAL_14]]] : memref<32xf32>
// CHECK: }
// CHECK-SAME: %[[VAL_0:.*0]]: tensor<32xf32>,
// CHECK-SAME: %[[VAL_1:.*1]]: tensor<32xf32>,
// CHECK-SAME: %[[VAL_2:.*2]]: tensor<32xf32>) -> tensor<32xf32> {
-// CHECK: %[[VAL_3:.*]] = constant 999 : index
-// CHECK: %[[VAL_4:.*]] = constant 32 : index
-// CHECK: %[[VAL_5:.*]] = constant 0 : index
-// CHECK: %[[VAL_6:.*]] = constant true
-// CHECK: %[[VAL_7:.*]] = constant 1 : index
-// CHECK: %[[VAL_8:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = alloca(%[[VAL_3]]) : memref<?xf32>
-// CHECK: %[[VAL_11:.*]] = alloca() : memref<32xf32>
-// CHECK: %[[VAL_12:.*]] = alloca() : memref<32xf32>
-// CHECK: %[[VAL_13:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_5]]] : memref<?xindex>
-// CHECK: %[[VAL_14:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_7]]] : memref<?xindex>
-// CHECK: %[[VAL_15:.*]]:2 = scf.while (%[[VAL_16:.*]] = %[[VAL_13]], %[[VAL_17:.*]] = %[[VAL_5]]) : (index, index) -> (index, index) {
+// CHECK: %[[VAL_3:.*]] = constant 32 : index
+// CHECK: %[[VAL_4:.*]] = constant 0 : index
+// CHECK: %[[VAL_5:.*]] = constant true
+// CHECK: %[[VAL_6:.*]] = constant 1 : index
+// CHECK: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<32xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<32xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32xf32> to memref<?xf32>
+// CHECK: %[[VAL_10:.*]] = tensor_to_memref %[[VAL_1]] : memref<32xf32>
+// CHECK: %[[VAL_11:.*]] = tensor_to_memref %[[VAL_2]] : memref<32xf32>
+// CHECK: %[[VAL_12:.*]] = alloc() : memref<32xf32>
+// CHECK: linalg.copy(%[[VAL_11]], %[[VAL_12]]) : memref<32xf32>, memref<32xf32>
+// CHECK: %[[VAL_13:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_4]]] : memref<?xindex>
+// CHECK: %[[VAL_14:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_6]]] : memref<?xindex>
+// CHECK: %[[VAL_15:.*]]:2 = scf.while (%[[VAL_16:.*]] = %[[VAL_13]], %[[VAL_17:.*]] = %[[VAL_4]]) : (index, index) -> (index, index) {
// CHECK: %[[VAL_18:.*]] = cmpi ult, %[[VAL_16]], %[[VAL_14]] : index
// CHECK: scf.condition(%[[VAL_18]]) %[[VAL_16]], %[[VAL_17]] : index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_19:.*]]: index, %[[VAL_20:.*]]: index):
-// CHECK: %[[VAL_21:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_19]]] : memref<?xindex>
+// CHECK: %[[VAL_21:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_19]]] : memref<?xindex>
// CHECK: %[[VAL_22:.*]] = cmpi eq, %[[VAL_21]], %[[VAL_20]] : index
// CHECK: scf.if %[[VAL_22]] {
-// CHECK: %[[VAL_23:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_19]]] : memref<?xf32>
-// CHECK: %[[VAL_24:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_20]]] : memref<32xf32>
+// CHECK: %[[VAL_23:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_19]]] : memref<?xf32>
+// CHECK: %[[VAL_24:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_20]]] : memref<32xf32>
// CHECK: %[[VAL_25:.*]] = addf %[[VAL_23]], %[[VAL_24]] : f32
// CHECK: store %[[VAL_25]], %[[VAL_12]]{{\[}}%[[VAL_20]]] : memref<32xf32>
// CHECK: } else {
-// CHECK: scf.if %[[VAL_6]] {
-// CHECK: %[[VAL_26:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_20]]] : memref<32xf32>
+// CHECK: scf.if %[[VAL_5]] {
+// CHECK: %[[VAL_26:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_20]]] : memref<32xf32>
// CHECK: store %[[VAL_26]], %[[VAL_12]]{{\[}}%[[VAL_20]]] : memref<32xf32>
// CHECK: } else {
// CHECK: }
// CHECK: }
// CHECK: %[[VAL_27:.*]] = cmpi eq, %[[VAL_21]], %[[VAL_20]] : index
-// CHECK: %[[VAL_28:.*]] = addi %[[VAL_19]], %[[VAL_7]] : index
+// CHECK: %[[VAL_28:.*]] = addi %[[VAL_19]], %[[VAL_6]] : index
// CHECK: %[[VAL_29:.*]] = select %[[VAL_27]], %[[VAL_28]], %[[VAL_19]] : index
-// CHECK: %[[VAL_30:.*]] = addi %[[VAL_20]], %[[VAL_7]] : index
+// CHECK: %[[VAL_30:.*]] = addi %[[VAL_20]], %[[VAL_6]] : index
// CHECK: scf.yield %[[VAL_29]], %[[VAL_30]] : index, index
// CHECK: }
-// CHECK: scf.for %[[VAL_31:.*]] = %[[VAL_32:.*]]#1 to %[[VAL_4]] step %[[VAL_7]] {
-// CHECK: %[[VAL_33:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_31]]] : memref<32xf32>
+// CHECK: scf.for %[[VAL_31:.*]] = %[[VAL_32:.*]]#1 to %[[VAL_3]] step %[[VAL_6]] {
+// CHECK: %[[VAL_33:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_31]]] : memref<32xf32>
// CHECK: store %[[VAL_33]], %[[VAL_12]]{{\[}}%[[VAL_31]]] : memref<32xf32>
// CHECK: }
// CHECK: %[[VAL_34:.*]] = tensor_load %[[VAL_12]] : memref<32xf32>
// CHECK-SAME: %[[VAL_0:.*0]]: tensor<32xf32>,
// CHECK-SAME: %[[VAL_1:.*1]]: tensor<32xf32>,
// CHECK-SAME: %[[VAL_2:.*2]]: tensor<32xf32>) -> tensor<32xf32> {
-// CHECK: %[[VAL_3:.*]] = constant 999 : index
-// CHECK: %[[VAL_4:.*]] = constant 0 : index
-// CHECK: %[[VAL_5:.*]] = constant 1 : index
-// CHECK: %[[VAL_6:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_7:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = alloca(%[[VAL_3]]) : memref<?xf32>
-// CHECK: %[[VAL_9:.*]] = alloca() : memref<32xf32>
-// CHECK: %[[VAL_10:.*]] = alloca() : memref<32xf32>
-// CHECK: %[[VAL_11:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref<?xindex>
-// CHECK: %[[VAL_12:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref<?xindex>
-// CHECK: scf.for %[[VAL_13:.*]] = %[[VAL_11]] to %[[VAL_12]] step %[[VAL_5]] {
-// CHECK: %[[VAL_14:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_13]]] : memref<?xindex>
-// CHECK: %[[VAL_15:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_13]]] : memref<?xf32>
-// CHECK: %[[VAL_16:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_14]]] : memref<32xf32>
+// CHECK: %[[VAL_3:.*]] = constant 0 : index
+// CHECK: %[[VAL_4:.*]] = constant 1 : index
+// CHECK: %[[VAL_5:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref<?xindex>
+// CHECK: %[[VAL_6:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32xf32> to memref<?xf32>
+// CHECK: %[[VAL_8:.*]] = tensor_to_memref %[[VAL_1]] : memref<32xf32>
+// CHECK: %[[VAL_9:.*]] = tensor_to_memref %[[VAL_2]] : memref<32xf32>
+// CHECK: %[[VAL_10:.*]] = alloc() : memref<32xf32>
+// CHECK: linalg.copy(%[[VAL_9]], %[[VAL_10]]) : memref<32xf32>, memref<32xf32>
+// CHECK: %[[VAL_11:.*]] = load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref<?xindex>
+// CHECK: %[[VAL_12:.*]] = load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref<?xindex>
+// CHECK: scf.for %[[VAL_13:.*]] = %[[VAL_11]] to %[[VAL_12]] step %[[VAL_4]] {
+// CHECK: %[[VAL_14:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_13]]] : memref<?xindex>
+// CHECK: %[[VAL_15:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_13]]] : memref<?xf32>
+// CHECK: %[[VAL_16:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_14]]] : memref<32xf32>
// CHECK: %[[VAL_17:.*]] = mulf %[[VAL_15]], %[[VAL_16]] : f32
// CHECK: store %[[VAL_17]], %[[VAL_10]]{{\[}}%[[VAL_14]]] : memref<32xf32>
// CHECK: }
// CHECK-SAME: %[[VAL_0:.*0]]: tensor<32xf32>,
// CHECK-SAME: %[[VAL_1:.*1]]: tensor<32xf32>,
// CHECK-SAME: %[[VAL_2:.*2]]: tensor<32xf32>) -> tensor<32xf32> {
-// CHECK: %[[VAL_3:.*]] = constant 999 : index
-// CHECK: %[[VAL_4:.*]] = constant 0 : index
-// CHECK: %[[VAL_5:.*]] = constant 1 : index
-// CHECK: %[[VAL_6:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_7:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = alloca(%[[VAL_3]]) : memref<?xf32>
-// CHECK: %[[VAL_9:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = alloca(%[[VAL_3]]) : memref<?xf32>
-// CHECK: %[[VAL_12:.*]] = alloca() : memref<32xf32>
-// CHECK: %[[VAL_13:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref<?xindex>
-// CHECK: %[[VAL_14:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref<?xindex>
-// CHECK: %[[VAL_15:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_4]]] : memref<?xindex>
-// CHECK: %[[VAL_16:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_5]]] : memref<?xindex>
+// CHECK: %[[VAL_3:.*]] = constant 0 : index
+// CHECK: %[[VAL_4:.*]] = constant 1 : index
+// CHECK: %[[VAL_5:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref<?xindex>
+// CHECK: %[[VAL_6:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32xf32> to memref<?xf32>
+// CHECK: %[[VAL_8:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_3]] : tensor<32xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_3]] : tensor<32xf32> to memref<?xindex>
+// CHECK: %[[VAL_10:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<32xf32> to memref<?xf32>
+// CHECK: %[[VAL_11:.*]] = tensor_to_memref %[[VAL_2]] : memref<32xf32>
+// CHECK: %[[VAL_12:.*]] = alloc() : memref<32xf32>
+// CHECK: linalg.copy(%[[VAL_11]], %[[VAL_12]]) : memref<32xf32>, memref<32xf32>
+// CHECK: %[[VAL_13:.*]] = load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref<?xindex>
+// CHECK: %[[VAL_14:.*]] = load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref<?xindex>
+// CHECK: %[[VAL_15:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_3]]] : memref<?xindex>
+// CHECK: %[[VAL_16:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK: %[[VAL_17:.*]]:2 = scf.while (%[[VAL_18:.*]] = %[[VAL_13]], %[[VAL_19:.*]] = %[[VAL_15]]) : (index, index) -> (index, index) {
// CHECK: %[[VAL_20:.*]] = cmpi ult, %[[VAL_18]], %[[VAL_14]] : index
// CHECK: %[[VAL_21:.*]] = cmpi ult, %[[VAL_19]], %[[VAL_16]] : index
// CHECK: scf.condition(%[[VAL_22]]) %[[VAL_18]], %[[VAL_19]] : index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_23:.*]]: index, %[[VAL_24:.*]]: index):
-// CHECK: %[[VAL_25:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_23]]] : memref<?xindex>
-// CHECK: %[[VAL_26:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_24]]] : memref<?xindex>
+// CHECK: %[[VAL_25:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_23]]] : memref<?xindex>
+// CHECK: %[[VAL_26:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_24]]] : memref<?xindex>
// CHECK: %[[VAL_27:.*]] = cmpi ult, %[[VAL_26]], %[[VAL_25]] : index
// CHECK: %[[VAL_28:.*]] = select %[[VAL_27]], %[[VAL_26]], %[[VAL_25]] : index
// CHECK: %[[VAL_29:.*]] = cmpi eq, %[[VAL_25]], %[[VAL_28]] : index
// CHECK: %[[VAL_30:.*]] = cmpi eq, %[[VAL_26]], %[[VAL_28]] : index
// CHECK: %[[VAL_31:.*]] = and %[[VAL_29]], %[[VAL_30]] : i1
// CHECK: scf.if %[[VAL_31]] {
-// CHECK: %[[VAL_32:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_23]]] : memref<?xf32>
-// CHECK: %[[VAL_33:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_24]]] : memref<?xf32>
+// CHECK: %[[VAL_32:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_23]]] : memref<?xf32>
+// CHECK: %[[VAL_33:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_24]]] : memref<?xf32>
// CHECK: %[[VAL_34:.*]] = addf %[[VAL_32]], %[[VAL_33]] : f32
// CHECK: store %[[VAL_34]], %[[VAL_12]]{{\[}}%[[VAL_28]]] : memref<32xf32>
// CHECK: } else {
// CHECK: %[[VAL_35:.*]] = cmpi eq, %[[VAL_25]], %[[VAL_28]] : index
// CHECK: scf.if %[[VAL_35]] {
-// CHECK: %[[VAL_36:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_23]]] : memref<?xf32>
+// CHECK: %[[VAL_36:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_23]]] : memref<?xf32>
// CHECK: store %[[VAL_36]], %[[VAL_12]]{{\[}}%[[VAL_28]]] : memref<32xf32>
// CHECK: } else {
// CHECK: %[[VAL_37:.*]] = cmpi eq, %[[VAL_26]], %[[VAL_28]] : index
// CHECK: scf.if %[[VAL_37]] {
-// CHECK: %[[VAL_38:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_24]]] : memref<?xf32>
+// CHECK: %[[VAL_38:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_24]]] : memref<?xf32>
// CHECK: store %[[VAL_38]], %[[VAL_12]]{{\[}}%[[VAL_28]]] : memref<32xf32>
// CHECK: } else {
// CHECK: }
// CHECK: }
// CHECK: }
// CHECK: %[[VAL_39:.*]] = cmpi eq, %[[VAL_25]], %[[VAL_28]] : index
-// CHECK: %[[VAL_40:.*]] = addi %[[VAL_23]], %[[VAL_5]] : index
+// CHECK: %[[VAL_40:.*]] = addi %[[VAL_23]], %[[VAL_4]] : index
// CHECK: %[[VAL_41:.*]] = select %[[VAL_39]], %[[VAL_40]], %[[VAL_23]] : index
// CHECK: %[[VAL_42:.*]] = cmpi eq, %[[VAL_26]], %[[VAL_28]] : index
-// CHECK: %[[VAL_43:.*]] = addi %[[VAL_24]], %[[VAL_5]] : index
+// CHECK: %[[VAL_43:.*]] = addi %[[VAL_24]], %[[VAL_4]] : index
// CHECK: %[[VAL_44:.*]] = select %[[VAL_42]], %[[VAL_43]], %[[VAL_24]] : index
// CHECK: scf.yield %[[VAL_41]], %[[VAL_44]] : index, index
// CHECK: }
-// CHECK: scf.for %[[VAL_45:.*]] = %[[VAL_46:.*]]#0 to %[[VAL_14]] step %[[VAL_5]] {
-// CHECK: %[[VAL_47:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_45]]] : memref<?xindex>
-// CHECK: %[[VAL_48:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_45]]] : memref<?xf32>
+// CHECK: scf.for %[[VAL_45:.*]] = %[[VAL_46:.*]]#0 to %[[VAL_14]] step %[[VAL_4]] {
+// CHECK: %[[VAL_47:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_45]]] : memref<?xindex>
+// CHECK: %[[VAL_48:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_45]]] : memref<?xf32>
// CHECK: store %[[VAL_48]], %[[VAL_12]]{{\[}}%[[VAL_47]]] : memref<32xf32>
// CHECK: }
-// CHECK: scf.for %[[VAL_49:.*]] = %[[VAL_50:.*]]#1 to %[[VAL_16]] step %[[VAL_5]] {
-// CHECK: %[[VAL_51:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_49]]] : memref<?xindex>
-// CHECK: %[[VAL_52:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_49]]] : memref<?xf32>
+// CHECK: scf.for %[[VAL_49:.*]] = %[[VAL_50:.*]]#1 to %[[VAL_16]] step %[[VAL_4]] {
+// CHECK: %[[VAL_51:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_49]]] : memref<?xindex>
+// CHECK: %[[VAL_52:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_49]]] : memref<?xf32>
// CHECK: store %[[VAL_52]], %[[VAL_12]]{{\[}}%[[VAL_51]]] : memref<32xf32>
// CHECK: }
// CHECK: %[[VAL_53:.*]] = tensor_load %[[VAL_12]] : memref<32xf32>
// CHECK-SAME: %[[VAL_0:.*0]]: tensor<32xf32>,
// CHECK-SAME: %[[VAL_1:.*1]]: tensor<32xf32>,
// CHECK-SAME: %[[VAL_2:.*2]]: tensor<32xf32>) -> tensor<32xf32> {
-// CHECK: %[[VAL_3:.*]] = constant 999 : index
-// CHECK: %[[VAL_4:.*]] = constant 0 : index
-// CHECK: %[[VAL_5:.*]] = constant 1 : index
-// CHECK: %[[VAL_6:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_7:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = alloca(%[[VAL_3]]) : memref<?xf32>
-// CHECK: %[[VAL_9:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = alloca(%[[VAL_3]]) : memref<?xf32>
-// CHECK: %[[VAL_12:.*]] = alloca() : memref<32xf32>
-// CHECK: %[[VAL_13:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref<?xindex>
-// CHECK: %[[VAL_14:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref<?xindex>
-// CHECK: %[[VAL_15:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_4]]] : memref<?xindex>
-// CHECK: %[[VAL_16:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_5]]] : memref<?xindex>
+// CHECK: %[[VAL_3:.*]] = constant 0 : index
+// CHECK: %[[VAL_4:.*]] = constant 1 : index
+// CHECK: %[[VAL_5:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref<?xindex>
+// CHECK: %[[VAL_6:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32xf32> to memref<?xf32>
+// CHECK: %[[VAL_8:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_3]] : tensor<32xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_3]] : tensor<32xf32> to memref<?xindex>
+// CHECK: %[[VAL_10:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<32xf32> to memref<?xf32>
+// CHECK: %[[VAL_11:.*]] = tensor_to_memref %[[VAL_2]] : memref<32xf32>
+// CHECK: %[[VAL_12:.*]] = alloc() : memref<32xf32>
+// CHECK: linalg.copy(%[[VAL_11]], %[[VAL_12]]) : memref<32xf32>, memref<32xf32>
+// CHECK: %[[VAL_13:.*]] = load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref<?xindex>
+// CHECK: %[[VAL_14:.*]] = load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref<?xindex>
+// CHECK: %[[VAL_15:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_3]]] : memref<?xindex>
+// CHECK: %[[VAL_16:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK: %[[VAL_17:.*]]:2 = scf.while (%[[VAL_18:.*]] = %[[VAL_13]], %[[VAL_19:.*]] = %[[VAL_15]]) : (index, index) -> (index, index) {
// CHECK: %[[VAL_20:.*]] = cmpi ult, %[[VAL_18]], %[[VAL_14]] : index
// CHECK: %[[VAL_21:.*]] = cmpi ult, %[[VAL_19]], %[[VAL_16]] : index
// CHECK: scf.condition(%[[VAL_22]]) %[[VAL_18]], %[[VAL_19]] : index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_23:.*]]: index, %[[VAL_24:.*]]: index):
-// CHECK: %[[VAL_25:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_23]]] : memref<?xindex>
-// CHECK: %[[VAL_26:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_24]]] : memref<?xindex>
+// CHECK: %[[VAL_25:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_23]]] : memref<?xindex>
+// CHECK: %[[VAL_26:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_24]]] : memref<?xindex>
// CHECK: %[[VAL_27:.*]] = cmpi ult, %[[VAL_26]], %[[VAL_25]] : index
// CHECK: %[[VAL_28:.*]] = select %[[VAL_27]], %[[VAL_26]], %[[VAL_25]] : index
// CHECK: %[[VAL_29:.*]] = cmpi eq, %[[VAL_25]], %[[VAL_28]] : index
// CHECK: %[[VAL_30:.*]] = cmpi eq, %[[VAL_26]], %[[VAL_28]] : index
// CHECK: %[[VAL_31:.*]] = and %[[VAL_29]], %[[VAL_30]] : i1
// CHECK: scf.if %[[VAL_31]] {
-// CHECK: %[[VAL_32:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_23]]] : memref<?xf32>
-// CHECK: %[[VAL_33:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_24]]] : memref<?xf32>
+// CHECK: %[[VAL_32:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_23]]] : memref<?xf32>
+// CHECK: %[[VAL_33:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_24]]] : memref<?xf32>
// CHECK: %[[VAL_34:.*]] = mulf %[[VAL_32]], %[[VAL_33]] : f32
// CHECK: store %[[VAL_34]], %[[VAL_12]]{{\[}}%[[VAL_28]]] : memref<32xf32>
// CHECK: } else {
// CHECK: }
// CHECK: %[[VAL_35:.*]] = cmpi eq, %[[VAL_25]], %[[VAL_28]] : index
-// CHECK: %[[VAL_36:.*]] = addi %[[VAL_23]], %[[VAL_5]] : index
+// CHECK: %[[VAL_36:.*]] = addi %[[VAL_23]], %[[VAL_4]] : index
// CHECK: %[[VAL_37:.*]] = select %[[VAL_35]], %[[VAL_36]], %[[VAL_23]] : index
// CHECK: %[[VAL_38:.*]] = cmpi eq, %[[VAL_26]], %[[VAL_28]] : index
-// CHECK: %[[VAL_39:.*]] = addi %[[VAL_24]], %[[VAL_5]] : index
+// CHECK: %[[VAL_39:.*]] = addi %[[VAL_24]], %[[VAL_4]] : index
// CHECK: %[[VAL_40:.*]] = select %[[VAL_38]], %[[VAL_39]], %[[VAL_24]] : index
// CHECK: scf.yield %[[VAL_37]], %[[VAL_40]] : index, index
// CHECK: }
// CHECK-SAME: %[[VAL_1:.*1]]: tensor<16xf32>,
// CHECK-SAME: %[[VAL_2:.*2]]: f32,
// CHECK-SAME: %[[VAL_3:.*3]]: tensor<16xf32>) -> tensor<16xf32> {
-// CHECK: %[[VAL_4:.*]] = constant 999 : index
-// CHECK: %[[VAL_5:.*]] = constant 0 : index
-// CHECK: %[[VAL_6:.*]] = constant 1 : index
-// CHECK: %[[VAL_7:.*]] = alloca(%[[VAL_4]]) : memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = alloca(%[[VAL_4]]) : memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = alloca(%[[VAL_4]]) : memref<?xf32>
-// CHECK: %[[VAL_10:.*]] = alloca(%[[VAL_4]]) : memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = alloca(%[[VAL_4]]) : memref<?xindex>
-// CHECK: %[[VAL_12:.*]] = alloca(%[[VAL_4]]) : memref<?xf32>
-// CHECK: %[[VAL_13:.*]] = alloca() : memref<16xf32>
-// CHECK: %[[VAL_14:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_5]]] : memref<?xindex>
-// CHECK: %[[VAL_15:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_6]]] : memref<?xindex>
-// CHECK: %[[VAL_16:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_5]]] : memref<?xindex>
-// CHECK: %[[VAL_17:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_6]]] : memref<?xindex>
-// CHECK: %[[VAL_18:.*]]:3 = scf.while (%[[VAL_19:.*]] = %[[VAL_14]], %[[VAL_20:.*]] = %[[VAL_16]], %[[VAL_21:.*]] = %[[VAL_5]]) : (index, index, index) -> (index, index, index) {
+// CHECK: %[[VAL_4:.*]] = constant 0 : index
+// CHECK: %[[VAL_5:.*]] = constant 1 : index
+// CHECK: %[[VAL_6:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<16xf32> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<16xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<16xf32> to memref<?xf32>
+// CHECK: %[[VAL_9:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_4]] : tensor<16xf32> to memref<?xindex>
+// CHECK: %[[VAL_10:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_4]] : tensor<16xf32> to memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<16xf32> to memref<?xf32>
+// CHECK: %[[VAL_12:.*]] = tensor_to_memref %[[VAL_3]] : memref<16xf32>
+// CHECK: %[[VAL_13:.*]] = alloc() : memref<16xf32>
+// CHECK: linalg.copy(%[[VAL_12]], %[[VAL_13]]) : memref<16xf32>, memref<16xf32>
+// CHECK: %[[VAL_14:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref<?xindex>
+// CHECK: %[[VAL_15:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref<?xindex>
+// CHECK: %[[VAL_16:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_4]]] : memref<?xindex>
+// CHECK: %[[VAL_17:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_5]]] : memref<?xindex>
+// CHECK: %[[VAL_18:.*]]:3 = scf.while (%[[VAL_19:.*]] = %[[VAL_14]], %[[VAL_20:.*]] = %[[VAL_16]], %[[VAL_21:.*]] = %[[VAL_4]]) : (index, index, index) -> (index, index, index) {
// CHECK: %[[VAL_22:.*]] = cmpi ult, %[[VAL_19]], %[[VAL_15]] : index
// CHECK: %[[VAL_23:.*]] = cmpi ult, %[[VAL_20]], %[[VAL_17]] : index
// CHECK: %[[VAL_24:.*]] = and %[[VAL_22]], %[[VAL_23]] : i1
// CHECK: scf.condition(%[[VAL_24]]) %[[VAL_19]], %[[VAL_20]], %[[VAL_21]] : index, index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_25:.*]]: index, %[[VAL_26:.*]]: index, %[[VAL_27:.*]]: index):
-// CHECK: %[[VAL_28:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_25]]] : memref<?xindex>
-// CHECK: %[[VAL_29:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_26]]] : memref<?xindex>
+// CHECK: %[[VAL_28:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_25]]] : memref<?xindex>
+// CHECK: %[[VAL_29:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_26]]] : memref<?xindex>
// CHECK: %[[VAL_30:.*]] = cmpi eq, %[[VAL_28]], %[[VAL_27]] : index
// CHECK: %[[VAL_31:.*]] = cmpi eq, %[[VAL_29]], %[[VAL_27]] : index
// CHECK: %[[VAL_32:.*]] = and %[[VAL_30]], %[[VAL_31]] : i1
// CHECK: scf.if %[[VAL_32]] {
-// CHECK: %[[VAL_33:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_25]]] : memref<?xf32>
+// CHECK: %[[VAL_33:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_25]]] : memref<?xf32>
// CHECK: %[[VAL_34:.*]] = mulf %[[VAL_33]], %[[VAL_2]] : f32
-// CHECK: %[[VAL_35:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_26]]] : memref<?xf32>
+// CHECK: %[[VAL_35:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_26]]] : memref<?xf32>
// CHECK: %[[VAL_36:.*]] = mulf %[[VAL_35]], %[[VAL_2]] : f32
// CHECK: %[[VAL_37:.*]] = addf %[[VAL_34]], %[[VAL_36]] : f32
// CHECK: store %[[VAL_37]], %[[VAL_13]]{{\[}}%[[VAL_27]]] : memref<16xf32>
// CHECK: } else {
// CHECK: %[[VAL_38:.*]] = cmpi eq, %[[VAL_28]], %[[VAL_27]] : index
// CHECK: scf.if %[[VAL_38]] {
-// CHECK: %[[VAL_39:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_25]]] : memref<?xf32>
+// CHECK: %[[VAL_39:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_25]]] : memref<?xf32>
// CHECK: %[[VAL_40:.*]] = mulf %[[VAL_39]], %[[VAL_2]] : f32
// CHECK: store %[[VAL_40]], %[[VAL_13]]{{\[}}%[[VAL_27]]] : memref<16xf32>
// CHECK: } else {
// CHECK: %[[VAL_41:.*]] = cmpi eq, %[[VAL_29]], %[[VAL_27]] : index
// CHECK: scf.if %[[VAL_41]] {
-// CHECK: %[[VAL_42:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_26]]] : memref<?xf32>
+// CHECK: %[[VAL_42:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_26]]] : memref<?xf32>
// CHECK: %[[VAL_43:.*]] = mulf %[[VAL_42]], %[[VAL_2]] : f32
// CHECK: store %[[VAL_43]], %[[VAL_13]]{{\[}}%[[VAL_27]]] : memref<16xf32>
// CHECK: } else {
// CHECK: }
// CHECK: }
// CHECK: %[[VAL_44:.*]] = cmpi eq, %[[VAL_28]], %[[VAL_27]] : index
-// CHECK: %[[VAL_45:.*]] = addi %[[VAL_25]], %[[VAL_6]] : index
+// CHECK: %[[VAL_45:.*]] = addi %[[VAL_25]], %[[VAL_5]] : index
// CHECK: %[[VAL_46:.*]] = select %[[VAL_44]], %[[VAL_45]], %[[VAL_25]] : index
// CHECK: %[[VAL_47:.*]] = cmpi eq, %[[VAL_29]], %[[VAL_27]] : index
-// CHECK: %[[VAL_48:.*]] = addi %[[VAL_26]], %[[VAL_6]] : index
+// CHECK: %[[VAL_48:.*]] = addi %[[VAL_26]], %[[VAL_5]] : index
// CHECK: %[[VAL_49:.*]] = select %[[VAL_47]], %[[VAL_48]], %[[VAL_26]] : index
-// CHECK: %[[VAL_50:.*]] = addi %[[VAL_27]], %[[VAL_6]] : index
+// CHECK: %[[VAL_50:.*]] = addi %[[VAL_27]], %[[VAL_5]] : index
// CHECK: scf.yield %[[VAL_46]], %[[VAL_49]], %[[VAL_50]] : index, index, index
// CHECK: }
-// CHECK: scf.for %[[VAL_51:.*]] = %[[VAL_52:.*]]#0 to %[[VAL_15]] step %[[VAL_6]] {
-// CHECK: %[[VAL_53:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_51]]] : memref<?xf32>
+// CHECK: scf.for %[[VAL_51:.*]] = %[[VAL_52:.*]]#0 to %[[VAL_15]] step %[[VAL_5]] {
+// CHECK: %[[VAL_53:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_51]]] : memref<?xf32>
// CHECK: %[[VAL_54:.*]] = mulf %[[VAL_53]], %[[VAL_2]] : f32
// CHECK: store %[[VAL_54]], %[[VAL_13]]{{\[}}%[[VAL_52]]#2] : memref<16xf32>
// CHECK: }
-// CHECK: scf.for %[[VAL_55:.*]] = %[[VAL_56:.*]]#1 to %[[VAL_17]] step %[[VAL_6]] {
-// CHECK: %[[VAL_57:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_55]]] : memref<?xindex>
-// CHECK: %[[VAL_58:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_55]]] : memref<?xf32>
+// CHECK: scf.for %[[VAL_55:.*]] = %[[VAL_56:.*]]#1 to %[[VAL_17]] step %[[VAL_5]] {
+// CHECK: %[[VAL_57:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_55]]] : memref<?xindex>
+// CHECK: %[[VAL_58:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_55]]] : memref<?xf32>
// CHECK: %[[VAL_59:.*]] = mulf %[[VAL_58]], %[[VAL_2]] : f32
// CHECK: store %[[VAL_59]], %[[VAL_13]]{{\[}}%[[VAL_57]]] : memref<16xf32>
// CHECK: }
// CHECK-SAME: %[[VAL_1:.*1]]: tensor<16xf32>,
// CHECK-SAME: %[[VAL_2:.*2]]: f32,
// CHECK-SAME: %[[VAL_3:.*3]]: tensor<16xf32>) -> tensor<16xf32> {
-// CHECK: %[[VAL_4:.*]] = constant 999 : index
-// CHECK: %[[VAL_5:.*]] = constant 0 : index
-// CHECK: %[[VAL_6:.*]] = constant 1 : index
-// CHECK: %[[VAL_7:.*]] = alloca(%[[VAL_4]]) : memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = alloca(%[[VAL_4]]) : memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = alloca(%[[VAL_4]]) : memref<?xf32>
-// CHECK: %[[VAL_10:.*]] = alloca(%[[VAL_4]]) : memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = alloca(%[[VAL_4]]) : memref<?xindex>
-// CHECK: %[[VAL_12:.*]] = alloca(%[[VAL_4]]) : memref<?xf32>
-// CHECK: %[[VAL_13:.*]] = alloca() : memref<16xf32>
-// CHECK: %[[VAL_14:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_5]]] : memref<?xindex>
-// CHECK: %[[VAL_15:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_6]]] : memref<?xindex>
-// CHECK: %[[VAL_16:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_5]]] : memref<?xindex>
-// CHECK: %[[VAL_17:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_6]]] : memref<?xindex>
-// CHECK: %[[VAL_18:.*]]:3 = scf.while (%[[VAL_19:.*]] = %[[VAL_14]], %[[VAL_20:.*]] = %[[VAL_16]], %[[VAL_21:.*]] = %[[VAL_5]]) : (index, index, index) -> (index, index, index) {
+// CHECK: %[[VAL_4:.*]] = constant 0 : index
+// CHECK: %[[VAL_5:.*]] = constant 1 : index
+// CHECK: %[[VAL_6:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<16xf32> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<16xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<16xf32> to memref<?xf32>
+// CHECK: %[[VAL_9:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_4]] : tensor<16xf32> to memref<?xindex>
+// CHECK: %[[VAL_10:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_4]] : tensor<16xf32> to memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<16xf32> to memref<?xf32>
+// CHECK: %[[VAL_12:.*]] = tensor_to_memref %[[VAL_3]] : memref<16xf32>
+// CHECK: %[[VAL_13:.*]] = alloc() : memref<16xf32>
+// CHECK: linalg.copy(%[[VAL_12]], %[[VAL_13]]) : memref<16xf32>, memref<16xf32>
+// CHECK: %[[VAL_14:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref<?xindex>
+// CHECK: %[[VAL_15:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref<?xindex>
+// CHECK: %[[VAL_16:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_4]]] : memref<?xindex>
+// CHECK: %[[VAL_17:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_5]]] : memref<?xindex>
+// CHECK: %[[VAL_18:.*]]:3 = scf.while (%[[VAL_19:.*]] = %[[VAL_14]], %[[VAL_20:.*]] = %[[VAL_16]], %[[VAL_21:.*]] = %[[VAL_4]]) : (index, index, index) -> (index, index, index) {
// CHECK: %[[VAL_22:.*]] = cmpi ult, %[[VAL_19]], %[[VAL_15]] : index
// CHECK: %[[VAL_23:.*]] = cmpi ult, %[[VAL_20]], %[[VAL_17]] : index
// CHECK: %[[VAL_24:.*]] = and %[[VAL_22]], %[[VAL_23]] : i1
// CHECK: scf.condition(%[[VAL_24]]) %[[VAL_19]], %[[VAL_20]], %[[VAL_21]] : index, index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_25:.*]]: index, %[[VAL_26:.*]]: index, %[[VAL_27:.*]]: index):
-// CHECK: %[[VAL_28:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_25]]] : memref<?xindex>
-// CHECK: %[[VAL_29:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_26]]] : memref<?xindex>
+// CHECK: %[[VAL_28:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_25]]] : memref<?xindex>
+// CHECK: %[[VAL_29:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_26]]] : memref<?xindex>
// CHECK: %[[VAL_30:.*]] = cmpi eq, %[[VAL_28]], %[[VAL_27]] : index
// CHECK: %[[VAL_31:.*]] = cmpi eq, %[[VAL_29]], %[[VAL_27]] : index
// CHECK: %[[VAL_32:.*]] = and %[[VAL_30]], %[[VAL_31]] : i1
// CHECK: scf.if %[[VAL_32]] {
-// CHECK: %[[VAL_33:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_25]]] : memref<?xf32>
-// CHECK: %[[VAL_34:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_26]]] : memref<?xf32>
+// CHECK: %[[VAL_33:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_25]]] : memref<?xf32>
+// CHECK: %[[VAL_34:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_26]]] : memref<?xf32>
// CHECK: %[[VAL_35:.*]] = addf %[[VAL_33]], %[[VAL_34]] : f32
// CHECK: %[[VAL_36:.*]] = mulf %[[VAL_35]], %[[VAL_2]] : f32
// CHECK: store %[[VAL_36]], %[[VAL_13]]{{\[}}%[[VAL_27]]] : memref<16xf32>
// CHECK: } else {
// CHECK: %[[VAL_37:.*]] = cmpi eq, %[[VAL_28]], %[[VAL_27]] : index
// CHECK: scf.if %[[VAL_37]] {
-// CHECK: %[[VAL_38:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_25]]] : memref<?xf32>
+// CHECK: %[[VAL_38:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_25]]] : memref<?xf32>
// CHECK: %[[VAL_39:.*]] = mulf %[[VAL_38]], %[[VAL_2]] : f32
// CHECK: store %[[VAL_39]], %[[VAL_13]]{{\[}}%[[VAL_27]]] : memref<16xf32>
// CHECK: } else {
// CHECK: %[[VAL_40:.*]] = cmpi eq, %[[VAL_29]], %[[VAL_27]] : index
// CHECK: scf.if %[[VAL_40]] {
-// CHECK: %[[VAL_41:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_26]]] : memref<?xf32>
+// CHECK: %[[VAL_41:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_26]]] : memref<?xf32>
// CHECK: %[[VAL_42:.*]] = mulf %[[VAL_41]], %[[VAL_2]] : f32
// CHECK: store %[[VAL_42]], %[[VAL_13]]{{\[}}%[[VAL_27]]] : memref<16xf32>
// CHECK: } else {
// CHECK: }
// CHECK: }
// CHECK: %[[VAL_43:.*]] = cmpi eq, %[[VAL_28]], %[[VAL_27]] : index
-// CHECK: %[[VAL_44:.*]] = addi %[[VAL_25]], %[[VAL_6]] : index
+// CHECK: %[[VAL_44:.*]] = addi %[[VAL_25]], %[[VAL_5]] : index
// CHECK: %[[VAL_45:.*]] = select %[[VAL_43]], %[[VAL_44]], %[[VAL_25]] : index
// CHECK: %[[VAL_46:.*]] = cmpi eq, %[[VAL_29]], %[[VAL_27]] : index
-// CHECK: %[[VAL_47:.*]] = addi %[[VAL_26]], %[[VAL_6]] : index
+// CHECK: %[[VAL_47:.*]] = addi %[[VAL_26]], %[[VAL_5]] : index
// CHECK: %[[VAL_48:.*]] = select %[[VAL_46]], %[[VAL_47]], %[[VAL_26]] : index
-// CHECK: %[[VAL_49:.*]] = addi %[[VAL_27]], %[[VAL_6]] : index
+// CHECK: %[[VAL_49:.*]] = addi %[[VAL_27]], %[[VAL_5]] : index
// CHECK: scf.yield %[[VAL_45]], %[[VAL_48]], %[[VAL_49]] : index, index, index
// CHECK: }
-// CHECK: scf.for %[[VAL_50:.*]] = %[[VAL_51:.*]]#0 to %[[VAL_15]] step %[[VAL_6]] {
-// CHECK: %[[VAL_52:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_50]]] : memref<?xf32>
+// CHECK: scf.for %[[VAL_50:.*]] = %[[VAL_51:.*]]#0 to %[[VAL_15]] step %[[VAL_5]] {
+// CHECK: %[[VAL_52:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_50]]] : memref<?xf32>
// CHECK: %[[VAL_53:.*]] = mulf %[[VAL_52]], %[[VAL_2]] : f32
// CHECK: store %[[VAL_53]], %[[VAL_13]]{{\[}}%[[VAL_51]]#2] : memref<16xf32>
// CHECK: }
-// CHECK: scf.for %[[VAL_54:.*]] = %[[VAL_55:.*]]#1 to %[[VAL_17]] step %[[VAL_6]] {
-// CHECK: %[[VAL_56:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_54]]] : memref<?xindex>
-// CHECK: %[[VAL_57:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_54]]] : memref<?xf32>
+// CHECK: scf.for %[[VAL_54:.*]] = %[[VAL_55:.*]]#1 to %[[VAL_17]] step %[[VAL_5]] {
+// CHECK: %[[VAL_56:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_54]]] : memref<?xindex>
+// CHECK: %[[VAL_57:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_54]]] : memref<?xf32>
// CHECK: %[[VAL_58:.*]] = mulf %[[VAL_57]], %[[VAL_2]] : f32
// CHECK: store %[[VAL_58]], %[[VAL_13]]{{\[}}%[[VAL_56]]] : memref<16xf32>
// CHECK: }
// CHECK-LABEL: func @sum_reduction(
// CHECK-SAME: %[[VAL_0:.*]]: tensor<?xf32>,
// CHECK-SAME: %[[VAL_1:.*]]: tensor<f32>) -> tensor<f32> {
-// CHECK: %[[VAL_2:.*]] = constant 999 : index
-// CHECK: %[[VAL_3:.*]] = constant 0 : index
-// CHECK: %[[VAL_4:.*]] = constant 1 : index
-// CHECK: %[[VAL_5:.*]] = alloca(%[[VAL_2]]) : memref<?xindex>
-// CHECK: %[[VAL_6:.*]] = alloca(%[[VAL_2]]) : memref<?xf32>
-// CHECK: %[[VAL_7:.*]] = alloca() : memref<f32>
-// CHECK: %[[VAL_8:.*]] = load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref<?xindex>
+// CHECK: %[[VAL_2:.*]] = constant 0 : index
+// CHECK: %[[VAL_3:.*]] = constant 1 : index
+// CHECK: %[[VAL_4:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_2]] : tensor<?xf32> to memref<?xindex>
+// CHECK: %[[VAL_5:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<?xf32> to memref<?xf32>
+// CHECK: %[[VAL_6:.*]] = tensor_to_memref %[[VAL_1]] : memref<f32>
+// CHECK: %[[VAL_7:.*]] = alloc() : memref<f32>
+// CHECK: linalg.copy(%[[VAL_6]], %[[VAL_7]]) : memref<f32>, memref<f32>
+// CHECK: %[[VAL_8:.*]] = load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK: %[[VAL_10:.*]] = load %[[VAL_7]][] : memref<f32>
-// CHECK: %[[VAL_11:.*]] = scf.for %[[VAL_12:.*]] = %[[VAL_8]] to %[[VAL_9]] step %[[VAL_4]] iter_args(%[[VAL_13:.*]] = %[[VAL_10]]) -> (f32) {
-// CHECK: %[[VAL_14:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_12]]] : memref<?xf32>
+// CHECK: %[[VAL_11:.*]] = scf.for %[[VAL_12:.*]] = %[[VAL_8]] to %[[VAL_9]] step %[[VAL_3]] iter_args(%[[VAL_13:.*]] = %[[VAL_10]]) -> (f32) {
+// CHECK: %[[VAL_14:.*]] = load %[[VAL_5]]{{\[}}%[[VAL_12]]] : memref<?xf32>
// CHECK: %[[VAL_15:.*]] = addf %[[VAL_13]], %[[VAL_14]] : f32
// CHECK: scf.yield %[[VAL_15]] : f32
// CHECK: }
// CHECK-SAME: %[[VAL_0:.*0]]: tensor<16xf32>,
// CHECK-SAME: %[[VAL_1:.*1]]: tensor<16xf32>,
// CHECK-SAME: %[[VAL_2:.*2]]: tensor<f32>) -> tensor<f32> {
-// CHECK: %[[VAL_3:.*]] = constant 999 : index
-// CHECK: %[[VAL_4:.*]] = constant 0 : index
-// CHECK: %[[VAL_5:.*]] = constant 1 : index
-// CHECK: %[[VAL_6:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_7:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = alloca(%[[VAL_3]]) : memref<?xf32>
-// CHECK: %[[VAL_9:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = alloca(%[[VAL_3]]) : memref<?xf32>
-// CHECK: %[[VAL_12:.*]] = alloca() : memref<f32>
-// CHECK: %[[VAL_13:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref<?xindex>
-// CHECK: %[[VAL_14:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref<?xindex>
-// CHECK: %[[VAL_15:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_4]]] : memref<?xindex>
-// CHECK: %[[VAL_16:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_5]]] : memref<?xindex>
-// CHECK: %[[VAL_17:.*]]:3 = scf.while (%[[VAL_18:.*]] = %[[VAL_13]], %[[VAL_19:.*]] = %[[VAL_15]], %[[VAL_20:.*]] = %[[VAL_4]]) : (index, index, index) -> (index, index, index) {
+// CHECK: %[[VAL_3:.*]] = constant 0 : index
+// CHECK: %[[VAL_4:.*]] = constant 1 : index
+// CHECK: %[[VAL_5:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<16xf32> to memref<?xindex>
+// CHECK: %[[VAL_6:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<16xf32> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<16xf32> to memref<?xf32>
+// CHECK: %[[VAL_8:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_3]] : tensor<16xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_3]] : tensor<16xf32> to memref<?xindex>
+// CHECK: %[[VAL_10:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<16xf32> to memref<?xf32>
+// CHECK: %[[VAL_11:.*]] = tensor_to_memref %[[VAL_2]] : memref<f32>
+// CHECK: %[[VAL_12:.*]] = alloc() : memref<f32>
+// CHECK: linalg.copy(%[[VAL_11]], %[[VAL_12]]) : memref<f32>, memref<f32>
+// CHECK: %[[VAL_13:.*]] = load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref<?xindex>
+// CHECK: %[[VAL_14:.*]] = load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref<?xindex>
+// CHECK: %[[VAL_15:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_3]]] : memref<?xindex>
+// CHECK: %[[VAL_16:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_4]]] : memref<?xindex>
+// CHECK: %[[VAL_17:.*]]:3 = scf.while (%[[VAL_18:.*]] = %[[VAL_13]], %[[VAL_19:.*]] = %[[VAL_15]], %[[VAL_20:.*]] = %[[VAL_3]]) : (index, index, index) -> (index, index, index) {
// CHECK: %[[VAL_21:.*]] = cmpi ult, %[[VAL_18]], %[[VAL_14]] : index
// CHECK: %[[VAL_22:.*]] = cmpi ult, %[[VAL_19]], %[[VAL_16]] : index
// CHECK: %[[VAL_23:.*]] = and %[[VAL_21]], %[[VAL_22]] : i1
// CHECK: scf.condition(%[[VAL_23]]) %[[VAL_18]], %[[VAL_19]], %[[VAL_20]] : index, index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_24:.*]]: index, %[[VAL_25:.*]]: index, %[[VAL_26:.*]]: index):
-// CHECK: %[[VAL_27:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_24]]] : memref<?xindex>
-// CHECK: %[[VAL_28:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_25]]] : memref<?xindex>
+// CHECK: %[[VAL_27:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_24]]] : memref<?xindex>
+// CHECK: %[[VAL_28:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_25]]] : memref<?xindex>
// CHECK: %[[VAL_29:.*]] = cmpi eq, %[[VAL_27]], %[[VAL_26]] : index
// CHECK: %[[VAL_30:.*]] = cmpi eq, %[[VAL_28]], %[[VAL_26]] : index
// CHECK: %[[VAL_31:.*]] = and %[[VAL_29]], %[[VAL_30]] : i1
// CHECK: scf.if %[[VAL_31]] {
// CHECK: %[[VAL_32:.*]] = load %[[VAL_12]][] : memref<f32>
-// CHECK: %[[VAL_33:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_24]]] : memref<?xf32>
-// CHECK: %[[VAL_34:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_25]]] : memref<?xf32>
+// CHECK: %[[VAL_33:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_24]]] : memref<?xf32>
+// CHECK: %[[VAL_34:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_25]]] : memref<?xf32>
// CHECK: %[[VAL_35:.*]] = addf %[[VAL_33]], %[[VAL_34]] : f32
// CHECK: %[[VAL_36:.*]] = addf %[[VAL_32]], %[[VAL_35]] : f32
// CHECK: store %[[VAL_36]], %[[VAL_12]][] : memref<f32>
// CHECK: %[[VAL_37:.*]] = cmpi eq, %[[VAL_27]], %[[VAL_26]] : index
// CHECK: scf.if %[[VAL_37]] {
// CHECK: %[[VAL_38:.*]] = load %[[VAL_12]][] : memref<f32>
-// CHECK: %[[VAL_39:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_24]]] : memref<?xf32>
+// CHECK: %[[VAL_39:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_24]]] : memref<?xf32>
// CHECK: %[[VAL_40:.*]] = addf %[[VAL_38]], %[[VAL_39]] : f32
// CHECK: store %[[VAL_40]], %[[VAL_12]][] : memref<f32>
// CHECK: } else {
// CHECK: %[[VAL_41:.*]] = cmpi eq, %[[VAL_28]], %[[VAL_26]] : index
// CHECK: scf.if %[[VAL_41]] {
// CHECK: %[[VAL_42:.*]] = load %[[VAL_12]][] : memref<f32>
-// CHECK: %[[VAL_43:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_25]]] : memref<?xf32>
+// CHECK: %[[VAL_43:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_25]]] : memref<?xf32>
// CHECK: %[[VAL_44:.*]] = addf %[[VAL_42]], %[[VAL_43]] : f32
// CHECK: store %[[VAL_44]], %[[VAL_12]][] : memref<f32>
// CHECK: } else {
// CHECK: }
// CHECK: }
// CHECK: %[[VAL_45:.*]] = cmpi eq, %[[VAL_27]], %[[VAL_26]] : index
-// CHECK: %[[VAL_46:.*]] = addi %[[VAL_24]], %[[VAL_5]] : index
+// CHECK: %[[VAL_46:.*]] = addi %[[VAL_24]], %[[VAL_4]] : index
// CHECK: %[[VAL_47:.*]] = select %[[VAL_45]], %[[VAL_46]], %[[VAL_24]] : index
// CHECK: %[[VAL_48:.*]] = cmpi eq, %[[VAL_28]], %[[VAL_26]] : index
-// CHECK: %[[VAL_49:.*]] = addi %[[VAL_25]], %[[VAL_5]] : index
+// CHECK: %[[VAL_49:.*]] = addi %[[VAL_25]], %[[VAL_4]] : index
// CHECK: %[[VAL_50:.*]] = select %[[VAL_48]], %[[VAL_49]], %[[VAL_25]] : index
-// CHECK: %[[VAL_51:.*]] = addi %[[VAL_26]], %[[VAL_5]] : index
+// CHECK: %[[VAL_51:.*]] = addi %[[VAL_26]], %[[VAL_4]] : index
// CHECK: scf.yield %[[VAL_47]], %[[VAL_50]], %[[VAL_51]] : index, index, index
// CHECK: }
// CHECK: %[[VAL_52:.*]] = load %[[VAL_12]][] : memref<f32>
-// CHECK: %[[VAL_53:.*]] = scf.for %[[VAL_54:.*]] = %[[VAL_55:.*]]#0 to %[[VAL_14]] step %[[VAL_5]] iter_args(%[[VAL_56:.*]] = %[[VAL_52]]) -> (f32) {
-// CHECK: %[[VAL_57:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_54]]] : memref<?xf32>
+// CHECK: %[[VAL_53:.*]] = scf.for %[[VAL_54:.*]] = %[[VAL_55:.*]]#0 to %[[VAL_14]] step %[[VAL_4]] iter_args(%[[VAL_56:.*]] = %[[VAL_52]]) -> (f32) {
+// CHECK: %[[VAL_57:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_54]]] : memref<?xf32>
// CHECK: %[[VAL_58:.*]] = addf %[[VAL_56]], %[[VAL_57]] : f32
// CHECK: scf.yield %[[VAL_58]] : f32
// CHECK: }
-// CHECK: %[[VAL_59:.*]] = scf.for %[[VAL_60:.*]] = %[[VAL_61:.*]]#1 to %[[VAL_16]] step %[[VAL_5]] iter_args(%[[VAL_62:.*]] = %[[VAL_63:.*]]) -> (f32) {
-// CHECK: %[[VAL_64:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_60]]] : memref<?xf32>
+// CHECK: %[[VAL_59:.*]] = scf.for %[[VAL_60:.*]] = %[[VAL_61:.*]]#1 to %[[VAL_16]] step %[[VAL_4]] iter_args(%[[VAL_62:.*]] = %[[VAL_63:.*]]) -> (f32) {
+// CHECK: %[[VAL_64:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_60]]] : memref<?xf32>
// CHECK: %[[VAL_65:.*]] = addf %[[VAL_62]], %[[VAL_64]] : f32
// CHECK: scf.yield %[[VAL_65]] : f32
// CHECK: }
// CHECK-SAME: %[[VAL_1:.*1]]: tensor<f32>,
// CHECK-SAME: %[[VAL_2:.*2]]: tensor<16xf32>,
// CHECK-SAME: %[[VAL_3:.*3]]: tensor<f32>) -> tensor<f32> {
-// CHECK: %[[VAL_4:.*]] = constant 999 : index
-// CHECK: %[[VAL_5:.*]] = constant 0 : index
-// CHECK: %[[VAL_6:.*]] = constant 1 : index
-// CHECK: %[[VAL_7:.*]] = alloca(%[[VAL_4]]) : memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = alloca(%[[VAL_4]]) : memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = alloca(%[[VAL_4]]) : memref<?xf32>
-// CHECK: %[[VAL_10:.*]] = alloca() : memref<f32>
-// CHECK: %[[VAL_11:.*]] = alloca(%[[VAL_4]]) : memref<?xindex>
-// CHECK: %[[VAL_12:.*]] = alloca(%[[VAL_4]]) : memref<?xindex>
-// CHECK: %[[VAL_13:.*]] = alloca(%[[VAL_4]]) : memref<?xf32>
-// CHECK: %[[VAL_14:.*]] = alloca() : memref<f32>
-// CHECK: %[[VAL_15:.*]] = load %[[VAL_10]][] : memref<f32>
-// CHECK: %[[VAL_16:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_5]]] : memref<?xindex>
-// CHECK: %[[VAL_17:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_6]]] : memref<?xindex>
-// CHECK: %[[VAL_18:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_5]]] : memref<?xindex>
-// CHECK: %[[VAL_19:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_6]]] : memref<?xindex>
-// CHECK: %[[VAL_20:.*]]:3 = scf.while (%[[VAL_21:.*]] = %[[VAL_16]], %[[VAL_22:.*]] = %[[VAL_18]], %[[VAL_23:.*]] = %[[VAL_5]]) : (index, index, index) -> (index, index, index) {
+// CHECK: %[[VAL_4:.*]] = constant 0 : index
+// CHECK: %[[VAL_5:.*]] = constant 1 : index
+// CHECK: %[[VAL_6:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<16xf32> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<16xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<16xf32> to memref<?xf32>
+// CHECK: %[[VAL_9:.*]] = tensor_to_memref %[[VAL_1]] : memref<f32>
+// CHECK: %[[VAL_10:.*]] = linalg.sparse_pointers %[[VAL_2]], %[[VAL_4]] : tensor<16xf32> to memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = linalg.sparse_indices %[[VAL_2]], %[[VAL_4]] : tensor<16xf32> to memref<?xindex>
+// CHECK: %[[VAL_12:.*]] = linalg.sparse_values %[[VAL_2]] : tensor<16xf32> to memref<?xf32>
+// CHECK: %[[VAL_13:.*]] = tensor_to_memref %[[VAL_3]] : memref<f32>
+// CHECK: %[[VAL_14:.*]] = alloc() : memref<f32>
+// CHECK: linalg.copy(%[[VAL_13]], %[[VAL_14]]) : memref<f32>, memref<f32>
+// CHECK: %[[VAL_15:.*]] = load %[[VAL_9]][] : memref<f32>
+// CHECK: %[[VAL_16:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref<?xindex>
+// CHECK: %[[VAL_17:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref<?xindex>
+// CHECK: %[[VAL_18:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_4]]] : memref<?xindex>
+// CHECK: %[[VAL_19:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_5]]] : memref<?xindex>
+// CHECK: %[[VAL_20:.*]]:3 = scf.while (%[[VAL_21:.*]] = %[[VAL_16]], %[[VAL_22:.*]] = %[[VAL_18]], %[[VAL_23:.*]] = %[[VAL_4]]) : (index, index, index) -> (index, index, index) {
// CHECK: %[[VAL_24:.*]] = cmpi ult, %[[VAL_21]], %[[VAL_17]] : index
// CHECK: %[[VAL_25:.*]] = cmpi ult, %[[VAL_22]], %[[VAL_19]] : index
// CHECK: %[[VAL_26:.*]] = and %[[VAL_24]], %[[VAL_25]] : i1
// CHECK: scf.condition(%[[VAL_26]]) %[[VAL_21]], %[[VAL_22]], %[[VAL_23]] : index, index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_27:.*]]: index, %[[VAL_28:.*]]: index, %[[VAL_29:.*]]: index):
-// CHECK: %[[VAL_30:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_27]]] : memref<?xindex>
-// CHECK: %[[VAL_31:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_28]]] : memref<?xindex>
+// CHECK: %[[VAL_30:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_27]]] : memref<?xindex>
+// CHECK: %[[VAL_31:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_28]]] : memref<?xindex>
// CHECK: %[[VAL_32:.*]] = cmpi eq, %[[VAL_30]], %[[VAL_29]] : index
// CHECK: %[[VAL_33:.*]] = cmpi eq, %[[VAL_31]], %[[VAL_29]] : index
// CHECK: %[[VAL_34:.*]] = and %[[VAL_32]], %[[VAL_33]] : i1
// CHECK: scf.if %[[VAL_34]] {
// CHECK: %[[VAL_35:.*]] = load %[[VAL_14]][] : memref<f32>
-// CHECK: %[[VAL_36:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_27]]] : memref<?xf32>
+// CHECK: %[[VAL_36:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_27]]] : memref<?xf32>
// CHECK: %[[VAL_37:.*]] = mulf %[[VAL_36]], %[[VAL_15]] : f32
-// CHECK: %[[VAL_38:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_28]]] : memref<?xf32>
+// CHECK: %[[VAL_38:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_28]]] : memref<?xf32>
// CHECK: %[[VAL_39:.*]] = addf %[[VAL_37]], %[[VAL_38]] : f32
// CHECK: %[[VAL_40:.*]] = addf %[[VAL_35]], %[[VAL_39]] : f32
// CHECK: store %[[VAL_40]], %[[VAL_14]][] : memref<f32>
// CHECK: %[[VAL_41:.*]] = cmpi eq, %[[VAL_30]], %[[VAL_29]] : index
// CHECK: scf.if %[[VAL_41]] {
// CHECK: %[[VAL_42:.*]] = load %[[VAL_14]][] : memref<f32>
-// CHECK: %[[VAL_43:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_27]]] : memref<?xf32>
+// CHECK: %[[VAL_43:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_27]]] : memref<?xf32>
// CHECK: %[[VAL_44:.*]] = mulf %[[VAL_43]], %[[VAL_15]] : f32
// CHECK: %[[VAL_45:.*]] = addf %[[VAL_42]], %[[VAL_44]] : f32
// CHECK: store %[[VAL_45]], %[[VAL_14]][] : memref<f32>
// CHECK: %[[VAL_46:.*]] = cmpi eq, %[[VAL_31]], %[[VAL_29]] : index
// CHECK: scf.if %[[VAL_46]] {
// CHECK: %[[VAL_47:.*]] = load %[[VAL_14]][] : memref<f32>
-// CHECK: %[[VAL_48:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_28]]] : memref<?xf32>
+// CHECK: %[[VAL_48:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_28]]] : memref<?xf32>
// CHECK: %[[VAL_49:.*]] = addf %[[VAL_47]], %[[VAL_48]] : f32
// CHECK: store %[[VAL_49]], %[[VAL_14]][] : memref<f32>
// CHECK: } else {
// CHECK: }
// CHECK: }
// CHECK: %[[VAL_50:.*]] = cmpi eq, %[[VAL_30]], %[[VAL_29]] : index
-// CHECK: %[[VAL_51:.*]] = addi %[[VAL_27]], %[[VAL_6]] : index
+// CHECK: %[[VAL_51:.*]] = addi %[[VAL_27]], %[[VAL_5]] : index
// CHECK: %[[VAL_52:.*]] = select %[[VAL_50]], %[[VAL_51]], %[[VAL_27]] : index
// CHECK: %[[VAL_53:.*]] = cmpi eq, %[[VAL_31]], %[[VAL_29]] : index
-// CHECK: %[[VAL_54:.*]] = addi %[[VAL_28]], %[[VAL_6]] : index
+// CHECK: %[[VAL_54:.*]] = addi %[[VAL_28]], %[[VAL_5]] : index
// CHECK: %[[VAL_55:.*]] = select %[[VAL_53]], %[[VAL_54]], %[[VAL_28]] : index
-// CHECK: %[[VAL_56:.*]] = addi %[[VAL_29]], %[[VAL_6]] : index
+// CHECK: %[[VAL_56:.*]] = addi %[[VAL_29]], %[[VAL_5]] : index
// CHECK: scf.yield %[[VAL_52]], %[[VAL_55]], %[[VAL_56]] : index, index, index
// CHECK: }
// CHECK: %[[VAL_57:.*]] = load %[[VAL_14]][] : memref<f32>
-// CHECK: %[[VAL_58:.*]] = scf.for %[[VAL_59:.*]] = %[[VAL_60:.*]]#0 to %[[VAL_17]] step %[[VAL_6]] iter_args(%[[VAL_61:.*]] = %[[VAL_57]]) -> (f32) {
-// CHECK: %[[VAL_62:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_59]]] : memref<?xf32>
+// CHECK: %[[VAL_58:.*]] = scf.for %[[VAL_59:.*]] = %[[VAL_60:.*]]#0 to %[[VAL_17]] step %[[VAL_5]] iter_args(%[[VAL_61:.*]] = %[[VAL_57]]) -> (f32) {
+// CHECK: %[[VAL_62:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_59]]] : memref<?xf32>
// CHECK: %[[VAL_63:.*]] = mulf %[[VAL_62]], %[[VAL_15]] : f32
// CHECK: %[[VAL_64:.*]] = addf %[[VAL_61]], %[[VAL_63]] : f32
// CHECK: scf.yield %[[VAL_64]] : f32
// CHECK: }
-// CHECK: %[[VAL_65:.*]] = scf.for %[[VAL_66:.*]] = %[[VAL_67:.*]]#1 to %[[VAL_19]] step %[[VAL_6]] iter_args(%[[VAL_68:.*]] = %[[VAL_69:.*]]) -> (f32) {
-// CHECK: %[[VAL_70:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_66]]] : memref<?xf32>
+// CHECK: %[[VAL_65:.*]] = scf.for %[[VAL_66:.*]] = %[[VAL_67:.*]]#1 to %[[VAL_19]] step %[[VAL_5]] iter_args(%[[VAL_68:.*]] = %[[VAL_69:.*]]) -> (f32) {
+// CHECK: %[[VAL_70:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_66]]] : memref<?xf32>
// CHECK: %[[VAL_71:.*]] = addf %[[VAL_68]], %[[VAL_70]] : f32
// CHECK: scf.yield %[[VAL_71]] : f32
// CHECK: }
// CHECK: %[[VAL_4:.*]] = constant 16 : index
// CHECK: %[[VAL_5:.*]] = constant 0 : index
// CHECK: %[[VAL_6:.*]] = constant 1 : index
-// CHECK: %[[VAL_7:.*]] = alloca() : memref<32x16xf32>
-// CHECK: %[[VAL_8:.*]] = alloca() : memref<32x16xf32>
-// CHECK: %[[VAL_9:.*]] = alloca() : memref<32x16xf32>
-// CHECK: scf.for %[[VAL_10:.*]] = %[[VAL_5]] to %[[VAL_3]] step %[[VAL_6]] {
-// CHECK: scf.for %[[VAL_11:.*]] = %[[VAL_5]] to %[[VAL_4]] step %[[VAL_6]] {
-// CHECK: %[[VAL_12:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_10]], %[[VAL_11]]] : memref<32x16xf32>
-// CHECK: %[[VAL_13:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_10]], %[[VAL_11]]] : memref<32x16xf32>
-// CHECK: %[[VAL_14:.*]] = addf %[[VAL_12]], %[[VAL_13]] : f32
-// CHECK: store %[[VAL_14]], %[[VAL_9]]{{\[}}%[[VAL_10]], %[[VAL_11]]] : memref<32x16xf32>
+// CHECK: %[[VAL_7:.*]] = tensor_to_memref %[[VAL_0]] : memref<32x16xf32>
+// CHECK: %[[VAL_8:.*]] = tensor_to_memref %[[VAL_1]] : memref<32x16xf32>
+// CHECK: %[[VAL_9:.*]] = tensor_to_memref %[[VAL_2]] : memref<32x16xf32>
+// CHECK: %[[VAL_10:.*]] = alloc() : memref<32x16xf32>
+// CHECK: linalg.copy(%[[VAL_9]], %[[VAL_10]]) : memref<32x16xf32>, memref<32x16xf32>
+// CHECK: scf.for %[[VAL_11:.*]] = %[[VAL_5]] to %[[VAL_3]] step %[[VAL_6]] {
+// CHECK: scf.for %[[VAL_12:.*]] = %[[VAL_5]] to %[[VAL_4]] step %[[VAL_6]] {
+// CHECK: %[[VAL_13:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_11]], %[[VAL_12]]] : memref<32x16xf32>
+// CHECK: %[[VAL_14:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_11]], %[[VAL_12]]] : memref<32x16xf32>
+// CHECK: %[[VAL_15:.*]] = addf %[[VAL_13]], %[[VAL_14]] : f32
+// CHECK: store %[[VAL_15]], %[[VAL_10]]{{\[}}%[[VAL_11]], %[[VAL_12]]] : memref<32x16xf32>
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_15:.*]] = tensor_load %[[VAL_9]] : memref<32x16xf32>
-// CHECK: return %[[VAL_15]] : tensor<32x16xf32>
+// CHECK: %[[VAL_16:.*]] = tensor_load %[[VAL_10]] : memref<32x16xf32>
+// CHECK: return %[[VAL_16]] : tensor<32x16xf32>
// CHECK: }
func @add_dd(%arga: tensor<32x16xf32>, %argb: tensor<32x16xf32>, %argx: tensor<32x16xf32>) -> tensor<32x16xf32> {
%0 = linalg.generic #trait_dd
// CHECK: %[[VAL_4:.*]] = constant 16 : index
// CHECK: %[[VAL_5:.*]] = constant 0 : index
// CHECK: %[[VAL_6:.*]] = constant 1 : index
-// CHECK: %[[VAL_7:.*]] = alloca() : memref<32x16xf32>
-// CHECK: %[[VAL_8:.*]] = alloca() : memref<32x16xf32>
-// CHECK: %[[VAL_9:.*]] = alloca() : memref<32x16xf32>
-// CHECK: scf.for %[[VAL_10:.*]] = %[[VAL_5]] to %[[VAL_3]] step %[[VAL_6]] {
-// CHECK: scf.for %[[VAL_11:.*]] = %[[VAL_5]] to %[[VAL_4]] step %[[VAL_6]] {
-// CHECK: %[[VAL_12:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_10]], %[[VAL_11]]] : memref<32x16xf32>
-// CHECK: %[[VAL_13:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_10]], %[[VAL_11]]] : memref<32x16xf32>
-// CHECK: %[[VAL_14:.*]] = mulf %[[VAL_12]], %[[VAL_13]] : f32
-// CHECK: store %[[VAL_14]], %[[VAL_9]]{{\[}}%[[VAL_10]], %[[VAL_11]]] : memref<32x16xf32>
+// CHECK: %[[VAL_7:.*]] = tensor_to_memref %[[VAL_0]] : memref<32x16xf32>
+// CHECK: %[[VAL_8:.*]] = tensor_to_memref %[[VAL_1]] : memref<32x16xf32>
+// CHECK: %[[VAL_9:.*]] = tensor_to_memref %[[VAL_2]] : memref<32x16xf32>
+// CHECK: %[[VAL_10:.*]] = alloc() : memref<32x16xf32>
+// CHECK: linalg.copy(%[[VAL_9]], %[[VAL_10]]) : memref<32x16xf32>, memref<32x16xf32>
+// CHECK: scf.for %[[VAL_11:.*]] = %[[VAL_5]] to %[[VAL_3]] step %[[VAL_6]] {
+// CHECK: scf.for %[[VAL_12:.*]] = %[[VAL_5]] to %[[VAL_4]] step %[[VAL_6]] {
+// CHECK: %[[VAL_13:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_11]], %[[VAL_12]]] : memref<32x16xf32>
+// CHECK: %[[VAL_14:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_11]], %[[VAL_12]]] : memref<32x16xf32>
+// CHECK: %[[VAL_15:.*]] = mulf %[[VAL_13]], %[[VAL_14]] : f32
+// CHECK: store %[[VAL_15]], %[[VAL_10]]{{\[}}%[[VAL_11]], %[[VAL_12]]] : memref<32x16xf32>
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_15:.*]] = tensor_load %[[VAL_9]] : memref<32x16xf32>
-// CHECK: return %[[VAL_15]] : tensor<32x16xf32>
+// CHECK: %[[VAL_16:.*]] = tensor_load %[[VAL_10]] : memref<32x16xf32>
+// CHECK: return %[[VAL_16]] : tensor<32x16xf32>
// CHECK: }
func @mul_dd(%arga: tensor<32x16xf32>, %argb: tensor<32x16xf32>, %argx: tensor<32x16xf32>) -> tensor<32x16xf32> {
%0 = linalg.generic #trait_dd
// CHECK-SAME: %[[VAL_0:.*0]]: tensor<32x16xf32>,
// CHECK-SAME: %[[VAL_1:.*1]]: tensor<32x16xf32>,
// CHECK-SAME: %[[VAL_2:.*2]]: tensor<32x16xf32>) -> tensor<32x16xf32> {
-// CHECK: %[[VAL_3:.*]] = constant 999 : index
-// CHECK: %[[VAL_4:.*]] = constant 32 : index
-// CHECK: %[[VAL_5:.*]] = constant 16 : index
-// CHECK: %[[VAL_6:.*]] = constant 0 : index
-// CHECK: %[[VAL_7:.*]] = constant true
-// CHECK: %[[VAL_8:.*]] = constant 1 : index
-// CHECK: %[[VAL_9:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = alloca(%[[VAL_3]]) : memref<?xf32>
-// CHECK: %[[VAL_12:.*]] = alloca() : memref<32x16xf32>
-// CHECK: %[[VAL_13:.*]] = alloca() : memref<32x16xf32>
-// CHECK: scf.for %[[VAL_14:.*]] = %[[VAL_6]] to %[[VAL_4]] step %[[VAL_8]] {
-// CHECK: %[[VAL_15:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_14]]] : memref<?xindex>
-// CHECK: %[[VAL_16:.*]] = addi %[[VAL_14]], %[[VAL_8]] : index
-// CHECK: %[[VAL_17:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_16]]] : memref<?xindex>
-// CHECK: %[[VAL_18:.*]]:2 = scf.while (%[[VAL_19:.*]] = %[[VAL_15]], %[[VAL_20:.*]] = %[[VAL_6]]) : (index, index) -> (index, index) {
+// CHECK: %[[VAL_3:.*]] = constant 32 : index
+// CHECK: %[[VAL_4:.*]] = constant 16 : index
+// CHECK: %[[VAL_5:.*]] = constant 0 : index
+// CHECK: %[[VAL_6:.*]] = constant true
+// CHECK: %[[VAL_7:.*]] = constant 1 : index
+// CHECK: %[[VAL_8:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_7]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_7]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_10:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16xf32> to memref<?xf32>
+// CHECK: %[[VAL_11:.*]] = tensor_to_memref %[[VAL_1]] : memref<32x16xf32>
+// CHECK: %[[VAL_12:.*]] = tensor_to_memref %[[VAL_2]] : memref<32x16xf32>
+// CHECK: %[[VAL_13:.*]] = alloc() : memref<32x16xf32>
+// CHECK: linalg.copy(%[[VAL_12]], %[[VAL_13]]) : memref<32x16xf32>, memref<32x16xf32>
+// CHECK: scf.for %[[VAL_14:.*]] = %[[VAL_5]] to %[[VAL_3]] step %[[VAL_7]] {
+// CHECK: %[[VAL_15:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_14]]] : memref<?xindex>
+// CHECK: %[[VAL_16:.*]] = addi %[[VAL_14]], %[[VAL_7]] : index
+// CHECK: %[[VAL_17:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_16]]] : memref<?xindex>
+// CHECK: %[[VAL_18:.*]]:2 = scf.while (%[[VAL_19:.*]] = %[[VAL_15]], %[[VAL_20:.*]] = %[[VAL_5]]) : (index, index) -> (index, index) {
// CHECK: %[[VAL_21:.*]] = cmpi ult, %[[VAL_19]], %[[VAL_17]] : index
// CHECK: scf.condition(%[[VAL_21]]) %[[VAL_19]], %[[VAL_20]] : index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_22:.*]]: index, %[[VAL_23:.*]]: index):
-// CHECK: %[[VAL_24:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_22]]] : memref<?xindex>
+// CHECK: %[[VAL_24:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_22]]] : memref<?xindex>
// CHECK: %[[VAL_25:.*]] = cmpi eq, %[[VAL_24]], %[[VAL_23]] : index
// CHECK: scf.if %[[VAL_25]] {
-// CHECK: %[[VAL_26:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_22]]] : memref<?xf32>
-// CHECK: %[[VAL_27:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_14]], %[[VAL_23]]] : memref<32x16xf32>
+// CHECK: %[[VAL_26:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_22]]] : memref<?xf32>
+// CHECK: %[[VAL_27:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_14]], %[[VAL_23]]] : memref<32x16xf32>
// CHECK: %[[VAL_28:.*]] = addf %[[VAL_26]], %[[VAL_27]] : f32
// CHECK: store %[[VAL_28]], %[[VAL_13]]{{\[}}%[[VAL_14]], %[[VAL_23]]] : memref<32x16xf32>
// CHECK: } else {
-// CHECK: scf.if %[[VAL_7]] {
-// CHECK: %[[VAL_29:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_14]], %[[VAL_23]]] : memref<32x16xf32>
+// CHECK: scf.if %[[VAL_6]] {
+// CHECK: %[[VAL_29:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_14]], %[[VAL_23]]] : memref<32x16xf32>
// CHECK: store %[[VAL_29]], %[[VAL_13]]{{\[}}%[[VAL_14]], %[[VAL_23]]] : memref<32x16xf32>
// CHECK: } else {
// CHECK: }
// CHECK: }
// CHECK: %[[VAL_30:.*]] = cmpi eq, %[[VAL_24]], %[[VAL_23]] : index
-// CHECK: %[[VAL_31:.*]] = addi %[[VAL_22]], %[[VAL_8]] : index
+// CHECK: %[[VAL_31:.*]] = addi %[[VAL_22]], %[[VAL_7]] : index
// CHECK: %[[VAL_32:.*]] = select %[[VAL_30]], %[[VAL_31]], %[[VAL_22]] : index
-// CHECK: %[[VAL_33:.*]] = addi %[[VAL_23]], %[[VAL_8]] : index
+// CHECK: %[[VAL_33:.*]] = addi %[[VAL_23]], %[[VAL_7]] : index
// CHECK: scf.yield %[[VAL_32]], %[[VAL_33]] : index, index
// CHECK: }
-// CHECK: scf.for %[[VAL_34:.*]] = %[[VAL_35:.*]]#1 to %[[VAL_5]] step %[[VAL_8]] {
-// CHECK: %[[VAL_36:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_14]], %[[VAL_34]]] : memref<32x16xf32>
+// CHECK: scf.for %[[VAL_34:.*]] = %[[VAL_35:.*]]#1 to %[[VAL_4]] step %[[VAL_7]] {
+// CHECK: %[[VAL_36:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_14]], %[[VAL_34]]] : memref<32x16xf32>
// CHECK: store %[[VAL_36]], %[[VAL_13]]{{\[}}%[[VAL_14]], %[[VAL_34]]] : memref<32x16xf32>
// CHECK: }
// CHECK: }
// CHECK-SAME: %[[VAL_0:.*0]]: tensor<32x16xf32>,
// CHECK-SAME: %[[VAL_1:.*1]]: tensor<32x16xf32>,
// CHECK-SAME: %[[VAL_2:.*2]]: tensor<32x16xf32>) -> tensor<32x16xf32> {
-// CHECK: %[[VAL_3:.*]] = constant 999 : index
-// CHECK: %[[VAL_4:.*]] = constant 32 : index
-// CHECK: %[[VAL_5:.*]] = constant 0 : index
-// CHECK: %[[VAL_6:.*]] = constant 1 : index
-// CHECK: %[[VAL_7:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = alloca(%[[VAL_3]]) : memref<?xf32>
-// CHECK: %[[VAL_10:.*]] = alloca() : memref<32x16xf32>
-// CHECK: %[[VAL_11:.*]] = alloca() : memref<32x16xf32>
-// CHECK: scf.for %[[VAL_12:.*]] = %[[VAL_5]] to %[[VAL_4]] step %[[VAL_6]] {
-// CHECK: %[[VAL_13:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_12]]] : memref<?xindex>
-// CHECK: %[[VAL_14:.*]] = addi %[[VAL_12]], %[[VAL_6]] : index
-// CHECK: %[[VAL_15:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_14]]] : memref<?xindex>
-// CHECK: scf.for %[[VAL_16:.*]] = %[[VAL_13]] to %[[VAL_15]] step %[[VAL_6]] {
-// CHECK: %[[VAL_17:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_16]]] : memref<?xindex>
-// CHECK: %[[VAL_18:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_16]]] : memref<?xf32>
-// CHECK: %[[VAL_19:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_12]], %[[VAL_17]]] : memref<32x16xf32>
+// CHECK: %[[VAL_3:.*]] = constant 32 : index
+// CHECK: %[[VAL_4:.*]] = constant 0 : index
+// CHECK: %[[VAL_5:.*]] = constant 1 : index
+// CHECK: %[[VAL_6:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_5]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_5]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16xf32> to memref<?xf32>
+// CHECK: %[[VAL_9:.*]] = tensor_to_memref %[[VAL_1]] : memref<32x16xf32>
+// CHECK: %[[VAL_10:.*]] = tensor_to_memref %[[VAL_2]] : memref<32x16xf32>
+// CHECK: %[[VAL_11:.*]] = alloc() : memref<32x16xf32>
+// CHECK: linalg.copy(%[[VAL_10]], %[[VAL_11]]) : memref<32x16xf32>, memref<32x16xf32>
+// CHECK: scf.for %[[VAL_12:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] {
+// CHECK: %[[VAL_13:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_12]]] : memref<?xindex>
+// CHECK: %[[VAL_14:.*]] = addi %[[VAL_12]], %[[VAL_5]] : index
+// CHECK: %[[VAL_15:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_14]]] : memref<?xindex>
+// CHECK: scf.for %[[VAL_16:.*]] = %[[VAL_13]] to %[[VAL_15]] step %[[VAL_5]] {
+// CHECK: %[[VAL_17:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_16]]] : memref<?xindex>
+// CHECK: %[[VAL_18:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_16]]] : memref<?xf32>
+// CHECK: %[[VAL_19:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_12]], %[[VAL_17]]] : memref<32x16xf32>
// CHECK: %[[VAL_20:.*]] = mulf %[[VAL_18]], %[[VAL_19]] : f32
// CHECK: store %[[VAL_20]], %[[VAL_11]]{{\[}}%[[VAL_12]], %[[VAL_17]]] : memref<32x16xf32>
// CHECK: }
// CHECK-SAME: %[[VAL_0:.*0]]: tensor<32x16xf32>,
// CHECK-SAME: %[[VAL_1:.*1]]: tensor<32x16xf32>,
// CHECK-SAME: %[[VAL_2:.*2]]: tensor<32x16xf32>) -> tensor<32x16xf32> {
-// CHECK: %[[VAL_3:.*]] = constant 999 : index
-// CHECK: %[[VAL_4:.*]] = constant 32 : index
-// CHECK: %[[VAL_5:.*]] = constant 16 : index
-// CHECK: %[[VAL_6:.*]] = constant true
-// CHECK: %[[VAL_7:.*]] = constant 0 : index
-// CHECK: %[[VAL_8:.*]] = constant 1 : index
-// CHECK: %[[VAL_9:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = alloca(%[[VAL_3]]) : memref<?xf32>
-// CHECK: %[[VAL_12:.*]] = alloca() : memref<32x16xf32>
-// CHECK: %[[VAL_13:.*]] = alloca() : memref<32x16xf32>
-// CHECK: %[[VAL_14:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_7]]] : memref<?xindex>
-// CHECK: %[[VAL_15:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_8]]] : memref<?xindex>
-// CHECK: %[[VAL_16:.*]]:2 = scf.while (%[[VAL_17:.*]] = %[[VAL_14]], %[[VAL_18:.*]] = %[[VAL_7]]) : (index, index) -> (index, index) {
+// CHECK: %[[VAL_3:.*]] = constant 32 : index
+// CHECK: %[[VAL_4:.*]] = constant 16 : index
+// CHECK: %[[VAL_5:.*]] = constant true
+// CHECK: %[[VAL_6:.*]] = constant 0 : index
+// CHECK: %[[VAL_7:.*]] = constant 1 : index
+// CHECK: %[[VAL_8:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_6]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_6]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_10:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16xf32> to memref<?xf32>
+// CHECK: %[[VAL_11:.*]] = tensor_to_memref %[[VAL_1]] : memref<32x16xf32>
+// CHECK: %[[VAL_12:.*]] = tensor_to_memref %[[VAL_2]] : memref<32x16xf32>
+// CHECK: %[[VAL_13:.*]] = alloc() : memref<32x16xf32>
+// CHECK: linalg.copy(%[[VAL_12]], %[[VAL_13]]) : memref<32x16xf32>, memref<32x16xf32>
+// CHECK: %[[VAL_14:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_6]]] : memref<?xindex>
+// CHECK: %[[VAL_15:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_7]]] : memref<?xindex>
+// CHECK: %[[VAL_16:.*]]:2 = scf.while (%[[VAL_17:.*]] = %[[VAL_14]], %[[VAL_18:.*]] = %[[VAL_6]]) : (index, index) -> (index, index) {
// CHECK: %[[VAL_19:.*]] = cmpi ult, %[[VAL_17]], %[[VAL_15]] : index
// CHECK: scf.condition(%[[VAL_19]]) %[[VAL_17]], %[[VAL_18]] : index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_20:.*]]: index, %[[VAL_21:.*]]: index):
-// CHECK: %[[VAL_22:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_20]]] : memref<?xindex>
+// CHECK: %[[VAL_22:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_20]]] : memref<?xindex>
// CHECK: %[[VAL_23:.*]] = cmpi eq, %[[VAL_22]], %[[VAL_21]] : index
// CHECK: scf.if %[[VAL_23]] {
-// CHECK: scf.for %[[VAL_24:.*]] = %[[VAL_7]] to %[[VAL_5]] step %[[VAL_8]] {
-// CHECK: %[[VAL_25:.*]] = muli %[[VAL_20]], %[[VAL_5]] : index
+// CHECK: scf.for %[[VAL_24:.*]] = %[[VAL_6]] to %[[VAL_4]] step %[[VAL_7]] {
+// CHECK: %[[VAL_25:.*]] = muli %[[VAL_20]], %[[VAL_4]] : index
// CHECK: %[[VAL_26:.*]] = addi %[[VAL_25]], %[[VAL_24]] : index
-// CHECK: %[[VAL_27:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_26]]] : memref<?xf32>
-// CHECK: %[[VAL_28:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_21]], %[[VAL_24]]] : memref<32x16xf32>
+// CHECK: %[[VAL_27:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_26]]] : memref<?xf32>
+// CHECK: %[[VAL_28:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_21]], %[[VAL_24]]] : memref<32x16xf32>
// CHECK: %[[VAL_29:.*]] = addf %[[VAL_27]], %[[VAL_28]] : f32
// CHECK: store %[[VAL_29]], %[[VAL_13]]{{\[}}%[[VAL_21]], %[[VAL_24]]] : memref<32x16xf32>
// CHECK: }
// CHECK: } else {
-// CHECK: scf.if %[[VAL_6]] {
-// CHECK: scf.for %[[VAL_30:.*]] = %[[VAL_7]] to %[[VAL_5]] step %[[VAL_8]] {
-// CHECK: %[[VAL_31:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_21]], %[[VAL_30]]] : memref<32x16xf32>
+// CHECK: scf.if %[[VAL_5]] {
+// CHECK: scf.for %[[VAL_30:.*]] = %[[VAL_6]] to %[[VAL_4]] step %[[VAL_7]] {
+// CHECK: %[[VAL_31:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_21]], %[[VAL_30]]] : memref<32x16xf32>
// CHECK: store %[[VAL_31]], %[[VAL_13]]{{\[}}%[[VAL_21]], %[[VAL_30]]] : memref<32x16xf32>
// CHECK: }
// CHECK: } else {
// CHECK: }
// CHECK: }
// CHECK: %[[VAL_32:.*]] = cmpi eq, %[[VAL_22]], %[[VAL_21]] : index
-// CHECK: %[[VAL_33:.*]] = addi %[[VAL_20]], %[[VAL_8]] : index
+// CHECK: %[[VAL_33:.*]] = addi %[[VAL_20]], %[[VAL_7]] : index
// CHECK: %[[VAL_34:.*]] = select %[[VAL_32]], %[[VAL_33]], %[[VAL_20]] : index
-// CHECK: %[[VAL_35:.*]] = addi %[[VAL_21]], %[[VAL_8]] : index
+// CHECK: %[[VAL_35:.*]] = addi %[[VAL_21]], %[[VAL_7]] : index
// CHECK: scf.yield %[[VAL_34]], %[[VAL_35]] : index, index
// CHECK: }
-// CHECK: scf.for %[[VAL_36:.*]] = %[[VAL_37:.*]]#1 to %[[VAL_4]] step %[[VAL_8]] {
-// CHECK: scf.for %[[VAL_38:.*]] = %[[VAL_7]] to %[[VAL_5]] step %[[VAL_8]] {
-// CHECK: %[[VAL_39:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_36]], %[[VAL_38]]] : memref<32x16xf32>
+// CHECK: scf.for %[[VAL_36:.*]] = %[[VAL_37:.*]]#1 to %[[VAL_3]] step %[[VAL_7]] {
+// CHECK: scf.for %[[VAL_38:.*]] = %[[VAL_6]] to %[[VAL_4]] step %[[VAL_7]] {
+// CHECK: %[[VAL_39:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_36]], %[[VAL_38]]] : memref<32x16xf32>
// CHECK: store %[[VAL_39]], %[[VAL_13]]{{\[}}%[[VAL_36]], %[[VAL_38]]] : memref<32x16xf32>
// CHECK: }
// CHECK: }
// CHECK-SAME: %[[VAL_0:.*0]]: tensor<32x16xf32>,
// CHECK-SAME: %[[VAL_1:.*1]]: tensor<32x16xf32>,
// CHECK-SAME: %[[VAL_2:.*2]]: tensor<32x16xf32>) -> tensor<32x16xf32> {
-// CHECK: %[[VAL_3:.*]] = constant 999 : index
-// CHECK: %[[VAL_4:.*]] = constant 16 : index
-// CHECK: %[[VAL_5:.*]] = constant 0 : index
-// CHECK: %[[VAL_6:.*]] = constant 1 : index
-// CHECK: %[[VAL_7:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = alloca(%[[VAL_3]]) : memref<?xf32>
-// CHECK: %[[VAL_10:.*]] = alloca() : memref<32x16xf32>
-// CHECK: %[[VAL_11:.*]] = alloca() : memref<32x16xf32>
-// CHECK: %[[VAL_12:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_5]]] : memref<?xindex>
-// CHECK: %[[VAL_13:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_6]]] : memref<?xindex>
-// CHECK: scf.for %[[VAL_14:.*]] = %[[VAL_12]] to %[[VAL_13]] step %[[VAL_6]] {
-// CHECK: %[[VAL_15:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_14]]] : memref<?xindex>
-// CHECK: scf.for %[[VAL_16:.*]] = %[[VAL_5]] to %[[VAL_4]] step %[[VAL_6]] {
-// CHECK: %[[VAL_17:.*]] = muli %[[VAL_14]], %[[VAL_4]] : index
+// CHECK: %[[VAL_3:.*]] = constant 16 : index
+// CHECK: %[[VAL_4:.*]] = constant 0 : index
+// CHECK: %[[VAL_5:.*]] = constant 1 : index
+// CHECK: %[[VAL_6:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16xf32> to memref<?xf32>
+// CHECK: %[[VAL_9:.*]] = tensor_to_memref %[[VAL_1]] : memref<32x16xf32>
+// CHECK: %[[VAL_10:.*]] = tensor_to_memref %[[VAL_2]] : memref<32x16xf32>
+// CHECK: %[[VAL_11:.*]] = alloc() : memref<32x16xf32>
+// CHECK: linalg.copy(%[[VAL_10]], %[[VAL_11]]) : memref<32x16xf32>, memref<32x16xf32>
+// CHECK: %[[VAL_12:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref<?xindex>
+// CHECK: %[[VAL_13:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref<?xindex>
+// CHECK: scf.for %[[VAL_14:.*]] = %[[VAL_12]] to %[[VAL_13]] step %[[VAL_5]] {
+// CHECK: %[[VAL_15:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_14]]] : memref<?xindex>
+// CHECK: scf.for %[[VAL_16:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] {
+// CHECK: %[[VAL_17:.*]] = muli %[[VAL_14]], %[[VAL_3]] : index
// CHECK: %[[VAL_18:.*]] = addi %[[VAL_17]], %[[VAL_16]] : index
-// CHECK: %[[VAL_19:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_18]]] : memref<?xf32>
-// CHECK: %[[VAL_20:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_15]], %[[VAL_16]]] : memref<32x16xf32>
+// CHECK: %[[VAL_19:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_18]]] : memref<?xf32>
+// CHECK: %[[VAL_20:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_15]], %[[VAL_16]]] : memref<32x16xf32>
// CHECK: %[[VAL_21:.*]] = mulf %[[VAL_19]], %[[VAL_20]] : f32
// CHECK: store %[[VAL_21]], %[[VAL_11]]{{\[}}%[[VAL_15]], %[[VAL_16]]] : memref<32x16xf32>
// CHECK: }
// CHECK-SAME: %[[VAL_0:.*0]]: tensor<32x16xf32>,
// CHECK-SAME: %[[VAL_1:.*1]]: tensor<32x16xf32>,
// CHECK-SAME: %[[VAL_2:.*2]]: tensor<32x16xf32>) -> tensor<32x16xf32> {
-// CHECK: %[[VAL_3:.*]] = constant 999 : index
-// CHECK: %[[VAL_4:.*]] = constant 32 : index
-// CHECK: %[[VAL_5:.*]] = constant 16 : index
-// CHECK: %[[VAL_6:.*]] = constant true
-// CHECK: %[[VAL_7:.*]] = constant 0 : index
-// CHECK: %[[VAL_8:.*]] = constant 1 : index
-// CHECK: %[[VAL_9:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_12:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_13:.*]] = alloca(%[[VAL_3]]) : memref<?xf32>
-// CHECK: %[[VAL_14:.*]] = alloca() : memref<32x16xf32>
-// CHECK: %[[VAL_15:.*]] = alloca() : memref<32x16xf32>
-// CHECK: %[[VAL_16:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_7]]] : memref<?xindex>
-// CHECK: %[[VAL_17:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_8]]] : memref<?xindex>
-// CHECK: %[[VAL_18:.*]]:2 = scf.while (%[[VAL_19:.*]] = %[[VAL_16]], %[[VAL_20:.*]] = %[[VAL_7]]) : (index, index) -> (index, index) {
+// CHECK: %[[VAL_3:.*]] = constant 32 : index
+// CHECK: %[[VAL_4:.*]] = constant 16 : index
+// CHECK: %[[VAL_5:.*]] = constant true
+// CHECK: %[[VAL_6:.*]] = constant 0 : index
+// CHECK: %[[VAL_7:.*]] = constant 1 : index
+// CHECK: %[[VAL_8:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_6]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_6]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_10:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_7]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_7]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_12:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16xf32> to memref<?xf32>
+// CHECK: %[[VAL_13:.*]] = tensor_to_memref %[[VAL_1]] : memref<32x16xf32>
+// CHECK: %[[VAL_14:.*]] = tensor_to_memref %[[VAL_2]] : memref<32x16xf32>
+// CHECK: %[[VAL_15:.*]] = alloc() : memref<32x16xf32>
+// CHECK: linalg.copy(%[[VAL_14]], %[[VAL_15]]) : memref<32x16xf32>, memref<32x16xf32>
+// CHECK: %[[VAL_16:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_6]]] : memref<?xindex>
+// CHECK: %[[VAL_17:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_7]]] : memref<?xindex>
+// CHECK: %[[VAL_18:.*]]:2 = scf.while (%[[VAL_19:.*]] = %[[VAL_16]], %[[VAL_20:.*]] = %[[VAL_6]]) : (index, index) -> (index, index) {
// CHECK: %[[VAL_21:.*]] = cmpi ult, %[[VAL_19]], %[[VAL_17]] : index
// CHECK: scf.condition(%[[VAL_21]]) %[[VAL_19]], %[[VAL_20]] : index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_22:.*]]: index, %[[VAL_23:.*]]: index):
-// CHECK: %[[VAL_24:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_22]]] : memref<?xindex>
+// CHECK: %[[VAL_24:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_22]]] : memref<?xindex>
// CHECK: %[[VAL_25:.*]] = cmpi eq, %[[VAL_24]], %[[VAL_23]] : index
// CHECK: scf.if %[[VAL_25]] {
-// CHECK: %[[VAL_26:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_22]]] : memref<?xindex>
-// CHECK: %[[VAL_27:.*]] = addi %[[VAL_22]], %[[VAL_8]] : index
-// CHECK: %[[VAL_28:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_27]]] : memref<?xindex>
-// CHECK: %[[VAL_29:.*]]:2 = scf.while (%[[VAL_30:.*]] = %[[VAL_26]], %[[VAL_31:.*]] = %[[VAL_7]]) : (index, index) -> (index, index) {
+// CHECK: %[[VAL_26:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_22]]] : memref<?xindex>
+// CHECK: %[[VAL_27:.*]] = addi %[[VAL_22]], %[[VAL_7]] : index
+// CHECK: %[[VAL_28:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_27]]] : memref<?xindex>
+// CHECK: %[[VAL_29:.*]]:2 = scf.while (%[[VAL_30:.*]] = %[[VAL_26]], %[[VAL_31:.*]] = %[[VAL_6]]) : (index, index) -> (index, index) {
// CHECK: %[[VAL_32:.*]] = cmpi ult, %[[VAL_30]], %[[VAL_28]] : index
// CHECK: scf.condition(%[[VAL_32]]) %[[VAL_30]], %[[VAL_31]] : index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_33:.*]]: index, %[[VAL_34:.*]]: index):
-// CHECK: %[[VAL_35:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_33]]] : memref<?xindex>
+// CHECK: %[[VAL_35:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_33]]] : memref<?xindex>
// CHECK: %[[VAL_36:.*]] = cmpi eq, %[[VAL_35]], %[[VAL_34]] : index
// CHECK: scf.if %[[VAL_36]] {
-// CHECK: %[[VAL_37:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_33]]] : memref<?xf32>
-// CHECK: %[[VAL_38:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_23]], %[[VAL_34]]] : memref<32x16xf32>
+// CHECK: %[[VAL_37:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_33]]] : memref<?xf32>
+// CHECK: %[[VAL_38:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_23]], %[[VAL_34]]] : memref<32x16xf32>
// CHECK: %[[VAL_39:.*]] = addf %[[VAL_37]], %[[VAL_38]] : f32
// CHECK: store %[[VAL_39]], %[[VAL_15]]{{\[}}%[[VAL_23]], %[[VAL_34]]] : memref<32x16xf32>
// CHECK: } else {
-// CHECK: scf.if %[[VAL_6]] {
-// CHECK: %[[VAL_40:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_23]], %[[VAL_34]]] : memref<32x16xf32>
+// CHECK: scf.if %[[VAL_5]] {
+// CHECK: %[[VAL_40:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_23]], %[[VAL_34]]] : memref<32x16xf32>
// CHECK: store %[[VAL_40]], %[[VAL_15]]{{\[}}%[[VAL_23]], %[[VAL_34]]] : memref<32x16xf32>
// CHECK: } else {
// CHECK: }
// CHECK: }
// CHECK: %[[VAL_41:.*]] = cmpi eq, %[[VAL_35]], %[[VAL_34]] : index
-// CHECK: %[[VAL_42:.*]] = addi %[[VAL_33]], %[[VAL_8]] : index
+// CHECK: %[[VAL_42:.*]] = addi %[[VAL_33]], %[[VAL_7]] : index
// CHECK: %[[VAL_43:.*]] = select %[[VAL_41]], %[[VAL_42]], %[[VAL_33]] : index
-// CHECK: %[[VAL_44:.*]] = addi %[[VAL_34]], %[[VAL_8]] : index
+// CHECK: %[[VAL_44:.*]] = addi %[[VAL_34]], %[[VAL_7]] : index
// CHECK: scf.yield %[[VAL_43]], %[[VAL_44]] : index, index
// CHECK: }
-// CHECK: scf.for %[[VAL_45:.*]] = %[[VAL_46:.*]]#1 to %[[VAL_5]] step %[[VAL_8]] {
-// CHECK: %[[VAL_47:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_23]], %[[VAL_45]]] : memref<32x16xf32>
+// CHECK: scf.for %[[VAL_45:.*]] = %[[VAL_46:.*]]#1 to %[[VAL_4]] step %[[VAL_7]] {
+// CHECK: %[[VAL_47:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_23]], %[[VAL_45]]] : memref<32x16xf32>
// CHECK: store %[[VAL_47]], %[[VAL_15]]{{\[}}%[[VAL_23]], %[[VAL_45]]] : memref<32x16xf32>
// CHECK: }
// CHECK: } else {
-// CHECK: scf.if %[[VAL_6]] {
-// CHECK: scf.for %[[VAL_48:.*]] = %[[VAL_7]] to %[[VAL_5]] step %[[VAL_8]] {
-// CHECK: %[[VAL_49:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_23]], %[[VAL_48]]] : memref<32x16xf32>
+// CHECK: scf.if %[[VAL_5]] {
+// CHECK: scf.for %[[VAL_48:.*]] = %[[VAL_6]] to %[[VAL_4]] step %[[VAL_7]] {
+// CHECK: %[[VAL_49:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_23]], %[[VAL_48]]] : memref<32x16xf32>
// CHECK: store %[[VAL_49]], %[[VAL_15]]{{\[}}%[[VAL_23]], %[[VAL_48]]] : memref<32x16xf32>
// CHECK: }
// CHECK: } else {
// CHECK: }
// CHECK: }
// CHECK: %[[VAL_50:.*]] = cmpi eq, %[[VAL_24]], %[[VAL_23]] : index
-// CHECK: %[[VAL_51:.*]] = addi %[[VAL_22]], %[[VAL_8]] : index
+// CHECK: %[[VAL_51:.*]] = addi %[[VAL_22]], %[[VAL_7]] : index
// CHECK: %[[VAL_52:.*]] = select %[[VAL_50]], %[[VAL_51]], %[[VAL_22]] : index
-// CHECK: %[[VAL_53:.*]] = addi %[[VAL_23]], %[[VAL_8]] : index
+// CHECK: %[[VAL_53:.*]] = addi %[[VAL_23]], %[[VAL_7]] : index
// CHECK: scf.yield %[[VAL_52]], %[[VAL_53]] : index, index
// CHECK: }
-// CHECK: scf.for %[[VAL_54:.*]] = %[[VAL_55:.*]]#1 to %[[VAL_4]] step %[[VAL_8]] {
-// CHECK: scf.for %[[VAL_56:.*]] = %[[VAL_7]] to %[[VAL_5]] step %[[VAL_8]] {
-// CHECK: %[[VAL_57:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_54]], %[[VAL_56]]] : memref<32x16xf32>
+// CHECK: scf.for %[[VAL_54:.*]] = %[[VAL_55:.*]]#1 to %[[VAL_3]] step %[[VAL_7]] {
+// CHECK: scf.for %[[VAL_56:.*]] = %[[VAL_6]] to %[[VAL_4]] step %[[VAL_7]] {
+// CHECK: %[[VAL_57:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_54]], %[[VAL_56]]] : memref<32x16xf32>
// CHECK: store %[[VAL_57]], %[[VAL_15]]{{\[}}%[[VAL_54]], %[[VAL_56]]] : memref<32x16xf32>
// CHECK: }
// CHECK: }
// CHECK-SAME: %[[VAL_0:.*0]]: tensor<32x16xf32>,
// CHECK-SAME: %[[VAL_1:.*1]]: tensor<32x16xf32>,
// CHECK-SAME: %[[VAL_2:.*2]]: tensor<32x16xf32>) -> tensor<32x16xf32> {
-// CHECK: %[[VAL_3:.*]] = constant 999 : index
-// CHECK: %[[VAL_4:.*]] = constant 0 : index
-// CHECK: %[[VAL_5:.*]] = constant 1 : index
-// CHECK: %[[VAL_6:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_7:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = alloca(%[[VAL_3]]) : memref<?xf32>
-// CHECK: %[[VAL_11:.*]] = alloca() : memref<32x16xf32>
-// CHECK: %[[VAL_12:.*]] = alloca() : memref<32x16xf32>
-// CHECK: %[[VAL_13:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref<?xindex>
-// CHECK: %[[VAL_14:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref<?xindex>
-// CHECK: scf.for %[[VAL_15:.*]] = %[[VAL_13]] to %[[VAL_14]] step %[[VAL_5]] {
-// CHECK: %[[VAL_16:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_15]]] : memref<?xindex>
-// CHECK: %[[VAL_17:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_15]]] : memref<?xindex>
-// CHECK: %[[VAL_18:.*]] = addi %[[VAL_15]], %[[VAL_5]] : index
-// CHECK: %[[VAL_19:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_18]]] : memref<?xindex>
-// CHECK: scf.for %[[VAL_20:.*]] = %[[VAL_17]] to %[[VAL_19]] step %[[VAL_5]] {
-// CHECK: %[[VAL_21:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_20]]] : memref<?xindex>
-// CHECK: %[[VAL_22:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_20]]] : memref<?xf32>
-// CHECK: %[[VAL_23:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_16]], %[[VAL_21]]] : memref<32x16xf32>
+// CHECK: %[[VAL_3:.*]] = constant 0 : index
+// CHECK: %[[VAL_4:.*]] = constant 1 : index
+// CHECK: %[[VAL_5:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_6:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16xf32> to memref<?xf32>
+// CHECK: %[[VAL_10:.*]] = tensor_to_memref %[[VAL_1]] : memref<32x16xf32>
+// CHECK: %[[VAL_11:.*]] = tensor_to_memref %[[VAL_2]] : memref<32x16xf32>
+// CHECK: %[[VAL_12:.*]] = alloc() : memref<32x16xf32>
+// CHECK: linalg.copy(%[[VAL_11]], %[[VAL_12]]) : memref<32x16xf32>, memref<32x16xf32>
+// CHECK: %[[VAL_13:.*]] = load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref<?xindex>
+// CHECK: %[[VAL_14:.*]] = load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref<?xindex>
+// CHECK: scf.for %[[VAL_15:.*]] = %[[VAL_13]] to %[[VAL_14]] step %[[VAL_4]] {
+// CHECK: %[[VAL_16:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_15]]] : memref<?xindex>
+// CHECK: %[[VAL_17:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_15]]] : memref<?xindex>
+// CHECK: %[[VAL_18:.*]] = addi %[[VAL_15]], %[[VAL_4]] : index
+// CHECK: %[[VAL_19:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_18]]] : memref<?xindex>
+// CHECK: scf.for %[[VAL_20:.*]] = %[[VAL_17]] to %[[VAL_19]] step %[[VAL_4]] {
+// CHECK: %[[VAL_21:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_20]]] : memref<?xindex>
+// CHECK: %[[VAL_22:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_20]]] : memref<?xf32>
+// CHECK: %[[VAL_23:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_16]], %[[VAL_21]]] : memref<32x16xf32>
// CHECK: %[[VAL_24:.*]] = mulf %[[VAL_22]], %[[VAL_23]] : f32
// CHECK: store %[[VAL_24]], %[[VAL_12]]{{\[}}%[[VAL_16]], %[[VAL_21]]] : memref<32x16xf32>
// CHECK: }
// CHECK-SAME: %[[VAL_0:.*0]]: tensor<32x16xf32>,
// CHECK-SAME: %[[VAL_1:.*1]]: tensor<32x16xf32>,
// CHECK-SAME: %[[VAL_2:.*2]]: tensor<32x16xf32>) -> tensor<32x16xf32> {
-// CHECK: %[[VAL_3:.*]] = constant 999 : index
-// CHECK: %[[VAL_4:.*]] = constant 0 : index
-// CHECK: %[[VAL_5:.*]] = constant 1 : index
-// CHECK: %[[VAL_6:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_7:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = alloca(%[[VAL_3]]) : memref<?xf32>
-// CHECK: %[[VAL_11:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_12:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_13:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_14:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_15:.*]] = alloca(%[[VAL_3]]) : memref<?xf32>
-// CHECK: %[[VAL_16:.*]] = alloca() : memref<32x16xf32>
-// CHECK: %[[VAL_17:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref<?xindex>
-// CHECK: %[[VAL_18:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref<?xindex>
-// CHECK: %[[VAL_19:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_4]]] : memref<?xindex>
-// CHECK: %[[VAL_20:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_5]]] : memref<?xindex>
+// CHECK: %[[VAL_3:.*]] = constant 0 : index
+// CHECK: %[[VAL_4:.*]] = constant 1 : index
+// CHECK: %[[VAL_5:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_6:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16xf32> to memref<?xf32>
+// CHECK: %[[VAL_10:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_12:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_13:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_14:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<32x16xf32> to memref<?xf32>
+// CHECK: %[[VAL_15:.*]] = tensor_to_memref %[[VAL_2]] : memref<32x16xf32>
+// CHECK: %[[VAL_16:.*]] = alloc() : memref<32x16xf32>
+// CHECK: linalg.copy(%[[VAL_15]], %[[VAL_16]]) : memref<32x16xf32>, memref<32x16xf32>
+// CHECK: %[[VAL_17:.*]] = load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref<?xindex>
+// CHECK: %[[VAL_18:.*]] = load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref<?xindex>
+// CHECK: %[[VAL_19:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_3]]] : memref<?xindex>
+// CHECK: %[[VAL_20:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK: %[[VAL_21:.*]]:2 = scf.while (%[[VAL_22:.*]] = %[[VAL_17]], %[[VAL_23:.*]] = %[[VAL_19]]) : (index, index) -> (index, index) {
// CHECK: %[[VAL_24:.*]] = cmpi ult, %[[VAL_22]], %[[VAL_18]] : index
// CHECK: %[[VAL_25:.*]] = cmpi ult, %[[VAL_23]], %[[VAL_20]] : index
// CHECK: scf.condition(%[[VAL_26]]) %[[VAL_22]], %[[VAL_23]] : index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_27:.*]]: index, %[[VAL_28:.*]]: index):
-// CHECK: %[[VAL_29:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_27]]] : memref<?xindex>
-// CHECK: %[[VAL_30:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_28]]] : memref<?xindex>
+// CHECK: %[[VAL_29:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_27]]] : memref<?xindex>
+// CHECK: %[[VAL_30:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_28]]] : memref<?xindex>
// CHECK: %[[VAL_31:.*]] = cmpi ult, %[[VAL_30]], %[[VAL_29]] : index
// CHECK: %[[VAL_32:.*]] = select %[[VAL_31]], %[[VAL_30]], %[[VAL_29]] : index
// CHECK: %[[VAL_33:.*]] = cmpi eq, %[[VAL_29]], %[[VAL_32]] : index
// CHECK: %[[VAL_34:.*]] = cmpi eq, %[[VAL_30]], %[[VAL_32]] : index
// CHECK: %[[VAL_35:.*]] = and %[[VAL_33]], %[[VAL_34]] : i1
// CHECK: scf.if %[[VAL_35]] {
-// CHECK: %[[VAL_36:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_27]]] : memref<?xindex>
-// CHECK: %[[VAL_37:.*]] = addi %[[VAL_27]], %[[VAL_5]] : index
-// CHECK: %[[VAL_38:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_37]]] : memref<?xindex>
-// CHECK: %[[VAL_39:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_28]]] : memref<?xindex>
-// CHECK: %[[VAL_40:.*]] = addi %[[VAL_28]], %[[VAL_5]] : index
-// CHECK: %[[VAL_41:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_40]]] : memref<?xindex>
+// CHECK: %[[VAL_36:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_27]]] : memref<?xindex>
+// CHECK: %[[VAL_37:.*]] = addi %[[VAL_27]], %[[VAL_4]] : index
+// CHECK: %[[VAL_38:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_37]]] : memref<?xindex>
+// CHECK: %[[VAL_39:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_28]]] : memref<?xindex>
+// CHECK: %[[VAL_40:.*]] = addi %[[VAL_28]], %[[VAL_4]] : index
+// CHECK: %[[VAL_41:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_40]]] : memref<?xindex>
// CHECK: %[[VAL_42:.*]]:2 = scf.while (%[[VAL_43:.*]] = %[[VAL_36]], %[[VAL_44:.*]] = %[[VAL_39]]) : (index, index) -> (index, index) {
// CHECK: %[[VAL_45:.*]] = cmpi ult, %[[VAL_43]], %[[VAL_38]] : index
// CHECK: %[[VAL_46:.*]] = cmpi ult, %[[VAL_44]], %[[VAL_41]] : index
// CHECK: scf.condition(%[[VAL_47]]) %[[VAL_43]], %[[VAL_44]] : index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_48:.*]]: index, %[[VAL_49:.*]]: index):
-// CHECK: %[[VAL_50:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_48]]] : memref<?xindex>
-// CHECK: %[[VAL_51:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_49]]] : memref<?xindex>
+// CHECK: %[[VAL_50:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_48]]] : memref<?xindex>
+// CHECK: %[[VAL_51:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_49]]] : memref<?xindex>
// CHECK: %[[VAL_52:.*]] = cmpi ult, %[[VAL_51]], %[[VAL_50]] : index
// CHECK: %[[VAL_53:.*]] = select %[[VAL_52]], %[[VAL_51]], %[[VAL_50]] : index
// CHECK: %[[VAL_54:.*]] = cmpi eq, %[[VAL_50]], %[[VAL_53]] : index
// CHECK: %[[VAL_55:.*]] = cmpi eq, %[[VAL_51]], %[[VAL_53]] : index
// CHECK: %[[VAL_56:.*]] = and %[[VAL_54]], %[[VAL_55]] : i1
// CHECK: scf.if %[[VAL_56]] {
-// CHECK: %[[VAL_57:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_48]]] : memref<?xf32>
-// CHECK: %[[VAL_58:.*]] = load %[[VAL_15]]{{\[}}%[[VAL_49]]] : memref<?xf32>
+// CHECK: %[[VAL_57:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_48]]] : memref<?xf32>
+// CHECK: %[[VAL_58:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_49]]] : memref<?xf32>
// CHECK: %[[VAL_59:.*]] = addf %[[VAL_57]], %[[VAL_58]] : f32
// CHECK: store %[[VAL_59]], %[[VAL_16]]{{\[}}%[[VAL_32]], %[[VAL_53]]] : memref<32x16xf32>
// CHECK: } else {
// CHECK: %[[VAL_60:.*]] = cmpi eq, %[[VAL_50]], %[[VAL_53]] : index
// CHECK: scf.if %[[VAL_60]] {
-// CHECK: %[[VAL_61:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_48]]] : memref<?xf32>
+// CHECK: %[[VAL_61:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_48]]] : memref<?xf32>
// CHECK: store %[[VAL_61]], %[[VAL_16]]{{\[}}%[[VAL_32]], %[[VAL_53]]] : memref<32x16xf32>
// CHECK: } else {
// CHECK: %[[VAL_62:.*]] = cmpi eq, %[[VAL_51]], %[[VAL_53]] : index
// CHECK: scf.if %[[VAL_62]] {
-// CHECK: %[[VAL_63:.*]] = load %[[VAL_15]]{{\[}}%[[VAL_49]]] : memref<?xf32>
+// CHECK: %[[VAL_63:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_49]]] : memref<?xf32>
// CHECK: store %[[VAL_63]], %[[VAL_16]]{{\[}}%[[VAL_32]], %[[VAL_53]]] : memref<32x16xf32>
// CHECK: } else {
// CHECK: }
// CHECK: }
// CHECK: }
// CHECK: %[[VAL_64:.*]] = cmpi eq, %[[VAL_50]], %[[VAL_53]] : index
-// CHECK: %[[VAL_65:.*]] = addi %[[VAL_48]], %[[VAL_5]] : index
+// CHECK: %[[VAL_65:.*]] = addi %[[VAL_48]], %[[VAL_4]] : index
// CHECK: %[[VAL_66:.*]] = select %[[VAL_64]], %[[VAL_65]], %[[VAL_48]] : index
// CHECK: %[[VAL_67:.*]] = cmpi eq, %[[VAL_51]], %[[VAL_53]] : index
-// CHECK: %[[VAL_68:.*]] = addi %[[VAL_49]], %[[VAL_5]] : index
+// CHECK: %[[VAL_68:.*]] = addi %[[VAL_49]], %[[VAL_4]] : index
// CHECK: %[[VAL_69:.*]] = select %[[VAL_67]], %[[VAL_68]], %[[VAL_49]] : index
// CHECK: scf.yield %[[VAL_66]], %[[VAL_69]] : index, index
// CHECK: }
-// CHECK: scf.for %[[VAL_70:.*]] = %[[VAL_71:.*]]#0 to %[[VAL_38]] step %[[VAL_5]] {
-// CHECK: %[[VAL_72:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_70]]] : memref<?xindex>
-// CHECK: %[[VAL_73:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_70]]] : memref<?xf32>
+// CHECK: scf.for %[[VAL_70:.*]] = %[[VAL_71:.*]]#0 to %[[VAL_38]] step %[[VAL_4]] {
+// CHECK: %[[VAL_72:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_70]]] : memref<?xindex>
+// CHECK: %[[VAL_73:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_70]]] : memref<?xf32>
// CHECK: store %[[VAL_73]], %[[VAL_16]]{{\[}}%[[VAL_32]], %[[VAL_72]]] : memref<32x16xf32>
// CHECK: }
-// CHECK: scf.for %[[VAL_74:.*]] = %[[VAL_75:.*]]#1 to %[[VAL_41]] step %[[VAL_5]] {
-// CHECK: %[[VAL_76:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_74]]] : memref<?xindex>
-// CHECK: %[[VAL_77:.*]] = load %[[VAL_15]]{{\[}}%[[VAL_74]]] : memref<?xf32>
+// CHECK: scf.for %[[VAL_74:.*]] = %[[VAL_75:.*]]#1 to %[[VAL_41]] step %[[VAL_4]] {
+// CHECK: %[[VAL_76:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_74]]] : memref<?xindex>
+// CHECK: %[[VAL_77:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_74]]] : memref<?xf32>
// CHECK: store %[[VAL_77]], %[[VAL_16]]{{\[}}%[[VAL_32]], %[[VAL_76]]] : memref<32x16xf32>
// CHECK: }
// CHECK: } else {
// CHECK: %[[VAL_78:.*]] = cmpi eq, %[[VAL_29]], %[[VAL_32]] : index
// CHECK: scf.if %[[VAL_78]] {
-// CHECK: %[[VAL_79:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_27]]] : memref<?xindex>
-// CHECK: %[[VAL_80:.*]] = addi %[[VAL_27]], %[[VAL_5]] : index
-// CHECK: %[[VAL_81:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_80]]] : memref<?xindex>
-// CHECK: scf.for %[[VAL_82:.*]] = %[[VAL_79]] to %[[VAL_81]] step %[[VAL_5]] {
-// CHECK: %[[VAL_83:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_82]]] : memref<?xindex>
-// CHECK: %[[VAL_84:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_82]]] : memref<?xf32>
+// CHECK: %[[VAL_79:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_27]]] : memref<?xindex>
+// CHECK: %[[VAL_80:.*]] = addi %[[VAL_27]], %[[VAL_4]] : index
+// CHECK: %[[VAL_81:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_80]]] : memref<?xindex>
+// CHECK: scf.for %[[VAL_82:.*]] = %[[VAL_79]] to %[[VAL_81]] step %[[VAL_4]] {
+// CHECK: %[[VAL_83:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_82]]] : memref<?xindex>
+// CHECK: %[[VAL_84:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_82]]] : memref<?xf32>
// CHECK: store %[[VAL_84]], %[[VAL_16]]{{\[}}%[[VAL_32]], %[[VAL_83]]] : memref<32x16xf32>
// CHECK: }
// CHECK: } else {
// CHECK: %[[VAL_85:.*]] = cmpi eq, %[[VAL_30]], %[[VAL_32]] : index
// CHECK: scf.if %[[VAL_85]] {
-// CHECK: %[[VAL_86:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_28]]] : memref<?xindex>
-// CHECK: %[[VAL_87:.*]] = addi %[[VAL_28]], %[[VAL_5]] : index
-// CHECK: %[[VAL_88:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_87]]] : memref<?xindex>
-// CHECK: scf.for %[[VAL_89:.*]] = %[[VAL_86]] to %[[VAL_88]] step %[[VAL_5]] {
-// CHECK: %[[VAL_90:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_89]]] : memref<?xindex>
-// CHECK: %[[VAL_91:.*]] = load %[[VAL_15]]{{\[}}%[[VAL_89]]] : memref<?xf32>
+// CHECK: %[[VAL_86:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_28]]] : memref<?xindex>
+// CHECK: %[[VAL_87:.*]] = addi %[[VAL_28]], %[[VAL_4]] : index
+// CHECK: %[[VAL_88:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_87]]] : memref<?xindex>
+// CHECK: scf.for %[[VAL_89:.*]] = %[[VAL_86]] to %[[VAL_88]] step %[[VAL_4]] {
+// CHECK: %[[VAL_90:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_89]]] : memref<?xindex>
+// CHECK: %[[VAL_91:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_89]]] : memref<?xf32>
// CHECK: store %[[VAL_91]], %[[VAL_16]]{{\[}}%[[VAL_32]], %[[VAL_90]]] : memref<32x16xf32>
// CHECK: }
// CHECK: } else {
// CHECK: }
// CHECK: }
// CHECK: %[[VAL_92:.*]] = cmpi eq, %[[VAL_29]], %[[VAL_32]] : index
-// CHECK: %[[VAL_93:.*]] = addi %[[VAL_27]], %[[VAL_5]] : index
+// CHECK: %[[VAL_93:.*]] = addi %[[VAL_27]], %[[VAL_4]] : index
// CHECK: %[[VAL_94:.*]] = select %[[VAL_92]], %[[VAL_93]], %[[VAL_27]] : index
// CHECK: %[[VAL_95:.*]] = cmpi eq, %[[VAL_30]], %[[VAL_32]] : index
-// CHECK: %[[VAL_96:.*]] = addi %[[VAL_28]], %[[VAL_5]] : index
+// CHECK: %[[VAL_96:.*]] = addi %[[VAL_28]], %[[VAL_4]] : index
// CHECK: %[[VAL_97:.*]] = select %[[VAL_95]], %[[VAL_96]], %[[VAL_28]] : index
// CHECK: scf.yield %[[VAL_94]], %[[VAL_97]] : index, index
// CHECK: }
-// CHECK: scf.for %[[VAL_98:.*]] = %[[VAL_99:.*]]#0 to %[[VAL_18]] step %[[VAL_5]] {
-// CHECK: %[[VAL_100:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_98]]] : memref<?xindex>
-// CHECK: %[[VAL_101:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_98]]] : memref<?xindex>
-// CHECK: %[[VAL_102:.*]] = addi %[[VAL_98]], %[[VAL_5]] : index
-// CHECK: %[[VAL_103:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_102]]] : memref<?xindex>
-// CHECK: scf.for %[[VAL_104:.*]] = %[[VAL_101]] to %[[VAL_103]] step %[[VAL_5]] {
-// CHECK: %[[VAL_105:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_104]]] : memref<?xindex>
-// CHECK: %[[VAL_106:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_104]]] : memref<?xf32>
+// CHECK: scf.for %[[VAL_98:.*]] = %[[VAL_99:.*]]#0 to %[[VAL_18]] step %[[VAL_4]] {
+// CHECK: %[[VAL_100:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_98]]] : memref<?xindex>
+// CHECK: %[[VAL_101:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_98]]] : memref<?xindex>
+// CHECK: %[[VAL_102:.*]] = addi %[[VAL_98]], %[[VAL_4]] : index
+// CHECK: %[[VAL_103:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_102]]] : memref<?xindex>
+// CHECK: scf.for %[[VAL_104:.*]] = %[[VAL_101]] to %[[VAL_103]] step %[[VAL_4]] {
+// CHECK: %[[VAL_105:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_104]]] : memref<?xindex>
+// CHECK: %[[VAL_106:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_104]]] : memref<?xf32>
// CHECK: store %[[VAL_106]], %[[VAL_16]]{{\[}}%[[VAL_100]], %[[VAL_105]]] : memref<32x16xf32>
// CHECK: }
// CHECK: }
-// CHECK: scf.for %[[VAL_107:.*]] = %[[VAL_108:.*]]#1 to %[[VAL_20]] step %[[VAL_5]] {
-// CHECK: %[[VAL_109:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_107]]] : memref<?xindex>
-// CHECK: %[[VAL_110:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_107]]] : memref<?xindex>
-// CHECK: %[[VAL_111:.*]] = addi %[[VAL_107]], %[[VAL_5]] : index
-// CHECK: %[[VAL_112:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_111]]] : memref<?xindex>
-// CHECK: scf.for %[[VAL_113:.*]] = %[[VAL_110]] to %[[VAL_112]] step %[[VAL_5]] {
-// CHECK: %[[VAL_114:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_113]]] : memref<?xindex>
-// CHECK: %[[VAL_115:.*]] = load %[[VAL_15]]{{\[}}%[[VAL_113]]] : memref<?xf32>
+// CHECK: scf.for %[[VAL_107:.*]] = %[[VAL_108:.*]]#1 to %[[VAL_20]] step %[[VAL_4]] {
+// CHECK: %[[VAL_109:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_107]]] : memref<?xindex>
+// CHECK: %[[VAL_110:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_107]]] : memref<?xindex>
+// CHECK: %[[VAL_111:.*]] = addi %[[VAL_107]], %[[VAL_4]] : index
+// CHECK: %[[VAL_112:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_111]]] : memref<?xindex>
+// CHECK: scf.for %[[VAL_113:.*]] = %[[VAL_110]] to %[[VAL_112]] step %[[VAL_4]] {
+// CHECK: %[[VAL_114:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_113]]] : memref<?xindex>
+// CHECK: %[[VAL_115:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_113]]] : memref<?xf32>
// CHECK: store %[[VAL_115]], %[[VAL_16]]{{\[}}%[[VAL_109]], %[[VAL_114]]] : memref<32x16xf32>
// CHECK: }
// CHECK: }
// CHECK-SAME: %[[VAL_0:.*0]]: tensor<32x16xf32>,
// CHECK-SAME: %[[VAL_1:.*1]]: tensor<32x16xf32>,
// CHECK-SAME: %[[VAL_2:.*2]]: tensor<32x16xf32>) -> tensor<32x16xf32> {
-// CHECK: %[[VAL_3:.*]] = constant 999 : index
-// CHECK: %[[VAL_4:.*]] = constant 0 : index
-// CHECK: %[[VAL_5:.*]] = constant 1 : index
-// CHECK: %[[VAL_6:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_7:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = alloca(%[[VAL_3]]) : memref<?xf32>
-// CHECK: %[[VAL_11:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_12:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_13:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_14:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_15:.*]] = alloca(%[[VAL_3]]) : memref<?xf32>
-// CHECK: %[[VAL_16:.*]] = alloca() : memref<32x16xf32>
-// CHECK: %[[VAL_17:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref<?xindex>
-// CHECK: %[[VAL_18:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref<?xindex>
-// CHECK: %[[VAL_19:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_4]]] : memref<?xindex>
-// CHECK: %[[VAL_20:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_5]]] : memref<?xindex>
+// CHECK: %[[VAL_3:.*]] = constant 0 : index
+// CHECK: %[[VAL_4:.*]] = constant 1 : index
+// CHECK: %[[VAL_5:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_6:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16xf32> to memref<?xf32>
+// CHECK: %[[VAL_10:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_12:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_13:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_14:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<32x16xf32> to memref<?xf32>
+// CHECK: %[[VAL_15:.*]] = tensor_to_memref %[[VAL_2]] : memref<32x16xf32>
+// CHECK: %[[VAL_16:.*]] = alloc() : memref<32x16xf32>
+// CHECK: linalg.copy(%[[VAL_15]], %[[VAL_16]]) : memref<32x16xf32>, memref<32x16xf32>
+// CHECK: %[[VAL_17:.*]] = load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref<?xindex>
+// CHECK: %[[VAL_18:.*]] = load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref<?xindex>
+// CHECK: %[[VAL_19:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_3]]] : memref<?xindex>
+// CHECK: %[[VAL_20:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK: %[[VAL_21:.*]]:2 = scf.while (%[[VAL_22:.*]] = %[[VAL_17]], %[[VAL_23:.*]] = %[[VAL_19]]) : (index, index) -> (index, index) {
// CHECK: %[[VAL_24:.*]] = cmpi ult, %[[VAL_22]], %[[VAL_18]] : index
// CHECK: %[[VAL_25:.*]] = cmpi ult, %[[VAL_23]], %[[VAL_20]] : index
// CHECK: scf.condition(%[[VAL_26]]) %[[VAL_22]], %[[VAL_23]] : index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_27:.*]]: index, %[[VAL_28:.*]]: index):
-// CHECK: %[[VAL_29:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_27]]] : memref<?xindex>
-// CHECK: %[[VAL_30:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_28]]] : memref<?xindex>
+// CHECK: %[[VAL_29:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_27]]] : memref<?xindex>
+// CHECK: %[[VAL_30:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_28]]] : memref<?xindex>
// CHECK: %[[VAL_31:.*]] = cmpi ult, %[[VAL_30]], %[[VAL_29]] : index
// CHECK: %[[VAL_32:.*]] = select %[[VAL_31]], %[[VAL_30]], %[[VAL_29]] : index
// CHECK: %[[VAL_33:.*]] = cmpi eq, %[[VAL_29]], %[[VAL_32]] : index
// CHECK: %[[VAL_34:.*]] = cmpi eq, %[[VAL_30]], %[[VAL_32]] : index
// CHECK: %[[VAL_35:.*]] = and %[[VAL_33]], %[[VAL_34]] : i1
// CHECK: scf.if %[[VAL_35]] {
-// CHECK: %[[VAL_36:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_27]]] : memref<?xindex>
-// CHECK: %[[VAL_37:.*]] = addi %[[VAL_27]], %[[VAL_5]] : index
-// CHECK: %[[VAL_38:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_37]]] : memref<?xindex>
-// CHECK: %[[VAL_39:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_28]]] : memref<?xindex>
-// CHECK: %[[VAL_40:.*]] = addi %[[VAL_28]], %[[VAL_5]] : index
-// CHECK: %[[VAL_41:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_40]]] : memref<?xindex>
+// CHECK: %[[VAL_36:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_27]]] : memref<?xindex>
+// CHECK: %[[VAL_37:.*]] = addi %[[VAL_27]], %[[VAL_4]] : index
+// CHECK: %[[VAL_38:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_37]]] : memref<?xindex>
+// CHECK: %[[VAL_39:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_28]]] : memref<?xindex>
+// CHECK: %[[VAL_40:.*]] = addi %[[VAL_28]], %[[VAL_4]] : index
+// CHECK: %[[VAL_41:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_40]]] : memref<?xindex>
// CHECK: %[[VAL_42:.*]]:2 = scf.while (%[[VAL_43:.*]] = %[[VAL_36]], %[[VAL_44:.*]] = %[[VAL_39]]) : (index, index) -> (index, index) {
// CHECK: %[[VAL_45:.*]] = cmpi ult, %[[VAL_43]], %[[VAL_38]] : index
// CHECK: %[[VAL_46:.*]] = cmpi ult, %[[VAL_44]], %[[VAL_41]] : index
// CHECK: scf.condition(%[[VAL_47]]) %[[VAL_43]], %[[VAL_44]] : index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_48:.*]]: index, %[[VAL_49:.*]]: index):
-// CHECK: %[[VAL_50:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_48]]] : memref<?xindex>
-// CHECK: %[[VAL_51:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_49]]] : memref<?xindex>
+// CHECK: %[[VAL_50:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_48]]] : memref<?xindex>
+// CHECK: %[[VAL_51:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_49]]] : memref<?xindex>
// CHECK: %[[VAL_52:.*]] = cmpi ult, %[[VAL_51]], %[[VAL_50]] : index
// CHECK: %[[VAL_53:.*]] = select %[[VAL_52]], %[[VAL_51]], %[[VAL_50]] : index
// CHECK: %[[VAL_54:.*]] = cmpi eq, %[[VAL_50]], %[[VAL_53]] : index
// CHECK: %[[VAL_55:.*]] = cmpi eq, %[[VAL_51]], %[[VAL_53]] : index
// CHECK: %[[VAL_56:.*]] = and %[[VAL_54]], %[[VAL_55]] : i1
// CHECK: scf.if %[[VAL_56]] {
-// CHECK: %[[VAL_57:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_48]]] : memref<?xf32>
-// CHECK: %[[VAL_58:.*]] = load %[[VAL_15]]{{\[}}%[[VAL_49]]] : memref<?xf32>
+// CHECK: %[[VAL_57:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_48]]] : memref<?xf32>
+// CHECK: %[[VAL_58:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_49]]] : memref<?xf32>
// CHECK: %[[VAL_59:.*]] = mulf %[[VAL_57]], %[[VAL_58]] : f32
// CHECK: store %[[VAL_59]], %[[VAL_16]]{{\[}}%[[VAL_32]], %[[VAL_53]]] : memref<32x16xf32>
// CHECK: } else {
// CHECK: }
// CHECK: %[[VAL_60:.*]] = cmpi eq, %[[VAL_50]], %[[VAL_53]] : index
-// CHECK: %[[VAL_61:.*]] = addi %[[VAL_48]], %[[VAL_5]] : index
+// CHECK: %[[VAL_61:.*]] = addi %[[VAL_48]], %[[VAL_4]] : index
// CHECK: %[[VAL_62:.*]] = select %[[VAL_60]], %[[VAL_61]], %[[VAL_48]] : index
// CHECK: %[[VAL_63:.*]] = cmpi eq, %[[VAL_51]], %[[VAL_53]] : index
-// CHECK: %[[VAL_64:.*]] = addi %[[VAL_49]], %[[VAL_5]] : index
+// CHECK: %[[VAL_64:.*]] = addi %[[VAL_49]], %[[VAL_4]] : index
// CHECK: %[[VAL_65:.*]] = select %[[VAL_63]], %[[VAL_64]], %[[VAL_49]] : index
// CHECK: scf.yield %[[VAL_62]], %[[VAL_65]] : index, index
// CHECK: }
// CHECK: } else {
// CHECK: }
// CHECK: %[[VAL_66:.*]] = cmpi eq, %[[VAL_29]], %[[VAL_32]] : index
-// CHECK: %[[VAL_67:.*]] = addi %[[VAL_27]], %[[VAL_5]] : index
+// CHECK: %[[VAL_67:.*]] = addi %[[VAL_27]], %[[VAL_4]] : index
// CHECK: %[[VAL_68:.*]] = select %[[VAL_66]], %[[VAL_67]], %[[VAL_27]] : index
// CHECK: %[[VAL_69:.*]] = cmpi eq, %[[VAL_30]], %[[VAL_32]] : index
-// CHECK: %[[VAL_70:.*]] = addi %[[VAL_28]], %[[VAL_5]] : index
+// CHECK: %[[VAL_70:.*]] = addi %[[VAL_28]], %[[VAL_4]] : index
// CHECK: %[[VAL_71:.*]] = select %[[VAL_69]], %[[VAL_70]], %[[VAL_28]] : index
// CHECK: scf.yield %[[VAL_68]], %[[VAL_71]] : index, index
// CHECK: }
// CHECK-SAME: %[[VAL_0:.*0]]: tensor<32x16xf32>,
// CHECK-SAME: %[[VAL_1:.*1]]: tensor<32x16xf32>,
// CHECK-SAME: %[[VAL_2:.*2]]: tensor<32x16xf32>) -> tensor<32x16xf32> {
-// CHECK: %[[VAL_3:.*]] = constant 999 : index
-// CHECK: %[[VAL_4:.*]] = constant 0 : index
-// CHECK: %[[VAL_5:.*]] = constant 1 : index
-// CHECK: %[[VAL_6:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_7:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = alloca(%[[VAL_3]]) : memref<?xf32>
-// CHECK: %[[VAL_11:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_12:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_13:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_14:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_15:.*]] = alloca(%[[VAL_3]]) : memref<?xf32>
-// CHECK: %[[VAL_16:.*]] = alloca() : memref<32x16xf32>
-// CHECK: %[[VAL_17:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref<?xindex>
-// CHECK: %[[VAL_18:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref<?xindex>
-// CHECK: %[[VAL_19:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_4]]] : memref<?xindex>
-// CHECK: %[[VAL_20:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_5]]] : memref<?xindex>
+// CHECK: %[[VAL_3:.*]] = constant 0 : index
+// CHECK: %[[VAL_4:.*]] = constant 1 : index
+// CHECK: %[[VAL_5:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_6:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16xf32> to memref<?xf32>
+// CHECK: %[[VAL_10:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_12:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_13:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_14:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<32x16xf32> to memref<?xf32>
+// CHECK: %[[VAL_15:.*]] = tensor_to_memref %[[VAL_2]] : memref<32x16xf32>
+// CHECK: %[[VAL_16:.*]] = alloc() : memref<32x16xf32>
+// CHECK: linalg.copy(%[[VAL_15]], %[[VAL_16]]) : memref<32x16xf32>, memref<32x16xf32>
+// CHECK: %[[VAL_17:.*]] = load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref<?xindex>
+// CHECK: %[[VAL_18:.*]] = load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref<?xindex>
+// CHECK: %[[VAL_19:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_3]]] : memref<?xindex>
+// CHECK: %[[VAL_20:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK: %[[VAL_21:.*]]:2 = scf.while (%[[VAL_22:.*]] = %[[VAL_17]], %[[VAL_23:.*]] = %[[VAL_19]]) : (index, index) -> (index, index) {
// CHECK: %[[VAL_24:.*]] = cmpi ult, %[[VAL_22]], %[[VAL_18]] : index
// CHECK: %[[VAL_25:.*]] = cmpi ult, %[[VAL_23]], %[[VAL_20]] : index
// CHECK: scf.condition(%[[VAL_26]]) %[[VAL_22]], %[[VAL_23]] : index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_27:.*]]: index, %[[VAL_28:.*]]: index):
-// CHECK: %[[VAL_29:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_27]]] : memref<?xindex>
-// CHECK: %[[VAL_30:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_28]]] : memref<?xindex>
+// CHECK: %[[VAL_29:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_27]]] : memref<?xindex>
+// CHECK: %[[VAL_30:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_28]]] : memref<?xindex>
// CHECK: %[[VAL_31:.*]] = cmpi ult, %[[VAL_30]], %[[VAL_29]] : index
// CHECK: %[[VAL_32:.*]] = select %[[VAL_31]], %[[VAL_30]], %[[VAL_29]] : index
// CHECK: %[[VAL_33:.*]] = cmpi eq, %[[VAL_29]], %[[VAL_32]] : index
// CHECK: %[[VAL_34:.*]] = cmpi eq, %[[VAL_30]], %[[VAL_32]] : index
// CHECK: %[[VAL_35:.*]] = and %[[VAL_33]], %[[VAL_34]] : i1
// CHECK: scf.if %[[VAL_35]] {
-// CHECK: %[[VAL_36:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_27]]] : memref<?xindex>
-// CHECK: %[[VAL_37:.*]] = addi %[[VAL_27]], %[[VAL_5]] : index
-// CHECK: %[[VAL_38:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_37]]] : memref<?xindex>
-// CHECK: %[[VAL_39:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_28]]] : memref<?xindex>
-// CHECK: %[[VAL_40:.*]] = addi %[[VAL_28]], %[[VAL_5]] : index
-// CHECK: %[[VAL_41:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_40]]] : memref<?xindex>
+// CHECK: %[[VAL_36:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_27]]] : memref<?xindex>
+// CHECK: %[[VAL_37:.*]] = addi %[[VAL_27]], %[[VAL_4]] : index
+// CHECK: %[[VAL_38:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_37]]] : memref<?xindex>
+// CHECK: %[[VAL_39:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_28]]] : memref<?xindex>
+// CHECK: %[[VAL_40:.*]] = addi %[[VAL_28]], %[[VAL_4]] : index
+// CHECK: %[[VAL_41:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_40]]] : memref<?xindex>
// CHECK: %[[VAL_42:.*]]:2 = scf.while (%[[VAL_43:.*]] = %[[VAL_36]], %[[VAL_44:.*]] = %[[VAL_39]]) : (index, index) -> (index, index) {
// CHECK: %[[VAL_45:.*]] = cmpi ult, %[[VAL_43]], %[[VAL_38]] : index
// CHECK: %[[VAL_46:.*]] = cmpi ult, %[[VAL_44]], %[[VAL_41]] : index
// CHECK: scf.condition(%[[VAL_47]]) %[[VAL_43]], %[[VAL_44]] : index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_48:.*]]: index, %[[VAL_49:.*]]: index):
-// CHECK: %[[VAL_50:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_48]]] : memref<?xindex>
-// CHECK: %[[VAL_51:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_49]]] : memref<?xindex>
+// CHECK: %[[VAL_50:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_48]]] : memref<?xindex>
+// CHECK: %[[VAL_51:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_49]]] : memref<?xindex>
// CHECK: %[[VAL_52:.*]] = cmpi ult, %[[VAL_51]], %[[VAL_50]] : index
// CHECK: %[[VAL_53:.*]] = select %[[VAL_52]], %[[VAL_51]], %[[VAL_50]] : index
// CHECK: %[[VAL_54:.*]] = cmpi eq, %[[VAL_50]], %[[VAL_53]] : index
// CHECK: %[[VAL_55:.*]] = cmpi eq, %[[VAL_51]], %[[VAL_53]] : index
// CHECK: %[[VAL_56:.*]] = and %[[VAL_54]], %[[VAL_55]] : i1
// CHECK: scf.if %[[VAL_56]] {
-// CHECK: %[[VAL_57:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_48]]] : memref<?xf32>
-// CHECK: %[[VAL_58:.*]] = load %[[VAL_15]]{{\[}}%[[VAL_49]]] : memref<?xf32>
+// CHECK: %[[VAL_57:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_48]]] : memref<?xf32>
+// CHECK: %[[VAL_58:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_49]]] : memref<?xf32>
// CHECK: %[[VAL_59:.*]] = addf %[[VAL_57]], %[[VAL_58]] : f32
// CHECK: store %[[VAL_59]], %[[VAL_16]]{{\[}}%[[VAL_32]], %[[VAL_53]]] : memref<32x16xf32>
// CHECK: } else {
// CHECK: %[[VAL_60:.*]] = cmpi eq, %[[VAL_50]], %[[VAL_53]] : index
// CHECK: scf.if %[[VAL_60]] {
-// CHECK: %[[VAL_61:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_48]]] : memref<?xf32>
+// CHECK: %[[VAL_61:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_48]]] : memref<?xf32>
// CHECK: store %[[VAL_61]], %[[VAL_16]]{{\[}}%[[VAL_32]], %[[VAL_53]]] : memref<32x16xf32>
// CHECK: } else {
// CHECK: %[[VAL_62:.*]] = cmpi eq, %[[VAL_51]], %[[VAL_53]] : index
// CHECK: scf.if %[[VAL_62]] {
-// CHECK: %[[VAL_63:.*]] = load %[[VAL_15]]{{\[}}%[[VAL_49]]] : memref<?xf32>
+// CHECK: %[[VAL_63:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_49]]] : memref<?xf32>
// CHECK: store %[[VAL_63]], %[[VAL_16]]{{\[}}%[[VAL_32]], %[[VAL_53]]] : memref<32x16xf32>
// CHECK: } else {
// CHECK: }
// CHECK: }
// CHECK: }
// CHECK: %[[VAL_64:.*]] = cmpi eq, %[[VAL_50]], %[[VAL_53]] : index
-// CHECK: %[[VAL_65:.*]] = addi %[[VAL_48]], %[[VAL_5]] : index
+// CHECK: %[[VAL_65:.*]] = addi %[[VAL_48]], %[[VAL_4]] : index
// CHECK: %[[VAL_66:.*]] = select %[[VAL_64]], %[[VAL_65]], %[[VAL_48]] : index
// CHECK: %[[VAL_67:.*]] = cmpi eq, %[[VAL_51]], %[[VAL_53]] : index
-// CHECK: %[[VAL_68:.*]] = addi %[[VAL_49]], %[[VAL_5]] : index
+// CHECK: %[[VAL_68:.*]] = addi %[[VAL_49]], %[[VAL_4]] : index
// CHECK: %[[VAL_69:.*]] = select %[[VAL_67]], %[[VAL_68]], %[[VAL_49]] : index
// CHECK: scf.yield %[[VAL_66]], %[[VAL_69]] : index, index
// CHECK: }
-// CHECK: scf.for %[[VAL_70:.*]] = %[[VAL_71:.*]]#0 to %[[VAL_38]] step %[[VAL_5]] {
-// CHECK: %[[VAL_72:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_70]]] : memref<?xindex>
-// CHECK: %[[VAL_73:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_70]]] : memref<?xf32>
+// CHECK: scf.for %[[VAL_70:.*]] = %[[VAL_71:.*]]#0 to %[[VAL_38]] step %[[VAL_4]] {
+// CHECK: %[[VAL_72:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_70]]] : memref<?xindex>
+// CHECK: %[[VAL_73:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_70]]] : memref<?xf32>
// CHECK: store %[[VAL_73]], %[[VAL_16]]{{\[}}%[[VAL_32]], %[[VAL_72]]] : memref<32x16xf32>
// CHECK: }
-// CHECK: scf.for %[[VAL_74:.*]] = %[[VAL_75:.*]]#1 to %[[VAL_41]] step %[[VAL_5]] {
-// CHECK: %[[VAL_76:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_74]]] : memref<?xindex>
-// CHECK: %[[VAL_77:.*]] = load %[[VAL_15]]{{\[}}%[[VAL_74]]] : memref<?xf32>
+// CHECK: scf.for %[[VAL_74:.*]] = %[[VAL_75:.*]]#1 to %[[VAL_41]] step %[[VAL_4]] {
+// CHECK: %[[VAL_76:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_74]]] : memref<?xindex>
+// CHECK: %[[VAL_77:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_74]]] : memref<?xf32>
// CHECK: store %[[VAL_77]], %[[VAL_16]]{{\[}}%[[VAL_32]], %[[VAL_76]]] : memref<32x16xf32>
// CHECK: }
// CHECK: } else {
// CHECK: %[[VAL_78:.*]] = cmpi eq, %[[VAL_29]], %[[VAL_32]] : index
// CHECK: scf.if %[[VAL_78]] {
-// CHECK: %[[VAL_79:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_27]]] : memref<?xindex>
-// CHECK: %[[VAL_80:.*]] = addi %[[VAL_27]], %[[VAL_5]] : index
-// CHECK: %[[VAL_81:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_80]]] : memref<?xindex>
-// CHECK: scf.for %[[VAL_82:.*]] = %[[VAL_79]] to %[[VAL_81]] step %[[VAL_5]] {
-// CHECK: %[[VAL_83:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_82]]] : memref<?xindex>
-// CHECK: %[[VAL_84:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_82]]] : memref<?xf32>
+// CHECK: %[[VAL_79:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_27]]] : memref<?xindex>
+// CHECK: %[[VAL_80:.*]] = addi %[[VAL_27]], %[[VAL_4]] : index
+// CHECK: %[[VAL_81:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_80]]] : memref<?xindex>
+// CHECK: scf.for %[[VAL_82:.*]] = %[[VAL_79]] to %[[VAL_81]] step %[[VAL_4]] {
+// CHECK: %[[VAL_83:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_82]]] : memref<?xindex>
+// CHECK: %[[VAL_84:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_82]]] : memref<?xf32>
// CHECK: store %[[VAL_84]], %[[VAL_16]]{{\[}}%[[VAL_32]], %[[VAL_83]]] : memref<32x16xf32>
// CHECK: }
// CHECK: } else {
// CHECK: %[[VAL_85:.*]] = cmpi eq, %[[VAL_30]], %[[VAL_32]] : index
// CHECK: scf.if %[[VAL_85]] {
-// CHECK: %[[VAL_86:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_28]]] : memref<?xindex>
-// CHECK: %[[VAL_87:.*]] = addi %[[VAL_28]], %[[VAL_5]] : index
-// CHECK: %[[VAL_88:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_87]]] : memref<?xindex>
-// CHECK: scf.for %[[VAL_89:.*]] = %[[VAL_86]] to %[[VAL_88]] step %[[VAL_5]] {
-// CHECK: %[[VAL_90:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_89]]] : memref<?xindex>
-// CHECK: %[[VAL_91:.*]] = load %[[VAL_15]]{{\[}}%[[VAL_89]]] : memref<?xf32>
+// CHECK: %[[VAL_86:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_28]]] : memref<?xindex>
+// CHECK: %[[VAL_87:.*]] = addi %[[VAL_28]], %[[VAL_4]] : index
+// CHECK: %[[VAL_88:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_87]]] : memref<?xindex>
+// CHECK: scf.for %[[VAL_89:.*]] = %[[VAL_86]] to %[[VAL_88]] step %[[VAL_4]] {
+// CHECK: %[[VAL_90:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_89]]] : memref<?xindex>
+// CHECK: %[[VAL_91:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_89]]] : memref<?xf32>
// CHECK: store %[[VAL_91]], %[[VAL_16]]{{\[}}%[[VAL_32]], %[[VAL_90]]] : memref<32x16xf32>
// CHECK: }
// CHECK: } else {
// CHECK: }
// CHECK: }
// CHECK: %[[VAL_92:.*]] = cmpi eq, %[[VAL_29]], %[[VAL_32]] : index
-// CHECK: %[[VAL_93:.*]] = addi %[[VAL_27]], %[[VAL_5]] : index
+// CHECK: %[[VAL_93:.*]] = addi %[[VAL_27]], %[[VAL_4]] : index
// CHECK: %[[VAL_94:.*]] = select %[[VAL_92]], %[[VAL_93]], %[[VAL_27]] : index
// CHECK: %[[VAL_95:.*]] = cmpi eq, %[[VAL_30]], %[[VAL_32]] : index
-// CHECK: %[[VAL_96:.*]] = addi %[[VAL_28]], %[[VAL_5]] : index
+// CHECK: %[[VAL_96:.*]] = addi %[[VAL_28]], %[[VAL_4]] : index
// CHECK: %[[VAL_97:.*]] = select %[[VAL_95]], %[[VAL_96]], %[[VAL_28]] : index
// CHECK: scf.yield %[[VAL_94]], %[[VAL_97]] : index, index
// CHECK: }
-// CHECK: scf.for %[[VAL_98:.*]] = %[[VAL_99:.*]]#0 to %[[VAL_18]] step %[[VAL_5]] {
-// CHECK: %[[VAL_100:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_98]]] : memref<?xindex>
-// CHECK: %[[VAL_101:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_98]]] : memref<?xindex>
-// CHECK: %[[VAL_102:.*]] = addi %[[VAL_98]], %[[VAL_5]] : index
-// CHECK: %[[VAL_103:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_102]]] : memref<?xindex>
-// CHECK: scf.for %[[VAL_104:.*]] = %[[VAL_101]] to %[[VAL_103]] step %[[VAL_5]] {
-// CHECK: %[[VAL_105:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_104]]] : memref<?xindex>
-// CHECK: %[[VAL_106:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_104]]] : memref<?xf32>
+// CHECK: scf.for %[[VAL_98:.*]] = %[[VAL_99:.*]]#0 to %[[VAL_18]] step %[[VAL_4]] {
+// CHECK: %[[VAL_100:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_98]]] : memref<?xindex>
+// CHECK: %[[VAL_101:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_98]]] : memref<?xindex>
+// CHECK: %[[VAL_102:.*]] = addi %[[VAL_98]], %[[VAL_4]] : index
+// CHECK: %[[VAL_103:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_102]]] : memref<?xindex>
+// CHECK: scf.for %[[VAL_104:.*]] = %[[VAL_101]] to %[[VAL_103]] step %[[VAL_4]] {
+// CHECK: %[[VAL_105:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_104]]] : memref<?xindex>
+// CHECK: %[[VAL_106:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_104]]] : memref<?xf32>
// CHECK: store %[[VAL_106]], %[[VAL_16]]{{\[}}%[[VAL_100]], %[[VAL_105]]] : memref<32x16xf32>
// CHECK: }
// CHECK: }
-// CHECK: scf.for %[[VAL_107:.*]] = %[[VAL_108:.*]]#1 to %[[VAL_20]] step %[[VAL_5]] {
-// CHECK: %[[VAL_109:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_107]]] : memref<?xindex>
-// CHECK: %[[VAL_110:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_107]]] : memref<?xindex>
-// CHECK: %[[VAL_111:.*]] = addi %[[VAL_107]], %[[VAL_5]] : index
-// CHECK: %[[VAL_112:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_111]]] : memref<?xindex>
-// CHECK: scf.for %[[VAL_113:.*]] = %[[VAL_110]] to %[[VAL_112]] step %[[VAL_5]] {
-// CHECK: %[[VAL_114:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_113]]] : memref<?xindex>
-// CHECK: %[[VAL_115:.*]] = load %[[VAL_15]]{{\[}}%[[VAL_113]]] : memref<?xf32>
+// CHECK: scf.for %[[VAL_107:.*]] = %[[VAL_108:.*]]#1 to %[[VAL_20]] step %[[VAL_4]] {
+// CHECK: %[[VAL_109:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_107]]] : memref<?xindex>
+// CHECK: %[[VAL_110:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_107]]] : memref<?xindex>
+// CHECK: %[[VAL_111:.*]] = addi %[[VAL_107]], %[[VAL_4]] : index
+// CHECK: %[[VAL_112:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_111]]] : memref<?xindex>
+// CHECK: scf.for %[[VAL_113:.*]] = %[[VAL_110]] to %[[VAL_112]] step %[[VAL_4]] {
+// CHECK: %[[VAL_114:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_113]]] : memref<?xindex>
+// CHECK: %[[VAL_115:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_113]]] : memref<?xf32>
// CHECK: store %[[VAL_115]], %[[VAL_16]]{{\[}}%[[VAL_109]], %[[VAL_114]]] : memref<32x16xf32>
// CHECK: }
// CHECK: }
// CHECK-SAME: %[[VAL_0:.*0]]: tensor<32x16xf32>,
// CHECK-SAME: %[[VAL_1:.*1]]: tensor<32x16xf32>,
// CHECK-SAME: %[[VAL_2:.*2]]: tensor<32x16xf32>) -> tensor<32x16xf32> {
-// CHECK: %[[VAL_3:.*]] = constant 999 : index
-// CHECK: %[[VAL_4:.*]] = constant 0 : index
-// CHECK: %[[VAL_5:.*]] = constant 1 : index
-// CHECK: %[[VAL_6:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_7:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = alloca(%[[VAL_3]]) : memref<?xf32>
-// CHECK: %[[VAL_11:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_12:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_13:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_14:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_15:.*]] = alloca(%[[VAL_3]]) : memref<?xf32>
-// CHECK: %[[VAL_16:.*]] = alloca() : memref<32x16xf32>
-// CHECK: %[[VAL_17:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref<?xindex>
-// CHECK: %[[VAL_18:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref<?xindex>
-// CHECK: %[[VAL_19:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_4]]] : memref<?xindex>
-// CHECK: %[[VAL_20:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_5]]] : memref<?xindex>
+// CHECK: %[[VAL_3:.*]] = constant 0 : index
+// CHECK: %[[VAL_4:.*]] = constant 1 : index
+// CHECK: %[[VAL_5:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_6:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16xf32> to memref<?xf32>
+// CHECK: %[[VAL_10:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_12:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_13:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_14:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<32x16xf32> to memref<?xf32>
+// CHECK: %[[VAL_15:.*]] = tensor_to_memref %[[VAL_2]] : memref<32x16xf32>
+// CHECK: %[[VAL_16:.*]] = alloc() : memref<32x16xf32>
+// CHECK: linalg.copy(%[[VAL_15]], %[[VAL_16]]) : memref<32x16xf32>, memref<32x16xf32>
+// CHECK: %[[VAL_17:.*]] = load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref<?xindex>
+// CHECK: %[[VAL_18:.*]] = load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref<?xindex>
+// CHECK: %[[VAL_19:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_3]]] : memref<?xindex>
+// CHECK: %[[VAL_20:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK: %[[VAL_21:.*]]:2 = scf.while (%[[VAL_22:.*]] = %[[VAL_17]], %[[VAL_23:.*]] = %[[VAL_19]]) : (index, index) -> (index, index) {
// CHECK: %[[VAL_24:.*]] = cmpi ult, %[[VAL_22]], %[[VAL_18]] : index
// CHECK: %[[VAL_25:.*]] = cmpi ult, %[[VAL_23]], %[[VAL_20]] : index
// CHECK: scf.condition(%[[VAL_26]]) %[[VAL_22]], %[[VAL_23]] : index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_27:.*]]: index, %[[VAL_28:.*]]: index):
-// CHECK: %[[VAL_29:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_27]]] : memref<?xindex>
-// CHECK: %[[VAL_30:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_28]]] : memref<?xindex>
+// CHECK: %[[VAL_29:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_27]]] : memref<?xindex>
+// CHECK: %[[VAL_30:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_28]]] : memref<?xindex>
// CHECK: %[[VAL_31:.*]] = cmpi ult, %[[VAL_30]], %[[VAL_29]] : index
// CHECK: %[[VAL_32:.*]] = select %[[VAL_31]], %[[VAL_30]], %[[VAL_29]] : index
// CHECK: %[[VAL_33:.*]] = cmpi eq, %[[VAL_29]], %[[VAL_32]] : index
// CHECK: %[[VAL_34:.*]] = cmpi eq, %[[VAL_30]], %[[VAL_32]] : index
// CHECK: %[[VAL_35:.*]] = and %[[VAL_33]], %[[VAL_34]] : i1
// CHECK: scf.if %[[VAL_35]] {
-// CHECK: %[[VAL_36:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_27]]] : memref<?xindex>
-// CHECK: %[[VAL_37:.*]] = addi %[[VAL_27]], %[[VAL_5]] : index
-// CHECK: %[[VAL_38:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_37]]] : memref<?xindex>
-// CHECK: %[[VAL_39:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_28]]] : memref<?xindex>
-// CHECK: %[[VAL_40:.*]] = addi %[[VAL_28]], %[[VAL_5]] : index
-// CHECK: %[[VAL_41:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_40]]] : memref<?xindex>
+// CHECK: %[[VAL_36:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_27]]] : memref<?xindex>
+// CHECK: %[[VAL_37:.*]] = addi %[[VAL_27]], %[[VAL_4]] : index
+// CHECK: %[[VAL_38:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_37]]] : memref<?xindex>
+// CHECK: %[[VAL_39:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_28]]] : memref<?xindex>
+// CHECK: %[[VAL_40:.*]] = addi %[[VAL_28]], %[[VAL_4]] : index
+// CHECK: %[[VAL_41:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_40]]] : memref<?xindex>
// CHECK: %[[VAL_42:.*]]:2 = scf.while (%[[VAL_43:.*]] = %[[VAL_36]], %[[VAL_44:.*]] = %[[VAL_39]]) : (index, index) -> (index, index) {
// CHECK: %[[VAL_45:.*]] = cmpi ult, %[[VAL_43]], %[[VAL_38]] : index
// CHECK: %[[VAL_46:.*]] = cmpi ult, %[[VAL_44]], %[[VAL_41]] : index
// CHECK: scf.condition(%[[VAL_47]]) %[[VAL_43]], %[[VAL_44]] : index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_48:.*]]: index, %[[VAL_49:.*]]: index):
-// CHECK: %[[VAL_50:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_48]]] : memref<?xindex>
-// CHECK: %[[VAL_51:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_49]]] : memref<?xindex>
+// CHECK: %[[VAL_50:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_48]]] : memref<?xindex>
+// CHECK: %[[VAL_51:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_49]]] : memref<?xindex>
// CHECK: %[[VAL_52:.*]] = cmpi ult, %[[VAL_51]], %[[VAL_50]] : index
// CHECK: %[[VAL_53:.*]] = select %[[VAL_52]], %[[VAL_51]], %[[VAL_50]] : index
// CHECK: %[[VAL_54:.*]] = cmpi eq, %[[VAL_50]], %[[VAL_53]] : index
// CHECK: %[[VAL_55:.*]] = cmpi eq, %[[VAL_51]], %[[VAL_53]] : index
// CHECK: %[[VAL_56:.*]] = and %[[VAL_54]], %[[VAL_55]] : i1
// CHECK: scf.if %[[VAL_56]] {
-// CHECK: %[[VAL_57:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_48]]] : memref<?xf32>
-// CHECK: %[[VAL_58:.*]] = load %[[VAL_15]]{{\[}}%[[VAL_49]]] : memref<?xf32>
+// CHECK: %[[VAL_57:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_48]]] : memref<?xf32>
+// CHECK: %[[VAL_58:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_49]]] : memref<?xf32>
// CHECK: %[[VAL_59:.*]] = mulf %[[VAL_57]], %[[VAL_58]] : f32
// CHECK: store %[[VAL_59]], %[[VAL_16]]{{\[}}%[[VAL_32]], %[[VAL_53]]] : memref<32x16xf32>
// CHECK: } else {
// CHECK: }
// CHECK: %[[VAL_60:.*]] = cmpi eq, %[[VAL_50]], %[[VAL_53]] : index
-// CHECK: %[[VAL_61:.*]] = addi %[[VAL_48]], %[[VAL_5]] : index
+// CHECK: %[[VAL_61:.*]] = addi %[[VAL_48]], %[[VAL_4]] : index
// CHECK: %[[VAL_62:.*]] = select %[[VAL_60]], %[[VAL_61]], %[[VAL_48]] : index
// CHECK: %[[VAL_63:.*]] = cmpi eq, %[[VAL_51]], %[[VAL_53]] : index
-// CHECK: %[[VAL_64:.*]] = addi %[[VAL_49]], %[[VAL_5]] : index
+// CHECK: %[[VAL_64:.*]] = addi %[[VAL_49]], %[[VAL_4]] : index
// CHECK: %[[VAL_65:.*]] = select %[[VAL_63]], %[[VAL_64]], %[[VAL_49]] : index
// CHECK: scf.yield %[[VAL_62]], %[[VAL_65]] : index, index
// CHECK: }
// CHECK: } else {
// CHECK: }
// CHECK: %[[VAL_66:.*]] = cmpi eq, %[[VAL_29]], %[[VAL_32]] : index
-// CHECK: %[[VAL_67:.*]] = addi %[[VAL_27]], %[[VAL_5]] : index
+// CHECK: %[[VAL_67:.*]] = addi %[[VAL_27]], %[[VAL_4]] : index
// CHECK: %[[VAL_68:.*]] = select %[[VAL_66]], %[[VAL_67]], %[[VAL_27]] : index
// CHECK: %[[VAL_69:.*]] = cmpi eq, %[[VAL_30]], %[[VAL_32]] : index
-// CHECK: %[[VAL_70:.*]] = addi %[[VAL_28]], %[[VAL_5]] : index
+// CHECK: %[[VAL_70:.*]] = addi %[[VAL_28]], %[[VAL_4]] : index
// CHECK: %[[VAL_71:.*]] = select %[[VAL_69]], %[[VAL_70]], %[[VAL_28]] : index
// CHECK: scf.yield %[[VAL_68]], %[[VAL_71]] : index, index
// CHECK: }
}
// CHECK-LABEL: func @matvec(
-// CHECK-SAME: %[[VAL_0:.*0]]: tensor<16x32xf32>,
-// CHECK-SAME: %[[VAL_1:.*1]]: tensor<32xf32>,
-// CHECK-SAME: %[[VAL_2:.*2]]: tensor<16xf32>) -> tensor<16xf32> {
-// CHECK: %[[VAL_3:.*]] = constant 999 : index
-// CHECK: %[[VAL_4:.*]] = constant 16 : index
-// CHECK: %[[VAL_5:.*]] = constant 0 : index
-// CHECK: %[[VAL_6:.*]] = constant 1 : index
-// CHECK: %[[VAL_7:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = alloca(%[[VAL_3]]) : memref<?xf32>
-// CHECK: %[[VAL_10:.*]] = alloca() : memref<32xf32>
-// CHECK: %[[VAL_11:.*]] = alloca() : memref<16xf32>
-// CHECK: scf.for %[[VAL_12:.*]] = %[[VAL_5]] to %[[VAL_4]] step %[[VAL_6]] {
-// CHECK: %[[VAL_13:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_12]]] : memref<?xindex>
-// CHECK: %[[VAL_14:.*]] = addi %[[VAL_12]], %[[VAL_6]] : index
-// CHECK: %[[VAL_15:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_14]]] : memref<?xindex>
+// CHECK-SAME: %[[VAL_0:.*]]: tensor<16x32xf32>,
+// CHECK-SAME: %[[VAL_1:.*]]: tensor<32xf32>,
+// CHECK-SAME: %[[VAL_2:.*]]: tensor<16xf32>) -> tensor<16xf32> {
+// CHECK: %[[VAL_3:.*]] = constant 16 : index
+// CHECK: %[[VAL_4:.*]] = constant 0 : index
+// CHECK: %[[VAL_5:.*]] = constant 1 : index
+// CHECK: %[[VAL_6:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_5]] : tensor<16x32xf32> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_5]] : tensor<16x32xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<16x32xf32> to memref<?xf32>
+// CHECK: %[[VAL_9:.*]] = tensor_to_memref %[[VAL_1]] : memref<32xf32>
+// CHECK: %[[VAL_10:.*]] = tensor_to_memref %[[VAL_2]] : memref<16xf32>
+// CHECK: %[[VAL_11:.*]] = alloc() : memref<16xf32>
+// CHECK: linalg.copy(%[[VAL_10]], %[[VAL_11]]) : memref<16xf32>, memref<16xf32>
+// CHECK: scf.for %[[VAL_12:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] {
+// CHECK: %[[VAL_13:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_12]]] : memref<?xindex>
+// CHECK: %[[VAL_14:.*]] = addi %[[VAL_12]], %[[VAL_5]] : index
+// CHECK: %[[VAL_15:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_14]]] : memref<?xindex>
// CHECK: %[[VAL_16:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_12]]] : memref<16xf32>
-// CHECK: %[[VAL_17:.*]] = scf.for %[[VAL_18:.*]] = %[[VAL_13]] to %[[VAL_15]] step %[[VAL_6]] iter_args(%[[VAL_19:.*]] = %[[VAL_16]]) -> (f32) {
-// CHECK: %[[VAL_20:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_18]]] : memref<?xindex>
-// CHECK: %[[VAL_21:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_18]]] : memref<?xf32>
-// CHECK: %[[VAL_22:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_20]]] : memref<32xf32>
+// CHECK: %[[VAL_17:.*]] = scf.for %[[VAL_18:.*]] = %[[VAL_13]] to %[[VAL_15]] step %[[VAL_5]] iter_args(%[[VAL_19:.*]] = %[[VAL_16]]) -> (f32) {
+// CHECK: %[[VAL_20:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_18]]] : memref<?xindex>
+// CHECK: %[[VAL_21:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_18]]] : memref<?xf32>
+// CHECK: %[[VAL_22:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_20]]] : memref<32xf32>
// CHECK: %[[VAL_23:.*]] = mulf %[[VAL_21]], %[[VAL_22]] : f32
// CHECK: %[[VAL_24:.*]] = addf %[[VAL_23]], %[[VAL_19]] : f32
// CHECK: scf.yield %[[VAL_24]] : f32
// CHECK-LABEL: func @sum_reduction(
// CHECK-SAME: %[[VAL_0:.*]]: tensor<10x20xf32>,
// CHECK-SAME: %[[VAL_1:.*]]: tensor<f32>) -> tensor<f32> {
-// CHECK: %[[VAL_2:.*]] = constant 999 : index
-// CHECK: %[[VAL_3:.*]] = constant 10 : index
-// CHECK: %[[VAL_4:.*]] = constant 0 : index
-// CHECK: %[[VAL_5:.*]] = constant 1 : index
-// CHECK: %[[VAL_6:.*]] = alloca(%[[VAL_2]]) : memref<?xindex>
-// CHECK: %[[VAL_7:.*]] = alloca(%[[VAL_2]]) : memref<?xf32>
-// CHECK: %[[VAL_8:.*]] = alloca() : memref<f32>
-// CHECK: scf.for %[[VAL_9:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] {
-// CHECK: %[[VAL_10:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_9]]] : memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = addi %[[VAL_9]], %[[VAL_5]] : index
-// CHECK: %[[VAL_12:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_11]]] : memref<?xindex>
+// CHECK: %[[VAL_2:.*]] = constant 10 : index
+// CHECK: %[[VAL_3:.*]] = constant 0 : index
+// CHECK: %[[VAL_4:.*]] = constant 1 : index
+// CHECK: %[[VAL_5:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<10x20xf32> to memref<?xindex>
+// CHECK: %[[VAL_6:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<10x20xf32> to memref<?xf32>
+// CHECK: %[[VAL_7:.*]] = tensor_to_memref %[[VAL_1]] : memref<f32>
+// CHECK: %[[VAL_8:.*]] = alloc() : memref<f32>
+// CHECK: linalg.copy(%[[VAL_7]], %[[VAL_8]]) : memref<f32>, memref<f32>
+// CHECK: scf.for %[[VAL_9:.*]] = %[[VAL_3]] to %[[VAL_2]] step %[[VAL_4]] {
+// CHECK: %[[VAL_10:.*]] = load %[[VAL_5]]{{\[}}%[[VAL_9]]] : memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = addi %[[VAL_9]], %[[VAL_4]] : index
+// CHECK: %[[VAL_12:.*]] = load %[[VAL_5]]{{\[}}%[[VAL_11]]] : memref<?xindex>
// CHECK: %[[VAL_13:.*]] = load %[[VAL_8]][] : memref<f32>
-// CHECK: %[[VAL_14:.*]] = scf.for %[[VAL_15:.*]] = %[[VAL_10]] to %[[VAL_12]] step %[[VAL_5]] iter_args(%[[VAL_16:.*]] = %[[VAL_13]]) -> (f32) {
-// CHECK: %[[VAL_17:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_15]]] : memref<?xf32>
+// CHECK: %[[VAL_14:.*]] = scf.for %[[VAL_15:.*]] = %[[VAL_10]] to %[[VAL_12]] step %[[VAL_4]] iter_args(%[[VAL_16:.*]] = %[[VAL_13]]) -> (f32) {
+// CHECK: %[[VAL_17:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_15]]] : memref<?xf32>
// CHECK: %[[VAL_18:.*]] = addf %[[VAL_16]], %[[VAL_17]] : f32
// CHECK: scf.yield %[[VAL_18]] : f32
// CHECK: }
// CHECK-SAME: %[[VAL_0:.*]]: tensor<?x?xf64>,
// CHECK-SAME: %[[VAL_1:.*]]: tensor<?x?xf64>) -> tensor<?x?xf64> {
// CHECK: %[[VAL_2:.*]] = constant 2.000000e+00 : f64
-// CHECK: %[[VAL_3:.*]] = constant 999 : index
-// CHECK: %[[VAL_4:.*]] = constant 0 : index
-// CHECK: %[[VAL_5:.*]] = constant 1 : index
-// CHECK: %[[VAL_6:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_7:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = alloca(%[[VAL_3]]) : memref<?xf64>
+// CHECK: %[[VAL_3:.*]] = constant 0 : index
+// CHECK: %[[VAL_4:.*]] = constant 1 : index
+// CHECK: %[[VAL_5:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<?x?xf64> to memref<?xindex>
+// CHECK: %[[VAL_6:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<?x?xf64> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<?x?xf64> to memref<?xf64>
+// CHECK: %[[VAL_8:.*]] = dim %[[VAL_1]], %[[VAL_3]] : tensor<?x?xf64>
// CHECK: %[[VAL_9:.*]] = dim %[[VAL_1]], %[[VAL_4]] : tensor<?x?xf64>
-// CHECK: %[[VAL_10:.*]] = dim %[[VAL_1]], %[[VAL_5]] : tensor<?x?xf64>
-// CHECK: %[[VAL_11:.*]] = alloca(%[[VAL_9]], %[[VAL_10]]) : memref<?x?xf64>
-// CHECK: scf.for %[[VAL_12:.*]] = %[[VAL_4]] to %[[VAL_9]] step %[[VAL_5]] {
-// CHECK: %[[VAL_13:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_12]]] : memref<?xindex>
-// CHECK: %[[VAL_14:.*]] = addi %[[VAL_12]], %[[VAL_5]] : index
-// CHECK: %[[VAL_15:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_14]]] : memref<?xindex>
-// CHECK: scf.for %[[VAL_16:.*]] = %[[VAL_13]] to %[[VAL_15]] step %[[VAL_5]] {
-// CHECK: %[[VAL_17:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_16]]] : memref<?xindex>
-// CHECK: %[[VAL_18:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_16]]] : memref<?xf64>
+// CHECK: %[[VAL_10:.*]] = tensor_to_memref %[[VAL_1]] : memref<?x?xf64>
+// CHECK: %[[VAL_11:.*]] = alloc(%[[VAL_8]], %[[VAL_9]]) : memref<?x?xf64>
+// CHECK: linalg.copy(%[[VAL_10]], %[[VAL_11]]) : memref<?x?xf64>, memref<?x?xf64>
+// CHECK: scf.for %[[VAL_12:.*]] = %[[VAL_3]] to %[[VAL_8]] step %[[VAL_4]] {
+// CHECK: %[[VAL_13:.*]] = load %[[VAL_5]]{{\[}}%[[VAL_12]]] : memref<?xindex>
+// CHECK: %[[VAL_14:.*]] = addi %[[VAL_12]], %[[VAL_4]] : index
+// CHECK: %[[VAL_15:.*]] = load %[[VAL_5]]{{\[}}%[[VAL_14]]] : memref<?xindex>
+// CHECK: scf.for %[[VAL_16:.*]] = %[[VAL_13]] to %[[VAL_15]] step %[[VAL_4]] {
+// CHECK: %[[VAL_17:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_16]]] : memref<?xindex>
+// CHECK: %[[VAL_18:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_16]]] : memref<?xf64>
// CHECK: %[[VAL_19:.*]] = mulf %[[VAL_18]], %[[VAL_2]] : f64
// CHECK: store %[[VAL_19]], %[[VAL_11]]{{\[}}%[[VAL_12]], %[[VAL_17]]] : memref<?x?xf64>
// CHECK: }
// CHECK-SAME: %[[VAL_1:.*1]]: tensor<?x?xf32>,
// CHECK-SAME: %[[VAL_2:.*2]]: tensor<?x?xf32>,
// CHECK-SAME: %[[VAL_3:.*3]]: tensor<?x?xf32>) -> tensor<?x?xf32> {
-// CHECK: %[[VAL_4:.*]] = constant 999 : index
-// CHECK: %[[VAL_5:.*]] = constant 0 : index
-// CHECK: %[[VAL_6:.*]] = constant 1 : index
-// CHECK: %[[VAL_7:.*]] = alloca(%[[VAL_4]]) : memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = alloca(%[[VAL_4]]) : memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = alloca(%[[VAL_4]]) : memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = alloca(%[[VAL_4]]) : memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = alloca(%[[VAL_4]]) : memref<?xf32>
-// CHECK: %[[VAL_12:.*]] = dim %[[VAL_1]], %[[VAL_5]] : tensor<?x?xf32>
-// CHECK: %[[VAL_13:.*]] = dim %[[VAL_1]], %[[VAL_6]] : tensor<?x?xf32>
-// CHECK: %[[VAL_14:.*]] = alloca(%[[VAL_12]], %[[VAL_13]]) : memref<?x?xf32>
-// CHECK: %[[VAL_15:.*]] = dim %[[VAL_2]], %[[VAL_5]] : tensor<?x?xf32>
-// CHECK: %[[VAL_16:.*]] = dim %[[VAL_2]], %[[VAL_6]] : tensor<?x?xf32>
-// CHECK: %[[VAL_17:.*]] = alloca(%[[VAL_15]], %[[VAL_16]]) : memref<?x?xf32>
-// CHECK: %[[VAL_18:.*]] = dim %[[VAL_3]], %[[VAL_5]] : tensor<?x?xf32>
-// CHECK: %[[VAL_19:.*]] = dim %[[VAL_3]], %[[VAL_6]] : tensor<?x?xf32>
-// CHECK: %[[VAL_20:.*]] = alloca(%[[VAL_18]], %[[VAL_19]]) : memref<?x?xf32>
-// CHECK: %[[VAL_21:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_5]]] : memref<?xindex>
-// CHECK: %[[VAL_22:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_6]]] : memref<?xindex>
-// CHECK: scf.for %[[VAL_23:.*]] = %[[VAL_21]] to %[[VAL_22]] step %[[VAL_6]] {
-// CHECK: %[[VAL_24:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_23]]] : memref<?xindex>
-// CHECK: scf.for %[[VAL_25:.*]] = %[[VAL_5]] to %[[VAL_15]] step %[[VAL_6]] {
-// CHECK: %[[VAL_26:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_24]], %[[VAL_25]]] : memref<?x?xf32>
-// CHECK: %[[VAL_27:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_23]]] : memref<?xindex>
-// CHECK: %[[VAL_28:.*]] = addi %[[VAL_23]], %[[VAL_6]] : index
-// CHECK: %[[VAL_29:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_28]]] : memref<?xindex>
-// CHECK: scf.for %[[VAL_30:.*]] = %[[VAL_27]] to %[[VAL_29]] step %[[VAL_6]] {
-// CHECK: %[[VAL_31:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_30]]] : memref<?xindex>
-// CHECK: %[[VAL_32:.*]] = load %[[VAL_20]]{{\[}}%[[VAL_24]], %[[VAL_31]]] : memref<?x?xf32>
-// CHECK: %[[VAL_33:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_30]]] : memref<?xf32>
-// CHECK: %[[VAL_34:.*]] = load %[[VAL_17]]{{\[}}%[[VAL_25]], %[[VAL_31]]] : memref<?x?xf32>
-// CHECK: %[[VAL_35:.*]] = mulf %[[VAL_26]], %[[VAL_34]] : f32
-// CHECK: %[[VAL_36:.*]] = mulf %[[VAL_33]], %[[VAL_35]] : f32
-// CHECK: %[[VAL_37:.*]] = addf %[[VAL_32]], %[[VAL_36]] : f32
-// CHECK: store %[[VAL_37]], %[[VAL_20]]{{\[}}%[[VAL_24]], %[[VAL_31]]] : memref<?x?xf32>
+// CHECK: %[[VAL_4:.*]] = constant 0 : index
+// CHECK: %[[VAL_5:.*]] = constant 1 : index
+// CHECK: %[[VAL_6:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<?x?xf32> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<?x?xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_5]] : tensor<?x?xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_5]] : tensor<?x?xf32> to memref<?xindex>
+// CHECK: %[[VAL_10:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<?x?xf32> to memref<?xf32>
+// CHECK: %[[VAL_11:.*]] = tensor_to_memref %[[VAL_1]] : memref<?x?xf32>
+// CHECK: %[[VAL_12:.*]] = dim %[[VAL_2]], %[[VAL_4]] : tensor<?x?xf32>
+// CHECK: %[[VAL_13:.*]] = tensor_to_memref %[[VAL_2]] : memref<?x?xf32>
+// CHECK: %[[VAL_14:.*]] = dim %[[VAL_3]], %[[VAL_4]] : tensor<?x?xf32>
+// CHECK: %[[VAL_15:.*]] = dim %[[VAL_3]], %[[VAL_5]] : tensor<?x?xf32>
+// CHECK: %[[VAL_16:.*]] = tensor_to_memref %[[VAL_3]] : memref<?x?xf32>
+// CHECK: %[[VAL_17:.*]] = alloc(%[[VAL_14]], %[[VAL_15]]) : memref<?x?xf32>
+// CHECK: linalg.copy(%[[VAL_16]], %[[VAL_17]]) : memref<?x?xf32>, memref<?x?xf32>
+// CHECK: %[[VAL_18:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref<?xindex>
+// CHECK: %[[VAL_19:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref<?xindex>
+// CHECK: scf.for %[[VAL_20:.*]] = %[[VAL_18]] to %[[VAL_19]] step %[[VAL_5]] {
+// CHECK: %[[VAL_21:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_20]]] : memref<?xindex>
+// CHECK: scf.for %[[VAL_22:.*]] = %[[VAL_4]] to %[[VAL_12]] step %[[VAL_5]] {
+// CHECK: %[[VAL_23:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_21]], %[[VAL_22]]] : memref<?x?xf32>
+// CHECK: %[[VAL_24:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_20]]] : memref<?xindex>
+// CHECK: %[[VAL_25:.*]] = addi %[[VAL_20]], %[[VAL_5]] : index
+// CHECK: %[[VAL_26:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_25]]] : memref<?xindex>
+// CHECK: scf.for %[[VAL_27:.*]] = %[[VAL_24]] to %[[VAL_26]] step %[[VAL_5]] {
+// CHECK: %[[VAL_28:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_27]]] : memref<?xindex>
+// CHECK: %[[VAL_29:.*]] = load %[[VAL_17]]{{\[}}%[[VAL_21]], %[[VAL_28]]] : memref<?x?xf32>
+// CHECK: %[[VAL_30:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_27]]] : memref<?xf32>
+// CHECK: %[[VAL_31:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_22]], %[[VAL_28]]] : memref<?x?xf32>
+// CHECK: %[[VAL_32:.*]] = mulf %[[VAL_23]], %[[VAL_31]] : f32
+// CHECK: %[[VAL_33:.*]] = mulf %[[VAL_30]], %[[VAL_32]] : f32
+// CHECK: %[[VAL_34:.*]] = addf %[[VAL_29]], %[[VAL_33]] : f32
+// CHECK: store %[[VAL_34]], %[[VAL_17]]{{\[}}%[[VAL_21]], %[[VAL_28]]] : memref<?x?xf32>
// CHECK: }
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_38:.*]] = tensor_load %[[VAL_20]] : memref<?x?xf32>
-// CHECK: return %[[VAL_38]] : tensor<?x?xf32>
+// CHECK: %[[VAL_35:.*]] = tensor_load %[[VAL_17]] : memref<?x?xf32>
+// CHECK: return %[[VAL_35]] : tensor<?x?xf32>
// CHECK: }
func @sampled_dense_dense(%args: tensor<?x?xf32>,
%arga: tensor<?x?xf32>,
// CHECK-SAME: %[[VAL_3:.*3]]: tensor<?xf32>,
// CHECK-SAME: %[[VAL_4:.*4]]: tensor<f32>,
// CHECK-SAME: %[[VAL_5:.*5]]: tensor<?xf32>) -> tensor<?xf32> {
-// CHECK: %[[VAL_6:.*]] = constant 999 : index
-// CHECK: %[[VAL_7:.*]] = constant 0 : index
-// CHECK: %[[VAL_8:.*]] = constant true
-// CHECK: %[[VAL_9:.*]] = constant 1 : index
-// CHECK: %[[VAL_10:.*]] = alloca(%[[VAL_6]]) : memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = alloca(%[[VAL_6]]) : memref<?xindex>
-// CHECK: %[[VAL_12:.*]] = alloca(%[[VAL_6]]) : memref<?xindex>
-// CHECK: %[[VAL_13:.*]] = alloca(%[[VAL_6]]) : memref<?xindex>
-// CHECK: %[[VAL_14:.*]] = alloca(%[[VAL_6]]) : memref<?xf32>
-// CHECK: %[[VAL_15:.*]] = alloca(%[[VAL_6]]) : memref<?xindex>
-// CHECK: %[[VAL_16:.*]] = alloca(%[[VAL_6]]) : memref<?xindex>
-// CHECK: %[[VAL_17:.*]] = alloca(%[[VAL_6]]) : memref<?xf32>
-// CHECK: %[[VAL_18:.*]] = alloca(%[[VAL_6]]) : memref<?xindex>
-// CHECK: %[[VAL_19:.*]] = alloca(%[[VAL_6]]) : memref<?xindex>
-// CHECK: %[[VAL_20:.*]] = alloca(%[[VAL_6]]) : memref<?xf32>
-// CHECK: %[[VAL_21:.*]] = dim %[[VAL_3]], %[[VAL_7]] : tensor<?xf32>
-// CHECK: %[[VAL_22:.*]] = alloca(%[[VAL_21]]) : memref<?xf32>
-// CHECK: %[[VAL_23:.*]] = alloca() : memref<f32>
-// CHECK: %[[VAL_24:.*]] = dim %[[VAL_5]], %[[VAL_7]] : tensor<?xf32>
-// CHECK: %[[VAL_25:.*]] = alloca(%[[VAL_24]]) : memref<?xf32>
-// CHECK: %[[VAL_26:.*]] = load %[[VAL_23]][] : memref<f32>
-// CHECK: %[[VAL_27:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_7]]] : memref<?xindex>
-// CHECK: %[[VAL_28:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_9]]] : memref<?xindex>
-// CHECK: %[[VAL_29:.*]]:2 = scf.while (%[[VAL_30:.*]] = %[[VAL_27]], %[[VAL_31:.*]] = %[[VAL_7]]) : (index, index) -> (index, index) {
-// CHECK: %[[VAL_32:.*]] = cmpi ult, %[[VAL_30]], %[[VAL_28]] : index
-// CHECK: scf.condition(%[[VAL_32]]) %[[VAL_30]], %[[VAL_31]] : index, index
+// CHECK: %[[VAL_6:.*]] = constant 0 : index
+// CHECK: %[[VAL_7:.*]] = constant true
+// CHECK: %[[VAL_8:.*]] = constant 1 : index
+// CHECK: %[[VAL_9:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_6]] : tensor<?x?xf32> to memref<?xindex>
+// CHECK: %[[VAL_10:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_6]] : tensor<?x?xf32> to memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_8]] : tensor<?x?xf32> to memref<?xindex>
+// CHECK: %[[VAL_12:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_8]] : tensor<?x?xf32> to memref<?xindex>
+// CHECK: %[[VAL_13:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<?x?xf32> to memref<?xf32>
+// CHECK: %[[VAL_14:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_8]] : tensor<?x?xf32> to memref<?xindex>
+// CHECK: %[[VAL_15:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_8]] : tensor<?x?xf32> to memref<?xindex>
+// CHECK: %[[VAL_16:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<?x?xf32> to memref<?xf32>
+// CHECK: %[[VAL_17:.*]] = linalg.sparse_pointers %[[VAL_2]], %[[VAL_8]] : tensor<?x?xf32> to memref<?xindex>
+// CHECK: %[[VAL_18:.*]] = linalg.sparse_indices %[[VAL_2]], %[[VAL_8]] : tensor<?x?xf32> to memref<?xindex>
+// CHECK: %[[VAL_19:.*]] = linalg.sparse_values %[[VAL_2]] : tensor<?x?xf32> to memref<?xf32>
+// CHECK: %[[VAL_20:.*]] = tensor_to_memref %[[VAL_3]] : memref<?xf32>
+// CHECK: %[[VAL_21:.*]] = tensor_to_memref %[[VAL_4]] : memref<f32>
+// CHECK: %[[VAL_22:.*]] = dim %[[VAL_5]], %[[VAL_6]] : tensor<?xf32>
+// CHECK: %[[VAL_23:.*]] = tensor_to_memref %[[VAL_5]] : memref<?xf32>
+// CHECK: %[[VAL_24:.*]] = alloc(%[[VAL_22]]) : memref<?xf32>
+// CHECK: linalg.copy(%[[VAL_23]], %[[VAL_24]]) : memref<?xf32>, memref<?xf32>
+// CHECK: %[[VAL_25:.*]] = load %[[VAL_21]][] : memref<f32>
+// CHECK: %[[VAL_26:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_6]]] : memref<?xindex>
+// CHECK: %[[VAL_27:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_8]]] : memref<?xindex>
+// CHECK: %[[VAL_28:.*]]:2 = scf.while (%[[VAL_29:.*]] = %[[VAL_26]], %[[VAL_30:.*]] = %[[VAL_6]]) : (index, index) -> (index, index) {
+// CHECK: %[[VAL_31:.*]] = cmpi ult, %[[VAL_29]], %[[VAL_27]] : index
+// CHECK: scf.condition(%[[VAL_31]]) %[[VAL_29]], %[[VAL_30]] : index, index
// CHECK: } do {
-// CHECK: ^bb0(%[[VAL_33:.*]]: index, %[[VAL_34:.*]]: index):
-// CHECK: %[[VAL_35:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_33]]] : memref<?xindex>
-// CHECK: %[[VAL_36:.*]] = cmpi eq, %[[VAL_35]], %[[VAL_34]] : index
-// CHECK: scf.if %[[VAL_36]] {
-// CHECK: %[[VAL_37:.*]] = load %[[VAL_22]]{{\[}}%[[VAL_34]]] : memref<?xf32>
-// CHECK: %[[VAL_38:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_33]]] : memref<?xindex>
-// CHECK: %[[VAL_39:.*]] = addi %[[VAL_33]], %[[VAL_9]] : index
-// CHECK: %[[VAL_40:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_39]]] : memref<?xindex>
-// CHECK: %[[VAL_41:.*]] = load %[[VAL_15]]{{\[}}%[[VAL_34]]] : memref<?xindex>
-// CHECK: %[[VAL_42:.*]] = addi %[[VAL_34]], %[[VAL_9]] : index
-// CHECK: %[[VAL_43:.*]] = load %[[VAL_15]]{{\[}}%[[VAL_42]]] : memref<?xindex>
-// CHECK: %[[VAL_44:.*]] = load %[[VAL_18]]{{\[}}%[[VAL_34]]] : memref<?xindex>
-// CHECK: %[[VAL_45:.*]] = addi %[[VAL_34]], %[[VAL_9]] : index
-// CHECK: %[[VAL_46:.*]] = load %[[VAL_18]]{{\[}}%[[VAL_45]]] : memref<?xindex>
-// CHECK: %[[VAL_47:.*]]:4 = scf.while (%[[VAL_48:.*]] = %[[VAL_38]], %[[VAL_49:.*]] = %[[VAL_41]], %[[VAL_50:.*]] = %[[VAL_44]], %[[VAL_51:.*]] = %[[VAL_7]]) : (index, index, index, index) -> (index, index, index, index) {
-// CHECK: %[[VAL_52:.*]] = cmpi ult, %[[VAL_48]], %[[VAL_40]] : index
-// CHECK: %[[VAL_53:.*]] = cmpi ult, %[[VAL_49]], %[[VAL_43]] : index
-// CHECK: %[[VAL_54:.*]] = and %[[VAL_52]], %[[VAL_53]] : i1
-// CHECK: %[[VAL_55:.*]] = cmpi ult, %[[VAL_50]], %[[VAL_46]] : index
-// CHECK: %[[VAL_56:.*]] = and %[[VAL_54]], %[[VAL_55]] : i1
-// CHECK: scf.condition(%[[VAL_56]]) %[[VAL_48]], %[[VAL_49]], %[[VAL_50]], %[[VAL_51]] : index, index, index, index
+// CHECK: ^bb0(%[[VAL_32:.*]]: index, %[[VAL_33:.*]]: index):
+// CHECK: %[[VAL_34:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_32]]] : memref<?xindex>
+// CHECK: %[[VAL_35:.*]] = cmpi eq, %[[VAL_34]], %[[VAL_33]] : index
+// CHECK: scf.if %[[VAL_35]] {
+// CHECK: %[[VAL_36:.*]] = load %[[VAL_20]]{{\[}}%[[VAL_33]]] : memref<?xf32>
+// CHECK: %[[VAL_37:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_32]]] : memref<?xindex>
+// CHECK: %[[VAL_38:.*]] = addi %[[VAL_32]], %[[VAL_8]] : index
+// CHECK: %[[VAL_39:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_38]]] : memref<?xindex>
+// CHECK: %[[VAL_40:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_33]]] : memref<?xindex>
+// CHECK: %[[VAL_41:.*]] = addi %[[VAL_33]], %[[VAL_8]] : index
+// CHECK: %[[VAL_42:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_41]]] : memref<?xindex>
+// CHECK: %[[VAL_43:.*]] = load %[[VAL_17]]{{\[}}%[[VAL_33]]] : memref<?xindex>
+// CHECK: %[[VAL_44:.*]] = addi %[[VAL_33]], %[[VAL_8]] : index
+// CHECK: %[[VAL_45:.*]] = load %[[VAL_17]]{{\[}}%[[VAL_44]]] : memref<?xindex>
+// CHECK: %[[VAL_46:.*]]:4 = scf.while (%[[VAL_47:.*]] = %[[VAL_37]], %[[VAL_48:.*]] = %[[VAL_40]], %[[VAL_49:.*]] = %[[VAL_43]], %[[VAL_50:.*]] = %[[VAL_6]]) : (index, index, index, index) -> (index, index, index, index) {
+// CHECK: %[[VAL_51:.*]] = cmpi ult, %[[VAL_47]], %[[VAL_39]] : index
+// CHECK: %[[VAL_52:.*]] = cmpi ult, %[[VAL_48]], %[[VAL_42]] : index
+// CHECK: %[[VAL_53:.*]] = and %[[VAL_51]], %[[VAL_52]] : i1
+// CHECK: %[[VAL_54:.*]] = cmpi ult, %[[VAL_49]], %[[VAL_45]] : index
+// CHECK: %[[VAL_55:.*]] = and %[[VAL_53]], %[[VAL_54]] : i1
+// CHECK: scf.condition(%[[VAL_55]]) %[[VAL_47]], %[[VAL_48]], %[[VAL_49]], %[[VAL_50]] : index, index, index, index
// CHECK: } do {
-// CHECK: ^bb0(%[[VAL_57:.*]]: index, %[[VAL_58:.*]]: index, %[[VAL_59:.*]]: index, %[[VAL_60:.*]]: index):
-// CHECK: %[[VAL_61:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_57]]] : memref<?xindex>
-// CHECK: %[[VAL_62:.*]] = load %[[VAL_16]]{{\[}}%[[VAL_58]]] : memref<?xindex>
-// CHECK: %[[VAL_63:.*]] = load %[[VAL_19]]{{\[}}%[[VAL_59]]] : memref<?xindex>
-// CHECK: %[[VAL_64:.*]] = cmpi eq, %[[VAL_61]], %[[VAL_60]] : index
-// CHECK: %[[VAL_65:.*]] = cmpi eq, %[[VAL_62]], %[[VAL_60]] : index
-// CHECK: %[[VAL_66:.*]] = and %[[VAL_64]], %[[VAL_65]] : i1
-// CHECK: %[[VAL_67:.*]] = cmpi eq, %[[VAL_63]], %[[VAL_60]] : index
-// CHECK: %[[VAL_68:.*]] = and %[[VAL_66]], %[[VAL_67]] : i1
-// CHECK: scf.if %[[VAL_68]] {
-// CHECK: %[[VAL_69:.*]] = load %[[VAL_25]]{{\[}}%[[VAL_34]]] : memref<?xf32>
-// CHECK: %[[VAL_70:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_57]]] : memref<?xf32>
-// CHECK: %[[VAL_71:.*]] = load %[[VAL_17]]{{\[}}%[[VAL_58]]] : memref<?xf32>
-// CHECK: %[[VAL_72:.*]] = mulf %[[VAL_70]], %[[VAL_71]] : f32
-// CHECK: %[[VAL_73:.*]] = mulf %[[VAL_72]], %[[VAL_37]] : f32
-// CHECK: %[[VAL_74:.*]] = mulf %[[VAL_73]], %[[VAL_26]] : f32
-// CHECK: %[[VAL_75:.*]] = load %[[VAL_20]]{{\[}}%[[VAL_59]]] : memref<?xf32>
-// CHECK: %[[VAL_76:.*]] = addf %[[VAL_74]], %[[VAL_75]] : f32
-// CHECK: %[[VAL_77:.*]] = addf %[[VAL_69]], %[[VAL_76]] : f32
-// CHECK: store %[[VAL_77]], %[[VAL_25]]{{\[}}%[[VAL_34]]] : memref<?xf32>
+// CHECK: ^bb0(%[[VAL_56:.*]]: index, %[[VAL_57:.*]]: index, %[[VAL_58:.*]]: index, %[[VAL_59:.*]]: index):
+// CHECK: %[[VAL_60:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_56]]] : memref<?xindex>
+// CHECK: %[[VAL_61:.*]] = load %[[VAL_15]]{{\[}}%[[VAL_57]]] : memref<?xindex>
+// CHECK: %[[VAL_62:.*]] = load %[[VAL_18]]{{\[}}%[[VAL_58]]] : memref<?xindex>
+// CHECK: %[[VAL_63:.*]] = cmpi eq, %[[VAL_60]], %[[VAL_59]] : index
+// CHECK: %[[VAL_64:.*]] = cmpi eq, %[[VAL_61]], %[[VAL_59]] : index
+// CHECK: %[[VAL_65:.*]] = and %[[VAL_63]], %[[VAL_64]] : i1
+// CHECK: %[[VAL_66:.*]] = cmpi eq, %[[VAL_62]], %[[VAL_59]] : index
+// CHECK: %[[VAL_67:.*]] = and %[[VAL_65]], %[[VAL_66]] : i1
+// CHECK: scf.if %[[VAL_67]] {
+// CHECK: %[[VAL_68:.*]] = load %[[VAL_24]]{{\[}}%[[VAL_33]]] : memref<?xf32>
+// CHECK: %[[VAL_69:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_56]]] : memref<?xf32>
+// CHECK: %[[VAL_70:.*]] = load %[[VAL_16]]{{\[}}%[[VAL_57]]] : memref<?xf32>
+// CHECK: %[[VAL_71:.*]] = mulf %[[VAL_69]], %[[VAL_70]] : f32
+// CHECK: %[[VAL_72:.*]] = mulf %[[VAL_71]], %[[VAL_36]] : f32
+// CHECK: %[[VAL_73:.*]] = mulf %[[VAL_72]], %[[VAL_25]] : f32
+// CHECK: %[[VAL_74:.*]] = load %[[VAL_19]]{{\[}}%[[VAL_58]]] : memref<?xf32>
+// CHECK: %[[VAL_75:.*]] = addf %[[VAL_73]], %[[VAL_74]] : f32
+// CHECK: %[[VAL_76:.*]] = addf %[[VAL_68]], %[[VAL_75]] : f32
+// CHECK: store %[[VAL_76]], %[[VAL_24]]{{\[}}%[[VAL_33]]] : memref<?xf32>
// CHECK: } else {
-// CHECK: %[[VAL_78:.*]] = cmpi eq, %[[VAL_61]], %[[VAL_60]] : index
-// CHECK: %[[VAL_79:.*]] = cmpi eq, %[[VAL_62]], %[[VAL_60]] : index
-// CHECK: %[[VAL_80:.*]] = and %[[VAL_78]], %[[VAL_79]] : i1
-// CHECK: scf.if %[[VAL_80]] {
-// CHECK: %[[VAL_81:.*]] = load %[[VAL_25]]{{\[}}%[[VAL_34]]] : memref<?xf32>
-// CHECK: %[[VAL_82:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_57]]] : memref<?xf32>
-// CHECK: %[[VAL_83:.*]] = load %[[VAL_17]]{{\[}}%[[VAL_58]]] : memref<?xf32>
-// CHECK: %[[VAL_84:.*]] = mulf %[[VAL_82]], %[[VAL_83]] : f32
-// CHECK: %[[VAL_85:.*]] = mulf %[[VAL_84]], %[[VAL_37]] : f32
-// CHECK: %[[VAL_86:.*]] = mulf %[[VAL_85]], %[[VAL_26]] : f32
-// CHECK: %[[VAL_87:.*]] = addf %[[VAL_81]], %[[VAL_86]] : f32
-// CHECK: store %[[VAL_87]], %[[VAL_25]]{{\[}}%[[VAL_34]]] : memref<?xf32>
+// CHECK: %[[VAL_77:.*]] = cmpi eq, %[[VAL_60]], %[[VAL_59]] : index
+// CHECK: %[[VAL_78:.*]] = cmpi eq, %[[VAL_61]], %[[VAL_59]] : index
+// CHECK: %[[VAL_79:.*]] = and %[[VAL_77]], %[[VAL_78]] : i1
+// CHECK: scf.if %[[VAL_79]] {
+// CHECK: %[[VAL_80:.*]] = load %[[VAL_24]]{{\[}}%[[VAL_33]]] : memref<?xf32>
+// CHECK: %[[VAL_81:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_56]]] : memref<?xf32>
+// CHECK: %[[VAL_82:.*]] = load %[[VAL_16]]{{\[}}%[[VAL_57]]] : memref<?xf32>
+// CHECK: %[[VAL_83:.*]] = mulf %[[VAL_81]], %[[VAL_82]] : f32
+// CHECK: %[[VAL_84:.*]] = mulf %[[VAL_83]], %[[VAL_36]] : f32
+// CHECK: %[[VAL_85:.*]] = mulf %[[VAL_84]], %[[VAL_25]] : f32
+// CHECK: %[[VAL_86:.*]] = addf %[[VAL_80]], %[[VAL_85]] : f32
+// CHECK: store %[[VAL_86]], %[[VAL_24]]{{\[}}%[[VAL_33]]] : memref<?xf32>
// CHECK: } else {
-// CHECK: %[[VAL_88:.*]] = cmpi eq, %[[VAL_63]], %[[VAL_60]] : index
-// CHECK: scf.if %[[VAL_88]] {
-// CHECK: %[[VAL_89:.*]] = load %[[VAL_25]]{{\[}}%[[VAL_34]]] : memref<?xf32>
-// CHECK: %[[VAL_90:.*]] = load %[[VAL_20]]{{\[}}%[[VAL_59]]] : memref<?xf32>
-// CHECK: %[[VAL_91:.*]] = addf %[[VAL_89]], %[[VAL_90]] : f32
-// CHECK: store %[[VAL_91]], %[[VAL_25]]{{\[}}%[[VAL_34]]] : memref<?xf32>
+// CHECK: %[[VAL_87:.*]] = cmpi eq, %[[VAL_62]], %[[VAL_59]] : index
+// CHECK: scf.if %[[VAL_87]] {
+// CHECK: %[[VAL_88:.*]] = load %[[VAL_24]]{{\[}}%[[VAL_33]]] : memref<?xf32>
+// CHECK: %[[VAL_89:.*]] = load %[[VAL_19]]{{\[}}%[[VAL_58]]] : memref<?xf32>
+// CHECK: %[[VAL_90:.*]] = addf %[[VAL_88]], %[[VAL_89]] : f32
+// CHECK: store %[[VAL_90]], %[[VAL_24]]{{\[}}%[[VAL_33]]] : memref<?xf32>
// CHECK: } else {
// CHECK: }
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_92:.*]] = cmpi eq, %[[VAL_61]], %[[VAL_60]] : index
-// CHECK: %[[VAL_93:.*]] = addi %[[VAL_57]], %[[VAL_9]] : index
-// CHECK: %[[VAL_94:.*]] = select %[[VAL_92]], %[[VAL_93]], %[[VAL_57]] : index
-// CHECK: %[[VAL_95:.*]] = cmpi eq, %[[VAL_62]], %[[VAL_60]] : index
-// CHECK: %[[VAL_96:.*]] = addi %[[VAL_58]], %[[VAL_9]] : index
-// CHECK: %[[VAL_97:.*]] = select %[[VAL_95]], %[[VAL_96]], %[[VAL_58]] : index
-// CHECK: %[[VAL_98:.*]] = cmpi eq, %[[VAL_63]], %[[VAL_60]] : index
-// CHECK: %[[VAL_99:.*]] = addi %[[VAL_59]], %[[VAL_9]] : index
-// CHECK: %[[VAL_100:.*]] = select %[[VAL_98]], %[[VAL_99]], %[[VAL_59]] : index
-// CHECK: %[[VAL_101:.*]] = addi %[[VAL_60]], %[[VAL_9]] : index
-// CHECK: scf.yield %[[VAL_94]], %[[VAL_97]], %[[VAL_100]], %[[VAL_101]] : index, index, index, index
+// CHECK: %[[VAL_91:.*]] = cmpi eq, %[[VAL_60]], %[[VAL_59]] : index
+// CHECK: %[[VAL_92:.*]] = addi %[[VAL_56]], %[[VAL_8]] : index
+// CHECK: %[[VAL_93:.*]] = select %[[VAL_91]], %[[VAL_92]], %[[VAL_56]] : index
+// CHECK: %[[VAL_94:.*]] = cmpi eq, %[[VAL_61]], %[[VAL_59]] : index
+// CHECK: %[[VAL_95:.*]] = addi %[[VAL_57]], %[[VAL_8]] : index
+// CHECK: %[[VAL_96:.*]] = select %[[VAL_94]], %[[VAL_95]], %[[VAL_57]] : index
+// CHECK: %[[VAL_97:.*]] = cmpi eq, %[[VAL_62]], %[[VAL_59]] : index
+// CHECK: %[[VAL_98:.*]] = addi %[[VAL_58]], %[[VAL_8]] : index
+// CHECK: %[[VAL_99:.*]] = select %[[VAL_97]], %[[VAL_98]], %[[VAL_58]] : index
+// CHECK: %[[VAL_100:.*]] = addi %[[VAL_59]], %[[VAL_8]] : index
+// CHECK: scf.yield %[[VAL_93]], %[[VAL_96]], %[[VAL_99]], %[[VAL_100]] : index, index, index, index
// CHECK: }
-// CHECK: %[[VAL_102:.*]]:3 = scf.while (%[[VAL_103:.*]] = %[[VAL_104:.*]]#0, %[[VAL_105:.*]] = %[[VAL_104]]#1, %[[VAL_106:.*]] = %[[VAL_104]]#3) : (index, index, index) -> (index, index, index) {
-// CHECK: %[[VAL_107:.*]] = cmpi ult, %[[VAL_103]], %[[VAL_40]] : index
-// CHECK: %[[VAL_108:.*]] = cmpi ult, %[[VAL_105]], %[[VAL_43]] : index
-// CHECK: %[[VAL_109:.*]] = and %[[VAL_107]], %[[VAL_108]] : i1
-// CHECK: scf.condition(%[[VAL_109]]) %[[VAL_103]], %[[VAL_105]], %[[VAL_106]] : index, index, index
+// CHECK: %[[VAL_101:.*]]:3 = scf.while (%[[VAL_102:.*]] = %[[VAL_103:.*]]#0, %[[VAL_104:.*]] = %[[VAL_103]]#1, %[[VAL_105:.*]] = %[[VAL_103]]#3) : (index, index, index) -> (index, index, index) {
+// CHECK: %[[VAL_106:.*]] = cmpi ult, %[[VAL_102]], %[[VAL_39]] : index
+// CHECK: %[[VAL_107:.*]] = cmpi ult, %[[VAL_104]], %[[VAL_42]] : index
+// CHECK: %[[VAL_108:.*]] = and %[[VAL_106]], %[[VAL_107]] : i1
+// CHECK: scf.condition(%[[VAL_108]]) %[[VAL_102]], %[[VAL_104]], %[[VAL_105]] : index, index, index
// CHECK: } do {
-// CHECK: ^bb0(%[[VAL_110:.*]]: index, %[[VAL_111:.*]]: index, %[[VAL_112:.*]]: index):
-// CHECK: %[[VAL_113:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_110]]] : memref<?xindex>
-// CHECK: %[[VAL_114:.*]] = load %[[VAL_16]]{{\[}}%[[VAL_111]]] : memref<?xindex>
-// CHECK: %[[VAL_115:.*]] = cmpi eq, %[[VAL_113]], %[[VAL_112]] : index
-// CHECK: %[[VAL_116:.*]] = cmpi eq, %[[VAL_114]], %[[VAL_112]] : index
-// CHECK: %[[VAL_117:.*]] = and %[[VAL_115]], %[[VAL_116]] : i1
-// CHECK: scf.if %[[VAL_117]] {
-// CHECK: %[[VAL_118:.*]] = load %[[VAL_25]]{{\[}}%[[VAL_34]]] : memref<?xf32>
-// CHECK: %[[VAL_119:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_110]]] : memref<?xf32>
-// CHECK: %[[VAL_120:.*]] = load %[[VAL_17]]{{\[}}%[[VAL_111]]] : memref<?xf32>
-// CHECK: %[[VAL_121:.*]] = mulf %[[VAL_119]], %[[VAL_120]] : f32
-// CHECK: %[[VAL_122:.*]] = mulf %[[VAL_121]], %[[VAL_37]] : f32
-// CHECK: %[[VAL_123:.*]] = mulf %[[VAL_122]], %[[VAL_26]] : f32
-// CHECK: %[[VAL_124:.*]] = addf %[[VAL_118]], %[[VAL_123]] : f32
-// CHECK: store %[[VAL_124]], %[[VAL_25]]{{\[}}%[[VAL_34]]] : memref<?xf32>
+// CHECK: ^bb0(%[[VAL_109:.*]]: index, %[[VAL_110:.*]]: index, %[[VAL_111:.*]]: index):
+// CHECK: %[[VAL_112:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_109]]] : memref<?xindex>
+// CHECK: %[[VAL_113:.*]] = load %[[VAL_15]]{{\[}}%[[VAL_110]]] : memref<?xindex>
+// CHECK: %[[VAL_114:.*]] = cmpi eq, %[[VAL_112]], %[[VAL_111]] : index
+// CHECK: %[[VAL_115:.*]] = cmpi eq, %[[VAL_113]], %[[VAL_111]] : index
+// CHECK: %[[VAL_116:.*]] = and %[[VAL_114]], %[[VAL_115]] : i1
+// CHECK: scf.if %[[VAL_116]] {
+// CHECK: %[[VAL_117:.*]] = load %[[VAL_24]]{{\[}}%[[VAL_33]]] : memref<?xf32>
+// CHECK: %[[VAL_118:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_109]]] : memref<?xf32>
+// CHECK: %[[VAL_119:.*]] = load %[[VAL_16]]{{\[}}%[[VAL_110]]] : memref<?xf32>
+// CHECK: %[[VAL_120:.*]] = mulf %[[VAL_118]], %[[VAL_119]] : f32
+// CHECK: %[[VAL_121:.*]] = mulf %[[VAL_120]], %[[VAL_36]] : f32
+// CHECK: %[[VAL_122:.*]] = mulf %[[VAL_121]], %[[VAL_25]] : f32
+// CHECK: %[[VAL_123:.*]] = addf %[[VAL_117]], %[[VAL_122]] : f32
+// CHECK: store %[[VAL_123]], %[[VAL_24]]{{\[}}%[[VAL_33]]] : memref<?xf32>
// CHECK: } else {
// CHECK: }
-// CHECK: %[[VAL_125:.*]] = cmpi eq, %[[VAL_113]], %[[VAL_112]] : index
-// CHECK: %[[VAL_126:.*]] = addi %[[VAL_110]], %[[VAL_9]] : index
-// CHECK: %[[VAL_127:.*]] = select %[[VAL_125]], %[[VAL_126]], %[[VAL_110]] : index
-// CHECK: %[[VAL_128:.*]] = cmpi eq, %[[VAL_114]], %[[VAL_112]] : index
-// CHECK: %[[VAL_129:.*]] = addi %[[VAL_111]], %[[VAL_9]] : index
-// CHECK: %[[VAL_130:.*]] = select %[[VAL_128]], %[[VAL_129]], %[[VAL_111]] : index
-// CHECK: %[[VAL_131:.*]] = addi %[[VAL_112]], %[[VAL_9]] : index
-// CHECK: scf.yield %[[VAL_127]], %[[VAL_130]], %[[VAL_131]] : index, index, index
+// CHECK: %[[VAL_124:.*]] = cmpi eq, %[[VAL_112]], %[[VAL_111]] : index
+// CHECK: %[[VAL_125:.*]] = addi %[[VAL_109]], %[[VAL_8]] : index
+// CHECK: %[[VAL_126:.*]] = select %[[VAL_124]], %[[VAL_125]], %[[VAL_109]] : index
+// CHECK: %[[VAL_127:.*]] = cmpi eq, %[[VAL_113]], %[[VAL_111]] : index
+// CHECK: %[[VAL_128:.*]] = addi %[[VAL_110]], %[[VAL_8]] : index
+// CHECK: %[[VAL_129:.*]] = select %[[VAL_127]], %[[VAL_128]], %[[VAL_110]] : index
+// CHECK: %[[VAL_130:.*]] = addi %[[VAL_111]], %[[VAL_8]] : index
+// CHECK: scf.yield %[[VAL_126]], %[[VAL_129]], %[[VAL_130]] : index, index, index
// CHECK: }
-// CHECK: %[[VAL_132:.*]] = load %[[VAL_25]]{{\[}}%[[VAL_34]]] : memref<?xf32>
-// CHECK: %[[VAL_133:.*]] = scf.for %[[VAL_134:.*]] = %[[VAL_135:.*]]#2 to %[[VAL_46]] step %[[VAL_9]] iter_args(%[[VAL_136:.*]] = %[[VAL_132]]) -> (f32) {
-// CHECK: %[[VAL_137:.*]] = load %[[VAL_20]]{{\[}}%[[VAL_134]]] : memref<?xf32>
-// CHECK: %[[VAL_138:.*]] = addf %[[VAL_136]], %[[VAL_137]] : f32
-// CHECK: scf.yield %[[VAL_138]] : f32
+// CHECK: %[[VAL_131:.*]] = load %[[VAL_24]]{{\[}}%[[VAL_33]]] : memref<?xf32>
+// CHECK: %[[VAL_132:.*]] = scf.for %[[VAL_133:.*]] = %[[VAL_134:.*]]#2 to %[[VAL_45]] step %[[VAL_8]] iter_args(%[[VAL_135:.*]] = %[[VAL_131]]) -> (f32) {
+// CHECK: %[[VAL_136:.*]] = load %[[VAL_19]]{{\[}}%[[VAL_133]]] : memref<?xf32>
+// CHECK: %[[VAL_137:.*]] = addf %[[VAL_135]], %[[VAL_136]] : f32
+// CHECK: scf.yield %[[VAL_137]] : f32
// CHECK: }
-// CHECK: store %[[VAL_139:.*]], %[[VAL_25]]{{\[}}%[[VAL_34]]] : memref<?xf32>
+// CHECK: store %[[VAL_138:.*]], %[[VAL_24]]{{\[}}%[[VAL_33]]] : memref<?xf32>
// CHECK: } else {
-// CHECK: scf.if %[[VAL_8]] {
-// CHECK: %[[VAL_140:.*]] = load %[[VAL_18]]{{\[}}%[[VAL_34]]] : memref<?xindex>
-// CHECK: %[[VAL_141:.*]] = addi %[[VAL_34]], %[[VAL_9]] : index
-// CHECK: %[[VAL_142:.*]] = load %[[VAL_18]]{{\[}}%[[VAL_141]]] : memref<?xindex>
-// CHECK: %[[VAL_143:.*]] = load %[[VAL_25]]{{\[}}%[[VAL_34]]] : memref<?xf32>
-// CHECK: %[[VAL_144:.*]] = scf.for %[[VAL_145:.*]] = %[[VAL_140]] to %[[VAL_142]] step %[[VAL_9]] iter_args(%[[VAL_146:.*]] = %[[VAL_143]]) -> (f32) {
-// CHECK: %[[VAL_147:.*]] = load %[[VAL_20]]{{\[}}%[[VAL_145]]] : memref<?xf32>
-// CHECK: %[[VAL_148:.*]] = addf %[[VAL_146]], %[[VAL_147]] : f32
-// CHECK: scf.yield %[[VAL_148]] : f32
+// CHECK: scf.if %[[VAL_7]] {
+// CHECK: %[[VAL_139:.*]] = load %[[VAL_17]]{{\[}}%[[VAL_33]]] : memref<?xindex>
+// CHECK: %[[VAL_140:.*]] = addi %[[VAL_33]], %[[VAL_8]] : index
+// CHECK: %[[VAL_141:.*]] = load %[[VAL_17]]{{\[}}%[[VAL_140]]] : memref<?xindex>
+// CHECK: %[[VAL_142:.*]] = load %[[VAL_24]]{{\[}}%[[VAL_33]]] : memref<?xf32>
+// CHECK: %[[VAL_143:.*]] = scf.for %[[VAL_144:.*]] = %[[VAL_139]] to %[[VAL_141]] step %[[VAL_8]] iter_args(%[[VAL_145:.*]] = %[[VAL_142]]) -> (f32) {
+// CHECK: %[[VAL_146:.*]] = load %[[VAL_19]]{{\[}}%[[VAL_144]]] : memref<?xf32>
+// CHECK: %[[VAL_147:.*]] = addf %[[VAL_145]], %[[VAL_146]] : f32
+// CHECK: scf.yield %[[VAL_147]] : f32
// CHECK: }
-// CHECK: store %[[VAL_149:.*]], %[[VAL_25]]{{\[}}%[[VAL_34]]] : memref<?xf32>
+// CHECK: store %[[VAL_148:.*]], %[[VAL_24]]{{\[}}%[[VAL_33]]] : memref<?xf32>
// CHECK: } else {
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_150:.*]] = cmpi eq, %[[VAL_35]], %[[VAL_34]] : index
-// CHECK: %[[VAL_151:.*]] = addi %[[VAL_33]], %[[VAL_9]] : index
-// CHECK: %[[VAL_152:.*]] = select %[[VAL_150]], %[[VAL_151]], %[[VAL_33]] : index
-// CHECK: %[[VAL_153:.*]] = addi %[[VAL_34]], %[[VAL_9]] : index
-// CHECK: scf.yield %[[VAL_152]], %[[VAL_153]] : index, index
+// CHECK: %[[VAL_149:.*]] = cmpi eq, %[[VAL_34]], %[[VAL_33]] : index
+// CHECK: %[[VAL_150:.*]] = addi %[[VAL_32]], %[[VAL_8]] : index
+// CHECK: %[[VAL_151:.*]] = select %[[VAL_149]], %[[VAL_150]], %[[VAL_32]] : index
+// CHECK: %[[VAL_152:.*]] = addi %[[VAL_33]], %[[VAL_8]] : index
+// CHECK: scf.yield %[[VAL_151]], %[[VAL_152]] : index, index
// CHECK: }
-// CHECK: scf.for %[[VAL_154:.*]] = %[[VAL_155:.*]]#1 to %[[VAL_24]] step %[[VAL_9]] {
-// CHECK: %[[VAL_156:.*]] = load %[[VAL_18]]{{\[}}%[[VAL_154]]] : memref<?xindex>
-// CHECK: %[[VAL_157:.*]] = addi %[[VAL_154]], %[[VAL_9]] : index
-// CHECK: %[[VAL_158:.*]] = load %[[VAL_18]]{{\[}}%[[VAL_157]]] : memref<?xindex>
-// CHECK: %[[VAL_159:.*]] = load %[[VAL_25]]{{\[}}%[[VAL_154]]] : memref<?xf32>
-// CHECK: %[[VAL_160:.*]] = scf.for %[[VAL_161:.*]] = %[[VAL_156]] to %[[VAL_158]] step %[[VAL_9]] iter_args(%[[VAL_162:.*]] = %[[VAL_159]]) -> (f32) {
-// CHECK: %[[VAL_163:.*]] = load %[[VAL_20]]{{\[}}%[[VAL_161]]] : memref<?xf32>
-// CHECK: %[[VAL_164:.*]] = addf %[[VAL_162]], %[[VAL_163]] : f32
-// CHECK: scf.yield %[[VAL_164]] : f32
+// CHECK: scf.for %[[VAL_153:.*]] = %[[VAL_154:.*]]#1 to %[[VAL_22]] step %[[VAL_8]] {
+// CHECK: %[[VAL_155:.*]] = load %[[VAL_17]]{{\[}}%[[VAL_153]]] : memref<?xindex>
+// CHECK: %[[VAL_156:.*]] = addi %[[VAL_153]], %[[VAL_8]] : index
+// CHECK: %[[VAL_157:.*]] = load %[[VAL_17]]{{\[}}%[[VAL_156]]] : memref<?xindex>
+// CHECK: %[[VAL_158:.*]] = load %[[VAL_24]]{{\[}}%[[VAL_153]]] : memref<?xf32>
+// CHECK: %[[VAL_159:.*]] = scf.for %[[VAL_160:.*]] = %[[VAL_155]] to %[[VAL_157]] step %[[VAL_8]] iter_args(%[[VAL_161:.*]] = %[[VAL_158]]) -> (f32) {
+// CHECK: %[[VAL_162:.*]] = load %[[VAL_19]]{{\[}}%[[VAL_160]]] : memref<?xf32>
+// CHECK: %[[VAL_163:.*]] = addf %[[VAL_161]], %[[VAL_162]] : f32
+// CHECK: scf.yield %[[VAL_163]] : f32
// CHECK: }
-// CHECK: store %[[VAL_165:.*]], %[[VAL_25]]{{\[}}%[[VAL_154]]] : memref<?xf32>
+// CHECK: store %[[VAL_164:.*]], %[[VAL_24]]{{\[}}%[[VAL_153]]] : memref<?xf32>
// CHECK: }
-// CHECK: %[[VAL_166:.*]] = tensor_load %[[VAL_25]] : memref<?xf32>
-// CHECK: return %[[VAL_166]] : tensor<?xf32>
+// CHECK: %[[VAL_165:.*]] = tensor_load %[[VAL_24]] : memref<?xf32>
+// CHECK: return %[[VAL_165]] : tensor<?xf32>
// CHECK: }
func @sum_kernel_with_inv(%arga: tensor<?x?xf32>,
%argb: tensor<?x?xf32>,
// CHECK: %[[VAL_5:.*]] = constant 8 : index
// CHECK: %[[VAL_6:.*]] = constant 0 : index
// CHECK: %[[VAL_7:.*]] = constant 1 : index
-// CHECK: %[[VAL_8:.*]] = alloca() : memref<32x16x8xf32>
-// CHECK: %[[VAL_9:.*]] = alloca() : memref<32x16x8xf32>
-// CHECK: %[[VAL_10:.*]] = alloca() : memref<32x16x8xf32>
-// CHECK: scf.for %[[VAL_11:.*]] = %[[VAL_6]] to %[[VAL_3]] step %[[VAL_7]] {
-// CHECK: scf.for %[[VAL_12:.*]] = %[[VAL_6]] to %[[VAL_4]] step %[[VAL_7]] {
-// CHECK: scf.for %[[VAL_13:.*]] = %[[VAL_6]] to %[[VAL_5]] step %[[VAL_7]] {
-// CHECK: %[[VAL_14:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_11]], %[[VAL_12]], %[[VAL_13]]] : memref<32x16x8xf32>
-// CHECK: %[[VAL_15:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_11]], %[[VAL_12]], %[[VAL_13]]] : memref<32x16x8xf32>
-// CHECK: %[[VAL_16:.*]] = addf %[[VAL_14]], %[[VAL_15]] : f32
-// CHECK: store %[[VAL_16]], %[[VAL_10]]{{\[}}%[[VAL_11]], %[[VAL_12]], %[[VAL_13]]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_8:.*]] = tensor_to_memref %[[VAL_0]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_9:.*]] = tensor_to_memref %[[VAL_1]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_10:.*]] = tensor_to_memref %[[VAL_2]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_11:.*]] = alloc() : memref<32x16x8xf32>
+// CHECK: linalg.copy(%[[VAL_10]], %[[VAL_11]]) : memref<32x16x8xf32>, memref<32x16x8xf32>
+// CHECK: scf.for %[[VAL_12:.*]] = %[[VAL_6]] to %[[VAL_3]] step %[[VAL_7]] {
+// CHECK: scf.for %[[VAL_13:.*]] = %[[VAL_6]] to %[[VAL_4]] step %[[VAL_7]] {
+// CHECK: scf.for %[[VAL_14:.*]] = %[[VAL_6]] to %[[VAL_5]] step %[[VAL_7]] {
+// CHECK: %[[VAL_15:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_12]], %[[VAL_13]], %[[VAL_14]]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_16:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_12]], %[[VAL_13]], %[[VAL_14]]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_17:.*]] = addf %[[VAL_15]], %[[VAL_16]] : f32
+// CHECK: store %[[VAL_17]], %[[VAL_11]]{{\[}}%[[VAL_12]], %[[VAL_13]], %[[VAL_14]]] : memref<32x16x8xf32>
// CHECK: }
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_17:.*]] = tensor_load %[[VAL_10]] : memref<32x16x8xf32>
-// CHECK: return %[[VAL_17]] : tensor<32x16x8xf32>
+// CHECK: %[[VAL_18:.*]] = tensor_load %[[VAL_11]] : memref<32x16x8xf32>
+// CHECK: return %[[VAL_18]] : tensor<32x16x8xf32>
// CHECK: }
func @add_ddd(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
%0 = linalg.generic #trait_ddd
// CHECK: %[[VAL_5:.*]] = constant 8 : index
// CHECK: %[[VAL_6:.*]] = constant 0 : index
// CHECK: %[[VAL_7:.*]] = constant 1 : index
-// CHECK: %[[VAL_8:.*]] = alloca() : memref<32x16x8xf32>
-// CHECK: %[[VAL_9:.*]] = alloca() : memref<32x16x8xf32>
-// CHECK: %[[VAL_10:.*]] = alloca() : memref<32x16x8xf32>
-// CHECK: scf.for %[[VAL_11:.*]] = %[[VAL_6]] to %[[VAL_3]] step %[[VAL_7]] {
-// CHECK: scf.for %[[VAL_12:.*]] = %[[VAL_6]] to %[[VAL_4]] step %[[VAL_7]] {
-// CHECK: scf.for %[[VAL_13:.*]] = %[[VAL_6]] to %[[VAL_5]] step %[[VAL_7]] {
-// CHECK: %[[VAL_14:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_11]], %[[VAL_12]], %[[VAL_13]]] : memref<32x16x8xf32>
-// CHECK: %[[VAL_15:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_11]], %[[VAL_12]], %[[VAL_13]]] : memref<32x16x8xf32>
-// CHECK: %[[VAL_16:.*]] = mulf %[[VAL_14]], %[[VAL_15]] : f32
-// CHECK: store %[[VAL_16]], %[[VAL_10]]{{\[}}%[[VAL_11]], %[[VAL_12]], %[[VAL_13]]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_8:.*]] = tensor_to_memref %[[VAL_0]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_9:.*]] = tensor_to_memref %[[VAL_1]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_10:.*]] = tensor_to_memref %[[VAL_2]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_11:.*]] = alloc() : memref<32x16x8xf32>
+// CHECK: linalg.copy(%[[VAL_10]], %[[VAL_11]]) : memref<32x16x8xf32>, memref<32x16x8xf32>
+// CHECK: scf.for %[[VAL_12:.*]] = %[[VAL_6]] to %[[VAL_3]] step %[[VAL_7]] {
+// CHECK: scf.for %[[VAL_13:.*]] = %[[VAL_6]] to %[[VAL_4]] step %[[VAL_7]] {
+// CHECK: scf.for %[[VAL_14:.*]] = %[[VAL_6]] to %[[VAL_5]] step %[[VAL_7]] {
+// CHECK: %[[VAL_15:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_12]], %[[VAL_13]], %[[VAL_14]]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_16:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_12]], %[[VAL_13]], %[[VAL_14]]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_17:.*]] = mulf %[[VAL_15]], %[[VAL_16]] : f32
+// CHECK: store %[[VAL_17]], %[[VAL_11]]{{\[}}%[[VAL_12]], %[[VAL_13]], %[[VAL_14]]] : memref<32x16x8xf32>
// CHECK: }
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_17:.*]] = tensor_load %[[VAL_10]] : memref<32x16x8xf32>
-// CHECK: return %[[VAL_17]] : tensor<32x16x8xf32>
+// CHECK: %[[VAL_18:.*]] = tensor_load %[[VAL_11]] : memref<32x16x8xf32>
+// CHECK: return %[[VAL_18]] : tensor<32x16x8xf32>
// CHECK: }
func @mul_ddd(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
%0 = linalg.generic #trait_ddd
// CHECK-SAME: %[[VAL_0:.*0]]: tensor<32x16x8xf32>,
// CHECK-SAME: %[[VAL_1:.*1]]: tensor<32x16x8xf32>,
// CHECK-SAME: %[[VAL_2:.*2]]: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
-// CHECK: %[[VAL_3:.*]] = constant 999 : index
+// CHECK: %[[VAL_3:.*]] = constant 2 : index
// CHECK: %[[VAL_4:.*]] = constant 32 : index
// CHECK: %[[VAL_5:.*]] = constant 16 : index
// CHECK: %[[VAL_6:.*]] = constant 8 : index
// CHECK: %[[VAL_7:.*]] = constant 0 : index
// CHECK: %[[VAL_8:.*]] = constant true
// CHECK: %[[VAL_9:.*]] = constant 1 : index
-// CHECK: %[[VAL_10:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_12:.*]] = alloca(%[[VAL_3]]) : memref<?xf32>
-// CHECK: %[[VAL_13:.*]] = alloca() : memref<32x16x8xf32>
-// CHECK: %[[VAL_14:.*]] = alloca() : memref<32x16x8xf32>
-// CHECK: scf.for %[[VAL_15:.*]] = %[[VAL_7]] to %[[VAL_4]] step %[[VAL_9]] {
-// CHECK: scf.for %[[VAL_16:.*]] = %[[VAL_7]] to %[[VAL_5]] step %[[VAL_9]] {
-// CHECK: %[[VAL_17:.*]] = muli %[[VAL_15]], %[[VAL_5]] : index
-// CHECK: %[[VAL_18:.*]] = addi %[[VAL_17]], %[[VAL_16]] : index
-// CHECK: %[[VAL_19:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_18]]] : memref<?xindex>
-// CHECK: %[[VAL_20:.*]] = addi %[[VAL_18]], %[[VAL_9]] : index
-// CHECK: %[[VAL_21:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_20]]] : memref<?xindex>
-// CHECK: %[[VAL_22:.*]]:2 = scf.while (%[[VAL_23:.*]] = %[[VAL_19]], %[[VAL_24:.*]] = %[[VAL_7]]) : (index, index) -> (index, index) {
-// CHECK: %[[VAL_25:.*]] = cmpi ult, %[[VAL_23]], %[[VAL_21]] : index
-// CHECK: scf.condition(%[[VAL_25]]) %[[VAL_23]], %[[VAL_24]] : index, index
+// CHECK: %[[VAL_10:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_12:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
+// CHECK: %[[VAL_13:.*]] = tensor_to_memref %[[VAL_1]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_14:.*]] = tensor_to_memref %[[VAL_2]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_15:.*]] = alloc() : memref<32x16x8xf32>
+// CHECK: linalg.copy(%[[VAL_14]], %[[VAL_15]]) : memref<32x16x8xf32>, memref<32x16x8xf32>
+// CHECK: scf.for %[[VAL_16:.*]] = %[[VAL_7]] to %[[VAL_4]] step %[[VAL_9]] {
+// CHECK: scf.for %[[VAL_17:.*]] = %[[VAL_7]] to %[[VAL_5]] step %[[VAL_9]] {
+// CHECK: %[[VAL_18:.*]] = muli %[[VAL_16]], %[[VAL_5]] : index
+// CHECK: %[[VAL_19:.*]] = addi %[[VAL_18]], %[[VAL_17]] : index
+// CHECK: %[[VAL_20:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_19]]] : memref<?xindex>
+// CHECK: %[[VAL_21:.*]] = addi %[[VAL_19]], %[[VAL_9]] : index
+// CHECK: %[[VAL_22:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_21]]] : memref<?xindex>
+// CHECK: %[[VAL_23:.*]]:2 = scf.while (%[[VAL_24:.*]] = %[[VAL_20]], %[[VAL_25:.*]] = %[[VAL_7]]) : (index, index) -> (index, index) {
+// CHECK: %[[VAL_26:.*]] = cmpi ult, %[[VAL_24]], %[[VAL_22]] : index
+// CHECK: scf.condition(%[[VAL_26]]) %[[VAL_24]], %[[VAL_25]] : index, index
// CHECK: } do {
-// CHECK: ^bb0(%[[VAL_26:.*]]: index, %[[VAL_27:.*]]: index):
-// CHECK: %[[VAL_28:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_26]]] : memref<?xindex>
-// CHECK: %[[VAL_29:.*]] = cmpi eq, %[[VAL_28]], %[[VAL_27]] : index
-// CHECK: scf.if %[[VAL_29]] {
-// CHECK: %[[VAL_30:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_26]]] : memref<?xf32>
-// CHECK: %[[VAL_31:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_15]], %[[VAL_16]], %[[VAL_27]]] : memref<32x16x8xf32>
-// CHECK: %[[VAL_32:.*]] = addf %[[VAL_30]], %[[VAL_31]] : f32
-// CHECK: store %[[VAL_32]], %[[VAL_14]]{{\[}}%[[VAL_15]], %[[VAL_16]], %[[VAL_27]]] : memref<32x16x8xf32>
+// CHECK: ^bb0(%[[VAL_27:.*]]: index, %[[VAL_28:.*]]: index):
+// CHECK: %[[VAL_29:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_27]]] : memref<?xindex>
+// CHECK: %[[VAL_30:.*]] = cmpi eq, %[[VAL_29]], %[[VAL_28]] : index
+// CHECK: scf.if %[[VAL_30]] {
+// CHECK: %[[VAL_31:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_27]]] : memref<?xf32>
+// CHECK: %[[VAL_32:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_16]], %[[VAL_17]], %[[VAL_28]]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_33:.*]] = addf %[[VAL_31]], %[[VAL_32]] : f32
+// CHECK: store %[[VAL_33]], %[[VAL_15]]{{\[}}%[[VAL_16]], %[[VAL_17]], %[[VAL_28]]] : memref<32x16x8xf32>
// CHECK: } else {
// CHECK: scf.if %[[VAL_8]] {
-// CHECK: %[[VAL_33:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_15]], %[[VAL_16]], %[[VAL_27]]] : memref<32x16x8xf32>
-// CHECK: store %[[VAL_33]], %[[VAL_14]]{{\[}}%[[VAL_15]], %[[VAL_16]], %[[VAL_27]]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_34:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_16]], %[[VAL_17]], %[[VAL_28]]] : memref<32x16x8xf32>
+// CHECK: store %[[VAL_34]], %[[VAL_15]]{{\[}}%[[VAL_16]], %[[VAL_17]], %[[VAL_28]]] : memref<32x16x8xf32>
// CHECK: } else {
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_34:.*]] = cmpi eq, %[[VAL_28]], %[[VAL_27]] : index
-// CHECK: %[[VAL_35:.*]] = addi %[[VAL_26]], %[[VAL_9]] : index
-// CHECK: %[[VAL_36:.*]] = select %[[VAL_34]], %[[VAL_35]], %[[VAL_26]] : index
-// CHECK: %[[VAL_37:.*]] = addi %[[VAL_27]], %[[VAL_9]] : index
-// CHECK: scf.yield %[[VAL_36]], %[[VAL_37]] : index, index
+// CHECK: %[[VAL_35:.*]] = cmpi eq, %[[VAL_29]], %[[VAL_28]] : index
+// CHECK: %[[VAL_36:.*]] = addi %[[VAL_27]], %[[VAL_9]] : index
+// CHECK: %[[VAL_37:.*]] = select %[[VAL_35]], %[[VAL_36]], %[[VAL_27]] : index
+// CHECK: %[[VAL_38:.*]] = addi %[[VAL_28]], %[[VAL_9]] : index
+// CHECK: scf.yield %[[VAL_37]], %[[VAL_38]] : index, index
// CHECK: }
-// CHECK: scf.for %[[VAL_38:.*]] = %[[VAL_39:.*]]#1 to %[[VAL_6]] step %[[VAL_9]] {
-// CHECK: %[[VAL_40:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_15]], %[[VAL_16]], %[[VAL_38]]] : memref<32x16x8xf32>
-// CHECK: store %[[VAL_40]], %[[VAL_14]]{{\[}}%[[VAL_15]], %[[VAL_16]], %[[VAL_38]]] : memref<32x16x8xf32>
+// CHECK: scf.for %[[VAL_39:.*]] = %[[VAL_40:.*]]#1 to %[[VAL_6]] step %[[VAL_9]] {
+// CHECK: %[[VAL_41:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_16]], %[[VAL_17]], %[[VAL_39]]] : memref<32x16x8xf32>
+// CHECK: store %[[VAL_41]], %[[VAL_15]]{{\[}}%[[VAL_16]], %[[VAL_17]], %[[VAL_39]]] : memref<32x16x8xf32>
// CHECK: }
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_41:.*]] = tensor_load %[[VAL_14]] : memref<32x16x8xf32>
-// CHECK: return %[[VAL_41]] : tensor<32x16x8xf32>
+// CHECK: %[[VAL_42:.*]] = tensor_load %[[VAL_15]] : memref<32x16x8xf32>
+// CHECK: return %[[VAL_42]] : tensor<32x16x8xf32>
// CHECK: }
func @add_dds(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
%0 = linalg.generic #trait_dds
// CHECK-SAME: %[[VAL_0:.*0]]: tensor<32x16x8xf32>,
// CHECK-SAME: %[[VAL_1:.*1]]: tensor<32x16x8xf32>,
// CHECK-SAME: %[[VAL_2:.*2]]: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
-// CHECK: %[[VAL_3:.*]] = constant 999 : index
+// CHECK: %[[VAL_3:.*]] = constant 2 : index
// CHECK: %[[VAL_4:.*]] = constant 32 : index
// CHECK: %[[VAL_5:.*]] = constant 16 : index
// CHECK: %[[VAL_6:.*]] = constant 0 : index
// CHECK: %[[VAL_7:.*]] = constant 1 : index
-// CHECK: %[[VAL_8:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = alloca(%[[VAL_3]]) : memref<?xf32>
-// CHECK: %[[VAL_11:.*]] = alloca() : memref<32x16x8xf32>
-// CHECK: %[[VAL_12:.*]] = alloca() : memref<32x16x8xf32>
-// CHECK: scf.for %[[VAL_13:.*]] = %[[VAL_6]] to %[[VAL_4]] step %[[VAL_7]] {
-// CHECK: scf.for %[[VAL_14:.*]] = %[[VAL_6]] to %[[VAL_5]] step %[[VAL_7]] {
-// CHECK: %[[VAL_15:.*]] = muli %[[VAL_13]], %[[VAL_5]] : index
-// CHECK: %[[VAL_16:.*]] = addi %[[VAL_15]], %[[VAL_14]] : index
-// CHECK: %[[VAL_17:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_16]]] : memref<?xindex>
-// CHECK: %[[VAL_18:.*]] = addi %[[VAL_16]], %[[VAL_7]] : index
-// CHECK: %[[VAL_19:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_18]]] : memref<?xindex>
-// CHECK: scf.for %[[VAL_20:.*]] = %[[VAL_17]] to %[[VAL_19]] step %[[VAL_7]] {
-// CHECK: %[[VAL_21:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_20]]] : memref<?xindex>
-// CHECK: %[[VAL_22:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_20]]] : memref<?xf32>
-// CHECK: %[[VAL_23:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_13]], %[[VAL_14]], %[[VAL_21]]] : memref<32x16x8xf32>
-// CHECK: %[[VAL_24:.*]] = mulf %[[VAL_22]], %[[VAL_23]] : f32
-// CHECK: store %[[VAL_24]], %[[VAL_12]]{{\[}}%[[VAL_13]], %[[VAL_14]], %[[VAL_21]]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_8:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_10:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
+// CHECK: %[[VAL_11:.*]] = tensor_to_memref %[[VAL_1]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_12:.*]] = tensor_to_memref %[[VAL_2]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_13:.*]] = alloc() : memref<32x16x8xf32>
+// CHECK: linalg.copy(%[[VAL_12]], %[[VAL_13]]) : memref<32x16x8xf32>, memref<32x16x8xf32>
+// CHECK: scf.for %[[VAL_14:.*]] = %[[VAL_6]] to %[[VAL_4]] step %[[VAL_7]] {
+// CHECK: scf.for %[[VAL_15:.*]] = %[[VAL_6]] to %[[VAL_5]] step %[[VAL_7]] {
+// CHECK: %[[VAL_16:.*]] = muli %[[VAL_14]], %[[VAL_5]] : index
+// CHECK: %[[VAL_17:.*]] = addi %[[VAL_16]], %[[VAL_15]] : index
+// CHECK: %[[VAL_18:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_17]]] : memref<?xindex>
+// CHECK: %[[VAL_19:.*]] = addi %[[VAL_17]], %[[VAL_7]] : index
+// CHECK: %[[VAL_20:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_19]]] : memref<?xindex>
+// CHECK: scf.for %[[VAL_21:.*]] = %[[VAL_18]] to %[[VAL_20]] step %[[VAL_7]] {
+// CHECK: %[[VAL_22:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_21]]] : memref<?xindex>
+// CHECK: %[[VAL_23:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_21]]] : memref<?xf32>
+// CHECK: %[[VAL_24:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_14]], %[[VAL_15]], %[[VAL_22]]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_25:.*]] = mulf %[[VAL_23]], %[[VAL_24]] : f32
+// CHECK: store %[[VAL_25]], %[[VAL_13]]{{\[}}%[[VAL_14]], %[[VAL_15]], %[[VAL_22]]] : memref<32x16x8xf32>
// CHECK: }
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_25:.*]] = tensor_load %[[VAL_12]] : memref<32x16x8xf32>
-// CHECK: return %[[VAL_25]] : tensor<32x16x8xf32>
+// CHECK: %[[VAL_26:.*]] = tensor_load %[[VAL_13]] : memref<32x16x8xf32>
+// CHECK: return %[[VAL_26]] : tensor<32x16x8xf32>
// CHECK: }
func @mul_dds(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
%0 = linalg.generic #trait_dds
// CHECK-SAME: %[[VAL_0:.*0]]: tensor<32x16x8xf32>,
// CHECK-SAME: %[[VAL_1:.*1]]: tensor<32x16x8xf32>,
// CHECK-SAME: %[[VAL_2:.*2]]: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
-// CHECK: %[[VAL_3:.*]] = constant 999 : index
-// CHECK: %[[VAL_4:.*]] = constant 32 : index
-// CHECK: %[[VAL_5:.*]] = constant 16 : index
-// CHECK: %[[VAL_6:.*]] = constant 8 : index
-// CHECK: %[[VAL_7:.*]] = constant true
-// CHECK: %[[VAL_8:.*]] = constant 0 : index
-// CHECK: %[[VAL_9:.*]] = constant 1 : index
-// CHECK: %[[VAL_10:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_12:.*]] = alloca(%[[VAL_3]]) : memref<?xf32>
-// CHECK: %[[VAL_13:.*]] = alloca() : memref<32x16x8xf32>
-// CHECK: %[[VAL_14:.*]] = alloca() : memref<32x16x8xf32>
-// CHECK: scf.for %[[VAL_15:.*]] = %[[VAL_8]] to %[[VAL_4]] step %[[VAL_9]] {
-// CHECK: %[[VAL_16:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_15]]] : memref<?xindex>
-// CHECK: %[[VAL_17:.*]] = addi %[[VAL_15]], %[[VAL_9]] : index
-// CHECK: %[[VAL_18:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_17]]] : memref<?xindex>
-// CHECK: %[[VAL_19:.*]]:2 = scf.while (%[[VAL_20:.*]] = %[[VAL_16]], %[[VAL_21:.*]] = %[[VAL_8]]) : (index, index) -> (index, index) {
+// CHECK: %[[VAL_3:.*]] = constant 32 : index
+// CHECK: %[[VAL_4:.*]] = constant 16 : index
+// CHECK: %[[VAL_5:.*]] = constant 8 : index
+// CHECK: %[[VAL_6:.*]] = constant true
+// CHECK: %[[VAL_7:.*]] = constant 0 : index
+// CHECK: %[[VAL_8:.*]] = constant 1 : index
+// CHECK: %[[VAL_9:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_10:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
+// CHECK: %[[VAL_12:.*]] = tensor_to_memref %[[VAL_1]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_13:.*]] = tensor_to_memref %[[VAL_2]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_14:.*]] = alloc() : memref<32x16x8xf32>
+// CHECK: linalg.copy(%[[VAL_13]], %[[VAL_14]]) : memref<32x16x8xf32>, memref<32x16x8xf32>
+// CHECK: scf.for %[[VAL_15:.*]] = %[[VAL_7]] to %[[VAL_3]] step %[[VAL_8]] {
+// CHECK: %[[VAL_16:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_15]]] : memref<?xindex>
+// CHECK: %[[VAL_17:.*]] = addi %[[VAL_15]], %[[VAL_8]] : index
+// CHECK: %[[VAL_18:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_17]]] : memref<?xindex>
+// CHECK: %[[VAL_19:.*]]:2 = scf.while (%[[VAL_20:.*]] = %[[VAL_16]], %[[VAL_21:.*]] = %[[VAL_7]]) : (index, index) -> (index, index) {
// CHECK: %[[VAL_22:.*]] = cmpi ult, %[[VAL_20]], %[[VAL_18]] : index
// CHECK: scf.condition(%[[VAL_22]]) %[[VAL_20]], %[[VAL_21]] : index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_23:.*]]: index, %[[VAL_24:.*]]: index):
-// CHECK: %[[VAL_25:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_23]]] : memref<?xindex>
+// CHECK: %[[VAL_25:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_23]]] : memref<?xindex>
// CHECK: %[[VAL_26:.*]] = cmpi eq, %[[VAL_25]], %[[VAL_24]] : index
// CHECK: scf.if %[[VAL_26]] {
-// CHECK: scf.for %[[VAL_27:.*]] = %[[VAL_8]] to %[[VAL_6]] step %[[VAL_9]] {
-// CHECK: %[[VAL_28:.*]] = muli %[[VAL_23]], %[[VAL_6]] : index
+// CHECK: scf.for %[[VAL_27:.*]] = %[[VAL_7]] to %[[VAL_5]] step %[[VAL_8]] {
+// CHECK: %[[VAL_28:.*]] = muli %[[VAL_23]], %[[VAL_5]] : index
// CHECK: %[[VAL_29:.*]] = addi %[[VAL_28]], %[[VAL_27]] : index
-// CHECK: %[[VAL_30:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_29]]] : memref<?xf32>
-// CHECK: %[[VAL_31:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_15]], %[[VAL_24]], %[[VAL_27]]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_30:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_29]]] : memref<?xf32>
+// CHECK: %[[VAL_31:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_15]], %[[VAL_24]], %[[VAL_27]]] : memref<32x16x8xf32>
// CHECK: %[[VAL_32:.*]] = addf %[[VAL_30]], %[[VAL_31]] : f32
// CHECK: store %[[VAL_32]], %[[VAL_14]]{{\[}}%[[VAL_15]], %[[VAL_24]], %[[VAL_27]]] : memref<32x16x8xf32>
// CHECK: }
// CHECK: } else {
-// CHECK: scf.if %[[VAL_7]] {
-// CHECK: scf.for %[[VAL_33:.*]] = %[[VAL_8]] to %[[VAL_6]] step %[[VAL_9]] {
-// CHECK: %[[VAL_34:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_15]], %[[VAL_24]], %[[VAL_33]]] : memref<32x16x8xf32>
+// CHECK: scf.if %[[VAL_6]] {
+// CHECK: scf.for %[[VAL_33:.*]] = %[[VAL_7]] to %[[VAL_5]] step %[[VAL_8]] {
+// CHECK: %[[VAL_34:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_15]], %[[VAL_24]], %[[VAL_33]]] : memref<32x16x8xf32>
// CHECK: store %[[VAL_34]], %[[VAL_14]]{{\[}}%[[VAL_15]], %[[VAL_24]], %[[VAL_33]]] : memref<32x16x8xf32>
// CHECK: }
// CHECK: } else {
// CHECK: }
// CHECK: }
// CHECK: %[[VAL_35:.*]] = cmpi eq, %[[VAL_25]], %[[VAL_24]] : index
-// CHECK: %[[VAL_36:.*]] = addi %[[VAL_23]], %[[VAL_9]] : index
+// CHECK: %[[VAL_36:.*]] = addi %[[VAL_23]], %[[VAL_8]] : index
// CHECK: %[[VAL_37:.*]] = select %[[VAL_35]], %[[VAL_36]], %[[VAL_23]] : index
-// CHECK: %[[VAL_38:.*]] = addi %[[VAL_24]], %[[VAL_9]] : index
+// CHECK: %[[VAL_38:.*]] = addi %[[VAL_24]], %[[VAL_8]] : index
// CHECK: scf.yield %[[VAL_37]], %[[VAL_38]] : index, index
// CHECK: }
-// CHECK: scf.for %[[VAL_39:.*]] = %[[VAL_40:.*]]#1 to %[[VAL_5]] step %[[VAL_9]] {
-// CHECK: scf.for %[[VAL_41:.*]] = %[[VAL_8]] to %[[VAL_6]] step %[[VAL_9]] {
-// CHECK: %[[VAL_42:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_15]], %[[VAL_39]], %[[VAL_41]]] : memref<32x16x8xf32>
+// CHECK: scf.for %[[VAL_39:.*]] = %[[VAL_40:.*]]#1 to %[[VAL_4]] step %[[VAL_8]] {
+// CHECK: scf.for %[[VAL_41:.*]] = %[[VAL_7]] to %[[VAL_5]] step %[[VAL_8]] {
+// CHECK: %[[VAL_42:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_15]], %[[VAL_39]], %[[VAL_41]]] : memref<32x16x8xf32>
// CHECK: store %[[VAL_42]], %[[VAL_14]]{{\[}}%[[VAL_15]], %[[VAL_39]], %[[VAL_41]]] : memref<32x16x8xf32>
// CHECK: }
// CHECK: }
// CHECK-SAME: %[[VAL_0:.*0]]: tensor<32x16x8xf32>,
// CHECK-SAME: %[[VAL_1:.*1]]: tensor<32x16x8xf32>,
// CHECK-SAME: %[[VAL_2:.*2]]: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
-// CHECK: %[[VAL_3:.*]] = constant 999 : index
-// CHECK: %[[VAL_4:.*]] = constant 32 : index
-// CHECK: %[[VAL_5:.*]] = constant 8 : index
-// CHECK: %[[VAL_6:.*]] = constant 0 : index
-// CHECK: %[[VAL_7:.*]] = constant 1 : index
-// CHECK: %[[VAL_8:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = alloca(%[[VAL_3]]) : memref<?xf32>
-// CHECK: %[[VAL_11:.*]] = alloca() : memref<32x16x8xf32>
-// CHECK: %[[VAL_12:.*]] = alloca() : memref<32x16x8xf32>
-// CHECK: scf.for %[[VAL_13:.*]] = %[[VAL_6]] to %[[VAL_4]] step %[[VAL_7]] {
-// CHECK: %[[VAL_14:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_13]]] : memref<?xindex>
-// CHECK: %[[VAL_15:.*]] = addi %[[VAL_13]], %[[VAL_7]] : index
-// CHECK: %[[VAL_16:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_15]]] : memref<?xindex>
-// CHECK: scf.for %[[VAL_17:.*]] = %[[VAL_14]] to %[[VAL_16]] step %[[VAL_7]] {
-// CHECK: %[[VAL_18:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_17]]] : memref<?xindex>
-// CHECK: scf.for %[[VAL_19:.*]] = %[[VAL_6]] to %[[VAL_5]] step %[[VAL_7]] {
-// CHECK: %[[VAL_20:.*]] = muli %[[VAL_17]], %[[VAL_5]] : index
+// CHECK: %[[VAL_3:.*]] = constant 32 : index
+// CHECK: %[[VAL_4:.*]] = constant 8 : index
+// CHECK: %[[VAL_5:.*]] = constant 0 : index
+// CHECK: %[[VAL_6:.*]] = constant 1 : index
+// CHECK: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_6]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_6]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
+// CHECK: %[[VAL_10:.*]] = tensor_to_memref %[[VAL_1]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_11:.*]] = tensor_to_memref %[[VAL_2]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_12:.*]] = alloc() : memref<32x16x8xf32>
+// CHECK: linalg.copy(%[[VAL_11]], %[[VAL_12]]) : memref<32x16x8xf32>, memref<32x16x8xf32>
+// CHECK: scf.for %[[VAL_13:.*]] = %[[VAL_5]] to %[[VAL_3]] step %[[VAL_6]] {
+// CHECK: %[[VAL_14:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_13]]] : memref<?xindex>
+// CHECK: %[[VAL_15:.*]] = addi %[[VAL_13]], %[[VAL_6]] : index
+// CHECK: %[[VAL_16:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_15]]] : memref<?xindex>
+// CHECK: scf.for %[[VAL_17:.*]] = %[[VAL_14]] to %[[VAL_16]] step %[[VAL_6]] {
+// CHECK: %[[VAL_18:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_17]]] : memref<?xindex>
+// CHECK: scf.for %[[VAL_19:.*]] = %[[VAL_5]] to %[[VAL_4]] step %[[VAL_6]] {
+// CHECK: %[[VAL_20:.*]] = muli %[[VAL_17]], %[[VAL_4]] : index
// CHECK: %[[VAL_21:.*]] = addi %[[VAL_20]], %[[VAL_19]] : index
-// CHECK: %[[VAL_22:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_21]]] : memref<?xf32>
-// CHECK: %[[VAL_23:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_13]], %[[VAL_18]], %[[VAL_19]]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_22:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_21]]] : memref<?xf32>
+// CHECK: %[[VAL_23:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_13]], %[[VAL_18]], %[[VAL_19]]] : memref<32x16x8xf32>
// CHECK: %[[VAL_24:.*]] = mulf %[[VAL_22]], %[[VAL_23]] : f32
// CHECK: store %[[VAL_24]], %[[VAL_12]]{{\[}}%[[VAL_13]], %[[VAL_18]], %[[VAL_19]]] : memref<32x16x8xf32>
// CHECK: }
// CHECK-SAME: %[[VAL_0:.*0]]: tensor<32x16x8xf32>,
// CHECK-SAME: %[[VAL_1:.*1]]: tensor<32x16x8xf32>,
// CHECK-SAME: %[[VAL_2:.*2]]: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
-// CHECK: %[[VAL_3:.*]] = constant 999 : index
+// CHECK: %[[VAL_3:.*]] = constant 2 : index
// CHECK: %[[VAL_4:.*]] = constant 32 : index
// CHECK: %[[VAL_5:.*]] = constant 16 : index
// CHECK: %[[VAL_6:.*]] = constant 8 : index
// CHECK: %[[VAL_7:.*]] = constant true
// CHECK: %[[VAL_8:.*]] = constant 0 : index
// CHECK: %[[VAL_9:.*]] = constant 1 : index
-// CHECK: %[[VAL_10:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_12:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_13:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_14:.*]] = alloca(%[[VAL_3]]) : memref<?xf32>
-// CHECK: %[[VAL_15:.*]] = alloca() : memref<32x16x8xf32>
-// CHECK: %[[VAL_16:.*]] = alloca() : memref<32x16x8xf32>
-// CHECK: scf.for %[[VAL_17:.*]] = %[[VAL_8]] to %[[VAL_4]] step %[[VAL_9]] {
-// CHECK: %[[VAL_18:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_17]]] : memref<?xindex>
-// CHECK: %[[VAL_19:.*]] = addi %[[VAL_17]], %[[VAL_9]] : index
-// CHECK: %[[VAL_20:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_19]]] : memref<?xindex>
-// CHECK: %[[VAL_21:.*]]:2 = scf.while (%[[VAL_22:.*]] = %[[VAL_18]], %[[VAL_23:.*]] = %[[VAL_8]]) : (index, index) -> (index, index) {
-// CHECK: %[[VAL_24:.*]] = cmpi ult, %[[VAL_22]], %[[VAL_20]] : index
-// CHECK: scf.condition(%[[VAL_24]]) %[[VAL_22]], %[[VAL_23]] : index, index
+// CHECK: %[[VAL_10:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_9]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_9]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_12:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_13:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_14:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
+// CHECK: %[[VAL_15:.*]] = tensor_to_memref %[[VAL_1]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_16:.*]] = tensor_to_memref %[[VAL_2]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_17:.*]] = alloc() : memref<32x16x8xf32>
+// CHECK: linalg.copy(%[[VAL_16]], %[[VAL_17]]) : memref<32x16x8xf32>, memref<32x16x8xf32>
+// CHECK: scf.for %[[VAL_18:.*]] = %[[VAL_8]] to %[[VAL_4]] step %[[VAL_9]] {
+// CHECK: %[[VAL_19:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_18]]] : memref<?xindex>
+// CHECK: %[[VAL_20:.*]] = addi %[[VAL_18]], %[[VAL_9]] : index
+// CHECK: %[[VAL_21:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_20]]] : memref<?xindex>
+// CHECK: %[[VAL_22:.*]]:2 = scf.while (%[[VAL_23:.*]] = %[[VAL_19]], %[[VAL_24:.*]] = %[[VAL_8]]) : (index, index) -> (index, index) {
+// CHECK: %[[VAL_25:.*]] = cmpi ult, %[[VAL_23]], %[[VAL_21]] : index
+// CHECK: scf.condition(%[[VAL_25]]) %[[VAL_23]], %[[VAL_24]] : index, index
// CHECK: } do {
-// CHECK: ^bb0(%[[VAL_25:.*]]: index, %[[VAL_26:.*]]: index):
-// CHECK: %[[VAL_27:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_25]]] : memref<?xindex>
-// CHECK: %[[VAL_28:.*]] = cmpi eq, %[[VAL_27]], %[[VAL_26]] : index
-// CHECK: scf.if %[[VAL_28]] {
-// CHECK: %[[VAL_29:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_25]]] : memref<?xindex>
-// CHECK: %[[VAL_30:.*]] = addi %[[VAL_25]], %[[VAL_9]] : index
-// CHECK: %[[VAL_31:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_30]]] : memref<?xindex>
-// CHECK: %[[VAL_32:.*]]:2 = scf.while (%[[VAL_33:.*]] = %[[VAL_29]], %[[VAL_34:.*]] = %[[VAL_8]]) : (index, index) -> (index, index) {
-// CHECK: %[[VAL_35:.*]] = cmpi ult, %[[VAL_33]], %[[VAL_31]] : index
-// CHECK: scf.condition(%[[VAL_35]]) %[[VAL_33]], %[[VAL_34]] : index, index
+// CHECK: ^bb0(%[[VAL_26:.*]]: index, %[[VAL_27:.*]]: index):
+// CHECK: %[[VAL_28:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_26]]] : memref<?xindex>
+// CHECK: %[[VAL_29:.*]] = cmpi eq, %[[VAL_28]], %[[VAL_27]] : index
+// CHECK: scf.if %[[VAL_29]] {
+// CHECK: %[[VAL_30:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_26]]] : memref<?xindex>
+// CHECK: %[[VAL_31:.*]] = addi %[[VAL_26]], %[[VAL_9]] : index
+// CHECK: %[[VAL_32:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_31]]] : memref<?xindex>
+// CHECK: %[[VAL_33:.*]]:2 = scf.while (%[[VAL_34:.*]] = %[[VAL_30]], %[[VAL_35:.*]] = %[[VAL_8]]) : (index, index) -> (index, index) {
+// CHECK: %[[VAL_36:.*]] = cmpi ult, %[[VAL_34]], %[[VAL_32]] : index
+// CHECK: scf.condition(%[[VAL_36]]) %[[VAL_34]], %[[VAL_35]] : index, index
// CHECK: } do {
-// CHECK: ^bb0(%[[VAL_36:.*]]: index, %[[VAL_37:.*]]: index):
-// CHECK: %[[VAL_38:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_36]]] : memref<?xindex>
-// CHECK: %[[VAL_39:.*]] = cmpi eq, %[[VAL_38]], %[[VAL_37]] : index
-// CHECK: scf.if %[[VAL_39]] {
-// CHECK: %[[VAL_40:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_36]]] : memref<?xf32>
-// CHECK: %[[VAL_41:.*]] = load %[[VAL_15]]{{\[}}%[[VAL_17]], %[[VAL_26]], %[[VAL_37]]] : memref<32x16x8xf32>
-// CHECK: %[[VAL_42:.*]] = addf %[[VAL_40]], %[[VAL_41]] : f32
-// CHECK: store %[[VAL_42]], %[[VAL_16]]{{\[}}%[[VAL_17]], %[[VAL_26]], %[[VAL_37]]] : memref<32x16x8xf32>
+// CHECK: ^bb0(%[[VAL_37:.*]]: index, %[[VAL_38:.*]]: index):
+// CHECK: %[[VAL_39:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_37]]] : memref<?xindex>
+// CHECK: %[[VAL_40:.*]] = cmpi eq, %[[VAL_39]], %[[VAL_38]] : index
+// CHECK: scf.if %[[VAL_40]] {
+// CHECK: %[[VAL_41:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_37]]] : memref<?xf32>
+// CHECK: %[[VAL_42:.*]] = load %[[VAL_15]]{{\[}}%[[VAL_18]], %[[VAL_27]], %[[VAL_38]]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_43:.*]] = addf %[[VAL_41]], %[[VAL_42]] : f32
+// CHECK: store %[[VAL_43]], %[[VAL_17]]{{\[}}%[[VAL_18]], %[[VAL_27]], %[[VAL_38]]] : memref<32x16x8xf32>
// CHECK: } else {
// CHECK: scf.if %[[VAL_7]] {
-// CHECK: %[[VAL_43:.*]] = load %[[VAL_15]]{{\[}}%[[VAL_17]], %[[VAL_26]], %[[VAL_37]]] : memref<32x16x8xf32>
-// CHECK: store %[[VAL_43]], %[[VAL_16]]{{\[}}%[[VAL_17]], %[[VAL_26]], %[[VAL_37]]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_44:.*]] = load %[[VAL_15]]{{\[}}%[[VAL_18]], %[[VAL_27]], %[[VAL_38]]] : memref<32x16x8xf32>
+// CHECK: store %[[VAL_44]], %[[VAL_17]]{{\[}}%[[VAL_18]], %[[VAL_27]], %[[VAL_38]]] : memref<32x16x8xf32>
// CHECK: } else {
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_44:.*]] = cmpi eq, %[[VAL_38]], %[[VAL_37]] : index
-// CHECK: %[[VAL_45:.*]] = addi %[[VAL_36]], %[[VAL_9]] : index
-// CHECK: %[[VAL_46:.*]] = select %[[VAL_44]], %[[VAL_45]], %[[VAL_36]] : index
-// CHECK: %[[VAL_47:.*]] = addi %[[VAL_37]], %[[VAL_9]] : index
-// CHECK: scf.yield %[[VAL_46]], %[[VAL_47]] : index, index
+// CHECK: %[[VAL_45:.*]] = cmpi eq, %[[VAL_39]], %[[VAL_38]] : index
+// CHECK: %[[VAL_46:.*]] = addi %[[VAL_37]], %[[VAL_9]] : index
+// CHECK: %[[VAL_47:.*]] = select %[[VAL_45]], %[[VAL_46]], %[[VAL_37]] : index
+// CHECK: %[[VAL_48:.*]] = addi %[[VAL_38]], %[[VAL_9]] : index
+// CHECK: scf.yield %[[VAL_47]], %[[VAL_48]] : index, index
// CHECK: }
-// CHECK: scf.for %[[VAL_48:.*]] = %[[VAL_49:.*]]#1 to %[[VAL_6]] step %[[VAL_9]] {
-// CHECK: %[[VAL_50:.*]] = load %[[VAL_15]]{{\[}}%[[VAL_17]], %[[VAL_26]], %[[VAL_48]]] : memref<32x16x8xf32>
-// CHECK: store %[[VAL_50]], %[[VAL_16]]{{\[}}%[[VAL_17]], %[[VAL_26]], %[[VAL_48]]] : memref<32x16x8xf32>
+// CHECK: scf.for %[[VAL_49:.*]] = %[[VAL_50:.*]]#1 to %[[VAL_6]] step %[[VAL_9]] {
+// CHECK: %[[VAL_51:.*]] = load %[[VAL_15]]{{\[}}%[[VAL_18]], %[[VAL_27]], %[[VAL_49]]] : memref<32x16x8xf32>
+// CHECK: store %[[VAL_51]], %[[VAL_17]]{{\[}}%[[VAL_18]], %[[VAL_27]], %[[VAL_49]]] : memref<32x16x8xf32>
// CHECK: }
// CHECK: } else {
// CHECK: scf.if %[[VAL_7]] {
-// CHECK: scf.for %[[VAL_51:.*]] = %[[VAL_8]] to %[[VAL_6]] step %[[VAL_9]] {
-// CHECK: %[[VAL_52:.*]] = load %[[VAL_15]]{{\[}}%[[VAL_17]], %[[VAL_26]], %[[VAL_51]]] : memref<32x16x8xf32>
-// CHECK: store %[[VAL_52]], %[[VAL_16]]{{\[}}%[[VAL_17]], %[[VAL_26]], %[[VAL_51]]] : memref<32x16x8xf32>
+// CHECK: scf.for %[[VAL_52:.*]] = %[[VAL_8]] to %[[VAL_6]] step %[[VAL_9]] {
+// CHECK: %[[VAL_53:.*]] = load %[[VAL_15]]{{\[}}%[[VAL_18]], %[[VAL_27]], %[[VAL_52]]] : memref<32x16x8xf32>
+// CHECK: store %[[VAL_53]], %[[VAL_17]]{{\[}}%[[VAL_18]], %[[VAL_27]], %[[VAL_52]]] : memref<32x16x8xf32>
// CHECK: }
// CHECK: } else {
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_53:.*]] = cmpi eq, %[[VAL_27]], %[[VAL_26]] : index
-// CHECK: %[[VAL_54:.*]] = addi %[[VAL_25]], %[[VAL_9]] : index
-// CHECK: %[[VAL_55:.*]] = select %[[VAL_53]], %[[VAL_54]], %[[VAL_25]] : index
-// CHECK: %[[VAL_56:.*]] = addi %[[VAL_26]], %[[VAL_9]] : index
-// CHECK: scf.yield %[[VAL_55]], %[[VAL_56]] : index, index
+// CHECK: %[[VAL_54:.*]] = cmpi eq, %[[VAL_28]], %[[VAL_27]] : index
+// CHECK: %[[VAL_55:.*]] = addi %[[VAL_26]], %[[VAL_9]] : index
+// CHECK: %[[VAL_56:.*]] = select %[[VAL_54]], %[[VAL_55]], %[[VAL_26]] : index
+// CHECK: %[[VAL_57:.*]] = addi %[[VAL_27]], %[[VAL_9]] : index
+// CHECK: scf.yield %[[VAL_56]], %[[VAL_57]] : index, index
// CHECK: }
-// CHECK: scf.for %[[VAL_57:.*]] = %[[VAL_58:.*]]#1 to %[[VAL_5]] step %[[VAL_9]] {
-// CHECK: scf.for %[[VAL_59:.*]] = %[[VAL_8]] to %[[VAL_6]] step %[[VAL_9]] {
-// CHECK: %[[VAL_60:.*]] = load %[[VAL_15]]{{\[}}%[[VAL_17]], %[[VAL_57]], %[[VAL_59]]] : memref<32x16x8xf32>
-// CHECK: store %[[VAL_60]], %[[VAL_16]]{{\[}}%[[VAL_17]], %[[VAL_57]], %[[VAL_59]]] : memref<32x16x8xf32>
+// CHECK: scf.for %[[VAL_58:.*]] = %[[VAL_59:.*]]#1 to %[[VAL_5]] step %[[VAL_9]] {
+// CHECK: scf.for %[[VAL_60:.*]] = %[[VAL_8]] to %[[VAL_6]] step %[[VAL_9]] {
+// CHECK: %[[VAL_61:.*]] = load %[[VAL_15]]{{\[}}%[[VAL_18]], %[[VAL_58]], %[[VAL_60]]] : memref<32x16x8xf32>
+// CHECK: store %[[VAL_61]], %[[VAL_17]]{{\[}}%[[VAL_18]], %[[VAL_58]], %[[VAL_60]]] : memref<32x16x8xf32>
// CHECK: }
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_61:.*]] = tensor_load %[[VAL_16]] : memref<32x16x8xf32>
-// CHECK: return %[[VAL_61]] : tensor<32x16x8xf32>
+// CHECK: %[[VAL_62:.*]] = tensor_load %[[VAL_17]] : memref<32x16x8xf32>
+// CHECK: return %[[VAL_62]] : tensor<32x16x8xf32>
// CHECK: }
func @add_dss(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
%0 = linalg.generic #trait_dss
// CHECK-SAME: %[[VAL_0:.*0]]: tensor<32x16x8xf32>,
// CHECK-SAME: %[[VAL_1:.*1]]: tensor<32x16x8xf32>,
// CHECK-SAME: %[[VAL_2:.*2]]: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
-// CHECK: %[[VAL_3:.*]] = constant 999 : index
+// CHECK: %[[VAL_3:.*]] = constant 2 : index
// CHECK: %[[VAL_4:.*]] = constant 32 : index
// CHECK: %[[VAL_5:.*]] = constant 0 : index
// CHECK: %[[VAL_6:.*]] = constant 1 : index
-// CHECK: %[[VAL_7:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = alloca(%[[VAL_3]]) : memref<?xf32>
-// CHECK: %[[VAL_12:.*]] = alloca() : memref<32x16x8xf32>
-// CHECK: %[[VAL_13:.*]] = alloca() : memref<32x16x8xf32>
-// CHECK: scf.for %[[VAL_14:.*]] = %[[VAL_5]] to %[[VAL_4]] step %[[VAL_6]] {
-// CHECK: %[[VAL_15:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_14]]] : memref<?xindex>
-// CHECK: %[[VAL_16:.*]] = addi %[[VAL_14]], %[[VAL_6]] : index
-// CHECK: %[[VAL_17:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_16]]] : memref<?xindex>
-// CHECK: scf.for %[[VAL_18:.*]] = %[[VAL_15]] to %[[VAL_17]] step %[[VAL_6]] {
-// CHECK: %[[VAL_19:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_18]]] : memref<?xindex>
-// CHECK: %[[VAL_20:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_18]]] : memref<?xindex>
-// CHECK: %[[VAL_21:.*]] = addi %[[VAL_18]], %[[VAL_6]] : index
-// CHECK: %[[VAL_22:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_21]]] : memref<?xindex>
-// CHECK: scf.for %[[VAL_23:.*]] = %[[VAL_20]] to %[[VAL_22]] step %[[VAL_6]] {
-// CHECK: %[[VAL_24:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_23]]] : memref<?xindex>
-// CHECK: %[[VAL_25:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_23]]] : memref<?xf32>
-// CHECK: %[[VAL_26:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_14]], %[[VAL_19]], %[[VAL_24]]] : memref<32x16x8xf32>
-// CHECK: %[[VAL_27:.*]] = mulf %[[VAL_25]], %[[VAL_26]] : f32
-// CHECK: store %[[VAL_27]], %[[VAL_13]]{{\[}}%[[VAL_14]], %[[VAL_19]], %[[VAL_24]]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_6]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_6]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_10:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
+// CHECK: %[[VAL_12:.*]] = tensor_to_memref %[[VAL_1]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_13:.*]] = tensor_to_memref %[[VAL_2]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_14:.*]] = alloc() : memref<32x16x8xf32>
+// CHECK: linalg.copy(%[[VAL_13]], %[[VAL_14]]) : memref<32x16x8xf32>, memref<32x16x8xf32>
+// CHECK: scf.for %[[VAL_15:.*]] = %[[VAL_5]] to %[[VAL_4]] step %[[VAL_6]] {
+// CHECK: %[[VAL_16:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_15]]] : memref<?xindex>
+// CHECK: %[[VAL_17:.*]] = addi %[[VAL_15]], %[[VAL_6]] : index
+// CHECK: %[[VAL_18:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_17]]] : memref<?xindex>
+// CHECK: scf.for %[[VAL_19:.*]] = %[[VAL_16]] to %[[VAL_18]] step %[[VAL_6]] {
+// CHECK: %[[VAL_20:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_19]]] : memref<?xindex>
+// CHECK: %[[VAL_21:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_19]]] : memref<?xindex>
+// CHECK: %[[VAL_22:.*]] = addi %[[VAL_19]], %[[VAL_6]] : index
+// CHECK: %[[VAL_23:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_22]]] : memref<?xindex>
+// CHECK: scf.for %[[VAL_24:.*]] = %[[VAL_21]] to %[[VAL_23]] step %[[VAL_6]] {
+// CHECK: %[[VAL_25:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_24]]] : memref<?xindex>
+// CHECK: %[[VAL_26:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_24]]] : memref<?xf32>
+// CHECK: %[[VAL_27:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_15]], %[[VAL_20]], %[[VAL_25]]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_28:.*]] = mulf %[[VAL_26]], %[[VAL_27]] : f32
+// CHECK: store %[[VAL_28]], %[[VAL_14]]{{\[}}%[[VAL_15]], %[[VAL_20]], %[[VAL_25]]] : memref<32x16x8xf32>
// CHECK: }
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_28:.*]] = tensor_load %[[VAL_13]] : memref<32x16x8xf32>
-// CHECK: return %[[VAL_28]] : tensor<32x16x8xf32>
+// CHECK: %[[VAL_29:.*]] = tensor_load %[[VAL_14]] : memref<32x16x8xf32>
+// CHECK: return %[[VAL_29]] : tensor<32x16x8xf32>
// CHECK: }
func @mul_dss(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
%0 = linalg.generic #trait_dss
// CHECK-SAME: %[[VAL_0:.*0]]: tensor<32x16x8xf32>,
// CHECK-SAME: %[[VAL_1:.*1]]: tensor<32x16x8xf32>,
// CHECK-SAME: %[[VAL_2:.*2]]: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
-// CHECK: %[[VAL_3:.*]] = constant 999 : index
-// CHECK: %[[VAL_4:.*]] = constant 32 : index
-// CHECK: %[[VAL_5:.*]] = constant 16 : index
-// CHECK: %[[VAL_6:.*]] = constant 8 : index
-// CHECK: %[[VAL_7:.*]] = constant true
-// CHECK: %[[VAL_8:.*]] = constant 0 : index
-// CHECK: %[[VAL_9:.*]] = constant 1 : index
-// CHECK: %[[VAL_10:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_12:.*]] = alloca(%[[VAL_3]]) : memref<?xf32>
-// CHECK: %[[VAL_13:.*]] = alloca() : memref<32x16x8xf32>
-// CHECK: %[[VAL_14:.*]] = alloca() : memref<32x16x8xf32>
-// CHECK: %[[VAL_15:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_8]]] : memref<?xindex>
-// CHECK: %[[VAL_16:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_9]]] : memref<?xindex>
-// CHECK: %[[VAL_17:.*]]:2 = scf.while (%[[VAL_18:.*]] = %[[VAL_15]], %[[VAL_19:.*]] = %[[VAL_8]]) : (index, index) -> (index, index) {
+// CHECK: %[[VAL_3:.*]] = constant 32 : index
+// CHECK: %[[VAL_4:.*]] = constant 16 : index
+// CHECK: %[[VAL_5:.*]] = constant 8 : index
+// CHECK: %[[VAL_6:.*]] = constant true
+// CHECK: %[[VAL_7:.*]] = constant 0 : index
+// CHECK: %[[VAL_8:.*]] = constant 1 : index
+// CHECK: %[[VAL_9:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_7]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_10:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_7]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
+// CHECK: %[[VAL_12:.*]] = tensor_to_memref %[[VAL_1]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_13:.*]] = tensor_to_memref %[[VAL_2]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_14:.*]] = alloc() : memref<32x16x8xf32>
+// CHECK: linalg.copy(%[[VAL_13]], %[[VAL_14]]) : memref<32x16x8xf32>, memref<32x16x8xf32>
+// CHECK: %[[VAL_15:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_7]]] : memref<?xindex>
+// CHECK: %[[VAL_16:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_8]]] : memref<?xindex>
+// CHECK: %[[VAL_17:.*]]:2 = scf.while (%[[VAL_18:.*]] = %[[VAL_15]], %[[VAL_19:.*]] = %[[VAL_7]]) : (index, index) -> (index, index) {
// CHECK: %[[VAL_20:.*]] = cmpi ult, %[[VAL_18]], %[[VAL_16]] : index
// CHECK: scf.condition(%[[VAL_20]]) %[[VAL_18]], %[[VAL_19]] : index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_21:.*]]: index, %[[VAL_22:.*]]: index):
-// CHECK: %[[VAL_23:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_21]]] : memref<?xindex>
+// CHECK: %[[VAL_23:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_21]]] : memref<?xindex>
// CHECK: %[[VAL_24:.*]] = cmpi eq, %[[VAL_23]], %[[VAL_22]] : index
// CHECK: scf.if %[[VAL_24]] {
-// CHECK: scf.for %[[VAL_25:.*]] = %[[VAL_8]] to %[[VAL_5]] step %[[VAL_9]] {
-// CHECK: %[[VAL_26:.*]] = muli %[[VAL_21]], %[[VAL_5]] : index
+// CHECK: scf.for %[[VAL_25:.*]] = %[[VAL_7]] to %[[VAL_4]] step %[[VAL_8]] {
+// CHECK: %[[VAL_26:.*]] = muli %[[VAL_21]], %[[VAL_4]] : index
// CHECK: %[[VAL_27:.*]] = addi %[[VAL_26]], %[[VAL_25]] : index
-// CHECK: scf.for %[[VAL_28:.*]] = %[[VAL_8]] to %[[VAL_6]] step %[[VAL_9]] {
-// CHECK: %[[VAL_29:.*]] = muli %[[VAL_27]], %[[VAL_6]] : index
+// CHECK: scf.for %[[VAL_28:.*]] = %[[VAL_7]] to %[[VAL_5]] step %[[VAL_8]] {
+// CHECK: %[[VAL_29:.*]] = muli %[[VAL_27]], %[[VAL_5]] : index
// CHECK: %[[VAL_30:.*]] = addi %[[VAL_29]], %[[VAL_28]] : index
-// CHECK: %[[VAL_31:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_30]]] : memref<?xf32>
-// CHECK: %[[VAL_32:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_22]], %[[VAL_25]], %[[VAL_28]]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_31:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_30]]] : memref<?xf32>
+// CHECK: %[[VAL_32:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_22]], %[[VAL_25]], %[[VAL_28]]] : memref<32x16x8xf32>
// CHECK: %[[VAL_33:.*]] = addf %[[VAL_31]], %[[VAL_32]] : f32
// CHECK: store %[[VAL_33]], %[[VAL_14]]{{\[}}%[[VAL_22]], %[[VAL_25]], %[[VAL_28]]] : memref<32x16x8xf32>
// CHECK: }
// CHECK: }
// CHECK: } else {
-// CHECK: scf.if %[[VAL_7]] {
-// CHECK: scf.for %[[VAL_34:.*]] = %[[VAL_8]] to %[[VAL_5]] step %[[VAL_9]] {
-// CHECK: scf.for %[[VAL_35:.*]] = %[[VAL_8]] to %[[VAL_6]] step %[[VAL_9]] {
-// CHECK: %[[VAL_36:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_22]], %[[VAL_34]], %[[VAL_35]]] : memref<32x16x8xf32>
+// CHECK: scf.if %[[VAL_6]] {
+// CHECK: scf.for %[[VAL_34:.*]] = %[[VAL_7]] to %[[VAL_4]] step %[[VAL_8]] {
+// CHECK: scf.for %[[VAL_35:.*]] = %[[VAL_7]] to %[[VAL_5]] step %[[VAL_8]] {
+// CHECK: %[[VAL_36:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_22]], %[[VAL_34]], %[[VAL_35]]] : memref<32x16x8xf32>
// CHECK: store %[[VAL_36]], %[[VAL_14]]{{\[}}%[[VAL_22]], %[[VAL_34]], %[[VAL_35]]] : memref<32x16x8xf32>
// CHECK: }
// CHECK: }
// CHECK: }
// CHECK: }
// CHECK: %[[VAL_37:.*]] = cmpi eq, %[[VAL_23]], %[[VAL_22]] : index
-// CHECK: %[[VAL_38:.*]] = addi %[[VAL_21]], %[[VAL_9]] : index
+// CHECK: %[[VAL_38:.*]] = addi %[[VAL_21]], %[[VAL_8]] : index
// CHECK: %[[VAL_39:.*]] = select %[[VAL_37]], %[[VAL_38]], %[[VAL_21]] : index
-// CHECK: %[[VAL_40:.*]] = addi %[[VAL_22]], %[[VAL_9]] : index
+// CHECK: %[[VAL_40:.*]] = addi %[[VAL_22]], %[[VAL_8]] : index
// CHECK: scf.yield %[[VAL_39]], %[[VAL_40]] : index, index
// CHECK: }
-// CHECK: scf.for %[[VAL_41:.*]] = %[[VAL_42:.*]]#1 to %[[VAL_4]] step %[[VAL_9]] {
-// CHECK: scf.for %[[VAL_43:.*]] = %[[VAL_8]] to %[[VAL_5]] step %[[VAL_9]] {
-// CHECK: scf.for %[[VAL_44:.*]] = %[[VAL_8]] to %[[VAL_6]] step %[[VAL_9]] {
-// CHECK: %[[VAL_45:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_41]], %[[VAL_43]], %[[VAL_44]]] : memref<32x16x8xf32>
+// CHECK: scf.for %[[VAL_41:.*]] = %[[VAL_42:.*]]#1 to %[[VAL_3]] step %[[VAL_8]] {
+// CHECK: scf.for %[[VAL_43:.*]] = %[[VAL_7]] to %[[VAL_4]] step %[[VAL_8]] {
+// CHECK: scf.for %[[VAL_44:.*]] = %[[VAL_7]] to %[[VAL_5]] step %[[VAL_8]] {
+// CHECK: %[[VAL_45:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_41]], %[[VAL_43]], %[[VAL_44]]] : memref<32x16x8xf32>
// CHECK: store %[[VAL_45]], %[[VAL_14]]{{\[}}%[[VAL_41]], %[[VAL_43]], %[[VAL_44]]] : memref<32x16x8xf32>
// CHECK: }
// CHECK: }
// CHECK-SAME: %[[VAL_0:.*0]]: tensor<32x16x8xf32>,
// CHECK-SAME: %[[VAL_1:.*1]]: tensor<32x16x8xf32>,
// CHECK-SAME: %[[VAL_2:.*2]]: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
-// CHECK: %[[VAL_3:.*]] = constant 999 : index
-// CHECK: %[[VAL_4:.*]] = constant 16 : index
-// CHECK: %[[VAL_5:.*]] = constant 8 : index
-// CHECK: %[[VAL_6:.*]] = constant 0 : index
-// CHECK: %[[VAL_7:.*]] = constant 1 : index
-// CHECK: %[[VAL_8:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = alloca(%[[VAL_3]]) : memref<?xf32>
-// CHECK: %[[VAL_11:.*]] = alloca() : memref<32x16x8xf32>
-// CHECK: %[[VAL_12:.*]] = alloca() : memref<32x16x8xf32>
-// CHECK: %[[VAL_13:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_6]]] : memref<?xindex>
-// CHECK: %[[VAL_14:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_7]]] : memref<?xindex>
-// CHECK: scf.for %[[VAL_15:.*]] = %[[VAL_13]] to %[[VAL_14]] step %[[VAL_7]] {
-// CHECK: %[[VAL_16:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_15]]] : memref<?xindex>
-// CHECK: scf.for %[[VAL_17:.*]] = %[[VAL_6]] to %[[VAL_4]] step %[[VAL_7]] {
-// CHECK: %[[VAL_18:.*]] = muli %[[VAL_15]], %[[VAL_4]] : index
+// CHECK: %[[VAL_3:.*]] = constant 16 : index
+// CHECK: %[[VAL_4:.*]] = constant 8 : index
+// CHECK: %[[VAL_5:.*]] = constant 0 : index
+// CHECK: %[[VAL_6:.*]] = constant 1 : index
+// CHECK: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
+// CHECK: %[[VAL_10:.*]] = tensor_to_memref %[[VAL_1]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_11:.*]] = tensor_to_memref %[[VAL_2]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_12:.*]] = alloc() : memref<32x16x8xf32>
+// CHECK: linalg.copy(%[[VAL_11]], %[[VAL_12]]) : memref<32x16x8xf32>, memref<32x16x8xf32>
+// CHECK: %[[VAL_13:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_5]]] : memref<?xindex>
+// CHECK: %[[VAL_14:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_6]]] : memref<?xindex>
+// CHECK: scf.for %[[VAL_15:.*]] = %[[VAL_13]] to %[[VAL_14]] step %[[VAL_6]] {
+// CHECK: %[[VAL_16:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_15]]] : memref<?xindex>
+// CHECK: scf.for %[[VAL_17:.*]] = %[[VAL_5]] to %[[VAL_3]] step %[[VAL_6]] {
+// CHECK: %[[VAL_18:.*]] = muli %[[VAL_15]], %[[VAL_3]] : index
// CHECK: %[[VAL_19:.*]] = addi %[[VAL_18]], %[[VAL_17]] : index
-// CHECK: scf.for %[[VAL_20:.*]] = %[[VAL_6]] to %[[VAL_5]] step %[[VAL_7]] {
-// CHECK: %[[VAL_21:.*]] = muli %[[VAL_19]], %[[VAL_5]] : index
+// CHECK: scf.for %[[VAL_20:.*]] = %[[VAL_5]] to %[[VAL_4]] step %[[VAL_6]] {
+// CHECK: %[[VAL_21:.*]] = muli %[[VAL_19]], %[[VAL_4]] : index
// CHECK: %[[VAL_22:.*]] = addi %[[VAL_21]], %[[VAL_20]] : index
-// CHECK: %[[VAL_23:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_22]]] : memref<?xf32>
-// CHECK: %[[VAL_24:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_16]], %[[VAL_17]], %[[VAL_20]]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_23:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_22]]] : memref<?xf32>
+// CHECK: %[[VAL_24:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_16]], %[[VAL_17]], %[[VAL_20]]] : memref<32x16x8xf32>
// CHECK: %[[VAL_25:.*]] = mulf %[[VAL_23]], %[[VAL_24]] : f32
// CHECK: store %[[VAL_25]], %[[VAL_12]]{{\[}}%[[VAL_16]], %[[VAL_17]], %[[VAL_20]]] : memref<32x16x8xf32>
// CHECK: }
// CHECK-SAME: %[[VAL_0:.*0]]: tensor<32x16x8xf32>,
// CHECK-SAME: %[[VAL_1:.*1]]: tensor<32x16x8xf32>,
// CHECK-SAME: %[[VAL_2:.*2]]: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
-// CHECK: %[[VAL_3:.*]] = constant 999 : index
+// CHECK: %[[VAL_3:.*]] = constant 2 : index
// CHECK: %[[VAL_4:.*]] = constant 32 : index
// CHECK: %[[VAL_5:.*]] = constant 16 : index
// CHECK: %[[VAL_6:.*]] = constant 8 : index
// CHECK: %[[VAL_7:.*]] = constant true
// CHECK: %[[VAL_8:.*]] = constant 0 : index
// CHECK: %[[VAL_9:.*]] = constant 1 : index
-// CHECK: %[[VAL_10:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_12:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_13:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_14:.*]] = alloca(%[[VAL_3]]) : memref<?xf32>
-// CHECK: %[[VAL_15:.*]] = alloca() : memref<32x16x8xf32>
-// CHECK: %[[VAL_16:.*]] = alloca() : memref<32x16x8xf32>
-// CHECK: %[[VAL_17:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_8]]] : memref<?xindex>
-// CHECK: %[[VAL_18:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_9]]] : memref<?xindex>
-// CHECK: %[[VAL_19:.*]]:2 = scf.while (%[[VAL_20:.*]] = %[[VAL_17]], %[[VAL_21:.*]] = %[[VAL_8]]) : (index, index) -> (index, index) {
-// CHECK: %[[VAL_22:.*]] = cmpi ult, %[[VAL_20]], %[[VAL_18]] : index
-// CHECK: scf.condition(%[[VAL_22]]) %[[VAL_20]], %[[VAL_21]] : index, index
+// CHECK: %[[VAL_10:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_12:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_13:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_14:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
+// CHECK: %[[VAL_15:.*]] = tensor_to_memref %[[VAL_1]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_16:.*]] = tensor_to_memref %[[VAL_2]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_17:.*]] = alloc() : memref<32x16x8xf32>
+// CHECK: linalg.copy(%[[VAL_16]], %[[VAL_17]]) : memref<32x16x8xf32>, memref<32x16x8xf32>
+// CHECK: %[[VAL_18:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_8]]] : memref<?xindex>
+// CHECK: %[[VAL_19:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_9]]] : memref<?xindex>
+// CHECK: %[[VAL_20:.*]]:2 = scf.while (%[[VAL_21:.*]] = %[[VAL_18]], %[[VAL_22:.*]] = %[[VAL_8]]) : (index, index) -> (index, index) {
+// CHECK: %[[VAL_23:.*]] = cmpi ult, %[[VAL_21]], %[[VAL_19]] : index
+// CHECK: scf.condition(%[[VAL_23]]) %[[VAL_21]], %[[VAL_22]] : index, index
// CHECK: } do {
-// CHECK: ^bb0(%[[VAL_23:.*]]: index, %[[VAL_24:.*]]: index):
-// CHECK: %[[VAL_25:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_23]]] : memref<?xindex>
-// CHECK: %[[VAL_26:.*]] = cmpi eq, %[[VAL_25]], %[[VAL_24]] : index
-// CHECK: scf.if %[[VAL_26]] {
-// CHECK: scf.for %[[VAL_27:.*]] = %[[VAL_8]] to %[[VAL_5]] step %[[VAL_9]] {
-// CHECK: %[[VAL_28:.*]] = muli %[[VAL_23]], %[[VAL_5]] : index
-// CHECK: %[[VAL_29:.*]] = addi %[[VAL_28]], %[[VAL_27]] : index
-// CHECK: %[[VAL_30:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_29]]] : memref<?xindex>
-// CHECK: %[[VAL_31:.*]] = addi %[[VAL_29]], %[[VAL_9]] : index
-// CHECK: %[[VAL_32:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_31]]] : memref<?xindex>
-// CHECK: %[[VAL_33:.*]]:2 = scf.while (%[[VAL_34:.*]] = %[[VAL_30]], %[[VAL_35:.*]] = %[[VAL_8]]) : (index, index) -> (index, index) {
-// CHECK: %[[VAL_36:.*]] = cmpi ult, %[[VAL_34]], %[[VAL_32]] : index
-// CHECK: scf.condition(%[[VAL_36]]) %[[VAL_34]], %[[VAL_35]] : index, index
+// CHECK: ^bb0(%[[VAL_24:.*]]: index, %[[VAL_25:.*]]: index):
+// CHECK: %[[VAL_26:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_24]]] : memref<?xindex>
+// CHECK: %[[VAL_27:.*]] = cmpi eq, %[[VAL_26]], %[[VAL_25]] : index
+// CHECK: scf.if %[[VAL_27]] {
+// CHECK: scf.for %[[VAL_28:.*]] = %[[VAL_8]] to %[[VAL_5]] step %[[VAL_9]] {
+// CHECK: %[[VAL_29:.*]] = muli %[[VAL_24]], %[[VAL_5]] : index
+// CHECK: %[[VAL_30:.*]] = addi %[[VAL_29]], %[[VAL_28]] : index
+// CHECK: %[[VAL_31:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_30]]] : memref<?xindex>
+// CHECK: %[[VAL_32:.*]] = addi %[[VAL_30]], %[[VAL_9]] : index
+// CHECK: %[[VAL_33:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_32]]] : memref<?xindex>
+// CHECK: %[[VAL_34:.*]]:2 = scf.while (%[[VAL_35:.*]] = %[[VAL_31]], %[[VAL_36:.*]] = %[[VAL_8]]) : (index, index) -> (index, index) {
+// CHECK: %[[VAL_37:.*]] = cmpi ult, %[[VAL_35]], %[[VAL_33]] : index
+// CHECK: scf.condition(%[[VAL_37]]) %[[VAL_35]], %[[VAL_36]] : index, index
// CHECK: } do {
-// CHECK: ^bb0(%[[VAL_37:.*]]: index, %[[VAL_38:.*]]: index):
-// CHECK: %[[VAL_39:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_37]]] : memref<?xindex>
-// CHECK: %[[VAL_40:.*]] = cmpi eq, %[[VAL_39]], %[[VAL_38]] : index
-// CHECK: scf.if %[[VAL_40]] {
-// CHECK: %[[VAL_41:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_37]]] : memref<?xf32>
-// CHECK: %[[VAL_42:.*]] = load %[[VAL_15]]{{\[}}%[[VAL_24]], %[[VAL_27]], %[[VAL_38]]] : memref<32x16x8xf32>
-// CHECK: %[[VAL_43:.*]] = addf %[[VAL_41]], %[[VAL_42]] : f32
-// CHECK: store %[[VAL_43]], %[[VAL_16]]{{\[}}%[[VAL_24]], %[[VAL_27]], %[[VAL_38]]] : memref<32x16x8xf32>
+// CHECK: ^bb0(%[[VAL_38:.*]]: index, %[[VAL_39:.*]]: index):
+// CHECK: %[[VAL_40:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_38]]] : memref<?xindex>
+// CHECK: %[[VAL_41:.*]] = cmpi eq, %[[VAL_40]], %[[VAL_39]] : index
+// CHECK: scf.if %[[VAL_41]] {
+// CHECK: %[[VAL_42:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_38]]] : memref<?xf32>
+// CHECK: %[[VAL_43:.*]] = load %[[VAL_15]]{{\[}}%[[VAL_25]], %[[VAL_28]], %[[VAL_39]]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_44:.*]] = addf %[[VAL_42]], %[[VAL_43]] : f32
+// CHECK: store %[[VAL_44]], %[[VAL_17]]{{\[}}%[[VAL_25]], %[[VAL_28]], %[[VAL_39]]] : memref<32x16x8xf32>
// CHECK: } else {
// CHECK: scf.if %[[VAL_7]] {
-// CHECK: %[[VAL_44:.*]] = load %[[VAL_15]]{{\[}}%[[VAL_24]], %[[VAL_27]], %[[VAL_38]]] : memref<32x16x8xf32>
-// CHECK: store %[[VAL_44]], %[[VAL_16]]{{\[}}%[[VAL_24]], %[[VAL_27]], %[[VAL_38]]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_45:.*]] = load %[[VAL_15]]{{\[}}%[[VAL_25]], %[[VAL_28]], %[[VAL_39]]] : memref<32x16x8xf32>
+// CHECK: store %[[VAL_45]], %[[VAL_17]]{{\[}}%[[VAL_25]], %[[VAL_28]], %[[VAL_39]]] : memref<32x16x8xf32>
// CHECK: } else {
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_45:.*]] = cmpi eq, %[[VAL_39]], %[[VAL_38]] : index
-// CHECK: %[[VAL_46:.*]] = addi %[[VAL_37]], %[[VAL_9]] : index
-// CHECK: %[[VAL_47:.*]] = select %[[VAL_45]], %[[VAL_46]], %[[VAL_37]] : index
-// CHECK: %[[VAL_48:.*]] = addi %[[VAL_38]], %[[VAL_9]] : index
-// CHECK: scf.yield %[[VAL_47]], %[[VAL_48]] : index, index
+// CHECK: %[[VAL_46:.*]] = cmpi eq, %[[VAL_40]], %[[VAL_39]] : index
+// CHECK: %[[VAL_47:.*]] = addi %[[VAL_38]], %[[VAL_9]] : index
+// CHECK: %[[VAL_48:.*]] = select %[[VAL_46]], %[[VAL_47]], %[[VAL_38]] : index
+// CHECK: %[[VAL_49:.*]] = addi %[[VAL_39]], %[[VAL_9]] : index
+// CHECK: scf.yield %[[VAL_48]], %[[VAL_49]] : index, index
// CHECK: }
-// CHECK: scf.for %[[VAL_49:.*]] = %[[VAL_50:.*]]#1 to %[[VAL_6]] step %[[VAL_9]] {
-// CHECK: %[[VAL_51:.*]] = load %[[VAL_15]]{{\[}}%[[VAL_24]], %[[VAL_27]], %[[VAL_49]]] : memref<32x16x8xf32>
-// CHECK: store %[[VAL_51]], %[[VAL_16]]{{\[}}%[[VAL_24]], %[[VAL_27]], %[[VAL_49]]] : memref<32x16x8xf32>
+// CHECK: scf.for %[[VAL_50:.*]] = %[[VAL_51:.*]]#1 to %[[VAL_6]] step %[[VAL_9]] {
+// CHECK: %[[VAL_52:.*]] = load %[[VAL_15]]{{\[}}%[[VAL_25]], %[[VAL_28]], %[[VAL_50]]] : memref<32x16x8xf32>
+// CHECK: store %[[VAL_52]], %[[VAL_17]]{{\[}}%[[VAL_25]], %[[VAL_28]], %[[VAL_50]]] : memref<32x16x8xf32>
// CHECK: }
// CHECK: }
// CHECK: } else {
// CHECK: scf.if %[[VAL_7]] {
-// CHECK: scf.for %[[VAL_52:.*]] = %[[VAL_8]] to %[[VAL_5]] step %[[VAL_9]] {
-// CHECK: scf.for %[[VAL_53:.*]] = %[[VAL_8]] to %[[VAL_6]] step %[[VAL_9]] {
-// CHECK: %[[VAL_54:.*]] = load %[[VAL_15]]{{\[}}%[[VAL_24]], %[[VAL_52]], %[[VAL_53]]] : memref<32x16x8xf32>
-// CHECK: store %[[VAL_54]], %[[VAL_16]]{{\[}}%[[VAL_24]], %[[VAL_52]], %[[VAL_53]]] : memref<32x16x8xf32>
+// CHECK: scf.for %[[VAL_53:.*]] = %[[VAL_8]] to %[[VAL_5]] step %[[VAL_9]] {
+// CHECK: scf.for %[[VAL_54:.*]] = %[[VAL_8]] to %[[VAL_6]] step %[[VAL_9]] {
+// CHECK: %[[VAL_55:.*]] = load %[[VAL_15]]{{\[}}%[[VAL_25]], %[[VAL_53]], %[[VAL_54]]] : memref<32x16x8xf32>
+// CHECK: store %[[VAL_55]], %[[VAL_17]]{{\[}}%[[VAL_25]], %[[VAL_53]], %[[VAL_54]]] : memref<32x16x8xf32>
// CHECK: }
// CHECK: }
// CHECK: } else {
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_55:.*]] = cmpi eq, %[[VAL_25]], %[[VAL_24]] : index
-// CHECK: %[[VAL_56:.*]] = addi %[[VAL_23]], %[[VAL_9]] : index
-// CHECK: %[[VAL_57:.*]] = select %[[VAL_55]], %[[VAL_56]], %[[VAL_23]] : index
-// CHECK: %[[VAL_58:.*]] = addi %[[VAL_24]], %[[VAL_9]] : index
-// CHECK: scf.yield %[[VAL_57]], %[[VAL_58]] : index, index
+// CHECK: %[[VAL_56:.*]] = cmpi eq, %[[VAL_26]], %[[VAL_25]] : index
+// CHECK: %[[VAL_57:.*]] = addi %[[VAL_24]], %[[VAL_9]] : index
+// CHECK: %[[VAL_58:.*]] = select %[[VAL_56]], %[[VAL_57]], %[[VAL_24]] : index
+// CHECK: %[[VAL_59:.*]] = addi %[[VAL_25]], %[[VAL_9]] : index
+// CHECK: scf.yield %[[VAL_58]], %[[VAL_59]] : index, index
// CHECK: }
-// CHECK: scf.for %[[VAL_59:.*]] = %[[VAL_60:.*]]#1 to %[[VAL_4]] step %[[VAL_9]] {
-// CHECK: scf.for %[[VAL_61:.*]] = %[[VAL_8]] to %[[VAL_5]] step %[[VAL_9]] {
-// CHECK: scf.for %[[VAL_62:.*]] = %[[VAL_8]] to %[[VAL_6]] step %[[VAL_9]] {
-// CHECK: %[[VAL_63:.*]] = load %[[VAL_15]]{{\[}}%[[VAL_59]], %[[VAL_61]], %[[VAL_62]]] : memref<32x16x8xf32>
-// CHECK: store %[[VAL_63]], %[[VAL_16]]{{\[}}%[[VAL_59]], %[[VAL_61]], %[[VAL_62]]] : memref<32x16x8xf32>
+// CHECK: scf.for %[[VAL_60:.*]] = %[[VAL_61:.*]]#1 to %[[VAL_4]] step %[[VAL_9]] {
+// CHECK: scf.for %[[VAL_62:.*]] = %[[VAL_8]] to %[[VAL_5]] step %[[VAL_9]] {
+// CHECK: scf.for %[[VAL_63:.*]] = %[[VAL_8]] to %[[VAL_6]] step %[[VAL_9]] {
+// CHECK: %[[VAL_64:.*]] = load %[[VAL_15]]{{\[}}%[[VAL_60]], %[[VAL_62]], %[[VAL_63]]] : memref<32x16x8xf32>
+// CHECK: store %[[VAL_64]], %[[VAL_17]]{{\[}}%[[VAL_60]], %[[VAL_62]], %[[VAL_63]]] : memref<32x16x8xf32>
// CHECK: }
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_64:.*]] = tensor_load %[[VAL_16]] : memref<32x16x8xf32>
-// CHECK: return %[[VAL_64]] : tensor<32x16x8xf32>
+// CHECK: %[[VAL_65:.*]] = tensor_load %[[VAL_17]] : memref<32x16x8xf32>
+// CHECK: return %[[VAL_65]] : tensor<32x16x8xf32>
// CHECK: }
func @add_sds(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
%0 = linalg.generic #trait_sds
// CHECK-SAME: %[[VAL_0:.*0]]: tensor<32x16x8xf32>,
// CHECK-SAME: %[[VAL_1:.*1]]: tensor<32x16x8xf32>,
// CHECK-SAME: %[[VAL_2:.*2]]: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
-// CHECK: %[[VAL_3:.*]] = constant 999 : index
+// CHECK: %[[VAL_3:.*]] = constant 2 : index
// CHECK: %[[VAL_4:.*]] = constant 16 : index
// CHECK: %[[VAL_5:.*]] = constant 0 : index
// CHECK: %[[VAL_6:.*]] = constant 1 : index
-// CHECK: %[[VAL_7:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = alloca(%[[VAL_3]]) : memref<?xf32>
-// CHECK: %[[VAL_12:.*]] = alloca() : memref<32x16x8xf32>
-// CHECK: %[[VAL_13:.*]] = alloca() : memref<32x16x8xf32>
-// CHECK: %[[VAL_14:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_5]]] : memref<?xindex>
-// CHECK: %[[VAL_15:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_6]]] : memref<?xindex>
-// CHECK: scf.for %[[VAL_16:.*]] = %[[VAL_14]] to %[[VAL_15]] step %[[VAL_6]] {
-// CHECK: %[[VAL_17:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_16]]] : memref<?xindex>
-// CHECK: scf.for %[[VAL_18:.*]] = %[[VAL_5]] to %[[VAL_4]] step %[[VAL_6]] {
-// CHECK: %[[VAL_19:.*]] = muli %[[VAL_16]], %[[VAL_4]] : index
-// CHECK: %[[VAL_20:.*]] = addi %[[VAL_19]], %[[VAL_18]] : index
-// CHECK: %[[VAL_21:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_20]]] : memref<?xindex>
-// CHECK: %[[VAL_22:.*]] = addi %[[VAL_20]], %[[VAL_6]] : index
-// CHECK: %[[VAL_23:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_22]]] : memref<?xindex>
-// CHECK: scf.for %[[VAL_24:.*]] = %[[VAL_21]] to %[[VAL_23]] step %[[VAL_6]] {
-// CHECK: %[[VAL_25:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_24]]] : memref<?xindex>
-// CHECK: %[[VAL_26:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_24]]] : memref<?xf32>
-// CHECK: %[[VAL_27:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_17]], %[[VAL_18]], %[[VAL_25]]] : memref<32x16x8xf32>
-// CHECK: %[[VAL_28:.*]] = mulf %[[VAL_26]], %[[VAL_27]] : f32
-// CHECK: store %[[VAL_28]], %[[VAL_13]]{{\[}}%[[VAL_17]], %[[VAL_18]], %[[VAL_25]]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_10:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
+// CHECK: %[[VAL_12:.*]] = tensor_to_memref %[[VAL_1]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_13:.*]] = tensor_to_memref %[[VAL_2]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_14:.*]] = alloc() : memref<32x16x8xf32>
+// CHECK: linalg.copy(%[[VAL_13]], %[[VAL_14]]) : memref<32x16x8xf32>, memref<32x16x8xf32>
+// CHECK: %[[VAL_15:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_5]]] : memref<?xindex>
+// CHECK: %[[VAL_16:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_6]]] : memref<?xindex>
+// CHECK: scf.for %[[VAL_17:.*]] = %[[VAL_15]] to %[[VAL_16]] step %[[VAL_6]] {
+// CHECK: %[[VAL_18:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_17]]] : memref<?xindex>
+// CHECK: scf.for %[[VAL_19:.*]] = %[[VAL_5]] to %[[VAL_4]] step %[[VAL_6]] {
+// CHECK: %[[VAL_20:.*]] = muli %[[VAL_17]], %[[VAL_4]] : index
+// CHECK: %[[VAL_21:.*]] = addi %[[VAL_20]], %[[VAL_19]] : index
+// CHECK: %[[VAL_22:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_21]]] : memref<?xindex>
+// CHECK: %[[VAL_23:.*]] = addi %[[VAL_21]], %[[VAL_6]] : index
+// CHECK: %[[VAL_24:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_23]]] : memref<?xindex>
+// CHECK: scf.for %[[VAL_25:.*]] = %[[VAL_22]] to %[[VAL_24]] step %[[VAL_6]] {
+// CHECK: %[[VAL_26:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_25]]] : memref<?xindex>
+// CHECK: %[[VAL_27:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_25]]] : memref<?xf32>
+// CHECK: %[[VAL_28:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_18]], %[[VAL_19]], %[[VAL_26]]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_29:.*]] = mulf %[[VAL_27]], %[[VAL_28]] : f32
+// CHECK: store %[[VAL_29]], %[[VAL_14]]{{\[}}%[[VAL_18]], %[[VAL_19]], %[[VAL_26]]] : memref<32x16x8xf32>
// CHECK: }
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_29:.*]] = tensor_load %[[VAL_13]] : memref<32x16x8xf32>
-// CHECK: return %[[VAL_29]] : tensor<32x16x8xf32>
+// CHECK: %[[VAL_30:.*]] = tensor_load %[[VAL_14]] : memref<32x16x8xf32>
+// CHECK: return %[[VAL_30]] : tensor<32x16x8xf32>
// CHECK: }
func @mul_sds(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
%0 = linalg.generic #trait_sds
// CHECK-SAME: %[[VAL_0:.*0]]: tensor<32x16x8xf32>,
// CHECK-SAME: %[[VAL_1:.*1]]: tensor<32x16x8xf32>,
// CHECK-SAME: %[[VAL_2:.*2]]: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
-// CHECK: %[[VAL_3:.*]] = constant 999 : index
-// CHECK: %[[VAL_4:.*]] = constant 32 : index
-// CHECK: %[[VAL_5:.*]] = constant 16 : index
-// CHECK: %[[VAL_6:.*]] = constant 8 : index
-// CHECK: %[[VAL_7:.*]] = constant true
-// CHECK: %[[VAL_8:.*]] = constant 0 : index
-// CHECK: %[[VAL_9:.*]] = constant 1 : index
-// CHECK: %[[VAL_10:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_12:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_13:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_14:.*]] = alloca(%[[VAL_3]]) : memref<?xf32>
-// CHECK: %[[VAL_15:.*]] = alloca() : memref<32x16x8xf32>
-// CHECK: %[[VAL_16:.*]] = alloca() : memref<32x16x8xf32>
-// CHECK: %[[VAL_17:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_8]]] : memref<?xindex>
-// CHECK: %[[VAL_18:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_9]]] : memref<?xindex>
-// CHECK: %[[VAL_19:.*]]:2 = scf.while (%[[VAL_20:.*]] = %[[VAL_17]], %[[VAL_21:.*]] = %[[VAL_8]]) : (index, index) -> (index, index) {
+// CHECK: %[[VAL_3:.*]] = constant 32 : index
+// CHECK: %[[VAL_4:.*]] = constant 16 : index
+// CHECK: %[[VAL_5:.*]] = constant 8 : index
+// CHECK: %[[VAL_6:.*]] = constant true
+// CHECK: %[[VAL_7:.*]] = constant 0 : index
+// CHECK: %[[VAL_8:.*]] = constant 1 : index
+// CHECK: %[[VAL_9:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_7]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_10:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_7]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_12:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_13:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
+// CHECK: %[[VAL_14:.*]] = tensor_to_memref %[[VAL_1]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_15:.*]] = tensor_to_memref %[[VAL_2]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_16:.*]] = alloc() : memref<32x16x8xf32>
+// CHECK: linalg.copy(%[[VAL_15]], %[[VAL_16]]) : memref<32x16x8xf32>, memref<32x16x8xf32>
+// CHECK: %[[VAL_17:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_7]]] : memref<?xindex>
+// CHECK: %[[VAL_18:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_8]]] : memref<?xindex>
+// CHECK: %[[VAL_19:.*]]:2 = scf.while (%[[VAL_20:.*]] = %[[VAL_17]], %[[VAL_21:.*]] = %[[VAL_7]]) : (index, index) -> (index, index) {
// CHECK: %[[VAL_22:.*]] = cmpi ult, %[[VAL_20]], %[[VAL_18]] : index
// CHECK: scf.condition(%[[VAL_22]]) %[[VAL_20]], %[[VAL_21]] : index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_23:.*]]: index, %[[VAL_24:.*]]: index):
-// CHECK: %[[VAL_25:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_23]]] : memref<?xindex>
+// CHECK: %[[VAL_25:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_23]]] : memref<?xindex>
// CHECK: %[[VAL_26:.*]] = cmpi eq, %[[VAL_25]], %[[VAL_24]] : index
// CHECK: scf.if %[[VAL_26]] {
-// CHECK: %[[VAL_27:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_23]]] : memref<?xindex>
-// CHECK: %[[VAL_28:.*]] = addi %[[VAL_23]], %[[VAL_9]] : index
-// CHECK: %[[VAL_29:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_28]]] : memref<?xindex>
-// CHECK: %[[VAL_30:.*]]:2 = scf.while (%[[VAL_31:.*]] = %[[VAL_27]], %[[VAL_32:.*]] = %[[VAL_8]]) : (index, index) -> (index, index) {
+// CHECK: %[[VAL_27:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_23]]] : memref<?xindex>
+// CHECK: %[[VAL_28:.*]] = addi %[[VAL_23]], %[[VAL_8]] : index
+// CHECK: %[[VAL_29:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_28]]] : memref<?xindex>
+// CHECK: %[[VAL_30:.*]]:2 = scf.while (%[[VAL_31:.*]] = %[[VAL_27]], %[[VAL_32:.*]] = %[[VAL_7]]) : (index, index) -> (index, index) {
// CHECK: %[[VAL_33:.*]] = cmpi ult, %[[VAL_31]], %[[VAL_29]] : index
// CHECK: scf.condition(%[[VAL_33]]) %[[VAL_31]], %[[VAL_32]] : index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_34:.*]]: index, %[[VAL_35:.*]]: index):
-// CHECK: %[[VAL_36:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_34]]] : memref<?xindex>
+// CHECK: %[[VAL_36:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_34]]] : memref<?xindex>
// CHECK: %[[VAL_37:.*]] = cmpi eq, %[[VAL_36]], %[[VAL_35]] : index
// CHECK: scf.if %[[VAL_37]] {
-// CHECK: scf.for %[[VAL_38:.*]] = %[[VAL_8]] to %[[VAL_6]] step %[[VAL_9]] {
-// CHECK: %[[VAL_39:.*]] = muli %[[VAL_34]], %[[VAL_6]] : index
+// CHECK: scf.for %[[VAL_38:.*]] = %[[VAL_7]] to %[[VAL_5]] step %[[VAL_8]] {
+// CHECK: %[[VAL_39:.*]] = muli %[[VAL_34]], %[[VAL_5]] : index
// CHECK: %[[VAL_40:.*]] = addi %[[VAL_39]], %[[VAL_38]] : index
-// CHECK: %[[VAL_41:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_40]]] : memref<?xf32>
-// CHECK: %[[VAL_42:.*]] = load %[[VAL_15]]{{\[}}%[[VAL_24]], %[[VAL_35]], %[[VAL_38]]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_41:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_40]]] : memref<?xf32>
+// CHECK: %[[VAL_42:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_24]], %[[VAL_35]], %[[VAL_38]]] : memref<32x16x8xf32>
// CHECK: %[[VAL_43:.*]] = addf %[[VAL_41]], %[[VAL_42]] : f32
// CHECK: store %[[VAL_43]], %[[VAL_16]]{{\[}}%[[VAL_24]], %[[VAL_35]], %[[VAL_38]]] : memref<32x16x8xf32>
// CHECK: }
// CHECK: } else {
-// CHECK: scf.if %[[VAL_7]] {
-// CHECK: scf.for %[[VAL_44:.*]] = %[[VAL_8]] to %[[VAL_6]] step %[[VAL_9]] {
-// CHECK: %[[VAL_45:.*]] = load %[[VAL_15]]{{\[}}%[[VAL_24]], %[[VAL_35]], %[[VAL_44]]] : memref<32x16x8xf32>
+// CHECK: scf.if %[[VAL_6]] {
+// CHECK: scf.for %[[VAL_44:.*]] = %[[VAL_7]] to %[[VAL_5]] step %[[VAL_8]] {
+// CHECK: %[[VAL_45:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_24]], %[[VAL_35]], %[[VAL_44]]] : memref<32x16x8xf32>
// CHECK: store %[[VAL_45]], %[[VAL_16]]{{\[}}%[[VAL_24]], %[[VAL_35]], %[[VAL_44]]] : memref<32x16x8xf32>
// CHECK: }
// CHECK: } else {
// CHECK: }
// CHECK: }
// CHECK: %[[VAL_46:.*]] = cmpi eq, %[[VAL_36]], %[[VAL_35]] : index
-// CHECK: %[[VAL_47:.*]] = addi %[[VAL_34]], %[[VAL_9]] : index
+// CHECK: %[[VAL_47:.*]] = addi %[[VAL_34]], %[[VAL_8]] : index
// CHECK: %[[VAL_48:.*]] = select %[[VAL_46]], %[[VAL_47]], %[[VAL_34]] : index
-// CHECK: %[[VAL_49:.*]] = addi %[[VAL_35]], %[[VAL_9]] : index
+// CHECK: %[[VAL_49:.*]] = addi %[[VAL_35]], %[[VAL_8]] : index
// CHECK: scf.yield %[[VAL_48]], %[[VAL_49]] : index, index
// CHECK: }
-// CHECK: scf.for %[[VAL_50:.*]] = %[[VAL_51:.*]]#1 to %[[VAL_5]] step %[[VAL_9]] {
-// CHECK: scf.for %[[VAL_52:.*]] = %[[VAL_8]] to %[[VAL_6]] step %[[VAL_9]] {
-// CHECK: %[[VAL_53:.*]] = load %[[VAL_15]]{{\[}}%[[VAL_24]], %[[VAL_50]], %[[VAL_52]]] : memref<32x16x8xf32>
+// CHECK: scf.for %[[VAL_50:.*]] = %[[VAL_51:.*]]#1 to %[[VAL_4]] step %[[VAL_8]] {
+// CHECK: scf.for %[[VAL_52:.*]] = %[[VAL_7]] to %[[VAL_5]] step %[[VAL_8]] {
+// CHECK: %[[VAL_53:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_24]], %[[VAL_50]], %[[VAL_52]]] : memref<32x16x8xf32>
// CHECK: store %[[VAL_53]], %[[VAL_16]]{{\[}}%[[VAL_24]], %[[VAL_50]], %[[VAL_52]]] : memref<32x16x8xf32>
// CHECK: }
// CHECK: }
// CHECK: } else {
-// CHECK: scf.if %[[VAL_7]] {
-// CHECK: scf.for %[[VAL_54:.*]] = %[[VAL_8]] to %[[VAL_5]] step %[[VAL_9]] {
-// CHECK: scf.for %[[VAL_55:.*]] = %[[VAL_8]] to %[[VAL_6]] step %[[VAL_9]] {
-// CHECK: %[[VAL_56:.*]] = load %[[VAL_15]]{{\[}}%[[VAL_24]], %[[VAL_54]], %[[VAL_55]]] : memref<32x16x8xf32>
+// CHECK: scf.if %[[VAL_6]] {
+// CHECK: scf.for %[[VAL_54:.*]] = %[[VAL_7]] to %[[VAL_4]] step %[[VAL_8]] {
+// CHECK: scf.for %[[VAL_55:.*]] = %[[VAL_7]] to %[[VAL_5]] step %[[VAL_8]] {
+// CHECK: %[[VAL_56:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_24]], %[[VAL_54]], %[[VAL_55]]] : memref<32x16x8xf32>
// CHECK: store %[[VAL_56]], %[[VAL_16]]{{\[}}%[[VAL_24]], %[[VAL_54]], %[[VAL_55]]] : memref<32x16x8xf32>
// CHECK: }
// CHECK: }
// CHECK: }
// CHECK: }
// CHECK: %[[VAL_57:.*]] = cmpi eq, %[[VAL_25]], %[[VAL_24]] : index
-// CHECK: %[[VAL_58:.*]] = addi %[[VAL_23]], %[[VAL_9]] : index
+// CHECK: %[[VAL_58:.*]] = addi %[[VAL_23]], %[[VAL_8]] : index
// CHECK: %[[VAL_59:.*]] = select %[[VAL_57]], %[[VAL_58]], %[[VAL_23]] : index
-// CHECK: %[[VAL_60:.*]] = addi %[[VAL_24]], %[[VAL_9]] : index
+// CHECK: %[[VAL_60:.*]] = addi %[[VAL_24]], %[[VAL_8]] : index
// CHECK: scf.yield %[[VAL_59]], %[[VAL_60]] : index, index
// CHECK: }
-// CHECK: scf.for %[[VAL_61:.*]] = %[[VAL_62:.*]]#1 to %[[VAL_4]] step %[[VAL_9]] {
-// CHECK: scf.for %[[VAL_63:.*]] = %[[VAL_8]] to %[[VAL_5]] step %[[VAL_9]] {
-// CHECK: scf.for %[[VAL_64:.*]] = %[[VAL_8]] to %[[VAL_6]] step %[[VAL_9]] {
-// CHECK: %[[VAL_65:.*]] = load %[[VAL_15]]{{\[}}%[[VAL_61]], %[[VAL_63]], %[[VAL_64]]] : memref<32x16x8xf32>
+// CHECK: scf.for %[[VAL_61:.*]] = %[[VAL_62:.*]]#1 to %[[VAL_3]] step %[[VAL_8]] {
+// CHECK: scf.for %[[VAL_63:.*]] = %[[VAL_7]] to %[[VAL_4]] step %[[VAL_8]] {
+// CHECK: scf.for %[[VAL_64:.*]] = %[[VAL_7]] to %[[VAL_5]] step %[[VAL_8]] {
+// CHECK: %[[VAL_65:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_61]], %[[VAL_63]], %[[VAL_64]]] : memref<32x16x8xf32>
// CHECK: store %[[VAL_65]], %[[VAL_16]]{{\[}}%[[VAL_61]], %[[VAL_63]], %[[VAL_64]]] : memref<32x16x8xf32>
// CHECK: }
// CHECK: }
// CHECK-SAME: %[[VAL_0:.*0]]: tensor<32x16x8xf32>,
// CHECK-SAME: %[[VAL_1:.*1]]: tensor<32x16x8xf32>,
// CHECK-SAME: %[[VAL_2:.*2]]: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
-// CHECK: %[[VAL_3:.*]] = constant 999 : index
-// CHECK: %[[VAL_4:.*]] = constant 8 : index
-// CHECK: %[[VAL_5:.*]] = constant 0 : index
-// CHECK: %[[VAL_6:.*]] = constant 1 : index
-// CHECK: %[[VAL_7:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = alloca(%[[VAL_3]]) : memref<?xf32>
-// CHECK: %[[VAL_12:.*]] = alloca() : memref<32x16x8xf32>
-// CHECK: %[[VAL_13:.*]] = alloca() : memref<32x16x8xf32>
-// CHECK: %[[VAL_14:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_5]]] : memref<?xindex>
-// CHECK: %[[VAL_15:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_6]]] : memref<?xindex>
-// CHECK: scf.for %[[VAL_16:.*]] = %[[VAL_14]] to %[[VAL_15]] step %[[VAL_6]] {
-// CHECK: %[[VAL_17:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_16]]] : memref<?xindex>
-// CHECK: %[[VAL_18:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_16]]] : memref<?xindex>
-// CHECK: %[[VAL_19:.*]] = addi %[[VAL_16]], %[[VAL_6]] : index
-// CHECK: %[[VAL_20:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_19]]] : memref<?xindex>
-// CHECK: scf.for %[[VAL_21:.*]] = %[[VAL_18]] to %[[VAL_20]] step %[[VAL_6]] {
-// CHECK: %[[VAL_22:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_21]]] : memref<?xindex>
-// CHECK: scf.for %[[VAL_23:.*]] = %[[VAL_5]] to %[[VAL_4]] step %[[VAL_6]] {
-// CHECK: %[[VAL_24:.*]] = muli %[[VAL_21]], %[[VAL_4]] : index
+// CHECK: %[[VAL_3:.*]] = constant 8 : index
+// CHECK: %[[VAL_4:.*]] = constant 0 : index
+// CHECK: %[[VAL_5:.*]] = constant 1 : index
+// CHECK: %[[VAL_6:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_10:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
+// CHECK: %[[VAL_11:.*]] = tensor_to_memref %[[VAL_1]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_12:.*]] = tensor_to_memref %[[VAL_2]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_13:.*]] = alloc() : memref<32x16x8xf32>
+// CHECK: linalg.copy(%[[VAL_12]], %[[VAL_13]]) : memref<32x16x8xf32>, memref<32x16x8xf32>
+// CHECK: %[[VAL_14:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref<?xindex>
+// CHECK: %[[VAL_15:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref<?xindex>
+// CHECK: scf.for %[[VAL_16:.*]] = %[[VAL_14]] to %[[VAL_15]] step %[[VAL_5]] {
+// CHECK: %[[VAL_17:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_16]]] : memref<?xindex>
+// CHECK: %[[VAL_18:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_16]]] : memref<?xindex>
+// CHECK: %[[VAL_19:.*]] = addi %[[VAL_16]], %[[VAL_5]] : index
+// CHECK: %[[VAL_20:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_19]]] : memref<?xindex>
+// CHECK: scf.for %[[VAL_21:.*]] = %[[VAL_18]] to %[[VAL_20]] step %[[VAL_5]] {
+// CHECK: %[[VAL_22:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_21]]] : memref<?xindex>
+// CHECK: scf.for %[[VAL_23:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] {
+// CHECK: %[[VAL_24:.*]] = muli %[[VAL_21]], %[[VAL_3]] : index
// CHECK: %[[VAL_25:.*]] = addi %[[VAL_24]], %[[VAL_23]] : index
-// CHECK: %[[VAL_26:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_25]]] : memref<?xf32>
-// CHECK: %[[VAL_27:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_17]], %[[VAL_22]], %[[VAL_23]]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_26:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_25]]] : memref<?xf32>
+// CHECK: %[[VAL_27:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_17]], %[[VAL_22]], %[[VAL_23]]] : memref<32x16x8xf32>
// CHECK: %[[VAL_28:.*]] = mulf %[[VAL_26]], %[[VAL_27]] : f32
// CHECK: store %[[VAL_28]], %[[VAL_13]]{{\[}}%[[VAL_17]], %[[VAL_22]], %[[VAL_23]]] : memref<32x16x8xf32>
// CHECK: }
// CHECK-SAME: %[[VAL_0:.*0]]: tensor<32x16x8xf32>,
// CHECK-SAME: %[[VAL_1:.*1]]: tensor<32x16x8xf32>,
// CHECK-SAME: %[[VAL_2:.*2]]: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
-// CHECK: %[[VAL_3:.*]] = constant 999 : index
+// CHECK: %[[VAL_3:.*]] = constant 2 : index
// CHECK: %[[VAL_4:.*]] = constant 32 : index
// CHECK: %[[VAL_5:.*]] = constant 16 : index
// CHECK: %[[VAL_6:.*]] = constant 8 : index
// CHECK: %[[VAL_7:.*]] = constant true
// CHECK: %[[VAL_8:.*]] = constant 0 : index
// CHECK: %[[VAL_9:.*]] = constant 1 : index
-// CHECK: %[[VAL_10:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_12:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_13:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_14:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_15:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_16:.*]] = alloca(%[[VAL_3]]) : memref<?xf32>
-// CHECK: %[[VAL_17:.*]] = alloca() : memref<32x16x8xf32>
-// CHECK: %[[VAL_18:.*]] = alloca() : memref<32x16x8xf32>
-// CHECK: %[[VAL_19:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_8]]] : memref<?xindex>
-// CHECK: %[[VAL_20:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_9]]] : memref<?xindex>
-// CHECK: %[[VAL_21:.*]]:2 = scf.while (%[[VAL_22:.*]] = %[[VAL_19]], %[[VAL_23:.*]] = %[[VAL_8]]) : (index, index) -> (index, index) {
-// CHECK: %[[VAL_24:.*]] = cmpi ult, %[[VAL_22]], %[[VAL_20]] : index
-// CHECK: scf.condition(%[[VAL_24]]) %[[VAL_22]], %[[VAL_23]] : index, index
+// CHECK: %[[VAL_10:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_12:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_9]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_13:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_9]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_14:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_15:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_16:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
+// CHECK: %[[VAL_17:.*]] = tensor_to_memref %[[VAL_1]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_18:.*]] = tensor_to_memref %[[VAL_2]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_19:.*]] = alloc() : memref<32x16x8xf32>
+// CHECK: linalg.copy(%[[VAL_18]], %[[VAL_19]]) : memref<32x16x8xf32>, memref<32x16x8xf32>
+// CHECK: %[[VAL_20:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_8]]] : memref<?xindex>
+// CHECK: %[[VAL_21:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_9]]] : memref<?xindex>
+// CHECK: %[[VAL_22:.*]]:2 = scf.while (%[[VAL_23:.*]] = %[[VAL_20]], %[[VAL_24:.*]] = %[[VAL_8]]) : (index, index) -> (index, index) {
+// CHECK: %[[VAL_25:.*]] = cmpi ult, %[[VAL_23]], %[[VAL_21]] : index
+// CHECK: scf.condition(%[[VAL_25]]) %[[VAL_23]], %[[VAL_24]] : index, index
// CHECK: } do {
-// CHECK: ^bb0(%[[VAL_25:.*]]: index, %[[VAL_26:.*]]: index):
-// CHECK: %[[VAL_27:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_25]]] : memref<?xindex>
-// CHECK: %[[VAL_28:.*]] = cmpi eq, %[[VAL_27]], %[[VAL_26]] : index
-// CHECK: scf.if %[[VAL_28]] {
-// CHECK: %[[VAL_29:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_25]]] : memref<?xindex>
-// CHECK: %[[VAL_30:.*]] = addi %[[VAL_25]], %[[VAL_9]] : index
-// CHECK: %[[VAL_31:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_30]]] : memref<?xindex>
-// CHECK: %[[VAL_32:.*]]:2 = scf.while (%[[VAL_33:.*]] = %[[VAL_29]], %[[VAL_34:.*]] = %[[VAL_8]]) : (index, index) -> (index, index) {
-// CHECK: %[[VAL_35:.*]] = cmpi ult, %[[VAL_33]], %[[VAL_31]] : index
-// CHECK: scf.condition(%[[VAL_35]]) %[[VAL_33]], %[[VAL_34]] : index, index
+// CHECK: ^bb0(%[[VAL_26:.*]]: index, %[[VAL_27:.*]]: index):
+// CHECK: %[[VAL_28:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_26]]] : memref<?xindex>
+// CHECK: %[[VAL_29:.*]] = cmpi eq, %[[VAL_28]], %[[VAL_27]] : index
+// CHECK: scf.if %[[VAL_29]] {
+// CHECK: %[[VAL_30:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_26]]] : memref<?xindex>
+// CHECK: %[[VAL_31:.*]] = addi %[[VAL_26]], %[[VAL_9]] : index
+// CHECK: %[[VAL_32:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_31]]] : memref<?xindex>
+// CHECK: %[[VAL_33:.*]]:2 = scf.while (%[[VAL_34:.*]] = %[[VAL_30]], %[[VAL_35:.*]] = %[[VAL_8]]) : (index, index) -> (index, index) {
+// CHECK: %[[VAL_36:.*]] = cmpi ult, %[[VAL_34]], %[[VAL_32]] : index
+// CHECK: scf.condition(%[[VAL_36]]) %[[VAL_34]], %[[VAL_35]] : index, index
// CHECK: } do {
-// CHECK: ^bb0(%[[VAL_36:.*]]: index, %[[VAL_37:.*]]: index):
-// CHECK: %[[VAL_38:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_36]]] : memref<?xindex>
-// CHECK: %[[VAL_39:.*]] = cmpi eq, %[[VAL_38]], %[[VAL_37]] : index
-// CHECK: scf.if %[[VAL_39]] {
-// CHECK: %[[VAL_40:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_36]]] : memref<?xindex>
-// CHECK: %[[VAL_41:.*]] = addi %[[VAL_36]], %[[VAL_9]] : index
-// CHECK: %[[VAL_42:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_41]]] : memref<?xindex>
-// CHECK: %[[VAL_43:.*]]:2 = scf.while (%[[VAL_44:.*]] = %[[VAL_40]], %[[VAL_45:.*]] = %[[VAL_8]]) : (index, index) -> (index, index) {
-// CHECK: %[[VAL_46:.*]] = cmpi ult, %[[VAL_44]], %[[VAL_42]] : index
-// CHECK: scf.condition(%[[VAL_46]]) %[[VAL_44]], %[[VAL_45]] : index, index
+// CHECK: ^bb0(%[[VAL_37:.*]]: index, %[[VAL_38:.*]]: index):
+// CHECK: %[[VAL_39:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_37]]] : memref<?xindex>
+// CHECK: %[[VAL_40:.*]] = cmpi eq, %[[VAL_39]], %[[VAL_38]] : index
+// CHECK: scf.if %[[VAL_40]] {
+// CHECK: %[[VAL_41:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_37]]] : memref<?xindex>
+// CHECK: %[[VAL_42:.*]] = addi %[[VAL_37]], %[[VAL_9]] : index
+// CHECK: %[[VAL_43:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_42]]] : memref<?xindex>
+// CHECK: %[[VAL_44:.*]]:2 = scf.while (%[[VAL_45:.*]] = %[[VAL_41]], %[[VAL_46:.*]] = %[[VAL_8]]) : (index, index) -> (index, index) {
+// CHECK: %[[VAL_47:.*]] = cmpi ult, %[[VAL_45]], %[[VAL_43]] : index
+// CHECK: scf.condition(%[[VAL_47]]) %[[VAL_45]], %[[VAL_46]] : index, index
// CHECK: } do {
-// CHECK: ^bb0(%[[VAL_47:.*]]: index, %[[VAL_48:.*]]: index):
-// CHECK: %[[VAL_49:.*]] = load %[[VAL_15]]{{\[}}%[[VAL_47]]] : memref<?xindex>
-// CHECK: %[[VAL_50:.*]] = cmpi eq, %[[VAL_49]], %[[VAL_48]] : index
-// CHECK: scf.if %[[VAL_50]] {
-// CHECK: %[[VAL_51:.*]] = load %[[VAL_16]]{{\[}}%[[VAL_47]]] : memref<?xf32>
-// CHECK: %[[VAL_52:.*]] = load %[[VAL_17]]{{\[}}%[[VAL_26]], %[[VAL_37]], %[[VAL_48]]] : memref<32x16x8xf32>
-// CHECK: %[[VAL_53:.*]] = addf %[[VAL_51]], %[[VAL_52]] : f32
-// CHECK: store %[[VAL_53]], %[[VAL_18]]{{\[}}%[[VAL_26]], %[[VAL_37]], %[[VAL_48]]] : memref<32x16x8xf32>
+// CHECK: ^bb0(%[[VAL_48:.*]]: index, %[[VAL_49:.*]]: index):
+// CHECK: %[[VAL_50:.*]] = load %[[VAL_15]]{{\[}}%[[VAL_48]]] : memref<?xindex>
+// CHECK: %[[VAL_51:.*]] = cmpi eq, %[[VAL_50]], %[[VAL_49]] : index
+// CHECK: scf.if %[[VAL_51]] {
+// CHECK: %[[VAL_52:.*]] = load %[[VAL_16]]{{\[}}%[[VAL_48]]] : memref<?xf32>
+// CHECK: %[[VAL_53:.*]] = load %[[VAL_17]]{{\[}}%[[VAL_27]], %[[VAL_38]], %[[VAL_49]]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_54:.*]] = addf %[[VAL_52]], %[[VAL_53]] : f32
+// CHECK: store %[[VAL_54]], %[[VAL_19]]{{\[}}%[[VAL_27]], %[[VAL_38]], %[[VAL_49]]] : memref<32x16x8xf32>
// CHECK: } else {
// CHECK: scf.if %[[VAL_7]] {
-// CHECK: %[[VAL_54:.*]] = load %[[VAL_17]]{{\[}}%[[VAL_26]], %[[VAL_37]], %[[VAL_48]]] : memref<32x16x8xf32>
-// CHECK: store %[[VAL_54]], %[[VAL_18]]{{\[}}%[[VAL_26]], %[[VAL_37]], %[[VAL_48]]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_55:.*]] = load %[[VAL_17]]{{\[}}%[[VAL_27]], %[[VAL_38]], %[[VAL_49]]] : memref<32x16x8xf32>
+// CHECK: store %[[VAL_55]], %[[VAL_19]]{{\[}}%[[VAL_27]], %[[VAL_38]], %[[VAL_49]]] : memref<32x16x8xf32>
// CHECK: } else {
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_55:.*]] = cmpi eq, %[[VAL_49]], %[[VAL_48]] : index
-// CHECK: %[[VAL_56:.*]] = addi %[[VAL_47]], %[[VAL_9]] : index
-// CHECK: %[[VAL_57:.*]] = select %[[VAL_55]], %[[VAL_56]], %[[VAL_47]] : index
-// CHECK: %[[VAL_58:.*]] = addi %[[VAL_48]], %[[VAL_9]] : index
-// CHECK: scf.yield %[[VAL_57]], %[[VAL_58]] : index, index
+// CHECK: %[[VAL_56:.*]] = cmpi eq, %[[VAL_50]], %[[VAL_49]] : index
+// CHECK: %[[VAL_57:.*]] = addi %[[VAL_48]], %[[VAL_9]] : index
+// CHECK: %[[VAL_58:.*]] = select %[[VAL_56]], %[[VAL_57]], %[[VAL_48]] : index
+// CHECK: %[[VAL_59:.*]] = addi %[[VAL_49]], %[[VAL_9]] : index
+// CHECK: scf.yield %[[VAL_58]], %[[VAL_59]] : index, index
// CHECK: }
-// CHECK: scf.for %[[VAL_59:.*]] = %[[VAL_60:.*]]#1 to %[[VAL_6]] step %[[VAL_9]] {
-// CHECK: %[[VAL_61:.*]] = load %[[VAL_17]]{{\[}}%[[VAL_26]], %[[VAL_37]], %[[VAL_59]]] : memref<32x16x8xf32>
-// CHECK: store %[[VAL_61]], %[[VAL_18]]{{\[}}%[[VAL_26]], %[[VAL_37]], %[[VAL_59]]] : memref<32x16x8xf32>
+// CHECK: scf.for %[[VAL_60:.*]] = %[[VAL_61:.*]]#1 to %[[VAL_6]] step %[[VAL_9]] {
+// CHECK: %[[VAL_62:.*]] = load %[[VAL_17]]{{\[}}%[[VAL_27]], %[[VAL_38]], %[[VAL_60]]] : memref<32x16x8xf32>
+// CHECK: store %[[VAL_62]], %[[VAL_19]]{{\[}}%[[VAL_27]], %[[VAL_38]], %[[VAL_60]]] : memref<32x16x8xf32>
// CHECK: }
// CHECK: } else {
// CHECK: scf.if %[[VAL_7]] {
-// CHECK: scf.for %[[VAL_62:.*]] = %[[VAL_8]] to %[[VAL_6]] step %[[VAL_9]] {
-// CHECK: %[[VAL_63:.*]] = load %[[VAL_17]]{{\[}}%[[VAL_26]], %[[VAL_37]], %[[VAL_62]]] : memref<32x16x8xf32>
-// CHECK: store %[[VAL_63]], %[[VAL_18]]{{\[}}%[[VAL_26]], %[[VAL_37]], %[[VAL_62]]] : memref<32x16x8xf32>
+// CHECK: scf.for %[[VAL_63:.*]] = %[[VAL_8]] to %[[VAL_6]] step %[[VAL_9]] {
+// CHECK: %[[VAL_64:.*]] = load %[[VAL_17]]{{\[}}%[[VAL_27]], %[[VAL_38]], %[[VAL_63]]] : memref<32x16x8xf32>
+// CHECK: store %[[VAL_64]], %[[VAL_19]]{{\[}}%[[VAL_27]], %[[VAL_38]], %[[VAL_63]]] : memref<32x16x8xf32>
// CHECK: }
// CHECK: } else {
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_64:.*]] = cmpi eq, %[[VAL_38]], %[[VAL_37]] : index
-// CHECK: %[[VAL_65:.*]] = addi %[[VAL_36]], %[[VAL_9]] : index
-// CHECK: %[[VAL_66:.*]] = select %[[VAL_64]], %[[VAL_65]], %[[VAL_36]] : index
-// CHECK: %[[VAL_67:.*]] = addi %[[VAL_37]], %[[VAL_9]] : index
-// CHECK: scf.yield %[[VAL_66]], %[[VAL_67]] : index, index
+// CHECK: %[[VAL_65:.*]] = cmpi eq, %[[VAL_39]], %[[VAL_38]] : index
+// CHECK: %[[VAL_66:.*]] = addi %[[VAL_37]], %[[VAL_9]] : index
+// CHECK: %[[VAL_67:.*]] = select %[[VAL_65]], %[[VAL_66]], %[[VAL_37]] : index
+// CHECK: %[[VAL_68:.*]] = addi %[[VAL_38]], %[[VAL_9]] : index
+// CHECK: scf.yield %[[VAL_67]], %[[VAL_68]] : index, index
// CHECK: }
-// CHECK: scf.for %[[VAL_68:.*]] = %[[VAL_69:.*]]#1 to %[[VAL_5]] step %[[VAL_9]] {
-// CHECK: scf.for %[[VAL_70:.*]] = %[[VAL_8]] to %[[VAL_6]] step %[[VAL_9]] {
-// CHECK: %[[VAL_71:.*]] = load %[[VAL_17]]{{\[}}%[[VAL_26]], %[[VAL_68]], %[[VAL_70]]] : memref<32x16x8xf32>
-// CHECK: store %[[VAL_71]], %[[VAL_18]]{{\[}}%[[VAL_26]], %[[VAL_68]], %[[VAL_70]]] : memref<32x16x8xf32>
+// CHECK: scf.for %[[VAL_69:.*]] = %[[VAL_70:.*]]#1 to %[[VAL_5]] step %[[VAL_9]] {
+// CHECK: scf.for %[[VAL_71:.*]] = %[[VAL_8]] to %[[VAL_6]] step %[[VAL_9]] {
+// CHECK: %[[VAL_72:.*]] = load %[[VAL_17]]{{\[}}%[[VAL_27]], %[[VAL_69]], %[[VAL_71]]] : memref<32x16x8xf32>
+// CHECK: store %[[VAL_72]], %[[VAL_19]]{{\[}}%[[VAL_27]], %[[VAL_69]], %[[VAL_71]]] : memref<32x16x8xf32>
// CHECK: }
// CHECK: }
// CHECK: } else {
// CHECK: scf.if %[[VAL_7]] {
-// CHECK: scf.for %[[VAL_72:.*]] = %[[VAL_8]] to %[[VAL_5]] step %[[VAL_9]] {
-// CHECK: scf.for %[[VAL_73:.*]] = %[[VAL_8]] to %[[VAL_6]] step %[[VAL_9]] {
-// CHECK: %[[VAL_74:.*]] = load %[[VAL_17]]{{\[}}%[[VAL_26]], %[[VAL_72]], %[[VAL_73]]] : memref<32x16x8xf32>
-// CHECK: store %[[VAL_74]], %[[VAL_18]]{{\[}}%[[VAL_26]], %[[VAL_72]], %[[VAL_73]]] : memref<32x16x8xf32>
+// CHECK: scf.for %[[VAL_73:.*]] = %[[VAL_8]] to %[[VAL_5]] step %[[VAL_9]] {
+// CHECK: scf.for %[[VAL_74:.*]] = %[[VAL_8]] to %[[VAL_6]] step %[[VAL_9]] {
+// CHECK: %[[VAL_75:.*]] = load %[[VAL_17]]{{\[}}%[[VAL_27]], %[[VAL_73]], %[[VAL_74]]] : memref<32x16x8xf32>
+// CHECK: store %[[VAL_75]], %[[VAL_19]]{{\[}}%[[VAL_27]], %[[VAL_73]], %[[VAL_74]]] : memref<32x16x8xf32>
// CHECK: }
// CHECK: }
// CHECK: } else {
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_75:.*]] = cmpi eq, %[[VAL_27]], %[[VAL_26]] : index
-// CHECK: %[[VAL_76:.*]] = addi %[[VAL_25]], %[[VAL_9]] : index
-// CHECK: %[[VAL_77:.*]] = select %[[VAL_75]], %[[VAL_76]], %[[VAL_25]] : index
-// CHECK: %[[VAL_78:.*]] = addi %[[VAL_26]], %[[VAL_9]] : index
-// CHECK: scf.yield %[[VAL_77]], %[[VAL_78]] : index, index
+// CHECK: %[[VAL_76:.*]] = cmpi eq, %[[VAL_28]], %[[VAL_27]] : index
+// CHECK: %[[VAL_77:.*]] = addi %[[VAL_26]], %[[VAL_9]] : index
+// CHECK: %[[VAL_78:.*]] = select %[[VAL_76]], %[[VAL_77]], %[[VAL_26]] : index
+// CHECK: %[[VAL_79:.*]] = addi %[[VAL_27]], %[[VAL_9]] : index
+// CHECK: scf.yield %[[VAL_78]], %[[VAL_79]] : index, index
// CHECK: }
-// CHECK: scf.for %[[VAL_79:.*]] = %[[VAL_80:.*]]#1 to %[[VAL_4]] step %[[VAL_9]] {
-// CHECK: scf.for %[[VAL_81:.*]] = %[[VAL_8]] to %[[VAL_5]] step %[[VAL_9]] {
-// CHECK: scf.for %[[VAL_82:.*]] = %[[VAL_8]] to %[[VAL_6]] step %[[VAL_9]] {
-// CHECK: %[[VAL_83:.*]] = load %[[VAL_17]]{{\[}}%[[VAL_79]], %[[VAL_81]], %[[VAL_82]]] : memref<32x16x8xf32>
-// CHECK: store %[[VAL_83]], %[[VAL_18]]{{\[}}%[[VAL_79]], %[[VAL_81]], %[[VAL_82]]] : memref<32x16x8xf32>
+// CHECK: scf.for %[[VAL_80:.*]] = %[[VAL_81:.*]]#1 to %[[VAL_4]] step %[[VAL_9]] {
+// CHECK: scf.for %[[VAL_82:.*]] = %[[VAL_8]] to %[[VAL_5]] step %[[VAL_9]] {
+// CHECK: scf.for %[[VAL_83:.*]] = %[[VAL_8]] to %[[VAL_6]] step %[[VAL_9]] {
+// CHECK: %[[VAL_84:.*]] = load %[[VAL_17]]{{\[}}%[[VAL_80]], %[[VAL_82]], %[[VAL_83]]] : memref<32x16x8xf32>
+// CHECK: store %[[VAL_84]], %[[VAL_19]]{{\[}}%[[VAL_80]], %[[VAL_82]], %[[VAL_83]]] : memref<32x16x8xf32>
// CHECK: }
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_84:.*]] = tensor_load %[[VAL_18]] : memref<32x16x8xf32>
-// CHECK: return %[[VAL_84]] : tensor<32x16x8xf32>
+// CHECK: %[[VAL_85:.*]] = tensor_load %[[VAL_19]] : memref<32x16x8xf32>
+// CHECK: return %[[VAL_85]] : tensor<32x16x8xf32>
// CHECK: }
func @add_sss(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
%0 = linalg.generic #trait_sss
// CHECK-SAME: %[[VAL_0:.*0]]: tensor<32x16x8xf32>,
// CHECK-SAME: %[[VAL_1:.*1]]: tensor<32x16x8xf32>,
// CHECK-SAME: %[[VAL_2:.*2]]: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
-// CHECK: %[[VAL_3:.*]] = constant 999 : index
+// CHECK: %[[VAL_3:.*]] = constant 2 : index
// CHECK: %[[VAL_4:.*]] = constant 0 : index
// CHECK: %[[VAL_5:.*]] = constant 1 : index
-// CHECK: %[[VAL_6:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_7:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_12:.*]] = alloca(%[[VAL_3]]) : memref<?xf32>
-// CHECK: %[[VAL_13:.*]] = alloca() : memref<32x16x8xf32>
-// CHECK: %[[VAL_14:.*]] = alloca() : memref<32x16x8xf32>
-// CHECK: %[[VAL_15:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref<?xindex>
-// CHECK: %[[VAL_16:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref<?xindex>
-// CHECK: scf.for %[[VAL_17:.*]] = %[[VAL_15]] to %[[VAL_16]] step %[[VAL_5]] {
-// CHECK: %[[VAL_18:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_17]]] : memref<?xindex>
-// CHECK: %[[VAL_19:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_17]]] : memref<?xindex>
-// CHECK: %[[VAL_20:.*]] = addi %[[VAL_17]], %[[VAL_5]] : index
-// CHECK: %[[VAL_21:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_20]]] : memref<?xindex>
-// CHECK: scf.for %[[VAL_22:.*]] = %[[VAL_19]] to %[[VAL_21]] step %[[VAL_5]] {
-// CHECK: %[[VAL_23:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_22]]] : memref<?xindex>
-// CHECK: %[[VAL_24:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_22]]] : memref<?xindex>
-// CHECK: %[[VAL_25:.*]] = addi %[[VAL_22]], %[[VAL_5]] : index
-// CHECK: %[[VAL_26:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_25]]] : memref<?xindex>
-// CHECK: scf.for %[[VAL_27:.*]] = %[[VAL_24]] to %[[VAL_26]] step %[[VAL_5]] {
-// CHECK: %[[VAL_28:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_27]]] : memref<?xindex>
-// CHECK: %[[VAL_29:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_27]]] : memref<?xf32>
-// CHECK: %[[VAL_30:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_18]], %[[VAL_23]], %[[VAL_28]]] : memref<32x16x8xf32>
-// CHECK: %[[VAL_31:.*]] = mulf %[[VAL_29]], %[[VAL_30]] : f32
-// CHECK: store %[[VAL_31]], %[[VAL_14]]{{\[}}%[[VAL_18]], %[[VAL_23]], %[[VAL_28]]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_6:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_10:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_12:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
+// CHECK: %[[VAL_13:.*]] = tensor_to_memref %[[VAL_1]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_14:.*]] = tensor_to_memref %[[VAL_2]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_15:.*]] = alloc() : memref<32x16x8xf32>
+// CHECK: linalg.copy(%[[VAL_14]], %[[VAL_15]]) : memref<32x16x8xf32>, memref<32x16x8xf32>
+// CHECK: %[[VAL_16:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref<?xindex>
+// CHECK: %[[VAL_17:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref<?xindex>
+// CHECK: scf.for %[[VAL_18:.*]] = %[[VAL_16]] to %[[VAL_17]] step %[[VAL_5]] {
+// CHECK: %[[VAL_19:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_18]]] : memref<?xindex>
+// CHECK: %[[VAL_20:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_18]]] : memref<?xindex>
+// CHECK: %[[VAL_21:.*]] = addi %[[VAL_18]], %[[VAL_5]] : index
+// CHECK: %[[VAL_22:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_21]]] : memref<?xindex>
+// CHECK: scf.for %[[VAL_23:.*]] = %[[VAL_20]] to %[[VAL_22]] step %[[VAL_5]] {
+// CHECK: %[[VAL_24:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_23]]] : memref<?xindex>
+// CHECK: %[[VAL_25:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_23]]] : memref<?xindex>
+// CHECK: %[[VAL_26:.*]] = addi %[[VAL_23]], %[[VAL_5]] : index
+// CHECK: %[[VAL_27:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_26]]] : memref<?xindex>
+// CHECK: scf.for %[[VAL_28:.*]] = %[[VAL_25]] to %[[VAL_27]] step %[[VAL_5]] {
+// CHECK: %[[VAL_29:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_28]]] : memref<?xindex>
+// CHECK: %[[VAL_30:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_28]]] : memref<?xf32>
+// CHECK: %[[VAL_31:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_19]], %[[VAL_24]], %[[VAL_29]]] : memref<32x16x8xf32>
+// CHECK: %[[VAL_32:.*]] = mulf %[[VAL_30]], %[[VAL_31]] : f32
+// CHECK: store %[[VAL_32]], %[[VAL_15]]{{\[}}%[[VAL_19]], %[[VAL_24]], %[[VAL_29]]] : memref<32x16x8xf32>
// CHECK: }
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_32:.*]] = tensor_load %[[VAL_14]] : memref<32x16x8xf32>
-// CHECK: return %[[VAL_32]] : tensor<32x16x8xf32>
+// CHECK: %[[VAL_33:.*]] = tensor_load %[[VAL_15]] : memref<32x16x8xf32>
+// CHECK: return %[[VAL_33]] : tensor<32x16x8xf32>
// CHECK: }
func @mul_sss(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
%0 = linalg.generic #trait_sss
// CHECK-SAME: %[[VAL_1:.*1]]: tensor<?x?x?xf32>,
// CHECK-SAME: %[[VAL_2:.*2]]: tensor<?x?xf32>,
// CHECK-SAME: %[[VAL_3:.*3]]: tensor<?x?xf32>) -> tensor<?x?xf32> {
-// CHECK: %[[VAL_4:.*]] = constant 999 : index
+// CHECK: %[[VAL_4:.*]] = constant 2 : index
// CHECK: %[[VAL_5:.*]] = constant 0 : index
// CHECK: %[[VAL_6:.*]] = constant 1 : index
-// CHECK: %[[VAL_7:.*]] = alloca(%[[VAL_4]]) : memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = alloca(%[[VAL_4]]) : memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = alloca(%[[VAL_4]]) : memref<?xf32>
+// CHECK: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_4]] : tensor<?x?x?xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_4]] : tensor<?x?x?xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<?x?x?xf32> to memref<?xf32>
// CHECK: %[[VAL_10:.*]] = dim %[[VAL_2]], %[[VAL_5]] : tensor<?x?xf32>
-// CHECK: %[[VAL_11:.*]] = dim %[[VAL_2]], %[[VAL_6]] : tensor<?x?xf32>
-// CHECK: %[[VAL_12:.*]] = alloca(%[[VAL_10]], %[[VAL_11]]) : memref<?x?xf32>
-// CHECK: %[[VAL_13:.*]] = dim %[[VAL_3]], %[[VAL_5]] : tensor<?x?xf32>
-// CHECK: %[[VAL_14:.*]] = dim %[[VAL_3]], %[[VAL_6]] : tensor<?x?xf32>
-// CHECK: %[[VAL_15:.*]] = alloca(%[[VAL_13]], %[[VAL_14]]) : memref<?x?xf32>
-// CHECK: %[[VAL_16:.*]] = dim %[[VAL_0]], %[[VAL_5]] : tensor<?x?xf32>
-// CHECK: %[[VAL_17:.*]] = dim %[[VAL_0]], %[[VAL_6]] : tensor<?x?xf32>
-// CHECK: %[[VAL_18:.*]] = alloca(%[[VAL_16]], %[[VAL_17]]) : memref<?x?xf32>
-// CHECK: scf.for %[[VAL_19:.*]] = %[[VAL_5]] to %[[VAL_16]] step %[[VAL_6]] {
-// CHECK: scf.for %[[VAL_20:.*]] = %[[VAL_5]] to %[[VAL_10]] step %[[VAL_6]] {
-// CHECK: %[[VAL_21:.*]] = muli %[[VAL_10]], %[[VAL_19]] : index
-// CHECK: %[[VAL_22:.*]] = addi %[[VAL_21]], %[[VAL_20]] : index
+// CHECK: %[[VAL_11:.*]] = tensor_to_memref %[[VAL_2]] : memref<?x?xf32>
+// CHECK: %[[VAL_12:.*]] = tensor_to_memref %[[VAL_3]] : memref<?x?xf32>
+// CHECK: %[[VAL_13:.*]] = dim %[[VAL_0]], %[[VAL_5]] : tensor<?x?xf32>
+// CHECK: %[[VAL_14:.*]] = dim %[[VAL_0]], %[[VAL_6]] : tensor<?x?xf32>
+// CHECK: %[[VAL_15:.*]] = tensor_to_memref %[[VAL_0]] : memref<?x?xf32>
+// CHECK: %[[VAL_16:.*]] = alloc(%[[VAL_13]], %[[VAL_14]]) : memref<?x?xf32>
+// CHECK: linalg.copy(%[[VAL_15]], %[[VAL_16]]) : memref<?x?xf32>, memref<?x?xf32>
+// CHECK: scf.for %[[VAL_17:.*]] = %[[VAL_5]] to %[[VAL_13]] step %[[VAL_6]] {
+// CHECK: scf.for %[[VAL_18:.*]] = %[[VAL_5]] to %[[VAL_10]] step %[[VAL_6]] {
+// CHECK: %[[VAL_19:.*]] = muli %[[VAL_10]], %[[VAL_17]] : index
+// CHECK: %[[VAL_20:.*]] = addi %[[VAL_19]], %[[VAL_18]] : index
+// CHECK: %[[VAL_21:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_20]]] : memref<?xindex>
+// CHECK: %[[VAL_22:.*]] = addi %[[VAL_20]], %[[VAL_6]] : index
// CHECK: %[[VAL_23:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_22]]] : memref<?xindex>
-// CHECK: %[[VAL_24:.*]] = addi %[[VAL_22]], %[[VAL_6]] : index
-// CHECK: %[[VAL_25:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_24]]] : memref<?xindex>
-// CHECK: scf.for %[[VAL_26:.*]] = %[[VAL_23]] to %[[VAL_25]] step %[[VAL_6]] {
-// CHECK: %[[VAL_27:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_26]]] : memref<?xindex>
-// CHECK: %[[VAL_28:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_26]]] : memref<?xf32>
-// CHECK: scf.for %[[VAL_29:.*]] = %[[VAL_5]] to %[[VAL_17]] step %[[VAL_6]] {
-// CHECK: %[[VAL_30:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_20]], %[[VAL_29]]] : memref<?x?xf32>
-// CHECK: %[[VAL_31:.*]] = mulf %[[VAL_28]], %[[VAL_30]] : f32
-// CHECK: %[[VAL_32:.*]] = load %[[VAL_15]]{{\[}}%[[VAL_27]], %[[VAL_29]]] : memref<?x?xf32>
-// CHECK: %[[VAL_33:.*]] = mulf %[[VAL_31]], %[[VAL_32]] : f32
-// CHECK: %[[VAL_34:.*]] = load %[[VAL_18]]{{\[}}%[[VAL_19]], %[[VAL_29]]] : memref<?x?xf32>
-// CHECK: %[[VAL_35:.*]] = addf %[[VAL_33]], %[[VAL_34]] : f32
-// CHECK: store %[[VAL_35]], %[[VAL_18]]{{\[}}%[[VAL_19]], %[[VAL_29]]] : memref<?x?xf32>
+// CHECK: scf.for %[[VAL_24:.*]] = %[[VAL_21]] to %[[VAL_23]] step %[[VAL_6]] {
+// CHECK: %[[VAL_25:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_24]]] : memref<?xindex>
+// CHECK: %[[VAL_26:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_24]]] : memref<?xf32>
+// CHECK: scf.for %[[VAL_27:.*]] = %[[VAL_5]] to %[[VAL_14]] step %[[VAL_6]] {
+// CHECK: %[[VAL_28:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_18]], %[[VAL_27]]] : memref<?x?xf32>
+// CHECK: %[[VAL_29:.*]] = mulf %[[VAL_26]], %[[VAL_28]] : f32
+// CHECK: %[[VAL_30:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_25]], %[[VAL_27]]] : memref<?x?xf32>
+// CHECK: %[[VAL_31:.*]] = mulf %[[VAL_29]], %[[VAL_30]] : f32
+// CHECK: %[[VAL_32:.*]] = load %[[VAL_16]]{{\[}}%[[VAL_17]], %[[VAL_27]]] : memref<?x?xf32>
+// CHECK: %[[VAL_33:.*]] = addf %[[VAL_31]], %[[VAL_32]] : f32
+// CHECK: store %[[VAL_33]], %[[VAL_16]]{{\[}}%[[VAL_17]], %[[VAL_27]]] : memref<?x?xf32>
// CHECK: }
// CHECK: }
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_36:.*]] = tensor_load %[[VAL_18]] : memref<?x?xf32>
-// CHECK: return %[[VAL_36]] : tensor<?x?xf32>
+// CHECK: %[[VAL_34:.*]] = tensor_load %[[VAL_16]] : memref<?x?xf32>
+// CHECK: return %[[VAL_34]] : tensor<?x?xf32>
// CHECK: }
func @kernel_3d(%arga: tensor<?x?xf32>,
%argb: tensor<?x?x?xf32>,
// CHECK-LABEL: func @sum_reduction(
// CHECK-SAME: %[[VAL_0:.*]]: tensor<10x20x30xf32>,
// CHECK-SAME: %[[VAL_1:.*]]: tensor<f32>) -> tensor<f32> {
-// CHECK: %[[VAL_2:.*]] = constant 999 : index
+// CHECK: %[[VAL_2:.*]] = constant 2 : index
// CHECK: %[[VAL_3:.*]] = constant 0 : index
// CHECK: %[[VAL_4:.*]] = constant 1 : index
-// CHECK: %[[VAL_5:.*]] = alloca(%[[VAL_2]]) : memref<?xindex>
-// CHECK: %[[VAL_6:.*]] = alloca(%[[VAL_2]]) : memref<?xindex>
-// CHECK: %[[VAL_7:.*]] = alloca(%[[VAL_2]]) : memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = alloca(%[[VAL_2]]) : memref<?xf32>
-// CHECK: %[[VAL_9:.*]] = alloca() : memref<f32>
-// CHECK: %[[VAL_10:.*]] = load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref<?xindex>
-// CHECK: scf.for %[[VAL_12:.*]] = %[[VAL_10]] to %[[VAL_11]] step %[[VAL_4]] {
-// CHECK: %[[VAL_13:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_12]]] : memref<?xindex>
-// CHECK: %[[VAL_14:.*]] = addi %[[VAL_12]], %[[VAL_4]] : index
-// CHECK: %[[VAL_15:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_14]]] : memref<?xindex>
-// CHECK: scf.for %[[VAL_16:.*]] = %[[VAL_13]] to %[[VAL_15]] step %[[VAL_4]] {
-// CHECK: %[[VAL_17:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_16]]] : memref<?xindex>
-// CHECK: %[[VAL_18:.*]] = addi %[[VAL_16]], %[[VAL_4]] : index
-// CHECK: %[[VAL_19:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_18]]] : memref<?xindex>
-// CHECK: %[[VAL_20:.*]] = load %[[VAL_9]][] : memref<f32>
-// CHECK: %[[VAL_21:.*]] = scf.for %[[VAL_22:.*]] = %[[VAL_17]] to %[[VAL_19]] step %[[VAL_4]] iter_args(%[[VAL_23:.*]] = %[[VAL_20]]) -> (f32) {
-// CHECK: %[[VAL_24:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_22]]] : memref<?xf32>
-// CHECK: %[[VAL_25:.*]] = addf %[[VAL_23]], %[[VAL_24]] : f32
-// CHECK: scf.yield %[[VAL_25]] : f32
+// CHECK: %[[VAL_5:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<10x20x30xf32> to memref<?xindex>
+// CHECK: %[[VAL_6:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<10x20x30xf32> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_2]] : tensor<10x20x30xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<10x20x30xf32> to memref<?xf32>
+// CHECK: %[[VAL_9:.*]] = tensor_to_memref %[[VAL_1]] : memref<f32>
+// CHECK: %[[VAL_10:.*]] = alloc() : memref<f32>
+// CHECK: linalg.copy(%[[VAL_9]], %[[VAL_10]]) : memref<f32>, memref<f32>
+// CHECK: %[[VAL_11:.*]] = load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref<?xindex>
+// CHECK: %[[VAL_12:.*]] = load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref<?xindex>
+// CHECK: scf.for %[[VAL_13:.*]] = %[[VAL_11]] to %[[VAL_12]] step %[[VAL_4]] {
+// CHECK: %[[VAL_14:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_13]]] : memref<?xindex>
+// CHECK: %[[VAL_15:.*]] = addi %[[VAL_13]], %[[VAL_4]] : index
+// CHECK: %[[VAL_16:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_15]]] : memref<?xindex>
+// CHECK: scf.for %[[VAL_17:.*]] = %[[VAL_14]] to %[[VAL_16]] step %[[VAL_4]] {
+// CHECK: %[[VAL_18:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_17]]] : memref<?xindex>
+// CHECK: %[[VAL_19:.*]] = addi %[[VAL_17]], %[[VAL_4]] : index
+// CHECK: %[[VAL_20:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_19]]] : memref<?xindex>
+// CHECK: %[[VAL_21:.*]] = load %[[VAL_10]][] : memref<f32>
+// CHECK: %[[VAL_22:.*]] = scf.for %[[VAL_23:.*]] = %[[VAL_18]] to %[[VAL_20]] step %[[VAL_4]] iter_args(%[[VAL_24:.*]] = %[[VAL_21]]) -> (f32) {
+// CHECK: %[[VAL_25:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_23]]] : memref<?xf32>
+// CHECK: %[[VAL_26:.*]] = addf %[[VAL_24]], %[[VAL_25]] : f32
+// CHECK: scf.yield %[[VAL_26]] : f32
// CHECK: }
-// CHECK: store %[[VAL_26:.*]], %[[VAL_9]][] : memref<f32>
+// CHECK: store %[[VAL_27:.*]], %[[VAL_10]][] : memref<f32>
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_27:.*]] = tensor_load %[[VAL_9]] : memref<f32>
-// CHECK: return %[[VAL_27]] : tensor<f32>
+// CHECK: %[[VAL_28:.*]] = tensor_load %[[VAL_10]] : memref<f32>
+// CHECK: return %[[VAL_28]] : tensor<f32>
// CHECK: }
func @sum_reduction(%arga: tensor<10x20x30xf32>, %argx: tensor<f32>) -> tensor<f32> {
%0 = linalg.generic #trait_sum_reduction
// CHECK: %[[VAL_3:.*]] = constant 2 : index
// CHECK: %[[VAL_4:.*]] = constant 0 : index
// CHECK: %[[VAL_5:.*]] = constant 1 : index
-// CHECK: %[[VAL_6:.*]] = dim %[[VAL_0]], %[[VAL_4]] : tensor<?x?x?xf32>
-// CHECK: %[[VAL_7:.*]] = dim %[[VAL_0]], %[[VAL_5]] : tensor<?x?x?xf32>
-// CHECK: %[[VAL_8:.*]] = dim %[[VAL_0]], %[[VAL_3]] : tensor<?x?x?xf32>
-// CHECK: %[[VAL_9:.*]] = alloca(%[[VAL_6]], %[[VAL_7]], %[[VAL_8]]) : memref<?x?x?xf32>
-// CHECK: %[[VAL_10:.*]] = dim %[[VAL_1]], %[[VAL_4]] : tensor<?xf32>
-// CHECK: %[[VAL_11:.*]] = alloca(%[[VAL_10]]) : memref<?xf32>
-// CHECK: %[[VAL_12:.*]] = alloca() : memref<f32>
-// CHECK: scf.for %[[VAL_13:.*]] = %[[VAL_4]] to %[[VAL_10]] step %[[VAL_5]] {
-// CHECK: %[[VAL_14:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_13]]] : memref<?xf32>
-// CHECK: scf.for %[[VAL_15:.*]] = %[[VAL_4]] to %[[VAL_7]] step %[[VAL_5]] {
+// CHECK: %[[VAL_6:.*]] = dim %[[VAL_0]], %[[VAL_5]] : tensor<?x?x?xf32>
+// CHECK: %[[VAL_7:.*]] = dim %[[VAL_0]], %[[VAL_3]] : tensor<?x?x?xf32>
+// CHECK: %[[VAL_8:.*]] = tensor_to_memref %[[VAL_0]] : memref<?x?x?xf32>
+// CHECK: %[[VAL_9:.*]] = dim %[[VAL_1]], %[[VAL_4]] : tensor<?xf32>
+// CHECK: %[[VAL_10:.*]] = tensor_to_memref %[[VAL_1]] : memref<?xf32>
+// CHECK: %[[VAL_11:.*]] = tensor_to_memref %[[VAL_2]] : memref<f32>
+// CHECK: %[[VAL_12:.*]] = alloc() : memref<f32>
+// CHECK: linalg.copy(%[[VAL_11]], %[[VAL_12]]) : memref<f32>, memref<f32>
+// CHECK: scf.for %[[VAL_13:.*]] = %[[VAL_4]] to %[[VAL_9]] step %[[VAL_5]] {
+// CHECK: %[[VAL_14:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_13]]] : memref<?xf32>
+// CHECK: scf.for %[[VAL_15:.*]] = %[[VAL_4]] to %[[VAL_6]] step %[[VAL_5]] {
// CHECK: %[[VAL_16:.*]] = load %[[VAL_12]][] : memref<f32>
-// CHECK: %[[VAL_17:.*]] = scf.for %[[VAL_18:.*]] = %[[VAL_4]] to %[[VAL_8]] step %[[VAL_5]] iter_args(%[[VAL_19:.*]] = %[[VAL_16]]) -> (f32) {
-// CHECK: %[[VAL_20:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_13]], %[[VAL_15]], %[[VAL_18]]] : memref<?x?x?xf32>
+// CHECK: %[[VAL_17:.*]] = scf.for %[[VAL_18:.*]] = %[[VAL_4]] to %[[VAL_7]] step %[[VAL_5]] iter_args(%[[VAL_19:.*]] = %[[VAL_16]]) -> (f32) {
+// CHECK: %[[VAL_20:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_13]], %[[VAL_15]], %[[VAL_18]]] : memref<?x?x?xf32>
// CHECK: %[[VAL_21:.*]] = mulf %[[VAL_20]], %[[VAL_14]] : f32
// CHECK: %[[VAL_22:.*]] = addf %[[VAL_19]], %[[VAL_21]] : f32
// CHECK: scf.yield %[[VAL_22]] : f32
// CHECK: %[[VAL_6:.*]] = constant 30 : index
// CHECK: %[[VAL_7:.*]] = constant 0 : index
// CHECK: %[[VAL_8:.*]] = constant 1 : index
-// CHECK: %[[VAL_9:.*]] = alloca() : memref<10xf32>
-// CHECK: %[[VAL_10:.*]] = alloca() : memref<20xf32>
-// CHECK: %[[VAL_11:.*]] = alloca() : memref<30xf32>
-// CHECK: %[[VAL_12:.*]] = alloca() : memref<10x20x30xf32>
-// CHECK: scf.for %[[VAL_13:.*]] = %[[VAL_7]] to %[[VAL_4]] step %[[VAL_8]] {
-// CHECK: %[[VAL_14:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_13]]] : memref<10xf32>
-// CHECK: scf.for %[[VAL_15:.*]] = %[[VAL_7]] to %[[VAL_5]] step %[[VAL_8]] {
-// CHECK: %[[VAL_16:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_15]]] : memref<20xf32>
-// CHECK: scf.for %[[VAL_17:.*]] = %[[VAL_7]] to %[[VAL_6]] step %[[VAL_8]] {
-// CHECK: %[[VAL_18:.*]] = mulf %[[VAL_14]], %[[VAL_16]] : f32
-// CHECK: %[[VAL_19:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_17]]] : memref<30xf32>
-// CHECK: %[[VAL_20:.*]] = mulf %[[VAL_18]], %[[VAL_19]] : f32
-// CHECK: store %[[VAL_20]], %[[VAL_12]]{{\[}}%[[VAL_13]], %[[VAL_15]], %[[VAL_17]]] : memref<10x20x30xf32>
+// CHECK: %[[VAL_9:.*]] = tensor_to_memref %[[VAL_0]] : memref<10xf32>
+// CHECK: %[[VAL_10:.*]] = tensor_to_memref %[[VAL_1]] : memref<20xf32>
+// CHECK: %[[VAL_11:.*]] = tensor_to_memref %[[VAL_2]] : memref<30xf32>
+// CHECK: %[[VAL_12:.*]] = tensor_to_memref %[[VAL_3]] : memref<10x20x30xf32>
+// CHECK: %[[VAL_13:.*]] = alloc() : memref<10x20x30xf32>
+// CHECK: linalg.copy(%[[VAL_12]], %[[VAL_13]]) : memref<10x20x30xf32>, memref<10x20x30xf32>
+// CHECK: scf.for %[[VAL_14:.*]] = %[[VAL_7]] to %[[VAL_4]] step %[[VAL_8]] {
+// CHECK: %[[VAL_15:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_14]]] : memref<10xf32>
+// CHECK: scf.for %[[VAL_16:.*]] = %[[VAL_7]] to %[[VAL_5]] step %[[VAL_8]] {
+// CHECK: %[[VAL_17:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_16]]] : memref<20xf32>
+// CHECK: scf.for %[[VAL_18:.*]] = %[[VAL_7]] to %[[VAL_6]] step %[[VAL_8]] {
+// CHECK: %[[VAL_19:.*]] = mulf %[[VAL_15]], %[[VAL_17]] : f32
+// CHECK: %[[VAL_20:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_18]]] : memref<30xf32>
+// CHECK: %[[VAL_21:.*]] = mulf %[[VAL_19]], %[[VAL_20]] : f32
+// CHECK: store %[[VAL_21]], %[[VAL_13]]{{\[}}%[[VAL_14]], %[[VAL_16]], %[[VAL_18]]] : memref<10x20x30xf32>
// CHECK: }
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_21:.*]] = tensor_load %[[VAL_12]] : memref<10x20x30xf32>
-// CHECK: return %[[VAL_21]] : tensor<10x20x30xf32>
+// CHECK: %[[VAL_22:.*]] = tensor_load %[[VAL_13]] : memref<10x20x30xf32>
+// CHECK: return %[[VAL_22]] : tensor<10x20x30xf32>
// CHECK: }
func @invariants(%arga: tensor<10xf32>,
%argb: tensor<20xf32>,
--- /dev/null
+// RUN: mlir-opt %s -test-sparsification | \
+// RUN: FileCheck %s --check-prefix=CHECK-HIR
+//
+// RUN: mlir-opt %s -test-sparsification="lower" --convert-linalg-to-loops | \
+// RUN: FileCheck %s --check-prefix=CHECK-MIR
+//
+// RUN: mlir-opt %s -test-sparsification="lower" --convert-linalg-to-loops \
+// RUN: --func-bufferize --tensor-constant-bufferize \
+// RUN: --tensor-bufferize --finalizing-bufferize | \
+// RUN: FileCheck %s --check-prefix=CHECK-LIR
+//
+// RUN: mlir-opt %s -test-sparsification="lower fast-output" --convert-linalg-to-loops \
+// RUN: --func-bufferize --tensor-constant-bufferize \
+// RUN: --tensor-bufferize --finalizing-bufferize | \
+// RUN: FileCheck %s --check-prefix=CHECK-FAST
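+//
+// The four check prefixes verify successive stages of the same kernel:
+// CHECK-HIR inspects the IR right after sparsification, where the
+// linalg.sparse_pointers/sparse_indices/sparse_values bridging ops are
+// still present; CHECK-MIR the IR after those primitives have been lowered
+// to runtime support calls; CHECK-LIR the fully bufferized form operating
+// on memrefs; and CHECK-FAST the bufferized form with fast-output enabled,
+// which reuses the output buffer in place instead of allocating and
+// copying it first.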
+
+#trait_matvec = {
+ indexing_maps = [
+ affine_map<(i,j) -> (i,j)>, // A
+ affine_map<(i,j) -> (j)>, // b
+ affine_map<(i,j) -> (i)> // x (out)
+ ],
+ iterator_types = ["parallel","reduction"],
+ sparse = [
+ [ "D", "S" ], // A
+ [ "D" ], // b
+ [ "D" ] // x (out)
+ ],
+ sparse_dim_map = [
+ affine_map<(i,j) -> (j,i)>, // A: column-wise
+ affine_map<(i) -> (i)>, // b
+ affine_map<(i) -> (i)> // x
+ ],
+ doc = "x(i) += A(i,j) * b(j)"
+}
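+//
+// Note that the per-dimension annotation [ "D", "S" ] marks the matrix A as
+// dense in its first dimension and sparse (compressed) in its second, while
+// b and x stay dense; the sparse_dim_map entry for A permutes (i,j) to (j,i)
+// so its sparse storage is traversed column-wise, matching the doc string
+// "x(i) += A(i,j) * b(j)".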
+
+// CHECK-HIR-LABEL: func @matvec(
+// CHECK-HIR-SAME: %[[VAL_0:.*]]: !llvm.ptr<i8>,
+// CHECK-HIR-SAME: %[[VAL_1:.*]]: tensor<64xf64>,
+// CHECK-HIR-SAME: %[[VAL_2:.*]]: tensor<64xf64>) -> tensor<64xf64> {
+// CHECK-HIR: %[[VAL_3:.*]] = constant 64 : index
+// CHECK-HIR: %[[VAL_4:.*]] = constant 0 : index
+// CHECK-HIR: %[[VAL_5:.*]] = constant 1 : index
+// CHECK-HIR: %[[VAL_6:.*]] = linalg.sparse_tensor %[[VAL_0]] : !llvm.ptr<i8> to tensor<64x64xf64>
+// CHECK-HIR: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_6]], %[[VAL_5]] : tensor<64x64xf64> to memref<?xindex>
+// CHECK-HIR: %[[VAL_8:.*]] = linalg.sparse_indices %[[VAL_6]], %[[VAL_5]] : tensor<64x64xf64> to memref<?xindex>
+// CHECK-HIR: %[[VAL_9:.*]] = linalg.sparse_values %[[VAL_6]] : tensor<64x64xf64> to memref<?xf64>
+// CHECK-HIR: %[[VAL_10:.*]] = tensor_to_memref %[[VAL_1]] : memref<64xf64>
+// CHECK-HIR: %[[VAL_11:.*]] = tensor_to_memref %[[VAL_2]] : memref<64xf64>
+// CHECK-HIR: %[[VAL_12:.*]] = alloc() : memref<64xf64>
+// CHECK-HIR: linalg.copy(%[[VAL_11]], %[[VAL_12]]) : memref<64xf64>, memref<64xf64>
+// CHECK-HIR: scf.for %[[VAL_13:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] {
+// CHECK-HIR: %[[VAL_14:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_13]]] : memref<?xindex>
+// CHECK-HIR: %[[VAL_15:.*]] = addi %[[VAL_13]], %[[VAL_5]] : index
+// CHECK-HIR: %[[VAL_16:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_15]]] : memref<?xindex>
+// CHECK-HIR: %[[VAL_17:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_13]]] : memref<64xf64>
+// CHECK-HIR: %[[VAL_18:.*]] = scf.for %[[VAL_19:.*]] = %[[VAL_14]] to %[[VAL_16]] step %[[VAL_5]] iter_args(%[[VAL_20:.*]] = %[[VAL_17]]) -> (f64) {
+// CHECK-HIR: %[[VAL_21:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_19]]] : memref<?xindex>
+// CHECK-HIR: %[[VAL_22:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_19]]] : memref<?xf64>
+// CHECK-HIR: %[[VAL_23:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_21]]] : memref<64xf64>
+// CHECK-HIR: %[[VAL_24:.*]] = mulf %[[VAL_22]], %[[VAL_23]] : f64
+// CHECK-HIR: %[[VAL_25:.*]] = addf %[[VAL_20]], %[[VAL_24]] : f64
+// CHECK-HIR: scf.yield %[[VAL_25]] : f64
+// CHECK-HIR: }
+// CHECK-HIR: store %[[VAL_26:.*]], %[[VAL_12]]{{\[}}%[[VAL_13]]] : memref<64xf64>
+// CHECK-HIR: }
+// CHECK-HIR: %[[VAL_27:.*]] = tensor_load %[[VAL_12]] : memref<64xf64>
+// CHECK-HIR: return %[[VAL_27]] : tensor<64xf64>
+// CHECK-HIR: }
+
+// CHECK-MIR-LABEL: func @matvec(
+// CHECK-MIR-SAME: %[[VAL_0:.*]]: !llvm.ptr<i8>,
+// CHECK-MIR-SAME: %[[VAL_1:.*]]: tensor<64xf64>,
+// CHECK-MIR-SAME: %[[VAL_2:.*]]: tensor<64xf64>) -> tensor<64xf64> {
+// CHECK-MIR: %[[VAL_3:.*]] = constant 64 : index
+// CHECK-MIR: %[[VAL_4:.*]] = constant 0 : index
+// CHECK-MIR: %[[VAL_5:.*]] = constant 1 : index
+// CHECK-MIR: %[[VAL_6:.*]] = call @sparsePtrsI64(%[[VAL_0]], %[[VAL_5]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
+// CHECK-MIR: %[[VAL_7:.*]] = call @sparseIndxsI64(%[[VAL_0]], %[[VAL_5]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
+// CHECK-MIR: %[[VAL_8:.*]] = call @sparseValsF64(%[[VAL_0]]) : (!llvm.ptr<i8>) -> memref<?xf64>
+// CHECK-MIR: %[[VAL_9:.*]] = tensor_to_memref %[[VAL_1]] : memref<64xf64>
+// CHECK-MIR: %[[VAL_10:.*]] = tensor_to_memref %[[VAL_2]] : memref<64xf64>
+// CHECK-MIR: %[[VAL_11:.*]] = alloc() : memref<64xf64>
+// CHECK-MIR: scf.for %[[VAL_12:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] {
+// CHECK-MIR: %[[VAL_13:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_12]]] : memref<64xf64>
+// CHECK-MIR: store %[[VAL_13]], %[[VAL_11]]{{\[}}%[[VAL_12]]] : memref<64xf64>
+// CHECK-MIR: }
+// CHECK-MIR: scf.for %[[VAL_14:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] {
+// CHECK-MIR: %[[VAL_15:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_14]]] : memref<?xindex>
+// CHECK-MIR: %[[VAL_16:.*]] = addi %[[VAL_14]], %[[VAL_5]] : index
+// CHECK-MIR: %[[VAL_17:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_16]]] : memref<?xindex>
+// CHECK-MIR: %[[VAL_18:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_14]]] : memref<64xf64>
+// CHECK-MIR: %[[VAL_19:.*]] = scf.for %[[VAL_20:.*]] = %[[VAL_15]] to %[[VAL_17]] step %[[VAL_5]] iter_args(%[[VAL_21:.*]] = %[[VAL_18]]) -> (f64) {
+// CHECK-MIR: %[[VAL_22:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_20]]] : memref<?xindex>
+// CHECK-MIR: %[[VAL_23:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_20]]] : memref<?xf64>
+// CHECK-MIR: %[[VAL_24:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_22]]] : memref<64xf64>
+// CHECK-MIR: %[[VAL_25:.*]] = mulf %[[VAL_23]], %[[VAL_24]] : f64
+// CHECK-MIR: %[[VAL_26:.*]] = addf %[[VAL_21]], %[[VAL_25]] : f64
+// CHECK-MIR: scf.yield %[[VAL_26]] : f64
+// CHECK-MIR: }
+// CHECK-MIR: store %[[VAL_27:.*]], %[[VAL_11]]{{\[}}%[[VAL_14]]] : memref<64xf64>
+// CHECK-MIR: }
+// CHECK-MIR: %[[VAL_28:.*]] = tensor_load %[[VAL_11]] : memref<64xf64>
+// CHECK-MIR: return %[[VAL_28]] : tensor<64xf64>
+// CHECK-MIR: }
+
+// CHECK-LIR-LABEL: func @matvec(
+// CHECK-LIR-SAME: %[[VAL_0:.*]]: !llvm.ptr<i8>,
+// CHECK-LIR-SAME: %[[VAL_1:.*]]: memref<64xf64>,
+// CHECK-LIR-SAME: %[[VAL_2:.*]]: memref<64xf64>) -> memref<64xf64> {
+// CHECK-LIR: %[[VAL_3:.*]] = constant 64 : index
+// CHECK-LIR: %[[VAL_4:.*]] = constant 0 : index
+// CHECK-LIR: %[[VAL_5:.*]] = constant 1 : index
+// CHECK-LIR: %[[VAL_6:.*]] = call @sparsePtrsI64(%[[VAL_0]], %[[VAL_5]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
+// CHECK-LIR: %[[VAL_7:.*]] = call @sparseIndxsI64(%[[VAL_0]], %[[VAL_5]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
+// CHECK-LIR: %[[VAL_8:.*]] = call @sparseValsF64(%[[VAL_0]]) : (!llvm.ptr<i8>) -> memref<?xf64>
+// CHECK-LIR: %[[VAL_9:.*]] = alloc() : memref<64xf64>
+// CHECK-LIR: scf.for %[[VAL_10:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] {
+// CHECK-LIR: %[[VAL_11:.*]] = load %[[VAL_2]]{{\[}}%[[VAL_10]]] : memref<64xf64>
+// CHECK-LIR: store %[[VAL_11]], %[[VAL_9]]{{\[}}%[[VAL_10]]] : memref<64xf64>
+// CHECK-LIR: }
+// CHECK-LIR: scf.for %[[VAL_12:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] {
+// CHECK-LIR: %[[VAL_13:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_12]]] : memref<?xindex>
+// CHECK-LIR: %[[VAL_14:.*]] = addi %[[VAL_12]], %[[VAL_5]] : index
+// CHECK-LIR: %[[VAL_15:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_14]]] : memref<?xindex>
+// CHECK-LIR: %[[VAL_16:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_12]]] : memref<64xf64>
+// CHECK-LIR: %[[VAL_17:.*]] = scf.for %[[VAL_18:.*]] = %[[VAL_13]] to %[[VAL_15]] step %[[VAL_5]] iter_args(%[[VAL_19:.*]] = %[[VAL_16]]) -> (f64) {
+// CHECK-LIR: %[[VAL_20:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_18]]] : memref<?xindex>
+// CHECK-LIR: %[[VAL_21:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_18]]] : memref<?xf64>
+// CHECK-LIR: %[[VAL_22:.*]] = load %[[VAL_1]]{{\[}}%[[VAL_20]]] : memref<64xf64>
+// CHECK-LIR: %[[VAL_23:.*]] = mulf %[[VAL_21]], %[[VAL_22]] : f64
+// CHECK-LIR: %[[VAL_24:.*]] = addf %[[VAL_19]], %[[VAL_23]] : f64
+// CHECK-LIR: scf.yield %[[VAL_24]] : f64
+// CHECK-LIR: }
+// CHECK-LIR: store %[[VAL_25:.*]], %[[VAL_9]]{{\[}}%[[VAL_12]]] : memref<64xf64>
+// CHECK-LIR: }
+// CHECK-LIR: return %[[VAL_9]] : memref<64xf64>
+// CHECK-LIR: }
+
+// CHECK-FAST-LABEL: func @matvec(
+// CHECK-FAST-SAME: %[[VAL_0:.*]]: !llvm.ptr<i8>,
+// CHECK-FAST-SAME: %[[VAL_1:.*]]: memref<64xf64>,
+// CHECK-FAST-SAME: %[[VAL_2:.*]]: memref<64xf64>) -> memref<64xf64> {
+// CHECK-FAST: %[[VAL_3:.*]] = constant 64 : index
+// CHECK-FAST: %[[VAL_4:.*]] = constant 0 : index
+// CHECK-FAST: %[[VAL_5:.*]] = constant 1 : index
+// CHECK-FAST: %[[VAL_6:.*]] = call @sparsePtrsI64(%[[VAL_0]], %[[VAL_5]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
+// CHECK-FAST: %[[VAL_7:.*]] = call @sparseIndxsI64(%[[VAL_0]], %[[VAL_5]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
+// CHECK-FAST: %[[VAL_8:.*]] = call @sparseValsF64(%[[VAL_0]]) : (!llvm.ptr<i8>) -> memref<?xf64>
+// CHECK-FAST: scf.for %[[VAL_9:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] {
+// CHECK-FAST: %[[VAL_10:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_9]]] : memref<?xindex>
+// CHECK-FAST: %[[VAL_11:.*]] = addi %[[VAL_9]], %[[VAL_5]] : index
+// CHECK-FAST: %[[VAL_12:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_11]]] : memref<?xindex>
+// CHECK-FAST: %[[VAL_13:.*]] = load %[[VAL_2]]{{\[}}%[[VAL_9]]] : memref<64xf64>
+// CHECK-FAST: %[[VAL_14:.*]] = scf.for %[[VAL_15:.*]] = %[[VAL_10]] to %[[VAL_12]] step %[[VAL_5]] iter_args(%[[VAL_16:.*]] = %[[VAL_13]]) -> (f64) {
+// CHECK-FAST: %[[VAL_17:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_15]]] : memref<?xindex>
+// CHECK-FAST: %[[VAL_18:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_15]]] : memref<?xf64>
+// CHECK-FAST: %[[VAL_19:.*]] = load %[[VAL_1]]{{\[}}%[[VAL_17]]] : memref<64xf64>
+// CHECK-FAST: %[[VAL_20:.*]] = mulf %[[VAL_18]], %[[VAL_19]] : f64
+// CHECK-FAST: %[[VAL_21:.*]] = addf %[[VAL_16]], %[[VAL_20]] : f64
+// CHECK-FAST: scf.yield %[[VAL_21]] : f64
+// CHECK-FAST: }
+// CHECK-FAST: store %[[VAL_22:.*]], %[[VAL_2]]{{\[}}%[[VAL_9]]] : memref<64xf64>
+// CHECK-FAST: }
+// CHECK-FAST: return %[[VAL_2]] : memref<64xf64>
+// CHECK-FAST: }
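+//
+// With fast-output enabled, the initial alloc/copy of the output vector is
+// gone altogether: the reduction loads from and stores back into the memref
+// of %argx directly and returns that same buffer.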
+
+!SparseTensor = type !llvm.ptr<i8>
+
+func @matvec(%argA: !SparseTensor, %argb: tensor<64xf64>, %argx: tensor<64xf64>) -> tensor<64xf64> {
+ %arga = linalg.sparse_tensor %argA : !SparseTensor to tensor<64x64xf64>
+ %0 = linalg.generic #trait_matvec
+ ins(%arga, %argb : tensor<64x64xf64>, tensor<64xf64>)
+ outs(%argx: tensor<64xf64>) {
+ ^bb(%A: f64, %b: f64, %x: f64):
+ %0 = mulf %A, %b : f64
+ %1 = addf %x, %0 : f64
+ linalg.yield %1 : f64
+ } -> tensor<64xf64>
+ return %0 : tensor<64xf64>
+}
--- /dev/null
+// RUN: mlir-opt --test-sparsification="lower" %s | FileCheck %s
+
+!SparseTensor = type !llvm.ptr<i8>
+
+// CHECK-LABEL: func @sparse_pointers(
+// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
+// CHECK: %[[C:.*]] = constant 1 : index
+// CHECK: %[[T:.*]] = call @sparsePtrsI64(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
+// CHECK: return %[[T]] : memref<?xindex>
+func @sparse_pointers(%arg0: !SparseTensor) -> memref<?xindex> {
+ %a = linalg.sparse_tensor %arg0 : !SparseTensor to tensor<128xf64>
+ %c = constant 1 : index
+ %0 = linalg.sparse_pointers %a, %c : tensor<128xf64> to memref<?xindex>
+ return %0 : memref<?xindex>
+}
+
+// CHECK-LABEL: func @sparse_indices(
+// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
+// CHECK: %[[C:.*]] = constant 1 : index
+// CHECK: %[[T:.*]] = call @sparseIndxsI64(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
+// CHECK: return %[[T]] : memref<?xindex>
+func @sparse_indices(%arg0: !SparseTensor) -> memref<?xindex> {
+ %a = linalg.sparse_tensor %arg0 : !SparseTensor to tensor<128xf64>
+ %c = constant 1 : index
+ %0 = linalg.sparse_indices %a, %c : tensor<128xf64> to memref<?xindex>
+ return %0 : memref<?xindex>
+}
+
+// CHECK-LABEL: func @sparse_values(
+// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
+// CHECK: %[[T:.*]] = call @sparseValsF64(%[[A]]) : (!llvm.ptr<i8>) -> memref<?xf64>
+// CHECK: return %[[T]] : memref<?xf64>
+func @sparse_values(%arg0: !SparseTensor) -> memref<?xf64> {
+ %a = linalg.sparse_tensor %arg0 : !SparseTensor to tensor<128xf64>
+ %0 = linalg.sparse_values %a : tensor<128xf64> to memref<?xf64>
+ return %0 : memref<?xf64>
+}
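+
+// As the checks above show, each bridging op lowers to a call into the
+// sparse runtime support library (sparsePtrsI64, sparseIndxsI64,
+// sparseValsF64), and the linalg.sparse_tensor cast itself is consumed by
+// the lowering and no longer appears in the output.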
// CHECK-SAME: %[[VAL_0:.*0]]: tensor<100x200x300x400x500x600x700x800xf32>,
// CHECK-SAME: %[[VAL_1:.*1]]: tensor<100x200x300x400x500x600x700x800xf32>,
// CHECK-SAME: %[[VAL_2:.*2]]: tensor<100x200x300x400x500x600x700x800xf32>) -> tensor<100x200x300x400x500x600x700x800xf32> {
-// CHECK: %[[VAL_3:.*]] = constant 999 : index
-// CHECK: %[[VAL_4:.*]] = constant 100 : index
-// CHECK: %[[VAL_5:.*]] = constant 200 : index
-// CHECK: %[[VAL_6:.*]] = constant 300 : index
-// CHECK: %[[VAL_7:.*]] = constant 600 : index
-// CHECK: %[[VAL_8:.*]] = constant 700 : index
-// CHECK: %[[VAL_9:.*]] = constant 800 : index
-// CHECK: %[[VAL_10:.*]] = constant 0 : index
-// CHECK: %[[VAL_11:.*]] = constant 1 : index
-// CHECK: %[[VAL_12:.*]] = alloca() : memref<100x200x300x400x500x600x700x800xf32>
-// CHECK: %[[VAL_13:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_14:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_15:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_16:.*]] = alloca(%[[VAL_3]]) : memref<?xindex>
-// CHECK: %[[VAL_17:.*]] = alloca(%[[VAL_3]]) : memref<?xf32>
-// CHECK: %[[VAL_18:.*]] = alloca() : memref<100x200x300x400x500x600x700x800xf32>
-// CHECK: scf.for %[[VAL_19:.*]] = %[[VAL_10]] to %[[VAL_9]] step %[[VAL_11]] {
-// CHECK: scf.for %[[VAL_20:.*]] = %[[VAL_10]] to %[[VAL_8]] step %[[VAL_11]] {
-// CHECK: %[[VAL_21:.*]] = muli %[[VAL_19]], %[[VAL_8]] : index
-// CHECK: %[[VAL_22:.*]] = addi %[[VAL_21]], %[[VAL_20]] : index
-// CHECK: scf.for %[[VAL_23:.*]] = %[[VAL_10]] to %[[VAL_7]] step %[[VAL_11]] {
-// CHECK: %[[VAL_24:.*]] = muli %[[VAL_22]], %[[VAL_7]] : index
-// CHECK: %[[VAL_25:.*]] = addi %[[VAL_24]], %[[VAL_23]] : index
-// CHECK: %[[VAL_26:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_25]]] : memref<?xindex>
-// CHECK: %[[VAL_27:.*]] = addi %[[VAL_25]], %[[VAL_11]] : index
-// CHECK: %[[VAL_28:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_27]]] : memref<?xindex>
-// CHECK: scf.for %[[VAL_29:.*]] = %[[VAL_26]] to %[[VAL_28]] step %[[VAL_11]] {
-// CHECK: %[[VAL_30:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_29]]] : memref<?xindex>
-// CHECK: %[[VAL_31:.*]] = load %[[VAL_15]]{{\[}}%[[VAL_29]]] : memref<?xindex>
-// CHECK: %[[VAL_32:.*]] = addi %[[VAL_29]], %[[VAL_11]] : index
-// CHECK: %[[VAL_33:.*]] = load %[[VAL_15]]{{\[}}%[[VAL_32]]] : memref<?xindex>
-// CHECK: scf.for %[[VAL_34:.*]] = %[[VAL_31]] to %[[VAL_33]] step %[[VAL_11]] {
-// CHECK: %[[VAL_35:.*]] = load %[[VAL_16]]{{\[}}%[[VAL_34]]] : memref<?xindex>
-// CHECK: scf.for %[[VAL_36:.*]] = %[[VAL_10]] to %[[VAL_6]] step %[[VAL_11]] {
-// CHECK: %[[VAL_37:.*]] = muli %[[VAL_34]], %[[VAL_6]] : index
-// CHECK: %[[VAL_38:.*]] = addi %[[VAL_37]], %[[VAL_36]] : index
-// CHECK: scf.for %[[VAL_39:.*]] = %[[VAL_10]] to %[[VAL_5]] step %[[VAL_11]] {
-// CHECK: %[[VAL_40:.*]] = muli %[[VAL_38]], %[[VAL_5]] : index
-// CHECK: %[[VAL_41:.*]] = addi %[[VAL_40]], %[[VAL_39]] : index
-// CHECK: scf.for %[[VAL_42:.*]] = %[[VAL_10]] to %[[VAL_4]] step %[[VAL_11]] {
-// CHECK: %[[VAL_43:.*]] = muli %[[VAL_41]], %[[VAL_4]] : index
-// CHECK: %[[VAL_44:.*]] = addi %[[VAL_43]], %[[VAL_42]] : index
-// CHECK: %[[VAL_45:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_42]], %[[VAL_39]], %[[VAL_36]], %[[VAL_35]], %[[VAL_30]], %[[VAL_23]], %[[VAL_20]], %[[VAL_19]]] : memref<100x200x300x400x500x600x700x800xf32>
-// CHECK: %[[VAL_46:.*]] = load %[[VAL_17]]{{\[}}%[[VAL_44]]] : memref<?xf32>
-// CHECK: %[[VAL_47:.*]] = mulf %[[VAL_45]], %[[VAL_46]] : f32
-// CHECK: store %[[VAL_47]], %[[VAL_18]]{{\[}}%[[VAL_42]], %[[VAL_39]], %[[VAL_36]], %[[VAL_35]], %[[VAL_30]], %[[VAL_23]], %[[VAL_20]], %[[VAL_19]]] : memref<100x200x300x400x500x600x700x800xf32>
+// CHECK: %[[VAL_3:.*]] = constant 3 : index
+// CHECK: %[[VAL_4:.*]] = constant 4 : index
+// CHECK: %[[VAL_5:.*]] = constant 100 : index
+// CHECK: %[[VAL_6:.*]] = constant 200 : index
+// CHECK: %[[VAL_7:.*]] = constant 300 : index
+// CHECK: %[[VAL_8:.*]] = constant 600 : index
+// CHECK: %[[VAL_9:.*]] = constant 700 : index
+// CHECK: %[[VAL_10:.*]] = constant 800 : index
+// CHECK: %[[VAL_11:.*]] = constant 0 : index
+// CHECK: %[[VAL_12:.*]] = constant 1 : index
+// CHECK: %[[VAL_13:.*]] = tensor_to_memref %[[VAL_0]] : memref<100x200x300x400x500x600x700x800xf32>
+// CHECK: %[[VAL_14:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_3]] : tensor<100x200x300x400x500x600x700x800xf32> to memref<?xindex>
+// CHECK: %[[VAL_15:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_3]] : tensor<100x200x300x400x500x600x700x800xf32> to memref<?xindex>
+// CHECK: %[[VAL_16:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_4]] : tensor<100x200x300x400x500x600x700x800xf32> to memref<?xindex>
+// CHECK: %[[VAL_17:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_4]] : tensor<100x200x300x400x500x600x700x800xf32> to memref<?xindex>
+// CHECK: %[[VAL_18:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<100x200x300x400x500x600x700x800xf32> to memref<?xf32>
+// CHECK: %[[VAL_19:.*]] = tensor_to_memref %[[VAL_2]] : memref<100x200x300x400x500x600x700x800xf32>
+// CHECK: %[[VAL_20:.*]] = alloc() : memref<100x200x300x400x500x600x700x800xf32>
+// CHECK: linalg.copy(%[[VAL_19]], %[[VAL_20]]) : memref<100x200x300x400x500x600x700x800xf32>, memref<100x200x300x400x500x600x700x800xf32>
+// CHECK: scf.for %[[VAL_21:.*]] = %[[VAL_11]] to %[[VAL_10]] step %[[VAL_12]] {
+// CHECK: scf.for %[[VAL_22:.*]] = %[[VAL_11]] to %[[VAL_9]] step %[[VAL_12]] {
+// CHECK: %[[VAL_23:.*]] = muli %[[VAL_21]], %[[VAL_9]] : index
+// CHECK: %[[VAL_24:.*]] = addi %[[VAL_23]], %[[VAL_22]] : index
+// CHECK: scf.for %[[VAL_25:.*]] = %[[VAL_11]] to %[[VAL_8]] step %[[VAL_12]] {
+// CHECK: %[[VAL_26:.*]] = muli %[[VAL_24]], %[[VAL_8]] : index
+// CHECK: %[[VAL_27:.*]] = addi %[[VAL_26]], %[[VAL_25]] : index
+// CHECK: %[[VAL_28:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_27]]] : memref<?xindex>
+// CHECK: %[[VAL_29:.*]] = addi %[[VAL_27]], %[[VAL_12]] : index
+// CHECK: %[[VAL_30:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_29]]] : memref<?xindex>
+// CHECK: scf.for %[[VAL_31:.*]] = %[[VAL_28]] to %[[VAL_30]] step %[[VAL_12]] {
+// CHECK: %[[VAL_32:.*]] = load %[[VAL_15]]{{\[}}%[[VAL_31]]] : memref<?xindex>
+// CHECK: %[[VAL_33:.*]] = load %[[VAL_16]]{{\[}}%[[VAL_31]]] : memref<?xindex>
+// CHECK: %[[VAL_34:.*]] = addi %[[VAL_31]], %[[VAL_12]] : index
+// CHECK: %[[VAL_35:.*]] = load %[[VAL_16]]{{\[}}%[[VAL_34]]] : memref<?xindex>
+// CHECK: scf.for %[[VAL_36:.*]] = %[[VAL_33]] to %[[VAL_35]] step %[[VAL_12]] {
+// CHECK: %[[VAL_37:.*]] = load %[[VAL_17]]{{\[}}%[[VAL_36]]] : memref<?xindex>
+// CHECK: scf.for %[[VAL_38:.*]] = %[[VAL_11]] to %[[VAL_7]] step %[[VAL_12]] {
+// CHECK: %[[VAL_39:.*]] = muli %[[VAL_36]], %[[VAL_7]] : index
+// CHECK: %[[VAL_40:.*]] = addi %[[VAL_39]], %[[VAL_38]] : index
+// CHECK: scf.for %[[VAL_41:.*]] = %[[VAL_11]] to %[[VAL_6]] step %[[VAL_12]] {
+// CHECK: %[[VAL_42:.*]] = muli %[[VAL_40]], %[[VAL_6]] : index
+// CHECK: %[[VAL_43:.*]] = addi %[[VAL_42]], %[[VAL_41]] : index
+// CHECK: scf.for %[[VAL_44:.*]] = %[[VAL_11]] to %[[VAL_5]] step %[[VAL_12]] {
+// CHECK: %[[VAL_45:.*]] = muli %[[VAL_43]], %[[VAL_5]] : index
+// CHECK: %[[VAL_46:.*]] = addi %[[VAL_45]], %[[VAL_44]] : index
+// CHECK: %[[VAL_47:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_44]], %[[VAL_41]], %[[VAL_38]], %[[VAL_37]], %[[VAL_32]], %[[VAL_25]], %[[VAL_22]], %[[VAL_21]]] : memref<100x200x300x400x500x600x700x800xf32>
+// CHECK: %[[VAL_48:.*]] = load %[[VAL_18]]{{\[}}%[[VAL_46]]] : memref<?xf32>
+// CHECK: %[[VAL_49:.*]] = mulf %[[VAL_47]], %[[VAL_48]] : f32
+// CHECK: store %[[VAL_49]], %[[VAL_20]]{{\[}}%[[VAL_44]], %[[VAL_41]], %[[VAL_38]], %[[VAL_37]], %[[VAL_32]], %[[VAL_25]], %[[VAL_22]], %[[VAL_21]]] : memref<100x200x300x400x500x600x700x800xf32>
// CHECK: }
// CHECK: }
// CHECK: }
// CHECK: }
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_48:.*]] = tensor_load %[[VAL_18]] : memref<100x200x300x400x500x600x700x800xf32>
-// CHECK: return %[[VAL_48]] : tensor<100x200x300x400x500x600x700x800xf32>
+// CHECK: %[[VAL_50:.*]] = tensor_load %[[VAL_20]] : memref<100x200x300x400x500x600x700x800xf32>
+// CHECK: return %[[VAL_50]] : tensor<100x200x300x400x500x600x700x800xf32>
// CHECK: }
func @mul(%arga: tensor<100x200x300x400x500x600x700x800xf32>,
%argb: tensor<100x200x300x400x500x600x700x800xf32>,
--- /dev/null
+// RUN: mlir-opt -split-input-file %s | FileCheck %s
+
+!SparseTensor = type !llvm.ptr<i8>
+
+// CHECK-LABEL: func @sparse_tensor(
+// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
+// CHECK: %[[T:.*]] = linalg.sparse_tensor %[[A]] : !llvm.ptr<i8> to tensor<128xf64>
+// CHECK: return %[[T]] : tensor<128xf64>
+func @sparse_tensor(%arg0: !SparseTensor) -> tensor<128xf64> {
+ %0 = linalg.sparse_tensor %arg0 : !SparseTensor to tensor<128xf64>
+ return %0 : tensor<128xf64>
+}
+
+// -----
+
+// CHECK-LABEL: func @sparse_pointers(
+// CHECK-SAME: %[[A:.*]]: tensor<128xf64>)
+// CHECK: %[[C:.*]] = constant 1 : index
+// CHECK: %[[T:.*]] = linalg.sparse_pointers %[[A]], %[[C]] : tensor<128xf64> to memref<?xindex>
+// CHECK: return %[[T]] : memref<?xindex>
+func @sparse_pointers(%arg0: tensor<128xf64>) -> memref<?xindex> {
+ %c = constant 1 : index
+ %0 = linalg.sparse_pointers %arg0, %c : tensor<128xf64> to memref<?xindex>
+ return %0 : memref<?xindex>
+}
+
+// -----
+
+// CHECK-LABEL: func @sparse_indices(
+// CHECK-SAME: %[[A:.*]]: tensor<128xf64>)
+// CHECK: %[[C:.*]] = constant 1 : index
+// CHECK: %[[T:.*]] = linalg.sparse_indices %[[A]], %[[C]] : tensor<128xf64> to memref<?xindex>
+// CHECK: return %[[T]] : memref<?xindex>
+func @sparse_indices(%arg0: tensor<128xf64>) -> memref<?xindex> {
+ %c = constant 1 : index
+ %0 = linalg.sparse_indices %arg0, %c : tensor<128xf64> to memref<?xindex>
+ return %0 : memref<?xindex>
+}
+
+// -----
+
+// CHECK-LABEL: func @sparse_values(
+// CHECK-SAME: %[[A:.*]]: tensor<128xf64>)
+// CHECK: %[[T:.*]] = linalg.sparse_values %[[A]] : tensor<128xf64> to memref<?xf64>
+// CHECK: return %[[T]] : memref<?xf64>
+func @sparse_values(%arg0: tensor<128xf64>) -> memref<?xf64> {
+ %0 = linalg.sparse_values %arg0 : tensor<128xf64> to memref<?xf64>
+ return %0 : memref<?xf64>
+}
//
//===----------------------------------------------------------------------===//
+#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
#include "mlir/Dialect/Vector/VectorOps.h"
#include "mlir/Pass/Pass.h"
llvm::cl::desc("Set the index type"),
llvm::cl::init(0)};
+ Option<bool> fastOutput{*this, "fast-output",
+ llvm::cl::desc("Allows fast output buffers"),
+ llvm::cl::init(false)};
+
+ Option<bool> lower{*this, "lower", llvm::cl::desc("Lower sparse primitives"),
+ llvm::cl::init(false)};
+
/// Registers all dialects required by testing.
  void getDependentDialects(DialectRegistry &registry) const override {
- registry.insert<scf::SCFDialect, vector::VectorDialect>();
+ registry
+ .insert<scf::SCFDialect, vector::VectorDialect, LLVM::LLVMDialect>();
}
/// Returns parallelization strategy given on command line.
// Translate strategy flags to strategy options.
linalg::SparsificationOptions options(parallelOption(), vectorOption(),
vectorLength, typeOption(ptrType),
- typeOption(indType));
+ typeOption(indType), fastOutput);
// Apply rewriting.
linalg::populateSparsificationPatterns(ctx, patterns, options);
vector::populateVectorToVectorCanonicalizationPatterns(patterns, ctx);
(void)applyPatternsAndFoldGreedily(getFunction(), std::move(patterns));
+  // Lower sparse primitives to calls into a runtime support library.
+ if (lower) {
+ OwningRewritePatternList conversionPatterns;
+ ConversionTarget target(*ctx);
+ target.addIllegalOp<linalg::SparseTensorFromPointerOp,
+ linalg::SparseTensorToPointersMemRefOp,
+ linalg::SparseTensorToIndicesMemRefOp,
+ linalg::SparseTensorToValuesMemRefOp>();
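+    // Keep the calls emitted by the conversion patterns legal so that the
+    // partial conversion below accepts them.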
+ target.addLegalOp<CallOp>();
+ linalg::populateSparsificationConversionPatterns(ctx, conversionPatterns);
+ if (failed(applyPartialConversion(getOperation(), target,
+ std::move(conversionPatterns))))
+ signalPassFailure();
+ }
}
};