From e52f530c36e4b3f78c35f1ccd59cae75bdff7db4 Mon Sep 17 00:00:00 2001
From: Aart Bik
Date: Thu, 13 Jan 2022 14:36:17 -0800
Subject: [PATCH] [mlir][sparse] fix two typos

(1) copy-and-paste error in encoding alias name:
    this is an annotation for a tensor (3-d), not a matrix (2-d).
(2) typo in "initialization"

Reviewed By: bixia

Differential Revision: https://reviews.llvm.org/D117255
---
 mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td |  2 +-
 .../Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir  | 12 ++++++------
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
index 292fc07..b7fce5b 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
@@ -239,7 +239,7 @@ def SparseTensor_ExpandOp : SparseTensor_Op<"expand", []>,
     dimension (e.g. a full row for matrices). The added array and count are
     used to store new indices when a false value is encountered in the filled
     array. All arrays should be allocated before the loop (possibly even shared between
-    loops in a future optimization) so that their *dense* intitialization can be
+    loops in a future optimization) so that their *dense* initialization can be
     amortized over many iterations. Setting and resetting the dense arrays in
     the loop nest itself is kept *sparse* by only iterating over set elements
     through an indirection using the added array, so that the operations are
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir
index 71ba247..ca12873 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir
@@ -26,7 +26,7 @@
 
 !Filename = type !llvm.ptr<i8>
 
-#SparseMatrix = #sparse_tensor.encoding<{
+#SparseTensor = #sparse_tensor.encoding<{
   dimLevelType = [ "compressed", "compressed", "compressed" ]
 }>
 
@@ -51,14 +51,14 @@ module {
   // Computes Matricized Tensor Times Khatri-Rao Product (MTTKRP) kernel. See
   // http://tensor-compiler.org/docs/data_analytics/index.html.
   //
-  func @kernel_mttkrp(%argb: tensor<?x?x?xf64, #SparseMatrix>,
+  func @kernel_mttkrp(%argb: tensor<?x?x?xf64, #SparseTensor>,
                       %argc: tensor<?x?xf64>,
                       %argd: tensor<?x?xf64>,
                       %arga: tensor<?x?xf64> {linalg.inplaceable = true})
       -> tensor<?x?xf64> {
     %0 = linalg.generic #mttkrp
       ins(%argb, %argc, %argd:
-          tensor<?x?x?xf64, #SparseMatrix>, tensor<?x?xf64>, tensor<?x?xf64>)
+          tensor<?x?x?xf64, #SparseTensor>, tensor<?x?xf64>, tensor<?x?xf64>)
       outs(%arga: tensor<?x?xf64>) {
       ^bb(%b: f64, %c: f64, %d: f64, %a: f64):
         %0 = arith.mulf %b, %c : f64
@@ -87,7 +87,7 @@ module {
     // Read the sparse B input from a file.
     %fileName = call @getTensorFilename(%c0) : (index) -> (!Filename)
     %b = sparse_tensor.new %fileName
-      : !Filename to tensor<?x?x?xf64, #SparseMatrix>
+      : !Filename to tensor<?x?x?xf64, #SparseTensor>
 
     // Initialize dense C and D inputs and dense output A.
     %cdata = memref.alloc(%c3, %c5) : memref<?x?xf64>
@@ -124,7 +124,7 @@ module {
 
     // Call kernel.
     %0 = call @kernel_mttkrp(%b, %c, %d, %a)
-      : (tensor<?x?x?xf64, #SparseMatrix>,
+      : (tensor<?x?x?xf64, #SparseTensor>,
         tensor<?x?xf64>, tensor<?x?xf64>, tensor<?x?xf64>) -> tensor<?x?xf64>
 
     // Print the result for verification.
@@ -141,7 +141,7 @@ module {
     memref.dealloc %adata : memref<?x?xf64>
     memref.dealloc %cdata : memref<?x?xf64>
    memref.dealloc %ddata : memref<?x?xf64>
-    sparse_tensor.release %b : tensor<?x?x?xf64, #SparseMatrix>
+    sparse_tensor.release %b : tensor<?x?x?xf64, #SparseTensor>
 
     return
   }
-- 
2.7.4
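
Note on the renamed alias: the encoding itself is unchanged; only its name now reflects that it annotates a 3-d tensor. Below is a minimal, hypothetical sketch (not part of the patch) showing how such an encoding alias is attached to a tensor type; the @dump function name and its empty body are assumptions for illustration only.

    // Alias for a 3-d sparse encoding: all three dimensions stored compressed.
    #SparseTensor = #sparse_tensor.encoding<{
      dimLevelType = [ "compressed", "compressed", "compressed" ]
    }>

    // A function taking a dynamically sized 3-d sparse tensor of f64 values.
    func @dump(%t: tensor<?x?x?xf64, #SparseTensor>) {
      return
    }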