From: River Riddle
Date: Mon, 21 Oct 2019 18:31:59 +0000 (-0700)
Subject: Cleanup and rewrite Ch-4.md.
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=4514cdd5eb4844e10790580ab8df9bce433cbe6e;p=platform%2Fupstream%2Fllvm.git

Cleanup and rewrite Ch-4.md.

This change rewrites Ch-4.md to introduce interfaces in a detailed
step-by-step manner, adds examples, and fixes some errors.

PiperOrigin-RevId: 275887017
---

diff --git a/mlir/examples/toy/Ch4/include/toy/ShapeInferenceInterface.td b/mlir/examples/toy/Ch4/include/toy/ShapeInferenceInterface.td
index 4b1240d..4958b01 100644
--- a/mlir/examples/toy/Ch4/include/toy/ShapeInferenceInterface.td
+++ b/mlir/examples/toy/Ch4/include/toy/ShapeInferenceInterface.td
@@ -1,4 +1,4 @@
-//===- ShapeInferenceInterface.td - Operation Interface for Shape Inference ----------*- tablegen -*-===//
+//===- ShapeInferenceInterface.td - Shape Inference Interface -*- tablegen -==//
 //
 // Copyright 2019 The MLIR Authors.
 //
@@ -29,6 +29,11 @@ include "mlir/IR/OpBase.td"
 #endif // OP_BASE
 
 def ShapeInferenceOpInterface : OpInterface<"ShapeInference"> {
+  let description = [{
+    Interface to access a registered method to infer the return types for an
+    operation that can be used during type inference.
+  }];
+
   let methods = [
     InterfaceMethod<"Infer and set the output shape for the current operation.",
                     "void", "inferShapes">
diff --git a/mlir/examples/toy/Ch4/toyc.cpp b/mlir/examples/toy/Ch4/toyc.cpp
index ac73236..3dd079d 100644
--- a/mlir/examples/toy/Ch4/toyc.cpp
+++ b/mlir/examples/toy/Ch4/toyc.cpp
@@ -132,6 +132,7 @@ int dumpMLIR() {
     // the operations.
     pm.addPass(mlir::toy::createShapeInferencePass());
     pm.addPass(mlir::createCanonicalizerPass());
+    pm.addPass(mlir::createCSEPass());
 
   if (mlir::failed(pm.run(*module)))
     return 4;
diff --git a/mlir/examples/toy/Ch5/include/toy/ShapeInferenceInterface.td b/mlir/examples/toy/Ch5/include/toy/ShapeInferenceInterface.td
index 4b1240d..4958b01 100644
--- a/mlir/examples/toy/Ch5/include/toy/ShapeInferenceInterface.td
+++ b/mlir/examples/toy/Ch5/include/toy/ShapeInferenceInterface.td
@@ -1,4 +1,4 @@
-//===- ShapeInferenceInterface.td - Operation Interface for Shape Inference ----------*- tablegen -*-===//
+//===- ShapeInferenceInterface.td - Shape Inference Interface -*- tablegen -==//
 //
 // Copyright 2019 The MLIR Authors.
 //
@@ -29,6 +29,11 @@ include "mlir/IR/OpBase.td"
 #endif // OP_BASE
 
 def ShapeInferenceOpInterface : OpInterface<"ShapeInference"> {
+  let description = [{
+    Interface to access a registered method to infer the return types for an
+    operation that can be used during type inference.
+  }];
+
   let methods = [
     InterfaceMethod<"Infer and set the output shape for the current operation.",
                     "void", "inferShapes">
diff --git a/mlir/examples/toy/Ch5/toyc.cpp b/mlir/examples/toy/Ch5/toyc.cpp
index e4157ea..9b47b43 100644
--- a/mlir/examples/toy/Ch5/toyc.cpp
+++ b/mlir/examples/toy/Ch5/toyc.cpp
@@ -137,6 +137,7 @@ int dumpMLIR() {
     // the operations.
     pm.addPass(mlir::toy::createShapeInferencePass());
     pm.addPass(mlir::createCanonicalizerPass());
+    pm.addPass(mlir::createCSEPass());
   }
 
   if (isLoweringToAffine) {
diff --git a/mlir/examples/toy/Ch6/include/toy/ShapeInferenceInterface.td b/mlir/examples/toy/Ch6/include/toy/ShapeInferenceInterface.td
index 19e70e6..4958b01 100644
--- a/mlir/examples/toy/Ch6/include/toy/ShapeInferenceInterface.td
+++ b/mlir/examples/toy/Ch6/include/toy/ShapeInferenceInterface.td
@@ -29,6 +29,11 @@ include "mlir/IR/OpBase.td"
 #endif // OP_BASE
 
 def ShapeInferenceOpInterface : OpInterface<"ShapeInference"> {
+  let description = [{
+    Interface to access a registered method to infer the return types for an
+    operation that can be used during type inference.
+  }];
+
   let methods = [
     InterfaceMethod<"Infer and set the output shape for the current operation.",
                     "void", "inferShapes">
diff --git a/mlir/examples/toy/Ch6/toyc.cpp b/mlir/examples/toy/Ch6/toyc.cpp
index a40056b..018dd0f 100644
--- a/mlir/examples/toy/Ch6/toyc.cpp
+++ b/mlir/examples/toy/Ch6/toyc.cpp
@@ -151,6 +151,7 @@ int loadAndProcessMLIR(mlir::MLIRContext &context,
     // the operations.
     pm.addPass(mlir::toy::createShapeInferencePass());
     pm.addPass(mlir::createCanonicalizerPass());
+    pm.addPass(mlir::createCSEPass());
   }
 
   if (isLoweringToAffine) {
diff --git a/mlir/g3doc/Tutorials/Toy/Ch-4.md b/mlir/g3doc/Tutorials/Toy/Ch-4.md
index 363cd3d..39349eb 100644
--- a/mlir/g3doc/Tutorials/Toy/Ch-4.md
+++ b/mlir/g3doc/Tutorials/Toy/Ch-4.md
@@ -1,41 +1,78 @@
-# Chapter 4: Using Interfaces
+# Chapter 4: Enabling Generic Transformation with Interfaces
 
-[Interfaces](../../Interfaces.md) provide a generic method for applying
-transformations across dialects. We first describe how to leverage an existing
-MLIR interface, and then walk through writing your own interface.
+## Background: Grappling with an Extensible IR
 
-## Function Inlining
+Through dialects, MLIR allows for the representation of many different levels of
+abstraction; the Toy dialect that we have previously defined is one such
+example. Though these different dialects may represent different abstractions,
+there is often a set of common transformations and analyses that we would like
+to perform. The problem that arises is that naively implementing each
+transformation for each dialect leads to large amounts of code duplication, as
+the internal algorithms are generally very similar, if not the same. We would
+like to provide the ability for transformations to opaquely hook into dialects
+like Toy to get the information they need.
 
-In order to apply function inlining in the Toy dialect, we override the
-DialectInlinerInterface in Toy, enable inlining and add special handling for the
-return operation:
+MLIR provides a set of always-available hooks for certain core transformations,
+as seen in the [previous chapter](Ch-3.md), where we registered some
+canonicalizations via a hook on our operations (`getCanonicalizationPatterns`).
+These types of hooks, however, don't scale well. Therefore, a more generic
+solution was designed, in the form of interfaces, to make the MLIR
+infrastructure as extensible as the representation.
+[Interfaces](../../Interfaces.md) provide a generic mechanism for dialects and
+operations to provide information to a transformation or analysis.
-```Toy(.cpp)
-//===----------------------------------------------------------------------===//
-// ToyInlinerInterface
-//===----------------------------------------------------------------------===//
+## Shape Inference: Preparing for Code Generation
 
-/// This class defines the interface for handling inlining with Toy
-/// operations.
+Our Toy IR currently operates on generic tensors, meaning that we don't know the
+shape of tensors other than during the initialization of constants. This
+complicates optimizations, as well as code generation. Fortunately, we can
+simply propagate the shapes through the computation until they are all known.
+The issue is how to handle calls to user-defined generic functions: every call
+site could deduce different shapes. One possibility would be to perform symbolic
+inference based on the argument types, but this would be hard to generalize if
+we were to introduce more control flow in the language. Another approach would
+be function specialization, where every call site with new argument shapes
+duplicates the called function and specializes it. The approach we take for Toy
+is to inline all of the function calls, and then perform a simple
+intra-procedural shape propagation.
+
+### Inlining
+
+Here we could write an inlining algorithm specifically designed for the Toy
+dialect, but that can become quite complicated depending on the level of
+sophistication that we want. Disregarding cost modeling, the pure structural
+transformation is already complex to implement from scratch. Thankfully, MLIR
+already provides a generic inliner algorithm that dialects can plug into. All we
+need to do in Toy is provide the [interfaces](../../Interfaces.md) for the
+inliner to hook into.
+
+The first thing we need to do is define the constraints on inlining operations
+in the Toy dialect. This information is provided through a
+[dialect interface](../../Interfaces.md#dialect-interfaces). A dialect interface
+is essentially a class containing a set of virtual hooks that a dialect may
+provide a specialization for. In this case, the interface is
+`DialectInlinerInterface`.
+
+```c++
+/// This class defines the interface for handling inlining with Toy operations.
+/// We simply inherit from the base interface class and provide a
+/// specialization of the necessary methods.
 struct ToyInlinerInterface : public DialectInlinerInterface {
   using DialectInlinerInterface::DialectInlinerInterface;
 
-  //===--------------------------------------------------------------------===//
-  // Analysis Hooks
-  //===--------------------------------------------------------------------===//
-
-  /// All operations within toy can be inlined.
+  /// This hook checks to see if the given operation is legal to inline into the
+  /// given region. For Toy this hook can simply return true, as all Toy
+  /// operations are inlinable.
   bool isLegalToInline(Operation *, Region *,
                       BlockAndValueMapping &) const final {
     return true;
   }
 
-  //===--------------------------------------------------------------------===//
-  // Transformation Hooks
-  //===--------------------------------------------------------------------===//
-
-  /// Handle the given inlined terminator(toy.return) by replacing it with a new operation
-  /// as necessary.
+  /// This hook is called when a terminator operation has been inlined. The only
+  /// terminator that we have in the Toy dialect is the return
+  /// operation (toy.return).
+  /// We handle the return by replacing the values previously returned by the
+  /// call operation with the operands of the return.
   void handleTerminator(Operation *op,
                         ArrayRef<Value *> valuesToRepl) const final {
     // Only "toy.return" needs to be handled here.
@@ -49,70 +86,301 @@ struct ToyInlinerInterface : public DialectInlinerInterface {
 };
 ```
 
-Next, we call into the interface by adding an inliner pass to the pass manager
-for toy:
+We then register our dialect interface directly on the Toy dialect, similarly to
+how we did for operations.
+
+```c++
+ToyDialect::ToyDialect(mlir::MLIRContext *ctx) : mlir::Dialect("toy", ctx) {
+  addInterfaces<ToyInlinerInterface>();
+}
+```
+
+Next, we need to provide a way for the inliner to know that `toy.generic_call`
+represents a call to a function. MLIR provides an
+[operation interface](../../Interfaces.md#operation-interfaces) that can be used
+to mark an operation as being "call-like". Unlike dialect interfaces, operation
+interfaces provide a more refined granularity of information that is specific
+and core to a single operation. The interface that we will be adding here is the
+`CallOpInterface`.
+
+To add this interface, we just need to include the definition in our operation
+specification file (Ops.td):
+
+```.td
+#ifdef MLIR_CALLINTERFACES
+#else
+include "mlir/Analysis/CallInterfaces.td"
+#endif // MLIR_CALLINTERFACES
+```
+
+and add it to the traits list of `GenericCallOp`:
+
+```.td
+def GenericCallOp : Toy_Op<"generic_call",
+    [DeclareOpInterfaceMethods<CallOpInterface>]> {
+  ...
+}
+```
+
+In the above, we also use the `DeclareOpInterfaceMethods` directive to
+auto-declare all of the interface methods in the class declaration of
+GenericCallOp. This means that we just need to provide a definition:
+
+```c++
+/// Return the callee of the generic call operation; this is required by the
+/// call interface.
+CallInterfaceCallable GenericCallOp::getCallableForCallee() {
+  return getAttrOfType<SymbolRefAttr>("callee");
+}
+
+/// Get the argument operands to the called function; this is required by the
+/// call interface.
+Operation::operand_range GenericCallOp::getArgOperands() { return inputs(); }
+```
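+
+With these two methods defined, any transformation that understands the
+`CallOpInterface`, such as the inliner, can treat `toy.generic_call` as an
+ordinary call without knowing anything else about the Toy dialect. The
+following is only a rough sketch of how such a generic analysis could query the
+interface; it is not part of the tutorial code, and the helper name is
+hypothetical:
+
+```c++
+#include "mlir/Analysis/CallInterfaces.h"
+
+/// Hypothetical helper: inspect any "call-like" operation generically.
+static void inspectCall(mlir::Operation *op) {
+  // Interfaces are queried by casting, as the shape inference pass later in
+  // this chapter also does.
+  if (auto call = llvm::dyn_cast<mlir::CallOpInterface>(op)) {
+    // For toy.generic_call, this resolves to the "callee" attribute.
+    mlir::CallInterfaceCallable callable = call.getCallableForCallee();
+    (void)callable;
+
+    // Walk the operands that are forwarded as call arguments.
+    for (mlir::Value *arg : call.getArgOperands())
+      (void)arg; // ... analyze each argument ...
+  }
+}
+```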
+
+Now that the inliner has been informed about the Toy dialect, we can add the
+inliner pass to the pass manager for Toy:
+
+```c++
+  pm.addPass(mlir::createInlinerPass());
+```
+
+Now let's look at a working example:
+
+```mlir
+func @multiply_transpose(%arg0: tensor<*xf64>, %arg1: tensor<*xf64>) -> tensor<*xf64> {
+  %0 = "toy.transpose"(%arg0) : (tensor<*xf64>) -> tensor<*xf64>
+  %1 = "toy.transpose"(%arg1) : (tensor<*xf64>) -> tensor<*xf64>
+  %2 = "toy.mul"(%0, %1) : (tensor<*xf64>, tensor<*xf64>) -> tensor<*xf64>
+  "toy.return"(%2) : (tensor<*xf64>) -> ()
+}
+func @main() {
+  %0 = "toy.constant"() {value = dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
+  %1 = "toy.reshape"(%0) : (tensor<2x3xf64>) -> tensor<2x3xf64>
+  %2 = "toy.constant"() {value = dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00, 6.000000e+00]> : tensor<6xf64>} : () -> tensor<6xf64>
+  %3 = "toy.reshape"(%2) : (tensor<6xf64>) -> tensor<2x3xf64>
+  %4 = "toy.generic_call"(%1, %3) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
+  %5 = "toy.generic_call"(%3, %1) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
+  "toy.print"(%5) : (tensor<*xf64>) -> ()
+  "toy.return"() : () -> ()
+}
+```
+
+We have two calls to `multiply_transpose` that we would like to inline into
+`main`, but if we look at the output, nothing has changed. We are missing one
+last subtle piece: there is a hidden type conversion on the edge of the call. If
+we look at the above, the operands of the `generic_call` are of type
+`tensor<2x3xf64>`, while the inputs to the function expect `tensor<*xf64>`. To
+resolve this difference, the inliner expects an explicit cast operation to be
+inserted. For this, we need to add a new operation to the Toy dialect,
+`ToyCastOp` (toy.cast), to represent casts between two different shapes.
+
+```.td
+def CastOp : Toy_Op<"cast", [NoSideEffect, SameOperandsAndResultShape]> {
+  let summary = "shape cast operation";
+  let description = [{
+    The "cast" operation converts a tensor from one type to an equivalent type
+    without changing any data elements. The source and destination types
+    must both be tensor types with the same element type. If both are ranked,
+    then the rank should be the same and static dimensions should match. The
+    operation is invalid if converting to a mismatching constant dimension.
+  }];
+
+  let arguments = (ins F64Tensor:$input);
+  let results = (outs F64Tensor:$output);
+
+  // Set the folder bit so that we can fold redundant cast operations.
+  let hasFolder = 1;
+}
+```
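+
+The `hasFolder` bit generates a declaration of a `fold` method on CastOp that we
+still need to define. The tutorial text doesn't show that definition, so the
+following is only a minimal sketch, assuming the fold hook signature of this
+vintage of MLIR: fold the cast away when it doesn't actually change the type.
+
+```c++
+/// Sketch of a folder for toy.cast (assumed, not from the tutorial text): if
+/// the input already has the result type, the cast is redundant and the input
+/// value can be used directly.
+mlir::OpFoldResult CastOp::fold(llvm::ArrayRef<mlir::Attribute> operands) {
+  if (getOperand()->getType() == getResult()->getType())
+    return getOperand();
+  // Otherwise, signal that the operation could not be folded.
+  return {};
+}
+```
+
+This folder is what lets the canonicalizer clean up the redundant toy.cast
+operations that inlining inserts.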
+
+We can then override the necessary hook on the ToyInlinerInterface to insert
+this cast for us when necessary:
+
+```c++
+struct ToyInlinerInterface : public DialectInlinerInterface {
+  ...
+
+  /// Attempts to materialize a conversion for a type mismatch between a call
+  /// from this dialect and a callable region. This method should generate an
+  /// operation that takes 'input' as the only operand, and produces a single
+  /// result of 'resultType'. If a conversion cannot be generated, nullptr
+  /// should be returned.
+  Operation *materializeCallConversion(OpBuilder &builder, Value *input,
+                                       Type resultType,
+                                       Location conversionLoc) const final {
+    return builder.create<CastOp>(conversionLoc, resultType, input);
+  }
+};
+```
+
+If we run the working example through the pipeline again, we get the expected:
 
-```Toy(.cpp)
-  pm.addPass(mlir::createInlinerPass());
+```mlir
+func @main() {
+  %0 = "toy.constant"() {value = dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
+  %1 = "toy.constant"() {value = dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
+  %2 = "toy.cast"(%1) : (tensor<2x3xf64>) -> tensor<*xf64>
+  %3 = "toy.cast"(%0) : (tensor<2x3xf64>) -> tensor<*xf64>
+  %4 = "toy.transpose"(%2) : (tensor<*xf64>) -> tensor<*xf64>
+  %5 = "toy.transpose"(%3) : (tensor<*xf64>) -> tensor<*xf64>
+  %6 = "toy.mul"(%4, %5) : (tensor<*xf64>, tensor<*xf64>) -> tensor<*xf64>
+  "toy.print"(%6) : (tensor<*xf64>) -> ()
+  "toy.return"() : () -> ()
+}
 ```
 
-** Insert example here **
+NOTE: The generic inliner will also perform simplifications, so the output may
+be a bit cleaner than expected.
+
+### Intraprocedural Shape Inference
+
+Now that we have inlined all of the functions, we are left with a main function
+containing a mix of statically and dynamically shaped operations. We can now
+write a simple shape inference pass to propagate shapes intra-procedurally
+(within a single function). We could write this as a pass that directly encodes
+the constraints of the operations within the Toy dialect, but this seems like a
+good candidate for a transformation that could be written generically. As a good
+rule of thumb, it is better to express a transformation as generically as
+possible, so that it can be extended to other dialects in the future. There is
+no telling how many other dialects may have similar needs or encounter the same
+problems.
 
-## Shape Inference
+For shape inference, if we break the problem down to its core, we really just
+want operations to tell us what the expected outputs are, given a set of
+statically known inputs. We can definitely get more complex than that, but for
+our needs we can keep it simple. Given that this is a property that is core to a
+specific operation, we can define an operation interface that can be specified
+on operations that need to have their result shapes inferred.
 
-The Toy language allows for implicit shapes and hence requires shape inference.
-We implement shape inference as a generic
-[Operation Interface](../../Interfaces.md#operation-interfaces).
+Similarly to operations, we can also
+[define operation interfaces](../../OpDefinitions.md#operation-interfaces) using
+the operation definition specification (ODS) framework.
 
-1. We first create the ShapeInferenceOpInterface by specializing the
-   OpInterface class using [ODS](../../OpDefinitions.md#operation-interfaces).
-   This class defines interface methods that Toy operations must override for
-   shape inference.
+The interface is defined by inheriting from `OpInterface`, which takes as a
+template argument the name to be given to the generated C++ interface class. For
+our purposes, we will give the generated class the simpler name
+`ShapeInference`. We also provide a description for the interface.
+
+```.td
+def ShapeInferenceOpInterface : OpInterface<"ShapeInference"> {
+  let description = [{
+    Interface to access a registered method to infer the return types for an
+    operation that can be used during type inference.
+  }];
+}
+```
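+
+For reference, once the `inferShapes` method is declared below, ODS will
+generate a C++ interface class for this definition. The following is only a
+rough sketch of its shape, not the exact generated code (the real output also
+involves a generated `detail::ShapeInferenceInterfaceTraits` concept/model
+pair):
+
+```c++
+/// Approximate shape of the ODS-generated interface class.
+class ShapeInference
+    : public mlir::OpInterface<ShapeInference,
+                               detail::ShapeInferenceInterfaceTraits> {
+public:
+  /// Infer and set the output shape for the current operation.
+  void inferShapes();
+};
+```
+
+This is the class that the shape inference pass will later `dyn_cast` to.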
+
+Next, we define the interface methods that the operations will need to provide.
+An interface method is comprised of: a description; a C++ return type in string
+form; a method name in string form; and a few optional components, depending on
+the need. See the [ODS
+documentation](../../OpDefinitions.md#operation-interfaces) for more
+information.
+
+```.td
+def ShapeInferenceOpInterface : OpInterface<"ShapeInference"> {
+  let description = [{
+    Interface to access a registered method to infer the return types for an
+    operation that can be used during type inference.
+  }];
 
-```Toy(.cpp)
-def ShapeInferenceOpInterface : OpInterface<"ShapeInferenceOpInterface"> {
   let methods = [
-    InterfaceMethod<
-        "bool", "returnsGenericArray", (ins), [{
-          if (getNumResults() == 1) {
-            auto arrayTy = op.getResult()->getType().cast<RankedTensorType>();
-            return arrayTy.getShape().empty();
-          }
-          return false;
-        }]>,
-    InterfaceMethod<"void", "inferShapes", (ins), [{}]>
+    InterfaceMethod<"Infer and set the output shape for the current operation.",
+                    "void", "inferShapes">
   ];
 }
 ```
 
-1. Next, we override the inferShapes() method within Toy operations. As an
-   example, for the transpose op, the result shape is inferred by swapping the
-   dimensions of the input tensor.
-
-```Toy(.cpp)
-  void inferShapes() {
-    SmallVector<int64_t, 2> dims;
-    auto arrayTy = getOperand()->getType().cast<RankedTensorType>();
-    dims.insert(dims.end(), arrayTy.getShape().begin(),
-                arrayTy.getShape().end());
-    if (dims.size() == 2)
-      std::swap(dims[0], dims[1]);
-    getResult()->setType(RankedTensorType::get(dims, arrayTy.getElementType()));
-    return;
-  }
-```
+Now that the interface is defined, we can add it to the necessary Toy operations
+in a similar way to how we added the `CallOpInterface` to GenericCallOp:
+
+```.td
+def MulOp : Toy_Op<"mul",
+    [..., DeclareOpInterfaceMethods<ShapeInferenceOpInterface>]> {
+  ...
+}
+```
+
+Each of these operations will then need to provide a definition for the
+`inferShapes()` method. As an example, for the mul op, the result shape is
+inferred as the shape of the inputs.
+
+```c++
+/// Infer the output shape of the MulOp; this is required by the shape
+/// inference interface.
+void MulOp::inferShapes() { getResult()->setType(getOperand(0)->getType()); }
+```
+
+At this point, each of the necessary Toy operations provides a mechanism with
+which to infer its output shape. The ShapeInferencePass is a FunctionPass: it
+will run on each Function in isolation. MLIR also supports general
+[OperationPasses](../../WritingAPass.md#operation-pass) that run on any isolated
+operation (e.g. other function-like operations), but here our module only
+contains functions, so there is no need to generalize yet.
+
+Implementing such a pass is done by creating a class inheriting from
+`mlir::FunctionPass` and overriding the `runOnFunction()` method:
+
+```c++
+class ShapeInferencePass : public mlir::FunctionPass<ShapeInferencePass> {
+  void runOnFunction() override {
+    FuncOp function = getFunction();
+    ...
+  }
+};
+```
+
+The algorithm operates as follows:
+
+1.  Build a worklist containing all the operations that return a dynamically
+    shaped tensor: these are the operations that need shape inference.
+2.  Iterate on the worklist:
+    -   find an operation to process: the next ready operation in the worklist
+        has all of its arguments non-generic,
+    -   if no operation is found, break out of the loop,
+    -   remove the operation from the worklist,
+    -   infer the shape of its output from the argument types.
+3.  If the worklist is empty, the algorithm succeeded.
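+
+As a rough sketch (simplified from the actual pass, and with the
+`returnsDynamicShape` and `allOperandsInferred` helper predicates assumed
+rather than shown), this loop might look like:
+
+```c++
+void runOnFunction() override {
+  FuncOp f = getFunction();
+
+  // Step 1: populate the worklist with every operation that returns a
+  // dynamically shaped tensor.
+  llvm::SmallPtrSet<mlir::Operation *, 16> opWorklist;
+  f.walk([&](mlir::Operation *op) {
+    if (returnsDynamicShape(op))
+      opWorklist.insert(op);
+  });
+
+  // Step 2: repeatedly pick a "ready" operation, i.e. one whose operands are
+  // all statically shaped, and infer its result shape (see below).
+  while (!opWorklist.empty()) {
+    auto nextop = llvm::find_if(opWorklist, allOperandsInferred);
+    if (nextop == opWorklist.end())
+      break; // No operation is ready for inference.
+    mlir::Operation *op = *nextop;
+    opWorklist.erase(op);
+    // ... infer the shape of 'op' using the interface, as shown next ...
+  }
+
+  // Step 3: a non-empty worklist here means inference got stuck.
+  if (!opWorklist.empty())
+    return signalPassFailure();
+}
+```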
+
+When processing an operation, we query if it registered the `ShapeInference`
+interface.
+
+```c++
+  // Ask the operation to infer its output shapes.
+  LLVM_DEBUG(llvm::dbgs() << "Inferring shape for: " << *op << "\n");
+
+  // We check if an operation has a particular interface by casting.
+  if (ShapeInference shapeOp = dyn_cast<ShapeInference>(op)) {
+    shapeOp.inferShapes();
+  } else {
+    op->emitError("unable to infer shape of operation without shape "
+                  "inference interface");
+    return signalPassFailure();
+  }
+```
 
-1. We then create a generic ShapeInference Function pass that uses operation
-   casting to access the inferShapes() method. This is an intraprocedural shape
-   inference pass that executes after function inlining and iterates over
-   operations in a worklist calling inferShapes for each operation with unknown
-   result shapes.
+We can then add our pass to the pass manager:
 
-2. Finally, we call into shape inference pass by adding it to the pass manager
-   for toy:
+```c++
+  pm.addPass(mlir::createShapeInferencePass());
+```
+
+If we rerun our original example, we now get the following:
 
-```Toy(.cpp)
-  pm.addPass(mlir::createShapeInferencePass());
+```mlir
+func @main() {
+  %0 = "toy.constant"() {value = dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
+  %1 = "toy.transpose"(%0) : (tensor<2x3xf64>) -> tensor<3x2xf64>
+  %2 = "toy.mul"(%1, %1) : (tensor<3x2xf64>, tensor<3x2xf64>) -> tensor<3x2xf64>
+  "toy.print"(%2) : (tensor<3x2xf64>) -> ()
+  "toy.return"() : () -> ()
+}
 ```
 
-** Insert example here **
+You can build `toyc-ch4` and try it yourself: `toyc-ch4 test/codegen.toy
+-emit=mlir -opt`.
+
+In the [next chapter](Ch-5.md), we will start the process of code generation by
+targeting a lower-level dialect, in order to optimize some of the more
+compute-heavy Toy operations.
diff --git a/mlir/test/Examples/Toy/Ch4/shape_inference.mlir b/mlir/test/Examples/Toy/Ch4/shape_inference.mlir index 799ce45..d1c07e4 100644 --- a/mlir/test/Examples/Toy/Ch4/shape_inference.mlir +++ b/mlir/test/Examples/Toy/Ch4/shape_inference.mlir @@ -24,9 +24,7 @@ func @main() { // CHECK-LABEL: func @main() // CHECK: [[VAL_0:%.*]] = "toy.constant"() {value = dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64> -// CHECK: [[VAL_1:%.*]] = "toy.constant"() {value = dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64> -// CHECK: [[VAL_2:%.*]] = "toy.transpose"([[VAL_1]]) : (tensor<2x3xf64>) -> tensor<3x2xf64> -// CHECK: [[VAL_3:%.*]] = "toy.transpose"([[VAL_0]]) : (tensor<2x3xf64>) -> tensor<3x2xf64> -// CHECK: [[VAL_4:%.*]] = "toy.mul"([[VAL_2]], [[VAL_3]]) : (tensor<3x2xf64>, tensor<3x2xf64>) -> tensor<3x2xf64> -// CHECK: "toy.print"([[VAL_4]]) : (tensor<3x2xf64>) -> () +// CHECK: [[VAL_1:%.*]] = "toy.transpose"([[VAL_0]]) : (tensor<2x3xf64>) -> tensor<3x2xf64> +// CHECK: [[VAL_2:%.*]] = "toy.mul"([[VAL_1]], [[VAL_1]]) : (tensor<3x2xf64>, tensor<3x2xf64>) -> tensor<3x2xf64> +// CHECK: "toy.print"([[VAL_2]]) : (tensor<3x2xf64>) -> () // CHECK: "toy.return"() : () -> () diff --git a/mlir/test/Examples/Toy/Ch5/shape_inference.mlir b/mlir/test/Examples/Toy/Ch5/shape_inference.mlir index 962a641..9e44ac5 100644 --- a/mlir/test/Examples/Toy/Ch5/shape_inference.mlir +++ b/mlir/test/Examples/Toy/Ch5/shape_inference.mlir @@ -24,9 +24,7 @@ func @main() { // CHECK-LABEL: func @main() // CHECK: [[VAL_0:%.*]] = "toy.constant"() {value = dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64> -// CHECK: [[VAL_1:%.*]] = "toy.constant"() {value = dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64> -// CHECK: [[VAL_2:%.*]] = "toy.transpose"([[VAL_1]]) : (tensor<2x3xf64>) -> tensor<3x2xf64> -// CHECK: [[VAL_3:%.*]] = "toy.transpose"([[VAL_0]]) : (tensor<2x3xf64>) -> tensor<3x2xf64> -// CHECK: [[VAL_4:%.*]] = "toy.mul"([[VAL_2]], [[VAL_3]]) : (tensor<3x2xf64>, tensor<3x2xf64>) -> tensor<3x2xf64> -// CHECK: "toy.print"([[VAL_4]]) : (tensor<3x2xf64>) -> () +// CHECK: [[VAL_1:%.*]] = "toy.transpose"([[VAL_0]]) : (tensor<2x3xf64>) -> tensor<3x2xf64> +// CHECK: [[VAL_2:%.*]] = "toy.mul"([[VAL_1]], [[VAL_1]]) : (tensor<3x2xf64>, tensor<3x2xf64>) -> tensor<3x2xf64> +// CHECK: "toy.print"([[VAL_2]]) : (tensor<3x2xf64>) -> () // CHECK: "toy.return"() : () -> () diff --git a/mlir/test/Examples/Toy/Ch6/shape_inference.mlir b/mlir/test/Examples/Toy/Ch6/shape_inference.mlir index 5788d86..199446c 100644 --- a/mlir/test/Examples/Toy/Ch6/shape_inference.mlir +++ b/mlir/test/Examples/Toy/Ch6/shape_inference.mlir @@ -24,9 +24,7 @@ func @main() { // CHECK-LABEL: func @main() // CHECK: [[VAL_0:%.*]] = "toy.constant"() {value = dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64> -// CHECK: [[VAL_1:%.*]] = "toy.constant"() {value = dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64> -// CHECK: [[VAL_2:%.*]] = "toy.transpose"([[VAL_1]]) : (tensor<2x3xf64>) -> tensor<3x2xf64> -// CHECK: 
[[VAL_3:%.*]] = "toy.transpose"([[VAL_0]]) : (tensor<2x3xf64>) -> tensor<3x2xf64> -// CHECK: [[VAL_4:%.*]] = "toy.mul"([[VAL_2]], [[VAL_3]]) : (tensor<3x2xf64>, tensor<3x2xf64>) -> tensor<3x2xf64> -// CHECK: "toy.print"([[VAL_4]]) : (tensor<3x2xf64>) -> () +// CHECK: [[VAL_1:%.*]] = "toy.transpose"([[VAL_0]]) : (tensor<2x3xf64>) -> tensor<3x2xf64> +// CHECK: [[VAL_2:%.*]] = "toy.mul"([[VAL_1]], [[VAL_1]]) : (tensor<3x2xf64>, tensor<3x2xf64>) -> tensor<3x2xf64> +// CHECK: "toy.print"([[VAL_2]]) : (tensor<3x2xf64>) -> () // CHECK: "toy.return"() : () -> ()