```
Module:
- Function
- Proto 'multiply_transpose' @test/ast.toy:5:1'
- Args: [a, b]
+ Function
+ Proto 'multiply_transpose' @test/Examples/Toy/Ch1/ast.toy:4:1
+ Params: [a, b]
Block {
Return
- BinOp: * @test/ast.toy:6:25
- Call 'transpose' [ @test/ast.toy:6:10
- var: a @test/ast.toy:6:20
+ BinOp: * @test/Examples/Toy/Ch1/ast.toy:5:25
+ Call 'transpose' [ @test/Examples/Toy/Ch1/ast.toy:5:10
+ var: a @test/Examples/Toy/Ch1/ast.toy:5:20
]
- Call 'transpose' [ @test/ast.toy:6:25
- var: b @test/ast.toy:6:35
+ Call 'transpose' [ @test/Examples/Toy/Ch1/ast.toy:5:25
+ var: b @test/Examples/Toy/Ch1/ast.toy:5:35
]
} // Block
- Function
- Proto 'main' @test/ast.toy:9:1'
- Args: []
+ Function
+ Proto 'main' @test/Examples/Toy/Ch1/ast.toy:8:1
+ Params: []
Block {
- VarDecl a<> @test/ast.toy:11:3
- Literal: <2, 3>[<3>[1.000000e+00, 2.000000e+00, 3.000000e+00], <3>[4.000000e+00, 5.000000e+00, 6.000000e+00]] @test/ast.toy:11:17
- VarDecl b<2, 3> @test/ast.toy:12:3
- Literal: <6>[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00, 6.000000e+00] @test/ast.toy:12:17
- VarDecl c<> @test/ast.toy:15:3
- Call 'multiply_transpose' [ @test/ast.toy:15:11
- var: a @test/ast.toy:15:30
- var: b @test/ast.toy:15:33
+ VarDecl a<> @test/Examples/Toy/Ch1/ast.toy:11:3
+ Literal: <2, 3>[ <3>[ 1.000000e+00, 2.000000e+00, 3.000000e+00], <3>[ 4.000000e+00, 5.000000e+00, 6.000000e+00]] @test/Examples/Toy/Ch1/ast.toy:11:11
+ VarDecl b<2, 3> @test/Examples/Toy/Ch1/ast.toy:15:3
+ Literal: <6>[ 1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00, 6.000000e+00] @test/Examples/Toy/Ch1/ast.toy:15:17
+ VarDecl c<> @test/Examples/Toy/Ch1/ast.toy:19:3
+ Call 'multiply_transpose' [ @test/Examples/Toy/Ch1/ast.toy:19:11
+ var: a @test/Examples/Toy/Ch1/ast.toy:19:30
+ var: b @test/Examples/Toy/Ch1/ast.toy:19:33
]
- VarDecl d<> @test/ast.toy:18:3
- Call 'multiply_transpose' [ @test/ast.toy:18:11
- var: b @test/ast.toy:18:30
- var: a @test/ast.toy:18:33
+ VarDecl d<> @test/Examples/Toy/Ch1/ast.toy:22:3
+ Call 'multiply_transpose' [ @test/Examples/Toy/Ch1/ast.toy:22:11
+ var: b @test/Examples/Toy/Ch1/ast.toy:22:30
+ var: a @test/Examples/Toy/Ch1/ast.toy:22:33
]
- VarDecl e<> @test/ast.toy:21:3
- Call 'multiply_transpose' [ @test/ast.toy:21:11
- var: b @test/ast.toy:21:30
- var: c @test/ast.toy:21:33
+ VarDecl e<> @test/Examples/Toy/Ch1/ast.toy:25:3
+ Call 'multiply_transpose' [ @test/Examples/Toy/Ch1/ast.toy:25:11
+ var: b @test/Examples/Toy/Ch1/ast.toy:25:30
+ var: c @test/Examples/Toy/Ch1/ast.toy:25:33
]
- VarDecl f<> @test/ast.toy:24:3
- Call 'multiply_transpose' [ @test/ast.toy:24:11
- Call 'transpose' [ @test/ast.toy:24:30
- var: a @test/ast.toy:24:40
+ VarDecl f<> @test/Examples/Toy/Ch1/ast.toy:28:3
+ Call 'multiply_transpose' [ @test/Examples/Toy/Ch1/ast.toy:28:11
+ Call 'transpose' [ @test/Examples/Toy/Ch1/ast.toy:28:30
+ var: a @test/Examples/Toy/Ch1/ast.toy:28:40
]
- var: c @test/ast.toy:24:44
+ var: c @test/Examples/Toy/Ch1/ast.toy:28:44
]
} // Block
```
#### Specifying a Custom Assembly Format
-At this point we can generate our "Toy IR". A simplified version of the previous
-example:
+At this point we can generate our "Toy IR". For example, the following:
```toy
# User defined generic function that operates on unknown shaped arguments.
```mlir
module {
func @multiply_transpose(%arg0: tensor<*xf64>, %arg1: tensor<*xf64>) -> tensor<*xf64> {
- %0 = "toy.transpose"(%arg0) : (tensor<*xf64>) -> tensor<*xf64> loc("test/codegen.toy":5:10)
- %1 = "toy.transpose"(%arg1) : (tensor<*xf64>) -> tensor<*xf64> loc("test/codegen.toy":5:25)
- %2 = "toy.mul"(%0, %1) : (tensor<*xf64>, tensor<*xf64>) -> tensor<*xf64> loc("test/codegen.toy":5:25)
- "toy.return"(%2) : (tensor<*xf64>) -> () loc("test/codegen.toy":5:3)
- } loc("test/codegen.toy":4:1)
+ %0 = "toy.transpose"(%arg0) : (tensor<*xf64>) -> tensor<*xf64> loc("test/Examples/Toy/Ch2/codegen.toy":5:10)
+ %1 = "toy.transpose"(%arg1) : (tensor<*xf64>) -> tensor<*xf64> loc("test/Examples/Toy/Ch2/codegen.toy":5:25)
+ %2 = "toy.mul"(%0, %1) : (tensor<*xf64>, tensor<*xf64>) -> tensor<*xf64> loc("test/Examples/Toy/Ch2/codegen.toy":5:25)
+ "toy.return"(%2) : (tensor<*xf64>) -> () loc("test/Examples/Toy/Ch2/codegen.toy":5:3)
+ } loc("test/Examples/Toy/Ch2/codegen.toy":4:1)
func @main() {
- %0 = "toy.constant"() {value = dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64> loc("test/codegen.toy":9:17)
- %1 = "toy.reshape"(%0) : (tensor<2x3xf64>) -> tensor<2x3xf64> loc("test/codegen.toy":9:3)
- %2 = "toy.constant"() {value = dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00, 6.000000e+00]> : tensor<6xf64>} : () -> tensor<6xf64> loc("test/codegen.toy":10:17)
- %3 = "toy.reshape"(%2) : (tensor<6xf64>) -> tensor<2x3xf64> loc("test/codegen.toy":10:3)
- %4 = "toy.generic_call"(%1, %3) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64> loc("test/codegen.toy":11:11)
- %5 = "toy.generic_call"(%3, %1) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64> loc("test/codegen.toy":12:11)
- "toy.print"(%5) : (tensor<*xf64>) -> () loc("test/codegen.toy":13:3)
- "toy.return"() : () -> () loc("test/codegen.toy":8:1)
- } loc("test/codegen.toy":8:1)
-} loc("test/codegen.toy":0:0)
+ %0 = "toy.constant"() {value = dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64> loc("test/Examples/Toy/Ch2/codegen.toy":9:17)
+ %1 = "toy.reshape"(%0) : (tensor<2x3xf64>) -> tensor<2x3xf64> loc("test/Examples/Toy/Ch2/codegen.toy":9:3)
+ %2 = "toy.constant"() {value = dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00, 6.000000e+00]> : tensor<6xf64>} : () -> tensor<6xf64> loc("test/Examples/Toy/Ch2/codegen.toy":10:17)
+ %3 = "toy.reshape"(%2) : (tensor<6xf64>) -> tensor<2x3xf64> loc("test/Examples/Toy/Ch2/codegen.toy":10:3)
+ %4 = "toy.generic_call"(%1, %3) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64> loc("test/Examples/Toy/Ch2/codegen.toy":11:11)
+ %5 = "toy.generic_call"(%3, %1) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64> loc("test/Examples/Toy/Ch2/codegen.toy":12:11)
+ "toy.print"(%5) : (tensor<*xf64>) -> () loc("test/Examples/Toy/Ch2/codegen.toy":13:3)
+ "toy.return"() : () -> () loc("test/Examples/Toy/Ch2/codegen.toy":8:1)
+ } loc("test/Examples/Toy/Ch2/codegen.toy":8:1)
+} loc(unknown)
```
One thing to notice here is that all of our Toy operations are printed using the
```mlir
module {
func @multiply_transpose(%arg0: tensor<*xf64>, %arg1: tensor<*xf64>) -> tensor<*xf64> {
- %0 = toy.transpose(%arg0 : tensor<*xf64>) to tensor<*xf64> loc("test/codegen.toy":5:10)
- %1 = toy.transpose(%arg1 : tensor<*xf64>) to tensor<*xf64> loc("test/codegen.toy":5:25)
- %2 = toy.mul %0, %1 : tensor<*xf64> loc("test/codegen.toy":5:25)
- toy.return %2 : tensor<*xf64> loc("test/codegen.toy":5:3)
- } loc("test/codegen.toy":4:1)
+ %0 = toy.transpose(%arg0 : tensor<*xf64>) to tensor<*xf64> loc("test/Examples/Toy/Ch2/codegen.toy":5:10)
+ %1 = toy.transpose(%arg1 : tensor<*xf64>) to tensor<*xf64> loc("test/Examples/Toy/Ch2/codegen.toy":5:25)
+ %2 = toy.mul %0, %1 : tensor<*xf64> loc("test/Examples/Toy/Ch2/codegen.toy":5:25)
+ toy.return %2 : tensor<*xf64> loc("test/Examples/Toy/Ch2/codegen.toy":5:3)
+ } loc("test/Examples/Toy/Ch2/codegen.toy":4:1)
func @main() {
- %0 = toy.constant dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64> loc("test/codegen.toy":9:17)
- %1 = toy.reshape(%0 : tensor<2x3xf64>) to tensor<2x3xf64> loc("test/codegen.toy":9:3)
- %2 = toy.constant dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00, 6.000000e+00]> : tensor<6xf64> loc("test/codegen.toy":10:17)
- %3 = toy.reshape(%2 : tensor<6xf64>) to tensor<2x3xf64> loc("test/codegen.toy":10:3)
- %4 = toy.generic_call @multiply_transpose(%1, %3) : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64> loc("test/codegen.toy":11:11)
- %5 = toy.generic_call @multiply_transpose(%3, %1) : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64> loc("test/codegen.toy":12:11)
- toy.print %5 : tensor<*xf64> loc("test/codegen.toy":13:3)
- toy.return loc("test/codegen.toy":8:1)
- } loc("test/codegen.toy":8:1)
-} loc("test/codegen.toy":0:0)
+ %0 = toy.constant dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64> loc("test/Examples/Toy/Ch2/codegen.toy":9:17)
+ %1 = toy.reshape(%0 : tensor<2x3xf64>) to tensor<2x3xf64> loc("test/Examples/Toy/Ch2/codegen.toy":9:3)
+ %2 = toy.constant dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00, 6.000000e+00]> : tensor<6xf64> loc("test/Examples/Toy/Ch2/codegen.toy":10:17)
+ %3 = toy.reshape(%2 : tensor<6xf64>) to tensor<2x3xf64> loc("test/Examples/Toy/Ch2/codegen.toy":10:3)
+ %4 = toy.generic_call @multiply_transpose(%1, %3) : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64> loc("test/Examples/Toy/Ch2/codegen.toy":11:11)
+ %5 = toy.generic_call @multiply_transpose(%3, %1) : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64> loc("test/Examples/Toy/Ch2/codegen.toy":12:11)
+ toy.print %5 : tensor<*xf64> loc("test/Examples/Toy/Ch2/codegen.toy":13:3)
+ toy.return loc("test/Examples/Toy/Ch2/codegen.toy":8:1)
+ } loc("test/Examples/Toy/Ch2/codegen.toy":8:1)
+} loc(unknown)
```
Above we introduce several of the concepts for defining operations in the ODS
## Complete Toy Example
-At this point we can generate our "Toy IR". A simplified version of the previous
-example:
-
-```toy
-# User defined generic function that operates on unknown shaped arguments.
-def multiply_transpose(a, b) {
- return transpose(a) * transpose(b);
-}
-
-def main() {
- var a<2, 3> = [[1, 2, 3], [4, 5, 6]];
- var b<2, 3> = [1, 2, 3, 4, 5, 6];
- var c = multiply_transpose(a, b);
- var d = multiply_transpose(b, a);
- print(d);
-}
-```
-
-Results in the following IR:
-
-```mlir
-module {
- func @multiply_transpose(%arg0: tensor<*xf64>, %arg1: tensor<*xf64>) -> tensor<*xf64> {
- %0 = toy.transpose(%arg0 : tensor<*xf64>) to tensor<*xf64> loc("test/codegen.toy":5:10)
- %1 = toy.transpose(%arg1 : tensor<*xf64>) to tensor<*xf64> loc("test/codegen.toy":5:25)
- %2 = toy.mul %0, %1 : tensor<*xf64> loc("test/codegen.toy":5:25)
- toy.return %2 : tensor<*xf64> loc("test/codegen.toy":5:3)
- } loc("test/codegen.toy":4:1)
- func @main() {
- %0 = toy.constant dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64> loc("test/codegen.toy":9:17)
- %1 = toy.reshape(%0 : tensor<2x3xf64>) to tensor<2x3xf64> loc("test/codegen.toy":9:3)
- %2 = toy.constant dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00, 6.000000e+00]> : tensor<6xf64> loc("test/codegen.toy":10:17)
- %3 = toy.reshape(%2 : tensor<6xf64>) to tensor<2x3xf64> loc("test/codegen.toy":10:3)
- %4 = toy.generic_call @multiply_transpose(%1, %3) : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64> loc("test/codegen.toy":11:11)
- %5 = toy.generic_call @multiply_transpose(%3, %1) : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64> loc("test/codegen.toy":12:11)
- toy.print %5 : tensor<*xf64> loc("test/codegen.toy":13:3)
- toy.return loc("test/codegen.toy":8:1)
- } loc("test/codegen.toy":8:1)
-} loc("test/codegen.toy":0:0)
-```
-
-You can build `toyc-ch2` and try yourself: `toyc-ch2
-test/Examples/Toy/Ch2/codegen.toy -emit=mlir -mlir-print-debuginfo`. We can also
-check our RoundTrip: `toyc-ch2 test/Examples/Toy/Ch2/codegen.toy -emit=mlir
--mlir-print-debuginfo 2> codegen.mlir` followed by `toyc-ch2 codegen.mlir
--emit=mlir`. You should also use `mlir-tblgen` on the final definition file and
-study the generated C++ code.
+We can now generate our "Toy IR". You can build `toyc-ch2` and try it yourself
+on the above example: `toyc-ch2 test/Examples/Toy/Ch2/codegen.toy -emit=mlir
+-mlir-print-debuginfo`. We can also verify the round-trip: run `toyc-ch2
+test/Examples/Toy/Ch2/codegen.toy -emit=mlir -mlir-print-debuginfo 2>
+codegen.mlir`, followed by `toyc-ch2 codegen.mlir -emit=mlir`. You should also
+use `mlir-tblgen` on the final definition file and study the generated C++ code.
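+
+To get a feel for what to expect from `mlir-tblgen`, below is a very rough,
+hand-abbreviated flavor of the kind of C++ declaration that `-gen-op-decls`
+emits for an ODS op definition. This is illustrative only: the real output is
+far more elaborate, and its exact signatures vary across MLIR versions.
+
+```c++
+/// Illustrative flavor only; real mlir-tblgen output differs.
+class TransposeOp : public mlir::Op<TransposeOp, mlir::OpTrait::OneOperand,
+                                    mlir::OpTrait::OneResult> {
+public:
+  /// Inherit the constructors from the base Op class.
+  using Op::Op;
+
+  /// The fully qualified name of this operation.
+  static llvm::StringRef getOperationName() { return "toy.transpose"; }
+
+  /// A verifier hook, emitted when the ODS definition declares one.
+  mlir::LogicalResult verify();
+};
+```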
At this point, MLIR knows about our Toy dialect and operations. In the
[next chapter](Ch-3.md), we will leverage our new dialect to implement some
pm.addNestedPass<mlir::FuncOp>(mlir::createCanonicalizerPass());
```
-Finally, we can run `toyc-ch3 test/transpose_transpose.toy -emit=mlir -opt` and
-observe our pattern in action:
+Finally, we can run `toyc-ch3 test/Examples/Toy/Ch3/transpose_transpose.toy
+-emit=mlir -opt` and observe our pattern in action:
```mlir
func @transpose_transpose(%arg0: tensor<*xf64>) -> tensor<*xf64> {
```
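+
+The rewrite driving this is the tutorial's `SimplifyRedundantTranspose`
+pattern. As a hedged sketch (treat it as illustrative rather than the exact
+listing; accessor and hook signatures vary across MLIR versions):
+
+```c++
+/// Illustrative sketch: fold transpose(transpose(x)) into x.
+struct SimplifyRedundantTranspose : public mlir::OpRewritePattern<TransposeOp> {
+  /// Register this pattern to match every toy.transpose in the module.
+  SimplifyRedundantTranspose(mlir::MLIRContext *context)
+      : OpRewritePattern<TransposeOp>(context, /*benefit=*/1) {}
+
+  /// Match a transpose whose input is defined by another transpose, and
+  /// replace the pair with the inner transpose's input.
+  mlir::LogicalResult
+  matchAndRewrite(TransposeOp op,
+                  mlir::PatternRewriter &rewriter) const override {
+    mlir::Value transposeInput = op.getOperand();
+    TransposeOp transposeInputOp = transposeInput.getDefiningOp<TransposeOp>();
+    if (!transposeInputOp)
+      return mlir::failure();
+    rewriter.replaceOp(op, {transposeInputOp.getOperand()});
+    return mlir::success();
+  }
+};
+```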
We demonstrate these reshape optimizations using the following
-trivialReshape.toy program:
+`trivial_reshape.toy` program:
```c++
def main() {
}
```
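+
+In the tutorial these reshape foldings are written as declarative (DRR)
+patterns; as a hedged, hand-written C++ equivalent of the
+`reshape(reshape(x))` case (illustrative only; the op's accessors and builder
+signature are assumptions):
+
+```c++
+/// Illustrative sketch: fold reshape(reshape(x)) into reshape(x).
+struct FoldReshapeOfReshape : public mlir::OpRewritePattern<ReshapeOp> {
+  using mlir::OpRewritePattern<ReshapeOp>::OpRewritePattern;
+
+  mlir::LogicalResult
+  matchAndRewrite(ReshapeOp op,
+                  mlir::PatternRewriter &rewriter) const override {
+    // Only fire when our input is itself produced by a reshape.
+    auto inputReshape = op.getOperand().getDefiningOp<ReshapeOp>();
+    if (!inputReshape)
+      return mlir::failure();
+
+    // Rebuild the outer reshape directly from the inner reshape's input,
+    // preserving the outer result type.
+    rewriter.replaceOpWithNewOp<ReshapeOp>(op, op.getType(),
+                                           inputReshape.getOperand());
+    return mlir::success();
+  }
+};
+```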
-We can try to run `toyc-ch3 test/trivialReshape.toy -emit=mlir -opt` and observe
-our pattern in action:
+We can run `toyc-ch3 test/Examples/Toy/Ch3/trivial_reshape.toy -emit=mlir
+-opt` and observe our pattern in action:
```mlir
module {
## Complete Toy Example
-Looking back at our current working example:
+Let's take a concrete example:
```mlir
func @main() {
Here, we can see that a redundant allocation was removed, the two loop nests
were fused, and some unnecessary `load`s were removed. You can build `toyc-ch5`
-and try yourself: `toyc-ch5 test/lowering.toy -emit=mlir-affine`. We can also
-check our optimizations by adding `-opt`.
+and try it yourself: `toyc-ch5 test/Examples/Toy/Ch5/affine-lowering.mlir
+-emit=mlir-affine`. We can also check our optimizations by adding `-opt`.
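+
+The improvements come from standard passes that the driver schedules when
+`-opt` is set. As a hedged sketch of that wiring, assuming `optPM` is the
+nested function pass manager and `enableOpt` the command-line flag, as in the
+tutorial drivers (pass creation functions are era-specific and some have
+since been renamed upstream):
+
+```c++
+if (enableOpt) {
+  // Fuse the producer/consumer affine loop nests over the buffers...
+  optPM.addPass(mlir::createLoopFusionPass());
+  // ...then forward stored values to loads, exposing dead buffers.
+  optPM.addPass(mlir::createMemRefDataFlowOptPass());
+}
+```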
In this chapter we explored some aspects of partial lowering, with the intent to
optimize. In the [next chapter](Ch-6.md) we will continue the discussion about
}
```
-The full code listing for dumping LLVM IR can be found in `Ch6/toy.cpp` in the
-`dumpLLVMIR()` function:
+The full code listing for dumping LLVM IR can be found in
+`examples/toy/Ch6/toy.cpp` in the `dumpLLVMIR()` function:
```c++
[`--print-ir-after-all`](../../WritingAPass.md#ir-printing) to track the
evolution of the IR throughout the pipeline.
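+
+As a hedged sketch of the core of `dumpLLVMIR()` (illustrative only; the
+translation API has changed shape across MLIR versions, and the real listing
+also sets up an optimization pipeline):
+
+```c++
+#include <memory>
+
+#include "llvm/IR/Module.h"
+#include "llvm/Support/raw_ostream.h"
+#include "mlir/IR/Module.h"
+#include "mlir/Target/LLVMIR.h"
+
+int dumpLLVMIR(mlir::ModuleOp module) {
+  // Translate the module, which by now contains only the LLVM dialect,
+  // into an llvm::Module.
+  std::unique_ptr<llvm::Module> llvmModule =
+      mlir::translateModuleToLLVMIR(module);
+  if (!llvmModule) {
+    llvm::errs() << "Failed to emit LLVM IR\n";
+    return -1;
+  }
+  llvmModule->print(llvm::errs(), nullptr);
+  return 0;
+}
+```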
+The example code used throughout this section can be found in
+`test/Examples/Toy/Ch6/llvm-lowering.mlir`.
+
So far, we have worked with primitive data types. In the
[next chapter](Ch-7.md), we will add a composite `struct` type.
--- /dev/null
+# RUN: toyc-ch3 %s -emit=mlir 2>&1 | FileCheck %s
+
+# User defined generic function that operates on unknown shaped arguments
+def transpose_transpose(x) {
+ return transpose(transpose(x));
+}
+
+def main() {
+ var a<2, 3> = [[1, 2, 3], [4, 5, 6]];
+ var b = transpose_transpose(a);
+ print(b);
+}
+
+# CHECK-LABEL: func @transpose_transpose(
+# CHECK-SAME: [[VAL_0:%.*]]: tensor<*xf64>) -> tensor<*xf64>
+# CHECK: [[VAL_1:%.*]] = toy.transpose([[VAL_0]] : tensor<*xf64>) to tensor<*xf64>
+# CHECK-NEXT: [[VAL_2:%.*]] = toy.transpose([[VAL_1]] : tensor<*xf64>) to tensor<*xf64>
+# CHECK-NEXT: toy.return [[VAL_2]] : tensor<*xf64>
+
+# CHECK-LABEL: func @main()
+# CHECK-NEXT: [[VAL_3:%.*]] = toy.constant dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>
+# CHECK-NEXT: [[VAL_4:%.*]] = toy.reshape([[VAL_3]] : tensor<2x3xf64>) to tensor<2x3xf64>
+# CHECK-NEXT: [[VAL_5:%.*]] = toy.generic_call @transpose_transpose([[VAL_4]]) : (tensor<2x3xf64>) -> tensor<*xf64>
+# CHECK-NEXT: toy.print [[VAL_5]] : tensor<*xf64>
+# CHECK-NEXT: toy.return
\ No newline at end of file
--- /dev/null
+# RUN: toyc-ch3 %s -emit=mlir 2>&1 | FileCheck %s
+
+def main() {
+ var a<2,1> = [1, 2];
+ var b<2,1> = a;
+ var c<2,1> = b;
+ print(c);
+}
+
+# CHECK-LABEL: func @main()
+# CHECK-NEXT: [[VAL_0:%.*]] = toy.constant dense<[1.000000e+00, 2.000000e+00]> : tensor<2xf64>
+# CHECK-NEXT: [[VAL_1:%.*]] = toy.reshape([[VAL_0]] : tensor<2xf64>) to tensor<2x1xf64>
+# CHECK-NEXT: [[VAL_2:%.*]] = toy.reshape([[VAL_1]] : tensor<2x1xf64>) to tensor<2x1xf64>
+# CHECK-NEXT: [[VAL_3:%.*]] = toy.reshape([[VAL_2]] : tensor<2x1xf64>) to tensor<2x1xf64>
+# CHECK-NEXT: toy.print [[VAL_3]] : tensor<2x1xf64>
+# CHECK-NEXT: toy.return
\ No newline at end of file
--- /dev/null
+# RUN: toyc-ch4 %s -emit=mlir 2>&1 | FileCheck %s
+
+# User defined generic function that operates on unknown shaped arguments
+def transpose_transpose(x) {
+ return transpose(transpose(x));
+}
+
+def main() {
+ var a<2, 3> = [[1, 2, 3], [4, 5, 6]];
+ var b = transpose_transpose(a);
+ print(b);
+}
+
+# CHECK-LABEL: func @transpose_transpose(
+# CHECK-SAME: [[VAL_0:%.*]]: tensor<*xf64>) -> tensor<*xf64>
+# CHECK: [[VAL_1:%.*]] = toy.transpose([[VAL_0]] : tensor<*xf64>) to tensor<*xf64>
+# CHECK-NEXT: [[VAL_2:%.*]] = toy.transpose([[VAL_1]] : tensor<*xf64>) to tensor<*xf64>
+# CHECK-NEXT: toy.return [[VAL_2]] : tensor<*xf64>
+
+# CHECK-LABEL: func @main()
+# CHECK-NEXT: [[VAL_3:%.*]] = toy.constant dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>
+# CHECK-NEXT: [[VAL_4:%.*]] = toy.reshape([[VAL_3]] : tensor<2x3xf64>) to tensor<2x3xf64>
+# CHECK-NEXT: [[VAL_5:%.*]] = toy.generic_call @transpose_transpose([[VAL_4]]) : (tensor<2x3xf64>) -> tensor<*xf64>
+# CHECK-NEXT: toy.print [[VAL_5]] : tensor<*xf64>
+# CHECK-NEXT: toy.return
\ No newline at end of file
--- /dev/null
+# RUN: toyc-ch4 %s -emit=mlir 2>&1 | FileCheck %s
+
+def main() {
+ var a<2,1> = [1, 2];
+ var b<2,1> = a;
+ var c<2,1> = b;
+ print(c);
+}
+
+# CHECK-LABEL: func @main()
+# CHECK-NEXT: [[VAL_0:%.*]] = toy.constant dense<[1.000000e+00, 2.000000e+00]> : tensor<2xf64>
+# CHECK-NEXT: [[VAL_1:%.*]] = toy.reshape([[VAL_0]] : tensor<2xf64>) to tensor<2x1xf64>
+# CHECK-NEXT: [[VAL_2:%.*]] = toy.reshape([[VAL_1]] : tensor<2x1xf64>) to tensor<2x1xf64>
+# CHECK-NEXT: [[VAL_3:%.*]] = toy.reshape([[VAL_2]] : tensor<2x1xf64>) to tensor<2x1xf64>
+# CHECK-NEXT: toy.print [[VAL_3]] : tensor<2x1xf64>
+# CHECK-NEXT: toy.return
\ No newline at end of file
--- /dev/null
+# RUN: toyc-ch5 %s -emit=mlir 2>&1 | FileCheck %s
+
+# User defined generic function that operates on unknown shaped arguments
+def transpose_transpose(x) {
+ return transpose(transpose(x));
+}
+
+def main() {
+ var a<2, 3> = [[1, 2, 3], [4, 5, 6]];
+ var b = transpose_transpose(a);
+ print(b);
+}
+
+# CHECK-LABEL: func @transpose_transpose(
+# CHECK-SAME: [[VAL_0:%.*]]: tensor<*xf64>) -> tensor<*xf64>
+# CHECK: [[VAL_1:%.*]] = toy.transpose([[VAL_0]] : tensor<*xf64>) to tensor<*xf64>
+# CHECK-NEXT: [[VAL_2:%.*]] = toy.transpose([[VAL_1]] : tensor<*xf64>) to tensor<*xf64>
+# CHECK-NEXT: toy.return [[VAL_2]] : tensor<*xf64>
+
+# CHECK-LABEL: func @main()
+# CHECK-NEXT: [[VAL_3:%.*]] = toy.constant dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>
+# CHECK-NEXT: [[VAL_4:%.*]] = toy.reshape([[VAL_3]] : tensor<2x3xf64>) to tensor<2x3xf64>
+# CHECK-NEXT: [[VAL_5:%.*]] = toy.generic_call @transpose_transpose([[VAL_4]]) : (tensor<2x3xf64>) -> tensor<*xf64>
+# CHECK-NEXT: toy.print [[VAL_5]] : tensor<*xf64>
+# CHECK-NEXT: toy.return
\ No newline at end of file
--- /dev/null
+# RUN: toyc-ch5 %s -emit=mlir 2>&1 | FileCheck %s
+
+def main() {
+ var a<2,1> = [1, 2];
+ var b<2,1> = a;
+ var c<2,1> = b;
+ print(c);
+}
+
+# CHECK-LABEL: func @main()
+# CHECK-NEXT: [[VAL_0:%.*]] = toy.constant dense<[1.000000e+00, 2.000000e+00]> : tensor<2xf64>
+# CHECK-NEXT: [[VAL_1:%.*]] = toy.reshape([[VAL_0]] : tensor<2xf64>) to tensor<2x1xf64>
+# CHECK-NEXT: [[VAL_2:%.*]] = toy.reshape([[VAL_1]] : tensor<2x1xf64>) to tensor<2x1xf64>
+# CHECK-NEXT: [[VAL_3:%.*]] = toy.reshape([[VAL_2]] : tensor<2x1xf64>) to tensor<2x1xf64>
+# CHECK-NEXT: toy.print [[VAL_3]] : tensor<2x1xf64>
+# CHECK-NEXT: toy.return
\ No newline at end of file
--- /dev/null
+# RUN: toyc-ch6 %s -emit=mlir 2>&1 | FileCheck %s
+
+# User defined generic function that operates on unknown shaped arguments
+def transpose_transpose(x) {
+ return transpose(transpose(x));
+}
+
+def main() {
+ var a<2, 3> = [[1, 2, 3], [4, 5, 6]];
+ var b = transpose_transpose(a);
+ print(b);
+}
+
+# CHECK-LABEL: func @transpose_transpose(
+# CHECK-SAME: [[VAL_0:%.*]]: tensor<*xf64>) -> tensor<*xf64>
+# CHECK: [[VAL_1:%.*]] = toy.transpose([[VAL_0]] : tensor<*xf64>) to tensor<*xf64>
+# CHECK-NEXT: [[VAL_2:%.*]] = toy.transpose([[VAL_1]] : tensor<*xf64>) to tensor<*xf64>
+# CHECK-NEXT: toy.return [[VAL_2]] : tensor<*xf64>
+
+# CHECK-LABEL: func @main()
+# CHECK-NEXT: [[VAL_3:%.*]] = toy.constant dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>
+# CHECK-NEXT: [[VAL_4:%.*]] = toy.reshape([[VAL_3]] : tensor<2x3xf64>) to tensor<2x3xf64>
+# CHECK-NEXT: [[VAL_5:%.*]] = toy.generic_call @transpose_transpose([[VAL_4]]) : (tensor<2x3xf64>) -> tensor<*xf64>
+# CHECK-NEXT: toy.print [[VAL_5]] : tensor<*xf64>
+# CHECK-NEXT: toy.return
\ No newline at end of file
--- /dev/null
+# RUN: toyc-ch6 %s -emit=mlir 2>&1 | FileCheck %s
+
+def main() {
+ var a<2,1> = [1, 2];
+ var b<2,1> = a;
+ var c<2,1> = b;
+ print(c);
+}
+
+# CHECK-LABEL: func @main()
+# CHECK-NEXT: [[VAL_0:%.*]] = toy.constant dense<[1.000000e+00, 2.000000e+00]> : tensor<2xf64>
+# CHECK-NEXT: [[VAL_1:%.*]] = toy.reshape([[VAL_0]] : tensor<2xf64>) to tensor<2x1xf64>
+# CHECK-NEXT: [[VAL_2:%.*]] = toy.reshape([[VAL_1]] : tensor<2x1xf64>) to tensor<2x1xf64>
+# CHECK-NEXT: [[VAL_3:%.*]] = toy.reshape([[VAL_2]] : tensor<2x1xf64>) to tensor<2x1xf64>
+# CHECK-NEXT: toy.print [[VAL_3]] : tensor<2x1xf64>
+# CHECK-NEXT: toy.return
\ No newline at end of file
--- /dev/null
+# RUN: toyc-ch7 %s -emit=mlir 2>&1 | FileCheck %s
+
+# User defined generic function that operates on unknown shaped arguments
+def transpose_transpose(x) {
+ return transpose(transpose(x));
+}
+
+def main() {
+ var a<2, 3> = [[1, 2, 3], [4, 5, 6]];
+ var b = transpose_transpose(a);
+ print(b);
+}
+
+# CHECK-LABEL: func @transpose_transpose(
+# CHECK-SAME: [[VAL_0:%.*]]: tensor<*xf64>) -> tensor<*xf64>
+# CHECK: [[VAL_1:%.*]] = toy.transpose([[VAL_0]] : tensor<*xf64>) to tensor<*xf64>
+# CHECK-NEXT: [[VAL_2:%.*]] = toy.transpose([[VAL_1]] : tensor<*xf64>) to tensor<*xf64>
+# CHECK-NEXT: toy.return [[VAL_2]] : tensor<*xf64>
+
+# CHECK-LABEL: func @main()
+# CHECK-NEXT: [[VAL_3:%.*]] = toy.constant dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>
+# CHECK-NEXT: [[VAL_4:%.*]] = toy.reshape([[VAL_3]] : tensor<2x3xf64>) to tensor<2x3xf64>
+# CHECK-NEXT: [[VAL_5:%.*]] = toy.generic_call @transpose_transpose([[VAL_4]]) : (tensor<2x3xf64>) -> tensor<*xf64>
+# CHECK-NEXT: toy.print [[VAL_5]] : tensor<*xf64>
+# CHECK-NEXT: toy.return
\ No newline at end of file
--- /dev/null
+# RUN: toyc-ch7 %s -emit=mlir 2>&1 | FileCheck %s
+
+def main() {
+ var a<2,1> = [1, 2];
+ var b<2,1> = a;
+ var c<2,1> = b;
+ print(c);
+}
+
+# CHECK-LABEL: func @main()
+# CHECK-NEXT: [[VAL_0:%.*]] = toy.constant dense<[1.000000e+00, 2.000000e+00]> : tensor<2xf64>
+# CHECK-NEXT: [[VAL_1:%.*]] = toy.reshape([[VAL_0]] : tensor<2xf64>) to tensor<2x1xf64>
+# CHECK-NEXT: [[VAL_2:%.*]] = toy.reshape([[VAL_1]] : tensor<2x1xf64>) to tensor<2x1xf64>
+# CHECK-NEXT: [[VAL_3:%.*]] = toy.reshape([[VAL_2]] : tensor<2x1xf64>) to tensor<2x1xf64>
+# CHECK-NEXT: toy.print [[VAL_3]] : tensor<2x1xf64>
+# CHECK-NEXT: toy.return
\ No newline at end of file