NFC: Fix remaining usages of MulOp as matrix multiplication.

MulOp now represents an element-wise multiplication instead of a matrix multiplication.

PiperOrigin-RevId: 275886774
Authored by River Riddle on 2019-10-21 11:30:58 -07:00; committed by A. Unique TensorFlower
parent b74af4aa5c
commit 941a1c4332
19 changed files with 177 additions and 214 deletions
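To make the new contract concrete, here is a minimal illustrative sketch (not taken from this commit) of the element-wise semantics: `toy.mul` now expects its operands to share a shape and produces a result of that same shape, which is why `MulOp::inferShapes` in the hunks below can simply forward an operand type to the result.

```mlir
// Illustrative sketch only; the SSA names and shapes here are assumptions.
// With element-wise semantics, both operands of "toy.mul" share the same
// shape and the result reuses it, so shape inference just copies the type.
%2 = "toy.mul"(%0, %1) : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<2x3xf64>
```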


@@ -177,26 +177,7 @@ static void buildMulOp(mlir::Builder *builder, mlir::OperationState &state,
/// Infer the output shape of the MulOp, this is required by the shape inference
/// interface.
void MulOp::inferShapes() {
  auto lhs = getOperand(0)->getType().cast<RankedTensorType>();
  auto rhs = getOperand(1)->getType().cast<RankedTensorType>();
  auto lhsRank = lhs.getShape().size();
  auto rhsRank = rhs.getShape().size();
  if (lhsRank != rhsRank)
    return;
  SmallVector<int64_t, 2> dims;
  if (lhsRank == 1) {
    // dot product, result shape is <1>
    dims.push_back(1);
  } else if (lhsRank == 2) {
    dims.push_back(lhs.getShape()[0]);
    dims.push_back(rhs.getShape()[1]);
  } else {
    return;
  }
  getResult()->setType(RankedTensorType::get(dims, lhs.getElementType()));
}
void MulOp::inferShapes() { getResult()->setType(getOperand(0)->getType()); }
static mlir::LogicalResult verify(ReturnOp op) {
// We know that the parent operation is a function, because of the 'HasParent'


@@ -177,26 +177,7 @@ static void buildMulOp(mlir::Builder *builder, mlir::OperationState &state,
/// Infer the output shape of the MulOp, this is required by the shape inference
/// interface.
void MulOp::inferShapes() {
  auto lhs = getOperand(0)->getType().cast<RankedTensorType>();
  auto rhs = getOperand(1)->getType().cast<RankedTensorType>();
  auto lhsRank = lhs.getShape().size();
  auto rhsRank = rhs.getShape().size();
  if (lhsRank != rhsRank)
    return;
  SmallVector<int64_t, 2> dims;
  if (lhsRank == 1) {
    // dot product, result shape is <1>
    dims.push_back(1);
  } else if (lhsRank == 2) {
    dims.push_back(lhs.getShape()[0]);
    dims.push_back(rhs.getShape()[1]);
  } else {
    return;
  }
  getResult()->setType(RankedTensorType::get(dims, lhs.getElementType()));
}
void MulOp::inferShapes() { getResult()->setType(getOperand(0)->getType()); }
static mlir::LogicalResult verify(ReturnOp op) {
// We know that the parent operation is a function, because of the 'HasParent'


@@ -177,26 +177,7 @@ static void buildMulOp(mlir::Builder *builder, mlir::OperationState &state,
/// Infer the output shape of the MulOp, this is required by the shape inference
/// interface.
void MulOp::inferShapes() {
  auto lhs = getOperand(0)->getType().cast<RankedTensorType>();
  auto rhs = getOperand(1)->getType().cast<RankedTensorType>();
  auto lhsRank = lhs.getShape().size();
  auto rhsRank = rhs.getShape().size();
  if (lhsRank != rhsRank)
    return;
  SmallVector<int64_t, 2> dims;
  if (lhsRank == 1) {
    // dot product, result shape is <1>
    dims.push_back(1);
  } else if (lhsRank == 2) {
    dims.push_back(lhs.getShape()[0]);
    dims.push_back(rhs.getShape()[1]);
  } else {
    return;
  }
  getResult()->setType(RankedTensorType::get(dims, lhs.getElementType()));
}
void MulOp::inferShapes() { getResult()->setType(getOperand(0)->getType()); }
static mlir::LogicalResult verify(ReturnOp op) {
// We know that the parent operation is a function, because of the 'HasParent'


@@ -51,7 +51,7 @@ def main() {
# transpose() and print() are the only builtin, the following will transpose
# b and perform an element-wise multiplication before printing the result.
print(a * transpose(b));
print(transpose(a) * transpose(b));
}
```
@@ -65,7 +65,7 @@ the previous example by adding a user-defined function:
```Toy {.toy}
# User defined generic function that operates on unknown shaped arguments.
def multiply_transpose(a, b) {
return a * transpose(b);
return transpose(a) * transpose(b);
}
def main() {
@@ -102,10 +102,12 @@ Module:
Args: [a, b]
Block {
Return
BinOp: * @test/ast.toy:6:12
var: a @test/ast.toy:6:10
Call 'transpose' [ @test/ast.toy:6:14
var: b @test/ast.toy:6:24
BinOp: * @test/ast.toy:6:25
Call 'transpose' [ @test/ast.toy:6:10
var: a @test/ast.toy:6:20
]
Call 'transpose' [ @test/ast.toy:6:25
var: b @test/ast.toy:6:35
]
} // Block
Function


@@ -502,7 +502,7 @@ example:
```.toy
# User defined generic function that operates on unknown shaped arguments.
def multiply_transpose(a, b) {
return a * transpose(b);
return transpose(a) * transpose(b);
}
def main() {
@@ -518,23 +518,23 @@ Results in the following IR:
```mlir
module {
func @multiply_transpose(%arg0: tensor<*xf64>, %arg1: tensor<*xf64>) -> tensor<*xf64>
attributes {toy.generic} {
%0 = "toy.transpose"(%arg1) : (tensor<*xf64>) -> tensor<*xf64> loc("test/codegen.toy":3:14)
%1 = "toy.mul"(%arg0, %0) : (tensor<*xf64>, tensor<*xf64>) -> tensor<*xf64> loc("test/codegen.toy":3:14)
"toy.return"(%1) : (tensor<*xf64>) -> () loc("test/codegen.toy":3:3)
} loc("test/codegen.toy":2:1)
func @multiply_transpose(%arg0: tensor<*xf64>, %arg1: tensor<*xf64>) -> tensor<*xf64> {
%0 = "toy.transpose"(%arg0) : (tensor<*xf64>) -> tensor<*xf64> loc("test/codegen.toy":5:10)
%1 = "toy.transpose"(%arg1) : (tensor<*xf64>) -> tensor<*xf64> loc("test/codegen.toy":5:25)
%2 = "toy.mul"(%0, %1) : (tensor<*xf64>, tensor<*xf64>) -> tensor<*xf64> loc("test/codegen.toy":5:25)
"toy.return"(%2) : (tensor<*xf64>) -> () loc("test/codegen.toy":5:3)
} loc("test/codegen.toy":4:1)
func @main() {
%0 = "toy.constant"() {value = dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64> loc("test/codegen.toy":7:17)
%1 = "toy.reshape"(%0) : (tensor<2x3xf64>) -> tensor<2x3xf64> loc("test/codegen.toy":7:3)
%2 = "toy.constant"() {value = dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00, 6.000000e+00]> : tensor<6xf64>} : () -> tensor<6xf64> loc("test/codegen.toy":8:17)
%3 = "toy.reshape"(%2) : (tensor<6xf64>) -> tensor<2x3xf64> loc("test/codegen.toy":8:3)
%4 = "toy.generic_call"(%1, %3) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64> loc("test/codegen.toy":9:11)
%5 = "toy.generic_call"(%3, %1) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64> loc("test/codegen.toy":10:11)
"toy.print"(%5) : (tensor<*xf64>) -> () loc("test/codegen.toy":11:3)
"toy.return"() : () -> () loc("test/codegen.toy":6:1)
} loc("test/codegen.toy":6:1)
} loc("test/codegen.toy"0:0)
%0 = "toy.constant"() {value = dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64> loc("test/codegen.toy":9:17)
%1 = "toy.reshape"(%0) : (tensor<2x3xf64>) -> tensor<2x3xf64> loc("test/codegen.toy":9:3)
%2 = "toy.constant"() {value = dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00, 6.000000e+00]> : tensor<6xf64>} : () -> tensor<6xf64> loc("test/codegen.toy":10:17)
%3 = "toy.reshape"(%2) : (tensor<6xf64>) -> tensor<2x3xf64> loc("test/codegen.toy":10:3)
%4 = "toy.generic_call"(%1, %3) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64> loc("test/codegen.toy":11:11)
%5 = "toy.generic_call"(%3, %1) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64> loc("test/codegen.toy":12:11)
"toy.print"(%5) : (tensor<*xf64>) -> () loc("test/codegen.toy":13:3)
"toy.return"() : () -> () loc("test/codegen.toy":8:1)
} loc("test/codegen.toy":8:1)
} loc("test/codegen.toy":0:0)
```
You can build `toyc-ch2` and try yourself: `toyc-ch2 test/codegen.toy -emit=mlir


@@ -2,7 +2,7 @@
# User defined generic function that operates on unknown shaped arguments.
def multiply_transpose(a, b) {
return a * transpose(b);
return transpose(a) * transpose(b);
}
def main() {
@@ -35,10 +35,12 @@ def main() {
# CHECK-NEXT: Params: [a, b]
# CHECK-NEXT: Block {
# CHECK-NEXT: Return
# CHECK-NEXT: BinOp: * @{{.*}}ast.toy:5:14
# CHECK-NEXT: var: a @{{.*}}ast.toy:5:10
# CHECK-NEXT: Call 'transpose' [ @{{.*}}ast.toy:5:14
# CHECK-NEXT: var: b @{{.*}}ast.toy:5:24
# CHECK-NEXT: BinOp: * @{{.*}}ast.toy:5:25
# CHECK-NEXT: Call 'transpose' [ @{{.*}}ast.toy:5:10
# CHECK-NEXT: var: a @{{.*}}ast.toy:5:20
# CHECK-NEXT: ]
# CHECK-NEXT: Call 'transpose' [ @{{.*}}ast.toy:5:25
# CHECK-NEXT: var: b @{{.*}}ast.toy:5:35
# CHECK-NEXT: ]
# CHECK-NEXT: } // Block
# CHECK-NEXT: Function


@@ -2,7 +2,7 @@
# User defined generic function that operates on unknown shaped arguments.
def multiply_transpose(a, b) {
return a * transpose(b);
return transpose(a) * transpose(b);
}
def main() {
@@ -35,10 +35,12 @@ def main() {
# CHECK-NEXT: Params: [a, b]
# CHECK-NEXT: Block {
# CHECK-NEXT: Return
# CHECK-NEXT: BinOp: * @{{.*}}ast.toy:5:14
# CHECK-NEXT: var: a @{{.*}}ast.toy:5:10
# CHECK-NEXT: Call 'transpose' [ @{{.*}}ast.toy:5:14
# CHECK-NEXT: var: b @{{.*}}ast.toy:5:24
# CHECK-NEXT: BinOp: * @{{.*}}ast.toy:5:25
# CHECK-NEXT: Call 'transpose' [ @{{.*}}ast.toy:5:10
# CHECK-NEXT: var: a @{{.*}}ast.toy:5:20
# CHECK-NEXT: ]
# CHECK-NEXT: Call 'transpose' [ @{{.*}}ast.toy:5:25
# CHECK-NEXT: var: b @{{.*}}ast.toy:5:35
# CHECK-NEXT: ]
# CHECK-NEXT: } // Block
# CHECK-NEXT: Function


@@ -2,7 +2,7 @@
# User defined generic function that operates on unknown shaped arguments
def multiply_transpose(a, b) {
return a * transpose(b);
return transpose(a) * transpose(b);
}
def main() {
@@ -13,19 +13,19 @@ def main() {
print(d);
}
# CHECK-LABEL: func @multiply_transpose(
# CHECK-SAME: [[VAL_0:%.*]]: tensor<*xf64>, [[VAL_1:%.*]]: tensor<*xf64>)
# CHECK-NEXT: attributes {toy.generic} {
# CHECK-NEXT: [[VAL_2:%.*]] = "toy.transpose"([[VAL_1]]) : (tensor<*xf64>) -> tensor<*xf64>
# CHECK-NEXT: [[VAL_3:%.*]] = "toy.mul"([[VAL_0]], [[VAL_2]]) : (tensor<*xf64>, tensor<*xf64>) -> tensor<*xf64>
# CHECK-NEXT: "toy.return"([[VAL_3]]) : (tensor<*xf64>) -> ()
# CHECK-LABEL: func @multiply_transpose(
# CHECK-SAME: [[VAL_0:%.*]]: tensor<*xf64>, [[VAL_1:%.*]]: tensor<*xf64>) -> tensor<*xf64>
# CHECK: [[VAL_2:%.*]] = "toy.transpose"([[VAL_0]]) : (tensor<*xf64>) -> tensor<*xf64>
# CHECK-NEXT: [[VAL_3:%.*]] = "toy.transpose"([[VAL_1]]) : (tensor<*xf64>) -> tensor<*xf64>
# CHECK-NEXT: [[VAL_4:%.*]] = "toy.mul"([[VAL_2]], [[VAL_3]]) : (tensor<*xf64>, tensor<*xf64>) -> tensor<*xf64>
# CHECK-NEXT: "toy.return"([[VAL_4]]) : (tensor<*xf64>) -> ()
# CHECK-LABEL: func @main() {
# CHECK-NEXT: [[VAL_4:%.*]] = "toy.constant"() {value = dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
# CHECK-NEXT: [[VAL_5:%.*]] = "toy.reshape"([[VAL_4]]) : (tensor<2x3xf64>) -> tensor<2x3xf64>
# CHECK-NEXT: [[VAL_6:%.*]] = "toy.constant"() {value = dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00, 6.000000e+00]> : tensor<6xf64>} : () -> tensor<6xf64>
# CHECK-NEXT: [[VAL_7:%.*]] = "toy.reshape"([[VAL_6]]) : (tensor<6xf64>) -> tensor<2x3xf64>
# CHECK-NEXT: [[VAL_8:%.*]] = "toy.generic_call"([[VAL_5]], [[VAL_7]]) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
# CHECK-NEXT: [[VAL_9:%.*]] = "toy.generic_call"([[VAL_7]], [[VAL_5]]) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
# CHECK-NEXT: "toy.print"([[VAL_9]]) : (tensor<*xf64>) -> ()
# CHECK-NEXT: "toy.return"() : () -> ()
# CHECK-LABEL: func @main()
# CHECK-NEXT: [[VAL_5:%.*]] = "toy.constant"() {value = dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
# CHECK-NEXT: [[VAL_6:%.*]] = "toy.reshape"([[VAL_5]]) : (tensor<2x3xf64>) -> tensor<2x3xf64>
# CHECK-NEXT: [[VAL_7:%.*]] = "toy.constant"() {value = dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00, 6.000000e+00]> : tensor<6xf64>} : () -> tensor<6xf64>
# CHECK-NEXT: [[VAL_8:%.*]] = "toy.reshape"([[VAL_7]]) : (tensor<6xf64>) -> tensor<2x3xf64>
# CHECK-NEXT: [[VAL_9:%.*]] = "toy.generic_call"([[VAL_6]], [[VAL_8]]) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
# CHECK-NEXT: [[VAL_10:%.*]] = "toy.generic_call"([[VAL_8]], [[VAL_6]]) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
# CHECK-NEXT: "toy.print"([[VAL_10]]) : (tensor<*xf64>) -> ()
# CHECK-NEXT: "toy.return"() : () -> ()


@@ -2,7 +2,7 @@
# User defined generic function that operates on unknown shaped arguments.
def multiply_transpose(a, b) {
return a * transpose(b);
return transpose(a) * transpose(b);
}
def main() {
@@ -35,10 +35,12 @@ def main() {
# CHECK-NEXT: Params: [a, b]
# CHECK-NEXT: Block {
# CHECK-NEXT: Return
# CHECK-NEXT: BinOp: * @{{.*}}ast.toy:5:14
# CHECK-NEXT: var: a @{{.*}}ast.toy:5:10
# CHECK-NEXT: Call 'transpose' [ @{{.*}}ast.toy:5:14
# CHECK-NEXT: var: b @{{.*}}ast.toy:5:24
# CHECK-NEXT: BinOp: * @{{.*}}ast.toy:5:25
# CHECK-NEXT: Call 'transpose' [ @{{.*}}ast.toy:5:10
# CHECK-NEXT: var: a @{{.*}}ast.toy:5:20
# CHECK-NEXT: ]
# CHECK-NEXT: Call 'transpose' [ @{{.*}}ast.toy:5:25
# CHECK-NEXT: var: b @{{.*}}ast.toy:5:35
# CHECK-NEXT: ]
# CHECK-NEXT: } // Block
# CHECK-NEXT: Function


@@ -2,7 +2,7 @@
# User defined generic function that operates on unknown shaped arguments
def multiply_transpose(a, b) {
return a * transpose(b);
return transpose(a) * transpose(b);
}
def main() {
@@ -13,19 +13,19 @@ def main() {
print(d);
}
# CHECK-LABEL: func @multiply_transpose(
# CHECK-SAME: [[VAL_0:%.*]]: tensor<*xf64>, [[VAL_1:%.*]]: tensor<*xf64>)
# CHECK-NEXT: attributes {toy.generic} {
# CHECK-NEXT: [[VAL_2:%.*]] = "toy.transpose"([[VAL_1]]) : (tensor<*xf64>) -> tensor<*xf64>
# CHECK-NEXT: [[VAL_3:%.*]] = "toy.mul"([[VAL_0]], [[VAL_2]]) : (tensor<*xf64>, tensor<*xf64>) -> tensor<*xf64>
# CHECK-NEXT: "toy.return"([[VAL_3]]) : (tensor<*xf64>) -> ()
# CHECK-LABEL: func @multiply_transpose(
# CHECK-SAME: [[VAL_0:%.*]]: tensor<*xf64>, [[VAL_1:%.*]]: tensor<*xf64>) -> tensor<*xf64>
# CHECK: [[VAL_2:%.*]] = "toy.transpose"([[VAL_0]]) : (tensor<*xf64>) -> tensor<*xf64>
# CHECK-NEXT: [[VAL_3:%.*]] = "toy.transpose"([[VAL_1]]) : (tensor<*xf64>) -> tensor<*xf64>
# CHECK-NEXT: [[VAL_4:%.*]] = "toy.mul"([[VAL_2]], [[VAL_3]]) : (tensor<*xf64>, tensor<*xf64>) -> tensor<*xf64>
# CHECK-NEXT: "toy.return"([[VAL_4]]) : (tensor<*xf64>) -> ()
# CHECK-LABEL: func @main() {
# CHECK-NEXT: [[VAL_4:%.*]] = "toy.constant"() {value = dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
# CHECK-NEXT: [[VAL_5:%.*]] = "toy.reshape"([[VAL_4]]) : (tensor<2x3xf64>) -> tensor<2x3xf64>
# CHECK-NEXT: [[VAL_6:%.*]] = "toy.constant"() {value = dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00, 6.000000e+00]> : tensor<6xf64>} : () -> tensor<6xf64>
# CHECK-NEXT: [[VAL_7:%.*]] = "toy.reshape"([[VAL_6]]) : (tensor<6xf64>) -> tensor<2x3xf64>
# CHECK-NEXT: [[VAL_8:%.*]] = "toy.generic_call"([[VAL_5]], [[VAL_7]]) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
# CHECK-NEXT: [[VAL_9:%.*]] = "toy.generic_call"([[VAL_7]], [[VAL_5]]) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
# CHECK-NEXT: "toy.print"([[VAL_9]]) : (tensor<*xf64>) -> ()
# CHECK-NEXT: "toy.return"() : () -> ()
# CHECK-LABEL: func @main()
# CHECK-NEXT: [[VAL_5:%.*]] = "toy.constant"() {value = dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
# CHECK-NEXT: [[VAL_6:%.*]] = "toy.reshape"([[VAL_5]]) : (tensor<2x3xf64>) -> tensor<2x3xf64>
# CHECK-NEXT: [[VAL_7:%.*]] = "toy.constant"() {value = dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00, 6.000000e+00]> : tensor<6xf64>} : () -> tensor<6xf64>
# CHECK-NEXT: [[VAL_8:%.*]] = "toy.reshape"([[VAL_7]]) : (tensor<6xf64>) -> tensor<2x3xf64>
# CHECK-NEXT: [[VAL_9:%.*]] = "toy.generic_call"([[VAL_6]], [[VAL_8]]) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
# CHECK-NEXT: [[VAL_10:%.*]] = "toy.generic_call"([[VAL_8]], [[VAL_6]]) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
# CHECK-NEXT: "toy.print"([[VAL_10]]) : (tensor<*xf64>) -> ()
# CHECK-NEXT: "toy.return"() : () -> ()


@@ -2,7 +2,7 @@
# User defined generic function that operates on unknown shaped arguments.
def multiply_transpose(a, b) {
return a * transpose(b);
return transpose(a) * transpose(b);
}
def main() {
@@ -35,10 +35,12 @@ def main() {
# CHECK-NEXT: Params: [a, b]
# CHECK-NEXT: Block {
# CHECK-NEXT: Return
# CHECK-NEXT: BinOp: * @{{.*}}ast.toy:5:14
# CHECK-NEXT: var: a @{{.*}}ast.toy:5:10
# CHECK-NEXT: Call 'transpose' [ @{{.*}}ast.toy:5:14
# CHECK-NEXT: var: b @{{.*}}ast.toy:5:24
# CHECK-NEXT: BinOp: * @{{.*}}ast.toy:5:25
# CHECK-NEXT: Call 'transpose' [ @{{.*}}ast.toy:5:10
# CHECK-NEXT: var: a @{{.*}}ast.toy:5:20
# CHECK-NEXT: ]
# CHECK-NEXT: Call 'transpose' [ @{{.*}}ast.toy:5:25
# CHECK-NEXT: var: b @{{.*}}ast.toy:5:35
# CHECK-NEXT: ]
# CHECK-NEXT: } // Block
# CHECK-NEXT: Function


@@ -2,7 +2,7 @@
# User defined generic function that operates on unknown shaped arguments
def multiply_transpose(a, b) {
return a * transpose(b);
return transpose(a) * transpose(b);
}
def main() {
@@ -13,19 +13,19 @@ def main() {
print(d);
}
# CHECK-LABEL: func @multiply_transpose(
# CHECK-SAME: [[VAL_0:%.*]]: tensor<*xf64>, [[VAL_1:%.*]]: tensor<*xf64>)
# CHECK-NEXT: attributes {toy.generic} {
# CHECK-NEXT: [[VAL_2:%.*]] = "toy.transpose"([[VAL_1]]) : (tensor<*xf64>) -> tensor<*xf64>
# CHECK-NEXT: [[VAL_3:%.*]] = "toy.mul"([[VAL_0]], [[VAL_2]]) : (tensor<*xf64>, tensor<*xf64>) -> tensor<*xf64>
# CHECK-NEXT: "toy.return"([[VAL_3]]) : (tensor<*xf64>) -> ()
# CHECK-LABEL: func @multiply_transpose(
# CHECK-SAME: [[VAL_0:%.*]]: tensor<*xf64>, [[VAL_1:%.*]]: tensor<*xf64>) -> tensor<*xf64>
# CHECK: [[VAL_2:%.*]] = "toy.transpose"([[VAL_0]]) : (tensor<*xf64>) -> tensor<*xf64>
# CHECK-NEXT: [[VAL_3:%.*]] = "toy.transpose"([[VAL_1]]) : (tensor<*xf64>) -> tensor<*xf64>
# CHECK-NEXT: [[VAL_4:%.*]] = "toy.mul"([[VAL_2]], [[VAL_3]]) : (tensor<*xf64>, tensor<*xf64>) -> tensor<*xf64>
# CHECK-NEXT: "toy.return"([[VAL_4]]) : (tensor<*xf64>) -> ()
# CHECK-LABEL: func @main() {
# CHECK-NEXT: [[VAL_4:%.*]] = "toy.constant"() {value = dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
# CHECK-NEXT: [[VAL_5:%.*]] = "toy.reshape"([[VAL_4]]) : (tensor<2x3xf64>) -> tensor<2x3xf64>
# CHECK-NEXT: [[VAL_6:%.*]] = "toy.constant"() {value = dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00, 6.000000e+00]> : tensor<6xf64>} : () -> tensor<6xf64>
# CHECK-NEXT: [[VAL_7:%.*]] = "toy.reshape"([[VAL_6]]) : (tensor<6xf64>) -> tensor<2x3xf64>
# CHECK-NEXT: [[VAL_8:%.*]] = "toy.generic_call"([[VAL_5]], [[VAL_7]]) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
# CHECK-NEXT: [[VAL_9:%.*]] = "toy.generic_call"([[VAL_7]], [[VAL_5]]) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
# CHECK-NEXT: "toy.print"([[VAL_9]]) : (tensor<*xf64>) -> ()
# CHECK-NEXT: "toy.return"() : () -> ()
# CHECK-LABEL: func @main()
# CHECK-NEXT: [[VAL_5:%.*]] = "toy.constant"() {value = dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
# CHECK-NEXT: [[VAL_6:%.*]] = "toy.reshape"([[VAL_5]]) : (tensor<2x3xf64>) -> tensor<2x3xf64>
# CHECK-NEXT: [[VAL_7:%.*]] = "toy.constant"() {value = dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00, 6.000000e+00]> : tensor<6xf64>} : () -> tensor<6xf64>
# CHECK-NEXT: [[VAL_8:%.*]] = "toy.reshape"([[VAL_7]]) : (tensor<6xf64>) -> tensor<2x3xf64>
# CHECK-NEXT: [[VAL_9:%.*]] = "toy.generic_call"([[VAL_6]], [[VAL_8]]) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
# CHECK-NEXT: [[VAL_10:%.*]] = "toy.generic_call"([[VAL_8]], [[VAL_6]]) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
# CHECK-NEXT: "toy.print"([[VAL_10]]) : (tensor<*xf64>) -> ()
# CHECK-NEXT: "toy.return"() : () -> ()


@@ -3,9 +3,10 @@
// Check the result of inlining+shape inference on an input module.
func @multiply_transpose(%arg0: tensor<*xf64>, %arg1: tensor<*xf64>) -> tensor<*xf64> {
%0 = "toy.transpose"(%arg1) : (tensor<*xf64>) -> tensor<*xf64>
%1 = "toy.mul"(%arg0, %0) : (tensor<*xf64>, tensor<*xf64>) -> tensor<*xf64>
"toy.return"(%1) : (tensor<*xf64>) -> ()
%0 = "toy.transpose"(%arg0) : (tensor<*xf64>) -> tensor<*xf64>
%1 = "toy.transpose"(%arg1) : (tensor<*xf64>) -> tensor<*xf64>
%2 = "toy.mul"(%0, %1) : (tensor<*xf64>, tensor<*xf64>) -> tensor<*xf64>
"toy.return"(%2) : (tensor<*xf64>) -> ()
}
func @main() {
%0 = "toy.constant"() {value = dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
@@ -21,10 +22,11 @@ func @main() {
// CHECK-NOT: func @multiply_transpose
// CHECK-NOT: tensor<*xf64>
// CHECK-LABEL: func @main() {
// CHECK-LABEL: func @main()
// CHECK: [[VAL_0:%.*]] = "toy.constant"() {value = dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
// CHECK: [[VAL_1:%.*]] = "toy.constant"() {value = dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
// CHECK: [[VAL_2:%.*]] = "toy.transpose"([[VAL_0]]) : (tensor<2x3xf64>) -> tensor<3x2xf64>
// CHECK: [[VAL_3:%.*]] = "toy.mul"([[VAL_1]], [[VAL_2]]) : (tensor<2x3xf64>, tensor<3x2xf64>) -> tensor<2x2xf64>
// CHECK: "toy.print"([[VAL_3]]) : (tensor<2x2xf64>) -> ()
// CHECK: [[VAL_2:%.*]] = "toy.transpose"([[VAL_1]]) : (tensor<2x3xf64>) -> tensor<3x2xf64>
// CHECK: [[VAL_3:%.*]] = "toy.transpose"([[VAL_0]]) : (tensor<2x3xf64>) -> tensor<3x2xf64>
// CHECK: [[VAL_4:%.*]] = "toy.mul"([[VAL_2]], [[VAL_3]]) : (tensor<3x2xf64>, tensor<3x2xf64>) -> tensor<3x2xf64>
// CHECK: "toy.print"([[VAL_4]]) : (tensor<3x2xf64>) -> ()
// CHECK: "toy.return"() : () -> ()


@@ -2,7 +2,7 @@
# User defined generic function that operates on unknown shaped arguments.
def multiply_transpose(a, b) {
return a * transpose(b);
return transpose(a) * transpose(b);
}
def main() {
@@ -35,10 +35,12 @@ def main() {
# CHECK-NEXT: Params: [a, b]
# CHECK-NEXT: Block {
# CHECK-NEXT: Return
# CHECK-NEXT: BinOp: * @{{.*}}ast.toy:5:14
# CHECK-NEXT: var: a @{{.*}}ast.toy:5:10
# CHECK-NEXT: Call 'transpose' [ @{{.*}}ast.toy:5:14
# CHECK-NEXT: var: b @{{.*}}ast.toy:5:24
# CHECK-NEXT: BinOp: * @{{.*}}ast.toy:5:25
# CHECK-NEXT: Call 'transpose' [ @{{.*}}ast.toy:5:10
# CHECK-NEXT: var: a @{{.*}}ast.toy:5:20
# CHECK-NEXT: ]
# CHECK-NEXT: Call 'transpose' [ @{{.*}}ast.toy:5:25
# CHECK-NEXT: var: b @{{.*}}ast.toy:5:35
# CHECK-NEXT: ]
# CHECK-NEXT: } // Block
# CHECK-NEXT: Function


@@ -2,7 +2,7 @@
# User defined generic function that operates on unknown shaped arguments
def multiply_transpose(a, b) {
return a * transpose(b);
return transpose(a) * transpose(b);
}
def main() {
@@ -13,19 +13,19 @@ def main() {
print(d);
}
# CHECK-LABEL: func @multiply_transpose(
# CHECK-SAME: [[VAL_0:%.*]]: tensor<*xf64>, [[VAL_1:%.*]]: tensor<*xf64>)
# CHECK-NEXT: attributes {toy.generic} {
# CHECK-NEXT: [[VAL_2:%.*]] = "toy.transpose"([[VAL_1]]) : (tensor<*xf64>) -> tensor<*xf64>
# CHECK-NEXT: [[VAL_3:%.*]] = "toy.mul"([[VAL_0]], [[VAL_2]]) : (tensor<*xf64>, tensor<*xf64>) -> tensor<*xf64>
# CHECK-NEXT: "toy.return"([[VAL_3]]) : (tensor<*xf64>) -> ()
# CHECK-LABEL: func @multiply_transpose(
# CHECK-SAME: [[VAL_0:%.*]]: tensor<*xf64>, [[VAL_1:%.*]]: tensor<*xf64>) -> tensor<*xf64>
# CHECK: [[VAL_2:%.*]] = "toy.transpose"([[VAL_0]]) : (tensor<*xf64>) -> tensor<*xf64>
# CHECK-NEXT: [[VAL_3:%.*]] = "toy.transpose"([[VAL_1]]) : (tensor<*xf64>) -> tensor<*xf64>
# CHECK-NEXT: [[VAL_4:%.*]] = "toy.mul"([[VAL_2]], [[VAL_3]]) : (tensor<*xf64>, tensor<*xf64>) -> tensor<*xf64>
# CHECK-NEXT: "toy.return"([[VAL_4]]) : (tensor<*xf64>) -> ()
# CHECK-LABEL: func @main() {
# CHECK-NEXT: [[VAL_4:%.*]] = "toy.constant"() {value = dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
# CHECK-NEXT: [[VAL_5:%.*]] = "toy.reshape"([[VAL_4]]) : (tensor<2x3xf64>) -> tensor<2x3xf64>
# CHECK-NEXT: [[VAL_6:%.*]] = "toy.constant"() {value = dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00, 6.000000e+00]> : tensor<6xf64>} : () -> tensor<6xf64>
# CHECK-NEXT: [[VAL_7:%.*]] = "toy.reshape"([[VAL_6]]) : (tensor<6xf64>) -> tensor<2x3xf64>
# CHECK-NEXT: [[VAL_8:%.*]] = "toy.generic_call"([[VAL_5]], [[VAL_7]]) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
# CHECK-NEXT: [[VAL_9:%.*]] = "toy.generic_call"([[VAL_7]], [[VAL_5]]) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
# CHECK-NEXT: "toy.print"([[VAL_9]]) : (tensor<*xf64>) -> ()
# CHECK-NEXT: "toy.return"() : () -> ()
# CHECK-LABEL: func @main()
# CHECK-NEXT: [[VAL_5:%.*]] = "toy.constant"() {value = dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
# CHECK-NEXT: [[VAL_6:%.*]] = "toy.reshape"([[VAL_5]]) : (tensor<2x3xf64>) -> tensor<2x3xf64>
# CHECK-NEXT: [[VAL_7:%.*]] = "toy.constant"() {value = dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00, 6.000000e+00]> : tensor<6xf64>} : () -> tensor<6xf64>
# CHECK-NEXT: [[VAL_8:%.*]] = "toy.reshape"([[VAL_7]]) : (tensor<6xf64>) -> tensor<2x3xf64>
# CHECK-NEXT: [[VAL_9:%.*]] = "toy.generic_call"([[VAL_6]], [[VAL_8]]) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
# CHECK-NEXT: [[VAL_10:%.*]] = "toy.generic_call"([[VAL_8]], [[VAL_6]]) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
# CHECK-NEXT: "toy.print"([[VAL_10]]) : (tensor<*xf64>) -> ()
# CHECK-NEXT: "toy.return"() : () -> ()


@@ -3,9 +3,10 @@
// Check the result of inlining+shape inference on an input module.
func @multiply_transpose(%arg0: tensor<*xf64>, %arg1: tensor<*xf64>) -> tensor<*xf64> {
%0 = "toy.transpose"(%arg1) : (tensor<*xf64>) -> tensor<*xf64>
%1 = "toy.mul"(%arg0, %0) : (tensor<*xf64>, tensor<*xf64>) -> tensor<*xf64>
"toy.return"(%1) : (tensor<*xf64>) -> ()
%0 = "toy.transpose"(%arg0) : (tensor<*xf64>) -> tensor<*xf64>
%1 = "toy.transpose"(%arg1) : (tensor<*xf64>) -> tensor<*xf64>
%2 = "toy.mul"(%0, %1) : (tensor<*xf64>, tensor<*xf64>) -> tensor<*xf64>
"toy.return"(%2) : (tensor<*xf64>) -> ()
}
func @main() {
%0 = "toy.constant"() {value = dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
@@ -21,10 +22,11 @@ func @main() {
// CHECK-NOT: func @multiply_transpose
// CHECK-NOT: tensor<*xf64>
// CHECK-LABEL: func @main() {
// CHECK-LABEL: func @main()
// CHECK: [[VAL_0:%.*]] = "toy.constant"() {value = dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
// CHECK: [[VAL_1:%.*]] = "toy.constant"() {value = dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
// CHECK: [[VAL_2:%.*]] = "toy.transpose"([[VAL_0]]) : (tensor<2x3xf64>) -> tensor<3x2xf64>
// CHECK: [[VAL_3:%.*]] = "toy.mul"([[VAL_1]], [[VAL_2]]) : (tensor<2x3xf64>, tensor<3x2xf64>) -> tensor<2x2xf64>
// CHECK: "toy.print"([[VAL_3]]) : (tensor<2x2xf64>) -> ()
// CHECK: [[VAL_2:%.*]] = "toy.transpose"([[VAL_1]]) : (tensor<2x3xf64>) -> tensor<3x2xf64>
// CHECK: [[VAL_3:%.*]] = "toy.transpose"([[VAL_0]]) : (tensor<2x3xf64>) -> tensor<3x2xf64>
// CHECK: [[VAL_4:%.*]] = "toy.mul"([[VAL_2]], [[VAL_3]]) : (tensor<3x2xf64>, tensor<3x2xf64>) -> tensor<3x2xf64>
// CHECK: "toy.print"([[VAL_4]]) : (tensor<3x2xf64>) -> ()
// CHECK: "toy.return"() : () -> ()


@@ -2,7 +2,7 @@
# User defined generic function that operates on unknown shaped arguments.
def multiply_transpose(a, b) {
return a * transpose(b);
return transpose(a) * transpose(b);
}
def main() {
@@ -35,10 +35,12 @@ def main() {
# CHECK-NEXT: Params: [a, b]
# CHECK-NEXT: Block {
# CHECK-NEXT: Return
# CHECK-NEXT: BinOp: * @{{.*}}ast.toy:5:14
# CHECK-NEXT: var: a @{{.*}}ast.toy:5:10
# CHECK-NEXT: Call 'transpose' [ @{{.*}}ast.toy:5:14
# CHECK-NEXT: var: b @{{.*}}ast.toy:5:24
# CHECK-NEXT: BinOp: * @{{.*}}ast.toy:5:25
# CHECK-NEXT: Call 'transpose' [ @{{.*}}ast.toy:5:10
# CHECK-NEXT: var: a @{{.*}}ast.toy:5:20
# CHECK-NEXT: ]
# CHECK-NEXT: Call 'transpose' [ @{{.*}}ast.toy:5:25
# CHECK-NEXT: var: b @{{.*}}ast.toy:5:35
# CHECK-NEXT: ]
# CHECK-NEXT: } // Block
# CHECK-NEXT: Function


@@ -2,7 +2,7 @@
# User defined generic function that operates on unknown shaped arguments
def multiply_transpose(a, b) {
return a * transpose(b);
return transpose(a) * transpose(b);
}
def main() {
@@ -13,19 +13,19 @@ def main() {
print(d);
}
# CHECK-LABEL: func @multiply_transpose(
# CHECK-SAME: [[VAL_0:%.*]]: tensor<*xf64>, [[VAL_1:%.*]]: tensor<*xf64>)
# CHECK-NEXT: attributes {toy.generic} {
# CHECK-NEXT: [[VAL_2:%.*]] = "toy.transpose"([[VAL_1]]) : (tensor<*xf64>) -> tensor<*xf64>
# CHECK-NEXT: [[VAL_3:%.*]] = "toy.mul"([[VAL_0]], [[VAL_2]]) : (tensor<*xf64>, tensor<*xf64>) -> tensor<*xf64>
# CHECK-NEXT: "toy.return"([[VAL_3]]) : (tensor<*xf64>) -> ()
# CHECK-LABEL: func @multiply_transpose(
# CHECK-SAME: [[VAL_0:%.*]]: tensor<*xf64>, [[VAL_1:%.*]]: tensor<*xf64>) -> tensor<*xf64>
# CHECK: [[VAL_2:%.*]] = "toy.transpose"([[VAL_0]]) : (tensor<*xf64>) -> tensor<*xf64>
# CHECK-NEXT: [[VAL_3:%.*]] = "toy.transpose"([[VAL_1]]) : (tensor<*xf64>) -> tensor<*xf64>
# CHECK-NEXT: [[VAL_4:%.*]] = "toy.mul"([[VAL_2]], [[VAL_3]]) : (tensor<*xf64>, tensor<*xf64>) -> tensor<*xf64>
# CHECK-NEXT: "toy.return"([[VAL_4]]) : (tensor<*xf64>) -> ()
# CHECK-LABEL: func @main() {
# CHECK-NEXT: [[VAL_4:%.*]] = "toy.constant"() {value = dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
# CHECK-NEXT: [[VAL_5:%.*]] = "toy.reshape"([[VAL_4]]) : (tensor<2x3xf64>) -> tensor<2x3xf64>
# CHECK-NEXT: [[VAL_6:%.*]] = "toy.constant"() {value = dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00, 6.000000e+00]> : tensor<6xf64>} : () -> tensor<6xf64>
# CHECK-NEXT: [[VAL_7:%.*]] = "toy.reshape"([[VAL_6]]) : (tensor<6xf64>) -> tensor<2x3xf64>
# CHECK-NEXT: [[VAL_8:%.*]] = "toy.generic_call"([[VAL_5]], [[VAL_7]]) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
# CHECK-NEXT: [[VAL_9:%.*]] = "toy.generic_call"([[VAL_7]], [[VAL_5]]) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
# CHECK-NEXT: "toy.print"([[VAL_9]]) : (tensor<*xf64>) -> ()
# CHECK-NEXT: "toy.return"() : () -> ()
# CHECK-LABEL: func @main()
# CHECK-NEXT: [[VAL_5:%.*]] = "toy.constant"() {value = dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
# CHECK-NEXT: [[VAL_6:%.*]] = "toy.reshape"([[VAL_5]]) : (tensor<2x3xf64>) -> tensor<2x3xf64>
# CHECK-NEXT: [[VAL_7:%.*]] = "toy.constant"() {value = dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00, 6.000000e+00]> : tensor<6xf64>} : () -> tensor<6xf64>
# CHECK-NEXT: [[VAL_8:%.*]] = "toy.reshape"([[VAL_7]]) : (tensor<6xf64>) -> tensor<2x3xf64>
# CHECK-NEXT: [[VAL_9:%.*]] = "toy.generic_call"([[VAL_6]], [[VAL_8]]) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
# CHECK-NEXT: [[VAL_10:%.*]] = "toy.generic_call"([[VAL_8]], [[VAL_6]]) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
# CHECK-NEXT: "toy.print"([[VAL_10]]) : (tensor<*xf64>) -> ()
# CHECK-NEXT: "toy.return"() : () -> ()


@@ -3,9 +3,10 @@
// Check the result of inlining+shape inference on an input module.
func @multiply_transpose(%arg0: tensor<*xf64>, %arg1: tensor<*xf64>) -> tensor<*xf64> {
%0 = "toy.transpose"(%arg1) : (tensor<*xf64>) -> tensor<*xf64>
%1 = "toy.mul"(%arg0, %0) : (tensor<*xf64>, tensor<*xf64>) -> tensor<*xf64>
"toy.return"(%1) : (tensor<*xf64>) -> ()
%0 = "toy.transpose"(%arg0) : (tensor<*xf64>) -> tensor<*xf64>
%1 = "toy.transpose"(%arg1) : (tensor<*xf64>) -> tensor<*xf64>
%2 = "toy.mul"(%0, %1) : (tensor<*xf64>, tensor<*xf64>) -> tensor<*xf64>
"toy.return"(%2) : (tensor<*xf64>) -> ()
}
func @main() {
%0 = "toy.constant"() {value = dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
@@ -21,10 +22,11 @@ func @main() {
// CHECK-NOT: func @multiply_transpose
// CHECK-NOT: tensor<*xf64>
// CHECK-LABEL: func @main() {
// CHECK-LABEL: func @main()
// CHECK: [[VAL_0:%.*]] = "toy.constant"() {value = dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
// CHECK: [[VAL_1:%.*]] = "toy.constant"() {value = dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
// CHECK: [[VAL_2:%.*]] = "toy.transpose"([[VAL_0]]) : (tensor<2x3xf64>) -> tensor<3x2xf64>
// CHECK: [[VAL_3:%.*]] = "toy.mul"([[VAL_1]], [[VAL_2]]) : (tensor<2x3xf64>, tensor<3x2xf64>) -> tensor<2x2xf64>
// CHECK: "toy.print"([[VAL_3]]) : (tensor<2x2xf64>) -> ()
// CHECK: [[VAL_2:%.*]] = "toy.transpose"([[VAL_1]]) : (tensor<2x3xf64>) -> tensor<3x2xf64>
// CHECK: [[VAL_3:%.*]] = "toy.transpose"([[VAL_0]]) : (tensor<2x3xf64>) -> tensor<3x2xf64>
// CHECK: [[VAL_4:%.*]] = "toy.mul"([[VAL_2]], [[VAL_3]]) : (tensor<3x2xf64>, tensor<3x2xf64>) -> tensor<3x2xf64>
// CHECK: "toy.print"([[VAL_4]]) : (tensor<3x2xf64>) -> ()
// CHECK: "toy.return"() : () -> ()