2020-03-30 06:35:38 +08:00
|
|
|
// RUN: mlir-opt -allow-unregistered-dialect %s -pass-pipeline='func(canonicalize)' -split-input-file | FileCheck %s
|
2019-11-15 04:22:28 +08:00
|
|
|
|
2019-01-03 02:20:00 +08:00
|
|
|
// CHECK-LABEL: func @test_subi_zero
|
|
|
|
func @test_subi_zero(%arg0: i32) -> i32 {
|
2018-10-23 04:08:27 +08:00
|
|
|
// CHECK-NEXT: %c0_i32 = constant 0 : i32
|
|
|
|
// CHECK-NEXT: return %c0
|
|
|
|
%y = subi %arg0, %arg0 : i32
|
|
|
|
return %y: i32
|
|
|
|
}
|
|
|
|
|
2019-04-08 20:53:59 +08:00
|
|
|
// CHECK-LABEL: func @test_subi_zero_vector
|
|
|
|
func @test_subi_zero_vector(%arg0: vector<4xi32>) -> vector<4xi32> {
|
2019-06-26 07:06:13 +08:00
|
|
|
//CHECK-NEXT: %cst = constant dense<0> : vector<4xi32>
|
2019-04-08 20:53:59 +08:00
|
|
|
%y = subi %arg0, %arg0 : vector<4xi32>
|
|
|
|
// CHECK-NEXT: return %cst
|
|
|
|
return %y: vector<4xi32>
|
|
|
|
}
|
|
|
|
|
|
|
|
// CHECK-LABEL: func @test_subi_zero_tensor
|
|
|
|
func @test_subi_zero_tensor(%arg0: tensor<4x5xi32>) -> tensor<4x5xi32> {
|
2019-06-26 07:06:13 +08:00
|
|
|
//CHECK-NEXT: %cst = constant dense<0> : tensor<4x5xi32>
|
2019-04-08 20:53:59 +08:00
|
|
|
%y = subi %arg0, %arg0 : tensor<4x5xi32>
|
|
|
|
// CHECK-NEXT: return %cst
|
|
|
|
return %y: tensor<4x5xi32>
|
|
|
|
}
|
|
|
|
|
2019-01-03 02:20:00 +08:00
|
|
|
// CHECK-LABEL: func @dim
|
|
|
|
func @dim(%arg0: tensor<8x4xf32>) -> index {
|
2018-10-17 00:31:45 +08:00
|
|
|
|
|
|
|
// CHECK: %c4 = constant 4 : index
|
|
|
|
%0 = dim %arg0, 1 : tensor<8x4xf32>
|
|
|
|
|
|
|
|
// CHECK-NEXT: return %c4
|
|
|
|
return %0 : index
|
|
|
|
}
|
|
|
|
|
2019-01-03 02:20:00 +08:00
|
|
|
// CHECK-LABEL: func @test_commutative
|
|
|
|
func @test_commutative(%arg0: i32) -> (i32, i32) {
|
2018-10-17 00:31:45 +08:00
|
|
|
// CHECK: %c42_i32 = constant 42 : i32
|
|
|
|
%c42_i32 = constant 42 : i32
|
2018-10-26 13:04:35 +08:00
|
|
|
// CHECK-NEXT: %0 = addi %arg0, %c42_i32 : i32
|
2018-10-17 00:31:45 +08:00
|
|
|
%y = addi %c42_i32, %arg0 : i32
|
2018-10-26 13:04:35 +08:00
|
|
|
|
|
|
|
// This should not be swapped.
|
|
|
|
// CHECK-NEXT: %1 = subi %c42_i32, %arg0 : i32
|
|
|
|
%z = subi %c42_i32, %arg0 : i32
|
|
|
|
|
|
|
|
// CHECK-NEXT: return %0, %1
|
|
|
|
return %y, %z: i32, i32
|
2018-10-12 08:21:55 +08:00
|
|
|
}
|
|
|
|
|
2019-01-03 02:20:00 +08:00
|
|
|
// CHECK-LABEL: func @trivial_dce
|
|
|
|
func @trivial_dce(%arg0: tensor<8x4xf32>) {
|
2018-10-22 10:53:10 +08:00
|
|
|
%0 = dim %arg0, 1 : tensor<8x4xf32>
|
|
|
|
// CHECK-NEXT: return
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
[MLIR] Propagate input side effect information
Summary:
Previously operations like std.load created methods for obtaining their
effects but did not inherit from the SideEffect interfaces when their
parameters were decorated with the information. The resulting situation
was that passes had no information on the SideEffects of std.load/store
and had to treat them more cautiously. This adds the inheritance
information when creating the methods.
As a side effect, many tests are modified, as they were using std.load
for testing and this oepration would be folded away as part of pattern
rewriting. Tests are modified to use store or to reutn the result of the
std.load.
Reviewers: mravishankar, antiagainst, nicolasvasilache, herhut, aartbik, ftynse!
Subscribers: mehdi_amini, rriddle, jpienaar, shauheen, antiagainst, nicolasvasilache, csigg, arpith-jacob, mgester, lucyrfox, liufengdb, Joonsoo, bader, grosul1, frgossen, Kayjukh, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D78802
2020-04-24 00:13:44 +08:00
|
|
|
// CHECK-LABEL: func @load_dce
|
|
|
|
func @load_dce(%arg0: index) {
|
|
|
|
%c4 = constant 4 : index
|
|
|
|
%a = alloc(%c4) : memref<?xf32>
|
|
|
|
%2 = load %a[%arg0] : memref<?xf32>
|
|
|
|
dealloc %a: memref<?xf32>
|
|
|
|
// CHECK-NEXT: return
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2019-01-03 02:20:00 +08:00
|
|
|
// CHECK-LABEL: func @addi_zero
|
|
|
|
func @addi_zero(%arg0: i32) -> i32 {
|
2018-10-23 04:08:27 +08:00
|
|
|
// CHECK-NEXT: return %arg0
|
|
|
|
%c0_i32 = constant 0 : i32
|
|
|
|
%y = addi %c0_i32, %arg0 : i32
|
2018-10-27 02:28:06 +08:00
|
|
|
return %y: i32
|
|
|
|
}
|
|
|
|
|
2019-11-11 18:33:49 +08:00
|
|
|
// CHECK-LABEL: func @addi_zero_index
|
|
|
|
func @addi_zero_index(%arg0: index) -> index {
|
|
|
|
// CHECK-NEXT: return %arg0
|
|
|
|
%c0_index = constant 0 : index
|
|
|
|
%y = addi %c0_index, %arg0 : index
|
|
|
|
return %y: index
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2019-01-03 02:20:00 +08:00
|
|
|
// CHECK-LABEL: func @addi_zero_vector
|
|
|
|
func @addi_zero_vector(%arg0: vector<4 x i32>) -> vector<4 x i32> {
|
2018-10-30 01:22:49 +08:00
|
|
|
// CHECK-NEXT: return %arg0
|
2019-06-26 07:06:13 +08:00
|
|
|
%c0_v4i32 = constant dense<0> : vector<4 x i32>
|
2018-10-30 01:22:49 +08:00
|
|
|
%y = addi %c0_v4i32, %arg0 : vector<4 x i32>
|
|
|
|
return %y: vector<4 x i32>
|
|
|
|
}
|
|
|
|
|
2019-01-03 02:20:00 +08:00
|
|
|
// CHECK-LABEL: func @addi_zero_tensor
|
|
|
|
func @addi_zero_tensor(%arg0: tensor<4 x 5 x i32>) -> tensor<4 x 5 x i32> {
|
2018-10-30 01:22:49 +08:00
|
|
|
// CHECK-NEXT: return %arg0
|
2019-06-26 07:06:13 +08:00
|
|
|
%c0_t45i32 = constant dense<0> : tensor<4 x 5 x i32>
|
2018-10-30 01:22:49 +08:00
|
|
|
%y = addi %arg0, %c0_t45i32 : tensor<4 x 5 x i32>
|
|
|
|
return %y: tensor<4 x 5 x i32>
|
|
|
|
}
|
|
|
|
|
2019-01-12 01:12:11 +08:00
|
|
|
// CHECK-LABEL: func @muli_zero
|
|
|
|
func @muli_zero(%arg0: i32) -> i32 {
|
|
|
|
// CHECK-NEXT: %c0_i32 = constant 0 : i32
|
|
|
|
%c0_i32 = constant 0 : i32
|
|
|
|
|
|
|
|
%y = muli %c0_i32, %arg0 : i32
|
|
|
|
|
|
|
|
// CHECK-NEXT: return %c0_i32
|
|
|
|
return %y: i32
|
|
|
|
}
|
|
|
|
|
2019-11-11 18:33:49 +08:00
|
|
|
// CHECK-LABEL: func @muli_zero_index
|
|
|
|
func @muli_zero_index(%arg0: index) -> index {
|
|
|
|
// CHECK-NEXT: %[[CST:.*]] = constant 0 : index
|
|
|
|
%c0_index = constant 0 : index
|
|
|
|
|
|
|
|
%y = muli %c0_index, %arg0 : index
|
|
|
|
|
|
|
|
// CHECK-NEXT: return %[[CST]]
|
|
|
|
return %y: index
|
|
|
|
}
|
|
|
|
|
2019-01-12 01:12:11 +08:00
|
|
|
// CHECK-LABEL: func @muli_zero_vector
|
|
|
|
func @muli_zero_vector(%arg0: vector<4 x i32>) -> vector<4 x i32> {
|
2019-06-26 07:06:13 +08:00
|
|
|
// CHECK-NEXT: %cst = constant dense<0> : vector<4xi32>
|
|
|
|
%cst = constant dense<0> : vector<4 x i32>
|
2019-01-12 01:12:11 +08:00
|
|
|
|
|
|
|
%y = muli %cst, %arg0 : vector<4 x i32>
|
|
|
|
|
|
|
|
// CHECK-NEXT: return %cst
|
|
|
|
return %y: vector<4 x i32>
|
|
|
|
}
|
|
|
|
|
|
|
|
// CHECK-LABEL: func @muli_zero_tensor
|
|
|
|
func @muli_zero_tensor(%arg0: tensor<4 x 5 x i32>) -> tensor<4 x 5 x i32> {
|
2019-06-26 07:06:13 +08:00
|
|
|
// CHECK-NEXT: %cst = constant dense<0> : tensor<4x5xi32>
|
|
|
|
%cst = constant dense<0> : tensor<4 x 5 x i32>
|
2019-01-12 01:12:11 +08:00
|
|
|
|
|
|
|
%y = muli %arg0, %cst : tensor<4 x 5 x i32>
|
|
|
|
|
|
|
|
// CHECK-NEXT: return %cst
|
|
|
|
return %y: tensor<4 x 5 x i32>
|
|
|
|
}
|
|
|
|
|
2019-01-03 02:20:00 +08:00
|
|
|
// CHECK-LABEL: func @muli_one
|
|
|
|
func @muli_one(%arg0: i32) -> i32 {
|
2018-10-27 02:28:06 +08:00
|
|
|
// CHECK-NEXT: return %arg0
|
|
|
|
%c0_i32 = constant 1 : i32
|
|
|
|
%y = muli %c0_i32, %arg0 : i32
|
2018-10-23 04:08:27 +08:00
|
|
|
return %y: i32
|
|
|
|
}
|
2018-10-24 01:12:00 +08:00
|
|
|
|
2019-11-11 18:33:49 +08:00
|
|
|
// CHECK-LABEL: func @muli_one_index
|
|
|
|
func @muli_one_index(%arg0: index) -> index {
|
|
|
|
// CHECK-NEXT: return %arg0
|
|
|
|
%c0_index = constant 1 : index
|
|
|
|
%y = muli %c0_index, %arg0 : index
|
|
|
|
return %y: index
|
|
|
|
}
|
|
|
|
|
2019-01-03 02:20:00 +08:00
|
|
|
// CHECK-LABEL: func @muli_one_vector
|
|
|
|
func @muli_one_vector(%arg0: vector<4 x i32>) -> vector<4 x i32> {
|
2018-10-30 01:22:49 +08:00
|
|
|
// CHECK-NEXT: return %arg0
|
2019-06-26 07:06:13 +08:00
|
|
|
%c1_v4i32 = constant dense<1> : vector<4 x i32>
|
2018-10-30 01:22:49 +08:00
|
|
|
%y = muli %c1_v4i32, %arg0 : vector<4 x i32>
|
|
|
|
return %y: vector<4 x i32>
|
|
|
|
}
|
|
|
|
|
2019-01-03 02:20:00 +08:00
|
|
|
// CHECK-LABEL: func @muli_one_tensor
|
|
|
|
func @muli_one_tensor(%arg0: tensor<4 x 5 x i32>) -> tensor<4 x 5 x i32> {
|
2018-10-30 01:22:49 +08:00
|
|
|
// CHECK-NEXT: return %arg0
|
2019-06-26 07:06:13 +08:00
|
|
|
%c1_t45i32 = constant dense<1> : tensor<4 x 5 x i32>
|
2018-10-30 01:22:49 +08:00
|
|
|
%y = muli %arg0, %c1_t45i32 : tensor<4 x 5 x i32>
|
|
|
|
return %y: tensor<4 x 5 x i32>
|
|
|
|
}
|
|
|
|
|
2019-04-08 15:00:46 +08:00
|
|
|
//CHECK-LABEL: func @and_self
|
|
|
|
func @and_self(%arg0: i32) -> i32 {
|
|
|
|
//CHECK-NEXT: return %arg0
|
|
|
|
%1 = and %arg0, %arg0 : i32
|
|
|
|
return %1 : i32
|
|
|
|
}
|
|
|
|
|
|
|
|
//CHECK-LABEL: func @and_self_vector
|
|
|
|
func @and_self_vector(%arg0: vector<4xi32>) -> vector<4xi32> {
|
|
|
|
//CHECK-NEXT: return %arg0
|
|
|
|
%1 = and %arg0, %arg0 : vector<4xi32>
|
|
|
|
return %1 : vector<4xi32>
|
|
|
|
}
|
|
|
|
|
|
|
|
//CHECK-LABEL: func @and_self_tensor
|
|
|
|
func @and_self_tensor(%arg0: tensor<4x5xi32>) -> tensor<4x5xi32> {
|
|
|
|
//CHECK-NEXT: return %arg0
|
|
|
|
%1 = and %arg0, %arg0 : tensor<4x5xi32>
|
|
|
|
return %1 : tensor<4x5xi32>
|
|
|
|
}
|
|
|
|
|
|
|
|
//CHECK-LABEL: func @and_zero
|
|
|
|
func @and_zero(%arg0: i32) -> i32 {
|
|
|
|
// CHECK-NEXT: %c0_i32 = constant 0 : i32
|
|
|
|
%c0_i32 = constant 0 : i32
|
|
|
|
// CHECK-NEXT: return %c0_i32
|
|
|
|
%1 = and %arg0, %c0_i32 : i32
|
|
|
|
return %1 : i32
|
|
|
|
}
|
|
|
|
|
2019-11-11 18:33:49 +08:00
|
|
|
//CHECK-LABEL: func @and_zero_index
|
|
|
|
func @and_zero_index(%arg0: index) -> index {
|
|
|
|
// CHECK-NEXT: %[[CST:.*]] = constant 0 : index
|
|
|
|
%c0_index = constant 0 : index
|
|
|
|
// CHECK-NEXT: return %[[CST]]
|
|
|
|
%1 = and %arg0, %c0_index : index
|
|
|
|
return %1 : index
|
|
|
|
}
|
|
|
|
|
2019-04-08 15:00:46 +08:00
|
|
|
//CHECK-LABEL: func @and_zero_vector
|
|
|
|
func @and_zero_vector(%arg0: vector<4xi32>) -> vector<4xi32> {
|
2019-06-26 07:06:13 +08:00
|
|
|
// CHECK-NEXT: %cst = constant dense<0> : vector<4xi32>
|
|
|
|
%cst = constant dense<0> : vector<4xi32>
|
2019-04-08 15:00:46 +08:00
|
|
|
// CHECK-NEXT: return %cst
|
|
|
|
%1 = and %arg0, %cst : vector<4xi32>
|
|
|
|
return %1 : vector<4xi32>
|
|
|
|
}
|
|
|
|
|
|
|
|
//CHECK-LABEL: func @and_zero_tensor
|
|
|
|
func @and_zero_tensor(%arg0: tensor<4x5xi32>) -> tensor<4x5xi32> {
|
2019-06-26 07:06:13 +08:00
|
|
|
// CHECK-NEXT: %cst = constant dense<0> : tensor<4x5xi32>
|
|
|
|
%cst = constant dense<0> : tensor<4x5xi32>
|
2019-04-08 15:00:46 +08:00
|
|
|
// CHECK-NEXT: return %cst
|
|
|
|
%1 = and %arg0, %cst : tensor<4x5xi32>
|
|
|
|
return %1 : tensor<4x5xi32>
|
|
|
|
}
|
|
|
|
|
|
|
|
//CHECK-LABEL: func @or_self
|
|
|
|
func @or_self(%arg0: i32) -> i32 {
|
|
|
|
//CHECK-NEXT: return %arg0
|
|
|
|
%1 = or %arg0, %arg0 : i32
|
|
|
|
return %1 : i32
|
|
|
|
}
|
|
|
|
|
|
|
|
//CHECK-LABEL: func @or_self_vector
|
|
|
|
func @or_self_vector(%arg0: vector<4xi32>) -> vector<4xi32> {
|
|
|
|
//CHECK-NEXT: return %arg0
|
|
|
|
%1 = or %arg0, %arg0 : vector<4xi32>
|
|
|
|
return %1 : vector<4xi32>
|
|
|
|
}
|
|
|
|
|
|
|
|
//CHECK-LABEL: func @or_self_tensor
|
|
|
|
func @or_self_tensor(%arg0: tensor<4x5xi32>) -> tensor<4x5xi32> {
|
|
|
|
//CHECK-NEXT: return %arg0
|
|
|
|
%1 = or %arg0, %arg0 : tensor<4x5xi32>
|
|
|
|
return %1 : tensor<4x5xi32>
|
|
|
|
}
|
|
|
|
|
|
|
|
//CHECK-LABEL: func @or_zero
|
|
|
|
func @or_zero(%arg0: i32) -> i32 {
|
|
|
|
%c0_i32 = constant 0 : i32
|
|
|
|
// CHECK-NEXT: return %arg0
|
|
|
|
%1 = or %arg0, %c0_i32 : i32
|
|
|
|
return %1 : i32
|
|
|
|
}
|
|
|
|
|
2019-11-11 18:33:49 +08:00
|
|
|
//CHECK-LABEL: func @or_zero_index
|
|
|
|
func @or_zero_index(%arg0: index) -> index {
|
|
|
|
%c0_index = constant 0 : index
|
|
|
|
// CHECK-NEXT: return %arg0
|
|
|
|
%1 = or %arg0, %c0_index : index
|
|
|
|
return %1 : index
|
|
|
|
}
|
|
|
|
|
2019-04-08 15:00:46 +08:00
|
|
|
//CHECK-LABEL: func @or_zero_vector
|
|
|
|
func @or_zero_vector(%arg0: vector<4xi32>) -> vector<4xi32> {
|
|
|
|
// CHECK-NEXT: return %arg0
|
2019-06-26 07:06:13 +08:00
|
|
|
%cst = constant dense<0> : vector<4xi32>
|
2019-04-08 15:00:46 +08:00
|
|
|
%1 = or %arg0, %cst : vector<4xi32>
|
|
|
|
return %1 : vector<4xi32>
|
|
|
|
}
|
|
|
|
|
|
|
|
//CHECK-LABEL: func @or_zero_tensor
|
|
|
|
func @or_zero_tensor(%arg0: tensor<4x5xi32>) -> tensor<4x5xi32> {
|
|
|
|
// CHECK-NEXT: return %arg0
|
2019-06-26 07:06:13 +08:00
|
|
|
%cst = constant dense<0> : tensor<4x5xi32>
|
2019-04-08 15:00:46 +08:00
|
|
|
%1 = or %arg0, %cst : tensor<4x5xi32>
|
|
|
|
return %1 : tensor<4x5xi32>
|
|
|
|
}
|
|
|
|
|
2019-04-08 20:53:59 +08:00
|
|
|
//CHECK-LABEL: func @xor_self
|
|
|
|
func @xor_self(%arg0: i32) -> i32 {
|
|
|
|
//CHECK-NEXT: %c0_i32 = constant 0
|
|
|
|
%1 = xor %arg0, %arg0 : i32
|
|
|
|
//CHECK-NEXT: return %c0_i32
|
|
|
|
return %1 : i32
|
|
|
|
}
|
|
|
|
|
|
|
|
//CHECK-LABEL: func @xor_self_vector
|
|
|
|
func @xor_self_vector(%arg0: vector<4xi32>) -> vector<4xi32> {
|
2019-06-26 07:06:13 +08:00
|
|
|
//CHECK-NEXT: %cst = constant dense<0> : vector<4xi32>
|
2019-04-08 20:53:59 +08:00
|
|
|
%1 = xor %arg0, %arg0 : vector<4xi32>
|
|
|
|
//CHECK-NEXT: return %cst
|
|
|
|
return %1 : vector<4xi32>
|
|
|
|
}
|
|
|
|
|
|
|
|
//CHECK-LABEL: func @xor_self_tensor
|
|
|
|
func @xor_self_tensor(%arg0: tensor<4x5xi32>) -> tensor<4x5xi32> {
|
2019-06-26 07:06:13 +08:00
|
|
|
//CHECK-NEXT: %cst = constant dense<0> : tensor<4x5xi32>
|
2019-04-08 20:53:59 +08:00
|
|
|
%1 = xor %arg0, %arg0 : tensor<4x5xi32>
|
|
|
|
//CHECK-NEXT: return %cst
|
|
|
|
return %1 : tensor<4x5xi32>
|
|
|
|
}
|
|
|
|
|
2019-01-03 02:20:00 +08:00
|
|
|
// CHECK-LABEL: func @memref_cast_folding
|
|
|
|
func @memref_cast_folding(%arg0: memref<4 x f32>, %arg1: f32) -> f32 {
|
2018-12-30 05:56:57 +08:00
|
|
|
%1 = memref_cast %arg0 : memref<4xf32> to memref<?xf32>
|
2019-09-18 02:49:14 +08:00
|
|
|
// CHECK-NEXT: %c0 = constant 0 : index
|
2018-10-24 01:12:00 +08:00
|
|
|
%c0 = constant 0 : index
|
2019-09-14 09:18:21 +08:00
|
|
|
%dim = dim %1, 0 : memref<? x f32>
|
|
|
|
|
2019-09-18 02:49:14 +08:00
|
|
|
// CHECK-NEXT: affine.load %arg0[3]
|
2019-09-14 09:18:21 +08:00
|
|
|
affine.load %1[%dim - 1] : memref<?xf32>
|
2018-10-24 01:12:00 +08:00
|
|
|
|
|
|
|
// CHECK-NEXT: store %arg1, %arg0[%c0] : memref<4xf32>
|
|
|
|
store %arg1, %1[%c0] : memref<?xf32>
|
|
|
|
|
2019-09-14 09:18:21 +08:00
|
|
|
// CHECK-NEXT: %{{.*}} = load %arg0[%c0] : memref<4xf32>
|
2018-10-24 01:12:00 +08:00
|
|
|
%0 = load %1[%c0] : memref<?xf32>
|
|
|
|
|
|
|
|
// CHECK-NEXT: dealloc %arg0 : memref<4xf32>
|
|
|
|
dealloc %1: memref<?xf32>
|
|
|
|
|
2019-09-14 09:18:21 +08:00
|
|
|
// CHECK-NEXT: return %{{.*}}
|
2018-10-24 01:12:00 +08:00
|
|
|
return %0 : f32
|
|
|
|
}
|
|
|
|
|
2019-01-03 02:20:00 +08:00
|
|
|
// CHECK-LABEL: func @alloc_const_fold
|
|
|
|
func @alloc_const_fold() -> memref<?xf32> {
|
2018-10-24 01:12:00 +08:00
|
|
|
// CHECK-NEXT: %0 = alloc() : memref<4xf32>
|
|
|
|
%c4 = constant 4 : index
|
|
|
|
%a = alloc(%c4) : memref<?xf32>
|
|
|
|
|
|
|
|
// CHECK-NEXT: %1 = memref_cast %0 : memref<4xf32> to memref<?xf32>
|
|
|
|
// CHECK-NEXT: return %1 : memref<?xf32>
|
|
|
|
return %a : memref<?xf32>
|
|
|
|
}
|
|
|
|
|
2019-01-17 03:40:37 +08:00
|
|
|
// CHECK-LABEL: func @dead_alloc_fold
|
|
|
|
func @dead_alloc_fold() {
|
|
|
|
// CHECK-NEXT: return
|
|
|
|
%c4 = constant 4 : index
|
|
|
|
%a = alloc(%c4) : memref<?xf32>
|
|
|
|
return
|
|
|
|
}
|
2018-10-24 01:12:00 +08:00
|
|
|
|
2019-01-17 04:39:03 +08:00
|
|
|
// CHECK-LABEL: func @dead_dealloc_fold
|
|
|
|
func @dead_dealloc_fold() {
|
|
|
|
// CHECK-NEXT: return
|
|
|
|
%a = alloc() : memref<4xf32>
|
|
|
|
dealloc %a: memref<4xf32>
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// CHECK-LABEL: func @dead_dealloc_fold_multi_use
|
|
|
|
func @dead_dealloc_fold_multi_use(%cond : i1) {
|
|
|
|
// CHECK-NEXT: cond_br
|
|
|
|
%a = alloc() : memref<4xf32>
|
|
|
|
cond_br %cond, ^bb1, ^bb2
|
|
|
|
|
|
|
|
// CHECK-LABEL: bb1:
|
|
|
|
^bb1:
|
|
|
|
// CHECK-NEXT: return
|
|
|
|
dealloc %a: memref<4xf32>
|
|
|
|
return
|
|
|
|
|
|
|
|
// CHECK-LABEL: bb2:
|
|
|
|
^bb2:
|
|
|
|
// CHECK-NEXT: return
|
|
|
|
dealloc %a: memref<4xf32>
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2019-10-31 02:18:51 +08:00
|
|
|
// CHECK-LABEL: func @dead_block_elim
|
|
|
|
func @dead_block_elim() {
|
[MLIR] Add missing colon after CHECKs.
Reviewers: herhut
Reviewed By: herhut
Subscribers: mehdi_amini, rriddle, jpienaar, burmako, shauheen, antiagainst, nicolasvasilache, arpith-jacob, mgester, lucyrfox, liufengdb, Joonsoo, grosul1, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D77709
2020-04-08 14:59:59 +08:00
|
|
|
// CHECK-NOT: ^bb
|
2019-10-31 02:18:51 +08:00
|
|
|
func @nested() {
|
|
|
|
return
|
|
|
|
|
|
|
|
^bb1:
|
|
|
|
return
|
|
|
|
}
|
|
|
|
return
|
|
|
|
|
|
|
|
^bb1:
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2019-01-03 02:20:00 +08:00
|
|
|
// CHECK-LABEL: func @dyn_shape_fold(%arg0: index, %arg1: index)
|
2019-01-17 03:40:37 +08:00
|
|
|
func @dyn_shape_fold(%L : index, %M : index) -> (memref<? x ? x i32>, memref<? x ? x f32>) {
|
2018-10-24 01:12:00 +08:00
|
|
|
// CHECK: %c0 = constant 0 : index
|
|
|
|
%zero = constant 0 : index
|
|
|
|
// The constants below disappear after they propagate into shapes.
|
|
|
|
%nine = constant 9 : index
|
|
|
|
%N = constant 1024 : index
|
|
|
|
%K = constant 512 : index
|
|
|
|
|
2020-03-22 23:50:21 +08:00
|
|
|
// CHECK-NEXT: alloc(%arg0) : memref<?x1024xf32>
|
2018-10-24 01:12:00 +08:00
|
|
|
%a = alloc(%L, %N) : memref<? x ? x f32>
|
|
|
|
|
2020-03-22 23:50:21 +08:00
|
|
|
// CHECK-NEXT: alloc(%arg1) : memref<4x1024x8x512x?xf32>
|
2018-10-24 01:12:00 +08:00
|
|
|
%b = alloc(%N, %K, %M) : memref<4 x ? x 8 x ? x ? x f32>
|
|
|
|
|
2020-03-22 23:50:21 +08:00
|
|
|
// CHECK-NEXT: alloc() : memref<512x1024xi32>
|
2018-11-21 05:39:35 +08:00
|
|
|
%c = alloc(%K, %N) : memref<? x ? x i32>
|
|
|
|
|
2020-03-22 23:50:21 +08:00
|
|
|
// CHECK: alloc() : memref<9x9xf32>
|
|
|
|
%d = alloc(%nine, %nine) : memref<? x ? x f32>
|
|
|
|
|
|
|
|
// CHECK: alloca(%arg1) : memref<4x1024x8x512x?xf32>
|
|
|
|
%e = alloca(%N, %K, %M) : memref<4 x ? x 8 x ? x ? x f32>
|
|
|
|
|
2019-07-10 01:40:29 +08:00
|
|
|
// CHECK: affine.for
|
2019-03-26 01:14:34 +08:00
|
|
|
affine.for %i = 0 to %L {
|
2019-07-10 01:40:29 +08:00
|
|
|
// CHECK-NEXT: affine.for
|
2019-03-26 01:14:34 +08:00
|
|
|
affine.for %j = 0 to 10 {
|
2019-07-10 01:40:29 +08:00
|
|
|
// CHECK-NEXT: load %0[%arg2, %arg3] : memref<?x1024xf32>
|
|
|
|
// CHECK-NEXT: store %{{.*}}, %1[%c0, %c0, %arg2, %arg3, %c0] : memref<4x1024x8x512x?xf32>
|
2018-10-24 01:12:00 +08:00
|
|
|
%v = load %a[%i, %j] : memref<?x?xf32>
|
|
|
|
store %v, %b[%zero, %zero, %i, %j, %zero] : memref<4x?x8x?x?xf32>
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-01-17 03:40:37 +08:00
|
|
|
return %c, %d : memref<? x ? x i32>, memref<? x ? x f32>
|
2018-10-24 01:12:00 +08:00
|
|
|
}
|
2018-10-25 00:22:48 +08:00
|
|
|
|
2020-01-14 05:12:37 +08:00
|
|
|
#map1 = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + d1 * s2 + s0)>
|
|
|
|
#map2 = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s2 + d1 * s1 + d2 + s0)>
|
2019-12-06 21:59:06 +08:00
|
|
|
|
|
|
|
// CHECK-LABEL: func @dim_op_fold(%arg0: index, %arg1: index, %arg2: index,
|
|
|
|
func @dim_op_fold(%arg0: index, %arg1: index, %arg2: index, %BUF: memref<?xi8>, %M : index, %N : index, %K : index) {
|
|
|
|
// CHECK-SAME: [[M:arg[0-9]+]]: index
|
|
|
|
// CHECK-SAME: [[N:arg[0-9]+]]: index
|
|
|
|
// CHECK-SAME: [[K:arg[0-9]+]]: index
|
|
|
|
%c0 = constant 0 : index
|
|
|
|
%c1 = constant 1 : index
|
|
|
|
%0 = alloc(%arg0, %arg1) : memref<?x?xf32>
|
|
|
|
%1 = alloc(%arg1, %arg2) : memref<?x8x?xf32>
|
|
|
|
%2 = dim %1, 2 : memref<?x8x?xf32>
|
|
|
|
affine.for %arg3 = 0 to %2 {
|
|
|
|
%3 = alloc(%arg0) : memref<?xi8>
|
|
|
|
%ub = dim %3, 0 : memref<?xi8>
|
|
|
|
affine.for %arg4 = 0 to %ub {
|
|
|
|
%s = dim %0, 0 : memref<?x?xf32>
|
|
|
|
%v = std.view %3[%c0][%arg4, %s] : memref<?xi8> to memref<?x?xf32, #map1>
|
2020-03-06 04:40:53 +08:00
|
|
|
%sv = subview %0[%c0, %c0][%s,%arg4][%c1,%c1] : memref<?x?xf32> to memref<?x?xf32, #map1>
|
2019-12-06 21:59:06 +08:00
|
|
|
%l = dim %v, 1 : memref<?x?xf32, #map1>
|
|
|
|
%u = dim %sv, 0 : memref<?x?xf32, #map1>
|
|
|
|
affine.for %arg5 = %l to %u {
|
|
|
|
"foo"() : () -> ()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// CHECK-NEXT: affine.for %arg7 = 0 to %arg2 {
|
|
|
|
// CHECK-NEXT: affine.for %arg8 = 0 to %arg0 {
|
|
|
|
// CHECK-NEXT: affine.for %arg9 = %arg0 to %arg0 {
|
|
|
|
// CHECK-NEXT: "foo"() : () -> ()
|
|
|
|
// CHECK-NEXT: }
|
|
|
|
// CHECK-NEXT: }
|
|
|
|
// CHECK-NEXT: }
|
|
|
|
|
|
|
|
%A = view %BUF[%c0][%M, %K] : memref<?xi8> to memref<?x?xf32, offset: ?, strides: [?, 1]>
|
|
|
|
%B = view %BUF[%c0][%K, %N] : memref<?xi8> to memref<?x?xf32, offset: ?, strides: [?, 1]>
|
|
|
|
%C = view %BUF[%c0][%M, %N] : memref<?xi8> to memref<?x?xf32, offset: ?, strides: [?, 1]>
|
|
|
|
|
|
|
|
%M_ = dim %A, 0 : memref<?x?xf32, offset: ?, strides: [?, 1]>
|
|
|
|
%K_ = dim %A, 1 : memref<?x?xf32, offset: ?, strides: [?, 1]>
|
|
|
|
%N_ = dim %C, 1 : memref<?x?xf32, offset: ?, strides: [?, 1]>
|
|
|
|
loop.for %i = %c0 to %M_ step %c1 {
|
|
|
|
loop.for %j = %c0 to %N_ step %c1 {
|
|
|
|
loop.for %k = %c0 to %K_ step %c1 {
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2020-03-13 05:06:41 +08:00
|
|
|
// CHECK-NEXT: return
|
2019-12-06 21:59:06 +08:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2019-01-03 02:20:00 +08:00
|
|
|
// CHECK-LABEL: func @merge_constants
|
|
|
|
func @merge_constants() -> (index, index) {
|
2018-10-25 00:22:48 +08:00
|
|
|
// CHECK-NEXT: %c42 = constant 42 : index
|
|
|
|
%0 = constant 42 : index
|
|
|
|
%1 = constant 42 : index
|
|
|
|
// CHECK-NEXT: return %c42, %c42
|
|
|
|
return %0, %1: index, index
|
|
|
|
}
|
|
|
|
|
2019-01-03 02:20:00 +08:00
|
|
|
// CHECK-LABEL: func @hoist_constant
|
|
|
|
func @hoist_constant(%arg0: memref<8xi32>) {
|
2018-10-25 00:22:48 +08:00
|
|
|
// CHECK-NEXT: %c42_i32 = constant 42 : i32
|
2019-07-10 01:40:29 +08:00
|
|
|
// CHECK-NEXT: affine.for %arg1 = 0 to 8 {
|
|
|
|
affine.for %arg1 = 0 to 8 {
|
|
|
|
// CHECK-NEXT: store %c42_i32, %arg0[%arg1]
|
2018-10-25 00:22:48 +08:00
|
|
|
%c42_i32 = constant 42 : i32
|
2019-07-10 01:40:29 +08:00
|
|
|
store %c42_i32, %arg0[%arg1] : memref<8xi32>
|
2018-10-25 00:22:48 +08:00
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
2018-10-26 13:04:35 +08:00
|
|
|
|
2019-01-03 02:20:00 +08:00
|
|
|
// CHECK-LABEL: func @const_fold_propagate
|
|
|
|
func @const_fold_propagate() -> memref<?x?xf32> {
|
2018-10-26 13:04:35 +08:00
|
|
|
%VT_i = constant 512 : index
|
|
|
|
|
2020-01-14 05:12:37 +08:00
|
|
|
%VT_i_s = affine.apply affine_map<(d0) -> (d0 floordiv 8)> (%VT_i)
|
|
|
|
%VT_k_l = affine.apply affine_map<(d0) -> (d0 floordiv 16)> (%VT_i)
|
2018-10-26 13:04:35 +08:00
|
|
|
|
|
|
|
// CHECK: = alloc() : memref<64x32xf32>
|
|
|
|
%Av = alloc(%VT_i_s, %VT_k_l) : memref<?x?xf32>
|
|
|
|
return %Av : memref<?x?xf32>
|
2019-01-04 23:23:28 +08:00
|
|
|
}
|
2019-01-15 05:23:18 +08:00
|
|
|
|
2019-01-30 10:08:28 +08:00
|
|
|
// CHECK-LABEL: func @indirect_call_folding
|
|
|
|
func @indirect_target() {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
func @indirect_call_folding() {
|
|
|
|
// CHECK-NEXT: call @indirect_target() : () -> ()
|
|
|
|
// CHECK-NEXT: return
|
|
|
|
%indirect_fn = constant @indirect_target : () -> ()
|
|
|
|
call_indirect %indirect_fn() : () -> ()
|
|
|
|
return
|
|
|
|
}
|
2019-02-08 00:26:31 +08:00
|
|
|
|
2019-11-15 19:59:57 +08:00
|
|
|
//
|
2019-02-08 00:26:31 +08:00
|
|
|
// IMPORTANT NOTE: the operations in this test are exactly those produced by
|
2020-01-14 05:12:37 +08:00
|
|
|
// lowering affine.apply affine_map<(i) -> (i mod 42)> to standard operations. Please only
|
2019-02-08 00:26:31 +08:00
|
|
|
// change these operations together with the affine lowering pass tests.
|
2019-11-15 19:59:57 +08:00
|
|
|
//
|
2019-02-08 00:26:31 +08:00
|
|
|
// CHECK-LABEL: @lowered_affine_mod
|
|
|
|
func @lowered_affine_mod() -> (index, index) {
|
|
|
|
// CHECK-NEXT: {{.*}} = constant 41 : index
|
|
|
|
%c-43 = constant -43 : index
|
|
|
|
%c42 = constant 42 : index
|
2019-12-23 02:01:35 +08:00
|
|
|
%0 = remi_signed %c-43, %c42 : index
|
2019-02-08 00:26:31 +08:00
|
|
|
%c0 = constant 0 : index
|
|
|
|
%1 = cmpi "slt", %0, %c0 : index
|
|
|
|
%2 = addi %0, %c42 : index
|
|
|
|
%3 = select %1, %2, %0 : index
|
|
|
|
// CHECK-NEXT: {{.*}} = constant 1 : index
|
|
|
|
%c43 = constant 43 : index
|
|
|
|
%c42_0 = constant 42 : index
|
2019-12-23 02:01:35 +08:00
|
|
|
%4 = remi_signed %c43, %c42_0 : index
|
2019-02-08 00:26:31 +08:00
|
|
|
%c0_1 = constant 0 : index
|
|
|
|
%5 = cmpi "slt", %4, %c0_1 : index
|
|
|
|
%6 = addi %4, %c42_0 : index
|
|
|
|
%7 = select %5, %6, %4 : index
|
|
|
|
return %3, %7 : index, index
|
|
|
|
}
|
|
|
|
|
2019-11-15 19:59:57 +08:00
|
|
|
//
|
2019-02-08 00:26:31 +08:00
|
|
|
// IMPORTANT NOTE: the operations in this test are exactly those produced by
|
2020-01-14 05:12:37 +08:00
|
|
|
// lowering affine.apply affine_map<(i) -> (i mod 42)> to standard operations. Please only
|
2019-02-08 00:26:31 +08:00
|
|
|
// change these operations together with the affine lowering pass tests.
|
2019-11-15 19:59:57 +08:00
|
|
|
//
|
2019-02-08 00:26:31 +08:00
|
|
|
// CHECK-LABEL: func @lowered_affine_floordiv
|
|
|
|
func @lowered_affine_floordiv() -> (index, index) {
|
|
|
|
// CHECK-NEXT: %c-2 = constant -2 : index
|
|
|
|
%c-43 = constant -43 : index
|
|
|
|
%c42 = constant 42 : index
|
|
|
|
%c0 = constant 0 : index
|
|
|
|
%c-1 = constant -1 : index
|
|
|
|
%0 = cmpi "slt", %c-43, %c0 : index
|
|
|
|
%1 = subi %c-1, %c-43 : index
|
|
|
|
%2 = select %0, %1, %c-43 : index
|
2019-12-23 02:01:35 +08:00
|
|
|
%3 = divi_signed %2, %c42 : index
|
2019-02-08 00:26:31 +08:00
|
|
|
%4 = subi %c-1, %3 : index
|
|
|
|
%5 = select %0, %4, %3 : index
|
|
|
|
// CHECK-NEXT: %c1 = constant 1 : index
|
|
|
|
%c43 = constant 43 : index
|
|
|
|
%c42_0 = constant 42 : index
|
|
|
|
%c0_1 = constant 0 : index
|
|
|
|
%c-1_2 = constant -1 : index
|
|
|
|
%6 = cmpi "slt", %c43, %c0_1 : index
|
|
|
|
%7 = subi %c-1_2, %c43 : index
|
|
|
|
%8 = select %6, %7, %c43 : index
|
2019-12-23 02:01:35 +08:00
|
|
|
%9 = divi_signed %8, %c42_0 : index
|
2019-02-08 00:26:31 +08:00
|
|
|
%10 = subi %c-1_2, %9 : index
|
|
|
|
%11 = select %6, %10, %9 : index
|
|
|
|
return %5, %11 : index, index
|
|
|
|
}
|
|
|
|
|
2019-11-15 19:59:57 +08:00
|
|
|
//
|
2019-02-08 00:26:31 +08:00
|
|
|
// IMPORTANT NOTE: the operations in this test are exactly those produced by
|
2020-01-14 05:12:37 +08:00
|
|
|
// lowering affine.apply affine_map<(i) -> (i mod 42)> to standard operations. Please only
|
2019-02-08 00:26:31 +08:00
|
|
|
// change these operations together with the affine lowering pass tests.
|
2019-11-15 19:59:57 +08:00
|
|
|
//
|
2019-02-08 00:26:31 +08:00
|
|
|
// CHECK-LABEL: func @lowered_affine_ceildiv
|
|
|
|
func @lowered_affine_ceildiv() -> (index, index) {
|
|
|
|
// CHECK-NEXT: %c-1 = constant -1 : index
|
|
|
|
%c-43 = constant -43 : index
|
|
|
|
%c42 = constant 42 : index
|
|
|
|
%c0 = constant 0 : index
|
|
|
|
%c1 = constant 1 : index
|
|
|
|
%0 = cmpi "sle", %c-43, %c0 : index
|
|
|
|
%1 = subi %c0, %c-43 : index
|
|
|
|
%2 = subi %c-43, %c1 : index
|
|
|
|
%3 = select %0, %1, %2 : index
|
2019-12-23 02:01:35 +08:00
|
|
|
%4 = divi_signed %3, %c42 : index
|
2019-02-08 00:26:31 +08:00
|
|
|
%5 = subi %c0, %4 : index
|
|
|
|
%6 = addi %4, %c1 : index
|
|
|
|
%7 = select %0, %5, %6 : index
|
|
|
|
// CHECK-NEXT: %c2 = constant 2 : index
|
|
|
|
%c43 = constant 43 : index
|
|
|
|
%c42_0 = constant 42 : index
|
|
|
|
%c0_1 = constant 0 : index
|
|
|
|
%c1_2 = constant 1 : index
|
|
|
|
%8 = cmpi "sle", %c43, %c0_1 : index
|
|
|
|
%9 = subi %c0_1, %c43 : index
|
|
|
|
%10 = subi %c43, %c1_2 : index
|
|
|
|
%11 = select %8, %9, %10 : index
|
2019-12-23 02:01:35 +08:00
|
|
|
%12 = divi_signed %11, %c42_0 : index
|
2019-02-08 00:26:31 +08:00
|
|
|
%13 = subi %c0_1, %12 : index
|
|
|
|
%14 = addi %12, %c1_2 : index
|
|
|
|
%15 = select %8, %13, %14 : index
|
|
|
|
return %7, %15 : index, index
|
|
|
|
}
|
2019-04-28 11:55:38 +08:00
|
|
|
|
|
|
|
// Checks that NOP casts are removed.
|
|
|
|
// CHECK-LABEL: cast_values
|
|
|
|
func @cast_values(%arg0: tensor<*xi32>, %arg1: memref<?xi32>) -> (tensor<2xi32>, memref<2xi32>) {
|
|
|
|
|
|
|
|
// NOP casts
|
|
|
|
%0 = tensor_cast %arg0 : tensor<*xi32> to tensor<*xi32>
|
|
|
|
%1 = memref_cast %arg1 : memref<?xi32> to memref<?xi32>
|
|
|
|
|
|
|
|
// CHECK-NEXT: %0 = tensor_cast %arg0 : tensor<*xi32> to tensor<2xi32>
|
|
|
|
// CHECK-NEXT: %1 = memref_cast %arg1 : memref<?xi32> to memref<2xi32>
|
|
|
|
%2 = tensor_cast %0 : tensor<*xi32> to tensor<2xi32>
|
|
|
|
%3 = memref_cast %1 : memref<?xi32> to memref<2xi32>
|
|
|
|
|
|
|
|
// NOP casts
|
|
|
|
%4 = tensor_cast %2 : tensor<2xi32> to tensor<2xi32>
|
|
|
|
%5 = memref_cast %3 : memref<2xi32> to memref<2xi32>
|
|
|
|
|
|
|
|
// CHECK-NEXT: return %0, %1 : tensor<2xi32>, memref<2xi32>
|
|
|
|
return %4, %5 : tensor<2xi32>, memref<2xi32>
|
|
|
|
}
|
2019-11-08 00:04:33 +08:00
|
|
|
|
2019-11-15 19:59:57 +08:00
|
|
|
// -----
|
|
|
|
|
2020-01-14 05:12:37 +08:00
|
|
|
#TEST_VIEW_MAP0 = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + d1 + s0)>
|
|
|
|
#TEST_VIEW_MAP1 = affine_map<(d0, d1, d2)[s0, s1] -> (d0 * s1 + d1 * s0 + d2)>
|
|
|
|
#TEST_VIEW_MAP2 = affine_map<(d0, d1)[s0] -> (d0 * 4 + d1 + s0)>
|
2019-11-15 19:59:57 +08:00
|
|
|
|
2020-01-14 05:12:37 +08:00
|
|
|
// CHECK-DAG: #[[VIEW_MAP0:map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 11 + d1 + 15)>
|
|
|
|
// CHECK-DAG: #[[VIEW_MAP1:map[0-9]+]] = affine_map<(d0, d1)[s0] -> (d0 * 11 + s0 + d1)>
|
|
|
|
// CHECK-DAG: #[[VIEW_MAP2:map[0-9]+]] = affine_map<(d0, d1)[s0] -> (d0 * s0 + d1 + 15)>
|
|
|
|
// CHECK-DAG: #[[VIEW_MAP3:map[0-9]+]] = affine_map<(d0, d1, d2)[s0] -> (d0 * s0 + d1 * 7 + d2)>
|
|
|
|
// CHECK-DAG: #[[VIEW_MAP4:map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 4 + d1 + 15)>
|
[mlir] : Fix ViewOp shape folder for identity affine maps
Summary: Fix the ViewOpShapeFolder in case of no affine mapping associated with a Memref construct identity mapping.
Reviewers: nicolasvasilache
Subscribers: mehdi_amini, rriddle, jpienaar, burmako, shauheen, antiagainst, arpith-jacob, mgester, lucyrfox, liufengdb, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D72735
2020-01-15 05:18:05 +08:00
|
|
|
// CHECK-DAG: #[[VIEW_MAP5:map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 7 + d1)>
|
2019-11-15 19:59:57 +08:00
|
|
|
|
2019-11-08 00:04:33 +08:00
|
|
|
// CHECK-LABEL: func @view
|
[MLIR] Propagate input side effect information
Summary:
Previously operations like std.load created methods for obtaining their
effects but did not inherit from the SideEffect interfaces when their
parameters were decorated with the information. The resulting situation
was that passes had no information on the SideEffects of std.load/store
and had to treat them more cautiously. This adds the inheritance
information when creating the methods.
As a side effect, many tests are modified, as they were using std.load
for testing and this oepration would be folded away as part of pattern
rewriting. Tests are modified to use store or to reutn the result of the
std.load.
Reviewers: mravishankar, antiagainst, nicolasvasilache, herhut, aartbik, ftynse!
Subscribers: mehdi_amini, rriddle, jpienaar, shauheen, antiagainst, nicolasvasilache, csigg, arpith-jacob, mgester, lucyrfox, liufengdb, Joonsoo, bader, grosul1, frgossen, Kayjukh, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D78802
2020-04-24 00:13:44 +08:00
|
|
|
func @view(%arg0 : index) -> (f32, f32, f32, f32, f32, f32) {
|
Canonicalize static alloc followed by memref_cast and std.view
Summary: Rewrite alloc, memref_cast, std.view into allo, std.view by droping memref_cast.
Reviewers: nicolasvasilache
Subscribers: mehdi_amini, rriddle, jpienaar, burmako, shauheen, antiagainst, arpith-jacob, mgester, lucyrfox, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D72379
2020-01-08 09:46:40 +08:00
|
|
|
// CHECK: %[[ALLOC_MEM:.*]] = alloc() : memref<2048xi8>
|
2019-11-08 00:04:33 +08:00
|
|
|
%0 = alloc() : memref<2048xi8>
|
2019-11-15 21:27:57 +08:00
|
|
|
%c0 = constant 0 : index
|
2019-11-08 00:04:33 +08:00
|
|
|
%c7 = constant 7 : index
|
|
|
|
%c11 = constant 11 : index
|
|
|
|
%c15 = constant 15 : index
|
|
|
|
|
|
|
|
// Test: fold constant sizes and offset, update map with static stride/offset.
|
2020-01-09 04:28:11 +08:00
|
|
|
// CHECK: std.view %[[ALLOC_MEM]][][] : memref<2048xi8> to memref<7x11xf32, #[[VIEW_MAP0]]>
|
2019-11-08 02:19:54 +08:00
|
|
|
%1 = view %0[%c15][%c7, %c11]
|
2019-11-08 00:04:33 +08:00
|
|
|
: memref<2048xi8> to memref<?x?xf32, #TEST_VIEW_MAP0>
|
[MLIR] Propagate input side effect information
Summary:
Previously operations like std.load created methods for obtaining their
effects but did not inherit from the SideEffect interfaces when their
parameters were decorated with the information. The resulting situation
was that passes had no information on the SideEffects of std.load/store
and had to treat them more cautiously. This adds the inheritance
information when creating the methods.
As a side effect, many tests are modified, as they were using std.load
for testing and this oepration would be folded away as part of pattern
rewriting. Tests are modified to use store or to reutn the result of the
std.load.
Reviewers: mravishankar, antiagainst, nicolasvasilache, herhut, aartbik, ftynse!
Subscribers: mehdi_amini, rriddle, jpienaar, shauheen, antiagainst, nicolasvasilache, csigg, arpith-jacob, mgester, lucyrfox, liufengdb, Joonsoo, bader, grosul1, frgossen, Kayjukh, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D78802
2020-04-24 00:13:44 +08:00
|
|
|
%r0 = load %1[%c0, %c0] : memref<?x?xf32, #TEST_VIEW_MAP0>
|
2019-11-08 02:19:54 +08:00
|
|
|
|
2019-11-08 00:04:33 +08:00
|
|
|
// Test: fold constant sizes but not offset, update map with static stride.
|
|
|
|
// Test that we do not a fold dynamic dim which is not produced by a constant.
|
2020-01-09 04:28:11 +08:00
|
|
|
// CHECK: std.view %[[ALLOC_MEM]][%arg0][] : memref<2048xi8> to memref<7x11xf32, #[[VIEW_MAP1]]>
|
2019-11-08 02:19:54 +08:00
|
|
|
%2 = view %0[%arg0][%c7, %c11]
|
2019-11-08 00:04:33 +08:00
|
|
|
: memref<2048xi8> to memref<?x?xf32, #TEST_VIEW_MAP0>
|
[MLIR] Propagate input side effect information
Summary:
Previously operations like std.load created methods for obtaining their
effects but did not inherit from the SideEffect interfaces when their
parameters were decorated with the information. The resulting situation
was that passes had no information on the SideEffects of std.load/store
and had to treat them more cautiously. This adds the inheritance
information when creating the methods.
As a side effect, many tests are modified, as they were using std.load
for testing and this oepration would be folded away as part of pattern
rewriting. Tests are modified to use store or to reutn the result of the
std.load.
Reviewers: mravishankar, antiagainst, nicolasvasilache, herhut, aartbik, ftynse!
Subscribers: mehdi_amini, rriddle, jpienaar, shauheen, antiagainst, nicolasvasilache, csigg, arpith-jacob, mgester, lucyrfox, liufengdb, Joonsoo, bader, grosul1, frgossen, Kayjukh, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D78802
2020-04-24 00:13:44 +08:00
|
|
|
%r1 = load %2[%c0, %c0] : memref<?x?xf32, #TEST_VIEW_MAP0>
|
2019-11-08 02:19:54 +08:00
|
|
|
|
2019-11-08 00:04:33 +08:00
|
|
|
// Test: fold constant offset but not sizes, update map with constant offset.
|
|
|
|
// Test that we fold constant offset but not dynamic dims.
|
2020-01-09 04:28:11 +08:00
|
|
|
// CHECK: std.view %[[ALLOC_MEM]][][%arg0, %arg0] : memref<2048xi8> to memref<?x?xf32, #[[VIEW_MAP2]]>
|
2019-11-08 02:19:54 +08:00
|
|
|
%3 = view %0[%c15][%arg0, %arg0]
|
2019-11-08 00:04:33 +08:00
|
|
|
: memref<2048xi8> to memref<?x?xf32, #TEST_VIEW_MAP0>
|
[MLIR] Propagate input side effect information
Summary:
Previously operations like std.load created methods for obtaining their
effects but did not inherit from the SideEffect interfaces when their
parameters were decorated with the information. The resulting situation
was that passes had no information on the SideEffects of std.load/store
and had to treat them more cautiously. This adds the inheritance
information when creating the methods.
As a side effect, many tests are modified, as they were using std.load
for testing and this oepration would be folded away as part of pattern
rewriting. Tests are modified to use store or to reutn the result of the
std.load.
Reviewers: mravishankar, antiagainst, nicolasvasilache, herhut, aartbik, ftynse!
Subscribers: mehdi_amini, rriddle, jpienaar, shauheen, antiagainst, nicolasvasilache, csigg, arpith-jacob, mgester, lucyrfox, liufengdb, Joonsoo, bader, grosul1, frgossen, Kayjukh, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D78802
2020-04-24 00:13:44 +08:00
|
|
|
%r2 = load %3[%c0, %c0] : memref<?x?xf32, #TEST_VIEW_MAP0>
|
2019-11-08 02:19:54 +08:00
|
|
|
|
2019-11-08 00:04:33 +08:00
|
|
|
// Test: fold one constant dim, no offset, should update with constant
|
|
|
|
// stride on dim 1, but leave dynamic stride on dim 0.
|
2020-01-09 04:28:11 +08:00
|
|
|
// CHECK: std.view %[[ALLOC_MEM]][][%arg0, %arg0] : memref<2048xi8> to memref<?x?x7xf32, #[[VIEW_MAP3]]>
|
2019-11-08 02:19:54 +08:00
|
|
|
%4 = view %0[][%arg0, %arg0, %c7]
|
2019-11-08 00:04:33 +08:00
|
|
|
: memref<2048xi8> to memref<?x?x?xf32, #TEST_VIEW_MAP1>
|
[MLIR] Propagate input side effect information
Summary:
Previously operations like std.load created methods for obtaining their
effects but did not inherit from the SideEffect interfaces when their
parameters were decorated with the information. The resulting situation
was that passes had no information on the SideEffects of std.load/store
and had to treat them more cautiously. This adds the inheritance
information when creating the methods.
As a side effect, many tests are modified, as they were using std.load
for testing and this oepration would be folded away as part of pattern
rewriting. Tests are modified to use store or to reutn the result of the
std.load.
Reviewers: mravishankar, antiagainst, nicolasvasilache, herhut, aartbik, ftynse!
Subscribers: mehdi_amini, rriddle, jpienaar, shauheen, antiagainst, nicolasvasilache, csigg, arpith-jacob, mgester, lucyrfox, liufengdb, Joonsoo, bader, grosul1, frgossen, Kayjukh, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D78802
2020-04-24 00:13:44 +08:00
|
|
|
%r3 = load %4[%c0, %c0, %c0] : memref<?x?x?xf32, #TEST_VIEW_MAP1>
|
2019-11-08 00:04:33 +08:00
|
|
|
|
|
|
|
// Test: preserve an existing static dim size while folding a dynamic
|
|
|
|
// dimension and offset.
|
Canonicalize static alloc followed by memref_cast and std.view
Summary: Rewrite alloc, memref_cast, std.view into allo, std.view by droping memref_cast.
Reviewers: nicolasvasilache
Subscribers: mehdi_amini, rriddle, jpienaar, burmako, shauheen, antiagainst, arpith-jacob, mgester, lucyrfox, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D72379
2020-01-08 09:46:40 +08:00
|
|
|
// CHECK: std.view %[[ALLOC_MEM]][][] : memref<2048xi8> to memref<7x4xf32, #[[VIEW_MAP4]]>
|
|
|
|
%5 = view %0[%c15][%c7] : memref<2048xi8> to memref<?x4xf32, #TEST_VIEW_MAP2>
|
[MLIR] Propagate input side effect information
Summary:
Previously operations like std.load created methods for obtaining their
effects but did not inherit from the SideEffect interfaces when their
parameters were decorated with the information. The resulting situation
was that passes had no information on the SideEffects of std.load/store
and had to treat them more cautiously. This adds the inheritance
information when creating the methods.
As a side effect, many tests are modified, as they were using std.load
for testing and this oepration would be folded away as part of pattern
rewriting. Tests are modified to use store or to reutn the result of the
std.load.
Reviewers: mravishankar, antiagainst, nicolasvasilache, herhut, aartbik, ftynse!
Subscribers: mehdi_amini, rriddle, jpienaar, shauheen, antiagainst, nicolasvasilache, csigg, arpith-jacob, mgester, lucyrfox, liufengdb, Joonsoo, bader, grosul1, frgossen, Kayjukh, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D78802
2020-04-24 00:13:44 +08:00
|
|
|
%r4 = load %5[%c0, %c0] : memref<?x4xf32, #TEST_VIEW_MAP2>
|
2019-11-08 00:04:33 +08:00
|
|
|
|
Canonicalize static alloc followed by memref_cast and std.view
Summary: Rewrite alloc, memref_cast, std.view into allo, std.view by droping memref_cast.
Reviewers: nicolasvasilache
Subscribers: mehdi_amini, rriddle, jpienaar, burmako, shauheen, antiagainst, arpith-jacob, mgester, lucyrfox, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D72379
2020-01-08 09:46:40 +08:00
|
|
|
// Test: folding static alloc and memref_cast into a view.
|
[mlir] : Fix ViewOp shape folder for identity affine maps
Summary: Fix the ViewOpShapeFolder in case of no affine mapping associated with a Memref construct identity mapping.
Reviewers: nicolasvasilache
Subscribers: mehdi_amini, rriddle, jpienaar, burmako, shauheen, antiagainst, arpith-jacob, mgester, lucyrfox, liufengdb, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D72735
2020-01-15 05:18:05 +08:00
|
|
|
// CHECK: std.view %[[ALLOC_MEM]][][] : memref<2048xi8> to memref<15x7xf32, #[[VIEW_MAP5]]>
|
Canonicalize static alloc followed by memref_cast and std.view
Summary: Rewrite alloc, memref_cast, std.view into allo, std.view by droping memref_cast.
Reviewers: nicolasvasilache
Subscribers: mehdi_amini, rriddle, jpienaar, burmako, shauheen, antiagainst, arpith-jacob, mgester, lucyrfox, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D72379
2020-01-08 09:46:40 +08:00
|
|
|
%6 = memref_cast %0 : memref<2048xi8> to memref<?xi8>
|
|
|
|
%7 = view %6[%c15][%c7] : memref<?xi8> to memref<?x?xf32>
|
[MLIR] Propagate input side effect information
Summary:
Previously operations like std.load created methods for obtaining their
effects but did not inherit from the SideEffect interfaces when their
parameters were decorated with the information. The resulting situation
was that passes had no information on the SideEffects of std.load/store
and had to treat them more cautiously. This adds the inheritance
information when creating the methods.
As a side effect, many tests are modified, as they were using std.load
for testing and this oepration would be folded away as part of pattern
rewriting. Tests are modified to use store or to reutn the result of the
std.load.
Reviewers: mravishankar, antiagainst, nicolasvasilache, herhut, aartbik, ftynse!
Subscribers: mehdi_amini, rriddle, jpienaar, shauheen, antiagainst, nicolasvasilache, csigg, arpith-jacob, mgester, lucyrfox, liufengdb, Joonsoo, bader, grosul1, frgossen, Kayjukh, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D78802
2020-04-24 00:13:44 +08:00
|
|
|
%r5 = load %7[%c0, %c0] : memref<?x?xf32>
|
|
|
|
return %r0, %r1, %r2, %r3, %r4, %r5 : f32, f32, f32, f32, f32, f32
|
2019-11-08 00:04:33 +08:00
|
|
|
}
|
2019-11-15 04:22:28 +08:00
|
|
|
|
2019-11-15 19:59:57 +08:00
|
|
|
// -----
|
|
|
|
|
2020-01-14 05:12:37 +08:00
|
|
|
// CHECK-DAG: #[[BASE_MAP0:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>
|
|
|
|
// CHECK-DAG: #[[SUBVIEW_MAP0:map[0-9]+]] = affine_map<(d0, d1, d2)[s0] -> (d0 * 64 + s0 + d1 * 4 + d2)>
|
|
|
|
// CHECK-DAG: #[[SUBVIEW_MAP1:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2 + 79)>
|
|
|
|
// CHECK-DAG: #[[SUBVIEW_MAP2:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d0 * 128 + d1 * 28 + d2 * 11)>
|
|
|
|
// CHECK-DAG: #[[SUBVIEW_MAP3:map[0-9]+]] = affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3)>
|
|
|
|
// CHECK-DAG: #[[SUBVIEW_MAP4:map[0-9]+]] = affine_map<(d0, d1, d2)[s0] -> (d0 * 128 + s0 + d1 * 28 + d2 * 11)>
|
|
|
|
// CHECK-DAG: #[[SUBVIEW_MAP5:map[0-9]+]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s0 + d1 * s1 + d2 * s2 + 79)>
|
|
|
|
// CHECK-DAG: #[[SUBVIEW_MAP6:map[0-9]+]] = affine_map<(d0, d1)[s0] -> (d0 * 4 + s0 + d1)>
|
|
|
|
// CHECK-DAG: #[[SUBVIEW_MAP7:map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 4 + d1 + 12)>
|
2019-11-15 04:22:28 +08:00
|
|
|
|
|
|
|
// CHECK-LABEL: func @subview
|
2019-11-23 03:41:29 +08:00
|
|
|
// CHECK-SAME: %[[ARG0:.*]]: index, %[[ARG1:.*]]: index
|
|
|
|
func @subview(%arg0 : index, %arg1 : index) -> (index, index) {
|
2019-11-18 20:31:02 +08:00
|
|
|
// CHECK: %[[C0:.*]] = constant 0 : index
|
2019-11-15 04:22:28 +08:00
|
|
|
%c0 = constant 0 : index
|
2019-11-18 20:31:02 +08:00
|
|
|
// CHECK: %[[C1:.*]] = constant 1 : index
|
2019-11-15 04:22:28 +08:00
|
|
|
%c1 = constant 1 : index
|
2019-11-23 03:41:29 +08:00
|
|
|
// CHECK: %[[C2:.*]] = constant 2 : index
|
|
|
|
%c2 = constant 2 : index
|
2019-11-18 20:31:02 +08:00
|
|
|
// CHECK: %[[C7:.*]] = constant 7 : index
|
2019-11-15 04:22:28 +08:00
|
|
|
%c7 = constant 7 : index
|
2019-11-18 20:31:02 +08:00
|
|
|
// CHECK: %[[C11:.*]] = constant 11 : index
|
2019-11-15 04:22:28 +08:00
|
|
|
%c11 = constant 11 : index
|
|
|
|
%c15 = constant 15 : index
|
|
|
|
|
2019-11-15 19:59:57 +08:00
|
|
|
// CHECK: %[[ALLOC0:.*]] = alloc()
|
2020-01-14 05:12:37 +08:00
|
|
|
%0 = alloc() : memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>>
|
2019-11-15 04:22:28 +08:00
|
|
|
|
|
|
|
// Test: subview with constant base memref and constant operands is folded.
|
2019-11-19 07:00:34 +08:00
|
|
|
// Note that the subview uses the base memrefs layout map because it used
|
|
|
|
// zero offset and unit stride arguments.
|
2020-03-06 04:40:53 +08:00
|
|
|
// CHECK: subview %[[ALLOC0]][] [] [] : memref<8x16x4xf32, #[[BASE_MAP0]]> to memref<7x11x2xf32, #[[BASE_MAP0]]>
|
|
|
|
%1 = subview %0[%c0, %c0, %c0] [%c7, %c11, %c2] [%c1, %c1, %c1]
|
2020-01-14 05:12:37 +08:00
|
|
|
: memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>> to
|
2019-11-15 04:22:28 +08:00
|
|
|
memref<?x?x?xf32,
|
2020-01-14 05:12:37 +08:00
|
|
|
affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + d1 * s2 + d2 * s3 + s0)>>
|
[MLIR] Propagate input side effect information
Summary:
Previously operations like std.load created methods for obtaining their
effects but did not inherit from the SideEffect interfaces when their
parameters were decorated with the information. The resulting situation
was that passes had no information on the SideEffects of std.load/store
and had to treat them more cautiously. This adds the inheritance
information when creating the methods.
As a side effect, many tests are modified, as they were using std.load
for testing and this oepration would be folded away as part of pattern
rewriting. Tests are modified to use store or to reutn the result of the
std.load.
Reviewers: mravishankar, antiagainst, nicolasvasilache, herhut, aartbik, ftynse!
Subscribers: mehdi_amini, rriddle, jpienaar, shauheen, antiagainst, nicolasvasilache, csigg, arpith-jacob, mgester, lucyrfox, liufengdb, Joonsoo, bader, grosul1, frgossen, Kayjukh, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D78802
2020-04-24 00:13:44 +08:00
|
|
|
%v0 = load %1[%c0, %c0, %c0] : memref<?x?x?xf32,
|
2020-01-14 05:12:37 +08:00
|
|
|
affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + d1 * s2 + d2 * s3 + s0)>>
|
2019-11-15 04:22:28 +08:00
|
|
|
|
|
|
|
// Test: subview with one dynamic operand should not be folded.
|
2020-03-06 04:40:53 +08:00
|
|
|
// CHECK: subview %[[ALLOC0]][%[[C0]], %[[ARG0]], %[[C0]]] [] [] : memref<8x16x4xf32, #[[BASE_MAP0]]> to memref<7x11x15xf32, #[[SUBVIEW_MAP0]]>
|
|
|
|
%2 = subview %0[%c0, %arg0, %c0] [%c7, %c11, %c15] [%c1, %c1, %c1]
|
2020-01-14 05:12:37 +08:00
|
|
|
: memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>> to
|
2019-11-15 04:22:28 +08:00
|
|
|
memref<?x?x?xf32,
|
2020-01-14 05:12:37 +08:00
|
|
|
affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + d1 * s2 + d2 * s3 + s0)>>
|
[MLIR] Propagate input side effect information
Summary:
Previously operations like std.load created methods for obtaining their
effects but did not inherit from the SideEffect interfaces when their
parameters were decorated with the information. The resulting situation
was that passes had no information on the SideEffects of std.load/store
and had to treat them more cautiously. This adds the inheritance
information when creating the methods.
As a side effect, many tests are modified, as they were using std.load
for testing and this oepration would be folded away as part of pattern
rewriting. Tests are modified to use store or to reutn the result of the
std.load.
Reviewers: mravishankar, antiagainst, nicolasvasilache, herhut, aartbik, ftynse!
Subscribers: mehdi_amini, rriddle, jpienaar, shauheen, antiagainst, nicolasvasilache, csigg, arpith-jacob, mgester, lucyrfox, liufengdb, Joonsoo, bader, grosul1, frgossen, Kayjukh, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D78802
2020-04-24 00:13:44 +08:00
|
|
|
store %v0, %2[%c0, %c0, %c0] : memref<?x?x?xf32,
|
2020-01-14 05:12:37 +08:00
|
|
|
affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + d1 * s2 + d2 * s3 + s0)>>
|
2019-11-15 04:22:28 +08:00
|
|
|
|
2019-11-23 03:41:29 +08:00
|
|
|
// CHECK: %[[ALLOC1:.*]] = alloc(%[[ARG0]])
|
2020-01-14 05:12:37 +08:00
|
|
|
%3 = alloc(%arg0) : memref<?x16x4xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>>
|
2019-11-23 03:41:29 +08:00
|
|
|
// Test: subview with constant operands but dynamic base memref is folded as long as the strides and offset of the base memref are static.
|
2020-03-06 04:40:53 +08:00
|
|
|
// CHECK: subview %[[ALLOC1]][] [] [] : memref<?x16x4xf32, #[[BASE_MAP0]]> to memref<7x11x15xf32, #[[BASE_MAP0]]>
|
|
|
|
%4 = subview %3[%c0, %c0, %c0] [%c7, %c11, %c15] [%c1, %c1, %c1]
|
2020-01-14 05:12:37 +08:00
|
|
|
: memref<?x16x4xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>> to
|
2019-11-15 04:22:28 +08:00
|
|
|
memref<?x?x?xf32,
|
2020-01-14 05:12:37 +08:00
|
|
|
affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + d1 * s2 + d2 * s3 + s0)>>
|
[MLIR] Propagate input side effect information
Summary:
Previously operations like std.load created methods for obtaining their
effects but did not inherit from the SideEffect interfaces when their
parameters were decorated with the information. The resulting situation
was that passes had no information on the SideEffects of std.load/store
and had to treat them more cautiously. This adds the inheritance
information when creating the methods.
As a side effect, many tests are modified, as they were using std.load
for testing and this oepration would be folded away as part of pattern
rewriting. Tests are modified to use store or to reutn the result of the
std.load.
Reviewers: mravishankar, antiagainst, nicolasvasilache, herhut, aartbik, ftynse!
Subscribers: mehdi_amini, rriddle, jpienaar, shauheen, antiagainst, nicolasvasilache, csigg, arpith-jacob, mgester, lucyrfox, liufengdb, Joonsoo, bader, grosul1, frgossen, Kayjukh, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D78802
2020-04-24 00:13:44 +08:00
|
|
|
store %v0, %4[%c0, %c0, %c0] : memref<?x?x?xf32,
|
2020-01-14 05:12:37 +08:00
|
|
|
affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + d1 * s2 + d2 * s3 + s0)>>
|
2019-11-15 04:22:28 +08:00
|
|
|
|
2019-12-06 21:59:06 +08:00
|
|
|
// Test: subview offset operands are folded correctly w.r.t. base strides.
|
2020-03-06 04:40:53 +08:00
|
|
|
// CHECK: subview %[[ALLOC0]][] [] [] : memref<8x16x4xf32, #[[BASE_MAP0]]> to memref<7x11x2xf32, #[[SUBVIEW_MAP1]]>
|
|
|
|
%5 = subview %0[%c1, %c2, %c7] [%c7, %c11, %c2] [%c1, %c1, %c1]
|
2020-01-14 05:12:37 +08:00
|
|
|
: memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>> to
|
2019-11-19 07:00:34 +08:00
|
|
|
memref<?x?x?xf32,
|
2020-01-14 05:12:37 +08:00
|
|
|
affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + d1 * s2 + d2 * s3 + s0)>>
|
[MLIR] Propagate input side effect information
Summary:
Previously operations like std.load created methods for obtaining their
effects but did not inherit from the SideEffect interfaces when their
parameters were decorated with the information. The resulting situation
was that passes had no information on the SideEffects of std.load/store
and had to treat them more cautiously. This adds the inheritance
information when creating the methods.
As a side effect, many tests are modified, as they were using std.load
for testing and this oepration would be folded away as part of pattern
rewriting. Tests are modified to use store or to reutn the result of the
std.load.
Reviewers: mravishankar, antiagainst, nicolasvasilache, herhut, aartbik, ftynse!
Subscribers: mehdi_amini, rriddle, jpienaar, shauheen, antiagainst, nicolasvasilache, csigg, arpith-jacob, mgester, lucyrfox, liufengdb, Joonsoo, bader, grosul1, frgossen, Kayjukh, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D78802
2020-04-24 00:13:44 +08:00
|
|
|
store %v0, %5[%c0, %c0, %c0] : memref<?x?x?xf32,
|
2020-01-14 05:12:37 +08:00
|
|
|
affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + d1 * s2 + d2 * s3 + s0)>>
|
2019-11-19 07:00:34 +08:00
|
|
|
|
|
|
|
// Test: subview stride operands are folded correctly w.r.t. base strides.
|
2020-03-06 04:40:53 +08:00
|
|
|
// CHECK: subview %[[ALLOC0]][] [] [] : memref<8x16x4xf32, #[[BASE_MAP0]]> to memref<7x11x2xf32, #[[SUBVIEW_MAP2]]>
|
|
|
|
%6 = subview %0[%c0, %c0, %c0] [%c7, %c11, %c2] [%c2, %c7, %c11]
|
2020-01-14 05:12:37 +08:00
|
|
|
: memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>> to
|
2019-11-19 07:00:34 +08:00
|
|
|
memref<?x?x?xf32,
|
2020-01-14 05:12:37 +08:00
|
|
|
affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + d1 * s2 + d2 * s3 + s0)>>
|
[MLIR] Propagate input side effect information
Summary:
Previously operations like std.load created methods for obtaining their
effects but did not inherit from the SideEffect interfaces when their
parameters were decorated with the information. The resulting situation
was that passes had no information on the SideEffects of std.load/store
and had to treat them more cautiously. This adds the inheritance
information when creating the methods.
As a side effect, many tests are modified, as they were using std.load
for testing and this oepration would be folded away as part of pattern
rewriting. Tests are modified to use store or to reutn the result of the
std.load.
Reviewers: mravishankar, antiagainst, nicolasvasilache, herhut, aartbik, ftynse!
Subscribers: mehdi_amini, rriddle, jpienaar, shauheen, antiagainst, nicolasvasilache, csigg, arpith-jacob, mgester, lucyrfox, liufengdb, Joonsoo, bader, grosul1, frgossen, Kayjukh, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D78802
2020-04-24 00:13:44 +08:00
|
|
|
store %v0, %6[%c0, %c0, %c0] : memref<?x?x?xf32,
|
2020-01-14 05:12:37 +08:00
|
|
|
affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + d1 * s2 + d2 * s3 + s0)>>
|
2019-11-19 07:00:34 +08:00
|
|
|
|
2019-11-23 03:41:29 +08:00
|
|
|
// Test: subview shape are folded, but offsets and strides are not even if base memref is static
|
2020-03-06 04:40:53 +08:00
|
|
|
// CHECK: subview %[[ALLOC0]][%[[ARG0]], %[[ARG0]], %[[ARG0]]] [] [%[[ARG1]], %[[ARG1]], %[[ARG1]]] : memref<8x16x4xf32, #[[BASE_MAP0]]> to memref<7x11x2xf32, #[[SUBVIEW_MAP3]]>
|
|
|
|
%10 = subview %0[%arg0, %arg0, %arg0] [%c7, %c11, %c2] [%arg1, %arg1, %arg1] : memref<8x16x4xf32, offset:0, strides:[64, 4, 1]> to memref<?x?x?xf32, offset: ?, strides: [?, ?, ?]>
|
[MLIR] Propagate input side effect information
Summary:
Previously operations like std.load created methods for obtaining their
effects but did not inherit from the SideEffect interfaces when their
parameters were decorated with the information. The resulting situation
was that passes had no information on the SideEffects of std.load/store
and had to treat them more cautiously. This adds the inheritance
information when creating the methods.
As a side effect, many tests are modified, as they were using std.load
for testing and this oepration would be folded away as part of pattern
rewriting. Tests are modified to use store or to reutn the result of the
std.load.
Reviewers: mravishankar, antiagainst, nicolasvasilache, herhut, aartbik, ftynse!
Subscribers: mehdi_amini, rriddle, jpienaar, shauheen, antiagainst, nicolasvasilache, csigg, arpith-jacob, mgester, lucyrfox, liufengdb, Joonsoo, bader, grosul1, frgossen, Kayjukh, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D78802
2020-04-24 00:13:44 +08:00
|
|
|
store %v0, %10[%arg1, %arg1, %arg1] : memref<?x?x?xf32, offset: ?, strides: [?, ?, ?]>
|
2019-11-23 03:41:29 +08:00
|
|
|
|
|
|
|
// Test: subview strides are folded, but offsets and shape are not even if base memref is static
|
2020-03-06 04:40:53 +08:00
|
|
|
// CHECK: subview %[[ALLOC0]][%[[ARG0]], %[[ARG0]], %[[ARG0]]] [%[[ARG1]], %[[ARG1]], %[[ARG1]]] [] : memref<8x16x4xf32, #[[BASE_MAP0]]> to memref<?x?x?xf32, #[[SUBVIEW_MAP4]]
|
|
|
|
%11 = subview %0[%arg0, %arg0, %arg0] [%arg1, %arg1, %arg1] [%c2, %c7, %c11] : memref<8x16x4xf32, offset:0, strides:[64, 4, 1]> to memref<?x?x?xf32, offset: ?, strides: [?, ?, ?]>
|
[MLIR] Propagate input side effect information
Summary:
Previously operations like std.load created methods for obtaining their
effects but did not inherit from the SideEffect interfaces when their
parameters were decorated with the information. The resulting situation
was that passes had no information on the SideEffects of std.load/store
and had to treat them more cautiously. This adds the inheritance
information when creating the methods.
As a side effect, many tests are modified, as they were using std.load
for testing and this oepration would be folded away as part of pattern
rewriting. Tests are modified to use store or to reutn the result of the
std.load.
Reviewers: mravishankar, antiagainst, nicolasvasilache, herhut, aartbik, ftynse!
Subscribers: mehdi_amini, rriddle, jpienaar, shauheen, antiagainst, nicolasvasilache, csigg, arpith-jacob, mgester, lucyrfox, liufengdb, Joonsoo, bader, grosul1, frgossen, Kayjukh, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D78802
2020-04-24 00:13:44 +08:00
|
|
|
store %v0, %11[%arg0, %arg0, %arg0] : memref<?x?x?xf32, offset: ?, strides: [?, ?, ?]>
|
2019-11-23 03:41:29 +08:00
|
|
|
|
|
|
|
// Test: subview offsets are folded, but strides and shape are not even if base memref is static
|
2020-03-06 04:40:53 +08:00
|
|
|
// CHECK: subview %[[ALLOC0]][] [%[[ARG1]], %[[ARG1]], %[[ARG1]]] [%[[ARG0]], %[[ARG0]], %[[ARG0]]] : memref<8x16x4xf32, #[[BASE_MAP0]]> to memref<?x?x?xf32, #[[SUBVIEW_MAP5]]
|
|
|
|
%13 = subview %0[%c1, %c2, %c7] [%arg1, %arg1, %arg1] [%arg0, %arg0, %arg0] : memref<8x16x4xf32, offset:0, strides:[64, 4, 1]> to memref<?x?x?xf32, offset: ?, strides: [?, ?, ?]>
|
[MLIR] Propagate input side effect information
Summary:
Previously operations like std.load created methods for obtaining their
effects but did not inherit from the SideEffect interfaces when their
parameters were decorated with the information. The resulting situation
was that passes had no information on the SideEffects of std.load/store
and had to treat them more cautiously. This adds the inheritance
information when creating the methods.
As a side effect, many tests are modified, as they were using std.load
for testing and this oepration would be folded away as part of pattern
rewriting. Tests are modified to use store or to reutn the result of the
std.load.
Reviewers: mravishankar, antiagainst, nicolasvasilache, herhut, aartbik, ftynse!
Subscribers: mehdi_amini, rriddle, jpienaar, shauheen, antiagainst, nicolasvasilache, csigg, arpith-jacob, mgester, lucyrfox, liufengdb, Joonsoo, bader, grosul1, frgossen, Kayjukh, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D78802
2020-04-24 00:13:44 +08:00
|
|
|
store %v0, %13[%arg1, %arg1, %arg1] : memref<?x?x?xf32, offset: ?, strides: [?, ?, ?]>
|
2019-11-23 03:41:29 +08:00
|
|
|
|
|
|
|
// CHECK: %[[ALLOC2:.*]] = alloc(%[[ARG0]], %[[ARG0]], %[[ARG1]])
|
|
|
|
%14 = alloc(%arg0, %arg0, %arg1) : memref<?x?x?xf32>
|
|
|
|
// Test: subview shape are folded, even if base memref is not static
|
2020-03-06 04:40:53 +08:00
|
|
|
// CHECK: subview %[[ALLOC2]][%[[ARG0]], %[[ARG0]], %[[ARG0]]] [] [%[[ARG1]], %[[ARG1]], %[[ARG1]]] : memref<?x?x?xf32> to memref<7x11x2xf32, #[[SUBVIEW_MAP3]]>
|
|
|
|
%15 = subview %14[%arg0, %arg0, %arg0] [%c7, %c11, %c2] [%arg1, %arg1, %arg1] : memref<?x?x?xf32> to memref<?x?x?xf32, offset: ?, strides: [?, ?, ?]>
|
[MLIR] Propagate input side effect information
Summary:
Previously operations like std.load created methods for obtaining their
effects but did not inherit from the SideEffect interfaces when their
parameters were decorated with the information. The resulting situation
was that passes had no information on the SideEffects of std.load/store
and had to treat them more cautiously. This adds the inheritance
information when creating the methods.
As a side effect, many tests are modified, as they were using std.load
for testing and this oepration would be folded away as part of pattern
rewriting. Tests are modified to use store or to reutn the result of the
std.load.
Reviewers: mravishankar, antiagainst, nicolasvasilache, herhut, aartbik, ftynse!
Subscribers: mehdi_amini, rriddle, jpienaar, shauheen, antiagainst, nicolasvasilache, csigg, arpith-jacob, mgester, lucyrfox, liufengdb, Joonsoo, bader, grosul1, frgossen, Kayjukh, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D78802
2020-04-24 00:13:44 +08:00
|
|
|
store %v0, %15[%arg1, %arg1, %arg1] : memref<?x?x?xf32, offset: ?, strides: [?, ?, ?]>
|
2019-11-23 03:41:29 +08:00
|
|
|
|
|
|
|
// TEST: subview strides are not folded when the base memref is not static
|
2020-03-06 04:40:53 +08:00
|
|
|
// CHECK: subview %[[ALLOC2]][%[[ARG0]], %[[ARG0]], %[[ARG0]]] [%[[ARG1]], %[[ARG1]], %[[ARG1]]] [%[[C2]], %[[C2]], %[[C2]]] : memref<?x?x?xf32> to memref<?x?x?xf32, #[[SUBVIEW_MAP3]]
|
|
|
|
%16 = subview %14[%arg0, %arg0, %arg0] [%arg1, %arg1, %arg1] [%c2, %c2, %c2] : memref<?x?x?xf32> to memref<?x?x?xf32, offset: ?, strides: [?, ?, ?]>
|
[MLIR] Propagate input side effect information
Summary:
Previously operations like std.load created methods for obtaining their
effects but did not inherit from the SideEffect interfaces when their
parameters were decorated with the information. The resulting situation
was that passes had no information on the SideEffects of std.load/store
and had to treat them more cautiously. This adds the inheritance
information when creating the methods.
As a side effect, many tests are modified, as they were using std.load
for testing and this oepration would be folded away as part of pattern
rewriting. Tests are modified to use store or to reutn the result of the
std.load.
Reviewers: mravishankar, antiagainst, nicolasvasilache, herhut, aartbik, ftynse!
Subscribers: mehdi_amini, rriddle, jpienaar, shauheen, antiagainst, nicolasvasilache, csigg, arpith-jacob, mgester, lucyrfox, liufengdb, Joonsoo, bader, grosul1, frgossen, Kayjukh, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D78802
2020-04-24 00:13:44 +08:00
|
|
|
store %v0, %16[%arg0, %arg0, %arg0] : memref<?x?x?xf32, offset: ?, strides: [?, ?, ?]>
|
2019-11-23 03:41:29 +08:00
|
|
|
|
|
|
|
// TEST: subview offsets are not folded when the base memref is not static
|
2020-03-06 04:40:53 +08:00
|
|
|
// CHECK: subview %[[ALLOC2]][%[[C1]], %[[C1]], %[[C1]]] [%[[ARG0]], %[[ARG0]], %[[ARG0]]] [%[[ARG1]], %[[ARG1]], %[[ARG1]]] : memref<?x?x?xf32> to memref<?x?x?xf32, #[[SUBVIEW_MAP3]]
|
|
|
|
%17 = subview %14[%c1, %c1, %c1] [%arg0, %arg0, %arg0] [%arg1, %arg1, %arg1] : memref<?x?x?xf32> to memref<?x?x?xf32, offset: ?, strides: [?, ?, ?]>
|
[MLIR] Propagate input side effect information
Summary:
Previously operations like std.load created methods for obtaining their
effects but did not inherit from the SideEffect interfaces when their
parameters were decorated with the information. The resulting situation
was that passes had no information on the SideEffects of std.load/store
and had to treat them more cautiously. This adds the inheritance
information when creating the methods.
As a side effect, many tests are modified, as they were using std.load
for testing and this operation would be folded away as part of pattern
rewriting. Tests are modified to use store or to return the result of the
std.load.
Reviewers: mravishankar, antiagainst, nicolasvasilache, herhut, aartbik, ftynse!
Subscribers: mehdi_amini, rriddle, jpienaar, shauheen, antiagainst, nicolasvasilache, csigg, arpith-jacob, mgester, lucyrfox, liufengdb, Joonsoo, bader, grosul1, frgossen, Kayjukh, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D78802
2020-04-24 00:13:44 +08:00
|
|
|
store %v0, %17[%arg0, %arg0, %arg0] : memref<?x?x?xf32, offset: ?, strides: [?, ?, ?]>
|
2019-11-23 03:41:29 +08:00
|
|
|
|
|
|
|
// CHECK: %[[ALLOC3:.*]] = alloc() : memref<12x4xf32>
|
|
|
|
%18 = alloc() : memref<12x4xf32>
|
|
|
|
%c4 = constant 4 : index
|
|
|
|
|
|
|
|
// TEST: subview strides are maintained when sizes are folded
|
2020-03-06 04:40:53 +08:00
|
|
|
// CHECK: subview %[[ALLOC3]][%arg1, %arg1] [] [] : memref<12x4xf32> to memref<2x4xf32, #[[SUBVIEW_MAP6]]>
|
|
|
|
%19 = subview %18[%arg1, %arg1] [%c2, %c4] [] : memref<12x4xf32> to memref<?x?xf32, offset: ?, strides:[4, 1]>
|
[MLIR] Propagate input side effect information
Summary:
Previously operations like std.load created methods for obtaining their
effects but did not inherit from the SideEffect interfaces when their
parameters were decorated with the information. The resulting situation
was that passes had no information on the SideEffects of std.load/store
and had to treat them more cautiously. This adds the inheritance
information when creating the methods.
As a side effect, many tests are modified, as they were using std.load
for testing and this oepration would be folded away as part of pattern
rewriting. Tests are modified to use store or to reutn the result of the
std.load.
Reviewers: mravishankar, antiagainst, nicolasvasilache, herhut, aartbik, ftynse!
Subscribers: mehdi_amini, rriddle, jpienaar, shauheen, antiagainst, nicolasvasilache, csigg, arpith-jacob, mgester, lucyrfox, liufengdb, Joonsoo, bader, grosul1, frgossen, Kayjukh, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D78802
2020-04-24 00:13:44 +08:00
|
|
|
store %v0, %19[%arg1, %arg1] : memref<?x?xf32, offset: ?, strides:[4, 1]>
|
2019-11-23 03:41:29 +08:00
|
|
|
|
|
|
|
// TEST: subview strides and sizes are maintained when offsets are folded
|
2020-03-06 04:40:53 +08:00
|
|
|
// CHECK: subview %[[ALLOC3]][] [] [] : memref<12x4xf32> to memref<12x4xf32, #[[SUBVIEW_MAP7]]>
|
|
|
|
%20 = subview %18[%c2, %c4] [] [] : memref<12x4xf32> to memref<12x4xf32, offset: ?, strides:[4, 1]>
|
[MLIR] Propagate input side effect information
Summary:
Previously operations like std.load created methods for obtaining their
effects but did not inherit from the SideEffect interfaces when their
parameters were decorated with the information. The resulting situation
was that passes had no information on the SideEffects of std.load/store
and had to treat them more cautiously. This adds the inheritance
information when creating the methods.
As a side effect, many tests are modified, as they were using std.load
for testing and this oepration would be folded away as part of pattern
rewriting. Tests are modified to use store or to reutn the result of the
std.load.
Reviewers: mravishankar, antiagainst, nicolasvasilache, herhut, aartbik, ftynse!
Subscribers: mehdi_amini, rriddle, jpienaar, shauheen, antiagainst, nicolasvasilache, csigg, arpith-jacob, mgester, lucyrfox, liufengdb, Joonsoo, bader, grosul1, frgossen, Kayjukh, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D78802
2020-04-24 00:13:44 +08:00
|
|
|
store %v0, %20[%arg1, %arg1] : memref<12x4xf32, offset: ?, strides:[4, 1]>
|
2019-11-23 03:41:29 +08:00
|
|
|
|
2019-11-18 20:31:02 +08:00
|
|
|
// Test: dim on subview is rewritten to size operand.
|
2019-11-19 07:00:34 +08:00
|
|
|
%7 = dim %4, 0 : memref<?x?x?xf32,
|
2020-01-14 05:12:37 +08:00
|
|
|
affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + d1 * s2 + d2 * s3 + s0)>>
|
2019-11-19 07:00:34 +08:00
|
|
|
%8 = dim %4, 1 : memref<?x?x?xf32,
|
2020-01-14 05:12:37 +08:00
|
|
|
affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + d1 * s2 + d2 * s3 + s0)>>
|
2019-11-18 20:31:02 +08:00
|
|
|
|
|
|
|
// CHECK: return %[[C7]], %[[C11]]
|
2019-11-19 07:00:34 +08:00
|
|
|
return %7, %8 : index, index
|
2019-11-15 04:22:28 +08:00
|
|
|
}
|
2019-12-05 08:15:10 +08:00
|
|
|
|
|
|
|
// CHECK-LABEL: func @index_cast
|
|
|
|
// CHECK-SAME: %[[ARG_0:arg[0-9]+]]: i16
|
|
|
|
func @index_cast(%arg0: i16) -> (i16) {
|
|
|
|
%11 = index_cast %arg0 : i16 to index
|
|
|
|
%12 = index_cast %11 : index to i16
|
|
|
|
// CHECK: return %[[ARG_0]] : i16
|
|
|
|
return %12 : i16
|
|
|
|
}
|
2019-12-11 03:59:13 +08:00
|
|
|
|
|
|
|
// CHECK-LABEL: func @index_cast_fold
|
|
|
|
func @index_cast_fold() -> (i16, index) {
|
|
|
|
%c4 = constant 4 : index
|
|
|
|
%1 = index_cast %c4 : index to i16
|
|
|
|
%c4_i16 = constant 4 : i16
|
|
|
|
%2 = index_cast %c4_i16 : i16 to index
|
|
|
|
// CHECK: %[[C4_I16:.*]] = constant 4 : i16
|
|
|
|
// CHECK: %[[C4:.*]] = constant 4 : index
|
|
|
|
// CHECK: return %[[C4_I16]], %[[C4]] : i16, index
|
|
|
|
return %1, %2 : i16, index
|
|
|
|
}
|
2020-03-31 03:45:40 +08:00
|
|
|
|
|
|
|
// CHECK-LABEL: func @remove_dead_else
|
|
|
|
func @remove_dead_else(%M : memref<100 x i32>) {
|
|
|
|
affine.for %i = 0 to 100 {
|
|
|
|
affine.load %M[%i] : memref<100xi32>
|
|
|
|
affine.if affine_set<(d0) : (d0 - 2 >= 0)>(%i) {
|
|
|
|
affine.for %j = 0 to 100 {
|
|
|
|
affine.load %M[%j] : memref<100xi32>
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// Nothing
|
|
|
|
}
|
|
|
|
affine.load %M[%i] : memref<100xi32>
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
// CHECK: affine.if
|
|
|
|
// CHECK-NEXT: affine.for
|
|
|
|
// CHECK-NEXT: affine.load
|
|
|
|
// CHECK-NEXT: }
|
|
|
|
// CHECK-NEXT: }
|
|
|
|
// CHECK-NEXT: affine.load
|