Add `and` and `or` bitwise operations to StandardOps.

This adds parsing, printing and some folding/canonicalization.

--

PiperOrigin-RevId: 242409840
Author: Stephan Herhut, 2019-04-08 00:00:46 -07:00 (committed by Mehdi Amini)
parent 89d5d36964
commit a8a5c06961
5 changed files with 227 additions and 0 deletions


@@ -1807,6 +1807,34 @@ TODO: In the distant future, this will accept
optional attributes for fast math, contraction, rounding mode, and other
controls.
#### 'and' operation

Bitwise integer and.

Syntax:

``` {.ebnf}
operation ::= ssa-id `=` `and` ssa-use `,` ssa-use `:` type
```

Examples:

```mlir {.mlir}
// Scalar integer bitwise and.
%a = and %b, %c : i64

// SIMD vector element-wise bitwise integer and.
%f = and %g, %h : vector<4xi32>

// Tensor element-wise bitwise integer and.
%x = and %y, %z : tensor<4x?xi8>
```

The `and` operation takes two operands and returns one result; all of these are required to have the same type. This type may be an integer scalar type, a vector whose element type is an integer, or a tensor of integers. It has no standard attributes.
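
A minimal sketch of the folds this change introduces for `and` (see the `AndOp::fold` implementation further down in this commit); the function and value names are illustrative only, and driving the rewrite through the canonicalizer is an assumption:

```mlir
// Hypothetical input; after folding, %p is replaced by %zero (and(x, 0) -> 0)
// and %q is replaced by %x (and(x, x) -> x).
func @and_fold_sketch(%x: i32) -> (i32, i32) {
  %zero = constant 0 : i32
  %p = and %x, %zero : i32
  %q = and %x, %x : i32
  return %p, %q : i32, i32
}
```
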
#### 'cmpi' operation
Examples:
@@ -2021,6 +2049,34 @@ TODO: In the distant future, this will accept
optional attributes for fast math, contraction, rounding mode, and other
controls.
#### 'or' operation

Bitwise integer or.

Syntax:

``` {.ebnf}
operation ::= ssa-id `=` `or` ssa-use `,` ssa-use `:` type
```

Examples:

```mlir {.mlir}
// Scalar integer bitwise or.
%a = or %b, %c : i64

// SIMD vector element-wise bitwise integer or.
%f = or %g, %h : vector<4xi32>

// Tensor element-wise bitwise integer or.
%x = or %y, %z : tensor<4x?xi8>
```

The `or` operation takes two operands and returns one result; all of these are required to have the same type. This type may be an integer scalar type, a vector whose element type is an integer, or a tensor of integers. It has no standard attributes.
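
Likewise, a minimal sketch of the `or` folds added in this commit (or(x, 0) -> x and or(x, x) -> x); names are illustrative and the canonicalizer is assumed to apply the folds:

```mlir
// Hypothetical input; after folding, both %p and %q are replaced by %x.
func @or_fold_sketch(%x: i32) -> (i32, i32) {
  %zero = constant 0 : i32
  %p = or %x, %zero : i32
  %q = or %x, %x : i32
  return %p, %q : i32, i32
}
```
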
#### 'remis' operation
Signed integer division remainder. Treats the leading bit as sign, i.e. `6 %


@@ -80,6 +80,12 @@ def AddIOp : IntArithmeticOp<"std.addi", [Commutative]> {
  let hasConstantFolder = 0b1;
}

def AndOp : IntArithmeticOp<"std.and", [Commutative]> {
  let summary = "integer binary and";
  let hasConstantFolder = 0b1;
  let hasFolder = 1;
}

def DivFOp : FloatArithmeticOp<"std.divf"> {
  let summary = "floating point division operation";
}
@@ -105,6 +111,12 @@ def MulIOp : IntArithmeticOp<"std.muli", [Commutative]> {
  let hasFolder = 1;
}

def OrOp : IntArithmeticOp<"std.or", [Commutative]> {
  let summary = "integer binary or";
  let hasConstantFolder = 0b1;
  let hasFolder = 1;
}

def RemFOp : FloatArithmeticOp<"std.remf"> {
  let summary = "floating point division remainder operation";
}


@@ -2002,6 +2002,48 @@ void SubIOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
  results.push_back(llvm::make_unique<SimplifyXMinusX>(context));
}

//===----------------------------------------------------------------------===//
// AndOp
//===----------------------------------------------------------------------===//

Attribute AndOp::constantFold(ArrayRef<Attribute> operands,
                              MLIRContext *context) {
  return constFoldBinaryOp<IntegerAttr>(operands,
                                        [](APInt a, APInt b) { return a & b; });
}

Value *AndOp::fold() {
  /// and(x, 0) -> 0
  if (matchPattern(rhs(), m_Zero()))
    return rhs();
  /// and(x,x) -> x
  if (lhs() == rhs())
    return rhs();

  return nullptr;
}

//===----------------------------------------------------------------------===//
// OrOp
//===----------------------------------------------------------------------===//

Attribute OrOp::constantFold(ArrayRef<Attribute> operands,
                             MLIRContext *context) {
  return constFoldBinaryOp<IntegerAttr>(operands,
                                        [](APInt a, APInt b) { return a | b; });
}

Value *OrOp::fold() {
  /// or(x, 0) -> x
  if (matchPattern(rhs(), m_Zero()))
    return lhs();
  /// or(x,x) -> x
  if (lhs() == rhs())
    return rhs();

  return nullptr;
}
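
The `constantFold` hooks above apply when both operands are compile-time integer constants. A hedged sketch of the expected effect (the function name is made up, and exercising the fold through the canonicalizer is an assumption, not part of this patch):

```mlir
// Hypothetical example: both operands are constants, so `and`/`or` are
// expected to fold to single constants.
func @const_fold_sketch() -> (i32, i32) {
  %a = constant 12 : i32   // 0b1100
  %b = constant 10 : i32   // 0b1010
  %c = and %a, %b : i32    // expected to fold to: constant 8  (0b1000)
  %d = or %a, %b : i32     // expected to fold to: constant 14 (0b1110)
  return %c, %d : i32, i32
}
```
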
//===----------------------------------------------------------------------===//
// TensorCastOp
//===----------------------------------------------------------------------===//


@@ -217,6 +217,30 @@ func @standard_instrs(tensor<4x4x?xf32>, f32, i32, index) {
  // CHECK: %{{[0-9]+}} = remf %arg0, %arg0 : tensor<4x4x?xf32>
  %51 = remf %t, %t : tensor<4x4x?xf32>

  // CHECK: %{{[0-9]+}} = and %arg2, %arg2 : i32
  %52 = "std.and"(%i, %i) : (i32,i32) -> i32

  // CHECK: %{{[0-9]+}} = and %arg2, %arg2 : i32
  %53 = and %i, %i : i32

  // CHECK: %{{[0-9]+}} = and %cst_5, %cst_5 : vector<42xi32>
  %54 = std.and %vci32, %vci32 : vector<42 x i32>

  // CHECK: %{{[0-9]+}} = and %cst_4, %cst_4 : tensor<42xi32>
  %55 = and %tci32, %tci32 : tensor<42 x i32>

  // CHECK: %{{[0-9]+}} = or %arg2, %arg2 : i32
  %56 = "std.or"(%i, %i) : (i32,i32) -> i32

  // CHECK: %{{[0-9]+}} = or %arg2, %arg2 : i32
  %57 = or %i, %i : i32

  // CHECK: %{{[0-9]+}} = or %cst_5, %cst_5 : vector<42xi32>
  %58 = std.or %vci32, %vci32 : vector<42 x i32>

  // CHECK: %{{[0-9]+}} = or %cst_4, %cst_4 : tensor<42xi32>
  %59 = or %tci32, %tci32 : tensor<42 x i32>

  return
}
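
For context, round-trip tests like the hunk above are normally driven by a RUN line at the top of the test file; the exact line is not part of this hunk, so the following is an assumption:

```mlir
// Assumed RUN line: parse the file, pretty-print it, and FileCheck the output.
// RUN: mlir-opt %s | FileCheck %s
```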


@@ -121,6 +121,99 @@ func @muli_one_tensor(%arg0: tensor<4 x 5 x i32>) -> tensor<4 x 5 x i32> {
  return %y: tensor<4 x 5 x i32>
}

// CHECK-LABEL: func @and_self
func @and_self(%arg0: i32) -> i32 {
  // CHECK-NEXT: return %arg0
  %1 = and %arg0, %arg0 : i32
  return %1 : i32
}

// CHECK-LABEL: func @and_self_vector
func @and_self_vector(%arg0: vector<4xi32>) -> vector<4xi32> {
  // CHECK-NEXT: return %arg0
  %1 = and %arg0, %arg0 : vector<4xi32>
  return %1 : vector<4xi32>
}

// CHECK-LABEL: func @and_self_tensor
func @and_self_tensor(%arg0: tensor<4x5xi32>) -> tensor<4x5xi32> {
  // CHECK-NEXT: return %arg0
  %1 = and %arg0, %arg0 : tensor<4x5xi32>
  return %1 : tensor<4x5xi32>
}

// CHECK-LABEL: func @and_zero
func @and_zero(%arg0: i32) -> i32 {
  // CHECK-NEXT: %c0_i32 = constant 0 : i32
  %c0_i32 = constant 0 : i32
  // CHECK-NEXT: return %c0_i32
  %1 = and %arg0, %c0_i32 : i32
  return %1 : i32
}

// CHECK-LABEL: func @and_zero_vector
func @and_zero_vector(%arg0: vector<4xi32>) -> vector<4xi32> {
  // CHECK-NEXT: %cst = constant splat<vector<4xi32>, 0> : vector<4xi32>
  %cst = constant splat<vector<4xi32>, 0> : vector<4xi32>
  // CHECK-NEXT: return %cst
  %1 = and %arg0, %cst : vector<4xi32>
  return %1 : vector<4xi32>
}

// CHECK-LABEL: func @and_zero_tensor
func @and_zero_tensor(%arg0: tensor<4x5xi32>) -> tensor<4x5xi32> {
  // CHECK-NEXT: %cst = constant splat<tensor<4x5xi32>, 0> : tensor<4x5xi32>
  %cst = constant splat<tensor<4x5xi32>, 0> : tensor<4x5xi32>
  // CHECK-NEXT: return %cst
  %1 = and %arg0, %cst : tensor<4x5xi32>
  return %1 : tensor<4x5xi32>
}

// CHECK-LABEL: func @or_self
func @or_self(%arg0: i32) -> i32 {
  // CHECK-NEXT: return %arg0
  %1 = or %arg0, %arg0 : i32
  return %1 : i32
}

// CHECK-LABEL: func @or_self_vector
func @or_self_vector(%arg0: vector<4xi32>) -> vector<4xi32> {
  // CHECK-NEXT: return %arg0
  %1 = or %arg0, %arg0 : vector<4xi32>
  return %1 : vector<4xi32>
}

// CHECK-LABEL: func @or_self_tensor
func @or_self_tensor(%arg0: tensor<4x5xi32>) -> tensor<4x5xi32> {
  // CHECK-NEXT: return %arg0
  %1 = or %arg0, %arg0 : tensor<4x5xi32>
  return %1 : tensor<4x5xi32>
}

// CHECK-LABEL: func @or_zero
func @or_zero(%arg0: i32) -> i32 {
  %c0_i32 = constant 0 : i32
  // CHECK-NEXT: return %arg0
  %1 = or %arg0, %c0_i32 : i32
  return %1 : i32
}

// CHECK-LABEL: func @or_zero_vector
func @or_zero_vector(%arg0: vector<4xi32>) -> vector<4xi32> {
  // CHECK-NEXT: return %arg0
  %cst = constant splat<vector<4xi32>, 0> : vector<4xi32>
  %1 = or %arg0, %cst : vector<4xi32>
  return %1 : vector<4xi32>
}

// CHECK-LABEL: func @or_zero_tensor
func @or_zero_tensor(%arg0: tensor<4x5xi32>) -> tensor<4x5xi32> {
  // CHECK-NEXT: return %arg0
  %cst = constant splat<tensor<4x5xi32>, 0> : tensor<4x5xi32>
  %1 = or %arg0, %cst : tensor<4x5xi32>
  return %1 : tensor<4x5xi32>
}
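
The canonicalization tests above would be exercised by running the canonicalizer over the file and checking the folded output; the RUN line is not shown in this hunk, so this is an assumption as well:

```mlir
// Assumed RUN line: apply the canonicalizer, then FileCheck the result.
// RUN: mlir-opt %s -canonicalize | FileCheck %s
```
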
// CHECK-LABEL: func @memref_cast_folding
func @memref_cast_folding(%arg0: memref<4 x f32>, %arg1: f32) -> f32 {
  %1 = memref_cast %arg0 : memref<4xf32> to memref<?xf32>