Added new AbsF, CeilF, Cos, NegF, CopySign, and Tanh operations.

Closes tensorflow/mlir#251

COPYBARA_INTEGRATE_REVIEW=https://github.com/tensorflow/mlir/pull/251 from dfki-jugr:new_ops 0398997bf9953016898f873068e22916a062eb2b
PiperOrigin-RevId: 283750699
Julian Gross 2019-12-04 07:17:01 -08:00 committed by A. Unique TensorFlower
parent 34e1f4aa51
commit f7c6bc70a9
3 changed files with 534 additions and 242 deletions


@@ -454,6 +454,84 @@ tensor_store %8, %10 : memref<4x?xf32, #layout, memspace0>
## Unary Operations
### 'absf' operation
Syntax:
``` {.ebnf}
operation ::= ssa-id `=` `absf` ssa-use `:` type
```
Examples:
```mlir {.mlir}
// Scalar absolute value.
%a = absf %b : f64
// SIMD vector element-wise absolute value.
%f = absf %g : vector<4xf32>
// Tensor element-wise absolute value.
%x = absf %y : tensor<4x?xf8>
```
The `absf` operation computes the absolute value. It takes one operand and
returns one result of the same type. This type may be a float scalar type, a
vector whose element type is float, or a tensor of floats. It has no standard
attributes.
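Like every standard op, `absf` can also be written in the generic operation
form with the `std.` dialect prefix spelled out (a brief sketch; the value
names are illustrative):

```mlir {.mlir}
// Generic (fully spelled-out) form of the scalar example above.
%a = "std.absf"(%b) : (f64) -> f64
```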
### 'ceilf' operation
Syntax:
``` {.ebnf}
operation ::= ssa-id `=` `ceilf` ssa-use `:` type
```
Examples:
```mlir {.mlir}
// Scalar ceiling value.
%a = ceilf %b : f64
// SIMD vector element-wise ceiling value.
%f = ceilf %g : vector<4xf32>
// Tensor element-wise ceiling value.
%x = ceilf %y : tensor<4x?xf8>
```
The `ceilf` operation computes the ceiling of a given value. It takes one
operand and returns one result of the same type. This type may be a float
scalar type, a vector whose element type is float, or a tensor of floats. It
has no standard attributes.
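As a worked sketch of the rounding direction (the constants below are
illustrative and not part of the original examples), `ceilf` rounds toward
positive infinity, so negative inputs move toward zero:

```mlir {.mlir}
// 2.3 rounds up to 3.0; -2.3 rounds toward +infinity, giving -2.0.
%cp = constant 2.3 : f64
%p = ceilf %cp : f64
%cn = constant -2.3 : f64
%n = ceilf %cn : f64
```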
### 'cos' operation
Syntax:
``` {.ebnf}
operation ::= ssa-id `=` `cos` ssa-use `:` type
```
Examples:
```mlir {.mlir}
// Scalar cosine value.
%a = cos %b : f64
// SIMD vector element-wise cosine value.
%f = cos %g : vector<4xf32>
// Tensor element-wise cosine value.
%x = cos %y : tensor<4x?xf8>
```
The `cos` operation computes the cosine of a given value. It takes one operand
and returns one result of the same type. This type may be a float scalar type,
a vector whose element type is float, or a tensor of floats. It has no standard
attributes.
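The text does not state the angular unit; assuming radians (the usual
convention when such an operation is lowered to libm or LLVM intrinsics), a
minimal sketch would be:

```mlir {.mlir}
// Assuming the operand is interpreted in radians, %r is approximately -1.0.
%pi = constant 3.141592653589793 : f64
%r = cos %pi : f64
```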
### 'exp' operation
Syntax:
@@ -479,6 +557,58 @@ The `exp` operation takes one operand and returns one result of the same type.
This type may be a float scalar type, a vector whose element type is float, or a
tensor of floats. It has no standard attributes.
### 'negf' operation
Syntax:
``` {.ebnf}
operation ::= ssa-id `=` `negf` ssa-use `:` type
```
Examples:
```mlir {.mlir}
// Scalar negation value.
%a = negf %b : f64
// SIMD vector element-wise negation value.
%f = negf %g : vector<4xf32>
// Tensor element-wise negation value.
%x = negf %y : tensor<4x?xf8>
```
The `negf` operation computes the negation of a given value. It takes one
operand and returns one result of the same type. This type may be a float
scalar type, a vector whose element type is float, or a tensor of floats. It
has no standard attributes.
### 'tanh' operation
Syntax:
``` {.ebnf}
operation ::= ssa-id `=` `tanh` ssa-use `:` type
```
Examples:
```mlir {.mlir}
// Scalar hyperbolic tangent value.
%a = tanh %b : f64
// SIMD vector element-wise hyperbolic tangent value.
%f = tanh %g : vector<4xf32>
// Tensor element-wise hyperbolic tangent value.
%x = tanh %y : tensor<4x?xf8>
```
The `tanh` operation computes the hyperbolic tangent. It takes one operand and
returns one result of the same type. This type may be a float scalar type, a
vector whose element type is float, or a tensor of floats. It has no standard
attributes.
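As a consolidated sketch (the function name and the particular combination are
hypothetical), the unary operations above compose like any other SSA
operations, each preserving the type of its single operand:

```mlir {.mlir}
// Hypothetical composition of the new unary operations.
func @smooth_magnitude(%x: vector<4xf32>) -> vector<4xf32> {
  %a = absf %x : vector<4xf32>   // element-wise |x|
  %t = tanh %a : vector<4xf32>   // squash each element into [0, 1)
  return %t : vector<4xf32>
}
```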
## Arithmetic Operations
Basic arithmetic in MLIR is specified by standard operations described in this
@@ -675,6 +805,32 @@ compiler is multithreaded, and disallowing SSA values to directly reference a
function simplifies this
([rationale](../Rationale.md#multithreading-the-compiler)).
### 'copysign' operation
Syntax:
``` {.ebnf}
operation ::= ssa-id `=` `copysign` ssa-use `,` ssa-use `:` type
```
Examples:
```mlir {.mlir}
// Scalar copysign value.
%a = copysign %b, %c : f64
// SIMD vector element-wise copysign value.
%f = copysign %g, %h : vector<4xf32>
// Tensor element-wise copysign value.
%x = copysign %y, %z : tensor<4x?xf8>
```
The `copysign` operation returns a value with the magnitude of the first operand and the
sign of the second operand. It takes two operands and returns one result of the
same type. This type may be a float scalar type, a vector whose element type is
float, or a tensor of floats. It has no standard attributes.
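As an illustration of the semantics (a hedged sketch, not taken from the
original examples): because the result carries the sign of the second operand,
`copysign` with a positive constant as the second operand behaves like `absf`:

```mlir {.mlir}
// copysign with a positive second operand yields the absolute value of
// the first operand, i.e. behaves like absf on %b.
%one = constant 1.0 : f64
%abs_b = copysign %b, %one : f64
```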
### 'divis' operation
Signed integer division. Rounds towards zero. Treats the leading bit as sign,


@@ -130,6 +130,16 @@ class FloatArithmeticOp<string mnemonic, list<OpTrait> traits = []> :
ArithmeticOp<mnemonic, traits>,
Arguments<(ins FloatLike:$lhs, FloatLike:$rhs)>;
def AbsFOp : FloatUnaryOp<"absf"> {
let summary = "floating point absolute-value operation";
let description = [{
The `absf` operation computes the absolute value. It takes one operand and
returns one result of the same type. This type may be a float scalar type,
a vector whose element type is float, or a tensor of floats. It has no
standard attributes.
}];
}
def AddFOp : FloatArithmeticOp<"addf"> {
let summary = "floating point addition operation";
let hasFolder = 1;
@@ -345,6 +355,63 @@ def CallIndirectOp : Std_Op<"call_indirect", [CallOpInterface]> {
let hasCanonicalizer = 1;
}
def CeilFOp : FloatUnaryOp<"ceilf"> {
let summary = "ceiling of the specified value";
let description = [{
The `ceilf` operation computes the ceiling of a given value. It takes one
operand and returns one result of the same type. This type may be a float
scalar type, a vector whose element type is float, or a tensor of floats.
It has no standard attributes.
}];
}
def CmpFOp : Std_Op<"cmpf",
[NoSideEffect, SameTypeOperands, SameOperandsAndResultShape]> {
let summary = "floating-point comparison operation";
let description = [{
The "cmpf" operation compares its two operands according to the float
comparison rules and the predicate specified by the respective attribute.
The predicate defines the type of comparison: (un)orderedness, (in)equality
and signed less/greater than (or equal to) as well as predicates that are
always true or false. The operands must have the same type, and this type
must be a float type, or a vector or tensor thereof. The result is an i1,
or a vector/tensor thereof having the same shape as the inputs. Unlike cmpi,
the operands are always treated as signed. The u prefix indicates
*unordered* comparison, not unsigned comparison, so "une" means unordered or
not equal. For the sake of readability by humans, custom assembly form for
the operation uses a string-typed attribute for the predicate. The value of
this attribute corresponds to lower-cased name of the predicate constant,
e.g., "one" means "ordered not equal". The string representation of the
attribute is merely syntactic sugar and is converted to an integer
attribute by the parser.
%r1 = cmpf "oeq" %0, %1 : f32
%r2 = cmpf "ult" %0, %1 : tensor<42x42xf64>
%r3 = "std.cmpf"(%0, %1) {predicate: 0} : (f8, f8) -> i1
}];
let arguments = (ins FloatLike:$lhs, FloatLike:$rhs);
let results = (outs BoolLike);
let builders = [OpBuilder<
"Builder *builder, OperationState &result, CmpFPredicate predicate,"
"Value *lhs, Value *rhs", [{
::buildCmpFOp(builder, result, predicate, lhs, rhs);
}]>];
let extraClassDeclaration = [{
static StringRef getPredicateAttrName() { return "predicate"; }
static CmpFPredicate getPredicateByName(StringRef name);
CmpFPredicate getPredicate() {
return (CmpFPredicate)getAttrOfType<IntegerAttr>(getPredicateAttrName())
.getInt();
}
}];
let hasFolder = 1;
}
def CMPI_P_EQ : I64EnumAttrCase<"eq", 0>;
def CMPI_P_NE : I64EnumAttrCase<"ne", 1>;
def CMPI_P_SLT : I64EnumAttrCase<"slt", 2>;
@@ -415,53 +482,6 @@ def CmpIOp : Std_Op<"cmpi",
let hasFolder = 1;
}
def CmpFOp : Std_Op<"cmpf",
[NoSideEffect, SameTypeOperands, SameOperandsAndResultShape]> {
let summary = "floating-point comparison operation";
let description = [{
The "cmpf" operation compares its two operands according to the float
comparison rules and the predicate specified by the respective attribute.
The predicate defines the type of comparison: (un)orderedness, (in)equality
and signed less/greater than (or equal to) as well as predicates that are
always true or false. The operands must have the same type, and this type
must be a float type, or a vector or tensor thereof. The result is an i1,
or a vector/tensor thereof having the same shape as the inputs. Unlike cmpi,
the operands are always treated as signed. The u prefix indicates
*unordered* comparison, not unsigned comparison, so "une" means unordered or
not equal. For the sake of readability by humans, custom assembly form for
the operation uses a string-typed attribute for the predicate. The value of
this attribute corresponds to lower-cased name of the predicate constant,
e.g., "one" means "ordered not equal". The string representation of the
attribute is merely syntactic sugar and is converted to an integer
attribute by the parser.
%r1 = cmpf "oeq" %0, %1 : f32
%r2 = cmpf "ult" %0, %1 : tensor<42x42xf64>
%r3 = "std.cmpf"(%0, %1) {predicate: 0} : (f8, f8) -> i1
}];
let arguments = (ins FloatLike:$lhs, FloatLike:$rhs);
let results = (outs BoolLike);
let builders = [OpBuilder<
"Builder *builder, OperationState &result, CmpFPredicate predicate,"
"Value *lhs, Value *rhs", [{
::buildCmpFOp(builder, result, predicate, lhs, rhs);
}]>];
let extraClassDeclaration = [{
static StringRef getPredicateAttrName() { return "predicate"; }
static CmpFPredicate getPredicateByName(StringRef name);
CmpFPredicate getPredicate() {
return (CmpFPredicate)getAttrOfType<IntegerAttr>(getPredicateAttrName())
.getInt();
}
}];
let hasFolder = 1;
}
def CondBranchOp : Std_Op<"cond_br", [Terminator]> {
let summary = "conditional branch operation";
let description = [{
@@ -602,6 +622,27 @@ def ConstantOp : Std_Op<"constant",
let hasFolder = 1;
}
def CopySignOp : FloatArithmeticOp<"copysign"> {
let summary = "A copysign operation";
let description = [{
The `copysign` operation returns a value with the magnitude of the first operand and
the sign of the second operand. It takes two operands and returns one
result of the same type. This type may be a float scalar type, a vector
whose element type is float, or a tensor of floats. It has no standard
attributes.
}];
}
def CosOp : FloatUnaryOp<"cos"> {
let summary = "cosine of the specified value";
let description = [{
The `cos` operation computes the cosine of a given value. It takes one
operand and returns one result of the same type. This type may be a float
scalar type, a vector whose element type is float, or a tensor of floats.
It has no standard attributes.
}];
}
def DeallocOp : Std_Op<"dealloc"> {
let summary = "memory deallocation operation";
let description = [{
@@ -724,24 +765,6 @@ def IndexCastOp : CastOp<"index_cast">, Arguments<(ins AnyType:$in)> {
let hasFolder = 0;
}
def SIToFPOp : CastOp<"sitofp">, Arguments<(ins AnyType:$in)> {
let summary = "cast from integer type to floating-point";
let description = [{
Cast from a value interpreted as signed integer to the corresponding
floating-point value. If the value cannot be exactly represented, it is
rounded using the default rounding mode. Only scalars are currently
supported.
}];
let extraClassDeclaration = [{
/// Return true if `a` and `b` are valid operand and result pairs for
/// the operation.
static bool areCastCompatible(Type a, Type b);
}];
let hasFolder = 0;
}
def FPExtOp : CastOp<"fpext">, Arguments<(ins AnyType:$in)> {
let summary = "cast from floating-point to wider floating-point";
let description = [{
@@ -866,6 +889,16 @@ def MulIOp : IntArithmeticOp<"muli", [Commutative]> {
let hasFolder = 1;
}
def NegFOp : FloatUnaryOp<"negf"> {
let summary = "floating point negation";
let description = [{
The `negf` operation computes the negation of a given value. It takes one
operand and returns one result of the same type. This type may be a float
scalar type, a vector whose element type is float, or a tensor of floats.
It has no standard attributes.
}];
}
def OrOp : IntArithmeticOp<"or", [Commutative]> {
let summary = "integer binary or";
let hasFolder = 1;
@@ -1000,6 +1033,24 @@ def ShlISOp : IntArithmeticOp<"shlis"> {
let summary = "signed integer shift left";
}
def SIToFPOp : CastOp<"sitofp">, Arguments<(ins AnyType:$in)> {
let summary = "cast from integer type to floating-point";
let description = [{
Cast from a value interpreted as signed integer to the corresponding
floating-point value. If the value cannot be exactly represented, it is
rounded using the default rounding mode. Only scalars are currently
supported.
}];
let extraClassDeclaration = [{
/// Return true if `a` and `b` are valid operand and result pairs for
/// the operation.
static bool areCastCompatible(Type a, Type b);
}];
let hasFolder = 0;
}
def SplatOp : Std_Op<"splat", [NoSideEffect]> {
let summary = "splat or broadcast operation";
let description = [{
@@ -1026,16 +1077,6 @@ def SplatOp : Std_Op<"splat", [NoSideEffect]> {
let hasFolder = 1;
}
def SubFOp : FloatArithmeticOp<"subf"> {
let summary = "floating point subtraction operation";
let hasFolder = 1;
}
def SubIOp : IntArithmeticOp<"subi"> {
let summary = "integer subtraction operation";
let hasFolder = 1;
}
def StoreOp : Std_Op<"store"> {
let summary = "store operation";
let description = [{
@@ -1075,6 +1116,192 @@ def StoreOp : Std_Op<"store"> {
let hasCanonicalizer = 1;
}
def SubFOp : FloatArithmeticOp<"subf"> {
let summary = "floating point subtraction operation";
let hasFolder = 1;
}
def SubIOp : IntArithmeticOp<"subi"> {
let summary = "integer subtraction operation";
let hasFolder = 1;
}
def SubViewOp : Std_Op<"subview", [AttrSizedOperandSegments, NoSideEffect]> {
let summary = "memref subview operation";
let description = [{
The "subview" operation converts a memref type to another memref type
which represents a reduced-size view of the original memref as specified by
the operation's offsets, sizes and strides arguments.
The SubView operation supports the following arguments:
*) Memref: the "base" memref on which to create a "view" memref.
*) Offsets: zero or memref-rank number of dynamic offsets into the "base"
memref at which to create the "view" memref.
*) Sizes: zero or memref-rank dynamic size operands which specify the
dynamic sizes of the result "view" memref type.
*) Strides: zero or memref-rank number of dynamic strides which are applied
multiplicatively to the base memref strides in each dimension.
Note on the number of operands for offsets, sizes and strides: for
each of these, the operand list must either contain exactly as many
operands as the rank of the memref or be empty. In the latter case,
those values are treated as constants.
Example 1:
%0 = alloc() : memref<64x4xf32, (d0, d1) -> (d0 * 4 + d1)>
// Create a sub-view of "base" memref '%0' with offset arguments '%c0',
// dynamic sizes for each dimension, and stride arguments '%c1'.
%1 = subview %0[%c0, %c0][%size0, %size1][%c1, %c1]
: memref<64x4xf32, (d0, d1) -> (d0 * 4 + d1) > to
memref<?x?xf32, (d0, d1)[s0, s1] -> (d0 * s1 + d1 + s0)>
Example 2:
%0 = alloc() : memref<8x16x4xf32, (d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>
// Create a sub-view of "base" memref '%0' with dynamic offsets, sizes,
// and strides.
// Note that dynamic offsets are represented by the linearized dynamic
// offset symbol 's0' in the subview memref layout map, and that the
// dynamic strides operands, after being applied to the base memref
// strides in each dimension, are represented in the view memref layout
// map as symbols 's1', 's2' and 's3'.
%1 = subview %0[%i, %j, %k][%size0, %size1, %size2][%x, %y, %z]
: memref<8x16x4xf32, (d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)> to
memref<?x?x?xf32,
(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + d1 * s2 + d2 * s3 + s0)>
Example 3:
%0 = alloc() : memref<8x16x4xf32, (d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>
// Subview with constant offsets, sizes and strides.
%1 = subview %0[][][]
: memref<8x16x4xf32, (d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)> to
memref<4x4x4xf32, (d0, d1, d2) -> (d0 * 16 + d1 * 4 + d2 + 8)>
Example 4:
%0 = alloc(%arg0, %arg1) : memref<?x?xf32>
// Subview with constant size, but dynamic offsets and
// strides. The resulting memref has a static shape, but if the
// base memref has an affine map to describe the layout, the result
// memref also uses an affine map to describe the layout. The
// strides of the result memref are computed as follows:
//
// Let #map1 represent the layout of the base memref, and #map2
// represent the layout of the result memref. A #mapsubview can be
// constructed to map an index from the result memref to the base
// memref (note that the description below uses more convenient
// naming for symbols, while in affine maps, symbols are
// represented as unsigned numbers that identify that symbol in the
// given affine map).
//
// #mapsubview = (d0, d1)[o0, o1, t0, t1] -> (d0 * t0 + o0, d1 * t1 + o1)
//
// where, o0, o1, ... are offsets, and t0, t1, ... are strides. Then,
//
// #map2 = #map1.compose(#mapsubview)
//
// If the layout map is represented as
//
// #map1 = (d0, d1)[s0, s1, s2] -> (d0 * s1 + d1 * s2 + s0)
//
// then,
//
// #map2 = (d0, d1)[s0, s1, s2, o0, o1, t0, t1] ->
// (d0 * s1 * t0 + d1 * s2 * t1 + o0 * s1 + o1 * s2 + s0)
//
// Representing this canonically
//
// #map2 = (d0, d1)[r0, r1, r2] -> (d0 * r1 + d1 * r2 + r0)
//
// where, r0 = o0 * s1 + o1 * s2 + s0, r1 = s1 * t0, r2 = s2 * t1.
%1 = subview %0[%i, %j][][%x, %y]
: memref<?x?xf32, (d0, d1)[s0, s1, s2] -> (d0 * s1 + d1 * s2 + s0)> to
memref<4x4xf32, (d0, d1)[r0, r1, r2] -> (d0 * r1 + d1 * r2 + r0)>
// Note that the subview op does not guarantee that the result
// memref is "inbounds" w.r.t. the base memref. It is up to the client
// to ensure that the subview is accessed in a manner that is
// in-bounds.
}
}];
// TODO(b/144779634, ravishankarm) : Use different arguments for
// offsets, sizes and strides.
let arguments = (ins
AnyMemRef:$source,
Variadic<Index>:$offsets,
Variadic<Index>:$sizes,
Variadic<Index>:$strides,
I32ElementsAttr:$operand_segment_sizes
);
let results = (outs AnyMemRef);
let builders = [
OpBuilder<
"Builder *b, OperationState &result, Value *source, "
"ArrayRef<Value *> offsets, ArrayRef<Value *> sizes, "
"ArrayRef<Value *> strides, Type resultType = Type(), "
"ArrayRef<NamedAttribute> attrs = {}">,
OpBuilder<
"Builder *builder, OperationState &result, "
"Type resultType, Value *source">
];
let extraClassDeclaration = [{
/// Returns the type of the base memref operand.
MemRefType getBaseMemRefType() {
return source()->getType().cast<MemRefType>();
}
/// The result of a subview is always a memref.
MemRefType getType() { return getResult()->getType().cast<MemRefType>(); }
/// Returns as integer value the number of offset operands.
int64_t getNumOffsets() { return llvm::size(offsets()); }
/// Returns as integer value the number of size operands.
int64_t getNumSizes() { return llvm::size(sizes()); }
/// Returns as integer value the number of stride operands.
int64_t getNumStrides() { return llvm::size(strides()); }
/// Returns the dynamic sizes for this subview operation if specified.
operand_range getDynamicSizes() { return sizes(); }
/// Returns in `staticStrides` the static value of the stride
/// operands. Returns failure() if the static value of the stride
/// operands could not be retrieved.
LogicalResult getStaticStrides(SmallVectorImpl<int64_t> &staticStrides);
// Auxiliary range data structure and helper function that unpacks the
// offset, size and stride operands of the SubViewOp into a list of triples.
// Such a list of triples is sometimes more convenient to manipulate.
struct Range {
Value *offset, *size, *stride;
};
SmallVector<Range, 8> getRanges();
}];
let hasCanonicalizer = 1;
}
def TanhOp : FloatUnaryOp<"tanh"> {
let summary = "hyperbolic tangent of the specified value";
let description = [{
The `tanh` operation computes the hyperbolic tangent. It takes one operand
and returns one result of the same type. This type may be a float scalar
type, a vector whose element type is float, or a tensor of floats. It has
no standard attributes.
}];
}
def TensorCastOp : CastOp<"tensor_cast"> {
let summary = "tensor cast operation";
let description = [{
@@ -1248,172 +1475,6 @@ def ViewOp : Std_Op<"view", [NoSideEffect]> {
let hasCanonicalizer = 1;
}
def SubViewOp : Std_Op<"subview", [AttrSizedOperandSegments, NoSideEffect]> {
let summary = "memref subview operation";
let description = [{
The "subview" operation converts a memref type to another memref type
which represents a reduced-size view of the original memref as specified by
the operation's offsets, sizes and strides arguments.
The SubView operation supports the following arguments:
*) Memref: the "base" memref on which to create a "view" memref.
*) Offsets: zero or memref-rank number of dynamic offsets into the "base"
memref at which to create the "view" memref.
*) Sizes: zero or memref-rank dynamic size operands which specify the
dynamic sizes of the result "view" memref type.
*) Strides: zero or memref-rank number of dynamic strides which are applied
multiplicatively to the base memref strides in each dimension.
Note on the number of operands for offsets, sizes and strides: for
each of these, the operand list must either contain exactly as many
operands as the rank of the memref or be empty. In the latter case,
those values are treated as constants.
Example 1:
%0 = alloc() : memref<64x4xf32, (d0, d1) -> (d0 * 4 + d1)>
// Create a sub-view of "base" memref '%0' with offset arguments '%c0',
// dynamic sizes for each dimension, and stride arguments '%c1'.
%1 = subview %0[%c0, %c0][%size0, %size1][%c1, %c1]
: memref<64x4xf32, (d0, d1) -> (d0 * 4 + d1) > to
memref<?x?xf32, (d0, d1)[s0, s1] -> (d0 * s1 + d1 + s0)>
Example 2:
%0 = alloc() : memref<8x16x4xf32, (d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>
// Create a sub-view of "base" memref '%0' with dynamic offsets, sizes,
// and strides.
// Note that dynamic offsets are represented by the linearized dynamic
// offset symbol 's0' in the subview memref layout map, and that the
// dynamic strides operands, after being applied to the base memref
// strides in each dimension, are represented in the view memref layout
// map as symbols 's1', 's2' and 's3'.
%1 = subview %0[%i, %j, %k][%size0, %size1, %size2][%x, %y, %z]
: memref<8x16x4xf32, (d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)> to
memref<?x?x?xf32,
(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + d1 * s2 + d2 * s3 + s0)>
Example 3:
%0 = alloc() : memref<8x16x4xf32, (d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>
// Subview with constant offsets, sizes and strides.
%1 = subview %0[][][]
: memref<8x16x4xf32, (d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)> to
memref<4x4x4xf32, (d0, d1, d2) -> (d0 * 16 + d1 * 4 + d2 + 8)>
Example 4:
%0 = alloc(%arg0, %arg1) : memref<?x?xf32>
// Subview with constant size, but dynamic offsets and
// strides. The resulting memref has a static shape, but if the
// base memref has an affine map to describe the layout, the result
// memref also uses an affine map to describe the layout. The
// strides of the result memref are computed as follows:
//
// Let #map1 represent the layout of the base memref, and #map2
// represent the layout of the result memref. A #mapsubview can be
// constructed to map an index from the result memref to the base
// memref (note that the description below uses more convenient
// naming for symbols, while in affine maps, symbols are
// represented as unsigned numbers that identify that symbol in the
// given affine map).
//
// #mapsubview = (d0, d1)[o0, o1, t0, t1] -> (d0 * t0 + o0, d1 * t1 + o1)
//
// where, o0, o1, ... are offsets, and t0, t1, ... are strides. Then,
//
// #map2 = #map1.compose(#mapsubview)
//
// If the layout map is represented as
//
// #map1 = (d0, d1)[s0, s1, s2] -> (d0 * s1 + d1 * s2 + s0)
//
// then,
//
// #map2 = (d0, d1)[s0, s1, s2, o0, o1, t0, t1] ->
// (d0 * s1 * t0 + d1 * s2 * t1 + o0 * s1 + o1 * s2 + s0)
//
// Representing this canonically
//
// #map2 = (d0, d1)[r0, r1, r2] -> (d0 * r1 + d1 * r2 + r0)
//
// where, r0 = o0 * s1 + o1 * s2 + s0, r1 = s1 * t0, r2 = s2 * t1.
%1 = subview %0[%i, %j][][%x, %y]
: memref<?x?xf32, (d0, d1)[s0, s1, s2] -> (d0 * s1 + d1 * s2 + s0)> to
memref<4x4xf32, (d0, d1)[r0, r1, r2] -> (d0 * r1 + d1 * r2 + r0)>
// Note that the subview op does not guarantee that the result
// memref is "inbounds" w.r.t. the base memref. It is up to the client
// to ensure that the subview is accessed in a manner that is
// in-bounds.
}
}];
// TODO(b/144779634, ravishankarm) : Use different arguments for
// offsets, sizes and strides.
let arguments = (ins
AnyMemRef:$source,
Variadic<Index>:$offsets,
Variadic<Index>:$sizes,
Variadic<Index>:$strides,
I32ElementsAttr:$operand_segment_sizes
);
let results = (outs AnyMemRef);
let builders = [
OpBuilder<
"Builder *b, OperationState &result, Value *source, "
"ArrayRef<Value *> offsets, ArrayRef<Value *> sizes, "
"ArrayRef<Value *> strides, Type resultType = Type(), "
"ArrayRef<NamedAttribute> attrs = {}">,
OpBuilder<
"Builder *builder, OperationState &result, "
"Type resultType, Value *source">
];
let extraClassDeclaration = [{
/// Returns the type of the base memref operand.
MemRefType getBaseMemRefType() {
return source()->getType().cast<MemRefType>();
}
/// The result of a subview is always a memref.
MemRefType getType() { return getResult()->getType().cast<MemRefType>(); }
/// Returns as integer value the number of offset operands.
int64_t getNumOffsets() { return llvm::size(offsets()); }
/// Returns as integer value the number of size operands.
int64_t getNumSizes() { return llvm::size(sizes()); }
/// Returns as integer value the number of stride operands.
int64_t getNumStrides() { return llvm::size(strides()); }
/// Returns the dynamic sizes for this subview operation if specified.
operand_range getDynamicSizes() { return sizes(); }
/// Returns in `staticStrides` the static value of the stride
/// operands. Returns failure() if the static value of the stride
/// operands could not be retrieved.
LogicalResult getStaticStrides(SmallVectorImpl<int64_t> &staticStrides);
// Auxiliary range data structure and helper function that unpacks the
// offset, size and stride operands of the SubViewOp into a list of triples.
// Such a list of triples is sometimes more convenient to manipulate.
struct Range {
Value *offset, *size, *stride;
};
SmallVector<Range, 8> getRanges();
}];
let hasCanonicalizer = 1;
}
def XOrOp : IntArithmeticOp<"xor", [Commutative]> {
let summary = "integer binary xor";
let hasFolder = 1;


@@ -371,8 +371,83 @@ func @standard_instrs(tensor<4x4x?xf32>, f32, i32, index, i64, f16) {
// CHECK: %{{[0-9]+}} = exp %arg1 : f32
%97 = exp %f : f32
// CHECK: %{{[0-9]+}} = exp %cst_8 : vector<4xf32>
%98 = exp %vcf32 : vector<4xf32>
// CHECK: %{{[0-9]+}} = exp %arg0 : tensor<4x4x?xf32>
%98 = exp %t : tensor<4x4x?xf32>
%99 = exp %t : tensor<4x4x?xf32>
// CHECK: %{{[0-9]+}} = absf %arg1 : f32
%100 = "std.absf"(%f) : (f32) -> f32
// CHECK: %{{[0-9]+}} = absf %arg1 : f32
%101 = absf %f : f32
// CHECK: %{{[0-9]+}} = absf %cst_8 : vector<4xf32>
%102 = absf %vcf32 : vector<4xf32>
// CHECK: %{{[0-9]+}} = absf %arg0 : tensor<4x4x?xf32>
%103 = absf %t : tensor<4x4x?xf32>
// CHECK: %{{[0-9]+}} = ceilf %arg1 : f32
%104 = "std.ceilf"(%f) : (f32) -> f32
// CHECK: %{{[0-9]+}} = ceilf %arg1 : f32
%105 = ceilf %f : f32
// CHECK: %{{[0-9]+}} = ceilf %cst_8 : vector<4xf32>
%106 = ceilf %vcf32 : vector<4xf32>
// CHECK: %{{[0-9]+}} = ceilf %arg0 : tensor<4x4x?xf32>
%107 = ceilf %t : tensor<4x4x?xf32>
// CHECK: %{{[0-9]+}} = cos %arg1 : f32
%108 = "std.cos"(%f) : (f32) -> f32
// CHECK: %{{[0-9]+}} = cos %arg1 : f32
%109 = cos %f : f32
// CHECK: %{{[0-9]+}} = cos %cst_8 : vector<4xf32>
%110 = cos %vcf32 : vector<4xf32>
// CHECK: %{{[0-9]+}} = cos %arg0 : tensor<4x4x?xf32>
%111 = cos %t : tensor<4x4x?xf32>
// CHECK: %{{[0-9]+}} = negf %arg1 : f32
%112 = "std.negf"(%f) : (f32) -> f32
// CHECK: %{{[0-9]+}} = negf %arg1 : f32
%113 = negf %f : f32
// CHECK: %{{[0-9]+}} = negf %cst_8 : vector<4xf32>
%114 = negf %vcf32 : vector<4xf32>
// CHECK: %{{[0-9]+}} = negf %arg0 : tensor<4x4x?xf32>
%115 = negf %t : tensor<4x4x?xf32>
// CHECK: %{{[0-9]+}} = copysign %arg1, %arg1 : f32
%116 = "std.copysign"(%f, %f) : (f32, f32) -> f32
// CHECK: %{{[0-9]+}} = copysign %arg1, %arg1 : f32
%117 = copysign %f, %f : f32
// CHECK: %{{[0-9]+}} = copysign %cst_8, %cst_8 : vector<4xf32>
%118 = copysign %vcf32, %vcf32 : vector<4xf32>
// CHECK: %{{[0-9]+}} = copysign %arg0, %arg0 : tensor<4x4x?xf32>
%119 = copysign %t, %t : tensor<4x4x?xf32>
// CHECK: %{{[0-9]+}} = tanh %arg1 : f32
%120 = "std.tanh"(%f) : (f32) -> f32
// CHECK: %{{[0-9]+}} = tanh %arg1 : f32
%121 = tanh %f : f32
// CHECK: %{{[0-9]+}} = tanh %cst_8 : vector<4xf32>
%122 = tanh %vcf32 : vector<4xf32>
// CHECK: %{{[0-9]+}} = tanh %arg0 : tensor<4x4x?xf32>
%123 = tanh %t : tensor<4x4x?xf32>
return
}