[mlir][OpDSL] Remove unused SoftPlus2DOp operation.
The revision removes the SoftPlus2DOp operation that previously served as a test operation. It has been replaced by the elemwise_unary operation, which is now used to test unary log and exp functions.

Reviewed By: nicolasvasilache

Differential Revision: https://reviews.llvm.org/D120794
commit 3231b6d3a2
parent b383b17bf7
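For context, a minimal sketch of the replacement path described in the commit message: elemwise_unary selects its scalar function through an attribute, so the unary log and exp code paths can be exercised without a dedicated soft_plus_2d test op. The definition below only mirrors the OpDSL conventions visible in this diff; the helper names UnaryFnAttrDef and TypeFnAttrDef and the exact elemwise_unary signature are assumptions, not content of this commit.

# Hedged sketch, not part of this commit: a generic elementwise unary op whose
# scalar function is chosen by an attribute (UnaryFn.log, UnaryFn.exp, ...).
# UnaryFnAttrDef/TypeFnAttrDef and the signature are assumed, not confirmed here.
from mlir.dialects.linalg.opdsl.lang import *

@linalg_structured_op
def elemwise_unary_sketch(
    I=TensorDef(T1),
    O=TensorDef(U, output=True),
    fun=UnaryFnAttrDef(default=UnaryFn.exp),
    cast=TypeFnAttrDef(default=TypeFn.cast_signed)):
  """Applies the unary function fun elementwise (illustrative sketch)."""
  O[None] = fun(cast(U, I[None]))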
@@ -3090,65 +3090,3 @@ structured_op: !LinalgStructuredOpConfig
                            scalar_const: '2.3283063999999999E-10 : f64'
             - !ScalarExpression
               scalar_arg: min
---- !LinalgOpConfig
-metadata: !LinalgOpMetadata
-  name: soft_plus_2d
-  cpp_class_name: SoftPlus2DOp
-  doc: |-
-    Implements the soft plus operator.
-
-    Numeric casting is performed on the input operand, promoting it to the same
-    data type as the accumulator/output.
-structured_op: !LinalgStructuredOpConfig
-  args:
-  - !LinalgOperandDefConfig
-    name: I
-    kind: input_tensor
-    type_var: T
-    shape_map: affine_map<()[s0, s1] -> (s0, s1)>
-  - !LinalgOperandDefConfig
-    name: O
-    kind: output_tensor
-    type_var: U
-    shape_map: affine_map<()[s0, s1] -> (s0, s1)>
-  indexing_maps: !LinalgIndexingMapsConfig
-    static_indexing_maps:
-    - affine_map<(d0, d1)[s0, s1] -> (d0, d1)>
-    - affine_map<(d0, d1)[s0, s1] -> (d0, d1)>
-  iterator_types:
-  - parallel
-  - parallel
-  assignments:
-  - !ScalarAssign
-    arg: O
-    value: !ScalarExpression
-      scalar_fn:
-        kind: unary
-        fn_name: log
-        operands:
-        - !ScalarExpression
-          scalar_fn:
-            kind: binary
-            fn_name: add
-            operands:
-            - !ScalarExpression
-              scalar_fn:
-                kind: type
-                fn_name: cast_signed
-                type_var: U
-                operands:
-                - !ScalarExpression
-                  scalar_const: '1.000000e+00 : f64'
-            - !ScalarExpression
-              scalar_fn:
-                kind: unary
-                fn_name: exp
-                operands:
-                - !ScalarExpression
-                  scalar_fn:
-                    kind: type
-                    fn_name: cast_signed
-                    type_var: U
-                    operands:
-                    - !ScalarExpression
-                      scalar_arg: I
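The scalar expression tree removed above encodes the elementwise softplus computation log(1 + exp(x)), with the constant 1.0 and the input I promoted (cast_signed) to the output element type U. A minimal NumPy reference of that semantics, for illustration only (the function name and dtype handling below are not part of the commit):

# Illustrative reference for the removed YAML entry: elementwise
# softplus(x) = log(1 + exp(x)) on a 2-D input, with the input first
# promoted to the output element type (mirroring cast_signed(U, I)).
import numpy as np

def soft_plus_2d_reference(I, out_dtype=np.float64):
  x = np.asarray(I).astype(out_dtype)   # cast_signed(U, I)
  return np.log(1.0 + np.exp(x))        # log(add(1.0, exp(x)))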
@@ -702,16 +702,3 @@ def fill_rng_2d(
   scaling = (max - min) * inv_range
   O[D.m, D.n] = TypeFn.cast_signed(
       T, (offset + TypeFn.cast_signed(F64, rand2)) * scaling + min)
-
-
-@linalg_structured_op
-def soft_plus_2d(
-    I=TensorDef(T, S.M, S.N), O=TensorDef(U, S.M, S.N, output=True)):
-  """Implements the soft plus operator.
-
-  Numeric casting is performed on the input operand, promoting it to the same
-  data type as the accumulator/output.
-  """
-  domain(D.m, D.n)
-  O[D.m, D.n] = \
-      UnaryFn.log(TypeFn.cast_signed(U, const(1.0)) + UnaryFn.exp(TypeFn.cast_signed(U, I[D.m, D.n])))
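For completeness, a hedged before/after sketch at the level of the generated Python builders; the keyword arguments and the way the fun attribute is passed are assumptions about the builder API, not taken from this commit:

# Hedged sketch, not from this commit: the removed definition used to back a
# generated soft_plus_2d builder; the unary log and exp paths are now reached
# through elemwise_unary instead. Keyword arguments below are assumptions.
from mlir.dialects import linalg
from mlir.dialects.linalg.opdsl.lang import UnaryFn

def build_log_and_exp(x, init_log, init_exp):
  # Before this commit (builder no longer generated):
  #   y = linalg.soft_plus_2d(x, outs=[init])
  # After this commit, the unary functions are exercised via elemwise_unary:
  exp_x = linalg.elemwise_unary(x, outs=[init_exp])                   # assumed default fun: exp
  log_x = linalg.elemwise_unary(x, outs=[init_log], fun=UnaryFn.log)  # assumed fun selection
  return exp_x, log_x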