!49591 [api] Rename soft_shrink

Merge pull request !49591 from shaojunsong/fix/softshrink
This commit is contained in:
i-robot 2023-03-02 06:51:41 +00:00 committed by Gitee
commit 33ad052c3a
No known key found for this signature in database
GPG Key ID: 173E9B9CA92EEF8F
9 changed files with 5 additions and 47 deletions

View File

@@ -1,6 +0,0 @@
mindspore.Tensor.soft_shrink
============================
.. py:method:: mindspore.Tensor.soft_shrink(lambd=0.5)
详情请参考 :func:`mindspore.ops.soft_shrink`

View File

@@ -1,7 +1,7 @@
mindspore.ops.soft_shrink
mindspore.ops.softshrink
=========================
.. py:function:: mindspore.ops.soft_shrink(x, lambd=0.5)
.. py:function:: mindspore.ops.softshrink(x, lambd=0.5)
Soft Shrink激活函数按输入元素计算输出。公式定义如下

View File

@@ -350,7 +350,6 @@ BuiltInTypeMap &GetMethodMap() {
{"hardshrink", std::string("hardshrink")}, // P.hshrink
{"heaviside", std::string("heaviside")}, // F.heaviside
{"hypot", std::string("hypot")}, // F.hypot
{"soft_shrink", std::string("soft_shrink")}, // P.SoftShrink
{"gather_nd", std::string("gather_nd")}, // P.GatherNd()
{"unique_consecutive", std::string("unique_consecutive")}, // UniqueConsecutive()
{"unique_with_pad", std::string("unique_with_pad")}, // P.UniqueWithPad()

View File

@@ -2136,11 +2136,6 @@ def hypot(x, other):
return F.hypot(x, other)
def soft_shrink(x, lambd=0.5):
    """Element-wise soft-shrink activation: build a SoftShrink primitive with `lambd` and apply it to `x`."""
    shrink = F.SoftShrink(lambd)
    return shrink(x)
def getitem(data, index):
    """Implementation of `getitem`."""
    # Delegate to the object's own __getitem__ so any container type works.
    fetch = data.__getitem__
    return fetch(index)

View File

@@ -3160,13 +3160,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
self._init_check()
return tensor_operator_registry.get('hypot')(self, other)
def soft_shrink(self, lambd=0.5):
    r"""
    For details, please refer to :func:`mindspore.ops.soft_shrink`.
    """
    self._init_check()
    # Look up the registered SoftShrink primitive, instantiate it with
    # `lambd`, then apply it to this tensor.
    shrink_op = tensor_operator_registry.get('soft_shrink')(lambd)
    return shrink_op(self)
def to_coo(self):
"""
Convert a Tensor to COOTensor.

View File

@@ -439,7 +439,6 @@ from .nn_func import (
pixel_shuffle,
pixel_unshuffle,
hardshrink,
soft_shrink,
is_floating_point,
intopk,
interpolate,

View File

@@ -2340,7 +2340,7 @@ def softmin(x, axis=-1):
return softmax_(-x)
def soft_shrink(x, lambd=0.5):
def softshrink(x, lambd=0.5):
r"""
Applies the SoftShrink function element-wise.
@@ -2373,7 +2373,7 @@ def soft_shrink(x, lambd=0.5):
>>> from mindspore import ops
>>> import numpy as np
>>> x = Tensor(np.array([[ 0.5297, 0.7871, 1.1754], [ 0.7836, 0.6218, -1.1542]]), mindspore.float32)
>>> output = ops.soft_shrink(x)
>>> output = ops.softshrink(x)
>>> print(output)
[[ 0.02979 0.287 0.676 ]
[ 0.2837 0.1216 -0.6543 ]]
@@ -6107,7 +6107,6 @@ __all__ = [
'pixel_shuffle',
'pixel_unshuffle',
'hardshrink',
'soft_shrink',
'is_floating_point',
'flip',
'fliplr',

View File

@@ -233,7 +233,6 @@ tensor_operator_registry.register('invert', invert)
tensor_operator_registry.register('hardshrink', P.HShrink)
tensor_operator_registry.register('heaviside', heaviside)
tensor_operator_registry.register('hypot', hypot)
tensor_operator_registry.register('soft_shrink', P.SoftShrink)
tensor_operator_registry.register('svd', linalg_ops.Svd)
tensor_operator_registry.register('diag', P.Diag)
tensor_operator_registry.register('diagflat', diagflat)

View File

@@ -76,26 +76,6 @@ def test_soft_shrink(dtype, data_shape, lambd):
np.testing.assert_array_almost_equal(output.asnumpy(), benchmark_output)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_soft_shrink_tensor_check():
    """
    Feature: test_soft_shrink_tensor_check.
    Description: test cases for tensor func
    Expectation: the result matches the NumPy benchmark output.
    """
    # Run eagerly on GPU so Tensor.soft_shrink dispatches in PyNative mode.
    context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
    in_np = np.random.rand(10).astype(np.float32)
    in_tensor = Tensor(in_np)
    # NumPy reference result computed with the default lambd of 0.5.
    benchmark_output = soft_shrink_op_np_bencmark(in_tensor, 0.5)
    output = in_tensor.soft_shrink()
    np.testing.assert_array_almost_equal(output.asnumpy(), benchmark_output)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@@ -110,7 +90,7 @@ def test_soft_shrink_functional_check():
in_np = np.random.rand(3, 5).astype(np.float32)
in_tensor = Tensor(in_np)
output_ms = F.soft_shrink(in_tensor)
output_ms = F.softshrink(in_tensor)
output_np = soft_shrink_op_np_bencmark(in_tensor, 0.5)
np.testing.assert_allclose(output_ms.asnumpy(), output_np, rtol=1e-3)