forked from mindspore-Ecosystem/mindspore
!16518 fix G.AsinhGrad, G.AcoshGrad on cpu
From: @wangrao124 Reviewed-by: @wuxuejian,@liangchenghui Signed-off-by: @wuxuejian
This commit is contained in:
commit f3ce8af0d6
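Background for the change below (a reasoning note, inferred from the diff itself): on CPU, AsinhGrad and AcoshGrad receive the forward output y as their first input, not the original input x. The correct divisor is therefore cosh(y) for asinh and sinh(y) for acosh, rather than sqrt(1 + x * x) and sqrt(x * x - 1), which only hold when the argument is x. The kernel and the corresponding CPU tests are updated accordingly.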
@@ -170,7 +170,7 @@ template <typename T>
 void EltWiseGradCPUKernel<T>::AsinhGrad(const T *input1, const T *input2, T *out, size_t start, size_t end) const {
   for (size_t i = start; i < end; i++) {
     T dividend = input2[i];
-    T divisor = sqrt(1 + input1[i] * input1[i]);
+    T divisor = cosh(input1[i]);
     if (divisor == 0) {
       if (dividend == 0) {
         out[i] = std::numeric_limits<T>::quiet_NaN();
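For context, a minimal numpy sketch (illustrative only, not part of the commit) of the identity behind the new divisor: with y = asinh(x), cosh(y) equals sqrt(1 + x * x), so dividing the incoming gradient by cosh of the forward output reproduces d/dx asinh(x).

# Standalone numpy check (assumption: illustrative sketch, not from the commit).
# Verifies cosh(asinh(x)) == sqrt(1 + x*x), i.e. the fixed kernel's divisor.
import numpy as np

x = np.array([-2.0, -0.5, 0.0, 0.5, 2.0], dtype=np.float32)
y = np.arcsinh(x)                      # forward output fed to AsinhGrad
dy = np.ones_like(x)                   # incoming gradient

grad_new = dy / np.cosh(y)             # what the fixed kernel computes
grad_ref = dy / np.sqrt(1.0 + x * x)   # analytic d/dx asinh(x)

assert np.allclose(grad_new, grad_ref)
print(grad_new)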
@@ -191,7 +191,7 @@ template <typename T>
 void EltWiseGradCPUKernel<T>::AcoshGrad(const T *input1, const T *input2, T *out, size_t start, size_t end) const {
   for (size_t i = start; i < end; i++) {
     T dividend = input2[i];
-    T divisor = sqrt(input1[i] * input1[i] - 1);
+    T divisor = sinh(input1[i]);
     if (divisor == 0) {
       if (dividend == 0) {
         out[i] = std::numeric_limits<T>::quiet_NaN();
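The same kind of check applies to the acosh case (again an illustrative sketch, not from the commit): with y = acosh(x) and x > 1, sinh(y) equals sqrt(x * x - 1), so dividing by sinh of the forward output matches d/dx acosh(x).

# Standalone numpy check (assumption: illustrative sketch, not from the commit).
# Verifies sinh(acosh(x)) == sqrt(x*x - 1) for x > 1, i.e. the fixed divisor.
import numpy as np

x = np.array([1.5, 2.0, 5.0], dtype=np.float32)
y = np.arccosh(x)                      # forward output fed to AcoshGrad
dy = np.ones_like(x)                   # incoming gradient

grad_new = dy / np.sinh(y)             # what the fixed kernel computes
grad_ref = dy / np.sqrt(x * x - 1.0)   # analytic d/dx acosh(x)

assert np.allclose(grad_new, grad_ref)
print(grad_new)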
@@ -37,10 +37,10 @@ class NetAcoshGrad(nn.Cell):
 @pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
 def test_acosh_grad():
-    x = np.array([5, 4, 3]).astype('float32')
+    out = np.array([5, 4, 3]).astype('float32')
     dy = np.array([1, 0, -1]).astype('float32')
     acosh_grad = NetAcoshGrad()
-    output = acosh_grad(Tensor(x), Tensor(dy))
+    output = acosh_grad(Tensor(out), Tensor(dy))
     print(output)
-    expect = dy / np.sqrt(x * x - 1)
+    expect = dy / np.sinh(out)
     assert np.allclose(output.asnumpy(), expect)
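The NetAcoshGrad cell is defined earlier in the test file and is not shown in this hunk. A minimal sketch of what such a wrapper typically looks like, assuming the G alias refers to mindspore.ops.operations._grad_ops as suggested by the G.AcoshGrad name in the commit title (the import path is an assumption, not part of this diff):

# Hedged sketch of a grad-op wrapper cell; the real NetAcoshGrad lives outside
# this hunk, and the _grad_ops import path is an assumption.
import mindspore.nn as nn
from mindspore.ops.operations import _grad_ops as G

class NetAcoshGrad(nn.Cell):
    def __init__(self):
        super(NetAcoshGrad, self).__init__()
        self.acosh_grad = G.AcoshGrad()

    def construct(self, out, dy):
        # AcoshGrad takes the forward output and the incoming gradient.
        return self.acosh_grad(out, dy)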
@@ -37,10 +37,10 @@ class NetAsinhGrad(nn.Cell):
 @pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
 def test_asinh_grad():
-    x = np.array([-0.5, 0, 0.5]).astype('float32')
+    out = np.array([-0.5, 0, 0.5]).astype('float32')
     dy = np.array([1, 0, -1]).astype('float32')
     asinh_grad = NetAsinhGrad()
-    output = asinh_grad(Tensor(x), Tensor(dy))
+    output = asinh_grad(Tensor(out), Tensor(dy))
     print(output)
-    expect = dy / np.sqrt(1 + x * x)
+    expect = dy / np.cosh(out)
     assert np.allclose(output.asnumpy(), expect)