diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/mkl_cpu_kernel.cc b/mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/mkl_cpu_kernel.cc
index e59303a646c..21ac41deb38 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/mkl_cpu_kernel.cc
+++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/mkl_cpu_kernel.cc
@@ -111,22 +111,18 @@ bool MKLCPUKernel::BinaryBroadCast(std::vector<size_t> *src0_shape, std::vector<
 }
 
 dnnl::memory::format_tag MKLCPUKernel::GetDefaultFormatTag(const dnnl::memory::dims &dims) const {
-  dnnl::memory::format_tag mem_tag;
-  auto dim_size = dims.size();
-  if (dim_size == 5) {
-    mem_tag = dnnl::memory::format_tag::abcde;
-  } else if (dim_size == 4) {
-    mem_tag = dnnl::memory::format_tag::abcd;
-  } else if (dim_size == 3) {
-    mem_tag = dnnl::memory::format_tag::abc;
-  } else if (dim_size == 2) {
-    mem_tag = dnnl::memory::format_tag::ab;
-  } else if (dim_size == 1) {
-    mem_tag = dnnl::memory::format_tag::a;
-  } else {
-    MS_LOG(EXCEPTION) << "Kernel dims invalid " << dim_size;
+  // Plain (dense, row-major) format tags indexed by tensor rank - 1; supports 1-D .. 7-D.
+  static const std::vector<dnnl::memory::format_tag> tag_vec = {
+      dnnl::memory::format_tag::a,    dnnl::memory::format_tag::ab,    dnnl::memory::format_tag::abc,
+      dnnl::memory::format_tag::abcd, dnnl::memory::format_tag::abcde, dnnl::memory::format_tag::abcdef,
+      dnnl::memory::format_tag::abcdefg};
+
+  auto rank = dims.size();
+  // rank is unsigned, so rank == 0 must be rejected explicitly: tag_vec[rank - 1] would wrap around.
+  if (rank < 1 || rank > tag_vec.size()) {
+    MS_LOG(EXCEPTION) << "The kernel does not support constructing a " << rank << "-D tensor dnnl memory format_tag.";
   }
-  return mem_tag;
+  return tag_vec[rank - 1];
 }
 
 dnnl::memory::desc MKLCPUKernel::GetDefaultMemDesc(const std::vector<size_t> &shape) {
diff --git a/mindspore/ops/operations/nn_ops.py b/mindspore/ops/operations/nn_ops.py
index 46b6ce6ffd7..744b5e07f33 100755
--- a/mindspore/ops/operations/nn_ops.py
+++ b/mindspore/ops/operations/nn_ops.py
@@ -346,14 +346,14 @@ class Softplus(Primitive):
 
     Inputs:
         - **input_x** (Tensor) - Tensor of shape :math:`(N, *)`, where :math:`*` means, any number of
-          additional dimensions, with float16 or float32 data type.
+          additional dimensions, with float16, float32 or float64 data type.
 
     Outputs:
         Tensor, with the same type and shape as the `input_x`.
 
     Raises:
         TypeError: If `input_x` is not a Tensor.
-        TypeError: If dtype of `input_x` is neither float16 nor float32.
+        TypeError: If dtype of `input_x` is not float16, float32 or float64.
 
     Supported Platforms:
         ``Ascend`` ``GPU``
diff --git a/tests/st/ops/cpu/test_softplus_grad_op.py b/tests/st/ops/cpu/test_softplus_grad_op.py
index 76879689960..4e70c850560 100644
--- a/tests/st/ops/cpu/test_softplus_grad_op.py
+++ b/tests/st/ops/cpu/test_softplus_grad_op.py
@@ -76,3 +76,17 @@ def test_softplus_grad_fp16():
     output = grad(Tensor(x_np), Tensor(dy_np))
     expect = dy_np * np.exp(x_np) / (1 + np.exp(x_np))
     assert np.allclose(output[0].asnumpy(), expect, rtol=1e-2)
+
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_cpu
+@pytest.mark.env_onecard
+def test_softplus_grad_7d_fp64():
+    np.random.seed(20)
+    x_np = np.random.randn(5, 3, 6, 3, 4, 5, 6).astype(np.float64)
+    dy_np = np.random.randn(5, 3, 6, 3, 4, 5, 6).astype(np.float64)
+    net = SoftplusNet()
+    grad = Grad(net)
+    output = grad(Tensor(x_np), Tensor(dy_np))
+    expect = dy_np * np.exp(x_np) / (1 + np.exp(x_np))
+    assert np.allclose(output[0].asnumpy(), expect, rtol=1e-2)
diff --git a/tests/st/ops/cpu/test_softplus_op.py b/tests/st/ops/cpu/test_softplus_op.py
index 19af2a20762..2f0fd97c3fb 100644
--- a/tests/st/ops/cpu/test_softplus_op.py
+++ b/tests/st/ops/cpu/test_softplus_op.py
@@ -37,6 +37,20 @@ def SoftplusCompute(x):
     return np.log(1 + np.exp(x))
 
 
+@pytest.mark.level0
+@pytest.mark.platform_x86_cpu
+@pytest.mark.env_onecard
+def test_softplus_0d():
+    x_np = np.array(1.2, np.float64)
+    y_np = SoftplusCompute(x_np)
+
+    x_ms = Tensor(x_np)
+    net = SoftplusNet()
+    y_ms = net(x_ms)
+
+    assert np.allclose(y_np, y_ms.asnumpy())
+
+
 @pytest.mark.level0
 @pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
@@ -105,3 +119,17 @@ def test_softplus_4d_fp16():
     y_ms = net(x_ms)
 
     assert np.allclose(y_np, y_ms.asnumpy(), rtol=5e-3)
+
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_cpu
+@pytest.mark.env_onecard
+def test_softplus_7d_fp64():
+    x_np = np.random.random((5, 3, 6, 3, 4, 5, 6)).astype(np.float64)
+    y_np = SoftplusCompute(x_np)
+
+    x_ms = Tensor(x_np)
+    net = SoftplusNet()
+    y_ms = net(x_ms)
+
+    assert np.allclose(y_np, y_ms.asnumpy(), rtol=5e-3)