From 6a901a5f8a4155f3a918f3b00d3e4f69c5781345 Mon Sep 17 00:00:00 2001
From: looop5
Date: Tue, 1 Nov 2022 11:31:18 +0800
Subject: [PATCH] sync clean code

---
 .../device/cpu/kernel/index_add_cpu_kernel.cc  |  2 +-
 .../sparse_apply_adagrad_da_cpu_kernel.cc      | 18 +-----------------
 .../st/ops/graph_kernel/test_bias_add_grad.py  |  3 +++
 3 files changed, 5 insertions(+), 18 deletions(-)

diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/index_add_cpu_kernel.cc b/mindspore/ccsrc/plugin/device/cpu/kernel/index_add_cpu_kernel.cc
index 744816dbbca..728d0595cc1 100644
--- a/mindspore/ccsrc/plugin/device/cpu/kernel/index_add_cpu_kernel.cc
+++ b/mindspore/ccsrc/plugin/device/cpu/kernel/index_add_cpu_kernel.cc
@@ -191,7 +191,7 @@ bool IndexAddCpuKernelMod::LaunchKernel(const std::vector &i
   const float block_size = 1024;
   const size_t inner_block_size = 100;
   if (HasDuplicateIndex(indices, y_axis_size_)) {
-    ParallelLaunch(heavy_task_block, outer_size_, CalcSizePerThread(outer_size_), this);
+    ParallelLaunch(heavy_task_block, outer_size_, SizeToFloat(CalcSizePerThread(outer_size_)), this);
   } else if (inner_size_ > 1 && inner_size_ <= inner_block_size) {
     ParallelLaunch(task_block, y_nums_ / inner_size_, block_size / inner_size_, this);
   } else {
diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/sparse_apply_adagrad_da_cpu_kernel.cc b/mindspore/ccsrc/plugin/device/cpu/kernel/sparse_apply_adagrad_da_cpu_kernel.cc
index f24aad876cf..743104f7885 100644
--- a/mindspore/ccsrc/plugin/device/cpu/kernel/sparse_apply_adagrad_da_cpu_kernel.cc
+++ b/mindspore/ccsrc/plugin/device/cpu/kernel/sparse_apply_adagrad_da_cpu_kernel.cc
@@ -179,7 +179,7 @@ bool SparseApplyAdagradDACpuKernelMod::LaunchKernel(const std::vector
         if (l1_scalar > static_cast(0.0)) {
-          var[j] = static_cast(-1.0) * static_cast(Sign(static_cast(ga[j]))) *
+          var[j] = static_cast(-1.0) * static_cast(Sign(static_cast(ga[j]))) *
                    static_cast(std::fmax(
                      static_cast((static_cast(std::fabs(static_cast(ga[j]))) / global_step_scalar) - l1_scalar),
@@ -209,14 +209,6 @@ const std::vector> &SparseApplyAdagradDACpu
      &SparseApplyAdagradDACpuKernelMod::LaunchKernel},
     {ADD_KERNEL(Int64, Int64, Int64, Int64, Int32, Int64, Int64, Int64, Int64, Int64),
      &SparseApplyAdagradDACpuKernelMod::LaunchKernel},
-    {ADD_KERNEL(UInt8, UInt8, UInt8, UInt8, Int32, UInt8, UInt8, UInt8, Int64, UInt8),
-     &SparseApplyAdagradDACpuKernelMod::LaunchKernel},
-    {ADD_KERNEL(UInt16, UInt16, UInt16, UInt16, Int32, UInt16, UInt16, UInt16, Int64, UInt16),
-     &SparseApplyAdagradDACpuKernelMod::LaunchKernel},
-    {ADD_KERNEL(UInt32, UInt32, UInt32, UInt32, Int32, UInt32, UInt32, UInt32, Int64, UInt32),
-     &SparseApplyAdagradDACpuKernelMod::LaunchKernel},
-    {ADD_KERNEL(UInt64, UInt64, UInt64, UInt64, Int32, UInt64, UInt64, UInt64, Int64, UInt64),
-     &SparseApplyAdagradDACpuKernelMod::LaunchKernel},
     {ADD_KERNEL(Float16, Float16, Float16, Float16, Int32, Float16, Float16, Float16, Int64, Float16),
      &SparseApplyAdagradDACpuKernelMod::LaunchKernel},
     {ADD_KERNEL(Float32, Float32, Float32, Float32, Int32, Float32, Float32, Float32, Int64, Float32),
@@ -231,14 +223,6 @@ const std::vector> &SparseApplyAdagradDACpu
      &SparseApplyAdagradDACpuKernelMod::LaunchKernel},
     {ADD_KERNEL(Int64, Int64, Int64, Int64, Int64, Int64, Int64, Int64, Int64, Int64),
      &SparseApplyAdagradDACpuKernelMod::LaunchKernel},
-    {ADD_KERNEL(UInt8, UInt8, UInt8, UInt8, Int64, UInt8, UInt8, UInt8, Int64, UInt8),
-     &SparseApplyAdagradDACpuKernelMod::LaunchKernel},
-    {ADD_KERNEL(UInt16, UInt16, UInt16, UInt16, Int64, UInt16, UInt16, UInt16, Int64, UInt16),
-     &SparseApplyAdagradDACpuKernelMod::LaunchKernel},
-    {ADD_KERNEL(UInt32, UInt32, UInt32, UInt32, Int64, UInt32, UInt32, UInt32, Int64, UInt32),
-     &SparseApplyAdagradDACpuKernelMod::LaunchKernel},
-    {ADD_KERNEL(UInt64, UInt64, UInt64, UInt64, Int64, UInt64, UInt64, UInt64, Int64, UInt64),
-     &SparseApplyAdagradDACpuKernelMod::LaunchKernel},
     {ADD_KERNEL(Float16, Float16, Float16, Float16, Int64, Float16, Float16, Float16, Int64, Float16),
      &SparseApplyAdagradDACpuKernelMod::LaunchKernel},
     {ADD_KERNEL(Float32, Float32, Float32, Float32, Int64, Float32, Float32, Float32, Int64, Float32),
diff --git a/tests/st/ops/graph_kernel/test_bias_add_grad.py b/tests/st/ops/graph_kernel/test_bias_add_grad.py
index f9b65324d65..973a0badfe3 100644
--- a/tests/st/ops/graph_kernel/test_bias_add_grad.py
+++ b/tests/st/ops/graph_kernel/test_bias_add_grad.py
@@ -20,6 +20,7 @@ from mindspore import Tensor
 from mindspore.common.api import jit
 from mindspore.ops.operations import _grad_ops as G
 
+
 class Net(nn.Cell):
     def __init__(self):
         super(Net, self).__init__()
@@ -36,6 +37,7 @@ def get_output(dout, enable_graph_kernel=False):
     output = opt(Tensor(dout))
     return output
 
+
 def test_bias_add_grad(shape, dtype):
     np.random.seed(0)
     dout = np.random.normal(0, 1, shape).astype(dtype)
@@ -50,6 +52,7 @@ def test_bias_add_grad(shape, dtype):
     atol = 1.e-3
     assert np.allclose(expect.asnumpy(), output.asnumpy(), rtol, atol, equal_nan=True)
 
+
 @pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training