!44920 sync clean code

Merge pull request !44920 from looop5/sync_clean_code
i-robot 2022-11-02 01:23:59 +00:00 committed by Gitee
commit a4de4786e6
3 changed files with 5 additions and 18 deletions


@@ -191,7 +191,7 @@ bool IndexAddCpuKernelMod::LaunchKernel(const std::vector<kernel::AddressPtr> &i
 const float block_size = 1024;
 const size_t inner_block_size = 100;
 if (HasDuplicateIndex(indices, y_axis_size_)) {
-ParallelLaunch(heavy_task_block, outer_size_, CalcSizePerThread(outer_size_), this);
+ParallelLaunch(heavy_task_block, outer_size_, SizeToFloat(CalcSizePerThread(outer_size_)), this);
 } else if (inner_size_ > 1 && inner_size_ <= inner_block_size) {
 ParallelLaunch(task_block, y_nums_ / inner_size_, block_size / inner_size_, this);
 } else {
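The only change in this hunk wraps the per-thread block size in SizeToFloat, so the size_t returned by CalcSizePerThread is converted to the float expected by ParallelLaunch explicitly instead of through an implicit narrowing. A minimal sketch of what such a helper looks like, assuming it is a plain cast wrapper (the exact MindSpore definition may differ):

#include <cstddef>

// Hedged sketch only: an explicit size_t -> float conversion helper in the spirit of SizeToFloat.
inline float SizeToFloat(size_t u) { return static_cast<float>(u); }

// Call-site shape after the change (illustrative, not the full kernel):
// ParallelLaunch(heavy_task_block, outer_size_, SizeToFloat(CalcSizePerThread(outer_size_)), this);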


@@ -179,7 +179,7 @@ bool SparseApplyAdagradDACpuKernelMod::LaunchKernel(const std::vector<kernel::Ad
 ga[j] = ga[j] + g[k];
 da[j] = da[j] + g[k] * g[k];
 if (l1_scalar > static_cast<T>(0.0)) {
-var[j] = static_cast<T>(-1.0) * static_cast<T>(Sign(static_cast<double>(ga[j]))) *
+var[j] = static_cast<T>(-1.0) * static_cast<T>(Sign(static_cast<float>(ga[j]))) *
 static_cast<T>(std::fmax(
 static_cast<double>((static_cast<T>(std::fabs(static_cast<double>(ga[j]))) / global_step_scalar) -
 l1_scalar),
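Here the clean-up is the cast fed to Sign: the accumulated gradient ga[j] is narrowed to float rather than promoted to double before its sign is taken, avoiding an unnecessary double conversion flagged by the clean-code scan. A rough, hypothetical sketch of the kind of three-way sign helper this expression relies on (MindSpore's actual Sign may be defined differently):

// Hypothetical sign helper, shown only to make the cast change readable.
template <typename T>
T Sign(T x) {
  if (x > static_cast<T>(0)) return static_cast<T>(1);
  if (x < static_cast<T>(0)) return static_cast<T>(-1);
  return static_cast<T>(0);
}

// After the change the argument is a float: Sign(static_cast<float>(ga[j]))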
@@ -209,14 +209,6 @@ const std::vector<std::pair<KernelAttr, KernelRunFunc>> &SparseApplyAdagradDACpu
 &SparseApplyAdagradDACpuKernelMod::LaunchKernel<int32_t, int32_t>},
 {ADD_KERNEL(Int64, Int64, Int64, Int64, Int32, Int64, Int64, Int64, Int64, Int64),
 &SparseApplyAdagradDACpuKernelMod::LaunchKernel<int32_t, int64_t>},
-{ADD_KERNEL(UInt8, UInt8, UInt8, UInt8, Int32, UInt8, UInt8, UInt8, Int64, UInt8),
-&SparseApplyAdagradDACpuKernelMod::LaunchKernel<int32_t, uint8_t>},
-{ADD_KERNEL(UInt16, UInt16, UInt16, UInt16, Int32, UInt16, UInt16, UInt16, Int64, UInt16),
-&SparseApplyAdagradDACpuKernelMod::LaunchKernel<int32_t, uint16_t>},
-{ADD_KERNEL(UInt32, UInt32, UInt32, UInt32, Int32, UInt32, UInt32, UInt32, Int64, UInt32),
-&SparseApplyAdagradDACpuKernelMod::LaunchKernel<int32_t, uint32_t>},
-{ADD_KERNEL(UInt64, UInt64, UInt64, UInt64, Int32, UInt64, UInt64, UInt64, Int64, UInt64),
-&SparseApplyAdagradDACpuKernelMod::LaunchKernel<int32_t, uint64_t>},
 {ADD_KERNEL(Float16, Float16, Float16, Float16, Int32, Float16, Float16, Float16, Int64, Float16),
 &SparseApplyAdagradDACpuKernelMod::LaunchKernel<int32_t, float16>},
 {ADD_KERNEL(Float32, Float32, Float32, Float32, Int32, Float32, Float32, Float32, Int64, Float32),
@@ -231,14 +223,6 @@ const std::vector<std::pair<KernelAttr, KernelRunFunc>> &SparseApplyAdagradDACpu
 &SparseApplyAdagradDACpuKernelMod::LaunchKernel<int64_t, int32_t>},
 {ADD_KERNEL(Int64, Int64, Int64, Int64, Int64, Int64, Int64, Int64, Int64, Int64),
 &SparseApplyAdagradDACpuKernelMod::LaunchKernel<int64_t, int64_t>},
-{ADD_KERNEL(UInt8, UInt8, UInt8, UInt8, Int64, UInt8, UInt8, UInt8, Int64, UInt8),
-&SparseApplyAdagradDACpuKernelMod::LaunchKernel<int64_t, uint8_t>},
-{ADD_KERNEL(UInt16, UInt16, UInt16, UInt16, Int64, UInt16, UInt16, UInt16, Int64, UInt16),
-&SparseApplyAdagradDACpuKernelMod::LaunchKernel<int64_t, uint16_t>},
-{ADD_KERNEL(UInt32, UInt32, UInt32, UInt32, Int64, UInt32, UInt32, UInt32, Int64, UInt32),
-&SparseApplyAdagradDACpuKernelMod::LaunchKernel<int64_t, uint32_t>},
-{ADD_KERNEL(UInt64, UInt64, UInt64, UInt64, Int64, UInt64, UInt64, UInt64, Int64, UInt64),
-&SparseApplyAdagradDACpuKernelMod::LaunchKernel<int64_t, uint64_t>},
 {ADD_KERNEL(Float16, Float16, Float16, Float16, Int64, Float16, Float16, Float16, Int64, Float16),
 &SparseApplyAdagradDACpuKernelMod::LaunchKernel<int64_t, float16>},
 {ADD_KERNEL(Float32, Float32, Float32, Float32, Int64, Float32, Float32, Float32, Int64, Float32),
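The two deleted blocks in this file prune the unsigned-integer variants (UInt8 through UInt64, for both Int32 and Int64 indices) from the kernel's registration table; each remaining entry still pairs a KernelAttr produced by ADD_KERNEL with the matching LaunchKernel template instantiation. A simplified, hypothetical sketch of that attr-to-function pairing pattern (names, dtype codes, and the launch signature are stand-ins, not the real macro expansion):

#include <cstdint>
#include <utility>
#include <vector>

// Stand-in for KernelAttr: records the index and value dtypes of one supported signature.
struct DemoKernelAttr {
  int index_dtype;
  int value_dtype;
};

class DemoKernel {
 public:
  using KernelRunFunc = bool (DemoKernel::*)();

  template <typename IndexT, typename ValueT>
  bool LaunchKernel() { return true; }

  // Mirrors the shape of GetFuncList(): one row per (attr, launch-function) pair.
  static const std::vector<std::pair<DemoKernelAttr, KernelRunFunc>> &GetFuncList() {
    static const std::vector<std::pair<DemoKernelAttr, KernelRunFunc>> func_list = {
        {{32, 32}, &DemoKernel::LaunchKernel<int32_t, int32_t>},
        {{64, 64}, &DemoKernel::LaunchKernel<int64_t, int64_t>},
        // The commit deletes the rows that instantiated LaunchKernel for uint8_t..uint64_t values.
    };
    return func_list;
  }
};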


@@ -20,6 +20,7 @@ from mindspore import Tensor
from mindspore.common.api import jit
from mindspore.ops.operations import _grad_ops as G
class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
@@ -36,6 +37,7 @@ def get_output(dout, enable_graph_kernel=False):
output = opt(Tensor(dout))
return output
def test_bias_add_grad(shape, dtype):
np.random.seed(0)
dout = np.random.normal(0, 1, shape).astype(dtype)
@@ -50,6 +52,7 @@ def test_bias_add_grad(shape, dtype):
atol = 1.e-3
assert np.allclose(expect.asnumpy(), output.asnumpy(), rtol, atol, equal_nan=True)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training