From c956dfff511522d1a69bb40c1221a77ed73734b8 Mon Sep 17 00:00:00 2001
From: yujianfeng
Date: Thu, 4 Jun 2020 18:51:22 +0800
Subject: [PATCH] Add SparseAdam and SparseLazyAdam cpu kernel

---
 mindspore/ccsrc/kernel/common_utils.cc        |  11 +-
 mindspore/ccsrc/kernel/cpu/cpu_kernel.h       |   1 +
 .../cpu/sparse_apply_adam_cpu_kernel.cc       | 131 ++++++++++++++++++
 .../kernel/cpu/sparse_apply_adam_cpu_kernel.h |  66 +++++++++
 .../cpu/sparse_apply_ftrl_cpu_kernel.cc       |  13 +-
 .../cpu/sparse_apply_lazy_adam_cpu_kernel.cc  | 113 +++++++++++++++
 .../cpu/sparse_apply_lazy_adam_cpu_kernel.h   |  63 +++++++++
 tests/st/ops/cpu/test_sparse_apply_adam_op.py |  46 ++++++
 8 files changed, 435 insertions(+), 9 deletions(-)
 create mode 100644 mindspore/ccsrc/kernel/cpu/sparse_apply_adam_cpu_kernel.cc
 create mode 100644 mindspore/ccsrc/kernel/cpu/sparse_apply_adam_cpu_kernel.h
 create mode 100644 mindspore/ccsrc/kernel/cpu/sparse_apply_lazy_adam_cpu_kernel.cc
 create mode 100644 mindspore/ccsrc/kernel/cpu/sparse_apply_lazy_adam_cpu_kernel.h
 create mode 100644 tests/st/ops/cpu/test_sparse_apply_adam_op.py

diff --git a/mindspore/ccsrc/kernel/common_utils.cc b/mindspore/ccsrc/kernel/common_utils.cc
index 497bb397f72..60ddf7b2fe1 100644
--- a/mindspore/ccsrc/kernel/common_utils.cc
+++ b/mindspore/ccsrc/kernel/common_utils.cc
@@ -559,21 +559,24 @@ void DeduplicateIndexedSlices(const SparseGradient &origin_sparse_grad, SparseGr
   size_t unique_indices_size = 0;
   for (size_t i = 0; i < origin_sparse_grad.indices_size_; ++i) {
     int index = origin_sparse_grad.indices_[i];
-    if (index < 0 || (size_t)index >= first_dim) {
+    if (index < 0 || IntToSize(index) >= first_dim) {
       continue;
     }
     auto iter = index_map.find(index);
     if (iter == index_map.end()) {
       index_map[index] = unique_indices_size;
       unique_grad->indices_[unique_indices_size] = index;
-      for (size_t j = unique_indices_size * outer_dim, k = i * outer_dim; j < (unique_indices_size + 1) * outer_dim;
-           ++j, ++k) {
+      size_t start_index = unique_indices_size * outer_dim;
+      size_t end_index = start_index + outer_dim;
+      for (size_t j = start_index, k = i * outer_dim; j < end_index; ++j, ++k) {
         unique_grad->value_[j] = origin_sparse_grad.value_[k];
       }
       unique_indices_size++;
     } else {
       size_t first_index = iter->second;
-      for (size_t j = first_index * outer_dim, k = i * outer_dim; j < (first_index + 1) * outer_dim; ++j, ++k) {
+      size_t start_index = first_index * outer_dim;
+      size_t end_index = start_index + outer_dim;
+      for (size_t j = start_index, k = i * outer_dim; j < end_index; ++j, ++k) {
         unique_grad->value_[j] += origin_sparse_grad.value_[k];
       }
     }
diff --git a/mindspore/ccsrc/kernel/cpu/cpu_kernel.h b/mindspore/ccsrc/kernel/cpu/cpu_kernel.h
index a391f758635..08365298401 100644
--- a/mindspore/ccsrc/kernel/cpu/cpu_kernel.h
+++ b/mindspore/ccsrc/kernel/cpu/cpu_kernel.h
@@ -49,6 +49,7 @@ const char AXIS[] = "axis";
 const char BEGIN[] = "begin";
 const char END[] = "end";
 const char SIZE[] = "size";
+const char USE_NESTEROV[] = "use_nesterov";
 
 class CPUKernel : public kernel::KernelMod {
  public:
diff --git a/mindspore/ccsrc/kernel/cpu/sparse_apply_adam_cpu_kernel.cc b/mindspore/ccsrc/kernel/cpu/sparse_apply_adam_cpu_kernel.cc
new file mode 100644
index 00000000000..4d03645578d
--- /dev/null
+++ b/mindspore/ccsrc/kernel/cpu/sparse_apply_adam_cpu_kernel.cc
@@ -0,0 +1,131 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "kernel/cpu/sparse_apply_adam_cpu_kernel.h"
+#include "device/cpu/cpu_device_address.h"
+
+namespace mindspore {
+namespace kernel {
+namespace {
+constexpr size_t kSparseApplyAdamInputSize = 11;
+}  // namespace
+
+void SparseApplyAdamCPUKernel::InitKernel(const CNodePtr &kernel_node) {
+  MS_EXCEPTION_IF_NULL(kernel_node);
+  std::vector<size_t> var_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
+  std::vector<size_t> m_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1);
+  std::vector<size_t> v_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2);
+  std::vector<size_t> grad_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 9);
+  std::vector<size_t> indices_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 10);
+  if (!IsSameShape(var_shape, m_shape)) {
+    MS_LOG(EXCEPTION) << "var and m should have the same shape";
+  }
+  if (!IsSameShape(var_shape, v_shape)) {
+    MS_LOG(EXCEPTION) << "var and v should have the same shape";
+  }
+  if (var_shape.empty()) {
+    MS_LOG(EXCEPTION) << "var must be at least 1D";
+  }
+  var_first_dim_size_ = var_shape[0];
+  for (size_t i = 1; i < var_shape.size(); ++i) {
+    if (var_shape[i] != grad_shape[i]) {
+      MS_LOG(EXCEPTION) << "The shape of var and grad must be equal in dimension " << i;
+    }
+    var_outer_dim_size_ *= var_shape[i];
+  }
+  if (indices_shape.size() != 1) {
+    MS_LOG(EXCEPTION) << "indices must be 1D";
+  }
+  indices_size_ = indices_shape[0];
+  if (grad_shape[0] != indices_size_) {
+    MS_LOG(EXCEPTION) << "The first dimension of grad must be equal to the size of indices";
+  }
+  if (AnfAlgo::HasNodeAttr(USE_NESTEROV, kernel_node)) {
+    use_nesterov_ = AnfAlgo::GetNodeAttr<bool>(kernel_node, USE_NESTEROV);
+  }
+}
+
+void SparseApplyAdamCPUKernel::UpdateSparseMomentum(const SparseGradient &unique_sparse_grad, float *m, float *m_t,
+                                                    float *v, float beta1, float beta2) {
+  MS_EXCEPTION_IF_NULL(m);
+  MS_EXCEPTION_IF_NULL(m_t);
+  MS_EXCEPTION_IF_NULL(v);
+  for (size_t i = 0; i < unique_sparse_grad.indices_size_; ++i) {
+    int index = unique_sparse_grad.indices_[i];
+    if (index < 0 || IntToSize(index) >= var_first_dim_size_) {
+      MS_LOG(EXCEPTION) << "Index " << index << " in indices is out of range after unique process";
+    }
+    size_t start_index = var_outer_dim_size_ * index;
+    size_t end_index = start_index + var_outer_dim_size_;
+    for (size_t j = start_index, k = var_outer_dim_size_ * i; j < end_index; ++j, ++k) {
+      auto summed_grad = unique_sparse_grad.value_[k];
+      m[j] += (1 - beta1) * summed_grad;
+      v[j] += (1 - beta2) * summed_grad * summed_grad;
+      if (use_nesterov_) {
+        m_t[j] = m[j] * beta1 + (1 - beta1) * summed_grad;
+      }
+    }
+  }
+}
+
+bool SparseApplyAdamCPUKernel::Launch(const std::vector<kernel::AddressPtr> &inputs,
+                                      const std::vector<kernel::AddressPtr> & /*workspace*/,
+                                      const std::vector<kernel::AddressPtr> & /*outputs*/) {
+  if (inputs.size() < kSparseApplyAdamInputSize) {
+    MS_LOG(EXCEPTION) << "Error input size!";
+  }
+
+  auto var = reinterpret_cast<float *>(inputs[0]->addr);
+  auto m = reinterpret_cast<float *>(inputs[1]->addr);
+  auto v = reinterpret_cast<float *>(inputs[2]->addr);
+  auto beta1_power = reinterpret_cast<float *>(inputs[3]->addr)[0];
+  if (beta1_power == 1) {
+    MS_LOG(EXCEPTION) << "The beta1_power should not be 1";
+  }
+  auto beta2_power = reinterpret_cast<float *>(inputs[4]->addr)[0];
+  auto lr = reinterpret_cast<float *>(inputs[5]->addr)[0];
+  auto beta1 = reinterpret_cast<float *>(inputs[6]->addr)[0];
+  auto beta2 = reinterpret_cast<float *>(inputs[7]->addr)[0];
+  auto epsilon = reinterpret_cast<float *>(inputs[8]->addr)[0];
+  auto grad = reinterpret_cast<float *>(inputs[9]->addr);
+  auto indices = reinterpret_cast<int *>(inputs[10]->addr);
+
+  std::vector<float> new_grad;
+  new_grad.reserve(indices_size_ * var_outer_dim_size_);
+  std::vector<int> new_indices;
+  new_indices.reserve(indices_size_);
+  SparseGradient unique_sparse_grad({new_grad.data(), new_indices.data(), indices_size_});
+  DeduplicateIndexedSlices(SparseGradient({grad, indices, indices_size_}), &unique_sparse_grad, var_first_dim_size_,
+                           var_outer_dim_size_);
+  size_t total_dim_size = var_first_dim_size_ * var_outer_dim_size_;
+  // Update momentum
+  lr = lr * std::sqrt(1 - beta2_power) / (1 - beta1_power);
+  for (size_t i = 0; i < total_dim_size; ++i) {
+    m[i] *= beta1;
+    v[i] *= beta2;
+  }
+  std::vector<float> m_t(m, m + total_dim_size);
+  UpdateSparseMomentum(unique_sparse_grad, m, m_t.data(), v, beta1, beta2);
+  // Update weight
+  if (use_nesterov_) {
+    m = m_t.data();
+  }
+  for (size_t i = 0; i < total_dim_size; ++i) {
+    var[i] -= lr * m[i] / (std::sqrt(v[i]) + epsilon);
+  }
+  return true;
+}
+}  // namespace kernel
+}  // namespace mindspore
diff --git a/mindspore/ccsrc/kernel/cpu/sparse_apply_adam_cpu_kernel.h b/mindspore/ccsrc/kernel/cpu/sparse_apply_adam_cpu_kernel.h
new file mode 100644
index 00000000000..ea1ce54995a
--- /dev/null
+++ b/mindspore/ccsrc/kernel/cpu/sparse_apply_adam_cpu_kernel.h
@@ -0,0 +1,66 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MINDSPORE_CCSRC_KERNEL_CPU_SPARSE_APPLY_ADAM_CPU_KERNEL_H_
+#define MINDSPORE_CCSRC_KERNEL_CPU_SPARSE_APPLY_ADAM_CPU_KERNEL_H_
+
+#include <vector>
+#include <memory>
+#include "kernel/cpu/cpu_kernel.h"
+#include "kernel/cpu/cpu_kernel_factory.h"
+#include "kernel/common_utils.h"
+
+namespace mindspore {
+namespace kernel {
+class SparseApplyAdamCPUKernel : public CPUKernel {
+ public:
+  SparseApplyAdamCPUKernel() = default;
+  ~SparseApplyAdamCPUKernel() override = default;
+
+  void InitKernel(const CNodePtr &kernel_node) override;
+
+  bool Launch(const std::vector<kernel::AddressPtr> &inputs, const std::vector<kernel::AddressPtr> &workspace,
+              const std::vector<kernel::AddressPtr> &outputs) override;
+
+ private:
+  void UpdateSparseMomentum(const SparseGradient &unique_sparse_grad, float *m, float *m_t, float *v, float beta1,
+                            float beta2);
+  size_t indices_size_{0};
+  size_t var_first_dim_size_{0};
+  size_t var_outer_dim_size_{1};
+  bool use_nesterov_{false};
+};
+
+MS_REG_CPU_KERNEL(SparseApplyAdam,
+                  KernelAttr()
+                    .AddInputAttr(kNumberTypeFloat32)
+                    .AddInputAttr(kNumberTypeFloat32)
+                    .AddInputAttr(kNumberTypeFloat32)
+                    .AddInputAttr(kNumberTypeFloat32)
+                    .AddInputAttr(kNumberTypeFloat32)
+                    .AddInputAttr(kNumberTypeFloat32)
+                    .AddInputAttr(kNumberTypeFloat32)
+                    .AddInputAttr(kNumberTypeFloat32)
+                    .AddInputAttr(kNumberTypeFloat32)
+                    .AddInputAttr(kNumberTypeFloat32)
+                    .AddInputAttr(kNumberTypeInt32)
+                    .AddOutputAttr(kNumberTypeFloat32)
+                    .AddOutputAttr(kNumberTypeFloat32)
+                    .AddOutputAttr(kNumberTypeFloat32),
+                  SparseApplyAdamCPUKernel);
+}  // namespace kernel
+}  // namespace mindspore
+
+#endif  // MINDSPORE_CCSRC_KERNEL_CPU_SPARSE_APPLY_ADAM_CPU_KERNEL_H_
diff --git a/mindspore/ccsrc/kernel/cpu/sparse_apply_ftrl_cpu_kernel.cc b/mindspore/ccsrc/kernel/cpu/sparse_apply_ftrl_cpu_kernel.cc
index af1f301080f..4d96d31f426 100644
--- a/mindspore/ccsrc/kernel/cpu/sparse_apply_ftrl_cpu_kernel.cc
+++ b/mindspore/ccsrc/kernel/cpu/sparse_apply_ftrl_cpu_kernel.cc
@@ -84,19 +84,22 @@ bool SparseApplyFtrlCPUKernel::Launch(const std::vector<kernel::AddressPtr> &inp
   auto grad = reinterpret_cast<float *>(inputs[3]->addr);
   auto indices = reinterpret_cast<int *>(inputs[4]->addr);
 
-  std::vector<float> new_grad(indices_size_ * var_outer_dim_size_);
-  std::vector<int> new_indices(indices_size_);
+  std::vector<float> new_grad;
+  new_grad.reserve(indices_size_ * var_outer_dim_size_);
+  std::vector<int> new_indices;
+  new_indices.reserve(indices_size_);
   SparseGradient unique_sparse_grad({new_grad.data(), new_indices.data(), indices_size_});
   DeduplicateIndexedSlices(SparseGradient({grad, indices, indices_size_}), &unique_sparse_grad, var_first_dim_size_,
                            var_outer_dim_size_);
   for (size_t i = 0; i < unique_sparse_grad.indices_size_; ++i) {
     int index = unique_sparse_grad.indices_[i];
-    if (index < 0 || (size_t)index >= var_first_dim_size_) {
+    if (index < 0 || IntToSize(index) >= var_first_dim_size_) {
       MS_LOG(EXCEPTION) << "Index " << index << " in indices is out of range after unique process";
     }
-    for (size_t j = var_outer_dim_size_ * index, k = var_outer_dim_size_ * i; j < var_outer_dim_size_ * (index + 1);
-         ++j, ++k) {
+    size_t start_index = var_outer_dim_size_ * index;
+    size_t end_index = start_index + var_outer_dim_size_;
+    for (size_t j = start_index, k = var_outer_dim_size_ * i; j < end_index; ++j, ++k) {
       auto summed_grad = unique_sparse_grad.value_[k];
       auto accum_new = accum[j] + summed_grad * summed_grad;
       if (lr_power_ == -0.5) {
diff --git a/mindspore/ccsrc/kernel/cpu/sparse_apply_lazy_adam_cpu_kernel.cc b/mindspore/ccsrc/kernel/cpu/sparse_apply_lazy_adam_cpu_kernel.cc
new file mode 100644
index 00000000000..c0e091f02ba
--- /dev/null
+++ b/mindspore/ccsrc/kernel/cpu/sparse_apply_lazy_adam_cpu_kernel.cc
@@ -0,0 +1,113 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "kernel/cpu/sparse_apply_lazy_adam_cpu_kernel.h"
+#include "kernel/common_utils.h"
+#include "device/cpu/cpu_device_address.h"
+
+namespace mindspore {
+namespace kernel {
+namespace {
+constexpr size_t kSparseApplyLazyAdamInputSize = 11;
+}  // namespace
+
+void SparseApplyLazyAdamCPUKernel::InitKernel(const CNodePtr &kernel_node) {
+  MS_EXCEPTION_IF_NULL(kernel_node);
+  std::vector<size_t> var_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
+  std::vector<size_t> m_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1);
+  std::vector<size_t> v_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2);
+  std::vector<size_t> grad_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 9);
+  std::vector<size_t> indices_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 10);
+  if (!IsSameShape(var_shape, m_shape)) {
+    MS_LOG(EXCEPTION) << "var and m should have the same shape";
+  }
+  if (!IsSameShape(var_shape, v_shape)) {
+    MS_LOG(EXCEPTION) << "var and v should have the same shape";
+  }
+  if (var_shape.empty()) {
+    MS_LOG(EXCEPTION) << "var must be at least 1D";
+  }
+  var_first_dim_size_ = var_shape[0];
+  for (size_t i = 1; i < var_shape.size(); ++i) {
+    if (var_shape[i] != grad_shape[i]) {
+      MS_LOG(EXCEPTION) << "The shape of var and grad must be equal in dimension " << i;
+    }
+    var_outer_dim_size_ *= var_shape[i];
+  }
+  if (indices_shape.size() != 1) {
+    MS_LOG(EXCEPTION) << "indices must be 1D";
+  }
+  indices_size_ = indices_shape[0];
+  if (grad_shape[0] != indices_size_) {
+    MS_LOG(EXCEPTION) << "The first dimension of grad must be equal to the size of indices";
+  }
+  if (AnfAlgo::HasNodeAttr(USE_NESTEROV, kernel_node)) {
+    use_nesterov_ = AnfAlgo::GetNodeAttr<bool>(kernel_node, USE_NESTEROV);
+  }
+}
+
+bool SparseApplyLazyAdamCPUKernel::Launch(const std::vector<kernel::AddressPtr> &inputs,
+                                          const std::vector<kernel::AddressPtr> & /*workspace*/,
+                                          const std::vector<kernel::AddressPtr> & /*outputs*/) {
+  if (inputs.size() < kSparseApplyLazyAdamInputSize) {
+    MS_LOG(EXCEPTION) << "Error input size!";
+  }
+
+  auto var = reinterpret_cast<float *>(inputs[0]->addr);
+  auto m = reinterpret_cast<float *>(inputs[1]->addr);
+  auto v = reinterpret_cast<float *>(inputs[2]->addr);
+  auto beta1_power = reinterpret_cast<float *>(inputs[3]->addr)[0];
+  if (beta1_power == 1) {
+    MS_LOG(EXCEPTION) << "The beta1_power should not be 1";
+  }
+  auto beta2_power = reinterpret_cast<float *>(inputs[4]->addr)[0];
+  auto lr = reinterpret_cast<float *>(inputs[5]->addr)[0];
+  auto beta1 = reinterpret_cast<float *>(inputs[6]->addr)[0];
+  auto beta2 = reinterpret_cast<float *>(inputs[7]->addr)[0];
+  auto epsilon = reinterpret_cast<float *>(inputs[8]->addr)[0];
+  auto grad = reinterpret_cast<float *>(inputs[9]->addr);
+  auto indices = reinterpret_cast<int *>(inputs[10]->addr);
+
+  std::vector<float> new_grad;
+  new_grad.reserve(indices_size_ * var_outer_dim_size_);
+  std::vector<int> new_indices;
+  new_indices.reserve(indices_size_);
+  SparseGradient unique_sparse_grad({new_grad.data(), new_indices.data(), indices_size_});
+  DeduplicateIndexedSlices(SparseGradient({grad, indices, indices_size_}), &unique_sparse_grad, var_first_dim_size_,
+                           var_outer_dim_size_);
+
+  lr = lr * std::sqrt(1 - beta2_power) / (1 - beta1_power);
+  for (size_t i = 0; i < unique_sparse_grad.indices_size_; ++i) {
+    int index = unique_sparse_grad.indices_[i];
+    if (index < 0 || IntToSize(index) >= var_first_dim_size_) {
+      MS_LOG(EXCEPTION) << "Index " << index << " in indices is out of range";
+    }
+    size_t start_index = var_outer_dim_size_ * index;
+    size_t end_index = start_index + var_outer_dim_size_;
+    for (size_t j = start_index, k = var_outer_dim_size_ * i; j < end_index; ++j, ++k) {
+      auto summed_grad = unique_sparse_grad.value_[k];
+      m[j] = beta1 * m[j] + (1 - beta1) * summed_grad;
+      v[j] = beta2 * v[j] + (1 - beta2) * summed_grad * summed_grad;
+      if (use_nesterov_) {
+        var[j] -= lr * (m[j] * beta1 + (1 - beta1) * summed_grad) / (std::sqrt(v[j]) + epsilon);
+      } else {
+        var[j] -= lr * m[j] / (std::sqrt(v[j]) + epsilon);
+      }
+    }
+  }
+  return true;
+}
+}  // namespace kernel
+}  // namespace mindspore
diff --git a/mindspore/ccsrc/kernel/cpu/sparse_apply_lazy_adam_cpu_kernel.h b/mindspore/ccsrc/kernel/cpu/sparse_apply_lazy_adam_cpu_kernel.h
new file mode 100644
index 00000000000..0a521815616
--- /dev/null
+++ b/mindspore/ccsrc/kernel/cpu/sparse_apply_lazy_adam_cpu_kernel.h
@@ -0,0 +1,63 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MINDSPORE_CCSRC_KERNEL_CPU_SPARSE_APPLY_LAZY_ADAM_CPU_KERNEL_H_
+#define MINDSPORE_CCSRC_KERNEL_CPU_SPARSE_APPLY_LAZY_ADAM_CPU_KERNEL_H_
+
+#include <vector>
+#include <memory>
+#include "kernel/cpu/cpu_kernel.h"
+#include "kernel/cpu/cpu_kernel_factory.h"
+
+namespace mindspore {
+namespace kernel {
+class SparseApplyLazyAdamCPUKernel : public CPUKernel {
+ public:
+  SparseApplyLazyAdamCPUKernel() = default;
+  ~SparseApplyLazyAdamCPUKernel() override = default;
+
+  void InitKernel(const CNodePtr &kernel_node) override;
+
+  bool Launch(const std::vector<kernel::AddressPtr> &inputs, const std::vector<kernel::AddressPtr> &workspace,
+              const std::vector<kernel::AddressPtr> &outputs) override;
+
+ private:
+  size_t indices_size_{0};
+  size_t var_first_dim_size_{0};
+  size_t var_outer_dim_size_{1};
+  bool use_nesterov_{false};
+};
+
+MS_REG_CPU_KERNEL(SparseApplyLazyAdam,
+                  KernelAttr()
+                    .AddInputAttr(kNumberTypeFloat32)
+                    .AddInputAttr(kNumberTypeFloat32)
+                    .AddInputAttr(kNumberTypeFloat32)
+                    .AddInputAttr(kNumberTypeFloat32)
+                    .AddInputAttr(kNumberTypeFloat32)
+                    .AddInputAttr(kNumberTypeFloat32)
+                    .AddInputAttr(kNumberTypeFloat32)
+                    .AddInputAttr(kNumberTypeFloat32)
+                    .AddInputAttr(kNumberTypeFloat32)
+                    .AddInputAttr(kNumberTypeFloat32)
+                    .AddInputAttr(kNumberTypeInt32)
+                    .AddOutputAttr(kNumberTypeFloat32)
+                    .AddOutputAttr(kNumberTypeFloat32)
+                    .AddOutputAttr(kNumberTypeFloat32),
+                  SparseApplyLazyAdamCPUKernel);
+}  // namespace kernel
+}  // namespace mindspore
+
+#endif  // MINDSPORE_CCSRC_KERNEL_CPU_SPARSE_APPLY_LAZY_ADAM_CPU_KERNEL_H_
diff --git a/tests/st/ops/cpu/test_sparse_apply_adam_op.py b/tests/st/ops/cpu/test_sparse_apply_adam_op.py
new file mode 100644
index 00000000000..e81ac470b76
--- /dev/null
+++ b/tests/st/ops/cpu/test_sparse_apply_adam_op.py
@@ -0,0 +1,46 @@
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================ + +import numpy as np +import mindspore.context as context +import mindspore.nn as nn +from mindspore import Tensor +from mindspore.common.parameter import Parameter +from mindspore.ops import operations as P +import mindspore.common.dtype as mstype + + +class Net(nn.Cell): + def __init__(self): + super(Net, self).__init__() + self.sparse_apply_adam = P.SparseApplyAdam() + self.var = Parameter(Tensor(np.ones([3, 3, 3]).astype(np.float32)), name="var") + self.m = Parameter(Tensor(np.ones([3, 3, 3]).astype(np.float32)), name="m") + self.v = Parameter(Tensor(np.ones([3, 3, 3]).astype(np.float32)), name="v") + + def construct(self, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, indices): + out = self.sparse_apply_adam(self.var, self.m, self.v, beta1_power, beta2_power, lr, beta1, beta2, epsilon, + grad, indices) + return out + + +def test_net(): + gradient = Tensor(np.random.rand(3, 3, 3).astype(np.float32)) + indices = Tensor([0, 1, 2], mstype.int32) + + context.set_context(mode=context.GRAPH_MODE, device_target="CPU") + sparse_apply_adam = Net() + output = sparse_apply_adam(0.9, 0.999, 0.001, 0.9, 0.999, 1e-8, gradient, indices) + print(output[0].asnumpy())