diff --git a/mindspore/ccsrc/plugin/device/ascend/kernel/aicpu/aicpu_ops/cpu_kernel/ms_kernel/matrix_exp.cc b/mindspore/ccsrc/plugin/device/ascend/kernel/aicpu/aicpu_ops/cpu_kernel/ms_kernel/matrix_exp.cc
index 195c3ee4f67..3133594d132 100644
--- a/mindspore/ccsrc/plugin/device/ascend/kernel/aicpu/aicpu_ops/cpu_kernel/ms_kernel/matrix_exp.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/kernel/aicpu/aicpu_ops/cpu_kernel/ms_kernel/matrix_exp.cc
@@ -124,8 +124,8 @@ uint32_t MatrixExpCpuKernel::MatrixExpCheck(CpuKernelContext &ctx) {
 }
 
 template <typename Derived1, typename Derived2>
-void MatrixExpCpuKernel::MTaylorApproximant(const Eigen::MatrixBase<Derived1> &A, const Eigen::MatrixBase<Derived2> &I,
-                                            int order, Eigen::MatrixBase<Derived2> &E) {
+void MatrixExpCpuKernel::MTaylorApproximant(Eigen::MatrixBase<Derived1> &A, Eigen::MatrixBase<Derived2> &I, int order,
+                                            Eigen::MatrixBase<Derived2> &E) {
   constexpr int expension_order_1 = 1;
   constexpr int expension_order_2 = 2;
   constexpr int expension_order_4 = 4;
@@ -167,7 +167,7 @@ void MatrixExpCpuKernel::MTaylorApproximant(const Eigen::MatrixBase &A
 }
 
 template <typename Derived1, typename Derived2>
-void MatrixExpCpuKernel::MexpImpl(const Eigen::MatrixBase<Derived1> &A, const Eigen::MatrixBase<Derived2> &I,
+void MatrixExpCpuKernel::MexpImpl(Eigen::MatrixBase<Derived1> &A, Eigen::MatrixBase<Derived2> &I,
                                  Eigen::MatrixBase<Derived2> &mexp, CpuKernelContext &ctx) {
   const auto norm = A.cwiseAbs().colwise().sum().maxCoeff();
   constexpr std::array m_vals = {1, 2, 4, 8, 12, 18};
@@ -203,8 +203,8 @@ void MatrixExpCpuKernel::MexpImpl(const Eigen::MatrixBase &A, const Ei
   }
   if (s >= 0) {
     const auto pow2s = pow(2, s);
-    const auto A_scaled = A / pow2s;
-    MTaylorApproximant(A_scaled, I, m_vals[total_n_degs - 1], mexp);
+    A /= pow2s;
+    MTaylorApproximant(A, I, m_vals[total_n_degs - 1], mexp);
     for (int k = 0; k < s; k++) {
       mexp = mexp * mexp;
     }
diff --git a/mindspore/ccsrc/plugin/device/ascend/kernel/aicpu/aicpu_ops/cpu_kernel/ms_kernel/matrix_exp.h b/mindspore/ccsrc/plugin/device/ascend/kernel/aicpu/aicpu_ops/cpu_kernel/ms_kernel/matrix_exp.h
index cddf4440f18..76e1ebb09c5 100644
--- a/mindspore/ccsrc/plugin/device/ascend/kernel/aicpu/aicpu_ops/cpu_kernel/ms_kernel/matrix_exp.h
+++ b/mindspore/ccsrc/plugin/device/ascend/kernel/aicpu/aicpu_ops/cpu_kernel/ms_kernel/matrix_exp.h
@@ -31,12 +31,12 @@ class MatrixExpCpuKernel : public CpuKernel {
   uint32_t MatrixExpCheck(CpuKernelContext &ctx);
 
   template <typename Derived1, typename Derived2>
-  void MTaylorApproximant(const Eigen::MatrixBase<Derived1> &A, const Eigen::MatrixBase<Derived2> &I, int order,
+  void MTaylorApproximant(Eigen::MatrixBase<Derived1> &A, Eigen::MatrixBase<Derived2> &I, int order,
                          Eigen::MatrixBase<Derived2> &E);
 
   template <typename Derived1, typename Derived2>
-  void MexpImpl(const Eigen::MatrixBase<Derived1> &A, const Eigen::MatrixBase<Derived2> &I,
-                Eigen::MatrixBase<Derived2> &mexp, CpuKernelContext &ctx);
+  void MexpImpl(Eigen::MatrixBase<Derived1> &A, Eigen::MatrixBase<Derived2> &I, Eigen::MatrixBase<Derived2> &mexp,
+                CpuKernelContext &ctx);
 
   template <typename T>
   uint32_t MatrixExpCompute(CpuKernelContext &ctx);
diff --git a/mindspore/core/ops/unravel_index.cc b/mindspore/core/ops/unravel_index.cc
index cf76e30aa34..5130c7d9454 100644
--- a/mindspore/core/ops/unravel_index.cc
+++ b/mindspore/core/ops/unravel_index.cc
@@ -79,8 +79,11 @@ TypePtr UravelIndexInferType(const PrimitivePtr &prim, const std::vector &input_args) {
-  return std::make_shared<abstract::AbstractTensor>(UravelIndexInferType(primitive, input_args),
-                                                    UravelIndexInferShape(primitive, input_args));
+  const int64_t input_num = 2;
+  CheckAndConvertUtils::CheckInputArgs(input_args, kEqual, input_num, primitive->name());
+  auto infer_type = UravelIndexInferType(primitive, input_args);
+  auto infer_shape = UravelIndexInferShape(primitive, input_args);
+  return std::make_shared<abstract::AbstractTensor>(infer_type, infer_shape);
 }
 
 // AG means auto generated
diff --git a/mindspore/python/mindspore/ops/operations/nn_ops.py b/mindspore/python/mindspore/ops/operations/nn_ops.py
index aaeafe982fb..48707be38d7 100644
--- a/mindspore/python/mindspore/ops/operations/nn_ops.py
+++ b/mindspore/python/mindspore/ops/operations/nn_ops.py
@@ -8667,10 +8667,6 @@ class SparseApplyCenteredRMSProp(Primitive):
         ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
-        >>> import numpy as np
-        >>> from mindspore import Tensor
-        >>> import mindspore.common.dtype as mstype
-        >>> import mindspore.ops.operations.nn_ops as nn_ops
         >>> var = Tensor(np.array([[0.6, 0.4], [0.1, 0.5]]).astype(np.float32))
         >>> mg = Tensor(np.array([[0.1, 0.3], [0.1, 0.5]]).astype(np.float32))
         >>> ms = Tensor(np.array([[0.2, 0.1], [0.1, 0.2]]).astype(np.float32))
@@ -9761,10 +9757,6 @@ class SparseApplyAdagradDA(Primitive):
         ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
-        >>> import numpy as np
-        >>> from mindspore import Tensor
-        >>> import mindspore.common.dtype as mstype
-        >>> import mindspore.ops.operations.nn_ops as nn_ops
        >>> var = Parameter(Tensor(np.array([[1,2], [1,2]]).astype(np.float32)))
        >>> grad_accum = Parameter(Tensor(np.array([[2,1], [3,1]]).astype(np.float32)))
        >>> grad_square_accum = Parameter(Tensor(np.array([[4,1], [5,1]]).astype(np.float32)))