forked from mindspore-Ecosystem/mindspore
commit 9ba46115c5
@@ -124,8 +124,8 @@ uint32_t MatrixExpCpuKernel::MatrixExpCheck(CpuKernelContext &ctx) {
 }
 
 template <typename Derived1, typename Derived2, typename Derived3>
-void MatrixExpCpuKernel::MTaylorApproximant(const Eigen::MatrixBase<Derived1> &A, const Eigen::MatrixBase<Derived2> &I,
-                                            int order, Eigen::MatrixBase<Derived3> &E) {
+void MatrixExpCpuKernel::MTaylorApproximant(Eigen::MatrixBase<Derived1> &A, Eigen::MatrixBase<Derived2> &I, int order,
+                                            Eigen::MatrixBase<Derived3> &E) {
   constexpr int expension_order_1 = 1;
   constexpr int expension_order_2 = 2;
   constexpr int expension_order_4 = 4;
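The hunk above drops the const qualifiers so that the caller can later rescale A in place (see the third hunk). For orientation, MTaylorApproximant evaluates a truncated Taylor polynomial of exp(A) at one of the fixed orders declared above (1, 2, 4, and higher). A minimal NumPy sketch of that idea follows; the helper name is hypothetical and it omits whatever higher-order evaluation tricks the kernel may use.

import numpy as np

def taylor_approximant(a, order):
    # E = I + A + A^2/2! + ... + A^order/order!
    e = np.eye(a.shape[0])
    term = np.eye(a.shape[0])
    for k in range(1, order + 1):
        term = term @ a / k   # term == A^k / k!
        e = e + term
    return e

# exp of a nilpotent matrix: [[1, 1], [0, 1]]
print(taylor_approximant(np.array([[0.0, 1.0], [0.0, 0.0]]), 4))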
@@ -167,7 +167,7 @@ void MatrixExpCpuKernel::MTaylorApproximant(const Eigen::MatrixBase<Derived1> &A
 }
 
 template <typename Derived1, typename Derived2>
-void MatrixExpCpuKernel::MexpImpl(const Eigen::MatrixBase<Derived1> &A, const Eigen::MatrixBase<Derived2> &I,
+void MatrixExpCpuKernel::MexpImpl(Eigen::MatrixBase<Derived1> &A, Eigen::MatrixBase<Derived2> &I,
                                   Eigen::MatrixBase<Derived1> &mexp, CpuKernelContext &ctx) {
   const auto norm = A.cwiseAbs().colwise().sum().maxCoeff();
   constexpr std::array<int, total_n_degs> m_vals = {1, 2, 4, 8, 12, 18};
@@ -203,8 +203,8 @@ void MatrixExpCpuKernel::MexpImpl(const Eigen::MatrixBase<Derived1> &A, const Ei
   }
   if (s >= 0) {
     const auto pow2s = pow(2, s);
-    const auto A_scaled = A / pow2s;
-    MTaylorApproximant(A_scaled, I, m_vals[total_n_degs - 1], mexp);
+    A /= pow2s;
+    MTaylorApproximant(A, I, m_vals[total_n_degs - 1], mexp);
     for (int k = 0; k < s; k++) {
      mexp = mexp * mexp;
     }
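This hunk replaces the scaled copy (A_scaled) with an in-place rescale of A, which is what required the non-const signatures in the earlier hunks. The underlying scaling-and-squaring identity is exp(A) = (exp(A / 2^s))^(2^s): scale A down so the order-18 Taylor approximant is accurate, then square the result s times. A self-contained sketch follows; the function name is hypothetical and it uses a fixed s rather than the norm-driven choice made in MexpImpl.

import numpy as np

def matrix_exp(a, order=18, s=4):
    # exp(A) = (exp(A / 2**s))**(2**s): scale, truncated Taylor series, square s times.
    a = a / 2**s
    mexp, term = np.eye(a.shape[0]), np.eye(a.shape[0])
    for k in range(1, order + 1):
        term = term @ a / k   # A^k / k!
        mexp = mexp + term
    for _ in range(s):
        mexp = mexp @ mexp
    return mexp

a = np.array([[0.0, 1.0], [-1.0, 0.0]])
print(matrix_exp(a))  # ~[[cos(1), sin(1)], [-sin(1), cos(1)]]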
@@ -31,12 +31,12 @@ class MatrixExpCpuKernel : public CpuKernel {
   uint32_t MatrixExpCheck(CpuKernelContext &ctx);
 
   template <typename Derived1, typename Derived2, typename Derived3>
-  void MTaylorApproximant(const Eigen::MatrixBase<Derived1> &A, const Eigen::MatrixBase<Derived2> &I, int order,
+  void MTaylorApproximant(Eigen::MatrixBase<Derived1> &A, Eigen::MatrixBase<Derived2> &I, int order,
                           Eigen::MatrixBase<Derived3> &E);
 
   template <typename Derived1, typename Derived2>
-  void MexpImpl(const Eigen::MatrixBase<Derived1> &A, const Eigen::MatrixBase<Derived2> &I,
-                Eigen::MatrixBase<Derived1> &mexp, CpuKernelContext &ctx);
+  void MexpImpl(Eigen::MatrixBase<Derived1> &A, Eigen::MatrixBase<Derived2> &I, Eigen::MatrixBase<Derived1> &mexp,
+                CpuKernelContext &ctx);
 
   template <typename T>
   uint32_t MatrixExpCompute(CpuKernelContext &ctx);
@@ -79,8 +79,11 @@ TypePtr UravelIndexInferType(const PrimitivePtr &prim, const std::vector<Abstrac
 MIND_API_OPERATOR_IMPL(UnravelIndex, BaseOperator);
 AbstractBasePtr UnravelIndexInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
                                   const std::vector<AbstractBasePtr> &input_args) {
-  return std::make_shared<abstract::AbstractTensor>(UravelIndexInferType(primitive, input_args),
-                                                    UravelIndexInferShape(primitive, input_args));
+  const int64_t input_num = 2;
+  CheckAndConvertUtils::CheckInputArgs(input_args, kEqual, input_num, primitive->name());
+  auto infer_type = UravelIndexInferType(primitive, input_args);
+  auto infer_shape = UravelIndexInferShape(primitive, input_args);
+  return std::make_shared<abstract::AbstractTensor>(infer_type, infer_shape);
 }
 
 // AG means auto generated
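For context, the operator whose type and shape are inferred here converts flat indices into per-dimension coordinates, taking a tensor of indices plus a dims tensor, which is why the hunk adds a two-argument input check before inference. NumPy's equivalent illustrates the semantics:

import numpy as np

# Flat indices into a 3 x 4 array, converted back to (row, col) coordinates.
flat = np.array([1, 5, 11])
print(np.unravel_index(flat, (3, 4)))
# (array([0, 1, 2]), array([1, 1, 3]))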
@@ -8666,10 +8666,6 @@ class SparseApplyCenteredRMSProp(Primitive):
         ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
-        >>> import numpy as np
-        >>> from mindspore import Tensor
-        >>> import mindspore.common.dtype as mstype
-        >>> import mindspore.ops.operations.nn_ops as nn_ops
         >>> var = Tensor(np.array([[0.6, 0.4], [0.1, 0.5]]).astype(np.float32))
         >>> mg = Tensor(np.array([[0.1, 0.3], [0.1, 0.5]]).astype(np.float32))
         >>> ms = Tensor(np.array([[0.2, 0.1], [0.1, 0.2]]).astype(np.float32))
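The var, mg, and ms tensors in the docstring example are the state of centered RMSProp. As a rough reference, the sketch below applies the standard dense centered-RMSProp step in NumPy; the function name and the state/gradient values are illustrative, and this is not the sparse kernel's exact update code.

import numpy as np

def centered_rmsprop_step(var, mg, ms, mom, grad, lr=0.001, rho=0.9, momentum=0.9, eps=1e-8):
    mg = rho * mg + (1 - rho) * grad           # running mean of gradients
    ms = rho * ms + (1 - rho) * grad * grad    # running mean of squared gradients
    mom = momentum * mom + lr * grad / np.sqrt(ms - mg * mg + eps)
    var = var - mom
    return var, mg, ms, mom

var = np.array([[0.6, 0.4], [0.1, 0.5]], dtype=np.float32)
mg = np.zeros_like(var)
ms = np.ones_like(var)
mom = np.zeros_like(var)
grad = np.array([[0.3, 0.7], [0.1, 0.8]], dtype=np.float32)
print(centered_rmsprop_step(var, mg, ms, mom, grad)[0])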
@@ -9760,10 +9756,6 @@ class SparseApplyAdagradDA(Primitive):
         ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
-        >>> import numpy as np
-        >>> from mindspore import Tensor
-        >>> import mindspore.common.dtype as mstype
-        >>> import mindspore.ops.operations.nn_ops as nn_ops
         >>> var = Parameter(Tensor(np.array([[1,2], [1,2]]).astype(np.float32)))
         >>> grad_accum = Parameter(Tensor(np.array([[2,1], [3,1]]).astype(np.float32)))
         >>> grad_square_accum = Parameter(Tensor(np.array([[4,1], [5,1]]).astype(np.float32)))