forked from mindspore-Ecosystem/mindspore
Add aicpu op Lcm
This commit is contained in:
parent 4667e4ea0a
commit c908ac8cc3
@@ -0,0 +1,109 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "plugin/device/cpu/kernel/lcm_cpu_kernel.h"

#include <string>
#include <vector>
#include <memory>
#include <map>
#include <algorithm>
#include <utility>
#include <numeric>

#include "plugin/device/cpu/hal/device/cpu_device_address.h"
#include "plugin/device/cpu/kernel/cpu_kernel.h"

namespace mindspore {
namespace kernel {
namespace {
const size_t kLcmInputsNum = 2;
const size_t kLcmOutputsNum = 1;
}  // namespace

bool LcmCpuKernelMod::Init(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
                           const std::vector<KernelTensorPtr> &outputs) {
  kernel_name_ = base_operator->name();
  std::vector<int64_t> x1_shape = inputs[0]->GetShapeVector();
  std::vector<int64_t> x2_shape = inputs[1]->GetShapeVector();
  std::vector<int64_t> y_shape = outputs[0]->GetShapeVector();
  x1_shape_.resize(x1_shape.size(), 1);
  x2_shape_.resize(x2_shape.size(), 1);
  y_shape_.resize(y_shape.size(), 1);
  for (size_t i = 0; i < x1_shape.size(); i++) {
    x1_shape_[i] = static_cast<size_t>(x1_shape[i]);
  }
  for (size_t i = 0; i < x2_shape.size(); i++) {
    x2_shape_[i] = static_cast<size_t>(x2_shape[i]);
  }
  for (size_t i = 0; i < y_shape.size(); i++) {
    y_shape_[i] = static_cast<size_t>(y_shape[i]);
  }
  auto kernel_attr = GetKernelAttrFromTensors(inputs, outputs);
  auto [is_match, index] = MatchKernelAttr(kernel_attr, GetOpSupport());
  if (!is_match) {
    MS_LOG(ERROR) << kernel_name_ << " does not support this kernel data type: " << kernel_attr;
    return false;
  }
  kernel_func_ = func_list_[index].second;
  return true;
}

template <typename T>
bool LcmCpuKernelMod::LaunchKernel(const std::vector<kernel::AddressPtr> &inputs,
                                   const std::vector<kernel::AddressPtr> &outputs) {
  CHECK_KERNEL_INPUTS_NUM(inputs.size(), kLcmInputsNum, kernel_name_);
  CHECK_KERNEL_OUTPUTS_NUM(outputs.size(), kLcmOutputsNum, kernel_name_);
  const T *x1 = reinterpret_cast<const T *>(inputs[0]->addr);
  const T *x2 = reinterpret_cast<const T *>(inputs[1]->addr);
  T *y = reinterpret_cast<T *>(outputs[0]->addr);
  if (y_shape_.size() == 0) {
    (void)y_shape_.insert(y_shape_.begin(), 1);
  }
  int64_t output_size_ = 1;
  for (size_t i = 0; i < y_shape_.size(); ++i) {
    output_size_ *= y_shape_[i];
  }
  BroadcastIterator base_iter(x1_shape_, x2_shape_, y_shape_);
  auto task = [this, &x1, &x2, &y, &base_iter](size_t start, size_t end) {
    auto iter = base_iter;
    iter.SetPos(start);
    for (size_t i = start; i < end; i++) {
      y[i] = std::lcm(x1[iter.GetInputPosA()], x2[iter.GetInputPosB()]);
      iter.GenNextPos();
    }
  };
  ParallelLaunchAutoSearch(task, output_size_, this, &parallel_search_info_);
  return true;
}

std::vector<std::pair<KernelAttr, LcmCpuKernelMod::LcmLaunchFunc>> LcmCpuKernelMod::func_list_ = {
  {KernelAttr().AddInputAttr(kNumberTypeInt32).AddInputAttr(kNumberTypeInt32).AddOutputAttr(kNumberTypeInt32),
   &LcmCpuKernelMod::LaunchKernel<int32_t>},
  {KernelAttr().AddInputAttr(kNumberTypeInt64).AddInputAttr(kNumberTypeInt64).AddOutputAttr(kNumberTypeInt64),
   &LcmCpuKernelMod::LaunchKernel<int64_t>}};

std::vector<KernelAttr> LcmCpuKernelMod::GetOpSupport() {
  std::vector<KernelAttr> support_list;
  (void)std::transform(func_list_.begin(), func_list_.end(), std::back_inserter(support_list),
                       [](const std::pair<KernelAttr, LcmLaunchFunc> &pair) { return pair.first; });

  return support_list;
}

MS_KERNEL_FACTORY_REG(NativeCpuKernelMod, Lcm, LcmCpuKernelMod);
}  // namespace kernel
}  // namespace mindspore
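
The LaunchKernel loop above combines two pieces: BroadcastIterator, which maps each flat output index back to a (possibly broadcast) flat position in each input, and C++17's std::lcm from <numeric>. Below is a minimal, self-contained sketch of that index mapping; the InputPos helper and the example shapes are illustrative assumptions, not MindSpore's BroadcastIterator API.

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <numeric>  // std::lcm (C++17)
#include <vector>

// Map a flat output index to the flat index of one input, treating the
// input's size-1 dimensions as broadcast (their coordinate contributes 0).
size_t InputPos(size_t out_pos, const std::vector<size_t> &out_shape, const std::vector<size_t> &in_shape) {
  size_t pos = 0, stride = 1;
  for (size_t d = out_shape.size(); d-- > 0;) {
    size_t coord = out_pos % out_shape[d];
    out_pos /= out_shape[d];
    if (in_shape[d] != 1) pos += coord * stride;  // broadcast dims are pinned at index 0
    stride *= in_shape[d];
  }
  return pos;
}

int main() {
  // x1 has shape {3, 1}, x2 has shape {1, 2}; the broadcast output is {3, 2}.
  std::vector<size_t> x1_shape{3, 1}, x2_shape{1, 2}, y_shape{3, 2};
  std::vector<int32_t> x1{7, 8, 9}, x2{14, 6}, y(6);
  for (size_t i = 0; i < y.size(); ++i) {
    y[i] = std::lcm(x1[InputPos(i, y_shape, x1_shape)], x2[InputPos(i, y_shape, x2_shape)]);
  }
  for (int32_t v : y) std::cout << v << ' ';  // prints: 14 42 56 24 126 18
  std::cout << '\n';
  return 0;
}

ParallelLaunchAutoSearch then only has to split the flat [start, end) range of this loop across worker threads, which is safe because every output element is computed independently.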
@@ -0,0 +1,65 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_CCSRC_PLUGIN_DEVICE_CPU_KERNEL_LCM_CPU_KERNEL_H_
#define MINDSPORE_CCSRC_PLUGIN_DEVICE_CPU_KERNEL_LCM_CPU_KERNEL_H_

#include <functional>
#include <memory>
#include <vector>
#include <iostream>
#include <string>
#include <map>
#include <utility>

#include "plugin/device/cpu/kernel/cpu_kernel.h"
#include "plugin/factory/ms_factory.h"
#include "plugin/device/cpu/kernel/nnacl/arithmetic.h"

namespace mindspore {
namespace kernel {
class LcmCpuKernelMod : public NativeCpuKernelMod {
 public:
  LcmCpuKernelMod() = default;
  ~LcmCpuKernelMod() override = default;

  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
              const std::vector<AddressPtr> &outputs) override {
    return kernel_func_(this, inputs, outputs);
  }

  bool Init(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
            const std::vector<KernelTensorPtr> &outputs) override;

 protected:
  std::vector<KernelAttr> GetOpSupport() override;

 private:
  template <typename T>
  bool LaunchKernel(const std::vector<kernel::AddressPtr> &inputs, const std::vector<kernel::AddressPtr> &outputs);
  using LcmLaunchFunc = std::function<bool(LcmCpuKernelMod *, const std::vector<kernel::AddressPtr> &,
                                           const std::vector<kernel::AddressPtr> &)>;
  static std::vector<std::pair<KernelAttr, LcmLaunchFunc>> func_list_;
  LcmLaunchFunc kernel_func_;
  std::vector<size_t> x1_shape_;
  std::vector<size_t> x2_shape_;
  std::vector<size_t> y_shape_;
  bool need_bcast_{false};
};
}  // namespace kernel
}  // namespace mindspore
#endif  // MINDSPORE_CCSRC_PLUGIN_DEVICE_CPU_KERNEL_LCM_CPU_KERNEL_H_
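
The private func_list_ / kernel_func_ pair in this header is a common CPU-kernel dispatch pattern: a static table pairs a KernelAttr with a templated member function, Init() stores the matching entry, and Launch() calls through the stored std::function. Here is a stripped-down sketch of the same pattern, with a hypothetical TypeTag enum standing in for KernelAttr and flat buffers standing in for AddressPtr:

#include <cstddef>
#include <cstdint>
#include <functional>
#include <iostream>
#include <numeric>
#include <utility>
#include <vector>

enum class TypeTag { kInt32, kInt64 };

class Kernel {
 public:
  // Pick the launch function matching the runtime dtype; returning false
  // mirrors the "does not support this kernel data type" error path in Init().
  bool Init(TypeTag tag) {
    for (const auto &entry : func_list_) {
      if (entry.first == tag) {
        kernel_func_ = entry.second;
        return true;
      }
    }
    return false;
  }
  bool Launch(const void *x1, const void *x2, void *y, size_t n) { return kernel_func_(this, x1, x2, y, n); }

 private:
  template <typename T>
  bool LaunchKernel(const void *x1, const void *x2, void *y, size_t n) {
    const T *a = static_cast<const T *>(x1);
    const T *b = static_cast<const T *>(x2);
    T *out = static_cast<T *>(y);
    for (size_t i = 0; i < n; ++i) out[i] = std::lcm(a[i], b[i]);
    return true;
  }
  using LaunchFunc = std::function<bool(Kernel *, const void *, const void *, void *, size_t)>;
  static std::vector<std::pair<TypeTag, LaunchFunc>> func_list_;
  LaunchFunc kernel_func_;
};

// One table entry per supported dtype, as in func_list_ above.
std::vector<std::pair<TypeTag, Kernel::LaunchFunc>> Kernel::func_list_ = {
  {TypeTag::kInt32, &Kernel::LaunchKernel<int32_t>},
  {TypeTag::kInt64, &Kernel::LaunchKernel<int64_t>}};

int main() {
  Kernel k;
  if (!k.Init(TypeTag::kInt32)) return 1;
  int32_t x1[] = {2, 5, 8}, x2[] = {4, 3, 12}, y[3];
  k.Launch(x1, x2, y, 3);
  for (int32_t v : y) std::cout << v << ' ';  // prints: 4 15 24
  std::cout << '\n';
  return 0;
}

Storing the instantiation choice once in Init() keeps the per-call Launch() path free of any dtype branching.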
@@ -803,6 +803,7 @@ GVAR_DEF(PrimitivePtr, kPrimReduceProd, std::make_shared<Primitive>("ReduceProd"
GVAR_DEF(PrimitivePtr, kPrimReduceStd, std::make_shared<Primitive>(kReduceStd));
GVAR_DEF(PrimitivePtr, kPrimCentralization, std::make_shared<Primitive>("Centralization"));
GVAR_DEF(PrimitivePtr, kPrimNeg, std::make_shared<Primitive>(kNeg));
GVAR_DEF(PrimitivePtr, kPrimLcm, std::make_shared<Primitive>("Lcm"));
GVAR_DEF(PrimitivePtr, kPrimSin, std::make_shared<Primitive>("Sin"));
GVAR_DEF(PrimitivePtr, kPrimCos, std::make_shared<Primitive>(kCos));
GVAR_DEF(PrimitivePtr, kPrimSub, std::make_shared<Primitive>(kSub));
@@ -0,0 +1,54 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <map>
#include <set>
#include <string>

#include "ops/lcm.h"
#include "ops/op_utils.h"
#include "utils/check_convert_utils.h"
#include "abstract/ops/primitive_infer_map.h"
#include "mindapi/src/helper.h"

namespace mindspore {
namespace ops {
namespace {
abstract::ShapePtr LcmInferShape(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) {
  return BroadCastInferShape(primitive->name(), input_args);
}

TypePtr LcmInferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) {
  const std::set<TypePtr> lcm_valid_types = {kInt32, kInt64};
  TypePtr x1_type = input_args[0]->BuildType();
  auto inferred_type = CheckAndConvertUtils::CheckTensorTypeValid("x1", x1_type, lcm_valid_types, prim->name());
  return inferred_type;
}
}  // namespace

AbstractBasePtr LcmInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
                         const std::vector<AbstractBasePtr> &input_args) {
  MS_EXCEPTION_IF_NULL(primitive);
  const int64_t lcm_input_num = 2;
  CheckAndConvertUtils::CheckInputArgs(input_args, kEqual, lcm_input_num, primitive->name());
  auto shape = LcmInferShape(primitive, input_args);
  auto type = LcmInferType(primitive, input_args);
  return abstract::MakeAbstract(shape, type);
}
MIND_API_OPERATOR_IMPL(Lcm, BaseOperator);
REGISTER_PRIMITIVE_EVAL_IMPL(Lcm, prim::kPrimLcm, LcmInfer, nullptr, true);
}  // namespace ops
}  // namespace mindspore
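
LcmInferShape delegates to BroadCastInferShape, i.e. the usual right-aligned broadcast rule: trailing dimensions are matched pairwise, and a dimension of 1 stretches to the other input's size. A hedged sketch of that rule follows; the BroadcastShape helper is illustrative, not MindSpore's implementation.

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <stdexcept>
#include <utility>
#include <vector>

// NumPy-style broadcast of two shapes: right-align, pad the shorter with 1s,
// then merge dimension by dimension (1 stretches to the other size).
std::vector<int64_t> BroadcastShape(std::vector<int64_t> a, std::vector<int64_t> b) {
  if (a.size() < b.size()) std::swap(a, b);
  b.insert(b.begin(), a.size() - b.size(), 1);
  std::vector<int64_t> out(a.size());
  for (size_t i = 0; i < a.size(); ++i) {
    if (a[i] == b[i] || b[i] == 1) {
      out[i] = a[i];
    } else if (a[i] == 1) {
      out[i] = b[i];
    } else {
      throw std::invalid_argument("shapes are not broadcastable");  // the ValueError case
    }
  }
  return out;
}

int main() {
  for (int64_t d : BroadcastShape({2, 1, 4, 5}, {4, 5})) std::cout << d << ' ';  // prints: 2 1 4 5
  std::cout << '\n';
  return 0;
}

Note that LcmInferType only validates x1 against {kInt32, kInt64}; agreement between the two input dtypes is enforced separately, via the __mindspore_signature__ declared on the Python Primitive.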
@@ -0,0 +1,40 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_CORE_OPS_LCM_H_
#define MINDSPORE_CORE_OPS_LCM_H_
#include <vector>
#include <memory>

#include "ops/base_operator.h"
#include "mindapi/base/types.h"

namespace mindspore {
namespace ops {
constexpr auto kNameLcm = "Lcm";
/// \brief Computes the least common multiple element-wise.
/// Refer to Python API @ref mindspore.ops.Lcm for more details.
class MIND_API Lcm : public BaseOperator {
 public:
  MIND_API_BASE_MEMBER(Lcm);
  /// \brief Constructor.
  Lcm() : BaseOperator(kNameLcm) { InitIOName({"x1", "x2"}, {"y"}); }
};
abstract::AbstractBasePtr LcmInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
                                   const std::vector<abstract::AbstractBasePtr> &input_args);
}  // namespace ops
}  // namespace mindspore
#endif  // MINDSPORE_CORE_OPS_LCM_H_
@@ -160,6 +160,7 @@ from .environ_destroy_all import _environ_destroy_all_aicpu
from .cross import _cross_aicpu
from .check_numerics import _check_numerics_aicpu
from .cummax import _cummax_aicpu
from .lcm import _lcm_aicpu
from .round import _round_aicpu
from .truncated_normal import _truncated_normal_aicpu
from .floor_div import _floor_div_aicpu
@@ -0,0 +1,32 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

"""Lcm op"""
from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType

lcm_op_info = AiCPURegOp("Lcm") \
    .fusion_type("OPAQUE") \
    .input(0, "x1", "required") \
    .input(1, "x2", "required") \
    .output(0, "y", "required") \
    .dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.I32_Default) \
    .dtype_format(DataType.I64_Default, DataType.I64_Default, DataType.I64_Default) \
    .get_op_info()


@op_info_register(lcm_op_info)
def _lcm_aicpu():
    """Lcm aicpu register"""
    return
@@ -128,6 +128,7 @@ from .math_func import (
    tensor_mod,
    floor_mod,
    floormod,
    lcm,
    tensor_exp,
    exp,
    tensor_expm1,
@@ -23,7 +23,7 @@ from mindspore.ops.primitive import constexpr
from mindspore.ops import operations as P
from mindspore.ops import composite as C
from ..operations.math_ops import (Bernoulli, BesselJ0, BesselJ1, BesselK0, BesselK0e, BesselY0, BesselY1, BesselK1,
-                                  BesselK1e, Renorm)
+                                  BesselK1e, Renorm, Lcm)
from ...common import dtype as mstype
from ...common.tensor import Tensor
from ..._c_expression import Tensor as Tensor_
@@ -2795,6 +2795,39 @@ def mv(mat, vec):
    return out


def lcm(x1, x2):
    """
    Computes the least common multiple of input tensors element-wise.
    The shapes of the two inputs should be broadcastable, and their data types should be
    one of: int32, int64.

    Inputs:
        - **x1** (Tensor) - The first input tensor.
        - **x2** (Tensor) - The second input tensor.

    Outputs:
        Tensor, the shape is the same as the one after broadcasting, and the data type is
        the one with higher precision of the two inputs.

    Raises:
        TypeError: If the data type of `x1` or `x2` is not int32 or int64.
        ValueError: If the shapes of the two inputs are not broadcastable.

    Supported Platforms:
        ``Ascend`` ``CPU``

    Examples:
        >>> x1 = Tensor(np.array([7, 8, 9]))
        >>> x2 = Tensor(np.array([14, 6, 12]))
        >>> y = ops.lcm(x1, x2)
        >>> print(y)
        [14 24 36]
    """

    lcm_ = Lcm()
    return lcm_(x1, x2)


def cdist(x, y, p=2.0):
    """
    Computes the batched p-norm distance between each pair of the two collections of row vectors.
@@ -1177,6 +1177,43 @@ class CumProd(PrimitiveWithInfer):
            raise ValueError(f"For '{self.name}', the 'axis' cannot be None, but got {axis}.")


class Lcm(Primitive):
    """
    Computes the least common multiple of input tensors element-wise.
    The shapes of the two inputs should be broadcastable, and their data types should be
    one of: int32, int64.

    Inputs:
        - **x1** (Tensor) - The first input tensor.
        - **x2** (Tensor) - The second input tensor.

    Outputs:
        Tensor, the shape is the same as the one after broadcasting, and the data type is
        the one with higher precision of the two inputs.

    Raises:
        TypeError: If the data type of `x1` or `x2` is not int32 or int64.
        ValueError: If the shapes of the two inputs are not broadcastable.

    Supported Platforms:
        ``Ascend`` ``CPU``

    Examples:
        >>> x1 = Tensor(np.array([7, 8, 9]))
        >>> x2 = Tensor(np.array([14, 6, 12]))
        >>> lcm_ = ops.Lcm()
        >>> y = lcm_(x1, x2)
        >>> print(y)
        [14 24 36]
    """

    __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)

    @prim_attr_register
    def __init__(self):
        self.init_prim_io_names(inputs=['x1', 'x2'], outputs=['y'])


class Cdist(Primitive):
    """
    Computes the batched p-norm distance between each pair of the two collections of row vectors.
@@ -464,6 +464,15 @@ class IsRealFunc(nn.Cell):
        return y


class LcmFunc(nn.Cell):
    def __init__(self):
        super(LcmFunc, self).__init__()
        self.lcm = ops.function.lcm

    def construct(self, x1, x2):
        return self.lcm(x1, x2)


class Rad2degNet(nn.Cell):
    def __init__(self):
        super(Rad2degNet, self).__init__()
@@ -632,6 +641,11 @@ raise_set = [
        'block': Zeta(),
        'desc_inputs': [Tensor(np.array([1, 1, 1, 1], np.float32)),
                        Tensor([0.5, 0.5, 0.5, 0.5], mstype.float32)]}),
    ('Lcm', {
        'block': LcmFunc(),
        'desc_inputs': [Tensor(np.array([2, 5, 8]).astype(np.int32)),
                        Tensor(np.array([4, 3, 12]).astype(np.int32))],
        'skip': ['backward']}),
    ('Igamma', {
        'block': Igamma(),
        'desc_inputs': [Tensor(np.array([1.1, 2.2, -4.1], np.float32)),
@@ -49,6 +49,7 @@ from mindspore.ops.operations.array_ops import MatrixDiagV3
from mindspore.ops.operations.array_ops import MatrixDiagPartV3
from mindspore.ops.operations.array_ops import MatrixSetDiagV3
from mindspore.ops.operations.array_ops import ScatterNdMax
from mindspore.ops.operations.math_ops import Lcm
from mindspore.ops.operations.math_ops import RaggedRange
from mindspore.ops.operations.array_ops import RangeV2
from mindspore.ops.operations.array_ops import ListDiff
@@ -1500,6 +1501,11 @@ test_case_math_ops = [
        'desc_inputs': [[2, 1, 4, 5], [2, 1, 4, 5]],
        'desc_bprop': [Tensor(np.zeros((2, 1, 4, 5), np.bool_))],
        'skip': ['backward']}),
    ('Lcm', {
        'block': Lcm(),
        'desc_inputs': [Tensor(np.array([3, 4, 5]).astype(np.int64)),
                        Tensor(np.array([4, 5, 6]).astype(np.int64))],
        'skip': ['backward']}),
    ('RealDiv_0', {
        'block': P.RealDiv(),
        'desc_const': [Tensor(2048.0), Tensor(0.0)],