diff --git a/mindspore/_extends/builtin_operations.py b/mindspore/_extends/builtin_operations.py
index a38135c295e..3a49fc01f18 100644
--- a/mindspore/_extends/builtin_operations.py
+++ b/mindspore/_extends/builtin_operations.py
@@ -21,32 +21,32 @@ import mindspore.common.dtype as mstype
 from mindspore.common.dtype import dtype_to_nptype, get_py_obj_dtype
 
 
-def scalar_add(x, y):
+def ScalarAdd(x, y):
     """Implement `scalar_add`."""
     return x + y
 
 
-def scalar_mul(x, y):
+def ScalarMul(x, y):
     """Implement `scalar_mul`."""
     return x * y
 
 
-def scalar_mod(x, y):
+def ScalarMod(x, y):
     """Implement `scalar_mod`."""
     return x % y
 
 
-def scalar_sub(x, y):
+def ScalarSub(x, y):
     """Implement `scalar_sub`."""
     return x - y
 
 
-def scalar_usub(x):
+def ScalarUsub(x):
     """Implement `scalar_usub`."""
     return -x
 
 
-def tuple_getitem(x, index):
+def TupleGetItem(x, index):
     """Implement `tuple_getitem`."""
     if isinstance(x, Tensor):
         x = x.asnumpy()
diff --git a/mindspore/ccsrc/frontend/operator/prim_to_function.cc b/mindspore/ccsrc/frontend/operator/prim_to_function.cc
index 0a39409e623..1b4884a114d 100644
--- a/mindspore/ccsrc/frontend/operator/prim_to_function.cc
+++ b/mindspore/ccsrc/frontend/operator/prim_to_function.cc
@@ -15,6 +15,7 @@
  */
 
 #include "frontend/operator/prim_to_function.h"
+#include "base/core_ops.h"
 
 namespace mindspore {
 // namespace to support prim related definition
@@ -25,31 +26,31 @@ PrimToFunction::PrimToFunction()
       {"bool_not", kPrimTypeOneArg},
       {"scalar_cos", kPrimTypeOneArg},
       {"scalar_exp", kPrimTypeOneArg},
-      {"scalar_floor", kPrimTypeOneArg},
+      {kScalarFloor, kPrimTypeOneArg},
       {"scalar_log", kPrimTypeOneArg},
       {"scalar_sin", kPrimTypeOneArg},
       {"scalar_tan", kPrimTypeOneArg},
-      {"scalar_trunc", kPrimTypeOneArg},
+      {kScalarTrunc, kPrimTypeOneArg},
       {"typeof", kPrimTypeOneArg},
-      {"scalar_uadd", kPrimTypeOneArg},
-      {"scalar_usub", kPrimTypeOneArg},
+      {kScalarUadd, kPrimTypeOneArg},
+      {kScalarUsub, kPrimTypeOneArg},
       // TWO_ARGS prim
-      {"scalar_add", kPrimTypeTwoArgs},
+      {kScalarAdd, kPrimTypeTwoArgs},
       {"bool_and", kPrimTypeTwoArgs},
       {"bool_eq", kPrimTypeTwoArgs},
       {"bool_or", kPrimTypeTwoArgs},
-      {"scalar_div", kPrimTypeTwoArgs},
+      {kScalarDiv, kPrimTypeTwoArgs},
       {"scalar_eq", kPrimTypeTwoArgs},
       {"scalar_ge", kPrimTypeTwoArgs},
       {"scalar_gt", kPrimTypeTwoArgs},
       {"scalar_le", kPrimTypeTwoArgs},
       {"scalar_lt", kPrimTypeTwoArgs},
       {"scalar_ne", kPrimTypeTwoArgs},
-      {"scalar_mod", kPrimTypeTwoArgs},
-      {"scalar_mul", kPrimTypeTwoArgs},
-      {"scalar_pow", kPrimTypeTwoArgs},
-      {"scalar_sub", kPrimTypeTwoArgs},
-      {"scalar_floordiv", kPrimTypeTwoArgs}}) {}
+      {kScalarMod, kPrimTypeTwoArgs},
+      {kScalarMul, kPrimTypeTwoArgs},
+      {kScalarPow, kPrimTypeTwoArgs},
+      {kScalarSub, kPrimTypeTwoArgs},
+      {kScalarFloordiv, kPrimTypeTwoArgs}}) {}
 
 bool PrimToFunction::GetFunction(const PrimitivePtr &prim, FunctionPtr *const func) const {
   bool result = false;
diff --git a/mindspore/ccsrc/frontend/parallel/graph_util/node_info.cc b/mindspore/ccsrc/frontend/parallel/graph_util/node_info.cc
index 8a11d14a9ed..e3d30fbb7d3 100644
--- a/mindspore/ccsrc/frontend/parallel/graph_util/node_info.cc
+++ b/mindspore/ccsrc/frontend/parallel/graph_util/node_info.cc
@@ -18,6 +18,7 @@
 
 #include 
 
+#include "base/core_ops.h"
 #include "ir/param_info.h"
 #include "ir/meta_tensor.h"
 #include "pipeline/jit/parse/python_adapter.h"
@@ -306,7 +307,7 @@ bool FindReshapePreNodeStraCosts(const AnfNodePtr &node, OperatorInfoPtr *pre_op
   }
   ValueNodePtr prim_anf_node = cnode->input(0)->cast<ValueNodePtr>();
   PrimitivePtr prim = prim_anf_node->value()->cast<PrimitivePtr>();
-  if (prim->name() == TUPLE_GETITEM) {
+  if (prim->name() == prim::kTupleGetItem) {
     *out_index = GetTupleGetItemIndex(cnode);
     // find tuple_get_item's previous node
     auto pre_node = cnode->input(1);
diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/ops_utils.h b/mindspore/ccsrc/frontend/parallel/ops_info/ops_utils.h
index d1f0fcab12c..4a901ee75c4 100644
--- a/mindspore/ccsrc/frontend/parallel/ops_info/ops_utils.h
+++ b/mindspore/ccsrc/frontend/parallel/ops_info/ops_utils.h
@@ -17,6 +17,8 @@
 #ifndef MINDSPORE_CCSRC_FRONTEND_PARALLEL_OPS_INFO_OPS_UTILS_H_
 #define MINDSPORE_CCSRC_FRONTEND_PARALLEL_OPS_INFO_OPS_UTILS_H_
 
+#include "base/core_ops.h"
+
 namespace mindspore {
 namespace parallel {
 constexpr size_t PRELU_INPUTS_SIZE = 2;
@@ -320,7 +322,6 @@ constexpr char KStridedSlice[] = "StridedSlice";
 constexpr char UNIQUE[] = "Unique";
 
 // Parallel don't care
-constexpr char TUPLE_GETITEM[] = "tuple_getitem";
 constexpr char STRING_EQUAL[] = "string_equal";
 constexpr char MAKE_TUPLE[] = "make_tuple";
 constexpr char MAKE_LIST[] = "make_list";
diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/unique_info.cc b/mindspore/ccsrc/frontend/parallel/ops_info/unique_info.cc
index 271c4c1bb5a..5684b3f722b 100644
--- a/mindspore/ccsrc/frontend/parallel/ops_info/unique_info.cc
+++ b/mindspore/ccsrc/frontend/parallel/ops_info/unique_info.cc
@@ -22,6 +22,7 @@
 #include 
 
 #include "ir/value.h"
+#include "base/core_ops.h"
 #include "frontend/parallel/device_matrix.h"
 #include "frontend/parallel/graph_util/generate_graph.h"
 #include "frontend/parallel/strategy.h"
@@ -206,8 +207,8 @@ Status UniqueInfo::ComputeReplaceGraph(const CNodePtr &cnode) {
   auto minimum = gen_g.PushBack({gen_g.NewOpInst(MINIMUM), relu, CreateInt32Tensor(slice_size - 1)});
   auto equal = gen_g.PushBack({gen_g.NewOpInst(EQUAL), sub, minimum});
   auto unique = gen_g.PushBack({gen_g.NewOpInst(replace_op_name_), gen_g.virtual_input_node()});
-  auto tuple_getitem_0 = gen_g.PushBack({gen_g.NewOpInst(TUPLE_GETITEM), unique, CreatInt64Imm(0)});
-  auto tuple_getitem_1 = gen_g.PushBack({gen_g.NewOpInst(TUPLE_GETITEM), unique, CreatInt64Imm(1)});
+  auto tuple_getitem_0 = gen_g.PushBack({gen_g.NewOpInst(prim::kTupleGetItem), unique, CreatInt64Imm(0)});
+  auto tuple_getitem_1 = gen_g.PushBack({gen_g.NewOpInst(prim::kTupleGetItem), unique, CreatInt64Imm(1)});
   auto dtype = gen_g.PushBack({gen_g.NewOpInst(DTYPE), tuple_getitem_1});
   auto cast = gen_g.PushBack({gen_g.NewOpInst(CAST), equal, dtype});
   auto mul = gen_g.PushBack({gen_g.NewOpInst(MUL), tuple_getitem_1, cast});
diff --git a/mindspore/ccsrc/frontend/parallel/step_auto_parallel.cc b/mindspore/ccsrc/frontend/parallel/step_auto_parallel.cc
index 93de7b45657..3a5c40ccc3c 100644
--- a/mindspore/ccsrc/frontend/parallel/step_auto_parallel.cc
+++ b/mindspore/ccsrc/frontend/parallel/step_auto_parallel.cc
@@ -28,6 +28,7 @@
 #include 
 #include 
 
+#include "base/core_ops.h"
 #include "frontend/optimizer/opt.h"
 #include "frontend/optimizer/optimizer.h"
 #include "frontend/parallel/auto_parallel/dp_algo_costmodel.h"
@@ -599,8 +600,8 @@ void ConstructCostGraphEdges(const std::vector<AnfNodePtr> &all_nodes) {
       PrimitivePtr prev_prim = prev_prim_anf_node->value()->cast<PrimitivePtr>();
       size_t output_index = 0;
 
-      bool bool_result =
-        (IsAutoParallelCareNode(prev_cnode)) || (prev_prim->name() == TUPLE_GETITEM) || (prev_prim->name() == DEPEND);
+      bool bool_result = (IsAutoParallelCareNode(prev_cnode)) || (prev_prim->name() == prim::kTupleGetItem) ||
+                         (prev_prim->name() == DEPEND);
       while (bool_result) {
         if (IsAutoParallelCareNode(prev_cnode)) {
           auto prev_op_info = prev_cnode->user_data<OperatorInfo>();
@@ -639,7 +640,7 @@ void ConstructCostGraphEdges(const std::vector<AnfNodePtr> &all_nodes) {
 
           edge_count++;
           break;
-        } else if (prev_prim->name() == TUPLE_GETITEM) {
+        } else if (prev_prim->name() == prim::kTupleGetItem) {
           // In this case, 'prev_anf_node' is 'tuple_getitem', the actual precursor node is node before
           // this 'tuple_getitem'
           MS_LOG(INFO) << "Jumping the 'tuple_getitem' operator.";
@@ -672,8 +673,8 @@ void ConstructCostGraphEdges(const std::vector<AnfNodePtr> &all_nodes) {
                      << "and creating an edge between the Operator before "
                      << "'depend' and the Operator after 'depend'.";
       }
-      bool_result =
-        (IsAutoParallelCareNode(prev_cnode)) || (prev_prim->name() == TUPLE_GETITEM) || (prev_prim->name() == DEPEND);
+      bool_result = (IsAutoParallelCareNode(prev_cnode)) || (prev_prim->name() == prim::kTupleGetItem) ||
+                    (prev_prim->name() == DEPEND);
     }
   }
   MS_LOG(INFO) << "Successfully created " << edge_count << " edges for: " << node_op_info->name();
@@ -960,13 +961,13 @@ std::vector<std::vector<std::string>> RecInputTensorNames(const std::map<...>
   auto prim = GetValueNode<PrimitivePtr>(prim_anf_node);
-  if (prim->name() == TUPLE_GETITEM || prim->name() == DEPEND) {
+  if (prim->name() == prim::kTupleGetItem || prim->name() == DEPEND) {
     auto prev_cnode = cnode->input(1)->cast<CNodePtr>();
     if (prev_cnode == nullptr || !IsValueNode<Primitive>(prev_cnode->input(0))) {
      return nullptr;
    }
    auto prev_prim = prev_cnode->input(0)->cast<ValueNodePtr>()->value()->cast<PrimitivePtr>();
-    while (prev_prim->name() == TUPLE_GETITEM || prev_prim->name() == DEPEND) {
+    while (prev_prim->name() == prim::kTupleGetItem || prev_prim->name() == DEPEND) {
       prev_cnode = prev_cnode->input(1)->cast<CNodePtr>();
       if (prev_cnode == nullptr || !IsValueNode<Primitive>(prev_cnode->input(0))) {
         return nullptr;
diff --git a/mindspore/ccsrc/frontend/parallel/step_parallel.cc b/mindspore/ccsrc/frontend/parallel/step_parallel.cc
index f009fd9e895..d4dd5352e11 100644
--- a/mindspore/ccsrc/frontend/parallel/step_parallel.cc
+++ b/mindspore/ccsrc/frontend/parallel/step_parallel.cc
@@ -27,6 +27,7 @@
 #include 
 #include 
 
+#include "base/core_ops.h"
 #include "frontend/operator/ops.h"
 #include "frontend/optimizer/optimizer.h"
 #include "frontend/parallel/auto_parallel/graph_costmodel.h"
@@ -311,7 +312,7 @@ void ForwardCommunication(OperatorVector forward_op, const CNodePtr &node) {
     }
     PrimitivePtr value_node_prim = GetValueNode<PrimitivePtr>(uses_cnode->input(0));
     MS_EXCEPTION_IF_NULL(value_node_prim);
-    if (value_node_prim->name() == TUPLE_GETITEM) {
+    if (value_node_prim->name() == prim::kTupleGetItem) {
       if (uses_set.size() > 1) {
         MS_LOG(EXCEPTION) << "Now only support one output, but got " << uses_set.size();
       }
@@ -409,7 +410,7 @@ void InsertGetTensorSliceOp(const Operator &op, const CNodePtr &node, const Func
 TensorLayout GetTensorInLayout(const CNodePtr &middle_node, const PrimitivePtr &middle_prim,
                                const OperatorInfoPtr &distribute_operator) {
   TensorInfo tensorinfo_in;
-  if (middle_prim->name() == TUPLE_GETITEM) {
+  if (middle_prim->name() == prim::kTupleGetItem) {
     auto value_node = middle_node->input(2)->cast<ValueNodePtr>();
     MS_EXCEPTION_IF_NULL(value_node);
     size_t index_s = LongToSize(GetValue<int64_t>(value_node->value()));
@@ -603,7 +604,7 @@ void StepRedistribution(const CNodePtr &node, const OperatorInfoPtr &distribute_
       MS_EXCEPTION_IF_NULL(current_value);
       PrimitivePtr current_prim = current_value->value()->cast<PrimitivePtr>();
       MS_EXCEPTION_IF_NULL(current_prim);
-      insert_node_new = ((current_prim->name() == TUPLE_GETITEM) ? node : insert_node);
+      insert_node_new = ((current_prim->name() == prim::kTupleGetItem) ? node : insert_node);
     } else {
       insert_node_new = insert_node;
     }
@@ -2117,7 +2118,7 @@ std::shared_ptr<TensorLayout> FindPrevLayout(const AnfNodePtr &node) {
   }
   ValueNodePtr prim_anf_node = cnode->input(0)->cast<ValueNodePtr>();
   PrimitivePtr prim = prim_anf_node->value()->cast<PrimitivePtr>();
-  if (prim->name() == TUPLE_GETITEM) {
+  if (prim->name() == prim::kTupleGetItem) {
     auto tuple_index = GetTupleGetItemIndex(cnode);
     auto layout_ptr = FindPrevParallelCareNodeLayout(cnode->input(1), LongToSize(tuple_index));
     if (!layout_ptr) {
@@ -2234,7 +2235,7 @@ LossNodeInfo FindLossCNode(const FuncGraphPtr &func_graph) {
   }
 
   // return -> tuple_getitem -> loss
-  if (current_prim->name() == TUPLE_GETITEM) {
+  if (current_prim->name() == prim::kTupleGetItem) {
     auto tuple_index = GetTupleGetItemIndex(pre_cnode);
     AnfNodePtr pre_pre_node = pre_cnode->input(1);
     MS_EXCEPTION_IF_NULL(pre_pre_node);
@@ -2491,7 +2492,7 @@ std::vector<...> GetSensLossPairs(const FuncGraphPtr &root) {
     }
     auto expect_tuple_getitem_cnode = expect_tuple_getitem->cast<CNodePtr>();
-    if (!IsSomePrimitive(expect_tuple_getitem_cnode, TUPLE_GETITEM)) {
+    if (!IsSomePrimitive(expect_tuple_getitem_cnode, prim::kTupleGetItem)) {
       continue;
     }
diff --git a/mindspore/ccsrc/transform/graph_ir/convert.cc b/mindspore/ccsrc/transform/graph_ir/convert.cc
index ad0c28ed03b..a41764cbd87 100644
--- a/mindspore/ccsrc/transform/graph_ir/convert.cc
+++ b/mindspore/ccsrc/transform/graph_ir/convert.cc
@@ -21,6 +21,7 @@
 #include 
 
 #include "utils/utils.h"
+#include "base/core_ops.h"
 #include "frontend/operator/ops.h"
 #include "utils/log_adapter.h"
 #include "ir/graph_utils.h"
@@ -631,7 +632,7 @@ void DfGraphConvertor::TraceOutput(const AnfNodePtr node) {
       MS_LOG(EXCEPTION) << "length of inputs is " << c->inputs().size() << ", which is less than 3";
     }
     TraceOutput(c->input(1));
-  } else if (name == "tuple_getitem") {
+  } else if (name == prim::kTupleGetItem) {
     TraceOutputFromTupleGetItem(anf_out);
   } else {
     // add outputs;
@@ -1014,7 +1015,7 @@ void DfGraphConvertor::SetOpInput(const OpAdapterPtr &adpt, const CNodePtr &node
     if (it != out_handle_cache_.end()) {
       int ret = adpt->setInput(src, SizeToInt(i), it->second);
       if (ret == 0) {
-        if (pred->isa<CNode>() && GetCNodeTargetFuncName(pred->cast<CNodePtr>()) == "tuple_getitem") {
+        if (pred->isa<CNode>() && GetCNodeTargetFuncName(pred->cast<CNodePtr>()) == prim::kTupleGetItem) {
           compute_sout_ << op_draw_name_[pred->cast<CNodePtr>()->input(1).get()] << " -> "
                         << op_draw_name_[node.get()] << ":" << i << endl;
         } else if (pred->isa<Parameter>()) {
@@ -1538,7 +1539,7 @@ bool DfGraphConvertor::CheckCNode(const std::string &name, const CNodePtr node)
   }
 
   // As for nodes with multi outputs, convert tuple_getitem to OutHandle
-  if (name == "tuple_getitem") {
+  if (name == prim::kTupleGetItem) {
     ConvertTupleGetItem(node);
     return false;
   }
diff --git a/mindspore/core/base/core_ops.h b/mindspore/core/base/core_ops.h
index 77523464bbf..1f22e6892f7 100644
--- a/mindspore/core/base/core_ops.h
+++ b/mindspore/core/base/core_ops.h
@@ -26,19 +26,33 @@
 namespace mindspore {
 namespace prim {
 constexpr auto kGather = "Gather";
+// Arithmetic
+constexpr auto kScalarAdd = "ScalarAdd";
+constexpr auto kScalarSub = "ScalarSub";
+constexpr auto kScalarMul = "ScalarMul";
+constexpr auto kScalarDiv = "ScalarDiv";
+constexpr auto kScalarFloordiv = "ScalarFloordiv";
+constexpr auto kScalarMod = "ScalarMod";
+constexpr auto kScalarPow = "ScalarPow";
+constexpr auto kScalarTrunc = "ScalarTrunc";
+constexpr auto kScalarFloor = "ScalarFloor";
+constexpr auto kScalarUadd = "ScalarUadd";
+constexpr auto kScalarUsub = "ScalarUsub";
+constexpr auto kTupleGetItem = "TupleGetItem";
+
 // Here list all primitives used in backend or some special primitives used by core.
 // Arithmetic
-inline const PrimitivePtr kPrimScalarAdd = std::make_shared<Primitive>("scalar_add");
-inline const PrimitivePtr kPrimScalarSub = std::make_shared<Primitive>("scalar_sub");
-inline const PrimitivePtr kPrimScalarMul = std::make_shared<Primitive>("scalar_mul");
-inline const PrimitivePtr kPrimScalarDiv = std::make_shared<Primitive>("scalar_div");
-inline const PrimitivePtr kPrimScalarFloordiv = std::make_shared<Primitive>("scalar_floordiv");
-inline const PrimitivePtr kPrimScalarMod = std::make_shared<Primitive>("scalar_mod");
-inline const PrimitivePtr kPrimScalarPow = std::make_shared<Primitive>("scalar_pow");
-inline const PrimitivePtr kPrimScalarTrunc = std::make_shared<Primitive>("scalar_trunc");
-inline const PrimitivePtr kPrimScalarFloor = std::make_shared<Primitive>("scalar_floor");
-inline const PrimitivePtr kPrimScalarUadd = std::make_shared<Primitive>("scalar_uadd");
-inline const PrimitivePtr kPrimScalarUsub = std::make_shared<Primitive>("scalar_usub");
+inline const PrimitivePtr kPrimScalarAdd = std::make_shared<Primitive>(kScalarAdd);
+inline const PrimitivePtr kPrimScalarSub = std::make_shared<Primitive>(kScalarSub);
+inline const PrimitivePtr kPrimScalarMul = std::make_shared<Primitive>(kScalarMul);
+inline const PrimitivePtr kPrimScalarDiv = std::make_shared<Primitive>(kScalarDiv);
+inline const PrimitivePtr kPrimScalarFloordiv = std::make_shared<Primitive>(kScalarFloordiv);
+inline const PrimitivePtr kPrimScalarMod = std::make_shared<Primitive>(kScalarMod);
+inline const PrimitivePtr kPrimScalarPow = std::make_shared<Primitive>(kScalarPow);
+inline const PrimitivePtr kPrimScalarTrunc = std::make_shared<Primitive>(kScalarTrunc);
+inline const PrimitivePtr kPrimScalarFloor = std::make_shared<Primitive>(kScalarFloor);
+inline const PrimitivePtr kPrimScalarUadd = std::make_shared<Primitive>(kScalarUadd);
+inline const PrimitivePtr kPrimScalarUsub = std::make_shared<Primitive>(kScalarUsub);
 inline const PrimitivePtr kPrimScalarExp = std::make_shared<Primitive>("scalar_exp");
 inline const PrimitivePtr kPrimScalarLog = std::make_shared<Primitive>("scalar_log");
 inline const PrimitivePtr kPrimScalarSin = std::make_shared<Primitive>("scalar_sin");
@@ -295,7 +309,7 @@ inline const PrimitivePtr kPrimCall = std::make_shared<Primitive>("call");
 
 inline const PrimitivePtr kPrimMakeTuple = std::make_shared<Primitive>("make_tuple");
 inline const PrimitivePtr kPrimMakeSlice = std::make_shared<Primitive>("make_slice");
-inline const PrimitivePtr kPrimTupleGetItem = std::make_shared<Primitive>("tuple_getitem");
+inline const PrimitivePtr kPrimTupleGetItem = std::make_shared<Primitive>(kTupleGetItem);
 inline const PrimitivePtr kPrimArrayGetItem = std::make_shared<Primitive>("array_getitem");
 inline const PrimitivePtr kPrimTupleSetItem = std::make_shared<Primitive>("tuple_setitem");
 inline const PrimitivePtr kPrimArraySetItem = std::make_shared<Primitive>("array_setitem");
diff --git a/mindspore/core/utils/parallel_node_check.cc b/mindspore/core/utils/parallel_node_check.cc
index ac0c7e8c5bc..770f260c5e7 100644
--- a/mindspore/core/utils/parallel_node_check.cc
+++ b/mindspore/core/utils/parallel_node_check.cc
@@ -19,9 +19,11 @@
 #include 
 #include 
 
+#include "base/core_ops.h"
+
 namespace mindspore {
 // clang-format off
-static const std::set<std::string> PARALLEL_BLACK_LIST_ = {"tuple_getitem", "J", "list_getitem",
+static const std::set<std::string> PARALLEL_BLACK_LIST_ = {prim::kTupleGetItem, "J", "list_getitem",
   "array_getitem", "tuple_setitem", "Depend", "list_setitem", "array_setitem", "dict_getitem",
   "list_append", "list_map", "list_reduce", "tuple_reversed", "tile_shape", "tuple_div", "tuple_to_array",
   "make_dict", "make_slice", "make_record", "string_equal", "VirtualLoss", "return", "env_getitem",
diff --git a/mindspore/ops/_grad/grad_implementations.py b/mindspore/ops/_grad/grad_implementations.py
index d3bd3f0e2ac..9c9a047054b 100644
--- a/mindspore/ops/_grad/grad_implementations.py
+++ b/mindspore/ops/_grad/grad_implementations.py
@@ -14,6 +14,7 @@
 # ============================================================================
 
 """bprop primitives"""
+from mindspore.ops import _constants
 from ..operations import _grad_ops as G
 from .. import functional as F
 from .. import operations as P
@@ -50,31 +51,31 @@ def bprop_relu_grad_grad(x, y, out, dout):
     return dy, F.zeros_like(y)
 
 
-@bprops.register("scalar_add")
+@bprops.register(_constants.kScalarAdd)
 def bprop_scalar_add(x, y, out, dout):
     """Backpropagator for primitive `scalar_add`."""
     return dout, dout
 
 
-@bprops.register("scalar_mul")
+@bprops.register(_constants.kScalarMul)
 def bprop_scalar_mul(x, y, out, dout):
     """Backpropagator for primitive `scalar_mul`."""
     return dout*y, dout*x
 
 
-@bprops.register("scalar_sub")
+@bprops.register(_constants.kScalarSub)
 def bprop_scalar_sub(x, y, out, dout):
     """Backpropagator for primitive `scalar_sub`."""
     return dout, -dout
 
 
-@bprops.register("scalar_div")
+@bprops.register(_constants.kScalarDiv)
 def bprop_scalar_div(x, y, out, dout):
     """Backpropagator for primitive `scalar_div`."""
     return dout/y, (-dout) * (out/y)
 
 
-@bprops.register("scalar_pow")
+@bprops.register(_constants.kScalarPow)
 def bprop_scalar_pow(x, y, out, dout):
     """Backpropagator for primitive `scalar_pow`."""
     return dout * (y * (x ** (y-1))), dout * (F.scalar_log(x) * out)
@@ -86,13 +87,13 @@ def bprop_scalar_exp(x, out, dout):
     return (dout * out,)
 
 
-@bprops.register("scalar_uadd")
+@bprops.register(_constants.kScalarUadd)
 def bprop_scalar_uadd(x, out, dout):
     """Backpropagator for primitive `scalar_uadd`."""
     return (dout,)
 
 
-@bprops.register("scalar_usub")
+@bprops.register(_constants.kScalarUsub)
 def bprop_scalar_usub(x, out, dout):
     """Backpropagator for primitive `scalar_usub`."""
     return (-dout,)
@@ -140,7 +141,7 @@ def bprop_scalar_cast(x, t, out, dout):
     return F.scalar_cast(dout, F.typeof(x)), t
 
 
-@bprops.register("tuple_getitem")
+@bprops.register(_constants.kTupleGetItem)
 def bprop_tuple_getitem(data, idx, out, dout):
     """Backpropagator for primitive `tuple_getitem`."""
     return F.tuple_setitem(C.zeros_like(data), idx, dout), C.zeros_like(idx)
diff --git a/mindspore/ops/functional.py b/mindspore/ops/functional.py
index 3d87c11520a..76862815f7e 100644
--- a/mindspore/ops/functional.py
+++ b/mindspore/ops/functional.py
@@ -18,11 +18,11 @@
 """The names of functional part are summarized here."""
 
 from mindspore.common._register_for_tensor import tensor_operator_registry
+from mindspore.ops import _constants
 from .primitive import Primitive
 from . import operations as P
 from .operations import _grad_ops
-
 typeof = Primitive('typeof')
 hastype = Primitive('hastype')
 cast = P.Cast()
@@ -96,7 +96,7 @@ depend = P.Depend()
 identity = P.identity()
 
 tuple_setitem = Primitive('tuple_setitem')
-tuple_getitem = Primitive('tuple_getitem')
+tuple_getitem = Primitive(_constants.kTupleGetItem)
 list_getitem = Primitive('list_getitem')
 list_setitem = Primitive('list_setitem')
 dict_getitem = Primitive('dict_getitem')
@@ -114,22 +114,22 @@ tuple_equal = Primitive("tuple_equal")
 list_equal = Primitive("list_equal")
 make_ref = Primitive("make_ref")
 
-scalar_add = Primitive('scalar_add')
-scalar_mul = Primitive('scalar_mul')
-scalar_sub = Primitive('scalar_sub')
-scalar_div = Primitive('scalar_div')
-scalar_floordiv = Primitive('scalar_floordiv')
+scalar_add = Primitive(_constants.kScalarAdd)
+scalar_mul = Primitive(_constants.kScalarMul)
+scalar_sub = Primitive(_constants.kScalarSub)
+scalar_div = Primitive(_constants.kScalarDiv)
+scalar_floordiv = Primitive(_constants.kScalarFloordiv)
 scalar_log = Primitive('scalar_log')
-scalar_pow = Primitive('scalar_pow')
+scalar_pow = Primitive(_constants.kScalarPow)
 scalar_gt = Primitive('scalar_gt')
 scalar_ge = Primitive('scalar_ge')
 scalar_le = Primitive('scalar_le')
 scalar_lt = Primitive('scalar_lt')
 scalar_eq = Primitive('scalar_eq')
 scalar_ne = Primitive('scalar_ne')
-scalar_uadd = Primitive('scalar_uadd')
-scalar_usub = Primitive('scalar_usub')
-scalar_mod = Primitive('scalar_mod')
+scalar_uadd = Primitive(_constants.kScalarUadd)
+scalar_usub = Primitive(_constants.kScalarUsub)
+scalar_mod = Primitive(_constants.kScalarMod)
 string_eq = Primitive('string_equal')
 string_concat = Primitive('string_concat')
 bool_not = Primitive("bool_not")
diff --git a/tests/ut/cpp/ir/anf_test.cc b/tests/ut/cpp/ir/anf_test.cc
index 3d89bc41afa..c3240d59014 100644
--- a/tests/ut/cpp/ir/anf_test.cc
+++ b/tests/ut/cpp/ir/anf_test.cc
@@ -21,6 +21,7 @@
 #include "ir/anf.h"
 #include "ir/func_graph.h"
 #include "frontend/operator/ops.h"
+#include "base/core_ops.h"
 
 namespace mindspore {
 
@@ -32,7 +33,7 @@ class TestAnf : public UT::Common {
 };
 
 TEST_F(TestAnf, test_ValueNode) {
-  auto prim = std::make_shared<Primitive>("scalar_add");
+  auto prim = std::make_shared<Primitive>(prim::kScalarAdd);
   ValueNodePtr c = NewValueNode(prim);
   ASSERT_EQ(c->isa<ValueNode>(), true);
   ASSERT_EQ(IsValueNode<Primitive>(c), true);
diff --git a/tests/ut/cpp/ir/clone_test.cc b/tests/ut/cpp/ir/clone_test.cc
index a93dc10e279..dce76f09411 100644
--- a/tests/ut/cpp/ir/clone_test.cc
+++ b/tests/ut/cpp/ir/clone_test.cc
@@ -24,6 +24,7 @@
 #include "pipeline/jit/parse/parse.h"
 #include "ir/graph_utils.h"
 #include "debug/draw.h"
+#include "base/core_ops.h"
 
 namespace mindspore {
 class TestCloner : public UT::Common {
@@ -89,7 +90,7 @@ TEST_F(TestCloner, test_clone_simple) {
   Cloner cl2(gs);
   auto g3 = cl2[g];
 
-  std::vector<Primitive> results = {Primitive("scalar_add"), Primitive("scalar_mul"), Primitive("return")};
+  std::vector<Primitive> results = {Primitive(prim::kScalarAdd), Primitive(prim::kScalarMul), Primitive("return")};
   AnfNodeSet d3 = AnfNodeSet(DeepScopedGraphSearch(g3->get_return()));
   common = d1 & d3;
   for (auto& x : common) {
diff --git a/tests/ut/cpp/operator/ops_test.cc b/tests/ut/cpp/operator/ops_test.cc
index b6ca6ac1db5..333c8aca021 100644
--- a/tests/ut/cpp/operator/ops_test.cc
+++ b/tests/ut/cpp/operator/ops_test.cc
@@ -22,6 +22,7 @@
 #include "pybind_api/ir/primitive_py.h"
 #include "pipeline/jit/parse/python_adapter.h"
 #include "frontend/operator/ops.h"
+#include "base/core_ops.h"
 
 namespace mindspore {
 namespace prim {
@@ -34,52 +35,52 @@ class TestOps : public UT::Common {
 
 // Arithmetic
 TEST_F(TestOps, ScalarAddTest) {
-  auto prim = std::make_shared<Primitive>("scalar_add");
+  auto prim = std::make_shared<Primitive>(prim::kScalarAdd);
   ASSERT_EQ(prim->name(), kPrimScalarAdd->name());
 }
 
 TEST_F(TestOps, ScalarSubTest) {
-  auto prim = std::make_shared<Primitive>("scalar_sub");
+  auto prim = std::make_shared<Primitive>(prim::kScalarSub);
   ASSERT_EQ(prim->name(), kPrimScalarSub->name());
 }
 
 TEST_F(TestOps, ScalarMulTest) {
-  auto prim = std::make_shared<Primitive>("scalar_mul");
+  auto prim = std::make_shared<Primitive>(prim::kScalarMul);
   ASSERT_EQ(prim->name(), kPrimScalarMul->name());
 }
 
 TEST_F(TestOps, ScalarDivTest) {
-  auto prim = std::make_shared<Primitive>("scalar_div");
+  auto prim = std::make_shared<Primitive>(prim::kScalarDiv);
   ASSERT_EQ(prim->name(), kPrimScalarDiv->name());
 }
 
 TEST_F(TestOps, ScalarModTest) {
-  auto prim = std::make_shared<Primitive>("scalar_mod");
+  auto prim = std::make_shared<Primitive>(prim::kScalarMod);
   ASSERT_EQ(prim->name(), kPrimScalarMod->name());
 }
 
 TEST_F(TestOps, ScalarPowTest) {
-  auto prim = std::make_shared<Primitive>("scalar_pow");
+  auto prim = std::make_shared<Primitive>(prim::kScalarPow);
   ASSERT_EQ(prim->name(), kPrimScalarPow->name());
 }
 
 TEST_F(TestOps, ScalarTruncTest) {
-  auto prim = std::make_shared<Primitive>("scalar_trunc");
+  auto prim = std::make_shared<Primitive>(prim::kScalarTrunc);
   ASSERT_EQ(prim->name(), kPrimScalarTrunc->name());
 }
 
 TEST_F(TestOps, ScalarFloorTest) {
-  auto prim = std::make_shared<Primitive>("scalar_floor");
+  auto prim = std::make_shared<Primitive>(prim::kScalarFloor);
   ASSERT_EQ(prim->name(), kPrimScalarFloor->name());
 }
 
 TEST_F(TestOps, ScalarUaddTest) {
-  auto prim = std::make_shared<Primitive>("scalar_uadd");
+  auto prim = std::make_shared<Primitive>(prim::kScalarUadd);
   ASSERT_EQ(prim->name(), kPrimScalarUadd->name());
 }
 
 TEST_F(TestOps, ScalarUsubTest) {
-  auto prim = std::make_shared<Primitive>("scalar_usub");
+  auto prim = std::make_shared<Primitive>(prim::kScalarUsub);
   ASSERT_EQ(prim->name(), kPrimScalarUsub->name());
 }
 
@@ -187,7 +188,7 @@ TEST_F(TestOps, MakeRecordTest) {
 }
 
 TEST_F(TestOps, TupleGetItemTest) {
-  auto prim = std::make_shared<Primitive>("tuple_getitem");
+  auto prim = std::make_shared<Primitive>(kTupleGetItem);
   ASSERT_EQ(prim->name(), kPrimTupleGetItem->name());
 }
 
diff --git a/tests/ut/cpp/operator/prim2func_test.cc b/tests/ut/cpp/operator/prim2func_test.cc
index 3952128b527..e9aeeaec5b1 100644
--- a/tests/ut/cpp/operator/prim2func_test.cc
+++ b/tests/ut/cpp/operator/prim2func_test.cc
@@ -22,6 +22,7 @@
 #include "ir/anf.h"
 #include "ir/dtype.h"
 #include "frontend/operator/prim_to_function.h"
+#include "base/core_ops.h"
 
 namespace mindspore {
 namespace prim {
@@ -33,7 +34,7 @@ class TestPrimFunc : public UT::Common {
 };
 
 TEST_F(TestPrimFunc, ScalarAddTest) {
-  auto prim = std::make_shared<Primitive>("scalar_add");
+  auto prim = std::make_shared<Primitive>(prim::kScalarAdd);
   FunctionPtr func = nullptr;
   PrimToFunction::GetInstance().GetFunction(prim, &func);
diff --git a/tests/ut/cpp/pipeline/static_analysis/prim_test.cc b/tests/ut/cpp/pipeline/static_analysis/prim_test.cc
index c6146c9b73d..8470c5b81ba 100644
--- a/tests/ut/cpp/pipeline/static_analysis/prim_test.cc
+++ b/tests/ut/cpp/pipeline/static_analysis/prim_test.cc
@@ -27,6 +27,7 @@
 #include "debug/draw.h"
 #include "ir/tensor.h"
 #include "utils/symbolic.h"
+#include "base/core_ops.h"
 
 namespace mindspore {
 namespace abstract {
@@ -154,7 +155,7 @@ TEST_F(TestPrim, test_list_map) {
   AbstractBasePtr abstract_v2 = FromValue(static_cast<int64_t>(2), false);
   AbstractBasePtr abstract_u2 = FromValue(static_cast<int64_t>(2), false);
   auto abstract_list2 = std::make_shared<AbstractList>(AbstractBasePtrList({abstract_v2, abstract_u2}));
-  auto prim_scalar_add = std::make_shared<Primitive>("scalar_add");
+  auto prim_scalar_add = std::make_shared<Primitive>(prim::kScalarAdd);
   AbstractBasePtr abstract_func = ToAbstract(prim_scalar_add);
 
   args_spec_list.push_back(abstract_func);
@@ -179,7 +180,7 @@ TEST_F(TestPrim, test_list_reduce) {
   AbstractBasePtr abstract_v1 = FromValue(v1, false);
   AbstractBasePtr abstract_v2 = FromValue(v1, false);
   auto abstract_list = std::make_shared<AbstractList>(AbstractBasePtrList({abstract_v1, abstract_v2}));
-  auto prim_scalar_add = std::make_shared<Primitive>("scalar_add");
+  auto prim_scalar_add = std::make_shared<Primitive>(prim::kScalarAdd);
   AbstractBasePtr abstract_func = ToAbstract(prim_scalar_add);
 
   args_spec_list.push_back(abstract_func);
diff --git a/tests/ut/cpp/pipeline/static_analysis/specialize_test.cc b/tests/ut/cpp/pipeline/static_analysis/specialize_test.cc
index 52e22d5d321..51a95c0bc9c 100644
--- a/tests/ut/cpp/pipeline/static_analysis/specialize_test.cc
+++ b/tests/ut/cpp/pipeline/static_analysis/specialize_test.cc
@@ -27,6 +27,7 @@
 #include "ir/graph_utils.h"
 #include "utils/misc.h"
 #include "debug/draw.h"
+#include "base/core_ops.h"
 
 namespace mindspore {
 namespace abstract {
@@ -95,7 +96,7 @@ void TestSpecializeGraph::SetUp() {
   // build func_graph beta
   ParameterPtr x1 = graph_beta_->add_parameter();
   inputs.clear();
-  inputs.push_back(NewValueNode(std::make_shared<Primitive>("scalar_add")));
+  inputs.push_back(NewValueNode(std::make_shared<Primitive>(prim::kScalarAdd)));
   inputs.push_back(x1);
   inputs.push_back(y);
   CNodePtr cnode_add = graph_beta_->NewCNode(inputs);
@@ -166,7 +167,7 @@ class MetaScalarAdd : public MetaFuncGraph {
     FuncGraphPtr graph_g = std::make_shared<FuncGraph>();
     ParameterPtr x = graph_g->add_parameter();
     ParameterPtr y = graph_g->add_parameter();
-    auto prim_scalar_add = std::make_shared<Primitive>("scalar_add");
+    auto prim_scalar_add = std::make_shared<Primitive>(prim::kScalarAdd);
     std::vector<AnfNodePtr> inputs;
     inputs.push_back(NewValueNode(prim_scalar_add));
     inputs.push_back(x);
diff --git a/tests/ut/cpp/pipeline/static_analysis/static_analysis_test.cc b/tests/ut/cpp/pipeline/static_analysis/static_analysis_test.cc
index fa86d5549a2..8bfde95be30 100644
--- a/tests/ut/cpp/pipeline/static_analysis/static_analysis_test.cc
+++ b/tests/ut/cpp/pipeline/static_analysis/static_analysis_test.cc
@@ -28,6 +28,7 @@
 #include "pipeline/jit/resource.h"
 #include "debug/draw.h"
 #include "utils/log_adapter.h"
+#include "base/core_ops.h"
 
 namespace mindspore {
 namespace abstract {
@@ -96,7 +97,7 @@ class MetaScalarAdd : public MetaFuncGraph {
     FuncGraphPtr fg = std::make_shared<FuncGraph>();
     ParameterPtr x = fg->add_parameter();
     ParameterPtr y = fg->add_parameter();
-    auto prim_scalar_add = std::make_shared<Primitive>("scalar_add");
+    auto prim_scalar_add = std::make_shared<Primitive>(prim::kScalarAdd);
     std::vector<AnfNodePtr> inputs;
     inputs.push_back(NewValueNode(prim_scalar_add));
     inputs.push_back(x);
@@ -161,7 +162,7 @@ TEST_F(TestInfer, test_inferred_scalar_add) {
   args_spec_list.push_back(abstract_v1);
   args_spec_list.push_back(abstract_v2);
 
-  auto prim_scalar_add = std::make_shared<Primitive>("scalar_add");
+  auto prim_scalar_add = std::make_shared<Primitive>(prim::kScalarAdd);
   FuncGraphPtr func_graph = MakeFuncGraph(prim_scalar_add);
   AbstractBasePtr abs_base_got = engine_->Run(func_graph, args_spec_list).inferred->abstract();
   ASSERT_TRUE(abs_base_got.get() == abstract_v1.get());
@@ -388,7 +389,7 @@ TEST_F(TestInferUniform, test_inferred_scalar_add) {
   args_spec.push_back(abstract_v1);
   args_spec.push_back(abstract_v2);
 
-  auto prim_scalar_add = std::make_shared<Primitive>("scalar_add");
+  auto prim_scalar_add = std::make_shared<Primitive>(prim::kScalarAdd);
   FuncGraphPtr func_graph = MakeFuncGraph(prim_scalar_add);
   AbstractBasePtr abs_base_got = engine_->Run(func_graph, args_spec).inferred->abstract();
   ASSERT_TRUE(*(abs_base_got->GetTypeTrack()) == *(abstract_v1->GetTypeTrack()));
@@ -418,7 +419,7 @@ TEST_F(TestEvalOnePrim, test_scalar_add) {
   AbstractBasePtr base1 = FromValue(x1, false);
   AbstractBasePtr base2 = FromValue(x2, false);
   AbstractBasePtrList base_list = {base1, base2};
 
-  auto res = EvalOnePrim(std::make_shared<Primitive>("scalar_add"), base_list)->abstract();
+  auto res = EvalOnePrim(std::make_shared<Primitive>(prim::kScalarAdd), base_list)->abstract();
   MS_LOG(INFO) << "result spec: " << res->ToString();
   AbstractBasePtr exp = FromValue(x3, false);
   MS_LOG(INFO) << "result exp: " << exp->ToString();
diff --git a/tests/ut/cpp/python_input/gtest_input/ir/clone_test.py b/tests/ut/cpp/python_input/gtest_input/ir/clone_test.py
index ab2a39835d5..14d8aea21d2 100644
--- a/tests/ut/cpp/python_input/gtest_input/ir/clone_test.py
+++ b/tests/ut/cpp/python_input/gtest_input/ir/clone_test.py
@@ -14,9 +14,10 @@
 # ============================================================================
 """ Test for GraphCloner """
 from mindspore.ops import Primitive
+from mindspore.ops import _constants as Constants
 
-scala_add = Primitive('scalar_add')
-scalar_mul = Primitive('scalar_mul')
+scala_add = Primitive(Constants.kScalarAdd)
+scalar_mul = Primitive(Constants.kScalarMul)
 
 
 def test_clone_simple():
diff --git a/tests/ut/cpp/python_input/gtest_input/optimizer/ad/ad_test.py b/tests/ut/cpp/python_input/gtest_input/optimizer/ad/ad_test.py
index bcfa077ea5e..28110b6fc4e 100644
--- a/tests/ut/cpp/python_input/gtest_input/optimizer/ad/ad_test.py
+++ b/tests/ut/cpp/python_input/gtest_input/optimizer/ad/ad_test.py
@@ -18,9 +18,11 @@ import numpy as np
 import mindspore as ms
 from mindspore.common.tensor import Tensor
 from mindspore.ops import Primitive
+from mindspore.ops import _constants as Constants
 from tests.ut.python.model.resnet import resnet50
 
-scala_add = Primitive('scalar_add')
+
+scala_add = Primitive(Constants.kScalarAdd)
 
 
 @dataclass
diff --git a/tests/ut/cpp/python_input/gtest_input/optimizer/opt_test.py b/tests/ut/cpp/python_input/gtest_input/optimizer/opt_test.py
index b58a8107bf5..e3187584a4a 100644
--- a/tests/ut/cpp/python_input/gtest_input/optimizer/opt_test.py
+++ b/tests/ut/cpp/python_input/gtest_input/optimizer/opt_test.py
@@ -17,6 +17,7 @@
 import numpy as np
 
 from mindspore import Tensor
 from mindspore.ops import Primitive
+from mindspore.ops import _constants as Constants
 from mindspore.ops import operations as P
 from mindspore.ops.operations import _grad_ops as G
 
@@ -26,9 +27,9 @@ from mindspore.ops.operations import _grad_ops as G
 # pylint: disable=unused-argument
 # pylint: disable=redefined-outer-name
 
-scalar_add = Primitive('scalar_add')
-scalar_mul = Primitive('scalar_mul')
-tuple_getitem = Primitive('tuple_getitem')
+scalar_add = Primitive(Constants.kScalarAdd)
+scalar_mul = Primitive(Constants.kScalarMul)
+tuple_getitem = Primitive(Constants.kTupleGetItem)
 switch = Primitive('switch')
 
 
@@ -347,7 +348,7 @@ def test_inline_while(tag):
 def test_cse(tag):
     """ test_cse """
     fns = FnDict()
-    scalar_div = Primitive('scalar_div')
+    scalar_div = Primitive(Constants.kScalarDiv)
 
     @fns
     def test_f1(x, y):
@@ -920,9 +921,9 @@ def test_convert_switch_ops(tag):
     fns = FnDict()
     ge_switch = Primitive('GeSwitch')
     merge = Primitive('Merge')
-    add = Primitive('Add')
+    add = Primitive(Constants.kScalarAdd)
     neg = Primitive('Neg')
-    tuple_getitem = Primitive('tuple_getitem')
+    tuple_getitem = Primitive(Constants.kTupleGetItem)
     make_tuple = Primitive('make_tuple')
 
     @fns
diff --git a/tests/ut/cpp/python_input/gtest_input/pipeline/infer/infer_test.py b/tests/ut/cpp/python_input/gtest_input/pipeline/infer/infer_test.py
index 0ea1f03285d..eb7bb5c36d5 100644
--- a/tests/ut/cpp/python_input/gtest_input/pipeline/infer/infer_test.py
+++ b/tests/ut/cpp/python_input/gtest_input/pipeline/infer/infer_test.py
@@ -18,8 +18,9 @@ import mindspore.nn as nn
 from mindspore.ops import Primitive
 from mindspore.ops import functional as F
 from mindspore.ops import operations as P
+from mindspore.ops import _constants as Constants
 
-scala_add = Primitive('scalar_add')
+scala_add = Primitive(Constants.kScalarAdd)
 
 
 @dataclass
diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/adam_apply_one_fusion_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/adam_apply_one_fusion_test.py
index 1834026dcc6..445191cf4cb 100644
--- a/tests/ut/cpp/python_input/gtest_input/pre_activate/adam_apply_one_fusion_test.py
+++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/adam_apply_one_fusion_test.py
@@ -15,6 +15,8 @@
 from mindspore.ops import Primitive
 from mindspore.ops import operations as P
 from mindspore.ops import functional as F
+from mindspore.ops import _constants as Constants
+
 
 Add = P.Add()
 Sub = P.Sub()
@@ -24,7 +26,7 @@ Sqrt = P.Sqrt()
 Square = P.Square()
 Assign = P.Assign()
 make_tuple = Primitive('make_tuple')
-tuple_getitem = Primitive('tuple_getitem')
+tuple_getitem = Primitive(Constants.kTupleGetItem)
 AdamApplyOne = Primitive('AdamApplyOne')
 AdamApplyOneAssign = Primitive('AdamApplyOneAssign')
 
diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/adam_apply_one_with_decay_rule.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/adam_apply_one_with_decay_rule.py
index 5d8c30b946f..3a12da1992b 100644
--- a/tests/ut/cpp/python_input/gtest_input/pre_activate/adam_apply_one_with_decay_rule.py
+++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/adam_apply_one_with_decay_rule.py
@@ -16,6 +16,7 @@
 from mindspore.ops import Primitive
 from mindspore.ops import operations as P
 from mindspore.ops import functional as F
+from mindspore.ops import _constants as Constants
 
 mul = P.Mul()
 add = P.Add()
@@ -25,7 +26,7 @@ real_div = P.RealDiv()
 sub = P.Sub()
 Assign = P.Assign()
 make_tuple = Primitive('make_tuple')
-tuple_getitem = Primitive('tuple_getitem')
+tuple_getitem = Primitive(Constants.kTupleGetItem)
 adam_apply_one_with_decay = Primitive('AdamApplyOneWithDecay')
 adam_apply_one_with_decay_assign = Primitive('AdamApplyOneWithDecayAssign')
 
diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/batch_norm_bert_fission_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/batch_norm_bert_fission_test.py
index e991c00a941..8b7e3f6d4fc 100644
--- a/tests/ut/cpp/python_input/gtest_input/pre_activate/batch_norm_bert_fission_test.py
+++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/batch_norm_bert_fission_test.py
@@ -14,9 +14,10 @@
 # ============================================================================
 from mindspore.ops import Primitive
 from mindspore.ops import operations as P
+from mindspore.ops import _constants as Constants
 
 make_tuple = Primitive('make_tuple')
-tuple_getitem = Primitive('tuple_getitem')
+tuple_getitem = Primitive(Constants.kTupleGetItem)
 BatchNorm = P.BatchNorm()
 BNTrainingReduce = Primitive('BNTrainingReduce')
 BNTrainingUpdateV2 = Primitive('BNTrainingUpdateV2')
diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/batch_norm_grad_infer_fission_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/batch_norm_grad_infer_fission_test.py
index f6bd83a99db..49b2e9fe46d 100644
--- a/tests/ut/cpp/python_input/gtest_input/pre_activate/batch_norm_grad_infer_fission_test.py
+++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/batch_norm_grad_infer_fission_test.py
@@ -14,9 +14,10 @@
 # ============================================================================
 from mindspore.ops import Primitive
 from mindspore.ops.operations import _grad_ops as G
+from mindspore.ops import _constants as Constants
 
 make_tuple = Primitive('make_tuple')
-tuple_getitem = Primitive('tuple_getitem')
+tuple_getitem = Primitive(Constants.kTupleGetItem)
 BatchNormGradTraining = G.BatchNormGrad(is_training=True)
 BatchNormGradInfer = G.BatchNormGrad(is_training=False)
 BNInferGrad = Primitive('BNInferGrad')
diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/batch_norm_grad_split.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/batch_norm_grad_split.py
index e67fbe5ec13..dfe96ba4af9 100644
--- a/tests/ut/cpp/python_input/gtest_input/pre_activate/batch_norm_grad_split.py
+++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/batch_norm_grad_split.py
@@ -15,12 +15,13 @@
 
 from mindspore.ops import Primitive
 from mindspore.ops.operations import _grad_ops as G
+from mindspore.ops import _constants as Constants
 
 batch_norm_grad = G.BatchNormGrad(is_training=True)
 bn_training_update_grad = Primitive('BNTrainingUpdateGrad')
 bn_training_reduce_grad = Primitive('BNTrainingReduceGrad')
 make_tuple = Primitive('make_tuple')
-tuple_getitem = Primitive('tuple_getitem')
+tuple_getitem = Primitive(Constants.kTupleGetItem)
 
 
 class FnDict:
diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/batchnorm_to_bninfer.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/batchnorm_to_bninfer.py
index 7abbd683fb1..2d407f32353 100644
--- a/tests/ut/cpp/python_input/gtest_input/pre_activate/batchnorm_to_bninfer.py
+++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/batchnorm_to_bninfer.py
@@ -15,11 +15,12 @@
 
 from mindspore.ops import Primitive
 from mindspore.ops import operations as P
+from mindspore.ops import _constants as Constants
 
 batch_norm = P.BatchNorm(is_training=False)
 bn_infer = Primitive('BNInfer')
 make_tuple = Primitive('make_tuple')
-tuple_getitem = Primitive('tuple_getitem')
+tuple_getitem = Primitive(Constants.kTupleGetItem)
 
 
 class FnDict:
diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/batchnormgrad_to_bninfergrad.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/batchnormgrad_to_bninfergrad.py
index 48cf28b325d..a2b1272954b 100644
--- a/tests/ut/cpp/python_input/gtest_input/pre_activate/batchnormgrad_to_bninfergrad.py
+++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/batchnormgrad_to_bninfergrad.py
@@ -15,11 +15,12 @@
 
 from mindspore.ops import Primitive
 from mindspore.ops.operations import _grad_ops as G
+from mindspore.ops import _constants as Constants
 
 batch_norm_grad = G.BatchNormGrad(is_training=False)
 bn_infer_grad = Primitive('BNInferGrad')
 make_tuple = Primitive('make_tuple')
-tuple_getitem = Primitive('tuple_getitem')
+tuple_getitem = Primitive(Constants.kTupleGetItem)
 
 
 class FnDict:
diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/bn_grad_split.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/bn_grad_split.py
index e44bb0ab8a3..d48c34895a2 100644
--- a/tests/ut/cpp/python_input/gtest_input/pre_activate/bn_grad_split.py
+++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/bn_grad_split.py
@@ -15,9 +15,10 @@
 
 from mindspore.ops import Primitive
 from mindspore.ops.operations import _grad_ops as G
+from mindspore.ops import _constants as Constants
 
 make_tuple = Primitive('make_tuple')
-tuple_getitem = Primitive('tuple_getitem')
+tuple_getitem = Primitive(Constants.kTupleGetItem)
 bn_grad = G.BatchNormGrad(is_training=True)
 bn_grad1 = Primitive('BNGrad1')
 bn_grad2 = Primitive('BNGrad2')
diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/bn_split.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/bn_split.py
index a801ce61459..d27bb2890b3 100644
--- a/tests/ut/cpp/python_input/gtest_input/pre_activate/bn_split.py
+++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/bn_split.py
@@ -15,9 +15,10 @@
 
 from mindspore.ops import Primitive
 from mindspore.ops import operations as P
+from mindspore.ops import _constants as Constants
 
 make_tuple = Primitive('make_tuple')
-tuple_getitem = Primitive('tuple_getitem')
+tuple_getitem = Primitive(Constants.kTupleGetItem)
 bn = P.BatchNorm(is_training=True)
 fused_bn1 = Primitive('FusedBN1')
 fused_bn2 = Primitive('FusedBN2')
diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/clip_by_norm_no_div_square_sum_fusion.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/clip_by_norm_no_div_square_sum_fusion.py
index 2a87acffdd7..54cac4d85c6 100644
--- a/tests/ut/cpp/python_input/gtest_input/pre_activate/clip_by_norm_no_div_square_sum_fusion.py
+++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/clip_by_norm_no_div_square_sum_fusion.py
@@ -14,6 +14,7 @@
 # ============================================================================
 from mindspore.ops import Primitive
 from mindspore.ops import operations as P
+from mindspore.ops import _constants as Constants
 
 select = P.Select()
 maximum = P.Maximum()
@@ -21,7 +22,7 @@ sqrt = P.Sqrt()
 greater = P.Greater()
 clip_by_norm_no_div_square_sum = Primitive('ClipByNormNoDivSum')
 make_tuple = Primitive('make_tuple')
-tuple_getitem = Primitive('tuple_getitem')
+tuple_getitem = Primitive(Constants.kTupleGetItem)
 
 
 class FnDict:
diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/clip_by_value_fusion.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/clip_by_value_fusion.py
index d7d70ce9771..c8390d60264 100644
--- a/tests/ut/cpp/python_input/gtest_input/pre_activate/clip_by_value_fusion.py
+++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/clip_by_value_fusion.py
@@ -14,12 +14,13 @@
 # ============================================================================
 from mindspore.ops import Primitive
 from mindspore.ops import operations as P
+from mindspore.ops import _constants as Constants
 
 maximum = P.Maximum()
 minimum = P.Minimum()
 clip_by_value = Primitive('ClipByValue')
 make_tuple = Primitive('make_tuple')
-tuple_getitem = Primitive('tuple_getitem')
+tuple_getitem = Primitive(Constants.kTupleGetItem)
 
 
 class FnDict:
diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/confusion_mul_grad_fusion.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/confusion_mul_grad_fusion.py
index 2f834abe777..a3eb0e27185 100644
--- a/tests/ut/cpp/python_input/gtest_input/pre_activate/confusion_mul_grad_fusion.py
+++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/confusion_mul_grad_fusion.py
@@ -14,13 +14,14 @@
 # ============================================================================
 from mindspore.ops import Primitive
 from mindspore.ops import operations as P
+from mindspore.ops import _constants as Constants
 
 addn = P.AddN()
 mul = P.Mul()
 reduce_sum = P.ReduceSum()
 confusion_mul_grad = Primitive('ConfusionMulGrad')
 make_tuple = Primitive('make_tuple')
-tuple_getitem = Primitive('tuple_getitem')
+tuple_getitem = Primitive(Constants.kTupleGetItem)
 axis = 1
 
 
diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/confusion_softmax_grad_rule.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/confusion_softmax_grad_rule.py
index 93902c24cac..6101c13e0ce 100644
--- a/tests/ut/cpp/python_input/gtest_input/pre_activate/confusion_softmax_grad_rule.py
+++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/confusion_softmax_grad_rule.py
@@ -13,6 +13,7 @@
 # limitations under the License.
 # ============================================================================
 from mindspore.ops import Primitive
+from mindspore.ops import _constants as Constants
 from mindspore.ops import operations as P
 
 mul = P.Mul()
@@ -20,7 +21,7 @@ reduce_sum = P.ReduceSum(keep_dims=True)
 sub = P.Sub()
 confusion_softmax_grad = Primitive('ConfusionSoftmaxGrad')
 make_tuple = Primitive('make_tuple')
-tuple_getitem = Primitive('tuple_getitem')
+tuple_getitem = Primitive(Constants.kTupleGetItem)
 axis = 2
 
 
diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/convert_tuple_output_to_maketuple_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/convert_tuple_output_to_maketuple_test.py
index b35466395a3..0e95ff9e143 100644
--- a/tests/ut/cpp/python_input/gtest_input/pre_activate/convert_tuple_output_to_maketuple_test.py
+++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/convert_tuple_output_to_maketuple_test.py
@@ -14,9 +14,10 @@
 # ============================================================================
 from mindspore.ops import Primitive
 from mindspore.ops import operations as P
+from mindspore.ops import _constants as Constants
 
 make_tuple = Primitive('make_tuple')
-tuple_get_item = Primitive("tuple_getitem")
+tuple_get_item = Primitive(Constants.kTupleGetItem)
 LSTM = P.LSTM(input_size=10, hidden_size=2, num_layers=1, has_bias=True, bidirectional=False, dropout=0.0)
 add = P.Add()
 
diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/derelu_fusion.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/derelu_fusion.py
index 906dca42ad2..496df8fb619 100644
--- a/tests/ut/cpp/python_input/gtest_input/pre_activate/derelu_fusion.py
+++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/derelu_fusion.py
@@ -14,13 +14,14 @@
 # ============================================================================
 from mindspore.ops import Primitive
 from mindspore.ops import operations as P
+from mindspore.ops import _constants as Constants
 
 relu = P.ReLU()
 relu_grad = Primitive('ReluGrad')
 relu_v2 = Primitive('ReLUV2')
 relu_grad_v2 = Primitive('ReluGradV2')
 make_tuple = Primitive('make_tuple')
-tuple_getitem = Primitive('tuple_getitem')
+tuple_getitem = Primitive(Constants.kTupleGetItem)
 
 
 class FnDict:
diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/fused_batch_norm_fusion_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/fused_batch_norm_fusion_test.py
index 472e7a5d4be..8223dbf59e3 100644
--- a/tests/ut/cpp/python_input/gtest_input/pre_activate/fused_batch_norm_fusion_test.py
+++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/fused_batch_norm_fusion_test.py
@@ -17,12 +17,13 @@ from mindspore.common.tensor import Tensor
 from mindspore.ops import Primitive
 from mindspore.ops import operations as P
 from mindspore.ops import functional as F
+from mindspore.ops import _constants as Constants
 
 AssignSub = P.AssignSub()
 Mul = P.Mul()
 Sub = P.Sub()
 make_tuple = Primitive('make_tuple')
-tuple_getitem = Primitive('tuple_getitem')
+tuple_getitem = Primitive(Constants.kTupleGetItem)
 BatchNorm = P.BatchNorm()
 Cast = P.Cast()
 BNTrainingReduce = Primitive('BNTrainingReduce')
diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/getitem_tuple.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/getitem_tuple.py
index ca61996dc20..dc87e15cfb6 100644
--- a/tests/ut/cpp/python_input/gtest_input/pre_activate/getitem_tuple.py
+++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/getitem_tuple.py
@@ -14,9 +14,10 @@
 # ============================================================================
 
 from mindspore.ops import Primitive
+from mindspore.ops import _constants as Constants
 
 make_tuple = Primitive('make_tuple')
-tuple_getitem = Primitive('tuple_getitem')
+tuple_getitem = Primitive(Constants.kTupleGetItem)
 
 
 class FnDict:
diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/hw_opt_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/hw_opt_test.py
index 0bea8450f74..554f5c7ca70 100644
--- a/tests/ut/cpp/python_input/gtest_input/pre_activate/hw_opt_test.py
+++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/hw_opt_test.py
@@ -14,8 +14,9 @@
 # ============================================================================
 from mindspore.ops import Primitive
 from mindspore.ops import operations as P
+from mindspore.ops import _constants as Constants
 
-tuple_getitem = Primitive('tuple_getitem')
+tuple_getitem = Primitive(Constants.kTupleGetItem)
 depend = P.Depend()
 addn = P.AddN()
 add = P.Add()
diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/insert_memcpy_async_for_getnext.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/insert_memcpy_async_for_getnext.py
index a901d0f4b6d..52541ccc52a 100644
--- a/tests/ut/cpp/python_input/gtest_input/pre_activate/insert_memcpy_async_for_getnext.py
+++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/insert_memcpy_async_for_getnext.py
@@ -15,12 +15,13 @@
 
 import mindspore as ms
 from mindspore.ops import Primitive
+from mindspore.ops import _constants as Constants
 from mindspore.ops import operations as P
 
 get_next = P.GetNext([ms.float32, ms.int32], [[32, 64], [32]], 2, "")
 memcpy_async = Primitive('memcpy_async')
 make_tuple = Primitive('make_tuple')
-tuple_getitem = Primitive('tuple_getitem')
+tuple_getitem = Primitive(Constants.kTupleGetItem)
 
 
 class FnDict:
diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/insert_memcpy_async_for_hccl_op.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/insert_memcpy_async_for_hccl_op.py
index 082c8144f57..d7cfd5af3c2 100644
--- a/tests/ut/cpp/python_input/gtest_input/pre_activate/insert_memcpy_async_for_hccl_op.py
+++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/insert_memcpy_async_for_hccl_op.py
@@ -15,12 +15,13 @@
 
 from mindspore.ops import Primitive
 from mindspore.ops import operations as P
+from mindspore.ops import _constants as Constants
 
 all_reduce = P.AllReduce()
 broadcast = P.Broadcast(1)
 memcpy_async = Primitive('memcpy_async')
 make_tuple = Primitive('make_tuple')
-tuple_getitem = Primitive('tuple_getitem')
+tuple_getitem = Primitive(Constants.kTupleGetItem)
 apply_momentun = P.ApplyMomentum()
 control_depend = P.ControlDepend()
 relu = P.ReLU()
diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/insert_trans_op_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/insert_trans_op_test.py
index b48b99aa1cd..668a8ac4d6d 100644
--- a/tests/ut/cpp/python_input/gtest_input/pre_activate/insert_trans_op_test.py
+++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/insert_trans_op_test.py
@@ -14,8 +14,9 @@
 # ============================================================================
 from mindspore.ops import Primitive
 from mindspore.ops import operations as P
+from mindspore.ops import _constants as Constants
 
-tuple_getitem = Primitive('tuple_getitem')
+tuple_getitem = Primitive(Constants.kTupleGetItem)
 add = P.Add()
 max_pool = P.MaxPoolWithArgmax(pad_mode="same", kernel_size=3, strides=2)
 make_tuple = Primitive('make_tuple')
diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/ir_fusion_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/ir_fusion_test.py
index 431a5a32268..bec8c083547 100644
--- a/tests/ut/cpp/python_input/gtest_input/pre_activate/ir_fusion_test.py
+++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/ir_fusion_test.py
@@ -15,14 +15,15 @@
 from mindspore.ops import Primitive
 from mindspore.ops import operations as P
 from mindspore.ops.operations import _grad_ops as G
+from mindspore.ops import _constants as Constants
 
 # pylint: disable=unused-variable
 
-tuple_getitem = Primitive('tuple_getitem')
+tuple_getitem = Primitive(Constants.kTupleGetItem)
 add = P.Add()
 allreduce = P.AllReduce()
 allreduce.add_prim_attr('fusion', 1)
-make_tuple = Primitive('make_tuple')
+make_tuple = Primitive("make_tuple")
 conv = P.Conv2D(out_channel=64, kernel_size=7, mode=1, pad_mode="valid", pad=0, stride=1, dilation=1, group=1)
 bn = P.FusedBatchNorm()
 relu = P.ReLU()
diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/lamb_next_mv_rule_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/lamb_next_mv_rule_test.py
index 4628663cc46..756ee84a8e9 100644
--- a/tests/ut/cpp/python_input/gtest_input/pre_activate/lamb_next_mv_rule_test.py
+++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/lamb_next_mv_rule_test.py
@@ -14,6 +14,7 @@
 # ============================================================================
 from mindspore.ops import Primitive
 from mindspore.ops import operations as P
+from mindspore.ops import _constants as Constants
 
 Add = P.Add()
 Mul = P.Mul()
@@ -21,7 +22,7 @@ RealDiv = P.RealDiv()
 Rsqrt = P.Rsqrt()
 Sqrt = P.Sqrt()
 make_tuple = Primitive('make_tuple')
-tuple_getitem = Primitive('tuple_getitem')
+tuple_getitem = Primitive(Constants.kTupleGetItem)
 LambNextMV = Primitive('LambNextMV')
 
 class FnDict:
diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/lamb_next_mv_with_decay_rule_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/lamb_next_mv_with_decay_rule_test.py
index 4de8af21240..176845d81e3 100644
--- a/tests/ut/cpp/python_input/gtest_input/pre_activate/lamb_next_mv_with_decay_rule_test.py
+++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/lamb_next_mv_with_decay_rule_test.py
@@ -14,6 +14,7 @@
 # ============================================================================
 from mindspore.ops import Primitive
 from mindspore.ops import operations as P
+from mindspore.ops import _constants as Constants
 
 Add = P.Add()
 Mul = P.Mul()
@@ -21,7 +22,7 @@ RealDiv = P.RealDiv()
 Rsqrt = P.Rsqrt()
 Sqrt = P.Sqrt()
 make_tuple = Primitive('make_tuple')
-tuple_getitem = Primitive('tuple_getitem')
+tuple_getitem = Primitive(Constants.kTupleGetItem)
 LambNextMVWithDecay = Primitive('LambNextMVWithDecay')
 
 class FnDict:
diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/lamb_next_mv_with_decay_v1_rule.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/lamb_next_mv_with_decay_v1_rule.py
index eeae90cb322..4c6f5b53ffd 100644
--- a/tests/ut/cpp/python_input/gtest_input/pre_activate/lamb_next_mv_with_decay_v1_rule.py
+++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/lamb_next_mv_with_decay_v1_rule.py
@@ -14,6 +14,7 @@
 # ============================================================================
 from mindspore.ops import Primitive
 from mindspore.ops import operations as P
+from mindspore.ops import _constants as Constants
 
 add = P.Add()
 mul = P.Mul()
@@ -21,7 +22,7 @@ real_div = P.RealDiv()
 rsqrt = P.Rsqrt()
 sqrt = P.Sqrt()
 make_tuple = Primitive('make_tuple')
-tuple_getitem = Primitive('tuple_getitem')
+tuple_getitem = Primitive(Constants.kTupleGetItem)
 LambNextMVWithDecayV1 = Primitive('LambNextMVWithDecayV1')
 
 
diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/lamb_next_right_rule_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/lamb_next_right_rule_test.py
index 2a159c40b7a..526e36e8840 100644
--- a/tests/ut/cpp/python_input/gtest_input/pre_activate/lamb_next_right_rule_test.py
+++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/lamb_next_right_rule_test.py
@@ -14,13 +14,14 @@
 # ============================================================================
 from mindspore.ops import Primitive
 from mindspore.ops import operations as P
+from mindspore.ops import _constants as Constants
 
 Add = P.Add()
 Mul = P.Mul()
 Sqrt = P.Sqrt()
 Square = P.Square()
 make_tuple = Primitive('make_tuple')
-tuple_getitem = Primitive('tuple_getitem')
+tuple_getitem = Primitive(Constants.kTupleGetItem)
 LambNextRight = Primitive('LambNextRight')
 
 
diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/lamb_update_with_lr_rule_fusion.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/lamb_update_with_lr_rule_fusion.py
index 974aab9a7ed..f18cbcd0ac2 100644
--- a/tests/ut/cpp/python_input/gtest_input/pre_activate/lamb_update_with_lr_rule_fusion.py
+++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/lamb_update_with_lr_rule_fusion.py
@@ -14,6 +14,7 @@
 # ============================================================================
 from mindspore.ops import Primitive
 from mindspore.ops import operations as P
+from mindspore.ops import _constants as Constants
 
 select = P.Select()
 maximum = P.Maximum()
@@ -24,7 +25,7 @@ mul = P.Mul()
 sub = P.Sub()
 lamb_update_with_lr = Primitive('LambUpdateWithLR')
 make_tuple = Primitive('make_tuple')
-tuple_getitem = Primitive('tuple_getitem')
+tuple_getitem = Primitive(Constants.kTupleGetItem)
 
 
 class FnDict:
diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/lamb_update_with_lr_v2_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/lamb_update_with_lr_v2_test.py
index 92b14c6c904..d05fa84ac30 100644
--- a/tests/ut/cpp/python_input/gtest_input/pre_activate/lamb_update_with_lr_v2_test.py
+++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/lamb_update_with_lr_v2_test.py
@@ -14,6 +14,7 @@
 # ============================================================================
 from mindspore.ops import Primitive
 from mindspore.ops import operations as P
+from mindspore.ops import _constants as Constants
 
 Sub = P.Sub()
 Mul = P.Mul()
@@ -21,7 +22,7 @@ RealDiv = P.RealDiv()
 Select = P.Select()
 Greater = P.Greater()
 make_tuple = Primitive('make_tuple')
-tuple_getitem = Primitive('tuple_getitem')
= Primitive('tuple_getitem') +tuple_getitem = Primitive(Constants.kTupleGetItem) LambUpdateWithLrV2 = Primitive('LambUpdateWithLrV2') diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/lars_v2_fission_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/lars_v2_fission_test.py index a144f0677e2..deb76a80966 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/lars_v2_fission_test.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/lars_v2_fission_test.py @@ -14,12 +14,13 @@ # ============================================================================ from mindspore.ops import Primitive +from mindspore.ops import _constants as Constants lars_v2 = Primitive('LarsV2') square_sum_all = Primitive('SquareSumAll') lars_v2_update = Primitive('LarsV2Update') make_tuple = Primitive('make_tuple') -tuple_getitem = Primitive('tuple_getitem') +tuple_getitem = Primitive(Constants.kTupleGetItem) class FnDict: diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/layer_norm_beta_gamma_backprop_fusion_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/layer_norm_beta_gamma_backprop_fusion_test.py index db6396d9ee8..5bc6af59f84 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/layer_norm_beta_gamma_backprop_fusion_test.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/layer_norm_beta_gamma_backprop_fusion_test.py @@ -14,11 +14,12 @@ # ============================================================================ from mindspore.ops import Primitive from mindspore.ops import operations as P +from mindspore.ops import _constants as Constants Add = P.Add() Cast = P.Cast() LayerNormBetaGammaBackprop = Primitive('LayerNormBetaGammaBackprop') -tuple_getitem = Primitive('tuple_getitem') +tuple_getitem = Primitive(Constants.kTupleGetItem) class FnDict: diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/layer_norm_grad_split.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/layer_norm_grad_split.py index 7eb85c33a58..c4dba5ccef2 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/layer_norm_grad_split.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/layer_norm_grad_split.py @@ -15,9 +15,10 @@ from mindspore.ops import Primitive from mindspore.ops.operations import _grad_ops as G +from mindspore.ops import _constants as Constants make_tuple = Primitive('make_tuple') -tuple_getitem = Primitive('tuple_getitem') +tuple_getitem = Primitive(Constants.kTupleGetItem) layer_norm_grad = G.LayerNormGrad() layer_norm_x_backprop = Primitive('LayerNormXBackprop') layer_norm_beta_gamma_backprop = Primitive('LayerNormBetaGammaBackprop') diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/mixed_precision_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/mixed_precision_test.py index 164e9a49b7d..039c29444f3 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/mixed_precision_test.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/mixed_precision_test.py @@ -14,8 +14,9 @@ # ============================================================================ from mindspore.ops import Primitive from mindspore.ops import operations as P +from mindspore.ops import _constants as Constants -tuple_getitem = Primitive('tuple_getitem') +tuple_getitem = Primitive(Constants.kTupleGetItem) depend = P.Depend() addn = P.AddN() add = P.Add() diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/momentum_lossscale_fusion_test.py 
b/tests/ut/cpp/python_input/gtest_input/pre_activate/momentum_lossscale_fusion_test.py index 3302daa8790..fd6f44b021f 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/momentum_lossscale_fusion_test.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/momentum_lossscale_fusion_test.py @@ -16,11 +16,12 @@ import mindspore.common.dtype as mstype from mindspore.common.tensor import Tensor from mindspore.ops import Primitive from mindspore.ops import operations as P +from mindspore.ops import _constants as Constants Mul = P.Mul() ApplyMomentum = P.ApplyMomentum() FusedMulApplyMomentum = Primitive('FusedMulApplyMomentum') -tuple_getitem = Primitive('tuple_getitem') +tuple_getitem = Primitive(Constants.kTupleGetItem) make_tuple = Primitive('make_tuple') constant = Tensor(1.0, mstype.float32) diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/mul_add_fusion_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/mul_add_fusion_test.py index 4a0ab550f3c..53f511deafe 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/mul_add_fusion_test.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/mul_add_fusion_test.py @@ -14,12 +14,13 @@ # ============================================================================ from mindspore.ops import Primitive from mindspore.ops import operations as P +from mindspore.ops import _constants as Constants add = P.Add() mul = P.Mul() fused_mul_add = Primitive('FusedMulAdd') make_tuple = Primitive('make_tuple') -tuple_getitem = Primitive('tuple_getitem') +tuple_getitem = Primitive(Constants.kTupleGetItem) class FnDict: diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/mul_addn_fusion_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/mul_addn_fusion_test.py index e278068fdb8..cfd4b1e1a5b 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/mul_addn_fusion_test.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/mul_addn_fusion_test.py @@ -16,12 +16,13 @@ import mindspore.common.dtype as mstype from mindspore.common.tensor import Tensor from mindspore.ops import Primitive from mindspore.ops import operations as P +from mindspore.ops import _constants as Constants addn = P.AddN() mul = P.Mul() fused_mul_addn = Primitive('FusedMulAddN') make_tuple = Primitive('make_tuple') -tuple_getitem = Primitive('tuple_getitem') +tuple_getitem = Primitive(Constants.kTupleGetItem) scalar = Tensor(1.0, mstype.float32) diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/reduce_min_fission_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/reduce_min_fission_test.py index 7690023e011..d8538e597c1 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/reduce_min_fission_test.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/reduce_min_fission_test.py @@ -15,9 +15,10 @@ from mindspore.ops import Primitive from mindspore.ops import operations as P +from mindspore.ops import _constants as Constants make_tuple = Primitive('make_tuple') -tuple_getitem = Primitive('tuple_getitem') +tuple_getitem = Primitive(Constants.kTupleGetItem) reduce_min = P.ReduceMin(keep_dims=False) reduce_min1 = Primitive('ReduceMin') reduce_min2 = Primitive('ReduceMin') diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/remove_internal_output_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/remove_internal_output_test.py index 2b32d3036df..54c9e6b56ed 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/remove_internal_output_test.py +++ 
b/tests/ut/cpp/python_input/gtest_input/pre_activate/remove_internal_output_test.py @@ -14,8 +14,9 @@ # ============================================================================ from mindspore.ops import Primitive from mindspore.ops import operations as P +from mindspore.ops import _constants as Constants -tuple_getitem = Primitive('tuple_getitem') +tuple_getitem = Primitive(Constants.kTupleGetItem) add = P.Add() max_pool = P.MaxPoolWithArgmax(pad_mode="same", kernel_size=3, strides=2) make_tuple = Primitive('make_tuple') diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/single_batch_norm_fission_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/single_batch_norm_fission_test.py index 1ea31fba505..5ad492a1978 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/single_batch_norm_fission_test.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/single_batch_norm_fission_test.py @@ -14,9 +14,10 @@ # ============================================================================ from mindspore.ops import Primitive from mindspore.ops import operations as P +from mindspore.ops import _constants as Constants make_tuple = Primitive('make_tuple') -tuple_getitem = Primitive('tuple_getitem') +tuple_getitem = Primitive(Constants.kTupleGetItem) BatchNorm = P.BatchNorm(is_training=True) BNTrainingReduce = Primitive('BNTrainingReduce') BNTrainingUpdateV3 = Primitive('BNTrainingUpdateV3') diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/softmax_grad_ext_fusion.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/softmax_grad_ext_fusion.py index 52ba86aaa32..f5caafd0882 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/softmax_grad_ext_fusion.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/softmax_grad_ext_fusion.py @@ -14,13 +14,14 @@ # ============================================================================ from mindspore.ops import Primitive from mindspore.ops import operations as P +from mindspore.ops import _constants as Constants Mul = P.Mul() ReduceSum = P.ReduceSum(keep_dims=True) Sub = P.Sub() SoftmaxGradExt = Primitive('SoftmaxGradExt') MakeTuple = Primitive('make_tuple') -TupleGetItem = Primitive('tuple_getitem') +TupleGetItem = Primitive(Constants.kTupleGetItem) axes = (2, 3) diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/split_fission_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/split_fission_test.py index b25fa1f5d00..7a42d0b275c 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/split_fission_test.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/split_fission_test.py @@ -15,10 +15,11 @@ from mindspore.ops import Primitive from mindspore.ops import operations as P +from mindspore.ops import _constants as Constants split = P.Split(0, 8) make_tuple = Primitive('make_tuple') -tuple_getitem = Primitive('tuple_getitem') +tuple_getitem = Primitive(Constants.kTupleGetItem) splitv = Primitive('SplitV') diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/square_sum_fusion.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/square_sum_fusion.py index 23fb56e3e8a..b03ed9d343b 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/square_sum_fusion.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/square_sum_fusion.py @@ -14,9 +14,10 @@ # ============================================================================ from mindspore.ops import Primitive from mindspore.ops import operations as P +from mindspore.ops import 
_constants as Constants make_tuple = Primitive('make_tuple') -tuple_getitem = Primitive('tuple_getitem') +tuple_getitem = Primitive(Constants.kTupleGetItem) square = P.Square() reduce_sum = P.ReduceSum() square_sumv1 = Primitive('SquareSumV1') diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/tensor_scatter_update_fission_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/tensor_scatter_update_fission_test.py index 4a84f346072..d42023e54d5 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/tensor_scatter_update_fission_test.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/tensor_scatter_update_fission_test.py @@ -14,12 +14,13 @@ # ============================================================================ from mindspore.ops import Primitive from mindspore.ops import operations as P +from mindspore.ops import _constants as Constants tensor_scatter_update = P.TensorScatterUpdate() tensor_move = Primitive('TensorMove') scatter_nd_update = Primitive('ScatterNdUpdate') make_tuple = Primitive('make_tuple') -tuple_getitem = Primitive('tuple_getitem') +tuple_getitem = Primitive(Constants.kTupleGetItem) class FnDict: diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/topk_split_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/topk_split_test.py index 2b3f9f79f86..0a44ddc774c 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/topk_split_test.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/topk_split_test.py @@ -15,9 +15,10 @@ from mindspore.ops import Primitive from mindspore.ops import operations as P +from mindspore.ops import _constants as Constants TopK = P.TopK() -tuple_getitem = Primitive('tuple_getitem') +tuple_getitem = Primitive(Constants.kTupleGetItem) class FnDict: diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/transdata_split_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/transdata_split_test.py index 2bf9401a8e3..0770ff0ca9c 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/transdata_split_test.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/transdata_split_test.py @@ -14,8 +14,9 @@ # ============================================================================ from mindspore.ops import Primitive from mindspore.ops import operations as P +from mindspore.ops import _constants as Constants -tuple_getitem = Primitive('tuple_getitem') +tuple_getitem = Primitive(Constants.kTupleGetItem) add = P.Add() max_pool = P.MaxPoolWithArgmax(pad_mode="same", kernel_size=3, strides=2) make_tuple = Primitive('make_tuple') diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/transpose_transdata_fusion_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/transpose_transdata_fusion_test.py index 436cd04374c..23282624fb2 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/transpose_transdata_fusion_test.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/transpose_transdata_fusion_test.py @@ -14,8 +14,9 @@ # ============================================================================ from mindspore.ops import Primitive from mindspore.ops import operations as P +from mindspore.ops import _constants as Constants -tuple_getitem = Primitive('tuple_getitem') +tuple_getitem = Primitive(Constants.kTupleGetItem) add = P.Add() max_pool = P.MaxPoolWithArgmax(pad_mode="same", kernel_size=3, strides=2) make_tuple = Primitive('make_tuple') diff --git 
a/tests/ut/cpp/python_input/gtest_input/pre_activate/unsorted_segment_sum_fission.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/unsorted_segment_sum_fission.py index c7d6b946f07..9f925254652 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/unsorted_segment_sum_fission.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/unsorted_segment_sum_fission.py @@ -14,9 +14,10 @@ # ============================================================================ from mindspore.ops import Primitive from mindspore.ops import operations as P +from mindspore.ops import _constants as Constants make_tuple = Primitive('make_tuple') -tuple_getitem = Primitive('tuple_getitem') +tuple_getitem = Primitive(Constants.kTupleGetItem) unsorted_segment_sum = P.UnsortedSegmentSum() num_segments = 4 padding = Primitive('Padding') diff --git a/tests/ut/cpp/python_input/gtest_input/session/session_test.py b/tests/ut/cpp/python_input/gtest_input/session/session_test.py index dea463dc1f7..7109bf76051 100644 --- a/tests/ut/cpp/python_input/gtest_input/session/session_test.py +++ b/tests/ut/cpp/python_input/gtest_input/session/session_test.py @@ -15,12 +15,13 @@ import mindspore.common.dtype as mstype from mindspore.ops import Primitive from mindspore.ops import operations as P +from mindspore.ops import _constants as Constants addn = P.AddN() add = P.Add() reshape = P.Reshape() cast = P.Cast() -tuple_getitem = Primitive('tuple_getitem') +tuple_getitem = Primitive(Constants.kTupleGetItem) max_pool = P.MaxPoolWithArgmax(pad_mode="same", kernel_size=3, strides=2) diff --git a/tests/ut/cpp/python_input/gtest_input/transform/multi_relu_case.py b/tests/ut/cpp/python_input/gtest_input/transform/multi_relu_case.py index 2b95acb0251..d6f1ae37ffd 100644 --- a/tests/ut/cpp/python_input/gtest_input/transform/multi_relu_case.py +++ b/tests/ut/cpp/python_input/gtest_input/transform/multi_relu_case.py @@ -14,6 +14,7 @@ # ============================================================================ """ multi_relu_case """ from mindspore.ops import Primitive +from mindspore.ops import _constants as Constants # Test user define ops @@ -21,7 +22,7 @@ def get_test_ops_fn(): return test_ops_f -scalar_mul = Primitive('scalar_mul') +scalar_mul = Primitive(Constants.kScalarMul) def test_ops_f(x, y): diff --git a/tests/ut/cpp/python_input/gtest_input/vm/vm_test.py b/tests/ut/cpp/python_input/gtest_input/vm/vm_test.py index f9ed0ca900b..a5b76423e59 100644 --- a/tests/ut/cpp/python_input/gtest_input/vm/vm_test.py +++ b/tests/ut/cpp/python_input/gtest_input/vm/vm_test.py @@ -14,18 +14,19 @@ # ============================================================================ """ vm_test """ from mindspore.ops import Primitive +from mindspore.ops import _constants as Constants -scala_add = Primitive('scalar_add') -scala_mul = Primitive('scalar_mul') +scala_add = Primitive(Constants.kScalarAdd) +scala_mul = Primitive(Constants.kScalarMul) scalar_gt = Primitive('scalar_gt') -def scalar_add(x, y): +def ScalarAdd(x, y): """Implement `scalar_add`.""" return scala_add(x, y) -def scalar_mul(x, y): +def ScalarMul(x, y): """Implement `scalar_mul`.""" return scala_mul(x, y) diff --git a/tests/ut/cpp/utils/validator_test.cc b/tests/ut/cpp/utils/validator_test.cc index 09ab759fcbe..89875846a7f 100644 --- a/tests/ut/cpp/utils/validator_test.cc +++ b/tests/ut/cpp/utils/validator_test.cc @@ -23,6 +23,7 @@ #include "ir/manager.h" #include "pipeline/jit/static_analysis/prim.h" #include "frontend/operator/ops.h" +#include "base/core_ops.h" namespace 
mindspore { namespace validator { @@ -35,7 +36,7 @@ class TestValidator : public UT::Common { }; TEST_F(TestValidator, ValidateOperation01) { - auto node = NewValueNode(std::make_shared("scalar_add")); + auto node = NewValueNode(std::make_shared(prim::kScalarAdd)); ValidateOperation(node); // normally, the above statement should not exit, so expected the following statement execute EXPECT_TRUE(true); diff --git a/tests/ut/cpp/vm/segment_runner_test.cc b/tests/ut/cpp/vm/segment_runner_test.cc index bafda55c553..cb112c5fd23 100644 --- a/tests/ut/cpp/vm/segment_runner_test.cc +++ b/tests/ut/cpp/vm/segment_runner_test.cc @@ -31,6 +31,7 @@ #include "utils/convert_utils.h" #include "utils/convert_utils_py.h" #include "utils/log_adapter.h" +#include "base/core_ops.h" namespace mindspore { namespace compile { @@ -46,7 +47,7 @@ class TestCompileSegmentRunner : public UT::Common { }; TEST_F(TestCompileSegmentRunner, test_MsVmConvert1) { - FuncGraphPtr g = get_py_fun_("scalar_add"); + FuncGraphPtr g = get_py_fun_(prim::kScalarAdd); // g was managed by local variable manager in get_py_fun_ and that manager will be freed as no reference. // so a new manager should be declared to make get_outputs() in segment_runner.cc happy. std::shared_ptr manager = mindspore::Manage(g); @@ -62,7 +63,7 @@ TEST_F(TestCompileSegmentRunner, test_MsVmConvert1) { } TEST_F(TestCompileSegmentRunner, test_MsVmConvert2) { - FuncGraphPtr g = get_py_fun_("scalar_mul"); + FuncGraphPtr g = get_py_fun_(prim::kScalarMul); std::shared_ptr manager = mindspore::Manage(g); BackendPtr b = std::make_shared("vm"); diff --git a/tests/ut/python/optimizer/test_python_pass.py b/tests/ut/python/optimizer/test_python_pass.py index b6df9a1a201..f156beabc46 100644 --- a/tests/ut/python/optimizer/test_python_pass.py +++ b/tests/ut/python/optimizer/test_python_pass.py @@ -19,6 +19,7 @@ import mindspore.nn as nn from mindspore import context from mindspore.common.tensor import Tensor from mindspore.ops import operations as P +from mindspore.ops import _constants as Constants from mindspore.graph_utils.python_pass import registe_pass, unregiste_pass, set_renorm, gen_new_parameter,\ cancel_new_parameter, set_reopt from mindspore.common.api import _generate_pip_args @@ -296,12 +297,12 @@ def test_imm_target(): pattern = Call(P.Softmax(), [x]) imm = Imm(0) target_0 = Call("make_tuple", [pattern]) - target = Call("tuple_getitem", [target_0, imm]) + target = Call(Constants.kTupleGetItem, [target_0, imm]) return pattern, target transformed_repr = get_func_graph(softmax_model, inputs).get_return().expanded_str(5) unregiste_pass(softmax_pass) assert "make_tuple" in transformed_repr - assert "tuple_getitem" in transformed_repr + assert Constants.kTupleGetItem in transformed_repr assert "Softmax" in transformed_repr def test_gen_new_parameter(): diff --git a/tests/ut/python/pynative_mode/ops/test_hypermap.py b/tests/ut/python/pynative_mode/ops/test_hypermap.py index 7105634e49e..7710574d8aa 100644 --- a/tests/ut/python/pynative_mode/ops/test_hypermap.py +++ b/tests/ut/python/pynative_mode/ops/test_hypermap.py @@ -18,6 +18,7 @@ import numpy as np from mindspore import Tensor from mindspore.common.api import ms_function from mindspore.ops import Primitive +from mindspore.ops import _constants from mindspore.ops import composite as C from mindspore.ops import functional as F from mindspore.ops import operations as P @@ -28,7 +29,7 @@ from ...ut_filter import non_graph_engine tensor_add = P.Add() -scala_add = Primitive('scalar_add') +scala_add = 
Primitive(_constants.kScalarAdd) add = C.MultitypeFuncGraph('add') diff --git a/tests/ut/python/pynative_mode/ops/test_multitype.py b/tests/ut/python/pynative_mode/ops/test_multitype.py index 8b3f491f4d4..d5fec7d5f64 100644 --- a/tests/ut/python/pynative_mode/ops/test_multitype.py +++ b/tests/ut/python/pynative_mode/ops/test_multitype.py @@ -21,12 +21,13 @@ from mindspore.common.parameter import Parameter from mindspore.ops import Primitive from mindspore.ops import composite as C from mindspore.ops import operations as P +from mindspore.ops import _constants from mindspore import dtype as mstype from ...ut_filter import non_graph_engine tensor_add = P.Add() op_add = P.AddN() -scala_add = Primitive('scalar_add') +scala_add = Primitive(_constants.kScalarAdd) add = C.MultitypeFuncGraph('add')
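
Reviewer note (not part of the patch): a minimal sketch of the pattern this change applies across the test inputs. Primitive names now come from mindspore.ops._constants instead of bare string literals, so a misspelled name fails as an AttributeError the moment the test input is imported, rather than silently constructing a primitive that no fusion pass ever matches. The before_pass graph below is hypothetical; only Primitive, operations as P, and the _constants module are taken from the diff.

from mindspore.ops import Primitive
from mindspore.ops import operations as P
from mindspore.ops import _constants as Constants

# A typo such as Constants.kTupleGetItme raises AttributeError at import
# time; the old literal 'tuple_getitme' would have loaded without complaint.
make_tuple = Primitive('make_tuple')
tuple_getitem = Primitive(Constants.kTupleGetItem)
max_pool = P.MaxPoolWithArgmax(pad_mode="same", kernel_size=3, strides=2)


def before_pass(x):
    """Hypothetical test-input graph: select output 0 of MaxPoolWithArgmax."""
    res = max_pool(x)
    return tuple_getitem(res, 0)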