!44253 remove operator ScalarToArray

Merge pull request !44253 from zhujingxuan/remove
This commit is contained in:
i-robot 2022-10-20 06:34:48 +00:00 committed by Gitee
commit e0f7852c5c
No known key found for this signature in database
GPG Key ID: 173E9B9CA92EEF8F
16 changed files with 20 additions and 131 deletions

View File

@ -386,7 +386,6 @@ Array操作
:template: classtemplate.rst
mindspore.ops.scalar_cast
mindspore.ops.scalar_to_array
mindspore.ops.scalar_to_tensor
mindspore.ops.tuple_to_array

View File

@ -466,7 +466,6 @@ Array操作
:template: classtemplate.rst
mindspore.ops.ScalarCast
mindspore.ops.ScalarToArray
mindspore.ops.ScalarToTensor
mindspore.ops.TupleToArray

View File

@ -1,8 +0,0 @@
mindspore.ops.ScalarToArray
=============================
.. py:class:: mindspore.ops.ScalarToArray
将Scalar转换为 `Tensor`
更多参考详见 :func:`mindspore.ops.scalar_to_array`

View File

@ -1,15 +0,0 @@
mindspore.ops.scalar_to_array
=============================
.. py:function:: mindspore.ops.scalar_to_array(input_x)
将Scalar转换为 `Tensor`
参数:
- **input_x** (Union[int, float]) - ScalarToArray的输入是Scalar且只能是常量值。
返回:
Tensor。0维Tensor，其值和输入一致。
异常:
- **TypeError** - `input_x` 既不是int也不是float。

View File

@ -386,7 +386,6 @@ Type Conversion
:template: classtemplate.rst
mindspore.ops.scalar_cast
mindspore.ops.scalar_to_array
mindspore.ops.scalar_to_tensor
mindspore.ops.tuple_to_array

View File

@ -466,7 +466,6 @@ Type Conversion
:template: classtemplate.rst
mindspore.ops.ScalarCast
mindspore.ops.ScalarToArray
mindspore.ops.ScalarToTensor
mindspore.ops.TupleToArray

View File

@ -72,29 +72,27 @@ BuiltInTypeMap &GetMethodMap() {
{"__le__", prim::kPrimScalarLe}, // P.scalar_le
{"__ge__", prim::kPrimScalarGe}, // P.scalar_ge
{"__bool__", std::string("int_bool")}, // C.int_bool
{"__ms_to_array__", prim::kPrimScalarToArray}, // P.scalar_to_array
}},
{kNumberTypeUInt,
{
{"__add__", prim::kPrimScalarAdd}, // P.scalar_add,
{"__sub__", prim::kPrimScalarSub}, // P.scalar_sub,
{"__mul__", prim::kPrimScalarMul}, // P.scalar_mul,
{"__floordiv__", prim::kPrimScalarDiv}, // P.scalar_div,
{"__truediv__", std::string("int_truediv")}, // C.int_truediv
{"__mod__", prim::kPrimScalarMod}, // P.scalar_mod,
{"__pow__", prim::kPrimScalarPow}, // P.scalar_pow,
{"__floor__", prim::kPrimIdentity}, // P.identity,
{"__trunc__", prim::kPrimIdentity}, // P.identity,
{"__pos__", prim::kPrimScalarUadd}, // P.scalar_uadd,
{"__neg__", prim::kPrimScalarUsub}, // P.scalar_usub,
{"__eq__", prim::kPrimScalarEq}, // P.scalar_eq,
{"__ne__", prim::kPrimScalarNe}, // P.scalar_ne,
{"__lt__", prim::kPrimScalarLt}, // P.scalar_lt,
{"__gt__", prim::kPrimScalarGt}, // P.scalar_gt,
{"__le__", prim::kPrimScalarLe}, // P.scalar_le,
{"__ge__", prim::kPrimScalarGe}, // P.scalar_ge,
{"__bool__", std::string("int_bool")}, // C.int_bool
{"__ms_to_array__", prim::kPrimScalarToArray}, // P.scalar_to_array,
{"__add__", prim::kPrimScalarAdd}, // P.scalar_add,
{"__sub__", prim::kPrimScalarSub}, // P.scalar_sub,
{"__mul__", prim::kPrimScalarMul}, // P.scalar_mul,
{"__floordiv__", prim::kPrimScalarDiv}, // P.scalar_div,
{"__truediv__", std::string("int_truediv")}, // C.int_truediv
{"__mod__", prim::kPrimScalarMod}, // P.scalar_mod,
{"__pow__", prim::kPrimScalarPow}, // P.scalar_pow,
{"__floor__", prim::kPrimIdentity}, // P.identity,
{"__trunc__", prim::kPrimIdentity}, // P.identity,
{"__pos__", prim::kPrimScalarUadd}, // P.scalar_uadd,
{"__neg__", prim::kPrimScalarUsub}, // P.scalar_usub,
{"__eq__", prim::kPrimScalarEq}, // P.scalar_eq,
{"__ne__", prim::kPrimScalarNe}, // P.scalar_ne,
{"__lt__", prim::kPrimScalarLt}, // P.scalar_lt,
{"__gt__", prim::kPrimScalarGt}, // P.scalar_gt,
{"__le__", prim::kPrimScalarLe}, // P.scalar_le,
{"__ge__", prim::kPrimScalarGe}, // P.scalar_ge,
{"__bool__", std::string("int_bool")}, // C.int_bool
}},
{kNumberTypeFloat,
{
@ -116,7 +114,6 @@ BuiltInTypeMap &GetMethodMap() {
{"__le__", prim::kPrimScalarLe}, // P.scalar_le,
{"__ge__", prim::kPrimScalarGe}, // P.scalar_ge,
{"__bool__", std::string("float_bool")}, // C.float_bool
{"__ms_to_array__", prim::kPrimScalarToArray}, // P.scalar_to_array,
}},
{kObjectTypeTuple,
{
@ -202,7 +199,6 @@ BuiltInTypeMap &GetMethodMap() {
{"__ms_iter__", prim::kPrimIdentity}, // C.array_iter
{"__ms_hasnext__", std::string("array_hasnext")}, // C.array_hasnext
{"__ms_next__", std::string("array_next")}, // C.array_next
{"__ms_to_array__", prim::kPrimIdentity}, // P.identity,
{"gather_elements", std::string("gather_elements")}, // P.GatherD
{"item", std::string("item")}, // P.item,
{"itemset", std::string("itemset")}, // P.itemset,

View File

@ -94,7 +94,6 @@ static std::unordered_map<std::string, std::vector<size_t>> kDynamicInputOpMap =
{"Xdivy", {0, 1}},
{"Xlogy", {0, 1}},
{"ScalarToTensor", {0}},
{"ScalarToArray", {0}},
{"StandardLaplace", {0}},
{"UniqueWithPad", {1}},
{"ApplyAdadelta", {3, 4, 5}},

View File

@ -56,8 +56,6 @@ AbstractBasePtr InferImplSqrt(const AnalysisEnginePtr &, const PrimitivePtr &pri
AbstractBasePtr InferImplSqrtGrad(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
const AbstractBasePtrList &args_spec_list);
AbstractBasePtr InferImplScalarToArray(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
const AbstractBasePtrList &args_spec_list);
AbstractBasePtr InferImplArrayToScalar(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
const AbstractBasePtrList &args_spec_list);
AbstractBasePtr InferImplBroadCastShape(const AnalysisEnginePtr &, const PrimitivePtr &primitive,

View File

@ -162,15 +162,6 @@ TypePtr RangeCheckAndInferType(const PrimitivePtr &prim, const std::vector<Abstr
return start_type;
}
} // namespace
AbstractBasePtr InferImplScalarToArray(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
const AbstractBasePtrList &args_spec_list) {
// Inputs: a scalar.
const std::string op_name = primitive->name();
CheckArgsSize(op_name, args_spec_list, 1);
AbstractScalarPtr arg = CheckArg<AbstractScalar>(op_name, args_spec_list, 0);
return std::make_shared<AbstractTensor>(arg, std::make_shared<Shape>());
}
AbstractBasePtr InferImplArrayToScalar(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
const AbstractBasePtrList &args_spec_list) {
// Inputs: a tensor with 0 shape.

View File

@ -272,7 +272,6 @@ PrimitiveEvalImplMap &GetPrimitiveToEvalImplMap() {
{prim::kPrimRealInner, R{InferImplReal, nullptr, true}},
// Array
{prim::kPrimRange, R{InferImplRange, nullptr, true}},
{prim::kPrimScalarToArray, R{InferImplScalarToArray, nullptr, true}},
{prim::kPrimArrayToScalar, R{InferImplArrayToScalar, nullptr, true}},
{prim::kPrimBroadcastShape, R{InferImplBroadCastShape, nullptr, true}},
{prim::kPrimUnique, R{InferImplUnique, nullptr, true}},

View File

@ -503,7 +503,6 @@ GVAR_DEF(PrimitivePtr, kPrimUnravelIndex, std::make_shared<Primitive>(kUnravelIn
GVAR_DEF(PrimitivePtr, kPrimDynamicBroadcastTo, std::make_shared<Primitive>(kDynamicBroadcastTo));
GVAR_DEF(PrimitivePtr, kPrimCummin, std::make_shared<Primitive>("Cummin"));
GVAR_DEF(PrimitivePtr, kPrimBroadcastTo, std::make_shared<Primitive>("BroadcastTo"));
GVAR_DEF(PrimitivePtr, kPrimScalarToArray, std::make_shared<Primitive>("scalar_to_array"));
GVAR_DEF(PrimitivePtr, kPrimLogNormalReverse, std::make_shared<Primitive>("LogNormalReverse"));
GVAR_DEF(PrimitivePtr, kPrimTopK, std::make_shared<Primitive>(kTopK));
GVAR_DEF(PrimitivePtr, kPrimInTopK, std::make_shared<Primitive>("InTopK"));

View File

@ -54,7 +54,6 @@ from .array_func import (
tensor_slice,
strided_slice,
slice,
scalar_to_array,
scalar_to_tensor,
tuple_to_array,
expand_dims,

View File

@ -77,7 +77,6 @@ tensor_scatter_mul_ = P.TensorScatterMul()
tensor_scatter_div_ = P.TensorScatterDiv()
tensor_scatter_min_ = P.TensorScatterMin()
tensor_scatter_max_ = P.TensorScatterMax()
scalar_to_array_ = P.ScalarToArray()
scalar_to_tensor_ = P.ScalarToTensor()
tuple_to_array_ = P.TupleToArray()
masked_select_ = P.MaskedSelect()
@ -3996,35 +3995,6 @@ def tensor_scatter_div(input_x, indices, updates):
return tensor_scatter_div_(input_x, indices, updates)
def scalar_to_array(input_x):
"""
Converts a scalar to a `Tensor`.
Args:
input_x (Union[int, float]): The input is a scalar. Only constant value is allowed.
Returns:
Tensor. 0-D Tensor and the content is the input.
Raises:
TypeError: If `input_x` is neither int nor float.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> input_x = 1.0
>>> print(type(input_x))
<class 'float'>
>>> output = ops.scalar_to_array(input_x)
>>> print(type(output))
<class 'mindspore.common.tensor.Tensor'>
>>> print(output)
1.0
"""
return scalar_to_array_(input_x)
def scalar_to_tensor(input_x, dtype=mstype.float32):
"""
Converts a scalar to a `Tensor`, and converts the data type to the specified type.
@ -4825,7 +4795,6 @@ __all__ = [
'stack',
'unstack',
'scalar_cast',
'scalar_to_array',
'scalar_to_tensor',
'space_to_batch_nd',
'batch_to_space_nd',

View File

@ -35,7 +35,7 @@ from .array_ops import (ArgMaxWithValue, ArgMinWithValue, Argmax, Argmin, BatchT
Eye, Fill, Gather, GatherD, GatherNd, GatherV2, Identity, Im2Col, InvertPermutation, IsInstance,
IsSubClass, LowerBound, Lstsq, MaskedFill, MaskedSelect, Meshgrid, Mvlgamma, Ones, OnesLike,
Pack, Padding, ParallelConcat, PopulationCount, Range, Rank, Reshape, ResizeNearestNeighbor,
ReverseSequence, ReverseV2, Rint, SameTypeShape, ScalarToArray, ScalarToTensor, ScatterAdd,
ReverseSequence, ReverseV2, Rint, SameTypeShape, ScalarToTensor, ScatterAdd,
ScatterDiv, ScatterMax, ScatterMin, ScatterMul, ScatterNd, ScatterNdAdd, ScatterNdDiv,
ScatterNdMax, ScatterNdMin, ScatterNdSub, ScatterNdUpdate, ScatterNonAliasingAdd, ScatterSub,
ScatterUpdate, SearchSorted, Select, Shape, Size, Slice, Sort, SpaceToBatch, SpaceToBatchND,
@ -319,7 +319,6 @@ __all__ = [
'DynamicRNN',
'ReduceAll',
'ReduceAny',
'ScalarToArray',
'ScalarToTensor',
'TupleToArray',
'GeSwitch',

View File

@ -868,6 +868,7 @@ class TensorShape(Primitive):
class Unsqueeze(PrimitiveWithCheck):
"""Unsqueeze"""
@prim_attr_register
def __init__(self, axis):
self.init_prim_io_names(inputs=['x'], outputs=['y'])
@ -1828,40 +1829,6 @@ class TupleToArray(PrimitiveWithInfer):
return _run_op(self, self.name, args)
class ScalarToArray(PrimitiveWithInfer):
"""
Converts a scalar to a `Tensor`.
Refer to :func:`mindspore.ops.scalar_to_array` for more detail.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> op = ops.ScalarToArray()
>>> input_x = 1.0
>>> print(type(input_x))
<class 'float'>
>>> output = op(input_x)
>>> print(type(output))
<class 'mindspore.common.tensor.Tensor'>
>>> print(output)
1.0
"""
@prim_attr_register
def __init__(self):
pass
def infer_value(self, x):
validator.check_value_type("x", x, [int, float], self.name)
if isinstance(x, int):
ret = np.array(x, np.int32)
else:
ret = np.array(x, np.float32)
return Tensor(ret)
class ScalarToTensor(PrimitiveWithInfer):
"""
Converts a scalar to a `Tensor`, and converts the data type to the specified type.