diff --git a/mindspore/_checkparam.py b/mindspore/_checkparam.py
index e8dfe680f45..4c89616430e 100644
--- a/mindspore/_checkparam.py
+++ b/mindspore/_checkparam.py
@@ -728,6 +728,15 @@ class Validator:
                 raise ValueError(f"axis {axes} has shape entry {s} > 1, cannot be squeezed.")
         return tuple(new_shape)

+    @staticmethod
+    def check_axis_in_range(axis, ndim):
+        """Checks that the axis is within the bounds of ndim."""
+        if not isinstance(axis, int):
+            raise TypeError(f'axes should be integers, not {type(axis)}')
+        if not -ndim <= axis < ndim:
+            raise ValueError(f'axis {axis} is out of bounds for array of dimension {ndim}')
+        return axis % ndim
+

 def check_input_format(input_param):
     """Judge input format."""
diff --git a/mindspore/_extends/parse/standard_method.py b/mindspore/_extends/parse/standard_method.py
index 37917fe6820..b2c2f622a5f 100644
--- a/mindspore/_extends/parse/standard_method.py
+++ b/mindspore/_extends/parse/standard_method.py
@@ -148,7 +148,34 @@ def strides_(x):

 def astype(x, dtype, copy=True):
-    """Implementation of `astype`."""
+    """
+    Return a copy of the tensor, cast to a specified type.
+
+    Args:
+        dtype (Union[:class:`mindspore.dtype`, str]): Designated tensor dtype, can be in format
+            of :class:`mindspore.dtype.float32` or `float32`.
+        copy (bool, optional): By default, astype always returns a newly allocated
+            tensor. If this is set to false, the input tensor is returned instead
+            of a copy if possible. Default: True.
+
+    Returns:
+        Tensor, with the designated dtype.
+
+    Raises:
+        TypeError: If `dtype` has types not specified above, or values cannot be understood.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import numpy as np
+        >>> from mindspore import Tensor
+        >>> x = Tensor(np.ones((1,2,2,1), dtype=np.float32))
+        >>> x = x.astype("int32")
+        >>> print(x.dtype)
+        Int32
+    """
     dtype = check_astype_dtype_const(dtype)
     if not copy and dtype == x.dtype:
         return x
@@ -156,7 +183,40 @@ def astype(x, dtype, copy=True):

 def transpose(x, *axis):
-    """Implementation of `transpose`."""
+    r"""
+    Return a view of the tensor with axes transposed.
+
+    For a 1-D tensor this has no effect, as a transposed vector is simply the
+    same vector. For a 2-D tensor, this is a standard matrix transpose. For a
+    n-D tensor, if axes are given, their order indicates how the axes are permuted.
+    If axes are not provided and tensor.shape = (i[0], i[1],...i[n-2], i[n-1]),
+    then tensor.transpose().shape = (i[n-1], i[n-2], ... i[1], i[0]).
+
+    Args:
+        axes(Union[None, tuple(int), list(int), int], optional): If axes is None or
+            blank, tensor.transpose() will reverse the order of the axes. If axes is tuple(int)
+            or list(int), tensor.transpose() will transpose the tensor to the new axes order.
+            If axes is int, this form is simply intended as a convenience alternative to the
+            tuple/list form.
+
+    Returns:
+        Tensor, has the same dimension as input tensor, with axes suitably permuted.
+
+    Raises:
+        TypeError: If input arguments have types not specified above.
+        ValueError: If the number of `axes` is not equal to the dimension of the tensor.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import numpy as np
+        >>> from mindspore import Tensor
+        >>> x = Tensor(np.ones((1,2,3), dtype=np.float32))
+        >>> x = x.transpose()
+        >>> print(x.shape)
+        (3, 2, 1)
+    """
     ndim = F.rank(x)
     perm = check_transpose_axis_const(axis, ndim)
     return F.transpose(x, perm)
@@ -167,27 +227,86 @@ T_ = transpose

 def reshape(x, *shape):
-    """Implementation of `reshape`."""
+    """
+    Give a new shape to a tensor without changing its data.
+
+    Args:
+        shape(Union[int, tuple(int), list(int)]): The new shape should be compatible
+            with the original shape. If an integer, then the result will be a 1-D
+            array of that length. One shape dimension can be -1. In this case, the
+            value is inferred from the length of the array and remaining dimensions.
+
+    Returns:
+        Tensor, with new specified shape.
+
+    Raises:
+        TypeError: If new_shape is not integer, list or tuple, or `x` is not tensor.
+        ValueError: If new_shape is not compatible with the original shape.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> from mindspore import Tensor
+        >>> from mindspore import dtype as mstype
+        >>> x = Tensor([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]], dtype=mstype.float32)
+        >>> output = x.reshape(3, 2)
+        >>> print(output)
+        [[-0.1  0.3]
+         [ 3.6  0.4]
+         [ 0.5 -3.2]]
+    """
     new_shape = check_reshape_shp_const(shape)
     return F.reshape(x, new_shape)

 def ravel(x):
-    """Implementation of `ravel`."""
+    """
+    Return a contiguous flattened tensor.
+
+    Returns:
+        Tensor, a 1-D tensor, containing the same elements of the input.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import numpy as np
+        >>> from mindspore import Tensor
+        >>> x = Tensor(np.ones((2,3,4), dtype=np.float32))
+        >>> output = x.ravel()
+        >>> print(output.shape)
+        (24,)
+    """
     return reshape(x, (-1,))

 def flatten(x, order='C'):
-    """
-    Returns a copy of the array collapsed into one dimension.
+    r"""
+    Return a copy of the tensor collapsed into one dimension.

     Args:
-        order (str, optional): Can choose between `C` and `F`. `C` means to
-            flatten in row-major (C-style) order. ‘F’ means to flatten in column-major
-            (Fortran- style) order. Only `C` and `F` are supported.
+        order (str, optional): Can choose between 'C' and 'F'. 'C' means to
+            flatten in row-major (C-style) order. 'F' means to flatten in column-major
+            (Fortran-style) order. Only 'C' and 'F' are supported. Default: 'C'.

     Returns:
-        Tensor, has the same data type as x.
+        Tensor, has the same data type as input.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Raises:
+        TypeError: If `order` is not string type.
+        ValueError: If `order` is string type, but not 'C' or 'F'.
+
+    Examples:
+        >>> import numpy as np
+        >>> from mindspore import Tensor
+        >>> x = Tensor(np.ones((2,3,4), dtype=np.float32))
+        >>> output = x.flatten()
+        >>> print(output.shape)
+        (24,)
     """
     order = check_flatten_order_const(order)
     if order == 'C':
@@ -200,14 +319,29 @@ def flatten(x, order='C'):

 def swapaxes(x, axis1, axis2):
     """
-    Interchanges two axes of a tensor.
+    Interchange two axes of a tensor.

     Args:
         axis1 (int): First axis.
         axis2 (int): Second axis.

     Returns:
-        Transposed tensor, has the same data type as the original tensor x.
+        Transposed tensor, has the same data type as the input.
+
+    Raises:
+        TypeError: If `axis1` or `axis2` is not integer.
+        ValueError: If `axis1` or `axis2` is not in the range of :math:`[-ndim, ndim-1]`.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import numpy as np
+        >>> from mindspore import Tensor
+        >>> x = Tensor(np.ones((2,3,4), dtype=np.float32))
+        >>> output = x.swapaxes(0, 2)
+        >>> print(output.shape)
+        (4, 3, 2)
     """
     axis1, axis2 = check_swapaxes_axis_const((axis1, axis2), x.ndim)
@@ -230,13 +364,28 @@ def swapaxes(x, axis1, axis2):

 def squeeze(x, axis=None):
     """
-    Removes single-dimensional entries from the shape of an tensor.
+    Remove single-dimensional entries from the shape of a tensor.

     Args:
-        axis: Union[None, int, list(int), tuple(list)]. Default is None.
+        axis (Union[None, int, list(int), tuple(int)], optional): Default is None.

     Returns:
         Tensor, with all or a subset of the dimensions of length 1 removed.
+
+    Raises:
+        TypeError: If input arguments have types not specified above.
+        ValueError: If specified axis has shape entry :math:`> 1`.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import numpy as np
+        >>> from mindspore import Tensor
+        >>> x = Tensor(np.ones((1,2,2,1), dtype=np.float32))
+        >>> x = x.squeeze()
+        >>> print(x.shape)
+        (2, 2)
     """
     shape = F.shape(x)
     if axis is None:
@@ -246,6 +395,78 @@ def squeeze(x, axis=None):
     return F.reshape(x, new_shape)

+def argmax(x, axis=None):
+    """
+    Returns the indices of the maximum values along an axis.
+
+    Args:
+        axis (int, optional): By default, the index is into
+            the flattened array, otherwise along the specified axis.
+
+    Returns:
+        Tensor, array of indices into the array. It has the same
+        shape as a.shape with the dimension along axis removed.
+
+    Raises:
+        ValueError: if axis is out of range.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import numpy as np
+        >>> from mindspore import Tensor
+        >>> a = Tensor(np.arange(10, 16).reshape(2, 3).astype("float32"))
+        >>> print(a.argmax())
+        5
+    """
+    # P.Argmax only supports float
+    x = x.astype(mstype.float32)
+    if axis is None:
+        x = ravel(x)
+        axis = 0
+    else:
+        axis = check_axis_in_range_const(axis, F.rank(x))
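+    # a negative axis has been folded into [0, ndim) by the validator above; the
+    # normalized axis is then fixed in the Argmax primitive at construction time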
+    return P.Argmax(axis)(x)
+
+
+def argmin(x, axis=None):
+    """
+    Returns the indices of the minimum values along an axis.
+
+    Args:
+        axis (int, optional): By default, the index is into
+            the flattened array, otherwise along the specified axis.
+
+    Returns:
+        Tensor, array of indices into the array. It has the same
+        shape as a.shape with the dimension along axis removed.
+
+    Raises:
+        ValueError: if axis is out of range.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import numpy as np
+        >>> from mindspore import Tensor
+        >>> a = Tensor(np.arange(10, 16).reshape(2, 3).astype("float32"))
+        >>> print(a.argmin())
+        0
+    """
+    # P.Argmax only supports float
+    x = x.astype(mstype.float32)
+    if axis is None:
+        x = ravel(x)
+        axis = 0
+    else:
+        axis = check_axis_in_range_const(axis, F.rank(x))
+    # P.Argmin is currently not supported
+    return P.Argmax(axis)(F.neg_tensor(x))
+
+
 def getitem(data, item):
     """Implementation of `getitem`."""
     return data.__getitem__(item)
@@ -466,6 +687,7 @@ check_reshape_shp_const = constexpr(validator.check_reshape_shp)
 check_flatten_order_const = constexpr(validator.check_flatten_order)
 check_swapaxes_axis_const = constexpr(validator.check_swapaxes_axis)
 prepare_shape_for_squeeze_const = constexpr(validator.prepare_shape_for_squeeze)
+check_axis_in_range_const = constexpr(validator.check_axis_in_range)

 def tensor_bool(x):
diff --git a/mindspore/ccsrc/pipeline/jit/resource.cc b/mindspore/ccsrc/pipeline/jit/resource.cc
index 50da6e998d7..bd2bc15a279 100644
--- a/mindspore/ccsrc/pipeline/jit/resource.cc
+++ b/mindspore/ccsrc/pipeline/jit/resource.cc
@@ -184,6 +184,8 @@ BuiltInTypeMap &GetMethodMap() {
                      {"squeeze", std::string("squeeze")},          // P.squeeze()
                      {"astype", std::string("astype")},            // P.cast()
                      {"__bool__", std::string("tensor_bool")},     // C.tensor_bool
+                     {"argmax", std::string("argmax")},            // P.Argmax()
+                     {"argmin", std::string("argmin")},            // P.Argmax()
                    }},
                   {kObjectTypeRowTensorType,
                    {
diff --git a/mindspore/common/tensor.py b/mindspore/common/tensor.py
index 4de33b82b66..bce61cbe0fe 100644
--- a/mindspore/common/tensor.py
+++ b/mindspore/common/tensor.py
@@ -484,6 +484,21 @@ class Tensor(Tensor_):

         Returns:
             Tensor, has the same dimension as input tensor, with axes suitably permuted.
+
+        Raises:
+            TypeError: If input arguments have types not specified above.
+            ValueError: If the number of `axes` is not equal to the dimension of the tensor.
+
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``
+
+        Examples:
+            >>> import numpy as np
+            >>> from mindspore import Tensor
+            >>> x = Tensor(np.ones((1,2,3), dtype=np.float32))
+            >>> x = x.transpose()
+            >>> print(x.shape)
+            (3, 2, 1)
         """
         self.init_check()
         perm = validator.check_transpose_axis(axes, self.ndim)
@@ -501,6 +516,23 @@ class Tensor(Tensor_):

         Returns:
             Tensor, with new specified shape.
+
+        Raises:
+            TypeError: If new_shape is not integer, list or tuple, or `x` is not tensor.
+            ValueError: If new_shape is not compatible with the original shape.
+
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``
+
+        Examples:
+            >>> from mindspore import Tensor
+            >>> from mindspore import dtype as mstype
+            >>> x = Tensor([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]], dtype=mstype.float32)
+            >>> output = x.reshape(3, 2)
+            >>> print(output)
+            [[-0.1  0.3]
+             [ 3.6  0.4]
+             [ 0.5 -3.2]]
         """
         self.init_check()
         new_shape = validator.check_reshape_shp(shape)
@@ -512,6 +544,17 @@ class Tensor(Tensor_):

         Returns:
             Tensor, a 1-D tensor, containing the same elements of the input.
+
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``
+
+        Examples:
+            >>> import numpy as np
+            >>> from mindspore import Tensor
+            >>> x = Tensor(np.ones((2,3,4), dtype=np.float32))
+            >>> output = x.ravel()
+            >>> print(output.shape)
+            (24,)
         """
         self.init_check()
         reshape_op = tensor_operator_registry.get('reshape')()
@@ -528,6 +571,21 @@ class Tensor(Tensor_):

         Returns:
             Tensor, has the same data type as input.
+
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``
+
+        Raises:
+            TypeError: If `order` is not string type.
+            ValueError: If `order` is string type, but not 'C' or 'F'.
+
+        Examples:
+            >>> import numpy as np
+            >>> from mindspore import Tensor
+            >>> x = Tensor(np.ones((2,3,4), dtype=np.float32))
+            >>> output = x.flatten()
+            >>> print(output.shape)
+            (24,)
         """
         self.init_check()
         reshape_op = tensor_operator_registry.get('reshape')()
@@ -550,6 +608,21 @@ class Tensor(Tensor_):

         Returns:
             Transposed tensor, has the same data type as the input.
+
+        Raises:
+            TypeError: If `axis1` or `axis2` is not integer.
+            ValueError: If `axis1` or `axis2` is not in the range of :math:`[-ndim, ndim-1]`.
+
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``
+
+        Examples:
+            >>> import numpy as np
+            >>> from mindspore import Tensor
+            >>> x = Tensor(np.ones((2,3,4), dtype=np.float32))
+            >>> output = x.swapaxes(0, 2)
+            >>> print(output.shape)
+            (4, 3, 2)
         """
         self.init_check()
         axis1, axis2 = validator.check_swapaxes_axis((axis1, axis2), self.ndim)
@@ -579,6 +652,21 @@ class Tensor(Tensor_):

         Returns:
             Tensor, with all or a subset of the dimensions of length 1 removed.
+
+        Raises:
+            TypeError: If input arguments have types not specified above.
+            ValueError: If specified axis has shape entry :math:`> 1`.
+
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``
+
+        Examples:
+            >>> import numpy as np
+            >>> from mindspore import Tensor
+            >>> x = Tensor(np.ones((1,2,2,1), dtype=np.float32))
+            >>> x = x.squeeze()
+            >>> print(x.shape)
+            (2, 2)
         """
         self.init_check()
         if axis is None:
@@ -600,6 +688,20 @@ class Tensor(Tensor_):

         Returns:
             Tensor, with the designated dtype.
+
+        Raises:
+            TypeError: If `dtype` has types not specified above, or values cannot be understood.
+
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``
+
+        Examples:
+            >>> import numpy as np
+            >>> from mindspore import Tensor
+            >>> x = Tensor(np.ones((1,2,2,1), dtype=np.float32))
+            >>> x = x.astype("int32")
+            >>> print(x.dtype)
+            Int32
         """
         self.init_check()
         dtype = validator.check_astype_dtype(dtype)
@@ -607,6 +709,77 @@ class Tensor(Tensor_):
             return self
         return tensor_operator_registry.get('cast')(self, dtype)

+    def argmax(self, axis=None):
+        """
+        Returns the indices of the maximum values along an axis.
+
+        Args:
+            axis (int, optional): By default, the index is into
+                the flattened array, otherwise along the specified axis.
+
+        Returns:
+            Tensor, array of indices into the array. It has the same
+            shape as a.shape with the dimension along axis removed.
+
+        Raises:
+            ValueError: if axis is out of range.
+
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``
+
+        Examples:
+            >>> import numpy as np
+            >>> from mindspore import Tensor
+            >>> a = Tensor(np.arange(10, 16).reshape(2, 3).astype("float32"))
+            >>> print(a.argmax())
+            5
+        """
+        # P.Argmax only supports float
+        a = self.astype(mstype.float32)
+        if axis is None:
+            a = a.ravel()
+            axis = 0
+        else:
+            axis = validator.check_axis_in_range(axis, a.ndim)
+        return tensor_operator_registry.get('argmax')(axis)(a)
+
+    def argmin(self, axis=None):
+        """
+        Returns the indices of the minimum values along an axis.
+
+        Args:
+            axis (int, optional): By default, the index is into
+                the flattened array, otherwise along the specified axis.
+
+        Returns:
+            Tensor, array of indices into the array. It has the same
+            shape as a.shape with the dimension along axis removed.
+
+        Raises:
+            ValueError: if axis is out of range.
+
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``
+
+        Examples:
+            >>> import numpy as np
+            >>> from mindspore import Tensor
+            >>> a = Tensor(np.arange(10, 16).reshape(2, 3).astype("float32"))
+            >>> print(a.argmin())
+            0
+        """
+        # P.Argmax only supports float
+        a = self.astype(mstype.float32)
+        if axis is None:
+            a = a.ravel()
+            axis = 0
+        else:
+            axis = validator.check_axis_in_range(axis, a.ndim)
+        # P.Argmin is currently not supported
+        return tensor_operator_registry.get('argmax')(axis)(tensor_operator_registry.get('__neg__')(a))
+
+
     def init_check(self):
         if self.has_init:
             self.init_data()
diff --git a/mindspore/numpy/__init__.py b/mindspore/numpy/__init__.py
index 9af22826b7b..05c7b299a3b 100644
--- a/mindspore/numpy/__init__.py
+++ b/mindspore/numpy/__init__.py
@@ -31,15 +31,18 @@ from .array_ops import (transpose, expand_dims, squeeze, rollaxis, swapaxes, res
                         column_stack, hstack, dstack, vstack, stack, unique, moveaxis,
                         tile, broadcast_to, broadcast_arrays, roll, append, split, vsplit,
                         flip, flipud, fliplr, hsplit, dsplit, take_along_axis, take, repeat,
-                        rot90, select, array_split)
+                        rot90, select, array_split, choose, size, array_str, apply_along_axis,
+                        piecewise, unravel_index, apply_over_axes)
 from .array_creations import copy_ as copy
 from .array_creations import (array, asarray, asfarray, ones, zeros, full, arange,
                               linspace, logspace, eye, identity, empty, empty_like,
                               ones_like, zeros_like, full_like, diagonal, tril, triu,
                               tri, trace, meshgrid, mgrid, ogrid, diagflat,
-                              diag, diag_indices, ix_, indices, geomspace, vander)
+                              diag, diag_indices, ix_, indices, geomspace, vander, hamming,
+                              hanning, bartlett, blackman, triu_indices, tril_indices,
+                              triu_indices_from, tril_indices_from, histogram_bin_edges, pad)
 from .dtypes import (int_, int8, int16, int32, int64, uint, uint8, uint16,
-                     uint32, uint64, float_, float16, float32, float64, bool_, inf, nan,
+                     uint32, uint64, float_, float16, float32, float64, bool_, inf, nan, pi,
                      numeric_types, PINF, NINF)
 from .math_ops import (mean, inner, add, subtract, multiply, divide, true_divide, power,
                        dot, outer, tensordot, absolute, std, var, average, minimum,
@@ -50,31 +53,43 @@ from .math_ops import (mean, inner, add, subtract, multiply, divide, true_divide
                        cross, ceil, trapz, gcd, lcm, convolve, log1p, logaddexp, log2,
                        logaddexp2, log10, ediff1d, nansum, nanmean, nanvar, nanstd, cumsum,
                        nancumsum, sin, cos, tan, arcsin, arccos, arctan, sinh, cosh, tanh, arcsinh, arccosh,
-                       arctanh, arctan2, cov)
+                       arctanh, arctan2, cov, multi_dot, nanmax, nanmin, argmax, argmin, searchsorted,
+                       interp, sum_, corrcoef, gradient, sign, copysign, digitize, bincount, histogram,
+                       histogramdd, histogram2d, matrix_power, around, polyadd, polysub, polyval,
+                       polyder, polymul, polyint, result_type, unwrap, cumprod, ravel_multi_index,
+                       norm, bitwise_and, bitwise_or, bitwise_xor, invert, rint, correlate, radians)
 from .logic_ops import (not_equal, less_equal, less, greater_equal, greater, equal, isfinite,
                         isnan, isinf, isposinf, isneginf, isscalar, logical_and, logical_not,
-                        logical_or, logical_xor, in1d, isin, isclose)
+                        logical_or, logical_xor, in1d, isin, isclose, signbit, sometrue,
+                        array_equal, array_equiv)

 mod = remainder
 fabs = absolute
+round = around  # pylint: disable=redefined-builtin
 divmod = divmod_  # pylint: disable=redefined-builtin
+del divmod_
 abs = absolute  # pylint: disable=redefined-builtin
 max = amax  # pylint: disable=redefined-builtin
 min = amin  # pylint: disable=redefined-builtin
-
+sum = sum_  # pylint: disable=redefined-builtin
+del sum_
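+# the underscore-suffixed helpers are removed above so that only the
+# numpy-style public names stay in the module namespace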
+bitwise_not = invert

 array_ops_module = ['transpose', 'expand_dims', 'squeeze', 'rollaxis', 'swapaxes', 'reshape',
                     'ravel', 'concatenate', 'where', 'atleast_1d', 'atleast_2d', 'atleast_3d',
                     'column_stack', 'hstack', 'dstack', 'vstack', 'stack', 'unique', 'moveaxis',
                     'tile', 'broadcast_to', 'broadcast_arrays', 'append', 'roll', 'split', 'vsplit',
                     'flip', 'flipud', 'fliplr', 'hsplit', 'dsplit', 'take_along_axis', 'take',
-                    'repeat', 'rot90', 'select', 'array_split']
+                    'repeat', 'rot90', 'select', 'array_split', 'choose', 'size', 'array_str',
+                    'apply_along_axis', 'piecewise', 'unravel_index', 'apply_over_axes']

 array_creations_module = ['array', 'asarray', 'asfarray', 'ones', 'zeros', 'full', 'arange',
                           'linspace', 'logspace', 'eye', 'identity', 'empty', 'empty_like',
                           'ones_like', 'zeros_like', 'full_like', 'diagonal', 'tril', 'triu',
                           'tri', 'trace', 'meshgrid', 'mgrid', 'ogrid', 'diagflat', 'diag',
-                          'diag_indices', 'ix_', 'indices', 'geomspace', 'vander']
+                          'diag_indices', 'ix_', 'indices', 'geomspace', 'vander', 'hamming',
+                          'hanning', 'bartlett', 'blackman', 'triu_indices', 'tril_indices',
+                          'triu_indices_from', 'tril_indices_from', 'histogram_bin_edges', 'pad']

 math_module = ['mean', 'inner', 'add', 'subtract', 'multiply', 'divide', 'true_divide', 'power',
                'dot', 'outer', 'tensordot', 'absolute', 'std', 'var', 'average', 'not_equal',
@@ -86,11 +101,17 @@ math_module = ['mean', 'inner', 'add', 'subtract', 'multiply', 'divide', 'true_d
                'abs', 'max', 'min', 'gcd', 'lcm', 'log1p', 'logaddexp', 'log2', 'logaddexp2',
                'log10', 'convolve', 'ediff1d', 'nansum', 'nanmean', 'nanvar', 'nanstd', 'cumsum',
                'nancumsum', 'sin', 'cos', 'tan', 'arcsin', 'arccos', 'arctan', 'sinh', 'cosh', 'tanh',
-               'arcsinh', 'arccosh', 'arctanh', 'arctan2', 'cov']
+               'arcsinh', 'arccosh', 'arctanh', 'arctan2', 'cov', 'multi_dot', 'nanmax', 'nanmin',
+               'argmax', 'argmin', 'searchsorted', 'interp', 'sum', 'corrcoef', 'gradient', 'sign',
+               'copysign', 'radians', 'digitize', 'bincount', 'histogram', 'histogramdd', 'histogram2d',
+               'polyadd', 'polysub', 'polyval', 'polyder', 'polymul', 'polyint', 'result_type',
+               'unwrap', 'cumprod', 'ravel_multi_index', 'norm', 'bitwise_and', 'bitwise_or',
+               'bitwise_xor', 'invert', 'bitwise_not', 'rint', 'correlate']

 logic_module = ['not_equal', 'less_equal', 'less', 'greater_equal', 'greater', 'equal', 'isfinite',
                 'isnan', 'isinf', 'isposinf', 'isneginf', 'isscalar', 'logical_and', 'logical_not',
-                'logical_or', 'logical_xor', 'in1d', 'isin', 'isclose']
+                'logical_or', 'logical_xor', 'in1d', 'isin', 'isclose', 'signbit', 'sometrue',
+                'array_equal', 'array_equiv']

 __all__ = array_ops_module + array_creations_module + math_module + logic_module + numeric_types

diff --git a/mindspore/numpy/array_creations.py b/mindspore/numpy/array_creations.py
index c3f7fea70b8..bdee7507945 100644
--- a/mindspore/numpy/array_creations.py
+++ b/mindspore/numpy/array_creations.py
@@ -13,10 +13,14 @@
 # limitations under the License.
 # ============================================================================
 """array operations, the function docs are adapted from Numpy API."""
+import math
+import operator
+
 import numpy as onp

 from ..common import Tensor
 from ..common import dtype as mstype
+from ..ops import operations as P
 from ..ops import functional as F
 from ..ops.primitive import constexpr
 from ..nn.layer.basic import tril as nn_tril
@@ -25,13 +29,15 @@ from .._c_expression import Tensor as Tensor_

 from .utils import _check_input_for_asarray, _deep_list, _deep_tensor_to_nparray, \
     _broadcast_to_shape, _check_input_tensor, _convert_64_to_32, _get_dtype_from_scalar, \
-    _expand
+    _expand, _to_tensor, _slice_along_axis, _callable
 from .utils_const import _raise_value_error, _empty, _check_axis_valid, _max, _min, \
     _check_same_type, _is_shape_empty, _check_shape, _check_dtype, _tile_size, _abs, \
     _raise_type_error, _expanded_shape, _check_is_float, _iota, _type_convert, \
-    _canonicalize_axis, _list_comprehensions, _ceil, _tuple_getitem, _tuple_slice
-from .array_ops import transpose, ravel, concatenate, broadcast_arrays, reshape, broadcast_to
-from .dtypes import nan
+    _canonicalize_axis, _list_comprehensions, _ceil, _tuple_slice, _raise_unimplemented_error, \
+    _tuple_setitem
+from .array_ops import transpose, ravel, concatenate, broadcast_arrays, reshape, broadcast_to, flip, \
+    apply_along_axis, where
+from .dtypes import nan, pi

 # According to official numpy reference, the dimension of a numpy array must be less
 # than 32
@@ -39,6 +45,9 @@ MAX_NUMPY_DIMS = 32
 # All types that can be accepted as "array_like" parameters in graph mode.
 ARRAY_TYPES = (int, float, bool, list, tuple, Tensor)

+_reduce_min_keepdims = P.ReduceMin(True)
+_reduce_max_keepdims = P.ReduceMax(True)
+_reduce_mean_keepdims = P.ReduceMean(True)

 def array(obj, dtype=None, copy=True, ndmin=0):
     """
@@ -255,6 +264,7 @@ def copy_(a):
         a = a.astype(origin_dtype)
     return a

+
 def ones(shape, dtype=mstype.float32):
     """
     Returns a new tensor of given shape and type, filled with ones.
@@ -626,7 +636,7 @@ def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0):
                            num, endpoint=endpoint, dtype=dtype, axis=axis)
     shape = F.shape(bases)
     axis = axis + F.rank(bases) + 1 if axis < 0 else axis
-    expanded_shape = _tuple_getitem(shape, axis, False) + (1,) + _tuple_getitem(shape, axis)
+    expanded_shape = _tuple_slice(shape, None, axis) + (1,) + _tuple_slice(shape, axis, None)
     bases = F.reshape(bases, expanded_shape)
     start = F.reshape(start, expanded_shape)
     res = F.tensor_mul(F.tensor_pow(bases, exponents), start)
@@ -1768,7 +1778,7 @@ def indices(dimensions, dtype=mstype.int32, sparse=False):

     Args:
         dimensions (tuple or list of ints): The shape of the grid.
-        dtype (data type, optional): Data type of the result.
+        dtype (:class:`mindspore.dtype`, optional): Data type of the result.
         sparse (boolean, optional): Defaults to False. Return a sparse
             representation of the grid instead of a dense representation.

@@ -1801,3 +1811,724 @@ def indices(dimensions, dtype=mstype.int32, sparse=False):
     for d in dimensions:
         grids += (arange(d, dtype=dtype),)
     return meshgrid(*grids, sparse=sparse, indexing='ij')
+
+
+def _check_window_size(x):
+    """Returns True if window size is greater than 1."""
+    if not isinstance(x, int):
+        _raise_type_error('the number of points should be an int')
+    return x > 1
+
+
+def bartlett(M):
+    """
+    Returns the Bartlett window.
+    The Bartlett window is very similar to a triangular window, except that the
+    end points are at zero.
It is often used in signal processing for tapering a
+    signal, without generating too much ripple in the frequency domain.
+
+    Args:
+        M (int): Number of points in the output window. If zero or less, an empty
+            array is returned.
+
+    Returns:
+        Tensor, the triangular window, with the maximum value normalized to one
+        (the value one appears only if the number of samples is odd), with the
+        first and last samples equal to zero.
+
+    Raises:
+        TypeError: if `M` is not an int.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore.numpy as np
+        >>> print(np.bartlett(12))
+        [0.         0.18181819 0.36363637 0.5454545  0.72727275 0.9090909
+         0.9090909  0.72727275 0.5454545  0.36363637 0.18181819 0.        ]
+    """
+    if not _check_window_size(M):
+        return ones(_max(0, M))
+    n = _iota(mstype.float32, M)
+    m_minus_one = _to_tensor(M - 1)
+    return _to_tensor(1) - F.absolute(_to_tensor(2)*n - m_minus_one)/m_minus_one
+
+
+def blackman(M):
+    """
+    Returns the Blackman window.
+    The Blackman window is a taper formed by using the first three terms of a
+    summation of cosines. It was designed to have close to the minimal leakage
+    possible. It is close to optimal, only slightly worse than a Kaiser window.
+
+    Args:
+        M (int): Number of points in the output window. If zero or less, an empty
+            array is returned.
+
+    Returns:
+        Tensor, the window, with the maximum value normalized to one (the value
+        one appears only if the number of samples is odd).
+
+    Raises:
+        TypeError: if `M` is not an int.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore.numpy as np
+        >>> window = np.blackman(12)
+        >>> print(window.shape)
+        (12,)
+    """
+    if not _check_window_size(M):
+        return ones(_max(0, M))
+    n_doubled = arange(1 - M, M, 2, dtype=mstype.float32)
+    return (_to_tensor(0.42) + _to_tensor(0.5)*F.cos(_to_tensor(pi/(M - 1))*n_doubled) +
+            _to_tensor(0.08)*F.cos(_to_tensor(2*pi/(M - 1))*n_doubled))
+
+
+def hamming(M):
+    """
+    Returns the Hamming window.
+    The Hamming window is a taper formed by using a weighted cosine.
+
+    Args:
+        M (int): Number of points in the output window. If zero or less, an empty
+            array is returned.
+
+    Returns:
+        Tensor, the window, with the maximum value normalized to one (the value
+        one appears only if the number of samples is odd).
+
+    Raises:
+        TypeError: if `M` is not an int.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore.numpy as np
+        >>> print(np.hamming(12))
+        [0.08000001 0.15302339 0.34890914 0.6054648  0.841236   0.9813669
+         0.9813668  0.8412359  0.6054647  0.34890908 0.15302327 0.08000001]
+    """
+    if not _check_window_size(M):
+        return ones(_max(0, M))
+    n = _iota(mstype.float32, M)
+    return _to_tensor(0.54) - _to_tensor(0.46)*F.cos(_to_tensor(2*pi/(M - 1))*n)
+
+
+def hanning(M):
+    """
+    Returns the Hanning window.
+    The Hanning window is a taper formed by using a weighted cosine.
+
+    Args:
+        M (int): Number of points in the output window. If zero or less, an empty
+            array is returned.
+
+    Returns:
+        Tensor, the window, with the maximum value normalized to one (the value
+        one appears only if the number of samples is odd).
+
+    Raises:
+        TypeError: if `M` is not an int.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore.numpy as np
+        >>> print(np.hanning(12))
+        [0.         0.07937324 0.29229254 0.5711574  0.8274304  0.9797465
+         0.97974646 0.82743025 0.5711573  0.29229245 0.07937312 0.        ]
+    """
+    if not _check_window_size(M):
+        return ones(_max(0, M))
+    n = _iota(mstype.float32, M)
+    return _to_tensor(0.5) - _to_tensor(0.5)*F.cos(_to_tensor(2*pi/(M - 1))*n)
+
+
+@constexpr
+def tri_indices(n, k=0, m=None, upper=True):
+    """Returns triu/tril indices in O(nm) time."""
+    if not isinstance(n, (int, float, bool)):
+        raise TypeError("Input n must be a number.")
+    if not isinstance(k, (int, float, bool)):
+        raise TypeError("Input k must be a number.")
+    if m is None:
+        m = n
+    elif not isinstance(m, (int, float, bool)):
+        raise TypeError("Input m must be a number.")
+    if upper:
+        compare = operator.ge
+    else:
+        compare = operator.le
+    x_coordinate = []
+    y_coordinate = []
+    # math.ceil is used to match numpy's behaviour
+    for i in range(math.ceil(n)):
+        curr_limit = i + k
+        for j in range(math.ceil(m)):
+            if compare(j, curr_limit):
+                x_coordinate.append(i)
+                y_coordinate.append(j)
+    return asarray_const(x_coordinate), asarray_const(y_coordinate)
+
+
+def triu_indices(n, k=0, m=None):
+    """
+    Returns the indices for the upper-triangle of an (n, m) array.
+
+    Args:
+        n (int): The size of the arrays for which the returned indices will be valid.
+        k (int, optional): Diagonal offset.
+        m (int, optional): The column dimension of the arrays for which the returned
+            arrays will be valid. By default `m` is taken equal to `n`.
+
+    Returns:
+        The indices for the triangle. The returned tuple contains two tensors, each
+        with the indices along one dimension of the tensor.
+
+    Raises:
+        TypeError: if `n`, `k`, `m` are not numbers.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore.numpy as np
+        >>> print(np.triu_indices(3))
+        (Tensor(shape=[6], dtype=Int32, value= [0, 0, 0, 1, 1, 2]),
+         Tensor(shape=[6], dtype=Int32, value= [0, 1, 2, 1, 2, 2]))
+    """
+    return tri_indices(n, k, m, True)
+
+
+def tril_indices(n, k=0, m=None):
+    """
+    Returns the indices for the lower-triangle of an (n, m) array.
+
+    Args:
+        n (int): The size of the arrays for which the returned indices will be valid.
+        k (int, optional): Diagonal offset.
+        m (int, optional): The column dimension of the arrays for which the returned
+            arrays will be valid. By default `m` is taken equal to `n`.
+
+    Returns:
+        The indices for the triangle. The returned tuple contains two tensors, each
+        with the indices along one dimension of the tensor.
+
+    Raises:
+        TypeError: if `n`, `k`, `m` are not numbers.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore.numpy as np
+        >>> print(np.tril_indices(3))
+        (Tensor(shape=[6], dtype=Int32, value= [0, 1, 1, 2, 2, 2]),
+         Tensor(shape=[6], dtype=Int32, value= [0, 0, 1, 0, 1, 2]))
+    """
+    return tri_indices(n, k, m, False)
+
+
+def triu_indices_from(arr, k=0):
+    """
+    Returns the indices for the upper-triangle of `arr`.
+
+    Args:
+        arr (Union[Tensor, list, tuple]): 2-dimensional array.
+        k (int, optional): Diagonal offset.
+
+    Returns:
+        Tuple of two tensors, each of shape (N,), containing the indices for the
+        upper-triangle of `arr`.
+
+    Raises:
+        TypeError: if `arr` cannot be converted to tensor, or `k` is not a number.
+        ValueError: if `arr` cannot be converted to a 2-dimensional tensor.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore.numpy as np
+        >>> tensor = np.ones((3,3))
+        >>> print(np.triu_indices_from(tensor))
+        (Tensor(shape=[6], dtype=Int32, value= [0, 0, 0, 1, 1, 2]),
+         Tensor(shape=[6], dtype=Int32, value= [0, 1, 2, 1, 2, 2]))
+    """
+    arr = asarray(arr)
+    if arr.ndim != 2:
+        _raise_value_error("input array must be 2-d")
+    return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])
+
+
+def tril_indices_from(arr, k=0):
+    """
+    Returns the indices for the lower-triangle of `arr`.
+
+    Args:
+        arr (Union[Tensor, list, tuple]): 2-dimensional array.
+        k (int, optional): Diagonal offset.
+
+    Returns:
+        Tuple of two tensors, each of shape (N,), containing the indices for the
+        lower-triangle of `arr`.
+
+    Raises:
+        TypeError: if `arr` cannot be converted to tensor, or `k` is not a number.
+        ValueError: if `arr` cannot be converted to a 2-dimensional tensor.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore.numpy as np
+        >>> tensor = np.ones((3,3))
+        >>> print(np.tril_indices_from(tensor))
+        (Tensor(shape=[6], dtype=Int32, value= [0, 1, 1, 2, 2, 2]),
+         Tensor(shape=[6], dtype=Int32, value= [0, 0, 1, 0, 1, 2]))
+    """
+    arr = asarray(arr)
+    if arr.ndim != 2:
+        _raise_value_error("input array must be 2-d")
+    return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
+
+
+def histogram_bin_edges(a, bins=10, range=None, weights=None):  # pylint: disable=redefined-builtin
+    """
+    Function to calculate only the edges of the bins used by the histogram function.
+
+    Note:
+        String values for `bins` are not supported. `weights` is accepted for
+        compatibility and is currently not used.
+
+    Args:
+        a (Union[int, float, bool, list, tuple, Tensor]): Input data. The histogram
+            is computed over the flattened array.
+        bins (Union[int, tuple, list, Tensor]): If `bins` is an int, it defines the number
+            of equal-width bins in the given range (10, by default). If `bins` is a
+            sequence, it defines the bin edges, including the rightmost edge,
+            allowing for non-uniform bin widths.
+        range ((float, float), optional): The lower and upper range of the bins. If
+            not provided, `range` is simply ``(a.min(), a.max())``. Values outside
+            the range are ignored. The first element of the range must be less than
+            or equal to the second.
+
+    Returns:
+        Tensor, the edges to pass into `histogram`.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Raises:
+        ValueError: if `bins` is an array and not one-dimensional.
+
+    Examples:
+        >>> import mindspore.numpy as np
+        >>> arr = np.array([0, 0, 0, 1, 2, 3, 3, 4, 5])
+        >>> print(np.histogram_bin_edges(arr, bins=2))
+        [0.  2.5 5. ]
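+        >>> print(np.histogram_bin_edges(arr, bins=4, range=(0, 5)))
+        [0.   1.25 2.5  3.75 5.  ]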
+    """
+    if isinstance(bins, (tuple, list, Tensor)):
+        bins = _to_tensor(bins)
+        if F.rank(bins) != 1:
+            _raise_value_error('`bins` must be 1d, when an array')
+        return bins
+    if isinstance(bins, str):
+        # linspace does not support Tensor for num
+        _raise_unimplemented_error('string value for `bins` not implemented')
+    a = _to_tensor(a).ravel().astype(mstype.float32)
+    if range is None:
+        start = F.reduce_min(a)
+        end = F.reduce_max(a)
+    else:
+        start, end = _to_tensor(*range)
+    no_range = (end - start) == 0
+    start = where(no_range, start - 0.5, start)
+    end = where(no_range, end + 0.5, end)
+    return linspace(start, end, bins + 1)
+
+
+def _pad_empty(arr, pad_width):
+    """
+    pads the array with uninitialized values, used in mode: "empty"
+    """
+    dtype = arr.dtype
+    for i in range(arr.ndim):
+        shape = arr.shape
+        pad_before = ()
+        pad_after = ()
+        # To avoid any memory issues, we don't make tensor with 0s in their shapes
+        if pad_width[i][0] > 0:
+            pad_before += (empty(_tuple_setitem(shape, i, pad_width[i][0]), dtype=dtype),)
+        if pad_width[i][1] > 0:
+            pad_after += (empty(_tuple_setitem(shape, i, pad_width[i][1]), dtype=dtype),)
+        tensor_with_pad = pad_before + (arr,) + pad_after
+        arr = concatenate(tensor_with_pad, axis=i)
+    return arr
+
+
+def _pad_constant(arr, pad_width, value):
+    """
+    pads the array with constant values, used in mode: "constant"
+    """
+    dtype = arr.dtype
+    for i in range(arr.ndim):
+        shape = arr.shape
+        pad_before = ()
+        pad_after = ()
+        # To avoid any memory issues, we don't make tensor with 0s in their shapes
+        if pad_width[i][0] > 0:
+            pad_before += (full(_tuple_setitem(shape, i, pad_width[i][0]), value[i][0], dtype=dtype),)
+        if pad_width[i][1] > 0:
+            pad_after += (full(_tuple_setitem(shape, i, pad_width[i][1]), value[i][1], dtype=dtype),)
+        tensor_with_pad = pad_before + (arr,) + pad_after
+        arr = concatenate(tensor_with_pad, axis=i)
+    return arr
+
+
+def _pad_statistic(arr, pad_width, stat_length, stat_op):
+    """
+    pads the array with values calculated along the given axis, used in mode: "maximum",
+    "minimum", "mean"
+    """
+    ndim = arr.ndim
+    shape = arr.shape
+    if stat_length is None:
+        stat_length = _make_stat_length(shape)
+    else:
+        stat_length = _convert_pad_to_nd(stat_length, ndim)
+    stat_length = _limit_stat_length(stat_length, shape)
+    for i in range(ndim):
+        pad_before = stat_op(_slice_along_axis(arr, i, 0, stat_length[i][0]), i)
+        pad_before = (F.tile(pad_before, _tuple_setitem((1,)*ndim, i, pad_width[i][0])),)
+        pad_after = stat_op(_slice_along_axis(arr, i, shape[i]-stat_length[i][1], shape[i]), i)
+        pad_after = (F.tile(pad_after, _tuple_setitem((1,)*ndim, i, pad_width[i][1])),)
+        tensor_with_pad = pad_before + (arr,) + pad_after
+        arr = concatenate(tensor_with_pad, axis=i)
+    return arr
+
+
+def _pad_edge(arr, pad_width):
+    """pad_edge is equivalent to _pad_statistic with stat_length=1, used in mode: "edge"."""
+    def identity_op(arr, axis):
+        return arr
+    return _pad_statistic(arr, pad_width, 1, identity_op)
+
+
+def _pad_wrap(arr, pad_width):
+    """The behaviour of wrap mode is consistent with jax.numpy, used in mode: "wrap"."""
+    ndim = arr.ndim
+    shape = arr.shape
+    for i in range(ndim):
+        padsize_before = pad_width[i][0] % shape[i]
+        padsize_after = pad_width[i][1] % shape[i]
+        total_repeats = pad_width[i][0] // shape[i] + 1 + pad_width[i][1] // shape[i]
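+        # the modulo terms above are the leftover partial copies at each end; the
+        # whole copies of the array are produced below by tiling it total_repeats times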
+        tensor_with_pad = ()
+        # To avoid any memory issues, we don't make tensor with 0s in their shapes
+        if padsize_before > 0:
+            tensor_with_pad += (_slice_along_axis(arr, i, shape[i]-padsize_before,
+                                                  shape[i]),)
+        tensor_with_pad += (F.tile(arr, _tuple_setitem((1,)*ndim, i, total_repeats)),)
+        if padsize_after > 0:
+            tensor_with_pad += (_slice_along_axis(arr, i, 0, padsize_after),)
+        arr = concatenate(tensor_with_pad, axis=i)
+    return arr
+
+
+def _pad_linear(arr, pad_width, end_values):
+    """Pads the arr with linear range values, used in mode: "linear_ramp"."""
+    ndim = arr.ndim
+    shape = arr.shape
+    dtype = arr.dtype
+    end_values = _convert_pad_to_nd(end_values, ndim)
+    for i in range(ndim):
+        # shape [..., 1, ...]
+        left_value = _slice_along_axis(arr, i, 0, 1)
+        right_value = _slice_along_axis(arr, i, shape[i]-1, shape[i])
+        pad_before = ()
+        pad_after = ()
+        if pad_width[i][0] > 0:
+            # shape [..., pad_width[i][0], ...]
+            pad_before = (linspace(end_values[i][0], left_value, num=pad_width[i][0],
+                                   endpoint=False, dtype=dtype, axis=i).squeeze(i+1),)
+        if pad_width[i][1] > 0:
+            # shape [..., pad_width[i][1], ...]
+            pad_after = linspace(right_value, end_values[i][1], num=pad_width[i][1]+1,
+                                 endpoint=True, dtype=dtype, axis=i).squeeze(i+1)
+            pad_after = (_slice_along_axis(pad_after, i, 1, pad_width[i][1]+1),)
+        tensor_with_pad = pad_before + (arr,) + pad_after
+        arr = concatenate(tensor_with_pad, axis=i)
+    return arr
+
+
+def _pad_symmetric(arr, pad_width, reflect_type):
+    """pad the array with symmetric paddings"""
+    for i in range(arr.ndim):
+        array_length = arr.shape[i]
+
+        has_pad_before = (pad_width[i][0] > 0)
+        has_pad_after = (pad_width[i][1] > 0)
+
+        edge_before = _slice_along_axis(arr, i, 0, 1)
+        edge_end = _slice_along_axis(arr, i, array_length-1, array_length)
+        times_to_pad_before = pad_width[i][0] // array_length + 1
+        additional_pad_before = pad_width[i][0] % array_length
+        times_to_pad_after = pad_width[i][1] // array_length + 1
+        additional_pad_after = pad_width[i][1] % array_length
+        curr_pad = None
+        if has_pad_before:
+            # Deal with paddings before the original array
+            for times in range(times_to_pad_before):
+                if times < times_to_pad_before - 1:
+                    endpoint = array_length
+                else:
+                    endpoint = additional_pad_before
+                if endpoint != 0:
+                    curr_pad = _slice_along_axis(arr, i, 0, endpoint)
+                    curr_pad = flip(curr_pad, axis=i)
+                    if reflect_type == "odd":
+                        curr_pad = 2 * edge_before - curr_pad
+                    arr = P.Concat(i)((curr_pad, arr))
+                    edge_before = _slice_along_axis(arr, i, 0, 1)
+        if has_pad_after:
+            # Deal with paddings after the original array
+            for times in range(times_to_pad_after):
+                if times < times_to_pad_after - 1:
+                    startpoint = arr.shape[i] - array_length
+                else:
+                    startpoint = arr.shape[i] - additional_pad_after
+                if startpoint != arr.shape[i]:
+                    curr_pad = _slice_along_axis(arr, i, startpoint, arr.shape[i])
+                    curr_pad = flip(curr_pad, axis=i)
+                    if reflect_type == "odd":
+                        curr_pad = 2 * edge_end - curr_pad
+                    arr = P.Concat(i)((arr, curr_pad))
+                    edge_end = _slice_along_axis(arr, i, arr.shape[i]-1, arr.shape[i])
+    return arr
+
+
+def _pad_reflect(arr, pad_width, reflect_type):
+    """
+    pad the array with reflect paddings; this is very similar to symmetric paddings,
+    but differs in how edges are selected.
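+    For reflect, the edge sample itself is not mirrored, so each reflected
+    period spans array_length - 1 elements (pad_size below).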
+    """
+    # pylint: disable=too-many-nested-blocks
+    for i in range(arr.ndim):
+        array_length = arr.shape[i]
+        if array_length == 1:
+            total_repeats = pad_width[i][0] + pad_width[i][1] + 1
+            arr = F.tile(arr, _tuple_setitem((1,)*arr.ndim, i, total_repeats))
+        else:
+            has_pad_before = (pad_width[i][0] > 0)
+            has_pad_after = (pad_width[i][1] > 0)
+
+            edge_before = _slice_along_axis(arr, i, 0, 1)
+            edge_end = _slice_along_axis(arr, i, array_length-1, array_length)
+            pad_size = array_length - 1
+            times_to_pad_before = pad_width[i][0] // pad_size + 1
+            additional_pad_before = pad_width[i][0] % pad_size
+            times_to_pad_after = pad_width[i][1] // pad_size + 1
+            additional_pad_after = pad_width[i][1] % pad_size
+            curr_pad = None
+            if has_pad_before:
+                # Deal with paddings before the original array
+                for times in range(times_to_pad_before):
+                    if times < times_to_pad_before - 1:
+                        endpoint = array_length
+                    else:
+                        endpoint = additional_pad_before + 1
+                    if endpoint != 1:
+                        curr_pad = _slice_along_axis(arr, i, 1, endpoint)
+                        curr_pad = flip(curr_pad, axis=i)
+                        if reflect_type == "odd":
+                            curr_pad = 2 * edge_before - curr_pad
+                        arr = P.Concat(i)((curr_pad, arr))
+                        edge_before = _slice_along_axis(arr, i, 0, 1)
+            if has_pad_after:
+                # Deal with paddings after the original array
+                for times in range(times_to_pad_after):
+                    if times < times_to_pad_after - 1:
+                        startpoint = arr.shape[i] - array_length
+                    else:
+                        startpoint = arr.shape[i] - additional_pad_after - 1
+                    if startpoint != arr.shape[i]-1:
+                        curr_pad = _slice_along_axis(arr, i, startpoint, arr.shape[i]-1)
+                        curr_pad = flip(curr_pad, axis=i)
+                        if reflect_type == "odd":
+                            curr_pad = 2 * edge_end - curr_pad
+                        arr = P.Concat(i)((arr, curr_pad))
+                        edge_end = _slice_along_axis(arr, i, arr.shape[i]-1, arr.shape[i])
+    return arr
+
+
+def _pad_func(arr, pad_width, func, **kwargs):
+    """applies the padding function over each axis."""
+    # first creates a padded array with fixed length
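+    # each axis is then handed to `func` as 1-d slices via apply_along_axis,
+    # following the numpy padding-function protocol noted below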
+    arr_dim = arr.ndim
+    pad_width = _convert_pad_to_nd(pad_width, arr_dim)
+    arr = _pad_empty(arr, pad_width)
+    for i in range(arr_dim):
+        # function signature: padding_func(tensor, iaxis_pad_width, iaxis, kwargs)
+        arr = apply_along_axis(func, i, arr, pad_width[i], i, kwargs)
+    return arr
+
+
+@constexpr
+def _make_stat_length(shape):
+    """converts the stat_length values."""
+    return tuple((shape[i], shape[i]) for i, _ in enumerate(shape))
+
+
+@constexpr
+def _limit_stat_length(stat_length, shape):
+    """limits the stat_length to current array length along given dimension."""
+    return tuple((min(stat_pair[0], shape[i]), min(stat_pair[1], shape[i])) for i, stat_pair in enumerate(stat_length))
+
+
+@constexpr
+def _convert_pad_to_nd(pad_values, ndim):
+    """broadcasts the pad_values to (ndim * 2)"""
+    if not isinstance(pad_values, (int, list, tuple, Tensor)):
+        raise TypeError(
+            "pad_width, stat_length, constant_values or end_values should only be int, list, tuple or tensor")
+    pad_tensor = _to_tensor(pad_values)
+    pad_shape = pad_tensor.shape
+    if not pad_shape:
+        pad_values = tuple((((pad_values,) * 2) for i in range(ndim)))
+    elif pad_shape == (1,):
+        pad_values = tuple((tuple(pad_values) * 2) for i in range(ndim))
+    elif pad_shape == (2,):
+        pad_values = tuple(tuple(pad_values) for i in range(ndim))
+    elif pad_shape == (1, 2):
+        pad_values = tuple(tuple(pad_values[0]) for i in range(ndim))
+    elif pad_shape == (ndim, 2):
+        pad_values = tuple(tuple(pad_pair) for pad_pair in pad_values)
+    else:
+        raise ValueError(f"input values must be able to broadcast to {(ndim, 2)}")
+    return pad_values
+
+
+def pad(arr, pad_width, mode="constant", stat_length=None, constant_values=0,
+        end_values=0, reflect_type="even", **kwargs):
+    """
+    Pads an array.
+
+    Note:
+        Currently, `median` mode is not supported. `reflect` and `symmetric` mode
+        only support the GPU backend.
+
+    Args:
+        arr (Union[list, tuple, Tensor]): The array to pad.
+        pad_width (Union[int, tuple, list]): Number of values padded to the edges of
+            each axis. :class:`((before_1, after_1), ... (before_N, after_N))` creates
+            unique pad widths for each axis. :class:`((before, after),)` yields same
+            before and after pad for each axis. :class:`(pad,)` or int is a shortcut
+            for :class:`before = after = pad width` for all axes.
+        mode (string, optional):
+            One of the following string values:
+
+            - constant (default): Pads with a constant value.
+            - edge: Pads with the edge values of `arr`.
+            - linear_ramp: Pads with the linear ramp between end_value and the `arr` edge value.
+            - maximum: Pads with the maximum value of all or part of the vector along each axis.
+            - mean: Pads with the mean value of all or part of the vector along each axis.
+            - median: Pads with the median value of all or part of the vector along each axis.
+            - minimum: Pads with the minimum value of all or part of the vector along each axis.
+            - reflect: Pads with the reflection of the vector mirrored on the first
+              and last values of the vector along each axis.
+            - symmetric: Pads with the reflection of the vector mirrored along the edge
+              of the `arr`.
+            - wrap: Pads with the wrap of the vector along the axis. The first values
+              are used to pad the end and the end values are used to pad the beginning.
+            - empty: Pads with undefined values.
+            - <function>: The padding function, if used, should modify and return a new 1-d tensor.
+              It has the following signature: :class:`padding_func(tensor, iaxis_pad_width, iaxis, kwargs)`
+        stat_length (Union[tuple, list, int], optional): Used in 'maximum', 'mean',
+            'median', and 'minimum'. Number of values at edge of each axis used
+            to calculate the statistic value. :class:`((before_1, after_1), ... (before_N, after_N))`
+            creates unique statistic lengths for each axis. :class:`((before, after),)`
+            yields same before and after statistic lengths for each axis. :class:`(stat_length,)`
+            or int is a shortcut for :class:`before = after = statistic length` for all
+            axes. Default is :class:`None`, to use the entire axis.
+        constant_values (Union[tuple, list, int], optional):
+            Used in :class:`constant mode`. The values to set the padded values for each
+            axis. :class:`((before_1, after_1), ... (before_N, after_N))` creates unique pad
+            constants for each axis. :class:`((before, after),)` yields same before and
+            after constants for each axis. :class:`(constant,)` or :class:`constant` is
+            a shortcut for :class:`before = after = constant` for all axes. Default is 0.
+        end_values (Union[tuple, list, int], optional): Used in 'linear_ramp'. The values
+            used for the ending value of the linear_ramp and that will form the edge of
+            the padded `arr`. :class:`((before_1, after_1), ... (before_N, after_N))` creates
+            unique end values for each axis. :class:`((before, after),)` yields same before
+            and after end values for each axis. :class:`(constant,)` or :class:`constant`
+            is a shortcut for :class:`before = after = constant` for all axes. Default is 0.
+        reflect_type (string, optional): Can choose between 'even' and 'odd', used in
+            'reflect' and 'symmetric'. The 'even' style is the default with an
+            unaltered reflection around the edge value. For the 'odd' style, the extended
+            part of the `arr` is created by subtracting the reflected values from two times
+            the edge value.
+
+    Returns:
+        Padded tensor of rank equal to `arr` with shape increased according to `pad_width`.
+
+    Raises:
+        TypeError: if `arr`, `pad_width`, `stat_length`, `constant_values` or `end_values`
+            have types not specified above.
+        ValueError: if `mode` cannot be recognized, or if `pad_width`, `stat_length`,
+            `constant_values`, `end_values` cannot broadcast to :class:`(arr.ndim, 2)`,
+            or if keyword arguments got unexpected inputs.
+        NotImplementedError: if `mode` is a function or 'median'.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore.numpy as np
+        >>> tensor = np.array([1., 2., 3., 4., 5.])
+        >>> print(np.pad(tensor, (3, 4)))
+        [0. 0. 0. 1. 2. 3. 4. 5. 0. 0. 0. 0.]
+        >>> print(np.pad(tensor, (3, 4), mode="wrap"))
+        [3. 4. 5. 1. 2. 3. 4. 5. 1. 2. 3. 4.]
+        >>> print(np.pad(tensor, (3, 4), mode="linear_ramp", end_values=(10, 10)))
+        [10.    7.    4.    1.    2.    3.    4.    5.    6.25  7.5   8.75 10.  ]
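+        >>> print(np.pad(tensor, (3, 4), mode="edge"))
+        [1. 1. 1. 1. 2. 3. 4. 5. 5. 5. 5. 5.]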
+    """
+    arr = _to_tensor(arr)
+    if arr.ndim == 0:
+        return arr
+    pad_width = _convert_pad_to_nd(pad_width, arr.ndim)
+    stat_func = {"maximum": _reduce_max_keepdims,
+                 "minimum": _reduce_min_keepdims,
+                 "mean": _reduce_mean_keepdims,
+                 "median": "not implemented"}
+
+    if mode not in ("constant", "maximum", "minimum", "mean", "median", "edge",
+                    "wrap", "linear_ramp", "symmetric", "reflect", "empty") and \
+            not _callable(arr, mode):
+        _raise_value_error("Input mode not supported.")
+
+    if mode == "constant":
+        constant_values = _convert_pad_to_nd(constant_values, arr.ndim)
+        return _pad_constant(arr, pad_width, constant_values)
+    if mode in ("maximum", "minimum", "mean", "median"):
+        # TODO: support median mode once P.Sort/P.Median is supported on GPU/CPU
+        if mode == "median":
+            _raise_unimplemented_error("median mode is not supported yet")
+        return _pad_statistic(arr, pad_width, stat_length, stat_func[mode])
+    if mode == "edge":
+        return _pad_edge(arr, pad_width)
+    if mode == "wrap":
+        return _pad_wrap(arr, pad_width)
+    if mode == "linear_ramp":
+        return _pad_linear(arr, pad_width, end_values)
+    if mode == "symmetric":
+        return _pad_symmetric(arr, pad_width, reflect_type)
+    if mode == "reflect":
+        return _pad_reflect(arr, pad_width, reflect_type)
+    if mode == "empty":
+        return _pad_empty(arr, pad_width)
+    return _pad_func(arr, pad_width, mode, **kwargs)
diff --git a/mindspore/numpy/array_ops.py b/mindspore/numpy/array_ops.py
index e0209351c2e..5c673ab13f4 100644
--- a/mindspore/numpy/array_ops.py
+++ b/mindspore/numpy/array_ops.py
@@ -24,14 +24,14 @@ from ..ops.primitive import constexpr
 from ..nn import Cell

 from .utils import _convert_list_tensor_to_tuple_tensor, _expand, _broadcast_to_shape, \
-    _check_input_tensor, _broadcast_to, _to_tensor
+    _check_input_tensor, _broadcast_to, _to_tensor, _callable
 from .utils_const import _check_axes_range, _check_start_normalize, \
     _raise_type_error, _raise_value_error, _infer_out_shape, _empty, _promote, \
     _check_same_type, _check_axis_valid, _add_unit_axes, _broadcast_tuples, \
     _check_is_float, _check_axis_in_range, _check_axis_type, _canonicalize_axis, \
     _list_comprehensions, _check_element_int, _is_shape_empty, _type_convert, \
-    _tuple_getitem, _expanded_shape, _seq_prod, _get_device, _tuple_setitem, \
-    _raise_unimplemented_error
+    _tuple_slice, _expanded_shape, _seq_prod, _tuple_setitem, _iota, \
+    _raise_unimplemented_error, _cumprod, _get_device

 # According to official numpy reference, the dimension of a numpy array must be less
 # than 32
@@ -697,6 +697,7 @@ def where(condition, x=None, y=None):
            [7 5]
           [7 5]]]
     """
+    condition, x, y = _to_tensor(condition, x, y)
     # type promotes input tensors
     dtype1 = F.dtype(x)
     dtype2 = F.dtype(y)
@@ -1781,16 +1782,15 @@ def take_along_axis(arr, indices, axis):
     ndim = F.rank(arr)
     if ndim != F.rank(indices):
         _raise_value_error('`indices` and `arr` must have the same number of dimensions')
-    _check_axis_in_range(axis, ndim)
-    axis = axis + ndim if axis < 0 else axis
+    axis = _check_axis_in_range(axis, ndim)

     shape_arr = F.shape(arr)
     shape_indices = F.shape(indices)
     # broadcasts indices against the shape of arr except at axis
-    indices = _broadcast_to(indices, _tuple_getitem(shape_indices, axis, False),
-                            _tuple_getitem(shape_arr, axis, False), ndim)
-    indices = _broadcast_to(indices, _tuple_getitem(shape_arr, axis + 1, False) +
-                            _tuple_getitem(shape_indices, axis + 1), shape_arr, ndim)
+    indices = _broadcast_to(indices, _tuple_slice(shape_indices, None, axis),
+                            _tuple_slice(shape_arr, None, axis), ndim)
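+    # the dims after `axis` are broadcast the same way, so `indices` now matches
+    # `arr` everywhere except along the gather axis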
+    indices = _broadcast_to(indices, _tuple_slice(shape_arr, None, axis + 1) +
+                            _tuple_slice(shape_indices, axis + 1, None), shape_arr, ndim)
     return F.gather_d(arr, axis, indices)

@@ -1801,18 +1801,21 @@ def _mod(x, y):
     return F.tensor_sub(x, prod)

-def _check_indices(size, indices, mode):
+def _check_indices(dims, indices, mode, allow_negative_index=True):
     """Checks whether indices are out of bounds."""
     shape = F.shape(indices)
     dtype = F.dtype(indices)
-    lowerbounds = F.fill(dtype, shape, -size)
-    upperbounds = F.fill(dtype, shape, size - 1)
+    if not allow_negative_index:
+        lowerbounds = F.fill(dtype, shape, 0)
+    else:
+        lowerbounds = F.fill(dtype, shape, -dims)
+    upperbounds = F.fill(dtype, shape, dims - 1)
     out_of_lowerbounds = F.tensor_lt(indices, lowerbounds)
     out_of_upperbounds = F.tensor_gt(indices, upperbounds)
     if mode == 'raise':
         _raise_unimplemented_error('"raise" mode is not implemented')
     if mode == 'wrap':
-        return _mod(indices, F.fill(dtype, shape, size))
+        return _mod(indices, F.fill(mstype.float32, shape, dims)).astype(dtype)
     zeros = F.fill(dtype, shape, 0)
     clipped = F.select(out_of_lowerbounds, zeros, indices)
     clipped = F.select(out_of_upperbounds, upperbounds, clipped)
@@ -1878,8 +1881,7 @@ def take(a, indices, axis=None, mode='clip'):
         a = ravel(a)
         axis = 0
     ndim = F.rank(a)
-    _check_axis_in_range(axis, ndim)
-    axis = axis + ndim if axis < 0 else axis
+    axis = _check_axis_in_range(axis, ndim)

     shape_a = F.shape(a)
     shape_indices = F.shape(indices)
@@ -1887,8 +1889,8 @@ def take(a, indices, axis=None, mode='clip'):
     indices = _check_indices(shape_a[axis], indices, mode)

     # reshapes indices to shape (Ni..., Nj..., Nk)
-    shape_ni = _tuple_getitem(shape_a, axis, False)
-    shape_nk = _tuple_getitem(shape_a, axis + 1)
+    shape_ni = _tuple_slice(shape_a, None, axis)
+    shape_nk = _tuple_slice(shape_a, axis + 1, None)
     shape_out = shape_ni + shape_indices + shape_nk
     shape_indices = _expanded_shape(ndim, size_indices, axis)
     indices = F.reshape(indices, shape_indices)
@@ -1948,18 +1950,17 @@ def repeat(a, repeats, axis=None):
         a = ravel(a)
         axis = 0
     ndim = F.rank(a)
-    _check_axis_in_range(axis, ndim)
-    axis = axis + ndim if axis < 0 else axis
+    axis = _check_axis_in_range(axis, ndim)
     if len(repeats) == 1:
         repeats = repeats[0]
         if repeats == 0:
             return _empty(F.dtype(a), (0,))
         return C.repeat_elements(a, repeats, axis)
     shape = F.shape(a)
-    size = shape[axis]
-    if len(repeats) != size:
+    dims = shape[axis]
+    if len(repeats) != dims:
         _raise_value_error('operands could not be broadcast together')
-    subs = split(a, size, axis)
+    subs = split(a, dims, axis)
     repeated_subs = []
     for sub, rep in zip(subs, repeats):
         if rep != 0:
@@ -2046,11 +2047,13 @@ def select(condlist, choicelist, default=0):
     Returns an array drawn from elements in `choicelist`, depending on conditions.

     Args:
-        condlist (array_like): The list of conditions which determine from which array
-            in `choicelist` the output elements are taken. When multiple conditions are
-            satisfied, the first one encountered in `condlist` is used.
-        choicelist (array_like): The list of arrays from which the output elements are
-            taken. It has to be of the same length as `condlist`.
+        condlist (Union[int, float, bool, list, tuple, Tensor]): The list of conditions
+            which determine from which array in `choicelist` the output elements are
+            taken. When multiple conditions are satisfied, the first one encountered in
+            `condlist` is used.
+        choicelist (Union[int, float, bool, list, tuple, Tensor]): The list of arrays
+            from which the output elements are taken. It has to be of the same length as
+            `condlist`.
It has to be of the same length as + `condlist`. default (scalar, optional): The element inserted in output when all conditions evaluate to `False`. @@ -2059,7 +2062,6 @@ def select(condlist, choicelist, default=0): `choicelist` where the `m-th` element of the corresponding array in `condlist` is `True`. - Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` @@ -2067,7 +2069,9 @@ def select(condlist, choicelist, default=0): ValueError: if ``len(condlist) != len(choicelist)``. Examples: - >>> condlist = [[True, True, True, False, False], [False, False, True, False, True]] + >>> import mindspore.numpy as np + >>> condlist = [[True, True, True, False, False], \ + [False, False, True, False, True]] >>> choicelist = [[0, 1, 2, 3, 4], [0, 1, 4, 9, 16]] >>> output = np.select(condlist, choicelist) >>> print(output) @@ -2076,32 +2080,481 @@ def select(condlist, choicelist, default=0): condlist, choicelist = _to_tensor(condlist, choicelist) shape_cond = F.shape(condlist) shape_choice = F.shape(choicelist) - if F.rank(condlist) == 0 or F.rank(condlist) == 0: + if F.rank(condlist) == 0 or F.rank(choicelist) == 0: _raise_value_error('input cannot be scalars') case_num = shape_cond[0] if shape_choice[0] != case_num: _raise_value_error('list of cases must be same length as list of conditions') + case_size_cond = _tuple_slice(shape_cond, 1, None) + case_size_choice = _tuple_slice(shape_choice, 1, None) # performs broadcast over the cases in condlist and choicelist - case_size = _infer_out_shape(shape_cond[1:], shape_choice[1:]) + case_size = _infer_out_shape(case_size_cond, case_size_choice) shape_broadcasted = (case_num,) + case_size ndim = len(shape_broadcasted) shape_cond_expanded = ((case_num,) + _list_comprehensions(ndim - F.rank(condlist), 1, True) + - shape_cond[1:]) + case_size_cond) condlist = _broadcast_to_shape(F.reshape(condlist, shape_cond_expanded), shape_broadcasted) shape_choice_expanded = ((case_num,) + _list_comprehensions(ndim - F.rank(choicelist), 1, True) + - shape_choice[1:]) + case_size_choice) choicelist = _broadcast_to_shape(F.reshape(choicelist, shape_choice_expanded), shape_broadcasted) slice_start = _list_comprehensions(ndim - 1, 0, True) slice_size = (1,) + case_size dtype = F.dtype(choicelist) - if _get_device() == 'CPU' and not _check_is_float(dtype): - # F.tensor_slice only supports float on CPU - choicelist = F.cast(choicelist, mstype.float32) - default_slice = F.fill(F.dtype(choicelist), slice_size, default) + if isinstance(default, Tensor): + default_slice = default.astype(F.dtype(choicelist)).reshape(slice_size) + else: + default_slice = F.fill(F.dtype(choicelist), slice_size, default) for i in range(case_num - 1, -1, -1): cond_slice = F.tensor_slice(condlist.astype(mstype.float32), (i,) + slice_start, slice_size) choice_slice = F.tensor_slice(choicelist, (i,) + slice_start, slice_size) default_slice = F.select(cond_slice.astype(mstype.bool_), choice_slice, default_slice) return F.reshape(default_slice, (case_size)).astype(dtype) + + +@constexpr +def _get_grid(shape): + """Returns a grid representing all the indices for an array with the given shape.""" + grids = [] + ndim = len(shape) + for i in range(ndim): + dim_grid = _iota(mstype.int32, shape[i]) + dim_shape = _expanded_shape(ndim, shape[i], i) + dim_grid = _broadcast_to_shape(dim_grid.reshape(dim_shape), shape) + grids.append(dim_grid) + return stack(grids, -1) + + +def choose(a, choices, mode='clip'): + """ + Construct an array from an index array and a list of arrays to choose from. 
+    Given an "index" array `a` of integers and a sequence of `n` arrays (choices),
+    `a` and each choice array are first broadcast, as necessary, to arrays of a
+    common shape; calling these `Ba` and `Bchoices[i], i = 0,...,n-1` we have that,
+    necessarily, ``Ba.shape == Bchoices[i].shape`` for each `i`. Then, a new array
+    with shape ``Ba.shape`` is created as follows:
+
+    - if ``mode='raise'``, then, first of all, each element of `a` (and thus `Ba`)
+      must be in the range ``[0, n-1]``; now, suppose that `i` (in that range) is
+      the value at the `(j0, j1, ..., jm)` position in `Ba` - then the value at
+      the same position in the new array is the value in ``Bchoices[i]`` at that
+      same position;
+
+    - if ``mode='wrap'``, values in `a` (and thus `Ba`) may be any (signed)
+      integer; modular arithmetic is used to map integers outside the
+      range ``[0, n-1]`` back into that range; and then the new array is
+      constructed as above;
+
+    - if ``mode='clip'``, values in `a` (and thus `Ba`) may be any (signed) integer;
+      negative integers are mapped to 0; values greater than `n-1` are mapped to
+      `n-1`; and then the new array is constructed as above.
+
+    Note:
+        Numpy argument `out` is not supported.
+        ``mode = 'raise'`` is not supported, and the default mode is 'clip' instead.
+
+    Args:
+        a (int array): This array must contain integers in ``[0, n-1]``, where `n` is
+            the number of choices, unless ``mode='wrap'`` or ``mode='clip'``, in which
+            cases any integers are permissible.
+        choices (sequence of arrays): Choice arrays. `a` and all of the `choices` must
+            be broadcastable to the same shape. If `choices` is itself an array, then
+            its outermost dimension (i.e., the one corresponding to ``choices.shape[0]``)
+            is taken as defining the "sequence".
+        mode ('raise', 'wrap', 'clip', optional): Specifies how indices outside
+            ``[0, n-1]`` will be treated:
+
+            'raise' - raise an error;
+
+            'wrap' - wrap around;
+
+            'clip' - clip to the range. 'clip' mode means that all indices that are
+            too large are replaced by the index that addresses the last element
+            along that axis. Note that this disables indexing with negative numbers.
+
+    Returns:
+        Tensor, the merged result.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Raises:
+        ValueError: if `a` and any of the `choices` cannot be broadcast to the
+            same shape, or if the inputs are zero-dimensional.
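+
+    For intuition, the 'wrap' and 'clip' index rules above can be reproduced in
+    plain Python (an illustrative sketch only, not part of the implementation;
+    `n` stands for the number of choices):
+
+        >>> n = 4
+        >>> [i % n for i in (2, 4, 1, 0)]                  # 'wrap': modular arithmetic
+        [2, 0, 1, 0]
+        >>> [min(max(i, 0), n - 1) for i in (2, 4, 1, 0)]  # 'clip': clamp into [0, n-1]
+        [2, 3, 1, 0]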
+ + Examples: + >>> import mindspore.numpy as np + >>> choices = [[0, 1, 2, 3], [10, 11, 12, 13], + [20, 21, 22, 23], [30, 31, 32, 33]] + >>> print(np.choose([2, 3, 1, 0], choices)) + [20 31 12 3] + >>> print(np.choose([2, 4, 1, 0], choices, mode='clip')) + [20 31 12 3] + >>> print(np.choose([2, 4, 1, 0], choices, mode='wrap')) + [20 1 12 3] + >>> a = [[1, 0, 1], [0, 1, 0], [1, 0, 1]] + >>> choices = [-10, 10] + >>> print(np.choose(a, choices)) + [[ 10 -10 10] + [-10 10 -10] + [ 10 -10 10]] + >>> a = np.array([0, 1]).reshape((2,1,1)) + >>> c1 = np.array([1, 2, 3]).reshape((1,3,1)) + >>> c2 = np.array([-1, -2, -3, -4, -5]).reshape((1,1,5)) + >>> print(np.choose(a, (c1, c2))) + [[[ 1 1 1 1 1] + [ 2 2 2 2 2] + [ 3 3 3 3 3]] + + [[-1 -2 -3 -4 -5] + [-1 -2 -3 -4 -5] + [-1 -2 -3 -4 -5]]] + """ + a = _to_tensor(a) + if isinstance(choices, (tuple, list)): + # broadcasts choices to the same shape if choices is a sequence + choices = _to_tensor(*choices) + shapes = () + for choice in choices: + shapes += (F.shape(choice),) + shape_choice = _infer_out_shape(F.shape(a), *shapes) + tmp = [] + for choice in choices: + tmp.append(broadcast_to(choice, shape_choice)) + choices = stack(tmp) + else: + choices = _to_tensor(choices) + shape_choice = _infer_out_shape(F.shape(a), F.shape(choices)[1:]) + choices = broadcast_to(choices, (F.shape(choices)[0],) + shape_choice) + + if F.rank(a) == 0 or F.rank(choices) == 0: + _raise_value_error('input cannot be scalars') + a = broadcast_to(a, shape_choice) + dtype = F.dtype(choices) + # adjusts dtype for F.tensor_mul and F.gather_nd + a = a.astype(mstype.int32) + choices = choices.astype(mstype.int32) + a = _check_indices(F.shape(choices)[0], a, mode, allow_negative_index=False) + grid = _get_grid(F.shape(a)) + indices = concatenate((a.reshape(F.shape(a) + (1,)), grid), -1) + return F.gather_nd(choices, indices).astype(dtype) + + +def size(a, axis=None): + """ + Returns the number of elements along a given axis. + + Args: + a (Union[int, float, bool, list, tuple, Tensor]): Input data. + axis (int): Axis along which the elements are counted. Default: None. + If None, give the total number of elements. + + Returns: + Number of elements along the specified axis. + + Supported Platforms: + ``Ascend`` ``GPU`` ``CPU`` + + Raises: + TypeError: If input is not array_like or `axis` is not int or tuple of ints. + ValueError: If any axis is out of range or duplicate axes exist. + + Examples: + >>> import mindspore.numpy as np + >>> x = np.arange(10).reshape(2, 5).astype('float32') + >>> print(np.size(x)) + 10 + >>> print(np.size(x, axis=1)) + 5 + """ + a = _to_tensor(a) + if axis is None: + return a.size + if not isinstance(axis, int): + _raise_type_error("axis argument should be integer.") + axis = _canonicalize_axis(axis, a.ndim) + return a.shape[axis] + + +def array_str(a): + """ + Returns a string representation of the data in an array. + + The data in the array is returned as a single string. + This function is similar to array_repr, the difference being that array_repr also + returns information on the kind of array and its data type. + + Note: + Numpy argument `max_line_width`, `precision` and `suppress_small` are not supported. + + Args: + a (Union[int, float, bool, list, tuple, Tensor]): Input data. + + Returns: + String. + + Supported Platforms: + ``Ascend`` ``GPU`` ``CPU`` + + Raises: + TypeError: If input is not array_like. 
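+
+    Since the implementation simply delegates to the tensor's ``__str__``, the
+    result should match Python's built-in ``str`` (a sanity check, assuming the
+    standard Tensor printing):
+
+        >>> import mindspore.numpy as np
+        >>> x = np.arange(5)
+        >>> np.array_str(x) == str(x)
+        True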
+ + Examples: + >>> import mindspore.numpy as np + >>> x = np.arange(5) + >>> np.array_str(x) + '[0 1 2 3 4]' + """ + a = _to_tensor(a) + return a.__str__() + + +def apply_along_axis(func1d, axis, arr, *args, **kwargs): + """ + Applies a function to 1-D slices along the given axis. + Executes ``func1d(a, *args, **kwargs)`` where `func1d` operates on 1-D arrays and `a` is a + 1-D slice of arr along axis. + + Args: + func1d (function): Maps `(M,) -> (Nj…)`. This function should accept 1-D arrays. It is + applied to 1-D slices of arr along the specified axis. + axis (int): Axis along which arr is sliced. + arr (Tensor): Input array with shape `(Ni…, M, Nk…)`. + args (any): Additional arguments to `func1d`. + kwargs (any): Additional named arguments to `func1d`. + + Returns: + Tensor with shape `(Ni…, Nj…, Nk…)`, the output array. Its shape is identical to the + shape of `arr`, except along the `axis` dimension. This axis is removed, and replaced + with new dimensions equal to the shape of the return value of `func1d`. So if `func1d` + returns a scalar, the output will have one fewer dimensions than `arr`. + + Supported Platforms: + ``Ascend`` ``GPU`` ``CPU`` + + Raises: + ValueError: if axis is out of the range. + + Examples: + >>> import mindspore.numpy as np + >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]]) + >>> print(np.apply_along_axis(np.diag, -1, b)) + [[[1 0 0] + [0 2 0] + [0 0 3]] + + [[4 0 0] + [0 5 0] + [0 0 6]] + + [[7 0 0] + [0 8 0] + [0 0 9]]] + """ + ndim = F.rank(arr) + shape = F.shape(arr) + axis = _check_axis_in_range(axis, ndim) + arr = moveaxis(arr, axis, -1) + arr = F.reshape(arr, (-1, F.shape(arr)[-1])) + slices = [] + for i in range(F.shape(arr)[0]): + slices.append(func1d(arr[i], *args, **kwargs)) + stacked_slices = stack(slices) + shape_stacked = (_tuple_slice(shape, None, axis) + _tuple_slice(shape, axis + 1, None) + + _tuple_slice(F.shape(stacked_slices), 1, None)) + res = F.reshape(stacked_slices, shape_stacked) + + # moves the dimensions returned by `func1d` back to `axis` + ndim_func = F.rank(res) - ndim + 1 + if ndim_func >= 1: + res = moveaxis(res, F.make_range(ndim - 1, F.rank(res)), + F.make_range(axis, axis + ndim_func)) + return res + + +def _stack_arrays(arrs): + """Stacks a sequence of Tensor""" + if isinstance(arrs, (tuple, list)): + tensor_list = [] + for arr in arrs: + tensor_list.append(_to_tensor(arr)) + return stack(tensor_list) + return atleast_1d(_to_tensor(arrs)) + + +def piecewise(x, condlist, funclist, *args, **kw): + """ + Evaluates a piecewise-defined function. + Given a set of conditions and corresponding functions, evaluate each function on the input + data wherever its condition is true. + + Args: + x (Union[int, float, bool, list, tuple, Tensor]): The input domain. + condlist (Union[bool, list of bool Tensor]): Each boolean array corresponds to a + function in `funclist`. Wherever `condlist[i]` is True, `funclist[i](x)` is used as + the output value. Each boolean array in `condlist` selects a piece of `x`, and + should therefore be of the same shape as `x`. The length of `condlist` must + correspond to that of `funclist`. If one extra function is given, i.e. if + ``len(funclist) == len(condlist) + 1``, then that extra function is the default + value, used wherever all conditions are false. + funclist (Union[list of callables, list of scalars]): Each function is evaluated over + `x` wherever its corresponding condition is True. It should take a 1d array as input + and give an 1d array or a scalar value as output. 
If, instead of a callable, a scalar
+            is provided then a constant function ``(lambda x: scalar)`` is assumed.
+        args (any): Any further arguments given to `piecewise` are passed to the functions upon
+            execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then each function is
+            called as ``f(x, 1, 'a')``.
+        kw (any): Keyword arguments used in calling `piecewise` are passed to the functions upon
+            execution, i.e., if called ``piecewise(..., ..., alpha=1)``, then each function is
+            called as ``f(x, alpha=1)``.
+
+    Returns:
+        Tensor, the output is the same shape and type as `x` and is found by calling the
+        functions in `funclist` on the appropriate portions of `x`, as defined by the boolean
+        arrays in `condlist`. Portions not covered by any condition have a default value of 0.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Raises:
+        ValueError: if the length of `funclist` is not ``len(condlist)`` or
+            ``len(condlist) + 1``.
+
+    Examples:
+        >>> import mindspore.numpy as np
+        >>> x = np.linspace(-2.5, 2.5, 6)
+        >>> print(np.piecewise(x, [x < 0, x >= 0], [-1, 1]))
+        [-1 -1 -1  1  1  1]
+    """
+    x = _to_tensor(x)
+    choicelist = funclist
+    if isinstance(funclist, (tuple, list)):
+        if _callable(x, funclist[0]):
+            # evaluates each callable over the full domain; `select` below picks
+            # the piece that applies at each position
+            choicelist = []
+            for func in funclist:
+                choicelist.append(func(x, *args, **kw))
+    condlist = _stack_arrays(condlist)
+    choicelist = _stack_arrays(choicelist)
+
+    default = 0
+    n1 = len(condlist)
+    n2 = len(funclist)
+    if n1 + 1 == n2:
+        # the extra function supplies the default value where no condition holds
+        default = choicelist[-1]
+        choicelist = choicelist[:-1]
+    elif n1 != n2:
+        _raise_value_error('the number of choices should be either equal to conditions or ', n1 + 1)
+    return select(condlist, choicelist, default=default)
+
+
+def unravel_index(indices, shape, order='C'):
+    """
+    Converts a flat index or array of flat indices into a tuple of coordinate arrays.
+
+    Note:
+        Out-of-bound indices are clipped by the boundaries of `shape` instead of raising
+        an error.
+
+    Args:
+        indices (Union[int, float, bool, list, tuple, Tensor]): An integer array whose elements
+            are indices into the flattened version of an array of dimensions `shape`.
+        shape (tuple of ints): The shape of the array to use for unraveling indices.
+        order (Union['C', 'F'], optional): Determines whether the indices should be viewed as
+            indexing in row-major (C-style) or column-major (Fortran-style) order.
+
+    Returns:
+        Tensor, each array in the tuple has the same shape as the indices array.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Raises:
+        ValueError: if `order` is not 'C' or 'F'.
+
+    Examples:
+        >>> import mindspore.numpy as np
+        >>> print(np.unravel_index([22, 41, 37], (7,6)))
+        (Tensor(shape=[3], dtype=Int32, value= [3, 6, 6]),
+        Tensor(shape=[3], dtype=Int32, value= [4, 5, 1]))
+        >>> print(np.unravel_index([31, 41, 13], (7,6), order='F'))
+        (Tensor(shape=[3], dtype=Int32, value= [3, 6, 6]),
+        Tensor(shape=[3], dtype=Int32, value= [4, 5, 1]))
+    """
+    indices = _to_tensor(indices)
+    if order not in ('C', 'F'):
+        _raise_value_error('invalid order. 
Expected "C" or "F"') + if isinstance(shape, int): + shape = (shape,) + ndim = F.rank(indices) + if order == 'F': + sizes = _cumprod(shape) + else: + sizes = _cumprod(shape[::-1]) + sizes = _to_tensor(sizes[::-1] + (1,)) + sizes = F.reshape(sizes, (-1,) + _list_comprehensions(ndim, 1, True)) + total_size = sizes[0] + indices = where(indices > total_size - 1, total_size - 1, indices) + if _get_device() == 'GPU': + dtype = F.dtype(total_size) + lowerbounds = (-(total_size.astype(mstype.float32))).astype(dtype) + else: + lowerbounds = -total_size + indices = where(indices < lowerbounds, lowerbounds, indices) + res = _mod(indices, sizes[:-1])//sizes[1:] + + num = len(res) + if ndim == 0 and num == 1: + return res.ravel() + if order == 'F': + r = range(num - 1, -1, -1) + else: + r = range(num) + subs = () + for i in r: + subs += (res[i],) + return subs + + +def apply_over_axes(func, a, axes): + """ + Applies a function repeatedly over multiple axes. + + `func` is called as `res = func(a, axis)`, where `axis` is the first element of `axes`. + The result `res` of the function call must have either the same dimensions as `a` or + one less dimension. If `res` has one less dimension than `a`, a dimension is inserted before `axis`. + The call to `func` is then repeated for each axis in `axes`, with `res` as the first argument. + + Args: + func (function): This function must take two arguments, `func(a, axis)`. + a (Union[int, float, bool, list, tuple, Tensor]): Input tensor. + axes (Union[int, list, tuple]): Axes over which `func` is applied; the elements must be integers. + + Returns: + Tensor. The number of dimensions is the same as `a`, but the shape can be different. + This depends on whether `func` changes the shape of its output with respect to its input. + + Raises: + TypeError: If input `a` is not array_like or `axes` is not int or sequence of ints. + ValueError: If any axis is out of range or duplicate axes exist. + + Supported Platforms: + ``Ascend`` ``GPU`` ``CPU`` + + Examples: + >>> import mindspore.numpy as np + >>> x = np.arange(10).reshape(2, 5).astype('float32') + >>> print(x) + [[0. 1. 2. 3. 4.] + [5. 6. 7. 8. 9.]] + >>> print(np.apply_over_axes(np.sum, x, axes=0)) + [[ 5. 7. 9. 11. 
13.]]
+    """
+    a = _to_tensor(a)
+    if isinstance(axes, int):
+        axes = (axes,)
+    res = a
+    for axis in axes:
+        res = func(res, axis=axis)
+        # restores the reduced dimension when `func` dropped it
+        res = F.expand_dims(res, axis) if res.ndim != a.ndim else res
+    if res.ndim != a.ndim:
+        _raise_value_error("function is not returning a tensor of the correct shape")
+    return res
diff --git a/mindspore/numpy/logic_ops.py b/mindspore/numpy/logic_ops.py
index 1c5b7b0a7c7..60b99ed2b70 100644
--- a/mindspore/numpy/logic_ops.py
+++ b/mindspore/numpy/logic_ops.py
@@ -16,15 +16,14 @@
 
 from ..ops import functional as F
-from ..ops.primitive import constexpr
 from ..common import dtype as mstype
 from ..common import Tensor
-from .._c_expression import typing
 
 from .math_ops import _apply_tensor_op, absolute
-from .array_creations import zeros, ones, empty
+from .array_creations import zeros, ones, empty, asarray
 from .utils import _check_input_tensor, _to_tensor, _isnan
-from .utils_const import _raise_type_error, _is_shape_empty, _infer_out_shape
+from .utils_const import _raise_type_error, _is_shape_empty, _infer_out_shape, _check_same_type, \
+    _check_axis_type, _canonicalize_axis, _can_broadcast, _isscalar
 
 
 def not_equal(x1, x2, dtype=None):
@@ -410,13 +409,6 @@ def isneginf(x):
     return _is_sign_inf(x, F.tensor_lt)
 
 
-@constexpr
-def _isscalar(x):
-    """Returns True if x is a scalar type"""
-    return isinstance(x, (typing.Number, typing.Int, typing.UInt, typing.Float,
-                          typing.Bool, typing.String))
-
-
 def isscalar(element):
     """
     Returns True if the type of element is a scalar type.
@@ -534,8 +526,9 @@ def in1d(ar1, ar2, invert=False):
     not rely on the uniqueness of the input arrays.
 
     Args:
-        ar1 (array_like): Input array with shape `(M,)`.
-        ar2 (array_like): The values against which to test each value of `ar1`.
+        ar1 (Union[int, float, bool, list, tuple, Tensor]): Input array with shape `(M,)`.
+        ar2 (Union[int, float, bool, list, tuple, Tensor]): The values against which
+            to test each value of `ar1`.
         invert (boolean, optional): If True, the values in the returned array are inverted
             (that is, False where an element of `ar1` is in `ar2` and True
             otherwise). Default is False.
@@ -746,3 +739,167 @@ def logical_xor(x1, x2, dtype=None):
     y1 = F.logical_or(x1, x2)
     y2 = F.logical_or(F.logical_not(x1), F.logical_not(x2))
     return _apply_tensor_op(F.logical_and, y1, y2, dtype=dtype)
+
+
+def array_equal(a1, a2, equal_nan=False):
+    """
+    Returns `True` if input arrays have same shapes and all elements equal.
+
+    Note:
+        In mindspore, a bool tensor is returned instead, since in Graph mode, the
+        value cannot be traced and computed at compile time.
+
+    Args:
+        a1/a2 (Union[int, float, bool, list, tuple, Tensor]): Input arrays.
+        equal_nan (bool): Whether to compare NaNs as equal.
+
+    Returns:
+        Scalar bool tensor, value is `True` if inputs are equal, `False` otherwise.
+
+    Raises:
+        TypeError: If inputs have types not specified above.
+
+    Supported Platforms:
+        ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore.numpy as np
+        >>> a = [0,1,2]
+        >>> b = [[0,1,2], [0,1,2]]
+        >>> print(np.array_equal(a,b))
+        False
+    """
+    a1 = asarray(a1)
+    a2 = asarray(a2)
+    if not isinstance(equal_nan, bool):
+        _raise_type_error("equal_nan must be bool.")
+    if a1.shape == a2.shape:
+        res = equal(a1, a2)
+        if equal_nan:
+            # NaN != NaN elementwise, so positions where both inputs are NaN
+            # are patched back to True
+            res = logical_or(res, logical_and(isnan(a1), isnan(a2)))
+        return res.all()
+    return _to_tensor(False)
+
+
+def array_equiv(a1, a2):
+    """
+    Returns `True` if input arrays are shape consistent and all elements equal.
+
+    Shape consistent means they are either the same shape, or one input array can
+    be broadcast to create the same shape as the other one.
+
+    Note:
+        In mindspore, a bool tensor is returned instead, since in Graph mode, the
+        value cannot be traced and computed at compile time.
+
+    Args:
+        a1/a2 (Union[int, float, bool, list, tuple, Tensor]): Input arrays.
+
+    Returns:
+        Scalar bool tensor, value is `True` if inputs are equivalent, `False` otherwise.
+
+    Raises:
+        TypeError: If inputs have types not specified above.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore.numpy as np
+        >>> a = [0,1,2]
+        >>> b = [[0,1,2], [0,1,2]]
+        >>> print(np.array_equiv(a,b))
+        True
+    """
+    a1 = asarray(a1)
+    a2 = asarray(a2)
+    if _can_broadcast(a1.shape, a2.shape):
+        return equal(a1, a2).all()
+    return _to_tensor(False)
+
+
+def signbit(x, dtype=None):
+    """
+    Returns element-wise `True` where the sign bit is set (less than zero).
+
+    Note:
+        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and
+        `extobj` are not supported.
+
+    Args:
+        x (Union[int, float, bool, list, tuple, Tensor]): The input value(s).
+        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
+            output Tensor.
+
+    Returns:
+        Tensor.
+
+    Raises:
+        TypeError: If input is not array_like or `dtype` is not `None` or `bool`.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore.numpy as np
+        >>> x = np.array([1, -2.3, 2.1]).astype('float32')
+        >>> output = np.signbit(x)
+        >>> print(output)
+        [False True False]
+    """
+    if dtype is not None and not _check_same_type(dtype, mstype.bool_):
+        _raise_type_error("Casting was not allowed for signbit.")
+    x = _to_tensor(x)
+    res = F.less(x, 0)
+    if dtype is not None and not _check_same_type(F.dtype(res), dtype):
+        res = F.cast(res, dtype)
+    return res
+
+
+def sometrue(a, axis=None, keepdims=False):
+    """
+    Tests whether any array element along a given axis evaluates to True.
+
+    Returns a single boolean unless `axis` is not None.
+
+    Args:
+        a (Union[int, float, bool, list, tuple, Tensor]): Input tensor or object that can be converted to an array.
+        axis (Union[None, int, tuple(int)]): Axis or axes along which a logical OR reduction is
+            performed. Default: None.
+            If None, perform a logical OR over all the dimensions of the input array.
+            If negative, it counts from the last to the first axis.
+            If tuple of ints, a reduction is performed on multiple axes, instead of a single axis or
+            all the axes as before.
+        keepdims (bool): Default: False.
+            If True, the axes which are reduced are left in the result as dimensions with size one.
+            With this option, the result will broadcast correctly against the input array.
+            If the default value is passed, then keepdims will not be passed through to the any method of
+            sub-classes of ndarray, however any non-default value will be. If the sub-class' method does not
+            implement keepdims any exceptions will be raised.
+
+    Returns:
+        Tensor of bool, a single boolean unless `axis` is not None.
+
+    Raises:
+        TypeError: If input is not array_like, or `axis` is not int or tuple of ints,
+            or `keepdims` is not integer.
+        ValueError: If any axis is out of range or duplicate axes exist.
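+
+    For example, reducing along an axis keeps one logical OR per slice; a sketch
+    of the expected behavior (exact bool-tensor print formatting may differ):
+
+        >>> import mindspore.numpy as np
+        >>> x = np.array([[0, 0], [0, 1]])
+        >>> print(np.sometrue(x, axis=0))
+        [False  True]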
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore.numpy as np
+        >>> x = np.array([1, -2.3, 2.1]).astype('float32')
+        >>> output = np.sometrue(x)
+        >>> print(output)
+        True
+    """
+    if not isinstance(keepdims, int):
+        _raise_type_error("integer argument expected, but got ", keepdims)
+    if axis is not None:
+        _check_axis_type(axis, True, True, False)
+        axis = _canonicalize_axis(axis, a.ndim)
+    a = _to_tensor(a)
+    keepdims = keepdims not in (0, False)
+    # any element differing from zero along `axis` makes the reduction True
+    return F.not_equal(a, 0).any(axis, keepdims)
diff --git a/mindspore/numpy/math_ops.py b/mindspore/numpy/math_ops.py
index 241eefde45d..f794b2a0bb6 100644
--- a/mindspore/numpy/math_ops.py
+++ b/mindspore/numpy/math_ops.py
@@ -15,6 +15,9 @@
 """math operations, the function docs are adapted from Numpy API."""
 import operator
 import functools
+import itertools
+import sys
+from numpy import dtype as nptype
 
 from ..ops import operations as P
 from ..ops import functional as F
@@ -22,21 +25,24 @@ from ..ops import composite as C
 from ..ops.primitive import constexpr
 from ..common import dtype as mstype
 from ..common import Tensor
+from .._c_expression import typing
 
-from .dtypes import nan, pi
+from .dtypes import nan, pi, dtype_map, inf
 
-from .array_creations import asarray_const, ones, zeros, empty, full, full_like
+from .array_creations import asarray_const, ones, zeros, empty, full, full_like, diag, \
+    arange, histogram_bin_edges, eye
 from .array_ops import where as where_
-from .array_ops import ravel, expand_dims, moveaxis, concatenate
+from .array_ops import ravel, expand_dims, moveaxis, concatenate, flip, stack, atleast_1d
 from .utils_const import _infer_out_shape, _check_axis_valid, _get_device, \
     _check_shape_aligned, _raise_type_error, _check_same_type, _check_is_float, \
     _raise_value_error, _promote, _check_axis_type, _canonicalize_axis, \
     _is_shape_empty, _check_is_int, _expanded_shape, _check_axis_in_range, \
     _check_dtype, _list_comprehensions, _tuple_setitem, _add_unit_axes, _seq_prod, \
-    _make_tensor, _promote_for_trigonometric, _raise_runtime_error, _max
+    _make_tensor, _promote_for_trigonometric, _raise_runtime_error, _max, _type_convert, \
+    _raise_unimplemented_error, _abs, _in
 from .utils import _expand, _broadcast_to, _broadcast_to_shape, _get_size, \
-    _check_input_tensor, _to_tensor, _isnan
+    _check_input_tensor, _to_tensor, _isnan, _convert_bool_to_int, _to_tensor_origin_dtype
 
 ZERO_TENSOR = asarray_const(0)
@@ -53,6 +59,9 @@
 _reduce_max_default = P.ReduceMax()
 _reduce_max_keepdims = P.ReduceMax(True)
 _cumsum_default = P.CumSum()
 _concat = P.Concat(-1)
+_cumprod_default = P.CumProd()
+_round = P.Round()
+
 
 def absolute(x, dtype=None):
     """
@@ -265,8 +274,7 @@ def add(x1, x2, dtype=None):
     # broadcast is not fully supported in tensor_add on CPU,
     # so we use tensor_sub as a substitute solution
     if _get_device() == 'CPU':
-        _check_input_tensor(x1, x2)
-        return subtract(x1, F.neg_tensor(x2), dtype=dtype)
+        return subtract(x1, F.neg_tensor(_to_tensor(x2)), dtype=dtype)
     return _apply_tensor_op(F.tensor_add, x1, x2, dtype=dtype)
 
 
@@ -713,11 +721,21 @@ def dot(a, b):
    [105. 105. 105. 
105.]]]
     """
     ndim_a, ndim_b = F.rank(a), F.rank(b)
+    if ndim_a == 0 or ndim_b == 0:
+        return F.tensor_mul(a, b)
     if ndim_a > 0 and ndim_b >= 2:
+        # dot sums over the last axis of `a` and the second-to-last axis of `b`,
+        # so b's last two axes are swapped before the aligned matmul below
         perm = F.make_range(ndim_b)
         perm = perm[:-2] + (perm[-1],) + (perm[-2],)
         b = F.transpose(b, perm)
-    return inner(a, b)
+
+    if F.shape(a)[-1] != F.shape(b)[-1]:
+        _raise_value_error('shapes are not aligned')
+    a_aligned = F.reshape(a, (-1, F.shape(a)[-1]))
+    b_aligned = F.reshape(b, (-1, F.shape(b)[-1]))
+
+    res = _matmul_T(a_aligned, b_aligned)
+    res = F.reshape(res, F.shape(a)[:-1] + F.shape(b)[:-1])
+    return res
 
 
 def outer(a, b):
@@ -2238,23 +2256,6 @@ def convolve(a, v, mode='full'):
         v = v[::-1]
     return _compute_1D_conv(a, v, mode).astype(final_dtype)
 
-def _compute_1D_conv(a, v, mode):
-    """Returns a 1-D sequence which is the cross-correlate of two 1-D sequences (`a` and `v`)."""
-    v_size = F.shape_mul(v.shape)
-    if mode not in ('same', 'full', 'valid'):
-        _raise_value_error("mode must be one of ['full', 'same', 'valid']")
-    if v_size > 1:
-        if mode == 'same':
-            pad_left = _to_tensor(_list_comprehensions(v_size // 2, 0.0, True))
-            pad_right = _to_tensor(_list_comprehensions(v_size - v_size // 2 - 1, 0.0, True))
-            a = P.Concat(0)((pad_left, a, pad_right))
-        elif mode == 'full':
-            pad = _to_tensor(_list_comprehensions(v_size - 1, 0.0, True))
-            a = P.Concat(0)((pad, a, pad))
-    a = a.reshape(1, 1, 1, a.size)
-    v = v.reshape(1, 1, 1, v.size)
-    _conv = P.Conv2D(1, (1, v.size))
-    return _conv(a, v).reshape(-1)
 
 def _handle_weights(weights, num_samples):
     """Checks fweight and aweight in np.cov."""
@@ -2476,6 +2477,96 @@ def _reduce(a, reduce_fn, cmp_fn=None, axis=None, keepdims=False, initial=None,
     return reduce_fn(a, axes).astype(dtype)
 
 
+def nanmax(a, axis=None, dtype=None, keepdims=False):
+    """
+    Return the maximum of an array or maximum along an axis, ignoring any NaNs.
+
+    Note:
+        Numpy argument `out` is not supported.
+        For all-NaN slices, a very small negative number is returned instead of NaN.
+
+    Args:
+        a (Union[int, float, bool, list, tuple, Tensor]): Array containing numbers whose maximum
+            is desired. If `a` is not an array, a conversion is attempted.
+        axis (Union[int, tuple of int, None], optional): Axis or axes along which the maximum is
+            computed. The default is to compute the maximum of the flattened array.
+        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
+            output Tensor.
+        keepdims (boolean, optional): defaults to False. If this is set to True, the axes which
+            are reduced are left in the result as dimensions with size one. With this option,
+            the result will broadcast correctly against the original `a`.
+
+    Returns:
+        Tensor.
+
+    Raises:
+        ValueError: if axes are out of the range of ``[-a.ndim, a.ndim)``, or
+            if the axes contain duplicates.
+
+    Supported Platforms:
+        ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore.numpy as np
+        >>> a = np.array([[1, 2], [3, np.nan]])
+        >>> output = np.nanmax(a)
+        >>> print(output)
+        3.0
+        >>> output = np.nanmax(a, axis=0)
+        >>> print(output)
+        [3. 2.]
+    """
+    a = _to_tensor(a)
+    nan_mask = _isnan(a)
+    # replaces NaNs with the smallest representable value so they never win the max
+    a = F.select(nan_mask, full(F.shape(a), -sys.maxsize - 1, F.dtype(a)), a)
+    reduce_fn = _reduce_max_keepdims if keepdims else _reduce_max_default
+    return _reduce(a, reduce_fn, axis=axis, keepdims=keepdims, dtype=dtype)
+
+
+def nanmin(a, axis=None, dtype=None, keepdims=False):
+    """
+    Return the minimum of an array or minimum along an axis, ignoring any NaNs.
+
+    Note:
+        Numpy argument `out` is not supported. 
+ For all-NaN slices, a very large number is returned instead of NaN. + + Args: + a (Union[int, float, bool, list, tuple, Tensor]): Array containing numbers whose minimum + is desired. If `a` is not an array, a conversion is attempted. + axis (Union[int, tuple of int, None], optional): Axis or axes along which the minimum is + computed. The default is to compute the minimum of the flattened array. + dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the + output Tensor. + keepdims (boolean, optional): defaults to False. If this is set to True, the axes which + are reduced are left in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. + + Returns: + Tensor. + + Raises: + ValueError: if axes are out of the range of ``[-a.ndim, a.ndim)``, or + if the axes contain duplicates. + + Supported Platforms: + ``GPU`` ``CPU`` + + Examples: + >>> a = np.array([[1, 2], [3, np.nan]]) + >>> output = np.nanmin(a) + >>> print(output) + 1.0 + >>> output = np.nanmin(a, axis=0) + >>> print(output) + [1. 2.] + """ + a = _to_tensor(a) + nan_mask = _isnan(a) + a = F.select(nan_mask, full(F.shape(a), sys.maxsize, F.dtype(a)), a) + reduce_fn = _reduce_min_keepdims if keepdims else _reduce_min_default + return _reduce(a, reduce_fn, axis=axis, keepdims=keepdims, dtype=dtype) + + def _reduce_nansum(x, axis, keepdims=False): """Computes reduce sum treating NaNs as zeros.""" x = F.select(_isnan(x), zeros(F.shape(x), F.dtype(x)), x) @@ -2525,10 +2616,6 @@ def nansum(a, axis=None, dtype=None, keepdims=False): a = _to_tensor(a) nan_mask = _isnan(a) a = F.select(nan_mask, zeros(F.shape(a), F.dtype(a)), a) - if dtype is None and _get_device() == 'CPU' and not _check_is_float(F.dtype(a)): - # F.reduce_sum only supports float on CPU - dtype = F.dtype(a) - a = F.cast(a, mstype.float32) return _reduce(a, functools.partial(_reduce_nansum, keepdims=keepdims), axis=axis, keepdims=keepdims, dtype=dtype) @@ -3752,6 +3839,603 @@ def promote_types(type1, type2): return _promote(type1, type2) +def corrcoef(x, y=None, rowvar=True, dtype=None): + r""" + Returns Pearson product-moment correlation coefficients. + + Please refer to the documentation for cov for more detail. The relationship + between the correlation coefficient matrix, R, and the covariance matrix, C, is + :math:`R_{ij} = \frac{ C_{ij} } { \sqrt{ C_{ii} * C_{jj} } }` + The values of R are between -1 and 1, inclusive. + + Note: + Currently, complex numbers are not supported. + + Args: + x (Union[int, float, bool, tuple, list, Tensor]): A 1-D or 2-D array containing + multiple variables and observations. Each row of `x` represents a variable, + and each column a single observation of all those variables. Also see rowvar below. + y (Union[int, float, bool, tuple, list, Tensor], optional): An additional set + of variables and observations. `y` has the same shape as `x`. + rowvar (bool, optional): If rowvar is `True` (default), then each row represents + a variable, with observations in the columns. Otherwise, the relationship + is transposed: each column represents a variable, while the rows contain observations. + dtype (:class:`mindspore.dtype`, optional): Data-type of the result. By default, + the return data-type will have at least float32 precision. + + Returns: + Tensor, The correlation coefficient matrix of the variables. + + Raises: + TypeError: if the inputs have types not specified above. + ValueError: if `x` and `y` have wrong dimensions. 
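+
+    The defining relation can be checked directly against `cov` (an illustrative
+    sketch; the exact floating-point output is platform dependent):
+
+        >>> import mindspore.numpy as np
+        >>> x = np.array([[2., 3., 4., 5.], [0., 2., 3., 4.]])
+        >>> c = np.cov(x)
+        >>> d = np.sqrt(np.diag(c))
+        >>> r = c / np.outer(d, d)    # R_ij = C_ij / sqrt(C_ii * C_jj)
+        >>> # r agrees with np.corrcoef(x) up to rounding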
+ + Supported Platforms: + ``Ascend`` ``GPU`` ``CPU`` + + Examples: + >>> import mindspore.numpy as np + >>> output = np.corrcoef([[2., 3., 4., 5.], [0., 2., 3., 4.], [7., 8., 9., 10.]]) + >>> print(output) + [[1. 0.9827076 1. ] + [0.9827077 0.99999994 0.9827077 ] + [1. 0.9827076 1. ]] + """ + # This implementation was adapted from original Numpy. + c = cov(x, y, rowvar) + if not c.shape: + return F.tensor_div(c, c) + d = diag(c) + stddev = sqrt(d) + c /= F.expand_dims(stddev, -1) + c /= F.expand_dims(stddev, 0) + c = clip(c, -1, 1) + if dtype is not None: + return c.astype(dtype) + return c + + +def _slice_along_axis(f, axis, slice_start, slice_end): + """ + Slice a tensor along a given axis, a helper function for gradient + + Args: + f (Tensor): Input Tensor. + axis (int): Specified axis. + slice_start (int): The start of the slice. + slice_end (int): The end of the int. + + Returns: + Sliced tensor. + """ + slice_size = slice_end - slice_start + index_start = (0,) * f.ndim + index_end = f.shape + index_start = _tuple_setitem(index_start, axis, slice_start) + index_end = _tuple_setitem(index_end, axis, slice_size) + return F.tensor_slice(f, index_start, index_end) + + +def _gradient_along_axis(f, h, axis): + """compute the gradients of `f` along a given axis, a helper function of gradient.""" + end = f.shape[axis] + upper_edge = _slice_along_axis(f, axis, 1, 2) - _slice_along_axis(f, axis, 0, 1) + lower_edge = _slice_along_axis(f, axis, end-1, end) - _slice_along_axis(f, axis, end-2, end-1) + if end <= 2: + a_grad = concatenate((upper_edge, lower_edge), axis) + else: + middle = (_slice_along_axis(f, axis, 2, end) - _slice_along_axis(f, axis, 0, end-2)) * 0.5 + a_grad = concatenate((upper_edge, middle, lower_edge), axis) + return a_grad / h + + +def gradient(f, *varargs, axis=None, edge_order=1): + """ + Returns the gradient of a N-dimensional array. + The gradient is computed using second order accurate central differences + in the interior points and either first or second order accurate one-sides + (forward or backwards) differences at the boundaries. + The returned gradient hence has the same shape as the input array. + + Note: + Currently we only support `edge_order`=1 and uniform spacing of `varargs`. + + Args: + f (Union[tuple, list, Tensor]): An N-dimensional array containing samples of + a scalar function. + varargs (Union[tuple[number], tuple[tensor scalar]], optional) + Spacing between f values. Default unitary spacing for all dimensions. + Spacing can be specified using: + 1. single scalar to specify a sample distance for all dimensions. + 2. N scalars to specify a constant sample distance for each dimension. + edge_order (int): Gradient is calculated using N-th order accurate differences + at the boundaries. Default: 1. + axis (Union[None, int, tuple(int), list(int)], optional): Gradient is calculated + only along the given axis or axes. The default :class:`(axis = None)` is to calculate + the gradient for all the axes of the input tensor. `axis` may be negative, + in which case it counts from the last to the first `axis`. + + Returns: + gradient, a list of tensors (or a single tensor if there is only one dimension + to be calculated). Each derivative has the same shape as f. + + Raises: + TypeError: if the inputs have types not specified above. + ValueError: if `axis` values out of bounds, or shape of `f` has entries < 1. + NotImplementedError: if `edge_order` != 1, or `varargs` contains non-scalar entries. 
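+
+    As a worked check of the scheme above: for samples ``[1., 2., 4., 7.]`` with unit
+    spacing, the one-sided edges give ``2-1=1`` and ``7-4=3``, and the central
+    differences give ``(4-1)/2=1.5`` and ``(7-2)/2=2.5`` (print formatting may differ):
+
+        >>> import mindspore.numpy as np
+        >>> print(np.gradient(np.array([1., 2., 4., 7.])))
+        [1.  1.5 2.5 3. ]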
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore.numpy as np
+        >>> output = np.gradient([[1, 2, 6], [3, 4, 5]], axis=-1)
+        >>> print(output)
+        [[1.  2.5 4. ]
+         [1.  1.  1. ]]
+    """
+    # This implementation was adapted from Numpy and jax.numpy
+    if edge_order != 1:
+        _raise_unimplemented_error("edge_order != 1 not implemented")
+    if not isinstance(f, Tensor):
+        f = asarray_const(f)
+    if f.dtype != mstype.float64:
+        f = f.astype(mstype.float32)
+    if axis is None:
+        axis = F.make_range(f.ndim)
+    else:
+        _check_axis_type(axis, True, True, True)
+        axis = _canonicalize_axis(axis, f.ndim)
+        axis = (axis,) if isinstance(axis, int) else axis
+
+    len_axes = len(axis)
+    n = len(varargs)
+    dx = None
+    # check varargs and make varargs the same length as axis
+    if n == 0 or varargs is None:
+        # no spacing
+        dx = (1,) * len_axes
+    elif n == 1:
+        # single value for all axes
+        dx = varargs * len_axes
+    elif n == len_axes:
+        dx = varargs
+    else:
+        _raise_type_error("Invalid number of arguments")
+
+    a_grad = []
+
+    for idx in F.make_range(len_axes):
+        h = dx[idx]
+        ax = axis[idx]
+        if f.shape[ax] < 2:
+            _raise_value_error("Shape of array too small to calculate a numerical gradient, "
+                               "at least 2 elements are required.")
+        # if h is not scalar
+        if not (isinstance(h, (int, float, bool)) or (isinstance(h, Tensor) and h.ndim == 0)):
+            _raise_unimplemented_error("Non-constant spacing not implemented")
+
+        a_grad.append(_gradient_along_axis(f, h, ax))
+
+    if len(axis) == 1:
+        return a_grad[0]
+
+    return a_grad
+
+
+def sum_(a, axis=None, dtype=None, keepdims=False, initial=None):
+    """
+    Returns sum of array elements over a given axis.
+
+    Note:
+        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and
+        `extobj` are not supported.
+
+    Args:
+        a (Union[int, float, bool, list, tuple, Tensor]): Elements to sum.
+        axis (Union[None, int, tuple(int)]): Axis or axes along which a sum is performed. Default: None.
+            If None, sum all of the elements of the input array.
+            If axis is negative it counts from the last to the first axis.
+            If axis is a tuple of ints, a sum is performed on all of the axes specified in the tuple
+            instead of a single axis or all the axes as before.
+        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
+            output Tensor.
+        keepdims (bool): If this is set to True, the axes which are reduced are left in the result as
+            dimensions with size one. With this option, the result will broadcast correctly against the input array.
+            If the default value is passed, then keepdims will not be passed through to the sum method of
+            sub-classes of ndarray, however any non-default value will be. If the sub-class' method does not
+            implement keepdims any exceptions will be raised.
+        initial (scalar): Starting value for the sum.
+
+    Returns:
+        Tensor. An array with the same shape as `a`, with the specified axis removed.
+        If `a` is a 0-d array, or if `axis` is None, a scalar tensor is returned.
+
+    Raises:
+        TypeError: If input is not array_like or `axis` is not int or tuple of ints or
+            `keepdims` is not integer or `initial` is not scalar.
+        ValueError: If any axis is out of range or duplicate axes exist.
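+
+    With `keepdims` the reduced axis is retained with size one, so the result still
+    broadcasts against the input; a small sketch of the expected shapes:
+
+        >>> import mindspore.numpy as np
+        >>> x = np.arange(10).reshape(2, 5).astype('float32')
+        >>> print(np.sum(x, axis=1, keepdims=True).shape)
+        (2, 1)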
+ + Supported Platforms: + ``Ascend`` ``GPU`` ``CPU`` + + Examples: + >>> import mindspore.numpy as np + >>> print(np.sum([0.5, 1.5])) + 2.0 + >>> print(np.sum(np.array([-1, 0, 1], np.int32))) + 0 + >>> x = np.arange(10).reshape(2, 5).astype('float32') + >>> print(np.sum(x, axis=1)) + [10. 35.] + """ + if not isinstance(keepdims, int): + _raise_type_error("integer argument expected, but got ", keepdims) + if initial is not None and not isinstance(initial, (int, float, bool)): + _raise_type_error("initial argument should be a scalar.") + if axis is None: + axis = () + else: + _check_axis_type(axis, True, True, False) + axis = _canonicalize_axis(axis, a.ndim) + a = _convert_bool_to_int(_to_tensor(a)) + if _is_shape_empty(a.shape): + a = F.fill(a.dtype, (1,), 0) + + if keepdims: + res = _reduce_sum_keepdims(a, axis) + else: + res = _reduce_sum_default(a, axis) + if initial is not None: + res += initial + if dtype is not None and not _check_same_type(F.dtype(res), dtype): + res = F.cast(res, dtype) + return res + + +@constexpr +def _min_cost_chain_matmul(dims): + """ + Returns indices of splits that has the minimal cost for matmul. + s[i, j] holds the index of the split with minimal cost for arrays[i, i + 1, ... j] + """ + dims = tuple(dims) + n = len(dims) - 1 + m = [[0]*n for _ in range(n)] + s = [[0]*n for _ in range(n)] + for pos in range(1, n): + for i in range(n - pos): + j = i + pos + m[i][j] = sys.maxsize + for k in range(i, j): + cost = m[i][k] + m[k + 1][j] + dims[i]*dims[k + 1]*dims[j + 1] + if cost < m[i][j]: + m[i][j] = cost + s[i][j] = k + return s + + +@constexpr +def _get_dims(shapes): + """ + Returns the chain of the dimensions in arrays. + dims[i] == arrays[i - 1].shape[1] == arrays[i].shape[0] + """ + shapes = tuple(shapes) + if any(len(shape) != 2 for shape in shapes): + raise ValueError('Array must be 2 dimensional') + dims = tuple(map(operator.itemgetter(0), shapes)) + if any(shape[1] != dim for shape, dim in zip(shapes[:-1], dims[1:])): + raise ValueError(f'shapes not aligned') + return dims + (shapes[-1][1],) + + +def _multi_dot(arrays, i, j, order): + """Computes multi dot recursively using minimal cost.""" + if i == j: + return arrays[i] + return dot(_multi_dot(arrays, i, order[i][j], order), + _multi_dot(arrays, order[i][j] + 1, j, order)) + + +def multi_dot(arrays): + """ + Computes the dot product of two or more arrays in a single function call, while automatically + selecting the fastest evaluation order. + multi_dot chains numpy.dot and uses optimal parenthesization of the matrices + `[1] `. Depending on the shapes of the + matrices, this can speed up the multiplication a lot. + If the first argument is 1-D it is treated as a row vector. If the last argument is 1-D it + is treated as a column vector. The other arguments must be 2-D. + + Note: + Numpy argument `out` is not supported. + + Args: + arrays (sequence of array_like): If the first argument is 1-D it is treated as row + vector. If the last argument is 1-D it is treated as column vector. The other + arguments must be 2-D. + + Returns: + Tensor, the dot product of the supplied arrays. + + Raises: + ValueError: arrays are not 2-D. + + Supported Platforms: + ``Ascend`` ``GPU`` ``CPU`` + + Examples: + >>> A = np.ones((10000, 100)) + >>> B = np.ones((100, 1000)) + >>> C = np.ones((1000, 5)) + >>> D = np.ones((5, 333)) + >>> output = np.multi_dot([A, B, C, D]) + >>> print(output) + [[500000. 500000. 500000. ... 500000. 500000. 500000.] + [500000. 500000. 500000. ... 500000. 500000. 500000.] + [500000. 500000. 
500000. ... 500000. 500000. 500000.]
+        ...
+        [500000. 500000. 500000. ... 500000. 500000. 500000.]
+        [500000. 500000. 500000. ... 500000. 500000. 500000.]
+        [500000. 500000. 500000. ... 500000. 500000. 500000.]]
+    """
+    arrays = _to_tensor(*arrays)
+    if len(arrays) < 2:
+        _raise_value_error('Expecting at least 2 arrays')
+    if len(arrays) == 2:
+        return dot(*arrays)
+
+    shape_out = ()
+    arrs = []
+    for arr in arrays:
+        arrs.append(arr)
+
+    # a leading 1-D argument is treated as a row vector, a trailing one as a
+    # column vector; only the 2-D endpoints contribute to the output shape
+    if F.rank(arrs[0]) == 1:
+        arrs[0] = F.reshape(arrs[0], (1, arrs[0].size))
+    else:
+        shape_out += (F.shape(arrs[0])[0],)
+    if F.rank(arrs[-1]) == 1:
+        arrs[-1] = F.reshape(arrs[-1], (arrs[-1].size, 1))
+    else:
+        shape_out += (F.shape(arrs[-1])[1],)
+
+    shapes = []
+    for arr in arrs:
+        shapes.append(F.shape(arr))
+    dims = _get_dims(shapes)
+    order = _min_cost_chain_matmul(dims)
+    res = _multi_dot(arrs, 0, len(arrs) - 1, order)
+    return F.reshape(res, shape_out)
+
+
+def argmax(a, axis=None):
+    """
+    Returns the indices of the maximum values along an axis.
+
+    Note:
+        Numpy argument `out` is not supported.
+        On Ascend, in case of multiple occurrences of the maximum values, the return
+        indices may not necessarily correspond to the first occurrence.
+
+    Args:
+        a (Union[int, float, bool, list, tuple, Tensor]): Input array.
+        axis (int, optional): By default, the index is into
+            the flattened array, otherwise along the specified axis.
+
+    Returns:
+        Tensor, array of indices into the array. It has the same
+        shape as `a.shape` with the dimension along `axis` removed.
+
+    Raises:
+        ValueError: if axis is out of range.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore.numpy as np
+        >>> a = np.arange(10, 16).reshape(2, 3)
+        >>> print(np.argmax(a))
+        5
+        >>> print(np.argmax(a, axis=0))
+        [1 1 1]
+        >>> print(np.argmax(a, axis=1))
+        [2 2]
+        >>> b = np.array([0, 5, 2, 3, 4, 5])
+        >>> print(np.argmax(b))
+        1
+    """
+    return a.argmax(axis)
+
+
+def argmin(a, axis=None):
+    """
+    Returns the indices of the minimum values along an axis.
+
+    Note:
+        Numpy argument `out` is not supported.
+
+    Args:
+        a (Union[int, float, bool, list, tuple, Tensor]): Input array.
+        axis (int, optional): By default, the index is into
+            the flattened array, otherwise along the specified axis.
+
+    Returns:
+        Tensor, array of indices into the array. It has the same
+        shape as `a.shape` with the dimension along `axis` removed.
+
+    Raises:
+        ValueError: if axis is out of range.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore.numpy as np
+        >>> a = np.arange(10, 16).reshape(2, 3)
+        >>> print(np.argmin(a))
+        0
+        >>> print(np.argmin(a, axis=0))
+        [0 0 0]
+        >>> print(np.argmin(a, axis=1))
+        [0 0]
+        >>> b = np.array([0, 5, 2, 3, 4, 5])
+        >>> print(np.argmin(b))
+        0
+    """
+    return a.argmin(axis)
+
+
+@constexpr
+def _get_sort_range(size):
+    """Returns the range for number of searches (log2(size)) on a sorted array with the given size."""
+    return tuple(range(ceil(log2(_to_tensor(size + 1).astype(mstype.float32))).astype(mstype.int32)))
+
+
+def searchsorted(a, v, side='left', sorter=None):
+    """
+    Finds indices where elements should be inserted to maintain order.
+
+    Finds the indices into a sorted array `a` such that, if the corresponding elements
+    in `v` were inserted before the indices, the order of `a` would be preserved.
+
+    Args:
+        a (Union[int, float, bool, list, tuple, Tensor]): 1-D input array. 
If `sorter` is
+            None, then it must be sorted in ascending order, otherwise `sorter` must be
+            an array of indices that sort it.
+        v (Union[int, float, bool, list, tuple, Tensor]): Values to insert into `a`.
+        side ('left', 'right', optional): If 'left', the index of the first suitable
+            location found is given. If 'right', return the last such index. If there is
+            no suitable index, return either 0 or N (where N is the length of `a`).
+        sorter (Union[int, float, bool, list, tuple, Tensor]): 1-D optional array of
+            integer indices that sort array `a` into ascending order. They are typically
+            the result of argsort.
+
+    Returns:
+        Tensor, array of insertion points with the same shape as `v`.
+
+    Raises:
+        ValueError: if argument for `side` or `sorter` is invalid.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore.numpy as np
+        >>> print(np.searchsorted([1,2,3,4,5], 3))
+        2
+        >>> print(np.searchsorted([1,2,3,4,5], 3, side='right'))
+        3
+        >>> print(np.searchsorted([1,2,3,4,5], [-10, 10, 2, 3]))
+        [0 5 1 2]
+    """
+    if side not in ('left', 'right'):
+        _raise_value_error(f'{side} is an invalid value for keyword "side"')
+    a = _to_tensor(a).astype(mstype.float32)
+    v = _to_tensor(v)
+    shape = F.shape(v)
+    if sorter is not None:
+        if F.rank(sorter) != 1 or sorter.size != a.size:
+            _raise_value_error('sorter must be 1-D array with the same size as `a`')
+        sorter = _to_tensor(sorter)
+        sorter = F.expand_dims(sorter, -1)
+        a = F.gather_nd(a, sorter)
+    less_op = F.tensor_le if side == 'left' else F.tensor_lt
+    i = F.fill(mstype.int32, shape, 0)
+    j = F.fill(mstype.int32, shape, a.size)
+    two = F.fill(mstype.int32, shape, 2)
+
+    # binary search: log2(size) halvings of the [i, j) interval per query value
+    for _ in _get_sort_range(a.size):
+        mid = floor_divide(add(i, j), two)
+        mask = less_op(v, F.gather_nd(a, F.expand_dims(mid, -1)))
+        i = F.select(mask, i, mid)
+        j = F.select(mask, mid, j)
+    return j
+
+
+def interp(x, xp, fp, left=None, right=None):
+    """
+    One-dimensional linear interpolation for monotonically increasing sample points.
+    Returns the one-dimensional piecewise linear interpolant to a function with given
+    discrete data points `(xp, fp)`, evaluated at `x`.
+
+    Note:
+        Numpy argument `period` is not supported.
+        Complex values are not supported.
+
+    Args:
+        x (Union[int, float, bool, list, tuple, Tensor]): The x-coordinates at which
+            to evaluate the interpolated values.
+        xp (Union[int, float, bool, list, tuple, Tensor]): 1-D sequence of floats, the
+            x-coordinates of the data points, must be increasing.
+        fp (Union[int, float, bool, list, tuple, Tensor]): 1-D sequence of floats, the
+            y-coordinates of the data points, same length as `xp`.
+        left (float, optional): Value to return for ``x < xp[0]``, default is ``fp[0]``.
+        right (float, optional): Value to return for ``x > xp[-1]``, default is ``fp[-1]``.
+
+    Returns:
+        Tensor, the interpolated values, same shape as `x`.
+
+    Raises:
+        ValueError: if `xp` or `fp` is not one-dimensional, or if `xp` and `fp` do not have
+            the same length.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore.numpy as np
+        >>> xp = [1, 2, 3]
+        >>> fp = [3, 2, 0]
+        >>> print(np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp))
+        [3. 3. 2.5 0.55999994 0. 
] + >>> UNDEF = -99.0 + >>> print(np.interp(3.14, xp, fp, right=UNDEF)) + -99.0 + """ + # TODO implement period once sort is supported + x, xp, fp = _to_tensor(x, xp, fp) + if F.rank(xp) != 1 or F.rank(fp) != 1: + _raise_value_error('xp and fp must be 1-d sequences') + size = xp.size + if fp.size != size: + _raise_value_error('the y-coordinates must have the same length as `xp`') + + shape = F.shape(x) + xp = xp.astype(mstype.float32) + fp = fp.astype(mstype.float32) + + indices_1 = clip(searchsorted(xp, x), 0, size - 1) + indices_0 = clip(indices_1 - _to_tensor(1), 0, size - 1) + indices_0 = F.expand_dims(indices_0, -1) + indices_1 = F.expand_dims(indices_1, -1) + x_0 = F.gather_nd(xp, indices_0) + x_1 = F.gather_nd(xp, indices_1) + y_0 = F.gather_nd(fp, indices_0) + y_1 = F.gather_nd(fp, indices_1) + res = (y_0*(x_1 - x) + y_1*(x - x_0))/(x_1 - x_0) + res = F.select(F.equal(x_0, x_1), y_0, res) + # where x < xp[0], y = left or xp[0] + # where x > xp[-1], y = right or xp[-1] + idx_0 = _to_tensor([0]) + idx_last = _to_tensor([size - 1]) + if left is None: + left = F.gather_nd(fp, idx_0) + left = full(shape, left, mstype.float32) + if right is None: + right = F.gather_nd(fp, idx_last) + right = full(shape, right, mstype.float32) + choose_left = F.tensor_lt(x, F.gather_nd(xp, idx_0)) + choose_right = F.tensor_gt(x, F.gather_nd(xp, idx_last)) + res = F.select(choose_left, left, res) + res = F.select(choose_right, right, res) + return res + + def _apply_tensor_op(fn, *args, dtype=None): """Applies tensor operations based on fn""" args = _to_tensor(*args) @@ -3762,3 +4446,1440 @@ def _apply_tensor_op(fn, *args, dtype=None): if dtype is not None and not _check_same_type(F.dtype(res), dtype): res = F.cast(res, dtype) return res + + +def sign(x, dtype=None): + """ + Returns an element-wise indication of the sign of a number. + + The sign function returns `-1 if x < 0, 0 if x == 0, 1 if x > 0`. nan is returned for nan inputs. + + Note: + Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are + not supported. + Complex inputs are not supported now. + + Args: + x (Union[int, float, list, tuple, Tensor]): Input values. + dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the + output Tensor. + + Returns: + The sign of x. This is a tensor or a scalar when x is a scalar. + + Raises: + TypeError: if dtype of the input is not in the given types or + the input can not be converted to tensor. + + Supported Platforms: + ``Ascend`` ``GPU`` ``CPU`` + + Examples: + >>> import mindspore.numpy as np + >>> output = np.sign(np.array([-1., 0., 1., 1.2])) + >>> print(output) + [-1. 0. 1. 1.] + """ + if not isinstance(x, (int, float, list, tuple, Tensor)): + _raise_type_error('integer, float, list, tuple or Tensor are expected, but got', x) + x = _to_tensor(x) + if _check_same_type(F.dtype(x), mstype.bool_): + _raise_type_error("sign does not accept dtype bool.") + + _non_zero_sign = x / absolute(x) + _zero = _broadcast_to_shape(_make_tensor(0, x.dtype), x.shape) + is_zero = F.equal(x, 0) + res = F.select(is_zero, _zero, _non_zero_sign) + + if dtype is not None and not _check_same_type(F.dtype(res), dtype): + res = F.cast(res, dtype) + return res + + +def copysign(x1, x2, dtype=None): + """ + Changes the sign of `x1` to that of `x2`, element-wise. + + If `x2` is a scalar, its sign will be copied to all elements of `x1`. + + Note: + Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are + not supported. 
+ Complex inputs are not supported now. + + Args: + x1 (Union[int, float, list, tuple, Tensor]): Values to change the sign of. + x2 (Union[int, float, list, tuple, Tensor]): The sign of x2 is copied to x1. If `x1.shape != x2.shape`, + they must be broadcastable to a common shape (which becomes the shape of the output). + dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the + output Tensor. + + Returns: + Tensor or scalar. The values of `x1` with the sign of `x2`. This is a scalar if both `x1` and `x2` are scalars. + + Raises: + TypeError: if dtype of the input is not in the given types or + the input can not be converted to tensor. + + Supported Platforms: + ``Ascend`` ``GPU`` ``CPU`` + + Examples: + >>> import mindspore.numpy as np + >>> output = np.copysign(np.array([1, -1, -1]), np.array([-1, 1, -1])) + >>> print(output) + [-1 1 -1] + """ + if not isinstance(x1, (int, float, list, tuple, Tensor)): + _raise_type_error('integer, float, list, tuple or Tensor are expected, but got', x1) + if not isinstance(x2, (int, float, list, tuple, Tensor)): + _raise_type_error('integer, float, list, tuple or Tensor are expected, but got', x2) + x1, x2 = _to_tensor(x1, x2) + x2 = _broadcast_to_shape(x2, x1.shape) + if _check_same_type(F.dtype(x1), mstype.bool_) or _check_same_type(F.dtype(x2), mstype.bool_): + _raise_type_error("sign does not accept dtype bool.") + + original_dtype = x1.dtype + if not _check_is_float(original_dtype): + pos_tensor = F.absolute(x1.astype('float32')).astype(original_dtype) + else: + pos_tensor = F.absolute(x1) + + neg_tensor = F.neg_tensor(pos_tensor) + less_zero = F.less(x2, 0) + res = F.select(less_zero, neg_tensor, pos_tensor) + + if dtype is not None and not _check_same_type(F.dtype(res), dtype): + res = F.cast(res, dtype) + return res + + +def digitize(x, bins, right=False): + """ + Returns the indices of the bins to which each value in input array belongs. + If values in `x` are beyond the bounds of `bins`, 0 or ``len(bins)`` is returned + as appropriate. + + Args: + x (Union[int, float, bool, list, tuple, Tensor]): Input array to be binned. + bins (Union[int, float, bool, list, tuple, Tensor]): Array of bins. It has to + be 1-dimensional and monotonic. + right (boolean, optional): Indicating whether the intervals include the right + or the left bin edge. Default behavior is ``(right==False)`` indicating + that the interval does not include the right edge. The left bin end is + open in this case, i.e., ``bins[i-1] <= x < bins[i]`` is the default + behavior for monotonically increasing bins. + + Returns: + Tensor of ints, output array of indices, of same shape as `x`. + + Supported Platforms: + ``Ascend`` ``GPU`` ``CPU`` + + Examples: + >>> import mindspore.numpy as np + >>> x = np.array([1.2, 10.0, 12.4, 15.5, 20.]) + >>> bins = np.array([0, 5, 10, 15, 20]) + >>> inds = np.digitize(x, bins) + >>> print(inds) + [1 3 3 4 5] + """ + x, bins = _to_tensor(x, bins) + if x.size == 0: + return x + if bins.size == 0: + return zeros(F.shape(x), mstype.int32) + side = 'left' if right else 'right' + first_bin = bins[0] + last_bin = bins[_type_convert(int, bins.size) - 1] + cond = first_bin <= last_bin + incr = searchsorted(bins, x, side) + decr = _to_tensor(bins.size) - searchsorted(flip(bins), x, side) + return where_(cond, incr, decr) + + +def bincount(x, weights=None, minlength=0, length=None): + """ + Count number of occurrences of each value in array of non-negative ints. 
+ + +def bincount(x, weights=None, minlength=0, length=None): + """ + Counts the number of occurrences of each value in an array of non-negative ints. + + The number of bins (of size 1) is one larger than the largest value in `x`. + If `minlength` is specified, there will be at least this number of bins in the + output array (though it will be longer if necessary, depending on the contents + of `x`). Each bin gives the number of occurrences of its index value in `x`. If + `weights` is specified the input array is weighted by it, i.e. if a value `n` + is found at position `i`, ``out[n] += weight[i]`` instead of ``out[n] += 1``. + + Note: + The additional argument `length` specifies the number of bins (overriding + ``x.max() + 1``), which must be provided in graph mode. + If `x` contains negative values, no error will be raised, and negative values + are treated as zeros instead. + + Args: + x (Union[int, float, bool, list, tuple, Tensor]): 1-d input array. + weights (Union[int, float, bool, list, tuple, Tensor], optional): Weights, + array of the same shape as `x`. + minlength (int, optional): A minimum number of bins for the output array. + length (int, optional): Number of bins. + + Returns: + Tensor, the result of binning the input array. The length of out is equal to + ``np.amax(x)+1``. + + Raises: + ValueError: if `x` is not one-dimensional, or if `x` and `weights` do not have + the same shape. + + Supported Platforms: + ``Ascend`` ``GPU`` ``CPU`` + + Examples: + >>> import mindspore.numpy as np + >>> print(np.bincount(np.arange(5))) + [1 1 1 1 1] + >>> print(np.bincount(np.array([0, 1, 1, 3, 2, 1, 7]))) + [1 3 1 1 0 0 0 1] + >>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights + >>> x = np.array([0, 1, 1, 2, 2, 2]) + >>> print(np.bincount(x, weights=w)) + [0.3 0.7 1.1] + """ + x = _to_tensor(x) + if F.rank(x) != 1: + _raise_value_error('`x` should be one-dimensional') + x = clip(x, 0, None) + if length is None: + if F.isconstant(x): + length = int(maximum(F.reduce_max(x), minlength - 1).asnumpy()) + 1 + else: + _raise_value_error('argument `length` must be provided in graph mode') + idx = arange(length).reshape(length, 1) + idx_mapping = F.equal(x, idx) + if weights is not None: + weights = _to_tensor(weights) + if F.shape(x) != F.shape(weights): + _raise_value_error('`x` and `weights` must have the same length') + idx_mapping *= weights + return F.reduce_sum(idx_mapping.astype(mstype.float32), 1).ravel()
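The bincount above avoids scatter ops by building an equality matrix between `arange(length)` and the input, then reducing over the sample axis. The same trick, sketched in standard numpy for illustration:

import numpy as np

def bincount_via_onehot(x, weights=None, length=None):
    x = np.asarray(x)
    length = int(x.max()) + 1 if length is None else length
    onehot = np.equal(x, np.arange(length).reshape(length, 1))  # (length, N)
    if weights is not None:
        onehot = onehot * np.asarray(weights)  # weight each occurrence
    return onehot.sum(axis=1)

print(bincount_via_onehot([0, 1, 1, 3, 2, 1, 7]))  # [1 3 1 1 0 0 0 1]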
+ + +def histogram(a, bins=10, range=None, weights=None, density=False): # pylint: disable=redefined-builtin + """ + Computes the histogram of a dataset. + + Note: + String values for `bins` are not supported. + Deprecated numpy argument `normed` is not supported. + + Args: + a (Union[int, float, bool, list, tuple, Tensor]): Input data. The histogram + is computed over the flattened array. + bins (Union[int, tuple, list, Tensor], optional): If `bins` is an int, it + defines the number of equal-width bins in the given range (10, by + default). If `bins` is a sequence, it defines the bin edges, including + the rightmost edge, allowing for non-uniform bin widths. + range((float, float), optional): The lower and upper range of the bins. If + not provided, `range` is simply ``(a.min(), a.max())``. Values outside + the range are ignored. The first element of the range must be less than + or equal to the second. + weights (Union[int, float, bool, list, tuple, Tensor], optional): An array + of weights, of the same shape as `a`. Each value in `a` only contributes + its associated weight towards the bin count (instead of 1). If density + is True, the weights are normalized, so that the integral of the density + over the range remains 1. + density (boolean, optional): If False, the result will contain the number of + samples in each bin. If True, the result is the value of the probability + density function at the bin, normalized such that the integral over the + range is 1. Note that the sum of the histogram values will not be equal + to 1 unless bins of unity width are chosen; it is not a probability mass + function. + + Returns: + (Tensor, Tensor), the values of the histogram and the bin edges. + + Raises: + ValueError: if `a` and `weights` do not have the same size. + + Supported Platforms: + ``Ascend`` ``GPU`` ``CPU`` + + Examples: + >>> from mindspore import numpy as np + >>> print(np.histogram([1, 2, 1], bins=[0, 1, 2, 3])) + (Tensor(shape=[3], dtype=Int32, value= [0, 2, 1]), + Tensor(shape=[4], dtype=Int32, value= [0, 1, 2, 3])) + >>> print(np.histogram(np.arange(4), bins=np.arange(5), density=True)) + (Tensor(shape=[4], dtype=Float32, value= + [ 2.50000000e-01, 2.50000000e-01, 2.50000000e-01, 2.50000000e-01]), + Tensor(shape=[5], dtype=Int32, value= [0, 1, 2, 3, 4])) + >>> print(np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])) + (Tensor(shape=[3], dtype=Int32, value= [1, 4, 1]), + Tensor(shape=[4], dtype=Int32, value= [0, 1, 2, 3])) + """ + a = _to_tensor(a).ravel() + bin_edges = histogram_bin_edges(a, bins, range, weights) + data_to_bins = searchsorted(bin_edges, a, 'right') + bin_size = _type_convert(int, bin_edges.size) + data_to_bins = where_(a == bin_edges[-1], _to_tensor(bin_size - 1), data_to_bins) + count = bincount(data_to_bins, weights, length=bin_size)[1:] + if count.size == 0: + return count, bin_edges + if density: + count = F.cast(count, mstype.float32) + count = count/diff(bin_edges)/F.reduce_sum(count) + return count, bin_edges + + +@constexpr +def _factor_flattened_hist(nbin): + """Returns the factor that will be applied to the histogram to be flattened.""" + factor = list((itertools.accumulate(nbin[1:][::-1], operator.mul)))[::-1] + factor.append(1) + return factor
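For intuition, `_factor_flattened_hist` builds row-major strides over the per-dimension bin counts, so a D-dimensional bin index can be collapsed into one flat index. A small pure-Python check with concrete numbers (illustrative only):

import itertools, operator

def factor_flattened_hist(nbin):
    # suffix products of nbin[1:], i.e. row-major (C-order) strides
    factor = list(itertools.accumulate(nbin[1:][::-1], operator.mul))[::-1]
    factor.append(1)
    return factor

nbin = (4, 5, 6)
print(factor_flattened_hist(nbin))  # [30, 6, 1]
# the flat index of multi-index (i, j, k) is then i*30 + j*6 + k*1
assert factor_flattened_hist(nbin) == [5 * 6, 6, 1]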
+ + +def histogramdd(sample, bins=10, range=None, weights=None, density=False): # pylint: disable=redefined-builtin + """ + Computes the multidimensional histogram of some data. + + Note: + Deprecated numpy argument `normed` is not supported. + + Args: + sample (Union[list, tuple, Tensor]): The data to be histogrammed, either `(N, D)` + array, or `(D, N)` array_like. Note the unusual interpretation of sample + when an array_like: + + When an array, each row is a coordinate in a `D-dimensional` space - such as + ``histogramdd(np.array([p1, p2, p3]))``. + + When an array_like, each element is the list of values for single coordinate + - such as ``histogramdd((X, Y, Z))``. + + The first form should be preferred. + bins (Union[int, tuple, list], optional): The bin specification: + + A sequence of arrays describing the monotonically increasing bin edges along + each dimension. + + The number of bins for each dimension ``(nx, ny, … =bins)`` + + The number of bins for all dimensions ``(nx=ny=…=bins)``. + range(Union[list, tuple], optional): A sequence of length `D`, each an optional + ``(lower, upper)`` tuple giving the outer bin edges to be used if the edges + are not given explicitly in bins. An entry of None in the sequence results in + the minimum and maximum values being used for the corresponding dimension. + The default, None, is equivalent to passing a tuple of `D` None values. + weights (Union[list, tuple, Tensor], optional): An array with shape `(N,)` of values + `w_i` weighing each sample ``(x_i, y_i, z_i, …)``. + density (boolean, optional): If False, the default, returns the number of samples + in each bin. If True, returns the probability density function at the bin, + ``bin_count / sample_count / bin_volume``. + + Returns: + (Tensor, list of Tensor), the values of the histogram and the bin edges. + + Raises: + ValueError: if `range` does not have one entry per dimension of the sample. + + Supported Platforms: + ``Ascend`` ``GPU`` ``CPU`` + + Examples: + >>> from mindspore import numpy as np + >>> sample = np.arange(15).reshape(5, 3) + >>> print(sample) + [[ 0 1 2] + [ 3 4 5] + [ 6 7 8] + [ 9 10 11] + [12 13 14]] + >>> print(np.histogramdd(sample, bins=(2, 3, 4))) + (Tensor(shape=[2, 3, 4], dtype=Int32, value= + [[[1, 1, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 0]], + [[0, 0, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 2]]]), + [Tensor(shape=[3], dtype=Float32, value= + [ 0.00000000e+00, 6.00000000e+00, 1.20000000e+01]), + Tensor(shape=[4], dtype=Float32, value= + [ 1.00000000e+00, 5.00000000e+00, 9.00000000e+00, 1.30000000e+01]), + Tensor(shape=[5], dtype=Float32, value= + [ 2.00000000e+00, 5.00000000e+00, 8.00000000e+00, 1.10000000e+01, 1.40000000e+01])]) + """ + if isinstance(sample, (tuple, list)): + sample = _to_tensor(*sample) + sample = stack(sample, -1) + elif not isinstance(sample, Tensor): + _raise_type_error('sample should be (N, D) array, or (D, N) array_like') + if F.rank(sample) != 2: + _raise_value_error('when an array, sample should be 2-dimensional') + ndim = F.shape(sample)[1] + + if isinstance(bins, int): + bins = _list_comprehensions(ndim, bins) + if isinstance(bins, (tuple, list)): + if len(bins) != ndim: + _raise_value_error('The dimension of bins must be equal to the dimension of the sample') + elif not isinstance(bins, Tensor): + _raise_type_error('bins should be int or sequence') + + if range is None: + range = _list_comprehensions(ndim, None, False, True) + else: + if len(range) != ndim: + _raise_value_error('range argument must have one entry per dimension') + + bin_edges = [] + dedges = [] + for i in F.make_range(ndim): + edges = histogram_bin_edges(sample[:, i], bins[i], range[i], weights) + bin_edges.append(edges) + dedges.append(diff(edges)) + + data_indices = [] + nbin = () + flattened_bin_size = 1 + for i in F.make_range(ndim): + data_to_bins = searchsorted(bin_edges[i], sample[:, i], 'right') + bin_size = _type_convert(int, bin_edges[i].size) + data_to_bins = where_(sample[:, i] == bin_edges[i][-1], _to_tensor(bin_size - 1), data_to_bins) + data_indices.append(data_to_bins) + nbin += (bin_size + 1,) + flattened_bin_size *= (bin_size + 1) + + factor = F.reshape(_to_tensor(_factor_flattened_hist(nbin)), (ndim, 1)) + stacked_indices = stack(data_indices) * factor + if _get_device() == 'Ascend': + stacked_indices = F.cast(stacked_indices, mstype.float32) + flattened_hist = F.reduce_sum(stacked_indices.astype(mstype.float32), 0) + count = bincount(flattened_hist, weights, length=flattened_bin_size) + count = F.reshape(count, nbin) + slices = _list_comprehensions(ndim, F.make_slice(1, -1, 1), True) + count = count[slices] + + if density: + s = F.reduce_sum(count.astype(mstype.float32)) + for i in F.make_range(ndim): + shape = _expanded_shape(ndim, dedges[i].size, i) + count /= _to_tensor(dedges[i]).reshape(shape) + count /= s + return count, bin_edges
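Both histogram and histogramdd above share the same off-by-one bookkeeping: searchsorted maps each sample into bin_size + 1 slots (including the two out-of-range slots), the right edge is folded back into the last bin, and the outer slots are stripped at the end. A toy rendering of that bookkeeping for one dimension, assuming standard numpy:

import numpy as np

edges = np.array([0., 1., 2., 3.])
samples = np.array([-0.5, 0.2, 1.0, 2.9, 3.0, 7.0])

idx = np.searchsorted(edges, samples, side='right')        # 0..len(edges) slots
idx = np.where(samples == edges[-1], len(edges) - 1, idx)  # fold right edge in
counts = np.bincount(idx, minlength=len(edges) + 1)
print(counts[1:-1])  # [1 1 2]: per-bin counts, out-of-range slots dropped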
+ + +def histogram2d(x, y, bins=10, range=None, weights=None, density=False): # pylint: disable=redefined-builtin + """ + Computes the bi-dimensional histogram of two data samples. + + Note: + Deprecated numpy argument `normed` is not supported. + + Args: + x (Union[list, tuple, Tensor]): An array with shape `(N,)` containing the x + coordinates of the points to be histogrammed. + y (Union[list, tuple, Tensor]): An array with shape `(N,)` containing the y + coordinates of the points to be histogrammed. + bins (Union[int, tuple, list], optional): The bin specification: + + If int, the number of bins for the two dimensions ``(nx=ny=bins)``. + + If array_like, the bin edges for the two dimensions ``(x_edges=y_edges=bins)``. + + If [int, int], the number of bins in each dimension ``(nx, ny = bins)``. + + If [array, array], the bin edges in each dimension ``(x_edges, y_edges = bins)``. + + A combination [int, array] or [array, int], where int is the number of bins and + array is the bin edges. + range(Union[list, tuple], optional): has shape (2, 2), the leftmost and rightmost + edges of the bins along each dimension (if not specified explicitly in the bins + parameters): ``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range + will be considered outliers and not tallied in the histogram. + weights (Union[list, tuple, Tensor], optional): An array with shape `(N,)` of values + `w_i` weighing each sample `(x_i, y_i)`. + density (boolean, optional): If False, the default, returns the number of samples + in each bin. If True, returns the probability density function at the bin, + ``bin_count / sample_count / bin_volume``. + + Returns: + (Tensor, Tensor, Tensor), the values of the bi-dimensional histogram and the bin edges + along the first and second dimensions. + + Raises: + ValueError: if `range` does not have one entry per dimension of the sample. + + Supported Platforms: + ``Ascend`` ``GPU`` ``CPU`` + + Examples: + >>> from mindspore import numpy as np + >>> x = np.arange(5) + >>> y = np.arange(2, 7) + >>> print(np.histogram2d(x, y, bins=(4, 6))) + (Tensor(shape=[4, 6], dtype=Int32, value= + [[1, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 1, 1]]), + Tensor(shape=[5], dtype=Float32, value= + [ 0.00000000e+00, 1.00000000e+00, 2.00000000e+00, 3.00000000e+00, 4.00000000e+00]), + Tensor(shape=[7], dtype=Float32, value= + [ 2.00000000e+00, 2.66666675e+00, 3.33333349e+00, 4.00000000e+00, 4.66666698e+00, + 5.33333349e+00, 6.00000000e+00])) + """ + count, bin_edges = histogramdd((x, y), bins=bins, range=range, weights=weights, density=density) + return count, bin_edges[0], bin_edges[1]
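matrix_power below computes positive powers with n-1 successive matmuls. An exponentiation-by-squaring variant would need only O(log n) multiplications; a hedged sketch of that alternative in standard numpy (not what the implementation below does):

import numpy as np

def matrix_power_by_squaring(a, n):
    # assumes a is a square matrix and n >= 0
    result = np.eye(a.shape[-1], dtype=a.dtype)
    base = a
    while n > 0:
        if n % 2 == 1:
            result = result @ base  # consume the lowest set bit of n
        base = base @ base
        n //= 2
    return result

a = np.arange(16).reshape(4, 4).astype('float32')
print(np.allclose(matrix_power_by_squaring(a, 2), a @ a))  # True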
+ + +def matrix_power(a, n): + """ + Raises a square matrix to the (integer) power `n`. + + For positive integers `n`, the power is computed by repeated matrix multiplications. + If :math:`n == 0`, the identity matrix of the same shape as `a` is returned. + + Note: + Stacks of object matrices are not currently supported and + :math:`n < 0` is not supported. + + Args: + a (Union[int, float, bool, list, tuple, Tensor]): Input matrix. + n (int): The exponent can be any integer or long integer, positive or zero. + + Returns: + Tensor. + + Raises: + TypeError: if the input can not be converted to a tensor or + the exponent is not integer. + ValueError: if the input includes less than 2 dimensions or + the last 2 dimensions are not square. + + Supported Platforms: + ``Ascend`` ``GPU`` ``CPU`` + + Examples: + >>> from mindspore import numpy as np + >>> a = np.arange(16).reshape(4, 4).astype('float32') + >>> print(np.matrix_power(a, 2)) + [[ 56. 62. 68. 74.] + [152. 174. 196. 218.] + [248. 286. 324. 362.] + [344. 398. 452. 506.]] + """ + a = _to_tensor(a) + if not isinstance(n, int): + _raise_type_error("exponent must be an integer") + if a.ndim < 2: + _raise_value_error(str(a.ndim) + "-dimensional array given. Array must be at least two-dimensional") + if a.shape[-2] != a.shape[-1]: + _raise_value_error("Last 2 dimensions of the array must be square") + + if n < 0: + _raise_value_error("n < 0 is not supported now.") + if n == 0: + return _broadcast_to_shape(eye(a.shape[-1], a.shape[-1], dtype=a.dtype), a.shape) + if n == 1: + return a + res = a + while n > 1: + res = C.matmul(res, a) + n = n - 1 + return res + + +def around(a, decimals=0): + """ + Evenly round to the given number of decimals. + + Note: + Numpy argument `out` is not supported. + Complex numbers are not supported. + + Args: + a (Union[int, float, list, tuple, Tensor]): Input data. + decimals (int): Number of decimal places to round to. Default: 0. + If decimals is negative, it specifies the number of positions + to the left of the decimal point. + + Returns: + Tensor. A tensor of the same type as a, containing the rounded values. + The result of rounding a float is a float. + + Raises: + TypeError: if the input can not be converted to a tensor or + the `decimals` argument is not integer. + + Supported Platforms: + ``Ascend`` + + Examples: + >>> import mindspore.numpy as np + >>> a = np.array([-1.3, 0.0, 0.5, 1.5, 2.5]) + >>> print(np.around(a)) + [-1. 0. 0. 2. 2.] + """ + a = _to_tensor_origin_dtype(a) + if not isinstance(decimals, int): + _raise_type_error("decimals must be an integer") + if decimals < 0: + _raise_value_error("decimals < 0 is not supported now.") + if decimals == 0: + return _round(a) + return F.tensor_div(_round(a * 10**decimals), 10**decimals) + + +def _to_poly1d(x): + """Converts the input to a 1-d tensor of polynomial coefficients.""" + x = atleast_1d(_to_tensor(x)) + if F.rank(x) > 1: + _raise_value_error('input array must be scalar or 1-d sequence') + return x + + +def polyadd(a1, a2): + """ + Finds the sum of two polynomials. + Returns the polynomial resulting from the sum of two input polynomials. + + Note: + Numpy object poly1d is currently not supported. + + Args: + a1 (Union[int, float, bool, list, tuple, Tensor]): Input polynomial. + a2 (Union[int, float, bool, list, tuple, Tensor]): Input polynomial. + + Returns: + Tensor, the sum of the inputs. + + Raises: + ValueError: if the input array has more than 1 dimension. + + Supported Platforms: + ``Ascend`` ``GPU`` ``CPU`` + + Examples: + >>> import mindspore.numpy as np + >>> print(np.polyadd([1, 2], [9, 5, 4])) + [9 6 6] + """ + a1 = _to_poly1d(a1) + a2 = _to_poly1d(a2) + diff_size = a1.size - a2.size + if diff_size == 0: + return add(a1, a2) + if diff_size > 0: + return concatenate((a1[:diff_size], add(a1[diff_size:], a2))) + return concatenate((a2[:-diff_size], add(a1, a2[-diff_size:])))
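polyadd aligns the two coefficient vectors at their constant terms: the leading coefficients of the longer polynomial are kept and only the overlapping low-order part is added. A quick check of that alignment with the docstring's own numbers, assuming standard numpy:

import numpy as np

a1 = np.array([1, 2])        # x + 2
a2 = np.array([9, 5, 4])     # 9x^2 + 5x + 4
diff_size = a1.size - a2.size  # -1: a2 is longer
# keep a2's leading coefficients, add the overlapping low-order part
out = np.concatenate((a2[:-diff_size], a1 + a2[-diff_size:]))
print(out)  # [9 6 6], i.e. 9x^2 + 6x + 6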
+ + +def polysub(a1, a2): + """ + Difference (subtraction) of two polynomials. + Given two polynomials `a1` and `a2`, returns ``a1 - a2``. + + Note: + Numpy object poly1d is currently not supported. + + Args: + a1 (Union[int, float, bool, list, tuple, Tensor]): Minuend polynomial. + a2 (Union[int, float, bool, list, tuple, Tensor]): Subtrahend polynomial. + + Returns: + Tensor, the difference of the inputs. + + Raises: + ValueError: if the input array has more than 1 dimension. + + Supported Platforms: + ``Ascend`` ``GPU`` ``CPU`` + + Examples: + >>> import mindspore.numpy as np + >>> print(np.polysub([2, 10, -2], [3, 10, -4])) + [-1 0 2] + """ + return polyadd(a1, -_to_tensor(a2)) + + +def polyval(p, x): + """ + Evaluates a polynomial at specific values. + If `p` is of length `N`, this function returns the value: + ``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]`` + If `x` is a sequence, then ``p(x)`` is returned for each element of `x`. If `x` + is another polynomial then the composite polynomial ``p(x(t))`` is returned. + + Note: + Numpy object poly1d is currently not supported. + + Args: + p (Union[int, float, bool, list, tuple, Tensor]): 1D array of polynomial + coefficients (including coefficients equal to zero) from highest + degree to the constant term. + x (Union[int, float, bool, list, tuple, Tensor]): A number, an array of + numbers, at which to evaluate `p`. + + Returns: + Tensor. + + Raises: + ValueError: if `p` has more than 1 dimension. + + Supported Platforms: + ``Ascend`` ``GPU`` ``CPU`` + + Examples: + >>> import mindspore.numpy as np + >>> print(np.polyval([3,0,1], 5)) + 76 + """ + p = _to_poly1d(p) + x = _to_tensor(x) + shape = F.shape(x) + exp_p = arange(_type_convert(int, p.size) - 1, -1, -1) + var_p = (x.reshape(shape + (1,)))**exp_p + return F.reduce_sum(p*var_p, -1) + + +def polyder(p, m=1): + """ + Returns the derivative of the specified order of a polynomial. + + Note: + Numpy object poly1d is currently not supported. + + Args: + p (Union[int, float, bool, list, tuple, Tensor]): Polynomial to differentiate. + A sequence is interpreted as polynomial coefficients. + m (int, optional): Order of differentiation. Default: 1. + + Returns: + Tensor, a new polynomial representing the derivative. + + Raises: + ValueError: if `p` has more than 1 dimension. + + Supported Platforms: + ``Ascend`` ``GPU`` ``CPU`` + + Examples: + >>> import mindspore.numpy as np + >>> print(np.polyder([1, 1, 1, 1])) + [3 2 1] + """ + p = _to_poly1d(p) + if m < 0: + _raise_value_error('Order of derivative must be non-negative') + if m >= p.size: + return _to_tensor([]) + for _ in range(m): + coeff = _to_tensor(F.make_range(_type_convert(int, p.size) - 1, 0, -1)) + p = p[:-1]*coeff + return p + + +def polymul(a1, a2): + """ + Finds the product of two polynomials. + + Note: + Numpy object poly1d is currently not supported. + + Args: + a1 (Union[int, float, bool, list, tuple, Tensor]): Input polynomial. + a2 (Union[int, float, bool, list, tuple, Tensor]): Input polynomial. + + Returns: + Tensor, a new polynomial representing the product of the inputs. + + Raises: + ValueError: if the input array has more than 1 dimension. + + Supported Platforms: + ``Ascend`` ``GPU`` ``CPU`` + + Examples: + >>> import mindspore.numpy as np + >>> print(np.polymul([3, 1, 2], [2, 5])) + [ 6 17  9 10] + """ + a1 = _to_poly1d(a1) + a2 = _to_poly1d(a2) + return convolve(a1, a2)
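Polynomial multiplication is exactly discrete convolution of the coefficient vectors, which is why polymul above reduces to `convolve`. The same product checked against standard numpy:

import numpy as np

p1 = np.array([3, 1, 2])    # 3x^2 + x + 2
p2 = np.array([2, 5])       # 2x + 5
print(np.convolve(p1, p2))  # [ 6 17  9 10]  ==  6x^3 + 17x^2 + 9x + 10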
+ + +def polyint(p, m=1, k=None): + """ + Returns an antiderivative (indefinite integral) of a polynomial. + + Note: + Numpy object poly1d is currently not supported. + + Args: + p (Union[int, float, bool, list, tuple, Tensor]): Polynomial to integrate. A + sequence is interpreted as polynomial coefficients. + m (int, optional): Order of the antiderivative. Default: 1. + k (Union[int, list of int], optional): Integration constants. They are given + in the order of integration: those corresponding to highest-order terms + come first. If None (default), all constants are assumed to be zero. If + ``m = 1``, a single scalar can be given instead of a list. + + Returns: + Tensor, a new polynomial representing the antiderivative. + + Raises: + ValueError: if `p` has more than 1 dimension. + + Supported Platforms: + ``Ascend`` ``GPU`` ``CPU`` + + Examples: + >>> import mindspore.numpy as np + >>> print(np.polyint([1, 1, 1])) + [0.33333334 0.5 1. 0. ] + """ + p = _to_poly1d(p) + if m < 0: + _raise_value_error('Order of the antiderivative must be non-negative') + if m == 0: + return p + if k is None: + k = zeros(m, F.dtype(p)) + k = atleast_1d(_to_tensor(k)) + if k.size == 1: + k = F.tile(k, (m,)) + k = F.reshape(k, (m, 1)) + for i in range(m): + coeff = _to_tensor(F.make_range(_type_convert(int, p.size), 0, -1)) + p = concatenate((true_divide(p, coeff), k[i])) + return p + + +@constexpr +def _get_dtype(x): + """Returns the dtype of x.""" + if isinstance(x, bool): + return mstype.bool_ + if isinstance(x, int): + return mstype.int32 + if isinstance(x, float): + return mstype.float32 + if isinstance(x, typing.Number): + return x + if isinstance(x, str): + t = dtype_map.get(x, None) + if t is None: + t = dtype_map.get(str(nptype(x))) + return t + raise TypeError('data type not understood') + + +def result_type(*arrays_and_dtypes): + """ + Returns the type that results from applying the type promotion rules to the arguments. + + Note: + The promotion rule is slightly different from original Numpy, but more like + jax, due to the preference for ``32-bit`` over ``64-bit`` data types. + Complex dtypes are not supported. + + Args: + *arrays_and_dtypes (Union[int, float, bool, list, tuple, Tensor, :class:`mindspore.dtype`, str]): + The operands of some operation whose result type is needed. + + Returns: + :class:`mindspore.dtype`, the result type. + + Raises: + TypeError: if the input is not a valid data type. + + Supported Platforms: + ``Ascend`` ``GPU`` ``CPU`` + + Examples: + >>> import mindspore.numpy as np + >>> print(np.result_type('i2', np.float32, True)) + Float32 + """ + def get_dtype(x): + if isinstance(x, Tensor): + return F.dtype(_to_tensor(x)) + return _get_dtype(x) + + dtype_out = get_dtype(arrays_and_dtypes[0]) + for i in arrays_and_dtypes[1:]: + dtype_out = _promote(dtype_out, get_dtype(i)) + return dtype_out
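The unwrap implementation below follows the classic recipe: take first differences, wrap them into the [-pi, pi) range, zero out the corrections for jumps smaller than `discont`, and integrate the corrections back with a cumulative sum. A compact rendering of the same steps, assuming standard numpy:

import numpy as np

def unwrap_1d(p, discont=np.pi):
    dd = np.diff(p)
    ddmod = np.mod(dd + np.pi, 2 * np.pi) - np.pi   # wrap diffs into [-pi, pi)
    ddmod = np.where((ddmod == -np.pi) & (dd > 0), np.pi, ddmod)
    ph_correct = np.where(np.abs(dd) < discont, 0.0, ddmod - dd)
    return np.concatenate(([p[0]], p[1:] + np.cumsum(ph_correct)))

phase = np.linspace(0, np.pi, num=5) + [0, 0, 0, np.pi, np.pi]
print(np.allclose(unwrap_1d(phase), np.unwrap(phase)))  # True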
+ + +def unwrap(p, discont=3.141592653589793, axis=-1): + """ + Unwraps radian phase `p` by changing absolute jumps greater than `discont` to their + ``2*pi`` complement along the given axis. + + Note: + For absolute jumps that are within a very close range to pi, unwrapping may be done + differently than numpy due to differences in round-off. + + Args: + p (Union[int, float, bool, list, tuple, Tensor]): Input array. + discont (float, optional): Maximum discontinuity between values, default is pi. + axis (int, optional): Axis along which unwrap will operate, default is the last axis. + + Returns: + Tensor. + + Raises: + ValueError: if the axis is out of range. + + Supported Platforms: + ``Ascend`` ``GPU`` ``CPU`` + + Examples: + >>> import mindspore.numpy as np + >>> phase = np.add(np.linspace(0, np.pi, num=5), [0, 0, 0, np.pi, np.pi]) + >>> print(phase) + [0. 0.7853982 1.5707964 5.4977875 6.2831855] + >>> print(np.unwrap(phase)) + [ 0.0000000e+00 7.8539819e-01 1.5707964e+00 -7.8539848e-01 -4.7683716e-07] + """ + p = _to_tensor(p) + ndim = F.rank(p) + axis = _check_axis_in_range(axis, ndim) + dd = diff(p, axis=axis) + ddmod = remainder(add(dd, pi), 2*pi) - pi + ddmod = where_(F.logical_and(ddmod == -pi, dd > 0), pi, ddmod) + ph_correct = ddmod - dd + ph_correct = where_(absolute(dd) < discont, 0, ph_correct) + slice_all = _list_comprehensions(F.rank(p), F.make_slice(None, None, None), True) + slice0 = _tuple_setitem(slice_all, axis, F.make_slice(0, 1, None)) + slice1 = _tuple_setitem(slice_all, axis, F.make_slice(1, None, None)) + head = p[slice0] + tail = add(p[slice1], cumsum(ph_correct, axis)) + return concatenate((head, tail), axis=axis) + + +def cumprod(a, axis=None, dtype=None): + """ + Returns the cumulative product of elements along a given axis. + + Note: + Numpy argument `out` is not supported. + + Args: + a (Union[int, float, bool, list, tuple, Tensor]): Input tensor. + axis (int, optional): Axis along which the cumulative product is computed. + By default the input is flattened. + dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the + output Tensor. + + Returns: + Tensor. + + Raises: + TypeError: If the input can not be converted to tensor or `axis` is not integer. + ValueError: If axis is out of range. + + Supported Platforms: + ``Ascend`` + + Examples: + >>> import mindspore.numpy as np + >>> x = np.array([1, 2, 3]) + >>> print(np.cumprod(x)) + [1 2 6] + """ + a = _to_tensor_origin_dtype(a) + original_dtype = F.dtype(a) + + if axis is not None and not isinstance(axis, int): + _raise_type_error("integer axis is expected, but got", axis) + if axis is None: + a = a.ravel() + axis = 0 + _check_axis_in_range(axis, a.ndim) + + a = a.astype('float32') if original_dtype != mstype.float64 else a + if dtype is None: + if original_dtype in [mstype.int8, mstype.int16, mstype.bool_]: + dtype = mstype.int32 + elif original_dtype in [mstype.uint8, mstype.uint16]: + dtype = mstype.uint32 + else: + dtype = original_dtype + return _cumprod_default(a, axis).astype(dtype, copy=False) + + +def _process_index(index, dims, mode='raise'): + """Generates index (Tensor) according to different modes.""" + if mode == "raise": + _raise_unimplemented_error("'raise' mode is not implemented") + if mode not in ['clip', 'wrap']: + _raise_value_error("invalid mode. Expected 'wrap' or 'clip'") + ori_shape = index.shape + tup = () + for i, idx in enumerate(index): + d = dims[i] + if mode == "clip": + idx = clip(idx, 0, d - 1) + elif mode == "wrap": + idx = remainder(idx, d) + idx = F.expand_dims(idx, 0) if idx.ndim < 1 else idx + tup += (idx,) + return P.Concat(0)(tup).reshape(ori_shape) + + +def _get_strides(dims, order='C'): + """Generates strides (1-D tensor) according to `dims` (1-D tensor).""" + if order not in ['C', 'F']: + _raise_value_error("invalid order. Expected 'C' or 'F'") + tup = (_to_tensor([1]),) + dims = dims[1:][::-1] if order == 'C' else dims[:-1] + for d in dims: + tensor = tup[-1] * d + if tensor.ndim < 1: + tensor = F.expand_dims(tensor, 0) + tup += (tensor,) + tup = tup[::-1] if order == 'C' else tup + return P.Concat(0)(tup)
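`_get_strides` reproduces C- or Fortran-order strides measured in elements: for C order, each stride is the product of all trailing dimensions. A quick check with concrete numbers in plain Python (illustrative only):

from itertools import accumulate
import operator

def strides_for(dims, order='C'):
    # element strides: C order multiplies trailing dims, F order leading dims
    rest = dims[1:][::-1] if order == 'C' else dims[:-1]
    out = list(accumulate((1,) + tuple(rest), operator.mul))
    return out[::-1] if order == 'C' else out

print(strides_for((7, 6)))        # [6, 1]:  flat index = i*6 + j
print(strides_for((7, 6), 'F'))   # [1, 7]:  flat index = i + j*7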
+ + +def ravel_multi_index(multi_index, dims, mode='clip', order='C'): + """ + Converts a tuple of index arrays into an array of flat indices, + applying boundary modes to the multi-index. + + Note: + `raise` mode is not supported. Default mode is `clip`. + + Args: + multi_index (tuple of array_like): + A tuple of integer arrays, one array for each dimension. + dims (Union[int, tuple of ints]): The shape of array into which the indices from multi_index apply. + mode ({`wrap`, `clip`}): Specifies how out-of-bounds indices are handled. Default: `clip`. + - `wrap`: wrap around + - `clip`: clip to the range + In `clip` mode, a negative index which would normally wrap will clip to 0 instead. + order ({`C`, `F`}): Determines whether the multi-index should be viewed as indexing in + row-major (C-style) or column-major (Fortran-style) order. + + Returns: + Tensor, an array of indices into the flattened version of an array of dimensions `dims`. + + Raises: + TypeError: If `multi_index` or `dims` can not be converted to tensor or + `dims` is not a sequence of integer values. + ValueError: If the length of `multi_index` and that of `dims` are not equal. + + Supported Platforms: + ``GPU`` + + Examples: + >>> import mindspore.numpy as np + >>> arr = np.array([[3, 6, 6], [4, 5, 1]]) + >>> output = np.ravel_multi_index(arr, (7, 6)) + >>> print(output) + [22. 41. 37.] + """ + if isinstance(dims, int): + dims = (dims,) + dims = _to_tensor(dims) + if dims.ndim > 1: + _raise_type_error("only integer scalar arrays can be converted to a scalar index.") + multi_index = _to_tensor(multi_index) + if len(multi_index) != len(dims): + _raise_value_error("parameter multi_index must be a sequence of length ", len(dims)) + + multi_index = _process_index(multi_index, dims, mode) + strides = _get_strides(dims, order) + s_shape = strides.shape + _list_comprehensions(multi_index.ndim - 1, 1, True) + strides = _broadcast_to_shape(strides.reshape(s_shape), multi_index.shape) + return sum_((multi_index * strides).astype('float32'), axis=0) + + +def _vector_norm(x, ord, axis, keepdims): # pylint: disable=redefined-builtin + """Returns norm of a vector.""" + if _in(ord, ('fro', 'nuc')): + _raise_value_error('Frobenius norm and nuclear norm are not defined for vectors') + if ord is None: + ord = 2 + if ord == inf: + res = P.ReduceMax(keepdims)(absolute(x), axis) + elif ord == -inf: + res = P.ReduceMin(keepdims)(absolute(x), axis) + elif ord == 0: + res = P.ReduceSum(keepdims)(F.not_equal(x, 0).astype(mstype.float32), axis) + else: + res = power(P.ReduceSum(keepdims)(power(absolute(x), ord), axis), 1./ord) + return res + + +def _matrix_norm(x, ord, axis, keepdims): # pylint: disable=redefined-builtin + """Returns norm of a matrix.""" + if ord == 0: + _raise_value_error('norm order 0 is not defined for matrices') + if ord == 'nuc': + _raise_unimplemented_error('nuclear norm is not implemented') + if _in(ord, (2, -2)): + _raise_unimplemented_error('2-norm is not implemented for matrices') + if _in(ord, (None, 'fro')): + res = F.sqrt(P.ReduceSum(keepdims)(F.square(x), axis)) + else: + axis0, axis1 = axis + if not keepdims: + if _abs(ord) == inf and axis0 > axis1: + axis0 -= 1 + elif _abs(ord) == 1 and axis1 > axis0: + axis1 -= 1 + if ord == inf: + res = P.ReduceMax(keepdims)(P.ReduceSum(keepdims)(absolute(x), axis1), axis0) + elif ord == -inf: + res = P.ReduceMin(keepdims)(P.ReduceSum(keepdims)(absolute(x), axis1), axis0) + elif ord == 1: + res = P.ReduceMax(keepdims)(P.ReduceSum(keepdims)(absolute(x), axis0), axis1) + elif ord == -1: + res = P.ReduceMin(keepdims)(P.ReduceSum(keepdims)(absolute(x), axis0), axis1) + else: + return _raise_value_error('invalid norm order for matrices') + return res
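The vector branch above implements the usual p-norm family: max|x| for ord=inf, min|x| for -inf, the count of nonzeros for ord=0, and (sum |x|^p)^(1/p) otherwise. For instance, the default 2-norm of arange(9) quoted in the docstring below is sqrt(0^2 + ... + 8^2); checked with standard numpy:

import numpy as np

x = np.arange(9, dtype=np.float32)
print(np.sqrt(np.sum(np.abs(x) ** 2)))  # 14.282857, the ord=2 vector norm
print(np.linalg.norm(x))                # same value via numpy's own norm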
+ + +def norm(x, ord=None, axis=None, keepdims=False): # pylint: disable=redefined-builtin + """ + Matrix or vector norm. + This function is able to return one of eight different matrix norms, or one of an + infinite number of vector norms (described below), depending on the value of the + ord parameter. + + Note: + Nuclear norm and 2-norm are not supported for matrices. + + Args: + x (Union[int, float, bool, list, tuple, Tensor]): Input array. If `axis` is None, + `x` must be 1-D or 2-D, unless `ord` is None. If both `axis` and `ord` are None, + the 2-norm of ``x.ravel()`` will be returned. + ord (Union[None, 'fro', 'nuc', inf, -inf, int, float], optional): Order of the norm. + inf means numpy's inf object. The default is None. + axis (Union[None, int, 2-tuple of ints], optional): If `axis` is an integer, it + specifies the axis of `x` along which to compute the vector norms. If `axis` is + a 2-tuple, it specifies the axes that hold 2-D matrices, and the matrix norms of + these matrices are computed. If `axis` is None then either a vector norm (when x + is 1-D) or a matrix norm (when `x` is 2-D) is returned. The default is None. + keepdims (boolean, optional): If this is set to True, the axes which are normed over + are left in the result as dimensions with size one. With this option the result + will broadcast correctly against the original `x`. + + Returns: + Tensor, norm of the matrix or vector(s). + + Raises: + ValueError: If the norm order is not defined. + + Supported Platforms: + ``Ascend`` ``GPU`` ``CPU`` + + Examples: + >>> import mindspore.numpy as np + >>> print(np.norm(np.arange(9))) + 14.282857 + """ + if not isinstance(ord, (int, float)) and not _in(ord, (None, 'fro', 'nuc', inf, -inf)): + _raise_value_error('invalid value for `ord`') + x = _to_tensor(x) + ndim = F.rank(x) + if axis is None: + if ord is None: + x = x.ravel() + if F.rank(x) not in (1, 2): + _raise_value_error('for None axis, array must be a vector or a 2-D matrix') + axis = F.make_range(F.rank(x)) + axis = _check_axis_valid(axis, F.rank(x)) + + if len(axis) == 1: + res = _vector_norm(x, ord, axis, keepdims) + elif len(axis) == 2: + res = _matrix_norm(x, ord, axis, keepdims) + else: + return _raise_value_error('invalid number of dimensions to norm') + + if keepdims and ndim > F.rank(res): + res = _expand(res, ndim) + return res + + +def bitwise_and(x1, x2, dtype=None): + """ + Computes the bit-wise AND of two arrays element-wise. + Computes the bit-wise AND of the underlying binary representation of the integers in + the input arrays. This ufunc implements the C/Python operator &. + + Note: + Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are + not supported. + + Args: + x1 (Tensor): Input array. + x2 (Tensor): Input array. Only integer and boolean types are handled. If + ``x1.shape != x2.shape``, they must be broadcastable to a common shape (which becomes + the shape of the output). + dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the + output Tensor. + + Returns: + Tensor or scalar, this is a scalar if both x1 and x2 are scalars. + + Supported Platforms: + ``Ascend`` + + Examples: + >>> import mindspore.numpy as np + >>> print(np.bitwise_and(13, 17)) + 1 + """ + return _apply_tensor_op(F.bitwise_and, x1, x2, dtype=dtype)
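As with the other bitwise ufuncs below, these operate on the binary representations of the operands; for example, 13 & 17 keeps only the bits set in both. Checked with plain Python integers:

# 13 = 0b01101, 17 = 0b10001, 16 = 0b10000
print(13 & 17)   # 1  (0b00001), matching np.bitwise_and(13, 17)
print(13 | 16)   # 29 (0b11101)
print(13 ^ 17)   # 28 (0b11100)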
+ + +def bitwise_or(x1, x2, dtype=None): + """ + Computes the bit-wise OR of two arrays element-wise. + Computes the bit-wise OR of the underlying binary representation of the integers in + the input arrays. This ufunc implements the C/Python operator |. + + Note: + Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are + not supported. + + Args: + x1 (Tensor): Input array. + x2 (Tensor): Input array. Only integer and boolean types are handled. If + ``x1.shape != x2.shape``, they must be broadcastable to a common shape (which becomes + the shape of the output). + dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the + output Tensor. + + Returns: + Tensor or scalar, this is a scalar if both x1 and x2 are scalars. + + Supported Platforms: + ``Ascend`` + + Examples: + >>> import mindspore.numpy as np + >>> print(np.bitwise_or(13, 16)) + 29 + """ + return _apply_tensor_op(F.bitwise_or, x1, x2, dtype=dtype) + + +def bitwise_xor(x1, x2, dtype=None): + """ + Computes the bit-wise XOR of two arrays element-wise. + Computes the bit-wise XOR of the underlying binary representation of the integers in + the input arrays. This ufunc implements the C/Python operator ^. + + Note: + Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are + not supported. + + Args: + x1 (Tensor): Input array. + x2 (Tensor): Input array. Only integer and boolean types are handled. If + ``x1.shape != x2.shape``, they must be broadcastable to a common shape (which becomes + the shape of the output). + dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the + output Tensor. + + Returns: + Tensor or scalar, this is a scalar if both x1 and x2 are scalars. + + Supported Platforms: + ``Ascend`` + + Examples: + >>> import mindspore.numpy as np + >>> print(np.bitwise_xor(13, 17)) + 28 + """ + return _apply_tensor_op(F.bitwise_xor, x1, x2, dtype=dtype) + + +def invert(x, dtype=None): + """ + Computes bit-wise inversion, or bit-wise NOT, element-wise. + Computes the bit-wise NOT of the underlying binary representation of the integers in + the input arrays. This ufunc implements the C/Python operator ~. + For signed integer inputs, the two's complement is returned. In a two's-complement system + negative numbers are represented by the two's complement of the absolute value. This is + the most common method of representing signed integers on computers. An N-bit + two's-complement system can represent every integer in the range ``-2^{N-1}`` to ``+2^{N-1}-1``. + + Note: + Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are + not supported. + Supported dtypes on Ascend: np.int16, np.uint16. + + Args: + x (Tensor): Only integer and boolean types are handled. + dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the + output Tensor. + + Returns: + Tensor or scalar, this is a scalar if `x` is a scalar. + + Supported Platforms: + ``Ascend`` + + Examples: + >>> import mindspore.numpy as np + >>> print(np.invert(np.array(13, dtype=np.uint16))) + 65522 + """ + return _apply_tensor_op(F.invert, x, dtype=dtype)
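The two's-complement identity above is easy to verify with plain Python integers (illustrative only):

# ~x == -(x + 1) in two's complement; for unsigned 16-bit storage:
x = 13
print(~x & 0xFFFF)            # 65522, matching the uint16 example above
print((-x - 1) % (1 << 16))   # same value, computed from the identity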
+ + +def rint(x, dtype=None): + """ + Rounds elements of the array to the nearest integer. + + Note: + Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are + not supported. + + Args: + x (Union[int, float, bool, list, tuple, Tensor]): Input tensor. + dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the + output Tensor. + + Returns: + Output tensor with the same shape and type as `x`. This is a scalar if `x` is a scalar. + + Raises: + TypeError: If `x` can not be converted to tensor. + + Supported Platforms: + ``Ascend`` + + Examples: + >>> import mindspore.numpy as np + >>> x = np.array([-1.7, -1.5, 0.2, 1.5, 1.7, 2.0]) + >>> print(np.rint(x)) + [-2. -2. 0. 2. 2. 2.] + """ + return _apply_tensor_op(_round, x, dtype=dtype) + + +def correlate(a, v, mode='valid'): + """ + Cross-correlation of two 1-dimensional sequences. + + This function computes the correlation as generally defined in signal processing texts: + + :math:`c_{av}[k] = \\sum_n a[n+k] * conj(v[n])` + + with `a` and `v` sequences being zero-padded where necessary and conj being the conjugate. + + Note: + Currently, complex numbers are not supported. + + Args: + a (Union[list, tuple, Tensor]): First input sequence. + v (Union[list, tuple, Tensor]): Second input sequence. + mode (str, optional): By default, mode is 'valid'. + If `mode` is 'valid', it returns output of length :math:`max(M, N) - min(M, N) + 1`. + The convolution product is only given for points where the signals overlap + completely. Values outside the signal boundary have no effect. + If `mode` is 'full', it returns the convolution at each point of overlap, with + an output shape of :math:`(N + M - 1,)`. + At the end-points of the convolution, the signals do not overlap completely, + and boundary effects may be seen. + If `mode` is 'same', it returns output of length :math:`max(M, N)`. Boundary + effects are still visible. + + Returns: + Tensor. Discrete cross-correlation of `a` and `v`. + + Raises: + TypeError: if the inputs can not be converted to tensor. + ValueError: if `a` and `v` are empty or have wrong dimensions. + + Supported Platforms: + ``GPU`` + + Examples: + >>> import mindspore.numpy as np + >>> output = np.correlate([1, 2, 3], [0, 1, 0.5]) + >>> print(output) + [3.5] + >>> output = np.correlate([1, 2, 3], [0, 1, 0.5], mode="same") + >>> print(output) + [2. 3.5 3. ] + """ + a, v = _to_tensor(a, v) + if a.ndim != 1 or v.ndim != 1: + _raise_value_error("only support 1-dimensional inputs.") + if a.size == 0 or v.size == 0: + _raise_value_error("Inputs cannot be empty.") + + promote_dtype = _promote(a.dtype, v.dtype) + # P.Conv2D requires that the two tensors have the same data type. + # If the promote data type is not supported, it will be converted to float32. + # The supported dtype list may vary in the future. + if promote_dtype not in [mstype.float32, mstype.float16]: + promote_dtype = mstype.float32 + a = a.astype(promote_dtype) + v = v.astype(promote_dtype) + if a.size < v.size: + a, v = v, a + return _compute_1D_conv(a, v, mode)[::-1] + return _compute_1D_conv(a, v, mode) + + +def _compute_1D_conv(a, v, mode): + """Returns a 1-D sequence which is the cross-correlate of two 1-D sequences (`a` and `v`).""" + v_size = F.shape_mul(v.shape) + if mode not in ('same', 'full', 'valid'): + _raise_value_error("mode must be one of ['full', 'same', 'valid']") + if v_size > 1: + if mode == 'same': + pad_left = _to_tensor(_list_comprehensions(v_size // 2, 0.0, True)) + pad_right = _to_tensor(_list_comprehensions(v_size - v_size // 2 - 1, 0.0, True)) + a = P.Concat(0)((pad_left, a, pad_right)) + elif mode == 'full': + pad = _to_tensor(_list_comprehensions(v_size - 1, 0.0, True)) + a = P.Concat(0)((pad, a, pad)) + a = a.reshape(1, 1, 1, a.size) + v = v.reshape(1, 1, 1, v.size) + _conv = P.Conv2D(1, (1, v.size)) + return _conv(a, v).reshape(-1)
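The three modes differ only in how much zero-padding `_compute_1D_conv` adds before the Conv2D call: 'valid' pads nothing, 'same' pads so that max(M, N) outputs survive, and 'full' pads v_size - 1 zeros on both sides. The expected outputs can be sanity-checked against standard numpy:

import numpy as np

a, v = [1, 2, 3], [0, 1, 0.5]
for mode in ('valid', 'same', 'full'):
    print(mode, np.correlate(a, v, mode))
# valid -> [3.5]
# same  -> [2.  3.5 3. ]
# full  -> [0.5 2.  3.5 3.  0. ]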
+ + +def radians(x, dtype=None): + """ + Converts angles from degrees to radians. + + Args: + x (Tensor): Angles in degrees. + dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the + output Tensor. + + Returns: + Tensor, the corresponding radian values. This is a tensor scalar if `x` + is a tensor scalar. + + Raises: + TypeError: if `x` is not a tensor. + + Supported Platforms: + ``Ascend`` ``GPU`` ``CPU`` + + Examples: + >>> import mindspore.numpy as np + >>> x = np.asarray([1, 2, 3, -4, -5]) + >>> output = np.radians(x) + >>> print(output) + [ 0.01745329 0.03490658 0.05235988 -0.06981317 -0.08726647] + """ + return deg2rad(x, dtype=dtype) diff --git a/mindspore/numpy/utils.py b/mindspore/numpy/utils.py index 1b40324b7dd..72277f19999 100644 --- a/mindspore/numpy/utils.py +++ b/mindspore/numpy/utils.py @@ -13,11 +13,14 @@ # limitations under the License. # ============================================================================ """internal utility functions""" +import types + from ..common import Tensor from ..ops import functional as F from ..common import dtype as mstype -from .utils_const import _tile_size, _add_unit_axes, _raise_type_error, _type_convert +from .utils_const import _tile_size, _add_unit_axes, _raise_type_error, _type_convert, \ + _tuple_setitem, _callable_const def _deep_list(array_like): @@ -154,3 +157,52 @@ def _get_dtype_from_scalar(*input_numbers): def _isnan(x): """Computes isnan.""" return F.not_equal(x, x) + + +def _convert_bool_to_int(tensor): + """Convert tensor with bool type to int32.""" + if tensor.dtype == mstype.bool_: + return tensor.astype("int32") + return tensor + + +def _slice_along_axis(f, axis, slice_start, slice_end): + """ + Slices a tensor along a given axis. + + Args: + f (Tensor): Input Tensor. + axis (int): Specified axis. + slice_start (int): The start of the slice. + slice_end (int): The end of the slice. + + Returns: + Sliced tensor. + """ + index_start = (0,) * f.ndim + index_end = f.shape + slice_size = slice_end - slice_start + index_start = _tuple_setitem(index_start, axis, slice_start) + index_end = _tuple_setitem(index_end, axis, slice_size) + return F.tensor_slice(f, index_start, index_end) + + +def _to_tensor_origin_dtype(*args): + """Returns each input as Tensor and retains the original dtype.""" + res = [] + for arg in args: + if isinstance(arg, (int, float, bool, list, tuple)): + arg = _type_convert(Tensor, arg) + elif not isinstance(arg, Tensor): + _raise_type_error("Expect input to be array like.") + res.append(arg) + if len(res) == 1: + return res[0] + return res + + +def _callable(tensor, obj): + """Returns True if `obj` is a function.""" + if F.isconstant(tensor): + return isinstance(obj, types.FunctionType) + return _callable_const(F.typeof(obj)) diff --git a/mindspore/numpy/utils_const.py b/mindspore/numpy/utils_const.py index 302e170a076..394eb22440b 100644 --- a/mindspore/numpy/utils_const.py +++ b/mindspore/numpy/utils_const.py @@ -14,8 +14,9 @@ # ============================================================================ """internal graph-compatible utility functions""" import math -from itertools import zip_longest +from itertools import zip_longest, accumulate from collections import deque +import operator import mindspore.context as context from ..ops import functional as F @@ -126,6 +127,18 @@ def _infer_out_shape(*shapes): return tuple(shape_out) + +@constexpr +def _can_broadcast(*shapes): + """ + Returns True if shapes can broadcast, False if they cannot.
+ """ + try: + _infer_out_shape(*shapes) + except ValueError: + return False + return True + + @constexpr def _check_axis_in_range(axis, ndim): """Checks axes are with the bounds of ndim""" @@ -133,6 +146,7 @@ def _check_axis_in_range(axis, ndim): raise TypeError(f'axes should be integers, not {type(axis)}') if not -ndim <= axis < ndim: raise ValueError(f'axis {axis} is out of bounds for array of dimension {ndim}') + return axis % ndim @constexpr @@ -145,14 +159,11 @@ def _check_axis_valid(axes, ndim): axes = F.make_range(ndim) return axes if isinstance(axes, (tuple, list)): - for axis in axes: - _check_axis_in_range(axis, ndim) - axes = tuple(map(lambda x: x % ndim, axes)) + axes = tuple(map(lambda x: _check_axis_in_range(x, ndim), axes)) if any(axes.count(el) > 1 for el in axes): raise ValueError('duplicate value in "axis"') return axes - _check_axis_in_range(axes, ndim) - return (axes % ndim,) + return (_check_axis_in_range(axes, ndim),) @constexpr @@ -397,7 +408,7 @@ def _type_convert(force, obj): @constexpr -def _list_comprehensions(obj, item=None, return_tuple=False): +def _list_comprehensions(obj, item=None, return_tuple=False, make_none=False): """ Generates a new list/tuple by list comprehension. @@ -416,7 +427,9 @@ def _list_comprehensions(obj, item=None, return_tuple=False): lst = obj if isinstance(obj, int): lst = range(obj) - if item is None: + if make_none: + res = [None for _ in lst] + elif item is None: res = [i for i in lst] else: res = [item for i in lst] @@ -425,17 +438,6 @@ def _list_comprehensions(obj, item=None, return_tuple=False): return res -@constexpr -def _tuple_getitem(tup, idx, startswith=True): - """ - Returns a slice from tup starting with idx. If startswith is False, - returns a lice from tup ending with idx instead. - """ - if startswith: - return tup[idx:] - return tup[:idx] - - @constexpr def _tuple_setitem(tup, idx, value): """ @@ -471,7 +473,7 @@ def _seq_prod(seq1, seq2): @constexpr def _make_tensor(val, dtype): - """ Returns the tensor with value `val` and dtype `dtype`.""" + """Returns the tensor with value `val` and dtype `dtype`.""" return Tensor(val, dtype) @@ -479,3 +481,26 @@ def _make_tensor(val, dtype): def _tuple_slice(tup, start, end): """get sliced tuple from start and end.""" return tup[start:end] + + +@constexpr +def _isscalar(x): + """Returns True if x is a scalar type""" + return isinstance(x, (typing.Number, typing.Int, typing.UInt, typing.Float, + typing.Bool, typing.String)) + + +@constexpr +def _cumprod(x): + return tuple(accumulate(x, operator.mul)) + + +@constexpr +def _in(x, y): + return x in y + + +@constexpr +def _callable_const(x): + """Returns true if x is a function in graph mode.""" + return isinstance(x, typing.Function) diff --git a/mindspore/ops/composite/multitype_ops/_constexpr_utils.py b/mindspore/ops/composite/multitype_ops/_constexpr_utils.py index 3215d4bea36..4ad5d92cee0 100644 --- a/mindspore/ops/composite/multitype_ops/_constexpr_utils.py +++ b/mindspore/ops/composite/multitype_ops/_constexpr_utils.py @@ -778,9 +778,8 @@ def get_stride_info_from_tuple(data_shape, tuple_index): @constexpr def mstype_eq(x, y): - if x == y: - return True - return False + """Determine whether the input `x` equals `y`.""" + return x == y @constexpr @@ -841,3 +840,26 @@ def tuple_slice(tup, start, end): @constexpr def expanded_shape(shape, expand_size): return (1,)*expand_size + shape + + +@constexpr +def sequence_mul_int(seq, number): + """ + Make a new list with native python syntax. 
+ + Args: + seq (Union[list, tuple]): Input sequence. + number (int): Input number. + + Returns: + New sequence, has the same type as `seq`. + """ + if not isinstance(number, int): + raise TypeError(f"can't multiply sequence by non-int of type {type(number)}") + return seq * number + + +@constexpr +def check_in_sequence(x, y): + """Determine whether the input `x` is in the sequence `y`.""" + return x in y diff --git a/mindspore/ops/composite/multitype_ops/in_impl.py b/mindspore/ops/composite/multitype_ops/in_impl.py index 89776fe0c7c..1b83f86a824 100644 --- a/mindspore/ops/composite/multitype_ops/in_impl.py +++ b/mindspore/ops/composite/multitype_ops/in_impl.py @@ -130,3 +130,33 @@ def _tensor_in_tuple(x, y): bool, if x in y return true, x not in y return false. """ return compile_utils.tensor_in_sequence(x, y) + + +@in_.register("mstype", "List") +def _mstype_in_list(x, y): + """ + Determine if a mindspore type is in a list. + + Args: + x: mstype + y: List + + Returns: + bool, if x in y return true, x not in y return false. + """ + return const_utils.check_in_sequence(x, y) + + +@in_.register("mstype", "Tuple") +def _mstype_in_tuple(x, y): + """ + Determine if a mindspore type is in a tuple. + + Args: + x: mstype + y: Tuple + + Returns: + bool, if x in y return true, x not in y return false. + """ + return const_utils.check_in_sequence(x, y) diff --git a/mindspore/ops/composite/multitype_ops/mul_impl.py b/mindspore/ops/composite/multitype_ops/mul_impl.py index d130d1a92f8..143975baad8 100644 --- a/mindspore/ops/composite/multitype_ops/mul_impl.py +++ b/mindspore/ops/composite/multitype_ops/mul_impl.py @@ -15,6 +15,7 @@ """Implementation for internal polymorphism `mul` operations.""" +from . import _constexpr_utils as const_utils from ...composite import base from ... import functional as F @@ -68,3 +69,47 @@ def _tensor_mul_scalar(x, y): Tensor, has the same dtype as x. """ return F.tensor_mul(x, y) + + +@mul.register("List", "Number") +def _list_mul_scalar(x, y): + """ + Returns x * y where x is a list and y is a number. y must be an integer. + + Outputs: + List. + """ + return const_utils.sequence_mul_int(x, y) + + +@mul.register("Tuple", "Number") +def _tuple_mul_scalar(x, y): + """ + Returns x * y where x is a tuple and y is a number. y must be an integer. + + Outputs: + Tuple. + """ + return const_utils.sequence_mul_int(x, y) + + +@mul.register("Number", "List") +def _scalar_mul_list(x, y): + """ + Returns x * y where x is a number and y is a list. x must be an integer. + + Outputs: + List. + """ + return const_utils.sequence_mul_int(y, x) + + +@mul.register("Number", "Tuple") +def _scalar_mul_tuple(x, y): + """ + Returns x * y where x is a number and y is a tuple. x must be an integer. + + Outputs: + Tuple. + """ + return const_utils.sequence_mul_int(y, x) diff --git a/mindspore/ops/composite/multitype_ops/not_in_impl.py b/mindspore/ops/composite/multitype_ops/not_in_impl.py index 4499ccf0959..907d334628f 100644 --- a/mindspore/ops/composite/multitype_ops/not_in_impl.py +++ b/mindspore/ops/composite/multitype_ops/not_in_impl.py @@ -130,3 +130,33 @@ def _tensor_not_in_tuple(x, y): bool, if x not in y return true, x in y return false. """ return not compile_utils.tensor_in_sequence(x, y) + + +@not_in_.register("mstype", "List") +def _mstype_not_in_list(x, y): + """ + Determine if a mindspore type is not in a list. + + Args: + x: mstype + y: List + + Returns: + bool, if x not in y return true, x in y return false.
+ """ + return not const_utils.check_in_sequence(x, y) + + +@not_in_.register("mstype", "Tuple") +def _mstype_not_in_tuple(x, y): + """ + Determine if a mindspore type is not in a tuple. + + Args: + x: mstype + y: Tuple + + Returns: + bool, if x not in y return true, x in y return false. + """ + return not const_utils.check_in_sequence(x, y) diff --git a/mindspore/ops/functional.py b/mindspore/ops/functional.py index 9645c63a509..c42530111d9 100644 --- a/mindspore/ops/functional.py +++ b/mindspore/ops/functional.py @@ -86,6 +86,10 @@ square = P.Square() sqrt = P.Sqrt() log = P.Log() reduce_sum = P.ReduceSum() +reduce_max = P.ReduceMax() +reduce_min = P.ReduceMin() +reduce_mean = P.ReduceMean() +reduce_prod = P.ReduceProd() tensor_slice = P.Slice() maximum = P.Maximum() minimum = P.Minimum() @@ -106,6 +110,10 @@ asinh = P.Asinh() acosh = P.Acosh() atanh = P.Atanh() atan2 = P.Atan2() +bitwise_and = P.BitwiseAnd() +bitwise_or = P.BitwiseOr() +bitwise_xor = P.BitwiseXor() +invert = P.Invert() scalar_to_array = P.ScalarToArray() scalar_to_tensor = P.ScalarToTensor() @@ -227,6 +235,8 @@ tensor_operator_registry.register('mean', P.ReduceMean) tensor_operator_registry.register('reshape', P.Reshape) tensor_operator_registry.register('transpose', P.Transpose) tensor_operator_registry.register('broadcast_to', P.BroadcastTo) +tensor_operator_registry.register('matmul', P.MatMul) +tensor_operator_registry.register('argmax', P.Argmax) # ms cannot support Tensor(True) compare tensor_operator_registry.register('__eq__', equal) tensor_operator_registry.register('__ne__', not_equal) diff --git a/mindspore/ops/operations/math_ops.py b/mindspore/ops/operations/math_ops.py index faf795120eb..0f1cb67643a 100644 --- a/mindspore/ops/operations/math_ops.py +++ b/mindspore/ops/operations/math_ops.py @@ -2654,7 +2654,7 @@ class Acosh(PrimitiveWithInfer): TypeError: If `input_x` is not a Tensor. Supported Platforms: - ``Ascend`` ``GPU`` ``CPU`` + ``Ascend`` ``GPU`` Examples: >>> acosh = ops.Acosh() @@ -2731,7 +2731,7 @@ class Asinh(PrimitiveWithInfer): TypeError: If `input_x` is not a Tensor. 
Supported Platforms: - ``Ascend`` ``GPU`` ``CPU`` + ``Ascend`` ``GPU`` Examples: >>> asinh = ops.Asinh() diff --git a/tests/st/numpy_native/test_array_creations.py b/tests/st/numpy_native/test_array_creations.py index 69ea19602ba..6c882cfca59 100644 --- a/tests/st/numpy_native/test_array_creations.py +++ b/tests/st/numpy_native/test_array_creations.py @@ -805,6 +805,127 @@ def test_vander(): match_all_arrays(mnp_vander, onp_vander, error=1e-4) +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.platform_x86_gpu_training +@pytest.mark.platform_x86_cpu +@pytest.mark.env_onecard +def test_bartlett(): + for i in [-3, -1, 0, 1, 5, 6, 10, 15]: + match_all_arrays(mnp.bartlett(i), onp.bartlett(i), error=3) + + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.platform_x86_gpu_training +@pytest.mark.platform_x86_cpu +@pytest.mark.env_onecard +def test_blackman(): + for i in [-3, -1, 0, 1, 5, 6, 10, 15]: + match_all_arrays(mnp.blackman(i), onp.blackman(i), error=3) + + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.platform_x86_gpu_training +@pytest.mark.platform_x86_cpu +@pytest.mark.env_onecard +def test_hamming(): + for i in [-3, -1, 0, 1, 5, 6, 10, 15]: + match_all_arrays(mnp.hamming(i), onp.hamming(i), error=3) + + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.platform_x86_gpu_training +@pytest.mark.platform_x86_cpu +@pytest.mark.env_onecard +def test_hanning(): + for i in [-3, -1, 0, 1, 5, 6, 10, 15]: + match_all_arrays(mnp.hanning(i), onp.hanning(i), error=3) + + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.platform_x86_gpu_training +@pytest.mark.platform_x86_cpu +@pytest.mark.env_onecard +def test_triu_indices(): + m = rand_int().tolist() + n = rand_int().tolist() + k = rand_int().tolist() + mnp_res = mnp.triu_indices(n, k, m) + onp_res = onp.triu_indices(n, k, m) + match_all_arrays(mnp_res, onp_res) + + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.platform_x86_gpu_training +@pytest.mark.platform_x86_cpu +@pytest.mark.env_onecard +def test_tril_indices(): + m = rand_int().tolist() + n = rand_int().tolist() + k = rand_int().tolist() + mnp_res = mnp.tril_indices(n, k, m) + onp_res = onp.tril_indices(n, k, m) + match_all_arrays(mnp_res, onp_res) + + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.platform_x86_gpu_training +@pytest.mark.platform_x86_cpu +@pytest.mark.env_onecard +def test_triu_indices_from(): + m = int(rand_int().tolist()) + n = int(rand_int().tolist()) + t = mnp.asarray(rand_int(m, n).tolist()) + k = rand_int().tolist() + mnp_res = mnp.triu_indices_from(t, k) + onp_res = onp.triu_indices_from(t.asnumpy(), k) + match_all_arrays(mnp_res, onp_res) + + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.platform_x86_gpu_training +@pytest.mark.platform_x86_cpu +@pytest.mark.env_onecard +def test_tril_indices_from(): + m = int(rand_int().tolist()) + n = int(rand_int().tolist()) + t = mnp.asarray(rand_int(m, n).tolist()) + k = rand_int().tolist() + mnp_res = mnp.tril_indices_from(t, k) + onp_res = 
onp.tril_indices_from(t.asnumpy(), k) + match_all_arrays(mnp_res, onp_res) + + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.platform_x86_gpu_training +@pytest.mark.platform_x86_cpu +@pytest.mark.env_onecard +def test_histogram_bin_edges(): + x = onp.random.randint(-10, 10, 10) + for bins in [(1, 2, 3), [2], 1, 5, 10]: + # pylint: disable=redefined-builtin + for range in [None, (3, 3), (2, 20)]: + match_res(mnp.histogram_bin_edges, onp.histogram_bin_edges, x, bins=bins, range=range, error=3) + match_res(mnp.histogram_bin_edges, onp.histogram_bin_edges, x, onp.arange(5)) + + @pytest.mark.level1 @pytest.mark.platform_arm_ascend_training @pytest.mark.platform_x86_ascend_training @@ -836,3 +957,90 @@ def test_linspace_exception(): def test_empty_like_exception(): with pytest.raises(ValueError): mnp.empty_like([[1, 2, 3], [4, 5]]) + + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.platform_x86_gpu_training +@pytest.mark.platform_x86_cpu +@pytest.mark.env_onecard +def test_pad(): + x_np = onp.random.random([2, 3, 4]).astype("float32") + x_ms = mnp.asarray(x_np.tolist()) + + # pad constant + mnp_res = mnp.pad(x_ms, ((1, 1), (2, 2), (3, 4))) + onp_res = onp.pad(x_np, ((1, 1), (2, 2), (3, 4))) + match_all_arrays(mnp_res, onp_res, error=1e-5) + mnp_res = mnp.pad(x_ms, ((1, 1), (2, 3), (4, 5)), constant_values=((3, 4), (5, 6), (7, 8))) + onp_res = onp.pad(x_np, ((1, 1), (2, 3), (4, 5)), constant_values=((3, 4), (5, 6), (7, 8))) + match_all_arrays(mnp_res, onp_res, error=1e-5) + + # pad statistic + mnp_res = mnp.pad(x_ms, ((1, 1), (2, 2), (3, 4)), mode="mean", stat_length=((1, 2), (2, 10), (3, 4))) + onp_res = onp.pad(x_np, ((1, 1), (2, 2), (3, 4)), mode="mean", stat_length=((1, 2), (2, 10), (3, 4))) + match_all_arrays(mnp_res, onp_res, error=1e-5) + + # pad edge + mnp_res = mnp.pad(x_ms, ((1, 1), (2, 2), (3, 4)), mode="edge") + onp_res = onp.pad(x_np, ((1, 1), (2, 2), (3, 4)), mode="edge") + match_all_arrays(mnp_res, onp_res, error=1e-5) + + # pad wrap + mnp_res = mnp.pad(x_ms, ((1, 1), (2, 2), (3, 4)), mode="wrap") + onp_res = onp.pad(x_np, ((1, 1), (2, 2), (3, 4)), mode="wrap") + match_all_arrays(mnp_res, onp_res, error=1e-5) + + # pad linear_ramp + mnp_res = mnp.pad(x_ms, ((1, 3), (5, 2), (3, 0)), mode="linear_ramp", end_values=((0, 10), (9, 1), (-10, 99))) + onp_res = onp.pad(x_np, ((1, 3), (5, 2), (3, 0)), mode="linear_ramp", end_values=((0, 10), (9, 1), (-10, 99))) + match_all_arrays(mnp_res, onp_res, error=1e-5) + + +def pad_with_msfunc(vector, pad_width, iaxis, kwargs): + pad_value = kwargs.get('padder', 10) + vector[:pad_width[0]] = pad_value + vector[-pad_width[1]:] = pad_value + return vector + + +def pad_with_npfunc(vector, pad_width, iaxis, kwargs): + pad_value = kwargs.get('padder', 10) + vector[:pad_width[0]] = pad_value + vector[-pad_width[1]:] = pad_value + + +@pytest.mark.level0 +@pytest.mark.platform_x86_gpu_training +@pytest.mark.env_onecard +def test_pad_gpu(): + x_np = onp.random.random([2, 1, 4, 3]).astype("float32") + x_ms = mnp.asarray(x_np.tolist()) + + # pad symmetric odd + mnp_res = mnp.pad(x_ms, ((10, 3), (5, 2), (3, 0), (2, 6)), mode='symmetric', reflect_type='odd') + onp_res = onp.pad(x_np, ((10, 3), (5, 2), (3, 0), (2, 6)), mode='symmetric', reflect_type='odd') + match_all_arrays(mnp_res, onp_res, error=1e-5) + + # pad symmetric even + mnp_res = mnp.pad(x_ms, ((10, 13), (5, 12), (3, 0), (2, 6)), mode='symmetric', 
reflect_type='even') + onp_res = onp.pad(x_np, ((10, 13), (5, 12), (3, 0), (2, 6)), mode='symmetric', reflect_type='even') + match_all_arrays(mnp_res, onp_res, error=1e-5) + + # pad reflect odd + mnp_res = mnp.pad(x_ms, ((10, 3), (5, 2), (3, 0), (2, 6)), mode='reflect', reflect_type='odd') + onp_res = onp.pad(x_np, ((10, 3), (5, 2), (3, 0), (2, 6)), mode='reflect', reflect_type='odd') + match_all_arrays(mnp_res, onp_res, error=1e-5) + + # pad reflect even + mnp_res = mnp.pad(x_ms, ((10, 13)), mode='reflect', reflect_type='even') + onp_res = onp.pad(x_np, ((10, 13)), mode='reflect', reflect_type='even') + match_all_arrays(mnp_res, onp_res, error=1e-5) + + # pad func + x_np = onp.random.random([2, 4]).astype("float32") + x_ms = mnp.asarray(x_np.tolist()) + mnp_res = mnp.pad(x_ms, ((5, 5)), mode=pad_with_msfunc, padder=99) + onp_res = onp.pad(x_np, ((5, 5)), mode=pad_with_npfunc, padder=99) + match_all_arrays(mnp_res, onp_res, error=1e-5) diff --git a/tests/st/numpy_native/test_array_ops.py b/tests/st/numpy_native/test_array_ops.py index 7dab7484b20..585fb6556d1 100644 --- a/tests/st/numpy_native/test_array_ops.py +++ b/tests/st/numpy_native/test_array_ops.py @@ -23,7 +23,7 @@ import mindspore.numpy as mnp from mindspore.nn import Cell from .utils import rand_int, run_non_kw_test, check_all_results, match_array, \ - rand_bool, match_res, run_multi_test, to_tensor + rand_bool, match_res, run_multi_test, to_tensor, match_all_arrays class Cases(): @@ -1253,6 +1253,22 @@ def test_select(): match_res(mnp.select, onp.select, condlist, choicelist, default=10) +def test_choose(): + x = rand_int(2, 1, 4).astype(onp.int32) + y = rand_int(3, 2, 5, 4).astype(onp.int32) + match_res(mnp.choose, onp.choose, x, y, mode='wrap') + match_res(mnp.choose, onp.choose, x, y, mode='clip') + + x = rand_int(5, 3, 1, 7).astype(onp.int32) + y1 = rand_int(7).astype(onp.int32) + y2 = rand_int(1, 3, 1).astype(onp.int32) + y3 = rand_int(5, 1, 1, 7).astype(onp.int32) + onp_arrays = (x, (y1, y2, y3)) + mnp_arrays = (to_tensor(x), tuple(map(to_tensor, (y1, y2, y3)))) + match_all_arrays(mnp.choose(*mnp_arrays, mode='wrap'), onp.choose(*onp_arrays, mode='wrap')) + match_all_arrays(mnp.choose(*mnp_arrays, mode='clip'), onp.choose(*onp_arrays, mode='clip')) + + class ReshapeExpandSqueeze(Cell): def __init__(self): super(ReshapeExpandSqueeze, self).__init__() @@ -1444,3 +1460,159 @@ def test_rot90(): o_rot = onp_rot90(onp_array) m_rot = mnp_rot90(mnp_array) check_all_results(o_rot, m_rot) + + +def mnp_size(x): + a = mnp.size(x) + b = mnp.size(x, axis=0) + return a, b + + +def onp_size(x): + a = onp.size(x) + b = onp.size(x, axis=0) + return a, b + + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.platform_x86_gpu_training +@pytest.mark.platform_x86_cpu +@pytest.mark.env_onecard +def test_size(): + onp_arr = onp.random.rand(2, 3, 4).astype('float32') + mnp_arr = to_tensor(onp_arr) + for actual, expected in zip(mnp_size(mnp_arr), onp_size(onp_arr)): + match_array(actual, expected) + + +def mnp_array_str(x): + return mnp.array_str(x) + + +def onp_array_str(x): + return onp.array_str(x) + + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.platform_x86_gpu_training +@pytest.mark.platform_x86_cpu +@pytest.mark.env_onecard +def test_array_str(): + onp_arr = onp.random.rand(2, 3, 4).astype('float32') + mnp_arr = to_tensor(onp_arr) + assert mnp_array_str(mnp_arr) == onp_array_str(onp_arr) + + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.platform_x86_gpu_training +@pytest.mark.platform_x86_cpu +@pytest.mark.env_onecard +def test_apply_along_axis(): + onp_arr = rand_int(5, 3, 7) + mnp_arr = to_tensor(onp_arr) + for i in range(-3, 3): + mnp_res = mnp.apply_along_axis(mnp.diag, i, mnp_arr) + onp_res = onp.apply_along_axis(onp.diag, i, onp_arr) + match_all_arrays(mnp_res, onp_res) + mnp_res = mnp.apply_along_axis(lambda x: x[0], 2, mnp_arr) + onp_res = onp.apply_along_axis(lambda x: x[0], 2, onp_arr) + match_all_arrays(mnp_res, onp_res) + mnp_res = mnp.apply_along_axis(lambda x, y, offset=0: (x[4] - y)*offset, 2, mnp_arr, 1, offset=3) + onp_res = onp.apply_along_axis(lambda x, y, offset=0: (x[4] - y)*offset, 2, onp_arr, 1, offset=3) + match_all_arrays(mnp_res, onp_res) + + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.platform_x86_gpu_training +@pytest.mark.platform_x86_cpu +@pytest.mark.env_onecard +def test_piecewise(): + x = rand_int(2, 4) + mnp_x = to_tensor(x) + condlist = [x < 2, x == 2, x > 2] + mnp_condlist = [mnp_x < 2, mnp_x == 2, mnp_x > 2] + funclist = [lambda x, offset=0: x - offset, lambda x, offset=0: x, lambda x, offset=0: x*offset] + mnp_res = mnp.piecewise(mnp_x, mnp_condlist, funclist, offset=2) + onp_res = onp.piecewise(x, condlist, funclist, offset=2) + match_all_arrays(mnp_res, onp_res) + + funclist = [-1, 0, 1] + mnp_res = mnp.piecewise(mnp_x, mnp_condlist, funclist) + onp_res = onp.piecewise(x, condlist, funclist) + match_all_arrays(mnp_res, onp_res) + + condlist = [x > 10, x < 0] + mnp_x = to_tensor(x) + mnp_condlist = [mnp_x > 10, mnp_x < 0] + funclist = [lambda x: x - 2, lambda x: x - 1, lambda x: x*2] + mnp_res = mnp.piecewise(mnp_x, mnp_condlist, funclist) + onp_res = onp.piecewise(x, condlist, funclist) + match_all_arrays(mnp_res, onp_res) + + x = 2 + condlist = True + funclist = [lambda x: x - 1] + mnp_res = mnp.piecewise(x, condlist, funclist) + onp_res = onp.piecewise(x, condlist, funclist) + match_all_arrays(mnp_res, onp_res) + + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.platform_x86_gpu_training +@pytest.mark.platform_x86_cpu +@pytest.mark.env_onecard +def test_unravel_index(): + shapes = [(), 1, 3, (5, 1), (2, 6, 3)] + dims = [(5, 4, 7), (5*4, 7), 5*4*7] + for shape in shapes: + x = onp.random.randint(0, 5*4*7, shape) + for dim in dims: + for order in ('C', 'F'): + mnp_res = mnp.unravel_index(to_tensor(x), dim, order=order) + onp_res = onp.unravel_index(x, dim, order=order) + match_all_arrays(mnp_res, onp_res) + + +def mnp_apply_over_axes(x): + a = mnp.apply_over_axes(mnp.sum, x, axes=0) + b = mnp.apply_over_axes(mnp.sum, x, axes=(0, 1)) + c = mnp.apply_over_axes(mnp.std, x, axes=1) + d = mnp.apply_over_axes(mnp.mean, x, axes=(-1,)) + return a, b, c, d + + +def onp_apply_over_axes(x): + a = onp.apply_over_axes(onp.sum, x, axes=0) + b = onp.apply_over_axes(onp.sum, x, axes=(0, 1)) + c = onp.apply_over_axes(onp.std, x, axes=1) + d = onp.apply_over_axes(onp.mean, x, axes=(-1,)) + return a, b, c, d
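A note for readers of the test that follows: unlike a plain reduction, apply_over_axes applies the function to each listed axis in turn and keeps every reduced axis with size 1, so input and output rank match. A minimal NumPy sketch (values are illustrative only, not part of the patch):

>>> import numpy as onp
>>> x = onp.arange(6).reshape(2, 3)
>>> onp.apply_over_axes(onp.sum, x, axes=0)  # rank preserved, axis 0 kept with size 1
array([[3, 5, 7]])
>>> onp.apply_over_axes(onp.sum, x, axes=(0, 1))  # both axes reduced to size 1
array([[15]])

+ + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.platform_x86_gpu_training +@pytest.mark.platform_x86_cpu +@pytest.mark.env_onecard +def test_apply_over_axes(): + arrs = [ + onp.random.rand(2, 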
2).astype('float32'), + onp.random.rand(3, 2, 2).astype('float32'), + onp.random.rand(5, 4, 3, 3).astype('float32'), + ] + for x in arrs: + for expected, actual in zip(onp_apply_over_axes(x), + mnp_apply_over_axes(to_tensor(x))): + match_array(actual.asnumpy(), expected, error=5) diff --git a/tests/st/numpy_native/test_logic_ops.py b/tests/st/numpy_native/test_logic_ops.py index c5e9668d05a..95b7dc3d8db 100644 --- a/tests/st/numpy_native/test_logic_ops.py +++ b/tests/st/numpy_native/test_logic_ops.py @@ -398,3 +398,91 @@ def test_logical_not(): expected = onp_logical_not(arr) actual = mnp_logical_not(to_tensor(arr)) onp.testing.assert_equal(actual.asnumpy().tolist(), expected.tolist()) + + +@pytest.mark.level1 +@pytest.mark.platform_x86_gpu_training +@pytest.mark.platform_x86_cpu +@pytest.mark.env_onecard +def test_array_equal(): + a = [0, 1, 2, float('inf'), float('nan')] + b = [0, 1, 2, float('inf'), float('nan')] + match_all_arrays(mnp.array_equal(a, b), onp.array_equal(a, b)) + a = [0, 1, 2] + b = [[0, 1, 2], [0, 1, 2]] + assert mnp.array_equal(a, b) == onp.array_equal(a, b) + + +@pytest.mark.level1 +@pytest.mark.platform_x86_gpu_training +@pytest.mark.platform_x86_cpu +@pytest.mark.env_onecard +def test_array_equiv(): + a = [0, 1, 2, float('inf'), float('nan')] + b = [0, 1, 2, float('inf'), float('nan')] + match_all_arrays(mnp.array_equiv(a, b), onp.array_equiv(a, b)) + a = [0, 1, 2] + b = [[0, 1, 2], [0, 1, 2]] + assert mnp.array_equiv(a, b) == onp.array_equiv(a, b) + + +def mnp_signbit(*arrs): + arr1 = arrs[0] + arr2 = arrs[1] + a = mnp.signbit(arr1) + b = mnp.signbit(arr2, dtype=mnp.bool_) + return a, b + + +def onp_signbit(*arrs): + arr1 = arrs[0] + arr2 = arrs[1] + a = onp.signbit(arr1) + b = onp.signbit(arr2, dtype='bool') + return a, b + + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.platform_x86_gpu_training +@pytest.mark.platform_x86_cpu +@pytest.mark.env_onecard +def test_signbit(): + onp_arrs = [onp.arange(-10, 10).astype('float32'), onp.arange(-10, 10).astype('int32')] + mnp_arrs = [mnp.arange(-10, 10).astype('float32'), mnp.arange(-10, 10).astype('int32')] + for actual, expected in zip(mnp_signbit(*mnp_arrs), onp_signbit(*onp_arrs)): + onp.testing.assert_equal(actual.asnumpy().tolist(), expected.tolist()) + + +def mnp_sometrue(x): + a = mnp.sometrue(x) + b = mnp.sometrue(x, axis=0) + c = mnp.sometrue(x, axis=(0, -1)) + d = mnp.sometrue(x, axis=(0, 1), keepdims=True) + e = mnp.sometrue(x, axis=(0, 1), keepdims=-1) + f = mnp.sometrue(x, axis=(0, 1), keepdims=0) + return a, b, c, d, e, f + + +def onp_sometrue(x): + a = onp.sometrue(x) + b = onp.sometrue(x, axis=0) + c = onp.sometrue(x, axis=(0, -1)) + d = onp.sometrue(x, axis=(0, 1), keepdims=True) + e = onp.sometrue(x, axis=(0, 1), keepdims=-1) + f = onp.sometrue(x, axis=(0, 1), keepdims=0) + return a, b, c, d, e, f + + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.platform_x86_gpu_training +@pytest.mark.platform_x86_cpu +@pytest.mark.env_onecard +def test_sometrue(): + onp_arr = onp.full((3, 2), [True, False]) + mnp_arr = to_tensor(onp_arr) + for actual, expected in zip(mnp_sometrue(mnp_arr), onp_sometrue(onp_arr)): + onp.testing.assert_equal(actual.asnumpy().tolist(), expected.tolist())
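For context on the pair of tests above: array_equal demands matching shapes, while array_equiv also returns True when one argument can be broadcast to the other. A quick NumPy illustration (hypothetical values, not part of the patch):

>>> import numpy as onp
>>> onp.array_equal([0, 1, 2], [[0, 1, 2], [0, 1, 2]])  # shapes differ
False
>>> onp.array_equiv([0, 1, 2], [[0, 1, 2], [0, 1, 2]])  # broadcast-consistent
True

diff --git a/tests/st/numpy_native/test_math_ops.py b/tests/st/numpy_native/test_math_ops.py index 875876b7fc8..572997b8b06 100644 --- a/tests/st/numpy_native/test_math_ops.py +++ 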
b/tests/st/numpy_native/test_math_ops.py @@ -18,6 +18,7 @@ import pytest import numpy as onp import mindspore.numpy as mnp +from mindspore.common.dtype import dtype_to_nptype from .utils import rand_int, rand_bool, run_binop_test, run_unary_test, run_multi_test, \ run_single_test, match_res, match_array, match_meta, match_all_arrays, to_tensor @@ -600,14 +601,14 @@ def test_outer(): @pytest.mark.env_onecard def test_type_promotion(): arr = rand_int(2, 3) - onp_sum = onp_add(arr, arr) + onp_res = onp_add(arr, arr) a = to_tensor(arr, dtype=mnp.float16) b = to_tensor(arr, dtype=mnp.float32) c = to_tensor(arr, dtype=mnp.int32) - match_array(mnp_add(a, b).asnumpy(), onp_sum) - match_array(mnp_add(b, c).asnumpy(), onp_sum) + match_array(mnp_add(a, b).asnumpy(), onp_res) + match_array(mnp_add(b, c).asnumpy(), onp_res) def mnp_absolute(x): @@ -1817,6 +1818,93 @@ def test_lcm(): match_res(mnp_lcm, onp_lcm, x, y) +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.platform_x86_gpu_training +@pytest.mark.platform_x86_cpu +@pytest.mark.env_onecard +def test_exception_innner(): + with pytest.raises(ValueError): + mnp.inner(to_tensor(test_case.arrs[0]), + to_tensor(test_case.arrs[1])) + + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.platform_x86_gpu_training +@pytest.mark.platform_x86_cpu +@pytest.mark.env_onecard +def test_exception_add(): + with pytest.raises(ValueError): + mnp.add(to_tensor(test_case.arrs[1]), to_tensor(test_case.arrs[2])) + + +def mnp_nanmax(x): + a = mnp.nanmax(x) + b = mnp.nanmax(x, keepdims=True) + c = mnp.nanmax(x, axis=-2) + d = mnp.nanmax(x, axis=0, keepdims=True) + e = mnp.nanmax(x, axis=(-2, 3)) + f = mnp.nanmax(x, axis=(-3, -1), keepdims=True) + return a, b, c, d, e, f + + +def onp_nanmax(x): + a = onp.nanmax(x) + b = onp.nanmax(x, keepdims=True) + c = onp.nanmax(x, axis=-2) + d = onp.nanmax(x, axis=0, keepdims=True) + e = onp.nanmax(x, axis=(-2, 3)) + f = onp.nanmax(x, axis=(-3, -1), keepdims=True) + return a, b, c, d, e, f + + +@pytest.mark.level1 +@pytest.mark.platform_x86_gpu_training +@pytest.mark.platform_x86_cpu +@pytest.mark.env_onecard +def test_nanmax(): + x = rand_int(2, 3, 4, 5) + x[0][2][1][3] = onp.nan + x[1][0][2][4] = onp.nan + x[1][1][1][1] = onp.nan + run_multi_test(mnp_nanmax, onp_nanmax, (x,)) + + +def mnp_nanmin(x): + a = mnp.nanmin(x) + b = mnp.nanmin(x, keepdims=True) + c = mnp.nanmin(x, axis=-2) + d = mnp.nanmin(x, axis=0, keepdims=True) + e = mnp.nanmin(x, axis=(-2, 3)) + f = mnp.nanmin(x, axis=(-3, -1), keepdims=True) + return a, b, c, d, e, f + + +def onp_nanmin(x): + a = onp.nanmin(x) + b = onp.nanmin(x, keepdims=True) + c = onp.nanmin(x, axis=-2) + d = onp.nanmin(x, axis=0, keepdims=True) + e = onp.nanmin(x, axis=(-2, 3)) + f = onp.nanmin(x, axis=(-3, -1), keepdims=True) + return a, b, c, d, e, f + + +@pytest.mark.level1 +@pytest.mark.platform_x86_gpu_training +@pytest.mark.platform_x86_cpu +@pytest.mark.env_onecard +def test_nanmin(): + x = rand_int(2, 3, 4, 5) + x[0][2][1][3] = onp.nan + x[1][0][2][4] = onp.nan + x[1][1][1][1] = onp.nan + run_multi_test(mnp_nanmin, onp_nanmin, (x,)) + + def mnp_nansum(x): a = mnp.nansum(x) b = mnp.nansum(x, keepdims=True) @@ -1927,10 +2015,17 @@ def test_mean(): @pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_cpu @pytest.mark.env_onecard -def test_exception_innner(): - with pytest.raises(ValueError): - mnp.inner(to_tensor(test_case.arrs[0]), - 
to_tensor(test_case.arrs[1])) +def test_corrcoef(): + x = onp.random.random((3, 4)).tolist() + mnp_res = mnp.corrcoef(x) + onp_res = onp.corrcoef(x) + match_all_arrays(mnp_res, onp_res, error=1e-5) + mnp_res = mnp.corrcoef(x[0]) + onp_res = onp.corrcoef(x[0]) + match_all_arrays(mnp_res, onp_res, error=1e-5) + mnp_res = mnp.corrcoef(x, rowvar=False) + onp_res = onp.corrcoef(x, rowvar=False) + match_all_arrays(mnp_res, onp_res, error=1e-5) @pytest.mark.level1 @@ -1939,9 +2034,227 @@ @pytest.mark.platform_x86_ascend_training @pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_cpu @pytest.mark.env_onecard -def test_exception_add(): - with pytest.raises(ValueError): - mnp.add(to_tensor(test_case.arrs[1]), to_tensor(test_case.arrs[2])) +def test_multi_dot(): + arrays = [rand_int(3), rand_int(3, 5), rand_int(5, 2), rand_int(2, 7), rand_int(7)] + mnp_arrays = [to_tensor(arr) for arr in arrays] + match_all_arrays(mnp.multi_dot(mnp_arrays), onp.linalg.multi_dot(arrays)) + match_all_arrays(mnp.multi_dot(mnp_arrays[1:]), onp.linalg.multi_dot(arrays[1:])) + match_all_arrays(mnp.multi_dot(mnp_arrays[:-1]), onp.linalg.multi_dot(arrays[:-1])) + match_all_arrays(mnp.multi_dot(mnp_arrays[1:-1]), onp.linalg.multi_dot(arrays[1:-1])) + + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.platform_x86_gpu_training +@pytest.mark.platform_x86_cpu +@pytest.mark.env_onecard +def test_gradient(): + f = onp.random.random((3, 4, 5)).tolist() + mnp_res = mnp.gradient(f) + onp_res = onp.gradient(f) + match_all_arrays(mnp_res, onp_res, error=1e-5) + mnp_res = mnp.gradient(f, axis=1) + onp_res = onp.gradient(f, axis=1) + match_all_arrays(mnp_res, onp_res, error=1e-5) + mnp_res = mnp.gradient(f, -3, axis=(-1, 1)) + onp_res = onp.gradient(f, -3, axis=(-1, 1)) + match_all_arrays(mnp_res, onp_res, error=1e-5) + mnp_res = mnp.gradient(f, -3, 5, axis=(-1, 0)) + onp_res = onp.gradient(f, -3, 5, axis=(-1, 0)) + match_all_arrays(mnp_res, onp_res, error=1e-5) + + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.platform_x86_gpu_training +@pytest.mark.platform_x86_cpu +@pytest.mark.env_onecard +def test_argmax(): + match_res(mnp.argmax, onp.argmax, rand_int()) + match_res(mnp.argmax, onp.argmax, rand_int(3)) + match_res(mnp.argmax, onp.argmax, rand_int(1, 1, 1)) + x = onp.random.choice(onp.arange(-100, 100), size=(2, 3, 4, 5), replace=False) + match_res(mnp.argmax, onp.argmax, x) + for i in range(-4, 4): + match_res(mnp.argmax, onp.argmax, x, axis=i) + + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.platform_x86_gpu_training +@pytest.mark.platform_x86_cpu +@pytest.mark.env_onecard +def test_argmin(): + match_res(mnp.argmin, onp.argmin, rand_int()) + match_res(mnp.argmin, onp.argmin, rand_int(3)) + match_res(mnp.argmin, onp.argmin, rand_int(1, 1, 1)) + x = rand_int(2, 3, 4, 5) + match_res(mnp.argmin, onp.argmin, x) + for i in range(-4, 4): + match_res(mnp.argmin, onp.argmin, x, axis=i) + + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.platform_x86_gpu_training +@pytest.mark.platform_x86_cpu +@pytest.mark.env_onecard +def test_searchsorted(): + x = onp.arange(-10, 10) + y = onp.random.randint(-15, 15, size=(2, 3, 4)) + onp.random.choice([0, 0.5], (2, 3, 4)) + sorter = onp.argsort(x)
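The sorter argument used below must be an index permutation that sorts the first array; note that numpy.random.shuffle shuffles in place and returns None, so argsort is the reliable way to build one. A small NumPy sketch (hypothetical values, not part of the patch):

>>> import numpy as onp
>>> a = onp.array([3, 1, 2])
>>> sorter = onp.argsort(a)  # a[sorter] is sorted: [1, 2, 3]
>>> onp.searchsorted(a, 2, sorter=sorter)
1

+ match_res(mnp.searchsorted, 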
onp.searchsorted, x, y) + match_res(mnp.searchsorted, onp.searchsorted, x, y, side='right') + match_res(mnp.searchsorted, onp.searchsorted, x, y, sorter=sorter) + match_res(mnp.searchsorted, onp.searchsorted, x, y, side='right', sorter=sorter) + + +@pytest.mark.level2 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.platform_x86_gpu_training +@pytest.mark.platform_x86_cpu +@pytest.mark.env_onecard +def test_interp(): + x = onp.random.randint(-15, 15, size=(2, 3, 4)) + onp.random.choice([0, 0.5], (2, 3, 4)) + xp = onp.arange(-10, 10) + fp = onp.random.uniform(-50, 50, 20) + match_res(mnp.interp, onp.interp, x, xp, fp, error=3) + match_res(mnp.interp, onp.interp, x, xp, fp, left=onp.random.rand(), error=3) + match_res(mnp.interp, onp.interp, x, xp, fp, right=onp.random.rand(), error=3) + match_res(mnp.interp, onp.interp, x, xp, fp, left=onp.random.rand(), right=onp.random.rand(), error=3) + + +@pytest.mark.level2 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.platform_x86_gpu_training +@pytest.mark.platform_x86_cpu +@pytest.mark.env_onecard +def test_digitize(): + bins = onp.random.randint(-10, 10, size=10) + bins.sort() + x = onp.random.randint(-15, 15, size=(2, 3, 4)) + onp.random.choice([0, 0.5], (2, 3, 4)) + match_res(mnp.digitize, onp.digitize, x, []) + match_res(mnp.digitize, onp.digitize, [], []) + match_res(mnp.digitize, onp.digitize, [], bins) + match_res(mnp.digitize, onp.digitize, x, bins) + match_res(mnp.digitize, onp.digitize, x, bins, right=True) + bins = onp.flip(bins) + match_res(mnp.digitize, onp.digitize, x, bins) + match_res(mnp.digitize, onp.digitize, x, bins, right=True) + + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.platform_x86_gpu_training +@pytest.mark.platform_x86_cpu +@pytest.mark.env_onecard +def test_bincount(): + x = onp.random.randint(0, 10, 20) + weights = onp.random.randn(20) + match_res(mnp.bincount, onp.bincount, x) + match_res(mnp.bincount, onp.bincount, x, minlength=25) + match_res(mnp.bincount, onp.bincount, x, weights, error=3) + match_res(mnp.bincount, onp.bincount, x, weights, minlength=25, error=3) + + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.platform_x86_gpu_training +@pytest.mark.platform_x86_cpu +@pytest.mark.env_onecard +def test_histogram(): + x = onp.random.randint(-10, 10, 10) + weights = onp.random.randn(10) + for bins in [(1, 2, 3), [2], 1, 5, 10]: + # pylint: disable=redefined-builtin + for range in [None, (3, 3), (2, 20)]: + match_res(mnp.histogram, onp.histogram, x, bins=bins, range=range, error=3) + match_res(mnp.histogram, onp.histogram, x, bins=bins, range=range, density=True, error=3) + mnp_res = mnp.histogram(to_tensor(x), bins=bins, range=range, weights=to_tensor(weights)) + onp_res = onp.histogram(x, bins=bins, range=range, weights=weights) + match_all_arrays(mnp_res, onp_res, error=3) + mnp_res = mnp.histogram(to_tensor(x), bins=bins, range=range, + weights=to_tensor(weights), density=True) + onp_res = onp.histogram(x, bins=bins, range=range, weights=weights, density=True) + match_all_arrays(mnp_res, onp_res, error=3) + + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.platform_x86_gpu_training +@pytest.mark.platform_x86_cpu +@pytest.mark.env_onecard +def test_histogramdd(): + x = 
onp.random.randint(-10, 10, (5, 3)) + y = [onp.random.randint(-10, 10, 5), onp.random.randint(-10, 10, 5), onp.random.randint(-10, 10, 5)] + mnp_y = list(map(to_tensor, y)) + weights = onp.random.randn(5) + for bins in [(15, 4, 9), 10, [onp.arange(5).tolist(), onp.arange(3, 6).tolist(), + onp.arange(10, 20).tolist()]]: + # pylint: disable=redefined-builtin + for range in [None, [[0, 5], [2, 7], [1, 3]]]: + mnp_res = mnp.histogramdd(to_tensor(x), bins=bins, range=range) + onp_res = onp.histogramdd(x, bins=bins, range=range) + match_all_arrays(mnp_res[0], onp_res[0], error=3) + match_all_arrays(mnp_res[1], onp_res[1], error=3) + mnp_res = mnp.histogramdd(to_tensor(x), bins=bins, range=range, density=True) + onp_res = onp.histogramdd(x, bins=bins, range=range, density=True) + match_all_arrays(mnp_res[0], onp_res[0], error=3) + match_all_arrays(mnp_res[1], onp_res[1], error=3) + mnp_res = mnp.histogramdd(to_tensor(x), bins=bins, range=range, weights=to_tensor(weights)) + onp_res = onp.histogramdd(x, bins=bins, range=range, weights=weights) + match_all_arrays(mnp_res[0], onp_res[0], error=3) + match_all_arrays(mnp_res[1], onp_res[1], error=3) + mnp_res = mnp.histogramdd(to_tensor(x), bins=bins, range=range, + weights=to_tensor(weights), density=True) + onp_res = onp.histogramdd(x, bins=bins, range=range, weights=weights, density=True) + match_all_arrays(mnp_res[0], onp_res[0], error=3) + match_all_arrays(mnp_res[1], onp_res[1], error=3) + + mnp_res = mnp.histogramdd(mnp_y, bins=bins, range=range, weights=to_tensor(weights), + density=True) + onp_res = onp.histogramdd(y, bins=bins, range=range, weights=weights, density=True) + match_all_arrays(mnp_res[0], onp_res[0], error=3) + match_all_arrays(mnp_res[1], onp_res[1], error=3) + + bins = onp.arange(24).reshape(3, 8) + mnp_res = mnp.histogramdd(to_tensor(x), bins=to_tensor(bins)) + onp_res = onp.histogramdd(x, bins=bins) + match_all_arrays(mnp_res[0], onp_res[0], error=3) + match_all_arrays(mnp_res[1], onp_res[1], error=3) + + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.platform_x86_gpu_training +@pytest.mark.platform_x86_cpu +@pytest.mark.env_onecard +def test_histogram2d(): + x = onp.random.randint(-10, 10, 10) + y = onp.random.randint(-10, 10, 10) + + weights = onp.random.randn(10) + for bins in [(5, 7), 4, [onp.arange(5).tolist(), onp.arange(2, 10).tolist()], [8, [1, 2, 3]]]: + # pylint: disable=redefined-builtin + for range in [None, [(3, 3), (2, 20)]]: + match_res(mnp.histogram2d, onp.histogram2d, x, y, bins=bins, range=range, error=3) + match_res(mnp.histogram2d, onp.histogram2d, x, y, bins=bins, range=range, density=True, + error=3) + mnp_res = mnp.histogram2d(to_tensor(x), to_tensor(y), bins=bins, range=range, + weights=to_tensor(weights)) + onp_res = onp.histogram2d(x, y, bins=bins, range=range, weights=weights) + match_all_arrays(mnp_res, onp_res, error=3) + mnp_res = mnp.histogram2d(to_tensor(x), to_tensor(y), bins=bins, range=range, + weights=to_tensor(weights), density=True) + onp_res = onp.histogram2d(x, y, bins=bins, range=range, weights=weights, density=True) + match_all_arrays(mnp_res, onp_res, error=3) @pytest.mark.level1 @@ -1955,6 +2268,277 @@ def test_exception_mean(): mnp.mean(to_tensor(test_case.arrs[0]), (-1, 0)) + +def mnp_sum(x): + a = mnp.sum(x) + b = mnp.sum(x, axis=0) + c = mnp.sum(x, axis=(0, 1)) + d = mnp.sum(x, keepdims=True) + e = mnp.sum(x, initial=-1) + f = mnp.sum(x, initial=1) + g = mnp.sum(x, axis=(0, 2, -2), keepdims=True, initial=0.5, dtype=mnp.float64) + return a, b, c, d, e, f, g
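In the sum helpers here, initial seeds every output element of the reduction, and keepdims retains the reduced axes with size 1. For instance (hypothetical values, not part of the patch):

>>> import numpy as onp
>>> onp.sum(onp.array([[1, 2], [3, 4]]), initial=1)  # 10 plus the seed
11
>>> onp.sum(onp.array([[1, 2], [3, 4]]), axis=0, keepdims=True)
array([[4, 6]])

+ + +def onp_sum(x): + a = onp.sum(x) + b = onp.sum(x, axis=0) + c = onp.sum(x, axis=(0, 1)) + d = onp.sum(x, keepdims=True) + e = onp.sum(x, 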
initial=-1) + f = onp.sum(x, initial=1) + g = onp.sum(x, axis=(0, 2, -2), keepdims=True, initial=0.5, dtype=onp.float64) + return a, b, c, d, e, f, g + + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.platform_x86_gpu_training +@pytest.mark.platform_x86_cpu +@pytest.mark.env_onecard +def test_sum(): + onp_arr = onp.random.rand(2, 3, 4).astype('float32') + mnp_arr = to_tensor(onp_arr) + for actual, expected in zip(mnp_sum(mnp_arr), onp_sum(onp_arr)): + match_array(actual.asnumpy(), expected, error=5) + + +def mnp_sign(x): + return mnp.sign(x) + + +def onp_sign(x): + return onp.sign(x) + + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.platform_x86_gpu_training +@pytest.mark.platform_x86_cpu +@pytest.mark.env_onecard +def test_sign(): + onp_arr = [ + onp.array(3.5).astype('float32'), + onp.arange(-5, 5).astype('float32'), + onp.random.rand(2, 3, 4).astype('float32') + ] + mnp_arr = list(map(to_tensor, onp_arr)) + for onp_x, mnp_x in zip(onp_arr, mnp_arr): + expected = onp_sign(onp_x) + actual = mnp_sign(mnp_x) + match_array(actual.asnumpy(), expected, error=5) + + +def mnp_copysign(x, y): + return mnp.copysign(x, y) + + +def onp_copysign(x, y): + return onp.copysign(x, y) + + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.platform_x86_gpu_training +@pytest.mark.platform_x86_cpu +@pytest.mark.env_onecard +def test_copysign(): + onp_arr = [[onp.array([1, -1, 2, -3]).astype('float32'), + onp.array([1, -1, -1, 1]).astype('float32')], + [onp.random.rand(2, 3, 4).astype('float32'), + onp.random.rand(2, 3, 4).astype('float32')]] + mnp_arr = list(map(to_tensor, onp_arr)) + for onp_x, mnp_x in zip(onp_arr, mnp_arr): + expected = onp_copysign(onp_x[0], onp_x[1]) + actual = mnp_copysign(mnp_x[0], mnp_x[1]) + match_array(actual.asnumpy(), expected, error=5) + + +def mnp_matrix_power(x): + a = mnp.matrix_power(x, 0) + b = mnp.matrix_power(x, 1) + c = mnp.matrix_power(x, 2) + d = mnp.matrix_power(x, 3) + return a, b, c, d + + +def onp_matrix_power(x): + a = onp.linalg.matrix_power(x, 0) + b = onp.linalg.matrix_power(x, 1) + c = onp.linalg.matrix_power(x, 2) + d = onp.linalg.matrix_power(x, 3) + return a, b, c, d + + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.platform_x86_gpu_training +@pytest.mark.platform_x86_cpu +@pytest.mark.env_onecard +def test_matrix_power(): + arrs = [ + onp.random.rand(2, 2).astype('float32'), + onp.random.rand(3, 2, 2).astype('float32'), + onp.random.rand(5, 4, 3, 3).astype('float32'), + ] + for x in arrs: + onp_res = onp_matrix_power(x) + mnp_res = mnp_matrix_power(to_tensor(x)) + for expected, actual in zip(onp_res, mnp_res): + match_array(actual.asnumpy(), expected, error=5) + + +def mnp_around(x): + a = mnp.around(x) + b = mnp.around(x, 1) + c = mnp.around(x, 2) + d = mnp.around(x, 3) + return a, b, c, d + + +def onp_around(x): + a = onp.around(x) + b = onp.around(x, 1) + c = onp.around(x, 2) + d = onp.around(x, 3) + return a, b, c, d + + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_onecard +def test_around(): + arrs = [ + onp.random.rand(2, 2).astype('float32'), + onp.random.rand(3, 2, 2).astype('float32'), + onp.random.rand(5, 4, 3, 3).astype('float32'), + ] + for x in arrs: + onp_res = onp_around(x) 
+ mnp_res = mnp_around(to_tensor(x)) + for expected, actual in zip(onp_res, mnp_res): + match_array(actual.asnumpy(), expected, error=5) + + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.platform_x86_gpu_training +@pytest.mark.platform_x86_cpu +@pytest.mark.env_onecard +def test_polyadd(): + arrs = [rand_int(), rand_int(1), rand_int(3), rand_int(7)] + for x in arrs: + for y in arrs: + match_res(mnp.polyadd, onp.polyadd, x, y) + + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.platform_x86_gpu_training +@pytest.mark.platform_x86_cpu +@pytest.mark.env_onecard +def test_polysub(): + arrs = [rand_int(), rand_int(1), rand_int(3), rand_int(7)] + for x in arrs: + for y in arrs: + match_res(mnp.polysub, onp.polysub, x, y, error=1) + + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.platform_x86_gpu_training +@pytest.mark.platform_x86_cpu +@pytest.mark.env_onecard +def test_polyval(): + polys = [rand_int(1), rand_int(3), rand_int(7)] + arrs = [rand_int(), rand_int(1), rand_int(3), rand_int(2, 3, 1), rand_int(1, 5, 4)] + for p in polys: + for x in arrs: + match_res(mnp.polyval, onp.polyval, p, x, error=3) + + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.platform_x86_gpu_training +@pytest.mark.platform_x86_cpu +@pytest.mark.env_onecard +def test_polyder(): + poly = rand_int(7) + for i in range(5): + match_res(mnp.polyder, onp.polyder, poly, m=i) + + +@pytest.mark.level2 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.platform_x86_gpu_training +@pytest.mark.platform_x86_cpu +@pytest.mark.env_onecard +def test_polymul(): + arrs = [rand_int(), rand_int(1), rand_int(3), rand_int(7)] + for x in arrs: + for y in arrs: + match_res(mnp.polymul, onp.polymul, x, y) + + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.platform_x86_gpu_training +@pytest.mark.platform_x86_cpu +@pytest.mark.env_onecard +def test_polyint(): + poly = rand_int(7) + match_res(mnp.polyint, onp.polyint, poly, m=1, k=7, error=3) + match_res(mnp.polyint, onp.polyint, poly, m=1, k=[9], error=3) + match_res(mnp.polyint, onp.polyint, poly, m=3, k=2, error=3) + + for i in range(5): + match_res(mnp.polyint, onp.polyint, poly, m=i, k=rand_int(i).tolist(), error=3) + + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.platform_x86_gpu_training +@pytest.mark.platform_x86_cpu +@pytest.mark.env_onecard +def test_result_type(): + x = ('?', True, mnp.uint16, mnp.ones((2, 3)).astype(mnp.int32), 'float') + y = ('?', True, onp.uint16, onp.ones((2, 3)).astype(onp.int32), 'float') + for i in range(4): + mnp_args = x[:i + 1] + actual = dtype_to_nptype(mnp.result_type(*mnp_args)) + onp_args = y[:i + 1] + expected = onp.result_type(*onp_args) + if expected == onp.int64: + expected = onp.int32 + elif expected == onp.float64: + expected = onp.float32 + assert actual == expected
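Background for the dtype folding in test_result_type: MindSpore tensors default to 32-bit types, so NumPy's 64-bit promotion results are mapped down to int32/float32 before comparison. NumPy's own promotion, for reference (hypothetical values, not part of the patch):

>>> import numpy as onp
>>> onp.result_type('?', True, onp.uint16)
dtype('uint16')
>>> onp.result_type('?', True, onp.uint16, 'float')
dtype('float64')

+ + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.platform_x86_gpu_training +@pytest.mark.platform_x86_cpu +@pytest.mark.env_onecard +def test_unwrap(): + x = onp.linspace(onp.linspace((0, 1), (10, 15), 5), onp.linspace((0, 2), 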
(3*onp.pi, 7*onp.pi), 5), 7) + x[2:5] += onp.pi + for i in range(-3, 3): + match_res(mnp.unwrap, onp.unwrap, x, axis=i, error=3) + + @pytest.mark.level1 @pytest.mark.platform_arm_ascend_training @pytest.mark.platform_x86_ascend_training @@ -1964,3 +2548,185 @@ def test_exception_amax(): with pytest.raises(TypeError): mnp.amax(mnp.array([[1, 2], [3, 4]]).astype(mnp.float32), initial=[1.0, 2.0]) + + +def mnp_cumprod(x): + a = mnp.cumprod(x) + b = mnp.cumprod(x, axis=0) + c = mnp.cumprod(x, axis=1) + return a, b, c + + +def onp_cumprod(x): + a = onp.cumprod(x) + b = onp.cumprod(x, axis=0) + c = onp.cumprod(x, axis=1) + return a, b, c + + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_onecard +def test_cumprod(): + mnp_x = mnp.arange(1, 7).reshape(2, 3) + tensors = [mnp_x.astype('bool'), + mnp_x.astype('uint8'), + mnp_x.astype('int16'), + mnp_x.astype('float16'), + mnp_x.astype('float32')] + for x in tensors: + onp_res = onp_cumprod(x.asnumpy()) + mnp_res = mnp_cumprod(x) + for expected, actual in zip(onp_res, mnp_res): + match_array(actual.asnumpy(), expected, error=5) + + +def mnp_ravel_multi_index(x): + a = mnp.ravel_multi_index(x, (7, 6)) + b = mnp.ravel_multi_index(x, (7, 6), order='F') + c = mnp.ravel_multi_index(x, (4, 6), mode='clip') + d = mnp.ravel_multi_index(x, (4, 4), mode='wrap') + return a, b, c, d + + +def onp_ravel_multi_index(x): + a = onp.ravel_multi_index(x, (7, 6)) + b = onp.ravel_multi_index(x, (7, 6), order='F') + c = onp.ravel_multi_index(x, (4, 6), mode='clip') + d = onp.ravel_multi_index(x, (4, 4), mode='wrap') + return a, b, c, d + + +@pytest.mark.level1 +@pytest.mark.platform_x86_gpu_training +@pytest.mark.env_onecard +def test_ravel_multi_index(): + x = mnp.array([[3, 6, 6], [4, 5, 1]]) + onp_res = onp_ravel_multi_index(x.asnumpy()) + mnp_res = mnp_ravel_multi_index(x) + for expected, actual in zip(onp_res, mnp_res): + match_array(actual.asnumpy(), expected, error=5) + + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.platform_x86_gpu_training +@pytest.mark.platform_x86_cpu +@pytest.mark.env_onecard +def test_norm(): + arrs = [rand_int(1), rand_int(9), rand_int(6, 4), rand_int(5, 2, 3, 7)] + for x in arrs: + for keepdims in [True, False]: + match_res(mnp.norm, onp.linalg.norm, x, keepdims=keepdims, error=3) + + axes = [None, -1, 1, 2] + order = [None, float('inf'), -float('inf'), 0, 1, -1, 2, -2, 3.7, -5, 3] + for x, axis in zip(arrs, axes): + # pylint: disable=redefined-builtin + for ord in order: + for keepdims in [True, False]: + match_res(mnp.norm, onp.linalg.norm, x, ord=ord, axis=axis, keepdims=keepdims, error=3) + + x = rand_int(3, 6, 4, 5) + axes = [(0, 1), (0, 3), (1, 3), (2, 3)] + order = [None, 'fro', float('inf'), -float('inf'), 1, -1] + for axis in axes: + # pylint: disable=redefined-builtin + for ord in order: + for keepdims in [True, False]: + match_res(mnp.norm, onp.linalg.norm, x, ord=ord, axis=axis, keepdims=keepdims, error=3)
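The bitwise tests that follow pass an explicit integer dtype because match_res otherwise casts inputs to float32, which bitwise kernels reject; the reference semantics are NumPy's two's-complement integer operations (hypothetical values, not part of the patch):

>>> import numpy as onp
>>> onp.bitwise_and(-100, 7)  # low bits of the two's-complement representation
4
>>> onp.invert(onp.int16(5))  # bitwise NOT, i.e. -(5 + 1)
-6

+ + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_onecard +def test_bitwise_and(): + arrs = [onp.random.randint(-100, 100, ()), onp.random.randint(-100, 100, (1,)), + onp.random.randint(-100, 100, (5,)), onp.random.randint(-100, 100, (3, 1)), + onp.random.randint(-100, 100, (4, 1, 5))] + for x in arrs: + for y in arrs: + match_res(mnp.bitwise_and, onp.bitwise_and, x, y, 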
dtype=mnp.int32) + + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_onecard +def test_bitwise_or(): + arrs = [onp.random.randint(-100, 100, ()), onp.random.randint(-100, 100, (1,)), + onp.random.randint(-100, 100, (5,)), onp.random.randint(-100, 100, (3, 1)), + onp.random.randint(-100, 100, (4, 1, 5))] + for x in arrs: + for y in arrs: + match_res(mnp.bitwise_or, onp.bitwise_or, x, y, dtype=mnp.int32) + + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_onecard +def test_bitwise_xor(): + arrs = [onp.random.randint(-100, 100, ()), onp.random.randint(-100, 100, (1,)), + onp.random.randint(-100, 100, (5,)), onp.random.randint(-100, 100, (3, 1)), + onp.random.randint(-100, 100, (4, 1, 5))] + for x in arrs: + for y in arrs: + match_res(mnp.bitwise_xor, onp.bitwise_xor, x, y, dtype=mnp.int32) + + +@pytest.mark.level1 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_onecard +def test_invert(): + x = onp.random.randint(-100, 100, (2, 3)) + match_res(mnp.invert, onp.invert, x, dtype=mnp.int16) + match_res(mnp.invert, onp.invert, x.astype(onp.uint16), dtype=mnp.uint16) + + +@pytest.mark.platform_x86_gpu_training +@pytest.mark.platform_x86_cpu +@pytest.mark.env_onecard +def test_rint(): + arrs = [ + onp.random.rand(2, 2).astype('float32'), + onp.random.rand(3, 2, 2).astype('float32'), + onp.random.rand(5, 4, 3, 3).astype('float32'), + ] + for x in arrs: + for expected, actual in zip(onp.rint(x), mnp.rint(to_tensor(x))): + match_array(actual.asnumpy(), expected, error=5) + + +def mnp_correlate(a, v): + valid = mnp.correlate(a, v, mode="valid") + full = mnp.correlate(a, v, mode="full") + same = mnp.correlate(a, v, mode="same") + default = mnp.correlate(a, v) + return valid, full, same, default + + +def onp_correlate(a, v): + valid = onp.correlate(a, v, mode="valid") + full = onp.correlate(a, v, mode="full") + same = onp.correlate(a, v, mode="same") + default = onp.correlate(a, v) + return valid, full, same, default + + +@pytest.mark.level1 +@pytest.mark.platform_x86_gpu_training +@pytest.mark.env_onecard +def test_correlate(): + first_sequences = [[1], [1, 2], [0, 0, 0, 1], [1, 2, 3, 4, 5]] + second_sequences = [[2], [0, 1], [1, 2, 3]] + for a in first_sequences: + for v in second_sequences: + mnp_res = mnp_correlate(a, v) + onp_res = onp_correlate(a, v) + match_all_arrays(mnp_res, onp_res)
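On the correlate helpers above: mode controls the output length, where 'valid' (the default) keeps only full overlaps, 'same' matches the longer input's length, and 'full' keeps every overlap. A NumPy sketch (hypothetical values, not part of the patch):

>>> import numpy as onp
>>> onp.correlate([1, 2, 3], [0, 1, 0.5])
array([3.5])
>>> onp.correlate([1, 2, 3], [0, 1, 0.5], mode='same')
array([2. , 3.5, 3. ])
>>> onp.correlate([1, 2, 3], [0, 1, 0.5], mode='full')
array([0.5, 2. , 3.5, 3. , 0. ])

diff --git a/tests/st/numpy_native/utils.py b/tests/st/numpy_native/utils.py index 3568b244696..50777da0fac 100644 --- a/tests/st/numpy_native/utils.py +++ b/tests/st/numpy_native/utils.py @@ -24,7 +24,7 @@ def match_array(actual, expected, error=0): if isinstance(actual, int): actual = onp.asarray(actual) - if isinstance(expected, int): + if isinstance(expected, (int, tuple)): expected = onp.asarray(expected) if error > 0: @@ -91,11 +89,9 @@ def rand_bool(*shape): def match_res(mnp_fn, onp_fn, *arrs, **kwargs): """Checks results from applying mnp_fn and onp_fn on arrs respectively""" - dtype = kwargs.get('dtype', mnp.float32) - kwargs.pop('dtype', None) + dtype = kwargs.pop('dtype', mnp.float32) mnp_arrs = map(functools.partial(Tensor, dtype=dtype), arrs) - error = kwargs.get('error', 0) - kwargs.pop('error', None) + error = kwargs.pop('error', 0) mnp_res = mnp_fn(*mnp_arrs, **kwargs) onp_res = onp_fn(*arrs, **kwargs) match_all_arrays(mnp_res, onp_res, error=error) @@ -173,6 +171,7 @@ def run_logical_test(mnp_fn, onp_fn, test_case): for x2 in 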
test_case.boolean_arrs: match_res(mnp_fn, onp_fn, x1, x2, dtype=mnp.bool_) + def to_tensor(obj, dtype=None): if dtype is None: res = Tensor(obj)