!15072 numpy-native: fix support for non-tensor input to isin and reduce ufuncs

From: @jachua
Reviewed-by: @c_34, @guoqi1024
Signed-off-by: @c_34, @guoqi1024
mindspore-ci-bot 2021-04-14 10:11:53 +08:00 committed by Gitee
commit d0b7c98743
4 changed files with 30 additions and 18 deletions


@@ -669,7 +669,7 @@ def where(condition, x=None, y=None):
Args:
condition (Tensor): where True, yield `x`, otherwise yield `y`.
- x (Tensor)
+ x (Tensor): Values from which to choose.
y (Tensor): Values from which to choose. `x`, `y` and `condition` need
to be broadcastable to some shape.


@@ -578,9 +578,9 @@ def isin(element, test_elements, invert=False):
not rely on the uniqueness of the input arrays.
Args:
- element (array_like): Input array.
- test_elements (array_like): The values against which to test each value of
- `element`.
+ element (Union[int, float, bool, list, tuple, Tensor]): Input array.
+ test_elements (Union[int, float, bool, list, tuple, Tensor]): The values against
+ which to test each value of `element`.
invert (boolean, optional): If True, the values in the returned array are
inverted, as if calculating `element` not in `test_elements`. Default is False.
@@ -603,6 +603,7 @@ def isin(element, test_elements, invert=False):
[[ True False]
[False True]]
"""
+ element = _to_tensor(element)
res = in1d(element, test_elements, invert=invert)
return F.reshape(res, F.shape(element))
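
With `element` now routed through `_to_tensor`, `isin` also accepts plain Python scalars, lists and tuples, not just Tensors. A minimal usage sketch (hypothetical session; assumes a MindSpore execution context is already configured):

import mindspore.numpy as np

# Before this change a plain nested list here failed the input check;
# it is now converted internally via `_to_tensor`.
output = np.isin([[1, 2], [3, 4]], test_elements=[2, 4])
print(output)
# expected: [[False  True]
#            [False  True]]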


@@ -378,6 +378,7 @@ def divide(x1, x2, dtype=None):
[0.33333334 0.5 ]
[0.33333334 0.5 ]]
"""
+ x1, x2 = _to_tensor(x1, x2)
if not _check_is_float(F.dtype(x1)) and not _check_is_float(F.dtype(x2)):
x1 = F.cast(x1, mstype.float32)
x2 = F.cast(x2, mstype.float32)
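
The added conversion makes `divide` usable with non-Tensor operands as well; since neither operand below is a float type, both are cast to float32 before dividing. A rough sketch (hypothetical usage, printed values are approximate):

import mindspore.numpy as np

# Integer list and scalar are converted via `_to_tensor`, then cast to float32.
print(np.divide([1, 2], 3))
# expected (approximately): [0.33333334 0.6666667]
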
@@ -2427,7 +2428,7 @@ def _reduce(a, reduce_fn, cmp_fn=None, axis=None, keepdims=False, initial=None,
Applies comparison based on cmp_fn and reduction based on reduce_fn.
If cmp_fn is None, only reduction is performed.
"""
- _check_input_tensor(a)
+ a = _to_tensor(a)
shape = F.shape(a)
ndim = F.rank(a)
@@ -2458,8 +2459,6 @@ def _reduce(a, reduce_fn, cmp_fn=None, axis=None, keepdims=False, initial=None,
if initial is not None:
initial = full(shape, initial, dtype)
a = cmp_fn(a, initial)
- if not axes:
-     return a.astype(dtype)
if isinstance(where, Tensor):
if initial is None:
return _raise_value_error('initial value must be provided for where masks')
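
`_reduce` is the common helper behind the numpy-native reductions (`nanvar` and `nanstd` below call it directly), so replacing the hard input check with `_to_tensor` is what lets reductions such as `amax`, `amin` and `nansum` (the ones exercised by the new tests) accept array-like input. A short sketch of the newly supported calls (hypothetical usage; expected values are hand-computed):

import mindspore.numpy as np

# Each of these previously required a Tensor argument.
print(np.amax([[1, 2], [3, 4]]))                 # expected: 4
print(np.amin([[1, 2], [3, 4]]))                 # expected: 1
print(np.nansum([[1.0, np.nan], [3.0, 4.0]]))    # expected: 8.0
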
@@ -2580,6 +2579,8 @@ def nanmean(a, axis=None, dtype=None, keepdims=False):
>>> print(output)
[1. 3.5]
"""
+ if dtype is None:
+     dtype = mstype.float32
a = _to_tensor(a)
axis = _check_axis_valid(axis, F.rank(a))
sum_a = nansum(a, axis=axis, dtype=dtype, keepdims=keepdims)
@@ -2592,7 +2593,7 @@ def _nanvar(a, axis, ddof=0, keepdims=False):
pow_a = F.tensor_pow(F.tensor_sub(a, mean_a), 2)
sum_a = _reduce_nansum(pow_a, axis, keepdims)
count = _count_nonnan(a, axis, keepdims)
- return F.tensor_div(sum_a, F.tensor_sub(count, ddof))
+ return divide(sum_a, F.tensor_sub(count, ddof))
def nanvar(a, axis=None, dtype=None, ddof=0, keepdims=False):
@@ -2633,16 +2634,18 @@ def nanvar(a, axis=None, dtype=None, ddof=0, keepdims=False):
Examples:
>>> import mindspore.numpy as np
>>> a = np.array([[1, np.nan], [3, 4]])
- >>> output = np.nanstd(a)
+ >>> output = np.nanvar(a)
>>> print(output)
- 1.2472192
- >>> output = np.nanstd(a, axis=0)
+ 1.5555557
+ >>> output = np.nanvar(a, axis=0)
>>> print(output)
[1. 0.]
- >>> output = np.nanstd(a, axis=1)
+ >>> output = np.nanvar(a, axis=1)
>>> print(output)
- [0. 0.5]
+ [0. 0.25]
"""
+ if dtype is None:
+     dtype = mstype.float32
return _reduce(a, functools.partial(_nanvar, ddof=ddof, keepdims=keepdims), axis=axis,
keepdims=keepdims, dtype=dtype)
@@ -2686,16 +2689,18 @@ def nanstd(a, axis=None, dtype=None, ddof=0, keepdims=False):
Examples:
>>> import mindspore.numpy as np
>>> a = np.array([[1, np.nan], [3, 4]])
- >>> output = np.nanvar(a)
+ >>> output = np.nanstd(a)
>>> print(output)
- 1.5555557
- >>> output = np.nanvar(a, axis=0)
+ 1.2472192
+ >>> output = np.nanstd(a, axis=0)
>>> print(output)
[1. 0.]
- >>> output = np.nanvar(a, axis=1)
+ >>> output = np.nanstd(a, axis=1)
>>> print(output)
- [0. 0.25]
+ [0. 0.5]
"""
+ if dtype is None:
+     dtype = mstype.float32
return _reduce(a, lambda a, axis: F.sqrt(_nanvar(a, axis, ddof=ddof, keepdims=keepdims)),
axis=axis, keepdims=keepdims, dtype=dtype)
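
As a sanity check on the swapped example values: the non-NaN entries of `a` are 1, 3 and 4, whose population variance is ((1 - 8/3)^2 + (3 - 8/3)^2 + (4 - 8/3)^2) / 3 = 14/9 ≈ 1.5556, with square root ≈ 1.2472, so the corrected `nanvar`/`nanstd` outputs are consistent. A quick verification against standard NumPy (illustrative snippet, not part of the patch):

import numpy as onp

vals = onp.array([1.0, 3.0, 4.0])  # the non-NaN entries of a
print(onp.var(vals))               # ~1.5555556, matches np.nanvar(a)
print(onp.std(vals))               # ~1.2472192, matches np.nanstd(a)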


@@ -370,6 +370,7 @@ def test_nanstd():
arr2[0][4][3][0][2] = onp.nan
run_single_test(mnp_nanstd, onp_nanstd, arr1, error=1e-5)
run_single_test(mnp_nanstd, onp_nanstd, arr2, error=1e-5)
+ match_res(mnp.nanstd, onp.nanstd, rand_int())
def mnp_var(x):
@@ -436,6 +437,7 @@ def test_nanvar():
arr2[0][4][3][0][2] = onp.nan
run_single_test(mnp_nanvar, onp_nanvar, arr1, error=1e-5)
run_single_test(mnp_nanvar, onp_nanvar, arr2, error=1e-5)
+ match_res(mnp.nanvar, onp.nanvar, rand_int())
def mnp_average(x):
@@ -993,6 +995,7 @@ def test_amax():
a = rand_int(2, 3, 4, 5).astype('float32')
mask = rand_bool(2, 3, 4, 5)
run_multi_test(mnp_amax, onp_amax, (a, mask))
+ match_res(mnp.amax, onp.amax, rand_int())
def mnp_amin(x, mask):
@@ -1029,6 +1032,7 @@ def test_amin():
a = rand_int(2, 3, 4, 5).astype('float32')
mask = rand_bool(2, 3, 4, 5)
run_multi_test(mnp_amin, onp_amin, (a, mask))
+ match_res(mnp.amin, onp.amin, rand_int())
def mnp_hypot(x1, x2):
@@ -1843,6 +1847,7 @@ def test_nansum():
x[1][0][2][4] = onp.nan
x[1][1][1][1] = onp.nan
run_multi_test(mnp_nansum, onp_nansum, (x,))
+ match_res(mnp.nansum, onp.nansum, rand_int())
def mnp_nanmean(x):
@@ -1875,6 +1880,7 @@ def test_nanmean():
x[1][0][2][4] = onp.nan
x[1][1][1][1] = onp.nan
run_multi_test(mnp_nanmean, onp_nanmean, (x,))
+ match_res(mnp.nanmean, onp.nanmean, rand_int())
def mnp_mean(*arrs):
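
The added `match_res(...)` calls pass a plain (non-Tensor) random array to the mindspore.numpy function and compare the result against the NumPy reference, covering the new conversion path end to end. A simplified, hypothetical version of that kind of check (the real helpers live in the test utilities; the name and signature below are illustrative only):

import numpy as onp
import mindspore.numpy as mnp

def match_res_sketch(mnp_fn, onp_fn, *args, error=1e-5):
    # Run both implementations on the same non-Tensor input and compare.
    actual = mnp_fn(*args).asnumpy()
    expected = onp_fn(*args)
    onp.testing.assert_allclose(actual, expected, atol=error)

match_res_sketch(mnp.nansum, onp.nansum, [[1.0, 2.0], [3.0, 4.0]])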