From 681180d3b4ba58ec072438bf24db6017f75434fd Mon Sep 17 00:00:00 2001
From: yanglf1121
Date: Mon, 22 Mar 2021 13:27:16 +0800
Subject: [PATCH] fix numpy_native ci error on cpu

---
 RELEASE.md                                    | 113 ++++++++++++++++++
 mindspore/numpy/array_creations.py            |   3 +-
 tests/st/numpy_native/__init__.py             |   2 +-
 tests/st/numpy_native/test_array_creations.py |   6 +-
 4 files changed, 120 insertions(+), 4 deletions(-)

diff --git a/RELEASE.md b/RELEASE.md
index 194aef55c51..49e57cd32b9 100644
--- a/RELEASE.md
+++ b/RELEASE.md
@@ -24,6 +24,119 @@ The FusedBatchNorm and FusedBatchNormEx interface has been deleted. Please use t
 The MetaTensor interface has been deleted. The function of MetaTensor has been integrated into tensor.
 
+###### `mindspore.numpy.array()`, `mindspore.numpy.asarray()`, `mindspore.numpy.asfarray()`, `mindspore.numpy.copy()` now support GRAPH mode, but cannot accept `numpy.ndarray` as input arguments anymore ([!12726](https://gitee.com/mindspore/mindspore/pulls/12726))
+
+Previously, these interfaces could accept `numpy.ndarray` arguments and convert them to `Tensor`, but they could not be used in GRAPH mode.
+However, the MindSpore parser currently cannot parse `numpy.ndarray` in a JIT graph, so supporting these interfaces in GRAPH mode requires removing `numpy.ndarray` support. Users can still convert a `numpy.ndarray` to a tensor explicitly with `Tensor`.
+
+<table>
+<tr>
+<td> 1.1.0 </td> <td> 1.2.0 </td>
+</tr>
+<tr>
+<td>
+
+```python
+>>> import mindspore.numpy as mnp
+>>> import numpy
+>>>
+>>> nd_array = numpy.array([1,2,3])
+>>> tensor = mnp.asarray(nd_array) # this line cannot be parsed in GRAPH mode
+```
+
+</td>
+<td>
+
+```python
+>>> import mindspore.numpy as mnp
+>>> import numpy
+>>>
+>>> tensor = mnp.asarray([1,2,3]) # this line can be parsed in GRAPH mode
+```
+
+</td>
+</tr>
+</table>
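+
+As a minimal sketch of the workaround mentioned above (the `mnp.add` call and the variable names are illustrative only), a `numpy.ndarray` can be wrapped in `Tensor` before it is handed to the mindspore.numpy interfaces:
+
+```python
+>>> import numpy
+>>> import mindspore.numpy as mnp
+>>> from mindspore import Tensor
+>>>
+>>> nd_array = numpy.array([1, 2, 3], dtype=numpy.float32)
+>>> tensor = Tensor(nd_array)      # explicit conversion, done outside the compiled graph
+>>> res = mnp.add(tensor, tensor)  # mindspore.numpy interfaces then receive a Tensor
+```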
+
+###### mindspore.numpy interfaces remove support for keyword arguments `out` and `where` ([!12726](https://gitee.com/mindspore/mindspore/pulls/12726))
+
+Previously, mindspore.numpy interfaces had incomplete support for the keyword arguments `out` and `where`: the `out` argument was only functional when `where` was also provided, and `out` could not be used to pass a reference (an output buffer) the way it can in NumPy. To avoid confusion, these two arguments have been removed. Their original functionality is available through [np.where](https://www.mindspore.cn/doc/api_python/zh-CN/master/mindspore/numpy/mindspore.numpy.where.html#mindspore.numpy.where).
+
+<table>
+<tr>
+<td> 1.1.0 </td> <td> 1.2.0 </td>
+</tr>
+<tr>
+<td>
+
+```python
+>>> import mindspore.numpy as np
+>>>
+>>> a = np.ones((3,3))
+>>> b = np.ones((3,3))
+>>> out = np.zeros((3,3))
+>>> where = np.asarray([[True, False, True],[False, False, True],[True, True, True]])
+>>> res = np.add(a, b, out=out, where=where) # `out` cannot be used as a reference, therefore it is misleading
+```
+
+</td>
+<td>
+
+```python
+>>> import mindspore.numpy as np
+>>>
+>>> a = np.ones((3,3))
+>>> b = np.ones((3,3))
+>>> out = np.zeros((3,3))
+>>> where = np.asarray([[True, False, True],[False, False, True],[True, True, True]])
+>>> res = np.add(a, b)
+>>> out = np.where(where, x=res, y=out) # instead of np.add(a, b, out=out, where=where)
+```
+
+</td>
+</tr>
+</table>
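+
+For reference, a short sketch of the element-wise selection that `np.where` performs (the concrete values below are illustrative only): entries where the condition holds are taken from `x`, the rest from `y`.
+
+```python
+>>> import mindspore.numpy as np
+>>>
+>>> cond = np.asarray([True, False, True])
+>>> x = np.asarray([1., 2., 3.])
+>>> y = np.asarray([10., 20., 30.])
+>>> res = np.where(cond, x, y)  # picks from x where cond is True, else from y -> [1., 20., 3.]
+```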
+
+#### Deprecations
+
+##### Python API
+
+###### `nn.MatMul` is now deprecated in favor of `ops.matmul` ([!12817](https://gitee.com/mindspore/mindspore/pulls/12817))
+
+[ops.matmul](https://www.mindspore.cn/doc/api_python/zh-CN/master/mindspore/ops/mindspore.ops.matmul.html#mindspore.ops.matmul) follows the API of [numpy.matmul](https://numpy.org/doc/stable/reference/generated/numpy.matmul.html) as closely as possible. As a function interface, [ops.matmul](https://www.mindspore.cn/doc/api_python/zh-CN/master/mindspore/ops/mindspore.ops.matmul.html#mindspore.ops.matmul) is called directly without instantiation, whereas `nn.MatMul` must first be instantiated as a class instance.
+
+<table>
+<tr>
+<td> 1.1.0 </td> <td> 1.2.0 </td>
+</tr>
+<tr>
+<td>
+
+```python
+>>> import numpy as np
+>>> from mindspore import Tensor, nn
+>>>
+>>> x = Tensor(np.ones((2, 3)).astype(np.float32))
+>>> y = Tensor(np.ones((3, 4)).astype(np.float32))
+>>> nn.MatMul()(x, y)
+```
+
+</td>
+<td>
+
+```python
+>>> import numpy as np
+>>> from mindspore import Tensor, ops
+>>>
+>>> x = Tensor(np.ones((2, 3)).astype(np.float32))
+>>> y = Tensor(np.ones((3, 4)).astype(np.float32))
+>>> ops.matmul(x, y)
+```
+
+</td>
+</tr>
+</table>
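+
+Because [ops.matmul](https://www.mindspore.cn/doc/api_python/zh-CN/master/mindspore/ops/mindspore.ops.matmul.html#mindspore.ops.matmul) is a plain function, it can also be called directly inside a cell's `construct` without creating an operator instance first. The snippet below is a minimal sketch of that usage; the `Net` cell and its shapes are illustrative only.
+
+```python
+>>> import numpy as np
+>>> from mindspore import Tensor, nn, ops
+>>>
+>>> class Net(nn.Cell):
+...     def construct(self, x, y):
+...         return ops.matmul(x, y)  # used as a function, no nn.MatMul() instantiation needed
+...
+>>> x = Tensor(np.ones((2, 3)).astype(np.float32))
+>>> y = Tensor(np.ones((3, 4)).astype(np.float32))
+>>> Net()(x, y).shape
+(2, 4)
+```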
+
+
 # MindSpore 1.1.1 Release Notes
 
 ## MindSpore
 
diff --git a/mindspore/numpy/array_creations.py b/mindspore/numpy/array_creations.py
index 8fd8a00e026..7ffe8d669d7 100644
--- a/mindspore/numpy/array_creations.py
+++ b/mindspore/numpy/array_creations.py
@@ -451,6 +451,7 @@ def _type_checking_for_xspace(start, stop, num, endpoint, dtype, axis):
         dtype = _check_dtype(dtype)
     else:
         dtype = mstype.float32
+    start, stop = broadcast_arrays(start, stop)
     axis = _canonicalize_axis(axis, start.ndim+1)
     return start, stop, num, endpoint, dtype, axis
 
@@ -499,8 +500,6 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis
     start, stop, num, endpoint, dtype, axis = _type_checking_for_xspace(start, stop, num, endpoint, dtype, axis)
     if not isinstance(retstep, bool):
         _raise_type_error("retstep should be an boolean, but got ", retstep)
-    start, stop = broadcast_arrays(start, stop)
-    axis = _canonicalize_axis(axis, start.ndim+1)
     bounds_shape = start.shape
     bounds_shape = _tuple_slice(bounds_shape, None, axis) + (1,) + _tuple_slice(bounds_shape, axis, None)
     iota_shape = _list_comprehensions(start.ndim+1, 1, True)
diff --git a/tests/st/numpy_native/__init__.py b/tests/st/numpy_native/__init__.py
index bd8949921aa..efdee8dee7a 100644
--- a/tests/st/numpy_native/__init__.py
+++ b/tests/st/numpy_native/__init__.py
@@ -18,4 +18,4 @@ import mindspore.context as context
 
 # pylint: disable=unused-argument
 def setup_module(module):
-    context.set_context(mode=context.GRAPH_MODE)
+    context.set_context(mode=context.PYNATIVE_MODE)
diff --git a/tests/st/numpy_native/test_array_creations.py b/tests/st/numpy_native/test_array_creations.py
index 0a46ba26f0d..69ea19602ba 100644
--- a/tests/st/numpy_native/test_array_creations.py
+++ b/tests/st/numpy_native/test_array_creations.py
@@ -332,7 +332,7 @@ def test_arange():
     match_array(actual, expected, error=6)
 
 
-@pytest.mark.level1
+@pytest.mark.level0
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.platform_x86_gpu_training
@@ -363,6 +363,10 @@ def test_linspace():
                           2.0, [3, 4, 5], num=5, endpoint=False).asnumpy()
     match_array(actual, expected, error=6)
 
+    actual = onp.linspace(2.0, [[3, 4, 5]], num=5, endpoint=False, axis=2)
+    expected = mnp.linspace(2.0, [[3, 4, 5]], num=5, endpoint=False, axis=2).asnumpy()
+    match_array(actual, expected, error=6)
+
     start = onp.random.random([2, 1, 4]).astype("float32")
     stop = onp.random.random([1, 5, 1]).astype("float32")
     actual = onp.linspace(start, stop, num=20, retstep=True,