diff --git a/tests/st/ops/ascend/test_add.py b/tests/st/ops/ascend/test_add.py
index 0fa8e2d204d..6a07bb879ff 100644
--- a/tests/st/ops/ascend/test_add.py
+++ b/tests/st/ops/ascend/test_add.py
@@ -27,8 +27,8 @@ class Net(nn.Cell):
         super(Net, self).__init__()
         self.add = P.TensorAdd()
 
-    def construct(self, x, y):
-        return self.add(x, y)
+    def construct(self, x_, y_):
+        return self.add(x_, y_)
 
 
 x = np.ones([1, 3, 3, 4]).astype(np.float32)
diff --git a/tests/st/ops/ascend/test_biasAddGrad.py b/tests/st/ops/ascend/test_biasAddGrad.py
index 5d1e9668648..e01a2bbd5b9 100644
--- a/tests/st/ops/ascend/test_biasAddGrad.py
+++ b/tests/st/ops/ascend/test_biasAddGrad.py
@@ -31,8 +31,8 @@ class Net(nn.Cell):
         #                       'normal', [2, 3, 3, 4]), name='dout')
 
     @ms_function
-    def construct(self, dout):
-        return self.bias_add_grad(dout)
+    def construct(self, dout_):
+        return self.bias_add_grad(dout_)
 
 
 dout = np.ones([2, 3, 4, 4]).astype(np.float32)
diff --git a/tests/st/ops/ascend/test_conv2dGradFilter.py b/tests/st/ops/ascend/test_conv2dGradFilter.py
index 96c878bd467..928f3cccf23 100644
--- a/tests/st/ops/ascend/test_conv2dGradFilter.py
+++ b/tests/st/ops/ascend/test_conv2dGradFilter.py
@@ -34,8 +34,8 @@ class Net(nn.Cell):
         self.get_shape = P.Shape()
 
     @ms_function
-    def construct(self, x, out):
-        return self.conv2d_grad(out, x, self.get_shape(self.y))
+    def construct(self, x_, out_):
+        return self.conv2d_grad(out_, x_, self.get_shape(self.y))
 
 
 x = Tensor(np.array([[[
diff --git a/tests/st/ops/ascend/test_drop_out_gen_mask.py b/tests/st/ops/ascend/test_drop_out_gen_mask.py
index 64360a6e24d..6771a3a68b0 100644
--- a/tests/st/ops/ascend/test_drop_out_gen_mask.py
+++ b/tests/st/ops/ascend/test_drop_out_gen_mask.py
@@ -29,9 +29,9 @@ class Net(nn.Cell):
         self.mask = P.DropoutGenMask(10, 28)
         self.shape = P.Shape()
 
-    def construct(self, x, y):
-        shape_x = self.shape(x)
-        return self.mask(shape_x, y)
+    def construct(self, x_, y_):
+        shape_x = self.shape(x_)
+        return self.mask(shape_x, y_)
 
 
 x = np.ones([2, 4, 2, 2]).astype(np.int32)
diff --git a/tests/st/ops/ascend/test_equal_count.py b/tests/st/ops/ascend/test_equal_count.py
index a204a48c6b3..bd73d8745e6 100644
--- a/tests/st/ops/ascend/test_equal_count.py
+++ b/tests/st/ops/ascend/test_equal_count.py
@@ -27,8 +27,8 @@ class Net(nn.Cell):
         super(Net, self).__init__()
         self.equal_count = P.EqualCount()
 
-    def construct(self, x, y):
-        return self.equal_count(x, y)
+    def construct(self, x_, y_):
+        return self.equal_count(x_, y_)
 
 
 x = np.random.randn(32).astype(np.int32)
diff --git a/tests/st/ops/ascend/test_matmul.py b/tests/st/ops/ascend/test_matmul.py
index 3981e59f74a..02d216162af 100644
--- a/tests/st/ops/ascend/test_matmul.py
+++ b/tests/st/ops/ascend/test_matmul.py
@@ -29,8 +29,8 @@ class Net(nn.Cell):
         self.matmul = P.MatMul()
 
     @ms_function
-    def construct(self, x1, x2):
-        return self.matmul(x1, x2)
+    def construct(self, x1_, x2_):
+        return self.matmul(x1_, x2_)
 
 
 x1 = np.random.randn(1, 3).astype(np.float32)
diff --git a/tests/st/ops/ascend/test_maxpool_with_argmax.py b/tests/st/ops/ascend/test_maxpool_with_argmax.py
index efb9a16234b..e87748939b5 100644
--- a/tests/st/ops/ascend/test_maxpool_with_argmax.py
+++ b/tests/st/ops/ascend/test_maxpool_with_argmax.py
@@ -12,8 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ============================================================================
-import numpy as np
-
 import mindspore.context as context
 import mindspore.nn as nn
 from mindspore.common.api import ms_function
diff --git a/tests/st/ops/ascend/test_sparseSoftmaxCrossEntropyWithLogits.py b/tests/st/ops/ascend/test_sparseSoftmaxCrossEntropyWithLogits.py
index 3f47e03fbd4..738201f3d83 100644
--- a/tests/st/ops/ascend/test_sparseSoftmaxCrossEntropyWithLogits.py
+++ b/tests/st/ops/ascend/test_sparseSoftmaxCrossEntropyWithLogits.py
@@ -63,7 +63,7 @@ def test_net():
     expect = loss_np
     SparseSoftmaxCrossEntropyWithLogits = Net()
     loss_me = SparseSoftmaxCrossEntropyWithLogits(Tensor(logits), Tensor(labels))
-    '''assert'''
+# assert
     assert np.allclose(expect.flatten(), loss_me.asnumpy().flatten(), 0.01, 0.01)
     print(loss_me.asnumpy().flatten())
     print("-------------------------")
diff --git a/tests/st/ops/ascend/test_tbe_ops/test_add.py b/tests/st/ops/ascend/test_tbe_ops/test_add.py
index af33e9c003a..bdf03da9436 100644
--- a/tests/st/ops/ascend/test_tbe_ops/test_add.py
+++ b/tests/st/ops/ascend/test_tbe_ops/test_add.py
@@ -25,8 +25,8 @@ class Net(nn.Cell):
         super(Net, self).__init__()
         self.add = P.TensorAdd()
 
-    def construct(self, x, y):
-        return self.add(x, y)
+    def construct(self, x_, y_):
+        return self.add(x_, y_)
 
 
 x = np.random.randn(1, 3, 3, 4).astype(np.float32)
diff --git a/tests/st/ops/ascend/test_tbe_ops/test_conv2d_backprop_filter.py b/tests/st/ops/ascend/test_tbe_ops/test_conv2d_backprop_filter.py
index 3d112d24518..a98f2a5371f 100644
--- a/tests/st/ops/ascend/test_tbe_ops/test_conv2d_backprop_filter.py
+++ b/tests/st/ops/ascend/test_tbe_ops/test_conv2d_backprop_filter.py
@@ -65,12 +65,10 @@ def test_conv2d_backprop_filter():
     conv2d_filter = Net()
    output = conv2d_filter()
     print("================================")
-    """
-    expect output:
-    [[[[ -60, -142, -265]
-    [-104, -211, -322]
-    [-102, -144, -248]]]]
-    """
+# expect output:
+# [[[[ -60, -142, -265]
+# [-104, -211, -322]
+# [-102, -144, -248]]]]
     expect = np.array([[[[-60, -142, -265],
                          [-104, -211, -322],
                          [-102, -144, -248]]]]).astype(np.float32)
diff --git a/tests/st/ops/ascend/test_tbe_ops/test_conv2d_backprop_input.py b/tests/st/ops/ascend/test_tbe_ops/test_conv2d_backprop_input.py
index d0ef791b9da..63ef0289042 100644
--- a/tests/st/ops/ascend/test_tbe_ops/test_conv2d_backprop_input.py
+++ b/tests/st/ops/ascend/test_tbe_ops/test_conv2d_backprop_input.py
@@ -64,15 +64,13 @@ def test_conv2d_backprop_input():
     conv2d_input = Net()
     output = conv2d_input()
     print("================================")
-    """
-    expect output:
-    [[[[ -5, -4, 5, 12, 0, -8]
-    [-15, -6, 17, 17, -2, -11]
-    [-15, -8, 13, 12, 2, -4]
-    [-13, -6, 8, -14, 5, 20]
-    [ -3, -4, -4, -19, 7, 23]
-    [ -3, -2, 0, -14, 3, 16]]]]
-    """
+# expect output:
+# [[[[ -5, -4, 5, 12, 0, -8]
+# [-15, -6, 17, 17, -2, -11]
+# [-15, -8, 13, 12, 2, -4]
+# [-13, -6, 8, -14, 5, 20]
+# [ -3, -4, -4, -19, 7, 23]
+# [ -3, -2, 0, -14, 3, 16]]]]
     expect = np.array([[[[-5, -4, 5, 12, 0, -8],
                          [-15, -6, 17, 17, -2, -11],
                          [-15, -8, 13, 12, 2, -4],
diff --git a/tests/st/ops/ascend/test_tbe_ops/test_gelu_grad_sens.py b/tests/st/ops/ascend/test_tbe_ops/test_gelu_grad_sens.py
index d0c8a97d89f..0f890ea9987 100755
--- a/tests/st/ops/ascend/test_tbe_ops/test_gelu_grad_sens.py
+++ b/tests/st/ops/ascend/test_tbe_ops/test_gelu_grad_sens.py
@@ -59,7 +59,7 @@ def gelu_backward_cmp(input_shape):
 
 class MEGeluLargeIn(Cell):
     def __init__(self):
-        super(GELU, self).__init__()
+        super(MEGeluLargeIn, self).__init__()
         self.matmul = P.MatMul()
         self.gelu = P.Gelu()
 
@@ -79,7 +79,7 @@ class GradLargeIn(Cell):
 
 
 def gelu_backward_me_large_in_impl(x1, x2, output_grad):
-    n = GradLargeIn()
+    n = MEGeluLargeIn()
     grad_with_sense = GradLargeIn(n)
     grad_with_sense.set_train()
     input_grad = grad_with_sense(x1, x2, output_grad)
diff --git a/tests/st/ops/ascend/test_tbe_ops/test_less.py b/tests/st/ops/ascend/test_tbe_ops/test_less.py
index ccffaaf5f12..bc29054ae3c 100644
--- a/tests/st/ops/ascend/test_tbe_ops/test_less.py
+++ b/tests/st/ops/ascend/test_tbe_ops/test_less.py
@@ -29,8 +29,8 @@ class Net(nn.Cell):
         self.less = P.Less()
 
     @ms_function
-    def construct(self, x1, x2):
-        return self.less(x1, x2)
+    def construct(self, x1_, x2_):
+        return self.less(x1_, x2_)
 
 
 x1 = np.random.randn(3, 4).astype(np.float16)
diff --git a/tests/st/ops/ascend/test_tbe_ops/test_less_equal.py b/tests/st/ops/ascend/test_tbe_ops/test_less_equal.py
index d6af031905f..0bdd5cd9557 100644
--- a/tests/st/ops/ascend/test_tbe_ops/test_less_equal.py
+++ b/tests/st/ops/ascend/test_tbe_ops/test_less_equal.py
@@ -29,8 +29,8 @@ class Net(nn.Cell):
         self.less_equal = P.LessEqual()
 
     @ms_function
-    def construct(self, x1, x2):
-        return self.less_equal(x1, x2)
+    def construct(self, x1_, x2_):
+        return self.less_equal(x1_, x2_)
 
 
 x1 = np.random.randn(3, 4).astype(np.float16)
diff --git a/tests/st/ops/ascend/test_tbe_ops/test_logical_and.py b/tests/st/ops/ascend/test_tbe_ops/test_logical_and.py
index 7c7e5db1f18..19ea6ce7ac1 100644
--- a/tests/st/ops/ascend/test_tbe_ops/test_logical_and.py
+++ b/tests/st/ops/ascend/test_tbe_ops/test_logical_and.py
@@ -28,8 +28,8 @@ class Net(nn.Cell):
         self.logical_and = P.LogicalAnd()
 
     @ms_function
-    def construct(self, x1, x2):
-        return self.logical_and(x1, x2)
+    def construct(self, x1_, x2_):
+        return self.logical_and(x1_, x2_)
 
 
 x1 = [True, True, False, False, True, True, False, False]
diff --git a/tests/st/ops/ascend/test_tbe_ops/test_logical_not.py b/tests/st/ops/ascend/test_tbe_ops/test_logical_not.py
index 333298a2e2b..a530ec62169 100644
--- a/tests/st/ops/ascend/test_tbe_ops/test_logical_not.py
+++ b/tests/st/ops/ascend/test_tbe_ops/test_logical_not.py
@@ -28,8 +28,8 @@ class Net(nn.Cell):
         self.logical_not = P.LogicalNot()
 
     @ms_function
-    def construct(self, x1):
-        return self.logical_not(x1)
+    def construct(self, x):
+        return self.logical_not(x)
 
 
 x1 = [True, True, False, False, True, True, False, False]
diff --git a/tests/st/ops/ascend/test_tbe_ops/test_logical_or.py b/tests/st/ops/ascend/test_tbe_ops/test_logical_or.py
index 5dfb8fd7658..f8fda645564 100644
--- a/tests/st/ops/ascend/test_tbe_ops/test_logical_or.py
+++ b/tests/st/ops/ascend/test_tbe_ops/test_logical_or.py
@@ -28,8 +28,8 @@ class Net(nn.Cell):
         self.logical_or = P.LogicalOr()
 
     @ms_function
-    def construct(self, x1, x2):
-        return self.logical_or(x1, x2)
+    def construct(self, x1_, x2_):
+        return self.logical_or(x1_, x2_)
 
 
 x1 = [True, True, False, False, True, True, False, False]
diff --git a/tests/st/ops/ascend/test_tbe_ops/test_matmul.py b/tests/st/ops/ascend/test_tbe_ops/test_matmul.py
index 65f1c11060c..84d3e2f28d5 100644
--- a/tests/st/ops/ascend/test_tbe_ops/test_matmul.py
+++ b/tests/st/ops/ascend/test_tbe_ops/test_matmul.py
@@ -27,8 +27,8 @@ class Net(nn.Cell):
         self.matmul = P.MatMul()
 
     @ms_function
-    def construct(self, x1, x2):
-        return self.matmul(x1, x2)
+    def construct(self, x1_, x2_):
+        return self.matmul(x1_, x2_)
 
 
 x1 = np.random.randn(1, 3).astype(np.float32)
diff --git a/tests/st/ops/ascend/test_tbe_ops/test_matmul_failed.py b/tests/st/ops/ascend/test_tbe_ops/test_matmul_failed.py
index 80409ec7126..0926a9882c6 100644
--- a/tests/st/ops/ascend/test_tbe_ops/test_matmul_failed.py
+++ b/tests/st/ops/ascend/test_tbe_ops/test_matmul_failed.py
@@ -29,8 +29,8 @@ class Net(nn.Cell):
         self.matmul = P.MatMul(transpose_b=True)
 
     @ms_function
-    def construct(self, x1, x2):
-        return self.matmul(x1, x2)
+    def construct(self, x1_, x2_):
+        return self.matmul(x1_, x2_)
 
 
 x1 = np.random.randn(10, 1).astype(np.float32)
diff --git a/tests/st/ops/ascend/test_tbe_ops/test_maximum_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_maximum_grad.py
index aad47e4aa0f..529343812eb 100644
--- a/tests/st/ops/ascend/test_tbe_ops/test_maximum_grad.py
+++ b/tests/st/ops/ascend/test_tbe_ops/test_maximum_grad.py
@@ -44,15 +44,15 @@ class GradWrap(Cell):
         return gout
 
 
-def gen_data(inputA_np, inputB_np, grad=None):
+def gen_data(inputA_np, inputB_np, grad_=None):
     inputA_me = inputA_np
     if isinstance(inputA_np, np.ndarray):
         inputA_me = Tensor(inputA_me)
     inputB_me = inputB_np
     if isinstance(inputB_np, np.ndarray):
         inputB_me = Tensor(inputB_np)
-    if grad is None:
-        grad = np.random.randn(2).astype(np.float32)
+    if grad_ is None:
+        grad_ = np.random.randn(2).astype(np.float32)
     print("----inputA---")
     print(inputA_np)
     print("----inputB---")
@@ -60,7 +60,7 @@ def gen_data(inputA_np, inputB_np, grad=None):
 
     net_me = GradWrap(MaxNetMe())
     net_me.set_train()
-    output = net_me(inputA_me, inputB_me, Tensor(grad))
+    output = net_me(inputA_me, inputB_me, Tensor(grad_))
     print("---me---")
     print(output[0].asnumpy())
     print(output[1].asnumpy())
diff --git a/tests/st/ops/ascend/test_tbe_ops/test_minimum_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_minimum_grad.py
index aafe4383108..3a19aaa1d16 100644
--- a/tests/st/ops/ascend/test_tbe_ops/test_minimum_grad.py
+++ b/tests/st/ops/ascend/test_tbe_ops/test_minimum_grad.py
@@ -44,7 +44,7 @@ class GradWrap(Cell):
         return gout
 
 
-def gen_data(inputA_np, inputB_np, grad=None):
+def gen_data(inputA_np, inputB_np, grad_=None):
     inputA_me = inputA_np
     if isinstance(inputA_np, np.ndarray):
         inputA_me = Tensor(inputA_me)
@@ -53,12 +53,12 @@ def gen_data(inputA_np, inputB_np, grad=None):
     if isinstance(inputB_np, np.ndarray):
         inputB_me = Tensor(inputB_np)
 
-    if grad is None:
-        grad = np.random.randn(1, 3, 2, 2).astype(np.float32)
+    if grad_ is None:
+        grad_ = np.random.randn(1, 3, 2, 2).astype(np.float32)
     print(inputA_np)
     print(inputB_np)
-    print(grad)
+    print(grad_)
 
     net_me = GradWrap(MinNetMe())
     net_me.set_train()
diff --git a/tests/st/ops/ascend/test_tbe_ops/test_relu_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_relu_grad.py
index b26fe30697d..40dc5ebadae 100644
--- a/tests/st/ops/ascend/test_tbe_ops/test_relu_grad.py
+++ b/tests/st/ops/ascend/test_tbe_ops/test_relu_grad.py
@@ -31,8 +31,8 @@ class Grad(nn.Cell):
         self.network = network
 
     @ms_function
-    def construct(self, input, output_grad):
-        return self.grad(self.network)(input, output_grad)
+    def construct(self, inputValue, output_grad):
+        return self.grad(self.network)(inputValue, output_grad)
 
 
 class Net(nn.Cell):
diff --git a/tests/st/ops/ascend/test_tdt_data_ms.py b/tests/st/ops/ascend/test_tdt_data_ms.py
index d680ac0a557..1cac1004fd0 100644
--- a/tests/st/ops/ascend/test_tdt_data_ms.py
+++ b/tests/st/ops/ascend/test_tdt_data_ms.py
@@ -12,8 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ============================================================================
-import numpy as np
 import sys
+import numpy as np
 
 import mindspore.context as context
 import mindspore.dataset as ds
@@ -31,8 +31,8 @@ SCHEMA_DIR = "{0}/resnet_all_datasetSchema.json".format(data_path)
 
 def test_me_de_train_dataset():
     data_list = ["{0}/train-00001-of-01024.data".format(data_path)]
-    data_set = ds.TFRecordDataset(data_list, schema=SCHEMA_DIR,
-                                  columns_list=["image/encoded", "image/class/label"])
+    data_set_new = ds.TFRecordDataset(data_list, schema=SCHEMA_DIR,
+                                      columns_list=["image/encoded", "image/class/label"])
 
     resize_height = 224
     resize_width = 224
@@ -42,21 +42,21 @@ def test_me_de_train_dataset():
 
     # define map operations
     decode_op = vision.Decode()
-    resize_op = vision.Resize(resize_height, resize_width,
+    resize_op = vision.Resize((resize_height, resize_width),
                               Inter.LINEAR)  # Bilinear as default
     rescale_op = vision.Rescale(rescale, shift)
 
     # apply map operations on images
-    data_set = data_set.map(input_columns="image/encoded", operations=decode_op)
-    data_set = data_set.map(input_columns="image/encoded", operations=resize_op)
-    data_set = data_set.map(input_columns="image/encoded", operations=rescale_op)
+    data_set_new = data_set_new.map(input_columns="image/encoded", operations=decode_op)
+    data_set_new = data_set_new.map(input_columns="image/encoded", operations=resize_op)
+    data_set_new = data_set_new.map(input_columns="image/encoded", operations=rescale_op)
     hwc2chw_op = vision.HWC2CHW()
-    data_set = data_set.map(input_columns="image/encoded", operations=hwc2chw_op)
-    data_set = data_set.repeat(1)
+    data_set_new = data_set_new.map(input_columns="image/encoded", operations=hwc2chw_op)
+    data_set_new = data_set_new.repeat(1)
 
     # apply batch operations
-    batch_size = 32
-    data_set = data_set.batch(batch_size, drop_remainder=True)
-    return data_set
+    batch_size_new = 32
+    data_set_new = data_set_new.batch(batch_size_new, drop_remainder=True)
+    return data_set_new
 
 
 def convert_type(shapes, types):
diff --git a/tests/st/ops/cpu/test_concat_op.py b/tests/st/ops/cpu/test_concat_op.py
index 9d5067a35d6..c2a1d07853c 100644
--- a/tests/st/ops/cpu/test_concat_op.py
+++ b/tests/st/ops/cpu/test_concat_op.py
@@ -14,10 +14,10 @@
 # ============================================================================
 
 import pytest
+import numpy as np
 from mindspore import Tensor
 from mindspore.ops import operations as P
 import mindspore.nn as nn
-import numpy as np
 import mindspore.context as context
 from mindspore.common import dtype as mstype
diff --git a/tests/st/ops/cpu/test_conv2d_backprop_filter_op.py b/tests/st/ops/cpu/test_conv2d_backprop_filter_op.py
index a422468b2e1..87a0f735a20 100644
--- a/tests/st/ops/cpu/test_conv2d_backprop_filter_op.py
+++ b/tests/st/ops/cpu/test_conv2d_backprop_filter_op.py
@@ -68,12 +68,10 @@ def test_conv2d_backprop_filter():
     conv2d_filter = Net4()
     output = conv2d_filter()
     print("================================")
-    """
-    expect output:
-    [[[[ -60, -142, -265]
-    [-104, -211, -322]
-    [-102, -144, -248]]]]
-    """
+# expect output:
+# [[[[ -60, -142, -265]
+# [-104, -211, -322]
+# [-102, -144, -248]]]]
     expect = np.array([[[[-60, -142, -265],
                          [-104, -211, -322],
                          [-102, -144, -248]]]]).astype(np.float32)
diff --git a/tests/st/ops/cpu/test_conv2d_backprop_input_op.py b/tests/st/ops/cpu/test_conv2d_backprop_input_op.py
index a75e676507f..7945f3828fe 100644
--- a/tests/st/ops/cpu/test_conv2d_backprop_input_op.py
+++ b/tests/st/ops/cpu/test_conv2d_backprop_input_op.py
@@ -66,16 +66,14 @@ class Net5(nn.Cell):
 def test_conv2d_backprop_input():
     conv2d_input = Net5()
     output = conv2d_input()
-    print("================================")
-    """
-    expect output:
-    [[[[ -5, -4, 5, 12, 0, -8]
-    [-15, -6, 17, 17, -2, -11]
-    [-15, -8, 13, 12, 2, -4]
-    [-13, -6, 8, -14, 5, 20]
-    [ -3, -4, -4, -19, 7, 23]
-    [ -3, -2, 0, -14, 3, 16]]]]
-    """
+    print("================================")
+# expect output:
+# [[[[ -5, -4, 5, 12, 0, -8]
+# [-15, -6, 17, 17, -2, -11]
+# [-15, -8, 13, 12, 2, -4]
+# [-13, -6, 8, -14, 5, 20]
+# [ -3, -4, -4, -19, 7, 23]
+# [ -3, -2, 0, -14, 3, 16]]]]
     expect = np.array([[[[-5, -4, 5, 12, 0, -8],
                          [-15, -6, 17, 17, -2, -11],
                          [-15, -8, 13, 12, 2, -4],
diff --git a/tests/st/ops/cpu/test_conv2d_op.py b/tests/st/ops/cpu/test_conv2d_op.py
index 627a722b5b8..454f32eac7e 100644
--- a/tests/st/ops/cpu/test_conv2d_op.py
+++ b/tests/st/ops/cpu/test_conv2d_op.py
@@ -55,16 +55,13 @@ def test_conv2d():
     conv2d = NetConv2d()
     output = conv2d()
     print("================================")
-    """
-    expect output:
-    [[[[ 45. 48. 51.]
-    [ 54. 57. 60.]
-    [ 63. 66. 69.]]
-
-    [[126. 138. 150.]
-    [162. 174. 186.]
-    [198. 210. 222.]]]]
-    """
+# expect output:
+# [[[[ 45. 48. 51.]
+# [ 54. 57. 60.]
+# [ 63. 66. 69.]]
+# [[126. 138. 150.]
+# [162. 174. 186.]
+# [198. 210. 222.]]]]
     expect = np.array([[[[45, 48, 51],
                          [54, 57, 60],
                          [63, 66, 69]],
diff --git a/tests/st/ops/cpu/test_gather_op.py b/tests/st/ops/cpu/test_gather_op.py
index 50fb2096dd5..b9ac24ad358 100644
--- a/tests/st/ops/cpu/test_gather_op.py
+++ b/tests/st/ops/cpu/test_gather_op.py
@@ -14,11 +14,10 @@
 # ============================================================================
 
 import pytest
+import numpy as np
 from mindspore import Tensor
 from mindspore.ops import operations as P
 import mindspore.nn as nn
-from mindspore.common.api import ms_function
-import numpy as np
 import mindspore.context as context
 from mindspore.common import dtype as mstype
 
@@ -96,7 +95,7 @@ def test_gatherv2_axisN1():
     expect = np.array([[[1., 2.],
                         [4., 5.]],
                        [[7., 8.],
-                       [10.,11.]]])
+                        [10., 11.]]])
     error = np.ones(shape=ms_output.asnumpy().shape) * 1.0e-6
     diff = ms_output.asnumpy() - expect
     assert np.all(diff < error)
diff --git a/tests/st/ops/cpu/test_momentum_op.py b/tests/st/ops/cpu/test_momentum_op.py
index 43ba785aed2..717925c23e6 100644
--- a/tests/st/ops/cpu/test_momentum_op.py
+++ b/tests/st/ops/cpu/test_momentum_op.py
@@ -65,10 +65,8 @@ def test_momentum():
 
     print("================================")
     print(losses)
-    """
-    expect output:
-    [[0.04132498 0.00874167 0.00874167 0.00874167 0.00874167
-      0.00874167 0.00874167 0.00874167 0.00874167 0.00874167]]
-    """
+# expect output:
+# [[0.04132498 0.00874167 0.00874167 0.00874167 0.00874167
+#   0.00874167 0.00874167 0.00874167 0.00874167 0.00874167]]
 
     return losses
diff --git a/tests/st/ops/cpu/test_slice_op.py b/tests/st/ops/cpu/test_slice_op.py
index 0f0aa53d04b..90c777ef502 100644
--- a/tests/st/ops/cpu/test_slice_op.py
+++ b/tests/st/ops/cpu/test_slice_op.py
@@ -41,8 +41,8 @@ def test_slice():
     expect = [[[2., -2., 2.]],
               [[4., -4., 4.]]]
 
-    slice = Slice()
-    output = slice(x)
+    slice_op = Slice()
+    output = slice_op(x)
     print("output:\n", output)
     assert (output.asnumpy() == expect).all()
diff --git a/tests/st/ops/custom_ops_tbe/add3_impl.py b/tests/st/ops/custom_ops_tbe/add3_impl.py
index f169ff40d7a..36f296d4c1e 100644
--- a/tests/st/ops/custom_ops_tbe/add3_impl.py
+++ b/tests/st/ops/custom_ops_tbe/add3_impl.py
@@ -13,17 +13,17 @@
 # limitations under the License.
 # ============================================================================
 from __future__ import absolute_import
-from te import tvm
-from topi import generic
 import te.lang.cce
-from topi.cce import util
+from te import tvm
 from te.platform.fusion_manager import fusion_manager
+from topi import generic
+from topi.cce import util
 from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
 
 
 @fusion_manager.register("add3")
 def add3_compute(input1, input2, const_bias):
     sum2 = te.lang.cce.vadd(input1, input2)
-    sum3 = te.lang.cce.vadds(sum2, tvm.const(const_bias, dtype = input1.dtype))
+    sum3 = te.lang.cce.vadds(sum2, tvm.const(const_bias, dtype=input1.dtype))
     return sum3
 
 
@@ -44,7 +44,7 @@ cus_add3_op_info = TBERegOp("CusAdd3") \
 
 
 @op_info_register(cus_add3_op_info)
-def CusAdd3Impl(input1, inptu2, sum, const_bias, kernel_name="CusAdd3Impl"):
+def CusAdd3Impl(input1, inptu2, sum1, const_bias, kernel_name="CusAdd3Impl"):
     shape = input1.get("shape")
     shape = util.shape_refine(shape)
     dtype = input1.get("dtype").lower()
diff --git a/tests/st/ops/custom_ops_tbe/cus_add3.py b/tests/st/ops/custom_ops_tbe/cus_add3.py
index ae0c4d32052..a534be3eae4 100644
--- a/tests/st/ops/custom_ops_tbe/cus_add3.py
+++ b/tests/st/ops/custom_ops_tbe/cus_add3.py
@@ -12,10 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ============================================================================
-import numpy as np
 from mindspore.ops import prim_attr_register, PrimitiveWithInfer
-from mindspore.ops import operations as P
-from mindspore import Tensor
 
 # sum = input1 + input2 + const_bias
 class CusAdd3(PrimitiveWithInfer):
diff --git a/tests/st/ops/custom_ops_tbe/cus_square.py b/tests/st/ops/custom_ops_tbe/cus_square.py
index 59ba7988699..be43d2f5deb 100644
--- a/tests/st/ops/custom_ops_tbe/cus_square.py
+++ b/tests/st/ops/custom_ops_tbe/cus_square.py
@@ -15,7 +15,6 @@
 import numpy as np
 from mindspore import Tensor
 from mindspore.ops import prim_attr_register, PrimitiveWithInfer
-from mindspore.ops import operations as P
 
 # y = x^2
 class CusSquare(PrimitiveWithInfer):
@@ -36,10 +35,10 @@ class CusSquare(PrimitiveWithInfer):
 
     def infer_dtype(self, data_dtype):
         return data_dtype
-    
+
     def get_bprop(self):
         def bprop(data, out, dout):
             gradient = data * 2
             dx = gradient * dout
-            return (dx, )
+            return (dx,)
         return bprop
diff --git a/tests/st/ops/gpu/test_select_op.py b/tests/st/ops/gpu/test_select_op.py
index 03c100cab96..1b1ccb7ef5c 100644
--- a/tests/st/ops/gpu/test_select_op.py
+++ b/tests/st/ops/gpu/test_select_op.py
@@ -27,8 +27,8 @@ class Net(nn.Cell):
         super(Net, self).__init__()
         self.select = P.Select()
 
-    def construct(self, cond, input_x, input_y):
-        return self.select(cond, input_x, input_y)
+    def construct(self, cond_op, input_x, input_y):
+        return self.select(cond_op, input_x, input_y)
 
 
 cond = np.array([[True, False], [True, False]]).astype(np.bool)
diff --git a/tests/ut/python/ops/test_array_ops.py b/tests/ut/python/ops/test_array_ops.py
index 5e5fa7deb21..bf1d8b72d39 100644
--- a/tests/ut/python/ops/test_array_ops.py
+++ b/tests/ut/python/ops/test_array_ops.py
@@ -315,16 +315,16 @@ test_case_array_ops = [
         'desc_inputs': [Tensor(np.array([[1, 2], [3, 4]]).astype(np.float16))]}),
     ('SpaceToDepthNet', {
         'block': SpaceToDepthNet(),
-        'desc_inputs': [Tensor(np.random.rand(1,3,2,2).astype(np.float16))]}),
+        'desc_inputs': [Tensor(np.random.rand(1, 3, 2, 2).astype(np.float16))]}),
     ('DepthToSpaceNet', {
         'block': DepthToSpaceNet(),
-        'desc_inputs': [Tensor(np.random.rand(1,12,1,1).astype(np.float16))]}),
+        'desc_inputs': [Tensor(np.random.rand(1, 12, 1, 1).astype(np.float16))]}),
     ('SpaceToBatchNDNet', {
         'block': SpaceToBatchNDNet(),
-        'desc_inputs': [Tensor(np.random.rand(1,1,2,2).astype(np.float16))]}),
+        'desc_inputs': [Tensor(np.random.rand(1, 1, 2, 2).astype(np.float16))]}),
     ('BatchToSpaceNDNet', {
         'block': BatchToSpaceNDNet(),
-        'desc_inputs': [Tensor(np.random.rand(4,1,1,1).astype(np.float16))]}),
+        'desc_inputs': [Tensor(np.random.rand(4, 1, 1, 1).astype(np.float16))]}),
 ]
 
 test_case_lists = [test_case_array_ops]
diff --git a/tests/ut/python/ops/test_math_ops_check.py b/tests/ut/python/ops/test_math_ops_check.py
index 1c4ab8c76d8..5223899256b 100755
--- a/tests/ut/python/ops/test_math_ops_check.py
+++ b/tests/ut/python/ops/test_math_ops_check.py
@@ -26,7 +26,7 @@ from ....mindspore_test_framework.pipeline.forward.compile_forward \
 
 
 class AssignAddNet(nn.Cell):
-    def __init__(self, ):
+    def __init__(self,):
         super(AssignAddNet, self).__init__()
         self.op = P.AssignAdd()
         self.inputdata = Parameter(Tensor(np.zeros([1]).astype(np.bool_), mstype.bool_), name="assign_add1")
@@ -37,7 +37,7 @@
 
 
 class AssignSubNet(nn.Cell):
-    def __init__(self, ):
+    def __init__(self,):
         super(AssignSubNet, self).__init__()
         self.op = P.AssignSub()
         self.inputdata = Parameter(Tensor(np.zeros([1]).astype(np.bool_), mstype.bool_), name="assign_sub1")
diff --git a/tests/ut/python/ops/test_multitype_ops.py b/tests/ut/python/ops/test_multitype_ops.py
index 54997486e99..33be1adac63 100644
--- a/tests/ut/python/ops/test_multitype_ops.py
+++ b/tests/ut/python/ops/test_multitype_ops.py
@@ -13,8 +13,8 @@
 # limitations under the License.
 # ============================================================================
 """multitype_ops directory test case"""
-import numpy as np
 from functools import partial, reduce
+import numpy as np
 
 import mindspore.nn as nn
 import mindspore.context as context
diff --git a/tests/ut/python/ops/test_ops.py b/tests/ut/python/ops/test_ops.py
index 1d93e3c1bd3..6a04f9e671f 100755
--- a/tests/ut/python/ops/test_ops.py
+++ b/tests/ut/python/ops/test_ops.py
@@ -231,7 +231,7 @@ class ApplyRMSNet(nn.Cell):
         self.apply_rms = P.ApplyRMSProp()
         self.lr = 0.001
         self.rho = 0.0
-        self.momentum= 0.0
+        self.momentum = 0.0
         self.epsilon = 1e-10
         self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
         self.ms = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="ms")
@@ -574,7 +574,8 @@ test_case_math_ops = [
     ('CumSum', {
         'block': CumSumNet(),
         'desc_inputs': [Tensor(np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7], [1, 3, 7, 9]]).astype(np.float32))],
-        'desc_bprop': [Tensor(np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7], [1, 3, 7, 9]]).astype(np.float32))]}),
+        'desc_bprop': [Tensor(np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7],
+                                        [1, 3, 7, 9]]).astype(np.float32))]}),
     ('ReduceSum_3', {
         'block': P.ReduceSum(),
         'desc_const': [0],
diff --git a/tests/ut/python/ops/test_ops_reid.py b/tests/ut/python/ops/test_ops_reid.py
index 741f631ab6f..b3b3e1d4701 100644
--- a/tests/ut/python/ops/test_ops_reid.py
+++ b/tests/ut/python/ops/test_ops_reid.py
@@ -103,7 +103,7 @@ test_case_reid_ops = [
         'desc_bprop': [[128, 64, 112, 112]]}),
     ('PRelu', {
         'block': P.PReLU(),
-        'desc_inputs': [[128, 64, 112, 112], [64, ]],
+        'desc_inputs': [[128, 64, 112, 112], [64,]],
         'desc_bprop': [[128, 64, 112, 112]]}),
     ('Cos', {
         'block': P.Cos(),
@@ -155,11 +155,11 @@ test_case = functools.reduce(lambda x, y: x + y, test_case_lists)
 
 
 test_exec_case = filter(lambda x: 'skip' not in x[1] or
-                        'exec' not in x[1]['skip'], test_case)
+                                  'exec' not in x[1]['skip'], test_case)
 
 test_backward_exec_case = filter(lambda x: 'skip' not in x[1] or
-                                 'backward' not in x[1]['skip'] and 'backward_exec'
-                                 not in x[1]['skip'], test_case)
+                                           'backward' not in x[1]['skip'] and 'backward_exec'
+                                           not in x[1]['skip'], test_case)
 
 
 @mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config)