diff --git a/tests/st/ops/ascend/test_addn.py b/tests/st/ops/ascend/test_addn.py index efca046b546..6d0d5b5be00 100644 --- a/tests/st/ops/ascend/test_addn.py +++ b/tests/st/ops/ascend/test_addn.py @@ -17,9 +17,6 @@ import numpy as np import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor -from mindspore.common.api import ms_function -from mindspore.common.initializer import initializer -from mindspore.common.parameter import Parameter from mindspore.ops import operations as P context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") @@ -47,4 +44,4 @@ def test_net(): expect = 3.0 add = Net() output = add(x, y) - assert (output == expect) + assert output == expect diff --git a/tests/st/ops/ascend/test_aicpu_ops/test_expand_dims.py b/tests/st/ops/ascend/test_aicpu_ops/test_expand_dims.py index 22969303b98..29fb0cf4a93 100644 --- a/tests/st/ops/ascend/test_aicpu_ops/test_expand_dims.py +++ b/tests/st/ops/ascend/test_aicpu_ops/test_expand_dims.py @@ -17,7 +17,6 @@ import numpy as np import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor -from mindspore.common.api import ms_function from mindspore.ops import operations as P context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend") @@ -37,7 +36,7 @@ def test_net_bool(): net = Net() output = net(Tensor(x), -1) print(output.asnumpy()) - assert (np.all(output.asnumpy() == np.expand_dims(x, -1))) + assert np.all(output.asnumpy() == np.expand_dims(x, -1)) def test_net_int8(): @@ -45,7 +44,7 @@ def test_net_int8(): net = Net() output = net(Tensor(x), -1) print(output.asnumpy()) - assert (np.all(output.asnumpy() == np.expand_dims(x, -1))) + assert np.all(output.asnumpy() == np.expand_dims(x, -1)) def test_net_uint8(): @@ -53,7 +52,7 @@ def test_net_uint8(): net = Net() output = net(Tensor(x), -1) print(output.asnumpy()) - assert (np.all(output.asnumpy() == np.expand_dims(x, -1))) + assert np.all(output.asnumpy() == np.expand_dims(x, -1)) def test_net_int16(): @@ -61,7 +60,7 @@ def test_net_int16(): net = Net() output = net(Tensor(x), -1) print(output.asnumpy()) - assert (np.all(output.asnumpy() == np.expand_dims(x, -1))) + assert np.all(output.asnumpy() == np.expand_dims(x, -1)) def test_net_uint16(): @@ -69,7 +68,7 @@ def test_net_uint16(): net = Net() output = net(Tensor(x), -1) print(output.asnumpy()) - assert (np.all(output.asnumpy() == np.expand_dims(x, -1))) + assert np.all(output.asnumpy() == np.expand_dims(x, -1)) def test_net_int32(): @@ -77,7 +76,7 @@ def test_net_int32(): net = Net() output = net(Tensor(x), -1) print(output.asnumpy()) - assert (np.all(output.asnumpy() == np.expand_dims(x, -1))) + assert np.all(output.asnumpy() == np.expand_dims(x, -1)) def test_net_uint32(): @@ -85,7 +84,7 @@ def test_net_uint32(): net = Net() output = net(Tensor(x), -1) print(output.asnumpy()) - assert (np.all(output.asnumpy() == np.expand_dims(x, -1))) + assert np.all(output.asnumpy() == np.expand_dims(x, -1)) def test_net_int64(): @@ -93,7 +92,7 @@ def test_net_int64(): net = Net() output = net(Tensor(x), -1) print(output.asnumpy()) - assert (np.all(output.asnumpy() == np.expand_dims(x, -1))) + assert np.all(output.asnumpy() == np.expand_dims(x, -1)) def test_net_uint64(): @@ -101,7 +100,7 @@ def test_net_uint64(): net = Net() output = net(Tensor(x), -1) print(output.asnumpy()) - assert (np.all(output.asnumpy() == np.expand_dims(x, -1))) + assert np.all(output.asnumpy() == np.expand_dims(x, -1)) def test_net_float16(): @@ -109,7 +108,7 @@ def 
test_net_float16(): net = Net() output = net(Tensor(x), -1) print(output.asnumpy()) - assert (np.all(output.asnumpy() == np.expand_dims(x, -1))) + assert np.all(output.asnumpy() == np.expand_dims(x, -1)) def test_net_float32(): @@ -117,7 +116,7 @@ def test_net_float32(): net = Net() output = net(Tensor(x), -1) print(output.asnumpy()) - assert (np.all(output.asnumpy() == np.expand_dims(x, -1))) + assert np.all(output.asnumpy() == np.expand_dims(x, -1)) def test_net_float64(): @@ -125,4 +124,4 @@ def test_net_float64(): net = Net() output = net(Tensor(x), -1) print(output.asnumpy()) - assert (np.all(output.asnumpy() == np.expand_dims(x, -1))) + assert np.all(output.asnumpy() == np.expand_dims(x, -1)) diff --git a/tests/st/ops/ascend/test_aicpu_ops/test_flatten.py b/tests/st/ops/ascend/test_aicpu_ops/test_flatten.py index 02f591702e2..3d398db6353 100644 --- a/tests/st/ops/ascend/test_aicpu_ops/test_flatten.py +++ b/tests/st/ops/ascend/test_aicpu_ops/test_flatten.py @@ -36,7 +36,7 @@ def test_net_int8(): net = Net() output = net(Tensor(x)) print(output.asnumpy()) - assert (np.all(output.asnumpy() == x.flatten())) + assert np.all(output.asnumpy() == x.flatten()) def test_net_uint8(): @@ -44,7 +44,7 @@ def test_net_uint8(): net = Net() output = net(Tensor(x)) print(output.asnumpy()) - assert (np.all(output.asnumpy() == x.flatten())) + assert np.all(output.asnumpy() == x.flatten()) def test_net_int16(): @@ -52,7 +52,7 @@ def test_net_int16(): net = Net() output = net(Tensor(x)) print(output.asnumpy()) - assert (np.all(output.asnumpy() == x.flatten())) + assert np.all(output.asnumpy() == x.flatten()) def test_net_uint16(): @@ -60,7 +60,7 @@ def test_net_uint16(): net = Net() output = net(Tensor(x)) print(output.asnumpy()) - assert (np.all(output.asnumpy() == x.flatten())) + assert np.all(output.asnumpy() == x.flatten()) def test_net_int32(): @@ -68,7 +68,7 @@ def test_net_int32(): net = Net() output = net(Tensor(x)) print(output.asnumpy()) - assert (np.all(output.asnumpy() == x.flatten())) + assert np.all(output.asnumpy() == x.flatten()) def test_net_uint32(): @@ -76,7 +76,7 @@ def test_net_uint32(): net = Net() output = net(Tensor(x)) print(output.asnumpy()) - assert (np.all(output.asnumpy() == x.flatten())) + assert np.all(output.asnumpy() == x.flatten()) def test_net_int64(): @@ -84,7 +84,7 @@ def test_net_int64(): net = Net() output = net(Tensor(x)) print(output.asnumpy()) - assert (np.all(output.asnumpy() == x.flatten())) + assert np.all(output.asnumpy() == x.flatten()) def test_net_uint64(): @@ -92,7 +92,7 @@ def test_net_uint64(): net = Net() output = net(Tensor(x)) print(output.asnumpy()) - assert (np.all(output.asnumpy() == x.flatten())) + assert np.all(output.asnumpy() == x.flatten()) def test_net_float16(): @@ -100,7 +100,7 @@ def test_net_float16(): net = Net() output = net(Tensor(x)) print(output.asnumpy()) - assert (np.all(output.asnumpy() == x.flatten())) + assert np.all(output.asnumpy() == x.flatten()) def test_net_float32(): @@ -108,4 +108,4 @@ def test_net_float32(): net = Net() output = net(Tensor(x)) print(output.asnumpy()) - assert (np.all(output.asnumpy() == x.flatten())) + assert np.all(output.asnumpy() == x.flatten()) diff --git a/tests/st/ops/ascend/test_aicpu_ops/test_is_finite.py b/tests/st/ops/ascend/test_aicpu_ops/test_is_finite.py index 2dcff2013cc..f07746705bd 100644 --- a/tests/st/ops/ascend/test_aicpu_ops/test_is_finite.py +++ b/tests/st/ops/ascend/test_aicpu_ops/test_is_finite.py @@ -17,7 +17,6 @@ import numpy as np import mindspore.context as context import 
mindspore.nn as nn from mindspore import Tensor -from mindspore.common.api import ms_function from mindspore.ops import operations as P context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend") @@ -37,7 +36,7 @@ def test_net_bool(): net = Net() output = net(Tensor(x)) print(output.asnumpy()) - assert (np.all(output.asnumpy() == np.isfinite(x))) + assert np.all(output.asnumpy() == np.isfinite(x)) def test_net_int8(): @@ -45,7 +44,7 @@ def test_net_int8(): net = Net() output = net(Tensor(x)) print(output.asnumpy()) - assert (np.all(output.asnumpy() == np.isfinite(x))) + assert np.all(output.asnumpy() == np.isfinite(x)) def test_net_uint8(): @@ -53,7 +52,7 @@ def test_net_uint8(): net = Net() output = net(Tensor(x)) print(output.asnumpy()) - assert (np.all(output.asnumpy() == np.isfinite(x))) + assert np.all(output.asnumpy() == np.isfinite(x)) def test_net_int16(): @@ -61,7 +60,7 @@ def test_net_int16(): net = Net() output = net(Tensor(x)) print(output.asnumpy()) - assert (np.all(output.asnumpy() == np.isfinite(x))) + assert np.all(output.asnumpy() == np.isfinite(x)) def test_net_uint16(): @@ -69,7 +68,7 @@ def test_net_uint16(): net = Net() output = net(Tensor(x)) print(output.asnumpy()) - assert (np.all(output.asnumpy() == np.isfinite(x))) + assert np.all(output.asnumpy() == np.isfinite(x)) def test_net_int32(): @@ -77,7 +76,7 @@ def test_net_int32(): net = Net() output = net(Tensor(x)) print(output.asnumpy()) - assert (np.all(output.asnumpy() == np.isfinite(x))) + assert np.all(output.asnumpy() == np.isfinite(x)) def test_net_uint32(): @@ -85,7 +84,7 @@ def test_net_uint32(): net = Net() output = net(Tensor(x)) print(output.asnumpy()) - assert (np.all(output.asnumpy() == np.isfinite(x))) + assert np.all(output.asnumpy() == np.isfinite(x)) def test_net_int64(): @@ -93,7 +92,7 @@ def test_net_int64(): net = Net() output = net(Tensor(x)) print(output.asnumpy()) - assert (np.all(output.asnumpy() == np.isfinite(x))) + assert np.all(output.asnumpy() == np.isfinite(x)) def test_net_uint64(): @@ -101,7 +100,7 @@ def test_net_uint64(): net = Net() output = net(Tensor(x)) print(output.asnumpy()) - assert (np.all(output.asnumpy() == np.isfinite(x))) + assert np.all(output.asnumpy() == np.isfinite(x)) def test_net_float16(): @@ -109,7 +108,7 @@ def test_net_float16(): net = Net() output = net(Tensor(x)) print(output.asnumpy()) - assert (np.all(output.asnumpy() == np.isfinite(x))) + assert np.all(output.asnumpy() == np.isfinite(x)) def test_net_float32(): @@ -117,7 +116,7 @@ def test_net_float32(): net = Net() output = net(Tensor(x)) print(output.asnumpy()) - assert (np.all(output.asnumpy() == np.isfinite(x))) + assert np.all(output.asnumpy() == np.isfinite(x)) def test_net_float64(): @@ -125,4 +124,4 @@ def test_net_float64(): net = Net() output = net(Tensor(x)) print(output.asnumpy()) - assert (np.all(output.asnumpy() == np.isfinite(x))) + assert np.all(output.asnumpy() == np.isfinite(x)) diff --git a/tests/st/ops/ascend/test_aicpu_ops/test_reshape.py b/tests/st/ops/ascend/test_aicpu_ops/test_reshape.py index 0e62404f9dc..313c4ee2709 100644 --- a/tests/st/ops/ascend/test_aicpu_ops/test_reshape.py +++ b/tests/st/ops/ascend/test_aicpu_ops/test_reshape.py @@ -17,7 +17,6 @@ import numpy as np import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor -from mindspore.common.api import ms_function from mindspore.ops import operations as P context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend") @@ -37,7 +36,7 @@ def test_net_bool(): net = Net() 
output = net(Tensor(x)) print(output.asnumpy()) - assert (np.all(output.asnumpy() == np.reshape(x, (4, 4)))) + assert np.all(output.asnumpy() == np.reshape(x, (4, 4))) def test_net_int8(): @@ -45,7 +44,7 @@ def test_net_int8(): net = Net() output = net(Tensor(x)) print(output.asnumpy()) - assert (np.all(output.asnumpy() == np.reshape(x, (4, 4)))) + assert np.all(output.asnumpy() == np.reshape(x, (4, 4))) def test_net_uint8(): @@ -53,7 +52,7 @@ def test_net_uint8(): net = Net() output = net(Tensor(x)) print(output.asnumpy()) - assert (np.all(output.asnumpy() == np.reshape(x, (4, 4)))) + assert np.all(output.asnumpy() == np.reshape(x, (4, 4))) def test_net_int16(): @@ -61,7 +60,7 @@ def test_net_int16(): net = Net() output = net(Tensor(x)) print(output.asnumpy()) - assert (np.all(output.asnumpy() == np.reshape(x, (4, 4)))) + assert np.all(output.asnumpy() == np.reshape(x, (4, 4))) def test_net_uint16(): @@ -69,7 +68,7 @@ def test_net_uint16(): net = Net() output = net(Tensor(x)) print(output.asnumpy()) - assert (np.all(output.asnumpy() == np.reshape(x, (4, 4)))) + assert np.all(output.asnumpy() == np.reshape(x, (4, 4))) def test_net_int32(): @@ -77,7 +76,7 @@ def test_net_int32(): net = Net() output = net(Tensor(x)) print(output.asnumpy()) - assert (np.all(output.asnumpy() == np.reshape(x, (4, 4)))) + assert np.all(output.asnumpy() == np.reshape(x, (4, 4))) def test_net_uint32(): @@ -85,7 +84,7 @@ def test_net_uint32(): net = Net() output = net(Tensor(x)) print(output.asnumpy()) - assert (np.all(output.asnumpy() == np.reshape(x, (4, 4)))) + assert np.all(output.asnumpy() == np.reshape(x, (4, 4))) def test_net_int64(): @@ -93,7 +92,7 @@ def test_net_int64(): net = Net() output = net(Tensor(x)) print(output.asnumpy()) - assert (np.all(output.asnumpy() == np.reshape(x, (4, 4)))) + assert np.all(output.asnumpy() == np.reshape(x, (4, 4))) def test_net_uint64(): @@ -101,7 +100,7 @@ def test_net_uint64(): net = Net() output = net(Tensor(x)) print(output.asnumpy()) - assert (np.all(output.asnumpy() == np.reshape(x, (4, 4)))) + assert np.all(output.asnumpy() == np.reshape(x, (4, 4))) def test_net_float16(): @@ -109,7 +108,7 @@ def test_net_float16(): net = Net() output = net(Tensor(x)) print(output.asnumpy()) - assert (np.all(output.asnumpy() == np.reshape(x, (4, 4)))) + assert np.all(output.asnumpy() == np.reshape(x, (4, 4))) def test_net_float32(): @@ -117,7 +116,7 @@ def test_net_float32(): net = Net() output = net(Tensor(x)) print(output.asnumpy()) - assert (np.all(output.asnumpy() == np.reshape(x, (4, 4)))) + assert np.all(output.asnumpy() == np.reshape(x, (4, 4))) def test_net_float64(): @@ -125,4 +124,4 @@ def test_net_float64(): net = Net() output = net(Tensor(x)) print(output.asnumpy()) - assert (np.all(output.asnumpy() == np.reshape(x, (4, 4)))) + assert np.all(output.asnumpy() == np.reshape(x, (4, 4))) diff --git a/tests/st/ops/ascend/test_aicpu_ops/test_squeeze.py b/tests/st/ops/ascend/test_aicpu_ops/test_squeeze.py index 9af7275f86d..96c16cd3d2a 100644 --- a/tests/st/ops/ascend/test_aicpu_ops/test_squeeze.py +++ b/tests/st/ops/ascend/test_aicpu_ops/test_squeeze.py @@ -36,7 +36,7 @@ def test_net_bool(): net = Net() output = net(Tensor(x)) print(output.asnumpy()) - assert (np.all(output.asnumpy() == x.squeeze())) + assert np.all(output.asnumpy() == x.squeeze()) def test_net_int8(): @@ -44,7 +44,7 @@ def test_net_int8(): net = Net() output = net(Tensor(x)) print(output.asnumpy()) - assert (np.all(output.asnumpy() == x.squeeze())) + assert np.all(output.asnumpy() == x.squeeze()) def 
test_net_uint8(): @@ -52,7 +52,7 @@ def test_net_uint8(): net = Net() output = net(Tensor(x)) print(output.asnumpy()) - assert (np.all(output.asnumpy() == x.squeeze())) + assert np.all(output.asnumpy() == x.squeeze()) def test_net_int16(): @@ -60,7 +60,7 @@ def test_net_int16(): net = Net() output = net(Tensor(x)) print(output.asnumpy()) - assert (np.all(output.asnumpy() == x.squeeze())) + assert np.all(output.asnumpy() == x.squeeze()) def test_net_uint16(): @@ -68,7 +68,7 @@ def test_net_uint16(): net = Net() output = net(Tensor(x)) print(output.asnumpy()) - assert (np.all(output.asnumpy() == x.squeeze())) + assert np.all(output.asnumpy() == x.squeeze()) def test_net_int32(): @@ -76,7 +76,7 @@ def test_net_int32(): net = Net() output = net(Tensor(x)) print(output.asnumpy()) - assert (np.all(output.asnumpy() == x.squeeze())) + assert np.all(output.asnumpy() == x.squeeze()) def test_net_uint32(): @@ -84,7 +84,7 @@ def test_net_uint32(): net = Net() output = net(Tensor(x)) print(output.asnumpy()) - assert (np.all(output.asnumpy() == x.squeeze())) + assert np.all(output.asnumpy() == x.squeeze()) def test_net_int64(): @@ -92,7 +92,7 @@ def test_net_int64(): net = Net() output = net(Tensor(x)) print(output.asnumpy()) - assert (np.all(output.asnumpy() == x.squeeze())) + assert np.all(output.asnumpy() == x.squeeze()) def test_net_uint64(): @@ -100,7 +100,7 @@ def test_net_uint64(): net = Net() output = net(Tensor(x)) print(output.asnumpy()) - assert (np.all(output.asnumpy() == x.squeeze())) + assert np.all(output.asnumpy() == x.squeeze()) def test_net_float16(): @@ -108,7 +108,7 @@ def test_net_float16(): net = Net() output = net(Tensor(x)) print(output.asnumpy()) - assert (np.all(output.asnumpy() == x.squeeze())) + assert np.all(output.asnumpy() == x.squeeze()) def test_net_float32(): @@ -116,7 +116,7 @@ def test_net_float32(): net = Net() output = net(Tensor(x)) print(output.asnumpy()) - assert (np.all(output.asnumpy() == x.squeeze())) + assert np.all(output.asnumpy() == x.squeeze()) def test_net_float64(): @@ -124,4 +124,4 @@ def test_net_float64(): net = Net() output = net(Tensor(x)) print(output.asnumpy()) - assert (np.all(output.asnumpy() == x.squeeze())) + assert np.all(output.asnumpy() == x.squeeze()) diff --git a/tests/st/ops/ascend/test_apply_momentum.py b/tests/st/ops/ascend/test_apply_momentum.py index 603dc23cfea..e85f2c7bb74 100644 --- a/tests/st/ops/ascend/test_apply_momentum.py +++ b/tests/st/ops/ascend/test_apply_momentum.py @@ -12,12 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================ -import numpy as np import mindspore.context as context import mindspore.nn as nn -from mindspore import Tensor -from mindspore.common.api import ms_function from mindspore.common.initializer import initializer from mindspore.common.parameter import Parameter from mindspore.ops import operations as P @@ -34,11 +31,11 @@ class Net(nn.Cell): self.accumulation = Parameter(initializer( 'normal', [2, 3, 3, 4]), name='accumulation') self.learning_rate = Parameter(initializer( - 'normal', [1, ]), name='learning_rate') + 'normal', [1,]), name='learning_rate') self.gradient = Parameter(initializer( 'normal', [2, 3, 3, 4]), name='gradient') self.momentum = Parameter(initializer( - 'normal', [1, ]), name='momentum') + 'normal', [1,]), name='momentum') def construct(self): return self.apply_momentum(self.variable, self.accumulation, self.learning_rate, self.gradient, self.momentum) diff --git a/tests/st/ops/ascend/test_biasAddGrad.py b/tests/st/ops/ascend/test_biasAddGrad.py index 90282e22071..5d1e9668648 100644 --- a/tests/st/ops/ascend/test_biasAddGrad.py +++ b/tests/st/ops/ascend/test_biasAddGrad.py @@ -18,9 +18,6 @@ import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor from mindspore.common.api import ms_function -from mindspore.common.initializer import initializer -from mindspore.common.parameter import Parameter -from mindspore.ops import operations as P from mindspore.ops.operations import _grad_ops as G context.set_context(device_target="Ascend") diff --git a/tests/st/ops/ascend/test_bias_add_grad.py b/tests/st/ops/ascend/test_bias_add_grad.py index 2b95b0a2d28..ba5ef663ff8 100644 --- a/tests/st/ops/ascend/test_bias_add_grad.py +++ b/tests/st/ops/ascend/test_bias_add_grad.py @@ -16,11 +16,7 @@ import numpy as np import mindspore.context as context import mindspore.nn as nn -from mindspore import Tensor from mindspore.common.api import ms_function -from mindspore.common.initializer import initializer -from mindspore.common.parameter import Parameter -from mindspore.ops import operations as P from mindspore.ops.operations import _grad_ops as G context.set_context(device_target="Ascend") diff --git a/tests/st/ops/ascend/test_conv2dGradFilter.py b/tests/st/ops/ascend/test_conv2dGradFilter.py index 293cc0e65e0..96c878bd467 100644 --- a/tests/st/ops/ascend/test_conv2dGradFilter.py +++ b/tests/st/ops/ascend/test_conv2dGradFilter.py @@ -18,7 +18,6 @@ import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor from mindspore.common.api import ms_function -from mindspore.common.initializer import initializer from mindspore.common.parameter import Parameter from mindspore.ops import operations as P from mindspore.ops.operations import _grad_ops as G diff --git a/tests/st/ops/ascend/test_conv_grad.py b/tests/st/ops/ascend/test_conv_grad.py index efc9cd9dea8..e24f218087d 100644 --- a/tests/st/ops/ascend/test_conv_grad.py +++ b/tests/st/ops/ascend/test_conv_grad.py @@ -33,8 +33,8 @@ class Grad(nn.Cell): self.network = network @ms_function - def construct(self, input, output_grad): - return self.grad(self.network)(input, output_grad) + def construct(self, input_, output_grad): + return self.grad(self.network)(input_, output_grad) class Net(nn.Cell): diff --git a/tests/st/ops/ascend/test_dense.py b/tests/st/ops/ascend/test_dense.py index 48623b0df01..c4916d53cd9 100644 --- a/tests/st/ops/ascend/test_dense.py +++ b/tests/st/ops/ascend/test_dense.py @@ -18,9 +18,6 
@@ import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor from mindspore.common.api import ms_function -from mindspore.common.initializer import initializer -from mindspore.common.parameter import Parameter -from mindspore.ops import operations as P context.set_context(device_target="Ascend") diff --git a/tests/st/ops/ascend/test_dense_grad.py b/tests/st/ops/ascend/test_dense_grad.py index d5f88c1c21f..7a529144ee4 100644 --- a/tests/st/ops/ascend/test_dense_grad.py +++ b/tests/st/ops/ascend/test_dense_grad.py @@ -18,9 +18,6 @@ import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor from mindspore.common.api import ms_function -from mindspore.common.initializer import initializer -from mindspore.common.parameter import Parameter -from mindspore.ops import operations as P from mindspore.ops.composite import GradOperation context.set_context(device_target="Ascend") @@ -33,8 +30,8 @@ class Grad(nn.Cell): self.network = network @ms_function - def construct(self, input, output_grad): - return self.grad(self.network)(input, output_grad) + def construct(self, input_, output_grad): + return self.grad(self.network)(input_, output_grad) class Net(nn.Cell): diff --git a/tests/st/ops/ascend/test_full_connection.py b/tests/st/ops/ascend/test_full_connection.py index ca8d62eacb1..51554ac2fea 100644 --- a/tests/st/ops/ascend/test_full_connection.py +++ b/tests/st/ops/ascend/test_full_connection.py @@ -12,11 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ -import numpy as np import mindspore.context as context import mindspore.nn as nn -from mindspore import Tensor from mindspore.common.api import ms_function from mindspore.ops import operations as P diff --git a/tests/st/ops/ascend/test_fused_batchnorm.py b/tests/st/ops/ascend/test_fused_batchnorm.py index bf0b7ffd4fa..59e2df67deb 100644 --- a/tests/st/ops/ascend/test_fused_batchnorm.py +++ b/tests/st/ops/ascend/test_fused_batchnorm.py @@ -17,7 +17,6 @@ import numpy as np import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor -from mindspore.common.api import ms_function from mindspore.common.initializer import initializer from mindspore.common.parameter import Parameter from mindspore.ops import operations as P diff --git a/tests/st/ops/ascend/test_fused_batchnorm_grad.py b/tests/st/ops/ascend/test_fused_batchnorm_grad.py index 47adc2d0659..a8d4190e097 100644 --- a/tests/st/ops/ascend/test_fused_batchnorm_grad.py +++ b/tests/st/ops/ascend/test_fused_batchnorm_grad.py @@ -34,8 +34,8 @@ class Grad(nn.Cell): self.network = network @ms_function - def construct(self, input, output_grad): - return self.grad(self.network)(input, output_grad) + def construct(self, input_, output_grad): + return self.grad(self.network)(input_, output_grad) class Net(nn.Cell): diff --git a/tests/st/ops/ascend/test_image_gradients.py b/tests/st/ops/ascend/test_image_gradients.py index 8b9eb2dceac..eb647e34943 100644 --- a/tests/st/ops/ascend/test_image_gradients.py +++ b/tests/st/ops/ascend/test_image_gradients.py @@ -39,8 +39,8 @@ def test_image_gradients(): expected_dx = np.array([[[[1, 0], [1, 0]]]]).astype(np.int32) net = Net() dy, dx = net(image) - assert np.any(dx.asnumpy() - expected_dx) == False - assert np.any(dy.asnumpy() - expected_dy) == False + assert not np.any(dx.asnumpy() - expected_dx) + assert not np.any(dy.asnumpy() - 
expected_dy) def test_image_gradients_multi_channel_depth(): @@ -61,5 +61,5 @@ def test_image_gradients_multi_channel_depth(): net = Net() dy, dx = net(image) - assert np.any(dx.asnumpy() - expected_dx.asnumpy()) == False - assert np.any(dy.asnumpy() - expected_dy.asnumpy()) == False + assert not np.any(dx.asnumpy() - expected_dx.asnumpy()) + assert not np.any(dy.asnumpy() - expected_dy.asnumpy()) diff --git a/tests/st/ops/ascend/test_matmul.py b/tests/st/ops/ascend/test_matmul.py index d0fecfed23c..3981e59f74a 100644 --- a/tests/st/ops/ascend/test_matmul.py +++ b/tests/st/ops/ascend/test_matmul.py @@ -18,8 +18,6 @@ import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor from mindspore.common.api import ms_function -from mindspore.common.initializer import initializer -from mindspore.common.parameter import Parameter from mindspore.ops import operations as P context.set_context(device_target="Ascend") diff --git a/tests/st/ops/ascend/test_maxpool_grad.py b/tests/st/ops/ascend/test_maxpool_grad.py index 810d544bb4a..9af45111203 100644 --- a/tests/st/ops/ascend/test_maxpool_grad.py +++ b/tests/st/ops/ascend/test_maxpool_grad.py @@ -31,8 +31,8 @@ class Grad(nn.Cell): self.network = network @ms_function - def construct(self, input, output_grad): - return self.grad(self.network)(input, output_grad) + def construct(self, input_, output_grad): + return self.grad(self.network)(input_, output_grad) class Net(nn.Cell): diff --git a/tests/st/ops/ascend/test_maxpool_with_argmax.py b/tests/st/ops/ascend/test_maxpool_with_argmax.py index abd1c4005f8..efb9a16234b 100644 --- a/tests/st/ops/ascend/test_maxpool_with_argmax.py +++ b/tests/st/ops/ascend/test_maxpool_with_argmax.py @@ -16,7 +16,6 @@ import numpy as np import mindspore.context as context import mindspore.nn as nn -from mindspore import Tensor from mindspore.common.api import ms_function from mindspore.common.initializer import initializer from mindspore.common.parameter import Parameter @@ -43,7 +42,6 @@ class Net(nn.Cell): def test_net(): - x = np.random.randn(1, 64, 112, 112).astype(np.float32) maxpool = Net() output = maxpool() print("***********output output*********") diff --git a/tests/st/ops/ascend/test_maxpool_with_argmax_grad.py b/tests/st/ops/ascend/test_maxpool_with_argmax_grad.py index 0d21ee82054..8d0d5155803 100644 --- a/tests/st/ops/ascend/test_maxpool_with_argmax_grad.py +++ b/tests/st/ops/ascend/test_maxpool_with_argmax_grad.py @@ -18,8 +18,6 @@ import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor from mindspore.common.api import ms_function -from mindspore.common.initializer import initializer -from mindspore.common.parameter import Parameter from mindspore.ops import operations as P from mindspore.ops.composite import GradOperation @@ -33,8 +31,8 @@ class Grad(nn.Cell): self.network = network @ms_function - def construct(self, input, output_grad): - return self.grad(self.network)(input, output_grad) + def construct(self, input_, output_grad): + return self.grad(self.network)(input_, output_grad) class Net(nn.Cell): diff --git a/tests/st/ops/ascend/test_relu.py b/tests/st/ops/ascend/test_relu.py index 0da6347fa3b..c22f22d0661 100644 --- a/tests/st/ops/ascend/test_relu.py +++ b/tests/st/ops/ascend/test_relu.py @@ -18,8 +18,6 @@ import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor from mindspore.common.api import ms_function -from mindspore.common.initializer import initializer -from mindspore.common.parameter import 
Parameter from mindspore.ops import operations as P context.set_context(device_target="Ascend") diff --git a/tests/st/ops/ascend/test_relu_grad.py b/tests/st/ops/ascend/test_relu_grad.py index 72b70bddb89..4ebc17d507f 100644 --- a/tests/st/ops/ascend/test_relu_grad.py +++ b/tests/st/ops/ascend/test_relu_grad.py @@ -18,8 +18,6 @@ import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor from mindspore.common.api import ms_function -from mindspore.common.initializer import initializer -from mindspore.common.parameter import Parameter from mindspore.ops import operations as P from mindspore.ops.composite import GradOperation @@ -33,8 +31,8 @@ class Grad(nn.Cell): self.network = network @ms_function - def construct(self, input, output_grad): - return self.grad(self.network)(input, output_grad) + def construct(self, input_, output_grad): + return self.grad(self.network)(input_, output_grad) class Net(nn.Cell): diff --git a/tests/st/ops/ascend/test_simplemean.py b/tests/st/ops/ascend/test_simplemean.py index c61ed6300e1..9af5c6c3320 100644 --- a/tests/st/ops/ascend/test_simplemean.py +++ b/tests/st/ops/ascend/test_simplemean.py @@ -18,8 +18,6 @@ import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor from mindspore.common.api import ms_function -from mindspore.common.initializer import initializer -from mindspore.common.parameter import Parameter from mindspore.ops import operations as P context.set_context(device_target="Ascend") diff --git a/tests/st/ops/ascend/test_simplemean_grad.py b/tests/st/ops/ascend/test_simplemean_grad.py index c7116f47299..2704c1434fb 100644 --- a/tests/st/ops/ascend/test_simplemean_grad.py +++ b/tests/st/ops/ascend/test_simplemean_grad.py @@ -18,8 +18,6 @@ import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor from mindspore.common.api import ms_function -from mindspore.common.initializer import initializer -from mindspore.common.parameter import Parameter from mindspore.ops import operations as P from mindspore.ops.composite import GradOperation @@ -33,8 +31,8 @@ class Grad(nn.Cell): self.network = network @ms_function - def construct(self, input, output_grad): - return self.grad(self.network)(input, output_grad) + def construct(self, input_, output_grad): + return self.grad(self.network)(input_, output_grad) class Net(nn.Cell): diff --git a/tests/st/ops/ascend/test_sparseSoftmaxCrossEntropyWithLogits.py b/tests/st/ops/ascend/test_sparseSoftmaxCrossEntropyWithLogits.py index ced2c5492ef..3f47e03fbd4 100644 --- a/tests/st/ops/ascend/test_sparseSoftmaxCrossEntropyWithLogits.py +++ b/tests/st/ops/ascend/test_sparseSoftmaxCrossEntropyWithLogits.py @@ -59,7 +59,7 @@ def test_net(): '''Compare Numpy with MS type is float32''' labels_shape = (32,) logits_shape = [32, 1001] - labels, logits, loss_np, bp_np = np_sparse_softmax_cross_entropy_with_logits(labels_shape, logits_shape, np.float32) + labels, logits, loss_np, _ = np_sparse_softmax_cross_entropy_with_logits(labels_shape, logits_shape, np.float32) expect = loss_np SparseSoftmaxCrossEntropyWithLogits = Net() loss_me = SparseSoftmaxCrossEntropyWithLogits(Tensor(logits), Tensor(labels)) diff --git a/tests/st/ops/ascend/test_tbe_ops/test_AssignAdd.py b/tests/st/ops/ascend/test_tbe_ops/test_AssignAdd.py index a6f09b8bd62..65b682de2ca 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_AssignAdd.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_AssignAdd.py @@ -17,7 +17,6 @@ import numpy as np import mindspore.context as 
context import mindspore.nn as nn from mindspore import Tensor -from mindspore.common.api import ms_function from mindspore.common.initializer import initializer from mindspore.common.parameter import Parameter from mindspore.ops import operations as P diff --git a/tests/st/ops/ascend/test_tbe_ops/test_AssignSub.py b/tests/st/ops/ascend/test_tbe_ops/test_AssignSub.py index 6f01e9deea4..7d3a403d656 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_AssignSub.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_AssignSub.py @@ -17,7 +17,6 @@ import numpy as np import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor -from mindspore.common.api import ms_function from mindspore.common.initializer import initializer from mindspore.common.parameter import Parameter from mindspore.ops import operations as P diff --git a/tests/st/ops/ascend/test_tbe_ops/test_ReduceMean.py b/tests/st/ops/ascend/test_tbe_ops/test_ReduceMean.py index f6cfc6863e4..9dc8b15fc92 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_ReduceMean.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_ReduceMean.py @@ -18,8 +18,6 @@ import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor from mindspore.common.api import ms_function -from mindspore.common.initializer import initializer -from mindspore.common.parameter import Parameter from mindspore.ops import operations as P context.set_context(device_target="Ascend") diff --git a/tests/st/ops/ascend/test_tbe_ops/test_add.py b/tests/st/ops/ascend/test_tbe_ops/test_add.py index 6ca1aaf1e3e..af33e9c003a 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_add.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_add.py @@ -17,9 +17,6 @@ import numpy as np import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor -from mindspore.common.api import ms_function -from mindspore.common.initializer import initializer -from mindspore.common.parameter import Parameter from mindspore.ops import operations as P diff --git a/tests/st/ops/ascend/test_tbe_ops/test_addn.py b/tests/st/ops/ascend/test_tbe_ops/test_addn.py index 793fff1f009..9b2de51afaf 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_addn.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_addn.py @@ -17,9 +17,6 @@ import numpy as np import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor -from mindspore.common.api import ms_function -from mindspore.common.initializer import initializer -from mindspore.common.parameter import Parameter from mindspore.ops import operations as P context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") diff --git a/tests/st/ops/ascend/test_tbe_ops/test_apply_adam.py b/tests/st/ops/ascend/test_tbe_ops/test_apply_adam.py index 8d256384309..e8c61d70a9d 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_apply_adam.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_apply_adam.py @@ -34,7 +34,7 @@ class Adam: self.epsilon = epsilon def train_mindspore_impl(self): - input = Tensor(np.random.randn(self.batch_num, self.input_channels).astype(np.float32)) + input_ = Tensor(np.random.randn(self.batch_num, self.input_channels).astype(np.float32)) weight_np = Tensor(np.random.randn(self.output_channels, self.input_channels).astype(np.float32)) bias = Tensor(np.random.randn(self.output_channels).astype(np.float32)) @@ -60,9 +60,9 @@ class Adam: train_network.set_train() print('MS Initialized!') - for i in range(self.epoch): - train_network(input, label) - output = ms_dense(input) + for _ in 
range(self.epoch): + train_network(input_, label) + output = ms_dense(input_) print("===============output=================", output) return output.asnumpy() diff --git a/tests/st/ops/ascend/test_tbe_ops/test_apply_momentum.py b/tests/st/ops/ascend/test_tbe_ops/test_apply_momentum.py index 9e1aa6f0aae..d078b277093 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_apply_momentum.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_apply_momentum.py @@ -12,12 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ -import numpy as np import mindspore.context as context import mindspore.nn as nn -from mindspore import Tensor -from mindspore.common.api import ms_function from mindspore.common.initializer import initializer from mindspore.common.parameter import Parameter from mindspore.ops import operations as P @@ -32,11 +29,11 @@ class Net(nn.Cell): self.accumulation = Parameter(initializer( 'normal', [2, 3, 3, 4]), name='accumulation') self.learning_rate = Parameter(initializer( - 'normal', [1, ]), name='learning_rate') + 'normal', [1,]), name='learning_rate') self.gradient = Parameter(initializer( 'normal', [2, 3, 3, 4]), name='gradient') self.momentum = Parameter(initializer( - 'normal', [1, ]), name='momentum') + 'normal', [1,]), name='momentum') def construct(self): return self.apply_momentum(self.variable, self.accumulation, self.learning_rate, self.gradient, self.momentum) diff --git a/tests/st/ops/ascend/test_tbe_ops/test_batchmatmul.py b/tests/st/ops/ascend/test_tbe_ops/test_batchmatmul.py index ee2af00d3f1..ba85da20ca5 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_batchmatmul.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_batchmatmul.py @@ -13,7 +13,6 @@ # limitations under the License. 
# ============================================================================ import numpy as np -import pytest from mindspore import context from mindspore.common.tensor import Tensor @@ -38,7 +37,7 @@ def tf_me_batchmatmul(inputa, inputb): net = Net() net.set_train() model = Model(net) - out_me = model.predict(Tensor(inputa), Tensor(inputb)) + model.predict(Tensor(inputa), Tensor(inputb)) def test_batchmatmul_normal_shape1(): diff --git a/tests/st/ops/ascend/test_tbe_ops/test_batchnorm.py b/tests/st/ops/ascend/test_tbe_ops/test_batchnorm.py index 9a185501c4d..44148cafb0a 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_batchnorm.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_batchnorm.py @@ -17,7 +17,6 @@ import numpy as np import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor -from mindspore.common.api import ms_function from mindspore.common.initializer import initializer from mindspore.common.parameter import Parameter from mindspore.ops import operations as P diff --git a/tests/st/ops/ascend/test_tbe_ops/test_batchnorm_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_batchnorm_grad.py index 394670790c8..0340f9e6be0 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_batchnorm_grad.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_batchnorm_grad.py @@ -34,8 +34,8 @@ class Grad(nn.Cell): self.network = network @ms_function - def construct(self, input, output_grad): - return self.grad(self.network)(input, output_grad) + def construct(self, input_, output_grad): + return self.grad(self.network)(input_, output_grad) class Net(nn.Cell): @@ -55,4 +55,4 @@ def test_net(): x = np.random.randn(1, 64, 112, 112).astype(np.float32) sens = np.random.randn(1, 64, 112, 112).astype(np.float32) net = Grad(Net()) - output = net(Tensor(x), Tensor(sens)) + net(Tensor(x), Tensor(sens)) diff --git a/tests/st/ops/ascend/test_tbe_ops/test_bias_add.py b/tests/st/ops/ascend/test_tbe_ops/test_bias_add.py index e982140f7fa..bc4dcf7cdc3 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_bias_add.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_bias_add.py @@ -17,7 +17,6 @@ import numpy as np import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor -from mindspore.common.api import ms_function from mindspore.common.initializer import initializer from mindspore.common.parameter import Parameter from mindspore.ops import operations as P diff --git a/tests/st/ops/ascend/test_tbe_ops/test_bias_add_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_bias_add_grad.py index 773ab083715..834302919de 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_bias_add_grad.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_bias_add_grad.py @@ -18,9 +18,6 @@ import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor from mindspore.common.api import ms_function -from mindspore.common.initializer import initializer -from mindspore.common.parameter import Parameter -from mindspore.ops import operations as P from mindspore.ops.operations import _grad_ops as G context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") diff --git a/tests/st/ops/ascend/test_tbe_ops/test_dropout_do_mask.py b/tests/st/ops/ascend/test_tbe_ops/test_dropout_do_mask.py index bec6783749f..f00c37e54dd 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_dropout_do_mask.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_dropout_do_mask.py @@ -17,13 +17,10 @@ import numpy as np import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor -from 
mindspore.common.api import ms_function -from mindspore.common.initializer import initializer -from mindspore.common.parameter import Parameter +from mindspore import log as logger from mindspore.ops import operations as P context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") -from mindspore import log as logger class Net(nn.Cell): diff --git a/tests/st/ops/ascend/test_tbe_ops/test_gelu.py b/tests/st/ops/ascend/test_tbe_ops/test_gelu.py index aadb48415e9..f44daaac1b5 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_gelu.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_gelu.py @@ -12,32 +12,30 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ -import math import numpy as np import pytest from mindspore import context from mindspore import log as logger from mindspore.common.tensor import Tensor -from mindspore.nn import GELU, Cell -from mindspore.ops import operations as P +from mindspore.nn import GELU from mindspore.train.model import Model context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") -def gelu_forward_me_impl(input): +def gelu_forward_me_impl(input_): n = GELU() n.set_train() m = Model(n) - out = m.predict(input) + out = m.predict(input_) return out.asnumpy() def gelu_forward_cmp(input_shape, data_type=np.float32): input_np = np.random.randn(*input_shape).astype(data_type) input_me = Tensor(input_np) - out_me = gelu_forward_me_impl(input_me) + gelu_forward_me_impl(input_me) @pytest.mark.skip(reason="scalar") diff --git a/tests/st/ops/ascend/test_tbe_ops/test_gelu_grad_sens.py b/tests/st/ops/ascend/test_tbe_ops/test_gelu_grad_sens.py index 68c1b2b53d8..d0c8a97d89f 100755 --- a/tests/st/ops/ascend/test_tbe_ops/test_gelu_grad_sens.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_gelu_grad_sens.py @@ -12,11 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================ -import math import numpy as np -import pytest -import mindspore as ms from mindspore import context from mindspore import log as logger from mindspore.common.tensor import Tensor @@ -33,15 +30,15 @@ class Grad(Cell): self.grad = GradOperation(name="get_all", get_all=True, sens_param=True) self.network = network - def construct(self, input, output_grad): - return self.grad(self.network)(input, output_grad) + def construct(self, input_, output_grad): + return self.grad(self.network)(input_, output_grad) -def gelu_backward_me_impl(input, output_grad): +def gelu_backward_me_impl(input_, output_grad): n = GELU() grad_with_sense = Grad(n) grad_with_sense.set_train() - input_grad = grad_with_sense(input, output_grad) + input_grad = grad_with_sense(input_, output_grad) return input_grad.asnumpy() @@ -86,7 +83,7 @@ def gelu_backward_me_large_in_impl(x1, x2, output_grad): grad_with_sense = GradLargeIn(n) grad_with_sense.set_train() input_grad = grad_with_sense(x1, x2, output_grad) - return input_grad[0].asnumpy(), input_grad[1].asnumpy(), + return input_grad[0].asnumpy(), input_grad[1].asnumpy() def test_grad_gelu_input_10240_1024(): diff --git a/tests/st/ops/ascend/test_tbe_ops/test_layernorm.py b/tests/st/ops/ascend/test_tbe_ops/test_layernorm.py index f0532cec5bc..6295d7ae533 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_layernorm.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_layernorm.py @@ -30,8 +30,8 @@ class Net(Cell): super(Net, self).__init__() self.layernorm = LayerNorm(input_shape, begin_norm_axis, begin_params_axis, gamma, beta) - def construct(self, input): - x = self.layernorm(input) + def construct(self, input_): + x = self.layernorm(input_) return x diff --git a/tests/st/ops/ascend/test_tbe_ops/test_layernorm_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_layernorm_grad.py index aeb248b2887..c068cbfe8a5 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_layernorm_grad.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_layernorm_grad.py @@ -30,8 +30,8 @@ class Grad(Cell): self.grad = GradOperation(name="get_all", get_all=True, sens_param=True) self.network = network - def construct(self, input, output_grad, ): - gout = self.grad(self.network)(input, output_grad) + def construct(self, input_, output_grad,): + gout = self.grad(self.network)(input_, output_grad) return gout @@ -40,8 +40,8 @@ class Net(Cell): super(Net, self).__init__() self.layernorm = LayerNorm(input_shape, begin_norm_axis, begin_params_axis, gamma, beta) - def construct(self, input): - x = self.layernorm(input) + def construct(self, input_): + x = self.layernorm(input_) return x diff --git a/tests/st/ops/ascend/test_tbe_ops/test_logical_and.py b/tests/st/ops/ascend/test_tbe_ops/test_logical_and.py index 429e3eeb425..7c7e5db1f18 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_logical_and.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_logical_and.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================ -import numpy as np import mindspore.context as context import mindspore.nn as nn diff --git a/tests/st/ops/ascend/test_tbe_ops/test_logical_not.py b/tests/st/ops/ascend/test_tbe_ops/test_logical_not.py index 4fb6bc7fdb0..333298a2e2b 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_logical_not.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_logical_not.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ -import numpy as np import mindspore.context as context import mindspore.nn as nn diff --git a/tests/st/ops/ascend/test_tbe_ops/test_logical_or.py b/tests/st/ops/ascend/test_tbe_ops/test_logical_or.py index 734e0a9485e..5dfb8fd7658 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_logical_or.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_logical_or.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ -import numpy as np import mindspore.context as context import mindspore.nn as nn diff --git a/tests/st/ops/ascend/test_tbe_ops/test_matmul.py b/tests/st/ops/ascend/test_tbe_ops/test_matmul.py index 9defdbbf72a..65f1c11060c 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_matmul.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_matmul.py @@ -18,8 +18,6 @@ import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor from mindspore.common.api import ms_function -from mindspore.common.initializer import initializer -from mindspore.common.parameter import Parameter from mindspore.ops import operations as P diff --git a/tests/st/ops/ascend/test_tbe_ops/test_matmul_failed.py b/tests/st/ops/ascend/test_tbe_ops/test_matmul_failed.py index 5f12b91836e..80409ec7126 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_matmul_failed.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_matmul_failed.py @@ -18,8 +18,6 @@ import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor from mindspore.common.api import ms_function -from mindspore.common.initializer import initializer -from mindspore.common.parameter import Parameter from mindspore.ops import operations as P context.set_context(device_target="Ascend") diff --git a/tests/st/ops/ascend/test_tbe_ops/test_maximum.py b/tests/st/ops/ascend/test_tbe_ops/test_maximum.py index d6f0e8acf89..02b51305b2a 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_maximum.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_maximum.py @@ -39,9 +39,9 @@ def me_max(inputa, inputb, dtype=ms.float32): net.set_train() model = Model(net) print(type(inputa)) - if isinstance(inputa, np.ndarray) == True: + if isinstance(inputa, np.ndarray): inputa = Tensor(inputa) - if isinstance(inputb, np.ndarray) == True: + if isinstance(inputb, np.ndarray): inputb = Tensor(inputb) out = model.predict(inputa, inputb) print(out) diff --git a/tests/st/ops/ascend/test_tbe_ops/test_maximum_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_maximum_grad.py index 35252dfabbe..aad47e4aa0f 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_maximum_grad.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_maximum_grad.py @@ -46,12 +46,12 @@ class GradWrap(Cell): def gen_data(inputA_np, inputB_np, grad=None): inputA_me = inputA_np - if isinstance(inputA_np, np.ndarray) == True: + if 
isinstance(inputA_np, np.ndarray): inputA_me = Tensor(inputA_me) inputB_me = inputB_np - if isinstance(inputB_np, np.ndarray) == True: + if isinstance(inputB_np, np.ndarray): inputB_me = Tensor(inputB_np) - if grad == None: + if grad is None: grad = np.random.randn(2).astype(np.float32) print("----inputA---") print(inputA_np) diff --git a/tests/st/ops/ascend/test_tbe_ops/test_maxpool_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_maxpool_grad.py index 2bc1b45730d..7beb22f005e 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_maxpool_grad.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_maxpool_grad.py @@ -31,8 +31,8 @@ class Grad(nn.Cell): self.network = network @ms_function - def construct(self, input, output_grad): - return self.grad(self.network)(input, output_grad) + def construct(self, input_, output_grad): + return self.grad(self.network)(input_, output_grad) class Net(nn.Cell): diff --git a/tests/st/ops/ascend/test_tbe_ops/test_minimum.py b/tests/st/ops/ascend/test_tbe_ops/test_minimum.py index 3dce61b997e..e9ba1a2b092 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_minimum.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_minimum.py @@ -18,9 +18,6 @@ import mindspore as ms import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor -from mindspore.common.api import ms_function -from mindspore.common.initializer import initializer -from mindspore.common.parameter import Parameter from mindspore.ops import operations as P from mindspore.train.model import Model @@ -42,9 +39,9 @@ def me_min(inputa, inputb, dtype=ms.float32): net.set_train() model = Model(net) print(type(inputa)) - if isinstance(inputa, np.ndarray) == True: + if isinstance(inputa, np.ndarray): inputa = Tensor(inputa) - if isinstance(inputb, np.ndarray) == True: + if isinstance(inputb, np.ndarray): inputb = Tensor(inputb) out = model.predict(inputa, inputb) print(out) diff --git a/tests/st/ops/ascend/test_tbe_ops/test_minimum_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_minimum_grad.py index dbde426728e..aafe4383108 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_minimum_grad.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_minimum_grad.py @@ -15,7 +15,6 @@ import numpy as np import mindspore.context as context -import mindspore.nn as nn from mindspore import Tensor from mindspore.nn import Cell from mindspore.ops import composite as C @@ -47,11 +46,11 @@ class GradWrap(Cell): def gen_data(inputA_np, inputB_np, grad=None): inputA_me = inputA_np - if isinstance(inputA_np, np.ndarray) == True: + if isinstance(inputA_np, np.ndarray): inputA_me = Tensor(inputA_me) inputB_me = inputB_np - if isinstance(inputB_np, np.ndarray) == True: + if isinstance(inputB_np, np.ndarray): inputB_me = Tensor(inputB_np) if grad is None: diff --git a/tests/st/ops/ascend/test_tbe_ops/test_npu_alloc_float_status.py b/tests/st/ops/ascend/test_tbe_ops/test_npu_alloc_float_status.py index 42f1c86823a..3f50c5eed97 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_npu_alloc_float_status.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_npu_alloc_float_status.py @@ -12,11 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================ -import numpy as np import mindspore.context as context import mindspore.nn as nn -from mindspore import Tensor from mindspore.common.api import ms_function from mindspore.ops import operations as P diff --git a/tests/st/ops/ascend/test_tbe_ops/test_pow.py b/tests/st/ops/ascend/test_tbe_ops/test_pow.py index 8acf5284704..370781fa021 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_pow.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_pow.py @@ -16,11 +16,7 @@ import numpy as np import mindspore as ms import mindspore.context as context -import mindspore.nn as nn from mindspore import Tensor -from mindspore.common.api import ms_function -from mindspore.common.initializer import initializer -from mindspore.common.parameter import Parameter from mindspore.nn import Cell from mindspore.ops import operations as P from mindspore.train.model import Model @@ -33,31 +29,29 @@ class PowMe(Cell): super(PowMe, self).__init__() self.pow = P.Pow() - def construct(self, input, exp): - return self.pow(input, exp) + def construct(self, input_, exp): + return self.pow(input_, exp) -def pow_forward_me_impl(input, exp): +def pow_forward_me_impl(input_, exp): n = PowMe() n.set_train() m = Model(n) - out = m.predict(input, exp) + out = m.predict(input_, exp) return out.asnumpy() def pow_forward_cmp(input_shape, exp_shape): - if len(input_shape) == 0: + if not input_shape: input_np = np.absolute(np.random.randn()) else: input_np = np.absolute(np.random.randn(*input_shape).astype(np.float32)) - input_tf = input_np input_me = Tensor(input_np, dtype=ms.float32) - if len(exp_shape) == 0: + if not exp_shape: exp_np = np.absolute(np.random.randn()) else: exp_np = np.absolute(np.random.randn(*exp_shape).astype(np.float32)) - exp_tf = exp_np exp_me = Tensor(exp_np, dtype=ms.float32) out_me = pow_forward_me_impl(input_me, exp_me) diff --git a/tests/st/ops/ascend/test_tbe_ops/test_relu.py b/tests/st/ops/ascend/test_tbe_ops/test_relu.py index 9ada58e6420..77e898c08bf 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_relu.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_relu.py @@ -18,8 +18,6 @@ import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor from mindspore.common.api import ms_function -from mindspore.common.initializer import initializer -from mindspore.common.parameter import Parameter from mindspore.ops import operations as P context.set_context(device_target="Ascend") diff --git a/tests/st/ops/ascend/test_tbe_ops/test_relu_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_relu_grad.py index 94150db0710..b26fe30697d 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_relu_grad.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_relu_grad.py @@ -18,8 +18,6 @@ import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor from mindspore.common.api import ms_function -from mindspore.common.initializer import initializer -from mindspore.common.parameter import Parameter from mindspore.ops import operations as P from mindspore.ops.composite import GradOperation diff --git a/tests/st/ops/ascend/test_tbe_ops/test_relu_v2_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_relu_v2_grad.py index 351e7f617af..645765792a7 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_relu_v2_grad.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_relu_v2_grad.py @@ -18,8 +18,6 @@ import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor from mindspore.common.api import ms_function 
-from mindspore.common.initializer import initializer -from mindspore.common.parameter import Parameter from mindspore.ops import operations as P from mindspore.ops.composite import GradOperation @@ -33,8 +31,8 @@ class Grad(nn.Cell): self.network = network @ms_function - def construct(self, input): - return self.grad(self.network)(input) + def construct(self, input_): + return self.grad(self.network)(input_) class Net(nn.Cell): diff --git a/tests/st/ops/ascend/test_tbe_ops/test_scatter_nd.py b/tests/st/ops/ascend/test_tbe_ops/test_scatter_nd.py index d79cca38469..982d7951fab 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_scatter_nd.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_scatter_nd.py @@ -17,9 +17,6 @@ import numpy as np import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor -from mindspore.common.api import ms_function -from mindspore.common.initializer import initializer -from mindspore.common.parameter import Parameter from mindspore.ops import operations as P context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") diff --git a/tests/st/ops/ascend/test_tbe_ops/test_select.py b/tests/st/ops/ascend/test_tbe_ops/test_select.py index 5f149a590f1..56fd8ba0a1a 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_select.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_select.py @@ -16,11 +16,7 @@ import numpy as np import mindspore as ms import mindspore.context as context -import mindspore.nn as nn from mindspore import Tensor -from mindspore.common.api import ms_function -from mindspore.common.initializer import initializer -from mindspore.common.parameter import Parameter from mindspore.nn import Cell from mindspore.ops import operations as P from mindspore.train.model import Model @@ -41,11 +37,11 @@ def me_select(cond, inputa, inputb, dtype=ms.float32): net = Select(dtype) net.set_train() model = Model(net) - if isinstance(inputa, np.ndarray) == True: + if isinstance(inputa, np.ndarray): inputa = Tensor(inputa) - if isinstance(inputb, np.ndarray) == True: + if isinstance(inputb, np.ndarray): inputb = Tensor(inputb) - if isinstance(cond, np.bool_) == True: + if isinstance(cond, np.bool_): cond = np.array(cond) out = model.predict(Tensor(cond), inputa, inputb) diff --git a/tests/st/ops/ascend/test_tbe_ops/test_sigmoid.py b/tests/st/ops/ascend/test_tbe_ops/test_sigmoid.py index 223a41135c2..3243e2e4efb 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_sigmoid.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_sigmoid.py @@ -18,7 +18,6 @@ import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor from mindspore.common.api import ms_function -from mindspore.ops import operations as P context.set_context(device_target="Ascend") diff --git a/tests/st/ops/ascend/test_tbe_ops/test_softmax.py b/tests/st/ops/ascend/test_tbe_ops/test_softmax.py index 3c3cf776b2b..07feff5be07 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_softmax.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_softmax.py @@ -17,9 +17,6 @@ import numpy as np import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor -from mindspore.common.api import ms_function -from mindspore.common.initializer import initializer -from mindspore.common.parameter import Parameter from mindspore.ops import operations as P context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") diff --git a/tests/st/ops/ascend/test_tbe_ops/test_softmax_cross_entropy_with_logits.py 
b/tests/st/ops/ascend/test_tbe_ops/test_softmax_cross_entropy_with_logits.py index 1c08f194076..7bcbf70b714 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_softmax_cross_entropy_with_logits.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_softmax_cross_entropy_with_logits.py @@ -37,5 +37,5 @@ def test_net(): features = np.random.randn(32, 1001).astype(np.float16) labels = np.random.randn(32, 1001).astype(np.float16) SoftmaxCrossEntropyWithLogits = Net() - output = SoftmaxCrossEntropyWithLogits(Tensor(features), Tensor(labels)) + SoftmaxCrossEntropyWithLogits(Tensor(features), Tensor(labels)) # print(output.asnumpy()) diff --git a/tests/st/ops/ascend/test_tbe_ops/test_split.py b/tests/st/ops/ascend/test_tbe_ops/test_split.py index 51279d6de06..bed4fdae81a 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_split.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_split.py @@ -17,9 +17,6 @@ import numpy as np import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor -from mindspore.common.api import ms_function -from mindspore.common.initializer import initializer -from mindspore.common.parameter import Parameter from mindspore.ops import operations as P context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") diff --git a/tests/st/ops/ascend/test_tbe_ops/test_sqrt.py b/tests/st/ops/ascend/test_tbe_ops/test_sqrt.py index 207dcce6b1a..5a61ae9c35d 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_sqrt.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_sqrt.py @@ -17,9 +17,6 @@ import numpy as np import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor -from mindspore.common.api import ms_function -from mindspore.common.initializer import initializer -from mindspore.common.parameter import Parameter from mindspore.ops import operations as P context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") diff --git a/tests/st/ops/ascend/test_tbe_ops/test_square.py b/tests/st/ops/ascend/test_tbe_ops/test_square.py index 64005672c34..ab6c3a993dc 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_square.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_square.py @@ -17,9 +17,6 @@ import numpy as np import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor -from mindspore.common.api import ms_function -from mindspore.common.initializer import initializer -from mindspore.common.parameter import Parameter from mindspore.ops import operations as P context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") diff --git a/tests/st/ops/ascend/test_tbe_ops/test_stridedslice.py b/tests/st/ops/ascend/test_tbe_ops/test_stridedslice.py index 8908cb1d5f2..96a40c4d1ab 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_stridedslice.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_stridedslice.py @@ -13,7 +13,6 @@ # limitations under the License. 
# ============================================================================ import numpy as np -import pytest import mindspore.context as context import mindspore.ops.operations as P @@ -32,8 +31,8 @@ class Net(Cell): self.end = end self.stride = stride - def construct(self, input): - x = self.stridedslice(input, self.begin, self.end, self.stride) + def construct(self, input_): + x = self.stridedslice(input_, self.begin, self.end, self.stride) return x @@ -47,17 +46,17 @@ def me_stridedslice(input1, begin, end, stride): def test_stridedslice_input_2d(): - input = np.random.randn(5, 5).astype(np.int32) + input_ = np.random.randn(5, 5).astype(np.int32) begin = (0, 0) end = (2, 2) stride = (1, 1) - me_stridedslice(input, begin, end, stride) + me_stridedslice(input_, begin, end, stride) def test_stridedslice_input_3d(): - input = np.random.randn(5, 5, 5).astype(np.float32) + input_ = np.random.randn(5, 5, 5).astype(np.float32) begin = (0, 0, 0) end = (3, 3, 3) stride = (1, 1, 1) - me_stridedslice(input, begin, end, stride) + me_stridedslice(input_, begin, end, stride) diff --git a/tests/st/ops/ascend/test_tbe_ops/test_stridedslice_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_stridedslice_grad.py index 9e693c78daa..1938aaeca38 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_stridedslice_grad.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_stridedslice_grad.py @@ -13,7 +13,6 @@ # limitations under the License. # ============================================================================ import numpy as np -import pytest from mindspore import context from mindspore.common.tensor import Tensor @@ -30,8 +29,8 @@ class Grad(Cell): self.grad = GradOperation(name="get_all", get_all=True, sens_param=True) self.network = network - def construct(self, input, output_grad): - gout = self.grad(self.network)(input, output_grad) + def construct(self, input_, output_grad): + gout = self.grad(self.network)(input_, output_grad) return gout @@ -43,13 +42,13 @@ class Net(Cell): self.end = end self.stride = stride - def construct(self, input): - x = self.stridedslice(input, self.begin, self.end, self.stride) + def construct(self, input_): + x = self.stridedslice(input_, self.begin, self.end, self.stride) return x -def me_stridedslice(input, begin, end, stride, gradients): - input_me = Tensor(input) +def me_stridedslice(input_, begin, end, stride, gradients): + input_me = Tensor(input_) out_grad_me = Tensor(gradients) net_me = Grad(Net(begin, end, stride)) net_me.set_train() @@ -58,9 +57,9 @@ def me_stridedslice(input, begin, end, stride, gradients): def test_grad_stridedslice_1d(): - input = np.random.randn(2).astype(np.float32) + input_ = np.random.randn(2).astype(np.float32) begin = (0,) end = (2,) stride = (1,) gradients = np.random.randn(2).astype(np.float32) - me_stridedslice(input, begin, end, stride, gradients) + me_stridedslice(input_, begin, end, stride, gradients) diff --git a/tests/st/ops/ascend/test_tbe_ops/test_sub.py b/tests/st/ops/ascend/test_tbe_ops/test_sub.py index 97248082e82..77d5302fc53 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_sub.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_sub.py @@ -17,9 +17,6 @@ import numpy as np import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor -from mindspore.common.api import ms_function -from mindspore.common.initializer import initializer -from mindspore.common.parameter import Parameter from mindspore.ops import operations as P context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") diff --git 
a/tests/st/ops/ascend/test_tbe_ops/test_tanh_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_tanh_grad.py index 04ada07d6ce..b9dd0080878 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_tanh_grad.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_tanh_grad.py @@ -17,7 +17,6 @@ import numpy as np import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor -from mindspore.ops import operations as P from mindspore.ops.operations import _grad_ops as G from mindspore.train.model import Model diff --git a/tests/st/ops/ascend/test_tbe_ops/test_tile.py b/tests/st/ops/ascend/test_tbe_ops/test_tile.py index 02e5b5890bd..f2a5ed6a878 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_tile.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_tile.py @@ -17,9 +17,6 @@ import numpy as np import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor -from mindspore.common.api import ms_function -from mindspore.common.initializer import initializer -from mindspore.common.parameter import Parameter from mindspore.ops import operations as P context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") diff --git a/tests/st/ops/ascend/test_tbe_ops/test_topk.py b/tests/st/ops/ascend/test_tbe_ops/test_topk.py index 907c8534150..c96f3db9317 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_topk.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_topk.py @@ -17,9 +17,6 @@ import numpy as np import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor -from mindspore.common.api import ms_function -from mindspore.common.initializer import initializer -from mindspore.common.parameter import Parameter from mindspore.ops import operations as P context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") diff --git a/tests/st/ops/ascend/test_tbe_ops/test_transpose_d.py b/tests/st/ops/ascend/test_tbe_ops/test_transpose_d.py index 3f1367cc6f1..8f246fb07ce 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_transpose_d.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_transpose_d.py @@ -29,14 +29,14 @@ class Net(nn.Cell): self.transpose = P.Transpose() self.perm = perm_in - def construct(self, input): - x = self.transpose(input, self.perm) + def construct(self, input_): + x = self.transpose(input_, self.perm) return x -def ms_transpose(input, perm_in): +def ms_transpose(input_, perm_in): context.set_context(mode=context.GRAPH_MODE) - input_me = Tensor(input) + input_me = Tensor(input_) net = Net(perm_in) net.set_train() model = Model(net) @@ -47,6 +47,6 @@ def ms_transpose(input, perm_in): def test_net(): - input = np.random.randn(8, 24, 1, 1).astype(np.float16) + input_ = np.random.randn(8, 24, 1, 1).astype(np.float16) perm = (0, 2, 3, 1) - ms_transpose(input, perm) + ms_transpose(input_, perm) diff --git a/tests/st/ops/ascend/test_tbe_ops/test_unsorted_segment_sum.py b/tests/st/ops/ascend/test_tbe_ops/test_unsorted_segment_sum.py index 75ce11d935f..e58ea23a1f9 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_unsorted_segment_sum.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_unsorted_segment_sum.py @@ -33,19 +33,19 @@ class Net(nn.Cell): return self.seg_sum(x, segment_ids, self.num_segments) -def me_un_seg_sum(input, indices, num_segments): +def me_un_seg_sum(input_, indices, num_segments): context.set_context(mode=context.GRAPH_MODE) net = Net(num_segments) net.set_train() model = Model(net) - out = model.predict(Tensor(input), Tensor(indices)) + out = model.predict(Tensor(input_), Tensor(indices)) return out.asnumpy() def 
comapre_un_seg_sum(shape, indices, num_segments, dtype): - input = np.random.randn(*shape).astype(dtype) + input_ = np.random.randn(*shape).astype(dtype) indices_me = np.array(indices).astype(np.int32) - out_me = me_un_seg_sum(input, indices_me, num_segments) + out_me = me_un_seg_sum(input_, indices_me, num_segments) print("-------------ms------------------") print(out_me) diff --git a/tests/st/ops/ascend/test_tdt_data_ms.py b/tests/st/ops/ascend/test_tdt_data_ms.py index 889d4883f9c..d680ac0a557 100644 --- a/tests/st/ops/ascend/test_tdt_data_ms.py +++ b/tests/st/ops/ascend/test_tdt_data_ms.py @@ -87,8 +87,8 @@ if __name__ == '__main__': super(dataiter, self).__init__() def construct(self): - input, label = get_next() - return tadd(input) + input_, _ = get_next() + return tadd(input_) net = dataiter() diff --git a/tests/st/ops/cpu/test_bias_add_grad.py b/tests/st/ops/cpu/test_bias_add_grad.py index bb3d7411005..9c185c4e1df 100644 --- a/tests/st/ops/cpu/test_bias_add_grad.py +++ b/tests/st/ops/cpu/test_bias_add_grad.py @@ -18,7 +18,6 @@ import pytest import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor -from mindspore.ops import operations as P from mindspore.ops.operations import _grad_ops as G context.set_context(mode=context.GRAPH_MODE, device_target='CPU') diff --git a/tests/st/ops/cpu/test_maxpool_grad_op.py b/tests/st/ops/cpu/test_maxpool_grad_op.py index d1e1032d1ca..ee0a654969b 100644 --- a/tests/st/ops/cpu/test_maxpool_grad_op.py +++ b/tests/st/ops/cpu/test_maxpool_grad_op.py @@ -21,7 +21,6 @@ import mindspore.nn as nn from mindspore import Tensor from mindspore.common.initializer import initializer from mindspore.common.parameter import Parameter -from mindspore.ops import operations as P from mindspore.ops.operations import _grad_ops as G context.set_context(mode=context.GRAPH_MODE, device_target='CPU') diff --git a/tests/st/ops/cpu/test_momentum_op.py b/tests/st/ops/cpu/test_momentum_op.py index 862e8eae770..43ba785aed2 100644 --- a/tests/st/ops/cpu/test_momentum_op.py +++ b/tests/st/ops/cpu/test_momentum_op.py @@ -57,7 +57,7 @@ def test_momentum(): train_network = TrainOneStepCell(net_with_criterion, optimizer) # optimizer train_network.set_train() losses = [] - for i in range(epoch): + for _ in range(epoch): data = Tensor(np.arange(0, 16).reshape(1, 1, 4, 4).astype(np.float32) * 0.01) label = Tensor(np.array([0]).astype(np.int32)) loss = train_network(data, label) @@ -70,6 +70,5 @@ def test_momentum(): [[0.04132498 0.00874167 0.00874167 0.00874167 0.00874167 0.00874167 0.00874167 0.00874167 0.00874167 0.00874167]] """ - error = np.ones(shape=[1, 10]) * 1.0e-6 return losses diff --git a/tests/st/ops/cpu/test_one_hot_op.py b/tests/st/ops/cpu/test_one_hot_op.py index 3383049db7c..fb825179c36 100644 --- a/tests/st/ops/cpu/test_one_hot_op.py +++ b/tests/st/ops/cpu/test_one_hot_op.py @@ -20,7 +20,6 @@ import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor from mindspore.common.api import ms_function -from mindspore.ops import operations as P context.set_context(device_target='CPU') diff --git a/tests/st/ops/cpu/test_relu_grad_op.py b/tests/st/ops/cpu/test_relu_grad_op.py index 2eb0ef4693d..b1336b0ffe4 100644 --- a/tests/st/ops/cpu/test_relu_grad_op.py +++ b/tests/st/ops/cpu/test_relu_grad_op.py @@ -21,7 +21,6 @@ import mindspore.nn as nn from mindspore import Tensor from mindspore.common.initializer import initializer from mindspore.common.parameter import Parameter -from mindspore.ops import operations as P 
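[Illustrative note, not part of the patch] The input -> input_ renames in the hunks above (test_stridedslice.py, test_stridedslice_grad.py, test_transpose_d.py, test_unsorted_segment_sum.py, test_tdt_data_ms.py) exist because a parameter named input shadows the Python builtin of the same name, which pylint flags as W0622 (redefined-builtin). A minimal runnable sketch of the pattern using a NumPy stand-in; the function name and shapes are hypothetical:

import numpy as np

def strided_slice_like(input_, begin, end, stride):
    # Named input_ rather than input, so the builtin stays reachable and pylint stays quiet.
    slices = tuple(slice(b, e, s) for b, e, s in zip(begin, end, stride))
    return input_[slices]

print(strided_slice_like(np.arange(25).reshape(5, 5), (0, 0), (2, 2), (1, 1)))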
from mindspore.ops.operations import _grad_ops as G context.set_context(mode=context.GRAPH_MODE, device_target='CPU') @@ -48,7 +47,7 @@ class NetReluGrad(nn.Cell): def test_relu_grad(): relu_grad = NetReluGrad() output = relu_grad() - expect = np.array([[[[0, 0, 1, ], [0, 0, 0, ], [1, 1, 0.]]]]).astype(np.float32) + expect = np.array([[[[0, 0, 1,], [0, 0, 0,], [1, 1, 0.]]]]).astype(np.float32) error = np.ones(shape=[3, 3]) * 1.0e-6 diff = output.asnumpy() - expect assert np.all(diff < error) diff --git a/tests/st/ops/cpu/test_relu_op.py b/tests/st/ops/cpu/test_relu_op.py index b2b64eb61aa..03680fe9cc7 100644 --- a/tests/st/ops/cpu/test_relu_op.py +++ b/tests/st/ops/cpu/test_relu_op.py @@ -44,8 +44,8 @@ class NetRelu(nn.Cell): def test_relu(): relu = NetRelu() output = relu() - expect = np.array([[[[0, 1, 10, ], - [1, 0, 1, ], + expect = np.array([[[[0, 1, 10,], + [1, 0, 1,], [10, 1, 0.]]]]).astype(np.float32) print(output) assert (output.asnumpy() == expect).all() diff --git a/tests/st/ops/custom_ops_tbe/conv_layer.py b/tests/st/ops/custom_ops_tbe/conv_layer.py new file mode 100755 index 00000000000..dad5a8c8697 --- /dev/null +++ b/tests/st/ops/custom_ops_tbe/conv_layer.py @@ -0,0 +1,519 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +import te.lang.cce +from te import tvm +from te.platform import CUBE_MKN +from topi import generic +from topi.cce import util +from topi.cce.util import is_v200_version + +# pylint: disable=R0912,R0913,R0914,R0915,E1101 +# the dim of shape in conv must be 4 +PAD_SHAPE_DIM = 2 + +NONETYPE = type(None) + + +@util.check_input_type((list, tuple), (list, tuple), str, str, str, (list, int), (list, int), + int, int, (list, tuple), (list, tuple), + str, str, str, + str, str, str, + str, bool, str) +def conv_layer_cce_para_check(shape_in, shape_w, in_dtype, w_dtype, res_dtype, padh, padw, + strideh, stridew, quantize_config, scale_sqrt, + scale_q_dtype, offset_q_dtype, scale_dq_dtype, + scale_rq_dtype, offset_rq_dtype, offset_w_dtype, + offset_pad_dtype, bias, kernel_name): + # conv shape check + util.check_kernel_name(kernel_name) + + # conv data type check + util.check_dtype_rule(in_dtype, ['float16', 'int8', 'uint8']) + util.check_dtype_rule(w_dtype, ['float16', 'int8', 'uint8']) + res_dtype_list = ['float16', 'int8', 'uint8'] + if is_v200_version(): + res_dtype_list.append('int32') + util.check_dtype_rule(res_dtype, res_dtype_list) + util.check_dtype_rule(scale_q_dtype, ['float16']) + util.check_dtype_rule(offset_q_dtype, ['float16']) + util.check_dtype_rule(scale_dq_dtype, ['float16']) + util.check_dtype_rule(scale_rq_dtype, ['float16']) + util.check_dtype_rule(offset_rq_dtype, ['float16']) + util.check_dtype_rule(offset_w_dtype, ['int32']) + util.check_dtype_rule(offset_pad_dtype, ['uint8']) + + if not isinstance(bias, bool): + raise RuntimeError("bias dtype should be bool.") + + if quantize_config[0] == 0: + if is_v200_version(): + util.check_dtype_rule(in_dtype, ('int8',)) + util.check_dtype_rule(w_dtype, ('int8',)) + util.check_dtype_rule(res_dtype, ('int32',)) + else: + util.check_dtype_rule(in_dtype, ['float16']) + util.check_dtype_rule(w_dtype, ['float16']) + util.check_dtype_rule(res_dtype, ['float16']) + + if quantize_config[0] == 1: + util.check_dtype_rule(w_dtype, ['int8']) + if quantize_config[1] == 0: + util.check_dtype_rule(in_dtype, ['int8', 'float16']) + util.check_dtype_rule(res_dtype, ['int8', 'float16']) + elif quantize_config[1] == 1: + util.check_dtype_rule(in_dtype, ['uint8', 'float16']) + util.check_dtype_rule(res_dtype, ['uint8', 'float16']) + elif quantize_config[1] == 2: + raise RuntimeError("All Offset mode quantize not support.") + else: + raise RuntimeError("Invalid quantize algorithm.") + + # quantize switch on + if quantize_config[0] == 1: + # quantize -> DeQuantize dataflow + if in_dtype == 'float16' and w_dtype == 'int8' and res_dtype == 'float16': + pass + # DeQuantize dataflow + elif (in_dtype in ['int8', 'uint8'] and w_dtype == 'int8' and + res_dtype == 'float16'): + pass + # quantize -> ReQuantize dataflow + elif (in_dtype == 'float16' and w_dtype == 'int8' and res_dtype in + ['int8', 'uint8']): + pass + # ReQuantize dataflow + elif (in_dtype in ['int8', 'uint8'] and w_dtype == 'int8' and res_dtype in + ['int8', 'uint8']): + pass + else: + raise RuntimeError("Not support in/out data type for quantize.") + + if quantize_config not in ([1, 0, 0], [1, 1, 0], [1, 0, 1], [1, 1, 1]): + raise RuntimeError("Invalid Quantize Config.") + + if scale_sqrt not in ([0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 1, 0], [0, 0, 1], + [1, 0, 1], [0, 1, 1], [1, 1, 1]): + raise RuntimeError("Invalid Quantize Config.") + + # quantize switch off + elif quantize_config[0] == 0: + if quantize_config != [0, 0, 
0]: + raise RuntimeError("Invalid Quantize Config.") + if scale_sqrt != [0, 0, 0]: + raise RuntimeError("Invalid Quantize Config.") + else: + raise RuntimeError("Invalid Quantize Config.") + + if isinstance(padh, list): + if len(padh) != PAD_SHAPE_DIM: + raise RuntimeError("Dimension must be %d when padh is a list." % PAD_SHAPE_DIM) + pad_top = padh[0] + pad_bottom = padh[1] + else: + pad_top = padh + pad_bottom = padh + + if isinstance(padw, list): + if len(padw) != PAD_SHAPE_DIM: + raise RuntimeError("Dimension must be %d when padw is a list." % PAD_SHAPE_DIM) + pad_left = padw[0] + pad_right = padw[1] + else: + pad_left = padw + pad_right = padw + + shape_in, shape_w = te.lang.cce.check_conv_shape(shape_in, shape_w, pad_top, pad_bottom, \ + pad_left, pad_right, strideh, \ + stridew, in_dtype, w_dtype, res_dtype) + + return shape_in, shape_w + + +@util.check_input_type((list, tuple), (list, tuple), str, str, str, \ + (list, int), (list, int), int, int, + (list, NONETYPE), (list, NONETYPE), + str, str, str, + str, str, str, str, + bool, str, bool, bool) +def conv_layer_cce(shape_in, shape_w, in_dtype, w_dtype, res_dtype, padh, padw, strideh, stridew, + quantize_config=None, scale_sqrt=None, + scale_q_dtype='float16', offset_q_dtype='float16', scale_dq_dtype='float16', + scale_rq_dtype='float16', offset_rq_dtype='float16', offset_w_dtype='int32', + offset_pad_dtype='uint8', bias=False, kernel_name="cce_conv", need_build=False, + need_print=False): + """ + + Parameters + ---------- + shape_in : shape of data_in + + shape_w : shape of filter + + in_dtype : the feature map data type + + w_dtype : the weight data type + + res_dtype : the result data type + + padh: the padding shape in H + + padw: the padding shape in weight + + strideh: the stride value in H + + stridew: the stride value in weight + + quantize_config: quantize config table, default [0, 0, 0] + quantize_config[0] - quantize function switch + 0: quantize off + 1: quantize on + quantize_config[1] - quantize_algorithm + 0: non offset + 1: half offset + 2: all offset ( Not supported now ) + quantize_config[2] - QuantizeScaleType (for Dequantize/Requantize, quantize always scalar) + 0: scalar + 1: vector + + scale_sqrt: scale mode + scale_sqrt[0] - Quantize scale mode + 0: non sqrt + 1: sqrt + scale_sqrt[1] - DeQuantize scale mode + 0: non sqrt + 1: sqrt + scale_sqrt[2] - ReQuantize scale mode + 0: non sqrt + 1: sqrt + + scale_q_dtype: Quantize scale data type, default 'float16' + + offset_q_dtype: Quantize offset data type, default 'float16' + + scale_dq_dtype: DeQuantize scale data type, default 'float16' + + scale_rq_dtype: ReQuantize scale data type, default 'float16' + + offset_rq_dtype: ReQuantize offset data type, default 'float16' + + offset_w_dtype: weight offset data type, default 'int32' + + offset_pad_dtype: Quantize Cube offset data type, default 'uint8' + + bias: the tag for bias or not + + kernel_name : cce kernel name, default value is "cce_conv" + + need_build : if need to build CCEC kernel, default value is False + + need_print : if need to print the ir, default value is False + + Returns + ------- + wrapped_tensor + + """ + # for pylint, otherwise "Dangerous default value [] as argument" + if quantize_config is None: + quantize_config = [0, 0, 0] + if scale_sqrt is None: + scale_sqrt = [0, 0, 0] + + in_dtype = in_dtype.lower() + w_dtype = w_dtype.lower() + res_dtype = res_dtype.lower() + scale_q_dtype = scale_q_dtype.lower() + offset_q_dtype = offset_q_dtype.lower() + scale_dq_dtype = scale_dq_dtype.lower() + 
scale_rq_dtype = scale_rq_dtype.lower() + offset_rq_dtype = offset_rq_dtype.lower() + offset_w_dtype = offset_w_dtype.lower() + offset_pad_dtype = offset_pad_dtype.lower() + + mad_dtype = 'float32' + if w_dtype == 'int8': + mad_dtype = 'int32' + + shape_in = list(shape_in) + shape_w = list(shape_w) + + shape_in, shape_w = conv_layer_cce_para_check(shape_in, shape_w, in_dtype, w_dtype, res_dtype, padh, padw, strideh, + stridew, + quantize_config, scale_sqrt, scale_q_dtype, offset_q_dtype, + scale_dq_dtype, + scale_rq_dtype, offset_rq_dtype, offset_w_dtype, offset_pad_dtype, + bias, kernel_name) + + # quantize switch on + if quantize_config[0] == 1: + quantize_turn_on = True + # quantize -> DeQuantize dataflow + if in_dtype == 'float16' and w_dtype == 'int8' and res_dtype == 'float16': + is_quantize = True + is_dequantize = True + is_requantize = False + # DeQuantize dataflow + elif (in_dtype in ['int8', 'uint8'] and w_dtype == 'int8' and + res_dtype == 'float16'): + is_quantize = False + is_dequantize = True + is_requantize = False + # quantize -> ReQuantize dataflow + elif (in_dtype == 'float16' and w_dtype == 'int8' and res_dtype in + ['int8', 'uint8']): + is_quantize = True + is_dequantize = False + is_requantize = True + # ReQuantize dataflow + elif (in_dtype in ['int8', 'uint8'] and w_dtype == 'int8' and res_dtype in + ['int8', 'uint8']): + is_quantize = False + is_dequantize = False + is_requantize = True + else: + raise RuntimeError("Not support in/out data type for quantize.") + + # quantize switch off + elif quantize_config[0] == 0: + quantize_turn_on = False + is_quantize = False + is_dequantize = False + is_requantize = False + + if quantize_config != [0, 0, 0]: + raise RuntimeError("Invalid Quantize Config.") + if scale_sqrt != [0, 0, 0]: + raise RuntimeError("Invalid Quantize Config.") + else: + raise RuntimeError("Invalid Quantize Config.") + + batch_size = shape_in[0] + in_channel = shape_in[1] + feature_map_h = shape_in[2] + feature_map_w = shape_in[3] + block_size_k = CUBE_MKN[in_dtype]['mac'][1] + fmap_shape_nc1hwc0 = (batch_size, (in_channel + block_size_k - 1) // block_size_k, + feature_map_h, feature_map_w, block_size_k) + + out_channel = shape_w[0] + in_channel_weight = shape_w[1] + filter_h = shape_w[2] + filter_w = shape_w[3] + block_size_k = CUBE_MKN[w_dtype]['mac'][1] + block_size_n = CUBE_MKN[w_dtype]['mac'][2] + filter_shape_frac_z = (in_channel_weight * filter_h * filter_w // block_size_k, + out_channel // block_size_n, block_size_n, block_size_k) + + with tvm.target.cce(): + data = tvm.placeholder( + fmap_shape_nc1hwc0, name='Fmap', dtype=in_dtype) + weight = tvm.placeholder( + filter_shape_frac_z, name='Filter', dtype=w_dtype) + bias_tensor = None + scale_q = None + scale_dq = None + scale_rq = None + offset_pad = None + offset_rq = None + offset_q = None + scale_drq = None + + # bias or fusion_bias(half offset) + if bias or (quantize_config[1] == 1 and quantize_turn_on): + bias_tensor = tvm.placeholder( + (out_channel,), name='bias_tensor', \ + dtype="int32" if quantize_turn_on else res_dtype) + + # quantize on + if quantize_turn_on: + quantize_algorithm = quantize_config[1] + if is_quantize: + scale_q = tvm.placeholder( + (CUBE_MKN[scale_q_dtype]['mac'][1],), name='scaleQ', dtype=scale_q_dtype) + if quantize_algorithm == 1: + offset_q = tvm.placeholder( + (CUBE_MKN[offset_q_dtype]['mac'][1],), name='offsetQ', dtype=offset_q_dtype) + + if is_dequantize: + scale_dq_shape = (CUBE_MKN[scale_dq_dtype]['mac'][1],) if quantize_config[2] == 0 \ + else (out_channel,) 
+ scale_dq = tvm.placeholder( + scale_dq_shape, name='scaleDq', dtype=scale_dq_dtype) + + if is_requantize: + scale_rq_shape = (CUBE_MKN[scale_rq_dtype]['mac'][1],) if quantize_config[2] == 0 \ + else (out_channel,) + scale_rq = tvm.placeholder( + scale_rq_shape, name='scaleRq', dtype=scale_rq_dtype) + if quantize_algorithm == 1: + offset_rq_shape = (CUBE_MKN[offset_rq_dtype]['mac'][1],) + offset_rq = tvm.placeholder( + offset_rq_shape, name='offsetRq', dtype=offset_rq_dtype) + + # need offset_pad , for half offset + if quantize_algorithm == 1: + offset_pad = tvm.placeholder( + (CUBE_MKN[offset_pad_dtype]['mac'][1],), name='offset_pad', + dtype=offset_pad_dtype) + + if quantize_algorithm == 0: + if is_quantize: + if is_dequantize: + scale_drq = scale_dq + else: + scale_drq = scale_rq + + conv_res = te.lang.cce.conv( + data, weight, {"bias_tensor": bias_tensor, + "scale_q": scale_q, + "offset_q": offset_q, + "scale_drq": scale_drq, + "offset_pad": offset_pad, + "offset_rq": offset_rq, + "quantize_config": quantize_config, + "is_quantize": is_quantize, + "is_dequantize": is_dequantize, + "is_requantize": is_requantize, + "scale_sqrt": scale_sqrt, + "pad_h": padh, "pad_w": padw, + "stride_h": strideh, "stride_w": stridew, + "filter_h": filter_h, "filter_w": filter_w, + "res_dtype": res_dtype, "mad_dtype": mad_dtype}, + dsl_flag=False) + if bias: + tensor_list = [data, weight, bias_tensor, scale_q, + scale_drq, conv_res] + else: + tensor_list = [data, weight, scale_q, + scale_drq, conv_res] + else: + if is_dequantize: + scale_drq = scale_dq + else: + scale_drq = scale_rq + conv_res = te.lang.cce.conv( + data, weight, {"bias_tensor": bias_tensor, + "scale_q": scale_q, + "offset_q": offset_q, + "scale_drq": scale_drq, + "offset_pad": offset_pad, + "offset_rq": offset_rq, + "quantize_config": quantize_config, + "is_quantize": is_quantize, + "is_dequantize": is_dequantize, + "is_requantize": is_requantize, + "scale_sqrt": scale_sqrt, + "pad_h": padh, "pad_w": padw, + "stride_h": strideh, "stride_w": stridew, + "filter_h": filter_h, "filter_w": filter_w, + "res_dtype": res_dtype, "mad_dtype": mad_dtype}, + dsl_flag=False) + if bias: + tensor_list = [data, weight, bias_tensor, + scale_drq, conv_res] + else: + tensor_list = [data, weight, + scale_drq, conv_res] + + # half offset + else: + if is_quantize: + if is_dequantize: + scale_drq = scale_dq + else: + scale_drq = scale_rq + conv_res = te.lang.cce.conv( + data, weight, {"bias_tensor": bias_tensor, + "scale_q": scale_q, + "offset_q": offset_q, + "scale_drq": scale_drq, + "offset_pad": offset_pad, + "offset_rq": offset_rq, + "quantize_config": quantize_config, + "is_quantize": is_quantize, + "is_dequantize": is_dequantize, + "is_requantize": is_requantize, + "scale_sqrt": scale_sqrt, + "pad_h": padh, "pad_w": padw, + "stride_h": strideh, "stride_w": stridew, + "filter_h": filter_h, "filter_w": filter_w, + "res_dtype": res_dtype, "mad_dtype": mad_dtype}, + dsl_flag=False) + if is_dequantize: + tensor_list = [data, weight, bias_tensor, scale_q, offset_q, + scale_drq, offset_pad, conv_res] + else: + tensor_list = [data, weight, bias_tensor, scale_q, offset_q, + scale_drq, offset_rq, offset_pad, conv_res] + else: + if is_dequantize: + scale_drq = scale_dq + else: + scale_drq = scale_rq + conv_res = te.lang.cce.conv( + data, weight, {"bias_tensor": bias_tensor, + "scale_q": scale_q, + "offset_q": offset_q, + "scale_drq": scale_drq, + "offset_pad": offset_pad, + "offset_rq": offset_rq, + "quantize_config": quantize_config, + "is_quantize": is_quantize, + 
"is_dequantize": is_dequantize, + "is_requantize": is_requantize, + "scale_sqrt": scale_sqrt, + "pad_h": padh, "pad_w": padw, + "stride_h": strideh, "stride_w": stridew, + "filter_h": filter_h, "filter_w": filter_w, + "res_dtype": res_dtype, "mad_dtype": mad_dtype}, + dsl_flag=False) + if is_dequantize: + tensor_list = [data, weight, bias_tensor, + scale_drq, offset_pad, conv_res] + else: + tensor_list = [data, weight, bias_tensor, + scale_drq, offset_rq, offset_pad, conv_res] + else: + conv_res = te.lang.cce.conv( + data, weight, {"bias_tensor": bias_tensor, + "scale_q": scale_q, + "offset_q": offset_q, + "scale_drq": scale_drq, + "offset_pad": offset_pad, + "offset_rq": offset_rq, + "quantize_config": quantize_config, + "is_quantize": is_quantize, + "is_dequantize": is_dequantize, + "is_requantize": is_requantize, + "scale_sqrt": scale_sqrt, + "pad_h": padh, "pad_w": padw, + "stride_h": strideh, "stride_w": stridew, + "filter_h": filter_h, "filter_w": filter_w, + "res_dtype": res_dtype, "mad_dtype": mad_dtype}, + dsl_flag=False) + if bias: + tensor_list = [data, weight, bias_tensor, conv_res] + else: + tensor_list = [data, weight, conv_res] + sch = generic.auto_schedule(conv_res) + + config = { + "print_ir": need_print, + "need_build": need_build, + "name": kernel_name, + "tensor_list": tensor_list + } + + te.lang.cce.cce_build_code(sch, config) diff --git a/tests/st/ops/custom_ops_tbe/cus_square.py b/tests/st/ops/custom_ops_tbe/cus_square.py index 85d4ebb996f..59ba7988699 100644 --- a/tests/st/ops/custom_ops_tbe/cus_square.py +++ b/tests/st/ops/custom_ops_tbe/cus_square.py @@ -20,12 +20,12 @@ from mindspore.ops import operations as P # y = x^2 class CusSquare(PrimitiveWithInfer): """CusSquare definition""" + from square_impl import CusSquareImpl @prim_attr_register def __init__(self): """init CusSquare""" self.init_prim_io_names(inputs=['x'], outputs=['y']) - from square_impl import CusSquareImpl def vm_impl(self, x): x = x.asnumpy() diff --git a/tests/st/ops/gpu/test_batch_matmul.py b/tests/st/ops/gpu/test_batch_matmul.py index 361280dd9b4..e8450bd81d4 100644 --- a/tests/st/ops/gpu/test_batch_matmul.py +++ b/tests/st/ops/gpu/test_batch_matmul.py @@ -119,7 +119,7 @@ def test_4d_transpose_ab(): [[5612, 5810, 6008, 6206]]]] assert (output.asnumpy() == expect).all() -def test_4d_fp16(): +def test_4D_fp16(): input_x = Tensor(np.arange(2 * 4 * 1 * 3).reshape(2, 4, 1, 3), mstype.float16) input_y = Tensor(np.arange(2 * 4 * 3 * 4).reshape(2, 4, 3, 4), mstype.float16) diff --git a/tests/st/ops/gpu/test_batchnorm_fold2_op.py b/tests/st/ops/gpu/test_batchnorm_fold2_op.py index 47b11b7b7a9..f888666d201 100644 --- a/tests/st/ops/gpu/test_batchnorm_fold2_op.py +++ b/tests/st/ops/gpu/test_batchnorm_fold2_op.py @@ -68,10 +68,11 @@ def test_batchnrom_fold2(): current_step = np.array([0]).astype('int32') output = net(Tensor(x), Tensor(beta), Tensor(gamma), Tensor(batch_std), Tensor(batch_mean), Tensor(running_std), Tensor(running_mean), Tensor(current_step)) - expect = ((x + beta.reshape(-1, 1, 1) - - (gamma * running_mean / running_std).reshape(-1, 1, 1) if current_step >= freeze_bn else - x * (running_std / batch_std).reshape(-1, 1, 1) + - (beta - gamma * batch_mean / batch_std).reshape(-1, 1, 1))) + expect = (x + beta.reshape(-1, 1, + 1) - (gamma * running_mean / running_std).reshape(-1, 1, + 1) if current_step >= freeze_bn else + x * (running_std / batch_std).reshape(-1, 1, 1) + (beta - gamma * batch_mean / batch_std).reshape(-1, 1, + 1)) error = np.ones(shape=expect.shape) * 1.0e-6 diff = 
output.asnumpy() - expect assert np.all(diff < error) @@ -80,9 +81,11 @@ def test_batchnrom_fold2(): current_step = np.array([100000]).astype('int32') output = net(Tensor(x), Tensor(beta), Tensor(gamma), Tensor(batch_std), Tensor(batch_mean), Tensor(running_std), Tensor(running_mean), Tensor(current_step)) - expect = ((x + beta.reshape(-1, 1, 1) - (gamma * running_mean / running_std).reshape(-1, 1, 1) - if current_step >= freeze_bn else x * (batch_std / running_std).reshape(-1, 1, 1) + - (beta - gamma * batch_mean / batch_std).reshape(-1, 1, 1))) + expect = (x + beta.reshape(-1, 1, + 1) - (gamma * running_mean / running_std).reshape(-1, 1, + 1) if current_step >= freeze_bn else + x * (batch_std / running_std).reshape(-1, 1, 1) + (beta - gamma * batch_mean / batch_std).reshape(-1, 1, + 1)) error = np.ones(shape=expect.shape) * 1.0e-6 diff = output.asnumpy() - expect assert np.all(diff < error) diff --git a/tests/st/ops/gpu/test_batchnorm_fold_grad_op.py b/tests/st/ops/gpu/test_batchnorm_fold_grad_op.py index 5d20a95ac07..655f3446245 100644 --- a/tests/st/ops/gpu/test_batchnorm_fold_grad_op.py +++ b/tests/st/ops/gpu/test_batchnorm_fold_grad_op.py @@ -38,8 +38,8 @@ class Net(nn.Cell): def np_result(d_batch_mean, d_batch_std, x, batch_mean, batch_std): n = x.shape[0] * x.shape[2] * x.shape[3] - dx = (d_batch_mean.reshape(1, -1, 1, 1) / n + d_batch_std.reshape(1, -1, 1, 1) * - (x - batch_mean.reshape(1, -1, 1, 1)) / batch_std.reshape(1, -1, 1, 1) / n) + dx = d_batch_mean.reshape(1, -1, 1, 1) / n + d_batch_std.reshape(1, -1, 1, 1) * ( + x - batch_mean.reshape(1, -1, 1, 1)) / batch_std.reshape(1, -1, 1, 1) / n return dx diff --git a/tests/st/ops/gpu/test_dense_op.py b/tests/st/ops/gpu/test_dense_op.py index 125b273a820..220f7ae051d 100644 --- a/tests/st/ops/gpu/test_dense_op.py +++ b/tests/st/ops/gpu/test_dense_op.py @@ -172,8 +172,8 @@ class Grad(nn.Cell): self.grad = GradOperation(name="get_all", get_all=True, sens_param=True) self.network = network - def construct(self, inputs, bias, dy): - return self.grad(self.network)(inputs, bias, dy) + def construct(self, input_, bias, dy): + return self.grad(self.network)(input_, bias, dy) @pytest.mark.level0 diff --git a/tests/st/ops/gpu/test_lstm_op.py b/tests/st/ops/gpu/test_lstm_op.py index d73850668e0..f0a58c2d36c 100644 --- a/tests/st/ops/gpu/test_lstm_op.py +++ b/tests/st/ops/gpu/test_lstm_op.py @@ -783,10 +783,6 @@ def test_grad(): bidirectional = True dropout = 0.0 - num_directions = 1 - if bidirectional: - num_directions = 2 - net = Grad(Net(seq_len, batch_size, input_size, hidden_size, num_layers, has_bias, bidirectional, dropout)) dy = np.array([[[-3.5471e-01, 7.0540e-01, -7.5945e-01, -1.2322e+00], @@ -804,7 +800,7 @@ def test_grad(): [[-1.6032e+00, -1.8818e-01, 7.0441e-01, -2.8765e+00], [1.0065e-01, 9.2045e-01, 2.7426e-01, 2.6196e-01]]]).astype(np.float32) - dx, dh, dc, dw = net(Tensor(dy)) + dx, dh, dc, _ = net(Tensor(dy)) expect_dx = np.array([[[0.01697153, -0.0096909, 0.01306139, 0.00863109, -0.00122794, -0.00746152, -0.00879683, 0.00643571, 0.0015958, 0.01480642], [0.05794962, -0.02326604, 0.01862703, 0.02053947, 0.02607713, -0.01278067, 0.04250786, @@ -964,12 +960,8 @@ def test_lstm_dropout(): bidirectional = False dropout = 1.0 - num_directions = 1 - if bidirectional: - num_directions = 2 - net = LstmNetWithDropout(seq_len, batch_size, input_size, hidden_size, num_layers, has_bias, bidirectional, dropout) - y, h, c, _, _ = net() + y, _, _, _, _ = net() expect_y = np.array([[[-0.45210335, -0.0844336], [-0.14677924, 0.07140275]], diff 
--git a/tests/st/ops/gpu/test_minimum_op.py b/tests/st/ops/gpu/test_minimum_op.py index 9ba8cc53096..c5669b17e05 100644 --- a/tests/st/ops/gpu/test_minimum_op.py +++ b/tests/st/ops/gpu/test_minimum_op.py @@ -178,7 +178,8 @@ def test_broadcast(): expect_dx2 = np.array([[[[0., 4.250458, 2.5030296, 3.623167, 6.4171505, 7.2115746]], [[0., 4.367449, 2.803152, 2.5352, 0., 0.]], [[0.7087075, 0., 2.040332, 2.1372325, 0., 2.9222295]], - [[1.0278877, 5.247942, 2.6855955, 5.494814, 3.5657988, 0.66265094]]]]).astype(np.float32) + [[1.0278877, 5.247942, 2.6855955, 5.494814, 3.5657988, + 0.66265094]]]]).astype(np.float32) net = Grad(MinimumNet()) output_ms = net(Tensor(x1_np), Tensor(x2_np), Tensor(dy_np)) diff --git a/tests/st/ops/gpu/test_reduce_sum_op.py b/tests/st/ops/gpu/test_reduce_sum_op.py index 94a9ea3de2e..6c16235a804 100644 --- a/tests/st/ops/gpu/test_reduce_sum_op.py +++ b/tests/st/ops/gpu/test_reduce_sum_op.py @@ -182,88 +182,88 @@ def test_ReduceSum(): diff0 = abs(output[0].asnumpy() - expect0) error0 = np.ones(shape=expect0.shape) * 1.0e-5 assert np.all(diff0 < error0) - assert (output[0].shape() == expect0.shape) + assert output[0].shape() == expect0.shape expect1 = np.sum(x1, axis=axis1, keepdims=keep_dims1) diff1 = abs(output[1].asnumpy() - expect1) error1 = np.ones(shape=expect1.shape) * 1.0e-5 assert np.all(diff1 < error1) - assert (output[1].shape() == expect1.shape) + assert output[1].shape() == expect1.shape expect2 = np.sum(x2, axis=axis2, keepdims=keep_dims2) diff2 = abs(output[2].asnumpy() - expect2) error2 = np.ones(shape=expect2.shape) * 1.0e-5 assert np.all(diff2 < error2) - assert (output[2].shape() == expect2.shape) + assert output[2].shape() == expect2.shape expect3 = np.sum(x3, axis=axis3, keepdims=keep_dims3) diff3 = abs(output[3].asnumpy() - expect3) error3 = np.ones(shape=expect3.shape) * 1.0e-5 assert np.all(diff3 < error3) - assert (output[3].shape() == expect3.shape) + assert output[3].shape() == expect3.shape expect4 = np.sum(x4, axis=np_axis4, keepdims=keep_dims4) diff4 = abs(output[4].asnumpy() - expect4) error4 = np.ones(shape=expect4.shape) * 1.0e-5 assert np.all(diff4 < error4) - assert (output[4].shape() == expect4.shape) + assert output[4].shape() == expect4.shape expect5 = np.sum(x5, axis=np_axis5, keepdims=keep_dims5) diff5 = abs(output[5].asnumpy() - expect5) error5 = np.ones(shape=expect5.shape) * 1.0e-5 assert np.all(diff5 < error5) - assert (output[5].shape() == expect5.shape) + assert output[5].shape() == expect5.shape expect6 = np.sum(x6, axis=axis6, keepdims=keep_dims6) diff6 = abs(output[6].asnumpy() - expect6) error6 = np.ones(shape=expect6.shape) * 1.0e-5 assert np.all(diff6 < error6) - assert (output[6].shape() == expect6.shape) + assert output[6].shape() == expect6.shape expect7 = np.sum(x7, axis=axis7, keepdims=keep_dims7) diff7 = abs(output[7].asnumpy() - expect7) error7 = np.ones(shape=expect7.shape) * 1.0e-5 assert np.all(diff7 < error7) - assert (output[7].shape() == expect7.shape) + assert output[7].shape() == expect7.shape expect8 = np.sum(x8, axis=axis8, keepdims=keep_dims8) diff8 = abs(output[8].asnumpy() - expect8) error8 = np.ones(shape=expect8.shape) * 1.0e-5 assert np.all(diff8 < error8) - assert (output[8].shape() == expect8.shape) + assert output[8].shape() == expect8.shape expect9 = np.sum(x9, axis=axis9, keepdims=keep_dims9) diff9 = abs(output[9].asnumpy() - expect9) error9 = np.ones(shape=expect9.shape) * 1.0e-5 assert np.all(diff9 < error9) - assert (output[9].shape() == expect9.shape) + assert output[9].shape() == 
expect9.shape expect10 = np.sum(x10, axis=axis10, keepdims=keep_dims10) diff10 = abs(output[10].asnumpy() - expect10) error10 = np.ones(shape=expect10.shape) * 1.0e-5 assert np.all(diff10 < error10) - assert (output[10].shape() == expect10.shape) + assert output[10].shape() == expect10.shape expect11 = np.sum(x11, axis=axis11, keepdims=keep_dims11) diff11 = abs(output[11].asnumpy() - expect11) error11 = np.ones(shape=expect11.shape) * 1.0e-5 assert np.all(diff11 < error11) - assert (output[11].shape() == expect11.shape) + assert output[11].shape() == expect11.shape expect12 = np.sum(x12, axis=axis12, keepdims=keep_dims12) diff12 = abs(output[12].asnumpy() - expect12) error12 = np.ones(shape=expect12.shape) * 1.0e-5 assert np.all(diff12 < error12) - assert (output[12].shape() == expect12.shape) + assert output[12].shape() == expect12.shape expect13 = np.sum(x13, axis=axis13, keepdims=keep_dims13) diff13 = abs(output[13].asnumpy() - expect13) error13 = np.ones(shape=expect13.shape) * 1.0e-5 assert np.all(diff13 < error13) - assert (output[13].shape() == expect13.shape) + assert output[13].shape() == expect13.shape expect14 = np.sum(x14, axis=np_axis14, keepdims=keep_dims14) diff14 = abs(output[14].asnumpy() - expect14) error14 = np.ones(shape=expect14.shape) * 1.0e-5 assert np.all(diff14 < error14) - assert (output[14].shape() == expect14.shape) + assert output[14].shape() == expect14.shape diff --git a/tests/st/ops/gpu/test_relu_grad_op.py b/tests/st/ops/gpu/test_relu_grad_op.py index 5fb1fb6ad0b..c63e4920386 100644 --- a/tests/st/ops/gpu/test_relu_grad_op.py +++ b/tests/st/ops/gpu/test_relu_grad_op.py @@ -19,7 +19,6 @@ import pytest import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor -from mindspore.ops import operations as P from mindspore.ops.operations import _grad_ops as G @@ -42,7 +41,7 @@ def test_relu_grad(): dy = Tensor(np.array([[[[1, 0, 1], [0, 1, 0], [1, 1, 1]]]]).astype(np.float32)) - expect = np.array([[[[0, 0, 1, ], [0, 0, 0, ], [1, 1, 0.]]]]).astype(np.float32) + expect = np.array([[[[0, 0, 1,], [0, 0, 0,], [1, 1, 0.]]]]).astype(np.float32) error = np.ones(shape=[3, 3]) * 1.0e-6 context.set_context(mode=context.GRAPH_MODE, device_target="GPU") diff --git a/tests/st/ops/gpu/test_softmax_cross_entropy_with_logits_op.py b/tests/st/ops/gpu/test_softmax_cross_entropy_with_logits_op.py index 8fd70b8172e..f8d3ec0e8b3 100644 --- a/tests/st/ops/gpu/test_softmax_cross_entropy_with_logits_op.py +++ b/tests/st/ops/gpu/test_softmax_cross_entropy_with_logits_op.py @@ -20,6 +20,7 @@ import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor + class NetSoftmaxCrossEntropyWithLogits(nn.Cell): def __init__(self): super(NetSoftmaxCrossEntropyWithLogits, self).__init__() diff --git a/tests/st/ops/gpu/test_sparse_softmax_cross_entropy_with_logits_op.py b/tests/st/ops/gpu/test_sparse_softmax_cross_entropy_with_logits_op.py index df4b6924103..d18eeeb0ad5 100644 --- a/tests/st/ops/gpu/test_sparse_softmax_cross_entropy_with_logits_op.py +++ b/tests/st/ops/gpu/test_sparse_softmax_cross_entropy_with_logits_op.py @@ -20,6 +20,7 @@ import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor + class NetSparseSoftmaxCrossEntropyWithLogits(nn.Cell): def __init__(self): super(NetSparseSoftmaxCrossEntropyWithLogits, self).__init__() diff --git a/tests/st/ops/test_rmsprop.py b/tests/st/ops/test_rmsprop.py index 7f48aa6335a..d0b65d627f1 100644 --- a/tests/st/ops/test_rmsprop.py +++ 
b/tests/st/ops/test_rmsprop.py @@ -36,8 +36,7 @@ class NetRMSProp(nn.Cell): def construct(self, var, g, mg, rms, mom, lr, decay, momentum, epsilon): if self.use_centered: return self.rms_opt(var, mg, rms, mom, g, lr, decay, momentum, epsilon) - else: - return self.rms_opt(var, rms, mom, lr, g, decay, momentum, epsilon) + return self.rms_opt(var, rms, mom, lr, g, decay, momentum, epsilon) def rmsprop_numpy(variable, gradients, mean_square, moment, diff --git a/tests/ut/python/ops/test_array_ops.py b/tests/ut/python/ops/test_array_ops.py index 2328bdfc834..3ade4b983d8 100644 --- a/tests/ut/python/ops/test_array_ops.py +++ b/tests/ut/python/ops/test_array_ops.py @@ -26,7 +26,8 @@ from mindspore.common import dtype as mstype from mindspore.nn import Cell from mindspore.ops import operations as P from mindspore.ops import prim_attr_register -from mindspore.ops.primitive import Primitive, PrimitiveWithInfer +from mindspore.ops.primitive import PrimitiveWithInfer +import mindspore.context as context from ..ut_filter import non_graph_engine from ....mindspore_test_framework.mindspore_test import mindspore_test from ....mindspore_test_framework.pipeline.forward.compile_forward \ @@ -305,8 +306,6 @@ test_exec_case = functools.reduce(lambda x, y: x + y, test_case_lists) # pytest tests/python/ops/test_ops.py::test_backward -k LayerNorm -import mindspore.context as context - @non_graph_engine @mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config) diff --git a/tests/ut/python/ops/test_array_ops_check.py b/tests/ut/python/ops/test_array_ops_check.py index 015b6b03c07..d28beff0efa 100755 --- a/tests/ut/python/ops/test_array_ops_check.py +++ b/tests/ut/python/ops/test_array_ops_check.py @@ -13,26 +13,15 @@ # limitations under the License. # ============================================================================ """ test ops """ -import functools import numpy as np import mindspore.nn as nn -import mindspore.ops.composite as C from mindspore import Tensor -from mindspore import ops from mindspore.common import dtype as mstype -from mindspore.common.api import _executor -from mindspore.common.parameter import Parameter -from mindspore.ops import functional as F from mindspore.ops import operations as P -from mindspore.ops.operations import _grad_ops as G -from ..ut_filter import non_graph_engine from ....mindspore_test_framework.mindspore_test import mindspore_test from ....mindspore_test_framework.pipeline.forward.compile_forward \ - import (pipeline_for_compile_forward_ge_graph_for_case_by_case_config, - pipeline_for_compile_forward_ge_graph_for_case_by_case_config_exception) -from ....mindspore_test_framework.pipeline.gradient.compile_gradient \ - import pipeline_for_compile_grad_ge_graph_for_case_by_case_config + import pipeline_for_compile_forward_ge_graph_for_case_by_case_config_exception class ExpandDimsNet(nn.Cell): diff --git a/tests/ut/python/ops/test_bprop_disorder.py b/tests/ut/python/ops/test_bprop_disorder.py index 0760d9f2bb7..fc0a7ca7e54 100644 --- a/tests/ut/python/ops/test_bprop_disorder.py +++ b/tests/ut/python/ops/test_bprop_disorder.py @@ -17,6 +17,7 @@ import functools import numpy as np import mindspore.nn as nn +import mindspore.context as context from mindspore import Tensor, Parameter from mindspore.common.parameter import ParameterTuple from mindspore.ops import composite as C @@ -89,8 +90,6 @@ test_exec_case = functools.reduce(lambda x, y: x + y, test_case_lists) # pytest tests/python/ops/test_ops.py::test_backward -k LayerNorm -import 
mindspore.context as context - @non_graph_engine @mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config) diff --git a/tests/ut/python/ops/test_control_ops.py b/tests/ut/python/ops/test_control_ops.py index 1e154576168..0e965b6fb31 100644 --- a/tests/ut/python/ops/test_control_ops.py +++ b/tests/ut/python/ops/test_control_ops.py @@ -42,10 +42,10 @@ def cond_data_test(x_init, y_init): def construct(self, x, y): cond = self.less(x, y) - st1, sf1 = self.switch(x, cond) - st2, sf2 = self.switch(y, cond) + st1, _ = self.switch(x, cond) + st2, _ = self.switch(y, cond) add_ret = self.add(st1, st2) - st3, sf3 = self.switch(self.value, cond) + _, sf3 = self.switch(self.value, cond) sq_ret = self.square(sf3) ret = self.merge((add_ret, sq_ret)) return ret[0] @@ -125,7 +125,7 @@ def test_if_str_is_not_none_right(): self.z = z def construct(self, x, y): - if self.z == None: + if self.z is None: ret = x else: ret = y @@ -146,7 +146,7 @@ def test_if_str_is_not_none_left(): self.z = z def construct(self, x, y): - if None == self.z: + if self.z is None: ret = x else: ret = y @@ -167,7 +167,7 @@ def test_if_none_equal_none(): self.z = z def construct(self, x, y): - if self.z == None: + if self.z is None: ret = x else: ret = y diff --git a/tests/ut/python/ops/test_list.py b/tests/ut/python/ops/test_list.py index 35ff469a31c..f5f919b998b 100644 --- a/tests/ut/python/ops/test_list.py +++ b/tests/ut/python/ops/test_list.py @@ -16,6 +16,7 @@ import functools import numpy as np import mindspore.nn as nn +import mindspore.context as context from mindspore import Tensor from mindspore.ops import operations as P from ..ut_filter import non_graph_engine @@ -132,7 +133,7 @@ def test_list_append_2(): class ListOperate(nn.Cell): - def __init__(self, ): + def __init__(self,): super(ListOperate, self).__init__() def construct(self, t, l): @@ -211,9 +212,6 @@ test_exec_case = functools.reduce(lambda x, y: x + y, test_case_lists) # pytest tests/python/ops/test_ops.py::test_backward -k LayerNorm -import mindspore.context as context - - @non_graph_engine @mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config) def test_exec(): diff --git a/tests/ut/python/ops/test_math_ops.py b/tests/ut/python/ops/test_math_ops.py index 6f500c45f77..d1830e93c1b 100755 --- a/tests/ut/python/ops/test_math_ops.py +++ b/tests/ut/python/ops/test_math_ops.py @@ -22,9 +22,7 @@ import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor from mindspore.common import dtype as mstype -from mindspore.common.api import _executor from mindspore.ops import composite as C -from mindspore.ops import functional as F from mindspore.ops import operations as P from mindspore.ops import prim_attr_register, PrimitiveWithInfer from ..ut_filter import non_graph_engine @@ -306,8 +304,8 @@ class NetWithLossCumSum(nn.Cell): self.loss = VirtualLoss() self.network = network - def construct(self, input): - predict = self.network(input) + def construct(self, input_): + predict = self.network(input_) return self.loss(predict) @@ -318,8 +316,8 @@ class GradWrapCumSum(nn.Cell): super(GradWrapCumSum, self).__init__() self.network = network - def construct(self, input): - return C.grad(self.network)(input) + def construct(self, input_): + return C.grad(self.network)(input_) class NetCumSum(nn.Cell): @@ -330,8 +328,8 @@ class NetCumSum(nn.Cell): self.cumsum = P.CumSum() self.axis = 1 - def construct(self, input): - return self.cumsum(input, self.axis) + def construct(self, input_): + return 
self.cumsum(input_, self.axis) class SignNet(nn.Cell): @@ -444,9 +442,6 @@ test_exec_case = functools.reduce(lambda x, y: x + y, test_case_lists) # pytest tests/python/ops/test_ops.py::test_backward -k LayerNorm -import mindspore.context as context - - @non_graph_engine @mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config) def test_exec(): diff --git a/tests/ut/python/ops/test_math_ops_check.py b/tests/ut/python/ops/test_math_ops_check.py index 14bbe493bda..1c4ab8c76d8 100755 --- a/tests/ut/python/ops/test_math_ops_check.py +++ b/tests/ut/python/ops/test_math_ops_check.py @@ -13,26 +13,16 @@ # limitations under the License. # ============================================================================ """ test ops """ -import functools import numpy as np import mindspore.nn as nn -import mindspore.ops.composite as C from mindspore import Tensor -from mindspore import ops from mindspore.common import dtype as mstype -from mindspore.common.api import _executor from mindspore.common.parameter import Parameter -from mindspore.ops import functional as F from mindspore.ops import operations as P -from mindspore.ops.operations import _grad_ops as G -from ..ut_filter import non_graph_engine from ....mindspore_test_framework.mindspore_test import mindspore_test from ....mindspore_test_framework.pipeline.forward.compile_forward \ - import (pipeline_for_compile_forward_ge_graph_for_case_by_case_config, - pipeline_for_compile_forward_ge_graph_for_case_by_case_config_exception) -from ....mindspore_test_framework.pipeline.gradient.compile_gradient \ - import pipeline_for_compile_grad_ge_graph_for_case_by_case_config + import pipeline_for_compile_forward_ge_graph_for_case_by_case_config_exception class AssignAddNet(nn.Cell): diff --git a/tests/ut/python/ops/test_momentum.py b/tests/ut/python/ops/test_momentum.py index e6aeaab8ce9..8889feb4fbd 100644 --- a/tests/ut/python/ops/test_momentum.py +++ b/tests/ut/python/ops/test_momentum.py @@ -17,6 +17,7 @@ import functools import numpy as np import mindspore.nn as nn +import mindspore.context as context from mindspore import Parameter, ParameterTuple, Tensor from mindspore.ops import composite as C from mindspore.ops import functional as F @@ -126,8 +127,6 @@ test_exec_case = functools.reduce(lambda x, y: x + y, test_case_lists) # pytest tests/python/ops/test_ops.py::test_backward -k LayerNorm -import mindspore.context as context - @non_graph_engine @mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config) diff --git a/tests/ut/python/ops/test_multitype_ops.py b/tests/ut/python/ops/test_multitype_ops.py index eecdf359be9..54997486e99 100644 --- a/tests/ut/python/ops/test_multitype_ops.py +++ b/tests/ut/python/ops/test_multitype_ops.py @@ -17,6 +17,7 @@ import numpy as np from functools import partial, reduce import mindspore.nn as nn +import mindspore.context as context from mindspore import Tensor from mindspore.ops import functional as F, composite as C from ..ut_filter import non_graph_engine @@ -28,7 +29,7 @@ from ....mindspore_test_framework.pipeline.forward.compile_forward \ class ScalarAddScalar(nn.Cell): """ ScalarAddScalar definition """ - def __init__(self, ): + def __init__(self,): super(ScalarAddScalar, self).__init__() self.n1 = 1.2 self.n2 = 1.3 @@ -40,7 +41,7 @@ class ScalarAddScalar(nn.Cell): class ScalarAddTensor1(nn.Cell): """ ScalarAddTensor1 definition """ - def __init__(self, ): + def __init__(self,): super(ScalarAddTensor1, self).__init__() self.t1 = Tensor(np.ones([2, 1, 2, 2], np.float32)) 
self.n1 = 1.2 @@ -53,7 +54,7 @@ class ScalarAddTensor1(nn.Cell): class ScalarAddTensor2(nn.Cell): """ ScalarAddTensor2 definition """ - def __init__(self, ): + def __init__(self,): super(ScalarAddTensor2, self).__init__() self.t1 = Tensor(np.ones([2, 1, 2, 2], np.float32)) self.n1 = 1.2 @@ -66,7 +67,7 @@ class ScalarAddTensor2(nn.Cell): class TensorAddScalar(nn.Cell): """ TensorAddScalar definition """ - def __init__(self, ): + def __init__(self,): super(TensorAddScalar, self).__init__() self.t1 = Tensor(np.ones([2, 1, 2, 2], np.float32)) self.n1 = 1.2 @@ -78,7 +79,7 @@ class TensorAddScalar(nn.Cell): class ScalarTensorSub(nn.Cell): """ ScalarTensorSub definition """ - def __init__(self, ): + def __init__(self,): super(ScalarTensorSub, self).__init__() self.t1 = Tensor(np.ones([2, 1, 2, 2], np.float32)) self.n1 = 2.1 @@ -94,7 +95,7 @@ class ScalarTensorSub(nn.Cell): class ScalarTensorMul(nn.Cell): """ ScalarTensorMul definition """ - def __init__(self, ): + def __init__(self,): super(ScalarTensorMul, self).__init__() self.t1 = Tensor(np.ones([2, 1, 2, 2], np.float32)) self.n1 = 2.1 @@ -110,7 +111,7 @@ class ScalarTensorMul(nn.Cell): class ScalarTensorDiv(nn.Cell): """ ScalarTensorDiv definition """ - def __init__(self, ): + def __init__(self,): super(ScalarTensorDiv, self).__init__() self.t1 = Tensor(np.ones([2, 1, 2, 2], np.float32)) self.n1 = 2.1 @@ -132,8 +133,7 @@ class EqualClass(nn.Cell): def construct(self): if self.n1 == self.n2: return self.n1 - else: - return self.n2 + return self.n2 grad_scale = C.MultitypeFuncGraph("grad_scale") @@ -151,7 +151,7 @@ class MapPartialNet(nn.Cell): def __init__(self): super(MapPartialNet, self).__init__() self.reciprocal_scale = 1.2 - self.x1 = Tensor(np.ones([2, 1, 2, ], np.float32)) + self.x1 = Tensor(np.ones([2, 1, 2,], np.float32)) self.x2 = Tensor(np.ones([2, 1, 2, 2], np.float32)) def construct(self, x, y): @@ -188,7 +188,7 @@ class UnZipNet(nn.Cell): class ScalarTensorOp2Cast(nn.Cell): - def __init__(self, ): + def __init__(self,): super(ScalarTensorOp2Cast, self).__init__() self.f = 1.2 self.t = Tensor(np.ones([2, 1, 2, 2], np.float16)) @@ -285,8 +285,6 @@ test_exec_case = reduce(lambda x, y: x + y, test_case_lists) # pytest tests/python/ops/test_ops.py::test_backward -k LayerNorm -import mindspore.context as context - @non_graph_engine @mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config) diff --git a/tests/ut/python/ops/test_nn_ops.py b/tests/ut/python/ops/test_nn_ops.py index 8f3fae1d71a..1216944fafc 100644 --- a/tests/ut/python/ops/test_nn_ops.py +++ b/tests/ut/python/ops/test_nn_ops.py @@ -13,7 +13,6 @@ # limitations under the License. # ============================================================================ """ test nn ops """ -import functools import numpy as np import mindspore diff --git a/tests/ut/python/ops/test_nn_ops_check.py b/tests/ut/python/ops/test_nn_ops_check.py index e52af518593..9a8b4278610 100755 --- a/tests/ut/python/ops/test_nn_ops_check.py +++ b/tests/ut/python/ops/test_nn_ops_check.py @@ -13,26 +13,15 @@ # limitations under the License. 
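[Illustrative note, not part of the patch] Two more patterns recur in the hunks above: comparisons written as "self.z == None" become "self.z is None" (pylint C0121; None is a singleton and should be checked by identity), and an else branch whose only statement is a return is dropped in favor of an early return (pylint R1705, no-else-return), as in EqualClass and NetRMSProp. A small self-contained sketch of both; the function and argument names are hypothetical:

def pick(z, x, y):
    # Identity check against the None singleton, instead of "z == None".
    if z is None:
        return x
    # No "else:" needed -- the branch above already returned.
    return y

print(pick(None, "first", "second"))
print(pick(1, "first", "second"))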
 # ============================================================================
 """ test ops """
-import functools
 import numpy as np
 import mindspore.nn as nn
 from mindspore import Tensor
-from mindspore import ops
-from mindspore.common import dtype as mstype
-from mindspore.common.api import _executor
-from mindspore.common.parameter import Parameter
-from mindspore.ops import composite as C
 from mindspore.ops import functional as F
 from mindspore.ops import operations as P
-from mindspore.ops.operations import _grad_ops as G
-from ..ut_filter import non_graph_engine
 from ....mindspore_test_framework.mindspore_test import mindspore_test
 from ....mindspore_test_framework.pipeline.forward.compile_forward \
-    import (pipeline_for_compile_forward_ge_graph_for_case_by_case_config,
-            pipeline_for_compile_forward_ge_graph_for_case_by_case_config_exception)
-from ....mindspore_test_framework.pipeline.gradient.compile_gradient \
-    import pipeline_for_compile_grad_ge_graph_for_case_by_case_config
+    import pipeline_for_compile_forward_ge_graph_for_case_by_case_config_exception
 class Conv2DBackpropInputNet(nn.Cell):
diff --git a/tests/ut/python/ops/test_ops.py b/tests/ut/python/ops/test_ops.py
index a884209feb2..25c76033f50 100755
--- a/tests/ut/python/ops/test_ops.py
+++ b/tests/ut/python/ops/test_ops.py
@@ -88,8 +88,8 @@ class NetForPackInput(nn.Cell):
     def construct(self, *args):
         t = ()
-        for i in range(len(args)):
-            t = t + (self.mul(args[i], args[i]),)
+        for element in args:
+            t = t + (self.mul(element, element),)
         return self.op(t)
@@ -136,8 +136,8 @@ class ArgmaxNet(nn.Cell):
         super(ArgmaxNet, self).__init__()
         self.argmax = P.Argmax(axis=1)
-    def construct(self, input):
-        return self.argmax(input)
+    def construct(self, input_):
+        return self.argmax(input_)
 class ArgminNet(nn.Cell):
@@ -145,8 +145,8 @@ class ArgminNet(nn.Cell):
         super(ArgminNet, self).__init__()
         self.argmin = P.Argmin(axis=1)
-    def construct(self, input):
-        return self.argmin(input)
+    def construct(self, input_):
+        return self.argmin(input_)
 class CumSumNet(nn.Cell):
@@ -155,8 +155,8 @@ class CumSumNet(nn.Cell):
         self.cumsum = P.CumSum()
         self.axis = 1
-    def construct(self, input):
-        return self.cumsum(input, self.axis)
+    def construct(self, input_):
+        return self.cumsum(input_, self.axis)
 class SummaryNet(nn.Cell):
@@ -1156,7 +1156,7 @@ test_case_array_ops = [
         'desc_inputs': [(Tensor(np.array([1], np.float32)),
                          Tensor(np.array([1], np.float32)),
                          Tensor(np.array([1], np.float32)))],
-        'desc_bprop': [[3, ]]}),
+        'desc_bprop': [[3,]]}),
     ('Pack_0', {
         'block': NetForPackInput(P.Pack()),
         'desc_inputs': [[2, 2], [2, 2], [2, 2]],
@@ -1302,7 +1302,7 @@ test_case = functools.reduce(lambda x, y: x + y, test_case_lists)
 test_exec_case = test_case
 test_backward_exec_case = filter(lambda x: 'skip' not in x[1] or
-                                 'backward' not in x[1]['skip'], test_case)
+                                 'backward' not in x[1]['skip'], test_case)
 @non_graph_engine
diff --git a/tests/ut/python/ops/test_ops_check.py b/tests/ut/python/ops/test_ops_check.py
index 418e54c4c86..6dc4ac6b04b 100644
--- a/tests/ut/python/ops/test_ops_check.py
+++ b/tests/ut/python/ops/test_ops_check.py
@@ -16,8 +16,8 @@
 import functools
 import logging
 import numpy as np
-import pytest
+import mindspore.context as context
 from mindspore import Tensor
 from mindspore import nn
 from mindspore.common.api import _executor
@@ -220,8 +220,6 @@ test_exec_case = functools.reduce(lambda x, y: x + y, test_case_lists)
 # pytest tests/python/ops/test_ops.py::test_backward -k LayerNorm
-import mindspore.context as context
-
 @non_graph_engine
 @mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config)
diff --git a/tests/ut/python/ops/test_python_operators.py b/tests/ut/python/ops/test_python_operators.py
index cab6fef68c0..dd85c3310c5 100644
--- a/tests/ut/python/ops/test_python_operators.py
+++ b/tests/ut/python/ops/test_python_operators.py
@@ -16,7 +16,6 @@
 import functools
 import numpy as np
-import mindspore as ms
 from mindspore import Tensor
 from mindspore import context
 from mindspore import nn
@@ -40,7 +39,7 @@ class ComparisonOpsNet(nn.Cell):
         d = y >= x
         e = x < y
         f = x < 1.0
-        g = 1.0 > y
+        g = y < 1.0
         h = y > x
         i = y == 3.0
         j = x != 4
diff --git a/tests/ut/python/ops/test_tuple.py b/tests/ut/python/ops/test_tuple.py
index 9e6b0b92029..eafaaede916 100644
--- a/tests/ut/python/ops/test_tuple.py
+++ b/tests/ut/python/ops/test_tuple.py
@@ -18,9 +18,7 @@ import numpy as np
 import mindspore.context as context
 import mindspore.nn as nn
 from mindspore import Tensor
-from mindspore import context
 from mindspore import dtype as mstype
-from mindspore.ops import operations as P
 from ..ut_filter import non_graph_engine
 from ....mindspore_test_framework.mindspore_test import mindspore_test
 from ....mindspore_test_framework.pipeline.forward.compile_forward \
diff --git a/tests/ut/python/ops/test_tuple_slice.py b/tests/ut/python/ops/test_tuple_slice.py
index eeb7f382e19..eb21f3f2eb0 100644
--- a/tests/ut/python/ops/test_tuple_slice.py
+++ b/tests/ut/python/ops/test_tuple_slice.py
@@ -14,7 +14,6 @@
 # ============================================================================
 """ test_tuple_slice """
 import numpy as np
-import pytest
 import mindspore.ops.operations as P
 from mindspore import Tensor
diff --git a/tests/ut/python/pynative_mode/test_cell_bprop.py b/tests/ut/python/pynative_mode/test_cell_bprop.py
index 041ae9ef047..ce38916f1ba 100644
--- a/tests/ut/python/pynative_mode/test_cell_bprop.py
+++ b/tests/ut/python/pynative_mode/test_cell_bprop.py
@@ -257,7 +257,7 @@ def test_grad_inline_bprop_two_input():
     grads = C.grad_all(net)(input1, input2)
     assert (grads[0].asnumpy() == np.array([2, 2]).astype(np.float32)).all()
     assert (grads[1].asnumpy() == np.array([2, 2]).astype(np.float32)).all()
-    assert (len(grads) == 2)
+    assert len(grads) == 2
 class TwoInputBprop(nn.Cell):
@@ -317,7 +317,7 @@ def test_grad_inline_bprop_multi_input():
     grads = C.grad_all(net)(input1, input2)
     assert (grads[0].asnumpy() == np.array([[12, 12], [12, 12]]).astype(np.float32)).all()
     assert (grads[1].asnumpy() == np.array([[19, 19], [19, 19]]).astype(np.float32)).all()
-    assert (len(grads) == 2)
+    assert len(grads) == 2
 class MulAddWithParam(nn.Cell):
diff --git a/tests/ut/python/pynative_mode/test_framstruct.py b/tests/ut/python/pynative_mode/test_framstruct.py
index db7914a2927..bd00073ce65 100644
--- a/tests/ut/python/pynative_mode/test_framstruct.py
+++ b/tests/ut/python/pynative_mode/test_framstruct.py
@@ -476,10 +476,10 @@ def test_foo():
 def double_nested_loop(x):
     i = 0
     s = 0
-    while (i < x):
+    while i < x:
         j = 0
         i = i + 1
-        while (j < 3):
+        while j < 3:
             j = j + 1
             s = s + j
     return s
@@ -859,7 +859,7 @@ def grad_refactor_14(a, b):
         return a * b
     def inner3(x):
-        if (x > 2):
+        if x > 2:
             return a
         return b