From 66bbdb4a31e57431e96dc9c3d9c0cac02d82d9c5 Mon Sep 17 00:00:00 2001 From: buxue Date: Tue, 9 Jun 2020 12:18:51 +0800 Subject: [PATCH] change tensor dtype and shape from function to attr --- example/resnet50_imagenet2012/train.py | 8 +- .../model/thor_layer.py | 6 +- example/yolov3_coco2017/train.py | 2 +- mindspore/ccsrc/ir/tensor.cc | 48 +++++----- mindspore/common/dtype.py | 4 +- mindspore/common/initializer.py | 4 +- mindspore/common/parameter.py | 4 +- mindspore/common/tensor.py | 10 +-- mindspore/model_zoo/mobilenetV2.py | 12 +-- mindspore/model_zoo/mobilenetV3.py | 12 +-- mindspore/nn/layer/basic.py | 8 +- mindspore/nn/layer/combined.py | 2 +- mindspore/nn/layer/conv.py | 2 +- mindspore/nn/layer/embedding.py | 2 +- mindspore/nn/layer/normalization.py | 2 +- mindspore/nn/layer/pooling.py | 6 +- mindspore/nn/layer/quant.py | 6 +- .../composite/multitype_ops/greater_impl.py | 2 +- mindspore/ops/operations/array_ops.py | 24 ++--- mindspore/ops/operations/math_ops.py | 4 +- mindspore/ops/operations/nn_ops.py | 8 +- mindspore/train/_utils.py | 4 +- mindspore/train/serialization.py | 30 +++---- model_zoo/bert/src/fused_layer_norm.py | 2 +- model_zoo/mobilenetv2/src/mobilenetV2.py | 12 +-- model_zoo/mobilenetv3/src/mobilenetV3.py | 12 +-- model_zoo/resnet101/train.py | 8 +- model_zoo/ssd/src/init_params.py | 4 +- tests/st/gnn/aggregator.py | 6 +- tests/st/nccl/test_nccl_all_gather_op.py | 2 +- tests/st/nccl/test_nccl_all_reduce_op.py | 12 +-- tests/st/nccl/test_nccl_reduce_scatter_op.py | 6 +- .../models/bert/src/fused_layer_norm.py | 2 +- tests/st/ops/ascend/test_autocast.py | 82 ++++++++--------- .../ops/ascend/test_tbe_ops/test_bias_add.py | 2 +- tests/st/ops/ascend/test_tdt_data_ms.py | 2 +- tests/st/ops/cpu/test_argmax_op.py | 2 +- tests/st/ops/cpu/test_equalcount_op.py | 4 +- tests/st/ops/cpu/test_softmax_op.py | 2 +- .../cpu/test_softmax_with_cross_entropy_op.py | 4 +- tests/st/ops/gpu/test_correction_mul_op.py | 2 +- tests/st/ops/gpu/test_equal_op.py | 16 ++-- tests/st/ops/gpu/test_exp_op.py | 8 +- tests/st/ops/gpu/test_log_op.py | 8 +- tests/st/ops/gpu/test_mul_op.py | 20 ++--- tests/st/ops/gpu/test_neg_op.py | 8 +- tests/st/ops/gpu/test_realdiv_op.py | 20 ++--- tests/st/ops/gpu/test_reciprocal_op.py | 8 +- tests/st/ops/gpu/test_reduce_max_op.py | 14 +-- tests/st/ops/gpu/test_reduce_mean_op.py | 30 +++---- tests/st/ops/gpu/test_reduce_sum_op.py | 30 +++---- tests/st/ops/gpu/test_sub_op.py | 20 ++--- tests/st/ops/gpu/test_tile_op.py | 6 +- tests/st/ops/gpu/test_zeroslike_op.py | 8 +- tests/ut/python/dtype/test_list.py | 8 +- tests/ut/python/exec/test_bias_add.py | 2 +- tests/ut/python/exec/test_train.py | 2 +- tests/ut/python/ir/test_tensor.py | 88 +++++++++---------- tests/ut/python/ir/test_tensor_py.py | 4 +- tests/ut/python/pipeline/infer/infer.py | 2 +- .../python/pynative_mode/nn/test_layernorm.py | 4 +- tests/ut/python/utils/test_initializer.py | 2 +- tests/ut/python/utils/test_serialize.py | 4 +- tests/vm_impl/array_ops_vm_impl.py | 2 +- 64 files changed, 348 insertions(+), 342 deletions(-) diff --git a/example/resnet50_imagenet2012/train.py b/example/resnet50_imagenet2012/train.py index abb55731dce..6896320ecea 100755 --- a/example/resnet50_imagenet2012/train.py +++ b/example/resnet50_imagenet2012/train.py @@ -79,12 +79,12 @@ if __name__ == '__main__': for _, cell in net.cells_and_names(): if isinstance(cell, nn.Conv2d): cell.weight.default_input = weight_init.initializer(weight_init.XavierUniform(), - cell.weight.default_input.shape(), - 
cell.weight.default_input.dtype()).to_tensor()
+                                                                        cell.weight.default_input.shape,
+                                                                        cell.weight.default_input.dtype).to_tensor()
         if isinstance(cell, nn.Dense):
             cell.weight.default_input = weight_init.initializer(weight_init.TruncatedNormal(),
-                                                                cell.weight.default_input.shape(),
-                                                                cell.weight.default_input.dtype()).to_tensor()
+                                                                cell.weight.default_input.shape,
+                                                                cell.weight.default_input.dtype).to_tensor()

     if not config.use_label_smooth:
         config.label_smooth_factor = 0.0
diff --git a/example/resnet50_imagenet2012_THOR/model/thor_layer.py b/example/resnet50_imagenet2012_THOR/model/thor_layer.py
index 046278a67dd..d84cbf7a939 100644
--- a/example/resnet50_imagenet2012_THOR/model/thor_layer.py
+++ b/example/resnet50_imagenet2012_THOR/model/thor_layer.py
@@ -338,15 +338,15 @@ class Dense_Thor(Cell):
         self.has_bias = check_bool(has_bias)
         self.thor = True
         if isinstance(weight_init, Tensor):
-            if weight_init.dim() != 2 or weight_init.shape()[0] != out_channels or \
-                    weight_init.shape()[1] != in_channels:
+            if weight_init.dim() != 2 or weight_init.shape[0] != out_channels or \
+                    weight_init.shape[1] != in_channels:
                 raise ValueError("weight_init shape error")

         self.weight = Parameter(initializer(weight_init, [out_channels, in_channels]), name="weight")

         if self.has_bias:
             if isinstance(bias_init, Tensor):
-                if bias_init.dim() != 1 or bias_init.shape()[0] != out_channels:
+                if bias_init.dim() != 1 or bias_init.shape[0] != out_channels:
                     raise ValueError("bias_init shape error")

             self.bias = Parameter(initializer(bias_init, [out_channels]), name="bias")
diff --git a/example/yolov3_coco2017/train.py b/example/yolov3_coco2017/train.py
index 683d7bd7854..43039b7bbc2 100644
--- a/example/yolov3_coco2017/train.py
+++ b/example/yolov3_coco2017/train.py
@@ -56,7 +56,7 @@ def init_net_param(network, init_value='ones'):
     params = network.trainable_params()
     for p in params:
         if isinstance(p.data, Tensor) and 'beta' not in p.name and 'gamma' not in p.name and 'bias' not in p.name:
-            p.set_parameter_data(initializer(init_value, p.data.shape(), p.data.dtype()))
+            p.set_parameter_data(initializer(init_value, p.data.shape, p.data.dtype))


 def main():
diff --git a/mindspore/ccsrc/ir/tensor.cc b/mindspore/ccsrc/ir/tensor.cc
index 537c23dc29a..53a4ef49d55 100644
--- a/mindspore/ccsrc/ir/tensor.cc
+++ b/mindspore/ccsrc/ir/tensor.cc
@@ -384,6 +384,28 @@ REGISTER_PYBIND_DEFINE(Tensor, ([](const py::module *m) {
                            .def(py::init(), py::arg("input"), py::arg("dtype") = nullptr)
                            .def(py::init(), py::arg("input"), py::arg("dtype") = nullptr)
                            .def_readonly(PYTHON_TENSOR_FLAG, &Tensor::parse_info_)
+                           .def_property_readonly("dtype", &Tensor::Dtype, R"mydelimiter(
+                             Get the tensor's data type.
+
+                             Returns:
+                                 type, the data type of tensor.
+
+                             Examples:
+                                 >>> data = mindspore.Tensor(np.ones((2, 1), np.int32))
+                                 >>> data.dtype
+                                 Int32
+                             )mydelimiter")
+                           .def_property_readonly("shape", &Tensor::GetPyTupleShape, R"mydelimiter(
+                             Get the tensor's shape.
+
+                             Returns:
+                                 tuple[int], the shape of tensor.
+
+                             Examples:
+                                 >>> data = mindspore.Tensor(np.ones((3, 3)))
+                                 >>> data.shape
+                                 (3, 3)
+                             )mydelimiter")
                            .def("asnumpy", &Tensor::data_sync, R"mydelimiter(
                              Convert tensor to numpy.ndarray.
@@ -437,17 +459,6 @@ REGISTER_PYBIND_DEFINE(Tensor, ([](const py::module *m) {
                              >>> data.dim()
                              2
                            )mydelimiter")
-                           .def("dtype", &Tensor::Dtype, R"mydelimiter(
-                             Get the tensor's data type.
-
-                             Returns:
-                                 type, the data type of tensor.
- - Examples: - >>> data = mindspore.Tensor(np.ones((2, 1), np.int32)) - >>> data.dtype() - Int32 - )mydelimiter") .def("set_dtype", &Tensor::SetDtype, R"mydelimiter( Set the tensor's data type. @@ -459,17 +470,6 @@ REGISTER_PYBIND_DEFINE(Tensor, ([](const py::module *m) { >>> data.set_dtype(mindspore.int32) mindspore.int32 )mydelimiter") - .def("shape", &Tensor::GetPyTupleShape, R"mydelimiter( - Get the tensor's shape. - - Returns: - tuple[int], the shape of tensor. - - Examples: - >>> data = mindspore.Tensor(np.ones((3, 3))) - >>> data.shape() - (3, 3) - )mydelimiter") .def("__str__", &Tensor::ToString) .def("__repr__", &Tensor::ToStringRepr) .def(py::pickle( @@ -488,8 +488,8 @@ REGISTER_PYBIND_DEFINE(Tensor, ([](const py::module *m) { (void)py::class_>(*m, "MetaTensor") .def(py::init>(), py::arg("dtype"), py::arg("shape")) .def_readonly(PYTHON_META_TENSOR_FLAG, &MetaTensor::parse_info_) - .def("dtype", &MetaTensor::Dtype, "Get the MetaTensor's dtype.") - .def("shape", &MetaTensor::shape, "Get the MetaTensor's shape."); + .def_property_readonly("dtype", &MetaTensor::Dtype, "Get the MetaTensor's dtype.") + .def_property_readonly("shape", &MetaTensor::shape, "Get the MetaTensor's shape."); })); } // namespace tensor } // namespace mindspore diff --git a/mindspore/common/dtype.py b/mindspore/common/dtype.py index 02a27591d42..46b111d2f64 100644 --- a/mindspore/common/dtype.py +++ b/mindspore/common/dtype.py @@ -170,8 +170,8 @@ def get_py_obj_dtype(obj): Type of MindSpore type. """ # Tensor - if hasattr(obj, 'dtype') and callable(obj.dtype) and isinstance(obj.dtype(), typing.Type): - return tensor_type(obj.dtype()) + if hasattr(obj, 'dtype') and isinstance(obj.dtype, typing.Type): + return tensor_type(obj.dtype) if hasattr(obj, '__primitive_flag__') or hasattr(obj, 'construct'): return function if isinstance(obj, (typing.Type, type)): diff --git a/mindspore/common/initializer.py b/mindspore/common/initializer.py index 54c0a1debe7..a50c88ec9a3 100644 --- a/mindspore/common/initializer.py +++ b/mindspore/common/initializer.py @@ -331,11 +331,11 @@ def initializer(init, shape=None, dtype=mstype.float32): raise TypeError("Unsupported init type '{}'.".format(type(init))) if isinstance(init, Tensor): - init_shape = init.shape() + init_shape = init.shape shape = shape if isinstance(shape, (tuple, list)) else [shape] if shape is not None and init_shape != tuple(shape): raise ValueError("The shape of init should be same as variable shape, but got the shape of init {} and " - "the variable shape {}.".format(list(init.shape()), shape)) + "the variable shape {}.".format(list(init.shape), shape)) return init if isinstance(shape, list): diff --git a/mindspore/common/parameter.py b/mindspore/common/parameter.py index a943cbcf63a..bb2160bd2ca 100644 --- a/mindspore/common/parameter.py +++ b/mindspore/common/parameter.py @@ -140,8 +140,8 @@ class Parameter: x.name = prefix + '.' 
+ x.name x.is_init = False if init != 'same': - shape = self.default_input.shape() - dtype = self.default_input.dtype() + shape = self.default_input.shape + dtype = self.default_input.dtype if isinstance(init, (str, Initializer, numbers.Number)): x.init_mode = initializer(init, shape=shape, dtype=dtype) x.default_input = MetaTensor(dtype, shape) diff --git a/mindspore/common/tensor.py b/mindspore/common/tensor.py index 9c0f7e20947..c806ec00920 100644 --- a/mindspore/common/tensor.py +++ b/mindspore/common/tensor.py @@ -45,13 +45,13 @@ class Tensor(Tensor_): >>> # init a tensor with input data >>> t1 = Tensor(np.zeros([1, 2, 3]), mindspore.float32) >>> assert isinstance(t1, Tensor) - >>> assert t1.shape() == (1, 2, 3) - >>> assert t1.dtype() == mindspore.float32 + >>> assert t1.shape == (1, 2, 3) + >>> assert t1.dtype == mindspore.float32 >>> >>> # init a tensor with a float scalar >>> t2 = Tensor(0.1) >>> assert isinstance(t2, Tensor) - >>> assert t2.dtype() == mindspore.float64 + >>> assert t2.dtype == mindspore.float64 """ def __init__(self, input_data, dtype=None): @@ -80,7 +80,7 @@ class Tensor(Tensor_): return False # The GE backend don't support single `Equal` operator execution. # bool type is not supported for `Equal` operator in backend. - if context.get_context("enable_ge") or self.dtype() == mstype.bool_ or other.dtype() == mstype.bool_: + if context.get_context("enable_ge") or self.dtype == mstype.bool_ or other.dtype == mstype.bool_: return Tensor(np.array(self.asnumpy() == other.asnumpy())) return tensor_operator_registry.get('__eq__')(self, other) @@ -166,7 +166,7 @@ class Tensor(Tensor_): return out[0] def __str__(self): - if self.dtype() == mstype.type_none: + if self.dtype == mstype.type_none: return "Unknown Tensor type!" return str(self.asnumpy()) diff --git a/mindspore/model_zoo/mobilenetV2.py b/mindspore/model_zoo/mobilenetV2.py index df35c5f3693..5b1b4cc5ef3 100644 --- a/mindspore/model_zoo/mobilenetV2.py +++ b/mindspore/model_zoo/mobilenetV2.py @@ -267,21 +267,21 @@ class MobileNetV2(nn.Cell): if isinstance(m, (nn.Conv2d, DepthwiseConv)): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.set_parameter_data(Tensor(np.random.normal(0, np.sqrt(2. 
/ n), - m.weight.data.shape()).astype("float32"))) + m.weight.data.shape).astype("float32"))) if m.bias is not None: m.bias.set_parameter_data( - Tensor(np.zeros(m.bias.data.shape(), dtype="float32"))) + Tensor(np.zeros(m.bias.data.shape, dtype="float32"))) elif isinstance(m, nn.BatchNorm2d): m.gamma.set_parameter_data( - Tensor(np.ones(m.gamma.data.shape(), dtype="float32"))) + Tensor(np.ones(m.gamma.data.shape, dtype="float32"))) m.beta.set_parameter_data( - Tensor(np.zeros(m.beta.data.shape(), dtype="float32"))) + Tensor(np.zeros(m.beta.data.shape, dtype="float32"))) elif isinstance(m, nn.Dense): m.weight.set_parameter_data(Tensor(np.random.normal( - 0, 0.01, m.weight.data.shape()).astype("float32"))) + 0, 0.01, m.weight.data.shape).astype("float32"))) if m.bias is not None: m.bias.set_parameter_data( - Tensor(np.zeros(m.bias.data.shape(), dtype="float32"))) + Tensor(np.zeros(m.bias.data.shape, dtype="float32"))) def mobilenet_v2(**kwargs): diff --git a/mindspore/model_zoo/mobilenetV3.py b/mindspore/model_zoo/mobilenetV3.py index 820e60493f7..61b63f9ea12 100644 --- a/mindspore/model_zoo/mobilenetV3.py +++ b/mindspore/model_zoo/mobilenetV3.py @@ -322,21 +322,21 @@ class MobileNetV3(nn.Cell): if isinstance(m, (nn.Conv2d)): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.set_parameter_data(Tensor(np.random.normal(0, np.sqrt(2. / n), - m.weight.data.shape()).astype("float32"))) + m.weight.data.shape).astype("float32"))) if m.bias is not None: m.bias.set_parameter_data( - Tensor(np.zeros(m.bias.data.shape(), dtype="float32"))) + Tensor(np.zeros(m.bias.data.shape, dtype="float32"))) elif isinstance(m, nn.BatchNorm2d): m.gamma.set_parameter_data( - Tensor(np.ones(m.gamma.data.shape(), dtype="float32"))) + Tensor(np.ones(m.gamma.data.shape, dtype="float32"))) m.beta.set_parameter_data( - Tensor(np.zeros(m.beta.data.shape(), dtype="float32"))) + Tensor(np.zeros(m.beta.data.shape, dtype="float32"))) elif isinstance(m, nn.Dense): m.weight.set_parameter_data(Tensor(np.random.normal( - 0, 0.01, m.weight.data.shape()).astype("float32"))) + 0, 0.01, m.weight.data.shape).astype("float32"))) if m.bias is not None: m.bias.set_parameter_data( - Tensor(np.zeros(m.bias.data.shape(), dtype="float32"))) + Tensor(np.zeros(m.bias.data.shape, dtype="float32"))) def mobilenet_v3(model_name, **kwargs): diff --git a/mindspore/nn/layer/basic.py b/mindspore/nn/layer/basic.py index 115f72b319e..c050d35f8f9 100644 --- a/mindspore/nn/layer/basic.py +++ b/mindspore/nn/layer/basic.py @@ -131,7 +131,7 @@ class Flatten(Cell): Examples: >>> net = nn.Flatten() >>> input = Tensor(np.array([[[1.2, 1.2], [2.1, 2.1]], [[2.2, 2.2], [3.2, 3.2]]]), mindspore.float32) - >>> input.shape() + >>> input.shape (2, 2, 2) >>> net(input) [[1.2 1.2 2.1 2.1] @@ -198,15 +198,15 @@ class Dense(Cell): self.has_bias = check_bool(has_bias) if isinstance(weight_init, Tensor): - if weight_init.dim() != 2 or weight_init.shape()[0] != out_channels or \ - weight_init.shape()[1] != in_channels: + if weight_init.dim() != 2 or weight_init.shape[0] != out_channels or \ + weight_init.shape[1] != in_channels: raise ValueError("weight_init shape error") self.weight = Parameter(initializer(weight_init, [out_channels, in_channels]), name="weight") if self.has_bias: if isinstance(bias_init, Tensor): - if bias_init.dim() != 1 or bias_init.shape()[0] != out_channels: + if bias_init.dim() != 1 or bias_init.shape[0] != out_channels: raise ValueError("bias_init shape error") self.bias = Parameter(initializer(bias_init, [out_channels]), name="bias") 
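The whole patch applies one mechanical migration: Tensor.shape and Tensor.dtype become read-only properties instead of methods, so every call site simply drops the parentheses. A minimal before/after sketch of that migration (assuming a MindSpore build that already includes this change; the Float32 repr follows the docstrings above):

    import numpy as np
    import mindspore
    from mindspore import Tensor

    t = Tensor(np.ones((2, 3)), mindspore.float32)
    # Before this patch: shape and dtype were methods.
    #   t.shape()  ->  (2, 3)
    #   t.dtype()  ->  Float32
    # After this patch: plain read-only properties.
    assert t.shape == (2, 3)             # tuple[int]
    assert t.dtype == mindspore.float32  # mindspore type object

Exposing shape and dtype as attributes matches the numpy ndarray convention (ndarray.shape, ndarray.dtype), which is why the remaining hunks in this patch are uniform parenthesis removals.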
diff --git a/mindspore/nn/layer/combined.py b/mindspore/nn/layer/combined.py index 671365e3939..e5b3d0d03a1 100644 --- a/mindspore/nn/layer/combined.py +++ b/mindspore/nn/layer/combined.py @@ -69,7 +69,7 @@ class Conv2d(Cell): Examples: >>> net = combined.Conv2d(120, 240, 4, batchnorm=True, activation='ReLU') >>> input = Tensor(np.ones([1, 120, 1024, 640]), mindspore.float32) - >>> net(input).shape() + >>> net(input).shape (1, 240, 1024, 640) """ diff --git a/mindspore/nn/layer/conv.py b/mindspore/nn/layer/conv.py index e02908aed33..b2a0de9cbe2 100644 --- a/mindspore/nn/layer/conv.py +++ b/mindspore/nn/layer/conv.py @@ -168,7 +168,7 @@ class Conv2d(_Conv): Examples: >>> net = nn.Conv2d(120, 240, 4, has_bias=False, weight_init='normal') >>> input = Tensor(np.ones([1, 120, 1024, 640]), mindspore.float32) - >>> net(input).shape() + >>> net(input).shape (1, 240, 1024, 640) """ @cell_attr_register diff --git a/mindspore/nn/layer/embedding.py b/mindspore/nn/layer/embedding.py index 8a6a0bd33da..538bce1dbc9 100755 --- a/mindspore/nn/layer/embedding.py +++ b/mindspore/nn/layer/embedding.py @@ -56,7 +56,7 @@ class Embedding(Cell): >>> >>> # Maps the input word IDs to word embedding. >>> output = net(input_data) - >>> output.shape() + >>> output.shape (8, 128, 768) """ def __init__(self, vocab_size, embedding_size, use_one_hot=False, embedding_table='normal', dtype=mstype.float32): diff --git a/mindspore/nn/layer/normalization.py b/mindspore/nn/layer/normalization.py index f90b8d28ed5..8ffb7664f09 100644 --- a/mindspore/nn/layer/normalization.py +++ b/mindspore/nn/layer/normalization.py @@ -474,7 +474,7 @@ class LayerNorm(Cell): Examples: >>> x = Tensor(np.ones([20, 5, 10, 10]), mindspore.float32) - >>> shape1 = x.shape()[1:] + >>> shape1 = x.shape[1:] >>> m = nn.LayerNorm(shape1, begin_norm_axis=1, begin_params_axis=1) >>> m(x) """ diff --git a/mindspore/nn/layer/pooling.py b/mindspore/nn/layer/pooling.py index 89bc65bb09e..6c26fcea675 100644 --- a/mindspore/nn/layer/pooling.py +++ b/mindspore/nn/layer/pooling.py @@ -113,7 +113,7 @@ class MaxPool2d(_PoolNd): [0. 0. 4. 0.] [1. 8. 7. 0.]]]] >>> output = pool(x) - >>> output.shape() + >>> output.shape (1, 2, 2, 2) >>> output [[[[7. 8.] @@ -195,7 +195,7 @@ class AvgPool2d(_PoolNd): [0. 8. 9. 7.] [2. 1. 4. 
9.]]]] >>> output = pool(x) - >>> output.shape() + >>> output.shape (1, 2, 2, 2) >>> output [[[[4.888889 4.4444447] @@ -260,7 +260,7 @@ class AvgPool1d(_PoolNd): >>> pool = nn.AvgPool1d(kernel_size=6, strides=1) >>> x = Tensor(np.random.randint(0, 10, [1, 3, 6]), mindspore.float32) >>> output = pool(x) - >>> output.shape() + >>> output.shape (1, 3, 1) """ diff --git a/mindspore/nn/layer/quant.py b/mindspore/nn/layer/quant.py index b843986b0e7..46f6cd5a450 100644 --- a/mindspore/nn/layer/quant.py +++ b/mindspore/nn/layer/quant.py @@ -571,8 +571,8 @@ class DenseQuant(Cell): self.has_bias = check_bool(has_bias) if isinstance(weight_init, Tensor): - if weight_init.dim() != 2 or weight_init.shape()[0] != out_channels or \ - weight_init.shape()[1] != in_channels: + if weight_init.dim() != 2 or weight_init.shape[0] != out_channels or \ + weight_init.shape[1] != in_channels: raise ValueError("weight_init shape error") self.weight = Parameter(initializer( @@ -580,7 +580,7 @@ class DenseQuant(Cell): if self.has_bias: if isinstance(bias_init, Tensor): - if bias_init.dim() != 1 or bias_init.shape()[0] != out_channels: + if bias_init.dim() != 1 or bias_init.shape[0] != out_channels: raise ValueError("bias_init shape error") self.bias = Parameter(initializer( diff --git a/mindspore/ops/composite/multitype_ops/greater_impl.py b/mindspore/ops/composite/multitype_ops/greater_impl.py index 7bbf53da492..ec65704f629 100644 --- a/mindspore/ops/composite/multitype_ops/greater_impl.py +++ b/mindspore/ops/composite/multitype_ops/greater_impl.py @@ -23,7 +23,7 @@ greater = base.MultitypeFuncGraph("greater") @greater.register("Number", "Number") -def _greater_scala(x, y): +def _greater_scalar(x, y): """ Determine whether two numbers are greater. diff --git a/mindspore/ops/operations/array_ops.py b/mindspore/ops/operations/array_ops.py index 061aa2e08fb..9d288b16855 100644 --- a/mindspore/ops/operations/array_ops.py +++ b/mindspore/ops/operations/array_ops.py @@ -145,10 +145,10 @@ class SameTypeShape(PrimitiveWithInfer): def __call__(self, x, y): """run in PyNative mode""" - validator.check_value_type("x", x, Tensor, self.name) - validator.check_value_type("y", y, Tensor, self.name) - validator.check('x dtype', x.dtype(), 'y dtype', y.dtype(), Rel.EQ, self.name, TypeError) - validator.check('x shape', x.shape(), 'y shape', y.shape(), Rel.EQ, self.name) + validator.check_value_type('x', x, Tensor, self.name) + validator.check_value_type('y', y, Tensor, self.name) + validator.check('x dtype', x.dtype, 'y dtype', y.dtype, Rel.EQ, self.name, TypeError) + validator.check('x shape', x.shape, 'y shape', y.shape, Rel.EQ, self.name) return x def __infer__(self, x, y): @@ -187,7 +187,7 @@ class Cast(PrimitiveWithInfer): def check_elim(self, x, dtype): if isinstance(x, Tensor): - if x.dtype() == dtype: + if x.dtype == dtype: return (True, x) return (False, None) raise ValueError("Expecting (Tensor, dtype), got : {}".format(inputs)) @@ -498,7 +498,7 @@ class GatherV2(PrimitiveWithInfer): The original Tensor. - **input_indices** (Tensor) - The shape of tensor is :math:`(y_1, y_2, ..., y_S)`. Specifies the indices of elements of the original Tensor. Must be in the range - `[0, input_param.shape()[axis])`. + `[0, input_param.shape[axis])`. - **axis** (int) - Specifies the dimension index to gather indices. Outputs: @@ -542,7 +542,7 @@ class SparseGatherV2(GatherV2): The original Tensor. - **input_indices** (Tensor) - The shape of tensor is :math:`(y_1, y_2, ..., y_S)`. Specifies the indices of elements of the original Tensor. 
Must be in the range - `[0, input_param.shape()[axis])`. + `[0, input_param.shape[axis])`. - **axis** (int) - Specifies the dimension index to gather indices. Outputs: @@ -700,7 +700,7 @@ class Split(PrimitiveWithInfer): output_num (int): The number of output tensors. Default: 1. Raises: - ValueError: If axis is out of the range [-len(input_x.shape()), len(input_x.shape())), + ValueError: If axis is out of the range [-len(input_x.shape), len(input_x.shape)), or if the output_num is less than or equal to 0, or if the dimension which to split cannot be evenly divided by output_num. @@ -1644,7 +1644,7 @@ class Unpack(PrimitiveWithInfer): A tuple of Tensors, the shape of each objects is same. Raises: - ValueError: If axis is out of the range [-len(input_x.shape()), len(input_x.shape())). + ValueError: If axis is out of the range [-len(input_x.shape), len(input_x.shape)). Examples: >>> unpack = P.Unpack() @@ -1850,7 +1850,7 @@ class StridedSlice(PrimitiveWithInfer): >>> [[5, 5, 5], [6, 6, 6]]], mindspore.float32) >>> slice = P.StridedSlice() >>> output = slice(input_x, (1, 0, 0), (2, 1, 3), (1, 1, 1)) - >>> output.shape() + >>> output.shape (1, 1, 3) >>> output [[[3, 3, 3]]] @@ -1974,7 +1974,7 @@ class Diag(PrimitiveWithInfer): if x is None: return None # do constant-folding only when x rank is 1 - if len(x.shape()) != 1: + if len(x.shape) != 1: return None ret = np.diag(x.asnumpy()) return Tensor(ret) @@ -2026,7 +2026,7 @@ class DiagPart(PrimitiveWithInfer): if x is None: return None # do constant-folding only when x rank is 2 - if len(x.shape()) != 2: + if len(x.shape) != 2: return None ret = np.diag(x.asnumpy()) return Tensor(ret) diff --git a/mindspore/ops/operations/math_ops.py b/mindspore/ops/operations/math_ops.py index 01849597b39..f789d22554a 100644 --- a/mindspore/ops/operations/math_ops.py +++ b/mindspore/ops/operations/math_ops.py @@ -2329,8 +2329,8 @@ class NMSWithMask(PrimitiveWithInfer): def infer_shape(self, bboxes_shape): cls_name = self.name validator.check_integer("bboxes rank", len(bboxes_shape), 2, Rel.EQ, cls_name) - validator.check_integer("bboxes.shape()[0]", bboxes_shape[0], 0, Rel.GT, cls_name) - validator.check_integer("bboxes.shape()[1]", bboxes_shape[1], 5, Rel.EQ, cls_name) + validator.check_integer("bboxes.shape[0]", bboxes_shape[0], 0, Rel.GT, cls_name) + validator.check_integer("bboxes.shape[1]", bboxes_shape[1], 5, Rel.EQ, cls_name) num = bboxes_shape[0] return (bboxes_shape, (num,), (num,)) diff --git a/mindspore/ops/operations/nn_ops.py b/mindspore/ops/operations/nn_ops.py index 258d909c6ee..7b5c2868a70 100644 --- a/mindspore/ops/operations/nn_ops.py +++ b/mindspore/ops/operations/nn_ops.py @@ -78,7 +78,7 @@ class Flatten(PrimitiveWithInfer): >>> input_tensor = Tensor(np.ones(shape=[1, 2, 3, 4]), mindspore.float32) >>> flatten = P.Flatten() >>> output = flatten(input_tensor) - >>> assert output.shape() == (1, 24) + >>> assert output.shape == (1, 24) """ @prim_attr_register @@ -840,7 +840,7 @@ class DepthwiseConv2dNative(PrimitiveWithInfer): >>> weight = Tensor(np.ones([1, 32, 3, 3]), mindspore.float32) >>> depthwise_conv2d = P.DepthwiseConv2dNative(channel_multiplier = 3, kernel_size = (3, 3)) >>> output = depthwise_conv2d(input, weight) - >>> assert output.shape() == (10, 96, 30, 30) + >>> assert output.shape == (10, 96, 30, 30) """ @prim_attr_register @@ -2057,7 +2057,7 @@ class DropoutDoMask(PrimitiveWithInfer): >>> dropout_do_mask = P.DropoutDoMask() >>> mask = dropout_gen_mask(shape, keep_prob) >>> output = dropout_do_mask(x, mask, keep_prob) - >>> 
assert output.shape() == (20, 16, 50) + >>> assert output.shape == (20, 16, 50) """ @prim_attr_register @@ -2114,7 +2114,7 @@ class ResizeBilinear(PrimitiveWithInfer): >>> tensor = Tensor([[[[1, 2, 3, 4, 5], [1, 2, 3, 4, 5]]]], mindspore.int32) >>> resize_bilinear = P.ResizeBilinear((5, 5)) >>> result = resize_bilinear(tensor) - >>> assert result.shape() == (5, 5) + >>> assert result.shape == (5, 5) """ @prim_attr_register diff --git a/mindspore/train/_utils.py b/mindspore/train/_utils.py index 7bc07b126ef..958ea7e2c2e 100644 --- a/mindspore/train/_utils.py +++ b/mindspore/train/_utils.py @@ -157,8 +157,8 @@ def _to_full_tensor(elem, device_num, global_rank, scaling_sens=None): data = Tensor(data) if not isinstance(data, Tensor): raise ValueError("elements in tensors must be Tensor") - shape_ = data.shape() - type_ = data.dtype() + shape_ = data.shape + type_ = data.dtype new_shape = () batchsize_per_device = 1 for i, item in enumerate(shape_): diff --git a/mindspore/train/serialization.py b/mindspore/train/serialization.py index d09494d1897..8048eedeecd 100644 --- a/mindspore/train/serialization.py +++ b/mindspore/train/serialization.py @@ -42,17 +42,17 @@ def _special_process_par(par, new_par): Like (12,2048,1,1)->(12,2048), this case is caused by GE 4 dimensions tensor. """ - par_shape_len = len(par.data.shape()) - new_par_shape_len = len(new_par.data.shape()) + par_shape_len = len(par.data.shape) + new_par_shape_len = len(new_par.data.shape) delta_len = new_par_shape_len - par_shape_len delta_i = 0 for delta_i in range(delta_len): - if new_par.data.shape()[par_shape_len + delta_i] != 1: + if new_par.data.shape[par_shape_len + delta_i] != 1: break if delta_i == delta_len - 1: new_val = new_par.data.asnumpy() - new_val = new_val.reshape(par.data.shape()) - par.set_parameter_data(Tensor(new_val, par.data.dtype())) + new_val = new_val.reshape(par.data.shape) + par.set_parameter_data(Tensor(new_val, par.data.dtype)) return True return False @@ -61,17 +61,17 @@ def _update_param(param, new_param): """Updates param's data from new_param's data.""" if isinstance(param.data, Tensor) and isinstance(new_param.data, Tensor): - if param.data.dtype() != new_param.data.dtype(): + if param.data.dtype != new_param.data.dtype: logger.error("Failed to combine the net and the parameters for param %s.", param.name) msg = ("Net parameters {} type({}) different from parameter_dict's({})" - .format(param.name, param.data.dtype(), new_param.data.dtype())) + .format(param.name, param.data.dtype, new_param.data.dtype)) raise RuntimeError(msg) - if param.data.shape() != new_param.data.shape(): + if param.data.shape != new_param.data.shape: if not _special_process_par(param, new_param): logger.error("Failed to combine the net and the parameters for param %s.", param.name) msg = ("Net parameters {} shape({}) different from parameter_dict's({})" - .format(param.name, param.data.shape(), new_param.data.shape())) + .format(param.name, param.data.shape, new_param.data.shape)) raise RuntimeError(msg) return @@ -79,12 +79,12 @@ def _update_param(param, new_param): return if isinstance(param.data, Tensor) and not isinstance(new_param.data, Tensor): - if param.data.shape() != (1,) and param.data.shape() != (): + if param.data.shape != (1,) and param.data.shape != (): logger.error("Failed to combine the net and the parameters for param %s.", param.name) msg = ("Net parameters {} shape({}) is not (1,), inconsitent with parameter_dict's(scalar)." 
- .format(param.name, param.data.shape())) + .format(param.name, param.data.shape)) raise RuntimeError(msg) - param.set_parameter_data(initializer(new_param.data, param.data.shape(), param.data.dtype())) + param.set_parameter_data(initializer(new_param.data, param.data.shape, param.data.dtype)) elif isinstance(new_param.data, Tensor) and not isinstance(param.data, Tensor): logger.error("Failed to combine the net and the parameters for param %s.", param.name) @@ -120,12 +120,12 @@ def save_checkpoint(parameter_list, ckpoint_file_name): param["data"].init_data() param_data = param["data"].asnumpy().reshape(-1) param_tensor.tensor_content = param_data.tostring() - param_tensor.tensor_type = str(param["data"].dtype()) + param_tensor.tensor_type = str(param["data"].dtype) - if param['data'].shape() == (): + if param['data'].shape == (): param_tensor.dims.append(0) else: - for dim in param['data'].shape(): + for dim in param['data'].shape: param_tensor.dims.append(dim) with open(ckpoint_file_name, "wb") as f: diff --git a/model_zoo/bert/src/fused_layer_norm.py b/model_zoo/bert/src/fused_layer_norm.py index ee3160b036f..5dbe9999ad7 100644 --- a/model_zoo/bert/src/fused_layer_norm.py +++ b/model_zoo/bert/src/fused_layer_norm.py @@ -73,7 +73,7 @@ class FusedLayerNorm(Cell): Examples: >>> x = Tensor(np.ones([20, 5, 10, 10]), mindspore.float32) - >>> shape1 = x.shape()[1:] + >>> shape1 = x.shape[1:] >>> m = nn.LayerNorm(shape1, begin_norm_axis=1, begin_params_axis=1) >>> m(x) """ diff --git a/model_zoo/mobilenetv2/src/mobilenetV2.py b/model_zoo/mobilenetv2/src/mobilenetV2.py index df35c5f3693..5b1b4cc5ef3 100644 --- a/model_zoo/mobilenetv2/src/mobilenetV2.py +++ b/model_zoo/mobilenetv2/src/mobilenetV2.py @@ -267,21 +267,21 @@ class MobileNetV2(nn.Cell): if isinstance(m, (nn.Conv2d, DepthwiseConv)): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.set_parameter_data(Tensor(np.random.normal(0, np.sqrt(2. / n), - m.weight.data.shape()).astype("float32"))) + m.weight.data.shape).astype("float32"))) if m.bias is not None: m.bias.set_parameter_data( - Tensor(np.zeros(m.bias.data.shape(), dtype="float32"))) + Tensor(np.zeros(m.bias.data.shape, dtype="float32"))) elif isinstance(m, nn.BatchNorm2d): m.gamma.set_parameter_data( - Tensor(np.ones(m.gamma.data.shape(), dtype="float32"))) + Tensor(np.ones(m.gamma.data.shape, dtype="float32"))) m.beta.set_parameter_data( - Tensor(np.zeros(m.beta.data.shape(), dtype="float32"))) + Tensor(np.zeros(m.beta.data.shape, dtype="float32"))) elif isinstance(m, nn.Dense): m.weight.set_parameter_data(Tensor(np.random.normal( - 0, 0.01, m.weight.data.shape()).astype("float32"))) + 0, 0.01, m.weight.data.shape).astype("float32"))) if m.bias is not None: m.bias.set_parameter_data( - Tensor(np.zeros(m.bias.data.shape(), dtype="float32"))) + Tensor(np.zeros(m.bias.data.shape, dtype="float32"))) def mobilenet_v2(**kwargs): diff --git a/model_zoo/mobilenetv3/src/mobilenetV3.py b/model_zoo/mobilenetv3/src/mobilenetV3.py index 820e60493f7..61b63f9ea12 100644 --- a/model_zoo/mobilenetv3/src/mobilenetV3.py +++ b/model_zoo/mobilenetv3/src/mobilenetV3.py @@ -322,21 +322,21 @@ class MobileNetV3(nn.Cell): if isinstance(m, (nn.Conv2d)): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.set_parameter_data(Tensor(np.random.normal(0, np.sqrt(2. 
/ n), - m.weight.data.shape()).astype("float32"))) + m.weight.data.shape).astype("float32"))) if m.bias is not None: m.bias.set_parameter_data( - Tensor(np.zeros(m.bias.data.shape(), dtype="float32"))) + Tensor(np.zeros(m.bias.data.shape, dtype="float32"))) elif isinstance(m, nn.BatchNorm2d): m.gamma.set_parameter_data( - Tensor(np.ones(m.gamma.data.shape(), dtype="float32"))) + Tensor(np.ones(m.gamma.data.shape, dtype="float32"))) m.beta.set_parameter_data( - Tensor(np.zeros(m.beta.data.shape(), dtype="float32"))) + Tensor(np.zeros(m.beta.data.shape, dtype="float32"))) elif isinstance(m, nn.Dense): m.weight.set_parameter_data(Tensor(np.random.normal( - 0, 0.01, m.weight.data.shape()).astype("float32"))) + 0, 0.01, m.weight.data.shape).astype("float32"))) if m.bias is not None: m.bias.set_parameter_data( - Tensor(np.zeros(m.bias.data.shape(), dtype="float32"))) + Tensor(np.zeros(m.bias.data.shape, dtype="float32"))) def mobilenet_v3(model_name, **kwargs): diff --git a/model_zoo/resnet101/train.py b/model_zoo/resnet101/train.py index dc3fad64736..1cd3627a119 100755 --- a/model_zoo/resnet101/train.py +++ b/model_zoo/resnet101/train.py @@ -66,12 +66,12 @@ if __name__ == '__main__': for _, cell in net.cells_and_names(): if isinstance(cell, nn.Conv2d): cell.weight.default_input = weight_init.initializer(weight_init.XavierUniform(), - cell.weight.default_input.shape(), - cell.weight.default_input.dtype()).to_tensor() + cell.weight.default_input.shape, + cell.weight.default_input.dtype).to_tensor() if isinstance(cell, nn.Dense): cell.weight.default_input = weight_init.initializer(weight_init.TruncatedNormal(), - cell.weight.default_input.shape(), - cell.weight.default_input.dtype()).to_tensor() + cell.weight.default_input.shape, + cell.weight.default_input.dtype).to_tensor() if not config.label_smooth: config.label_smooth_factor = 0.0 loss = CrossEntropy(smooth_factor=config.label_smooth_factor, num_classes=config.class_num) diff --git a/model_zoo/ssd/src/init_params.py b/model_zoo/ssd/src/init_params.py index 3ab164219c8..6e1f8869b34 100644 --- a/model_zoo/ssd/src/init_params.py +++ b/model_zoo/ssd/src/init_params.py @@ -23,9 +23,9 @@ def init_net_param(network, initialize_mode='TruncatedNormal'): for p in params: if isinstance(p.data, Tensor) and 'beta' not in p.name and 'gamma' not in p.name and 'bias' not in p.name: if initialize_mode == 'TruncatedNormal': - p.set_parameter_data(initializer(TruncatedNormal(0.03), p.data.shape(), p.data.dtype())) + p.set_parameter_data(initializer(TruncatedNormal(0.03), p.data.shape, p.data.dtype)) else: - p.set_parameter_data(initialize_mode, p.data.shape(), p.data.dtype()) + p.set_parameter_data(initialize_mode, p.data.shape, p.data.dtype) def load_backbone_params(network, param_dict): diff --git a/tests/st/gnn/aggregator.py b/tests/st/gnn/aggregator.py index 271e383d06d..373df5f9618 100644 --- a/tests/st/gnn/aggregator.py +++ b/tests/st/gnn/aggregator.py @@ -78,15 +78,15 @@ class GNNFeatureTransform(nn.Cell): self.has_bias = check_bool(has_bias) if isinstance(weight_init, Tensor): - if weight_init.dim() != 2 or weight_init.shape()[0] != out_channels or \ - weight_init.shape()[1] != in_channels: + if weight_init.dim() != 2 or weight_init.shape[0] != out_channels or \ + weight_init.shape[1] != in_channels: raise ValueError("weight_init shape error") self.weight = Parameter(initializer(weight_init, [out_channels, in_channels]), name="weight") if self.has_bias: if isinstance(bias_init, Tensor): - if bias_init.dim() != 1 or bias_init.shape()[0] != out_channels: 
+ if bias_init.dim() != 1 or bias_init.shape[0] != out_channels: raise ValueError("bias_init shape error") self.bias = Parameter(initializer(bias_init, [out_channels]), name="bias") diff --git a/tests/st/nccl/test_nccl_all_gather_op.py b/tests/st/nccl/test_nccl_all_gather_op.py index 36c2ccd29df..9d7ad205c9e 100644 --- a/tests/st/nccl/test_nccl_all_gather_op.py +++ b/tests/st/nccl/test_nccl_all_gather_op.py @@ -51,4 +51,4 @@ def test_AllGather(): diff = output.asnumpy() - expect error = np.ones(shape=expect.shape) * 1.0e-5 assert np.all(diff < error) - assert output.shape() == expect.shape + assert output.shape == expect.shape diff --git a/tests/st/nccl/test_nccl_all_reduce_op.py b/tests/st/nccl/test_nccl_all_reduce_op.py index dc21a6b4387..0f00c6aef7e 100644 --- a/tests/st/nccl/test_nccl_all_reduce_op.py +++ b/tests/st/nccl/test_nccl_all_reduce_op.py @@ -62,19 +62,19 @@ def test_AllReduce(): diff0 = output[0].asnumpy() - expect0 error0 = np.ones(shape=expect0.shape) * 1.0e-5 assert np.all(diff0 < error0) - assert output[0].shape() == expect0.shape + assert output[0].shape == expect0.shape expect1 = expect0 diff1 = output[1].asnumpy() - expect1 error1 = np.ones(shape=expect1.shape) * 1.0e-5 assert np.all(diff1 < error1) - assert output[1].shape() == expect1.shape + assert output[1].shape == expect1.shape expect2 = expect1 diff2 = output[2].asnumpy() - expect2 error2 = np.ones(shape=expect2.shape) * 1.0e-5 assert np.all(diff2 < error2) - assert output[2].shape() == expect2.shape + assert output[2].shape == expect2.shape class Net2(nn.Cell): @@ -108,16 +108,16 @@ def test_AllReduce2(): diff0 = abs(output[0].asnumpy() - expect0) error0 = np.ones(shape=expect0.shape) * 1.0e-5 assert np.all(diff0 < error0) - assert output[0].shape() == expect0.shape + assert output[0].shape == expect0.shape expect1 = expect0 * size diff1 = abs(output[1].asnumpy() - expect1) error1 = np.ones(shape=expect1.shape) * 1.0e-5 assert np.all(diff1 < error1) - assert output[1].shape() == expect1.shape + assert output[1].shape == expect1.shape expect2 = expect1 * size diff2 = abs(output[2].asnumpy() - expect2) error2 = np.ones(shape=expect2.shape) * 1.0e-5 assert np.all(diff2 < error2) - assert output[2].shape() == expect2.shape + assert output[2].shape == expect2.shape diff --git a/tests/st/nccl/test_nccl_reduce_scatter_op.py b/tests/st/nccl/test_nccl_reduce_scatter_op.py index c3e19038837..59ede9cd76e 100644 --- a/tests/st/nccl/test_nccl_reduce_scatter_op.py +++ b/tests/st/nccl/test_nccl_reduce_scatter_op.py @@ -61,16 +61,16 @@ def test_ReduceScatter(): diff0 = output[0].asnumpy() - expect0 error0 = np.ones(shape=expect0.shape) * 1.0e-5 assert np.all(diff0 < error0) - assert output[0].shape() == expect0.shape + assert output[0].shape == expect0.shape expect1 = np.ones([1, 1, 3, 3]).astype(np.float32) * 0.01 * size diff1 = output[1].asnumpy() - expect1 error1 = np.ones(shape=expect1.shape) * 1.0e-5 assert np.all(diff1 < error1) - assert output[1].shape() == expect1.shape + assert output[1].shape == expect1.shape expect2 = np.ones([1, 1, 3, 3]).astype(np.float32) * 0.01 * 1 diff2 = output[2].asnumpy() - expect2 error2 = np.ones(shape=expect2.shape) * 1.0e-5 assert np.all(diff2 < error2) - assert output[2].shape() == expect2.shape + assert output[2].shape == expect2.shape diff --git a/tests/st/networks/models/bert/src/fused_layer_norm.py b/tests/st/networks/models/bert/src/fused_layer_norm.py index ee3160b036f..5dbe9999ad7 100644 --- a/tests/st/networks/models/bert/src/fused_layer_norm.py +++ 
b/tests/st/networks/models/bert/src/fused_layer_norm.py @@ -73,7 +73,7 @@ class FusedLayerNorm(Cell): Examples: >>> x = Tensor(np.ones([20, 5, 10, 10]), mindspore.float32) - >>> shape1 = x.shape()[1:] + >>> shape1 = x.shape[1:] >>> m = nn.LayerNorm(shape1, begin_norm_axis=1, begin_params_axis=1) >>> m(x) """ diff --git a/tests/st/ops/ascend/test_autocast.py b/tests/st/ops/ascend/test_autocast.py index 2891e79d69c..448dc9b4d60 100644 --- a/tests/st/ops/ascend/test_autocast.py +++ b/tests/st/ops/ascend/test_autocast.py @@ -75,93 +75,93 @@ def test_tensor_auto_cast(): t_fp64 = Tensor(np.ones([2, 1, 2, 2]), mstype.float64) net = TensorAutoCast() rs = net(t_uint8, t_int8) - assert rs.dtype() == mstype.int16 + assert rs.dtype == mstype.int16 rs = net(t_uint8, t_int16) - assert rs.dtype() == mstype.int16 + assert rs.dtype == mstype.int16 rs = net(t_uint8, t_int32) - assert rs.dtype() == mstype.int32 + assert rs.dtype == mstype.int32 rs = net(t_uint8, t_int64) - assert rs.dtype() == mstype.int64 + assert rs.dtype == mstype.int64 rs = net(t_int8, t_int16) - assert rs.dtype() == mstype.int16 + assert rs.dtype == mstype.int16 rs = net(t_int8, t_int32) - assert rs.dtype() == mstype.int32 + assert rs.dtype == mstype.int32 rs = net(t_int8, t_int64) - assert rs.dtype() == mstype.int64 + assert rs.dtype == mstype.int64 rs = net(t_int16, t_int32) - assert rs.dtype() == mstype.int32 + assert rs.dtype == mstype.int32 rs = net(t_int16, t_int64) - assert rs.dtype() == mstype.int64 + assert rs.dtype == mstype.int64 rs = net(t_int32, t_int64) - assert rs.dtype() == mstype.int64 + assert rs.dtype == mstype.int64 rs = net(t_fp16, t_fp32) - assert rs.dtype() == mstype.float32 + assert rs.dtype == mstype.float32 rs = net(t_fp16, t_fp64) - assert rs.dtype() == mstype.float64 + assert rs.dtype == mstype.float64 rs = net(t_fp32, t_fp64) - assert rs.dtype() == mstype.float64 + assert rs.dtype == mstype.float64 rs = net(t_uint8, t_fp16) - assert rs.dtype() == mstype.float16 + assert rs.dtype == mstype.float16 rs = net(t_uint8, t_fp32) - assert rs.dtype() == mstype.float32 + assert rs.dtype == mstype.float32 rs = net(t_uint8, t_fp64) - assert rs.dtype() == mstype.float64 + assert rs.dtype == mstype.float64 rs = net(t_int8, t_fp64) - assert rs.dtype() == mstype.float64 + assert rs.dtype == mstype.float64 rs = net(t_int16, t_fp64) - assert rs.dtype() == mstype.float64 + assert rs.dtype == mstype.float64 rs = net(t_int32, t_fp64) - assert rs.dtype() == mstype.float64 + assert rs.dtype == mstype.float64 rs = net(t_int64, t_fp64) - assert rs.dtype() == mstype.float64 + assert rs.dtype == mstype.float64 rs = net(t_fp16, t_int8) - assert rs.dtype() == mstype.float16 + assert rs.dtype == mstype.float16 rs = net(t_fp16, t_uint8) - assert rs.dtype() == mstype.float16 + assert rs.dtype == mstype.float16 rs = net(t_fp16, t_int16) - assert rs.dtype() == mstype.float16 + assert rs.dtype == mstype.float16 rs = net(t_fp16, t_int32) - assert rs.dtype() == mstype.float16 + assert rs.dtype == mstype.float16 rs = net(t_fp16, t_int64) - assert rs.dtype() == mstype.float16 + assert rs.dtype == mstype.float16 tint = TensorIntAutoCast() rs = tint(t_uint8) - assert rs.dtype() == mstype.uint8 + assert rs.dtype == mstype.uint8 rs = tint(t_int8) - assert rs.dtype() == mstype.int8 + assert rs.dtype == mstype.int8 rs = tint(t_int16) - assert rs.dtype() == mstype.int16 + assert rs.dtype == mstype.int16 rs = tint(t_int32) - assert rs.dtype() == mstype.int32 + assert rs.dtype == mstype.int32 rs = tint(t_int64) - assert rs.dtype() == mstype.int64 + assert 
rs.dtype == mstype.int64 rs = tint(t_fp16) - assert rs.dtype() == mstype.float16 + assert rs.dtype == mstype.float16 rs = tint(t_fp32) - assert rs.dtype() == mstype.float32 + assert rs.dtype == mstype.float32 rs = tint(t_fp64) - assert rs.dtype() == mstype.float64 + assert rs.dtype == mstype.float64 tfp = TensorFPAutoCast() rs = tfp(t_uint8) - assert rs.dtype() == mstype.float32 + assert rs.dtype == mstype.float32 rs = tfp(t_int8) - assert rs.dtype() == mstype.float32 + assert rs.dtype == mstype.float32 rs = tfp(t_int16) - assert rs.dtype() == mstype.float32 + assert rs.dtype == mstype.float32 rs = tfp(t_int32) - assert rs.dtype() == mstype.float32 + assert rs.dtype == mstype.float32 rs = tfp(t_int64) - assert rs.dtype() == mstype.float32 + assert rs.dtype == mstype.float32 rs = tfp(t_fp16) - assert rs.dtype() == mstype.float32 + assert rs.dtype == mstype.float32 rs = tfp(t_fp32) - assert rs.dtype() == mstype.float32 + assert rs.dtype == mstype.float32 rs = tfp(t_fp64) - assert rs.dtype() == mstype.float64 + assert rs.dtype == mstype.float64 t_uint16 = Tensor(np.ones([2, 1, 2, 2]), mstype.uint16) t_uint32 = Tensor(np.ones([2, 1, 2, 2]), mstype.uint32) diff --git a/tests/st/ops/ascend/test_tbe_ops/test_bias_add.py b/tests/st/ops/ascend/test_tbe_ops/test_bias_add.py index bc4dcf7cdc3..9455b7902b1 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_bias_add.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_bias_add.py @@ -35,7 +35,7 @@ class Net(nn.Cell): self.biasAdd = P.BiasAdd() if isinstance(bias_init, Tensor): - if bias_init.dim() != 1 or bias_init.shape()[0] != output_channels: + if bias_init.dim() != 1 or bias_init.shape[0] != output_channels: raise ValueError("bias_init shape error") self.bias = Parameter(initializer( diff --git a/tests/st/ops/ascend/test_tdt_data_ms.py b/tests/st/ops/ascend/test_tdt_data_ms.py index 1cac1004fd0..b4fae1c2d38 100644 --- a/tests/st/ops/ascend/test_tdt_data_ms.py +++ b/tests/st/ops/ascend/test_tdt_data_ms.py @@ -64,7 +64,7 @@ def convert_type(shapes, types): for np_shape, np_type in zip(shapes, types): input_np = np.zeros(np_shape, np_type) tensor = Tensor(input_np) - ms_types.append(tensor.dtype()) + ms_types.append(tensor.dtype) return ms_types diff --git a/tests/st/ops/cpu/test_argmax_op.py b/tests/st/ops/cpu/test_argmax_op.py index 01481e427a7..fdafd7750f8 100644 --- a/tests/st/ops/cpu/test_argmax_op.py +++ b/tests/st/ops/cpu/test_argmax_op.py @@ -34,7 +34,7 @@ class NetArgmax(nn.Cell): x = Tensor(np.array([[1., 20., 5.], [67., 8., 9.], [130., 24., 15.]]).astype(np.float32)) - self.x = Parameter(initializer(x, x.shape()), name='x') + self.x = Parameter(initializer(x, x.shape), name='x') def construct(self): return self.argmax(self.x) diff --git a/tests/st/ops/cpu/test_equalcount_op.py b/tests/st/ops/cpu/test_equalcount_op.py index 040e8a99caf..3f182f6bcc8 100644 --- a/tests/st/ops/cpu/test_equalcount_op.py +++ b/tests/st/ops/cpu/test_equalcount_op.py @@ -32,8 +32,8 @@ class NetEqualCount(nn.Cell): self.equalcount = P.EqualCount() x = Tensor(np.array([1, 20, 5]).astype(np.int32)) y = Tensor(np.array([2, 20, 5]).astype(np.int32)) - self.x = Parameter(initializer(x, x.shape()), name='x') - self.y = Parameter(initializer(y, y.shape()), name='y') + self.x = Parameter(initializer(x, x.shape), name='x') + self.y = Parameter(initializer(y, y.shape), name='y') def construct(self): return self.equalcount(self.x, self.y) diff --git a/tests/st/ops/cpu/test_softmax_op.py b/tests/st/ops/cpu/test_softmax_op.py index ca3fef003da..65623810760 100644 --- 
a/tests/st/ops/cpu/test_softmax_op.py +++ b/tests/st/ops/cpu/test_softmax_op.py @@ -33,7 +33,7 @@ class NetSoftmax(nn.Cell): x = Tensor(np.array([[0.1, 0.3, 0.6], [0.2, -0.6, 0.8], [0.6, 1, 0.4]]).astype(np.float32)) - self.x = Parameter(initializer(x, x.shape()), name='x') + self.x = Parameter(initializer(x, x.shape), name='x') def construct(self): return self.softmax(self.x) diff --git a/tests/st/ops/cpu/test_softmax_with_cross_entropy_op.py b/tests/st/ops/cpu/test_softmax_with_cross_entropy_op.py index b00dc9e3238..45430e1e980 100644 --- a/tests/st/ops/cpu/test_softmax_with_cross_entropy_op.py +++ b/tests/st/ops/cpu/test_softmax_with_cross_entropy_op.py @@ -32,9 +32,9 @@ class NetSoftmaxWithCrossEntropy(nn.Cell): logits = Tensor(np.array([[1, 1, 10], [1, 10, 1], [10, 1, 1]]).astype(np.float32)) - self.logits = Parameter(initializer(logits, logits.shape()), name='logits') + self.logits = Parameter(initializer(logits, logits.shape), name='logits') labels = Tensor(np.array([2, 1, 0]).astype(np.int32)) - self.labels = Parameter(initializer(labels, labels.shape()), name='labels') + self.labels = Parameter(initializer(labels, labels.shape), name='labels') self.SoftmaxWithCrossEntropy = P.SparseSoftmaxCrossEntropyWithLogits(True) def construct(self): diff --git a/tests/st/ops/gpu/test_correction_mul_op.py b/tests/st/ops/gpu/test_correction_mul_op.py index f505c62650b..88ce269e36e 100644 --- a/tests/st/ops/gpu/test_correction_mul_op.py +++ b/tests/st/ops/gpu/test_correction_mul_op.py @@ -50,4 +50,4 @@ def test_correction_mul(): diff = output.asnumpy() - expect assert np.all(diff < error) assert np.all(diff > error * -1) - assert output.shape() == expect.shape + assert output.shape == expect.shape diff --git a/tests/st/ops/gpu/test_equal_op.py b/tests/st/ops/gpu/test_equal_op.py index de9e396bd7e..6dc08b08bb3 100644 --- a/tests/st/ops/gpu/test_equal_op.py +++ b/tests/st/ops/gpu/test_equal_op.py @@ -65,19 +65,19 @@ def test_equal(): equal = NetEqual() output0 = equal(x0, y0) assert np.all(output0.asnumpy() == expect0) - assert output0.shape() == expect0.shape + assert output0.shape == expect0.shape output1 = equal(x1, y1) assert np.all(output1.asnumpy() == expect1) - assert output1.shape() == expect1.shape + assert output1.shape == expect1.shape context.set_context(mode=context.GRAPH_MODE, device_target="GPU") equal = NetEqual() output0 = equal(x0, y0) assert np.all(output0.asnumpy() == expect0) - assert output0.shape() == expect0.shape + assert output0.shape == expect0.shape output1 = equal(x1, y1) assert np.all(output1.asnumpy() == expect1) - assert output1.shape() == expect1.shape + assert output1.shape == expect1.shape @pytest.mark.level0 @@ -92,13 +92,13 @@ def test_notequal(): notequal = NetNotEqual() output0 = notequal(x0, y0) assert np.all(output0.asnumpy() == expect0) - assert output0.shape() == expect0.shape + assert output0.shape == expect0.shape context.set_context(mode=context.GRAPH_MODE, device_target="GPU") notequal = NetNotEqual() output0 = notequal(x0, y0) assert np.all(output0.asnumpy() == expect0) - assert output0.shape() == expect0.shape + assert output0.shape == expect0.shape @pytest.mark.level0 @@ -113,10 +113,10 @@ def test_greaterqual(): gequal = NetGreaterEqual() output0 = gequal(x0, y0) assert np.all(output0.asnumpy() == expect0) - assert output0.shape() == expect0.shape + assert output0.shape == expect0.shape context.set_context(mode=context.GRAPH_MODE, device_target="GPU") gequal = NetGreaterEqual() output0 = gequal(x0, y0) assert np.all(output0.asnumpy() == expect0) - 
assert output0.shape() == expect0.shape + assert output0.shape == expect0.shape diff --git a/tests/st/ops/gpu/test_exp_op.py b/tests/st/ops/gpu/test_exp_op.py index ea205c707ae..d84ba4a3b35 100644 --- a/tests/st/ops/gpu/test_exp_op.py +++ b/tests/st/ops/gpu/test_exp_op.py @@ -49,19 +49,19 @@ def test_exp(): output0 = exp(x0) diff0 = output0.asnumpy() - expect0 assert np.all(diff0 < error0) - assert output0.shape() == expect0.shape + assert output0.shape == expect0.shape output1 = exp(x1) diff1 = output1.asnumpy() - expect1 assert np.all(diff1 < error1) - assert output1.shape() == expect1.shape + assert output1.shape == expect1.shape context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU") exp = NetExp() output0 = exp(x0) diff0 = output0.asnumpy() - expect0 assert np.all(diff0 < error0) - assert output0.shape() == expect0.shape + assert output0.shape == expect0.shape output1 = exp(x1) diff1 = output1.asnumpy() - expect1 assert np.all(diff1 < error1) - assert output1.shape() == expect1.shape + assert output1.shape == expect1.shape diff --git a/tests/st/ops/gpu/test_log_op.py b/tests/st/ops/gpu/test_log_op.py index 00007b43304..47af674f814 100644 --- a/tests/st/ops/gpu/test_log_op.py +++ b/tests/st/ops/gpu/test_log_op.py @@ -50,10 +50,10 @@ def test_log(): output1 = log(x1) diff0 = output0.asnumpy() - expect0 assert np.all(diff0 < error0) - assert output0.shape() == expect0.shape + assert output0.shape == expect0.shape diff1 = output1.asnumpy() - expect1 assert np.all(diff1 < error1) - assert output1.shape() == expect1.shape + assert output1.shape == expect1.shape context.set_context(mode=context.GRAPH_MODE, device_target="GPU") log = NetLog() @@ -61,7 +61,7 @@ def test_log(): output1 = log(x1) diff0 = output0.asnumpy() - expect0 assert np.all(diff0 < error0) - assert output0.shape() == expect0.shape + assert output0.shape == expect0.shape diff1 = output1.asnumpy() - expect1 assert np.all(diff1 < error1) - assert output1.shape() == expect1.shape + assert output1.shape == expect1.shape diff --git a/tests/st/ops/gpu/test_mul_op.py b/tests/st/ops/gpu/test_mul_op.py index 2fbd744d242..6ae038a3fd8 100644 --- a/tests/st/ops/gpu/test_mul_op.py +++ b/tests/st/ops/gpu/test_mul_op.py @@ -64,35 +64,35 @@ def test_mul(): diff0 = output0.asnumpy() - expect0 error0 = np.ones(shape=expect0.shape) * 1.0e-5 assert np.all(diff0 < error0) - assert output0.shape() == expect0.shape + assert output0.shape == expect0.shape output1 = mul(x1, y1) expect1 = np.multiply(x1_np, y1_np) diff1 = output1.asnumpy() - expect1 error1 = np.ones(shape=expect1.shape) * 1.0e-5 assert np.all(diff1 < error1) - assert output1.shape() == expect1.shape + assert output1.shape == expect1.shape output2 = mul(x2, y2) expect2 = np.multiply(x2_np, y2_np) diff2 = output2.asnumpy() - expect2 error2 = np.ones(shape=expect2.shape) * 1.0e-5 assert np.all(diff2 < error2) - assert output2.shape() == expect2.shape + assert output2.shape == expect2.shape output3 = mul(x3, y3) expect3 = np.multiply(x3_np, y3_np) diff3 = output3.asnumpy() - expect3 error3 = np.ones(shape=expect3.shape) * 1.0e-5 assert np.all(diff3 < error3) - assert output3.shape() == expect3.shape + assert output3.shape == expect3.shape output4 = mul(x4, y4) expect4 = np.multiply(x4_np, y4_np) diff4 = output4.asnumpy() - expect4 error4 = np.ones(shape=expect4.shape) * 1.0e-5 assert np.all(diff4 < error4) - assert output4.shape() == expect4.shape + assert output4.shape == expect4.shape context.set_context(mode=context.GRAPH_MODE, device_target="GPU") mul = NetMul() @@ 
-101,32 +101,32 @@ def test_mul(): diff0 = output0.asnumpy() - expect0 error0 = np.ones(shape=expect0.shape) * 1.0e-5 assert np.all(diff0 < error0) - assert output0.shape() == expect0.shape + assert output0.shape == expect0.shape output1 = mul(x1, y1) expect1 = np.multiply(x1_np, y1_np) diff1 = output1.asnumpy() - expect1 error1 = np.ones(shape=expect1.shape) * 1.0e-5 assert np.all(diff1 < error1) - assert output1.shape() == expect1.shape + assert output1.shape == expect1.shape output2 = mul(x2, y2) expect2 = np.multiply(x2_np, y2_np) diff2 = output2.asnumpy() - expect2 error2 = np.ones(shape=expect2.shape) * 1.0e-5 assert np.all(diff2 < error2) - assert output2.shape() == expect2.shape + assert output2.shape == expect2.shape output3 = mul(x3, y3) expect3 = np.multiply(x3_np, y3_np) diff3 = output3.asnumpy() - expect3 error3 = np.ones(shape=expect3.shape) * 1.0e-5 assert np.all(diff3 < error3) - assert output3.shape() == expect3.shape + assert output3.shape == expect3.shape output4 = mul(x4, y4) expect4 = np.multiply(x4_np, y4_np) diff4 = output4.asnumpy() - expect4 error4 = np.ones(shape=expect4.shape) * 1.0e-5 assert np.all(diff4 < error4) - assert output4.shape() == expect4.shape + assert output4.shape == expect4.shape diff --git a/tests/st/ops/gpu/test_neg_op.py b/tests/st/ops/gpu/test_neg_op.py index 8d93c2d5f0a..2f1d662bfc7 100644 --- a/tests/st/ops/gpu/test_neg_op.py +++ b/tests/st/ops/gpu/test_neg_op.py @@ -49,19 +49,19 @@ def test_neg(): output0 = neg(x0) diff0 = output0.asnumpy() - expect0 assert np.all(diff0 < error0) - assert output0.shape() == expect0.shape + assert output0.shape == expect0.shape output1 = neg(x1) diff1 = output1.asnumpy() - expect1 assert np.all(diff1 < error1) - assert output1.shape() == expect1.shape + assert output1.shape == expect1.shape context.set_context(mode=context.GRAPH_MODE, device_target="GPU") neg = NetNeg() output0 = neg(x0) diff0 = output0.asnumpy() - expect0 assert np.all(diff0 < error0) - assert output0.shape() == expect0.shape + assert output0.shape == expect0.shape output1 = neg(x1) diff1 = output1.asnumpy() - expect1 assert np.all(diff1 < error1) - assert output1.shape() == expect1.shape + assert output1.shape == expect1.shape diff --git a/tests/st/ops/gpu/test_realdiv_op.py b/tests/st/ops/gpu/test_realdiv_op.py index 92686602609..29144ba9727 100644 --- a/tests/st/ops/gpu/test_realdiv_op.py +++ b/tests/st/ops/gpu/test_realdiv_op.py @@ -64,35 +64,35 @@ def test_real_div(): diff0 = output0.asnumpy() - expect0 error0 = np.ones(shape=expect0.shape) * 1.0e-5 assert np.all(diff0 < error0) - assert output0.shape() == expect0.shape + assert output0.shape == expect0.shape output1 = real_div(x1, y1) expect1 = np.divide(x1_np, y1_np) diff1 = output1.asnumpy() - expect1 error1 = np.ones(shape=expect1.shape) * 1.0e-5 assert np.all(diff1 < error1) - assert output1.shape() == expect1.shape + assert output1.shape == expect1.shape output2 = real_div(x2, y2) expect2 = np.divide(x2_np, y2_np) diff2 = output2.asnumpy() - expect2 error2 = np.ones(shape=expect2.shape) * 1.0e-5 assert np.all(diff2 < error2) - assert output2.shape() == expect2.shape + assert output2.shape == expect2.shape output3 = real_div(x3, y3) expect3 = np.divide(x3_np, y3_np) diff3 = output3.asnumpy() - expect3 error3 = np.ones(shape=expect3.shape) * 1.0e-5 assert np.all(diff3 < error3) - assert output3.shape() == expect3.shape + assert output3.shape == expect3.shape output4 = real_div(x4, y4) expect4 = np.divide(x4_np, y4_np) diff4 = output4.asnumpy() - expect4 error4 = 
np.ones(shape=expect4.shape) * 1.0e-5
     assert np.all(diff4 < error4)
-    assert output4.shape() == expect4.shape
+    assert output4.shape == expect4.shape
 
     context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU')
     real_div = NetRealDiv()
@@ -101,32 +101,32 @@ def test_real_div():
     diff0 = output0.asnumpy() - expect0
     error0 = np.ones(shape=expect0.shape) * 1.0e-5
     assert np.all(diff0 < error0)
-    assert output0.shape() == expect0.shape
+    assert output0.shape == expect0.shape
 
     output1 = real_div(x1, y1)
     expect1 = np.divide(x1_np, y1_np)
     diff1 = output1.asnumpy() - expect1
     error1 = np.ones(shape=expect1.shape) * 1.0e-5
     assert np.all(diff1 < error1)
-    assert output1.shape() == expect1.shape
+    assert output1.shape == expect1.shape
 
     output2 = real_div(x2, y2)
     expect2 = np.divide(x2_np, y2_np)
     diff2 = output2.asnumpy() - expect2
     error2 = np.ones(shape=expect2.shape) * 1.0e-5
     assert np.all(diff2 < error2)
-    assert output2.shape() == expect2.shape
+    assert output2.shape == expect2.shape
 
     output3 = real_div(x3, y3)
     expect3 = np.divide(x3_np, y3_np)
     diff3 = output3.asnumpy() - expect3
     error3 = np.ones(shape=expect3.shape) * 1.0e-5
     assert np.all(diff3 < error3)
-    assert output3.shape() == expect3.shape
+    assert output3.shape == expect3.shape
 
     output4 = real_div(x4, y4)
     expect4 = np.divide(x4_np, y4_np)
     diff4 = output4.asnumpy() - expect4
     error4 = np.ones(shape=expect4.shape) * 1.0e-5
     assert np.all(diff4 < error4)
-    assert output4.shape() == expect4.shape
+    assert output4.shape == expect4.shape
diff --git a/tests/st/ops/gpu/test_reciprocal_op.py b/tests/st/ops/gpu/test_reciprocal_op.py
index b03558cf4e6..6fd20953530 100644
--- a/tests/st/ops/gpu/test_reciprocal_op.py
+++ b/tests/st/ops/gpu/test_reciprocal_op.py
@@ -49,19 +49,19 @@ def test_Reciprocal():
     output0 = reciprocal(x0)
     diff0 = output0.asnumpy() - expect0
     assert np.all(diff0 < error0)
-    assert output0.shape() == expect0.shape
+    assert output0.shape == expect0.shape
     output1 = reciprocal(x1)
     diff1 = output1.asnumpy() - expect1
     assert np.all(diff1 < error1)
-    assert output1.shape() == expect1.shape
+    assert output1.shape == expect1.shape
 
     context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
     reciprocal = NetReciprocal()
     output0 = reciprocal(x0)
     diff0 = output0.asnumpy() - expect0
     assert np.all(diff0 < error0)
-    assert output0.shape() == expect0.shape
+    assert output0.shape == expect0.shape
     output1 = reciprocal(x1)
     diff1 = output1.asnumpy() - expect1
     assert np.all(diff1 < error1)
-    assert output1.shape() == expect1.shape
+    assert output1.shape == expect1.shape
diff --git a/tests/st/ops/gpu/test_reduce_max_op.py b/tests/st/ops/gpu/test_reduce_max_op.py
index b2ab808c794..afe558452de 100644
--- a/tests/st/ops/gpu/test_reduce_max_op.py
+++ b/tests/st/ops/gpu/test_reduce_max_op.py
@@ -128,43 +128,43 @@ def test_ReduceMax():
     diff0 = abs(output[0].asnumpy() - expect0)
     error0 = np.ones(shape=expect0.shape) * 1.0e-5
     assert np.all(diff0 < error0)
-    assert output[0].shape() == expect0.shape
+    assert output[0].shape == expect0.shape
 
     expect1 = np.max(x1, axis=axis1, keepdims=keep_dims1)
     diff1 = abs(output[1].asnumpy() - expect1)
     error1 = np.ones(shape=expect1.shape) * 1.0e-5
     assert np.all(diff1 < error1)
-    assert output[1].shape() == expect1.shape
+    assert output[1].shape == expect1.shape
 
     expect2 = np.max(x2, axis=axis2, keepdims=keep_dims2)
     diff2 = abs(output[2].asnumpy() - expect2)
     error2 = np.ones(shape=expect2.shape) * 1.0e-5
     assert np.all(diff2 < error2)
-    assert output[2].shape() == expect2.shape
+    assert output[2].shape == expect2.shape
 
     expect3 = np.max(x3, axis=axis3, keepdims=keep_dims3)
     diff3 = abs(output[3].asnumpy() - expect3)
     error3 = np.ones(shape=expect3.shape) * 1.0e-5
     assert np.all(diff3 < error3)
-    assert output[3].shape() == expect3.shape
+    assert output[3].shape == expect3.shape
 
     expect4 = np.max(x4, axis=np_axis4, keepdims=keep_dims4)
     diff4 = abs(output[4].asnumpy() - expect4)
     error4 = np.ones(shape=expect4.shape) * 1.0e-5
     assert np.all(diff4 < error4)
-    assert output[4].shape() == expect4.shape
+    assert output[4].shape == expect4.shape
 
     expect5 = np.max(x5, axis=np_axis5, keepdims=keep_dims5)
     diff5 = abs(output[5].asnumpy() - expect5)
     error5 = np.ones(shape=expect5.shape) * 1.0e-5
     assert np.all(diff5 < error5)
-    assert output[5].shape() == expect5.shape
+    assert output[5].shape == expect5.shape
 
     expect6 = np.max(x6, axis=axis6, keepdims=keep_dims6)
     diff6 = abs(output[6].asnumpy() - expect6)
     error6 = np.ones(shape=expect6.shape) * 1.0e-5
     assert np.all(diff6 < error6)
-    assert output[6].shape() == expect6.shape
+    assert output[6].shape == expect6.shape
 
     expect7 = np.max(x7, axis=axis7, keepdims=keep_dims7)
     diff7 = abs(output[7].asnumpy() - expect7)
diff --git a/tests/st/ops/gpu/test_reduce_mean_op.py b/tests/st/ops/gpu/test_reduce_mean_op.py
index d6a10c631fe..867d7e8a330 100644
--- a/tests/st/ops/gpu/test_reduce_mean_op.py
+++ b/tests/st/ops/gpu/test_reduce_mean_op.py
@@ -180,88 +180,88 @@ def test_ReduceMean():
     diff0 = abs(output[0].asnumpy() - expect0)
     error0 = np.ones(shape=expect0.shape) * 1.0e-5
     assert np.all(diff0 < error0)
-    assert output[0].shape() == expect0.shape
+    assert output[0].shape == expect0.shape
 
     expect1 = np.mean(x1, axis=axis1, keepdims=keep_dims1)
     diff1 = abs(output[1].asnumpy() - expect1)
     error1 = np.ones(shape=expect1.shape) * 1.0e-5
     assert np.all(diff1 < error1)
-    assert output[1].shape() == expect1.shape
+    assert output[1].shape == expect1.shape
 
     expect2 = np.mean(x2, axis=axis2, keepdims=keep_dims2)
     diff2 = abs(output[2].asnumpy() - expect2)
     error2 = np.ones(shape=expect2.shape) * 1.0e-5
     assert np.all(diff2 < error2)
-    assert output[2].shape() == expect2.shape
+    assert output[2].shape == expect2.shape
 
     expect3 = np.mean(x3, axis=axis3, keepdims=keep_dims3)
     diff3 = abs(output[3].asnumpy() - expect3)
     error3 = np.ones(shape=expect3.shape) * 1.0e-5
     assert np.all(diff3 < error3)
-    assert output[3].shape() == expect3.shape
+    assert output[3].shape == expect3.shape
 
     expect4 = np.mean(x4, axis=axis4, keepdims=keep_dims4)
     diff4 = abs(output[4].asnumpy() - expect4)
     error4 = np.ones(shape=expect4.shape) * 1.0e-5
     assert np.all(diff4 < error4)
-    assert output[4].shape() == expect4.shape
+    assert output[4].shape == expect4.shape
 
     expect5 = np.mean(x5, axis=axis5, keepdims=keep_dims5)
     diff5 = abs(output[5].asnumpy() - expect5)
     error5 = np.ones(shape=expect5.shape) * 1.0e-5
     assert np.all(diff5 < error5)
-    assert output[5].shape() == expect5.shape
+    assert output[5].shape == expect5.shape
 
     expect6 = np.mean(x6, axis=axis6, keepdims=keep_dims6)
     diff6 = abs(output[6].asnumpy() - expect6)
     error6 = np.ones(shape=expect6.shape) * 1.0e-5
     assert np.all(diff6 < error6)
-    assert output[6].shape() == expect6.shape
+    assert output[6].shape == expect6.shape
 
     expect7 = np.mean(x7, axis=axis7, keepdims=keep_dims7)
     diff7 = abs(output[7].asnumpy() - expect7)
     error7 = np.ones(shape=expect7.shape) * 1.0e-5
     assert np.all(diff7 < error7)
-    assert output[7].shape() == expect7.shape
+    assert output[7].shape == expect7.shape
 
     expect8 = np.mean(x8, axis=axis8, keepdims=keep_dims8)
     diff8 = abs(output[8].asnumpy() - expect8)
     error8 = np.ones(shape=expect8.shape) * 1.0e-5
     assert np.all(diff8 < error8)
-    assert output[8].shape() == expect8.shape
+    assert output[8].shape == expect8.shape
 
     expect9 = np.mean(x9, axis=axis9, keepdims=keep_dims9)
     diff9 = abs(output[9].asnumpy() - expect9)
     error9 = np.ones(shape=expect9.shape) * 1.0e-5
     assert np.all(diff9 < error9)
-    assert output[9].shape() == expect9.shape
+    assert output[9].shape == expect9.shape
 
     expect10 = np.mean(x10, axis=axis10, keepdims=keep_dims10)
     diff10 = abs(output[10].asnumpy() - expect10)
     error10 = np.ones(shape=expect10.shape) * 1.0e-5
     assert np.all(diff10 < error10)
-    assert output[10].shape() == expect10.shape
+    assert output[10].shape == expect10.shape
 
     expect11 = np.mean(x11, axis=axis11, keepdims=keep_dims11)
     diff11 = abs(output[11].asnumpy() - expect11)
     error11 = np.ones(shape=expect11.shape) * 1.0e-5
     assert np.all(diff11 < error11)
-    assert output[11].shape() == expect11.shape
+    assert output[11].shape == expect11.shape
 
     expect12 = np.mean(x12, axis=axis12, keepdims=keep_dims12)
     diff12 = abs(output[12].asnumpy() - expect12)
     error12 = np.ones(shape=expect12.shape) * 1.0e-5
     assert np.all(diff12 < error12)
-    assert output[12].shape() == expect12.shape
+    assert output[12].shape == expect12.shape
 
     expect13 = np.mean(x13, axis=axis13, keepdims=keep_dims13)
     diff13 = abs(output[13].asnumpy() - expect13)
     error13 = np.ones(shape=expect13.shape) * 1.0e-5
     assert np.all(diff13 < error13)
-    assert output[13].shape() == expect13.shape
+    assert output[13].shape == expect13.shape
 
     expect14 = np.mean(x14, axis=np_axis14, keepdims=keep_dims14)
     diff14 = abs(output[14].asnumpy() - expect14)
     error14 = np.ones(shape=expect14.shape) * 1.0e-5
     assert np.all(diff14 < error14)
-    assert output[14].shape() == expect14.shape
+    assert output[14].shape == expect14.shape
diff --git a/tests/st/ops/gpu/test_reduce_sum_op.py b/tests/st/ops/gpu/test_reduce_sum_op.py
index 6c16235a804..f80c421cbef 100644
--- a/tests/st/ops/gpu/test_reduce_sum_op.py
+++ b/tests/st/ops/gpu/test_reduce_sum_op.py
@@ -182,88 +182,88 @@ def test_ReduceSum():
     diff0 = abs(output[0].asnumpy() - expect0)
     error0 = np.ones(shape=expect0.shape) * 1.0e-5
     assert np.all(diff0 < error0)
-    assert output[0].shape() == expect0.shape
+    assert output[0].shape == expect0.shape
 
     expect1 = np.sum(x1, axis=axis1, keepdims=keep_dims1)
     diff1 = abs(output[1].asnumpy() - expect1)
     error1 = np.ones(shape=expect1.shape) * 1.0e-5
     assert np.all(diff1 < error1)
-    assert output[1].shape() == expect1.shape
+    assert output[1].shape == expect1.shape
 
     expect2 = np.sum(x2, axis=axis2, keepdims=keep_dims2)
     diff2 = abs(output[2].asnumpy() - expect2)
     error2 = np.ones(shape=expect2.shape) * 1.0e-5
     assert np.all(diff2 < error2)
-    assert output[2].shape() == expect2.shape
+    assert output[2].shape == expect2.shape
 
     expect3 = np.sum(x3, axis=axis3, keepdims=keep_dims3)
     diff3 = abs(output[3].asnumpy() - expect3)
     error3 = np.ones(shape=expect3.shape) * 1.0e-5
     assert np.all(diff3 < error3)
-    assert output[3].shape() == expect3.shape
+    assert output[3].shape == expect3.shape
 
     expect4 = np.sum(x4, axis=np_axis4, keepdims=keep_dims4)
     diff4 = abs(output[4].asnumpy() - expect4)
     error4 = np.ones(shape=expect4.shape) * 1.0e-5
     assert np.all(diff4 < error4)
-    assert output[4].shape() == expect4.shape
+    assert output[4].shape == expect4.shape
 
     expect5 = np.sum(x5, axis=np_axis5, keepdims=keep_dims5)
     diff5 = abs(output[5].asnumpy() - expect5)
     error5 = np.ones(shape=expect5.shape) * 1.0e-5
     assert np.all(diff5 < error5)
-    assert output[5].shape() == expect5.shape
+    assert output[5].shape == expect5.shape
 
     expect6 = np.sum(x6, axis=axis6, keepdims=keep_dims6)
     diff6 = abs(output[6].asnumpy() - expect6)
     error6 = np.ones(shape=expect6.shape) * 1.0e-5
     assert np.all(diff6 < error6)
-    assert output[6].shape() == expect6.shape
+    assert output[6].shape == expect6.shape
 
     expect7 = np.sum(x7, axis=axis7, keepdims=keep_dims7)
     diff7 = abs(output[7].asnumpy() - expect7)
     error7 = np.ones(shape=expect7.shape) * 1.0e-5
     assert np.all(diff7 < error7)
-    assert output[7].shape() == expect7.shape
+    assert output[7].shape == expect7.shape
 
     expect8 = np.sum(x8, axis=axis8, keepdims=keep_dims8)
     diff8 = abs(output[8].asnumpy() - expect8)
     error8 = np.ones(shape=expect8.shape) * 1.0e-5
     assert np.all(diff8 < error8)
-    assert output[8].shape() == expect8.shape
+    assert output[8].shape == expect8.shape
 
     expect9 = np.sum(x9, axis=axis9, keepdims=keep_dims9)
     diff9 = abs(output[9].asnumpy() - expect9)
     error9 = np.ones(shape=expect9.shape) * 1.0e-5
     assert np.all(diff9 < error9)
-    assert output[9].shape() == expect9.shape
+    assert output[9].shape == expect9.shape
 
     expect10 = np.sum(x10, axis=axis10, keepdims=keep_dims10)
     diff10 = abs(output[10].asnumpy() - expect10)
     error10 = np.ones(shape=expect10.shape) * 1.0e-5
     assert np.all(diff10 < error10)
-    assert output[10].shape() == expect10.shape
+    assert output[10].shape == expect10.shape
 
     expect11 = np.sum(x11, axis=axis11, keepdims=keep_dims11)
     diff11 = abs(output[11].asnumpy() - expect11)
     error11 = np.ones(shape=expect11.shape) * 1.0e-5
     assert np.all(diff11 < error11)
-    assert output[11].shape() == expect11.shape
+    assert output[11].shape == expect11.shape
 
     expect12 = np.sum(x12, axis=axis12, keepdims=keep_dims12)
     diff12 = abs(output[12].asnumpy() - expect12)
     error12 = np.ones(shape=expect12.shape) * 1.0e-5
     assert np.all(diff12 < error12)
-    assert output[12].shape() == expect12.shape
+    assert output[12].shape == expect12.shape
 
     expect13 = np.sum(x13, axis=axis13, keepdims=keep_dims13)
     diff13 = abs(output[13].asnumpy() - expect13)
     error13 = np.ones(shape=expect13.shape) * 1.0e-5
     assert np.all(diff13 < error13)
-    assert output[13].shape() == expect13.shape
+    assert output[13].shape == expect13.shape
 
     expect14 = np.sum(x14, axis=np_axis14, keepdims=keep_dims14)
     diff14 = abs(output[14].asnumpy() - expect14)
     error14 = np.ones(shape=expect14.shape) * 1.0e-5
     assert np.all(diff14 < error14)
-    assert output[14].shape() == expect14.shape
+    assert output[14].shape == expect14.shape
diff --git a/tests/st/ops/gpu/test_sub_op.py b/tests/st/ops/gpu/test_sub_op.py
index c28e745a7ba..3ba03c6586f 100644
--- a/tests/st/ops/gpu/test_sub_op.py
+++ b/tests/st/ops/gpu/test_sub_op.py
@@ -76,19 +76,19 @@ def test_Sub():
     output4 = sub(x4, y4)
     diff0 = output0.asnumpy() - expect0
     assert np.all(diff0 < error0)
-    assert output0.shape() == expect0.shape
+    assert output0.shape == expect0.shape
     diff1 = output1.asnumpy() - expect1
     assert np.all(diff1 < error1)
-    assert output1.shape() == expect1.shape
+    assert output1.shape == expect1.shape
     diff2 = output2.asnumpy() - expect2
     assert np.all(diff2 < error2)
-    assert output2.shape() == expect2.shape
+    assert output2.shape == expect2.shape
     diff3 = output3.asnumpy() - expect3
     assert np.all(diff3 < error3)
-    assert output3.shape() == expect3.shape
+    assert output3.shape == expect3.shape
     diff4 = output4.asnumpy() - expect4
     assert np.all(diff4 < error4)
-    assert output4.shape() == expect4.shape
+    assert output4.shape == expect4.shape
 
     context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
     sub = Net()
@@ -99,16 +99,16 @@ def test_Sub():
     output4 = sub(x4, y4)
     diff0 = output0.asnumpy() - expect0
     assert np.all(diff0 < error0)
-    assert output0.shape() == expect0.shape
+    assert output0.shape == expect0.shape
     diff1 = output1.asnumpy() - expect1
     assert np.all(diff1 < error1)
-    assert output1.shape() == expect1.shape
+    assert output1.shape == expect1.shape
     diff2 = output2.asnumpy() - expect2
     assert np.all(diff2 < error2)
-    assert output2.shape() == expect2.shape
+    assert output2.shape == expect2.shape
     diff3 = output3.asnumpy() - expect3
     assert np.all(diff3 < error3)
-    assert output3.shape() == expect3.shape
+    assert output3.shape == expect3.shape
     diff4 = output4.asnumpy() - expect4
     assert np.all(diff4 < error4)
-    assert output4.shape() == expect4.shape
+    assert output4.shape == expect4.shape
diff --git a/tests/st/ops/gpu/test_tile_op.py b/tests/st/ops/gpu/test_tile_op.py
index 6e1b07fe175..6973ddcdb5a 100644
--- a/tests/st/ops/gpu/test_tile_op.py
+++ b/tests/st/ops/gpu/test_tile_op.py
@@ -65,16 +65,16 @@ def test_tile():
     diff0 = output[0].asnumpy() - expect0
     error0 = np.ones(shape=expect0.shape) * 1.0e-5
     assert np.all(diff0 < error0)
-    assert output[0].shape() == expect0.shape
+    assert output[0].shape == expect0.shape
 
     expect1 = np.tile(input_x1, mul1)
     diff1 = output[1].asnumpy() - expect1
     error1 = np.ones(shape=expect1.shape) * 1.0e-5
     assert np.all(diff1 < error1)
-    assert output[1].shape() == expect1.shape
+    assert output[1].shape == expect1.shape
 
     expect2 = np.tile(input_x2, mul2)
     diff2 = output[2].asnumpy() - expect2
     error2 = np.ones(shape=expect2.shape) * 1.0e-5
     assert np.all(diff2 < error2)
-    assert output[2].shape() == expect2.shape
+    assert output[2].shape == expect2.shape
diff --git a/tests/st/ops/gpu/test_zeroslike_op.py b/tests/st/ops/gpu/test_zeroslike_op.py
index d8aa8ebdf11..d31197badd8 100644
--- a/tests/st/ops/gpu/test_zeroslike_op.py
+++ b/tests/st/ops/gpu/test_zeroslike_op.py
@@ -50,14 +50,14 @@ def test_ZerosLike():
     diff0 = output0.asnumpy() - expect0
     error0 = np.ones(shape=expect0.shape) * 1.0e-5
     assert np.all(diff0 < error0)
-    assert output0.shape() == expect0.shape
+    assert output0.shape == expect0.shape
 
     output1 = zeros_like(x1)
     expect1 = np.zeros_like(x1_np)
     diff1 = output1.asnumpy() - expect1
     error1 = np.ones(shape=expect1.shape) * 1.0e-5
     assert np.all(diff1 < error1)
-    assert output1.shape() == expect1.shape
+    assert output1.shape == expect1.shape
 
     context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
     zeros_like = NetZerosLike()
@@ -66,11 +66,11 @@ def test_ZerosLike():
     diff0 = output0.asnumpy() - expect0
     error0 = np.ones(shape=expect0.shape) * 1.0e-5
     assert np.all(diff0 < error0)
-    assert output0.shape() == expect0.shape
+    assert output0.shape == expect0.shape
 
     output1 = zeros_like(x1)
     expect1 = np.zeros_like(x1_np)
     diff1 = output1.asnumpy() - expect1
     error1 = np.ones(shape=expect1.shape) * 1.0e-5
     assert np.all(diff1 < error1)
-    assert output1.shape() == expect1.shape
+    assert output1.shape == expect1.shape
diff --git a/tests/ut/python/dtype/test_list.py b/tests/ut/python/dtype/test_list.py
index 971bf85bef0..ffd8c8eae33 100644
--- a/tests/ut/python/dtype/test_list.py
+++ b/tests/ut/python/dtype/test_list.py
@@ -20,6 +20,7 @@ import mindspore.nn as nn
 import mindspore.context as context
 from mindspore import Tensor
 from mindspore.ops import operations as P
+from mindspore.common import dtype as mstype
 from tests.ut.python.ut_filter import non_graph_engine
 from tests.mindspore_test_framework.mindspore_test import mindspore_test
 from tests.mindspore_test_framework.pipeline.forward.compile_forward \
@@ -44,7 +45,12 @@ def test_list_equal():
     y = Tensor(np.zeros([3, 4, 5], np.int32))
     z = [1, 2, 3]
     net = Net(z)
-    assert net(x, y) == x
+    ret = net(x, y)
+
+    print(ret.asnumpy())
+    assert ret == x
+    assert ret.dtype == mstype.int32
+    assert ret.shape == (6, 8, 10)
 
 
 def test_list_not_equal():
diff --git a/tests/ut/python/exec/test_bias_add.py b/tests/ut/python/exec/test_bias_add.py
index a80be608fd0..2349c61a9b9 100644
--- a/tests/ut/python/exec/test_bias_add.py
+++ b/tests/ut/python/exec/test_bias_add.py
@@ -33,7 +33,7 @@ class Net(nn.Cell):
         self.biasAdd = P.BiasAdd()
 
         if isinstance(bias_init, Tensor):
-            if bias_init.dim() != 1 or bias_init.shape()[0] != output_channels:
+            if bias_init.dim() != 1 or bias_init.shape[0] != output_channels:
                 raise ValueError("bias_init shape error")
 
         self.bias = Parameter(initializer(
diff --git a/tests/ut/python/exec/test_train.py b/tests/ut/python/exec/test_train.py
index 9f0e2671976..2cd9b9cad47 100644
--- a/tests/ut/python/exec/test_train.py
+++ b/tests/ut/python/exec/test_train.py
@@ -65,7 +65,7 @@ def test_bias_add(test_with_simu):
             self.biasAdd = P.BiasAdd()
 
             if isinstance(bias_init, Tensor):
-                if bias_init.dim() != 1 or bias_init.shape()[0] != output_channels:
+                if bias_init.dim() != 1 or bias_init.shape[0] != output_channels:
                     raise ValueError("bias_init shape error")
 
             self.bias = Parameter(initializer(
diff --git a/tests/ut/python/ir/test_tensor.py b/tests/ut/python/ir/test_tensor.py
index 3786505a50f..43e7d4956a2 100644
--- a/tests/ut/python/ir/test_tensor.py
+++ b/tests/ut/python/ir/test_tensor.py
@@ -50,148 +50,148 @@ def test_tensor():
     """test_tensor"""
     t1 = ms.Tensor(ndarr)
     assert isinstance(t1, ms.Tensor)
-    assert t1.dtype() == ms.float64
+    assert t1.dtype == ms.float64
 
     t2 = ms.Tensor(np.zeros([1, 2, 3]), ms.float32)
     assert isinstance(t2, ms.Tensor)
-    assert t2.shape() == (1, 2, 3)
-    assert t2.dtype() == ms.float32
+    assert t2.shape == (1, 2, 3)
+    assert t2.dtype == ms.float32
 
     t3 = ms.Tensor(0.1)
     assert isinstance(t3, ms.Tensor)
-    assert t3.dtype() == ms.float64
+    assert t3.dtype == ms.float64
 
     t4 = ms.Tensor(1)
     assert isinstance(t4, ms.Tensor)
-    assert t4.dtype() == ms.int64
+    assert t4.dtype == ms.int64
 
 
 def test_tensor_type_float16():
     t_float16 = ms.Tensor(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float16))
     assert isinstance(t_float16, ms.Tensor)
-    assert t_float16.shape() == (2, 3)
-    assert t_float16.dtype() == ms.float16
+    assert t_float16.shape == (2, 3)
+    assert t_float16.dtype == ms.float16
 
 
 def test_tensor_type_float32():
     t_float32 = ms.Tensor(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32))
     assert isinstance(t_float32, ms.Tensor)
-    assert t_float32.shape() == (2, 3)
-    assert t_float32.dtype() == ms.float32
+    assert t_float32.shape == (2, 3)
+    assert t_float32.dtype == ms.float32
 
 
 def test_tensor_type_float32_user_define():
     t = ms.Tensor(np.zeros([1, 2, 3]), ms.float32)
     assert isinstance(t, ms.Tensor)
-    assert t.shape() == (1, 2, 3)
-    assert t.dtype() == ms.float32
+    assert t.shape == (1, 2, 3)
+    assert t.dtype == ms.float32
 
 
 def test_tensor_type_float64():
     t = ms.Tensor([[1.0, 2, 3], [4, 5, 6]])
     assert isinstance(t, ms.Tensor)
-    assert t.shape() == (2, 3)
-    assert t.dtype() == ms.float64
+    assert t.shape == (2, 3)
+    assert t.dtype == ms.float64
 
     t_zero = ms.Tensor(np.zeros([1, 2, 3]))
     assert isinstance(t_zero, ms.Tensor)
-    assert t_zero.shape() == (1, 2, 3)
-    assert t_zero.dtype() == ms.float64
+    assert t_zero.shape == (1, 2, 3)
+    assert t_zero.dtype == ms.float64
 
 
 def test_tensor_type_float64_user_define():
     t = ms.Tensor(np.array([[1, 2, 3], [4, 5, 6]], dtype=float))
     assert isinstance(t, ms.Tensor)
-    assert t.shape() == (2, 3)
-    assert t.dtype() == ms.float64
+    assert t.shape == (2, 3)
+    assert t.dtype == ms.float64
 
     t_float64 = ms.Tensor(np.array([[1, 2, 3], [4, 5, 6]]), ms.float64)
     assert isinstance(t_float64, ms.Tensor)
-    assert t_float64.shape() == (2, 3)
-    assert t_float64.dtype() == ms.float64
+    assert t_float64.shape == (2, 3)
+    assert t_float64.dtype == ms.float64
 
 
 def test_tensor_type_bool():
     # init a tensor with bool type
     ts_bool_array = ms.Tensor(np.zeros([2, 3], np.bool), ms.bool_)
     assert isinstance(ts_bool_array, ms.Tensor)
-    assert ts_bool_array.dtype() == ms.bool_
+    assert ts_bool_array.dtype == ms.bool_
 
     t_bool = ms.Tensor(True)
     assert isinstance(t_bool, ms.Tensor)
-    assert t_bool.dtype() == ms.bool_
+    assert t_bool.dtype == ms.bool_
 
     t_bool_array = ms.Tensor(np.array([[True, False, True], [False, False, False]]))
     assert isinstance(t_bool_array, ms.Tensor)
-    assert t_bool_array.shape() == (2, 3)
-    assert t_bool_array.dtype() == ms.bool_
+    assert t_bool_array.shape == (2, 3)
+    assert t_bool_array.dtype == ms.bool_
 
 
 def test_tensor_type_int8():
     t_int8_array = ms.Tensor(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int8))
     assert isinstance(t_int8_array, ms.Tensor)
-    assert t_int8_array.shape() == (2, 3)
-    assert t_int8_array.dtype() == ms.int8
+    assert t_int8_array.shape == (2, 3)
+    assert t_int8_array.dtype == ms.int8
 
 
 def test_tensor_type_int16():
     t_int16_array = ms.Tensor(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int16))
     assert isinstance(t_int16_array, ms.Tensor)
-    assert t_int16_array.shape() == (2, 3)
-    assert t_int16_array.dtype() == ms.int16
+    assert t_int16_array.shape == (2, 3)
+    assert t_int16_array.dtype == ms.int16
 
 
 def test_tensor_type_int32():
     t_int = ms.Tensor([[1, 2, 3], [4, 5, 6]])
     assert isinstance(t_int, ms.Tensor)
-    assert t_int.shape() == (2, 3)
-    assert t_int.dtype() == ms.int64
+    assert t_int.shape == (2, 3)
+    assert t_int.dtype == ms.int64
 
     t_int_array = ms.Tensor(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32))
     assert isinstance(t_int_array, ms.Tensor)
-    assert t_int_array.shape() == (2, 3)
-    assert t_int_array.dtype() == ms.int32
+    assert t_int_array.shape == (2, 3)
+    assert t_int_array.dtype == ms.int32
 
 
 def test_tensor_type_int64():
     t_int64 = ms.Tensor(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int64))
     assert isinstance(t_int64, ms.Tensor)
-    assert t_int64.shape() == (2, 3)
-    assert t_int64.dtype() == ms.int64
+    assert t_int64.shape == (2, 3)
+    assert t_int64.dtype == ms.int64
 
 
 def test_tensor_type_uint8():
     t_uint8_array = ms.Tensor(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8))
     assert isinstance(t_uint8_array, ms.Tensor)
-    assert t_uint8_array.shape() == (2, 3)
-    assert t_uint8_array.dtype() == ms.uint8
+    assert t_uint8_array.shape == (2, 3)
+    assert t_uint8_array.dtype == ms.uint8
 
 
 def test_tensor_type_uint16():
     t_uint16_array = ms.Tensor(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint16))
     assert isinstance(t_uint16_array, ms.Tensor)
-    assert t_uint16_array.shape() == (2, 3)
-    assert t_uint16_array.dtype() == ms.uint16
+    assert t_uint16_array.shape == (2, 3)
+    assert t_uint16_array.dtype == ms.uint16
 
 
 def test_tensor_type_uint32():
     t_uint32_array = ms.Tensor(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint32))
     assert isinstance(t_uint32_array, ms.Tensor)
-    assert t_uint32_array.shape() == (2, 3)
-    assert t_uint32_array.dtype() == ms.uint32
+    assert t_uint32_array.shape == (2, 3)
+    assert t_uint32_array.dtype == ms.uint32
 
 
 def test_tensor_type_uint64():
     t_uint64 = ms.Tensor(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint64))
     assert isinstance(t_uint64, ms.Tensor)
-    assert t_uint64.shape() == (2, 3)
-    assert t_uint64.dtype() == ms.uint64
+    assert t_uint64.shape == (2, 3)
+    assert t_uint64.dtype == ms.uint64
 
 
 def test_set_type():
     t = ms.Tensor(ndarr)
     t.set_dtype(ms.float32)
-    assert t.dtype() == ms.float32
+    assert t.dtype == ms.float32
 
 
 @non_graph_engine
@@ -250,11 +250,11 @@ def test_return_tensor():
     tensor_ = exe(net, input_data)
 
     # get shape
-    shape_ = tensor_.shape()
+    shape_ = tensor_.shape
     print("shape = ", shape_)
 
     # get type
-    type_ = tensor_.dtype()
+    type_ = tensor_.dtype
     print("type = ", type_)
 
     # get value
diff --git a/tests/ut/python/ir/test_tensor_py.py b/tests/ut/python/ir/test_tensor_py.py
index 389730413bc..2aa08aa4efe 100644
--- a/tests/ut/python/ir/test_tensor_py.py
+++ b/tests/ut/python/ir/test_tensor_py.py
@@ -71,7 +71,7 @@ def test_tensor_size():
 
 def test_dtype():
     a = ms.Tensor(np.ones((2, 3), dtype=np.int32))
-    assert a.dtype() == ms.int32
+    assert a.dtype == ms.int32
 
 
 def test_asnumpy():
@@ -89,7 +89,7 @@ def test_print():
 
 def test_float():
     a = ms.Tensor(np.ones((2, 3)), ms.float16)
-    assert a.dtype() == ms.float16
+    assert a.dtype == ms.float16
 
 
 def test_tensor_method_sub():
diff --git a/tests/ut/python/pipeline/infer/infer.py b/tests/ut/python/pipeline/infer/infer.py
index 4945b0ed4fc..a85f19bdc36 100644
--- a/tests/ut/python/pipeline/infer/infer.py
+++ b/tests/ut/python/pipeline/infer/infer.py
@@ -71,7 +71,7 @@ def test(name, file_path, batch_size):
         data_list.append(data.asnumpy())
     batch_data = np.concatenate(data_list, axis=0).transpose((0, 3, 1, 2))
     input_tensor = Tensor(batch_data)
-    print(input_tensor.shape())
+    print(input_tensor.shape)
     network(input_tensor)
 
 
diff --git a/tests/ut/python/pynative_mode/nn/test_layernorm.py b/tests/ut/python/pynative_mode/nn/test_layernorm.py
index 1362d6ba9e9..eb95e1f8a3d 100644
--- a/tests/ut/python/pynative_mode/nn/test_layernorm.py
+++ b/tests/ut/python/pynative_mode/nn/test_layernorm.py
@@ -23,7 +23,7 @@ from mindspore import dtype as mstype
 
 def test_check_layer_norm_1():
     x = Tensor(np.ones([20, 5, 10, 10]), mstype.float32)
-    shape1 = x.shape()[1:]
+    shape1 = x.shape[1:]
     m = nn.LayerNorm(shape1, -1, 1)
     with pytest.raises(NotImplementedError):
         m(x)
@@ -31,7 +31,7 @@ def test_check_layer_norm_1():
 
 def test_check_layer_norm_2():
     x = Tensor(np.ones([20, 5, 10, 10]), mstype.float32)
-    shape1 = x.shape()[1:]
+    shape1 = x.shape[1:]
     m = nn.LayerNorm(shape1, begin_params_axis=1)
     with pytest.raises(NotImplementedError):
         m(x)
diff --git a/tests/ut/python/utils/test_initializer.py b/tests/ut/python/utils/test_initializer.py
index 57709baa766..44d12fa94d4 100644
--- a/tests/ut/python/utils/test_initializer.py
+++ b/tests/ut/python/utils/test_initializer.py
@@ -65,7 +65,7 @@ def test_init_Initializer():
 def test_init_tensor():
     tensor = ms.Tensor(np.zeros([1, 2, 3]))
     tensor = init.initializer(tensor, [1, 2, 3], ms.float32)
-    assert tensor.shape() == (1, 2, 3)
+    assert tensor.shape == (1, 2, 3)
 
 
 def test_init_zero_default_dtype():
diff --git a/tests/ut/python/utils/test_serialize.py b/tests/ut/python/utils/test_serialize.py
index 0cb83135137..b312f9f7d12 100644
--- a/tests/ut/python/utils/test_serialize.py
+++ b/tests/ut/python/utils/test_serialize.py
@@ -126,8 +126,8 @@ def test_load_checkpoint():
 
     assert len(par_dict) == 3
     assert par_dict['param_test'].name == 'param_test'
-    assert par_dict['param_test'].data.dtype() == mstype.float32
-    assert par_dict['param_test'].data.shape() == (1, 3, 224, 224)
+    assert par_dict['param_test'].data.dtype == mstype.float32
+    assert par_dict['param_test'].data.shape == (1, 3, 224, 224)
 
     assert isinstance(par_dict, dict)
diff --git a/tests/vm_impl/array_ops_vm_impl.py b/tests/vm_impl/array_ops_vm_impl.py
index 5c45dbc64b4..91493a4c077 100644
--- a/tests/vm_impl/array_ops_vm_impl.py
+++ b/tests/vm_impl/array_ops_vm_impl.py
@@ -46,7 +46,7 @@ def vm_impl_dType(self):
 
     def vm_impl(x):
         # update the src type
-        return x.dtype()
+        return x.dtype
 
     return vm_impl
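
Usage note (illustrative, not part of the patch): with this change, `Tensor.shape` and `Tensor.dtype` become read-only attributes rather than methods, matching the numpy convention. A minimal sketch, assuming a MindSpore build that includes this commit; the old call forms would now typically raise a TypeError, since the returned tuple or type object is not callable:

    >>> import numpy as np
    >>> import mindspore as ms
    >>> t = ms.Tensor(np.ones((2, 3), dtype=np.float32))
    >>> t.shape    # was t.shape() before this patch
    (2, 3)
    >>> t.dtype    # was t.dtype() before this patch
    Float32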