From 8ce4e62725e9a09851f236de28b88766f61844e4 Mon Sep 17 00:00:00 2001 From: zhunaipan Date: Mon, 18 Oct 2021 21:18:43 +0800 Subject: [PATCH] optimize the comments and log descriptions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit modified: ops/operations/_inner_ops.py modified: ops/operations/_quant_ops.py modified: ops/operations/array_ops.py modified: ops/operations/comm_ops.py modified: ops/operations/math_ops.py modified: ops/operations/quantum_ops.py modified: ops/operations/rl_ops.py modified: ops/operations/sponge_ops.py modified: ops/operations/sponge_update_ops.py modified: train/__init__.py modified: common/tensor.py modified: train/serialization.py modified: ccsrc/pipeline/jit/parse/parse.h modified: explainer/benchmark/_attribution/metric.py modified: ops/composite/multitype_ops/_constexpr_utils.py modified: RELEASE.md modified: mindspore/_extends/parse/standard_method.py modified: mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc modified: mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc modified: mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc modified: mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc modified: mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc modified: mindspore/ccsrc/frontend/parallel/strategy.h modified: mindspore/common/tensor.py modified: mindspore/core/abstract/prim_arrays.cc modified: mindspore/core/abstract/prim_nn.cc modified: mindspore/core/ops/conv2d.cc modified: mindspore/core/ops/logical_and.h modified: mindspore/core/ops/logical_not.h modified: mindspore/core/ops/logical_or.h modified: mindspore/core/ops/reduce_all.h modified: mindspore/core/ops/reduce_any.h modified: mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc modified: mindspore/nn/layer/quant.py modified: mindspore/nn/optim/sgd.py modified: mindspore/nn/sparse/sparse.py modified: mindspore/numpy/array_creations.py modified: mindspore/numpy/array_ops.py modified: mindspore/numpy/logic_ops.py modified: mindspore/numpy/math_ops.py modified: mindspore/ops/operations/_inner_ops.py modified: mindspore/ops/operations/array_ops.py modified: mindspore/ops/operations/rl_ops.py modified: mindspore/train/_utils.py modified: tests/ut/python/model/test_lenet_core_after_exception.py modified: mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc modified: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h modified: mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h modified: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc modified: mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc modified: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc modified: mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc modified: mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc modified: mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc modified: mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc modified: mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc modified: mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h modified: mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h modified: mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h modified: mindspore/ccsrc/fl/server/server.cc modified: mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc modified: mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h modified: mindspore/ccsrc/frontend/optimizer/irpass/inline.h modified: mindspore/ccsrc/minddata/dataset/core/device_tensor.cc modified: mindspore/ccsrc/minddata/dataset/core/tensor.cc modified: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc modified: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc modified: mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc modified: mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc modified: mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc modified: mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc modified: mindspore/ccsrc/pipeline/jit/action.cc modified: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc modified: mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc modified: mindspore/compression/quant/quant_utils.py modified: mindspore/dataset/engine/validators.py modified: mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc modified: mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc modified: mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc modified: mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc modified: mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc modified: mindspore/lite/tools/common/graph_util.h modified: mindspore/lite/tools/optimizer/fisson/fisson_util.cc modified: mindspore/ops/composite/math_ops.py modified: mindspore/ops/operations/math_ops.py modified: mindspore/ops/operations/other_ops.py modified: mindspore/boost/boost_cell_wrapper.py modified: mindspore/ccsrc/common/trans.cc modified: mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc modified: mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc modified: mindspore/lite/src/common/log_util.h modified: mindspore/nn/wrap/loss_scale.py modified: mindspore/parallel/nn/moe.py modified: tests/mindspore_test_framework/mindspore_test.py --- RELEASE.md | 2 +- mindspore/_extends/parse/standard_method.py | 22 +++--- mindspore/boost/boost_cell_wrapper.py | 2 +- .../cpu/concat_offset_cpu_kernel.cc | 2 +- .../kernel_compiler/cpu/ctcloss_cpu_kernel.cc | 2 +- .../cpu/dynamic_shape_cpu_kernel.cc | 2 +- .../cpu/fl/fused_pull_weight_kernel.h | 2 +- .../cpu/fl/fused_push_weight_kernel.h | 2 +- .../mkldnn/conv2d_grad_filter_cpu_kernel.cc | 2 +- .../mkldnn/conv2d_grad_input_cpu_kernel.cc | 4 +- .../cpu/ps/sparse_apply_ftrl_ps_kernel.cc | 2 +- .../ps/sparse_apply_lazy_adam_ps_kernel.cc | 2 +- .../kernel_compiler/cpu/rolling_cpu_kernel.cc | 4 +- .../cpu/scatter_arithmetic_cpu_kernel.cc | 2 +- .../kernel_compiler/cpu/split_cpu_kernel.cc | 6 +- .../cpu/update_cache_cpu_kernel.cc | 2 +- .../gpu/arrays/split_gpu_kernel.h | 2 +- .../gpu/math/broadcast_gpu_kernel.h | 3 +- .../gpu/nn/conv2d_grad_input_gpu_kernel.h | 2 +- mindspore/ccsrc/common/trans.cc | 2 +- mindspore/ccsrc/fl/server/server.cc | 2 +- .../ccsrc/frontend/optimizer/ad/kpynative.cc | 2 +- .../optimizer/irpass/incorporate_getitem.h | 8 +- .../ccsrc/frontend/optimizer/irpass/inline.h | 4 +- .../cache_embedding/cache_embedding.cc | 2 +- .../frontend/parallel/ops_info/gather_info.cc | 6 +- .../parallel/ops_info/reshape_info.cc | 2 +-
.../frontend/parallel/ops_info/tile_info.cc | 2 +- .../parallel/ops_info/transpose_info.cc | 2 +- mindspore/ccsrc/frontend/parallel/strategy.h | 2 +- .../minddata/dataset/core/device_tensor.cc | 2 +- .../ccsrc/minddata/dataset/core/tensor.cc | 2 +- .../engine/datasetops/source/emnist_op.cc | 2 +- .../engine/datasetops/source/mnist_op.cc | 2 +- .../engine/datasetops/source/qmnist_op.cc | 2 +- .../engine/ir/datasetops/dataset_node.cc | 2 +- .../dataset/engine/opt/pre/epoch_ctrl_pass.cc | 4 +- .../dataset/kernels/image/lite_image_utils.cc | 2 +- mindspore/ccsrc/pipeline/jit/action.cc | 3 +- mindspore/ccsrc/pipeline/jit/parse/parse.h | 2 +- .../pipeline/jit/static_analysis/evaluator.cc | 2 +- .../executor/tiling/op_tiling_adapter.cc | 8 +- mindspore/common/tensor.py | 30 +++---- mindspore/compression/quant/quant_utils.py | 6 +- mindspore/core/abstract/prim_arrays.cc | 2 +- mindspore/core/abstract/prim_nn.cc | 14 +++- mindspore/core/ops/conv2d.cc | 11 ++- mindspore/core/ops/logical_and.h | 2 +- mindspore/core/ops/logical_not.h | 2 +- mindspore/core/ops/logical_or.h | 2 +- mindspore/core/ops/reduce_all.h | 2 +- mindspore/core/ops/reduce_any.h | 2 +- mindspore/dataset/engine/validators.py | 2 +- .../benchmark/_attribution/metric.py | 2 +- .../opcoders/nnacl/fp32/affine_fp32_coder.cc | 2 +- .../opcoders/nnacl/int8/affine_int8_coder.cc | 2 +- mindspore/lite/src/common/log_util.h | 12 +-- .../src/runtime/kernel/arm/fp32_grad/sgd.cc | 2 +- .../kernel/ascend310/src/custom_kernel.cc | 2 +- .../runtime/kernel/opencl/kernel/matmul.cc | 2 +- .../runtime/kernel/opencl/kernel/strassen.cc | 2 +- mindspore/lite/tools/common/graph_util.h | 5 +- .../tools/optimizer/fisson/fisson_util.cc | 4 +- mindspore/nn/layer/quant.py | 2 +- mindspore/nn/optim/sgd.py | 4 +- mindspore/nn/sparse/sparse.py | 2 +- mindspore/nn/wrap/loss_scale.py | 2 +- mindspore/numpy/array_creations.py | 14 ++-- mindspore/numpy/array_ops.py | 24 +++--- mindspore/numpy/logic_ops.py | 4 +- mindspore/numpy/math_ops.py | 32 ++++---- mindspore/ops/composite/math_ops.py | 2 +- .../multitype_ops/_constexpr_utils.py | 2 +- mindspore/ops/operations/_inner_ops.py | 6 +- mindspore/ops/operations/_quant_ops.py | 2 +- mindspore/ops/operations/array_ops.py | 14 ++-- mindspore/ops/operations/comm_ops.py | 6 +- mindspore/ops/operations/math_ops.py | 78 +++++++++---------- mindspore/ops/operations/other_ops.py | 2 +- mindspore/ops/operations/quantum_ops.py | 2 +- mindspore/ops/operations/rl_ops.py | 30 +++---- mindspore/ops/operations/sponge_ops.py | 16 ++-- mindspore/ops/operations/sponge_update_ops.py | 6 +- mindspore/parallel/nn/moe.py | 2 +- mindspore/train/__init__.py | 2 +- mindspore/train/_utils.py | 2 +- mindspore/train/serialization.py | 2 +- .../mindspore_test.py | 2 +- .../model/test_lenet_core_after_exception.py | 2 +- 89 files changed, 260 insertions(+), 246 deletions(-) diff --git a/RELEASE.md b/RELEASE.md index 1941ddaf559..0fc0d3678e5 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -2254,7 +2254,7 @@ In Ascend platform, if group > 1, the weight shape of Conv2D change from [in_cha 6. Support Model(.ms) visualization on Netron. 7. Support Tensorflow model in MindSpore Lite Converter 8. Add 86 converter parsers. -9. Convert aware training model without user’s awareness +9. Convert aware training model without user's awareness 10. Support scalar tensor in MindSpore Lite Converter and Runtime 11. Support NPU backend on HUAWEI Kirin SoC.[BETA] 12. 
Merge timeprofiler into benchmark diff --git a/mindspore/_extends/parse/standard_method.py b/mindspore/_extends/parse/standard_method.py index 50e765d9692..7d1017cce7a 100644 --- a/mindspore/_extends/parse/standard_method.py +++ b/mindspore/_extends/parse/standard_method.py @@ -852,12 +852,12 @@ def take(x, indices, axis=None, mode='clip'): indices (Tensor): The indices with shape `(Nj...)` of the values to extract. axis (int, optional): The axis over which to select values. By default, the flattened input array is used. - mode (‘raise’, ‘wrap’, ‘clip’, optional): + mode ('raise', 'wrap', 'clip', optional): - edge: Pads with the edge values of `arr`. - raise: Raises an error; - wrap: Wraps around; - - clip: Clips to the range. `clip` mode means that all indices that are + - clip: Clips to the range. 'clip' mode means that all indices that are too large are replaced by the index that addresses the last element along that axis. Note that this disables indexing with negative numbers. @@ -915,15 +915,15 @@ def choose(x, choices, mode='clip'): choices (sequence of arrays): Choice arrays. `a` and all of the `choices` must be broadcastable to the same shape. If `choices` is itself an array, then its outermost dimension (i.e., the one corresponding to ``choices.shape[0]``) - is taken as defining the “sequence”. - mode (‘raise’, ‘wrap’, ‘clip’, optional): Specifies how indices outside + is taken as defining the "sequence". + mode ('raise', 'wrap', 'clip', optional): Specifies how indices outside ``[0, n-1]`` will be treated: - ‘raise’ – raise an error (default); + 'raise' – raise an error (default); - ‘wrap’ – wrap around; + 'wrap' – wrap around; - ‘clip’ – clip to the range. ‘clip’ mode means that all indices that are + 'clip' – clip to the range. 'clip' mode means that all indices that are too large are replaced by the index that addresses the last element along that axis. Note that this disables indexing with negative numbers. @@ -988,8 +988,8 @@ def searchsorted(x, v, side='left', sorter=None): Args: v (Union[int, float, bool, list, tuple, Tensor]): Values to insert into `a`. - side ('left', 'right', optional): If ‘left’, the index of the first suitable - location found is given. If ‘right’, return the last such index. If there is + side ('left', 'right', optional): If 'left', the index of the first suitable + location found is given. If 'right', return the last such index. If there is no suitable index, return either 0 or N (where N is the length of `a`). sorter (Union[int, float, bool, list, tuple, Tensor]): 1-D optional array of integer indices that sort array `a` into ascending order. They are typically @@ -1076,7 +1076,7 @@ def fill(x, value): def ptp(x, axis=None, keepdims=False): """ - The name of the function comes from the acronym for ‘peak to peak’. + The name of the function comes from the acronym for "peak to peak". Note: Numpy arguments `dtype` and `out` are not supported. @@ -1288,7 +1288,7 @@ def sum(x, axis=None, dtype=None, keepdims=False, initial=None): # pylint: disab keepdims (bool): If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the input array. If the default value is passed, then keepdims will not be passed through to the sum method of - sub-classes of ndarray, however any non-default value will be. If the sub-class’ method does not + sub-classes of ndarray, however any non-default value will be. 
If the sub-class method does not implement keepdims any exceptions will be raised. initial (scalar): Starting value for the sum. diff --git a/mindspore/boost/boost_cell_wrapper.py b/mindspore/boost/boost_cell_wrapper.py index 1089ad38ed9..122698fe9d0 100644 --- a/mindspore/boost/boost_cell_wrapper.py +++ b/mindspore/boost/boost_cell_wrapper.py @@ -424,7 +424,7 @@ class BoostTrainOneStepWithLossScaleCell(BoostTrainOneStepCell): cleared before executing the computation. Outputs: - Tuple[object, object], the first value is False for GPU backend, while it is a instance of + Tuple[object, object], the first value is False for GPU backend, while it is an instance of NPUAllocFloatStatus for other backend. The status is used to detect overflow during overflow detection. The second value is the same as the input of `compute_input`, but contains some information about the execution order. diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc b/mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc index 2f359608a3b..1a1bd0c4558 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc @@ -73,7 +73,7 @@ bool ConcatOffsetCPUKernel::Launch(const std::vector &inp << ", but got:" << output_shape.size(); } if (output_shape[0] != input_num) { - MS_LOG(EXCEPTION) << "ConcatOffset output_shape[0] must equal to input_num, but got " << output_shape[0]; + MS_LOG(EXCEPTION) << "ConcatOffset output_shape[0] must be equal to input_num, but got " << output_shape[0]; } size_t rank = output_shape[1]; size_t idx = 0; diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc b/mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc index 33bcba27c80..d004c0d9538 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc @@ -241,7 +241,7 @@ void CTCLossCPUKernel::GenLabelWithBlank(const uint32_t *seq_len, const std::vec } } if (!ignore_longer_outputs_than_inputs_ && l.size() > seq_len[b]) { - MS_LOG(EXCEPTION) << "Input time(sequence length) should greater than output size(label length), but gets " + MS_LOG(EXCEPTION) << "Input time(sequence length) should be greater than output size(label length), but got " << seq_len[b] << "< " << l.size(); } diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc b/mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc index f51023514b1..de9f10d2ef8 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc @@ -50,7 +50,7 @@ bool DynamicShapeCPUKernel::Launch(const std::vector &inp MS_LOG(EXCEPTION) << "The length of output_shape must be 1, but got:" << output_shape.size(); } if (output_shape[0] != input_shape.size()) { - MS_LOG(EXCEPTION) << "DynamicShape output_shape[0] must equal to the size of input_shape, but got " + MS_LOG(EXCEPTION) << "DynamicShape output_shape[0] must be equal to the size of input_shape, but got " << output_shape[0]; } for (size_t i = 0; i < output_shape[0]; ++i) { diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h index eadff3d87c9..ce95d308b0d 100644 ---
a/mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h @@ -52,7 +52,7 @@ class FusedPullWeightKernel : public CPUKernel { total_iteration_++; uint64_t step_num_per_iteration = fl::worker::FLWorker::GetInstance().worker_step_num_per_iteration(); if (step_num_per_iteration == 0) { - MS_LOG(EXCEPTION) << "Step numbers of per iteration should not equal to 0"; + MS_LOG(EXCEPTION) << "The step number per iteration should not be equal to 0"; } // The worker has to train kWorkerTrainStepNum standalone iterations before it communicates with server. MS_LOG(INFO) << "Try to pull weights. Local step number: " << total_iteration_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h index ea32c41f503..288af63fb9d 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h @@ -50,7 +50,7 @@ class FusedPushWeightKernel : public CPUKernel { total_iteration_++; uint64_t step_num_per_iteration = fl::worker::FLWorker::GetInstance().worker_step_num_per_iteration(); if (step_num_per_iteration == 0) { - MS_LOG(EXCEPTION) << "Step numbers of per iteration should not equal to 0"; + MS_LOG(EXCEPTION) << "The step number per iteration should not be equal to 0"; } // The worker has to train kWorkerTrainStepNum standalone iterations before it communicates with server. MS_LOG(INFO) << "Try to push weights. Local step number: " << total_iteration_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc b/mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc index 20f499290bb..f44f6297445 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc @@ -67,7 +67,7 @@ void Conv2dGradFilterCPUKernel::InitKernel(const CNodePtr &kernel_node) { MS_LOG(EXCEPTION) << "Conv2dGradFilterCPUKernel dilation only support 1 in N axis and C axis!"; } if (stride_ori.size() < kShapeSize2D) { - MS_LOG(EXCEPTION) << "Conv2dGradFilterCPUKernel stride_ori should not less than 2d!"; + MS_LOG(EXCEPTION) << "Conv2dGradFilterCPUKernel stride_ori should not be less than 2d!"; } std::vector stride{stride_ori[0], stride_ori[1]}; std::vector dilation{dilation_ori[2], dilation_ori[3]}; diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc b/mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc index 814d325bf1e..fa7a33147d1 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc @@ -65,7 +65,7 @@ void Conv2dGradInputCPUKernel::InitKernel(const CNodePtr &kernel_node) { } size_t h_index = iter->second; if (stride_me.size() < h_index + 2) { - MS_LOG(EXCEPTION) << "Strides should greater than " << (h_index + 1) << ", but got " << stride_me.size(); + MS_LOG(EXCEPTION) << "Strides should be greater than " << (h_index + 1) << ", but got " << stride_me.size(); } auto h_index_int64 = SizeToLong(h_index); (void)std::transform(stride_me.begin() + h_index_int64, stride_me.begin() + h_index_int64 + 2, @@ -80,7 +80,7 @@ void
Conv2dGradInputCPUKernel::InitKernel(const CNodePtr &kernel_node) { MS_LOG(EXCEPTION) << "Conv2dGradInputCPUKernel dilation only support 1 in N axis and C axis!"; } if (stride_ori.size() < kShapeSize2D) { - MS_LOG(EXCEPTION) << "Conv2dGradInputCPUKernel stride_ori should not less than 2d!"; + MS_LOG(EXCEPTION) << "Conv2dGradInputCPUKernel stride_ori should not be less than 2d!"; } std::vector stride{stride_ori[0], stride_ori[1]}; std::vector dilation{dilation_ori[2], dilation_ori[3]}; diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc b/mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc index b1be4d2c254..63cab0d25e0 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc @@ -101,7 +101,7 @@ void SparseApplyFtrlPSKernel::ReInit(const std::vector> &sha void SparseApplyFtrlPSKernel::ReInit(const std::vector &inputs) { if (inputs.size() < kSparseApplyFtrlPSInputSize) { - MS_LOG(EXCEPTION) << "Input numbers should not less than " << kSparseApplyFtrlPSInputSize << ", but got " + MS_LOG(EXCEPTION) << "Input numbers should not be less than " << kSparseApplyFtrlPSInputSize << ", but got " << inputs.size(); } const auto &indices_addr = inputs[4]; diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc b/mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc index 216b5205c4f..cb3ddfc78eb 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc @@ -91,7 +91,7 @@ void SparseApplyLazyAdamPSKernel::ReInit(const std::vector> void SparseApplyLazyAdamPSKernel::ReInit(const std::vector &inputs) { if (inputs.size() < kSparseApplyLazyAdamPSInputsSize) { - MS_LOG(EXCEPTION) << "Input shape size should not less than " << kSparseApplyLazyAdamPSInputsSize << ", but got " + MS_LOG(EXCEPTION) << "Input shape size should not be less than " << kSparseApplyLazyAdamPSInputsSize << ", but got " << inputs.size(); } const auto &indices_addr = inputs[10]; diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc b/mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc index ded56aeb0e8..b69862b669a 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc @@ -39,12 +39,12 @@ void RollingCpuKernel::InitKernel(const CNodePtr &kernel_node) { method_ = kValidMethods.at(method); auto window = AnfAlgo::GetNodeAttr(kernel_node, WINDOW); if (window <= 0) { - MS_LOG(EXCEPTION) << "window size should not less than 0, but got " << window; + MS_LOG(EXCEPTION) << "window size should not be less than 0, but got " << window; } window_ = LongToInt(window); min_periods_ = AnfAlgo::GetNodeAttr(kernel_node, MIN_PERIODS); if (min_periods_ <= 0) { - MS_LOG(EXCEPTION) << "min_periods should not less than 0, but got " << min_periods_; + MS_LOG(EXCEPTION) << "min_periods should not be less than 0, but got " << min_periods_; } center_ = AnfAlgo::GetNodeAttr(kernel_node, CENTER); auto axis = AnfAlgo::GetNodeAttr(kernel_node, AXIS); diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc b/mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc index 098aef2f83f..8e5d12cba5c 100644 --- 
a/mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc @@ -48,7 +48,7 @@ void ScatterArithmeticCPUKernel::InitKernel(const CNodePtr &kernel_node) { kernel_name_ = AnfAlgo::GetCNodeName(kernel_node); auto input_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0); if (input_shape.size() < 1) { - MS_LOG(EXCEPTION) << "Input shape size should not less than 1"; + MS_LOG(EXCEPTION) << "Input shape size should not be less than 1"; } input_size_ = 1; inner_size_ = 1; diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc b/mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc index 42f6e07e3da..d3c4ae5e2e3 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc @@ -38,8 +38,8 @@ void SplitCPUKernel::InitKernel(const CNodePtr &kernel_node) { (void)std::transform(input_shape.begin(), input_shape.end(), std::back_inserter(input_shape_), [](const size_t &value) { return SizeToInt(value); }); if (input_shape_.size() < 1 || input_shape_.size() > SPLIT_STRIDES_SIZE) { - MS_LOG(EXCEPTION) << "Inpu shape size should not less than 1 or greater than " << SPLIT_STRIDES_SIZE << ", but got " - << input_shape_.size(); + MS_LOG(EXCEPTION) << "Input shape size should not be less than 1 or greater than " << SPLIT_STRIDES_SIZE + << ", but got " << input_shape_.size(); } CheckParam(kernel_node); } @@ -114,7 +114,7 @@ void SplitCPUKernel::CheckParam(const CNodePtr &kernel_node) { axis_ += SizeToLong(input_shape_.size()); } if (output_num_ > IntToSize(input_shape_[LongToUlong(axis_)])) { - MS_LOG(EXCEPTION) << "Attr output_num " << output_num_ << " must less than " << input_shape_[axis_]; + MS_LOG(EXCEPTION) << "Attr output_num " << output_num_ << " must be less than " << input_shape_[axis_]; } } } // namespace kernel diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc b/mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc index 58e9b711840..fe1d87e77f8 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc @@ -65,7 +65,7 @@ void UpdateCacheCPUKernel::LaunchKernel(const std::vector &inputs, auto indices_shape = AnfAlgo::GetPrevNodeOutputInferShape(node, 1); auto update_shape = AnfAlgo::GetPrevNodeOutputInferShape(node, 2); if (update_shape.size() < kMinUpdateShapeSize) { - MS_LOG(EXCEPTION) << "Updata shape should not less than " << kMinUpdateShapeSize; + MS_LOG(EXCEPTION) << "Update shape should not be less than " << kMinUpdateShapeSize; } batch_size_ = 1; for (size_t i = 0; i < indices_shape.size(); ++i) { diff --git a/mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h b/mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h index 766b10e351d..f0881c99b94 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h +++ b/mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h @@ -157,7 +157,7 @@ class SplitGpuFwdKernel : public GpuKernel { return false; } if (output_num_ > SizeToInt(input_shape[axis_])) { - MS_LOG(ERROR) << "Attr output_num " << output_num_ << "must less than" << input_shape[axis_]; + MS_LOG(ERROR) << "Attr output_num " << output_num_ << " must be less than " << input_shape[axis_]; return false; } if (output_num_ != output_num) { diff --git
a/mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h b/mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h index b3e55bddc31..1ae7dfa024e 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h +++ b/mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h @@ -68,6 +68,7 @@ class BroadcastOpGpuKernel : public GpuKernel { return true; } + bool Init(const CNodePtr &kernel_node) override { GetOpType(kernel_node); auto shape1 = AnfAlgo::GetInputRealDeviceShapeIfExist(kernel_node, 0); @@ -93,7 +94,7 @@ class BroadcastOpGpuKernel : public GpuKernel { if (i < MAX_DIMS) { output_shape_[i] = shape3[i]; } else { - MS_LOG(EXCEPTION) << "Output index: " << i << " should less than " << MAX_DIMS; + MS_LOG(EXCEPTION) << "Output index: " << i << " should be less than " << MAX_DIMS; } } output_num_ *= shape3[i]; diff --git a/mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h b/mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h index c0b3a3378c9..614eedb050e 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h +++ b/mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h @@ -362,7 +362,7 @@ class ConvGradInputGpuBkwKernel : public GpuKernel { } size_t h_index = iter->second; if (stride_me.size() < h_index + 2) { - MS_LOG(EXCEPTION) << "Strides should greater than " << h_index + 1 << ", but got " << stride_me.size(); + MS_LOG(EXCEPTION) << "Strides should be greater than " << h_index + 1 << ", but got " << stride_me.size(); } (void)std::transform(stride_me.begin() + h_index, stride_me.begin() + h_index + 2, std::back_inserter(stride_), [](const int64_t &value) { return static_cast(value); }); diff --git a/mindspore/ccsrc/common/trans.cc b/mindspore/ccsrc/common/trans.cc index 172748fee3a..fb4e5741ebc 100644 --- a/mindspore/ccsrc/common/trans.cc +++ b/mindspore/ccsrc/common/trans.cc @@ -1985,7 +1985,7 @@ bool NchwFracZTransWithGroups(const FormatArgs &args, void *result, bool to_devi auto cin_ori = c_dim; auto cout_ori = n_dim / group_size; if (cin_ori == 0 || cout_ori == 0) { - MS_LOG(ERROR) << "cin_ori, cout_ori must not equal to 0"; + MS_LOG(ERROR) << "cin_ori, cout_ori must not be equal to 0"; return false; } size_t e_mult = std::min(Lcm(Lcm(cin_ori, kCubeSize) / cin_ori, Lcm(cout_ori, kCubeSize) / cout_ori), group_size); diff --git a/mindspore/ccsrc/fl/server/server.cc b/mindspore/ccsrc/fl/server/server.cc index d11ff5024b1..754918933f6 100644 --- a/mindspore/ccsrc/fl/server/server.cc +++ b/mindspore/ccsrc/fl/server/server.cc @@ -371,7 +371,7 @@ void Server::RegisterMessageCallback(const std::shared_ptrDebugString(); } if (index_value->value() < 0) { - MS_LOG(EXCEPTION) << "CNode input 2 should not less than 0, CNode: " << cnode->DebugString(); + MS_LOG(EXCEPTION) << "CNode input 2 should not be less than 0, CNode: " << cnode->DebugString(); } size_t index_value_imm = LongToSize(index_value->value()); if (index_value_imm >= input_1_out->size()) { diff --git a/mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h b/mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h index d4b37616626..543537387eb 100644 --- a/mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h +++ b/mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h @@ -203,7 +203,7 @@ AbstractBasePtr ShrinkAbstract(const AbstractBasePtr &original_abstract, std::back_inserter(shrunk_abstract_elements), 
[abs_tuple_elements, before_shrink_tuple_size](const auto &node_and_index) { if (node_and_index.index >= before_shrink_tuple_size) { - MS_LOG(EXCEPTION) << "index should less than inputs size, index: " << node_and_index.index + MS_LOG(EXCEPTION) << "index should be less than inputs size, index: " << node_and_index.index << ", abstract tuple size: " << before_shrink_tuple_size; } return abs_tuple_elements[node_and_index.index]; @@ -227,7 +227,7 @@ FuncGraphPtr ShrinkUnsedOutput(const FuncGraphPtr &fg, const std::vectorinputs(); constexpr auto kMinimalSize = 2; if (new_fg_output_inputs.size() <= kMinimalSize) { - MS_LOG(EXCEPTION) << "New fg output should at least 2 elements, but: " << new_fg_output->DebugString(); + MS_LOG(EXCEPTION) << "New fg output should have at least 2 elements, but: " << new_fg_output->DebugString(); } before_shrink_inputs_size = SizeToLong(new_fg_output_inputs.size() - 1); AnfNodePtrList shrunk_inputs{NewValueNode({prim::kPrimMakeTuple})}; @@ -235,7 +235,7 @@ FuncGraphPtr ShrinkUnsedOutput(const FuncGraphPtr &fg, const std::vector= before_shrink_inputs_size) { - MS_LOG(EXCEPTION) << "index should less than inputs size, index: " << node_and_index.index + MS_LOG(EXCEPTION) << "index should be less than inputs size, index: " << node_and_index.index << ", output: " << new_fg_output->DebugString(); } return new_fg_output_inputs[node_and_index.index + 1]; @@ -251,7 +251,7 @@ FuncGraphPtr ShrinkUnsedOutput(const FuncGraphPtr &fg, const std::vector= before_shrink_inputs_size) { - MS_LOG(EXCEPTION) << "index should less than inputs size, index: " << node_and_index.index + MS_LOG(EXCEPTION) << "index should be less than inputs size, index: " << node_and_index.index << ", output: " << new_fg_output->DebugString(); } return (*value_tuple)[node_and_index.index]; diff --git a/mindspore/ccsrc/frontend/optimizer/irpass/inline.h b/mindspore/ccsrc/frontend/optimizer/irpass/inline.h index cdfd02f7a7a..b266e9e721c 100644 --- a/mindspore/ccsrc/frontend/optimizer/irpass/inline.h +++ b/mindspore/ccsrc/frontend/optimizer/irpass/inline.h @@ -327,7 +327,7 @@ class InlinerBase : public AnfVisitor { } else if (IsCNodeGraph(item)) { auto cinputs = item->cast()->inputs(); if (cinputs.size() < 1) { - MS_LOG(EXCEPTION) << "graph call inputs should greater than 1"; + MS_LOG(EXCEPTION) << "graph call inputs should not be less than 1"; } FuncGraphPtr call_fg = GetValueNode(cinputs[0]); bool call_fg_has_branch = GraphHasBranch(call_fg); @@ -338,7 +338,7 @@ class InlinerBase : public AnfVisitor { } else if (IsPrimitiveCNode(item, prim::kPrimPartial)) { auto cinputs = item->cast()->inputs(); if (cinputs.size() < 2) { - MS_LOG(EXCEPTION) << "partial call inputs should greater than 2"; + MS_LOG(EXCEPTION) << "partial call inputs should not be less than 2"; } FuncGraphPtr call_fg = GetValueNode(cinputs[1]); if (call_fg == nullptr) { diff --git a/mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc b/mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc index ac0af5adeac..dd7bdf5ae41 100644 --- a/mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc +++ b/mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc @@ -667,7 +667,7 @@ void CacheEmbeddingForTrain(const FuncGraphPtr &graph, bool is_pipe, const CNode MS_LOG(EXCEPTION) << "The last cnode after sorting, not return cnode."; } if (return_node->inputs().size() < 2) { - MS_LOG(EXCEPTION) << "Number of return
node inputs should be greater than or equal to 2."; } auto depend_node = CreateDepend(graph, invalid_nodes, return_node->input(1)); diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc b/mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc index aedb8747042..9e69e893594 100644 --- a/mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc +++ b/mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc @@ -92,8 +92,8 @@ Status GatherInfo::GetManualSplitAttr() { int64_t param_split_row = (GetValue(value_vector[0])); int64_t offset = (GetValue(value_vector[1])); if ((param_split_row <= 0) || (offset < 0)) { - MS_LOG(ERROR) << name_ - << ": The value of param split shape must be positive, and the offset must larger or equal to 0"; + MS_LOG(ERROR) << name_ << ": The value of param split shape must be positive, " + << "and the offset must be greater than or equal to 0"; return FAILED; } param_split_shapes_.push_back(param_split_row); @@ -105,7 +105,7 @@ Status GatherInfo::GetManualSplitAttr() { return FAILED; } if (std::any_of(index_offsets_.begin(), index_offsets_.end(), [](const int64_t &offset) { return offset < 0; })) { - MS_LOG(ERROR) << name_ << ": Index offset must not less than 0"; + MS_LOG(ERROR) << name_ << ": Index offset must not be less than 0"; return FAILED; } return SUCCESS; diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc b/mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc index 64ce583b730..27b0f859950 100644 --- a/mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc +++ b/mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc @@ -95,7 +95,7 @@ Status ReshapeInfo::GetParameterInput() { } elements = dim_tuple->value(); if (elements.size() != outputs_shape_[0].size()) { - MS_LOG(ERROR) << name_ << ": Elements size must equal to outputs shape[0] size."; + MS_LOG(ERROR) << name_ << ": Elements size must be equal to outputs shape[0] size."; return FAILED; } diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc b/mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc index 9749f89cb31..41eb4729771 100644 --- a/mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc +++ b/mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc @@ -47,7 +47,7 @@ Status TileInfo::GetAttrs() { } elements = multiples->value(); if (elements.size() != outputs_shape_[0].size()) { - MS_LOG(ERROR) << name_ << ": Elements size must equal to outputs shape[0] size."; + MS_LOG(ERROR) << name_ << ": Elements size must be equal to outputs shape[0] size."; return FAILED; } diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc b/mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc index f608f7f78d0..22c5a40acf1 100644 --- a/mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc +++ b/mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc @@ -62,7 +62,7 @@ Status TransposeInfo::ComputeAxis() { } elements = dim_tuple->value(); if (elements.size() != inputs_shape_[0].size()) { - MS_LOG(ERROR) << name_ << ": elements size must equal to inputs shape 0 size."; + MS_LOG(ERROR) << name_ << ": elements size must be equal to inputs shape[0] size."; return FAILED; } axis_v_.clear(); diff --git a/mindspore/ccsrc/frontend/parallel/strategy.h b/mindspore/ccsrc/frontend/parallel/strategy.h index 854245c08c4..c6aaf2c9ab5 100644 --- a/mindspore/ccsrc/frontend/parallel/strategy.h +++ b/mindspore/ccsrc/frontend/parallel/strategy.h @@ -83,7 +83,7 @@ class Strategy { private: const int64_t stage_; - // The size of 
Dimensions must equal to inputs_ tensor dimension. + // The size of Dimensions must be equal to inputs_ tensor dimension. Strategys inputs_; size_t internal_size_ = 0; std::vector internal_stragies_; diff --git a/mindspore/ccsrc/minddata/dataset/core/device_tensor.cc b/mindspore/ccsrc/minddata/dataset/core/device_tensor.cc index 80cfc532a24..3fbcdc28b9a 100644 --- a/mindspore/ccsrc/minddata/dataset/core/device_tensor.cc +++ b/mindspore/ccsrc/minddata/dataset/core/device_tensor.cc @@ -163,7 +163,7 @@ Status DeviceTensor::DataPop_(std::shared_ptr *host_tensor) { const mindspore::dataset::TensorShape dvpp_shape({dvppDataSize, 1, 1}); CHECK_FAIL_RETURN_UNEXPECTED(this->GetYuvStrideShape().size() >= kYuvDefaultChannels, - "Invalid YuvShape, should greater than 4"); + "Invalid YuvShape, should be greater than 4"); uint32_t _output_width_ = this->GetYuvStrideShape()[0]; uint32_t _output_widthStride_ = this->GetYuvStrideShape()[1]; diff --git a/mindspore/ccsrc/minddata/dataset/core/tensor.cc b/mindspore/ccsrc/minddata/dataset/core/tensor.cc index bac2538e7af..d2d080c097d 100644 --- a/mindspore/ccsrc/minddata/dataset/core/tensor.cc +++ b/mindspore/ccsrc/minddata/dataset/core/tensor.cc @@ -1018,7 +1018,7 @@ Status Tensor::GetSliceOption(const SliceOption &slice_option, const int32_t &sl RETURN_STATUS_UNEXPECTED("Both indices and slices can not be given."); } - CHECK_FAIL_RETURN_UNEXPECTED(shape_.Size() > slice_index, "Invalid shape, should greater than slices index."); + CHECK_FAIL_RETURN_UNEXPECTED(shape_.Size() > slice_index, "Invalid shape, should be greater than slices index."); // if slice object was provided, indices should be empty. Generate indices from the slice object. if (slice_option.indices_.empty()) { // check if slice is valid diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc index 1812b089e99..fa0fb26049f 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc @@ -88,7 +88,7 @@ Status EMnistOp::WalkAllFiles() { std::sort(image_names_.begin(), image_names_.end()); std::sort(label_names_.begin(), label_names_.end()); CHECK_FAIL_RETURN_UNEXPECTED(image_names_.size() == label_names_.size(), - "Invalid data, num of images does not equal to num of labels."); + "Invalid data, num of images is not equal to num of labels."); return Status::OK(); } diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc index a36a70e9b96..179d1d395f1 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc @@ -245,7 +245,7 @@ Status MnistOp::WalkAllFiles() { std::sort(label_names_.begin(), label_names_.end()); CHECK_FAIL_RETURN_UNEXPECTED(image_names_.size() == label_names_.size(), - "Invalid data, num of images does not equal to num of labels."); + "Invalid data, num of images is not equal to num of labels."); return Status::OK(); } diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc index fe30d516ed8..629572aab43 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc @@ -163,7 +163,7 @@ Status 
QMnistOp::WalkAllFiles() { } CHECK_FAIL_RETURN_UNEXPECTED(image_names_.size() == label_names_.size(), - "Invalid data, num of images does not equal to num of labels."); + "Invalid data, num of images is not equal to num of labels."); for (size_t i = 0; i < image_names_.size(); i++) { Path file_path(image_names_[i]); diff --git a/mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc b/mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc index bd91f2a88c1..7bd84fb0ffc 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc @@ -51,7 +51,7 @@ Status ComputeShuffleSize(int64_t num_files, int64_t num_devices, int64_t num_ro } // get the average per file - CHECK_FAIL_RETURN_UNEXPECTED(num_files != 0, "The size of dataset_files must greater than 0."); + CHECK_FAIL_RETURN_UNEXPECTED(num_files != 0, "The size of dataset_files must be greater than 0."); avg_rows_per_file = num_rows / num_files; *shuffle_size = std::max(avg_rows_per_file * average_files_multiplier, shuffle_max); diff --git a/mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc b/mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc index 082557c2ae2..6c362eef3bc 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc @@ -32,7 +32,7 @@ Status EpochCtrlPass::InjectionFinder::Visit(std::shared_ptr node, boo RETURN_UNEXPECTED_IF_NULL(node); RETURN_UNEXPECTED_IF_NULL(modified); CHECK_FAIL_RETURN_UNEXPECTED(node->Children().size() > 0, - "Invalid data, the node of child should greater than zero."); + "Invalid data, the number of child nodes should be greater than zero."); // The injection is at the child of the root node injection_point_ = node->Children()[0]; num_epochs_ = node->num_epochs(); @@ -61,7 +61,7 @@ Status EpochCtrlPass::InjectionFinder::VisitAfter(std::shared_ptr RETURN_UNEXPECTED_IF_NULL(node); RETURN_UNEXPECTED_IF_NULL(modified); CHECK_FAIL_RETURN_UNEXPECTED(node->Children().size() > 0, - "Invalid data, the node of child should greater than zero."); + "Invalid data, the number of child nodes should be greater than zero."); // Assumption: There is only one TransferNode in a pipeline. This assumption is not validated here. // Move the injection point to the child of this node.
injection_point_ = node->Children()[0]; diff --git a/mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc b/mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc index b7543cef3ed..10bd98abee2 100644 --- a/mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc +++ b/mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc @@ -420,7 +420,7 @@ Status ResizePreserve(const TensorRow &inputs, int32_t height, int32_t width, in TensorRow *outputs) { outputs->resize(3); CHECK_FAIL_RETURN_UNEXPECTED(inputs.size() > 0, - "Invalid input, should greater than 0, but got " + std::to_string(inputs.size())); + "Invalid input, should be greater than 0, but got " + std::to_string(inputs.size())); std::shared_ptr input = inputs[0]; CHECK_FAIL_RETURN_UNEXPECTED(input->shape().Size() >= 3, "Invalid input shape, should be greater than 3 dimensions."); LiteMat lite_mat_src(input->shape()[1], input->shape()[0], input->shape()[2], diff --git a/mindspore/ccsrc/pipeline/jit/action.cc b/mindspore/ccsrc/pipeline/jit/action.cc index 24386de616f..f6f5c07f609 100644 --- a/mindspore/ccsrc/pipeline/jit/action.cc +++ b/mindspore/ccsrc/pipeline/jit/action.cc @@ -284,7 +284,7 @@ const FuncGraphPtr GetLoadedGraph(const ResourcePtr &res) { if (loaded_graph_num == 1) { return loaded_graph; } - MS_LOG(EXCEPTION) << "The loaded sub graph currently should less than 2, but got " << loaded_graph_num; + MS_LOG(EXCEPTION) << "The loaded sub graph currently should be less than 2, but got " << loaded_graph_num; } void CheckRootInputShapeAndType(const ResourcePtr &res, const FuncGraphPtr &loaded_graph) { @@ -302,6 +302,7 @@ void CheckRootInputShapeAndType(const ResourcePtr &res, const FuncGraphPtr &load MS_LOG(EXCEPTION) << "The inputs number " << root_inputs_num << " not equal to the inputs number of loaded graph " << loaded_inputs_num; } + for (size_t index = 0; index < root_inputs_num; index++) { auto root_input = root_inputs[index]; auto loaded_input = loaded_inputs[index]; diff --git a/mindspore/ccsrc/pipeline/jit/parse/parse.h b/mindspore/ccsrc/pipeline/jit/parse/parse.h index 45a9b06898a..54617b1fdb9 100644 --- a/mindspore/ccsrc/pipeline/jit/parse/parse.h +++ b/mindspore/ccsrc/pipeline/jit/parse/parse.h @@ -140,7 +140,7 @@ class Parser { AnfNodePtr ParseNone(const FunctionBlockPtr &block, const py::object &node); // Process Ellipsis AnfNodePtr ParseEllipsis(const FunctionBlockPtr &block, const py::object &node); - // Process a integer or float number + // Process an integer or float number AnfNodePtr ParseNum(const FunctionBlockPtr &block, const py::object &node); // Process a string variable AnfNodePtr ParseStr(const FunctionBlockPtr &block, const py::object &node); diff --git a/mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc b/mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc index e77a2f9692c..054372e4d73 100644 --- a/mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc +++ b/mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc @@ -459,7 +459,7 @@ EvalResultPtr TrivialPrimEvaluator::Run(AnalysisEnginePtr engine, const ConfigPt EvalResultPtr TransitionPrimEvaluator::Run(AnalysisEnginePtr engine, const ConfigPtrList &args_conf_list, const AnfNodeConfigPtr &out_conf) { if (args_conf_list.empty()) { - MS_LOG(EXCEPTION) << "Size should greater than 0"; + MS_LOG(EXCEPTION) << "Size should be greater than 0"; } AbstractBasePtrList args_spec_list; (void)std::transform(args_conf_list.begin(), args_conf_list.end(), std::back_inserter(args_spec_list), diff --git 
a/mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc b/mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc index 0f9b887571e..9b41a41f2b1 100644 --- a/mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc +++ b/mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc @@ -194,16 +194,18 @@ std::vector> OpTilingCalculateAdapter::Conv if (!has_input_name_attr) { MS_LOG(EXCEPTION) << "Node should has attr: input_names. " << node->fullname_with_scope(); } + auto input_names_attr = AnfAlgo ::GetNodeAttr>(node, "input_names"); std::vector op_infer_depends; std::vector> constant_ops; for (auto index : depends_list_me) { if (LongToSize(index) > input_names_attr.size()) { - MS_LOG(EXCEPTION) << "Input index " << index << " should less input_names' size " << input_names_attr.size(); + MS_LOG(EXCEPTION) << "Input index " << index << " should not be greater than input_names' size " + << input_names_attr.size(); } auto iter = depend_tensor_map.find(LongToSize(index)); if (iter == depend_tensor_map.end()) { - MS_LOG(EXCEPTION) << "Input index " << index << " should less than depend_tensor_map' size " + MS_LOG(EXCEPTION) << "Input index " << index << " should be less than depend_tensor_map' size " << input_names_attr.size(); } auto depend_name = input_names_attr[index]; @@ -245,7 +247,7 @@ void OpTilingCalculateAdapter::InitOpIoName(const CNodePtr &node) { MS_EXCEPTION_IF_NULL(item); if (item->param_type() == PARAM_DYNAMIC) { if (dynamic_input_index > dynamic_inputs_list.size()) { - MS_LOG(EXCEPTION) << "Dynamic input index should less than the dynamic input's size."; + MS_LOG(EXCEPTION) << "Dynamic input index should be less than the dynamic input's size."; } auto real_inputs_num = dynamic_inputs_list[dynamic_input_index]; for (auto k = 0; k < real_inputs_num; k++) { diff --git a/mindspore/common/tensor.py b/mindspore/common/tensor.py index 2bb4f978e4f..a73d483fcdf 100644 --- a/mindspore/common/tensor.py +++ b/mindspore/common/tensor.py @@ -380,7 +380,7 @@ class Tensor(Tensor_): def itemset(self, *args): r""" - Insert scalar into a tensor (scalar is cast to tensor’s dtype, if possible). + Insert scalar into a tensor (scalar is cast to tensor's dtype, if possible). There must be at least 1 argument, and define the last argument as item. Then, tensor.itemset(\*args) is equivalent to :math:`tensor[args] = item`. @@ -1093,7 +1093,7 @@ class Tensor(Tensor_): def ptp(self, axis=None, keepdims=False): """ - The name of the function comes from the acronym for ‘peak to peak’. + The name of the function comes from the acronym for "peak to peak". Note: Numpy arguments `dtype` and `out` are not supported. @@ -1477,22 +1477,22 @@ class Tensor(Tensor_): indices (Tensor): The indices with shape `(Nj...)` of the values to extract. axis (int, optional): The axis over which to select values. By default, the flattened input array is used. Default: `None`. - mode (‘raise’, ‘wrap’, ‘clip’, optional): + mode ('raise', 'wrap', 'clip', optional): - edge: Pads with the edge values of `arr`. - raise: Raises an error; - wrap: Wraps around; - - clip: Clips to the range. `clip` mode means that all indices that are + - clip: Clips to the range. 'clip' mode means that all indices that are too large are replaced by the index that addresses the last element along that axis. Note that this disables indexing with negative numbers. - Default: `clip`. + Default: 'clip'. Returns: Tensor, the indexed result. 
Raises: - ValueError: if `axis` is out of range, or `mode` has values other than (‘raise’, ‘wrap’, ‘clip’) + ValueError: if `axis` is out of range, or `mode` has values other than ('raise', 'wrap', 'clip') Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` @@ -1542,15 +1542,15 @@ class Tensor(Tensor_): choices (Union[tuple, list, Tensor]): Choice arrays. `a` and all of the `choices` must be broadcasted to the same shape. If `choices` is itself an array, then its outermost dimension (i.e., the one corresponding to ``choices.shape[0]``) - is taken as defining the “sequence”. - mode (‘raise’, ‘wrap’, ‘clip’, optional): Specifies how indices outside + is taken as defining the "sequence". + mode ('raise', 'wrap', 'clip', optional): Specifies how indices outside ``[0, n-1]`` will be treated: - ‘raise’ – raise an error (default); + 'raise' – raise an error (default); - ‘wrap’ – wrap around; + 'wrap' – wrap around; - ‘clip’ – clip to the range. ‘clip’ mode means that all indices that are + 'clip' – clip to the range. 'clip' mode means that all indices that are too large are replaced by the index that addresses the last element along that axis. Note that this disables indexing with negative numbers. @@ -1615,10 +1615,10 @@ class Tensor(Tensor_): Args: v (Union[int, float, bool, list, tuple, Tensor]): Values to insert into `a`. - side ('left', 'right', optional): If ‘left’, the index of the first suitable - location found is given. If ‘right’, return the last such index. If there is + side ('left', 'right', optional): If 'left', the index of the first suitable + location found is given. If 'right', return the last such index. If there is no suitable index, return either 0 or N (where N is the length of `a`). - Default: `left`. + Default: 'left'. sorter (Union[int, float, bool, list, tuple, Tensor]): 1-D optional array of integer indices that sort array `a` into ascending order. They are typically the result of argsort. @@ -1778,7 +1778,7 @@ class Tensor(Tensor_): keepdims (bool): If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the input array. If the default value is passed, then keepdims will not be passed through to the sum method of - sub-classes of ndarray, however any non-default value will be. If the sub-class’ method does not + sub-classes of ndarray, however any non-default value will be. If the sub-class method does not implement keepdims any exceptions will be raised. Default: `False`. initial (scalar): Starting value for the sum. Default: `None`. 
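The 'left'/'right' semantics that the corrected `searchsorted` docstring describes are easy to sanity-check with a short sketch. This uses numpy as a stand-in, on the assumption (suggested by the docstring itself) that the Tensor method mirrors `np.searchsorted`:

    import numpy as np

    a = np.array([1, 2, 3, 3, 3, 4])
    # side='left' returns the index of the first suitable location;
    # side='right' returns one past the last equal element.
    print(np.searchsorted(a, 3, side='left'))   # 2
    print(np.searchsorted(a, 3, side='right'))  # 5
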
diff --git a/mindspore/compression/quant/quant_utils.py b/mindspore/compression/quant/quant_utils.py index 52d1fb18f70..9406f3f164b 100644 --- a/mindspore/compression/quant/quant_utils.py +++ b/mindspore/compression/quant/quant_utils.py @@ -46,11 +46,11 @@ def cal_quantization_params(input_min, input_min = np.minimum(0.0, input_min) if input_min.shape != input_max.shape: - raise ValueError("input min shape should equal to input max.") + raise ValueError("input min shape should be equal to input max shape.") if len(input_min.shape) > 1: raise ValueError("input min and max shape should be one dim.") if (input_min > input_max).all(): - raise ValueError("input_min min should less than input max.") + raise ValueError("input min should be less than input max.") if (input_max == input_min).all(): return np.ones(input_min.shape), np.zeros(input_min.shape) @@ -105,7 +105,7 @@ def weight2int(data, scale, zero_point, quant_min, quant_max): if scale.shape != zero_point.shape: raise ValueError("`scale` and `zero_point` should have the same shape.") if scale.shape[0] < 0: - raise ValueError("`scale` and `zero_point` shape should greater than zero.") + raise ValueError("`scale` and `zero_point` shape should be greater than zero.") if len(scale.shape) >= 1 and scale.shape[0] > 1: # for perchannel if scale.shape[0] == data.shape[0]: diff --git a/mindspore/core/abstract/prim_arrays.cc b/mindspore/core/abstract/prim_arrays.cc index 6d61820df47..361dc62ad02 100644 --- a/mindspore/core/abstract/prim_arrays.cc +++ b/mindspore/core/abstract/prim_arrays.cc @@ -881,7 +881,7 @@ AbstractBasePtr InferImplReshape(const AnalysisEnginePtr &, const PrimitivePtr & shape_num = LongMulWithOverflowCheck(value, shape_num); } if (shape_num != x_num) { - MS_LOG(EXCEPTION) << "The accumulate of x_shape must equal to out_shape, but got x_shape: " << x_shape + MS_LOG(EXCEPTION) << "The product of x_shape must be equal to the product of out_shape, but got x_shape: " << x_shape << ", and out_shape: " << shape; } diff --git a/mindspore/core/abstract/prim_nn.cc b/mindspore/core/abstract/prim_nn.cc index c3ee5d27cb9..a53bd4528ca 100644 --- a/mindspore/core/abstract/prim_nn.cc +++ b/mindspore/core/abstract/prim_nn.cc @@ -264,6 +264,7 @@ AbstractBasePtr InferImplConv2D(const AnalysisEnginePtr &, const PrimitivePtr &p uint64_t c_axis = 1; uint64_t h_axis = 2; uint64_t w_axis = 3; + int64_t data_format = GetAndCheckFormat(primitive->GetAttr("format")); if (data_format == Format::NHWC) { c_axis = 3; @@ -273,22 +274,25 @@ AbstractBasePtr InferImplConv2D(const AnalysisEnginePtr &, const PrimitivePtr &p int64_t group = CheckAttrPositiveInt64(op_name, primitive->GetAttr("group"), "group"); if ((x_shape[c_axis] != Shape::SHP_ANY) && (w_shape[c_axis] != Shape::SHP_ANY) && ((x_shape[c_axis] / group) != w_shape[c_axis])) { - MS_LOG(EXCEPTION) << "x_shape[C_in] / group must equal to w_shape[C_in] = " << w_shape[c_axis] << ", but got " + MS_LOG(EXCEPTION) << "x_shape[C_in] / group must be equal to w_shape[C_in]: " << w_shape[c_axis] << ", but got " << (x_shape[c_axis] / group); } + int64_t out_channel = CheckAttrPositiveInt64(op_name, primitive->GetAttr("out_channel"), "out_channel"); if ((w_shape[n_axis] != Shape::SHP_ANY) && (w_shape[n_axis] != out_channel)) { - MS_LOG(EXCEPTION) << "w_shape[" << n_axis << "] = " << w_shape[n_axis] << " must equal to = " << out_channel; + MS_LOG(EXCEPTION) << "w_shape[" << n_axis << "] = " << w_shape[n_axis] << " must be equal to " << out_channel; } + const size_t kernel_size_num_element = 2; std::vector kernel_size =
CheckAttrIntOrTuple(op_name, primitive->GetAttr("kernel_size"), 0, kernel_size_num_element); if ((w_shape[h_axis] != Shape::SHP_ANY) && (w_shape[h_axis] != kernel_size[0])) { - MS_LOG(EXCEPTION) << "weight height = " << w_shape[h_axis] << ", must equal to = " << kernel_size[0]; + MS_LOG(EXCEPTION) << "weight height: " << w_shape[h_axis] << " must be equal to " << kernel_size[0]; } if ((w_shape[w_axis] != Shape::SHP_ANY) && (w_shape[w_axis] != kernel_size[1])) { - MS_LOG(EXCEPTION) << "weight width = " << w_shape[w_axis] << ", must equal to = " << kernel_size[1]; + MS_LOG(EXCEPTION) << "weight width: " << w_shape[w_axis] << " must be equal to " << kernel_size[1]; } + std::vector<int64_t> stride = CheckAttrIntOrTuple(op_name, primitive->GetAttr("stride"), stride_start_idx, stride_num_element); std::vector<int64_t> dilation = @@ -318,6 +322,7 @@ AbstractBasePtr InferImplConv2D(const AnalysisEnginePtr &, const PrimitivePtr &p std::vector<ValuePtr> pad_list_val = {MakeValue(pad_list[0]), MakeValue(pad_list[1]), MakeValue(pad_list[2]), MakeValue(pad_list[3])}; primitive->set_attr("pad_list", MakeValue(pad_list_val)); + ShapeVector output_shape; ShapeVector output_shape_min; ShapeVector output_shape_max; @@ -333,6 +338,7 @@ AbstractBasePtr InferImplConv2D(const AnalysisEnginePtr &, const PrimitivePtr &p CheckShapeAnyAndPositive(op_name + " output_shape", output_shape); CheckShapeAllPositive(op_name + " output_shape_min", output_shape_min); CheckShapeAllPositive(op_name + " output_shape_max", output_shape_max); + TypePtr x_type = input_x->element()->GetTypeTrack(); if (x_type->type_id() == TypeId::kNumberTypeInt8) { x_type = kInt32; diff --git a/mindspore/core/ops/conv2d.cc b/mindspore/core/ops/conv2d.cc index 6e1af1f0426..d9c6f2d9e51 100644 --- a/mindspore/core/ops/conv2d.cc +++ b/mindspore/core/ops/conv2d.cc @@ -182,12 +182,13 @@ abstract::ShapePtr Conv2dInferShape(const PrimitivePtr &primitive, const std::ve int64_t group = CheckAttrPositiveInt64(prim_name, primitive->GetAttr("group"), "group"); if ((x_shape[c_axis] != Shape::SHP_ANY) && (w_shape[c_axis] != Shape::SHP_ANY) && ((x_shape[c_axis] / group) != w_shape[c_axis])) { - MS_LOG(EXCEPTION) << "x_shape[C_in] / group must equal to w_shape[C_in] = " << w_shape[c_axis] << ", but got " + MS_LOG(EXCEPTION) << "x_shape[C_in] / group must be equal to w_shape[C_in]: " << w_shape[c_axis] << ", but got " << (x_shape[c_axis] / group); } int64_t out_channel = CheckAttrPositiveInt64(prim_name, primitive->GetAttr("out_channel"), "out_channel"); if ((w_shape[n_axis] != Shape::SHP_ANY) && (w_shape[n_axis] != out_channel)) { - MS_LOG(EXCEPTION) << "w_shape[" << n_axis << "] = " << w_shape[n_axis] << " must equal to = " << out_channel; + MS_LOG(EXCEPTION) << "w_shape[" << n_axis << "] = " << w_shape[n_axis] + << " must be equal to out_channel: " << out_channel; } constexpr size_t kernel_size_num = 2; constexpr size_t stride_num = 2; @@ -196,10 +197,12 @@ abstract::ShapePtr Conv2dInferShape(const PrimitivePtr &primitive, const std::ve constexpr size_t start_index = 2; std::vector<int64_t> kernel_size = CheckAttrIntOrTuple(primitive->GetAttr("kernel_size"), 0, kernel_size_num); if ((w_shape[h_axis] != Shape::SHP_ANY) && (w_shape[h_axis] != kernel_size[0])) { - MS_LOG(EXCEPTION) << "weight height = " << w_shape[h_axis] << ", must equal to = " << kernel_size[0]; + MS_LOG(EXCEPTION) << "weight height = " << w_shape[h_axis] + << ", must be equal to kernel_size[0]: " << kernel_size[0]; } if ((w_shape[w_axis] != Shape::SHP_ANY) && (w_shape[w_axis] != kernel_size[1])) { - MS_LOG(EXCEPTION) << "weight
width = " << w_shape[w_axis] << ", must equal to = " << kernel_size[1]; + MS_LOG(EXCEPTION) << "weight width = " << w_shape[w_axis] + << ", must be equal to kernel_size[1]: " << kernel_size[1]; } std::vector stride = CheckAttrIntOrTuple(primitive->GetAttr("stride"), start_index, stride_num); std::vector dilation = CheckAttrIntOrTuple(primitive->GetAttr("dilation"), start_index, dilation_num); diff --git a/mindspore/core/ops/logical_and.h b/mindspore/core/ops/logical_and.h index 3063ecc25ea..7d0228be519 100644 --- a/mindspore/core/ops/logical_and.h +++ b/mindspore/core/ops/logical_and.h @@ -27,7 +27,7 @@ namespace mindspore { namespace ops { constexpr auto kNameLogicalAnd = "LogicalAnd"; -/// \brief Computes the “logical AND” of two tensors element-wise. +/// \brief Computes the "logical AND" of two tensors element-wise. /// Refer to Python API @ref mindspore.ops.LogicalAnd for more details. class MS_CORE_API LogicalAnd : public PrimitiveC { public: diff --git a/mindspore/core/ops/logical_not.h b/mindspore/core/ops/logical_not.h index 3155a87812b..fe0ff299c86 100644 --- a/mindspore/core/ops/logical_not.h +++ b/mindspore/core/ops/logical_not.h @@ -25,7 +25,7 @@ namespace mindspore { namespace ops { constexpr auto kNameLogicalNot = "LogicalNot"; -/// \brief Computes the “logical NOT” of a tensor element-wise. +/// \brief Computes the "logical NOT" of a tensor element-wise. /// Refer to Python API @ref mindspore.ops.LogicalNot for more details. class MS_CORE_API LogicalNot : public PrimitiveC { public: diff --git a/mindspore/core/ops/logical_or.h b/mindspore/core/ops/logical_or.h index bb4256d6180..176ab7e18dc 100644 --- a/mindspore/core/ops/logical_or.h +++ b/mindspore/core/ops/logical_or.h @@ -25,7 +25,7 @@ namespace mindspore { namespace ops { constexpr auto kNameLogicalOr = "LogicalOr"; -/// \brief Computes the “logical OR” of two tensors element-wise. +/// \brief Computes the "logical OR" of two tensors element-wise. /// Refer to Python API @ref mindspore.ops.LogicalOr for more details. class MS_CORE_API LogicalOr : public PrimitiveC { public: diff --git a/mindspore/core/ops/reduce_all.h b/mindspore/core/ops/reduce_all.h index c235e49aec4..4ca74e4111d 100644 --- a/mindspore/core/ops/reduce_all.h +++ b/mindspore/core/ops/reduce_all.h @@ -27,7 +27,7 @@ namespace mindspore { namespace ops { constexpr auto kNameReduceAll = "ReduceAll"; -/// \brief Reduces a dimension of a tensor by the “logicalAND” of all elements in the dimension. +/// \brief Reduces a dimension of a tensor by the "logical AND" of all elements in the dimension. /// Refer to Python API @ref mindspore.ops.ReduceAll for more details. class MS_CORE_API ReduceAll : public Reduce { public: diff --git a/mindspore/core/ops/reduce_any.h b/mindspore/core/ops/reduce_any.h index 53f080a4525..36896f411a7 100644 --- a/mindspore/core/ops/reduce_any.h +++ b/mindspore/core/ops/reduce_any.h @@ -27,7 +27,7 @@ namespace mindspore { namespace ops { constexpr auto kNameReduceAny = "ReduceAny"; -/// \brief Reduces a dimension of a tensor by the “logical OR” of all elements in the dimension. +/// \brief Reduces a dimension of a tensor by the "logical OR" of all elements in the dimension. /// Refer to Python API @ref mindspore.ops.ReduceAny for more details. 
class MS_CORE_API ReduceAny : public Reduce { public: diff --git a/mindspore/dataset/engine/validators.py b/mindspore/dataset/engine/validators.py index 3034b89ffdd..cfb50aa4421 100644 --- a/mindspore/dataset/engine/validators.py +++ b/mindspore/dataset/engine/validators.py @@ -432,7 +432,7 @@ def check_minddataset(method): dataset_file = param_dict.get('dataset_file') if isinstance(dataset_file, list): if len(dataset_file) > 4096: - raise ValueError("length of dataset_file should less than or equal to {}.".format(4096)) + raise ValueError("length of dataset_file should be less than or equal to {}.".format(4096)) for f in dataset_file: check_file(f) else: diff --git a/mindspore/explainer/benchmark/_attribution/metric.py b/mindspore/explainer/benchmark/_attribution/metric.py index b6002508b65..10eef90bb62 100644 --- a/mindspore/explainer/benchmark/_attribution/metric.py +++ b/mindspore/explainer/benchmark/_attribution/metric.py @@ -140,7 +140,7 @@ class LabelSensitiveMetric(AttributionMetric): """Checks whether num_labels is valid.""" check_value_type("num_labels", num_labels, int) if num_labels < 1: - raise ValueError("Argument num_labels must be parsed with a integer > 0.") + raise ValueError("Argument num_labels must be an integer > 0.") def aggregate(self, result, targets): """Aggregates single result to global_results.""" diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc index cab67eab211..9186cbd0544 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc +++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc @@ -42,7 +42,7 @@ int AffineFP32Coder::PrepareSpliceOp() { // init splice param splice_param_ = new SpliceWrapperParam(); if (affine_param_->context_size_ > MAX_SHAPE_SIZE) { - MS_LOG(ERROR) << "Context size should less than MAX_SHAPE_SIZE."; + MS_LOG(ERROR) << "Context size should be less than MAX_SHAPE_SIZE."; return RET_ERROR; } for (int i = 0; i < affine_param_->context_size_; i++) { diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc b/mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc index ba0049cc76f..0237c6c795a 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc +++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc @@ -63,7 +63,7 @@ int AffineInt8Coder::PrepareSpliceOp() { // init splice param splice_param_ = new SpliceWrapperParam(); if (affine_param_->context_size_ > MAX_SHAPE_SIZE) { - MS_LOG(ERROR) << "Context size should less than MAX_SHAPE_SIZE."; + MS_LOG(ERROR) << "Context size should be less than MAX_SHAPE_SIZE."; return RET_ERROR; } for (int i = 0; i < affine_param_->context_size_; i++) { diff --git a/mindspore/lite/src/common/log_util.h b/mindspore/lite/src/common/log_util.h index 43a7836d3c2..72eb5d7ae4c 100644 --- a/mindspore/lite/src/common/log_util.h +++ b/mindspore/lite/src/common/log_util.h @@ -45,12 +45,12 @@ } \ } while (0) -#define CHECK_LESS_RETURN(size1, size2) \ - do { \ - if ((size1) < (size2)) { \ - MS_LOG(ERROR) << #size1 << " must not less than " << #size2; \ - return mindspore::lite::RET_ERROR; \ - } \ +#define CHECK_LESS_RETURN(size1, size2) \ + do { \ + if ((size1) < (size2)) { \ + MS_LOG(ERROR) << #size1 << " must not be less than " << #size2; \ + return mindspore::lite::RET_ERROR; \ + } \ } while (0) #else diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc
b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc index 891c2cbfb8c..d83bfc6f391 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc @@ -183,7 +183,7 @@ int SgdCPUKernel::Prepare() { } if (sgd_param_->use_nesterov_ && sgd_param_->dampening_ > 0.0f) { - MS_LOG(ERROR) << "If use nesterov, dampening must equal to 0.0"; + MS_LOG(ERROR) << "If nesterov is used, dampening must be equal to 0.0"; return RET_ERROR; } auto ret = OptimizerKernel::Prepare(); diff --git a/mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc b/mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc index a73bd0abfb4..0501c8759f5 100644 --- a/mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc +++ b/mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc @@ -67,7 +67,7 @@ AclModelOptions CustomAscend310Kernel::GetAclModelOptions(const mindspore::Conte STATUS CustomAscend310Kernel::PrepareModelInfer() { if (inputs_.size() < 1) { - MS_LOG(ERROR) << "Inputs size should not less than 1."; + MS_LOG(ERROR) << "Inputs size should not be less than 1."; return lite::RET_ERROR; } // last input is om data tensor diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc index b696a4373ea..c566915dba1 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc @@ -341,7 +341,7 @@ int MatMulOpenCLKernel::InitBias() { #endif void MatMulOpenCLKernel::SetGlobalLocal() { - // local size should less than MAX_GROUP_SIZE + // local size should be less than MAX_GROUP_SIZE local_size_ = {32, 4, 1}; global_size_ = {1, 1, 1}; global_size_ = {UP_DIV(static_cast<size_t>(outShape[3]), C4NUM), diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc index 82738b2b940..7a7ab00d2cd 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc @@ -226,7 +226,7 @@ int StrassenOpenCLKernel::StrassenSetGlobalLocal(size_t strassen_size, int type_ } void StrassenOpenCLKernel::SetGlobalLocal() { - // local size should less than MAX_GROUP_SIZE + // local size should be less than MAX_GROUP_SIZE local_size_ = {32, 4, 1}; global_size_ = {1, 1, 1}; size_t strassen_size = outShape[3] / 2; diff --git a/mindspore/lite/tools/common/graph_util.h b/mindspore/lite/tools/common/graph_util.h index d1acfe17eee..db6d4eb510f 100644 --- a/mindspore/lite/tools/common/graph_util.h +++ b/mindspore/lite/tools/common/graph_util.h @@ -113,7 +113,8 @@ bool IndexingCompress(const std::set &quant_data_set, const std::map if (index > pack_repetition_size_in_byte * 8) { - MS_LOG(ERROR) << "unexpected index: " << index << " should not greater than " << pack_repetition_size_in_byte * 8; + MS_LOG(ERROR) << "unexpected index: " << index << " should not be greater than " + << pack_repetition_size_in_byte * 8; return false; } // update tensor data @@ -186,7 +187,7 @@ bool SparsityCompress(const std::set &quant_data_set, const std::map if (index > pack_sparsity_size_in_byte * 8) { - MS_LOG(ERROR) << "unexpected index: " << index << " should not greater than " << pack_sparsity_size_in_byte * 8; + MS_LOG(ERROR) << "unexpected index: " << index << " should not be greater than " << pack_sparsity_size_in_byte * 8; return false; } auto new_data_str = BoolVectorToString(bits); diff --git
a/mindspore/lite/tools/optimizer/fisson/fisson_util.cc b/mindspore/lite/tools/optimizer/fisson/fisson_util.cc index 76e2ba4ad74..b736f79826b 100644 --- a/mindspore/lite/tools/optimizer/fisson/fisson_util.cc +++ b/mindspore/lite/tools/optimizer/fisson/fisson_util.cc @@ -89,8 +89,8 @@ bool CalSplitOutputShape(int64_t splited_axis_value, const SplitInfo *split_info } // out-shape after splited int64_t tmp_value = 0; - MS_CHECK_TRUE_MSG(split_num > 0, false, "out_num of split_info should greater than zero"); - MS_CHECK_TRUE_MSG(split_len > 0, false, "split_len should greater than zero"); + MS_CHECK_TRUE_MSG(split_num > 0, false, "out_num of split_info should be greater than zero"); + MS_CHECK_TRUE_MSG(split_len > 0, false, "split_len should be greater than zero"); for (int64_t i = 0; i < split_num - 1; i++) { if (INT_MUL_OVERFLOW_THRESHOLD(split_info->size_splits[i], splited_axis_value, INT64_MAX)) { MS_LOG(ERROR) << "int mul overflow"; diff --git a/mindspore/nn/layer/quant.py b/mindspore/nn/layer/quant.py index ea48ffc9876..0474debeb0d 100644 --- a/mindspore/nn/layer/quant.py +++ b/mindspore/nn/layer/quant.py @@ -335,7 +335,7 @@ class FakeQuantWithMinMaxObserver(UniformQuantObserver): symmetric (bool): Whether the quantization algorithm is symmetric or not. Default: False. narrow_range (bool): Whether the quantization algorithm uses narrow range or not. Default: False. quant_delay (int): Quantization delay parameters according to the global step. Default: 0. - neg_trunc (bool): Whether the quantization algorithm uses nagetive truncation or not. Default: False. + neg_trunc (bool): Whether the quantization algorithm uses negative truncation or not. Default: False. mode (str): Optional quantization mode, currently only `DEFAULT`(QAT) and `LEARNED_SCALE` are supported. Default: ("DEFAULT") Inputs: diff --git a/mindspore/nn/optim/sgd.py b/mindspore/nn/optim/sgd.py index ae7b7a303e6..9cdb9793ad3 100755 --- a/mindspore/nn/optim/sgd.py +++ b/mindspore/nn/optim/sgd.py @@ -94,7 +94,7 @@ class SGD(Optimizer): dampening (float): A floating point value of dampening for momentum. must be at least 0.0. Default: 0.0. weight_decay (float): Weight decay (L2 penalty). It must be equal to or greater than 0. Default: 0.0. nesterov (bool): Enables the Nesterov momentum. If use nesterov, momentum must be positive, - and dampening must equal to 0.0. Default: False. + and dampening must be equal to 0.0. Default: False. loss_scale (float): A floating point value for the loss scale, which must be larger than 0.0. In general, use the default value. Only when `FixedLossScaleManager` is used for training and the `drop_overflow_update` in `FixedLossScaleManager` is set to False, then this value needs to be the same as the `loss_scale` in @@ -164,7 +164,7 @@ class SGD(Optimizer): validator.check_value_type("nesterov", nesterov, [bool], self.cls_name) if nesterov and (momentum <= 0.0 or dampening != 0.0): - raise ValueError("If use nesterov, momentum must be positive and dampening must equal to 0.0," + raise ValueError("If nesterov is used, momentum must be positive and dampening must be equal to 0.0, " "but got momentum {}, dampening {}".format(momentum, dampening)) self.nesterov = nesterov diff --git a/mindspore/nn/sparse/sparse.py b/mindspore/nn/sparse/sparse.py index c0e4937b950..cb3ebc31907 100644 --- a/mindspore/nn/sparse/sparse.py +++ b/mindspore/nn/sparse/sparse.py @@ -70,7 +70,7 @@ class SparseToDense(Cell): class SparseTensorDenseMatmul(Cell): """ Multiplies sparse matrix `a` and dense matrix `b`.
- The rank of sparse matrix and dense matrix must equal to `2`. + The rank of sparse matrix and dense matrix must be equal to `2`. Args: adjoint_st (bool): If true, sparse tensor is transposed before multiplication. Default: False. diff --git a/mindspore/nn/wrap/loss_scale.py b/mindspore/nn/wrap/loss_scale.py index b138b2d153f..9faab9df8cf 100644 --- a/mindspore/nn/wrap/loss_scale.py +++ b/mindspore/nn/wrap/loss_scale.py @@ -364,7 +364,7 @@ class TrainOneStepWithLossScaleCell(TrainOneStepCell): cleared before executing the computation. Outputs: - Tuple[object, object], the first value is False for GPU backend, while it is a instance of + Tuple[object, object], the first value is False for GPU backend, while it is an instance of NPUAllocFloatStatus for other backend. The status is used to detect overflow during overflow detection. The second value is the same as the input of `compute_input`, but contains some information about the execution order. diff --git a/mindspore/numpy/array_creations.py b/mindspore/numpy/array_creations.py index 25e997deb34..5ad37156481 100644 --- a/mindspore/numpy/array_creations.py +++ b/mindspore/numpy/array_creations.py @@ -1226,20 +1226,20 @@ def meshgrid(*xi, sparse=False, indexing='xy'): Args: *xi (Tensor): 1-D arrays representing the coordinates of a grid. - indexing (‘xy’, ‘ij’, optional): Cartesian (‘xy’, default) or - matrix (‘ij’) indexing of output. In the 2-D case with + indexing ('xy', 'ij', optional): Cartesian ('xy', default) or + matrix ('ij') indexing of output. In the 2-D case with inputs of length `M` and `N`, the outputs are of shape `(N, M)` - for ‘xy’ indexing and `(M, N)` for ‘ij’ indexing. In the 3-D + for 'xy' indexing and `(M, N)` for 'ij' indexing. In the 3-D case with inputs of length `M`, `N` and `P`, outputs are of shape - `(N, M, P)` for ‘xy’ indexing and `(M, N, P)` for ‘ij’ indexing. + `(N, M, P)` for 'xy' indexing and `(M, N, P)` for 'ij' indexing. sparse (bool, optional): If True a sparse grid is returned in order to conserve memory. Default is False. Returns: Tuple of tensors, for vectors `x1, x2,…, xn` with lengths ``Ni=len(xi)``, return `(N1, N2, N3,...Nn)` shaped arrays if - ``indexing=’ij’`` or `(N2, N1, N3,...Nn)` shaped arrays if - ``indexing=’xy’`` with the elements of `xi` repeated to fill the matrix + ``indexing='ij'`` or `(N2, N1, N3,...Nn)` shaped arrays if + ``indexing='xy'`` with the elements of `xi` repeated to fill the matrix along the first dimension for `x1`, the second for `x2` and so on. Raises: @@ -1530,7 +1530,7 @@ def diagflat(v, k=0): v (Tensor): Input data, which is flattened and set as the `k-th` diagonal of the output. k (int, optional): Diagonal to set; 0, the default, corresponds to the - “main” diagonal, a positive (negative) `k` giving the number of the + "main" diagonal, a positive (negative) `k` giving the number of the diagonal above (below) the main. Returns: diff --git a/mindspore/numpy/array_ops.py b/mindspore/numpy/array_ops.py index 92189ae52c0..f2131831f25 100644 --- a/mindspore/numpy/array_ops.py +++ b/mindspore/numpy/array_ops.py @@ -1187,7 +1187,7 @@ def tile(a, reps): So a shape (3,) array is promoted to (1, 3) for 2-D replication, or shape (1, 1, 3) for 3-D replication. If this is not the desired behavior, promote `a` to d-dimensions manually before calling this function. - If ``a.ndim > d``, `reps` is promoted to ``a.ndim`` by pre-pending 1’s to it. Thus + If ``a.ndim > d``, `reps` is promoted to ``a.ndim`` by pre-pending 1's to it. 
Thus for an `a` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as (1, 1, 2, 2). Args: @@ -1839,7 +1839,7 @@ def take(a, indices, axis=None, mode='clip'): """ Takes elements from an array along an axis. - When axis is not None, this function does the same thing as “fancy” indexing + When axis is not None, this function does the same thing as "fancy" indexing (indexing arrays using arrays); however, it can be easier to use if you need elements along a given axis. A call such as ``np.take(arr, indices, axis=3)`` is equivalent to ``arr[:,:,:,indices,...]``. @@ -1853,14 +1853,14 @@ def take(a, indices, axis=None, mode='clip'): indices (Tensor): The indices with shape `(Nj...)` of the values to extract. axis (int, optional): The axis over which to select values. By default, the flattened input array is used. - mode (‘raise’, ‘wrap’, ‘clip’, optional): Specifies how out-of-bounds + mode ('raise', 'wrap', 'clip', optional): Specifies how out-of-bounds indices will behave. - ‘raise’ – raise an error; + 'raise' – raise an error; - ‘wrap’ – wrap around; + 'wrap' – wrap around; - ‘clip’ – clip to the range. ‘clip’ mode means that all indices that are + 'clip' – clip to the range. 'clip' mode means that all indices that are too large are replaced by the index that addresses the last element along that axis. Note that this disables indexing with negative numbers. @@ -2097,7 +2097,7 @@ def _get_grid(shape): def choose(a, choices, mode='clip'): """ Construct an array from an index array and a list of arrays to choose from. - Given an “index” array `a` of integers and a sequence of n arrays (choices), + Given an "index" array `a` of integers and a sequence of n arrays (choices), `a` and each choice array are first broadcast, as necessary, to arrays of a common shape; calling these `Ba` and `Bchoices[i], i = 0,…,n-1` we have that, necessarily, ``Ba.shape == Bchoices[i].shape`` for each `i`. Then, a new array @@ -2129,15 +2129,15 @@ def choose(a, choices, mode='clip'): choices (sequence of arrays): Choice arrays. `a` and all of the `choices` must be broadcastable to the same shape. If `choices` is itself an array, then its outermost dimension (i.e., the one corresponding to ``choices.shape[0]``) - is taken as defining the “sequence”. - mode (‘raise’, ‘wrap’, ‘clip’, optional): Specifies how indices outside + is taken as defining the "sequence". + mode ('raise', 'wrap', 'clip', optional): Specifies how indices outside ``[0, n-1]`` will be treated: - ‘raise’ – raise an error; + 'raise' – raise an error; - ‘wrap’ – wrap around; + 'wrap' – wrap around; - ‘clip’ – clip to the range. ‘clip’ mode means that all indices that are + 'clip' – clip to the range. 'clip' mode means that all indices that are too large are replaced by the index that addresses the last element along that axis. Note that this disables indexing with negative numbers. diff --git a/mindspore/numpy/logic_ops.py b/mindspore/numpy/logic_ops.py index 8d00660737d..6fc53c0a75e 100644 --- a/mindspore/numpy/logic_ops.py +++ b/mindspore/numpy/logic_ops.py @@ -755,7 +755,7 @@ def array_equal(a1, a2, equal_nan=False): Args: a1/a2 (Union[int, float, bool, list, tuple, Tensor]): Input arrays. - equal_nan (bool): Whether to compare NaN’s as equal. + equal_nan (bool): Whether to compare NaN's as equal. Returns: Scalar bool tensor, value is `True` if inputs are equal, `False` otherwise. @@ -878,7 +878,7 @@ def sometrue(a, axis=None, keepdims=False): If True, the axes which are reduced are left in the result as dimensions with size one. 
With this option, the result will broadcast correctly against the input array. If the default value is passed, then keepdims will not be passed through to the any method of - sub-classes of ndarray, however any non-default value will be. If the sub-class’ method does not + sub-classes of ndarray, however any non-default value will be. If the sub-class' method does not implement keepdims any exceptions will be raised. Returns: diff --git a/mindspore/numpy/math_ops.py b/mindspore/numpy/math_ops.py index 6576e52589b..368a4c30214 100644 --- a/mindspore/numpy/math_ops.py +++ b/mindspore/numpy/math_ops.py @@ -368,7 +368,7 @@ def divide(x1, x2, dtype=None): """ Returns a true division of the inputs, element-wise. - Instead of the Python traditional ‘floor division’, this returns a true + Instead of the Python traditional "floor division", this returns a true division. Note: @@ -408,7 +408,7 @@ def true_divide(x1, x2, dtype=None): """ Returns a true division of the inputs, element-wise. - Instead of the Python traditional ‘floor division’, this returns a true + Instead of the Python traditional "floor division", this returns a true division. Note: @@ -814,7 +814,7 @@ def tensordot(a, b, axes=2): Computes tensor dot product along specified axes. Given two tensors, `a` and `b`, and an array_like object containing two array_like - objects, `(a_axes, b_axes)`, sum the products of `a`’s and `b`’s elements (components) + objects, `(a_axes, b_axes)`, sum the products of `a`'s and `b`'s elements (components) over the axes specified by `a_axes` and `b_axes`. The third argument can be a single non-negative integer_like scalar, `N`; if it is such, then the last `N` dimensions of `a` and the first `N` dimensions of `b` are summed over. @@ -841,7 +841,7 @@ def tensordot(a, b, axes=2): Args: a (Tensor): Tensor to "dot". - b (Tensor): Tensor to “dot”. + b (Tensor): Tensor to "dot". axes (int or sequence of ints): integer_like: If an int `N`, sum over the last `N` axes of `a` and the first `N` @@ -930,7 +930,7 @@ def var(x, axis=None, ddof=0, keepdims=False): keepdims (bool): If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the input array. If the default value is passed, then keepdims will not be passed through to the var method of - sub-classes of tensor, however any non-default value will be. If the sub-class’ method does not + sub-classes of tensor, however any non-default value will be. If the sub-class' method does not implement keepdims any exceptions will be raised. Default: `False`. Supported Platforms: @@ -953,7 +953,7 @@ def ptp(x, axis=None, keepdims=False): """ Range of values (maximum - minimum) along an axis. - The name of the function comes from the acronym for ‘peak to peak’. + The name of the function comes from the acronym for "peak to peak". Note: Numpy arguments `dtype` and `out` are not supported. @@ -1487,7 +1487,7 @@ def amin(a, axis=None, keepdims=False, initial=None, where=True): def hypot(x1, x2, dtype=None): """ - Given the “legs” of a right triangle, returns its hypotenuse. + Given the "legs" of a right triangle, returns its hypotenuse. Equivalent to ``sqrt(x1**2 + x2**2)``, element-wise. If `x1` or `x2` is scalar_like (i.e., unambiguously cast-able to a scalar type), it is broadcast for use @@ -2706,7 +2706,7 @@ def nanvar(a, axis=None, dtype=None, ddof=0, keepdims=False): computed.
The default is to compute the variance of the flattened array. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the output Tensor. - ddof (int, optional): “Delta Degrees of Freedom”: the divisor used in the calculation is + ddof (int, optional): "Delta Degrees of Freedom": the divisor used in the calculation is ``N - ddof``, where `N` represents the number of non-NaN elements. By default `ddof` is zero. keepdims (boolean, optional): defaults to False. If this is set to True, the axes which @@ -2761,7 +2761,7 @@ def nanstd(a, axis=None, dtype=None, ddof=0, keepdims=False): flattened array. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the output Tensor. - ddof (int, optional): “Delta Degrees of Freedom”: the divisor used in the calculation is + ddof (int, optional): "Delta Degrees of Freedom": the divisor used in the calculation is ``N - ddof``, where `N` represents the number of non-NaN elements. By default `ddof` is zero. keepdims (boolean, optional): defaults to False. If this is set to True, the axes which @@ -4043,7 +4043,7 @@ def sum_(a, axis=None, dtype=None, keepdims=False, initial=None): keepdims (bool): If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the input array. If the default value is passed, then keepdims will not be passed through to the sum method of - sub-classes of ndarray, however any non-default value will be. If the sub-class’ method does not + sub-classes of ndarray, however any non-default value will be. If the sub-class' method does not implement keepdims any exceptions will be raised. Default: `False`. initial (scalar): Starting value for the sum. @@ -4286,8 +4286,8 @@ def searchsorted(a, v, side='left', sorter=None): None, then it must be sorted in ascending order, otherwise `sorter` must be an array of indices that sort it. v (Union[int, float, bool, list, tuple, Tensor]): Values to insert into `a`. - side ('left', 'right', optional): If ‘left’, the index of the first suitable - location found is given. If ‘right’, return the last such index. If there is + side ('left', 'right', optional): If 'left', the index of the first suitable + location found is given. If 'right', return the last such index. If there is no suitable index, return either 0 or N (where N is the length of `a`). sorter (Union[int, float, bool, list, tuple, Tensor]): 1-D optional array of integer indices that sort array `a` into ascending order. They are typically @@ -5554,7 +5554,7 @@ def norm(x, ord=None, axis=None, keepdims=False): # pylint: disable=redefined-bu `x` must be 1-D or 2-D, unless `ord` is None. If both `axis` and `ord` are None, the 2-norm of ``x.ravel`` will be returned. ord (Union[None, 'fro', 'nuc', inf, -inf, int, float], optional): Order of the norm. - inf means numpy’s inf object. The default is None. + inf means numpy's inf object. The default is None. axis (Union[None, int, 2-tuple of ints], optional): If `axis` is an integer, it specifies the axis of `x` along which to compute the vector norms. If `axis` is a 2-tuple, it specifies the axes that hold 2-D matrices, and the matrix norms of @@ -5703,10 +5703,10 @@ def invert(x, dtype=None): Computes bit-wise inversion, or bit-wise NOT, element-wise. Computes the bit-wise NOT of the underlying binary representation of the integers in the input arrays. This ufunc implements the C/Python operator ~.
- For signed integer inputs, the two’s complement is returned. In a two’s-complement system - negative numbers are represented by the two’s complement of the absolute value. This is + For signed integer inputs, the two's complement is returned. In a two's-complement system + negative numbers are represented by the two's complement of the absolute value. This is the most common method of representing signed integers on computers - `[1] <https://en.wikipedia.org/wiki/Two%27s_complement>`_. A N-bit two’s-complement system + `[1] <https://en.wikipedia.org/wiki/Two%27s_complement>`_. An N-bit two's-complement system can represent every integer in the range ``-2^{N-1}`` to ``+2^{N-1}-1``. Note: diff --git a/mindspore/ops/composite/math_ops.py b/mindspore/ops/composite/math_ops.py index 50f9b055033..ae2bd706a7d 100644 --- a/mindspore/ops/composite/math_ops.py +++ b/mindspore/ops/composite/math_ops.py @@ -477,7 +477,7 @@ def _check_axes_for_batch_dot(x1_shape, x2_shape, axes, prim_name=None): f"But got 'axes': {axes}, 'x1_shape': {x1_shape}, 'x2_shape': {x2_shape}.") elif isinstance(axes, int): if axes == 0: - raise ValueError(f"{msg_prefix} 'axes' should not equal to 0, but got {axes}.") + raise ValueError(f"{msg_prefix} 'axes' should not be equal to 0, but got {axes}.") if axes < 0: axes = [axes + len(x1_shape), axes + len(x2_shape)] validator.check_non_negative_int(axes[0], 'reversed axes', 'batch_dot') diff --git a/mindspore/ops/composite/multitype_ops/_constexpr_utils.py b/mindspore/ops/composite/multitype_ops/_constexpr_utils.py index cc3418ed522..17aec1d7895 100644 --- a/mindspore/ops/composite/multitype_ops/_constexpr_utils.py +++ b/mindspore/ops/composite/multitype_ops/_constexpr_utils.py @@ -576,7 +576,7 @@ def get_stride_info_from_slice(data_shape, slice_index): @constexpr def get_stride_info_from_integer(data_shape, number): - """Get stride info from a integer""" + """Get stride info from an integer""" begin_strides = [number] end_strides = [number + 1] step_strides = [1] diff --git a/mindspore/ops/operations/_inner_ops.py b/mindspore/ops/operations/_inner_ops.py index b4c2ef90d27..627470373c4 100755 --- a/mindspore/ops/operations/_inner_ops.py +++ b/mindspore/ops/operations/_inner_ops.py @@ -287,8 +287,8 @@ class MatrixDiag(PrimitiveWithInfer): Inputs: - **x** (Tensor) - A tensor which to be element-wise multi by `assist`. It can be one of the following data types: float32, float16, int32, int8, and uint8. - - **assist** (Tensor) - A eye tensor of the same type as `x`. It's rank must greater than or equal to 2 and - it's last dimension must equal to the second to last dimension. + - **assist** (Tensor) - An eye tensor of the same type as `x`. Its rank must be greater than or equal to 2 and + its last dimension must be equal to the second to last dimension. Outputs: Tensor, has the same type and shape as input `assist`. @@ -382,7 +382,7 @@ class Send(PrimitiveWithInfer): Send tensors from src_rank to the specified dest_rank. Note: - Send and Recveive must be used in combination and have same sr_tag. + Send and Receive must be used in combination and have the same sr_tag. Send must be used between servers. Args: diff --git a/mindspore/ops/operations/_quant_ops.py b/mindspore/ops/operations/_quant_ops.py index 2e729f92964..f6b0f100eda 100755 --- a/mindspore/ops/operations/_quant_ops.py +++ b/mindspore/ops/operations/_quant_ops.py @@ -188,7 +188,7 @@ class FakeLearnedScaleQuantPerLayer(PrimitiveWithInfer): quant_delay (int): Quantilization delay parameter. Before delay step in training time not update simulate quantization aware function.
After delay step in training time begin simulate the aware quantize function. Default: 0. - neg_trunc (bool): Whether the quantization algorithm uses nagetive truncation or not. Default: False. + neg_trunc (bool): Whether the quantization algorithm uses negative truncation or not. Default: False. training (bool): Training the network or not. Default: True. Inputs: diff --git a/mindspore/ops/operations/array_ops.py b/mindspore/ops/operations/array_ops.py index c88a192beca..1bbfc9b44f7 100755 --- a/mindspore/ops/operations/array_ops.py +++ b/mindspore/ops/operations/array_ops.py @@ -666,7 +666,7 @@ class Squeeze(PrimitiveWithInfer): Raises: TypeError: If `axis` is neither an int nor tuple. TypeError: If `axis` is a tuple whose elements are not all int. - ValueError: If the corresponding dimension of the specified axis does not equal to 1. + ValueError: If the corresponding dimension of the specified axis isn't equal to 1. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` @@ -5516,12 +5516,12 @@ class Meshgrid(PrimitiveWithInfer): coordinate tensors for evaluating expressions on an N-D grid. Args: - indexing (‘xy’, ‘ij’, optional): Cartesian (‘xy’, default) or - matrix (‘ij’) indexing of output. In the 2-D case with + indexing ('xy', 'ij', optional): Cartesian ('xy', default) or + matrix ('ij') indexing of output. In the 2-D case with inputs of length `M` and `N`, the outputs are of shape `(N, M)` - for ‘xy’ indexing and `(M, N)` for ‘ij’ indexing. In the 3-D + for 'xy' indexing and `(M, N)` for 'ij' indexing. In the 3-D case with inputs of length `M`, `N` and `P`, outputs are of shape - `(N, M, P)` for ‘xy’ indexing and `(M, N, P)` for ‘ij’ indexing. + `(N, M, P)` for 'xy' indexing and `(M, N, P)` for 'ij' indexing. Inputs: - **input** (Union[tuple]) - A Tuple of N 1-D Tensor objects. @@ -6223,7 +6223,7 @@ class MaskedFill(Primitive): class MaskedSelect(PrimitiveWithCheck): """ Returns a new 1-D Tensor which indexes the input tensor according to the boolean mask. - The shapes of the mask tensor and the input tensor don’t need to match, but they must be broadcastable. + The shapes of the mask tensor and the input tensor don't need to match, but they must be broadcastable. Inputs: - **x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`. @@ -6741,7 +6741,7 @@ class ExtractVolumePatches(Primitive): ValueError: If one of kernel_size or strides' first two numbers is not 1. ValueError: If padding = "VALID" and input - kernel_size is less than 0 in d, h or w dimension. ValueError: If padding = "SAME" and :math:`padding_needed = ((input_x + strides - 1) / strides - 1) * - strides + kernelz_size - input` is less than 0 in d, h or w dimension. + strides + kernel_size - input` is less than 0 in d, h or w dimension. ValueError: If x_h is not 1 or x_w is not 1 and x_w + padding_needed - k_w - s_w is less than 0. ValueError: If x_d * x_h * x_w is greater than 2048. 
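The Meshgrid indexing contract documented above is easiest to see from the output shapes (a minimal doctest-style sketch, assuming the documented 'xy'/'ij' behavior of ops.Meshgrid; the printed shapes follow the docstring rather than a verified run):
>>> import mindspore.numpy as np
>>> from mindspore import ops
>>> x = np.arange(3)  # length M = 3
>>> y = np.arange(4)  # length N = 4
>>> xv, yv = ops.Meshgrid(indexing='xy')((x, y))
>>> print(xv.shape)   # (N, M) under 'xy' indexing
(4, 3)
>>> xv, yv = ops.Meshgrid(indexing='ij')((x, y))
>>> print(xv.shape)   # (M, N) under 'ij' indexing
(3, 4)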
diff --git a/mindspore/ops/operations/comm_ops.py b/mindspore/ops/operations/comm_ops.py index 69945b94264..e9d58a8f29a 100644 --- a/mindspore/ops/operations/comm_ops.py +++ b/mindspore/ops/operations/comm_ops.py @@ -82,11 +82,11 @@ target_dtypes = (mstype.int8, mstype.int32, mstype.float16, mstype.float32) def check_hcom_group_valid(group, prim_name=None): """Check if hcom group is valid.""" - msg_pfefix = f"For '{prim_name}', only" if prim_name else "Only" + msg_prefix = f"For '{prim_name}', only" if prim_name else "Only" if context.get_context("mode") == context.PYNATIVE_MODE and \ context.get_context("device_target") == "Ascend" and \ group != GlobalComm.WORLD_COMM_GROUP: - raise RuntimeError(f"{msg_pfefix} hccl_world_group is supported in Pynative mode, but got 'group': {group}.") + raise RuntimeError(f"{msg_prefix} hccl_world_group is supported in Pynative mode, but got 'group': {group}.") class AllReduce(PrimitiveWithInfer): @@ -515,7 +515,7 @@ class Broadcast(PrimitiveWithInfer): The contents depend on the data of the `root_rank` device. Raises: - TypeError: If root_rank is not a integer or group is not a string. + TypeError: If root_rank is not an integer or group is not a string. Supported Platforms: ``Ascend`` ``GPU`` diff --git a/mindspore/ops/operations/math_ops.py b/mindspore/ops/operations/math_ops.py index 2df036d0b64..2e4a3ef3326 100644 --- a/mindspore/ops/operations/math_ops.py +++ b/mindspore/ops/operations/math_ops.py @@ -94,7 +94,7 @@ class _MathBinaryOp(_BinaryOp): args_type = {"x": x_dtype, "y": y_dtype} complex_types = [mstype.tensor_type(mstype.complex64), mstype.tensor_type(mstype.complex128)] if x_dtype in complex_types or y_dtype in complex_types: - tpye_infer_dict = { + type_infer_dict = { (mstype.complex64, mstype.complex64): mstype.tensor_type(mstype.complex64), (mstype.complex64, mstype.float32): mstype.tensor_type(mstype.complex64), (mstype.float32, mstype.complex64): mstype.tensor_type(mstype.complex64), @@ -102,12 +102,12 @@ class _MathBinaryOp(_BinaryOp): (mstype.complex128, mstype.float64): mstype.tensor_type(mstype.complex128), (mstype.float64, mstype.complex128): mstype.tensor_type(mstype.complex128), } - if (x_dtype.element_type(), y_dtype.element_type()) not in tpye_infer_dict.keys(): + if (x_dtype.element_type(), y_dtype.element_type()) not in type_infer_dict.keys(): raise TypeError('Complex math binary op expecting Tensor [complex64, complex64],' + '[complex64, float32], [float32, complex64], [complex128, complex128],' + '[complex128, float64], [float64, complex128],' + f'but got : [{format(x_dtype)},{format(y_dtype)}].') - return tpye_infer_dict.get((x_dtype.element_type(), y_dtype.element_type())) + return type_infer_dict.get((x_dtype.element_type(), y_dtype.element_type())) validator.check_tensors_dtypes_same_and_valid(args_type, valid_dtype, prim_name) return x_dtype @@ -225,7 +225,7 @@ class Add(_MathBinaryOp): >>> print(output) [5. 6. 7.] >>> # the data type of x is int32, the data type of y is float32, - >>> # and the output is the data format of higher precision flost32. + >>> # and the output is the data format of higher precision float32. >>> print(output.dtype) Float32 """ @@ -280,7 +280,7 @@ class AssignAdd(PrimitiveWithInfer): Inputs: - **variable** (Parameter) - The `Parameter`. - :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8. + :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8. 
- **value** (Union[numbers.Number, Tensor]) - The value to be added to the `variable`. It must have the same shape as `variable` if it is a Tensor. it is recommended to use the same data type when using this operator. @@ -351,7 +351,7 @@ class AssignSub(PrimitiveWithInfer): Inputs: - **variable** (Parameter) - The `Parameter`. - :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8. + :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8. - **value** (Union[numbers.Number, Tensor]) - The value to be subtracted from the `variable`. It must have the same shape as `variable` if it is a Tensor. it is recommended to use the same data type when using this operator. @@ -516,7 +516,7 @@ class ReduceMean(_Reduce): Inputs: - **x** (Tensor[Number]) - The input tensor. The dtype of the tensor to be reduced is number. - :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8. + :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8. - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions. Only constant value is allowed. Must be in the range [-rank(`x`), rank(`x`)). @@ -593,7 +593,7 @@ class ReduceSum(_Reduce): Inputs: - **x** (Tensor[Number]) - The input tensor. The dtype of the tensor to be reduced is number. - :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8. + :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8. - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions. Only constant value is allowed. Must be in the range [-rank(`x`), rank(`x`)). @@ -676,7 +676,7 @@ class ReduceAll(_Reduce): Inputs: - **x** (Tensor[bool]) - The input tensor. The dtype of the tensor to be reduced is bool. - :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8. + :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8. - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions. Only constant value is allowed. Must be in the range [-rank(x), rank(x)). @@ -735,7 +735,7 @@ class ReduceAny(_Reduce): Inputs: - **x** (Tensor[bool]) - The input tensor. The dtype of the tensor to be reduced is bool. - :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8. + :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8. - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions. Only constant value is allowed. Must be in the range [-rank(x), rank(x)). @@ -794,7 +794,7 @@ class ReduceMax(_Reduce): Inputs: - **x** (Tensor[Number]) - The input tensor. The dtype of the tensor to be reduced is number. - :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8. + :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8. - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions. Only constant value is allowed. Must be in the range [-rank(x), rank(x)).
@@ -881,7 +881,7 @@ class ReduceMin(_Reduce): Inputs: - **x** (Tensor[Number]) - The input tensor. The dtype of the tensor to be reduced is number. - :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8. + :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8. - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions. Only constant value is allowed. Must be in the range [-rank(x), rank(x)). @@ -959,7 +959,7 @@ class ReduceProd(_Reduce): Inputs: - **x** (Tensor[Number]) - The input tensor. The dtype of the tensor to be reduced is number. - :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8. + :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8. - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions. Only constant value is allowed. Must be in the range [-rank(x), rank(x)). @@ -1038,7 +1038,7 @@ class CumProd(PrimitiveWithInfer): Inputs: - **x** (Tensor[Number]) - The input tensor. - :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8. + :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8. - **axis** (int) - The dimensions to compute the cumulative product. Only constant value is allowed. @@ -1438,7 +1438,7 @@ class AddN(Primitive): return True, inputs[0] raise TypeError(f"For '{self.name}', the type of 'inputs[0]' should be a tensor, but " f"got {type(inputs[0]).__name__}, " - f"or the length of 'inputs' should not equal to 1, but got ({len(inputs)}).") + f"or the length of 'inputs' should not be equal to 1, but got ({len(inputs)}).") class AccumulateNV2(PrimitiveWithInfer): @@ -1494,7 +1494,7 @@ class AccumulateNV2(PrimitiveWithInfer): return True, inputs[0] raise TypeError(f"For '{self.name}', the type of 'inputs[0]' should be a tensor, " f"but got {type(inputs[0]).__name__}, " - f"or the length of 'inputs' should not equal to 1, but got ({len(inputs)}).") + f"or the length of 'inputs' should not be equal to 1, but got ({len(inputs)}).") def infer_shape(self, inputs): cls_name = self.name @@ -1526,7 +1526,7 @@ class Neg(PrimitiveWithInfer): Inputs: - **x** (Tensor) - The input tensor whose dtype is number. - :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8. + :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8. Outputs: Tensor, has the same shape and dtype as input. @@ -1576,7 +1576,7 @@ class InplaceAdd(PrimitiveWithInfer): Inputs: - **x** (Tensor) - The first input is a tensor whose data type is float16, float32 or int32. - :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8. + :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8. - **input_v** (Tensor) - The second input is a tensor that has the same dimension sizes as x except the first dimension, which must be the same as indices' size. It has the same data type with `x`. @@ -1645,7 +1645,7 @@ class InplaceSub(PrimitiveWithInfer): Inputs: - **x** (Tensor) - The first input is a tensor whose data type is float16, float32 or int32. 
- :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8. + :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8. - **input_v** (Tensor) - The second input is a tensor who has the same dimension sizes as x except the first dimension, which must be the same as indices' size. It has the same data type with `x`. @@ -1860,7 +1860,7 @@ class Square(Primitive): Inputs: - **x** (Tensor) - The input tensor whose dtype is number. - :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8. + :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8. Outputs: Tensor, has the same shape and dtype as the `x`. @@ -1895,7 +1895,7 @@ class Rsqrt(PrimitiveWithInfer): Inputs: - **x** (Tensor) - The input of Rsqrt. Each element must be a non-negative number. - :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8. + :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8. Outputs: Tensor, has the same type and shape as `x`. @@ -1947,7 +1947,7 @@ class Sqrt(PrimitiveWithCheck): Inputs: - **x** (Tensor) - The input tensor whose dtype is number. - :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8. + :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8. Outputs: Tensor, has the same shape and data type as the `x`. @@ -1994,7 +1994,7 @@ class Reciprocal(PrimitiveWithInfer): Inputs: - **x** (Tensor) - The input tensor. - :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8. + :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8. Outputs: Tensor, has the same shape as the `x`. @@ -2106,7 +2106,7 @@ class Exp(PrimitiveWithInfer): Inputs: - **x** (Tensor) - The input tensor. - :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8. + :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8. Outputs: Tensor, has the same shape and dtype as the `x`. @@ -2156,7 +2156,7 @@ class Expm1(PrimitiveWithInfer): Inputs: - **x** (Tensor) - The input tensor. With float16 or float32 data type. - :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8. + :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8. Outputs: Tensor, has the same shape as the `x`. @@ -2256,7 +2256,7 @@ class Log(PrimitiveWithInfer): Inputs: - **x** (Tensor) - The input tensor. The value must be greater than 0. - :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8. + :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8. Outputs: Tensor, has the same shape as the `x`. @@ -2306,7 +2306,7 @@ class Log1p(Primitive): Inputs: - **x** (Tensor) - The input tensor. With float16 or float32 data type. The value must be greater than -1. - :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8. + :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8. Outputs: Tensor, has the same shape as the `x`. 
@@ -2341,7 +2341,7 @@ class Erf(PrimitiveWithInfer): Inputs: - **x** (Tensor) - The input tensor. The data type must be float16 or float32. - :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8. + :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8. Outputs: Tensor, has the same shape and dtype as the `x`. @@ -2383,7 +2383,7 @@ class Erfc(PrimitiveWithInfer): Inputs: - **x** (Tensor) - The input tensor. The data type must be float16 or float32. - :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8. + :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8. Outputs: Tensor, has the same shape and dtype as the `x`. @@ -2720,9 +2720,9 @@ class MulNoNan(_MathBinaryOp): Inputs: - **x** (Union[Tensor]) - The first input is a tensor whose data type is one of - flota16, float32, int32, int64 currently or scalar. + float16, float32, int32, int64 currently or scalar. - **y** (Union[Tensor]) - The second input is a tensor whose data type is one of - flota16, float32, int32, int64 currently or scalar. + float16, float32, int32, int64 currently or scalar. Outputs: Tensor, the shape is the same as the shape after broadcasting, @@ -2969,7 +2969,7 @@ class Floor(PrimitiveWithInfer): Inputs: - **x** (Tensor) - The input tensor. Its element data type must be float. - :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8. + :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8. Outputs: Tensor, has the same shape as `x`. @@ -3062,7 +3062,7 @@ class Ceil(PrimitiveWithInfer): Inputs: - **x** (Tensor) - The input tensor. It's element data type must be float16 or float32. - :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8. + :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8. Outputs: Tensor, has the same shape as `x`. @@ -3194,7 +3194,7 @@ class Acosh(PrimitiveWithInfer): Inputs: - **x** (Tensor) - The data type should be one of the following types: float16, float32. - :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8. + :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8. Outputs: Tensor, has the same shape and type as `x`. @@ -3238,7 +3238,7 @@ class Cosh(PrimitiveWithInfer): Inputs: - **x** (Tensor) - The shape of tensor is - :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8. + :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8. Outputs: Tensor, has the same shape as `x`. @@ -3279,7 +3279,7 @@ class Asinh(PrimitiveWithInfer): Inputs: - **x** (Tensor) - The shape of tensor is - :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8. + :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8. The data type should be one of the following types: float16, float32. Outputs: @@ -3321,7 +3321,7 @@ class Sinh(PrimitiveWithInfer): Inputs: - **x** (Tensor) - The shape of tensor is - :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8.
+ :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8. Outputs: Tensor, has the same shape as `x`. @@ -3451,7 +3451,7 @@ class ApproximateEqual(_LogicBinaryOp): Inputs: - **x** (Tensor) - A tensor. Must be one of the following types: float32, float16. - :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8. + :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8. - **y** (Tensor) - A tensor of the same type and shape as 'x'. Outputs: @@ -4219,7 +4219,7 @@ class NPUGetFloatStatus(PrimitiveWithInfer): Inputs: - **x** (Tensor) - The output tensor of `NPUAllocFloatStatus`. The data type must be float16 or float32. - :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8. + :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8. Outputs: Tensor, has the same shape as `x`. All the elements in the tensor will be zero. diff --git a/mindspore/ops/operations/other_ops.py b/mindspore/ops/operations/other_ops.py index ecc54afbe19..0d63053f784 100644 --- a/mindspore/ops/operations/other_ops.py +++ b/mindspore/ops/operations/other_ops.py @@ -35,7 +35,7 @@ class Assign(Primitive): Inputs: - **variable** (Parameter) - The `Parameter`. :math:`(N,*)` where :math:`*` means, - any number of additional dimensions, its rank should less than 8. + any number of additional dimensions, its rank should be less than 8. - **value** (Tensor) - The value to be assigned, has the same shape with `variable`. Outputs: diff --git a/mindspore/ops/operations/quantum_ops.py b/mindspore/ops/operations/quantum_ops.py index d4fd0c54372..f68cc10b871 100755 --- a/mindspore/ops/operations/quantum_ops.py +++ b/mindspore/ops/operations/quantum_ops.py @@ -116,7 +116,7 @@ class Evolution(PrimitiveWithInfer): gate_obj_qubits, gate_ctrl_qubits, gate_params_names, gate_coeff, gate_requires_grad, hams_pauli_coeff, hams_pauli_word, hams_pauli_qubit): - """Initialize Evolutino""" + """Initialize Evolution""" self.init_prim_io_names(inputs=['param_data'], outputs=['state']) self.n_qubits = n_qubits diff --git a/mindspore/ops/operations/rl_ops.py b/mindspore/ops/operations/rl_ops.py index e0b63281d3e..c0a784f80a6 100644 --- a/mindspore/ops/operations/rl_ops.py +++ b/mindspore/ops/operations/rl_ops.py @@ -30,7 +30,7 @@ class BufferSample(PrimitiveWithInfer): Returns the tuple tensor with the given shape, decided by the given batchsize. .. warning:: - This is an experiental prototype that is subject to change and/or deletion. + This is an experimental prototype that is subject to change and/or deletion. Args: capacity (int64): Capacity of the buffer, must be non-negative. @@ -45,7 +45,7 @@ class BufferSample(PrimitiveWithInfer): Inputs: - **data** (tuple(Parameter(Tensor))) - The tuple(Tensor) represents replaybuffer, each tensor is described by the `buffer_shape` and `buffer_type`. - - **count** (Parameter) - The count mean the real available size of the buffer, + - **count** (Parameter) - The count means the real available size of the buffer, data type: int32. - **head** (Parameter) - The position of the first data in buffer, data type: int32. @@ -142,7 +142,7 @@ class BufferAppend(PrimitiveWithInfer): push data to the bottom of buffer under the First-In-First-Out rule. .. warning:: - This is an experiental prototype that is subject to change and/or deletion. 
+        This is an experimental prototype that is subject to change and/or deletion.

     Args:
         capacity (int64): Capacity of the buffer, must be non-negative.
@@ -152,9 +152,9 @@ class BufferAppend(PrimitiveWithInfer):
     Inputs:
         - **data** (tuple(Parameter(Tensor))) - The tuple(Tensor) represents replaybuffer,
           each tensor is described by the `buffer_shape` and `buffer_type`.
-        - **exp** (tuple(Parameter(Tensor))) - The tuple(Tensor) represents one list of experince data,
+        - **exp** (tuple(Parameter(Tensor))) - The tuple(Tensor) represents one list of experience data,
           each tensor is described by the `buffer_shape` and `buffer_type`.
-        - **count** (Parameter) - The count mean the real available size of the buffer,
+        - **count** (Parameter) - The count means the real available size of the buffer,
           data type: int32.
         - **head** (Parameter) - The position of the first data in buffer, data type: int32.
@@ -162,11 +162,11 @@
         None.

     Raises:
-        ValueError: If `count` and `head` is not a integer.
+        ValueError: If `count` or `head` is not an integer.
         ValueError: If `capacity` is not a positive integer.
-        ValueError: If length of `data` not equal to length of `exp`.
-        ValueError: If dim of data euqals to dim of exp, but `data[1:]` not equal to the shape in `exp`.
-        ValueError: If the shape of `data[1:]` not equal to the shape in `exp`.
-        TypeError: If the type in `exp` is not the same with `data`.
+        ValueError: If the length of `data` is not equal to the length of `exp`.
+        ValueError: If the dim of `data` is equal to the dim of `exp`, but `data[1:]` is not equal to the shape in `exp`.
+        ValueError: If the shape of `data[1:]` is not equal to the shape in `exp`.
+        TypeError: If the type in `exp` is not the same as `data`.

     Supported Platforms:
@@ -211,7 +211,7 @@
         exp_batch = exp_shape[0][0]
         for i in range(len(data_shape)):
            if len(data_shape[i]) != len(exp_shape[i]):
-                raise ValueError(f"For '{self.name}', the dimension of {i}th 'exp_shape' must equal to "
-                                 f"the dimension of {i}th 'data_shape', but got the {i}th 'exp_shape': "
+                raise ValueError(f"For '{self.name}', the dimension of the {i}th 'exp_shape' must be equal to "
+                                 f"the dimension of the {i}th 'data_shape', but got the {i}th 'exp_shape': "
                                  f"{exp_shape[i]}, the {i}th 'data_shape': {data_shape[i]}.")
             if data_shape[i][0] < exp_shape[i][0]:
@@ -221,7 +221,7 @@
         else:
             for i in range(len(data_shape)):
                 if data_shape[i][1:] != exp_shape[i]:
-                    raise ValueError(f"For '{self.name}', the {i}th 'exp_shape' must equal to the {i}th 'data_shape'"
-                                     f"which excepts the first dimension. but got the {i}th 'exp_shape': "
+                    raise ValueError(f"For '{self.name}', the {i}th 'exp_shape' must be equal to the {i}th 'data_shape' "
+                                     f"except for the first dimension, but got the {i}th 'exp_shape': "
                                      f"{exp_shape[i]}, the {i}th 'data_shape': {data_shape[i]}.")
         self.add_prim_attr('exp_batch', exp_batch)
@@ -239,10 +239,10 @@
 class BufferGetItem(PrimitiveWithInfer):
     r"""
-    Get the data from buffer in the position of input inedx.
+    Get the data from the buffer at the position of the input index.

     .. warning::
-        This is an experiental prototype that is subject to change and/or deletion.
+        This is an experimental prototype that is subject to change and/or deletion.

     Args:
         capacity (int64): Capacity of the buffer, must be non-negative.
@@ -252,7 +252,7 @@
     Inputs:
         - **data** (tuple(Parameter(Tensor))) - The tuple(Tensor) represents replaybuffer,
           each tensor is described by the `buffer_shape` and `buffer_type`.
-        - **count** (Parameter) - The count mean the real available size of the buffer,
+        - **count** (Parameter) - The count means the real available size of the buffer,
           data type: int32.
         - **head** (Parameter) - The position of the first data in buffer, data type: int32.
         - **index** (int64) - The position of the data in buffer.
@@ -261,7 +261,7 @@
         tuple(Tensor). The shape is `buffer_shape`. The dtype is `buffer_dtype`.

     Raises:
-        ValueError: If `count` and `head` is not a integer.
+        ValueError: If `count` or `head` is not an integer.
         ValueError: If `capacity` is not a positive integer.
         TypeError: If `buffer_shape` is not a tuple.
diff --git a/mindspore/ops/operations/sponge_ops.py b/mindspore/ops/operations/sponge_ops.py
index 1efe4f73476..7d2532c059d 100644
--- a/mindspore/ops/operations/sponge_ops.py
+++ b/mindspore/ops/operations/sponge_ops.py
@@ -683,7 +683,7 @@ class DihedralAtomEnergy(PrimitiveWithInfer):
           The data type is int32 and the shape is :math:`(m,)`.
         - **atom_b** (Tensor) - The 2nd atom index of each dihedral.
           The data type is int32 and the shape is :math:`(m,)`.
-        - **atom_c** (Tenso) - The 3rd atom index of each dihedral.
+        - **atom_c** (Tensor) - The 3rd atom index of each dihedral.
           The data type is int32 and the shape is :math:`(m,)`.
         - **atom_d** (Tensor) - The 4th atom index of each dihedral. 4 atoms are connected in the form a-b-c-d.
           The data type is int32 and the shape is :math:`(m,)`.
@@ -786,7 +786,7 @@ class DihedralForceWithAtomEnergy(PrimitiveWithInfer):
           The data type is int32 and the shape is :math:`(m,)`.
         - **atom_b** (Tensor) - The 2nd atom index of each dihedral.
           The data type is int32 and the shape is :math:`(m,)`.
-        - **atom_c** (Tenso) - The 3rd atom index of each dihedral.
+        - **atom_c** (Tensor) - The 3rd atom index of each dihedral.
           The data type is int32 and the shape is :math:`(m,)`.
         - **atom_d** (Tensor) - The 4th atom index of each dihedral. 4 atoms are connected in the form a-b-c-d.
           The data type is int32 and the shape is :math:`(m,)`.
@@ -1263,7 +1263,7 @@ class Dihedral14LJForce(PrimitiveWithInfer):
           The data type is float32 and the shape is :math:`(m,)`.
         - **LJ_type_A** (Tensor) - The A parameter in Lennard-Jones scheme of each atom pair type.
           q is the number of atom pair. The data type is float32 and the shape is :math:`(q,)`.
-        - **LJ_type_B** (Tensor) - The B parameter in Lennard-Jones shceme of each atom pair type.
+        - **LJ_type_B** (Tensor) - The B parameter in Lennard-Jones scheme of each atom pair type.
           q is the number of atom pair. The data type is float32 and the shape is :math:`(q,)`.

     Outputs:
@@ -1366,7 +1366,7 @@ class Dihedral14LJEnergy(PrimitiveWithInfer):
           The data type is float32 and the shape is :math:`(m,)`.
         - **LJ_type_A** (Tensor) - The A parameter in Lennard-Jones scheme of each atom pair type.
           q is the number of atom pair. The data type is float32 and the shape is :math:`(q,)`.
-        - **LJ_type_B** (Tensor) - The B parameter in Lennard-Jones shceme of each atom pair type.
+        - **LJ_type_B** (Tensor) - The B parameter in Lennard-Jones scheme of each atom pair type.
           q is the number of atom pair. The data type is float32 and the shape is :math:`(q,)`.

     Outputs:
@@ -1585,7 +1585,7 @@ class Dihedral14LJCFForceWithAtomEnergy(PrimitiveWithInfer):
           The data type is float32 and the shape is :math:`(m,)`.
         - **LJ_type_A** (Tensor) - The A parameter in Lennard-Jones scheme of each atom pair type.
           q is the number of atom pair. The data type is float32 and the shape is :math:`(q,)`.
-        - **LJ_type_B** (Tensor) - The B parameter in Lennard-Jones shceme of each atom pair type.
+        - **LJ_type_B** (Tensor) - The B parameter in Lennard-Jones scheme of each atom pair type.
           q is the number of atom pair. The data type is float32 and the shape is :math:`(q,)`.

     Outputs:
@@ -1694,7 +1694,7 @@ class Dihedral14LJAtomEnergy(PrimitiveWithInfer):
           The data type is float32 and the shape is :math:`(m,)`.
         - **LJ_type_A** (Tensor) - The A parameter in Lennard-Jones scheme of each atom pair type.
           q is the number of atom pair. The data type is float32 and the shape is :math:`(q,)`.
-        - **LJ_type_B** (Tensor) - The B parameter in Lennard-Jones shceme of each atom pair type.
+        - **LJ_type_B** (Tensor) - The B parameter in Lennard-Jones scheme of each atom pair type.
           q is the number of atom pair. The data type is float32 and the shape is :math:`(q,)`.

     Outputs:
@@ -2640,7 +2640,7 @@ class MDIterationLeapFrogWithRF(PrimitiveWithInfer):
-          exp(-gamma_ln * dt), where gamma_ln is the firction factor in Langvin dynamics.
+          exp(-gamma_ln * dt), where gamma_ln is the friction factor in Langevin dynamics.
           The data type is float32.
         - **max_velocity** (Scalar) - The upper limit of velocity, when the
-          veclocity overflows, scale it to the upper limit. The data type is float32.
+          velocity overflows, scale it to the upper limit. The data type is float32.
         - **is_max_velocity** (Scalar) - whether the max velocity control is open or not.
           The data type is int32.
         - **mass_inverse** (Tensor) - The inverse value of
@@ -3077,7 +3077,7 @@ class NeighborListUpdate(PrimitiveWithInfer):
         excluded_atom_numbers(int32): the total atom numbers in the excluded list.
         cutoff(float32): the cutoff distance for short-range force calculation.
         skin(float32): the overflow value of cutoff to maintain a neighbor list.
-        cutoff_square(float32): the suqare value of cutoff.
+        cutoff_square(float32): the square value of cutoff.
         half_skin_square(float32): skin*skin/4, indicates the maximum square value of the distance
           atom allowed to move between two updates.
         cutoff_with_skin(float32): cutoff + skin, indicates the
diff --git a/mindspore/ops/operations/sponge_update_ops.py b/mindspore/ops/operations/sponge_update_ops.py
index 2147a035adc..99b8ed30115 100644
--- a/mindspore/ops/operations/sponge_update_ops.py
+++ b/mindspore/ops/operations/sponge_update_ops.py
@@ -179,7 +179,7 @@ class ConstrainForceCycleWithVirial(PrimitiveWithInfer):

 class LastCrdToDr(PrimitiveWithInfer):
     """
-    Calculate the diplacement vector of each constrained atom pair.
+    Calculate the displacement vector of each constrained atom pair.

     .. warning::
         This is an experimental prototype that is subject to change and/or deletion.
@@ -279,7 +279,7 @@ class RefreshCrdVel(PrimitiveWithInfer):
           The data type is float32 and the shape is :math:`(n, 3)`.
         - **vel** (Tensor) - The velocity of each atom.
           The data type is float32 and the shape is :math:`(n, 3)`.
-        - **test_frc** (Tensor) - The constraint force calculated in the last oteration.
+        - **test_frc** (Tensor) - The constraint force calculated in the last iteration.
           The data type is float32 and the shape is :math:`(n, 3)`.
         - **mass_inverse** (Tensor) - The inverse value of mass of each atom.
           The data type is float32 and the shape is :math:`(n,)`.
@@ -1903,7 +1903,7 @@ class Dihedral14ForceWithAtomEnergyVirial(PrimitiveWithInfer):
           The data type is float32 and the shape is :math:`(m,)`.
         - **LJ_type_A** (Tensor) - The A parameter in Lennard-Jones scheme of each atom pair type.
           The number of atom pair is q. The data type is float32 and the shape is :math:`(q,)`.
-        - **LJ_type_B** (Tensor) - The B parameter in Lennard-Jones shceme of each atom pair type.
+        - **LJ_type_B** (Tensor) - The B parameter in Lennard-Jones scheme of each atom pair type.
           The number of atom pair is q. The data type is float32 and the shape is :math:`(q,)`.

     Outputs:
diff --git a/mindspore/parallel/nn/moe.py b/mindspore/parallel/nn/moe.py
index 6bf6cf714eb..cf818881a85 100644
--- a/mindspore/parallel/nn/moe.py
+++ b/mindspore/parallel/nn/moe.py
@@ -76,7 +76,7 @@ class MoE(Cell):
         param_init_type (dtype.Number): The parameter initialization type. Can be dtype.float32 or dtype.float16.
         moe_config(MoEConfig): The configuration of MoE (Mixture of Expert).
         parallel_config(OpParallelConfig): The config of parallel setting, see `OpParallelConfig`.
-            Default `default_dpmp_config`, a instance of `OpParallelConfig` with default
+            Default `default_dpmp_config`, an instance of `OpParallelConfig` with default
             args.

     Inputs:
diff --git a/mindspore/train/__init__.py b/mindspore/train/__init__.py
index 2f567d39e64..33b73c937ce 100644
--- a/mindspore/train/__init__.py
+++ b/mindspore/train/__init__.py
@@ -15,7 +15,7 @@
 """
 High-Level training interfaces.

-Helper functions in train piplines.
+Helper functions in train pipelines.
 """
 from .model import Model
 from .dataset_helper import DatasetHelper, connect_network_with_dataset
diff --git a/mindspore/train/_utils.py b/mindspore/train/_utils.py
index 841ef3f7135..3398a7f6556 100644
--- a/mindspore/train/_utils.py
+++ b/mindspore/train/_utils.py
@@ -128,7 +128,7 @@ def _construct_tensor_list(types, shapes, batch_expand_num=1):
         List, list of Tensors.
     """
     if len(types) != len(shapes):
-        raise ValueError("The length of dataset types must equal to dataset shapes, "
+        raise ValueError("The length of dataset types must be equal to the length of dataset shapes, "
                          "but got dataset types={} and dataset shapes={}".format(types, shapes))
     tensor_list = []
     for type_, shape in zip(types, shapes):
diff --git a/mindspore/train/serialization.py b/mindspore/train/serialization.py
index f479ab49e90..e7706a72810 100644
--- a/mindspore/train/serialization.py
+++ b/mindspore/train/serialization.py
@@ -207,7 +207,7 @@ def save_checkpoint(save_obj, ckpt_file_name, integrated_save=True,
             be parameter or Tensor).
         ckpt_file_name (str): Checkpoint file name. If the file name already exists, it will be overwritten.
-        integrated_save (bool): Whether to integrated save in automatic model parallel scene. Default: True
-        async_save (bool): Whether to open a independent thread to save the checkpoint file. Default: False
+        integrated_save (bool): Whether to apply integrated save in automatic model parallel scene. Default: True
+        async_save (bool): Whether to open an independent thread to save the checkpoint file. Default: False
         append_dict (dict): Additional information that needs to be saved. The key of dict must be str,
-            the value of dict must be one of int float and bool. Default: None
+            the value of dict must be one of int, float and bool. Default: None
         enc_key (Union[None, bytes]): Byte type key used for encryption.
            If the value is None, the encryption
diff --git a/tests/mindspore_test_framework/mindspore_test.py b/tests/mindspore_test_framework/mindspore_test.py
index 7a310ffdf52..5ec5d8b64d5 100644
--- a/tests/mindspore_test_framework/mindspore_test.py
+++ b/tests/mindspore_test_framework/mindspore_test.py
@@ -60,7 +60,7 @@ def mindspore_test(verification_pipeline):
         elif issubclass(component, IERPolicyComponent):
             er_policy_components.append(component)
         else:
-            raise Exception(f'{component} is not a instance of {IComponent}')
+            raise Exception(f'{component} is not an instance of {IComponent}')

     for component in facade_components:
         fc = component(verification_set)
diff --git a/tests/ut/python/model/test_lenet_core_after_exception.py b/tests/ut/python/model/test_lenet_core_after_exception.py
index dfd47ac599b..c1bdc920237 100644
--- a/tests/ut/python/model/test_lenet_core_after_exception.py
+++ b/tests/ut/python/model/test_lenet_core_after_exception.py
@@ -55,4 +55,4 @@ def test_lenet5_exception():
    net = train_step_with_loss_warp(LeNet5())
    with pytest.raises(RuntimeError) as info:
        _cell_graph_executor.compile(net, predict, label)
-    assert "x_shape[C_in] / group must equal to w_shape[C_in] = " in str(info.value)
+    assert "x_shape[C_in] / group must be equal to w_shape[C_in]: " in str(info.value)
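For context, the corrected `BufferAppend` messages in the rl_ops.py hunks above describe a two-case shape contract: when the pushed experience has the same rank as the buffer, its leading dimension is a batch and may not exceed the buffer's first dimension; otherwise each experience shape must equal the corresponding buffer shape without its first axis. The sketch below restates that check as a minimal standalone Python function; the helper name `check_append_shapes` and the rank-comparison branch condition are illustrative assumptions, not the MindSpore implementation.

    def check_append_shapes(data_shape, exp_shape, name="BufferAppend"):
        """Validate one experience push against the replay buffer layout (sketch)."""
        if len(data_shape) != len(exp_shape):
            raise ValueError(f"For '{name}', the length of 'exp' must be equal to "
                             f"the length of 'data', but got {len(exp_shape)} and {len(data_shape)}.")
        # Assumed branch condition: same rank means a batched append, where the
        # leading dimension of each experience tensor is the batch size.
        if len(exp_shape[0]) == len(data_shape[0]):
            for i in range(len(data_shape)):
                if len(data_shape[i]) != len(exp_shape[i]):
                    raise ValueError(f"For '{name}', the dimension of the {i}th 'exp_shape' must be equal to "
                                     f"the dimension of the {i}th 'data_shape', but got the {i}th 'exp_shape': "
                                     f"{exp_shape[i]}, the {i}th 'data_shape': {data_shape[i]}.")
                if data_shape[i][0] < exp_shape[i][0]:
                    raise ValueError(f"For '{name}', the batch of the {i}th 'exp_shape' cannot exceed the "
                                     f"first dimension of the {i}th 'data_shape', but got the {i}th 'exp_shape': "
                                     f"{exp_shape[i]}, the {i}th 'data_shape': {data_shape[i]}.")
        else:
            # Single-item append: exp must match data without its first axis.
            for i in range(len(data_shape)):
                if tuple(data_shape[i][1:]) != tuple(exp_shape[i]):
                    raise ValueError(f"For '{name}', the {i}th 'exp_shape' must be equal to the {i}th 'data_shape' "
                                     f"except for the first dimension, but got the {i}th 'exp_shape': "
                                     f"{exp_shape[i]}, the {i}th 'data_shape': {data_shape[i]}.")

    # e.g. a 100-slot buffer holding (state, action) = ((4,), (2,)) per transition:
    check_append_shapes(((100, 4), (100, 2)), ((4,), (2,)))        # single transition: ok
    check_append_shapes(((100, 4), (100, 2)), ((32, 4), (32, 2)))  # batch of 32: ok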