Optimize the comments and log descriptions

modified:     ops/operations/_inner_ops.py
	modified:     ops/operations/_quant_ops.py
	modified:     ops/operations/array_ops.py
	modified:     ops/operations/comm_ops.py
	modified:     ops/operations/math_ops.py
	modified:     ops/operations/quantum_ops.py
	modified:     ops/operations/rl_ops.py
	modified:     ops/operations/sponge_ops.py
	modified:     ops/operations/sponge_update_ops.py
	modified:     train/__init__.py
	modified:     common/tensor.py
	modified:     train/serialization.py
	modified:     ccsrc/pipeline/jit/parse/parse.h
	modified:     explainer/benchmark/_attribution/metric.py
	modified:     ops/composite/multitype_ops/_constexpr_utils.py
	modified:     ops/operations/comm_ops.py

	modified:     RELEASE.md
	modified:     mindspore/_extends/parse/standard_method.py
	modified:     mindspore/ccsrc/backend/kernel_compiler/cpu/concat_offset_cpu_kernel.cc
	modified:     mindspore/ccsrc/backend/kernel_compiler/cpu/dynamic_shape_cpu_kernel.cc
	modified:     mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.cc
	modified:     mindspore/ccsrc/frontend/parallel/ops_info/tile_info.cc
	modified:     mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.cc
	modified:     mindspore/ccsrc/frontend/parallel/strategy.h
	modified:     mindspore/common/tensor.py
	modified:     mindspore/core/abstract/prim_arrays.cc
	modified:     mindspore/core/abstract/prim_nn.cc
	modified:     mindspore/core/ops/conv2d.cc
	modified:     mindspore/core/ops/logical_and.h
	modified:     mindspore/core/ops/logical_not.h
	modified:     mindspore/core/ops/logical_or.h
	modified:     mindspore/core/ops/reduce_all.h
	modified:     mindspore/core/ops/reduce_any.h
	modified:     mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc
	modified:     mindspore/nn/layer/quant.py
	modified:     mindspore/nn/optim/sgd.py
	modified:     mindspore/nn/sparse/sparse.py
	modified:     mindspore/numpy/array_creations.py
	modified:     mindspore/numpy/array_ops.py
	modified:     mindspore/numpy/logic_ops.py
	modified:     mindspore/numpy/math_ops.py
	modified:     mindspore/ops/operations/_inner_ops.py
	modified:     mindspore/ops/operations/array_ops.py
	modified:     mindspore/ops/operations/rl_ops.py
	modified:     mindspore/train/_utils.py
	modified:     tests/ut/python/model/test_lenet_core_after_exception.py

	modified:     mindspore/_extends/parse/standard_method.py
	modified:     mindspore/ops/operations/rl_ops.py

	modified:     mindspore/core/abstract/prim_nn.cc
	modified:     mindspore/core/ops/conv2d.cc

	modified:     mindspore/ccsrc/backend/kernel_compiler/cpu/ctcloss_cpu_kernel.cc
	modified:     mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_pull_weight_kernel.h
	modified:     mindspore/ccsrc/backend/kernel_compiler/cpu/fl/fused_push_weight_kernel.h
	modified:     mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_filter_cpu_kernel.cc
	modified:     mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/conv2d_grad_input_cpu_kernel.cc
	modified:     mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.cc
	modified:     mindspore/ccsrc/backend/kernel_compiler/cpu/ps/sparse_apply_lazy_adam_ps_kernel.cc
	modified:     mindspore/ccsrc/backend/kernel_compiler/cpu/rolling_cpu_kernel.cc
	modified:     mindspore/ccsrc/backend/kernel_compiler/cpu/scatter_arithmetic_cpu_kernel.cc
	modified:     mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc
	modified:     mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc
	modified:     mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/split_gpu_kernel.h
	modified:     mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h
	modified:     mindspore/ccsrc/backend/kernel_compiler/gpu/nn/conv2d_grad_input_gpu_kernel.h
	modified:     mindspore/ccsrc/fl/server/server.cc
	modified:     mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc
	modified:     mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h
	modified:     mindspore/ccsrc/frontend/optimizer/irpass/inline.h
	modified:     mindspore/ccsrc/minddata/dataset/core/device_tensor.cc
	modified:     mindspore/ccsrc/minddata/dataset/core/tensor.cc
	modified:     mindspore/ccsrc/minddata/dataset/engine/datasetops/source/emnist_op.cc
	modified:     mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc
	modified:     mindspore/ccsrc/minddata/dataset/engine/datasetops/source/qmnist_op.cc
	modified:     mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/dataset_node.cc
	modified:     mindspore/ccsrc/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc
	modified:     mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc
	modified:     mindspore/ccsrc/pipeline/jit/action.cc
	modified:     mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc
	modified:     mindspore/ccsrc/runtime/device/ascend/executor/tiling/op_tiling_adapter.cc
	modified:     mindspore/compression/quant/quant_utils.py
	modified:     mindspore/core/abstract/prim_nn.cc
	modified:     mindspore/dataset/engine/validators.py
	modified:     mindspore/lite/micro/coder/opcoders/nnacl/fp32/affine_fp32_coder.cc
	modified:     mindspore/lite/micro/coder/opcoders/nnacl/int8/affine_int8_coder.cc
	modified:     mindspore/lite/src/runtime/kernel/ascend310/src/custom_kernel.cc
	modified:     mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc
	modified:     mindspore/lite/src/runtime/kernel/opencl/kernel/strassen.cc
	modified:     mindspore/lite/tools/common/graph_util.h
	modified:     mindspore/lite/tools/optimizer/fisson/fisson_util.cc
	modified:     mindspore/ops/composite/math_ops.py
	modified:     mindspore/ops/operations/_inner_ops.py
	modified:     mindspore/ops/operations/array_ops.py
	modified:     mindspore/ops/operations/math_ops.py
	modified:     mindspore/ops/operations/other_ops.py

	modified:     mindspore/boost/boost_cell_wrapper.py
	modified:     mindspore/ccsrc/backend/kernel_compiler/cpu/update_cache_cpu_kernel.cc
	modified:     mindspore/ccsrc/common/trans.cc
	modified:     mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc
	modified:     mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc
	modified:     mindspore/lite/src/common/log_util.h
	modified:     mindspore/nn/wrap/loss_scale.py
	modified:     mindspore/parallel/nn/moe.py
	modified:     tests/mindspore_test_framework/mindspore_test.py

	modified:     mindspore/ccsrc/backend/kernel_compiler/cpu/split_cpu_kernel.cc
	modified:     mindspore/lite/tools/common/graph_util.h

	modified:     mindspore/ccsrc/frontend/parallel/ops_info/gather_info.cc

	modified:     mindspore/core/ops/conv2d.cc

	modified:     tests/ut/python/model/test_lenet_core_after_exception.py
zhunaipan 2021-10-18 21:18:43 +08:00
parent 1d1a9ec0ac
commit 8ce4e62725
89 changed files with 260 additions and 246 deletions

View File

@@ -2254,7 +2254,7 @@ In Ascend platform, if group > 1, the weight shape of Conv2D change from [in_cha
6. Support Model(.ms) visualization on Netron.
7. Support Tensorflow model in MindSpore Lite Converter
8. Add 86 converter parsers.
9. Convert aware training model without users awareness
9. Convert aware training model without user's awareness
10. Support scalar tensor in MindSpore Lite Converter and Runtime
11. Support NPU backend on HUAWEI Kirin SoC.[BETA]
12. Merge timeprofiler into benchmark

View File

@@ -852,12 +852,12 @@ def take(x, indices, axis=None, mode='clip'):
indices (Tensor): The indices with shape `(Nj...)` of the values to extract.
axis (int, optional): The axis over which to select values. By default,
the flattened input array is used.
mode (raise, wrap, clip, optional):
mode ('raise', 'wrap', 'clip', optional):
- edge: Pads with the edge values of `arr`.
- raise: Raises an error;
- wrap: Wraps around;
- clip: Clips to the range. `clip` mode means that all indices that are
- clip: Clips to the range. 'clip' mode means that all indices that are
too large are replaced by the index that addresses the last element
along that axis. Note that this disables indexing with negative numbers.
@@ -915,15 +915,15 @@ def choose(x, choices, mode='clip'):
choices (sequence of arrays): Choice arrays. `a` and all of the `choices` must
be broadcastable to the same shape. If `choices` is itself an array, then
its outermost dimension (i.e., the one corresponding to ``choices.shape[0]``)
is taken as defining the sequence.
mode (raise, wrap, clip, optional): Specifies how indices outside
is taken as defining the "sequence".
mode ('raise', 'wrap', 'clip', optional): Specifies how indices outside
``[0, n-1]`` will be treated:
raise raise an error (default);
'raise' raise an error (default);
wrap wrap around;
'wrap' wrap around;
clip clip to the range. clip mode means that all indices that are
'clip' clip to the range. 'clip' mode means that all indices that are
too large are replaced by the index that addresses the last element
along that axis. Note that this disables indexing with negative numbers.
@@ -988,8 +988,8 @@ def searchsorted(x, v, side='left', sorter=None):
Args:
v (Union[int, float, bool, list, tuple, Tensor]): Values to insert into `a`.
side ('left', 'right', optional): If left, the index of the first suitable
location found is given. If right, return the last such index. If there is
side ('left', 'right', optional): If 'left', the index of the first suitable
location found is given. If 'right', return the last such index. If there is
no suitable index, return either 0 or N (where N is the length of `a`).
sorter (Union[int, float, bool, list, tuple, Tensor]): 1-D optional array of
integer indices that sort array `a` into ascending order. They are typically
@@ -1076,7 +1076,7 @@ def fill(x, value):
def ptp(x, axis=None, keepdims=False):
"""
The name of the function comes from the acronym for peak to peak.
The name of the function comes from the acronym for "peak to peak".
Note:
Numpy arguments `dtype` and `out` are not supported.
@@ -1288,7 +1288,7 @@ def sum(x, axis=None, dtype=None, keepdims=False, initial=None): # pylint: disab
keepdims (bool): If this is set to True, the axes which are reduced are left in the result as
dimensions with size one. With this option, the result will broadcast correctly against the input array.
If the default value is passed, then keepdims will not be passed through to the sum method of
sub-classes of ndarray, however any non-default value will be. If the sub-class method does not
sub-classes of ndarray, however any non-default value will be. If the sub-class method does not
implement keepdims any exceptions will be raised.
initial (scalar): Starting value for the sum.
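
For reference, a minimal sketch of the 'clip' behavior documented above, assuming the take(x, indices, axis=None, mode='clip') signature shown in this hunk (input values are hypothetical):

import mindspore.numpy as mnp

a = mnp.array([4, 3, 5, 7, 6, 8])
indices = mnp.array([0, 1, 10])           # index 10 is out of bounds
# mode='clip' replaces the too-large index with the last valid index (5)
# instead of raising an error, so this prints [4 3 8].
print(mnp.take(a, indices, mode='clip'))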

View File

@@ -424,7 +424,7 @@ class BoostTrainOneStepWithLossScaleCell(BoostTrainOneStepCell):
cleared before executing the computation.
Outputs:
Tuple[object, object], the first value is False for GPU backend, while it is a instance of
Tuple[object, object], the first value is False for GPU backend, while it is an instance of
NPUAllocFloatStatus for other backend. The status is used to detect overflow during overflow detection.
The second value is the same as the input of `compute_input`, but contains some information about the
execution order.

View File

@@ -73,7 +73,7 @@ bool ConcatOffsetCPUKernel<T>::Launch(const std::vector<kernel::AddressPtr> &inp
<< ", but got:" << output_shape.size();
}
if (output_shape[0] != input_num) {
MS_LOG(EXCEPTION) << "ConcatOffset output_shape[0] must equal to input_num, but got " << output_shape[0];
MS_LOG(EXCEPTION) << "ConcatOffset output_shape[0] must be equal to input_num, but got " << output_shape[0];
}
size_t rank = output_shape[1];
size_t idx = 0;

View File

@@ -241,7 +241,7 @@ void CTCLossCPUKernel::GenLabelWithBlank(const uint32_t *seq_len, const std::vec
}
}
if (!ignore_longer_outputs_than_inputs_ && l.size() > seq_len[b]) {
MS_LOG(EXCEPTION) << "Input time(sequence length) should greater than output size(label length), but gets "
MS_LOG(EXCEPTION) << "Input time(sequence length) should be greater than output size(label length), but gets "
<< seq_len[b] << "< " << l.size();
}

View File

@@ -50,7 +50,7 @@ bool DynamicShapeCPUKernel<T>::Launch(const std::vector<kernel::AddressPtr> &inp
MS_LOG(EXCEPTION) << "The length of output_shape must be 1, but got:" << output_shape.size();
}
if (output_shape[0] != input_shape.size()) {
MS_LOG(EXCEPTION) << "DynamicShape output_shape[0] must equal to the size of input_shape, but got "
MS_LOG(EXCEPTION) << "DynamicShape output_shape[0] must be equal to the size of input_shape, but got "
<< output_shape[0];
}
for (size_t i = 0; i < output_shape[0]; ++i) {

View File

@@ -52,7 +52,7 @@ class FusedPullWeightKernel : public CPUKernel {
total_iteration_++;
uint64_t step_num_per_iteration = fl::worker::FLWorker::GetInstance().worker_step_num_per_iteration();
if (step_num_per_iteration == 0) {
MS_LOG(EXCEPTION) << "Step numbers of per iteration should not equal to 0";
MS_LOG(EXCEPTION) << "Step numbers of per iteration should not be equal to 0";
}
// The worker has to train kWorkerTrainStepNum standalone iterations before it communicates with server.
MS_LOG(INFO) << "Try to pull weights. Local step number: " << total_iteration_

View File

@@ -50,7 +50,7 @@ class FusedPushWeightKernel : public CPUKernel {
total_iteration_++;
uint64_t step_num_per_iteration = fl::worker::FLWorker::GetInstance().worker_step_num_per_iteration();
if (step_num_per_iteration == 0) {
MS_LOG(EXCEPTION) << "Step numbers of per iteration should not equal to 0";
MS_LOG(EXCEPTION) << "Step numbers of per iteration should not be equal to 0";
}
// The worker has to train kWorkerTrainStepNum standalone iterations before it communicates with server.
MS_LOG(INFO) << "Try to push weights. Local step number: " << total_iteration_

View File

@@ -67,7 +67,7 @@ void Conv2dGradFilterCPUKernel::InitKernel(const CNodePtr &kernel_node) {
MS_LOG(EXCEPTION) << "Conv2dGradFilterCPUKernel dilation only support 1 in N axis and C axis!";
}
if (stride_ori.size() < kShapeSize2D) {
MS_LOG(EXCEPTION) << "Conv2dGradFilterCPUKernel stride_ori should not less than 2d!";
MS_LOG(EXCEPTION) << "Conv2dGradFilterCPUKernel stride_ori should not be less than 2d!";
}
std::vector<int> stride{stride_ori[0], stride_ori[1]};
std::vector<int> dilation{dilation_ori[2], dilation_ori[3]};

View File

@@ -65,7 +65,7 @@ void Conv2dGradInputCPUKernel::InitKernel(const CNodePtr &kernel_node) {
}
size_t h_index = iter->second;
if (stride_me.size() < h_index + 2) {
MS_LOG(EXCEPTION) << "Strides should greater than " << (h_index + 1) << ", but got " << stride_me.size();
MS_LOG(EXCEPTION) << "Strides should be greater than " << (h_index + 1) << ", but got " << stride_me.size();
}
auto h_index_int64 = SizeToLong(h_index);
(void)std::transform(stride_me.begin() + h_index_int64, stride_me.begin() + h_index_int64 + 2,
@@ -80,7 +80,7 @@ void Conv2dGradInputCPUKernel::InitKernel(const CNodePtr &kernel_node) {
MS_LOG(EXCEPTION) << "Conv2dGradInputCPUKernel dilation only support 1 in N axis and C axis!";
}
if (stride_ori.size() < kShapeSize2D) {
MS_LOG(EXCEPTION) << "Conv2dGradInputCPUKernel stride_ori should not less than 2d!";
MS_LOG(EXCEPTION) << "Conv2dGradInputCPUKernel stride_ori should not be less than 2d!";
}
std::vector<int> stride{stride_ori[0], stride_ori[1]};
std::vector<int> dilation{dilation_ori[2], dilation_ori[3]};

View File

@@ -101,7 +101,7 @@ void SparseApplyFtrlPSKernel::ReInit(const std::vector<std::vector<size_t>> &sha
void SparseApplyFtrlPSKernel::ReInit(const std::vector<AddressPtr> &inputs) {
if (inputs.size() < kSparseApplyFtrlPSInputSize) {
MS_LOG(EXCEPTION) << "Input numbers should not less than " << kSparseApplyFtrlPSInputSize << ", but got "
MS_LOG(EXCEPTION) << "Input numbers should not be less than " << kSparseApplyFtrlPSInputSize << ", but got "
<< inputs.size();
}
const auto &indices_addr = inputs[4];

View File

@@ -91,7 +91,7 @@ void SparseApplyLazyAdamPSKernel::ReInit(const std::vector<std::vector<size_t>>
void SparseApplyLazyAdamPSKernel::ReInit(const std::vector<AddressPtr> &inputs) {
if (inputs.size() < kSparseApplyLazyAdamPSInputsSize) {
MS_LOG(EXCEPTION) << "Input shape size should not less than " << kSparseApplyLazyAdamPSInputsSize << ", but got "
MS_LOG(EXCEPTION) << "Input shape size should not be less than " << kSparseApplyLazyAdamPSInputsSize << ", but got "
<< inputs.size();
}
const auto &indices_addr = inputs[10];

View File

@@ -39,12 +39,12 @@ void RollingCpuKernel<T, S>::InitKernel(const CNodePtr &kernel_node) {
method_ = kValidMethods.at(method);
auto window = AnfAlgo::GetNodeAttr<int64_t>(kernel_node, WINDOW);
if (window <= 0) {
MS_LOG(EXCEPTION) << "window size should not less than 0, but got " << window;
MS_LOG(EXCEPTION) << "window size should not be less than 0, but got " << window;
}
window_ = LongToInt(window);
min_periods_ = AnfAlgo::GetNodeAttr<int64_t>(kernel_node, MIN_PERIODS);
if (min_periods_ <= 0) {
MS_LOG(EXCEPTION) << "min_periods should not less than 0, but got " << min_periods_;
MS_LOG(EXCEPTION) << "min_periods should not be less than 0, but got " << min_periods_;
}
center_ = AnfAlgo::GetNodeAttr<bool>(kernel_node, CENTER);
auto axis = AnfAlgo::GetNodeAttr<int64_t>(kernel_node, AXIS);

View File

@@ -48,7 +48,7 @@ void ScatterArithmeticCPUKernel<T>::InitKernel(const CNodePtr &kernel_node) {
kernel_name_ = AnfAlgo::GetCNodeName(kernel_node);
auto input_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
if (input_shape.size() < 1) {
MS_LOG(EXCEPTION) << "Input shape size should not less than 1";
MS_LOG(EXCEPTION) << "Input shape size should not be less than 1";
}
input_size_ = 1;
inner_size_ = 1;

View File

@@ -38,8 +38,8 @@ void SplitCPUKernel<T>::InitKernel(const CNodePtr &kernel_node) {
(void)std::transform(input_shape.begin(), input_shape.end(), std::back_inserter(input_shape_),
[](const size_t &value) { return SizeToInt(value); });
if (input_shape_.size() < 1 || input_shape_.size() > SPLIT_STRIDES_SIZE) {
MS_LOG(EXCEPTION) << "Inpu shape size should not less than 1 or greater than " << SPLIT_STRIDES_SIZE << ", but got "
<< input_shape_.size();
MS_LOG(EXCEPTION) << "Inpu shape size should not be less than 1 or greater than " << SPLIT_STRIDES_SIZE
<< ", but got " << input_shape_.size();
}
CheckParam(kernel_node);
}
@@ -114,7 +114,7 @@ void SplitCPUKernel<T>::CheckParam(const CNodePtr &kernel_node) {
axis_ += SizeToLong(input_shape_.size());
}
if (output_num_ > IntToSize(input_shape_[LongToUlong(axis_)])) {
MS_LOG(EXCEPTION) << "Attr output_num " << output_num_ << " must less than " << input_shape_[axis_];
MS_LOG(EXCEPTION) << "Attr output_num " << output_num_ << " must be less than " << input_shape_[axis_];
}
}
} // namespace kernel

View File

@@ -65,7 +65,7 @@ void UpdateCacheCPUKernel::LaunchKernel(const std::vector<AddressPtr> &inputs,
auto indices_shape = AnfAlgo::GetPrevNodeOutputInferShape(node, 1);
auto update_shape = AnfAlgo::GetPrevNodeOutputInferShape(node, 2);
if (update_shape.size() < kMinUpdateShapeSize) {
MS_LOG(EXCEPTION) << "Updata shape should not less than " << kMinUpdateShapeSize;
MS_LOG(EXCEPTION) << "Update shape should not be less than " << kMinUpdateShapeSize;
}
batch_size_ = 1;
for (size_t i = 0; i < indices_shape.size(); ++i) {

View File

@@ -157,7 +157,7 @@ class SplitGpuFwdKernel : public GpuKernel {
return false;
}
if (output_num_ > SizeToInt(input_shape[axis_])) {
MS_LOG(ERROR) << "Attr output_num " << output_num_ << "must less than" << input_shape[axis_];
MS_LOG(ERROR) << "Attr output_num " << output_num_ << "must be less than" << input_shape[axis_];
return false;
}
if (output_num_ != output_num) {

View File

@@ -68,6 +68,7 @@ class BroadcastOpGpuKernel : public GpuKernel {
return true;
}
bool Init(const CNodePtr &kernel_node) override {
GetOpType(kernel_node);
auto shape1 = AnfAlgo::GetInputRealDeviceShapeIfExist(kernel_node, 0);
@@ -93,7 +94,7 @@ class BroadcastOpGpuKernel : public GpuKernel {
if (i < MAX_DIMS) {
output_shape_[i] = shape3[i];
} else {
MS_LOG(EXCEPTION) << "Output index: " << i << " should less than " << MAX_DIMS;
MS_LOG(EXCEPTION) << "Output index: " << i << " should be less than " << MAX_DIMS;
}
}
output_num_ *= shape3[i];

View File

@@ -362,7 +362,7 @@ class ConvGradInputGpuBkwKernel : public GpuKernel {
}
size_t h_index = iter->second;
if (stride_me.size() < h_index + 2) {
MS_LOG(EXCEPTION) << "Strides should greater than " << h_index + 1 << ", but got " << stride_me.size();
MS_LOG(EXCEPTION) << "Strides should be greater than " << h_index + 1 << ", but got " << stride_me.size();
}
(void)std::transform(stride_me.begin() + h_index, stride_me.begin() + h_index + 2, std::back_inserter(stride_),
[](const int64_t &value) { return static_cast<int>(value); });

View File

@@ -1985,7 +1985,7 @@ bool NchwFracZTransWithGroups(const FormatArgs &args, void *result, bool to_devi
auto cin_ori = c_dim;
auto cout_ori = n_dim / group_size;
if (cin_ori == 0 || cout_ori == 0) {
MS_LOG(ERROR) << "cin_ori, cout_ori must not equal to 0";
MS_LOG(ERROR) << "cin_ori, cout_ori must not be equal to 0";
return false;
}
size_t e_mult = std::min(Lcm(Lcm(cin_ori, kCubeSize) / cin_ori, Lcm(cout_ori, kCubeSize) / cout_ori), group_size);

View File

@@ -371,7 +371,7 @@ void Server::RegisterMessageCallback(const std::shared_ptr<ps::core::TcpCommunic
void Server::InitExecutor() {
MS_EXCEPTION_IF_NULL(func_graph_);
if (executor_threshold_ == 0) {
MS_LOG(EXCEPTION) << "The executor's threshold should greater than 0.";
MS_LOG(EXCEPTION) << "The executor's threshold should be greater than 0.";
return;
}
// The train engine instance is used in both push-type and pull-type kernels,

View File

@@ -513,7 +513,7 @@ PynativeAdjointPtr KPynativeCellImpl::ForgeGetItemAdjoint(const CNodePtr &cnode)
MS_LOG(EXCEPTION) << "CNode input 2 should be a Int64Imm, CNode: " << cnode->DebugString();
}
if (index_value->value() < 0) {
MS_LOG(EXCEPTION) << "CNode input 2 should not less than 0, CNode: " << cnode->DebugString();
MS_LOG(EXCEPTION) << "CNode input 2 should not be less than 0, CNode: " << cnode->DebugString();
}
size_t index_value_imm = LongToSize(index_value->value());
if (index_value_imm >= input_1_out->size()) {

View File

@@ -203,7 +203,7 @@ AbstractBasePtr ShrinkAbstract(const AbstractBasePtr &original_abstract,
std::back_inserter(shrunk_abstract_elements),
[abs_tuple_elements, before_shrink_tuple_size](const auto &node_and_index) {
if (node_and_index.index >= before_shrink_tuple_size) {
MS_LOG(EXCEPTION) << "index should less than inputs size, index: " << node_and_index.index
MS_LOG(EXCEPTION) << "index should be less than inputs size, index: " << node_and_index.index
<< ", abstract tuple size: " << before_shrink_tuple_size;
}
return abs_tuple_elements[node_and_index.index];
@@ -227,7 +227,7 @@ FuncGraphPtr ShrinkUnsedOutput(const FuncGraphPtr &fg, const std::vector<TpCNode
const auto &new_fg_output_inputs = new_fg_output_cnode->inputs();
constexpr auto kMinimalSize = 2;
if (new_fg_output_inputs.size() <= kMinimalSize) {
MS_LOG(EXCEPTION) << "New fg output should at least 2 elements, but: " << new_fg_output->DebugString();
MS_LOG(EXCEPTION) << "New fg output should have at least 2 elements, but: " << new_fg_output->DebugString();
}
before_shrink_inputs_size = SizeToLong(new_fg_output_inputs.size() - 1);
AnfNodePtrList shrunk_inputs{NewValueNode({prim::kPrimMakeTuple})};
@@ -235,7 +235,7 @@ FuncGraphPtr ShrinkUnsedOutput(const FuncGraphPtr &fg, const std::vector<TpCNode
std::transform(tp_cnodes_and_index.cbegin(), tp_cnodes_and_index.cend(), std::back_inserter(shrunk_inputs),
[new_fg_output, new_fg_output_inputs, before_shrink_inputs_size](const auto &node_and_index) {
if (node_and_index.index >= before_shrink_inputs_size) {
MS_LOG(EXCEPTION) << "index should less than inputs size, index: " << node_and_index.index
MS_LOG(EXCEPTION) << "index should be less than inputs size, index: " << node_and_index.index
<< ", output: " << new_fg_output->DebugString();
}
return new_fg_output_inputs[node_and_index.index + 1];
@@ -251,7 +251,7 @@ FuncGraphPtr ShrinkUnsedOutput(const FuncGraphPtr &fg, const std::vector<TpCNode
std::transform(tp_cnodes_and_index.cbegin(), tp_cnodes_and_index.cend(), std::back_inserter(shrunk_inputs),
[new_fg_output, value_tuple, before_shrink_inputs_size](const auto &node_and_index) {
if (node_and_index.index >= before_shrink_inputs_size) {
MS_LOG(EXCEPTION) << "index should less than inputs size, index: " << node_and_index.index
MS_LOG(EXCEPTION) << "index should be less than inputs size, index: " << node_and_index.index
<< ", output: " << new_fg_output->DebugString();
}
return (*value_tuple)[node_and_index.index];

View File

@@ -327,7 +327,7 @@ class InlinerBase : public AnfVisitor {
} else if (IsCNodeGraph(item)) {
auto cinputs = item->cast<CNodePtr>()->inputs();
if (cinputs.size() < 1) {
MS_LOG(EXCEPTION) << "graph call inputs should greater than 1";
MS_LOG(EXCEPTION) << "graph call inputs should be greater than 1";
}
FuncGraphPtr call_fg = GetValueNode<FuncGraphPtr>(cinputs[0]);
bool call_fg_has_branch = GraphHasBranch(call_fg);
@@ -338,7 +338,7 @@ class InlinerBase : public AnfVisitor {
} else if (IsPrimitiveCNode(item, prim::kPrimPartial)) {
auto cinputs = item->cast<CNodePtr>()->inputs();
if (cinputs.size() < 2) {
MS_LOG(EXCEPTION) << "partial call inputs should greater than 2";
MS_LOG(EXCEPTION) << "partial call inputs should be greater than 2";
}
FuncGraphPtr call_fg = GetValueNode<FuncGraphPtr>(cinputs[1]);
if (call_fg == nullptr) {

View File

@@ -667,7 +667,7 @@ void CacheEmbeddingForTrain(const FuncGraphPtr &graph, bool is_pipe, const CNode
MS_LOG(EXCEPTION) << "The last cnode after sorting, not return cnode.";
}
if (return_node->inputs().size() < 2) {
MS_LOG(EXCEPTION) << "Number of return node inputs should be great than or equal to 2.";
MS_LOG(EXCEPTION) << "Number of return node inputs should be greater than or equal to 2.";
}
auto depend_node = CreateDepend(graph, invalid_nodes, return_node->input(1));

View File

@@ -92,8 +92,8 @@ Status GatherInfo::GetManualSplitAttr() {
int64_t param_split_row = (GetValue<int64_t>(value_vector[0]));
int64_t offset = (GetValue<int64_t>(value_vector[1]));
if ((param_split_row <= 0) || (offset < 0)) {
MS_LOG(ERROR) << name_
<< ": The value of param split shape must be positive, and the offset must larger or equal to 0";
MS_LOG(ERROR) << name_ << ": The value of param split shape must be positive, "
<< "and the offset must be greater than or equal to 0";
return FAILED;
}
param_split_shapes_.push_back(param_split_row);
@@ -105,7 +105,7 @@ Status GatherInfo::GetManualSplitAttr() {
return FAILED;
}
if (std::any_of(index_offsets_.begin(), index_offsets_.end(), [](const int64_t &offset) { return offset < 0; })) {
MS_LOG(ERROR) << name_ << ": Index offset must not less than 0";
MS_LOG(ERROR) << name_ << ": Index offset must not be less than 0";
return FAILED;
}
return SUCCESS;

View File

@@ -95,7 +95,7 @@ Status ReshapeInfo::GetParameterInput() {
}
elements = dim_tuple->value();
if (elements.size() != outputs_shape_[0].size()) {
MS_LOG(ERROR) << name_ << ": Elements size must equal to outputs shape[0] size.";
MS_LOG(ERROR) << name_ << ": Elements size must be equal to outputs shape[0] size.";
return FAILED;
}

View File

@@ -47,7 +47,7 @@ Status TileInfo::GetAttrs() {
}
elements = multiples->value();
if (elements.size() != outputs_shape_[0].size()) {
MS_LOG(ERROR) << name_ << ": Elements size must equal to outputs shape[0] size.";
MS_LOG(ERROR) << name_ << ": Elements size must be equal to outputs shape[0] size.";
return FAILED;
}

View File

@@ -62,7 +62,7 @@ Status TransposeInfo::ComputeAxis() {
}
elements = dim_tuple->value();
if (elements.size() != inputs_shape_[0].size()) {
MS_LOG(ERROR) << name_ << ": elements size must equal to inputs shape 0 size.";
MS_LOG(ERROR) << name_ << ": elements size must be equal to inputs shape[0] size.";
return FAILED;
}
axis_v_.clear();

View File

@@ -83,7 +83,7 @@ class Strategy {
private:
const int64_t stage_;
// The size of Dimensions must equal to inputs_ tensor dimension.
// The size of Dimensions must be equal to inputs_ tensor dimension.
Strategys inputs_;
size_t internal_size_ = 0;
std::vector<StrategyPtr> internal_stragies_;

View File

@@ -163,7 +163,7 @@ Status DeviceTensor::DataPop_(std::shared_ptr<Tensor> *host_tensor) {
const mindspore::dataset::TensorShape dvpp_shape({dvppDataSize, 1, 1});
CHECK_FAIL_RETURN_UNEXPECTED(this->GetYuvStrideShape().size() >= kYuvDefaultChannels,
"Invalid YuvShape, should greater than 4");
"Invalid YuvShape, should be greater than 4");
uint32_t _output_width_ = this->GetYuvStrideShape()[0];
uint32_t _output_widthStride_ = this->GetYuvStrideShape()[1];

View File

@@ -1018,7 +1018,7 @@ Status Tensor::GetSliceOption(const SliceOption &slice_option, const int32_t &sl
RETURN_STATUS_UNEXPECTED("Both indices and slices can not be given.");
}
CHECK_FAIL_RETURN_UNEXPECTED(shape_.Size() > slice_index, "Invalid shape, should greater than slices index.");
CHECK_FAIL_RETURN_UNEXPECTED(shape_.Size() > slice_index, "Invalid shape, should be greater than slices index.");
// if slice object was provided, indices should be empty. Generate indices from the slice object.
if (slice_option.indices_.empty()) {
// check if slice is valid

View File

@@ -88,7 +88,7 @@ Status EMnistOp::WalkAllFiles() {
std::sort(image_names_.begin(), image_names_.end());
std::sort(label_names_.begin(), label_names_.end());
CHECK_FAIL_RETURN_UNEXPECTED(image_names_.size() == label_names_.size(),
"Invalid data, num of images does not equal to num of labels.");
"Invalid data, num of images is not equal to num of labels.");
return Status::OK();
}

View File

@@ -245,7 +245,7 @@ Status MnistOp::WalkAllFiles() {
std::sort(label_names_.begin(), label_names_.end());
CHECK_FAIL_RETURN_UNEXPECTED(image_names_.size() == label_names_.size(),
"Invalid data, num of images does not equal to num of labels.");
"Invalid data, num of images is not equal to num of labels.");
return Status::OK();
}

View File

@@ -163,7 +163,7 @@ Status QMnistOp::WalkAllFiles() {
}
CHECK_FAIL_RETURN_UNEXPECTED(image_names_.size() == label_names_.size(),
"Invalid data, num of images does not equal to num of labels.");
"Invalid data, num of images is not equal to num of labels.");
for (size_t i = 0; i < image_names_.size(); i++) {
Path file_path(image_names_[i]);

View File

@@ -51,7 +51,7 @@ Status ComputeShuffleSize(int64_t num_files, int64_t num_devices, int64_t num_ro
}
// get the average per file
CHECK_FAIL_RETURN_UNEXPECTED(num_files != 0, "The size of dataset_files must greater than 0.");
CHECK_FAIL_RETURN_UNEXPECTED(num_files != 0, "The size of dataset_files must be greater than 0.");
avg_rows_per_file = num_rows / num_files;
*shuffle_size = std::max(avg_rows_per_file * average_files_multiplier, shuffle_max);

View File

@@ -32,7 +32,7 @@ Status EpochCtrlPass::InjectionFinder::Visit(std::shared_ptr<RootNode> node, boo
RETURN_UNEXPECTED_IF_NULL(node);
RETURN_UNEXPECTED_IF_NULL(modified);
CHECK_FAIL_RETURN_UNEXPECTED(node->Children().size() > 0,
"Invalid data, the node of child should greater than zero.");
"Invalid data, the node of child should be greater than zero.");
// The injection is at the child of the root node
injection_point_ = node->Children()[0];
num_epochs_ = node->num_epochs();
@@ -61,7 +61,7 @@ Status EpochCtrlPass::InjectionFinder::VisitAfter(std::shared_ptr<TransferNode>
RETURN_UNEXPECTED_IF_NULL(node);
RETURN_UNEXPECTED_IF_NULL(modified);
CHECK_FAIL_RETURN_UNEXPECTED(node->Children().size() > 0,
"Invalid data, the node of child should greater than zero.");
"Invalid data, the node of child should be greater than zero.");
// Assumption: There is only one TransferNode in a pipeline. This assumption is not validated here.
// Move the injection point to the child of this node.
injection_point_ = node->Children()[0];

View File

@@ -420,7 +420,7 @@ Status ResizePreserve(const TensorRow &inputs, int32_t height, int32_t width, in
TensorRow *outputs) {
outputs->resize(3);
CHECK_FAIL_RETURN_UNEXPECTED(inputs.size() > 0,
"Invalid input, should greater than 0, but got " + std::to_string(inputs.size()));
"Invalid input, should be greater than 0, but got " + std::to_string(inputs.size()));
std::shared_ptr<Tensor> input = inputs[0];
CHECK_FAIL_RETURN_UNEXPECTED(input->shape().Size() >= 3, "Invalid input shape, should be greater than 3 dimensions.");
LiteMat lite_mat_src(input->shape()[1], input->shape()[0], input->shape()[2],

View File

@@ -284,7 +284,7 @@ const FuncGraphPtr GetLoadedGraph(const ResourcePtr &res) {
if (loaded_graph_num == 1) {
return loaded_graph;
}
MS_LOG(EXCEPTION) << "The loaded sub graph currently should less than 2, but got " << loaded_graph_num;
MS_LOG(EXCEPTION) << "The loaded sub graph currently should be less than 2, but got " << loaded_graph_num;
}
void CheckRootInputShapeAndType(const ResourcePtr &res, const FuncGraphPtr &loaded_graph) {
@@ -302,6 +302,7 @@ void CheckRootInputShapeAndType(const ResourcePtr &res, const FuncGraphPtr &load
MS_LOG(EXCEPTION) << "The inputs number " << root_inputs_num << " not equal to the inputs number of loaded graph "
<< loaded_inputs_num;
}
for (size_t index = 0; index < root_inputs_num; index++) {
auto root_input = root_inputs[index];
auto loaded_input = loaded_inputs[index];

View File

@@ -140,7 +140,7 @@ class Parser {
AnfNodePtr ParseNone(const FunctionBlockPtr &block, const py::object &node);
// Process Ellipsis
AnfNodePtr ParseEllipsis(const FunctionBlockPtr &block, const py::object &node);
// Process a integer or float number
// Process an integer or float number
AnfNodePtr ParseNum(const FunctionBlockPtr &block, const py::object &node);
// Process a string variable
AnfNodePtr ParseStr(const FunctionBlockPtr &block, const py::object &node);

View File

@@ -459,7 +459,7 @@ EvalResultPtr TrivialPrimEvaluator::Run(AnalysisEnginePtr engine, const ConfigPt
EvalResultPtr TransitionPrimEvaluator::Run(AnalysisEnginePtr engine, const ConfigPtrList &args_conf_list,
const AnfNodeConfigPtr &out_conf) {
if (args_conf_list.empty()) {
MS_LOG(EXCEPTION) << "Size should greater than 0";
MS_LOG(EXCEPTION) << "Size should be greater than 0";
}
AbstractBasePtrList args_spec_list;
(void)std::transform(args_conf_list.begin(), args_conf_list.end(), std::back_inserter(args_spec_list),

View File

@@ -194,16 +194,18 @@ std::vector<std::tuple<std::size_t, ge::NodePtr>> OpTilingCalculateAdapter::Conv
if (!has_input_name_attr) {
MS_LOG(EXCEPTION) << "Node should has attr: input_names. " << node->fullname_with_scope();
}
auto input_names_attr = AnfAlgo ::GetNodeAttr<std::vector<std::string>>(node, "input_names");
std::vector<std::string> op_infer_depends;
std::vector<std::tuple<std::size_t, ge::NodePtr>> constant_ops;
for (auto index : depends_list_me) {
if (LongToSize(index) > input_names_attr.size()) {
MS_LOG(EXCEPTION) << "Input index " << index << " should less input_names' size " << input_names_attr.size();
MS_LOG(EXCEPTION) << "Input index " << index << " should not be greater than input_names' size "
<< input_names_attr.size();
}
auto iter = depend_tensor_map.find(LongToSize(index));
if (iter == depend_tensor_map.end()) {
MS_LOG(EXCEPTION) << "Input index " << index << " should less than depend_tensor_map' size "
MS_LOG(EXCEPTION) << "Input index " << index << " should be less than depend_tensor_map' size "
<< input_names_attr.size();
}
auto depend_name = input_names_attr[index];
@@ -245,7 +247,7 @@ void OpTilingCalculateAdapter::InitOpIoName(const CNodePtr &node) {
MS_EXCEPTION_IF_NULL(item);
if (item->param_type() == PARAM_DYNAMIC) {
if (dynamic_input_index > dynamic_inputs_list.size()) {
MS_LOG(EXCEPTION) << "Dynamic input index should less than the dynamic input's size.";
MS_LOG(EXCEPTION) << "Dynamic input index should be less than the dynamic input's size.";
}
auto real_inputs_num = dynamic_inputs_list[dynamic_input_index];
for (auto k = 0; k < real_inputs_num; k++) {

View File

@@ -380,7 +380,7 @@ class Tensor(Tensor_):
def itemset(self, *args):
r"""
Insert scalar into a tensor (scalar is cast to tensors dtype, if possible).
Insert scalar into a tensor (scalar is cast to tensor's dtype, if possible).
There must be at least 1 argument, and define the last argument as item.
Then, tensor.itemset(\*args) is equivalent to :math:`tensor[args] = item`.
@@ -1093,7 +1093,7 @@ class Tensor(Tensor_):
def ptp(self, axis=None, keepdims=False):
"""
The name of the function comes from the acronym for peak to peak.
The name of the function comes from the acronym for "peak to peak".
Note:
Numpy arguments `dtype` and `out` are not supported.
@@ -1477,22 +1477,22 @@ class Tensor(Tensor_):
indices (Tensor): The indices with shape `(Nj...)` of the values to extract.
axis (int, optional): The axis over which to select values. By default,
the flattened input array is used. Default: `None`.
mode (raise, wrap, clip, optional):
mode ('raise', 'wrap', 'clip', optional):
- edge: Pads with the edge values of `arr`.
- raise: Raises an error;
- wrap: Wraps around;
- clip: Clips to the range. `clip` mode means that all indices that are
- clip: Clips to the range. 'clip' mode means that all indices that are
too large are replaced by the index that addresses the last element
along that axis. Note that this disables indexing with negative numbers.
Default: `clip`.
Default: 'clip'.
Returns:
Tensor, the indexed result.
Raises:
ValueError: if `axis` is out of range, or `mode` has values other than (raise, wrap, clip)
ValueError: if `axis` is out of range, or `mode` has values other than ('raise', 'wrap', 'clip')
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
@@ -1542,15 +1542,15 @@ class Tensor(Tensor_):
choices (Union[tuple, list, Tensor]): Choice arrays. `a` and all of the `choices` must
be broadcasted to the same shape. If `choices` is itself an array, then
its outermost dimension (i.e., the one corresponding to ``choices.shape[0]``)
is taken as defining the sequence.
mode (raise, wrap, clip, optional): Specifies how indices outside
is taken as defining the "sequence".
mode ('raise', 'wrap', 'clip', optional): Specifies how indices outside
``[0, n-1]`` will be treated:
raise raise an error (default);
'raise' raise an error (default);
wrap wrap around;
'wrap' wrap around;
clip clip to the range. clip mode means that all indices that are
'clip' clip to the range. 'clip' mode means that all indices that are
too large are replaced by the index that addresses the last element
along that axis. Note that this disables indexing with negative numbers.
@@ -1615,10 +1615,10 @@ class Tensor(Tensor_):
Args:
v (Union[int, float, bool, list, tuple, Tensor]): Values to insert into `a`.
side ('left', 'right', optional): If left, the index of the first suitable
location found is given. If right, return the last such index. If there is
side ('left', 'right', optional): If 'left', the index of the first suitable
location found is given. If 'right', return the last such index. If there is
no suitable index, return either 0 or N (where N is the length of `a`).
Default: `left`.
Default: 'left'.
sorter (Union[int, float, bool, list, tuple, Tensor]): 1-D optional array of
integer indices that sort array `a` into ascending order. They are typically
the result of argsort.
@@ -1778,7 +1778,7 @@ class Tensor(Tensor_):
keepdims (bool): If this is set to True, the axes which are reduced are left in the result as
dimensions with size one. With this option, the result will broadcast correctly against the input array.
If the default value is passed, then keepdims will not be passed through to the sum method of
sub-classes of ndarray, however any non-default value will be. If the sub-class method does not
sub-classes of ndarray, however any non-default value will be. If the sub-class method does not
implement keepdims any exceptions will be raised. Default: `False`.
initial (scalar): Starting value for the sum. Default: `None`.
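
A small sketch of the 'left'/'right' semantics documented above, assuming the Tensor.searchsorted(v, side='left', sorter=None) method shown in this hunk (values are hypothetical):

from mindspore import Tensor

a = Tensor([1, 3, 5, 5, 7])
# 'left' returns the first suitable insertion index, 'right' the last one.
print(a.searchsorted(5, side='left'))   # 2
print(a.searchsorted(5, side='right'))  # 4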

View File

@@ -46,11 +46,11 @@ def cal_quantization_params(input_min,
input_min = np.minimum(0.0, input_min)
if input_min.shape != input_max.shape:
raise ValueError("input min shape should equal to input max.")
raise ValueError("input min shape should be equal to input max.")
if len(input_min.shape) > 1:
raise ValueError("input min and max shape should be one dim.")
if (input_min > input_max).all():
raise ValueError("input_min min should less than input max.")
raise ValueError("input_min min should be less than input max.")
if (input_max == input_min).all():
return np.ones(input_min.shape), np.zeros(input_min.shape)
@@ -105,7 +105,7 @@ def weight2int(data, scale, zero_point, quant_min, quant_max):
if scale.shape != zero_point.shape:
raise ValueError("`scale` and `zero_point` should have the same shape.")
if scale.shape[0] < 0:
raise ValueError("`scale` and `zero_point` shape should greater than zero.")
raise ValueError("`scale` and `zero_point` shape should be greater than zero.")
if len(scale.shape) >= 1 and scale.shape[0] > 1:
# for perchannel
if scale.shape[0] == data.shape[0]:

View File

@@ -881,7 +881,7 @@ AbstractBasePtr InferImplReshape(const AnalysisEnginePtr &, const PrimitivePtr &
shape_num = LongMulWithOverflowCheck(value, shape_num);
}
if (shape_num != x_num) {
MS_LOG(EXCEPTION) << "The accumulate of x_shape must equal to out_shape, but got x_shape: " << x_shape
MS_LOG(EXCEPTION) << "The accumulate of x_shape must be equal to out_shape, but got x_shape: " << x_shape
<< ", and out_shape: " << shape;
}

View File

@@ -264,6 +264,7 @@ AbstractBasePtr InferImplConv2D(const AnalysisEnginePtr &, const PrimitivePtr &p
uint64_t c_axis = 1;
uint64_t h_axis = 2;
uint64_t w_axis = 3;
int64_t data_format = GetAndCheckFormat(primitive->GetAttr("format"));
if (data_format == Format::NHWC) {
c_axis = 3;
@@ -273,22 +274,25 @@ AbstractBasePtr InferImplConv2D(const AnalysisEnginePtr &, const PrimitivePtr &p
int64_t group = CheckAttrPositiveInt64(op_name, primitive->GetAttr("group"), "group");
if ((x_shape[c_axis] != Shape::SHP_ANY) && (w_shape[c_axis] != Shape::SHP_ANY) &&
((x_shape[c_axis] / group) != w_shape[c_axis])) {
MS_LOG(EXCEPTION) << "x_shape[C_in] / group must equal to w_shape[C_in] = " << w_shape[c_axis] << ", but got "
MS_LOG(EXCEPTION) << "x_shape[C_in] / group must be equal to w_shape[C_in]: " << w_shape[c_axis] << ", but got "
<< (x_shape[c_axis] / group);
}
int64_t out_channel = CheckAttrPositiveInt64(op_name, primitive->GetAttr("out_channel"), "out_channel");
if ((w_shape[n_axis] != Shape::SHP_ANY) && (w_shape[n_axis] != out_channel)) {
MS_LOG(EXCEPTION) << "w_shape[" << n_axis << "] = " << w_shape[n_axis] << " must equal to = " << out_channel;
MS_LOG(EXCEPTION) << "w_shape[" << n_axis << "] = " << w_shape[n_axis] << " must be equal to " << out_channel;
}
const size_t kernel_size_num_element = 2;
std::vector<int64_t> kernel_size =
CheckAttrIntOrTuple(op_name, primitive->GetAttr("kernel_size"), 0, kernel_size_num_element);
if ((w_shape[h_axis] != Shape::SHP_ANY) && (w_shape[h_axis] != kernel_size[0])) {
MS_LOG(EXCEPTION) << "weight height = " << w_shape[h_axis] << ", must equal to = " << kernel_size[0];
MS_LOG(EXCEPTION) << "weight height: " << w_shape[h_axis] << " must be equal to " << kernel_size[0];
}
if ((w_shape[w_axis] != Shape::SHP_ANY) && (w_shape[w_axis] != kernel_size[1])) {
MS_LOG(EXCEPTION) << "weight width = " << w_shape[w_axis] << ", must equal to = " << kernel_size[1];
MS_LOG(EXCEPTION) << "weight width: " << w_shape[w_axis] << " must be equal to " << kernel_size[1];
}
std::vector<int64_t> stride =
CheckAttrIntOrTuple(op_name, primitive->GetAttr("stride"), stride_start_idx, stride_num_element);
std::vector<int64_t> dilation =
@@ -318,6 +322,7 @@ AbstractBasePtr InferImplConv2D(const AnalysisEnginePtr &, const PrimitivePtr &p
std::vector<ValuePtr> pad_list_val = {MakeValue(pad_list[0]), MakeValue(pad_list[1]), MakeValue(pad_list[2]),
MakeValue(pad_list[3])};
primitive->set_attr("pad_list", MakeValue(pad_list_val));
ShapeVector output_shape;
ShapeVector output_shape_min;
ShapeVector output_shape_max;
@@ -333,6 +338,7 @@ AbstractBasePtr InferImplConv2D(const AnalysisEnginePtr &, const PrimitivePtr &p
CheckShapeAnyAndPositive(op_name + " output_shape", output_shape);
CheckShapeAllPositive(op_name + " output_shape_min", output_shape_min);
CheckShapeAllPositive(op_name + " output_shape_max", output_shape_max);
TypePtr x_type = input_x->element()->GetTypeTrack();
if (x_type->type_id() == TypeId::kNumberTypeInt8) {
x_type = kInt32;
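
A worked instance of the x_shape[C_in] / group check above, as a hedged sketch against the public Conv2D primitive (all shapes are hypothetical):

import numpy as np
import mindspore as ms
import mindspore.ops as ops

conv = ops.Conv2D(out_channel=4, kernel_size=3, group=2)
x = ms.Tensor(np.ones((1, 4, 8, 8)), ms.float32)  # x_shape[C_in] = 4
w = ms.Tensor(np.ones((4, 2, 3, 3)), ms.float32)  # w_shape[C_in] = 4 / 2 = 2
y = conv(x, w)  # passes: x_shape[C_in] / group == w_shape[C_in]
# A weight with C_in = 4 here would trigger the exception quoted above.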

View File

@@ -182,12 +182,13 @@ abstract::ShapePtr Conv2dInferShape(const PrimitivePtr &primitive, const std::ve
int64_t group = CheckAttrPositiveInt64(prim_name, primitive->GetAttr("group"), "group");
if ((x_shape[c_axis] != Shape::SHP_ANY) && (w_shape[c_axis] != Shape::SHP_ANY) &&
((x_shape[c_axis] / group) != w_shape[c_axis])) {
MS_LOG(EXCEPTION) << "x_shape[C_in] / group must equal to w_shape[C_in] = " << w_shape[c_axis] << ", but got "
MS_LOG(EXCEPTION) << "x_shape[C_in] / group must be equal to w_shape[C_in]: " << w_shape[c_axis] << ", but got "
<< (x_shape[c_axis] / group);
}
int64_t out_channel = CheckAttrPositiveInt64(prim_name, primitive->GetAttr("out_channel"), "out_channel");
if ((w_shape[n_axis] != Shape::SHP_ANY) && (w_shape[n_axis] != out_channel)) {
MS_LOG(EXCEPTION) << "w_shape[" << n_axis << "] = " << w_shape[n_axis] << " must equal to = " << out_channel;
MS_LOG(EXCEPTION) << "w_shape[" << n_axis << "] = " << w_shape[n_axis]
<< " must be equal to out_channel: " << out_channel;
}
constexpr size_t kernel_size_num = 2;
constexpr size_t stride_num = 2;
@@ -196,10 +197,12 @@ abstract::ShapePtr Conv2dInferShape(const PrimitivePtr &primitive, const std::ve
constexpr size_t start_index = 2;
std::vector<int64_t> kernel_size = CheckAttrIntOrTuple(primitive->GetAttr("kernel_size"), 0, kernel_size_num);
if ((w_shape[h_axis] != Shape::SHP_ANY) && (w_shape[h_axis] != kernel_size[0])) {
MS_LOG(EXCEPTION) << "weight height = " << w_shape[h_axis] << ", must equal to = " << kernel_size[0];
MS_LOG(EXCEPTION) << "weight height = " << w_shape[h_axis]
<< ", must be equal to kernel_size[0]: " << kernel_size[0];
}
if ((w_shape[w_axis] != Shape::SHP_ANY) && (w_shape[w_axis] != kernel_size[1])) {
MS_LOG(EXCEPTION) << "weight width = " << w_shape[w_axis] << ", must equal to = " << kernel_size[1];
MS_LOG(EXCEPTION) << "weight width = " << w_shape[w_axis]
<< ", must be equal to kernel_size[1]: " << kernel_size[1];
}
std::vector<int64_t> stride = CheckAttrIntOrTuple(primitive->GetAttr("stride"), start_index, stride_num);
std::vector<int64_t> dilation = CheckAttrIntOrTuple(primitive->GetAttr("dilation"), start_index, dilation_num);

View File

@@ -27,7 +27,7 @@
namespace mindspore {
namespace ops {
constexpr auto kNameLogicalAnd = "LogicalAnd";
/// \brief Computes the “logical AND” of two tensors element-wise.
/// \brief Computes the "logical AND" of two tensors element-wise.
/// Refer to Python API @ref mindspore.ops.LogicalAnd for more details.
class MS_CORE_API LogicalAnd : public PrimitiveC {
public:

View File

@@ -25,7 +25,7 @@
namespace mindspore {
namespace ops {
constexpr auto kNameLogicalNot = "LogicalNot";
/// \brief Computes the “logical NOT” of a tensor element-wise.
/// \brief Computes the "logical NOT" of a tensor element-wise.
/// Refer to Python API @ref mindspore.ops.LogicalNot for more details.
class MS_CORE_API LogicalNot : public PrimitiveC {
public:

View File

@@ -25,7 +25,7 @@
namespace mindspore {
namespace ops {
constexpr auto kNameLogicalOr = "LogicalOr";
/// \brief Computes the “logical OR” of two tensors element-wise.
/// \brief Computes the "logical OR" of two tensors element-wise.
/// Refer to Python API @ref mindspore.ops.LogicalOr for more details.
class MS_CORE_API LogicalOr : public PrimitiveC {
public:

View File

@@ -27,7 +27,7 @@
namespace mindspore {
namespace ops {
constexpr auto kNameReduceAll = "ReduceAll";
/// \brief Reduces a dimension of a tensor by the “logicalAND” of all elements in the dimension.
/// \brief Reduces a dimension of a tensor by the "logical AND" of all elements in the dimension.
/// Refer to Python API @ref mindspore.ops.ReduceAll for more details.
class MS_CORE_API ReduceAll : public Reduce {
public:

View File

@@ -27,7 +27,7 @@
namespace mindspore {
namespace ops {
constexpr auto kNameReduceAny = "ReduceAny";
/// \brief Reduces a dimension of a tensor by the “logical OR” of all elements in the dimension.
/// \brief Reduces a dimension of a tensor by the "logical OR" of all elements in the dimension.
/// Refer to Python API @ref mindspore.ops.ReduceAny for more details.
class MS_CORE_API ReduceAny : public Reduce {
public:

View File

@@ -432,7 +432,7 @@ def check_minddataset(method):
dataset_file = param_dict.get('dataset_file')
if isinstance(dataset_file, list):
if len(dataset_file) > 4096:
raise ValueError("length of dataset_file should less than or equal to {}.".format(4096))
raise ValueError("length of dataset_file should be less than or equal to {}.".format(4096))
for f in dataset_file:
check_file(f)
else:

View File

@@ -140,7 +140,7 @@ class LabelSensitiveMetric(AttributionMetric):
"""Checks whether num_labels is valid."""
check_value_type("num_labels", num_labels, int)
if num_labels < 1:
raise ValueError("Argument num_labels must be parsed with a integer > 0.")
raise ValueError("Argument num_labels must be parsed with an integer > 0.")
def aggregate(self, result, targets):
"""Aggregates single result to global_results."""

View File

@@ -42,7 +42,7 @@ int AffineFP32Coder::PrepareSpliceOp() {
// init splice param
splice_param_ = new SpliceWrapperParam();
if (affine_param_->context_size_ > MAX_SHAPE_SIZE) {
MS_LOG(ERROR) << "Context size should less than MAX_SHAPE_SIZE.";
MS_LOG(ERROR) << "Context size should be less than MAX_SHAPE_SIZE.";
return RET_ERROR;
}
for (int i = 0; i < affine_param_->context_size_; i++) {

View File

@@ -63,7 +63,7 @@ int AffineInt8Coder::PrepareSpliceOp() {
// init splice param
splice_param_ = new SpliceWrapperParam();
if (affine_param_->context_size_ > MAX_SHAPE_SIZE) {
MS_LOG(ERROR) << "Context size should less than MAX_SHAPE_SIZE.";
MS_LOG(ERROR) << "Context size should be less than MAX_SHAPE_SIZE.";
return RET_ERROR;
}
for (int i = 0; i < affine_param_->context_size_; i++) {

View File

@@ -45,12 +45,12 @@
} \
} while (0)
#define CHECK_LESS_RETURN(size1, size2) \
do { \
if ((size1) < (size2)) { \
MS_LOG(ERROR) << #size1 << " must not less than " << #size2; \
return mindspore::lite::RET_ERROR; \
} \
#define CHECK_LESS_RETURN(size1, size2) \
do { \
if ((size1) < (size2)) { \
MS_LOG(ERROR) << #size1 << " must not be less than " << #size2; \
return mindspore::lite::RET_ERROR; \
} \
} while (0)
#else

View File

@@ -183,7 +183,7 @@ int SgdCPUKernel::Prepare() {
}
if (sgd_param_->use_nesterov_ && sgd_param_->dampening_ > 0.0f) {
MS_LOG(ERROR) << "If use nesterov, dampening must equal to 0.0";
MS_LOG(ERROR) << "If use nesterov, dampening must be equal to 0.0";
return RET_ERROR;
}
auto ret = OptimizerKernel::Prepare();

View File

@@ -67,7 +67,7 @@ AclModelOptions CustomAscend310Kernel::GetAclModelOptions(const mindspore::Conte
STATUS CustomAscend310Kernel::PrepareModelInfer() {
if (inputs_.size() < 1) {
MS_LOG(ERROR) << "Inputs size should not less than 1.";
MS_LOG(ERROR) << "Inputs size should not be less than 1.";
return lite::RET_ERROR;
}
// last input is om data tensor

View File

@@ -341,7 +341,7 @@ int MatMulOpenCLKernel::InitBias() {
#endif
void MatMulOpenCLKernel::SetGlobalLocal() {
// local size should less than MAX_GROUP_SIZE
// local size should be less than MAX_GROUP_SIZE
local_size_ = {32, 4, 1};
global_size_ = {1, 1, 1};
global_size_ = {UP_DIV(static_cast<size_t>(outShape[3]), C4NUM),

View File

@@ -226,7 +226,7 @@ int StrassenOpenCLKernel::StrassenSetGlobalLocal(size_t strassen_size, int type_
}
void StrassenOpenCLKernel::SetGlobalLocal() {
// local size should less than MAX_GROUP_SIZE
// local size should be less than MAX_GROUP_SIZE
local_size_ = {32, 4, 1};
global_size_ = {1, 1, 1};
size_t strassen_size = outShape[3] / 2;

View File

@@ -113,7 +113,8 @@ bool IndexingCompress(const std::set<T> &quant_data_set, const std::map<T, size_
}
}
if (index > pack_repetition_size_in_byte * 8) {
MS_LOG(ERROR) << "unexpected index: " << index << " should not greater than " << pack_repetition_size_in_byte * 8;
MS_LOG(ERROR) << "unexpected index: " << index << " should not be greater than "
<< pack_repetition_size_in_byte * 8;
return false;
}
// update tensor data
@@ -186,7 +187,7 @@ bool SparsityCompress(const std::set<T> &quant_data_set, const std::map<T, size_
}
}
if ((unsigned int)index > pack_sparsity_size_in_byte * 8) {
MS_LOG(ERROR) << "unexpected index: " << index << " should not greater than " << pack_sparsity_size_in_byte * 8;
MS_LOG(ERROR) << "unexpected index: " << index << " should not be greater than " << pack_sparsity_size_in_byte * 8;
return false;
}
auto new_data_str = BoolVectorToString(bits);

View File

@@ -89,8 +89,8 @@ bool CalSplitOutputShape(int64_t splited_axis_value, const SplitInfo *split_info
}
// out-shape after splited
int64_t tmp_value = 0;
MS_CHECK_TRUE_MSG(split_num > 0, false, "out_num of split_info should greater than zero");
MS_CHECK_TRUE_MSG(split_len > 0, false, "split_len should greater than zero");
MS_CHECK_TRUE_MSG(split_num > 0, false, "out_num of split_info should be greater than zero");
MS_CHECK_TRUE_MSG(split_len > 0, false, "split_len should be greater than zero");
for (int64_t i = 0; i < split_num - 1; i++) {
if (INT_MUL_OVERFLOW_THRESHOLD(split_info->size_splits[i], splited_axis_value, INT64_MAX)) {
MS_LOG(ERROR) << "int mul overflow";

View File

@@ -335,7 +335,7 @@ class FakeQuantWithMinMaxObserver(UniformQuantObserver):
symmetric (bool): Whether the quantization algorithm is symmetric or not. Default: False.
narrow_range (bool): Whether the quantization algorithm uses narrow range or not. Default: False.
quant_delay (int): Quantization delay parameters according to the global step. Default: 0.
neg_trunc (bool): Whether the quantization algorithm uses nagetive truncation or not. Default: False.
neg_trunc (bool): Whether the quantization algorithm uses negative truncation or not. Default: False.
mode (str): Optional quantization mode, currently only `DEFAULT`(QAT) and `LEARNED_SCALE` are supported.
Default: ("DEFAULT")
Inputs:

View File

@@ -94,7 +94,7 @@ class SGD(Optimizer):
dampening (float): A floating point value of dampening for momentum. must be at least 0.0. Default: 0.0.
weight_decay (float): Weight decay (L2 penalty). It must be equal to or greater than 0. Default: 0.0.
nesterov (bool): Enables the Nesterov momentum. If use nesterov, momentum must be positive,
and dampening must equal to 0.0. Default: False.
and dampening must be equal to 0.0. Default: False.
loss_scale (float): A floating point value for the loss scale, which must be larger than 0.0. In general, use
the default value. Only when `FixedLossScaleManager` is used for training and the `drop_overflow_update` in
`FixedLossScaleManager` is set to False, then this value needs to be the same as the `loss_scale` in
@@ -164,7 +164,7 @@ class SGD(Optimizer):
validator.check_value_type("nesterov", nesterov, [bool], self.cls_name)
if nesterov and (momentum <= 0.0 or dampening != 0.0):
raise ValueError("If use nesterov, momentum must be positive and dampening must equal to 0.0,"
raise ValueError("If use nesterov, momentum must be positive and dampening must be equal to 0.0,"
"but got momentum {}, dampening {}".format(momentum, dampening))
self.nesterov = nesterov
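
The constraint above in runnable form, as a sketch assuming the public nn.SGD interface (the network and values are hypothetical):

import mindspore.nn as nn

net = nn.Dense(3, 2)
# nesterov=True requires momentum > 0 and dampening == 0.0;
# any other combination raises the ValueError quoted above.
opt = nn.SGD(net.trainable_params(), learning_rate=0.1,
             momentum=0.9, dampening=0.0, nesterov=True)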

View File

@@ -70,7 +70,7 @@ class SparseToDense(Cell):
class SparseTensorDenseMatmul(Cell):
"""
Multiplies sparse matrix `a` and dense matrix `b`.
The rank of sparse matrix and dense matrix must equal to `2`.
The rank of sparse matrix and dense matrix must be equal to `2`.
Args:
adjoint_st (bool): If true, sparse tensor is transposed before multiplication. Default: False.

View File

@@ -364,7 +364,7 @@ class TrainOneStepWithLossScaleCell(TrainOneStepCell):
cleared before executing the computation.
Outputs:
Tuple[object, object], the first value is False for GPU backend, while it is a instance of
Tuple[object, object], the first value is False for GPU backend, while it is an instance of
NPUAllocFloatStatus for other backend. The status is used to detect overflow during overflow detection.
The second value is the same as the input of `compute_input`, but contains some information about the
execution order.

View File

@@ -1226,20 +1226,20 @@ def meshgrid(*xi, sparse=False, indexing='xy'):
Args:
*xi (Tensor): 1-D arrays representing the coordinates
of a grid.
indexing (xy, ij, optional): Cartesian (xy, default) or
matrix (ij) indexing of output. In the 2-D case with
indexing ('xy', 'ij', optional): Cartesian ('xy', default) or
matrix ('ij') indexing of output. In the 2-D case with
inputs of length `M` and `N`, the outputs are of shape `(N, M)`
for xy indexing and `(M, N)` for ij indexing. In the 3-D
for 'xy' indexing and `(M, N)` for 'ij' indexing. In the 3-D
case with inputs of length `M`, `N` and `P`, outputs are of shape
`(N, M, P)` for xy indexing and `(M, N, P)` for ij indexing.
`(N, M, P)` for 'xy' indexing and `(M, N, P)` for 'ij' indexing.
sparse (bool, optional): If True a sparse grid is returned in
order to conserve memory. Default is False.
Returns:
Tuple of tensors, for vectors `x1, x2, ..., xn` with lengths
``Ni=len(xi)``, return `(N1, N2, N3,...Nn)` shaped arrays if
``indexing=ij`` or `(N2, N1, N3,...Nn)` shaped arrays if
``indexing=xy`` with the elements of `xi` repeated to fill the matrix
``indexing='ij'`` or `(N2, N1, N3,...Nn)` shaped arrays if
``indexing='xy'`` with the elements of `xi` repeated to fill the matrix
along the first dimension for `x1`, the second for `x2` and so on.
Raises:
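The 'xy'/'ij' contrast above in two lines (a sketch; shapes follow directly from the docstring):

    import mindspore.numpy as mnp

    x = mnp.arange(3)                            # M = 3
    y = mnp.arange(2)                            # N = 2
    xv, yv = mnp.meshgrid(x, y, indexing='xy')   # each has shape (N, M) = (2, 3)
    xi, yi = mnp.meshgrid(x, y, indexing='ij')   # each has shape (M, N) = (3, 2)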
@ -1530,7 +1530,7 @@ def diagflat(v, k=0):
v (Tensor): Input data, which is flattened and set as the `k-th` diagonal
of the output.
k (int, optional): Diagonal to set; 0, the default, corresponds to the
main diagonal, a positive (negative) `k` giving the number of the
"main" diagonal, a positive (negative) `k` giving the number of the
diagonal above (below) the main.
Returns:
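For instance, a positive `k` shifts the flattened input onto the diagonal above the main one (sketch):

    import mindspore.numpy as mnp

    mnp.diagflat(mnp.asarray([1, 2]), k=1)
    # [[0 1 0]
    #  [0 0 2]
    #  [0 0 0]]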

@ -1187,7 +1187,7 @@ def tile(a, reps):
So a shape (3,) array is promoted to (1, 3) for 2-D replication, or
shape (1, 1, 3) for 3-D replication. If this is not the desired behavior,
promote `a` to d-dimensions manually before calling this function.
If ``a.ndim > d``, `reps` is promoted to ``a.ndim`` by pre-pending 1s to it. Thus
If ``a.ndim > d``, `reps` is promoted to ``a.ndim`` by pre-pending 1's to it. Thus
for an `a` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as (1, 1, 2, 2).
Args:
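The promotion rule above, worked through on the docstring's own shapes (sketch):

    import mindspore.numpy as mnp

    a = mnp.ones((2, 3, 4, 5))
    mnp.tile(a, (2, 2)).shape   # (2, 3, 8, 10): reps (2, 2) acted as (1, 1, 2, 2)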
@ -1839,7 +1839,7 @@ def take(a, indices, axis=None, mode='clip'):
"""
Takes elements from an array along an axis.
When axis is not None, this function does the same thing as fancy indexing
When axis is not None, this function does the same thing as "fancy" indexing
(indexing arrays using arrays); however, it can be easier to use if you need
elements along a given axis. A call such as ``np.take(arr, indices, axis=3)`` is
equivalent to ``arr[:,:,:,indices,...]``.
@ -1853,14 +1853,14 @@ def take(a, indices, axis=None, mode='clip'):
indices (Tensor): The indices with shape `(Nj...)` of the values to extract.
axis (int, optional): The axis over which to select values. By default,
the flattened input array is used.
mode (raise, wrap, clip, optional): Specifies how out-of-bounds
mode ('raise', 'wrap', 'clip', optional): Specifies how out-of-bounds
indices will behave.
raise raise an error;
'raise' raise an error;
wrap wrap around;
'wrap' wrap around;
clip clip to the range. clip mode means that all indices that are
'clip' clip to the range. 'clip' mode means that all indices that are
too large are replaced by the index that addresses the last element
along that axis. Note that this disables indexing with negative numbers.
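A quick sketch of the 'clip' behaviour just described:

    import mindspore.numpy as mnp

    a = mnp.asarray([4, 3, 5, 7, 6, 8])
    mnp.take(a, mnp.asarray([0, 1, 9]), mode='clip')   # [4 3 8]: index 9 clips to 5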
@ -2097,7 +2097,7 @@ def _get_grid(shape):
def choose(a, choices, mode='clip'):
"""
Construct an array from an index array and a list of arrays to choose from.
Given an index array `a` of integers and a sequence of n arrays (choices),
Given an "index" array `a` of integers and a sequence of n arrays (choices),
`a` and each choice array are first broadcast, as necessary, to arrays of a
common shape; calling these `Ba` and `Bchoices[i], i = 0, ..., n-1` we have that,
necessarily, ``Ba.shape == Bchoices[i].shape`` for each `i`. Then, a new array
@ -2129,15 +2129,15 @@ def choose(a, choices, mode='clip'):
choices (sequence of arrays): Choice arrays. `a` and all of the `choices` must
be broadcastable to the same shape. If `choices` is itself an array, then
its outermost dimension (i.e., the one corresponding to ``choices.shape[0]``)
is taken as defining the sequence.
mode (raise, wrap, clip, optional): Specifies how indices outside
is taken as defining the "sequence".
mode ('raise', 'wrap', 'clip', optional): Specifies how indices outside
``[0, n-1]`` will be treated:
raise raise an error;
'raise' raise an error;
wrap wrap around;
'wrap' wrap around;
clip clip to the range. clip mode means that all indices that are
'clip' clip to the range. 'clip' mode means that all indices that are
too large are replaced by the index that addresses the last element
along that axis. Note that this disables indexing with negative numbers.
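The semantics above, on the classic four-choice example (sketch):

    import mindspore.numpy as mnp

    choices = mnp.asarray([[0, 1, 2, 3], [10, 11, 12, 13],
                           [20, 21, 22, 23], [30, 31, 32, 33]])
    mnp.choose(mnp.asarray([2, 3, 1, 0]), choices)   # [20 31 12  3]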

@ -755,7 +755,7 @@ def array_equal(a1, a2, equal_nan=False):
Args:
a1/a2 (Union[int, float, bool, list, tuple, Tensor]): Input arrays.
equal_nan (bool): Whether to compare NaNs as equal.
equal_nan (bool): Whether to compare NaN's as equal.
Returns:
Scalar bool tensor, value is `True` if inputs are equal, `False` otherwise.
@ -878,7 +878,7 @@ def sometrue(a, axis=None, keepdims=False):
If True, the axes which are reduced are left in the result as dimensions with size one.
With this option, the result will broadcast correctly against the input array.
If the default value is passed, then keepdims will not be passed through to the any method of
sub-classes of ndarray, however any non-default value will be. If the sub-class method does not
sub-classes of ndarray, however any non-default value will be. If the sub-class method does not
implement keepdims any exceptions will be raised.
Returns:

@ -368,7 +368,7 @@ def divide(x1, x2, dtype=None):
"""
Returns a true division of the inputs, element-wise.
Instead of the Python traditional floor division, this returns a true
Instead of the Python traditional "floor division", this returns a true
division.
Note:
@ -408,7 +408,7 @@ def true_divide(x1, x2, dtype=None):
"""
Returns a true division of the inputs, element-wise.
Instead of the Python traditional floor division, this returns a true
Instead of the Python traditional "floor division", this returns a true
division.
Note:
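The distinction in one line (sketch):

    import mindspore.numpy as mnp

    mnp.true_divide(mnp.asarray([5, 7]), 2)   # [2.5 3.5], not the floored [2 3]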
@ -814,7 +814,7 @@ def tensordot(a, b, axes=2):
Computes tensor dot product along specified axes.
Given two tensors, `a` and `b`, and an array_like object containing two array_like
objects, `(a_axes, b_axes)`, sum the products of `a`s and `b`s elements (components)
objects, `(a_axes, b_axes)`, sum the products of `a`'s and `b`'s elements (components)
over the axes specified by `a_axes` and `b_axes`. The third argument can be a single
non-negative integer_like scalar, `N`; if it is such, then the last `N` dimensions of
`a` and the first `N` dimensions of `b` are summed over.
@ -841,7 +841,7 @@ def tensordot(a, b, axes=2):
Args:
a (Tensor): Tensor to "dot".
b (Tensor): Tensor to dot.
b (Tensor): Tensor to "dot".
axes (int or sequence of ints):
integer_like: If an int `N`, sum over the last `N` axes of `a` and the first `N`
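Concretely, with ``axes=2`` (sketch; the shape follows from the contraction rule above):

    import mindspore.numpy as mnp

    a = mnp.ones((3, 4, 5))
    b = mnp.ones((4, 5, 2))
    mnp.tensordot(a, b, axes=2).shape   # (3, 2): the last/first (4, 5) axes are summed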
@ -930,7 +930,7 @@ def var(x, axis=None, ddof=0, keepdims=False):
keepdims (bool): If this is set to True, the axes which are reduced are left in the result as
dimensions with size one. With this option, the result will broadcast correctly against the input array.
If the default value is passed, then keepdims will not be passed through to the var method of
sub-classes of tensor, however any non-default value will be. If the sub-class method does not
sub-classes of tensor, however any non-default value will be. If the sub-class method does not
implement keepdims any exceptions will be raised. Default: `False`.
Supported Platforms:
@ -953,7 +953,7 @@ def var(x, axis=None, ddof=0, keepdims=False):
def ptp(x, axis=None, keepdims=False):
"""
Range of values (maximum - minimum) along an axis.
The name of the function comes from the acronym for peak to peak.
The name of the function comes from the acronym for "peak to peak".
Note:
Numpy arguments `dtype` and `out` are not supported.
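For example (sketch):

    import mindspore.numpy as mnp

    x = mnp.asarray([[4., 9., 2., 10.], [6., 9., 7., 12.]])
    mnp.ptp(x, axis=1)   # [8. 6.]: per-row maximum minus minimum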
@ -1487,7 +1487,7 @@ def amin(a, axis=None, keepdims=False, initial=None, where=True):
def hypot(x1, x2, dtype=None):
"""
Given the legs of a right triangle, returns its hypotenuse.
Given the "legs" of a right triangle, returns its hypotenuse.
Equivalent to ``sqrt(x1**2 + x2**2)``, element-wise. If `x1` or `x2` is scalar_like
(i.e., unambiguously cast-able to a scalar type), it is broadcast for use
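E.g. (sketch):

    import mindspore.numpy as mnp

    mnp.hypot(mnp.asarray(3.), mnp.asarray(4.))   # 5.0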
@ -2706,7 +2706,7 @@ def nanvar(a, axis=None, dtype=None, ddof=0, keepdims=False):
computed. The default is to compute the variance of the flattened array.
dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
output Tensor.
ddof (int, optional): Delta Degrees of Freedom: the divisor used in the calculation is
ddof (int, optional): "Delta Degrees of Freedom": the divisor used in the calculation is
``N - ddof``, where `N` represents the number of non-NaN elements. By default `ddof`
is zero.
keepdims (boolean, optional): defaults to False. If this is set to True, the axes which
@ -2761,7 +2761,7 @@ def nanstd(a, axis=None, dtype=None, ddof=0, keepdims=False):
flattened array.
dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
output Tensor.
ddof (int, optional): Delta Degrees of Freedom: the divisor used in the calculation is
ddof (int, optional): "Delta Degrees of Freedom": the divisor used in the calculation is
``N - ddof``, where `N` represents the number of non-NaN elements. By default `ddof`
is zero.
keepdims (boolean, optional): defaults to False. If this is set to True, the axes which
@ -4043,7 +4043,7 @@ def sum_(a, axis=None, dtype=None, keepdims=False, initial=None):
keepdims (bool): If this is set to True, the axes which are reduced are left in the result as
dimensions with size one. With this option, the result will broadcast correctly against the input array.
If the default value is passed, then keepdims will not be passed through to the sum method of
sub-classes of ndarray, however any non-default value will be. If the sub-class method does not
sub-classes of ndarray, however any non-default value will be. If the sub-class method does not
implement keepdims any exceptions will be raised. Default: `False`.
initial (scalar): Starting value for the sum.
@ -4286,8 +4286,8 @@ def searchsorted(a, v, side='left', sorter=None):
None, then it must be sorted in ascending order, otherwise `sorter` must be
an array of indices that sort it.
v (Union[int, float, bool, list, tuple, Tensor]): Values to insert into `a`.
side ('left', 'right', optional): If left, the index of the first suitable
location found is given. If right, return the last such index. If there is
side ('left', 'right', optional): If 'left', the index of the first suitable
location found is given. If 'right', return the last such index. If there is
no suitable index, return either 0 or N (where N is the length of `a`).
sorter (Union[int, float, bool, list, tuple, Tensor]): 1-D optional array of
integer indices that sort array `a` into ascending order. They are typically
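The 'left'/'right' semantics in brief (sketch):

    import mindspore.numpy as mnp

    a = mnp.asarray([1, 2, 3, 4, 5])
    mnp.searchsorted(a, 3, side='left')    # 2: first suitable insertion index
    mnp.searchsorted(a, 3, side='right')   # 3: last such index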
@ -5554,7 +5554,7 @@ def norm(x, ord=None, axis=None, keepdims=False): # pylint: disable=redefined-bu
`x` must be 1-D or 2-D, unless `ord` is None. If both `axis` and `ord` are None,
the 2-norm of ``x.ravel`` will be returned.
ord (Union[None, 'fro', 'nuc', inf, -inf, int, float], optional): Order of the norm.
inf means numpys inf object. The default is None.
inf means numpy's inf object. The default is None.
axis (Union[None, int, 2-tuple of ints], optional): If `axis` is an integer, it
specifies the axis of `x` along which to compute the vector norms. If `axis` is
a 2-tuple, it specifies the axes that hold 2-D matrices, and the matrix norms of
@ -5703,10 +5703,10 @@ def invert(x, dtype=None):
Computes bit-wise inversion, or bit-wise NOT, element-wise.
Computes the bit-wise NOT of the underlying binary representation of the integers in
the input arrays. This ufunc implements the C/Python operator ~.
For signed integer inputs, the twos complement is returned. In a twos-complement system
negative numbers are represented by the twos complement of the absolute value. This is
For signed integer inputs, the two's complement is returned. In a two's-complement system
negative numbers are represented by the two's complement of the absolute value. This is
the most common method of representing signed integers on computers
`[1] <https://en.wikipedia.org/wiki/Twos_complement>`_. A N-bit twos-complement system
`[1] <https://en.wikipedia.org/wiki/Two's_complement>`_. A N-bit two's-complement system
can represent every integer in the range ``-2^{N-1}`` to ``+2^{N-1}-1``.
Note:
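Two's complement in action (sketch; note that only a subset of integer dtypes may be supported per backend):

    import mindspore as ms
    import mindspore.numpy as mnp

    mnp.invert(mnp.asarray([13], dtype=ms.int16))   # [-14], i.e. ~13 under two's complement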

@ -477,7 +477,7 @@ def _check_axes_for_batch_dot(x1_shape, x2_shape, axes, prim_name=None):
f"But got 'axes': {axes}, 'x1_shape': {x1_shape}, 'x2_shape': {x2_shape}.")
elif isinstance(axes, int):
if axes == 0:
raise ValueError(f"{msg_prefix} 'axes' should not equal to 0, but got {axes}.")
raise ValueError(f"{msg_prefix} 'axes' should not be equal to 0, but got {axes}.")
if axes < 0:
axes = [axes + len(x1_shape), axes + len(x2_shape)]
validator.check_non_negative_int(axes[0], 'reversed axes', 'batch_dot')

@ -576,7 +576,7 @@ def get_stride_info_from_slice(data_shape, slice_index):
@constexpr
def get_stride_info_from_integer(data_shape, number):
"""Get stride info from a integer"""
"""Get stride info from an integer"""
begin_strides = [number]
end_strides = [number + 1]
step_strides = [1]

@ -287,8 +287,8 @@ class MatrixDiag(PrimitiveWithInfer):
Inputs:
- **x** (Tensor) - A tensor which to be element-wise multi by `assist`. It can be one of the following data
types: float32, float16, int32, int8, and uint8.
- **assist** (Tensor) - A eye tensor of the same type as `x`. It's rank must greater than or equal to 2 and
it's last dimension must equal to the second to last dimension.
- **assist** (Tensor) - An eye tensor of the same type as `x`. Its rank must be greater than or equal to 2 and
its last dimension must be equal to the second to last dimension.
Outputs:
Tensor, has the same type and shape as input `assist`.
@ -382,7 +382,7 @@ class Send(PrimitiveWithInfer):
Send tensors from src_rank to the specified dest_rank.
Note:
Send and Recveive must be used in combination and have same sr_tag.
Send and Receive must be used in combination and have the same sr_tag.
Send must be used between servers.
Args:

@ -188,7 +188,7 @@ class FakeLearnedScaleQuantPerLayer(PrimitiveWithInfer):
quant_delay (int): Quantilization delay parameter. Before delay step in training time not update
simulate quantization aware function. After delay step in training time begin simulate the aware
quantize function. Default: 0.
neg_trunc (bool): Whether the quantization algorithm uses nagetive truncation or not. Default: False.
neg_trunc (bool): Whether the quantization algorithm uses negative truncation or not. Default: False.
training (bool): Training the network or not. Default: True.
Inputs:

@ -666,7 +666,7 @@ class Squeeze(PrimitiveWithInfer):
Raises:
TypeError: If `axis` is neither an int nor tuple.
TypeError: If `axis` is a tuple whose elements are not all int.
ValueError: If the corresponding dimension of the specified axis does not equal to 1.
ValueError: If the corresponding dimension of the specified axis isn't equal to 1.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
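The corrected constraint, shown on a concrete shape (sketch):

    import mindspore.numpy as mnp
    from mindspore import ops

    x = mnp.ones((3, 2, 1))
    ops.Squeeze(2)(x).shape   # (3, 2): the size-1 axis is removed
    # ops.Squeeze(1)(x) raises ValueError, since the dimension of axis 1 is 2, not 1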
@ -5516,12 +5516,12 @@ class Meshgrid(PrimitiveWithInfer):
coordinate tensors for evaluating expressions on an N-D grid.
Args:
indexing (xy, ij, optional): Cartesian (xy, default) or
matrix (ij) indexing of output. In the 2-D case with
indexing ('xy', 'ij', optional): Cartesian ('xy', default) or
matrix ('ij') indexing of output. In the 2-D case with
inputs of length `M` and `N`, the outputs are of shape `(N, M)`
for xy indexing and `(M, N)` for ij indexing. In the 3-D
for 'xy' indexing and `(M, N)` for 'ij' indexing. In the 3-D
case with inputs of length `M`, `N` and `P`, outputs are of shape
`(N, M, P)` for xy indexing and `(M, N, P)` for ij indexing.
`(N, M, P)` for 'xy' indexing and `(M, N, P)` for 'ij' indexing.
Inputs:
- **input** (Union[tuple]) - A Tuple of N 1-D Tensor objects.
@ -6223,7 +6223,7 @@ class MaskedFill(Primitive):
class MaskedSelect(PrimitiveWithCheck):
"""
Returns a new 1-D Tensor which indexes the input tensor according to the boolean mask.
The shapes of the mask tensor and the input tensor dont need to match, but they must be broadcastable.
The shapes of the mask tensor and the input tensor don't need to match, but they must be broadcastable.
Inputs:
- **x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
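Broadcastable masks per the note above (sketch):

    import mindspore as ms
    from mindspore import Tensor, ops

    x = Tensor([1, 2, 3, 4], ms.int32)
    mask = Tensor([True, False, True, False])
    ops.MaskedSelect()(x, mask)   # [1 3]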
@ -6741,7 +6741,7 @@ class ExtractVolumePatches(Primitive):
ValueError: If one of kernel_size or strides' first two numbers is not 1.
ValueError: If padding = "VALID" and input - kernel_size is less than 0 in d, h or w dimension.
ValueError: If padding = "SAME" and :math:`padding_needed = ((input_x + strides - 1) / strides - 1) *
strides + kernelz_size - input` is less than 0 in d, h or w dimension.
strides + kernel_size - input` is less than 0 in d, h or w dimension.
ValueError: If x_h is not 1 or x_w is not 1 and x_w + padding_needed - k_w - s_w is less than 0.
ValueError: If x_d * x_h * x_w is greater than 2048.

@ -82,11 +82,11 @@ target_dtypes = (mstype.int8, mstype.int32, mstype.float16, mstype.float32)
def check_hcom_group_valid(group, prim_name=None):
"""Check if hcom group is valid."""
msg_pfefix = f"For '{prim_name}', only" if prim_name else "Only"
msg_prefix = f"For '{prim_name}', only" if prim_name else "Only"
if context.get_context("mode") == context.PYNATIVE_MODE and \
context.get_context("device_target") == "Ascend" and \
group != GlobalComm.WORLD_COMM_GROUP:
raise RuntimeError(f"{msg_pfefix} hccl_world_group is supported in Pynative mode, but got 'group': {group}.")
raise RuntimeError(f"{msg_prefix} hccl_world_group is supported in Pynative mode, but got 'group': {group}.")
class AllReduce(PrimitiveWithInfer):
@ -515,7 +515,7 @@ class Broadcast(PrimitiveWithInfer):
The contents depend on the data of the `root_rank` device.
Raises:
TypeError: If root_rank is not a integer or group is not a string.
TypeError: If root_rank is not an integer or group is not a string.
Supported Platforms:
``Ascend`` ``GPU``

@ -94,7 +94,7 @@ class _MathBinaryOp(_BinaryOp):
args_type = {"x": x_dtype, "y": y_dtype}
complex_types = [mstype.tensor_type(mstype.complex64), mstype.tensor_type(mstype.complex128)]
if x_dtype in complex_types or y_dtype in complex_types:
tpye_infer_dict = {
type_infer_dict = {
(mstype.complex64, mstype.complex64): mstype.tensor_type(mstype.complex64),
(mstype.complex64, mstype.float32): mstype.tensor_type(mstype.complex64),
(mstype.float32, mstype.complex64): mstype.tensor_type(mstype.complex64),
@ -102,12 +102,12 @@ class _MathBinaryOp(_BinaryOp):
(mstype.complex128, mstype.float64): mstype.tensor_type(mstype.complex128),
(mstype.float64, mstype.complex128): mstype.tensor_type(mstype.complex128),
}
if (x_dtype.element_type(), y_dtype.element_type()) not in tpye_infer_dict.keys():
if (x_dtype.element_type(), y_dtype.element_type()) not in type_infer_dict.keys():
raise TypeError('Complex math binary op expecting Tensor [complex64, complex64],'
+ '[complex64, float32], [float32, complex64], [complex128, complex128],'
+ '[complex128, float64], [float64, complex128],'
+ f'but got : [{format(x_dtype)},{format(y_dtype)}].')
return tpye_infer_dict.get((x_dtype.element_type(), y_dtype.element_type()))
return type_infer_dict.get((x_dtype.element_type(), y_dtype.element_type()))
validator.check_tensors_dtypes_same_and_valid(args_type, valid_dtype, prim_name)
return x_dtype
@ -225,7 +225,7 @@ class Add(_MathBinaryOp):
>>> print(output)
[5. 6. 7.]
>>> # the data type of x is int32, the data type of y is float32,
>>> # and the output is the data format of higher precision flost32.
>>> # and the output is the data format of higher precision float32.
>>> print(output.dtype)
Float32
"""
@ -280,7 +280,7 @@ class AssignAdd(PrimitiveWithInfer):
Inputs:
- **variable** (Parameter) - The `Parameter`.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
- **value** (Union[numbers.Number, Tensor]) - The value to be added to the `variable`.
It must have the same shape as `variable` if it is a Tensor.
it is recommended to use the same data type when using this operator.
@ -351,7 +351,7 @@ class AssignSub(PrimitiveWithInfer):
Inputs:
- **variable** (Parameter) - The `Parameter`.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
- **value** (Union[numbers.Number, Tensor]) - The value to be subtracted from the `variable`.
It must have the same shape as `variable` if it is a Tensor.
it is recommended to use the same data type when using this operator.
@ -516,7 +516,7 @@ class ReduceMean(_Reduce):
Inputs:
- **x** (Tensor[Number]) - The input tensor. The dtype of the tensor to be reduced is number.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
- **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
Only constant value is allowed. Must be in the range [-rank(`x`), rank(`x`)).
@ -593,7 +593,7 @@ class ReduceSum(_Reduce):
Inputs:
- **x** (Tensor[Number]) - The input tensor. The dtype of the tensor to be reduced is number.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
- **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
Only constant value is allowed. Must be in the range [-rank(`x`), rank(`x`)).
@ -676,7 +676,7 @@ class ReduceAll(_Reduce):
Inputs:
- **x** (Tensor[bool]) - The input tensor. The dtype of the tensor to be reduced is bool.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
- **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
Only constant value is allowed. Must be in the range [-rank(x), rank(x)).
@ -735,7 +735,7 @@ class ReduceAny(_Reduce):
Inputs:
- **x** (Tensor[bool]) - The input tensor. The dtype of the tensor to be reduced is bool.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
- **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
Only constant value is allowed. Must be in the range [-rank(x), rank(x)).
@ -794,7 +794,7 @@ class ReduceMax(_Reduce):
Inputs:
- **x** (Tensor[Number]) - The input tensor. The dtype of the tensor to be reduced is number.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
- **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
Only constant value is allowed. Must be in the range [-rank(x), rank(x)).
@ -881,7 +881,7 @@ class ReduceMin(_Reduce):
Inputs:
- **x** (Tensor[Number]) - The input tensor. The dtype of the tensor to be reduced is number.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
- **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
Only constant value is allowed. Must be in the range [-rank(x), rank(x)).
@ -959,7 +959,7 @@ class ReduceProd(_Reduce):
Inputs:
- **x** (Tensor[Number]) - The input tensor. The dtype of the tensor to be reduced is number.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
- **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
Only constant value is allowed. Must be in the range [-rank(x), rank(x)).
@ -1038,7 +1038,7 @@ class CumProd(PrimitiveWithInfer):
Inputs:
- **x** (Tensor[Number]) - The input tensor.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
- **axis** (int) - The dimensions to compute the cumulative product.
Only constant value is allowed.
@ -1438,7 +1438,7 @@ class AddN(Primitive):
return True, inputs[0]
raise TypeError(f"For '{self.name}', the type of 'inputs[0]' should be a tensor, but "
f"got {type(inputs[0]).__name__}, "
f"or the length of 'inputs' should not equal to 1, but got ({len(inputs)}).")
f"or the length of 'inputs' should not be equal to 1, but got ({len(inputs)}).")
class AccumulateNV2(PrimitiveWithInfer):
@ -1494,7 +1494,7 @@ class AccumulateNV2(PrimitiveWithInfer):
return True, inputs[0]
raise TypeError(f"For '{self.name}', the type of 'inputs[0]' should be a tensor, "
f"but got {type(inputs[0]).__name__}, "
f"or the length of 'inputs' should not equal to 1, but got ({len(inputs)}).")
f"or the length of 'inputs' should not be equal to 1, but got ({len(inputs)}).")
def infer_shape(self, inputs):
cls_name = self.name
@ -1526,7 +1526,7 @@ class Neg(PrimitiveWithInfer):
Inputs:
- **x** (Tensor) - The input tensor whose dtype is number.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
Outputs:
Tensor, has the same shape and dtype as input.
@ -1576,7 +1576,7 @@ class InplaceAdd(PrimitiveWithInfer):
Inputs:
- **x** (Tensor) - The first input is a tensor whose data type is float16, float32 or int32.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
- **input_v** (Tensor) - The second input is a tensor that has the same dimension sizes as x except
the first dimension, which must be the same as indices' size. It has the same data type with `x`.
@ -1645,7 +1645,7 @@ class InplaceSub(PrimitiveWithInfer):
Inputs:
- **x** (Tensor) - The first input is a tensor whose data type is float16, float32 or int32.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
- **input_v** (Tensor) - The second input is a tensor who has the same dimension sizes as x except
the first dimension, which must be the same as indices' size. It has the same data type with `x`.
@ -1860,7 +1860,7 @@ class Square(Primitive):
Inputs:
- **x** (Tensor) - The input tensor whose dtype is number.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
Outputs:
Tensor, has the same shape and dtype as the `x`.
@ -1895,7 +1895,7 @@ class Rsqrt(PrimitiveWithInfer):
Inputs:
- **x** (Tensor) - The input of Rsqrt. Each element must be a non-negative number.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
Outputs:
Tensor, has the same type and shape as `x`.
@ -1947,7 +1947,7 @@ class Sqrt(PrimitiveWithCheck):
Inputs:
- **x** (Tensor) - The input tensor whose dtype is number.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
Outputs:
Tensor, has the same shape and data type as the `x`.
@ -1994,7 +1994,7 @@ class Reciprocal(PrimitiveWithInfer):
Inputs:
- **x** (Tensor) - The input tensor.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
Outputs:
Tensor, has the same shape as the `x`.
@ -2106,7 +2106,7 @@ class Exp(PrimitiveWithInfer):
Inputs:
- **x** (Tensor) - The input tensor.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
Outputs:
Tensor, has the same shape and dtype as the `x`.
@ -2156,7 +2156,7 @@ class Expm1(PrimitiveWithInfer):
Inputs:
- **x** (Tensor) - The input tensor. With float16 or float32 data type.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
Outputs:
Tensor, has the same shape as the `x`.
@ -2256,7 +2256,7 @@ class Log(PrimitiveWithInfer):
Inputs:
- **x** (Tensor) - The input tensor. The value must be greater than 0.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
Outputs:
Tensor, has the same shape as the `x`.
@ -2306,7 +2306,7 @@ class Log1p(Primitive):
Inputs:
- **x** (Tensor) - The input tensor. With float16 or float32 data type.
The value must be greater than -1.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
Outputs:
Tensor, has the same shape as the `x`.
@ -2341,7 +2341,7 @@ class Erf(PrimitiveWithInfer):
Inputs:
- **x** (Tensor) - The input tensor. The data type must be float16 or float32.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
Outputs:
Tensor, has the same shape and dtype as the `x`.
@ -2383,7 +2383,7 @@ class Erfc(PrimitiveWithInfer):
Inputs:
- **x** (Tensor) - The input tensor. The data type must be float16 or float32.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
Outputs:
Tensor, has the same shape and dtype as the `x`.
@ -2720,9 +2720,9 @@ class MulNoNan(_MathBinaryOp):
Inputs:
- **x** (Union[Tensor]) - The first input is a tensor whose data type is one of
flota16, float32, int32, int64 currently or scalar.
float16, float32, int32, int64 currently or scalar.
- **y** (Union[Tensor]) - The second input is a tensor whose data type is one of
flota16, float32, int32, int64 currently or scalar.
float16, float32, int32, int64 currently or scalar.
Outputs:
Tensor, the shape is the same as the shape after broadcasting,
@ -2969,7 +2969,7 @@ class Floor(PrimitiveWithInfer):
Inputs:
- **x** (Tensor) - The input tensor. Its element data type must be float.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
Outputs:
Tensor, has the same shape as `x`.
@ -3062,7 +3062,7 @@ class Ceil(PrimitiveWithInfer):
Inputs:
- **x** (Tensor) - The input tensor. It's element data type must be float16 or float32.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
Outputs:
Tensor, has the same shape as `x`.
@ -3194,7 +3194,7 @@ class Acosh(PrimitiveWithInfer):
Inputs:
- **x** (Tensor) - The data type should be one of the following types: float16, float32.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
Outputs:
Tensor, has the same shape and type as `x`.
@ -3238,7 +3238,7 @@ class Cosh(PrimitiveWithInfer):
Inputs:
- **x** (Tensor) - The shape of tensor is
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
Outputs:
Tensor, has the same shape as `x`.
@ -3279,7 +3279,7 @@ class Asinh(PrimitiveWithInfer):
Inputs:
- **x** (Tensor) - The shape of tensor is
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
The data type should be one of the following types: float16, float32.
Outputs:
@ -3321,7 +3321,7 @@ class Sinh(PrimitiveWithInfer):
Inputs:
- **x** (Tensor) - The shape of tensor is
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
Outputs:
Tensor, has the same shape as `x`.
@ -3451,7 +3451,7 @@ class ApproximateEqual(_LogicBinaryOp):
Inputs:
- **x** (Tensor) - A tensor. Must be one of the following types: float32, float16.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
- **y** (Tensor) - A tensor of the same type and shape as 'x'.
Outputs:
@ -4219,7 +4219,7 @@ class NPUGetFloatStatus(PrimitiveWithInfer):
Inputs:
- **x** (Tensor) - The output tensor of `NPUAllocFloatStatus`.
The data type must be float16 or float32.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should less than 8.
:math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
Outputs:
Tensor, has the same shape as `x`. All the elements in the tensor will be zero.

@ -35,7 +35,7 @@ class Assign(Primitive):
Inputs:
- **variable** (Parameter) - The `Parameter`. :math:`(N,*)` where :math:`*` means,
any number of additional dimensions, its rank should less than 8.
any number of additional dimensions, its rank should be less than 8.
- **value** (Tensor) - The value to be assigned, has the same shape with `variable`.
Outputs:

@ -116,7 +116,7 @@ class Evolution(PrimitiveWithInfer):
gate_obj_qubits, gate_ctrl_qubits, gate_params_names,
gate_coeff, gate_requires_grad, hams_pauli_coeff,
hams_pauli_word, hams_pauli_qubit):
"""Initialize Evolutino"""
"""Initialize Evolution"""
self.init_prim_io_names(inputs=['param_data'], outputs=['state'])
self.n_qubits = n_qubits

@ -30,7 +30,7 @@ class BufferSample(PrimitiveWithInfer):
Returns the tuple tensor with the given shape, decided by the given batchsize.
.. warning::
This is an experiental prototype that is subject to change and/or deletion.
This is an experimental prototype that is subject to change and/or deletion.
Args:
capacity (int64): Capacity of the buffer, must be non-negative.
@ -45,7 +45,7 @@ class BufferSample(PrimitiveWithInfer):
Inputs:
- **data** (tuple(Parameter(Tensor))) - The tuple(Tensor) represents replaybuffer,
each tensor is described by the `buffer_shape` and `buffer_type`.
- **count** (Parameter) - The count mean the real available size of the buffer,
- **count** (Parameter) - The count means the real available size of the buffer,
data type: int32.
- **head** (Parameter) - The position of the first data in buffer, data type: int32.
@ -142,7 +142,7 @@ class BufferAppend(PrimitiveWithInfer):
push data to the bottom of buffer under the First-In-First-Out rule.
.. warning::
This is an experiental prototype that is subject to change and/or deletion.
This is an experimental prototype that is subject to change and/or deletion.
Args:
capacity (int64): Capacity of the buffer, must be non-negative.
@ -152,9 +152,9 @@ class BufferAppend(PrimitiveWithInfer):
Inputs:
- **data** (tuple(Parameter(Tensor))) - The tuple(Tensor) represents replaybuffer,
each tensor is described by the `buffer_shape` and `buffer_type`.
- **exp** (tuple(Parameter(Tensor))) - The tuple(Tensor) represents one list of experince data,
- **exp** (tuple(Parameter(Tensor))) - The tuple(Tensor) represents one list of experience data,
each tensor is described by the `buffer_shape` and `buffer_type`.
- **count** (Parameter) - The count mean the real available size of the buffer,
- **count** (Parameter) - The count means the real available size of the buffer,
data type: int32.
- **head** (Parameter) - The position of the first data in buffer, data type: int32.
@ -162,11 +162,11 @@ class BufferAppend(PrimitiveWithInfer):
None.
Raises:
ValueError: If `count` and `head` is not a integer.
ValueError: If `count` and `head` is not an integer.
ValueError: If `capacity` is not a positive integer.
ValueError: If length of `data` not equal to length of `exp`.
ValueError: If dim of data euqals to dim of exp, but `data[1:]` not equal to the shape in `exp`.
ValueError: If the shape of `data[1:]` not equal to the shape in `exp`.
ValueError: If length of `data` is not equal to length of `exp`.
ValueError: If dim of data is equal to dim of exp, but `data[1:]` is not equal to the shape in `exp`.
ValueError: If the shape of `data[1:]` is not equal to the shape in `exp`.
TypeError: If the type in `exp` is not the same with `data`.
Supported Platforms:
@ -211,7 +211,7 @@ class BufferAppend(PrimitiveWithInfer):
exp_batch = exp_shape[0][0]
for i in range(len(data_shape)):
if len(data_shape[i]) != len(exp_shape[i]):
raise ValueError(f"For '{self.name}', the dimension of {i}th 'exp_shape' must equal to "
raise ValueError(f"For '{self.name}', the dimension of {i}th 'exp_shape' must be equal to "
f"the dimension of {i}th 'data_shape', but got the {i}th 'exp_shape': "
f"{exp_shape[i]}, the {i}th 'data_shape': {data_shape[i]}.")
if data_shape[i][0] < exp_shape[i][0]:
@ -221,7 +221,7 @@ class BufferAppend(PrimitiveWithInfer):
else:
for i in range(len(data_shape)):
if data_shape[i][1:] != exp_shape[i]:
raise ValueError(f"For '{self.name}', the {i}th 'exp_shape' must equal to the {i}th 'data_shape'"
raise ValueError(f"For '{self.name}', the {i}th 'exp_shape' must be equal to the {i}th 'data_shape'"
f"which excepts the first dimension. but got the {i}th 'exp_shape': "
f"{exp_shape[i]}, the {i}th 'data_shape': {data_shape[i]}.")
self.add_prim_attr('exp_batch', exp_batch)
@ -239,10 +239,10 @@ class BufferAppend(PrimitiveWithInfer):
class BufferGetItem(PrimitiveWithInfer):
r"""
Get the data from buffer in the position of input inedx.
Get the data from buffer in the position of input index.
.. warning::
This is an experiental prototype that is subject to change and/or deletion.
This is an experimental prototype that is subject to change and/or deletion.
Args:
capacity (int64): Capacity of the buffer, must be non-negative.
@ -252,7 +252,7 @@ class BufferGetItem(PrimitiveWithInfer):
Inputs:
- **data** (tuple(Parameter(Tensor))) - The tuple(Tensor) represents replaybuffer,
each tensor is described by the `buffer_shape` and `buffer_type`.
- **count** (Parameter) - The count mean the real available size of the buffer,
- **count** (Parameter) - The count means the real available size of the buffer,
data type: int32.
- **head** (Parameter) - The position of the first data in buffer, data type: int32.
- **index** (int64) - The position of the data in buffer.
@ -261,7 +261,7 @@ class BufferGetItem(PrimitiveWithInfer):
tuple(Tensor). The shape is `buffer_shape`. The dtype is `buffer_dtype`.
Raises:
ValueError: If `count` and `head` is not a integer.
ValueError: If `count` and `head` is not an integer.
ValueError: If `capacity` is not a positive integer.
TypeError: If `buffer_shape` is not a tuple.

@ -683,7 +683,7 @@ class DihedralAtomEnergy(PrimitiveWithInfer):
The data type is int32 and the shape is :math:`(m,)`.
- **atom_b** (Tensor) - The 2nd atom index of each dihedral.
The data type is int32 and the shape is :math:`(m,)`.
- **atom_c** (Tenso) - The 3rd atom index of each dihedral.
- **atom_c** (Tensor) - The 3rd atom index of each dihedral.
The data type is int32 and the shape is :math:`(m,)`.
- **atom_d** (Tensor) - The 4th atom index of each dihedral.
4 atoms are connected in the form a-b-c-d. The data type is int32 and the shape is :math:`(m,)`.
@ -786,7 +786,7 @@ class DihedralForceWithAtomEnergy(PrimitiveWithInfer):
The data type is int32 and the shape is :math:`(m,)`.
- **atom_b** (Tensor) - The 2nd atom index of each dihedral.
The data type is int32 and the shape is :math:`(m,)`.
- **atom_c** (Tenso) - The 3rd atom index of each dihedral.
- **atom_c** (Tensor) - The 3rd atom index of each dihedral.
The data type is int32 and the shape is :math:`(m,)`.
- **atom_d** (Tensor) - The 4th atom index of each dihedral.
4 atoms are connected in the form a-b-c-d. The data type is int32 and the shape is :math:`(m,)`.
@ -1263,7 +1263,7 @@ class Dihedral14LJForce(PrimitiveWithInfer):
The data type is float32 and the shape is :math:`(m,)`.
- **LJ_type_A** (Tensor) - The A parameter in Lennard-Jones scheme of each atom pair type.
q is the number of atom pair. The data type is float32 and the shape is :math:`(q,)`.
- **LJ_type_B** (Tensor) - The B parameter in Lennard-Jones shceme of each atom pair type.
- **LJ_type_B** (Tensor) - The B parameter in Lennard-Jones scheme of each atom pair type.
q is the number of atom pair. The data type is float32 and the shape is :math:`(q,)`.
Outputs:
@ -1366,7 +1366,7 @@ class Dihedral14LJEnergy(PrimitiveWithInfer):
The data type is float32 and the shape is :math:`(m,)`.
- **LJ_type_A** (Tensor) - The A parameter in Lennard-Jones scheme of each atom pair type.
q is the number of atom pair. The data type is float32 and the shape is :math:`(q,)`.
- **LJ_type_B** (Tensor) - The B parameter in Lennard-Jones shceme of each atom pair type.
- **LJ_type_B** (Tensor) - The B parameter in Lennard-Jones scheme of each atom pair type.
q is the number of atom pair. The data type is float32 and the shape is :math:`(q,)`.
Outputs:
@ -1585,7 +1585,7 @@ class Dihedral14LJCFForceWithAtomEnergy(PrimitiveWithInfer):
The data type is float32 and the shape is :math:`(m,)`.
- **LJ_type_A** (Tensor) - The A parameter in Lennard-Jones scheme of each atom pair type.
q is the number of atom pair. The data type is float32 and the shape is :math:`(q,)`.
- **LJ_type_B** (Tensor) - The B parameter in Lennard-Jones shceme of each atom pair type.
- **LJ_type_B** (Tensor) - The B parameter in Lennard-Jones scheme of each atom pair type.
q is the number of atom pair. The data type is float32 and the shape is :math:`(q,)`.
Outputs:
@ -1694,7 +1694,7 @@ class Dihedral14LJAtomEnergy(PrimitiveWithInfer):
The data type is float32 and the shape is :math:`(m,)`.
- **LJ_type_A** (Tensor) - The A parameter in Lennard-Jones scheme of each atom pair type.
q is the number of atom pair. The data type is float32 and the shape is :math:`(q,)`.
- **LJ_type_B** (Tensor) - The B parameter in Lennard-Jones shceme of each atom pair type.
- **LJ_type_B** (Tensor) - The B parameter in Lennard-Jones scheme of each atom pair type.
q is the number of atom pair. The data type is float32 and the shape is :math:`(q,)`.
Outputs:
@ -2640,7 +2640,7 @@ class MDIterationLeapFrogWithRF(PrimitiveWithInfer):
exp(-gamma_ln * dt), where gamma_ln is the friction factor in Langevin
dynamics. The data type is float32.
- **max_velocity** (Scalar) - The upper limit of velocity, when the
veclocity overflows, scale it to the upper limit. The data type is float32.
velocity overflows, scale it to the upper limit. The data type is float32.
- **is_max_velocity** (Scalar) - whether the max velocity control is
open or not. The data type is int32.
- **mass_inverse** (Tensor) - The inverse value of
@ -3077,7 +3077,7 @@ class NeighborListUpdate(PrimitiveWithInfer):
excluded_atom_numbers(int32): the total atom numbers in the excluded list.
cutoff(float32): the cutoff distance for short-range force calculation.
skin(float32): the overflow value of cutoff to maintain a neighbor list.
cutoff_square(float32): the suqare value of cutoff.
cutoff_square(float32): the square value of cutoff.
half_skin_square(float32): skin*skin/4, indicates the maximum
square value of the distance atom allowed to move between two updates.
cutoff_with_skin(float32): cutoff + skin, indicates the

@ -179,7 +179,7 @@ class ConstrainForceCycleWithVirial(PrimitiveWithInfer):
class LastCrdToDr(PrimitiveWithInfer):
"""
Calculate the diplacement vector of each constrained atom pair.
Calculate the displacement vector of each constrained atom pair.
.. warning::
This is an experimental prototype that is subject to change and/or deletion.
@ -279,7 +279,7 @@ class RefreshCrdVel(PrimitiveWithInfer):
The data type is float32 and the shape is :math:`(n, 3)`.
- **vel** (Tensor) - The velocity of each atom.
The data type is float32 and the shape is :math:`(n, 3)`.
- **test_frc** (Tensor) - The constraint force calculated in the last oteration.
- **test_frc** (Tensor) - The constraint force calculated in the last iteration.
The data type is float32 and the shape is :math:`(n, 3)`.
- **mass_inverse** (Tensor) - The inverse value of mass of each atom.
The data type is float32 and the shape is :math:`(n,)`.
@ -1903,7 +1903,7 @@ class Dihedral14ForceWithAtomEnergyVirial(PrimitiveWithInfer):
The data type is float32 and the shape is :math:`(m,)`.
- **LJ_type_A** (Tensor) - The A parameter in Lennard-Jones scheme of each atom pair type.
The number of atom pair is q. The data type is float32 and the shape is :math:`(q,)`.
- **LJ_type_B** (Tensor) - The B parameter in Lennard-Jones shceme of each atom pair type.
- **LJ_type_B** (Tensor) - The B parameter in Lennard-Jones scheme of each atom pair type.
The number of atom pair is q. The data type is float32 and the shape is :math:`(q,)`.
Outputs:

@ -76,7 +76,7 @@ class MoE(Cell):
param_init_type (dtype.Number): The parameter initialization type. Can be dtype.float32 or dtype.float16.
moe_config(MoEConfig): The configuration of MoE (Mixture of Expert).
parallel_config(OpParallelConfig): The config of parallel setting, see `OpParallelConfig`.
Default `default_dpmp_config`, a instance of `OpParallelConfig` with default
Default `default_dpmp_config`, an instance of `OpParallelConfig` with default
args.
Inputs:

@ -15,7 +15,7 @@
"""
High-Level training interfaces.
Helper functions in train piplines.
Helper functions in train pipelines.
"""
from .model import Model
from .dataset_helper import DatasetHelper, connect_network_with_dataset

@ -128,7 +128,7 @@ def _construct_tensor_list(types, shapes, batch_expand_num=1):
List, list of Tensors.
"""
if len(types) != len(shapes):
raise ValueError("The length of dataset types must equal to dataset shapes, "
raise ValueError("The length of dataset types must be equal to dataset shapes, "
"but got dataset types={} and dataset shapes={}".format(types, shapes))
tensor_list = []
for type_, shape in zip(types, shapes):

@ -207,7 +207,7 @@ def save_checkpoint(save_obj, ckpt_file_name, integrated_save=True,
be parameter or Tensor).
ckpt_file_name (str): Checkpoint file name. If the file name already exists, it will be overwritten.
integrated_save (bool): Whether to integrated save in automatic model parallel scene. Default: True
async_save (bool): Whether to open a independent thread to save the checkpoint file. Default: False
async_save (bool): Whether to open an independent thread to save the checkpoint file. Default: False
append_dict (dict): Additional information that needs to be saved. The key of dict must be str,
the value of dict must be one of int float and bool. Default: None
enc_key (Union[None, bytes]): Byte type key used for encryption. If the value is None, the encryption
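A hedged usage sketch (`net` is a hypothetical Cell; argument names as documented above):

    from mindspore.train.serialization import save_checkpoint

    # async_save=True hands the file write to an independent thread;
    # append_dict values must be int, float, or bool.
    save_checkpoint(net, "lenet.ckpt", async_save=True,
                    append_dict={"epoch": 10, "best_acc": 0.98})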

@ -60,7 +60,7 @@ def mindspore_test(verification_pipeline):
elif issubclass(component, IERPolicyComponent):
er_policy_components.append(component)
else:
raise Exception(f'{component} is not a instance of {IComponent}')
raise Exception(f'{component} is not an instance of {IComponent}')
for component in facade_components:
fc = component(verification_set)

@ -55,4 +55,4 @@ def test_lenet5_exception():
net = train_step_with_loss_warp(LeNet5())
with pytest.raises(RuntimeError) as info:
_cell_graph_executor.compile(net, predict, label)
assert "x_shape[C_in] / group must equal to w_shape[C_in] = " in str(info.value)
assert "x_shape[C_in] / group must be equal to w_shape[C_in]: " in str(info.value)