diff --git a/mindspore/_checkparam.py b/mindspore/_checkparam.py
index 6f6a50c360e..323ed34aeb5 100644
--- a/mindspore/_checkparam.py
+++ b/mindspore/_checkparam.py
@@ -131,7 +131,7 @@ def check_number(arg_value, value, rel, arg_type=int, arg_name=None, prim_name=N
     """
     Check argument integer.
 
-    Usage:
+    Example:
     - number = check_int(number, 0, Rel.GE, "number", None) # number >= 0
     """
     rel_fn = Rel.get_fns(rel)
diff --git a/mindspore/_extends/graph_kernel/expanders/gelu.py b/mindspore/_extends/graph_kernel/expanders/gelu.py
index acf88d0bfe2..86be18aed97 100644
--- a/mindspore/_extends/graph_kernel/expanders/gelu.py
+++ b/mindspore/_extends/graph_kernel/expanders/gelu.py
@@ -24,8 +24,8 @@ HALF = 0.5
 def expand_gelu(expand_info):
     """Gelu expander"""
     # cal formula are:
-    # gelu(x) = 0.5 * x * (1.0 + tanh(y))
-    # y = sqrt(2.0 / pi) * (x + 0.044715 * x * x * x)
+    # gelu(x) is 0.5 * x * (1.0 + tanh(y))
+    # y is sqrt(2.0 / pi) * (x + 0.044715 * x * x * x)
     # get op info.
     input_desc = expand_info['input_desc'][0]
diff --git a/mindspore/_extends/graph_kernel/expanders/gelu_grad.py b/mindspore/_extends/graph_kernel/expanders/gelu_grad.py
index 48b4188bd41..5e0647634f0 100644
--- a/mindspore/_extends/graph_kernel/expanders/gelu_grad.py
+++ b/mindspore/_extends/graph_kernel/expanders/gelu_grad.py
@@ -25,10 +25,10 @@ HALF = 0.5
 def expand_gelugrad(expand_info):
     """GeluGrad expander"""
     # cal formula are:
-    # gelu_grad(dy, x) = dy * y'
-    # y' = 0.5 * (1.0 + tanh(tanh_para)) + 0.5 * x * (1.0 - tanh(tanh_para) * tanh(para)) * mul_right
-    # tanh_para = sqrt(2.0 / pi) * (x + 0.044715 * x * x * x)
-    # mul_right = sqrt(2.0 / pi) * (1 + 3 * 0.044715 * x * x)
+    # gelu_grad(dy, x) is dy * y'
+    # y' is 0.5 * (1.0 + tanh(tanh_para)) + 0.5 * x * (1.0 - tanh(tanh_para) * tanh(para)) * mul_right
+    # tanh_para is sqrt(2.0 / pi) * (x + 0.044715 * x * x * x)
+    # mul_right is sqrt(2.0 / pi) * (1 + 3 * 0.044715 * x * x)
     # get op info.
     input_desc_0 = expand_info['input_desc'][0]
diff --git a/mindspore/_extends/graph_kernel/expanders/layernorm_grad.py b/mindspore/_extends/graph_kernel/expanders/layernorm_grad.py
index b8129083ed1..9d73fa92d2c 100644
--- a/mindspore/_extends/graph_kernel/expanders/layernorm_grad.py
+++ b/mindspore/_extends/graph_kernel/expanders/layernorm_grad.py
@@ -66,8 +66,6 @@ def expand_layernormgrad(expand_info):
         mean_cof = graph_builder.value(x.dtype, (1.0 / reduce_size), x.data_format)
         # cal dg db
-        # dg = np.sum(dy * np.power(var + epsilon, -0.5) * (x - mean), axis=tuple(param_axis), keepdims=True)
-        # db = np.sum(dy, axis=tuple(param_axis), keepdims=True)
         var_eps = graph_builder.emit('TensorAdd', [variance, eps])
         sqrt_var_eps = graph_builder.emit('Sqrt', [var_eps])
         rsqrt_var_eps = graph_builder.emit('RealDiv', [const_one, sqrt_var_eps])
@@ -78,8 +76,6 @@ def expand_layernormgrad(expand_info):
         db = graph_builder.emit('ReduceSum', [dy], attrs={'reduce_axis': param_axis, 'keep_dims': False})
         # cal sum_1
-        # sum1 = np.sum((-0.5) * dy * gamma * (x - mean) * np.power(var + epsilon, -1.5), axis=tuple(norm_axis),
-        #              keepdims=True)
         tmp_var_eps = graph_builder.emit('Mul', [sqrt_var_eps, var_eps])
         r_tmp_var_eps = graph_builder.emit('RealDiv', [const_one, tmp_var_eps])
         x_sub_mean_mul_r_tmp_var_eps = graph_builder.emit('Mul', [x_sub_mean, r_tmp_var_eps])
@@ -89,18 +85,13 @@ def expand_layernormgrad(expand_info):
         sum_1 = graph_builder.emit('ReduceSum', [sum_1_mul], attrs={'reduce_axis': norm_axis, 'keep_dims': True})
         # cal sum_2
-        # sum2 = np.sum(dy * gamma, axis=tuple(norm_axis), keepdims=True)
         sum_2 = graph_builder.emit('ReduceSum', [dy_mul_gamma], attrs={'reduce_axis': norm_axis, 'keep_dims': True})
         # cal sum_3
-        # sum3 = np.sum(-2.0 * (x - mean), axis=tuple(norm_axis), keepdims=True)
         sum_3_mul = graph_builder.emit('Mul', [const_neg_two, x_sub_mean])
         sum_3 = graph_builder.emit('ReduceSum', [sum_3_mul], attrs={'reduce_axis': norm_axis, 'keep_dims': True})
         # cal dx = dx1 + dx2 + dx3
-        # dx1 = dy * gamma * rsqrt_var_eps
-        # dx2 = sum1 * 2.0 / mean_cof * x_sub_mean
-        # dx3 = (1.0 / mean_cof) * (-1.0 * rsqrt_var_eps * sum2 + 1.0 / mean_cof * sum1 * sum3)
         dx_1 = graph_builder.emit('Mul', [dy_mul_gamma, rsqrt_var_eps])
         sum_1_mul_two = graph_builder.emit('Mul', [sum_1, const_two])
         sum_1_mul_two_tmp = graph_builder.emit('Mul', [sum_1_mul_two, mean_cof])
diff --git a/mindspore/_extends/graph_kernel/expanders/reduce_mean.py b/mindspore/_extends/graph_kernel/expanders/reduce_mean.py
index 932e59d4129..3e9fd6b5e3b 100644
--- a/mindspore/_extends/graph_kernel/expanders/reduce_mean.py
+++ b/mindspore/_extends/graph_kernel/expanders/reduce_mean.py
@@ -38,8 +38,7 @@ def expand_reducemean(expand_info):
         x_shape = input_x.shape
         graph_scope.set_input(input_x)
-        # cal reduce_mean
-        # when axis = None, reduce axis are all
+        # cal reduce_mean, when axis = None, reduce axis are all
         all_shape = 1.0
         real_axis = []
         if not axis:
diff --git a/mindspore/_extends/graph_kernel/expanders/sqrt_grad.py b/mindspore/_extends/graph_kernel/expanders/sqrt_grad.py
index e9dacaaed4a..31ef05f4945 100644
--- a/mindspore/_extends/graph_kernel/expanders/sqrt_grad.py
+++ b/mindspore/_extends/graph_kernel/expanders/sqrt_grad.py
@@ -19,7 +19,7 @@ from mindspore._extends.graph_kernel.model import model_builder as builder
 def expand_sqrtgrad(expand_info):
     """SqrtGrad expander"""
     # cal formula are:
-    # sqrt_grad(x, dout) = dout / (2 * x)
+    # sqrt_grad(x, dout) is dout / (2 * x)
     # get op info.
     input_desc_0 = expand_info['input_desc'][0]
diff --git a/mindspore/_extends/graph_kernel/expanders/tanh_grad.py b/mindspore/_extends/graph_kernel/expanders/tanh_grad.py
index b60521bb612..263c6bd7672 100644
--- a/mindspore/_extends/graph_kernel/expanders/tanh_grad.py
+++ b/mindspore/_extends/graph_kernel/expanders/tanh_grad.py
@@ -21,7 +21,6 @@ ONE = 1.0
 def expand_tanhgrad(expand_info):
     """TanhGrad expander"""
-    # tanh_grad(y, dy) = dy * (1- y * y)
     # get op info.
     input_desc_0 = expand_info['input_desc'][0]
     input_desc_1 = expand_info['input_desc'][1]
diff --git a/mindspore/ccsrc/frontend/optimizer/irpass/arithmetic_simplify.cc b/mindspore/ccsrc/frontend/optimizer/irpass/arithmetic_simplify.cc
index d5b26f84e3d..f04b3b506ce 100644
--- a/mindspore/ccsrc/frontend/optimizer/irpass/arithmetic_simplify.cc
+++ b/mindspore/ccsrc/frontend/optimizer/irpass/arithmetic_simplify.cc
@@ -156,7 +156,6 @@ AnfNodePtr AdjustAllReduceMulAdd::operator()(const OptimizerPtr &, const AnfNode
     if (x_shape != z_shape) {
       // AddN requires x_ and z_ have the same shape.
      // If broadcasting TensorAdd is supported then can use this
-      // AnfNodePtr add = NewCNode({NewValueNode(prim::kPrimTensorAdd), z_, x_}, fg);
       return nullptr;
     }
     AnfNodePtr tuple = NewCNode({make_tuple_op_node, z_, x_}, fg);
diff --git a/mindspore/ccsrc/minddata/dataset/engine/opt/pre/node_removal_pass.cc b/mindspore/ccsrc/minddata/dataset/engine/opt/pre/node_removal_pass.cc
index a969d569493..20d72ae33b5 100644
--- a/mindspore/ccsrc/minddata/dataset/engine/opt/pre/node_removal_pass.cc
+++ b/mindspore/ccsrc/minddata/dataset/engine/opt/pre/node_removal_pass.cc
@@ -62,13 +62,6 @@ Status NodeRemovalPass::RemovalNodes::Visit(std::shared_ptr node, bo
 // Perform ShuffleNode removal check.
 Status NodeRemovalPass::RemovalNodes::Visit(std::shared_ptr node, bool *modified) {
   *modified = false;
-#if 0
-  // If we are in a cache descendant tree, then this shuffle op needs to be removed
-  if (is_caching_) {
-    MS_LOG(INFO) << "Shuffle under an operation with cache is identified for removal.";
-    nodes_to_remove_.push_back(std::static_pointer_cast(node));
-  }
-#endif
   return Status::OK();
 }
diff --git a/mindspore/core/abstract/prim_arrays.cc b/mindspore/core/abstract/prim_arrays.cc
index 6a638e70f33..1ebb9a2dbc9 100644
--- a/mindspore/core/abstract/prim_arrays.cc
+++ b/mindspore/core/abstract/prim_arrays.cc
@@ -326,7 +326,6 @@ AbstractBasePtr InferImplUnsortedSegmentMax(const AnalysisEnginePtr &, const Pri
     } else {
      num_segments_value = *static_cast(num_segments_tensor->data_c());
     }
-    // num_segments_value = *static_cast(num_segments_tensor->data_c());
   } else if (args_spec_list[2]->isa()) { // num_segments is Scalar
     auto num_segments = CheckArg(op_name, args_spec_list, 2);
     if (num_segments->GetTypeTrack()->type_id() == TypeId::kNumberTypeInt64) {
diff --git a/mindspore/core/abstract/prim_others.cc b/mindspore/core/abstract/prim_others.cc
index 46a30b3e6c3..a2010058891 100644
--- a/mindspore/core/abstract/prim_others.cc
+++ b/mindspore/core/abstract/prim_others.cc
@@ -483,7 +483,6 @@ AbstractBasePtr InferImplCast(const AnalysisEnginePtr &, const PrimitivePtr &pri
                               const AbstractBasePtrList &args_spec_list) {
   const std::string op_name = primitive->name();
   // GPU has 2 inputs while tbe has 1 only. Skip CheckArgsSize.
-  // CheckArgsSize(op_name, args_spec_list, 1);
   auto input_x = CheckArg(op_name, args_spec_list, 0);
   MS_EXCEPTION_IF_NULL(input_x);
   MS_EXCEPTION_IF_NULL(input_x->shape());
diff --git a/mindspore/lite/minddata/wrapper/album_op_android.cc b/mindspore/lite/minddata/wrapper/album_op_android.cc
index a35b2b0d632..1fa3cb6d405 100644
--- a/mindspore/lite/minddata/wrapper/album_op_android.cc
+++ b/mindspore/lite/minddata/wrapper/album_op_android.cc
@@ -195,7 +195,6 @@ Status AlbumOp::LoadImageTensor(const std::string &image_file_path, uint32_t col
     // load empty tensor since image is not jpg
     MS_LOG(INFO) << "Bin file found" << image_file_path << ".";
     RETURN_IF_NOT_OK(Tensor::CreateFromFile(image_file_path, tensor));
-    // row->push_back(std::move(image));
     return Status::OK();
   }
@@ -270,7 +269,6 @@ Status AlbumOp::LoadStringArrayTensor(const nlohmann::json &json_obj, uint32_t c
   MS_LOG(INFO) << "String array label found: " << data << ".";
   // TensorPtr label;
   RETURN_IF_NOT_OK(Tensor::CreateFromVector(data, tensor));
-  // row->push_back(std::move(label));
   return Status::OK();
 }
@@ -281,7 +279,6 @@ Status AlbumOp::LoadStringTensor(const nlohmann::json &json_obj, uint32_t col_nu
   MS_LOG(INFO) << "String label found: " << data << ".";
   TensorPtr label;
   RETURN_IF_NOT_OK(Tensor::CreateScalar(data, tensor));
-  // row->push_back(std::move(label));
   return Status::OK();
 }
@@ -310,7 +307,6 @@ Status AlbumOp::LoadIntArrayTensor(const nlohmann::json &json_obj, uint32_t col_
     RETURN_STATUS_UNEXPECTED("Invalid data, column type is neither int32 nor int64, it is " +
                              data_schema_->column(col_num).type().ToString());
   }
-  // row->push_back(std::move(label));
   return Status::OK();
 }
@@ -339,7 +335,6 @@ Status AlbumOp::LoadFloatArrayTensor(const nlohmann::json &json_obj, uint32_t co
     RETURN_STATUS_UNEXPECTED("Invalid data, column type is neither float32 nor float64, it is " +
                              data_schema_->column(col_num).type().ToString());
   }
-  // row->push_back(std::move(float_array));
   return Status::OK();
 }
@@ -347,7 +342,6 @@ Status AlbumOp::LoadIDTensor(const std::string &file, uint32_t col_num, TensorPt
   if (data_schema_->column(col_num).type() == DataType::DE_STRING) {
     // TensorPtr id;
     RETURN_IF_NOT_OK(Tensor::CreateScalar(file, tensor));
-    // row->push_back(std::move(id));
     return Status::OK();
   }
   // hack to get the file name without extension, the 1 is to get rid of the backslash character
@@ -355,7 +349,6 @@ Status AlbumOp::LoadIDTensor(const std::string &file, uint32_t col_num, TensorPt
   // TensorPtr id;
   RETURN_IF_NOT_OK(Tensor::CreateScalar(image_id, tensor));
   MS_LOG(INFO) << "File ID " << image_id << ".";
-  // row->push_back(std::move(id));
   return Status::OK();
 }
@@ -363,7 +356,6 @@ Status AlbumOp::LoadEmptyTensor(uint32_t col_num, TensorPtr *tensor) {
   // hack to get the file name without extension, the 1 is to get rid of the backslash character
   // TensorPtr empty_tensor;
   RETURN_IF_NOT_OK(Tensor::CreateEmpty(TensorShape({0}), data_schema_->column(col_num).type(), tensor));
-  // row->push_back(std::move(empty_tensor));
   return Status::OK();
 }
@@ -382,7 +374,6 @@ Status AlbumOp::LoadFloatTensor(const nlohmann::json &json_obj, uint32_t col_num
     RETURN_IF_NOT_OK(Tensor::CreateScalar(data, tensor));
     MS_LOG(INFO) << "float found: " << json_obj << ".";
   }
-  // row->push_back(std::move(float_tensor));
   return Status::OK();
 }
@@ -398,7 +389,6 @@ Status AlbumOp::LoadIntTensor(const nlohmann::json &json_obj, uint32_t col_num,
     RETURN_IF_NOT_OK(Tensor::CreateScalar(data, tensor));
     MS_LOG(INFO) << "int32 found: " << json_obj << ".";
   }
-  // row->push_back(std::move(int_tensor));
   return Status::OK();
 }
@@ -410,7 +400,6 @@ Status AlbumOp::LoadIntTensor(const nlohmann::json &json_obj, uint32_t col_num,
 Status AlbumOp::LoadTensorRow(row_id_type row_id, const std::string &file,
                               std::unordered_map> *map_row) {
   // testing here is to just print out file path
-  // (*row) = TensorRow(row_id, {});
   MS_LOG(INFO) << "Image row file: " << file << ".";
   std::ifstream file_handle(folder_path_ + file);
diff --git a/mindspore/lite/nnacl/fp32_grad/softmax_grad.c b/mindspore/lite/nnacl/fp32_grad/softmax_grad.c
index c863e705dd3..d0d5983042b 100644
--- a/mindspore/lite/nnacl/fp32_grad/softmax_grad.c
+++ b/mindspore/lite/nnacl/fp32_grad/softmax_grad.c
@@ -57,8 +57,6 @@ void SoftmaxGrad(const float *input_ptr, const float *yt_ptr, float *output_ptr,
         *(output_ptr + outter_offset + k * N + j) += a * sum_data[j];
       }
     }
-
-    // gemm(0, 0, M, N, K, -1, sum_mul, K, sum_data, N, 1, &output_ptr[outter_offset], N);
   }
   for (int i = 0; i < ele_size; i++) {
diff --git a/mindspore/lite/src/ops/one_hot.cc b/mindspore/lite/src/ops/one_hot.cc
index 2917ca49006..c580134ba0e 100644
--- a/mindspore/lite/src/ops/one_hot.cc
+++ b/mindspore/lite/src/ops/one_hot.cc
@@ -91,7 +91,6 @@ int OneHot::InferShape(std::vector inputs, std::vector outpu
   int axis = GetAxis();
   // indices, depth, on_value, off_value
-  // indices, depth, on_off_value(contain 2 values);
   if (inputs.size() != kOneHotInputNum && inputs.size() != kOneHotInputNumOpt) {
     MS_LOG(ERROR) << "OneHot got inputs num " << inputs.size() << ", should be " << kOneHotInputNum << " or "
                   << kOneHotInputNumOpt;
diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/sparse_to_dense.h b/mindspore/lite/src/runtime/kernel/opencl/kernel/sparse_to_dense.h
index b69ff1807bb..c2b514c6f81 100644
--- a/mindspore/lite/src/runtime/kernel/opencl/kernel/sparse_to_dense.h
+++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/sparse_to_dense.h
@@ -43,7 +43,6 @@ class SparseToDenseOpenCLKernel : public OpenCLKernel {
   int InitOutputToDefault();
 private:
-  // bool IndicesIsScalar{false};
   bool enable_fp16_{false};
   float default_{0.0f};
   float weight_scalar_{0.f};
diff --git a/mindspore/lite/tools/net_train/net_train.cc b/mindspore/lite/tools/net_train/net_train.cc
index 4b81b04a0f0..bbb467d2749 100644
--- a/mindspore/lite/tools/net_train/net_train.cc
+++ b/mindspore/lite/tools/net_train/net_train.cc
@@ -337,7 +337,6 @@ int NetTrain::RunExportedNet() {
   }
   context->thread_num_ = flags_->num_threads_;
-  // context->enable_float16_ = flags_->enable_fp16_;
   session_ = session::TrainSession::CreateSession(flags_->export_file_.c_str(), context.get());
   if (session_ == nullptr) {
     MS_LOG(ERROR) << "CreateSession failed while running ", model_name.c_str();
@@ -406,7 +405,6 @@ int NetTrain::RunNetTrain() {
     context->device_list_[0].device_info_.cpu_device_info_.cpu_bind_mode_ = NO_BIND;
   }
   context->thread_num_ = flags_->num_threads_;
-  // context->enable_float16_ = flags_->enable_fp16_;
   session_ = session::TrainSession::CreateSession(flags_->model_file_.c_str(), context.get());
   if (session_ == nullptr) {
     MS_LOG(ERROR) << "CreateSession failed while running ", model_name.c_str();
diff --git a/mindspore/nn/graph_kernels/graph_kernels.py b/mindspore/nn/graph_kernels/graph_kernels.py
index c37719529bd..ca542da989f 100644
--- a/mindspore/nn/graph_kernels/graph_kernels.py
+++ b/mindspore/nn/graph_kernels/graph_kernels.py
@@ -901,7 +901,8 @@ class Gelu(GraphKernel):
         def _math_four_compute(data_x):
             """
-            return: math_four equal 2*(np(sqrt(2 / np.pi)*(x + 0.044715*tf.pow(x, 3)))
+            Return:
+                math_four equal 2*(np(sqrt(2 / np.pi)*(x + 0.044715*tf.pow(x, 3)))
             """
             datax_pow = data_x * data_x * data_x
             datax_muls_c = self.mul(datax_pow, self.CSVALUE)
diff --git a/mindspore/nn/loss/loss.py b/mindspore/nn/loss/loss.py
index 5201cf7ebaa..9062405874e 100644
--- a/mindspore/nn/loss/loss.py
+++ b/mindspore/nn/loss/loss.py
@@ -431,8 +431,8 @@ class SampledSoftmaxLoss(_Loss):
         # Sample the negative labels.
         #   sampled shape: [num_sampled] tensor
-        #   true_expected_count shape = [batch_size, 1] tensor
-        #   sampled_expected_count shape = [num_sampled] tensor
+        #   true_expected_count shape is [batch_size, 1] tensor
+        #   sampled_expected_count shape is [num_sampled] tensor
         if sampled_values is None:
             sampled_values = self.sampler(labels)
diff --git a/mindspore/nn/probability/distribution/categorical.py b/mindspore/nn/probability/distribution/categorical.py
index e1be1ab8e0c..8229ae40285 100644
--- a/mindspore/nn/probability/distribution/categorical.py
+++ b/mindspore/nn/probability/distribution/categorical.py
@@ -308,7 +308,6 @@ class Categorical(Distribution):
         broadcast_shape_tensor = logits * value
         broadcast_shape = self.shape(broadcast_shape_tensor)
-        # broadcast_shape (N, C)
         num_classes = broadcast_shape[-1]
         label_shape = broadcast_shape[:-1]
@@ -373,7 +372,6 @@ class Categorical(Distribution):
         broadcast_shape_tensor = probs * value
         broadcast_shape = self.shape(broadcast_shape_tensor)
-        # broadcast_shape (N, C)
         num_classes = broadcast_shape[-1]
         label_shape = broadcast_shape[:-1]
diff --git a/mindspore/ops/composite/multitype_ops/_compile_utils.py b/mindspore/ops/composite/multitype_ops/_compile_utils.py
index e184eade8e9..aea85bb8aa9 100644
--- a/mindspore/ops/composite/multitype_ops/_compile_utils.py
+++ b/mindspore/ops/composite/multitype_ops/_compile_utils.py
@@ -276,7 +276,6 @@ def tensor_expand_dims(data, tuple_index):
 def tensor_index_by_tuple(data, tuple_index):
     """Tensor getitem by tuple of various types with None"""
-    # data, tuple_index_without_none = tensor_expand_dims(data, tuple_index)
     tuple_index_without_none = tuple_index
     if len(tuple_index) == 1:
         return data[tuple_index_without_none[0]]
diff --git a/mindspore/ops/operations/math_ops.py b/mindspore/ops/operations/math_ops.py
index cf40af3e265..5530f759972 100644
--- a/mindspore/ops/operations/math_ops.py
+++ b/mindspore/ops/operations/math_ops.py
@@ -743,9 +743,7 @@ class MatMul(PrimitiveWithInfer):
         # validate whether last two dims satisfying matrix multiply
         x1_last = x1[-2:]
         x2_last = x2[-2:]
-        # x1_col = x1_last[1] if (not transpose_a) else x1_last[0]
         x1_col = x1_last[not self.transpose_a]
-        # x2_row = x2_last[0] if (not transpose_b) else x2_last[1]
         x2_row = x2_last[self.transpose_b]
         if x1_col != x2_row:
             raise ValueError(f'For \'{cls_name}\' evaluator shapes of inputs can not do this operator,'