diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/activation_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/activation_grad.cc
index 73b99be7bdb..35c3e2c0577 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/activation_grad.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/activation_grad.cc
@@ -33,7 +33,7 @@ using mindspore::schema::PrimitiveType_ActivationGrad;
 
 namespace mindspore::kernel {
 int ActivationGradCPUKernel::Init() {
-  if (2 != in_tensors_.size()) {
+  if (in_tensors_.size() != 2) {
     MS_LOG(ERROR) << "ActivationGrad should have 2 input tensors";
     return RET_ERROR;
   }
@@ -86,6 +86,7 @@ int ActivationGradCPUKernel::DoActivation(int task_id) {
 }
 
 int ActivationGradRun(void *cdata, int task_id) {
+  MS_ASSERT(cdata != nullptr);
   auto activationGrad_kernel = reinterpret_cast<ActivationGradCPUKernel *>(cdata);
   auto error_code = activationGrad_kernel->DoActivation(task_id);
   if (error_code != RET_OK) {
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/activation_grad.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/activation_grad.h
index 5000d11277c..f56b9ec9cca 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/activation_grad.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/activation_grad.h
@@ -40,7 +40,6 @@ class ActivationGradCPUKernel : public LiteKernel {
  private:
   ActivationParameter *param_act_grad_;
 };
-
 }  // namespace mindspore::kernel
 
 #endif  // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_GRAD_ACTIVATION_GRAD_H_
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/adam.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/adam.cc
index 65572eebf03..21ca174cca8 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/adam.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/adam.cc
@@ -45,6 +45,10 @@ int AdamCPUKernel::Execute(int task_id) {
   auto eps = reinterpret_cast<float *>(in_tensors_[8]->MutableData())[0];
   auto gradient = reinterpret_cast<float *>(in_tensors_[9]->MutableData());
   size_t elem_num = in_tensors_[0]->ElementsNum();
+  if (fabs(1 - beta1_power) <= 0.0f) {
+    MS_LOG(ERROR) << "divisor cannot be 0";
+    return RET_ERROR;
+  }
   auto update_lr = learning_rate * std::sqrt(1 - beta2_power) / (1 - beta1_power);
 
   if (adam_param_->use_nesterov_) {  // Nadam
@@ -64,6 +68,7 @@ int AdamCPUKernel::Execute(int task_id) {
 }
 
 int AdamRun(void *cdata, int task_id) {
+  MS_ASSERT(cdata != nullptr);
   auto Adam_kernel = reinterpret_cast<AdamCPUKernel *>(cdata);
   auto error_code = Adam_kernel->Execute(task_id);
   if (error_code != RET_OK) {
@@ -90,16 +95,19 @@ kernel::LiteKernel *CpuAdamFp32KernelCreator(const std::vector<lite::Tensor *> &
                                              const lite::PrimitiveC *primitive) {
   MS_ASSERT(desc.type == schema::PrimitiveType_Adam);
   auto *kernel = new (std::nothrow) AdamCPUKernel(opParameter, inputs, outputs, ctx, primitive);
-  MS_ASSERT(kernel != nullptr);
+  if (kernel == nullptr) {
+    MS_LOG(ERROR) << "new AdamCPUKernel fail!";
+    free(opParameter);
+    return nullptr;
+  }
   auto ret = kernel->Init();
-  if (0 != ret) {
+  if (ret != RET_OK) {
     MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
                   << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
     delete kernel;
     return nullptr;
   }
-
   return kernel;
 }
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/apply_momentum.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/apply_momentum.cc
index b629d5f441c..56672756288 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/apply_momentum.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/apply_momentum.cc
@@ -29,7 +29,6 @@ using mindspore::lite::RET_OK;
 using mindspore::schema::PrimitiveType_ApplyMomentum;
 
 namespace mindspore::kernel {
-
 int ApplyMomentumCPUKernel::ReSize() { return RET_OK; }
 
 int ApplyMomentumCPUKernel::Execute(int task_id) {
@@ -55,6 +54,7 @@ int ApplyMomentumCPUKernel::Execute(int task_id) {
 }
 
 int ApplyMomentumRun(void *cdata, int task_id) {
+  MS_ASSERT(cdata != nullptr);
   auto applyMomentum_kernel = reinterpret_cast<ApplyMomentumCPUKernel *>(cdata);
   auto error_code = applyMomentum_kernel->Execute(task_id);
   if (error_code != RET_OK) {
@@ -89,13 +89,12 @@ kernel::LiteKernel *CpuApplyMomentumFp32KernelCreator(const std::vector<lite::Tensor *> &
   auto ret = kernel->Init();
-  if (0 != ret) {
+  if (ret != RET_OK) {
     MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
                   << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
     delete kernel;
     return nullptr;
   }
-
   return kernel;
 }
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_grad.cc
index 833f0225225..1488fedfa4a 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_grad.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_grad.cc
@@ -42,7 +42,6 @@ int ArithmeticGradCPUKernel::Init() {
       arithmetic_grad_ = &ArithmeticGradCPUKernel::ArithmeticGradMul2L;
     else if (Type() == PrimitiveType_DivGrad)
      arithmetic_grad_ = &ArithmeticGradCPUKernel::ArithmeticGradDiv2L;
-
   } else if (dx2->ElementsNum() < dx1->ElementsNum()) {
     if (Type() == PrimitiveType_MulGrad)
       arithmetic_grad_ = &ArithmeticGradCPUKernel::ArithmeticGradMul1L;
@@ -75,25 +74,28 @@ int ArithmeticGradCPUKernel::Init() {
 
 void ArithmeticGradCPUKernel::ArithmeticGradAdd(float *dy, int dy_size, float *dx1, int dx1_size, float *dx2,
                                                 int dx2_size) {
-  if (dx1_size == dy_size)
+  if (dx1_size == dy_size) {
     memcpy(dx1, dy, dy_size * sizeof(float));
-  else
+  } else {
     ReduceSumByAxes(dy, arithmeticParameter_->out_shape_, dx1, arithmeticParameter_->in_shape0_,
                     arithmeticParameter_->ndim_);
-  if (dx2_size == dy_size)
+  }
+  if (dx2_size == dy_size) {
     memcpy(dx2, dy, dy_size * sizeof(float));
-  else
+  } else {
     ReduceSumByAxes(dy, arithmeticParameter_->out_shape_, dx2, arithmeticParameter_->in_shape1_,
                     arithmeticParameter_->ndim_);
+  }
 }
 
 void ArithmeticGradCPUKernel::ArithmeticGradSub(float *dy, int dy_size, float *dx1, int dx1_size, float *dx2,
                                                 int dx2_size) {
-  if (dx1_size == dy_size)
+  if (dx1_size == dy_size) {
     memcpy(dx1, dy, dy_size * sizeof(float));
-  else
+  } else {
     ReduceSumByAxes(dy, arithmeticParameter_->out_shape_, dx1, arithmeticParameter_->in_shape0_,
                     arithmeticParameter_->ndim_);
+  }
   if (dx2_size == dy_size) {
     for (int i = 0; i < dx2_size; i++) {
       dx2[i] = -dy[i];
@@ -156,7 +158,9 @@ void ArithmeticGradCPUKernel::ArithmeticGradDiv1L(float *dy, int dy_size, float
                   arithmeticParameter_);  // broadcast directly to dx1
   ReduceSumByAxes(tile_data2, arithmeticParameter_->in_shape0_, dx2, arithmeticParameter_->in_shape1_,
                   arithmeticParameter_->ndim_);
-  for (int i = 0; i < dx2_size; i++) dx2[i] = -dx2[i];
+  for (int i = 0; i < dx2_size; i++) {
+    dx2[i] = -dx2[i];
+  }
 
   // broadcasting x2
   BroadcastDiv(dy, x2_data, tile_data0, tile_data1, dx1, dy_size, arithmeticParameter_);  // broadcast directly to dx1
@@ -214,6 +218,7 @@ int ArithmeticGradCPUKernel::Execute(int task_id) {
 }
 
 int ArithmeticGradRun(void *cdata, int task_id) {
+  MS_ASSERT(cdata != nullptr);
   auto Arithmetic_kernel = reinterpret_cast<ArithmeticGradCPUKernel *>(cdata);
   auto error_code = Arithmetic_kernel->Execute(task_id);
   if (error_code != RET_OK) {
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/assign.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/assign.cc
index 4a222c78435..792fb0e683e 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/assign.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/assign.cc
@@ -42,6 +42,7 @@ int AssignCPUKernel::Execute(int task_id) {
 }
 
 int AssignRun(void *cdata, int task_id) {
+  MS_ASSERT(cdata != nullptr);
   auto Assign_kernel = reinterpret_cast<AssignCPUKernel *>(cdata);
   auto error_code = Assign_kernel->Execute(task_id);
   if (error_code != RET_OK) {
@@ -68,16 +69,19 @@ kernel::LiteKernel *CpuAssignFp32KernelCreator(const std::vector<lite::Tensor *> &
                                                const lite::PrimitiveC *primitive) {
   MS_ASSERT(desc.type == schema::PrimitiveType_Assign);
   auto *kernel = new (std::nothrow) AssignCPUKernel(opParameter, inputs, outputs, ctx, primitive);
-  MS_ASSERT(kernel != nullptr);
+  if (kernel == nullptr) {
+    MS_LOG(ERROR) << "new AssignCPUKernel fail!";
+    free(opParameter);
+    return nullptr;
+  }
   auto ret = kernel->Init();
-  if (0 != ret) {
+  if (ret != RET_OK) {
     MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
                   << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
     delete kernel;
     return nullptr;
   }
-
   return kernel;
 }
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bias_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bias_grad.cc
index 16d9dd15ad9..bb98c01c424 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bias_grad.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bias_grad.cc
@@ -52,7 +52,9 @@ int BiasGradCPUKernel::Execute(int task_id) {
   size_t nhw_size = 1;
   size_t channels = bias_param->in_shape0_[bias_param->ndim_ - 1];  // C in NHWC
-  for (unsigned int i = 0; i < bias_param->ndim_ - 1; i++) nhw_size *= bias_param->in_shape0_[i];
+  for (unsigned int i = 0; i < bias_param->ndim_ - 1; i++) {
+    nhw_size *= bias_param->in_shape0_[i];
+  }
 
   size_t total_size = channels * nhw_size;
   for (size_t c = 0; c < channels; ++c) {
@@ -98,7 +100,7 @@ kernel::LiteKernel *CpuBiasGradFp32KernelCreator(const std::vector<lite::Tensor *> &
   auto ret = kernel->Init();
-  if (RET_OK != ret) {
+  if (ret != RET_OK) {
     MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
                   << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
     delete kernel;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bn_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bn_grad.cc
index 577a97205fc..aa614ce17fe 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bn_grad.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bn_grad.cc
@@ -28,7 +28,6 @@ using mindspore::kernel::KERNEL_ARCH::kCPU;
 using mindspore::lite::KernelRegistrar;
 using mindspore::lite::RET_ERROR;
 using mindspore::lite::RET_OK;
-// using mindspore::lite::REG_OP;
 using mindspore::schema::PrimitiveType_BNGrad;
 
 namespace mindspore::kernel {
@@ -84,6 +83,7 @@ int BNGradCPUKernel::Execute(int task_id) {
 }
 
 int BNGradRun(void *cdata, int task_id) {
+  MS_ASSERT(cdata != nullptr);
   auto bn_kernel = reinterpret_cast<BNGradCPUKernel *>(cdata);
   if (task_id == 0) {
     auto error_code = bn_kernel->Execute(task_id);
@@ -117,7 +117,7 @@ kernel::LiteKernel *CpuBNGradFp32KernelCreator(const std::vector<lite::Tensor *> &
     return nullptr;
   }
   auto ret = kernel->Init();
-  if (RET_OK != ret) {
+  if (ret != RET_OK) {
     MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
                   << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
     delete kernel;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution.cc
index 8f9623d683c..e109b221edb 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution.cc
@@ -27,11 +27,11 @@ using mindspore::lite::RET_OK;
 
 namespace mindspore::kernel {
 int ConvolutionTrainCPUKernel::Init() {
-  if (2 > in_tensors_.size()) {
+  if (in_tensors_.size() < 2) {
     MS_LOG(ERROR) << "Convolution should have at least two inputs";
     return RET_ERROR;
   }
-  if (1 != out_tensors_.size()) {
+  if (out_tensors_.size() != 1) {
     MS_LOG(ERROR) << "Convolution should have one output";
     return RET_ERROR;
   }
@@ -105,6 +105,7 @@ int ConvolutionTrainCPUKernel::Execute(int task_id) {
 }
 
 int ConvolutionTrainRun(void *cdata, int task_id) {
+  MS_ASSERT(cdata != nullptr);
   auto conv_kernel = reinterpret_cast<ConvolutionTrainCPUKernel *>(cdata);
   auto error_code = conv_kernel->Execute(task_id);
   if (error_code != RET_OK) {
@@ -138,7 +139,7 @@ kernel::LiteKernel *CpuConvTrainFp32KernelCreator(const std::vector<lite::Tensor *> &
   auto ret = kernel->Init();
-  if (RET_OK != ret) {
+  if (ret != RET_OK) {
     MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
                   << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
     delete kernel;
@@ -146,5 +147,4 @@ kernel::LiteKernel *CpuConvTrainFp32KernelCreator(const std::vector<lite::Tensor *> &
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_filter.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_filter.cc
+  MS_ASSERT(cdata != nullptr);
   auto convfilter_kernel = reinterpret_cast<ConvolutionGradFilterCPUKernel *>(cdata);
   auto error_code = convfilter_kernel->Execute(task_id);
   if (error_code != RET_OK) {
@@ -145,7 +146,7 @@ kernel::LiteKernel *CpuConvGradFilterFp32KernelCreator(const std::vector<lite::Tensor *> &
   auto ret = kernel->Init();
-  if (RET_OK != ret) {
+  if (ret != RET_OK) {
     MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
                   << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
     delete kernel;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_filter.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_filter.h
index 763abc7612f..465a91e57ac 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_filter.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_filter.h
@@ -43,7 +43,6 @@ class ConvolutionGradFilterCPUKernel : public LiteKernel {
   const int chunk = C12NUM;
 #endif
 };
-
 }  // namespace mindspore::kernel
 
 #endif  // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_GRAD_CONVOLUTION_GRAD_FILTER_H_
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_input.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_input.cc
index b770ea1b915..c1e8e59c57d 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_input.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_input.cc
@@ -122,6 +122,7 @@ int ConvolutionGradInputCPUKernel::Execute(int task_id) {
 }
 
 int ConvolutionGradInputRun(void *cdata, int task_id) {
+  MS_ASSERT(cdata != nullptr);
   auto convinput_kernel = reinterpret_cast<ConvolutionGradInputCPUKernel *>(cdata);
   auto error_code = convinput_kernel->Execute(task_id);
   if (error_code != RET_OK) {
@@ -157,7 +158,7 @@ kernel::LiteKernel *CpuConvGradInputFp32KernelCreator(const std::vector<lite::Tensor *> &
   auto ret = kernel->Init();
-  if (0 != ret) {
+  if (ret != RET_OK) {
     MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
                   << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
     delete kernel;
@@ -168,5 +169,4 @@ kernel::LiteKernel *CpuConvGradInputFp32KernelCreator(const std::vector<lite::Tensor *> &
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/deconvolution_grad_filter.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/deconvolution_grad_filter.cc
   auto ret = kernel->Init();
-  if (RET_OK != ret) {
+  if (ret != RET_OK) {
     MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
                   << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
     delete kernel;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/deconvolution_grad_filter.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/deconvolution_grad_filter.h
index a95b4e484ad..cb3007c67cc 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/deconvolution_grad_filter.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/deconvolution_grad_filter.h
@@ -38,7 +38,6 @@ class DeConvolutionGradFilterCPUKernel : public LiteKernel {
   size_t ws_size = 0;
   const int chunk = 1;
 };
-
 }  // namespace mindspore::kernel
 
 #endif  // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_GRAD_DECONVOLUTION_GRAD_FILTER_H_
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/dropout.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/dropout.cc
index 7fa2eafa8bc..367112b8778 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/dropout.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/dropout.cc
@@ -115,6 +115,7 @@ kernel::LiteKernel *CpuDropoutFp32KernelCreator(const std::vector<lite::Tensor *> &
   auto ret = kernel->Init();
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/dropout_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/dropout_grad.cc
index bb62ba40f81..b4af30a444f 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/dropout_grad.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/dropout_grad.cc
@@ -102,6 +102,7 @@ kernel::LiteKernel *CpuDropoutGradFp32KernelCreator(const std::vector<lite::Tensor *> &
   auto ret = kernel->Init();
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/neg_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/neg_grad.cc
index eb90801ec0f..0b6b17fc331 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/neg_grad.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/neg_grad.cc
@@ -32,6 +32,7 @@ namespace {
 int NegGradRun(void *cdata, int thread_id) {
   MS_ASSERT(cdata != nullptr);
   auto kernel = reinterpret_cast<NegGradCPUKernel *>(cdata);
+  MS_ASSERT(kernel != nullptr);
   return kernel->DoNegGrad(thread_id);
 }
 }  // namespace
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/pooling_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/pooling_grad.cc
index 80aebea72dc..8e62a651e2d 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/pooling_grad.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/pooling_grad.cc
@@ -77,6 +77,7 @@ int PoolingGradCPUKernel::Execute(int task_id) {
 }
 
 int PoolingGradImpl(void *cdata, int task_id) {
+  MS_ASSERT(cdata != nullptr);
   auto pooling = reinterpret_cast<PoolingGradCPUKernel *>(cdata);
   auto error_code = pooling->Execute(task_id);
   if (error_code != RET_OK) {
@@ -92,7 +93,9 @@ int PoolingGradCPUKernel::Run() {
   auto output_ptr = reinterpret_cast<float *>(out_tensors_.at(0)->MutableData());
   int size = pooling_param->input_w_ * pooling_param->input_h_ * pooling_param->input_channel_ *
              pooling_param->output_batch_;
-  for (int i = 0; i < size; i++) output_ptr[i] = 0.0;
+  for (int i = 0; i < size; i++) {
+    output_ptr[i] = 0.0;
+  }
 
   int error_code = ParallelLaunch(this->context_->thread_pool_, PoolingGradImpl, this, 1);
   if (error_code != RET_OK) {
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/power_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/power_grad.cc
index f87a90a74b1..aacf5ec2820 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/power_grad.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/power_grad.cc
@@ -28,11 +28,11 @@ using mindspore::schema::PrimitiveType_PowerGrad;
 
 namespace mindspore::kernel {
 int PowerGradCPUKernel::Init() {
-  if (2 != in_tensors_.size()) {
+  if (in_tensors_.size() != 2) {
     MS_LOG(ERROR) << "Power Grad Filter should have 2 inputs";
     return RET_ERROR;
   }
-  if (1 != out_tensors_.size()) {
+  if (out_tensors_.size() != 1) {
     MS_LOG(ERROR) << "Power Grad Filter should have one output";
     return RET_ERROR;
   }
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc
index 1d4d9b9d7b3..a7ae2b03baf 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc
@@ -77,7 +77,9 @@ int SgdCPUKernel::Init() {
   // Only for test with uninitialized Data
   size_t elem_num = in_tensors_[0]->ElementsNum();
   auto accumulate = reinterpret_cast<float *>(in_tensors_[3]->MutableData());
-  for (size_t i = 0; i < elem_num; i++) accumulate[i] = 0.0;
+  for (size_t i = 0; i < elem_num; i++) {
+    accumulate[i] = 0.0;
+  }
 
   if (sgd_param_->dampening_ < 0.0f) {
     MS_LOG(ERROR) << "dampening should be at least 0.0";
@@ -105,7 +107,7 @@ kernel::LiteKernel *CpuSgdFp32KernelCreator(const std::vector<lite::Tensor *> &i
   }
   auto ret = kernel->Init();
-  if (RET_OK != ret) {
+  if (ret != RET_OK) {
     MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
                   << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
     delete kernel;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_cross_entropy_with_logits.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_cross_entropy_with_logits.cc
index cc31dfbce4c..a094af8213c 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_cross_entropy_with_logits.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_cross_entropy_with_logits.cc
@@ -61,7 +61,7 @@ int SoftmaxCrossEntropyWithLogitsCPUKernel::Execute(int task_id) {
   auto ins = reinterpret_cast<float *>(in_tensors_.at(0)->MutableData());
   auto labels = reinterpret_cast<float *>(in_tensors_.at(1)->MutableData());
   float *out = reinterpret_cast<float *>(out_tensors_.at(0)->MutableData());
-  float *grads = NULL;
+  float *grads = nullptr;
   if (IsTrain() && out_tensors_.size() > 1) {
     grads = reinterpret_cast<float *>(out_tensors_.at(1)->MutableData());
   }
@@ -104,7 +104,7 @@ int SoftmaxCrossEntropyWithLogitsCPUKernel::Init() {
   param_->number_of_classes_ = dims[1];
   param_->batch_size_ = dims[0];
   for (unsigned int i = 0; i < dims.size(); i++) param_->input_shape_[i] = dims[i];
-  if (2 != this->in_tensors_.size()) {
+  if (this->in_tensors_.size() != 2) {
     MS_LOG(ERROR) << "softmax entropy loss should have two inputs";
     return RET_ERROR;
   }
@@ -139,7 +139,7 @@ kernel::LiteKernel *CpuSoftmaxCrossEntropyFp32KernelCreator(const std::vector<lite::Tensor *> &
   auto ret = kernel->Init();
-  if (RET_OK != ret) {
+  if (ret != RET_OK) {
     MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
                   << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
     delete kernel;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_grad.cc
index 32034315a2f..8a88ebe5bdb 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_grad.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_grad.cc
@@ -43,6 +43,7 @@ int SoftmaxGradCPUKernel::Init() {
   auto axis = param->axis_;
   if ((axis < -1) || (axis > param->n_dim_)) {
     MS_LOG(ERROR) << "SoftmaxGrad axis is invalid!";
+    return RET_ERROR;
   } else if (axis == -1) {
     axis = param->axis_ = (in_dims - 1);
   }
@@ -108,5 +109,4 @@ kernel::LiteKernel *CpuSoftmaxGradFp32KernelCreator(const std::vector<lite::Tensor *> &
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sparse_softmax_cross_entropy_with_logits.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sparse_softmax_cross_entropy_with_logits.cc
   auto ins = reinterpret_cast<float *>(in_tensors_.at(0)->data_c());
   auto labels = reinterpret_cast<float *>(in_tensors_.at(1)->data_c());
   float *out = reinterpret_cast<float *>(out_tensors_.at(0)->data_c());
-  float *grads = NULL;
+  float *grads = nullptr;
   if (IsTrain() && out_tensors_.size() > 1) {
     grads = reinterpret_cast<float *>(out_tensors_.at(1)->MutableData());
   }
@@ -164,7 +164,7 @@ kernel::LiteKernel *CpuSparseSoftmaxCrossEntropyFp32KernelCreator(
     return nullptr;
   }
   auto ret = kernel->Init();
-  if (RET_OK != ret) {
+  if (ret != RET_OK) {
     MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
                   << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
     delete kernel;
@@ -172,5 +172,4 @@ kernel::LiteKernel *CpuSparseSoftmaxCrossEntropyFp32KernelCreator(
   }
   return kernel;
 }
-
 }  // namespace mindspore::kernel
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/tuple_getitem.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/tuple_getitem.cc
index 29a7be3d6a6..090f4c714a1 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/tuple_getitem.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/tuple_getitem.cc
@@ -30,11 +30,11 @@ using mindspore::schema::PrimitiveType_TupleGetItem;
 
 namespace mindspore::kernel {
 int TupleGetItemCPUKernel::Init() {
-  if (1 != in_tensors_.size()) {
+  if (in_tensors_.size() != 1) {
     MS_LOG(ERROR) << "Tuple Grad Filter should have one input";
     return RET_ERROR;
   }
-  if (1 != out_tensors_.size()) {
+  if (out_tensors_.size() != 1) {
     MS_LOG(ERROR) << "Tuple Grad Filter should have one output";
     return RET_ERROR;
   }
@@ -48,7 +48,6 @@ int TupleGetItemCPUKernel::Execute(int task_id) {
   auto out = reinterpret_cast<float *>(out_tensors_.at(0)->MutableData());
   memcpy(out, in, in_tensors_.at(0)->Size());
-
   return RET_OK;
 }