!22812 [MS][LITE][TOD] Clean Code according to Static code analysis recommendations

Merge pull request !22812 from ehaleva/export
i-robot 2021-09-03 01:03:20 +00:00 committed by Gitee
commit 6e5637e9e1
5 changed files with 64 additions and 10 deletions

View File

@@ -0,0 +1,53 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "nnacl/infer/infer_register.h"
#include "nnacl/infer/common_infer.h"
#include "nnacl/fp32/lstm_fp32.h"
int LstmGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
                       OpParameter *parameter) {
  int check_ret = CheckAugmentNullSize(inputs, inputs_size, outputs, outputs_size, parameter, 11, 4);
  if (check_ret != NNACL_OK) {
    return check_ret;
  }
  const TensorC *input = inputs[0];
  const TensorC *H = inputs[1];
  const TensorC *C = inputs[2];
  const TensorC *weight = inputs[3];
  TensorC *output = outputs[0];
  for (int i = 0; i < outputs_size; i++) {
    SetDataTypeFormat(outputs[i], input);
  }
  if (!InferFlag(inputs, inputs_size)) {
    return NNACL_INFER_INVALID;
  }
  if (input->shape_size_ != 3 || weight->shape_size_ != 3) {
    return NNACL_ERR;
  }
  SetShapeArray(output, input->shape_, input->shape_size_);
  SetShapeArray(outputs[1], H->shape_, H->shape_size_);
  SetShapeArray(outputs[2], C->shape_, C->shape_size_);
  SetShapeArray(outputs[3], weight->shape_, weight->shape_size_);
  return NNACL_OK;
}
REG_INFER(LSTMGrad, PrimType_LSTMGrad, LstmGradInferShape)
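The new inference function is pure shape propagation: output 0 mirrors the input, and outputs 1, 2, and 3 mirror H, C, and the weight tensor, i.e. each gradient takes the shape of the tensor it differentiates. A minimal standalone sketch of that propagation, with TensorC reduced to the two fields the function actually touches (the real nnacl struct has more members, and this SetShapeArray body is only an assumed equivalent of the helper in common_infer.h):

#include <cstddef>
#include <cstring>

// Reduced stand-in for nnacl's TensorC; only the fields used above.
struct TensorC {
  int shape_[8];
  size_t shape_size_;
};

// Assumed behavior of SetShapeArray: copy a shape into the target tensor.
static void SetShapeArray(TensorC *t, const int *shape, size_t size) {
  std::memcpy(t->shape_, shape, size * sizeof(int));
  t->shape_size_ = size;
}

int main() {
  TensorC input{{4, 16, 32}, 3};  // e.g. [seq_len, batch, input_size] (assumed layout)
  TensorC dx{};                   // gradient w.r.t. input
  SetShapeArray(&dx, input.shape_, input.shape_size_);
  return dx.shape_size_ == 3 ? 0 : 1;  // dx now mirrors input's shape
}

The REG_INFER line then keys the function to PrimType_LSTMGrad so the runtime's infer stage can dispatch to it by primitive type.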

View File

@@ -87,6 +87,7 @@ std::shared_ptr<session::LiteSession> CreateTrainSession(std::shared_ptr<Graph::
class TrainSupport {
 public:
  TrainSupport() { CreateTrainSessionCallbackHolder(CreateTrainSession); }
  ~TrainSupport() {}
};
TrainSupport support_train_api;
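TrainSupport is the static-registration idiom: the namespace-scope object support_train_api is constructed during static initialization, before main() runs, and its constructor installs CreateTrainSession as the session-factory callback, so simply linking these training objects into the binary enables train-session creation. A generic sketch of the same idiom (all names below are illustrative, not the MindSpore API):

#include <cstdio>

using Factory = int (*)();
static Factory g_factory = nullptr;

// Stand-in for CreateTrainSessionCallbackHolder(): stores the callback.
static void RegisterFactory(Factory f) { g_factory = f; }

static int CreateThing() { return 42; }

// Same shape as TrainSupport/support_train_api: register at static-init time.
struct Registrar {
  Registrar() { RegisterFactory(CreateThing); }
};
static Registrar g_registrar;

int main() {
  std::printf("%d\n", g_factory ? g_factory() : -1);  // prints 42
  return 0;
}

The empty destructor is inert; the class exists purely for the constructor's side effect.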

View File

@@ -35,11 +35,9 @@ constexpr int kNumShapeDim_2 = 2;
} // namespace
int PoolingGradCPUKernelFp16::ReSize() {
  CHECK_LESS_RETURN(in_tensors_.size(), DIMENSION_3D);
-  CHECK_LESS_RETURN(out_tensors_.size(), 1);
+  CHECK_LESS_RETURN(out_tensors_.size(), DIMENSION_1D);
  PoolingParameter *pool_param = reinterpret_cast<PoolingParameter *>(op_parameter_);
  CHECK_NULL_RETURN(pool_param);
-  CHECK_LESS_RETURN(in_tensors_.size(), 3);
-  CHECK_LESS_RETURN(out_tensors_.size(), 1);
  CHECK_NULL_RETURN(in_tensors_.at(0));
  CHECK_NULL_RETURN(in_tensors_.at(1));
  CHECK_NULL_RETURN(in_tensors_.at(kNumInputDim_2));
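The edit swaps the magic numbers for the named constants DIMENSION_3D and DIMENSION_1D and drops the duplicated size checks further down in the function. For readers unfamiliar with the guard macros, here is a plausible reduction of what CHECK_LESS_RETURN and CHECK_NULL_RETURN do; the real definitions log through MS_LOG and use the lite error codes, so these expansions are an assumption, not the actual headers:

#include <cstddef>

enum { RET_OK = 0, RET_ERROR = -1, RET_NULL_PTR = -2 };  // stand-in error codes

// Assumed expansion: bail out of the calling function when a size is too small.
#define CHECK_LESS_RETURN(size, min) \
  do {                               \
    if ((size) < (min)) {            \
      return RET_ERROR;              \
    }                                \
  } while (0)

// Assumed expansion: bail out when a pointer is null.
#define CHECK_NULL_RETURN(ptr) \
  do {                         \
    if ((ptr) == nullptr) {    \
      return RET_NULL_PTR;     \
    }                          \
  } while (0)

// Usage mirroring ReSize() above.
int CheckIo(std::size_t n_in, std::size_t n_out, const void *first_in) {
  CHECK_LESS_RETURN(n_in, 3u);   // DIMENSION_3D plays this role in the kernel
  CHECK_LESS_RETURN(n_out, 1u);  // DIMENSION_1D likewise
  CHECK_NULL_RETURN(first_in);
  return RET_OK;
}

int main() {
  int x = 0;
  return CheckIo(3, 1, &x);  // RET_OK
}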

View File

@@ -100,11 +100,9 @@ int ApplyMomentumCPUKernel::Run() {
int ApplyMomentumCPUKernel::Init() {
  CHECK_NULL_RETURN(apply_momentum_param_);
  CHECK_LESS_RETURN(in_tensors_.size(), DIMENSION_5D);
-  CHECK_NULL_RETURN(in_tensors_.at(0));
-  CHECK_NULL_RETURN(in_tensors_.at(1));
-  CHECK_NULL_RETURN(in_tensors_.at(2));
-  CHECK_NULL_RETURN(in_tensors_.at(3));
-  CHECK_NULL_RETURN(in_tensors_.at(4));
+  for (int i = 0; i < DIMENSION_5D; i++) {
+    CHECK_NULL_RETURN(in_tensors_.at(i));
+  }
  auto ret = OptimizerKernel::Init();
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "Failed to initialize Apply Momentum Kernel";
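Five index-by-index null checks collapse into a loop bounded by DIMENSION_5D, the same constant used in the size guard above it, so the guard and the loop can no longer drift apart. (ApplyMomentum's five inputs are conventionally weight, accumulation, learning rate, gradient, and momentum, but that ordering is not visible in this diff.) The pattern in isolation, with stand-in types rather than the lite API:

#include <cstddef>
#include <vector>

enum { RET_OK = 0, RET_ERROR = -1, RET_NULL_PTR = -2 };  // stand-in error codes
constexpr int DIMENSION_5D = 5;                          // shared by guard and loop

int CheckInputs(const std::vector<const void *> &ins) {
  if (ins.size() < static_cast<std::size_t>(DIMENSION_5D)) {
    return RET_ERROR;  // size guard, mirroring CHECK_LESS_RETURN
  }
  for (int i = 0; i < DIMENSION_5D; i++) {
    if (ins.at(i) == nullptr) {
      return RET_NULL_PTR;  // null guard, mirroring CHECK_NULL_RETURN
    }
  }
  return RET_OK;
}

int main() {
  int a = 0;
  std::vector<const void *> ins(DIMENSION_5D, &a);
  return CheckInputs(ins);  // RET_OK
}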

View File

@@ -34,8 +34,8 @@ static __attribute__((always_inline)) inline bool MS_ISNAN(float var) {
}
namespace mindspore::kernel {
enum class WeightUpdateMode { NORMAL, VIRTUAL_BATCH, ACCUMULATE_GRADS };
class OptimizerKernel : public InnerKernel {
 public:
  OptimizerKernel() = default;
@@ -107,7 +107,11 @@ class OptimizerKernel : public InnerKernel {
  lite::Tensor *grad_sum_tensor = nullptr;
  if (grad_sum_ != nullptr) {
    auto shape = in_tensors_.at(grad_idx_)->shape();
-    grad_sum_tensor = new lite::Tensor(kNumberTypeFloat, shape);
+    grad_sum_tensor = new (std::nothrow) lite::Tensor(kNumberTypeFloat, shape);
+    if (grad_sum_tensor == nullptr) {
+      MS_LOG(ERROR) << "failed to allocate grad sum tensor";
+      return nullptr;
+    }
    grad_sum_tensor->set_tensor_name(in_tensors_.at(grad_idx_)->tensor_name());
    grad_sum_tensor->set_data(static_cast<void *>(grad_sum_));
    grad_sum_tensor->set_own_data(false);
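The fix replaces a throwing new with new (std::nothrow), which yields nullptr on allocation failure instead of raising std::bad_alloc; that is what makes the added null check meaningful (and in code built without exception handling, as lite runtimes often are, a throwing new that fails would simply terminate). Note also set_own_data(false) below the allocation: the tensor wraps the existing grad_sum_ buffer without taking ownership of it. A minimal illustration of the nothrow pattern:

#include <cstdio>
#include <new>

struct Tensor {
  int id;
};

Tensor *MakeTensor() {
  Tensor *t = new (std::nothrow) Tensor{1};  // nullptr on failure, no exception
  if (t == nullptr) {
    std::fprintf(stderr, "failed to allocate tensor\n");
    return nullptr;
  }
  return t;
}

int main() {
  Tensor *t = MakeTensor();
  delete t;  // delete on nullptr is a safe no-op
  return 0;
}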