!7996 [lite] fix cppcheck

Merge pull request !7996 from 徐安越/master
mindspore-ci-bot 2020-10-30 09:41:51 +08:00 committed by Gitee
commit 2458c9b322
43 changed files with 81 additions and 15 deletions

View File

@@ -90,6 +90,10 @@ Model *ImportFromBuffer(const char *model_buf, size_t size, bool take_buf) {
  if (take_buf) {
    model->buf = const_cast<char *>(model_buf);
  } else {
+    if (size == 0) {
+      MS_LOG(ERROR) << "malloc size is equal to 0";
+      return nullptr;
+    }
    model->buf = reinterpret_cast<char *>(malloc(size));
    if (model->buf == nullptr) {
      MS_LOG(ERROR) << "new inner model buf fail!";

@@ -112,7 +112,7 @@ void *WorkspacePool::AllocWorkSpaceMem(size_t size) {
  return alloc.second;
}
-void WorkspacePool::FreeWorkSpaceMem(void *ptr) {
+void WorkspacePool::FreeWorkSpaceMem(const void *ptr) {
  if (ptr == nullptr) {
    return;
  }

@@ -33,7 +33,7 @@ class WorkspacePool {
  WorkspacePool &operator=(const WorkspacePool &) = delete;
  static WorkspacePool *GetInstance();
  void *AllocWorkSpaceMem(size_t size);
-  void FreeWorkSpaceMem(void *ptr);
+  void FreeWorkSpaceMem(const void *ptr);
 private:
  std::vector<std::pair<size_t, void *>> allocList{};
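
The matching .cc change above is safe because the pool only compares the address against its bookkeeping; nothing is read or written through the pointer, which is exactly what the const qualifier advertises. A sketch under that assumption (the real pool presumably recycles the block rather than erasing it):

```cpp
#include <algorithm>
#include <cstddef>
#include <utility>
#include <vector>

std::vector<std::pair<size_t, void *>> allocList;  // as declared in the header

void FreeWorkSpaceMem(const void *ptr) {
  if (ptr == nullptr) {
    return;
  }
  // Address-only comparison, so const void * is the honest signature.
  auto it = std::find_if(allocList.begin(), allocList.end(),
                         [ptr](const std::pair<size_t, void *> &a) { return a.second == ptr; });
  if (it != allocList.end()) {
    allocList.erase(it);  // simplified; a real pool would move it to a free list
  }
}
```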

@@ -27,6 +27,7 @@ namespace mindspore::lite {
class Scheduler {
 public:
  explicit Scheduler(const InnerContext *ctx) { context_ = const_cast<InnerContext *>(ctx); }
+  ~Scheduler() = default;
  int Schedule(const lite::Model *model, std::vector<Tensor *> *tensors, std::vector<kernel::LiteKernel *> *kernels);
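
This hunk and the many Caffe parser hunks below apply the same cppcheck style finding: a class that declares a constructor should also declare its destructor, even if only as `= default`, to record that the implicit cleanup is intended. The shape of the change, with members simplified:

```cpp
// Stand-in for Scheduler and the Caffe*Parser classes patched below.
class Example {
 public:
  explicit Example(int *ctx) : ctx_(ctx) {}
  ~Example() = default;  // the one-line fix: default cleanup is now explicit
 private:
  int *ctx_ = nullptr;  // non-owning pointer, so the default destructor is correct
};
```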

@@ -199,12 +199,22 @@ int CpuFp16SubGraph::PostProcess() {
    auto ret = tensor->FreeData();
    if (RET_OK != ret) {
      MS_LOG(ERROR) << "free data failed";
+      if (this->context_ != nullptr && this->context_->allocator != nullptr) {
+        this->context_->allocator->Free(float16_data);
+      } else {
+        free(float16_data);
+      }
      return RET_ERROR;
    }
    tensor->set_data_type(TypeId::kNumberTypeFloat32);
    ret = tensor->MallocData();
    if (RET_OK != ret) {
      MS_LOG(ERROR) << "malloc data failed";
+      if (this->context_ != nullptr && this->context_->allocator != nullptr) {
+        this->context_->allocator->Free(float16_data);
+      } else {
+        free(float16_data);
+      }
      return RET_ERROR;
    }
    fp16_to_fp32_cast_func(float16_data, tensor->data_c(), tensor->ElementsNum());
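
Both error paths now repeat the allocator-or-free choice for float16_data. A scope guard would express the cleanup once; this is an alternative sketch, not what the patch does, with a simplified Allocator stand-in:

```cpp
#include <cstdlib>
#include <functional>
#include <memory>

struct Allocator {  // stand-in; the real allocator comes from the context
  void Free(void *ptr) { free(ptr); }
};

using Guard = std::unique_ptr<void, std::function<void(void *)>>;

// Returns an owner that frees `p` through the allocator when present,
// otherwise with free(), mirroring the branches in the hunk above.
Guard GuardBuffer(void *p, Allocator *alloc) {
  return Guard(p, [alloc](void *q) {
    if (alloc != nullptr) {
      alloc->Free(q);
    } else {
      free(q);
    }
  });
}
// Usage: auto guard = GuardBuffer(float16_data, allocator);
// every early return then releases the buffer; call guard.release() on success.
```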

@@ -427,7 +427,9 @@ int Benchmark::RunBenchmark() {
      return ret;
    }
  }
-  model->Free();
+  if (model != nullptr) {
+    model->Free();
+  }
  ms_inputs_ = session_->GetInputs();
  auto end_prepare_time = GetTimeUs();
  MS_LOG(INFO) << "PrepareTime = " << (end_prepare_time - start_prepare_time) / 1000 << " ms";
@@ -446,6 +448,7 @@ int Benchmark::RunBenchmark() {
    data.second->shape.clear();
    data.second->data.clear();
    delete data.second;
+    data.second = nullptr;
  }
  benchmark_data_.clear();
  if (status != 0) {
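
Nulling data.second right after delete is the standard dangling-pointer fix: the map keeps storing the old address until clear() runs, and any accidental use in between would touch freed memory. In isolation (CheckTensor and the container type are simplified stand-ins):

```cpp
#include <map>
#include <string>

struct CheckTensor { /* shape and data omitted */ };

int main() {
  std::map<std::string, CheckTensor *> benchmark_data_;
  benchmark_data_["output"] = new CheckTensor;
  for (auto &data : benchmark_data_) {
    delete data.second;
    data.second = nullptr;  // the entry no longer dangles while the map lives on
  }
  benchmark_data_.clear();
  return 0;
}
```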

@@ -137,7 +137,7 @@ STATUS FormatTransPass::DoNodeInoutFormatTrans(schema::MetaGraphT *graph) {
    MS_LOG(ERROR) << "Op should have " << kOutputNum << " output tensor";
    return RET_ERROR;
  }
-  STATUS status;
+  STATUS status = RET_OK;
#ifdef SUPPORT_TRAIN
  if (IsContain(GetNhwcAllInputOpList(), GetCNodeTType(**iter))) {
    int idx_num = node->inputIndex.size();
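
cppcheck flags the old `STATUS status;` because, depending on the SUPPORT_TRAIN branch and the runtime path taken inside it, status could be read before anything assigned it. A compressed, self-contained illustration with stand-in types:

```cpp
enum STATUS { RET_OK = 0, RET_ERROR = 1 };  // stand-in for the real type

STATUS Transform() {
  STATUS status = RET_OK;  // was: STATUS status;  (cppcheck: uninitialized variable)
#ifdef SUPPORT_TRAIN
  status = RET_ERROR;      // only some build/runtime paths assign it
#endif
  return status;           // without the initializer this read could be undefined
}

int main() { return Transform(); }
```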

@@ -143,6 +143,7 @@ STATUS TransOpInsertPass::ChangeOpAxis(schema::MetaGraphT *graph, const std::uni
  if (type == PrimitiveType_Concat) {
    auto origin_axis = node->primitive->value.AsConcat()->axis;
    auto axis_map = GetNc2NhAxisMap();
+    MS_ASSERT(node->primitive->value.AsConcat() != nullptr);
    node->primitive->value.AsConcat()->axis = axis_map[origin_axis];
  }
  if (type == PrimitiveType_StridedSlice) {
@@ -157,12 +158,14 @@ STATUS TransOpInsertPass::ChangeOpAxis(schema::MetaGraphT *graph, const std::uni
  if (type == PrimitiveType_Split) {
    auto origin_axis = node->primitive->value.AsSplit()->splitDim;
    auto axis_map = GetNc2NhAxisMap();
+    MS_ASSERT(node->primitive->value.AsSplit() != nullptr);
    node->primitive->value.AsSplit()->splitDim = axis_map[origin_axis];
  }
  if (type == PrimitiveType_Crop) {
    auto origin_axis = node->primitive->value.AsCrop()->axis;
    auto offsets = node->primitive->value.AsCrop()->offsets;
    auto axis_map = GetNc2NhAxisMap();
+    MS_ASSERT(node->primitive->value.AsCrop() != nullptr);
    node->primitive->value.AsCrop()->axis = axis_map[origin_axis];
    // nchw->nhwc,offsets need pad 0;
    if (axis_map[origin_axis] == 0) {
@@ -178,10 +181,15 @@ STATUS TransOpInsertPass::ChangeOpAxis(schema::MetaGraphT *graph, const std::uni
      MS_LOG(ERROR) << "Crop error";
      return RET_ERROR;
    }
+    MS_ASSERT(node->primitive->value.AsCrop() != nullptr);
    node->primitive->value.AsCrop()->offsets = offsets;
  }
  if (type == PrimitiveType_Slice) {
    auto attr = node->primitive->value.AsSlice();
+    if (attr == nullptr) {
+      MS_LOG(ERROR) << "attr is nullptr";
+      return RET_NULL_PTR;
+    }
    auto origin_begin = attr->begin;
    attr->begin = {origin_begin[NCHW_N], origin_begin[NCHW_H], origin_begin[NCHW_W], origin_begin[NCHW_C]};
    auto origin_end = attr->axes;
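
Two guard flavors appear in this hunk. MS_ASSERT documents an invariant and is normally compiled out of release builds, which is presumably why the Slice branch gets a real if/return instead; note also that the asserts land after an earlier dereference of the same accessor, so they document the invariant rather than fully protect the path. A simplified stand-in for the macro, assuming the usual debug-only behavior:

```cpp
#include <cassert>

// Simplified stand-in; the real MS_ASSERT lives in MindSpore's headers.
#ifndef NDEBUG
#define MS_ASSERT(expr) assert(expr)
#else
#define MS_ASSERT(expr) ((void)0)  // release builds: no check at all
#endif
```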

@@ -26,7 +26,7 @@ namespace lite {
class CaffeArgMaxParser : public CaffeNodeParser {
 public:
  CaffeArgMaxParser() : CaffeNodeParser("argmax") {}
+  ~CaffeArgMaxParser() = default;
  STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
               std::vector<schema::TensorT *> *weightVec) override;
};

@@ -26,6 +26,7 @@ namespace lite {
class CaffeConcatParser : public CaffeNodeParser {
 public:
  CaffeConcatParser() : CaffeNodeParser("concat") {}
+  ~CaffeConcatParser() = default;
  STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
               std::vector<schema::TensorT *> *weightVec) override;

@@ -27,6 +27,7 @@ namespace lite {
class CaffeConvolutionParser : public CaffeNodeParser {
 public:
  CaffeConvolutionParser() : CaffeNodeParser("convolution") {}
+  ~CaffeConvolutionParser() = default;
  STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
               std::vector<schema::TensorT *> *weightVec) override;

@@ -26,6 +26,7 @@ namespace lite {
class CaffeCropParser : public CaffeNodeParser {
 public:
  CaffeCropParser() : CaffeNodeParser("crop") {}
+  ~CaffeCropParser() = default;
  STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
               std::vector<schema::TensorT *> *weightVec) override;

@@ -27,6 +27,7 @@ namespace lite {
class CaffeDeconvolutionParser : public CaffeNodeParser {
 public:
  CaffeDeconvolutionParser() : CaffeNodeParser("deconvolution") {}
+  ~CaffeDeconvolutionParser() = default;
  STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
               std::vector<schema::TensorT *> *weightVec) override;

@@ -26,6 +26,7 @@ namespace lite {
class CaffeEltwiseParser : public CaffeNodeParser {
 public:
  CaffeEltwiseParser() : CaffeNodeParser("eltwise") {}
+  ~CaffeEltwiseParser() = default;
  STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
               std::vector<schema::TensorT *> *weightVec) override;

@@ -26,6 +26,7 @@ namespace lite {
class CaffeEluParser : public CaffeNodeParser {
 public:
  CaffeEluParser() : CaffeNodeParser("elu") {}
+  ~CaffeEluParser() = default;
  STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
               std::vector<schema::TensorT *> *weightVec) override;

@@ -26,6 +26,7 @@ namespace lite {
class CaffeExpParser : public CaffeNodeParser {
 public:
  CaffeExpParser() : CaffeNodeParser("exp") {}
+  ~CaffeExpParser() = default;
  STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
               std::vector<schema::TensorT *> *weightVec) override;

@@ -26,6 +26,7 @@ namespace lite {
class CaffeFlattenParser : public CaffeNodeParser {
 public:
  CaffeFlattenParser() : CaffeNodeParser("flatten") {}
+  ~CaffeFlattenParser() = default;
  STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
               std::vector<schema::TensorT *> *weightVec) override;

@@ -26,6 +26,7 @@ namespace lite {
class CaffeInnerProductParser : public CaffeNodeParser {
 public:
  CaffeInnerProductParser() : CaffeNodeParser("innerproduct") {}
+  ~CaffeInnerProductParser() = default;
  STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
               std::vector<schema::TensorT *> *weightVec) override;

@@ -29,6 +29,7 @@ namespace lite {
class CaffeInspector {
 public:
  CaffeInspector() = default;
+  ~CaffeInspector() = default;
  STATUS InspectModel(const caffe::NetParameter &proto);
  STATUS ParseInput();

@@ -26,6 +26,7 @@ namespace lite {
class CaffeInterpParser : public CaffeNodeParser {
 public:
  CaffeInterpParser() : CaffeNodeParser("Interp") {}
+  ~CaffeInterpParser() = default;
  STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
               std::vector<schema::TensorT *> *weightVec) override;

@@ -62,8 +62,11 @@ schema::TensorT *ConvertWeight(const caffe::BlobProto &proto) {
      buf[i] = proto.double_data(i);
    }
    weight->data.resize(count * sizeof(float));
-    ::memcpy_s(weight->data.data(), count * sizeof(float), reinterpret_cast<uint8_t *>(buf.get()),
-               count * sizeof(float));
+    if (::memcpy_s(weight->data.data(), count * sizeof(float), reinterpret_cast<uint8_t *>(buf.get()),
+                   count * sizeof(float)) != EOK) {
+      MS_LOG(ERROR) << "memcpy failed";
+      return nullptr;
+    }
  } else {
    // datatype float
    if (count != proto.data_size()) {
@@ -73,7 +76,10 @@ schema::TensorT *ConvertWeight(const caffe::BlobProto &proto) {
    }
    weight->data.resize(count * sizeof(float));
    const float *data_ptr = proto.data().data();
-    ::memcpy_s(weight->data.data(), count * sizeof(float), (uint8_t *)data_ptr, count * sizeof(float));
+    if (::memcpy_s(weight->data.data(), count * sizeof(float), (uint8_t *)data_ptr, count * sizeof(float)) != EOK) {
+      MS_LOG(ERROR) << "memcpy failed";
+      return nullptr;
+    }
  }
  weight->refCount = 1;
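
memcpy_s comes from the bounded securec library and reports failure through its return value instead of aborting, so an unchecked call silently ignores truncation or bad arguments; that is the cppcheck finding here. A checked-copy helper in the same spirit (SafeCopy is hypothetical, and the include path is an assumption):

```cpp
#include "securec.h"  // assumed header for memcpy_s and EOK

// Wraps the bounded copy so callers cannot ignore the result.
static bool SafeCopy(void *dst, size_t dst_size, const void *src, size_t n) {
  if (::memcpy_s(dst, dst_size, src, n) != EOK) {
    return false;  // EOK (0) is securec's success code; the copy did not happen
  }
  return true;
}
```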

@@ -30,7 +30,7 @@ namespace mindspore {
namespace lite {
class CaffeNodeParser {
 public:
-  explicit CaffeNodeParser(const std::string &nodeName) : name(nodeName) {}
+  explicit CaffeNodeParser(const std::string nodeName) : name(nodeName) {}
  virtual ~CaffeNodeParser() {}
@@ -38,7 +38,7 @@ class CaffeNodeParser {
               std::vector<schema::TensorT *> *weightVec) = 0;
 protected:
-  const std::string &name;
+  const std::string name;
};
schema::TensorT *ConvertWeight(const caffe::BlobProto &proto);
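
This is the one behavioral fix in the batch rather than a style cleanup. Every parser subclass passes a string literal, which materializes a temporary std::string; the old `const std::string &name` member bound to that temporary and dangled as soon as the constructor call ended. Storing by value copies the name instead. A minimal reproduction:

```cpp
#include <iostream>
#include <string>

struct Bad {
  explicit Bad(const std::string &n) : name(n) {}
  const std::string &name;  // binds to the caller's temporary and dangles
};

struct Good {
  explicit Good(const std::string n) : name(n) {}
  const std::string name;  // owns a copy, as in the patched class
};

int main() {
  Bad b("argmax");   // b.name now refers to a destroyed temporary
  (void)b;           // reading b.name would be undefined behavior
  Good g("argmax");
  std::cout << g.name << '\n';  // safe
  return 0;
}
```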

@@ -41,6 +41,7 @@ class CaffeNodeRegistrar {
  CaffeNodeRegistrar(const std::string &name, CaffeNodeParser *parser) {
    CaffeNodeParserRegistry::GetInstance()->parsers[name] = parser;
  }
+  ~CaffeNodeRegistrar() = default;
};
}  // namespace mindspore::lite

@@ -26,6 +26,7 @@ namespace lite {
class CaffePermuteParser : public CaffeNodeParser {
 public:
  CaffePermuteParser() : CaffeNodeParser("Permute") {}
+  ~CaffePermuteParser() = default;
  STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
               std::vector<schema::TensorT *> *weightVec) override;

@@ -26,6 +26,7 @@ namespace lite {
class CaffePoolingParser : public CaffeNodeParser {
 public:
  CaffePoolingParser() : CaffeNodeParser("pooling") {}
+  ~CaffePoolingParser() = default;
  STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
               std::vector<schema::TensorT *> *weightVec) override;

@@ -26,6 +26,7 @@ namespace lite {
class CaffePowerParser : public CaffeNodeParser {
 public:
  CaffePowerParser() : CaffeNodeParser("power") {}
+  ~CaffePowerParser() = default;
  STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
               std::vector<schema::TensorT *> *weightVec) override;

@@ -26,6 +26,7 @@ namespace lite {
class CaffePReluParser : public CaffeNodeParser {
 public:
  CaffePReluParser() : CaffeNodeParser("pRelu") {}
+  ~CaffePReluParser() = default;
  STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
               std::vector<schema::TensorT *> *weightVec) override;

@@ -25,15 +25,13 @@ namespace mindspore {
namespace lite {
class CaffeReduceParser : public CaffeNodeParser {
 public:
-  CaffeReduceParser() : CaffeNodeParser("reduce") {}
+  CaffeReduceParser() : CaffeNodeParser("reduce") {}
+  ~CaffeReduceParser() = default;
-  STATUS Parse(const caffe::LayerParameter &proto,
-               const caffe::LayerParameter &weight,
-               schema::CNodeT *op,
+  STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
               std::vector<schema::TensorT *> *weightVec) override;
};
}  // namespace lite
}  // namespace mindspore
#endif  // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_REDUCE_PARSER_H

@@ -25,6 +25,7 @@ namespace lite {
class CaffeRelu6Parser : public CaffeNodeParser {
 public:
  CaffeRelu6Parser() : CaffeNodeParser("relu6") {}
+  ~CaffeRelu6Parser() = default;
  STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
               std::vector<schema::TensorT *> *weightVec) override;

@@ -26,6 +26,7 @@ namespace lite {
class CaffeReshapeParser : public CaffeNodeParser {
 public:
  CaffeReshapeParser() : CaffeNodeParser("reshape") {}
+  ~CaffeReshapeParser() = default;
  STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
               std::vector<schema::TensorT *> *weightVec) override;

@@ -26,6 +26,7 @@ namespace lite {
class CaffeScaleParser : public CaffeNodeParser {
 public:
  CaffeScaleParser() : CaffeNodeParser("scale") {}
+  ~CaffeScaleParser() = default;
  STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
               std::vector<schema::TensorT *> *weightVec) override;

@@ -26,6 +26,7 @@ namespace lite {
class CaffeSigmoidParser : public CaffeNodeParser {
 public:
  CaffeSigmoidParser() : CaffeNodeParser("sigmoid") {}
+  ~CaffeSigmoidParser() = default;
  STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
               std::vector<schema::TensorT *> *weightVec) override;

@@ -26,6 +26,7 @@ namespace lite {
class CaffeSliceParser : public CaffeNodeParser {
 public:
  CaffeSliceParser() : CaffeNodeParser("slice") {}
+  ~CaffeSliceParser() = default;
  STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
               std::vector<schema::TensorT *> *weightVec) override;

@@ -26,6 +26,7 @@ namespace lite {
class CaffeSoftmaxParser : public CaffeNodeParser {
 public:
  CaffeSoftmaxParser() : CaffeNodeParser("softmax") {}
+  ~CaffeSoftmaxParser() = default;
  STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
               std::vector<schema::TensorT *> *weightVec) override;

@@ -26,6 +26,7 @@ namespace lite {
class CaffeTanhParser : public CaffeNodeParser {
 public:
  CaffeTanhParser() : CaffeNodeParser("tanh") {}
+  ~CaffeTanhParser() = default;
  STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
               std::vector<schema::TensorT *> *weightVec) override;

@@ -26,6 +26,7 @@ namespace lite {
class CaffeTileParser : public CaffeNodeParser {
 public:
  CaffeTileParser() : CaffeNodeParser("tile") {}
+  ~CaffeTileParser() = default;
  STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
               std::vector<schema::TensorT *> *weightVec) override;

@@ -57,6 +57,7 @@ class PostTrainingQuantizer : public Quantizer {
 public:
  PostTrainingQuantizer(FuncGraphPtr graph, std::string path, int bit_num, TypeId target_type = kNumberTypeInt8,
                        bool per_channel = true);
+  ~PostTrainingQuantizer() = default;
  STATUS DoQuantize(FuncGraphPtr func_graph) override;

@@ -27,6 +27,7 @@ namespace mindspore::lite::quant {
class QuantCast {
 public:
  QuantCast() = default;
+  ~QuantCast() = default;
  STATUS Run(FuncGraphPtr graph);
  void SetInputDataDType(TypeId dataType) { this->inputDataDType = dataType; }

@@ -80,6 +80,7 @@ STATUS GetRightMatmulInputParamter(const CNodePtr &stack_node, const ParameterPt
    auto tensor_addr = GetInputAddr(stack_node->input(i), 2);
    if (tensor_addr == nullptr) {
      MS_LOG(ERROR) << "input tensor addr nullptr";
+      delete[] new_tensor_data;
      return RET_ERROR;
    }
    if (EOK != memcpy_s(new_tensor_data + (i - 1) * tensor_size, tensor_size, tensor_addr, tensor_size)) {
@@ -155,6 +156,7 @@ const AnfNodePtr BatchMatMulFusion::Process(const FuncGraphPtr &func_graph, cons
    rmatmul_quant_params.pop_back();
    // no bias quantParams
    rmatmul_quant_params.emplace_back(jointed_quant_params);
+    MS_ASSERT(matmul_cvalue != nullptr);
    matmul_cvalue->SetInputQuantParams(rmatmul_quant_params);
    matmul_cvalue->SetOutputQuantParams(fc_prim->GetOutputQuantParams());
    auto matmul_value_node = NewValueNode(std::shared_ptr<lite::PrimitiveC>(matmul_cvalue));
@@ -169,6 +171,7 @@ const AnfNodePtr BatchMatMulFusion::Process(const FuncGraphPtr &func_graph, cons
      return node;
    }
    auto prim = GetValueNode<std::shared_ptr<lite::PrimitiveC>>(matmul_value_node);
+    MS_ASSERT(prim->GetPrimitiveT()->value.AsMatMul() != nullptr);
    prim->GetPrimitiveT()->value.AsMatMul()->transposeB = true;
    matmul_inputs.push_back(rmatmul_paramter);
  } else {
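
The added delete[] plugs a leak on the early-return path, and it must be delete[] to match the new[] allocation. A shape that would make the per-branch cleanup unnecessary (a sketch, not the patch's approach):

```cpp
#include <memory>

// unique_ptr<float[]> releases the staging buffer on every early return.
bool BuildJointTensor(size_t total_size) {
  std::unique_ptr<float[]> new_tensor_data(new float[total_size]);
  // ... copy each right-matmul input into new_tensor_data.get();
  // on any failure simply `return false;` and nothing leaks ...
  return true;
}
```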

@@ -235,6 +235,7 @@ const AnfNodePtr ConstFoldPass::Process(const FuncGraphPtr &func_graph, const An
  }
  lite_primitive->InferShape(input_tensors, output_tensors);
  auto primitive = lite_primitive.get();
+  MS_ASSERT(primitive != nullptr);
  auto parameter =
      lite::PopulateRegistry::GetInstance()->getParameterCreator(schema::PrimitiveType(primitive->Type()))(primitive);

@@ -46,7 +46,10 @@ bool ClipConvertActivationPass::Run(const FuncGraphPtr &graph) {
    MS_ASSERT(clip_cnode->inputs().size() > kClipMaxIndex);
    auto primitive_c = GetValueNode<std::shared_ptr<PrimitiveC>>(clip_cnode->input(0));
    MS_ASSERT(primitive_c != nullptr);
    auto primT = primitive_c->GetPrimitiveT();
+    MS_ASSERT(primT != nullptr);
+    MS_ASSERT(primT->value.AsClip() != nullptr);
    float max = primT->value.AsClip()->max;
    float min = primT->value.AsClip()->min;
    if ((min == -1) && (max == -1)) {

@@ -22,6 +22,7 @@ namespace mindspore::lite {
class SchemaGenFlags : public virtual FlagParser {
 public:
  SchemaGenFlags() { AddFlag(&SchemaGenFlags::export_path_, "exportPath", "schema define export path", "."); }
+  ~SchemaGenFlags() = default;
 public:
  std::string export_path_ = ".";
@@ -30,6 +31,7 @@ class SchemaGenFlags : public virtual FlagParser {
class SchemaGen {
 public:
  explicit SchemaGen(SchemaGenFlags *flags) : flags_(flags) {}
+  ~SchemaGen() = default;
  int Init();
 private:

@@ -21,6 +21,7 @@ namespace mindspore::lite::ops {
class SchemaTypeRegister {
 public:
  explicit SchemaTypeRegister(GetSchemaDef func) { SchemaRegisterImpl::Instance()->TypePush(func); }
+  ~SchemaTypeRegister() = default;
};
}  // namespace mindspore::lite::ops