fix nullptr-dereference memory-leak unsuitable-nullptr-check incorrect-errorcode-check const-input-argument

hangangqiang 2021-08-03 09:37:55 +08:00
parent 61ac011cfc
commit 9e528fb81d
16 changed files with 65 additions and 64 deletions

View File

@ -172,11 +172,11 @@ int TensorListC2TensorList(const TensorListC *src, TensorList *dst) {
return RET_OK;
}
int GenerateMergeSwitchOutTensorC(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
int GenerateMergeSwitchOutTensorC(const std::vector<lite::Tensor *> &inputs, int outputs_size,
std::vector<TensorC *> *out_tensor_c) {
MS_ASSERT(out_tensor_c != nullptr);
int ret = RET_OK;
for (size_t i = 0; i < outputs.size(); i++) {
for (int i = 0; i < outputs_size; i++) {
out_tensor_c->push_back(nullptr);
}
return ret;
@ -199,7 +199,7 @@ int GenerateOutTensorC(const OpParameter *const parameter, const std::vector<lit
out_tensor_c->push_back(reinterpret_cast<TensorC *const>(tensor_list_c));
} else if (parameter->type_ == mindspore::schema::PrimitiveType_Merge ||
parameter->type_ == mindspore::schema::PrimitiveType_Switch) {
ret = GenerateMergeSwitchOutTensorC(inputs, outputs, out_tensor_c);
ret = GenerateMergeSwitchOutTensorC(inputs, static_cast<int>(outputs.size()), out_tensor_c);
} else {
ret = OutputTensor2TensorC(outputs, out_tensor_c);
}

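The hunk above narrows GenerateMergeSwitchOutTensorC to take only the output count, since the function never reads the outputs vector itself, and switches the loop index to a matching signed int. Below is a minimal standalone sketch of that pattern; TensorC is a placeholder here, not the real MindSpore type.

#include <vector>

struct TensorC {};  // placeholder for the real MindSpore TensorC

// Sketch: the callee only needs the number of outputs, so it takes a count
// instead of a const reference to a vector it never reads.
int GenerateMergeSwitchOutTensorCSketch(int outputs_size, std::vector<TensorC *> *out_tensor_c) {
  if (out_tensor_c == nullptr || outputs_size < 0) {
    return -1;  // stand-in for an error code such as RET_NULL_PTR
  }
  for (int i = 0; i < outputs_size; i++) {
    out_tensor_c->push_back(nullptr);  // placeholders filled in later by shape inference
  }
  return 0;  // stand-in for RET_OK
}

// The caller passes the size explicitly, mirroring the change above:
//   GenerateMergeSwitchOutTensorCSketch(static_cast<int>(outputs.size()), &out_tensor_c);
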
View File

@ -35,7 +35,7 @@ int Tensor2TensorC(const Tensor *src, TensorC *dst);
void TensorC2Tensor(const TensorC *src, Tensor *dst);
int TensorList2TensorListC(TensorList *src, TensorListC *dst);
int TensorListC2TensorList(const TensorListC *src, TensorList *dst);
int GenerateMergeSwitchOutTensorC(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
int GenerateMergeSwitchOutTensorC(const std::vector<lite::Tensor *> &inputs, int output_size,
std::vector<TensorC *> *out_tensor_c);
int GenerateInTensorC(const OpParameter *const parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, std::vector<TensorC *> *in_tensor_c);

View File

@ -26,26 +26,6 @@
namespace mindspore {
namespace lite {
std::vector<std::string> StringSplit(std::string str, const std::string &pattern) {
std::vector<std::string> result;
if (str.empty()) {
return result;
}
std::string::size_type pos;
str += pattern;
auto size = str.size();
for (size_t i = 0; i < size; i++) {
pos = str.find(pattern, i);
if (pos < size) {
std::string s = str.substr(i, pos - i);
result.push_back(s);
i = pos + pattern.size() - 1;
}
}
return result;
}
uint64_t GetTimeUs() {
#ifdef SUPPORT_MSVC
FILETIME ft;
@ -71,18 +51,22 @@ std::string RemoveSubStr(const std::string &from, const std::string &sub_str, Re
MS_LOG(ERROR) << "string is empty";
return "";
}
if (sub_str.length() > from.length()) {
MS_LOG(ERROR) << "sub_str is longer than from";
return "";
}
if (mode == PREFIX) {
if (from.substr(0, sub_str.length()) == sub_str) {
result = from.substr(sub_str.size());
result = from.substr(sub_str.length());
}
} else if (mode == SUFFIX) {
if (from.rfind(sub_str) == from.size() - sub_str.size()) {
result = from.substr(0, from.size() - sub_str.size());
if (from.rfind(sub_str) == from.length() - sub_str.length()) {
result = from.substr(0, from.length() - sub_str.length());
}
} else {
size_t index;
while ((index = result.find(sub_str)) != std::string::npos) {
result = result.erase(index, sub_str.size());
result = result.erase(index, sub_str.length());
}
}

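The new length guard in RemoveSubStr matters because both operands of from.length() - sub_str.length() are unsigned, so the SUFFIX branch would wrap around when sub_str is longer than from. A standalone sketch of the hardened helper, using a local enum in place of the project's RemoveSubStrMode:

#include <string>

enum class RemoveMode { PREFIX, SUFFIX, ANY };  // stand-in for the project's RemoveSubStrMode

std::string RemoveSubStrSketch(const std::string &from, const std::string &sub_str, RemoveMode mode) {
  if (from.empty()) {
    return "";
  }
  if (sub_str.empty()) {
    return from;  // nothing to remove (extra safeguard in this sketch)
  }
  if (sub_str.length() > from.length()) {
    return "";  // guard added by the commit: the subtraction below would otherwise wrap
  }
  std::string result = from;
  if (mode == RemoveMode::PREFIX) {
    if (from.compare(0, sub_str.length(), sub_str) == 0) {
      result = from.substr(sub_str.length());
    }
  } else if (mode == RemoveMode::SUFFIX) {
    if (from.rfind(sub_str) == from.length() - sub_str.length()) {
      result = from.substr(0, from.length() - sub_str.length());
    }
  } else {
    std::string::size_type index;
    while ((index = result.find(sub_str)) != std::string::npos) {
      result.erase(index, sub_str.length());
    }
  }
  return result;
}
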
View File

@ -37,8 +37,6 @@ enum NodeType {
const int USEC = 1000000;
const int MSEC = 1000;
std::vector<std::string> StringSplit(std::string str, const std::string &pattern);
uint64_t GetTimeUs();
bool IsSupportSDot();
@ -119,7 +117,7 @@ inline std::string GetFileName(const std::string &path) {
char delim = '/';
size_t i = path.rfind(delim, path.length());
if (i != std::string::npos) {
if (i != std::string::npos && i + 1 < path.length()) {
return (path.substr(i + 1, path.length() - i));
}

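The added i + 1 < path.length() condition keeps a path that ends in the separator from taking the substr branch and yielding an empty file name. A small sketch of the helper with that check; the fallback branch is an assumption, since the rest of the function lies outside this hunk:

#include <string>

// Sketch of a GetFileName-style helper with the trailing-separator check above.
inline std::string GetFileNameSketch(const std::string &path) {
  char delim = '/';
  size_t i = path.rfind(delim, path.length());
  if (i != std::string::npos && i + 1 < path.length()) {
    return path.substr(i + 1);
  }
  return path;  // assumed fallback: return the input unchanged when no usable separator is found
}

// GetFileNameSketch("/tmp/model.ms") -> "model.ms"
// GetFileNameSketch("/tmp/dir/")     -> "/tmp/dir/" (previously the branch returned an empty name)
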
View File

@ -208,10 +208,10 @@ void BenchmarkFlags::InitInputDataList() {
void BenchmarkFlags::InitResizeDimsList() {
std::string content = this->resize_dims_in_;
std::vector<int> shape;
auto shape_strs = StringSplit(content, std::string(DELIM_COLON));
auto shape_strs = StrSplit(content, std::string(DELIM_COLON));
for (const auto &shape_str : shape_strs) {
shape.clear();
auto dim_strs = StringSplit(shape_str, std::string(DELIM_COMMA));
auto dim_strs = StrSplit(shape_str, std::string(DELIM_COMMA));
std::cout << "Resize Dims: ";
for (const auto &dim_str : dim_strs) {
std::cout << dim_str << " ";

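This hunk (and the matching one in net_train below) switches from the removed local StringSplit to StrSplit. A hedged sketch of the colon/comma resize-dims parsing it performs, using a hypothetical SplitSketch helper because StrSplit's exact signature is not shown in this diff; numeric dimension strings are assumed:

#include <sstream>
#include <string>
#include <vector>

// Hypothetical splitter standing in for StrSplit.
std::vector<std::string> SplitSketch(const std::string &str, char delim) {
  std::vector<std::string> parts;
  std::stringstream ss(str);
  std::string item;
  while (std::getline(ss, item, delim)) {
    parts.push_back(item);
  }
  return parts;
}

// Parses a resizeDims-style string such as "1,224,224,3:1,112,112,3"
// into one shape vector per colon-separated group, mirroring InitResizeDimsList.
std::vector<std::vector<int>> ParseResizeDims(const std::string &content) {
  std::vector<std::vector<int>> shapes;
  for (const auto &shape_str : SplitSketch(content, ':')) {
    std::vector<int> shape;
    for (const auto &dim_str : SplitSketch(shape_str, ',')) {
      if (!dim_str.empty()) {
        shape.push_back(std::stoi(dim_str));  // assumes well-formed numeric dims
      }
    }
    shapes.push_back(shape);
  }
  return shapes;
}
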
View File

@ -646,10 +646,10 @@ int NetTrain::InitCallbackParameter() {
void NetTrainFlags::InitResizeDimsList() {
std::string content = this->resize_dims_in_;
std::vector<int> shape;
auto shape_strs = StringSplit(content, std::string(DELIM_COLON));
auto shape_strs = StrSplit(content, std::string(DELIM_COLON));
for (const auto &shape_str : shape_strs) {
shape.clear();
auto dim_strs = StringSplit(shape_str, std::string(DELIM_COMMA));
auto dim_strs = StrSplit(shape_str, std::string(DELIM_COMMA));
std::cout << "Resize Dims: ";
for (const auto &dim_str : dim_strs) {
std::cout << dim_str << " ";

View File

@ -24,6 +24,10 @@ Option<std::string> FlagParser::ParseFlags(int argc, const char *const *argv, bo
bool supportDuplicate) {
MS_ASSERT(argv != nullptr);
const int FLAG_PREFIX_LEN = 2;
if (argc <= 0) {
MS_LOG(ERROR) << "The arguments number is out of range";
return Option<std::string>("Failed: flags is not valid");
}
binName = GetFileName(argv[0]);
std::multimap<std::string, Option<std::string>> keyValues;

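The added argc <= 0 guard rejects an empty argument vector before argv[0] is read. A compact sketch of the same defensive check, returning a plain error string instead of the project's Option<std::string>:

#include <string>

std::string ParseFlagsSketch(int argc, const char *const *argv) {
  if (argv == nullptr) {
    return "Failed: argv is nullptr";
  }
  if (argc <= 0) {
    // Without this check, argv[0] below would read past an empty argument vector.
    return "Failed: flags is not valid";
  }
  std::string bin_name = argv[0];  // safe: argc > 0 guarantees at least one entry
  (void)bin_name;                  // the real parser would store this and walk the remaining flags
  return "";
}
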
View File

@ -280,8 +280,11 @@ void FlagParser::AddFlag(Option<T> Flags::*t, const std::string &flagName, const
ConstructFlag(t, flagName, helpInfo, &flagItem);
flagItem.isRequired = false;
flagItem.parse = [t](FlagParser *base, const std::string &value) -> Option<Nothing> {
if (base == nullptr) {
return Option<Nothing>(Nothing());
}
auto *flag = dynamic_cast<Flags *>(base);
if (base != nullptr) {
if (flag != nullptr) {
Option<T> ret = Option<std::string>(GenericParseValue<T>(value));
if (ret.IsNone()) {
return Option<Nothing>(None());

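The lambda now checks the result of the dynamic_cast instead of the raw base pointer, because a downcast can return nullptr even when base is non-null. A self-contained sketch of that pattern with simplified types:

#include <iostream>

struct Base { virtual ~Base() = default; };
struct Flags : Base { int value = 0; };
struct OtherBase : Base {};

// Sketch: the nullptr check must be on the dynamic_cast result, not its source.
bool SetFlagValue(Base *base, int value) {
  if (base == nullptr) {
    return false;  // nothing to cast
  }
  auto *flags = dynamic_cast<Flags *>(base);
  if (flags == nullptr) {
    return false;  // non-null base of another subclass; checking only `base` would miss this
  }
  flags->value = value;
  return true;
}

int main() {
  Flags flags;
  OtherBase other;
  std::cout << SetFlagValue(&flags, 7) << "\n";  // 1: cast succeeds
  std::cout << SetFlagValue(&other, 7) << "\n";  // 0: non-null base, but cast fails
  return 0;
}
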
View File

@ -211,10 +211,10 @@ int Flags::InitTrainModel() {
int Flags::InitInTensorShape() {
std::string content = this->inTensorShape;
std::vector<int64_t> shape;
auto shape_strs = StringSplit(content, std::string(";"));
auto shape_strs = StrSplit(content, std::string(";"));
for (const auto &shape_str : shape_strs) {
shape.clear();
auto string_split = StringSplit(shape_str, std::string(":"));
auto string_split = StrSplit(shape_str, std::string(":"));
auto name = string_split[0];
if (name.empty()) {
MS_LOG(ERROR) << "input tensor name is empty";
@ -223,7 +223,7 @@ int Flags::InitInTensorShape() {
if (dim_strs.empty()) {
MS_LOG(ERROR) << "input tensor dim string is empty";
}
auto dims = StringSplit(dim_strs, std::string(","));
auto dims = StrSplit(dim_strs, std::string(","));
if (dims.empty()) {
MS_LOG(ERROR) << "input tensor dim is empty";
}

View File

@ -250,19 +250,6 @@ STATUS BatchNormConvertScalePass::GetBnWeightTensors(MetaGraphT *graph, BNWeight
if (fmkType == converter::FmkType_CAFFE) {
bnWeightTensors->meanTensor = graph->allTensors.at(bnWeightTensorIdxes[CAFFE_BATCHNORM_MEAN_INDEX]).get();
bnWeightTensors->varianceTensor = graph->allTensors.at(bnWeightTensorIdxes[CAFFE_BATCHNORM_VARIANCE_INDEX]).get();
auto scaleTensor = graph->allTensors.at(bnWeightTensorIdxes[CAFFE_BATCHNORM_SCALE_INDEX]).get();
// calibrate mean and variance
float scale_factor_data = (reinterpret_cast<float *>(scaleTensor->data.data()))[0];
float scale_factor = scale_factor_data == 0 ? 0 : 1 / scale_factor_data;
auto mean_data = reinterpret_cast<float *>(bnWeightTensors->meanTensor->data.data());
auto variance_data = reinterpret_cast<float *>(bnWeightTensors->varianceTensor->data.data());
for (size_t i = 0; i < GetShapeSize(*bnWeightTensors->meanTensor); i++) {
mean_data[i] *= scale_factor;
}
for (size_t i = 0; i < GetShapeSize(*bnWeightTensors->varianceTensor); i++) {
variance_data[i] *= scale_factor;
}
} else {
bnWeightTensors->scaleTensor = graph->allTensors.at(bnWeightTensorIdxes[TF_BATCHNORM_SCALE_INDEX]).get();
bnWeightTensors->biasTensor = graph->allTensors.at(bnWeightTensorIdxes[TF_BATCHNORM_BIAS_INDEX]).get();
@ -274,11 +261,24 @@ STATUS BatchNormConvertScalePass::GetBnWeightTensors(MetaGraphT *graph, BNWeight
MS_LOG(ERROR) << "BatchNorm's mean tensor is nullptr";
return RET_ERROR;
}
if (bnWeightTensors->varianceTensor == nullptr) {
MS_LOG(ERROR) << "BatchNorm's variance tensor is nullptr";
return RET_ERROR;
}
if (fmkType == converter::FmkType_CAFFE) {
auto scaleTensor = graph->allTensors.at(bnWeightTensorIdxes[CAFFE_BATCHNORM_SCALE_INDEX]).get();
// calibrate mean and variance
float scale_factor_data = (reinterpret_cast<float *>(scaleTensor->data.data()))[0];
float scale_factor = scale_factor_data == 0 ? 0 : 1 / scale_factor_data;
auto mean_data = reinterpret_cast<float *>(bnWeightTensors->meanTensor->data.data());
auto variance_data = reinterpret_cast<float *>(bnWeightTensors->varianceTensor->data.data());
for (size_t i = 0; i < GetShapeSize(*bnWeightTensors->meanTensor); i++) {
mean_data[i] *= scale_factor;
}
for (size_t i = 0; i < GetShapeSize(*bnWeightTensors->varianceTensor); i++) {
variance_data[i] *= scale_factor;
}
}
bnChannel = bnWeightTensors->meanTensor->data.size() * sizeof(uint8_t) / sizeof(float);
if (bnChannel <= 0) {
MS_LOG(ERROR) << "BatchNorm's channel less or equal 0";
@ -289,14 +289,12 @@ STATUS BatchNormConvertScalePass::GetBnWeightTensors(MetaGraphT *graph, BNWeight
MS_LOG(ERROR) << "conv kernel num expected to be equal to variance size";
return RET_ERROR;
}
if (bnWeightTensors->scaleTensor != nullptr) {
if (bnChannel != bnWeightTensors->scaleTensor->data.size() * sizeof(uint8_t) / sizeof(float)) {
MS_LOG(ERROR) << "conv kernel num expected to be equal to scale size";
return RET_ERROR;
}
}
if (bnWeightTensors->biasTensor != nullptr) {
if (bnChannel != bnWeightTensors->biasTensor->data.size() * sizeof(uint8_t) / sizeof(float)) {
MS_LOG(ERROR) << "conv kernel num expected to be equal to bias size";

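The Caffe calibration block that scales mean and variance by the scale factor is moved below the nullptr checks on meanTensor and varianceTensor, so the raw data pointers are only touched after validation, and the now-redundant scaleTensor guard around the channel comparison is dropped. A standalone sketch of the calibration step on plain float buffers rather than the converter's MetaGraphT tensors:

#include <cstddef>

// Sketch of the Caffe BatchNorm mean/variance calibration, validating inputs
// before touching any data, as the reordered pass does.
bool CalibrateBatchNorm(float *mean, float *variance, std::size_t channel,
                        const float *scale_factor_tensor) {
  if (mean == nullptr || variance == nullptr || scale_factor_tensor == nullptr) {
    return false;
  }
  float scale_factor_data = scale_factor_tensor[0];
  float scale_factor = scale_factor_data == 0 ? 0 : 1 / scale_factor_data;
  for (std::size_t i = 0; i < channel; i++) {
    mean[i] *= scale_factor;
    variance[i] *= scale_factor;
  }
  return true;
}
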
View File

@ -196,6 +196,7 @@ STATUS OnnxInputAdjust::ReplaceTransposeWithGraphInput(const FuncGraphPtr &func_
auto shape_ptr = param_node->abstract()->GetShapeTrack()->cast<abstract::ShapePtr>();
if (shape_ptr == nullptr) {
MS_LOG(ERROR) << "shape is nullptr.";
return lite::RET_ERROR;
}
auto shape_vector = shape_ptr->shape();
if (shape_vector.size() != opt::kInputSizeFour) {

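The fix adds a return after the error log so a null shape pointer is never dereferenced a few lines later; this is the incorrect-errorcode-check part of the commit title. A tiny sketch of the log-then-return pattern with simplified stand-in types:

#include <cstdint>
#include <iostream>
#include <memory>
#include <vector>

struct ShapeSketch { std::vector<int64_t> dims; };  // stand-in for the abstract shape class
constexpr int kRetOk = 0;
constexpr int kRetError = -1;

// Sketch: after logging, the function must also return an error code;
// falling through would dereference the null pointer on the next line.
int CheckShape(const std::shared_ptr<ShapeSketch> &shape_ptr) {
  if (shape_ptr == nullptr) {
    std::cerr << "shape is nullptr." << std::endl;
    return kRetError;  // this return is what the hunk above adds
  }
  return shape_ptr->dims.size() == 4 ? kRetOk : kRetError;
}
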
View File

@ -1454,7 +1454,10 @@ KernelCallBack PostTrainingQuantizer::GetBeforeCallBack(bool int8_op) {
auto tensor = beforeInputs[0];
MS_ASSERT(tensor != nullptr);
auto lite_tensor = dynamic_cast<mindspore::lite::Tensor *>(tensor);
MS_ASSERT(lite_tensor != nullptr);
if (lite_tensor == nullptr) {
MS_LOG(ERROR) << "Before inputs is not a lite::Tensor";
return false;
}
if (tensor->data_type() != kNumberTypeInt8) {
MS_LOG(ERROR) << "unexpected tensor type: " << tensor->data_type();
return false;
@ -1513,7 +1516,10 @@ KernelCallBack PostTrainingQuantizer::GetInt8AfterCallBack() {
auto tensor = afterOutputs[0];
MS_ASSERT(tensor != nullptr);
auto lite_tensor = dynamic_cast<mindspore::lite::Tensor *>(tensor);
MS_ASSERT(lite_tensor != nullptr);
if (lite_tensor == nullptr) {
MS_LOG(ERROR) << "Before inputs is not a lite::Tensor";
return false;
}
if (tensor->data_type() != kNumberTypeInt8) {
MS_LOG(ERROR) << "unexpected tensor type: " << tensor->data_type();
return false;

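MS_ASSERT compiles away in release builds, so both callbacks now do a runtime nullptr check on the dynamic_cast result and abort by returning false. A sketch of a callback that validates its tensor argument, assuming a simplified callback signature and tensor types:

#include <functional>
#include <iostream>
#include <vector>

struct TensorBase { virtual ~TensorBase() = default; };
struct LiteTensor : TensorBase { int data_type = 0; };

// A KernelCallBack-style hook; returning false aborts the run instead of
// dereferencing a failed cast (an assert-only check disappears in release builds).
using CallbackSketch = std::function<bool(const std::vector<TensorBase *> &)>;

CallbackSketch MakeBeforeCallback(int expected_type) {
  return [expected_type](const std::vector<TensorBase *> &inputs) -> bool {
    if (inputs.empty() || inputs[0] == nullptr) {
      return false;
    }
    auto *lite_tensor = dynamic_cast<LiteTensor *>(inputs[0]);
    if (lite_tensor == nullptr) {
      std::cerr << "input is not a lite tensor" << std::endl;
      return false;  // runtime check replacing the removed MS_ASSERT
    }
    return lite_tensor->data_type == expected_type;
  };
}
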
View File

@ -41,14 +41,13 @@ STATUS QuantCast::Run(const FuncGraphPtr &graph) {
auto cnodes = graph->GetOrderedCnodes();
for (auto &cnode : cnodes) {
auto primitive_c = GetValueNode<std::shared_ptr<ops::PrimitiveC>>(cnode->input(0));
auto primitive_quant_param_holder = GetCNodeQuantHolder(primitive_c);
MS_ASSERT(primitive_quant_param_holder != nullptr);
auto curnode_quant_type = schema::QuantType_QUANT_NONE;
if (primitive_c == nullptr) {
MS_LOG(WARNING) << "primitive_c is nullptr: " << cnode->fullname_with_scope();
} else {
curnode_quant_type = primitive_quant_param_holder->quant_type();
continue;
}
auto primitive_quant_param_holder = GetCNodeQuantHolder(primitive_c);
MS_ASSERT(primitive_quant_param_holder != nullptr);
auto curnode_quant_type = primitive_quant_param_holder->quant_type();
if (primitive_c->name() == ops::kNameGather) {
continue;
}

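The quant-param holder is now fetched only after primitive_c has been checked, and a null primitive skips the node with continue instead of reading quant_type() through a holder derived from a null primitive. A small sketch of the reordered loop, with simplified stand-ins for the node, primitive, and holder types:

#include <iostream>
#include <memory>
#include <string>
#include <vector>

struct QuantHolder { int quant_type = 0; };
struct Primitive { std::shared_ptr<QuantHolder> holder = std::make_shared<QuantHolder>(); };
struct Node {
  std::string scope;
  std::shared_ptr<Primitive> primitive;  // may be null
};

// Sketch: skip nodes with a null primitive before the holder (derived from the
// primitive) is ever looked up or read.
void RunQuantCastSketch(const std::vector<Node> &nodes) {
  for (const auto &node : nodes) {
    if (node.primitive == nullptr) {
      std::cerr << "primitive is nullptr: " << node.scope << std::endl;
      continue;  // previously the loop kept going and read through the holder
    }
    auto holder = node.primitive->holder;
    if (holder == nullptr) {
      continue;
    }
    int quant_type = holder->quant_type;
    (void)quant_type;  // ... insert quant cast nodes based on quant_type ...
  }
}
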
View File

@ -126,7 +126,7 @@ int Cropper::GetModelOps() {
int Cropper::GetModelFiles() {
if (!this->flags_->model_file_.empty()) {
auto files = StringSplit(this->flags_->model_file_, std::string(kDelimComma));
auto files = StrSplit(this->flags_->model_file_, std::string(kDelimComma));
for (const auto &file : files) {
if (ValidFileSuffix(file, "ms") != RET_OK) {
return RET_INPUT_PARAM_INVALID;
@ -177,7 +177,7 @@ int Cropper::GetOpMatchFiles() {
while (!in_file.eof()) {
in_file.getline(buf, kBufSize);
std::string buf_str = buf;
auto mapping = StringSplit(buf_str, kDelimComma);
auto mapping = StrSplit(buf_str, kDelimComma);
if (!mapping.empty()) {
std::string primitive = mapping.at(0);
std::string type = mapping.at(1);

View File

@ -55,6 +55,10 @@ bool IsCommonConvNode(const BaseRef &n) {
}
STATUS GenNewConvBias(const ParameterPtr &down_bias_node, const ParameterPtr &down_weight_node,
const ParameterPtr &up_bias_node, const ParameterPtr &new_bias_node) {
if (down_weight_node == nullptr || up_bias_node == nullptr || new_bias_node == nullptr) {
MS_LOG(ERROR) << "Input down_weight_node or up_bias_node or new_bias_node is nullptr";
return RET_FAILED;
}
float *down_bias_data = nullptr;
if (down_bias_node != nullptr) {
auto down_bias_param = std::dynamic_pointer_cast<tensor::Tensor>(down_bias_node->default_param());

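The new guard rejects null parameter nodes before their default params are read, while down_bias_node stays optional and keeps its separate check. A tiny sketch of validating the required pointers up front, on simplified parameter objects rather than the real ParameterPtr/tensor types:

#include <iostream>
#include <memory>
#include <vector>

struct ParamSketch { std::vector<float> data; };  // stand-in for a parameter's default tensor

// Sketch of GenNewConvBias-style validation: three nodes are required, while
// the down-convolution bias remains optional and is only read when present.
int GenNewBiasSketch(const std::shared_ptr<ParamSketch> &down_bias,    // optional
                     const std::shared_ptr<ParamSketch> &down_weight,  // required
                     const std::shared_ptr<ParamSketch> &up_bias,      // required
                     const std::shared_ptr<ParamSketch> &new_bias) {   // required
  if (down_weight == nullptr || up_bias == nullptr || new_bias == nullptr) {
    std::cerr << "required parameter node is nullptr" << std::endl;
    return -1;  // stand-in for RET_FAILED
  }
  const float *down_bias_data = nullptr;
  if (down_bias != nullptr) {
    down_bias_data = down_bias->data.data();  // only dereferenced when present
  }
  (void)down_bias_data;  // ... fuse the biases into new_bias ...
  return 0;
}
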
View File

@ -401,6 +401,10 @@ bool SlicePreposePass::SiblingsAreSameSlice(const FuncGraphPtr &graph, const Nod
auto first_slice_cnode = slices.front();
auto first_slice_node = GetSlice(first_slice_cnode);
if (first_slice_node == nullptr) {
MS_LOG(ERROR) << "GetSlice return nullptr";
return false;
}
auto first_axes = first_slice_node->get_axes();
auto first_begin = GetSliceBeginAndSize(first_slice_cnode, SliceBeginIndex);
auto first_size = GetSliceBeginAndSize(first_slice_cnode, SliceSizeIndex);