!8108 Modify files for static checks

Merge pull request !8108 from lyvette/check_master
This commit is contained in:
mindspore-ci-bot 2020-11-02 19:54:54 +08:00 committed by Gitee
commit ea001d330f
9 changed files with 70 additions and 58 deletions

View File

@ -16,7 +16,7 @@
#include "src/common/file_utils.h"
#include <fcntl.h>
#include <stdlib.h>
#include <cstdlib>
#include <climits>
#include <cmath>
#include "securec/include/securec.h"
@ -107,7 +107,7 @@ int CompareOutputData(const float *output_data, size_t output_size, const float
return 0;
}
int CompareOutput(const float *output_data, size_t output_num, std::string file_path) {
int CompareOutput(const float *output_data, size_t output_num, const std::string &file_path) {
size_t ground_truth_size = 0;
auto ground_truth = reinterpret_cast<float *>(mindspore::lite::ReadFile(file_path.c_str(), &ground_truth_size));
size_t ground_truth_num = ground_truth_size / sizeof(float);

View File

@ -59,7 +59,7 @@ inline int WriteToBin(const std::string &file_path, void *data, size_t size) {
}
int CompareOutputData(const float *output_data, size_t output_num, const float *correct_data, size_t data_size);
int CompareOutput(const float *output_data, size_t output_num, std::string file_path);
int CompareOutput(const float *output_data, size_t output_num, const std::string &file_path);
std::string GetAndroidPackageName();
std::string GetAndroidPackagePath();

View File

@ -14,8 +14,8 @@
* limitations under the License.
*/
#include "src/common/string_util.h"
#include <algorithm>
#include "src/common/string_util.h"
#include "include/ms_tensor.h"
namespace mindspore {
@ -35,7 +35,7 @@ std::vector<StringPack> ParseStringBuffer(const void *data) {
MS_LOG(ERROR) << "data is nullptr";
return buffer;
}
const int32_t *offset = reinterpret_cast<const int32_t *>(data);
const auto *offset = reinterpret_cast<const int32_t *>(data);
int32_t num = *offset;
for (int i = 0; i < num; i++) {
offset += 1;
@ -59,7 +59,7 @@ int WriteStringsToTensor(Tensor *tensor, const std::vector<StringPack> &string_b
return RET_ERROR;
}
int32_t *string_info = reinterpret_cast<int32_t *>(data);
auto *string_info = reinterpret_cast<int32_t *>(data);
char *string_data = reinterpret_cast<char *>(data);
string_info[0] = num;
@ -140,13 +140,13 @@ static uint64_t k1 = 0xb492b66fbe98f273ULL;
static uint64_t k2 = 0x9ae16a3b2f90404fULL;
// Reads 8 bytes from p as a uint64_t using memcpy, which is safe for
// unaligned input (direct pointer casts would be UB for unaligned addresses).
uint64_t Fetch64Bit(const char *p) {
  // Zero-initialize so the value is defined even if a static analyzer
  // cannot prove memcpy fully writes it.
  uint64_t result = 0;
  memcpy(&result, p, sizeof(uint64_t));
  return result;
}
// Reads 4 bytes from p as a uint32_t using memcpy, which is safe for
// unaligned input (direct pointer casts would be UB for unaligned addresses).
uint32_t Fetch32Bit(const char *p) {
  // Zero-initialize so the value is defined even if a static analyzer
  // cannot prove memcpy fully writes it.
  uint32_t result = 0;
  memcpy(&result, p, sizeof(uint32_t));
  return result;
}
@ -226,7 +226,7 @@ std::pair<uint64_t, uint64_t> HashLen32WithSeeds(const char *s, uint64_t a, uint
}
uint64_t StringHash64(const char *s, size_t len) {
uint64_t seed_value = 81;
const uint64_t seed_value = 81;
if (len <= 16) {
return HashStringLen0to16(s, len);
} else if (len <= 32) {

View File

@ -13,7 +13,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/model_common.h"
#include "include/version.h"
#ifndef PRIMITIVE_WRITEABLE
@ -23,7 +22,7 @@
namespace mindspore::lite {
bool ConvertNodes(const schema::MetaGraph *meta_graph, Model *model) {
for (size_t i = 0; i < meta_graph->nodes()->size(); ++i) {
Model::Node *node = new (std::nothrow) Model::Node();
auto *node = new (std::nothrow) Model::Node();
if (node == nullptr) {
MS_LOG(ERROR) << "new node fail!";
return false;

View File

@ -384,6 +384,7 @@ int SetArch(CpuInfo *freq_set, int core_num) {
freq_set[i].arch = archs[i];
}
free(archs);
fclose(fp);
return RET_TP_OK;
}

View File

@ -49,8 +49,8 @@ TrainModel *TrainModel::Import(const char *model_buf, size_t size) {
model->buf_size_ = size;
auto meta_graph = schema::GetMetaGraph(model->buf);
if (meta_graph == nullptr) {
delete model;
free(model->buf);
delete model;
MS_LOG(ERROR) << "meta_graph is nullptr!";
return nullptr;
}

View File

@ -16,6 +16,7 @@
#include "tools/converter/parser/onnx/onnx_expand_parser.h"
#include <memory>
#include <vector>
namespace mindspore {
namespace lite {

View File

@ -19,10 +19,10 @@
#include <sys/stat.h>
#include <future>
#include <map>
#include <unordered_map>
#include <algorithm>
#include <functional>
#include <memory>
#include <algorithm>
#include <unordered_map>
#include <functional>
#include <numeric>
#include <utility>
#include <string>
@ -55,7 +55,7 @@ STATUS DivergInfo::RecordMaxValue(const std::vector<float> &datas) {
}
STATUS DivergInfo::RecordMaxValueArray(const std::vector<float> &datas) {
if (datas.size() == 0) {
if (datas.empty()) {
return RET_ERROR;
}
float max_num = datas.at(0);
@ -106,7 +106,7 @@ STATUS DivergInfo::ComputeThreshold() {
return RET_OK;
}
constexpr int quant_bint_nums = 128;
const constexpr int quant_bint_nums = 128;
int threshold = quant_bint_nums;
float min_kl = FLT_MAX;
float after_threshold_sum = std::accumulate(this->histogram.begin() + quant_bint_nums, this->histogram.end(), 0.0f);
@ -247,18 +247,19 @@ std::pair<CNodePtr, int32_t> DivergInfo::GetZeropoint() {
// Collects the quantization scale computed by each node's DivergInfo.
// NOTE: the diff rendering left both the old iterator loop and the new
// range-for in place; only the range-for survives here.
// @param diverg_info map from node name to its divergence statistics;
//                    assumed non-null — TODO confirm callers never pass nullptr.
// @return map from CNode to its computed scale.
std::unordered_map<CNodePtr, float> Calibrator::GetScale(
    std::unordered_map<std::string, std::unique_ptr<DivergInfo>> *diverg_info) {
  std::unordered_map<CNodePtr, float> result;
  for (auto &entry : *diverg_info) {
    DivergInfo *info = entry.second.get();
    result.insert(info->GetScale());
  }
  return result;
}
std::unordered_map<CNodePtr, int32_t> Calibrator::GetZeropoint(
std::unordered_map<std::string, std::unique_ptr<DivergInfo>> *diverg_info) {
std::unordered_map<CNodePtr, int32_t> result;
for (auto iter = diverg_info->begin(); iter != diverg_info->end(); iter++) {
DivergInfo *info = iter->second.get();
for (auto &iter : *diverg_info) {
DivergInfo *info = iter.second.get();
auto zeropoint = info->GetZeropoint();
result.insert(zeropoint);
}
@ -268,8 +269,8 @@ std::unordered_map<CNodePtr, int32_t> Calibrator::GetZeropoint(
std::map<CNodePtr, MaxMin> Calibrator::GetMinMax(
std::unordered_map<std::string, std::unique_ptr<DivergInfo>> *diverg_info) {
std::map<CNodePtr, MaxMin> result;
for (auto iter = diverg_info->begin(); iter != diverg_info->end(); iter++) {
DivergInfo *info = iter->second.get();
for (auto &iter : *diverg_info) {
DivergInfo *info = iter.second.get();
mindspore::lite::quant::MaxMin input_maxmin{};
input_maxmin.min = info->min;
input_maxmin.max = info->max;
@ -358,16 +359,16 @@ STATUS Calibrator::UpdateDataFrequency(const vector<float> &data, const std::uni
return RET_OK;
}
STATUS Calibrator::AddQuantizedOp(CNodePtr node) {
STATUS Calibrator::AddQuantizedOp(const CNodePtr &node) {
if (node == nullptr) {
MS_LOG(ERROR) << "To be quantized node is null";
return RET_ERROR;
}
string node_name = node->fullname_with_scope();
std::unique_ptr<DivergInfo> input_diverg = std::unique_ptr<DivergInfo>(
new DivergInfo(node, kDefaultBinNumber, bit_num_, quant_max_, quant_min_, config_param_.method_x));
std::unique_ptr<DivergInfo> output_diverg = std::unique_ptr<DivergInfo>(
new DivergInfo(node, kDefaultBinNumber, bit_num_, quant_max_, quant_min_, config_param_.method_x));
std::unique_ptr<DivergInfo> input_diverg =
std::make_unique<DivergInfo>(node, kDefaultBinNumber, bit_num_, quant_max_, quant_min_, config_param_.method_x);
std::unique_ptr<DivergInfo> output_diverg =
std::make_unique<DivergInfo>(node, kDefaultBinNumber, bit_num_, quant_max_, quant_min_, config_param_.method_x);
inputs_diverg_info_[node_name].push_back(std::move(input_diverg));
outputs_diverg_info_[node_name].push_back(std::move(output_diverg));
@ -379,7 +380,7 @@ void Calibrator::AddImage(const string &file, size_t index) {
MS_LOG(ERROR) << "images_ size: " << images_.size() << " but index: " << index;
return;
}
auto exist = [](const string file) {
auto exist = [](const string &file) {
struct stat buf {};
return stat(file.c_str(), &buf) == 0;
};
@ -408,6 +409,7 @@ STATUS Calibrator::GenerateInputData(int input_index, int image_index, mindspore
auto ret = memcpy_s(data, tensor->Size(), bin_buf, size);
if (ret != EOK) {
MS_LOG(ERROR) << "memcpy_s error: " << ret;
delete[] bin_buf;
return RET_ERROR;
}
delete[] bin_buf;
@ -536,11 +538,11 @@ STATUS Calibrator::ReadConfig() {
}
// Calibrator constructor: stores the quantization configuration.
// `path` is taken by value and moved into config_path_ to avoid a copy.
// NOTE: the diff rendering duplicated the initializer list (old + new);
// only the std::move version survives here.
Calibrator::Calibrator(string path, size_t bit_num, int quant_max, int quant_min)
    : config_path_(std::move(path)), bit_num_(bit_num), quant_max_(quant_max), quant_min_(quant_min) {}
PostTrainingQuantizer::PostTrainingQuantizer(FuncGraphPtr graph, string path, int bit_num, TypeId target_type,
bool per_channel)
: Quantizer(graph) {
: Quantizer(std::move(graph)) {
this->per_channel_ = per_channel;
this->bit_num = bit_num;
this->target_type_ = target_type;
@ -553,7 +555,7 @@ PostTrainingQuantizer::PostTrainingQuantizer(FuncGraphPtr graph, string path, in
} else {
MS_LOG(ERROR) << "unsupported quant value type: " << target_type;
}
calibrator_ = std::unique_ptr<Calibrator>(new Calibrator(path, this->bit_num, quant_max, quant_min));
calibrator_ = std::make_unique<Calibrator>(std::move(path), this->bit_num, quant_max, quant_min);
if (calibrator_ == nullptr) {
MS_LOG(ERROR) << "creat calibrator failed!";
return;
@ -561,7 +563,7 @@ PostTrainingQuantizer::PostTrainingQuantizer(FuncGraphPtr graph, string path, in
}
STATUS PostTrainingQuantizer::DoQuantInput(double scale, int32_t zeropoint, struct MaxMin *max_min,
std::shared_ptr<PrimitiveC> lite_primitive) {
const std::shared_ptr<PrimitiveC> &lite_primitive) const {
schema::QuantParamT quant_param;
quant_param.scale = scale;
quant_param.zeroPoint = zeropoint;
@ -576,7 +578,7 @@ STATUS PostTrainingQuantizer::DoQuantInput(double scale, int32_t zeropoint, stru
}
STATUS PostTrainingQuantizer::DoQuantOutput(double scale, int zeropoint, struct MaxMin *max_min,
std::shared_ptr<PrimitiveC> lite_primitive) {
const std::shared_ptr<PrimitiveC> &lite_primitive) const {
schema::QuantParamT quant_param;
quant_param.scale = scale;
quant_param.zeroPoint = zeropoint;
@ -590,8 +592,8 @@ STATUS PostTrainingQuantizer::DoQuantOutput(double scale, int zeropoint, struct
return RET_OK;
}
STATUS PostTrainingQuantizer::DoWeightQuant(AnfNodePtr weight, std::shared_ptr<PrimitiveC> primitive_c,
bool perchanel) {
STATUS PostTrainingQuantizer::DoWeightQuant(const AnfNodePtr &weight, std::shared_ptr<PrimitiveC> primitive_c,
bool perchanel) const {
// perlayer
if (!weight->isa<Parameter>()) {
MS_LOG(ERROR) << "not a parameter";
@ -607,8 +609,8 @@ STATUS PostTrainingQuantizer::DoWeightQuant(AnfNodePtr weight, std::shared_ptr<P
MS_LOG(ERROR) << weight->fullname_with_scope() << " can not get value";
return RET_ERROR;
}
auto status =
QuantFilter<int8_t>(paramValue, primitive_c, QuantType_PostTraining, quant_max, quant_min, bit_num, perchanel);
auto status = QuantFilter<int8_t>(paramValue, std::move(primitive_c), QuantType_PostTraining, quant_max, quant_min,
bit_num, perchanel);
if (status != RET_OK) {
MS_LOG(ERROR) << "QuantFilter failed: " << status;
return status;
@ -628,7 +630,7 @@ STATUS PostTrainingQuantizer::DoWeightQuant(AnfNodePtr weight, std::shared_ptr<P
return RET_OK;
}
STATUS PostTrainingQuantizer::DoBiasQuant(AnfNodePtr bias, std::shared_ptr<PrimitiveC> primitive_c) {
STATUS PostTrainingQuantizer::DoBiasQuant(const AnfNodePtr &bias, const std::shared_ptr<PrimitiveC> &primitive_c) {
if (primitive_c == nullptr || bias == nullptr) {
MS_LOG(ERROR) << "null pointer!";
return RET_NULL_PTR;
@ -675,22 +677,22 @@ STATUS PostTrainingQuantizer::DoBiasQuant(AnfNodePtr bias, std::shared_ptr<Primi
// set bias quant param
vector<schema::QuantParamT> quant_params;
for (size_t i = 0; i < bias_scales.size(); i++) {
for (double bias_scale : bias_scales) {
schema::QuantParamT quant_param;
quant_param.scale = bias_scales[i];
quant_param.scale = bias_scale;
quant_param.zeroPoint = 0;
quant_param.inited = true;
quant_params.emplace_back(quant_param);
}
// quant bias data
int32_t *quant_datas = new (std::nothrow) int32_t[shape_size];
auto *quant_datas = new (std::nothrow) int32_t[shape_size];
if (quant_datas == nullptr) {
MS_LOG(ERROR) << "null pointer dereferencing.";
return RET_NULL_PTR;
}
float *raw_datas = static_cast<float *>(bias_param->tensor_addr());
auto *raw_datas = static_cast<float *>(bias_param->tensor_addr());
double bias_scale_tmp;
constexpr int32_t quanted_bias_abs_limit = 0.5 * INT32_MAX;
const constexpr int32_t quanted_bias_abs_limit = 0.5 * INT32_MAX;
for (size_t i = 0; i < shape_size; i++) {
if (bias_scales.size() == 1) {
bias_scale_tmp = bias_scales[0];
@ -944,7 +946,7 @@ STATUS PostTrainingQuantizer::PreProcess() {
STATUS PostTrainingQuantizer::CheckFp32TensorVec(const std::string &node_name,
const std::vector<mindspore::tensor::MSTensor *> &tensor_vec) const {
if (tensor_vec.size() < 1) {
if (tensor_vec.empty()) {
MS_LOG(ERROR) << "node: " << node_name << " input tensors is 0";
return RET_ERROR;
}
@ -1000,7 +1002,7 @@ STATUS PostTrainingQuantizer::DoInference() {
}
for (size_t i = 0; i < (*diverg_info_map)[callParam.node_name].size(); i++) {
auto tensor = beforeInputs[i];
const float *tensor_data = static_cast<const float *>(tensor->MutableData());
const auto *tensor_data = static_cast<const float *>(tensor->MutableData());
size_t elem_count = tensor->ElementsNum();
vector<float> data(tensor_data, tensor_data + elem_count);
this->calibrator_->RecordMaxValue(data, (*diverg_info_map)[callParam.node_name][i]);
@ -1027,7 +1029,7 @@ STATUS PostTrainingQuantizer::DoInference() {
}
size_t output_i = 0;
for (const auto &tensor : afterOutputs) {
const float *tensor_data = static_cast<const float *>(tensor->MutableData());
const auto *tensor_data = static_cast<const float *>(tensor->MutableData());
size_t elem_count = tensor->ElementsNum();
vector<float> data(tensor_data, tensor_data + elem_count);
this->calibrator_->RecordMaxValue(data, (*diverg_info_map)[callParam.node_name][output_i]);
@ -1184,7 +1186,7 @@ STATUS PostTrainingQuantizer::Int8Inference() {
return RET_OK;
}
STATUS PostTrainingQuantizer::BiasCorrection(FuncGraphPtr func_graph) {
STATUS PostTrainingQuantizer::BiasCorrection(const FuncGraphPtr &func_graph) {
auto ret = RET_OK;
std::future<STATUS> int8_inference = std::async(std::launch::async, &PostTrainingQuantizer::Int8Inference, this);
// get input tensor
@ -1233,7 +1235,7 @@ STATUS PostTrainingQuantizer::BiasCorrection(FuncGraphPtr func_graph) {
return false;
}
auto tensor = afterOutputs[0];
const float *tensor_data = static_cast<const float *>(tensor->MutableData());
const auto *tensor_data = static_cast<const float *>(tensor->MutableData());
size_t elem_count = tensor->ElementsNum();
auto shapes = tensor->shape();
if (shapes.size() != 4) {
@ -1318,7 +1320,7 @@ STATUS PostTrainingQuantizer::BiasCorrection(FuncGraphPtr func_graph) {
for (int i = 0; i < bias_param->tensor_shape_size(); i++) {
auto scale = bias_quant_params[i].scale;
double after_correct = std::round(bias_diff[i] / scale) + bias_datas[i];
constexpr int32_t corrected_bias_abs_limit = 0.6 * INT32_MAX;
const constexpr int32_t corrected_bias_abs_limit = 0.6 * INT32_MAX;
if (after_correct > corrected_bias_abs_limit) {
MS_LOG(WARNING) << op_name << " ch: " << i << " bias after_corrected too large: " << after_correct
<< " origin value: " << bias_datas[i] << " bias_diff: " << bias_diff[i]
@ -1359,6 +1361,8 @@ STATUS PostTrainingQuantizer::BiasCorrection(FuncGraphPtr func_graph) {
ret = ::memcpy_s(tensor_data, size * sizeof(char), bias_diff.data(), size * sizeof(char));
if (ret != EOK) {
MS_LOG(ERROR) << "memcpy_s error: " << ret;
free(tensor_data);
tensor_data = nullptr;
return false;
}
param_value->set_tensor_addr(tensor_data);
@ -1372,6 +1376,8 @@ STATUS PostTrainingQuantizer::BiasCorrection(FuncGraphPtr func_graph) {
auto conv2d = primitive_c->GetPrimitiveT()->value.AsConv2D();
if (conv2d == nullptr) {
MS_LOG(ERROR) << "conv2d is null";
free(tensor_data);
tensor_data = nullptr;
return RET_ERROR;
}
conv2d->hasBias = true;
@ -1379,10 +1385,14 @@ STATUS PostTrainingQuantizer::BiasCorrection(FuncGraphPtr func_graph) {
auto depthwise_conv2d = primitive_c->GetPrimitiveT()->value.AsDepthwiseConv2D();
if (depthwise_conv2d == nullptr) {
MS_LOG(ERROR) << "conv2d is null";
free(tensor_data);
tensor_data = nullptr;
return RET_ERROR;
}
depthwise_conv2d->hasBias = true;
}
free(tensor_data);
tensor_data = nullptr;
} else {
MS_LOG(ERROR) << "unexpected input_quant_params size: " << input_quant_params.size();
continue;
@ -1423,7 +1433,7 @@ STATUS PostTrainingQuantizer::CollectDataFrequency() {
}
for (size_t i = 0; i < (*diverg_info_map)[callParam.node_name].size(); i++) {
auto tensor = beforeInputs[i];
const float *tensor_data = static_cast<const float *>(tensor->MutableData());
const auto *tensor_data = static_cast<const float *>(tensor->MutableData());
size_t elem_count = tensor->ElementsNum();
vector<float> data(tensor_data, tensor_data + elem_count);
this->calibrator_->UpdateDataFrequency(data, (*diverg_info_map)[callParam.node_name][i]);
@ -1443,7 +1453,7 @@ STATUS PostTrainingQuantizer::CollectDataFrequency() {
}
int output_i = 0;
for (const auto &tensor : after_outputs) {
const float *tensor_data = static_cast<const float *>(tensor->MutableData());
const auto *tensor_data = static_cast<const float *>(tensor->MutableData());
size_t elem_count = tensor->ElementsNum();
vector<float> data(tensor_data, tensor_data + elem_count);
this->calibrator_->UpdateDataFrequency(data, (*diverg_info_map)[call_param.node_name][output_i]);

View File

@ -110,14 +110,15 @@ class PostTrainingQuantizer : public Quantizer {
STATUS QuantNode();
STATUS DoQuantInput(double scale, int32_t zeropoint, struct MaxMin *max_min,
std::shared_ptr<PrimitiveC> lite_primitive);
STATUS DoQuantOutput(double scale, int32_t zeropoint, struct MaxMin *max_min, std::shared_ptr<PrimitiveC>);
const std::shared_ptr<PrimitiveC> &lite_primitive) const;
STATUS DoQuantOutput(double scale, int32_t zeropoint, struct MaxMin *max_min,
const std::shared_ptr<PrimitiveC> &) const;
STATUS DoWeightQuant(AnfNodePtr weight, std::shared_ptr<PrimitiveC> primitive_c, bool perchannel);
STATUS DoWeightQuant(const AnfNodePtr &weight, std::shared_ptr<PrimitiveC> primitive_c, bool perchannel) const;
STATUS DoBiasQuant(AnfNodePtr bias, std::shared_ptr<PrimitiveC> primitive_c);
STATUS DoBiasQuant(const AnfNodePtr &bias, const std::shared_ptr<PrimitiveC> &primitive_c);
STATUS Int8Inference();
STATUS BiasCorrection(FuncGraphPtr func_graph);
STATUS BiasCorrection(const FuncGraphPtr &func_graph);
};
struct DivergInfo {
@ -189,7 +190,7 @@ class Calibrator {
size_t GetInputNum() const { return config_param_.image_paths.size(); }
STATUS AddQuantizedOp(CNodePtr node);
STATUS AddQuantizedOp(const CNodePtr &node);
STATUS RecordMaxValue(const std::vector<float> &data, const std::unique_ptr<DivergInfo> &diverg_info);