forked from mindspore-Ecosystem/mindspore
!25682 [MS][LITE]clean code
Merge pull request !25682 from mengyuanli/bugfix
Commit fd4eeee485
@@ -133,7 +133,7 @@ mindspore::Context *MSContextFromContext(const lite::Context *context) {
       return nullptr;
     }
     if (device_type == DT_CPU) {
-      ms_context->SetThreadAffinity(device_context.device_info_.cpu_device_info_.cpu_bind_mode_);
+      ms_context->SetThreadAffinity(static_cast<int>(device_context.device_info_.cpu_device_info_.cpu_bind_mode_));
     }
     device_infos.push_back(device_info);
   }

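The only change in this hunk is an explicit enum-to-int cast before calling SetThreadAffinity. A minimal sketch of the same pattern, using made-up BindMode/Context types rather than the real lite::CpuBindMode and mindspore::Context:

    #include <iostream>

    // Hypothetical stand-ins for the lite bind-mode enum and mindspore::Context.
    enum BindMode { NO_BIND = 0, HIGHER_CPU = 1, MID_CPU = 2 };

    struct Context {
      void SetThreadAffinity(int mode) { mode_ = mode; }
      int mode_ = 0;
    };

    int main() {
      Context ctx;
      BindMode bind_mode = HIGHER_CPU;
      // The explicit cast documents the enum -> int conversion instead of relying
      // on the implicit one that static analyzers flag.
      ctx.SetThreadAffinity(static_cast<int>(bind_mode));
      std::cout << ctx.mode_ << std::endl;  // prints 1
      return 0;
    }
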
@@ -35,7 +35,7 @@ STATUS CalQuantizationParams(schema::QuantParamT *quant_param, double real_min,
     MS_LOG(ERROR) << "cal error while min" << real_min << ">" << real_max;
     return RET_PARAM_INVALID;
   }
-  if (real_min == real_max) {
+  if (real_max - real_min <= 0.0f) {
     if (real_min != 0.0f) {
       MS_LOG(ERROR) << "min and max should both be zero if they are equal to each other";
       return RET_ERROR;

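Replacing real_min == real_max with real_max - real_min <= 0.0f rejects an inverted range as well as the exactly-equal case in one comparison. A small self-contained sketch of that validation, assuming simplified error codes and logging (not the real CalQuantizationParams):

    #include <cstdio>

    // Simplified stand-in for CalQuantizationParams' range validation.
    int CheckQuantRange(double real_min, double real_max) {
      if (real_max - real_min <= 0.0f) {  // covers both equal and inverted ranges
        if (real_min != 0.0f) {
          std::printf("min and max should both be zero if they are equal\n");
          return -1;
        }
        return 0;  // min == max == 0: valid degenerate (all-zero) range
      }
      return 0;
    }

    int main() {
      std::printf("%d %d %d\n", CheckQuantRange(0.0, 0.0),   //  0: allowed
                                CheckQuantRange(1.5, 1.5),   // -1: rejected
                                CheckQuantRange(2.0, 1.0));  // -1: rejected
      return 0;
    }
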
@@ -61,7 +61,7 @@ inline int QuantMax(int bits, TypeId type) {
 
 inline int QuantMin(int bits, TypeId type) {
   if (type == kNumberTypeInt8) {
-    return -(1 << (bits - 1));
+    return -(1 << static_cast<unsigned int>(bits - 1));
   }
   return 0;
 }

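The cast in QuantMin only makes the shift amount unsigned to satisfy signed-shift lint rules; the computed value is unchanged. A hedged sketch of QuantMin with the usual matching QuantMax for the int8 case (the real functions also take a TypeId):

    #include <cassert>

    // Simplified versions restricted to the symmetric int8 case.
    inline int QuantMax(int bits) { return (1 << static_cast<unsigned int>(bits - 1)) - 1; }
    inline int QuantMin(int bits) { return -(1 << static_cast<unsigned int>(bits - 1)); }

    int main() {
      assert(QuantMin(8) == -128);
      assert(QuantMax(8) == 127);
      return 0;
    }
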
@@ -74,7 +74,7 @@ int WriteStringsToTensor(Tensor *tensor, const std::vector<StringPack> &string_b
   auto *string_info = reinterpret_cast<int32_t *>(data);
   char *string_data = reinterpret_cast<char *>(data);
 
-  string_info[0] = num;
+  string_info[0] = static_cast<int32_t>(num);
   for (size_t i = 0; i <= num; i++) {
     string_info[i + 1] = offset[i];
   }

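For context on the two WriteStringsToTensor hunks: the diff implies the tensor buffer begins with an int32 header holding the string count and num+1 offsets, followed by the character data, which is why the count must be narrowed to int32_t explicitly. A rough sketch of packing that layout; the layout details are inferred from the diff, not taken from the MindSpore sources:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <string>
    #include <vector>

    // Packs strings as: [num][offset_0 .. offset_num][chars...], header fields all int32.
    std::vector<char> PackStrings(const std::vector<std::string> &strings) {
      size_t num = strings.size();
      std::vector<int32_t> offset(num + 1);
      offset[0] = static_cast<int32_t>((num + 2) * sizeof(int32_t));  // data starts after header
      for (size_t i = 0; i < num; i++) {
        offset[i + 1] = offset[i] + static_cast<int32_t>(strings[i].size());
      }
      std::vector<char> buffer(offset[num]);
      auto *string_info = reinterpret_cast<int32_t *>(buffer.data());
      char *string_data = buffer.data();
      string_info[0] = static_cast<int32_t>(num);  // explicit narrowing, as in the hunk
      for (size_t i = 0; i <= num; i++) {
        string_info[i + 1] = offset[i];
      }
      for (size_t i = 0; i < num; i++) {
        std::memcpy(string_data + offset[i], strings[i].data(), strings[i].size());
      }
      return buffer;
    }

    int main() {
      auto buf = PackStrings({"hello", "world"});
      std::printf("packed %zu bytes\n", buf.size());  // 16-byte header + 10 chars = 26
      return 0;
    }
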
@@ -112,7 +112,7 @@ int WriteSeperatedStringsToTensor(Tensor *tensor, const std::vector<std::vector<
   auto *string_info = reinterpret_cast<int32_t *>(data);
   auto *string_data = reinterpret_cast<char *>(data);
 
-  string_info[0] = num;
+  string_info[0] = static_cast<int32_t>(num);
   for (size_t i = 0; i <= num; i++) {
     string_info[i + 1] = offset[i];
   }

@@ -66,7 +66,7 @@ void FreeAllTensorC(std::vector<TensorC *> *tensors_in) {
 int Tensor2TensorC(const Tensor *src, TensorC *dst) {
   MS_CHECK_TRUE_RET(src != nullptr && dst != nullptr, RET_ERROR);
   dst->is_ready_ = src->IsReady();
-  dst->format_ = src->format();
+  dst->format_ = static_cast<int>(src->format());
   dst->data_ = src->data();
   dst->data_type_ = src->data_type();
   dst->shape_size_ = src->shape().size();

@@ -102,7 +102,7 @@ int TensorList2TensorListC(TensorList *src, TensorListC *dst) {
   MS_CHECK_TRUE_RET(src != nullptr && dst != nullptr, RET_ERROR);
   dst->is_ready_ = src->IsReady();
   dst->data_type_ = static_cast<TypeIdC>(src->data_type());
-  dst->format_ = src->format();
+  dst->format_ = static_cast<int>(src->format());
   dst->shape_value_ = src->shape().empty() ? 0 : src->shape().front();
   dst->element_num_ = src->shape().empty() ? 0 : src->tensors().size();
 

@@ -142,7 +142,7 @@ int TensorListC2TensorList(const TensorListC *src, TensorList *dst) {
 
   // Set Tensors
   for (size_t i = 0; i < src->element_num_; i++) {
-    auto ret = TensorC2Tensor(&src->tensors_[i], dst->GetTensor(i));
+    auto ret = TensorC2Tensor(&src->tensors_[i], dst->GetTensor(static_cast<int>(i)));
     if (ret != RET_OK) {
       MS_LOG(ERROR) << "TensorC2Tensor failed";
       return ret;

@@ -156,8 +156,8 @@ int TensorListC2TensorList(const TensorListC *src, TensorList *dst) {
 
 #endif
 
-int GenerateOutTensorC(const OpParameter *const parameter, const std::vector<lite::Tensor *> &inputs,
-                       const std::vector<lite::Tensor *> &outputs, std::vector<TensorC *> *out_tensor_c) {
+int GenerateOutTensorC(const OpParameter *const parameter, const std::vector<lite::Tensor *> &outputs,
+                       std::vector<TensorC *> *out_tensor_c) {
   MS_CHECK_TRUE_RET(out_tensor_c != nullptr && parameter != nullptr, RET_ERROR);
   if (parameter->type_ == mindspore::schema::PrimitiveType_TensorListFromTensor ||
       parameter->type_ == mindspore::schema::PrimitiveType_TensorListReserve ||

@@ -181,7 +181,7 @@ int GenerateOutTensorC(const OpParameter *const parameter, const std::vector<lit
 }
 
 int GenerateInTensorC(const OpParameter *const parameter, const std::vector<lite::Tensor *> &inputs,
-                      const std::vector<lite::Tensor *> &outputs, std::vector<TensorC *> *in_tensor_c) {
+                      std::vector<TensorC *> *in_tensor_c) {
   MS_CHECK_TRUE_RET(in_tensor_c != nullptr, RET_ERROR);
   int ret = RET_OK;
   for (auto input : inputs) {

@@ -39,9 +39,9 @@ int TensorList2TensorListC(TensorList *src, TensorListC *dst);
 int TensorListC2TensorList(const TensorListC *src, TensorList *dst);
 #endif
 int GenerateInTensorC(const OpParameter *const parameter, const std::vector<lite::Tensor *> &inputs,
-                      const std::vector<lite::Tensor *> &outputs, std::vector<TensorC *> *in_tensor_c);
-int GenerateOutTensorC(const OpParameter *const parameter, const std::vector<lite::Tensor *> &inputs,
-                       const std::vector<lite::Tensor *> &outputs, std::vector<TensorC *> *out_tensor_c);
+                      std::vector<TensorC *> *in_tensor_c);
+int GenerateOutTensorC(const OpParameter *const parameter, const std::vector<lite::Tensor *> &outputs,
+                       std::vector<TensorC *> *out_tensor_c);
 
 int CheckTensorsInvalid(const std::vector<Tensor *> &tensors);
 

@@ -39,7 +39,7 @@ bool LiteKernel::IsReady(const std::vector<lite::Tensor *> &scope_tensors) {
 void LiteKernel::InitOutTensorInitRefCount(const std::vector<LiteKernel *> *mask_kernels) {
   for (auto *tensor : this->out_tensors()) {
     MS_ASSERT(tensor != nullptr);
-    size_t init_ref_count = 0;
+    int init_ref_count = 0;
     for (auto *post_kernel : this->out_kernels_) {
       if ((mask_kernels == nullptr) ||
           std::find(mask_kernels->begin(), mask_kernels->end(), post_kernel) != mask_kernels->end()) {

@@ -193,8 +193,6 @@ void LiteKernelUtil::InitTensorInitRefCount(const std::vector<kernel::LiteKernel
   }
 }
 
-int LiteKernelUtil::SetInput(const LiteKernel &kernelMod, const std::vector<lite::Tensor *> &inputs) { return -1; }
-
 #ifndef CONTROLFLOW_TENSORLIST_CLIP
 bool LiteKernelUtil::IsSwitchCall(kernel::LiteKernel *kernel) {
 #ifndef DELEGATE_CLIP

@@ -30,7 +30,6 @@ class LiteKernelUtil {
   static std::vector<lite::Tensor *> SubgraphOutputTensors(const std::vector<kernel::LiteKernel *> &kernels);
   static int TopologicalSortKernels(std::vector<kernel::LiteKernel *> *kernels);
   static void InitTensorInitRefCount(const std::vector<kernel::LiteKernel *> &kernels);
-  static int SetInput(const LiteKernel &kernelMod, const std::vector<lite::Tensor *> &inputs);
 #ifndef CONTROLFLOW_TENSORLIST_CLIP
   static bool IsSwitchCall(kernel::LiteKernel *kernel);
 #endif

@@ -197,7 +197,7 @@ int LiteModel::VersionVerify(flatbuffers::Verifier *verify) const {
 
 int LiteModel::NodeVerify() const {
   auto tensor_size = this->all_tensors_.size();
-  uint32_t subgraph_size = this->sub_graphs_.size();
+  uint32_t subgraph_size = static_cast<uint32_t>(this->sub_graphs_.size());
 
   for (auto &node : this->all_nodes_) {
     if (node == nullptr || node->primitive_ == nullptr) {

@@ -291,7 +291,7 @@ bool LiteModel::ModelVerify() const {
   return NodeVerify() == RET_OK && SubGraphVerify() == RET_OK;
 }
 
-const void *LiteModel::GetMetaGraphByVerison() {
+const void *LiteModel::GetMetaGraphByVerison() const {
   MS_ASSERT(this->buf != nullptr);
   if (schema_version_ == SCHEMA_VERSION::SCHEMA_CUR) {
     return reinterpret_cast<const void *>(schema::GetMetaGraph(this->buf));

@@ -344,7 +344,7 @@ int LiteModel::GenerateModelByVersion(const void *meta_graph) {
 }
 
 int LiteModel::ConstructModel() {
-  if (this->buf == nullptr || this->buf_size_ <= 0) {
+  if (this->buf == nullptr || this->buf_size_ == 0) {
     MS_LOG(ERROR) << "cannot construct model.";
     return RET_NULL_PTR;
   }

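buf_size_ is an unsigned size, so `<= 0` and `== 0` are the same condition; the new form just stops implying that a negative size is possible. A trivial sketch:

    #include <cstddef>
    #include <cstdio>

    int main() {
      size_t buf_size = 0;
      // For an unsigned type the two tests are identical; "== 0" avoids the
      // misleading suggestion that buf_size could ever be negative.
      std::printf("%d %d\n", buf_size <= 0, buf_size == 0);  // prints 1 1
      return 0;
    }
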
@@ -426,7 +426,7 @@ Model *ImportFromBuffer(const char *model_buf, size_t size, bool take_buf) {
 Model *Model::Import(const char *model_buf, size_t size) { return ImportFromBuffer(model_buf, size, false); }
 
 Model *Model::Import(const char *filename) {
-  size_t size = -1;
+  size_t size = 0;
   auto buf = ReadFile(filename, &size);
   if (buf == nullptr) {
     return nullptr;

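Initializing a size_t with -1 wraps to SIZE_MAX, so a failure path that never overwrote the out-parameter would report an enormous bogus file size; 0 is the safer default. A minimal illustration (ReadFile itself is not reproduced here):

    #include <cstdint>
    #include <cstdio>

    int main() {
      size_t size = -1;                       // wraps to SIZE_MAX on conversion
      std::printf("%d\n", size == SIZE_MAX);  // prints 1
      size = 0;                               // the commit's safer default
      std::printf("%zu\n", size);             // prints 0
      return 0;
    }
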
@@ -259,7 +259,7 @@ class LiteModel : public Model {
 
   int VersionVerify(flatbuffers::Verifier *verify) const;
 
-  const void *GetMetaGraphByVerison();
+  const void *GetMetaGraphByVerison() const;
 
   int GenerateModelByVersion(const void *meta_graph);
 

@@ -484,7 +484,7 @@ void LiteSession::FreePackOpWeight(const std::vector<kernel::LiteKernel *> &kern
   for (auto *kernel : kernels) {
     MS_ASSERT(kernel != nullptr);
     if (kernel->subgraph_type() == kernel::kNotSubGraph) {
-      if (!IsPackedOp(kernel->type())) {
+      if (!IsPackedOp(static_cast<int>(kernel->type()))) {
         continue;
       }
     } else {

@@ -127,12 +127,12 @@ int KernelInferShape(const std::vector<lite::Tensor *> &inputs, const std::vecto
     return RET_OK;
   }
 
-  int ret = GenerateInTensorC(parameter, inputs, outputs, &in_tensors);
+  int ret = GenerateInTensorC(parameter, inputs, &in_tensors);
   if (ret != RET_OK) {
     FreeAllTensorC(&in_tensors);
     return RET_ERROR;
   }
-  ret = GenerateOutTensorC(parameter, inputs, outputs, &out_tensors);
+  ret = GenerateOutTensorC(parameter, outputs, &out_tensors);
   if (ret != RET_OK) {
     FreeAllTensorC(&in_tensors);
     FreeAllTensorC(&out_tensors);

@@ -271,8 +271,8 @@ int Scheduler::SchedulePreProcess() {
   return RET_OK;
 }
 
-int Scheduler::CheckCpuValid(const std::vector<kernel::LiteKernel *> *dst_kernels) {
-  if (context_->IsCpuEnabled() == true) {
+int Scheduler::CheckCpuValid(const std::vector<kernel::LiteKernel *> *dst_kernels) const {
+  if (context_->IsCpuEnabled()) {
     return RET_OK;
   }
   for (auto kernel : *dst_kernels) {

@@ -78,7 +78,7 @@ class Scheduler {
   int FindCpuKernel(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
                     OpParameter *op_parameter, const kernel::KernelKey &desc, TypeId kernel_data_type,
                     kernel::LiteKernel **kernel);
-  int CheckCpuValid(const std::vector<kernel::LiteKernel *> *dst_kernels);
+  int CheckCpuValid(const std::vector<kernel::LiteKernel *> *dst_kernels) const;
   void ResetByExecutionPlan(std::string node_name, TypeId *data_type);
 
 #ifdef GPU_OPENCL

@@ -179,7 +179,9 @@ int TensorList::SetTensor(int index, const Tensor *src_tensor) {
                   << " must be equal to tensors_data_type_:" << this->tensors_data_type_;
     return RET_ERROR;
   }
-  if (index < 0 || index > (this->ElementsNum() - 1)) {
+  auto element_num = this->ElementsNum();
+  MS_CHECK_GE(element_num, 0, RET_ERROR);
+  if (index < 0 || index > (element_num - 1)) {
     MS_LOG(ERROR) << "index:" << index << " must in [0, " << this->ElementsNum() - 1 << "]!";
     return RET_ERROR;
   }

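The SetTensor change caches ElementsNum() once, checks it is non-negative, and then range-checks the index against the cached value. A standalone sketch of the same guard, with MS_CHECK_GE replaced by a plain if and the tensor list reduced to a vector:

    #include <cstdio>
    #include <vector>

    // Simplified stand-in for TensorList::SetTensor's index validation.
    int SetTensor(std::vector<int> *tensors, int index, int value) {
      auto element_num = static_cast<int64_t>(tensors->size());
      if (element_num < 0) {  // mirrors MS_CHECK_GE(element_num, 0, RET_ERROR)
        return -1;
      }
      if (index < 0 || index > element_num - 1) {
        std::printf("index:%d must be in [0, %lld]!\n", index, static_cast<long long>(element_num - 1));
        return -1;
      }
      (*tensors)[index] = value;
      return 0;
    }

    int main() {
      std::vector<int> tensors(3, 0);
      std::printf("%d %d\n", SetTensor(&tensors, 1, 42), SetTensor(&tensors, 5, 7));  // prints 0 -1
      return 0;
    }
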
@@ -259,6 +261,14 @@ STATUS TensorList::Decode(const int *data) {
     return RET_ERROR;
   }
   tensors_data_type_ = TypeId(data[0]);
+  if (tensors_data_type_ < kTypeUnknown || tensors_data_type_ > kMonadTypeEnd) {
+    MS_LOG(ERROR) << "TypeId illegal.";
+    return RET_ERROR;
+  }
+  if (data[1] < 0) {
+    MS_LOG(ERROR) << "element shape size illegal.";
+    return RET_ERROR;
+  }
   for (int j = 0; j < data[1]; ++j) {
     element_shape_.push_back(data[2 + j]);
   }

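Decode now validates the serialized header before using it: data[0] must fall inside the TypeId enum range and data[1], the element-shape length, must be non-negative. A reduced sketch of that defensive parse; the enum bounds and error codes below are placeholders, not the real kTypeUnknown/kMonadTypeEnd values:

    #include <cstdio>
    #include <vector>

    // Placeholder bounds standing in for kTypeUnknown / kMonadTypeEnd.
    constexpr int kTypeUnknown = 0;
    constexpr int kMonadTypeEnd = 50;

    // Parses [type_id, shape_size, shape...] and rejects malformed headers.
    int DecodeHeader(const int *data, std::vector<int> *element_shape) {
      int type_id = data[0];
      if (type_id < kTypeUnknown || type_id > kMonadTypeEnd) {
        std::printf("TypeId illegal.\n");
        return -1;
      }
      if (data[1] < 0) {
        std::printf("element shape size illegal.\n");
        return -1;
      }
      for (int j = 0; j < data[1]; ++j) {
        element_shape->push_back(data[2 + j]);
      }
      return 0;
    }

    int main() {
      const int good[] = {43, 2, 4, 8};  // plausible type id, shape {4, 8}
      const int bad[] = {999, 1, 4};     // type id outside the enum range
      std::vector<int> shape;
      std::printf("%d %d\n", DecodeHeader(good, &shape), DecodeHeader(bad, &shape));  // prints 0 -1
      return 0;
    }
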
@@ -147,7 +147,9 @@ class WeightDecoder {
   template <typename ST, typename DT = float>
   static DT *DequantPerLayerData(const lite::Tensor *input_tensor, const ST *quant_datas) {
     auto quant_param = input_tensor->quant_params();
-    DT *dequant_datas = static_cast<DT *>(malloc(input_tensor->ElementsNum() * sizeof(DT)));
+    auto input_tensor_element_num = input_tensor->ElementsNum();
+    MS_CHECK_GT(input_tensor_element_num, 0, nullptr);
+    DT *dequant_datas = static_cast<DT *>(malloc(input_tensor_element_num * sizeof(DT)));
     if (dequant_datas == nullptr) {
       MS_LOG(ERROR) << "Malloc failed.";
       return nullptr;

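Both dequant helpers now read ElementsNum() once and require it to be positive before computing the malloc size, so a zero or negative element count cannot produce a zero-byte or wrapped allocation. A compact sketch of that guard with MS_CHECK_GT written out as an explicit if:

    #include <cstdio>
    #include <cstdlib>

    // Allocates one float per element, refusing non-positive element counts,
    // mirroring the MS_CHECK_GT(input_tensor_element_num, 0, nullptr) guard.
    float *AllocDequantBuffer(long long element_num) {
      if (element_num <= 0) {
        return nullptr;
      }
      auto *buf = static_cast<float *>(malloc(static_cast<size_t>(element_num) * sizeof(float)));
      if (buf == nullptr) {
        std::printf("Malloc failed.\n");
      }
      return buf;
    }

    int main() {
      float *ok = AllocDequantBuffer(16);
      float *bad = AllocDequantBuffer(0);
      std::printf("%d %d\n", ok != nullptr, bad == nullptr);  // prints 1 1
      free(ok);
      return 0;
    }
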
@@ -156,7 +158,7 @@ class WeightDecoder {
     auto param = quant_param.front();
     auto scale = param.scale;
     auto zero_point = param.zeroPoint;
-    for (int64_t j = 0; j < input_tensor->ElementsNum(); j++) {
+    for (int64_t j = 0; j < input_tensor_element_num; j++) {
       if (!quant_clusters.empty()) {
         int8_t index = quant_datas[j];
         if (index > INT8_MAX || index < INT8_MIN) {

@@ -180,7 +182,9 @@ class WeightDecoder {
   template <typename ST, typename DT = float>
   static DT *DequantPerChannelData(const lite::Tensor *input_tensor, const ST *quant_datas, int preferred_dim) {
     auto quant_param = input_tensor->quant_params();
-    DT *dequant_datas = static_cast<DT *>(malloc(input_tensor->ElementsNum() * sizeof(DT)));
+    auto input_tensor_element_num = input_tensor->ElementsNum();
+    MS_CHECK_GT(input_tensor_element_num, 0, nullptr);
+    DT *dequant_datas = static_cast<DT *>(malloc(input_tensor_element_num * sizeof(DT)));
     if (dequant_datas == nullptr) {
       MS_LOG(ERROR) << "Malloc failed.";
       return nullptr;

@@ -194,7 +198,7 @@ class WeightDecoder {
       return nullptr;
     }
     MS_CHECK_GT(channels, 0, nullptr);
-    size_t per_channel_size = input_tensor->ElementsNum() / channels;
+    size_t per_channel_size = input_tensor_element_num / channels;
     for (size_t i = 0; i < channels; i++) {
      auto param = quant_param.at(i);
      auto scale = param.scale;