From 3037d6b91091c1db2271b632a5cce593583e56ea Mon Sep 17 00:00:00 2001
From: XianglongZeng
Date: Wed, 11 May 2022 10:40:51 +0800
Subject: [PATCH] code clean 2

---
 mindspore/lite/tools/common/tensor_util.cc | 21 +++++++++++++++++------
 mindspore/lite/tools/common/tensor_util.h  |  4 ++--
 .../graph/convert_fp32_to_fp16_pass.cc     |  6 +-----
 .../graph/tensor_quant_pass.cc             | 18 +++++-------------
 4 files changed, 23 insertions(+), 26 deletions(-)

diff --git a/mindspore/lite/tools/common/tensor_util.cc b/mindspore/lite/tools/common/tensor_util.cc
index 609f9407b16..00af91df4bb 100644
--- a/mindspore/lite/tools/common/tensor_util.cc
+++ b/mindspore/lite/tools/common/tensor_util.cc
@@ -238,11 +238,15 @@ size_t GetElementSize(const TypeId &dataType) {
   }
 }
 
-int GetShapeSize(const TensorT &tensor) {
+size_t GetShapeSize(const TensorT &tensor) {
   auto shape = tensor.dims;
-  int shapeSize = 1;
+  size_t shapeSize = 1;
   for (auto dim : shape) {
-    shapeSize *= dim;
+    if (dim <= 0) {
+      MS_LOG(WARNING) << "Dim value less than or equal to 0 found in tensor's shape.";
+      return 0;
+    }
+    shapeSize *= static_cast<size_t>(dim);
   }
   return shapeSize;
 }
@@ -277,10 +281,15 @@ size_t GetRefCount(MetaGraphT *graphT, uint32_t tensorIdx) {
   }
   return refCount;
 }
-int GetShapeSize(const std::vector<int32_t> &shape) {
-  int shapeSize = 1;
+
+size_t GetShapeSize(const std::vector<int32_t> &shape) {
+  size_t shapeSize = 1;
   for (auto dim : shape) {
-    shapeSize *= dim;
+    if (dim <= 0) {
+      MS_LOG(WARNING) << "Dim value less than or equal to 0 found in tensor's shape.";
+      return 0;
+    }
+    shapeSize *= static_cast<size_t>(dim);
   }
   return shapeSize;
 }
diff --git a/mindspore/lite/tools/common/tensor_util.h b/mindspore/lite/tools/common/tensor_util.h
index bb1a1bbde9a..44f20d70b71 100644
--- a/mindspore/lite/tools/common/tensor_util.h
+++ b/mindspore/lite/tools/common/tensor_util.h
@@ -68,9 +68,9 @@ size_t GetElementSize(const TensorT &tensor);
 
 size_t GetElementSize(const TypeId &dataType);
 
-int GetShapeSize(const TensorT &tensor);
+size_t GetShapeSize(const TensorT &tensor);
 
-int GetShapeSize(const std::vector<int32_t> &shape);
+size_t GetShapeSize(const std::vector<int32_t> &shape);
 
 std::unique_ptr<TensorT> CopyTensorDefT(const std::unique_ptr<TensorT> &);
diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/convert_fp32_to_fp16_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/graph/convert_fp32_to_fp16_pass.cc
index 45623322d90..0a4267e842b 100644
--- a/mindspore/lite/tools/converter/legacy_optimizer/graph/convert_fp32_to_fp16_pass.cc
+++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/convert_fp32_to_fp16_pass.cc
@@ -43,10 +43,6 @@ STATUS ConvertFP32ToFP16Pass::Run(schema::MetaGraphT *graph) {
       continue;
     }
     auto ele_num = lite::GetShapeSize(tensor->dims);
-    if (ele_num < 0) {
-      MS_LOG(ERROR) << "Tensor data length error.";
-      return RET_ERROR;
-    }
     auto origin_data = tensor->data;
     if (origin_data.size() != ele_num * sizeof(float) || origin_data.size() % kFp16ToFp32Multiply != 0) {
       MS_LOG(ERROR) << "Tensor data length error.";
@@ -58,7 +54,7 @@ STATUS ConvertFP32ToFP16Pass::Run(schema::MetaGraphT *graph) {
     auto fp16_data = reinterpret_cast<float16 *>(new_data.data());
     CHECK_NULL_RETURN(fp32_data);
     CHECK_NULL_RETURN(fp16_data);
-    for (int i = 0; i < ele_num; i++) {
+    for (size_t i = 0; i < ele_num; i++) {
       fp16_data[i] = float16(fp32_data[i]);
     }
     tensor->data.swap(new_data);
diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/tensor_quant_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/graph/tensor_quant_pass.cc
index 05394f0f03b..19618686f32 100644
--- a/mindspore/lite/tools/converter/legacy_optimizer/graph/tensor_quant_pass.cc
+++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/tensor_quant_pass.cc
@@ -48,11 +48,7 @@ constexpr int kHalfUInt = 128;
 
 STATUS ComputeDataToInt8(const std::unique_ptr<TensorT> &tensor) {
   MS_ASSERT(tensor != nullptr);
-  int wShapeSize = tensor->data.empty() ? 0 : GetShapeSize(*(tensor.get()));
-  if (wShapeSize < 0) {
-    MS_LOG(ERROR) << "Invalid shape.";
-    return RET_ERROR;
-  }
+  size_t wShapeSize = tensor->data.empty() ? 0 : GetShapeSize(*(tensor.get()));
   void *oriWeightData = tensor->data.data();
   if (oriWeightData == nullptr) {
     return RET_OK;
@@ -62,12 +58,12 @@ STATUS ComputeDataToInt8(const std::unique_ptr<TensorT> &tensor) {
   if (tensor->dataType == TypeId::kNumberTypeFloat || tensor->dataType == TypeId::kNumberTypeFloat32) {
     // normal awareing quant
     auto *weightData = static_cast<float *>(oriWeightData);
-    for (int j = 0; j < wShapeSize; j++) {
+    for (size_t j = 0; j < wShapeSize; j++) {
       qDatas[j] = QuantizeData<int8_t>(weightData[j], weightQauntParam.get());
     }
   } else {  // convert uint8 to int8
     auto *weightData = static_cast<uint8_t *>(oriWeightData);
-    for (int j = 0; j < wShapeSize; j++) {
+    for (size_t j = 0; j < wShapeSize; j++) {
       qDatas[j] = static_cast<int8_t>(static_cast<int>(weightData[j]) - kHalfUInt);
     }
     weightQauntParam->zeroPoint -= kHalfUInt;
@@ -90,11 +86,7 @@ STATUS ComputeDataToInt8(const std::unique_ptr<TensorT> &tensor) {
 
 STATUS ComputeDataToInt32(const std::unique_ptr<TensorT> &tensor) {
   MS_ASSERT(tensor != nullptr);
-  int bShapeSize = GetShapeSize(*(tensor));
-  if (bShapeSize < 0) {
-    MS_LOG(ERROR) << "Invalid shape.";
-    return RET_ERROR;
-  }
+  size_t bShapeSize = GetShapeSize(*(tensor));
   auto qDatas = std::make_unique<int32_t[]>(bShapeSize);
   if (qDatas == nullptr) {
     MS_LOG(ERROR) << "new qDatas failed";
@@ -106,7 +98,7 @@ STATUS ComputeDataToInt32(const std::unique_ptr<TensorT> &tensor) {
     MS_LOG(ERROR) << "divisor 'scale' cannot be 0";
     return RET_ERROR;
   }
-  for (int i = 0; i < bShapeSize; ++i) {
+  for (size_t i = 0; i < bShapeSize; ++i) {
     qDatas[i] = (int32_t)std::round(rawDatas[i] / tensor->quantParams.front()->scale);
   }
   tensor->dataType = TypeId::kNumberTypeInt32;
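
For illustration, here is a minimal standalone sketch (not the MindSpore code itself) of the shape-size contract this patch introduces: any dim <= 0 now yields 0 instead of a negative or wrapped product, which is why the callers' separate `ele_num < 0` checks can be dropped in favor of the existing byte-size comparison. MS_LOG is replaced by a plain stderr print, since the real macro lives in MindSpore's logging headers; everything else mirrors the patched function.

#include <cstdint>
#include <cstdio>
#include <vector>

// Mirrors the patched lite::GetShapeSize: a dim <= 0 yields 0 plus a warning,
// so a caller's single comparison against data.size() catches bad shapes.
size_t GetShapeSize(const std::vector<int32_t> &shape) {
  size_t shapeSize = 1;
  for (auto dim : shape) {
    if (dim <= 0) {
      // The patch emits MS_LOG(WARNING) here; stderr stands in for the sketch.
      std::fprintf(stderr, "Dim value less than or equal to 0 found in tensor's shape.\n");
      return 0;
    }
    shapeSize *= static_cast<size_t>(dim);
  }
  return shapeSize;
}

int main() {
  std::printf("%zu\n", GetShapeSize({2, 3, 4}));   // 24
  // A dynamic (-1) or zero dim maps to 0, which then fails a caller's
  // origin_data.size() != ele_num * sizeof(float) check instead of
  // poisoning the product with a negative factor.
  std::printf("%zu\n", GetShapeSize({2, -1, 4}));  // 0
  return 0;
}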
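The uint8-to-int8 branch of ComputeDataToInt8 (context lines above) relies on the affine quantization identity real = scale * (q - zeroPoint): subtracting kHalfUInt (128) from both the stored bytes and the zero point leaves the represented real value unchanged. A self-contained sketch of that arithmetic follows, with hypothetical local names (scale, zeroPoint) standing in for the fields of weightQauntParam:

#include <cstdint>
#include <cstdio>
#include <vector>

constexpr int kHalfUInt = 128;

int main() {
  const float scale = 0.5f;
  const int32_t zeroPoint = 140;  // hypothetical uint8 zero point
  const std::vector<uint8_t> u8 = {0, 128, 255};

  for (uint8_t q : u8) {
    // Same conversion as the patch's context line.
    int8_t s8 = static_cast<int8_t>(static_cast<int>(q) - kHalfUInt);
    // Dequantize before and after the shift: the two must agree, because
    // scale * (q - zp) == scale * ((q - 128) - (zp - 128)).
    float before = scale * (static_cast<int>(q) - zeroPoint);
    float after = scale * (static_cast<int>(s8) - (zeroPoint - kHalfUInt));
    std::printf("u8=%3d  s8=%4d  before=%6.1f  after=%6.1f\n",
                static_cast<int>(q), static_cast<int>(s8), before, after);
  }
  return 0;
}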