fix codex

parent 8f2afe13b4
commit eb56ead558
@@ -368,8 +368,9 @@ struct MSCallBackParam {
 };
 
 /// \brief KernelCallBack defined the function pointer for callBack.
-using MSKernelCallBack = std::function<bool(const std::vector<MSTensor> &inputs, const std::vector<MSTensor> &outputs,
-                                            const MSCallBackParam &opInfo)>;
+using MSKernelCallBack =
+  std::function<bool(const std::vector<MSTensor> & /* inputs */, const std::vector<MSTensor> & /* outputs */,
+                     const MSCallBackParam &opInfo)>;
 
 std::vector<char> CharVersion();
 inline std::string Version() { return CharToString(CharVersion()); }
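In a std::function alias the parameter names carry no semantics; they are documentation only, so commenting them out as /* inputs */ keeps the hint while satisfying checkers that flag named parameters in type aliases. A minimal sketch with a hypothetical Callback alias (not the MindSpore API):

#include <functional>
#include <string>
#include <vector>

// Names in the alias are comments only; callers bind whatever names they like.
using Callback = std::function<bool(const std::vector<int> & /* inputs */, const std::string & /* op_name */)>;

int main() {
  Callback cb = [](const std::vector<int> &inputs, const std::string &op_name) {
    return !inputs.empty() && !op_name.empty();
  };
  return cb({1, 2, 3}, "conv") ? 0 : 1;
}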
@@ -41,7 +41,7 @@ bool IsLinearActivation(const api::SharedPtr<ops::Conv2DFusion> &conv2d) {
   return false;
 }
 
-bool IsCommonConvNode(const BaseRef &n) {
+bool IsConvNode(const BaseRef &n, ConvNode node) {
   if (utils::isa<AnfNodePtr>(n)) {
     auto anf_node = utils::cast<AnfNodePtr>(n);
     if (!opt::CheckPrimitiveType(anf_node, prim::kPrimConv2DFusion)) {
@@ -57,32 +57,24 @@ bool IsCommonConvNode(const BaseRef &n) {
     if (conv == nullptr) {
       return false;
     }
-    return conv->get_group() == 1;
+    if (node == COMMON_CONV) {
+      return conv->get_group() == 1;
+    } else if (node == DEPTHWISE_CONV) {
+      bool ret = IsLinearActivation(conv) && conv->GetAttr(ops::kIsDepthWise) != nullptr &&
+                 GetValue<bool>(conv->GetAttr(ops::kIsDepthWise));
+      return ret;
+    } else {
+      MS_LOG(ERROR) << "Not supported conv node type.";
+      return false;
+    }
   }
   return false;
 }
 
-bool IsDepthWiseConvNode(const BaseRef &n) {
-  if (utils::isa<AnfNodePtr>(n)) {
-    auto anf_node = utils::cast<AnfNodePtr>(n);
-    if (!opt::CheckPrimitiveType(anf_node, prim::kPrimConv2DFusion)) {
-      return false;
-    }
-    api::SharedPtr<ops::Conv2DFusion> conv = nullptr;
-    if (utils::isa<CNodePtr>(anf_node)) {
-      auto c_node = anf_node->cast<CNodePtr>();
-      conv = ops::GetOperator<ops::Conv2DFusion>(c_node->input(0));
-    } else if (utils::isa<ValueNodePtr>(anf_node)) {
-      conv = ops::GetOperator<ops::Conv2DFusion>(anf_node);
-    }
-    if (conv == nullptr || !IsLinearActivation(conv)) {
-      return false;
-    }
-    auto ret = conv->GetAttr(ops::kIsDepthWise) != nullptr && GetValue<bool>(conv->GetAttr(ops::kIsDepthWise));
-    return ret;
-  }
-  return false;
-}
+bool IsCommonConvNode(const BaseRef &n) { return IsConvNode(n, COMMON_CONV); }
+
+bool IsDepthWiseConvNode(const BaseRef &n) { return IsConvNode(n, DEPTHWISE_CONV); }
 
 VectorRef CLEPattern::DefineConvWithConvPattern() const {
   auto is_conv1 = std::make_shared<CondVar>(IsCommonConvNode);
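The two predicates above differed only in their final test, so the commit folds them into one function dispatching on a ConvNode tag. A self-contained sketch of the same pattern, with toy types standing in for the AnfNode machinery:

#include <iostream>

enum ConvKind { COMMON, DEPTHWISE };  // mirrors the new ConvNode enum

// One parameterized predicate replaces two near-duplicate functions.
bool IsConv(int group, bool is_depthwise, ConvKind kind) {
  if (kind == COMMON) {
    return group == 1;
  } else if (kind == DEPTHWISE) {
    return is_depthwise;
  }
  std::cerr << "Not supported conv node type.\n";
  return false;
}

// Thin wrappers keep existing call sites (e.g. the CondVar predicates) intact.
bool IsCommonConv(int group, bool dw) { return IsConv(group, dw, COMMON); }
bool IsDepthwiseConv(int group, bool dw) { return IsConv(group, dw, DEPTHWISE); }

int main() { return (IsCommonConv(1, false) && IsDepthwiseConv(4, true)) ? 0 : 1; }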
@@ -31,6 +31,7 @@ struct CombinationLayer {
 };
 constexpr size_t kInputsNum2 = 2;
 constexpr size_t kInputsNum3 = 3;
+enum ConvNode { COMMON_CONV, DEPTHWISE_CONV };
 class CLEPattern : public opt::MultiplePatternProcessPass {
  public:
   explicit CLEPattern(const std::string &name = "CLEPattern", bool multigraph = true)
@@ -33,8 +33,6 @@
 namespace mindspore::lite::quant {
-using lite::RET_ERROR;
-using lite::RET_OK;
 static const std::set<std::string> kSupportCLENode = {
   schema::EnumNamePrimitiveType(schema::PrimitiveType_Conv2DFusion)};
 static const float kDefaultScale = 1;
 int CLEStrategy::Run() {
   MS_LOG(INFO) << "CLE start to find pattern.";
@@ -112,7 +112,7 @@ void DataDistribution::HandleBinForKL(int quant_bint_nums, int bin_index, std::v
     float left_scale = 0.0f;
     if (left_upper > start) {
       left_scale = left_upper - start;
-      if (this->histogram_[left_upper - 1] != 0) {
+      if (!IsZero(this->histogram_[left_upper - 1])) {
         count += left_scale;
       }
     }
@@ -120,12 +120,13 @@ void DataDistribution::HandleBinForKL(int quant_bint_nums, int bin_index, std::v
     double right_scale = 0.0f;
     if (right_lower < end) {
       right_scale = end - right_lower;
-      if (this->histogram_[right_lower] != 0) {
+      if (!IsZero(this->histogram_[right_lower])) {
         count += right_scale;
       }
     }
     std::for_each(this->histogram_.begin() + left_upper, this->histogram_.begin() + right_lower, [&count](float item) {
-      if (item != 0) {
+      bool is_zero = (item <= kEps && item >= -kEps);
+      if (!is_zero) {
         count += 1;
       }
     });
@@ -133,14 +134,14 @@ void DataDistribution::HandleBinForKL(int quant_bint_nums, int bin_index, std::v
       continue;
     }
     const float average_num = quantized_histogram->at(i) / count;
-    if (left_upper > start && this->histogram_[left_upper - 1] != 0) {
+    if (left_upper > start && !IsZero(this->histogram_[left_upper - 1])) {
       expanded_histogram->at(left_upper - 1) += average_num * left_scale;
     }
-    if (right_lower < end && this->histogram_[right_lower] != 0) {
+    if (right_lower < end && !IsZero(this->histogram_[right_lower])) {
       expanded_histogram->at(right_lower) += average_num * right_scale;
     }
     for (int k = left_upper; k < right_lower; ++k) {
-      if (this->histogram_[k] != 0) {
+      if (!IsZero(this->histogram_[k])) {
        expanded_histogram->at(k) += average_num;
       }
     }
@@ -21,6 +21,8 @@
 #include "tools/converter/quantizer/quant_params.h"
 #include "tools/converter/quantizer/quantize_util.h"
 namespace mindspore::lite::quant {
+constexpr float kEps = 1e-8;
+
 class DataDistribution {
  public:
   DataDistribution() = default;
@@ -78,6 +80,8 @@ class DataDistribution {
 
   std::pair<float, float> CalQuantileMinMax(const std::vector<float> &min_datas, const std::vector<float> &max_datas);
 
+  inline bool IsZero(float x) { return (x <= kEps && x >= -kEps); }
+
  private:
   std::vector<float> histogram_;
   CNodePtr cnode_;
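kEps and IsZero centralize the float zero test that several call sites previously wrote as `!= 0`, which static checkers flag as an exact floating-point comparison. A standalone sketch of the behavioral difference (kEps value copied from the header above):

#include <iostream>

constexpr float kEps = 1e-8;
inline bool IsZero(float x) { return (x <= kEps && x >= -kEps); }

int main() {
  float dust = 1e-4f * 1e-5f;  // 1e-9: numerical residue, not a real count
  // Exact comparison treats the residue as nonzero; IsZero absorbs it.
  std::cout << "dust != 0    -> " << (dust != 0) << '\n';   // prints 1
  std::cout << "IsZero(dust) -> " << IsZero(dust) << '\n';  // prints 1, so !IsZero skips it
  return 0;
}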
@@ -94,7 +94,7 @@ std::string DebugInfoManager::ParseInOutTensorToString(InOutFlag in_out_flag) {
   return "ERROR";
 }
 
-std::string DebugInfoManager::ParseDataTypeFlagToString(DataTypeFlag data_type_flag) {
+std::string DebugInfoManager::ParseDataTypeFlagToString(DataTypeFlag data_type_flag) const {
   switch (data_type_flag) {
     case ORIGIN:
       return "Origin";
@@ -363,7 +363,7 @@ int DebugInfoManager::AddComparedInfo(const mindspore::MSCallBackParam &call_bac
 }
 
 std::map<std::string, mindspore::schema::Tensor *> DebugInfoManager::ParseInputTensors(
-  const mindspore::lite::LiteModel &model) {
+  const mindspore::lite::LiteModel &model) const {
   std::map<std::string, mindspore::schema::Tensor *> maps;
   for (auto &node : model.graph_.all_nodes_) {
     for (auto &index : node->input_indices_) {
@@ -112,7 +112,7 @@ class DebugInfoManager {
                      int tensor_index, QuantDebugInfo *quant_debug_info, const mindspore::lite::Tensor &tensor,
                      const quant::DebugMode &debug_mode);
 
-  std::string ParseDataTypeFlagToString(DataTypeFlag data_type_flag);
+  std::string ParseDataTypeFlagToString(DataTypeFlag data_type_flag) const;
 
   std::string ParseTensorTypeFlagToString(TensorTypeFlag tensor_type_flag);
 
@@ -122,7 +122,7 @@ class DebugInfoManager {
 
   void SaveInfo(std::ofstream &out_file, const QuantDebugInfo &info);
 
-  std::map<std::string, mindspore::schema::Tensor *> ParseInputTensors(const mindspore::lite::LiteModel &model);
+  std::map<std::string, mindspore::schema::Tensor *> ParseInputTensors(const mindspore::lite::LiteModel &model) const;
 
   std::map<std::string, mindspore::schema::Tensor *> ParseOutputTensorFromModel(const Model &model);
 
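The const qualifiers added across DebugInfoManager and InsertQuantNodeManager follow the usual rule: a member function that only reads state should be declared const so it can be called on const objects and references. A minimal sketch with a toy class rather than the real one:

#include <string>

class InfoManager {
 public:
  // Reads no mutable state, so it is declared const.
  std::string FlagToString(int flag) const { return flag == 0 ? "Origin" : "Dequant"; }

 private:
  int data_ = 0;
};

int main() {
  const InfoManager mgr{};  // const instances can still call const members
  return mgr.FlagToString(0) == "Origin" ? 0 : 1;
}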
@@ -58,7 +58,7 @@ void FSEBitStream::Empty() {
 
 int64_t FSEBitStream::Pop(uint8_t bit_count) {
   MS_ASSERT(curr_bit_count_ <= kCurrentBitCount);
-  int64_t right = curr_chunk_ >> (kCurrentBitCount - curr_bit_count_);
+  int64_t right = curr_chunk_ >> static_cast<size_t>(kCurrentBitCount - curr_bit_count_);
   int64_t res = right & ((1 << bit_count) - 1);
   curr_bit_count_ -= static_cast<int8_t>(bit_count);
   if (curr_bit_count_ > 0) {
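The cast in Pop makes the shift amount's type explicit: kCurrentBitCount - curr_bit_count_ is a signed expression, and shifting by a signed amount is what the checker flags (a negative amount would be undefined behavior; the assert above rules it out). A small sketch with assumed constants:

#include <cassert>
#include <cstdint>

int main() {
  const int8_t kCurrentBitCount = 64;  // assumed: bits per chunk
  int8_t curr_bit_count = 12;
  uint64_t curr_chunk = 0xF0F0F0F0F0F0F0F0ULL;
  assert(curr_bit_count >= 0 && curr_bit_count <= kCurrentBitCount);
  // The cast documents that the (asserted non-negative) amount is used unsigned.
  uint64_t right = curr_chunk >> static_cast<size_t>(kCurrentBitCount - curr_bit_count);
  return right == 0xF0F ? 0 : 1;  // top 12 bits of the chunk
}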
@@ -303,7 +303,7 @@ int FullQuantQuantizer::QuantNode(const FuncGraphPtr &func_graph) {
   auto outputs_diverg_info = calibrator_->GetOutputDivergInfo();
 
   auto cnodes = func_graph->GetOrderedCnodes();
-  for (auto &cnode : cnodes) {
+  for (const auto &cnode : cnodes) {
     auto op_name = cnode->fullname_with_scope();
     auto primitive = GetValueNode<PrimitivePtr>(cnode->input(0));
     if (primitive == nullptr) {
@@ -29,7 +29,6 @@
 #include "schema/inner/model_generated.h"
 #include "tools/converter/quantizer/quantizer.h"
-#include "tools/converter/quantizer/quantize_util.h"
 #include "tools/converter/quantizer/quant_params.h"
 #include "tools/converter/preprocess/preprocess_param.h"
 #include "tools/converter/quantizer/calibrator.h"
 #include "tools/converter/quantizer/data_distribution.h"
@@ -118,7 +118,7 @@ int InsertQuantNodeManager::InsertCastNode(const FuncGraphPtr &graph, const CNod
   return RET_OK;
 }
 
-int InsertQuantNodeManager::CheckDataType(const AnfNodePtr &input_node, TypeId check_type_id) {
+int InsertQuantNodeManager::CheckDataType(const AnfNodePtr &input_node, TypeId check_type_id) const {
   bool is_graph_input = IsGraphInput(input_node);
   if (!input_node->isa<mindspore::CNode>() && !is_graph_input) {
     return RET_NO_CHANGE;
@@ -43,7 +43,7 @@ class InsertQuantNodeManager {
 
   int InsertCastNode(const FuncGraphPtr &graph, const CNodePtr &cnode, size_t input_index, bool is_graph_input);
 
-  int CheckDataType(const AnfNodePtr &input_node, TypeId check_type_id);
+  int CheckDataType(const AnfNodePtr &input_node, TypeId check_type_id) const;
 
   int NewDynamicQuantNode(const FuncGraphPtr &graph, const CNodePtr &cnode);
 
@@ -22,7 +22,6 @@
 #include "schema/inner/model_generated.h"
 #include "src/common/log_adapter.h"
-#include "src/common/quant_utils.h"
 #include "include/errorcode.h"
 
 namespace mindspore::lite::quant {
 constexpr float kBinarySearchStep = 2.0;
@@ -18,7 +18,6 @@
 #include "tools/converter/quantizer/quant_helper/conv_quant_type_determiner.h"
 #include "tools/converter/quantizer/quantize_util.h"
-#include "src/common/log_adapter.h"
 #include "mindspore/core/ir/dtype/type_id.h"
 namespace mindspore::lite {
 bool ConvQuantTypeDeterminer::DetermineQuantWeight(const mindspore::schema::MetaGraphT &graph,
                                                    mindspore::schema::CNodeT *node) {
@@ -82,7 +82,7 @@ int ComputeBiasDataAndQuantParam(const std::vector<double> &bias_scales, const s
       quant_params->at(i).scale = bias_scale_tmp;
       MS_LOG(DEBUG) << "new filter scale: " << filter_scale;
     }
-    auto quant_data = (int32_t)std::round(raw_datas[i] / bias_scale_tmp);
+    auto quant_data = static_cast<int32_t>(std::round(raw_datas[i] / bias_scale_tmp));
     quant_datas->at(i) = quant_data;
   }
   return RET_OK;
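static_cast replaces the C-style cast: the same narrowing, but explicit, greppable, and restricted to related conversions. A trivial sketch:

#include <cmath>
#include <cstdint>

int main() {
  double raw_data = 127.6, bias_scale = 1.0;
  // static_cast makes the intentional double -> int32_t narrowing visible.
  auto quant_data = static_cast<int32_t>(std::round(raw_data / bias_scale));
  return quant_data == 128 ? 0 : 1;
}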
@@ -215,9 +215,9 @@ bool IndexingCompress(const std::set<T> &quant_data_set, const std::map<T, size_
     bits[index++] = (unique_value_cnt >> (bit_num - i - 1)) & (0x1);
   }
   // write the unique value set: each value has bit_num bit signed
-  for (auto unique_value : quant_data_set) {
+  for (auto iter = quant_data_set.cbegin(); iter != quant_data_set.cend(); ++iter) {
     for (size_t i = 0; i < bit_num; i++) {
-      bits[index++] = ((unique_value + (1 << (bit_num - 1))) >> (bit_num - i - 1)) & (0x1);
+      bits[index++] = ((*iter + (1 << (bit_num - 1))) >> (bit_num - i - 1)) & (0x1);
     }
   }
   // write the index: each index has unique_value_bit unsigned
@@ -375,8 +375,8 @@ bool PackRepetition(size_t bit_num, schema::TensorT *tensor) {
   }
   std::map<T, size_t> unique_value_index_map;
   auto index = 0;
-  for (auto value : quant_data_set) {
-    unique_value_index_map[value] = index++;
+  for (auto iter = quant_data_set.cbegin(); iter != quant_data_set.cend(); ++iter) {
+    unique_value_index_map[*iter] = index++;
   }
 
   auto unique_value_cnt = quant_data_set.size();
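Both loops over quant_data_set switch from a by-value range-for to explicit const_iterators: `for (auto value : quant_data_set)` copies each element of template type T, which the checker flags. A sketch of the iterator form; `for (const auto &value : quant_data_set)` would be an equivalent fix:

#include <cstddef>
#include <cstdint>
#include <map>
#include <set>

int main() {
  std::set<int16_t> quant_data_set = {-3, 0, 7};
  std::map<int16_t, size_t> unique_value_index_map;
  size_t index = 0;
  // Iterate without copying elements out of the set.
  for (auto iter = quant_data_set.cbegin(); iter != quant_data_set.cend(); ++iter) {
    unique_value_index_map[*iter] = index++;
  }
  return unique_value_index_map.size() == quant_data_set.size() ? 0 : 1;
}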
@@ -16,11 +16,9 @@
 
 #ifndef MINDSPORE_LITE_TOOLS_CONVERTER_QUANTIZER_QUANTIZER_H_
 #define MINDSPORE_LITE_TOOLS_CONVERTER_QUANTIZER_QUANTIZER_H_
-#include <unordered_map>
-#include <utility>
 #include <memory>
 #include "schema/inner/model_generated.h"
 #include "include/errorcode.h"
 #include "ir/func_graph.h"
 #include "ir/anf.h"
 #include "base/base.h"