!37973 fix code check and quant debug
Merge pull request !37973 from liyan2022/master
commit f51d585baa
@@ -747,7 +747,7 @@ STATUS GetShapeVectorFromStringTensor(const api::TensorPtr &tensor_info, ShapeVe
       (*offset)++;
       break;
     }
-    shape_size_str.push_back(tensor_data[*offset]);
+    shape_size_str.push_back(static_cast<char>(tensor_data[*offset]));
   }
   if (*offset == 0) {
     MS_LOG(ERROR) << "string tensor's dim size not found.";
@@ -768,7 +768,7 @@ STATUS GetShapeVectorFromStringTensor(const api::TensorPtr &tensor_info, ShapeVe
       shape_vector->push_back(std::stoi(shape_str));
       shape_str.clear();
     } else {
-      shape_str.push_back(tensor_data[*offset]);
+      shape_str.push_back(static_cast<char>(tensor_data[*offset]));
     }
     if (cnt == shape_size) {
       (*offset)++;
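Note on the two GetShapeVectorFromStringTensor hunks above: tensor_data is presumably a raw uint8_t buffer, while std::string::push_back takes char, so the unannotated narrowing is what the code check flags; static_cast<char> makes the byte-to-char conversion explicit without changing behavior. A minimal sketch of the pattern (hypothetical names, not the project code):

    #include <cstddef>
    #include <cstdint>
    #include <string>

    // Collect ASCII digits from a raw byte buffer until a delimiter byte.
    std::string ReadDigits(const uint8_t *data, size_t len, char delim) {
      std::string digits;
      for (size_t i = 0; i < len && static_cast<char>(data[i]) != delim; ++i) {
        // Explicit cast documents the intended byte-to-char conversion
        // and satisfies -Wconversion-style checkers.
        digits.push_back(static_cast<char>(data[i]));
      }
      return digits;
    }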
@@ -43,14 +43,14 @@ STATUS NHWC2NCHW(T *src_data, T *dst_data, std::vector<int32_t> shape) {
     MS_LOG(ERROR) << "The dim should be 4.";
     return RET_ERROR;
   }
-  size_t batch = shape.at(0);
-  size_t plane = shape.at(kAxis1) * shape.at(kAxis2);
-  size_t channel = shape.at(kAxis3);
-  for (size_t b = 0; b < batch; b++) {
-    for (size_t p = 0; p < plane; p++) {
-      for (size_t c = 0; c < channel; c++) {
-        size_t src_idx = b * plane * channel + p * channel + c;
-        size_t dst_idx = b * channel * plane + c * plane + p;
+  int32_t batch = shape.at(0);
+  int32_t plane = shape.at(kAxis1) * shape.at(kAxis2);
+  int32_t channel = shape.at(kAxis3);
+  for (int32_t b = 0; b < batch; b++) {
+    for (int32_t p = 0; p < plane; p++) {
+      for (int32_t c = 0; c < channel; c++) {
+        int32_t src_idx = b * plane * channel + p * channel + c;
+        int32_t dst_idx = b * channel * plane + c * plane + p;
         dst_data[dst_idx] = src_data[src_idx];
       }
     }
@@ -64,14 +64,14 @@ STATUS NCHW2NHWC(T *src_data, T *dst_data, std::vector<int32_t> shape) {
     MS_LOG(ERROR) << "The dim should be 4.";
     return RET_ERROR;
   }
-  size_t batch = shape.at(0);
-  size_t channel = shape.at(1);
-  size_t plane = shape.at(kAxis2) * shape.at(kAxis3);
-  for (size_t b = 0; b < batch; b++) {
-    for (size_t c = 0; c < channel; c++) {
-      for (size_t p = 0; p < plane; p++) {
-        size_t src_idx = b * channel * plane + c * plane + p;
-        size_t dst_idx = b * plane * channel + p * channel + c;
+  int32_t batch = shape.at(0);
+  int32_t channel = shape.at(1);
+  int32_t plane = shape.at(kAxis2) * shape.at(kAxis3);
+  for (int32_t b = 0; b < batch; b++) {
+    for (int32_t c = 0; c < channel; c++) {
+      for (int32_t p = 0; p < plane; p++) {
+        int32_t src_idx = b * channel * plane + c * plane + p;
+        int32_t dst_idx = b * plane * channel + p * channel + c;
         dst_data[dst_idx] = src_data[src_idx];
       }
     }
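Note on the two transpose hunks above: the index variables now use int32_t to match the std::vector<int32_t> shape, avoiding the implicit signed-to-unsigned conversions the code check reports when int32_t dims feed size_t arithmetic. A self-contained sketch of the NHWC-to-NCHW permutation these functions implement (assumed semantics, simplified signature):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // NHWC -> NCHW layout transpose, indexing in int32_t as in the hunks above.
    template <typename T>
    void Nhwc2Nchw(const T *src, T *dst, int32_t n, int32_t h, int32_t w, int32_t c) {
      const int32_t plane = h * w;
      for (int32_t b = 0; b < n; b++) {
        for (int32_t p = 0; p < plane; p++) {
          for (int32_t ch = 0; ch < c; ch++) {
            dst[b * c * plane + ch * plane + p] = src[b * plane * c + p * c + ch];
          }
        }
      }
    }

    int main() {
      // 1x2x2x2 tensor: NHWC values 0..7 become 0 2 4 6 1 3 5 7 in NCHW.
      std::vector<int32_t> src = {0, 1, 2, 3, 4, 5, 6, 7};
      std::vector<int32_t> dst(src.size());
      Nhwc2Nchw(src.data(), dst.data(), 1, 2, 2, 2);
      for (int32_t v : dst) printf("%d ", static_cast<int>(v));
      printf("\n");
      return 0;
    }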
@@ -56,9 +56,9 @@ Status DpicoPassThroughInterface::Infer(std::vector<mindspore::MSTensor> *inputs
 
   // get param value
   std::map<std::string, const flatbuffers::Vector<uint8_t> *> custom_attrs;
-  uint32_t num_output = 0;
-  uint32_t block_height = 0;
-  uint32_t block_width = 0;
+  int64_t num_output = 0;
+  int64_t block_height = 0;
+  int64_t block_width = 0;
   if (param->attr() == nullptr) {
     MS_LOG(ERROR) << "param->attr() is nullptr";
     return kLiteError;
@@ -41,7 +41,7 @@ STATUS SetNumOutput(const api::CNodePtr &cnode, const api::PrimitivePtr &prim, m
   }
   auto output_shape = output_shapes.at(0);
   if (prim->GetAttr(kNumOutput) != nullptr) {
-    uint32_t num_output = api::GetValue<int64_t>(prim->GetAttr(kNumOutput));
+    uint32_t num_output = static_cast<size_t>(api::GetValue<int64_t>(prim->GetAttr(kNumOutput)));
     if (output_shape.back() != num_output) {
       MS_LOG(ERROR) << "num output attr isn't matched with fc output shape.";
       return RET_ERROR;
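Note on the two hunks above: both touch how integer attributes are typed. The DPICO custom-op attributes (num_output, block_height, block_width) are widened to int64_t, which presumably matches the width the attribute values are parsed with, and SetNumOutput now casts the int64_t attribute explicitly instead of relying on an implicit conversion. For reference, a sketch of a range-checked narrowing (this is not what the commit does; the commit casts directly):

    #include <cstdint>
    #include <limits>

    // Narrow an int64_t attribute to uint32_t, reporting failure instead
    // of silently truncating out-of-range values.
    bool NarrowToU32(int64_t value, uint32_t *out) {
      if (value < 0 || value > static_cast<int64_t>(std::numeric_limits<uint32_t>::max())) {
        return false;
      }
      *out = static_cast<uint32_t>(value);
      return true;
    }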
@@ -324,7 +324,7 @@ int CalibDataGenerator::Run(const api::AnfNodePtrList &graph_inputs, const api::
         return RET_ERROR;
       }
       dump_op_info.dump_op_name = tuple_get_item_cnode->input(1)->fullname_with_scope();
-      dump_op_info.output_index = GetTupleGetItemOutIndex(tuple_get_item_cnode);
+      dump_op_info.output_index = static_cast<int32_t>(GetTupleGetItemOutIndex(tuple_get_item_cnode));
     }
   }
   if (image_lists.find(dump_op_info.dump_op_name) == image_lists.end()) {
@@ -85,7 +85,7 @@ class CalibDataGenerator {
       shape_size *= static_cast<size_t>(op_attr.shape.at(i));
     }
     ifs.seekg(0, std::ios::end);
-    size_t file_size = ifs.tellg();
+    size_t file_size = static_cast<size_t>(ifs.tellg());
     if (file_size != shape_size * sizeof(T)) {
       MS_LOG(ERROR) << "file size " << file_size << " is not equal to shape size " << shape_size;
       return RET_ERROR;
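Note on the two hunks above: std::ifstream::tellg() returns a signed stream position, and GetTupleGetItemOutIndex presumably returns a wider or differently-signed integer than the int32_t output_index field, so both assignments now carry explicit static_casts to satisfy the conversion checks. A minimal sketch of the tellg() pattern with the failure case handled (hypothetical helper, assuming the stream is already open):

    #include <cstddef>
    #include <fstream>

    // Get a file's size as size_t, guarding the signed tellg() result.
    bool GetFileSize(std::ifstream *ifs, size_t *out) {
      ifs->seekg(0, std::ios::end);
      std::streamoff pos = ifs->tellg();
      if (pos < 0) {
        return false;  // tellg() reports failure as a negative position
      }
      *out = static_cast<size_t>(pos);
      return true;
    }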
@@ -74,6 +74,10 @@ int Normalize(cv::Mat *image, const std::map<int, double> &mean, const std::map<
   }
   std::vector<double> mean_vec;
   std::vector<double> var_vec;
+  if (image->channels() < 0) {
+    MS_LOG(ERROR) << "image channels should not be negative.";
+    return RET_ERROR;
+  }
   size_t img_channel_size = image->channels();
   if (mean.empty()) {
     mean_vec = std::vector<double>(img_channel_size, 0.0);
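Note on the hunk above: cv::Mat::channels() returns int, and the value is then stored into a size_t; without the new guard a negative int would wrap to a huge unsigned count. A minimal sketch of the guard pattern (hypothetical helper, not the project code):

    #include <cstddef>
    #include <cstdio>

    // Validate a signed count before using it as an unsigned size;
    // e.g. -1 would otherwise wrap to SIZE_MAX.
    bool ToChannelCount(int channels, size_t *out) {
      if (channels < 0) {
        fprintf(stderr, "channels should not be negative\n");
        return false;
      }
      *out = static_cast<size_t>(channels);
      return true;
    }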
@@ -73,6 +73,7 @@ void DebugInfoManager::AddQuantParamExtend(const mindspore::lite::LiteGraph::Nod
   quant_param_extend.node_type = schema::EnumNamePrimitiveType(static_cast<PrimitiveType>(node->node_type_));
   std::vector<int> dims;
   int element_num = 1;
+  MS_CHECK_PTR_IF_NULL(tensor->dims());
   for (size_t j = 0; j < tensor->dims()->size(); j++) {
     auto dim = tensor->dims()->data()[j];
     dims.push_back(dim);
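Note on the hunk above: tensor->dims() presumably returns a flatbuffers vector pointer that can be null, and the loop dereferences it, so the added MS_CHECK_PTR_IF_NULL bails out of the void function first. A sketch of what such a guard macro typically expands to (assumed shape; see the project's actual definition):

    // Assumed expansion of a null-guard macro for void functions.
    #define CHECK_PTR_IF_NULL(ptr) \
      do {                         \
        if ((ptr) == nullptr) {    \
          return;                  \
        }                          \
      } while (0)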
@@ -486,7 +487,11 @@ MSKernelCallBack DebugInfoManager::GetQuantBeforeCallBack(
       if (debug_mode == quant::FAST && (origin_outputs_.find(tensor.Name()) == origin_outputs_.end())) {
         continue;
       }
-      MS_LOG(INFO) << "Get input " << tensor.Name() << " statistics info.";
+      MS_LOG(DEBUG) << "Get input " << tensor.Name() << " statistics info.";
+      if (op_parameters.find(call_param.node_name) == op_parameters.end()) {
+        MS_LOG(ERROR) << tensor.Name() << " op_parameters find node name " << call_param.node_name << " failed.";
+        return false;
+      }
       auto is_const = static_cast<mindspore::lite::Tensor *>(lite_tensor)->category() == CONST_TENSOR ||
                       static_cast<mindspore::lite::Tensor *>(lite_tensor)->category() == CONST_SCALAR;
       if (is_const) {
@@ -562,6 +567,10 @@ MSKernelCallBack DebugInfoManager::GetAfterCallBack(const std::map<std::string,
       MS_LOG(INFO) << " Get output " << tensor.Name() << " statistics info.";
       auto lite_tensor = quant::MSTensorToLiteTensor(tensor);
       auto lite_inputs = quant::MSTensorToLiteTensors(inputs);
+      if (op_parameters.find(call_param.node_name) == op_parameters.end()) {
+        MS_LOG(ERROR) << tensor.Name() << " op_parameters find node name " << call_param.node_name << " failed.";
+        return false;
+      }
       AddComparedInfo(call_param, lite_inputs, op_parameters.at(call_param.node_name), false, i, lite_tensor,
                       debug_mode);
     }
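Note on the two callback hunks above: both add the same guard, checking that call_param.node_name exists in op_parameters before op_parameters.at() is reached, since std::map::at() throws std::out_of_range on a missing key; the input-side hunk also drops the per-tensor statistics message from INFO to DEBUG, which keeps quant debug runs quieter. A minimal sketch of the lookup guard (hypothetical helper):

    #include <map>
    #include <string>

    // Return a pointer to the mapped value, or nullptr if the key is absent,
    // instead of letting std::map::at() throw.
    template <typename V>
    const V *FindParam(const std::map<std::string, V> &params, const std::string &name) {
      auto it = params.find(name);
      return it == params.end() ? nullptr : &it->second;
    }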