fix bug in thread_pool & fix codex

This commit is contained in:
hangangqiang 2020-11-27 17:08:30 +08:00
parent 627e4d0cf3
commit fcde1b3623
16 changed files with 39 additions and 31 deletions

View File

@ -47,6 +47,11 @@ class ParamValueLite : public Value {
// Returns the element data type of the tensor (e.g. float32, int32) as a TypeId.
TypeId tensor_type() const { return type_id_; }
// Sets the element data type of the tensor.
void set_tensor_type(const TypeId type_id) { type_id_ = type_id; }
// Records the raw data buffer and its byte size in a single call, so the
// pointer and size can never get out of sync (the motivation for this
// commit: it replaces separate set_tensor_addr()/set_tensor_size() calls
// at every call site).
// NOTE(review): stores `addr` as-is without copying; ownership/lifetime of
// the buffer is not visible from this block — presumably the caller (or the
// enclosing ParamValueLite) manages it. Confirm against the destructor.
void SetTensorData(void *addr, const size_t size) {
this->tensor_addr_ = addr;
this->tensor_size_ = size;
}
int tensor_shape_size() const {
int size = 1;
for (auto val : tensor_shape_) {

View File

@ -402,8 +402,7 @@ int SortCpuProcessor() {
}
int err_code = SetArch(freq_set, gCoreNum);
if (err_code != RET_TP_OK) {
LOG_ERROR("set arch failed.");
return RET_TP_ERROR;
LOG_INFO("set arch failed, ignoring arch.");
}
// sort core id by frequency into descending order
for (int i = 0; i < gCoreNum; ++i) {

View File

@ -477,7 +477,12 @@ int AnfExporter::ConvertInputValueNode(const std::shared_ptr<AnfNode> &input_ano
paramTensor->format = schema::Format(valueLite->format());
paramTensor->dataType = valueLite->tensor_type();
paramTensor->dims = valueLite->tensor_shape();
memcpy(paramTensor->data.data(), valueLite->tensor_addr(), valueLite->tensor_size());
auto ret = memcpy_s(paramTensor->data.data(), valueLite->tensor_size() * sizeof(uint8_t), valueLite->tensor_addr(),
valueLite->tensor_size());
if (ret != EOK) {
MS_LOG(ERROR) << "memcpy_s data into tensor failed.";
return RET_ERROR;
}
node_id_map_[valueNode->fullname_with_scope()] = meta_graphT->allTensors.size();
output_cnode->inputIndex.emplace_back(meta_graphT->allTensors.size());
meta_graphT->allTensors.emplace_back(std::move(paramTensor));

View File

@ -64,8 +64,7 @@ int AnfImporterFromMetaGraphT::ConverterConstTensor() {
delete[] tensor_data;
return RET_MEMORY_FAILED;
}
param_value->set_tensor_addr(tensor_data);
param_value->set_tensor_size(size);
param_value->SetTensorData(tensor_data, size);
parameter->set_default_param(param_value);
} else if (std::find(meta_graph_->inputIndex.begin(), meta_graph_->inputIndex.end(), i) ==
meta_graph_->inputIndex.end()) {

View File

@ -264,8 +264,7 @@ int AnfImporterFromProtobuf::BuildParameterForFuncGraph(const ParameterPtr &node
delete tensor_info;
return RET_NULL_PTR;
}
param_value->set_tensor_addr(tensor_data_buf);
param_value->set_tensor_size(tensor_info->Size());
param_value->SetTensorData(tensor_data_buf, tensor_info->Size());
param_value->set_tensor_type(tensor_info->data_type());
param_value->set_tensor_shape(tensor_info->shape());
node->set_default_param(param_value);
@ -475,7 +474,7 @@ bool AnfImporterFromProtobuf::ObtainValueNodeInTensorForm(const std::string &val
param_value->set_tensor_shape(shape_vector);
param_value->set_tensor_type(kDefaultValueSwitchMap[attr_tensor_type]);
const std::string &tensor_buf = attr_tensor.raw_data();
auto tensor_data = new (std::nothrow) char[tensor_buf.size() + 1];
auto tensor_data = new (std::nothrow) char[tensor_buf.size()];
if (tensor_data == nullptr) {
MS_LOG(ERROR) << "Tensor_data is nullptr";
return false;
@ -486,8 +485,7 @@ bool AnfImporterFromProtobuf::ObtainValueNodeInTensorForm(const std::string &val
MS_LOG(ERROR) << "Memcpy error: " << ret;
return false;
}
param_value->set_tensor_addr(tensor_data);
param_value->set_tensor_size(tensor_buf.size());
param_value->SetTensorData(tensor_data, tensor_buf.size());
auto new_value_node = NewValueNode(MakeValue(param_value));
if (new_value_node == nullptr) {
MS_LOG(ERROR) << "Make valuenode fail";

View File

@ -100,8 +100,8 @@ STATUS TFModelParser::ConvertConstTensor(const tensorflow::AttrValue &attr_value
return RET_ERROR;
}
}
param_value->set_tensor_addr(tensor_data);
tensor_size = shape_size * sizeof(float);
param_value->SetTensorData(tensor_data, tensor_size);
} else if (type == kNumberTypeInt32) {
auto tensor_data = new (std::nothrow) int[shape_size];
if (tensor_proto.int_val_size() == 1) {
@ -118,8 +118,8 @@ STATUS TFModelParser::ConvertConstTensor(const tensorflow::AttrValue &attr_value
return RET_ERROR;
}
}
param_value->set_tensor_addr(tensor_data);
tensor_size = shape_size * sizeof(int);
param_value->SetTensorData(tensor_data, tensor_size);
} else if (type == kNumberTypeBool) {
auto tensor_data = new (std::nothrow) int[shape_size];
if (tensor_proto.bool_val_size() == 1) {
@ -128,8 +128,8 @@ STATUS TFModelParser::ConvertConstTensor(const tensorflow::AttrValue &attr_value
tensor_data[i] = value;
}
}
param_value->set_tensor_addr(tensor_data);
tensor_size = shape_size * sizeof(int);
param_value->SetTensorData(tensor_data, tensor_size);
} else {
MS_LOG(ERROR) << "Unsupport dataType: " << type;
return RET_ERROR;
@ -138,7 +138,6 @@ STATUS TFModelParser::ConvertConstTensor(const tensorflow::AttrValue &attr_value
std::vector<int> param_shape(shape_vector->begin(), shape_vector->end());
param_value->set_tensor_shape(param_shape);
param_value->set_tensor_type(type);
param_value->set_tensor_size(tensor_size);
param_value->set_format(schema::Format::Format_NHWC);
parameter->set_default_param(param_value);
parameter->set_name("const_" + std::to_string(anf_node_map.size()) + "_parameter");

View File

@ -344,8 +344,7 @@ STATUS TfliteModelParser::ConvertConstTensor(const tflite::TensorT *tensor, Para
return RET_MEMORY_FAILED;
}
std::memcpy(tensor_data, data.data(), size);
param_value->set_tensor_addr(tensor_data);
param_value->set_tensor_size(size);
param_value->SetTensorData(tensor_data, size);
parameter->set_default_param(param_value);
}
return RET_OK;

View File

@ -1462,8 +1462,7 @@ STATUS PostTrainingQuantizer::BiasCorrection(const FuncGraphPtr &func_graph) {
delete[] tensor_data;
return false;
}
param_value->set_tensor_addr(tensor_data);
param_value->set_tensor_size(size);
param_value->SetTensorData(tensor_data, size);
parameter->set_default_param(param_value);
cnode->add_input(parameter);
DoBiasQuant(parameter, primitive_c);

View File

@ -104,6 +104,12 @@ int Cropper::GetModelOps() {
}
auto nodes = meta_graph->nodes();
for (auto node : *nodes) {
if (node->primitive() == nullptr) {
delete[] graph_buf;
MS_LOG(ERROR) << "node primitive is nullptr!";
std::cerr << "node primitive is nullptr!" << std::endl;
return RET_ERROR;
}
this->all_operators_.insert(node->primitive()->value_type());
MS_LOG(DEBUG) << "PrimitiveType:" << schema::EnumNamePrimitiveType(node->primitive()->value_type())
<< " QuantType:" << schema::EnumNameQuantType(node->quantType());

View File

@ -361,8 +361,7 @@ ParameterPtr AddNewBiasNode(float *bias_data, const FuncGraphPtr &func_graph, in
ParamValueLitePtr param_value = std::make_shared<ParamValueLite>();
MS_ASSERT(param_value != nullptr);
param_value->set_tensor_addr(bias_data);
param_value->set_tensor_size(kernel_num * sizeof(float) / sizeof(uint8_t));
param_value->SetTensorData(bias_data, kernel_num * sizeof(float) / sizeof(uint8_t));
param_value->set_format(weight_tensor->format());
param_value->set_tensor_type(weight_tensor->tensor_type());
param_value->set_tensor_shape(shape);

View File

@ -103,8 +103,7 @@ STATUS GetRightMatmulInputParamter(const CNodePtr &stack_node, const ParameterPt
param_value->set_tensor_shape(rmatmul_input_shape);
param_value->set_tensor_type(fc_weight_param->tensor_type());
param_value->set_format(fc_weight_param->format());
param_value->set_tensor_addr(new_tensor_data);
param_value->set_tensor_size(joint_fullconnect_size * tensor_size);
param_value->SetTensorData(new_tensor_data, joint_fullconnect_size * tensor_size);
rmatmul_input->set_default_param(param_value);
return RET_OK;
}

View File

@ -106,8 +106,7 @@ ParameterPtr CreateNewParamter(const FuncGraphPtr &func_graph, Tensor *tensor) {
MS_LOG(ERROR) << "memcpy error: " << ret;
return nullptr;
}
param_value->set_tensor_addr(tensor_data);
param_value->set_tensor_size(size);
param_value->SetTensorData(tensor_data, size);
}
parameter->set_default_param(param_value);
return parameter;

View File

@ -88,8 +88,7 @@ STATUS GenNewConvBias(const ParameterPtr &down_bias_node, const ParameterPtr &do
param_value->set_tensor_shape({new_bias_size});
param_value->set_tensor_type(up_bias_param->tensor_type());
param_value->set_format(up_bias_param->format());
param_value->set_tensor_addr(new_bias_data);
param_value->set_tensor_size(sizeof(float) * new_bias_size);
param_value->SetTensorData(new_bias_data, sizeof(float) * new_bias_size);
new_bias_node->set_name(down_bias_node->fullname_with_scope());
new_bias_node->set_default_param(param_value);
new_bias_node->set_abstract(down_bias_node->abstract());
@ -142,8 +141,7 @@ STATUS GenNewConvWeight(const ParameterPtr &down_weight_node, const ParameterPtr
param_value->set_tensor_shape(new_weight_shape);
param_value->set_tensor_type(up_weight_param->tensor_type());
param_value->set_format(up_weight_param->format());
param_value->set_tensor_addr(new_weight_data);
param_value->set_tensor_size(sizeof(float) * size);
param_value->SetTensorData(new_weight_data, sizeof(float) * size);
new_weight_node->set_name(down_weight_node->fullname_with_scope());
new_weight_node->set_default_param(param_value);
new_weight_node->set_abstract(down_weight_node->abstract());

View File

@ -82,7 +82,10 @@ bool ClipConvertActivationPass::Run(const FuncGraphPtr &graph) {
MS_ASSERT(primitive != nullptr);
primitive->value.type = schema::PrimitiveType_Activation;
auto prim2 = new (std::nothrow) schema::ActivationT;
MS_ASSERT(prim2 != nullptr);
if (prim2 == nullptr) {
MS_LOG(ERROR) << "new ActivationT failed";
return false;
}
if (min == 0 && max == 6) {
prim2->type = schema::ActivationType_RELU6;
} else {

View File

@ -94,7 +94,7 @@ STATUS InferShapePass::SetParameterAbstract(const ParameterPtr &parameter) {
delete[] tensor_data;
return RET_ERROR;
}
new_value->set_tensor_addr(tensor_data);
new_value->SetTensorData(tensor_data, new_value->tensor_size());
}
new_abstract->set_value(new_value);
parameter->set_abstract(new_abstract);

View File

@ -55,9 +55,10 @@ bool TfliteInputsOrderExchangePass::Run(const FuncGraphPtr &graph) {
opt::GetCNodeType(node) == schema::PrimitiveType_SpaceToBatchND ||
opt::GetCNodeType(node) == schema::PrimitiveType_BatchToSpaceND ||
opt::GetCNodeType(node) == schema::PrimitiveType_SpaceToDepth ||
(opt::GetCNodeType(node) == schema::PrimitiveType_Pad &&
(opt::GetCNodeType(node) == schema::PrimitiveType_Pad && primitive_c->primitiveT()->value.AsPad() != nullptr &&
primitive_c->primitiveT()->value.AsPad()->paddingMode == schema::PaddingMode_CONSTANT) ||
(opt::GetCNodeType(node) == schema::PrimitiveType_Resize &&
primitive_c->primitiveT()->value.AsResize() != nullptr &&
primitive_c->primitiveT()->value.AsResize()->newHeight != 0 &&
primitive_c->primitiveT()->value.AsResize()->newWidth != 0)) {
std::vector<AnfNodePtr> new_inputs;