!22354 [MSLITE] Support the shape operator with input datatype of int64.

Merge pull request !22354 from wangshaocong/bugfix
This commit is contained in:
i-robot 2021-08-27 01:04:43 +00:00 committed by Gitee
commit 12ced9e89b
9 changed files with 56 additions and 27 deletions

View File

@ -83,15 +83,6 @@ int CalculateNewHeightAndWidth(const TensorC *const *inputs, size_t inputs_size,
if (inputs_size == 2) {
return HandleTwoInputs(inputs, param);
} else if (inputs_size == 1) {
} else if (inputs_size == 4) {
if (inputs[3]->data_ == NULL) {
return NNACL_INFER_INVALID;
}
if (GetElementNum(inputs[3]) < 2) {
return NNACL_ERR;
}
const TensorC *inputs_tmp[2] = {inputs[0], inputs[3]};
return HandleTwoInputs(inputs_tmp, param);
} else {
return NNACL_ERR;
}

View File

@ -24,14 +24,6 @@ int MaxPoolingNPUOp::IsSupport(const schema::Primitive *primitive, const std::ve
MS_LOG(ERROR) << "Get null primitive value for op ." << name_;
return RET_ERROR;
}
auto stride_h = static_cast<int>(*(pooling_prim->strides()->begin()));
auto stride_w = static_cast<int>(*(pooling_prim->strides()->begin() + 1));
auto pad_u = static_cast<int>(*(pooling_prim->pad()->begin() + PAD_UP));
auto pad_l = static_cast<int>(*(pooling_prim->pad()->begin() + PAD_LEFT));
if (pad_u > stride_h || pad_l > stride_w) {
MS_LOG(WARNING) << "Npu pooling does not support pad > stride.";
return RET_NOT_SUPPORT;
}
return RET_OK;
}

View File

@ -57,4 +57,5 @@ REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_Shape, LiteKernelCreator<ShapeC
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Shape, LiteKernelCreator<ShapeCPUKernel>)
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Shape, LiteKernelCreator<ShapeCPUKernel>)
REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_Shape, LiteKernelCreator<ShapeCPUKernel>)
REG_KERNEL(kCPU, kNumberTypeInt64, PrimitiveType_Shape, LiteKernelCreator<ShapeCPUKernel>)
} // namespace mindspore::kernel

View File

@ -96,3 +96,4 @@ ml_motion_capture_spin-mobile;4 1.5
ml_motion_capture_spin-res50;4 1
ml_motion_capture_spin-res50-poolingnoceilmode;4 1
ml_video_edit_hair_dyeing_migrate_v2_fix.onnx;4 1.5
ml_motion_capture_yolov3-spp-deploy_ddk_prune 1

View File

@ -107,3 +107,4 @@ Q888_CV_face_recognition_self.onnx
ml_video_edit_dimming_tech_model_styleGan.onnx;2
ml_video_edit_hair_dyeing_migrate_v2_fix.onnx;4
GAT_SISO.onnx;1;913,29;;input_dependent
ml_intelligent_cockpit_model.onnx;3;1,32:1,32:1,32

View File

@ -104,3 +104,4 @@ ml_video_edit_makeup_mobilenetv203.onnx 4
ml_video_edit_hair_dyeing_migrate_v2.onnx;4:3,4,1,2 2.5
Q888_CV_face_recognition_self.onnx 3.6
ml_video_edit_hair_dyeing_migrate_v2_fix.onnx;4 3
ml_intelligent_cockpit_model.onnx;3;1,32:1,32:1,32 3.5

View File

@ -102,7 +102,7 @@ decoder_step_nocumsum_v5.pb;13:11,2,13,12,10,7,3,5,1,4,9,8,6;1,512:1,512:1,512:1
ml_audio_kit_encoder_v5.pb;6:5,2,1,4,6,3;1:1,32:1,32:1,32:1:1,32
hiai_nlu_model_v1.pb;3:1,3,2;1,16:1,16:1,16 2.0
hiai_nlu_model_v2.pb;7:5,7,6,4,3,2,1;1,5:1,5:1,5:1,98:1,174:1,6:1,5
# hiai_nlu_model_multi.pb;6:1,6,2,5,4,3;1,32:1,32:1,32:1,74:1,11:1,6
hiai_nlu_model_multi.pb;6:1,6,2,5,4,3;1,32:1,32:1,32:1,74:1,11:1,6
hiai_nlu_model_single.pb;3:1,3,2;1,32:1,32:1,32
fsr_270_mindspore.pb
fsr_360_mindspore.pb

View File

@ -86,7 +86,7 @@ ml_tts_decoder.pb;5:4,5,2,1,3 2.5
ml_tts_vocoder.pb;66:2,7,6,1,3,4,5,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,66,65,64,63,62,61,60,59,58,57,56,55,54,53,52,51,50,49,48,47,46,45,44,43,42,41,40,39,38 53
hiai_transformer_encoder.pb;15:1,3,4,5,6,7,8,9,10,11,12,13,14,15,2 4
decoder_step_nocumsum_v5.pb;13:11,2,13,12,10,7,3,5,1,4,9,8,6;1,512:1,512:1,512:1,512:1,512:1,127,320:1,1429,2:1,127:1:1,127:1,512:1,80:1,127 1.2
# hiai_nlu_model_multi.pb;6:1,6,2,5,4,3;1,32:1,32:1,32:1,74:1,11:1,6 25
hiai_nlu_model_multi.pb;6:1,6,2,5,4,3;1,32:1,32:1,32:1,74:1,11:1,6 40
hiai_nlu_model_single.pb;3:1,3,2;1,32:1,32:1,32 2470
fsr_270_mindspore.pb 6.0
fsr_360_mindspore.pb 6.5

View File

@ -108,6 +108,16 @@ STATUS OnnxInputAdjust::ReplaceInt64ParameterNode(const FuncGraphPtr &func_graph
return lite::RET_OK;
}
// Checks whether a parameter node carries usable data.
// Returns true when the parameter has no default value (i.e. it is a graph
// input whose data arrives at runtime), or when its default tensor holds a
// non-zero number of bytes.
// NOTE(review): assumes a defaulted parameter's default_param() is a
// tensor::Tensor — confirmed only for the ONNX-adjust callers visible here.
bool ValidParameterNode(const ParameterPtr &param_node) {
  MS_ASSERT(param_node != nullptr);
  if (!param_node->has_default()) {
    return true;
  }
  auto tensor_info = std::dynamic_pointer_cast<tensor::Tensor>(param_node->default_param());
  MS_ASSERT(tensor_info != nullptr);
  // MS_ASSERT compiles out in release builds; if the default value is not a
  // tensor::Tensor the cast yields nullptr — treat that as "no data" instead
  // of dereferencing a null pointer.
  if (tensor_info == nullptr) {
    return false;
  }
  return tensor_info->Size() != 0;
}
STATUS OnnxInputAdjust::AdjustResize(const CNodePtr &cnode) {
MS_ASSERT(cnode != nullptr);
auto node = cnode->input(0);
@ -120,17 +130,49 @@ STATUS OnnxInputAdjust::AdjustResize(const CNodePtr &cnode) {
if (resize_prim->GetAttr(ops::kCoordinateTransformMode) == nullptr) {
return lite::RET_OK;
}
if (cnode->inputs().size() > opt::kInputSizeFour &&
resize_prim->get_coordinate_transform_mode() == mindspore::HALF_PIXEL) {
std::vector<AnfNodePtr> new_resize_inputs;
new_resize_inputs.push_back(cnode->inputs()[0]);
new_resize_inputs.push_back(cnode->inputs()[1]);
new_resize_inputs.push_back(cnode->inputs()[opt::kInputIndexFour]);
cnode->set_inputs(new_resize_inputs);
} else if (cnode->inputs().size() == opt::kInputSizeFour) {
if (cnode->inputs().size() == opt::kInputSizeFour) {
auto new_input = cnode->inputs();
new_input.erase(new_input.begin() + opt::kInputIndexTwo);
cnode->set_inputs(new_input);
} else if (cnode->inputs().size() > opt::kInputSizeFour) {
std::vector<AnfNodePtr> new_resize_inputs;
new_resize_inputs.push_back(cnode->inputs()[0]);
new_resize_inputs.push_back(cnode->inputs()[1]);
// remove roi and checkout the scale or size as the third input.
int shape_index = opt::kInputIndexFour;
auto scale_node = cnode->inputs()[opt::kInputIndexThree];
auto size_node = cnode->inputs()[opt::kInputIndexFour];
MS_ASSERT(scale_node != nullptr);
MS_ASSERT(size_node != nullptr);
if (scale_node->isa<CNode>() && size_node->isa<CNode>()) {
MS_LOG(ERROR) << "One of scale and size should be specified.";
return lite::RET_ERROR;
} else if ((scale_node->isa<CNode>() && size_node->isa<Parameter>()) ||
(scale_node->isa<Parameter>() && size_node->isa<CNode>())) {
auto param_node =
scale_node->isa<Parameter>() ? scale_node->cast<ParameterPtr>() : size_node->cast<ParameterPtr>();
MS_ASSERT(param_node != nullptr);
if (ValidParameterNode(param_node)) {
MS_LOG(ERROR) << "One of scale and size should be specified.";
return lite::RET_ERROR;
}
shape_index = scale_node->isa<CNode>() ? opt::kInputIndexThree : opt::kInputIndexFour;
} else if (scale_node->isa<Parameter>() && size_node->isa<Parameter>()) {
auto scale_param = scale_node->cast<ParameterPtr>();
auto size_param = size_node->cast<ParameterPtr>();
MS_ASSERT(scale_param != nullptr);
MS_ASSERT(size_param != nullptr);
bool is_scale_valid = ValidParameterNode(scale_param);
bool is_size_valid = ValidParameterNode(size_param);
if (!(is_scale_valid ^ is_size_valid)) {
MS_LOG(ERROR) << "One of scale and size should be specified.";
return lite::RET_ERROR;
}
shape_index = is_scale_valid ? opt::kInputIndexThree : opt::kInputIndexFour;
}
new_resize_inputs.push_back(cnode->inputs()[shape_index]);
cnode->set_inputs(new_resize_inputs);
}
return lite::RET_OK;
}