forked from mindspore-Ecosystem/mindspore
!21864 fix graph input
Merge pull request !21864 from hangq/graph_inout2
This commit is contained in: commit 9ccfee288b
@@ -122,6 +122,6 @@ ml_face_emotion
hdc_ocr_recog_horizontal
ml_Heatmap_depth_240180;2
ml_Heatmap_depth_180240;2
ml_video_edit_person_divison_video;2
ml_video_edit_person_divison_video;2:2,1
ml_video_edit_hair_dyeing_segmodel_v2
ml_video_edit_hairline_segmentation;3

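The entries that gain a colon after the input count (for example `ml_video_edit_person_divison_video;2:2,1`) appear to extend the `model;input_num` convention with an input-order permutation; the benchmark script further below splits this field on `:`. A minimal C++ sketch of the assumed field layout, for illustration only (the real parsing is the awk code in Run_Benchmark):

#include <iostream>
#include <sstream>
#include <string>
#include <vector>

// Illustrative sketch only: decompose a benchmark config entry of the
// assumed form "model_name;input_num[:input_order];input_shapes".
// The optional input_order (e.g. "2,1") is the permutation in which the
// pre-generated input files are fed to the converted model.
int main() {
  std::string model_info = "ml_video_edit_person_divison_video;2:2,1";
  std::vector<std::string> fields;
  std::stringstream ss(model_info);
  std::string field;
  while (std::getline(ss, field, ';')) fields.push_back(field);

  std::string name = fields[0];
  std::string input_num = fields.size() > 1 ? fields[1] : "";
  std::string input_order;
  auto colon = input_num.find(':');
  if (colon != std::string::npos) {  // "2:2,1" -> num "2", order "2,1"
    input_order = input_num.substr(colon + 1);
    input_num = input_num.substr(0, colon);
  }
  std::cout << name << " num=" << input_num << " order=" << input_order << "\n";
}
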
@@ -31,16 +31,16 @@ ml_video_edit_style_transfer_autoportrait.onnx 9
ml_video_edit_style_transfer_candy.onnx 11
ml_video_edit_style_transfer_gongnongbing.onnx 10
ml_video_edit_style_transfer_starry.onnx 11
porseg_tmp.onnx;2 1
porseg_tmp.onnx;2:2,1 1
ml_video_edit_Mnet 1.5
ml_video_edit_hairSeg_have_imageProcessLayer_interpTo145 0.5
ml_video_edit_img_segment 1
ml_video_edit_video_segment_gauss_adaptis_part1 2
ml_video_edit_generate_filter.pb 1
ml_video_edit_img_segment_adaptise.pb;2 0.5
ml_video_edit_video_segment_gauss_adaptis_part2.pb;2 10
ml_video_edit_img_segment_adaptise.pb;2:2,1 0.5
ml_video_edit_video_segment_gauss_adaptis_part2.pb;2:2,1 10
ml_video_edit_person_divison_pic 0.5
ml_video_edit_person_divison_video;2 13
ml_video_edit_person_divison_video;2:2,1 13
ml_video_edit_judge.onnx 5
ml_video_edit_vignet.onnx 0.5
hdc_Face_Aesthetic_MTI_Aesthetic 0.5
@@ -67,12 +67,12 @@ hdc_ocr_attention.onnx 0.5 #too many subgraphs
# hdc_ocr_detect.onnx 30 #too many subgraphs
ml_edu_kit_hand_detection.onnx 1
ml_edu_kit_hand_key_position.onnx 2
ml_video_edit_oneclick_adaptis.pb;3 2.4
ml_video_edit_oneclick_adaptis.pb;3:2,1,3 2.4
densenet.tflite 3
resnet_v2_101_299.tflite 1
ml_video_edit_enhance.pb 2
ml_video_edit_video_segment_gauss_adaptis_part2_pb2tflite.tflite;2 10
ml_video_edit_img_segment_adaptise_pb2tflite.tflite;2 0.5
ml_video_edit_video_segment_gauss_adaptis_part2_pb2tflite.tflite;2:2,1 10
ml_video_edit_img_segment_adaptise_pb2tflite.tflite;2:2,1 0.5
# The fifth value of ml_video_edit_imitate_filter.onnx's output is very small (about 1e-5).
ml_video_edit_imitate_filter.onnx 200
hdc_mobilenet_1w_class.onnx 20
@@ -83,12 +83,12 @@ ml_video_edit_art_generate.onnx 0.5
ml_video_edit_art_transfer.onnx;3 3
ml_video_edit_enhance_update_tmp.onnx 0.5
ml_video_edit_art_generate_20210513.onnx 0.5
ml_video_edit_art_transfer_20210513.onnx;3 0.5
ml_video_edit_art_transfer_20210513.onnx;3:1,3,2 0.5
ml_video_edit_hair_dyeing_segmodel_v2 0.5
ml_video_edit_makeup_mobilenetv203.onnx 2
ml_video_edit_hairline_segmentation;3 0.5
ml_video_edit_hair_dyeing_migrate_v2.onnx;4 0.5
ml_audio_kit_encoder_v5.pb;6;1,32:1,32:1,32:1,32:1:1
ml_video_edit_hair_dyeing_migrate_v2.onnx;4:3,4,1,2 0.5
ml_audio_kit_encoder_v5.pb;6:5,2,1,4,6,3;1:1,32:1,32:1,32:1:1,32
fsr_270_mindspore.pb 1
fsr_360_mindspore.pb 1
fsr_720_mindspore.pb 1

@@ -16,16 +16,16 @@ ml_video_edit_style_transfer_autoportrait.onnx 9
ml_video_edit_style_transfer_candy.onnx 11
ml_video_edit_style_transfer_gongnongbing.onnx 11
ml_video_edit_style_transfer_starry.onnx 11
porseg_tmp.onnx;2 1
porseg_tmp.onnx;2:2,1 1
ml_video_edit_Mnet 1.5
ml_video_edit_hairSeg_have_imageProcessLayer_interpTo145 0.5
ml_video_edit_img_segment 1
ml_video_edit_video_segment_gauss_adaptis_part1 2
ml_video_edit_generate_filter.pb 1
ml_video_edit_img_segment_adaptise.pb;2 0.5
ml_video_edit_video_segment_gauss_adaptis_part2.pb;2 10
ml_video_edit_img_segment_adaptise.pb;2:2,1 0.5
ml_video_edit_video_segment_gauss_adaptis_part2.pb;2:2,1 10
ml_video_edit_person_divison_pic 0.5
ml_video_edit_person_divison_video;2 13
ml_video_edit_person_divison_video;2:2,1 13
ml_video_edit_judge.onnx 5
ml_video_edit_vignet.onnx 0.5
hdc_Face_Aesthetic_MTI_Aesthetic 0.5
@@ -52,12 +52,12 @@ ml_video_edit_v10_best_model_nomean_20200723 8
# hdc_ocr_detect.onnx 30 #too many subgraphs
ml_edu_kit_hand_detection.onnx 1
ml_edu_kit_hand_key_position.onnx 2
ml_video_edit_oneclick_adaptis.pb;3 2.4
ml_video_edit_oneclick_adaptis.pb;3:2,1,3 2.4
densenet.tflite 3
resnet_v2_101_299.tflite 1
ml_video_edit_enhance.pb 2
ml_video_edit_video_segment_gauss_adaptis_part2_pb2tflite.tflite;2 10
ml_video_edit_img_segment_adaptise_pb2tflite.tflite;2 0.5
ml_video_edit_video_segment_gauss_adaptis_part2_pb2tflite.tflite;2:2,1 10
ml_video_edit_img_segment_adaptise_pb2tflite.tflite;2:2,1 0.5
# The fifth value of ml_video_edit_imitate_filter.onnx's output is very small (about 1e-5).
ml_video_edit_imitate_filter.onnx 200
hdc_mobilenet_1w_class.onnx 20
@@ -69,6 +69,6 @@ ml_video_edit_art_transfer.onnx;3 3
ml_video_edit_enhance_update_tmp.onnx 0.5
#ml_video_edit_art_generate_20210513.onnx, output is out of range
# ConstructSubgraph changed; adjust threshold (3->29) for NLU temporarily
ml_video_edit_art_transfer_20210513.onnx;3 29
ml_video_edit_art_transfer_20210513.onnx;3:1,3,2 29
ml_video_edit_hair_dyeing_segmodel_v2 0.5
ml_video_edit_makeup_mobilenetv203.onnx 2

@@ -85,14 +85,14 @@ ml_asr_encoder_int8_202103.onnx
rpnt_pdr_conv2d_16_fixed_last.onnx
hdc_efficientnet_b3_1w_class.onnx
yolov5s.onnx
porseg_tmp.onnx;2
hiai_nlu_onnx_model_v1_0.onnx;3
hiai_nlu_onnx_model_v1_1.onnx;3
ml_video_edit_art_transfer_20210513.onnx;3
porseg_tmp.onnx;2:2,1
hiai_nlu_onnx_model_v1_0.onnx;3:3,1,2
hiai_nlu_onnx_model_v1_1.onnx;3:2,1,3
ml_video_edit_art_transfer_20210513.onnx;3:1,3,2
ml_asr_decoder_202103.onnx;2;1,64,512:1,64
decoder.onnx;2;1,7,512:1,7
ml_video_edit_makeup_mobilenetv203.onnx
ml_video_edit_hair_dyeing_migrate_v2.onnx;4
ml_video_edit_hair_dyeing_migrate_v2.onnx;4:3,4,1,2
# Current accuracy for ml_audio_kit_vocals_test is 1.7% because the softmax output of the last op has very small numbers.
ml_audio_kit_vocals_test.onnx;1;1,512,1024,2 2
gender_lstm_scd.onnx

@@ -97,9 +97,9 @@ hdc_efficientnet_b3_1w_class.onnx 18
yolov5s.onnx 2
ml_video_edit_art_transfer.onnx;3 3
decoder.onnx;2;1,7,512:1,7 113
ml_video_edit_art_transfer_20210513.onnx;3 1
ml_video_edit_art_transfer_20210513.onnx;3:1,3,2 1
ml_asr_decoder_202103.onnx;2;1,64,512:1,64 0.5
ml_video_edit_makeup_mobilenetv203.onnx 4
# The input of ml_video_edit_hair_dyeing_migrate_v2.onnx should be within [0, 1]
ml_video_edit_hair_dyeing_migrate_v2.onnx;4 2.5
ml_video_edit_hair_dyeing_migrate_v2.onnx;4:3,4,1,2 2.5
Q888_CV_face_recognition_self.onnx 3.5

@@ -1,5 +1,5 @@
ml_face_mnet 105
ml_face_landmark_2 2
mobilenet.tflite 0.5
transformer_20200831_encoder_fp32.tflite;36 70
transformer_20200831_decoder_fp32.tflite;11 35
#transformer_20200831_encoder_fp32.tflite;36 70
#transformer_20200831_decoder_fp32.tflite;11 35

@@ -72,11 +72,11 @@ siteAI_trans_nonlinear40g.pb;1;1,271
siteAI_trans_nonlinear134g.pb;1;1,137
siteAI_trans_nonlinear134g_nrz.pb;1;1,182
ml_vision_guide_detection2.pb;1;1,320,320,1
ml_tts_encoder.pb;4;1:1,44:1:1;;input_dependent
ml_tts_encoder.pb;4:2,4,3,1;1,44:1:1:1;;input_dependent
# encoder_0111_control_flow.pb is the same as ml_tts_encoder_control_flow.pb
#encoder_0111_control_flow.pb;4;1:1,44:1:1;;input_dependent
ml_video_edit_img_segment_adaptise.pb;2
ml_video_edit_video_segment_gauss_adaptis_part2.pb;2
ml_video_edit_img_segment_adaptise.pb;2:2,1
ml_video_edit_video_segment_gauss_adaptis_part2.pb;2:2,1
#fasterrcnn_crop.pb is the same model as gts_object_detect_Ics.pb.
#fasterrcnn_crop.pb;1;420,630,3
#decoder_step_201217.pb is the same model as ml_tts_decoder.pb.
@@ -85,25 +85,25 @@ ml_video_edit_video_segment_gauss_adaptis_part2.pb;2
#decoder_step_201217_modified.pb;5
#encoder_0111.pb is the same model as ml_tts_encoder.pb.
#encoder_0111.pb;4;1:1,44:1:1
encoder_201228.pb;3;1:1,22:1;;input_dependent
ml_video_edit_oneclick_adaptis.pb;3
tacotron_encoder_stf.pb;5;1:1,62:1,62:1,62:1,62;;input_dependent
female_model_step2_int16_noiseout.pb;66
ml_female_model_step6_noiseout.pb;66
ml_male_model_step6_noiseout.pb;66
ml_tts_decoder_control_flow.pb;5
ml_tts_decoder.pb;5
ml_tts_encoder_control_flow.pb;4;1:1,22:1:1;;input_dependent
ml_tts_vocoder.pb;66
encoder_201228.pb;3:2,3,1;1,22:1:1;;input_dependent
ml_video_edit_oneclick_adaptis.pb;3:2,1,3
tacotron_encoder_stf.pb;5:2,3,5,4,1;1,62:1,62:1,62:1,62:1;;input_dependent
female_model_step2_int16_noiseout.pb;66:2,7,6,1,3,4,5,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,66,65,64,63,62,61,60,59,58,57,56,55,54,53,52,51,50,49,48,47,46,45,44,43,42,41,40,39,38
ml_female_model_step6_noiseout.pb;66:2,7,6,1,3,4,5,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,66,65,64,63,62,61,60,59,58,57,56,55,54,53,52,51,50,49,48,47,46,45,44,43,42,41,40,39,38
ml_male_model_step6_noiseout.pb;66:2,7,6,1,3,4,5,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,66,65,64,63,62,61,60,59,58,57,56,55,54,53,52,51,50,49,48,47,46,45,44,43,42,41,40,39,38
ml_tts_decoder_control_flow.pb;5:5,4,3,1,2
ml_tts_decoder.pb;5:4,5,2,1,3
ml_tts_encoder_control_flow.pb;4:2,4,3,1;1,22:1:1:1;;input_dependent
ml_tts_vocoder.pb;66:2,7,6,1,3,4,5,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,66,65,64,63,62,61,60,59,58,57,56,55,54,53,52,51,50,49,48,47,46,45,44,43,42,41,40,39,38
hiai_nlu_model.pb;3;1,16:1,16:1,16
gts_object_detect_Ics.pb;1;420,630,3;;input_dependent
hiai_transformer_encoder.pb;15
decoder_step_nocumsum_v5.pb;13;1:1,512:1,1429,2:1,127:1,127:1,127:1,127,320:1,80:1,512:1,512:1,512:1,512:1,512
ml_audio_kit_encoder_v5.pb;6;1,32:1,32:1,32:1,32:1:1
hiai_nlu_model_v1.pb;3;1,16:1,16:1,16 2.0
hiai_nlu_model_v2.pb;7;1,5:1,6:1,174:1,98:1,5:1,5:1,5
hiai_nlu_model_multi.pb;6;1,32:1,32:1,6:1,11:1,74:1,32
hiai_nlu_model_single.pb;3;1,32:1,32:1,32
hiai_transformer_encoder.pb;15:1,3,4,5,6,7,8,9,10,11,12,13,14,15,2
decoder_step_nocumsum_v5.pb;13:11,2,13,12,10,7,3,5,1,4,9,8,6;1,512:1,512:1,512:1,512:1,512:1,127,320:1,1429,2:1,127:1:1,127:1,512:1,80:1,127
ml_audio_kit_encoder_v5.pb;6:5,2,1,4,6,3;1:1,32:1,32:1,32:1:1,32
hiai_nlu_model_v1.pb;3:1,3,2;1,16:1,16:1,16 2.0
hiai_nlu_model_v2.pb;7:5,7,6,4,3,2,1;1,5:1,5:1,5:1,98:1,174:1,6:1,5
hiai_nlu_model_multi.pb;6:1,6,2,5,4,3;1,32:1,32:1,32:1,74:1,11:1,6
hiai_nlu_model_single.pb;3:1,3,2;1,32:1,32:1,32
fsr_270_mindspore.pb
fsr_360_mindspore.pb
fsr_720_mindspore.pb

@@ -65,29 +65,29 @@ siteAI_trans_nonlinear134g.pb;1;1,137 0.5
siteAI_trans_nonlinear134g_nrz.pb;1;1,182 0.6
ml_vision_guide_detection2.pb;1;1,320,320,1 1
# ml_tts_encoder.pb has a round op, which causes round-off error when the fractional part of an input value is near 0.5
ml_tts_encoder.pb;4;1:1,44:1:1 9
ml_tts_encoder.pb;4:2,4,3,1;1,44:1:1:1 9
# encoder_0111_control_flow.pb is the same as ml_tts_encoder_control_flow.pb
#encoder_0111_control_flow.pb;4;1:1,44:1:1 10
ml_video_edit_video_segment_gauss_adaptis_part2.pb;2 11
ml_video_edit_img_segment_adaptise.pb;2 40
ml_video_edit_person_divison_video;2 38
ml_video_edit_oneclick_adaptis.pb;3 6
ml_video_edit_video_segment_gauss_adaptis_part2.pb;2:2,1 11
ml_video_edit_img_segment_adaptise.pb;2:2,1 40
ml_video_edit_person_divison_video;2:2,1 38
ml_video_edit_oneclick_adaptis.pb;3:2,1,3 6
#decoder_step_201217.pb is the same model as ml_tts_decoder.pb.
#decoder_step_201217.pb;5 187
#decoder_step_201217_modified.pb is the same model as ml_tts_decoder_control_flow.pb.
#decoder_step_201217_modified.pb;5 0.5
#encoder_0111.pb is the same model as ml_tts_encoder.pb.
#encoder_0111.pb;4;1:1,44:1:1
ml_female_model_step6_noiseout.pb;66 2
ml_male_model_step6_noiseout.pb;66 2.5
ml_tts_encoder_control_flow.pb;4;1:1,22:1:1 1.5
ml_tts_decoder_control_flow.pb;5 1
ml_tts_decoder.pb;5 2.5
ml_tts_vocoder.pb;66 53
hiai_transformer_encoder.pb;15 4
decoder_step_nocumsum_v5.pb;13;1:1,512:1,1429,2:1,127:1,127:1,127:1,127,320:1,80:1,512:1,512:1,512:1,512:1,512 1.2
hiai_nlu_model_multi.pb;6;1,32:1,32:1,6:1,11:1,74:1,32 25
hiai_nlu_model_single.pb;3;1,32:1,32:1,32 2470
ml_female_model_step6_noiseout.pb;66:2,7,6,1,3,4,5,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,66,65,64,63,62,61,60,59,58,57,56,55,54,53,52,51,50,49,48,47,46,45,44,43,42,41,40,39,38 2
ml_male_model_step6_noiseout.pb;66:2,7,6,1,3,4,5,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,66,65,64,63,62,61,60,59,58,57,56,55,54,53,52,51,50,49,48,47,46,45,44,43,42,41,40,39,38 2.5
ml_tts_encoder_control_flow.pb;4:2,4,3,1;1,22:1:1:1 1.5
ml_tts_decoder_control_flow.pb;5:5,4,3,1,2 1
ml_tts_decoder.pb;5:4,5,2,1,3 2.5
ml_tts_vocoder.pb;66:2,7,6,1,3,4,5,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,66,65,64,63,62,61,60,59,58,57,56,55,54,53,52,51,50,49,48,47,46,45,44,43,42,41,40,39,38 53
hiai_transformer_encoder.pb;15:1,3,4,5,6,7,8,9,10,11,12,13,14,15,2 4
decoder_step_nocumsum_v5.pb;13:11,2,13,12,10,7,3,5,1,4,9,8,6;1,512:1,512:1,512:1,512:1,512:1,127,320:1,1429,2:1,127:1:1,127:1,512:1,80:1,127 1.2
hiai_nlu_model_multi.pb;6:1,6,2,5,4,3;1,32:1,32:1,32:1,74:1,11:1,6 25
hiai_nlu_model_single.pb;3:1,3,2;1,32:1,32:1,32 2470
fsr_270_mindspore.pb 6.0
fsr_360_mindspore.pb 6.5
fsr_720_mindspore.pb 2.0

@@ -185,18 +185,18 @@ bloom_isface.tflite
hiai_object_detect_814.tflite
hiai_object_tflite_graph_8bit.tflite
lma_tsec_shallow_channels16_ds2.1.1_model-best-f1.tflite
lite-model_arbitrary-image-stylization-inceptionv3_fp16_transfer_1.tflite;2
magenta_arbitrary-image-stylization-v1-256_fp16_transfer_1.tflite;2
albert_lite_base_squadv1_1.tflite;3
mobilebert_1_default_1.tflite;3
ml_video_edit_img_segment_adaptise_pb2tflite.tflite;2
ml_video_edit_video_segment_gauss_adaptis_part2_pb2tflite.tflite;2
hdc_tb_cn_neg.tflite;3
hiai_cv_labelDetectorModel_v3.tflite;2
lite-model_arbitrary-image-stylization-inceptionv3_fp16_transfer_1.tflite;2:2,1
magenta_arbitrary-image-stylization-v1-256_fp16_transfer_1.tflite;2:2,1
albert_lite_base_squadv1_1.tflite;3:2,3,1
mobilebert_1_default_1.tflite;3:2,3,1
ml_video_edit_img_segment_adaptise_pb2tflite.tflite;2:2,1
ml_video_edit_video_segment_gauss_adaptis_part2_pb2tflite.tflite;2:2,1
hdc_tb_cn_neg.tflite;3:3,1,2 0.5
hiai_cv_labelDetectorModel_v3.tflite;2:2,1
Modify_Out_ml_tacotron_decoder_step_stf.tflite;9;1,80:1,256:1,1024:1,1024:1,1024:1,1024:1,8:1,1,256:1
ml_headpose_pb2tflite.tflite;3;16:1,64,64,3:16
ml_ei_headpose_pb2tflite.tflite;3;16:1,64,64,3:16
lite-model_albert_lite_base_squadv1_metadata_1.tflite;3
ml_headpose_pb2tflite.tflite;3:2,3,1;1,64,64,3:16:16
ml_ei_headpose_pb2tflite.tflite;3:2,3,1;1,64,64,3:16:16
lite-model_albert_lite_base_squadv1_metadata_1.tflite;3:2,3,1
lite-model_mobilebert_1_metadata_1.tflite;3
Modify_Out_hiai_vad.tflite;2
add_uint8.tflite;2

@@ -213,10 +213,10 @@ bloom_isface.tflite 0.5
# The output values of conv layers range from -e±5 to e±5, which almost reaches the representation limit of fp16.
# In this range, fp16 data has a large bias, and the accumulation of this bias lowers the final precision.
hiai_object_detect_814.tflite 14
ml_video_edit_video_segment_gauss_adaptis_part2_pb2tflite.tflite;2 11
ml_video_edit_img_segment_adaptise_pb2tflite.tflite;2 0.5
hdc_tb_cn_neg.tflite;3 295
ml_video_edit_video_segment_gauss_adaptis_part2_pb2tflite.tflite;2:2,1 11
ml_video_edit_img_segment_adaptise_pb2tflite.tflite;2:2,1 0.5
hdc_tb_cn_neg.tflite;3:3,1,2 295
# The input of hiai_cv_labelDetectorModel_v3.tflite is in [0, 255].
hiai_cv_labelDetectorModel_v3.tflite;2 2
ml_headpose_pb2tflite.tflite;3;16:1,64,64,3:16 1
ml_ei_headpose_pb2tflite.tflite;3;16:1,64,64,3:16 0.6
hiai_cv_labelDetectorModel_v3.tflite;2:2,1 2
ml_headpose_pb2tflite.tflite;3:2,3,1;1,64,64,3:16:16 1
ml_ei_headpose_pb2tflite.tflite;3:2,3,1;1,64,64,3:16:16 0.6

@@ -133,7 +133,7 @@ function Run_Benchmark() {
    model_info=`echo ${line_info}|awk -F ' ' '{print $1}'`
    spec_acc_limit=`echo ${line_info}|awk -F ' ' '{print $2}'`
    model_name=`echo ${model_info}|awk -F ';' '{print $1}'`
    input_num=`echo ${model_info} | awk -F ';' '{print $2}'`
    input_config=`echo ${model_info} | awk -F ';' '{print $2}'`
    input_shapes=`echo ${model_info} | awk -F ';' '{print $3}'`
    spec_threads=`echo ${model_info} | awk -F ';' '{print $4}'`
    extra_info=`echo ${model_info} | awk -F ';' '{print $5}'`
@@ -172,13 +172,24 @@ function Run_Benchmark() {
    input_files=""
    output_file=""
    data_path=$3"/input_output/"
    if [[ ${input_num} == "" || ${input_num} == 1 ]]; then
    if [[ ${input_config} == "" || ${input_config} == 1 ]]; then
      input_files=${data_path}'input/'${model_name}'.ms.bin'
    else
      for i in $(seq 1 $input_num)
      do
        input_files=${input_files}${data_path}'input/'${model_name}'.ms.bin_'$i','
      done
      input_num=`echo ${input_config} | awk -F ':' '{print $1}'`
      input_seq=`echo ${input_config} | awk -F ':' '{print $2}'`
      if [[ ${input_seq} == "" ]]; then
        for i in $(seq 1 $input_num)
        do
          input_files=${input_files}${data_path}'input/'${model_name}'.ms.bin_'$i','
        done
      else
        for i in $(seq 1 $input_num)
        do
          cur_input_num=${input_seq%%,*}
          input_seq=${input_seq#*,}
          input_files=${input_files}${data_path}'input/'${model_name}'.ms.bin_'$cur_input_num','
        done
      fi
    fi
    output_file=${data_path}'output/'${model_name}'.ms.out'
    # adjust threads

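The loop above maps the i-th model input to the pre-generated data file `<model>.ms.bin_<k>`, where k walks the comma-separated `input_seq` permutation. A small C++ sketch of the same logic (BuildInputFiles is a hypothetical helper, for illustration only):

#include <iostream>
#include <sstream>
#include <string>
#include <vector>

// Build the comma-separated input-file list the way Run_Benchmark does:
// with no permutation the files are bin_1..bin_N in order; with a
// permutation "2,1" the first input reads bin_2, the second bin_1.
std::string BuildInputFiles(const std::string &data_path, const std::string &model_name,
                            int input_num, const std::string &input_seq) {
  std::vector<int> order;
  if (input_seq.empty()) {
    for (int i = 1; i <= input_num; ++i) order.push_back(i);
  } else {
    std::stringstream ss(input_seq);
    std::string tok;
    while (std::getline(ss, tok, ',')) order.push_back(std::stoi(tok));
  }
  std::string files;
  for (int k : order) {
    files += data_path + "input/" + model_name + ".ms.bin_" + std::to_string(k) + ",";
  }
  return files;
}

int main() {
  // "2:2,1" means two inputs, fed in the order bin_2 then bin_1.
  std::cout << BuildInputFiles("/data/", "porseg_tmp.onnx", 2, "2,1") << "\n";
}
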
@@ -532,6 +532,23 @@ FuncGraphPtr GetFinalGraph(const FuncGraphPtr &func_graph) {
  return nullptr;
}

int AnfExporter::SetMetaGraphInput(const FuncGraphPtr &func_graph,
                                   const std::unique_ptr<schema::MetaGraphT> &meta_graphT) {
  MS_ASSERT(func_graph != nullptr);
  if (!reorder_input_) {
    return RET_OK;
  }
  meta_graphT->inputIndex.clear();
  for (const auto &input : func_graph->get_inputs()) {
    auto iter = graph_inputs_map_.find(input);
    if (iter == graph_inputs_map_.end()) {
      return RET_ERROR;
    }
    meta_graphT->inputIndex.emplace_back(iter->second);
  }
  return RET_OK;
}

int AnfExporter::SetMetaGraphOutput(const FuncGraphPtr &func_graph,
                                    const std::unique_ptr<schema::MetaGraphT> &meta_graphT) {
  auto final_fg = GetFinalGraph(func_graph);
@@ -554,6 +571,9 @@ int AnfExporter::SetMetaGraphOutput(const FuncGraphPtr &func_graph,
schema::MetaGraphT *AnfExporter::Export(const FuncGraphPtr &func_graph, bool keep_graph, bool copy_primitive,
                                        bool train_flag) {
  this->train_flag_ = train_flag;
  // hardcode for nnie and train
  this->reorder_input_ = !(train_flag) && !(ConverterContext::GetInstance()->GetGraphInputTensorNames().empty());
  this->graph_inputs_map_.clear();
  auto meta_graphT = std::make_unique<schema::MetaGraphT>();
  auto fmk = func_graph->get_attr("fmk");
  MS_ASSERT(fmk != nullptr);
@@ -568,7 +588,18 @@ schema::MetaGraphT *AnfExporter::Export(const FuncGraphPtr &func_graph, bool kee
    return nullptr;
  }

  SetMetaGraphOutput(func_graph, meta_graphT);
  ret = SetMetaGraphInput(func_graph, meta_graphT);
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "SetMetaGraphInput failed.";
    ReturnCode::GetSingleReturnCode()->UpdateReturnCode(ret);
    return nullptr;
  }
  ret = SetMetaGraphOutput(func_graph, meta_graphT);
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "SetMetaGraphOutput failed.";
    ReturnCode::GetSingleReturnCode()->UpdateReturnCode(ret);
    return nullptr;
  }

  return meta_graphT.release();
}
@@ -749,7 +780,11 @@ int AnfExporter::SetOpInputNode(const CNodePtr &cnode, const std::unique_ptr<sch
    if (IsContain(graph_inputs_, input_node->cast<AnfNodePtr>()) &&
        graph_inputs_has_exported_.find(input_node) == graph_inputs_has_exported_.end()) {
      graph_inputs_has_exported_.insert(input_node);
      meta_graphT->inputIndex.push_back(meta_graphT->allTensors.size() - 1);
      if (reorder_input_) {
        graph_inputs_map_[input_node] = meta_graphT->allTensors.size() - 1;
      } else {
        meta_graphT->inputIndex.push_back(meta_graphT->allTensors.size() - 1);
      }
    }
  } else if (input_node->isa<ValueNode>()) {
    auto ret = ConvertInputValueNode(cnode, i, primitive_c, meta_graphT, fb_node);

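Taken together, SetOpInputNode records each exported graph input's tensor index in graph_inputs_map_ when reorder_input_ is set, and SetMetaGraphInput then rebuilds inputIndex following the FuncGraph's own input order. A self-contained sketch of that rebuild step, with simplified stand-in types rather than the real MindSpore API:

#include <iostream>
#include <map>
#include <string>
#include <vector>

// Simplified stand-in for schema::MetaGraphT.
struct MetaGraph {
  std::vector<int> inputIndex;  // tensor indices of the graph inputs
};

// Rebuild inputIndex so it follows the declared input order, using the
// node -> tensor-index map filled in while exporting nodes.
bool SetMetaGraphInput(const std::vector<std::string> &func_graph_inputs,
                       const std::map<std::string, int> &graph_inputs_map, MetaGraph *meta_graph) {
  meta_graph->inputIndex.clear();
  for (const auto &input : func_graph_inputs) {
    auto iter = graph_inputs_map.find(input);
    if (iter == graph_inputs_map.end()) {
      return false;  // an input was never exported: fail like RET_ERROR
    }
    meta_graph->inputIndex.push_back(iter->second);
  }
  return true;
}

int main() {
  // Export order happened to visit "b" first, so its tensor index is 0.
  std::map<std::string, int> exported = {{"b", 0}, {"a", 1}};
  MetaGraph mg;
  SetMetaGraphInput({"a", "b"}, exported, &mg);
  for (int idx : mg.inputIndex) std::cout << idx << " ";  // prints "1 0"
}
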
@@ -82,6 +82,7 @@ class AnfExporter {
                        const bool &copy_primitive, const CNodePtr &partial_cnode,
                        const std::unique_ptr<schema::CNodeT> &schema_cnode);
  std::list<CNodePtr> InsertCallNode(const FuncGraphPtr &func_graph);
  int SetMetaGraphInput(const FuncGraphPtr &func_graph, const std::unique_ptr<schema::MetaGraphT> &meta_graphT);
  int SetMetaGraphOutput(const FuncGraphPtr &func_graph, const std::unique_ptr<schema::MetaGraphT> &meta_graphT);
  bool IsCall(const AnfNodePtr node);
  int CreateNewTensorForParameter(const std::unique_ptr<schema::MetaGraphT> &meta_graphT, const AnfNodePtr &input);
@@ -93,8 +94,10 @@ class AnfExporter {
  std::map<FuncGraphPtr, size_t> fg_subgraph_map_;
  std::vector<AnfNodePtr> graph_inputs_;
  std::set<AnfNodePtr> graph_inputs_has_exported_;
  std::map<AnfNodePtr, int> graph_inputs_map_;
  uint32_t node_idx_ = 0;
  bool train_flag_ = false;
  bool reorder_input_ = false;
};
// By default, copy_primitive is false, which means the MetaGraph and func_graph share the same schema::PrimitiveT.
// But in PostQuantization, the func_graph needs to be transferred to a MetaGraph first for the MetaGraph pass, which may modify

@@ -109,7 +109,12 @@ class ConverterContext {
  void SetGraphOutputTensorNames(const std::vector<std::string> &output_names) {
    graph_output_tensor_names_ = output_names;
  }
  const std::vector<std::string> GetGraphOutputTensorNames() { return graph_output_tensor_names_; }

  const std::vector<std::string> GetGraphOutputTensorNames() const { return graph_output_tensor_names_; }

  void AddGraphInputTensorNames(const std::string &input_name) { graph_input_tensor_names_.emplace_back(input_name); }

  const std::vector<std::string> GetGraphInputTensorNames() const { return graph_input_tensor_names_; }

 private:
  ConverterContext() {}
@@ -118,6 +123,7 @@ class ConverterContext {
  std::map<int32_t, int32_t> graph_input_data_type_map_;
  std::map<int32_t, int32_t> graph_output_data_type_map_;
  std::map<std::string, std::vector<int64_t>> graph_input_tensor_shape_map_;
  std::vector<std::string> graph_input_tensor_names_;
  std::vector<std::string> graph_output_tensor_names_;
};
}  // namespace lite

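ConverterContext acts as the bridge between the parsers and the exporter: each parser registers its graph input names in declaration order via AddGraphInputTensorNames while parsing, and AnfExporter::Export enables reorder_input_ only when that list is non-empty. A minimal singleton sketch of this hand-off (simplified, not the real class):

#include <iostream>
#include <string>
#include <vector>

// Minimal sketch of the parser -> exporter hand-off through a singleton.
class ConverterContext {
 public:
  static ConverterContext *GetInstance() {
    static ConverterContext instance;
    return &instance;
  }
  void AddGraphInputTensorNames(const std::string &name) { graph_input_tensor_names_.push_back(name); }
  const std::vector<std::string> GetGraphInputTensorNames() const { return graph_input_tensor_names_; }

 private:
  ConverterContext() {}
  std::vector<std::string> graph_input_tensor_names_;
};

int main() {
  // A parser records the model's declared inputs in order...
  ConverterContext::GetInstance()->AddGraphInputTensorNames("data");
  ConverterContext::GetInstance()->AddGraphInputTensorNames("im_info");
  // ...and the exporter reorders only if any names were registered.
  bool reorder_input = !ConverterContext::GetInstance()->GetGraphInputTensorNames().empty();
  std::cout << std::boolalpha << reorder_input << "\n";  // true
}
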
@@ -16,6 +16,7 @@

#include "tools/converter/parser/caffe/caffe_inspector.h"
#include "src/common/log_adapter.h"
#include "src/common/utils.h"

namespace mindspore {
namespace lite {
@@ -48,13 +49,12 @@ STATUS CaffeInspector::ParseInput() {

STATUS CaffeInspector::FindGraphInputsAndOutputs() {
  for (const auto &iter : layerBottoms) {
    if (layerTops.find(iter) == layerTops.end()) {
    if (!IsContain(layerTops, iter)) {
      graphInput.insert(iter);
    }
  }
  for (const auto &iter : layerTops) {
    if (layerBottoms.find(iter) == layerBottoms.end() &&
        std::find(graphOutput.begin(), graphOutput.end(), iter) == graphOutput.end()) {
    if (layerBottoms.find(iter) == layerBottoms.end() && !IsContain(graphOutput, iter)) {
      graphOutput.push_back(iter);
    }
  }
@@ -71,7 +71,9 @@ STATUS CaffeInspector::SetLayerTopsAndBottoms() {
    graphInput.insert(layer.top(0));
  }
  for (int j = 0; j < layer.top_size(); j++) {
    layerTops.insert(layer.top(j));
    if (!IsContain(layerTops, layer.top(j))) {
      layerTops.push_back(layer.top(j));
    }
  }
  for (int j = 0; j < layer.bottom_size(); j++) {
    layerBottoms.insert(layer.bottom(j));

@@ -43,7 +43,7 @@ class CaffeInspector {
 private:
  caffe::NetParameter net;

  std::set<std::string> layerTops;
  std::vector<std::string> layerTops;
  std::set<std::string> layerBottoms;

  std::set<std::string> graphInput;

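layerTops switches from std::set to std::vector because a set iterates in lexicographic order, which can silently reorder the tops relative to their declaration order in the prototxt; a vector plus an explicit containment check keeps insertion order. A small demonstration of the difference (illustrative only; IsContain here is a stand-in for the helper from src/common/utils.h):

#include <algorithm>
#include <iostream>
#include <set>
#include <string>
#include <vector>

// Stand-in for the IsContain helper used by the new code.
template <typename C, typename T>
bool IsContain(const C &c, const T &v) {
  return std::find(c.begin(), c.end(), v) != c.end();
}

int main() {
  // Tops declared in this order in the prototxt:
  std::vector<std::string> declared = {"im_info", "data", "aux"};

  std::set<std::string> as_set(declared.begin(), declared.end());
  std::vector<std::string> as_vec;
  for (const auto &t : declared) {
    if (!IsContain(as_vec, t)) as_vec.push_back(t);  // de-dup, keep order
  }

  for (const auto &t : as_set) std::cout << t << " ";  // aux data im_info
  std::cout << "\n";
  for (const auto &t : as_vec) std::cout << t << " ";  // im_info data aux
  std::cout << "\n";
}
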
@@ -243,6 +243,10 @@ STATUS CaffeModelParser::ConvertGraphInputsOfLayer() {
  for (int i = 0; i < caffe_model_.layer_size(); i++) {
    auto layer = caffe_model_.layer(i);
    if (layer.type() == "Input") {
      if (layer.bottom_size() != 0) {
        MS_LOG(ERROR) << "The input layer should not have inputs";
        return RET_ERROR;
      }
      auto parameter = res_graph_->add_parameter();
      std::vector<int64_t> shape = ConverterContext::GetInstance()->GetGraphInputTensorShape(layer.name());
      if (ConverterContext::GetInstance()->GetGraphInputTensorShapeMapSize() > 0 && shape.empty()) {
@@ -259,7 +263,8 @@ STATUS CaffeModelParser::ConvertGraphInputsOfLayer() {
        return RET_ERROR;
      }
      parameter->set_abstract(abstract);
      parameter->set_name("graph_input-" + std::to_string(i));
      parameter->set_name(layer.name());
      ConverterContext::GetInstance()->AddGraphInputTensorNames(layer.name());
      nodes_.insert(std::pair(layer.top(0), parameter));
    }
  }
@@ -291,7 +296,8 @@ STATUS CaffeModelParser::ConvertGraphInputsOfShape() {
      return RET_ERROR;
    }
    parameter->set_abstract(abstract);
    parameter->set_name("graph_input-" + caffe_model_.input(i));
    parameter->set_name(caffe_model_.input(i));
    ConverterContext::GetInstance()->AddGraphInputTensorNames(caffe_model_.input(i));
    nodes_.insert(std::pair(caffe_model_.input(i), parameter));
  }
  return RET_OK;
@@ -323,7 +329,8 @@ STATUS CaffeModelParser::ConvertGraphInputsOfDim() {
      return RET_ERROR;
    }
    parameter->set_abstract(abstract);
    parameter->set_name("graph_input-" + caffe_model_.input(i));
    parameter->set_name(caffe_model_.input(i));
    ConverterContext::GetInstance()->AddGraphInputTensorNames(caffe_model_.input(i));
    nodes_.insert(std::pair(caffe_model_.input(i), parameter));
  }
  return RET_OK;
@@ -334,12 +341,17 @@ STATUS CaffeModelParser::ConvertGraphInputs() {
  if (ret != RET_OK) {
    return ret;
  }
  if (caffe_model_.input_dim_size() > 0) {
    return ConvertGraphInputsOfDim();
  } else {
    return ConvertGraphInputsOfShape();
  ret = ConvertGraphInputsOfShape();
  if (ret != RET_OK) {
    return ret;
  }
  return ret;
  if (caffe_model_.input_dim_size() > 0) {
    ret = ConvertGraphInputsOfDim();
    if (ret != RET_OK) {
      return ret;
    }
  }
  return RET_OK;
}

STATUS CaffeModelParser::ConvertGraphOutputs() {

@@ -221,6 +221,7 @@ STATUS OnnxModelParser::ConvertGraphInputs(const onnx::GraphProto &onnx_graph, c
    }
    parameter->set_abstract(abstract_tensor);
    parameter->set_name(input_value.name());
    ConverterContext::GetInstance()->AddGraphInputTensorNames(input_value.name());
    anf_nodes_map->emplace(input_value.name(), parameter);
  }
  return RET_OK;

@@ -414,7 +414,7 @@ STATUS TFModelParser::ConvertConstTensor(const tensorflow::NodeDef &node_def, co
}

STATUS TFModelParser::ConvertParameter(const tensorflow::NodeDef &node, const ParameterPtr &parameter,
                                       std::unordered_map<std::string, AnfNodePtr> *anf_node_map) {
                                       std::unordered_map<std::string, AnfNodePtr> *anf_node_map, bool root_graph) {
  MS_ASSERT(node != nullptr);
  MS_ASSERT(parameter != nullptr);

@@ -446,7 +446,10 @@ STATUS TFModelParser::ConvertParameter(const tensorflow::NodeDef &node, const Pa
      return status;
    }
  } else {
    graph_input_names_.emplace_back(node.name());  // only the root graph needs to set graph input names
    if (root_graph) {
      graph_input_names_.emplace_back(node.name());  // only the root graph needs to set graph input names
      ConverterContext::GetInstance()->AddGraphInputTensorNames(node.name());
    }
  }

  type = (type == kNumberTypeInt64) ? kNumberTypeInt32 : type;
@@ -463,13 +466,14 @@ STATUS TFModelParser::ConvertParameter(const tensorflow::NodeDef &node, const Pa
  return RET_OK;
}

STATUS TFModelParser::ConvertGraphInputsAndConsts(
    const std::map<std::string, const tensorflow::NodeDef *> &tf_graph_nodes, const FuncGraphPtr &anf_graph,
    std::unordered_map<std::string, AnfNodePtr> *anf_node_map) {
  for (auto &pair : tf_graph_nodes) {
STATUS TFModelParser::ConvertGraphInputsAndConsts(const std::vector<const tensorflow::NodeDef *> &tf_graph_nodes,
                                                  const FuncGraphPtr &anf_graph,
                                                  std::unordered_map<std::string, AnfNodePtr> *anf_node_map,
                                                  bool root_graph) {
  for (auto &node : tf_graph_nodes) {
    bool have_data_depend = false;
    for (int i = 0; i < pair.second->input_size(); ++i) {
      auto name = pair.second->input(i);
    for (int i = 0; i < node->input_size(); ++i) {
      auto name = node->input(i);
      if (!name.empty() && name[0] != '^') {  // control_depend inputs start with "^"
        have_data_depend = true;
        break;
@@ -477,7 +481,7 @@ STATUS TFModelParser::ConvertGraphInputsAndConsts(
    }
    if (!have_data_depend) {
      auto parameter = anf_graph->add_parameter();
      if (ConvertParameter(*pair.second, parameter, anf_node_map) != RET_OK) {
      if (ConvertParameter(*node, parameter, anf_node_map, root_graph) != RET_OK) {
        MS_LOG(ERROR) << "convert Parameter Node failed";
        return RET_ERROR;
      }
@@ -520,9 +524,10 @@ FuncGraphPtr TFModelParser::Parse(const converter::ConverterParameters &flag) {
  for (int i = 0; i < tf_root_graph_->node_size(); i++) {
    auto &node_def = tf_root_graph_->node(i);
    tf_root_graph_nodes_[node_def.name()] = &node_def;
    tf_root_graph_nodes_vec_.emplace_back(&node_def);
  }

  status = ConvertGraphInputsAndConsts(tf_root_graph_nodes_, res_graph_, &anf_root_node_map_);
  status = ConvertGraphInputsAndConsts(tf_root_graph_nodes_vec_, res_graph_, &anf_root_node_map_, true);
  if (status != RET_OK) {
    ReturnCode::GetSingleReturnCode()->UpdateReturnCode(status);
    return nullptr;
@@ -607,11 +612,13 @@ STATUS TFModelParser::ConvertSubgraphInputs(std::map<std::string, const tensorfl
    }
    sub_graph_inputs.emplace_back(parameter);
  }
  std::vector<const tensorflow::NodeDef *> subgraph_tf_node_vec;
  for (int j = 0; j < tf_sub_fuction.node_def_size(); j++) {
    auto &node_def = tf_sub_fuction.node_def(j);
    (*tf_sub_node_map)[node_def.name()] = &node_def;
    subgraph_tf_node_vec.emplace_back(&node_def);
  }
  if (ConvertGraphInputsAndConsts(*tf_sub_node_map, sub_func_graph, anf_sub_node_map) != RET_OK) {
  if (ConvertGraphInputsAndConsts(subgraph_tf_node_vec, sub_func_graph, anf_sub_node_map, false) != RET_OK) {
    MS_LOG(ERROR) << "Convert subgraph consts failed";
    return RET_ERROR;
  }
@@ -1029,23 +1036,23 @@ STATUS TFModelParser::ConvertRootGraphOutputs() {
  // tf_root_graph_nodes_ but not anf_root_node_map_
  std::set<std::string> all_node_inputs;
  std::vector<AnfNodePtr> output_nodes;
  for (auto &pair : tf_root_graph_nodes_) {
    for (int i = 0; i < pair.second->input_size(); ++i) {
      all_node_inputs.insert(TensorFlowUtils::GetNodeName(pair.second->input(i)));
      auto input_name = pair.second->input(i);
  for (auto &node : tf_root_graph_nodes_vec_) {
    for (int i = 0; i < node->input_size(); ++i) {
      all_node_inputs.insert(TensorFlowUtils::GetNodeName(node->input(i)));
      auto input_name = node->input(i);
      if (input_name[0] == '^') {
        input_name.erase(0, 1);
      }
      all_node_inputs.insert(input_name);
    }
  }
  for (auto &pair : tf_root_graph_nodes_) {
    if (pair.second->op() == "Assert") {
  for (auto &node : tf_root_graph_nodes_vec_) {
    if (node->op() == "Assert") {
      continue;
    }
    auto it = all_node_inputs.find(pair.first);
    if (it == all_node_inputs.end() && pair.second->input_size() > 0) {  // output node not constrained to Identity
      auto origin_name = GetOriginInputName(*(pair.second), tf_root_graph_nodes_);
    auto it = all_node_inputs.find(node->name());
    if (it == all_node_inputs.end() && node->input_size() > 0) {  // output node not constrained to Identity
      auto origin_name = GetOriginInputName(*(node), tf_root_graph_nodes_);
      // a node with multiple outputs has been changed to tupleGetItem, and the original name changes to name:idx
      for (int i = 0; i < node_output_num_[origin_name]; i++) {
        auto anf_node = GetAnfNode(origin_name, anf_root_node_map_, i);
@@ -1055,8 +1062,8 @@ STATUS TFModelParser::ConvertRootGraphOutputs() {
        }
        output_nodes.push_back(anf_node);
        // Get the name of 'Identity' and 'StopGradient' nodes.
        if (pair.second->op() == "Identity" || pair.second->op() == "StopGradient") {
          auto tmp_node = pair.second;
        if (node->op() == "Identity" || node->op() == "StopGradient") {
          auto tmp_node = node;
          bool found_input = true;
          while (tmp_node->name().empty() && (tmp_node->op() == "Identity" || tmp_node->op() == "StopGradient")) {
            auto flatten_input_name = TensorFlowUtils::GetFlattenNodeName(tmp_node->input(0));

@@ -51,10 +51,11 @@ class TFModelParser : public converter::ModelParser {
                                  std::vector<int64_t> *shape_vector);
  static STATUS SetTensorInfoFromType(const tensorflow::TensorProto &tensor_proto, tensor::TensorPtr *tensor_info);
  STATUS ConvertParameter(const tensorflow::NodeDef &node, const ParameterPtr &parameter,
                          std::unordered_map<std::string, AnfNodePtr> *anf_node_map);
  STATUS ConvertGraphInputsAndConsts(const std::map<std::string, const tensorflow::NodeDef *> &tf_graph_nodes,
                          std::unordered_map<std::string, AnfNodePtr> *anf_node_map, bool root_graph = false);
  STATUS ConvertGraphInputsAndConsts(const std::vector<const tensorflow::NodeDef *> &tf_graph_nodes,
                                     const FuncGraphPtr &anf_graph,
                                     std::unordered_map<std::string, AnfNodePtr> *anf_node_map);
                                     std::unordered_map<std::string, AnfNodePtr> *anf_node_map,
                                     bool root_graph = false);
  static STATUS ConvertInputNodes(const tensorflow::NodeDef &node_def, const std::vector<std::string> &input_names,
                                  const std::map<std::string, const tensorflow::NodeDef *> &tf_node_map,
                                  const std::unordered_map<std::string, AnfNodePtr> &anf_node_map,
@@ -97,6 +98,7 @@ class TFModelParser : public converter::ModelParser {

  std::unique_ptr<tensorflow::GraphDef> tf_root_graph_;                     // tf root graph def
  std::map<std::string, const tensorflow::NodeDef *> tf_root_graph_nodes_;  // tf root graph node map
  std::vector<const tensorflow::NodeDef *> tf_root_graph_nodes_vec_;
  std::unordered_map<std::string, AnfNodePtr> anf_root_node_map_;
  std::vector<std::string> graph_input_names_;
  std::vector<std::string> graph_output_names_;

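The parser keeps tf_root_graph_nodes_vec_ alongside the name-keyed map, apparently because std::map iterates in key order: walking the map visits NodeDefs alphabetically, not in GraphDef order, so graph inputs could be registered out of declaration order. A short illustration of the difference (simplified stand-in types, not the real tensorflow::NodeDef):

#include <iostream>
#include <map>
#include <string>
#include <vector>

// Stand-in for tensorflow::NodeDef: only the name matters here.
struct NodeDef {
  std::string name;
};

int main() {
  // The GraphDef declares its placeholders in this order:
  std::vector<NodeDef> graph = {{"input_b"}, {"input_a"}};

  std::map<std::string, const NodeDef *> by_name;  // old tf_root_graph_nodes_
  std::vector<const NodeDef *> by_position;        // new tf_root_graph_nodes_vec_
  for (const auto &node : graph) {
    by_name[node.name] = &node;
    by_position.push_back(&node);
  }

  for (const auto &pair : by_name) std::cout << pair.first << " ";  // input_a input_b
  std::cout << "\n";
  for (const auto *node : by_position) std::cout << node->name << " ";  // input_b input_a
  std::cout << "\n";
}
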
@@ -336,7 +336,8 @@ STATUS TfliteModelParser::ConvertGraphInputs() {
      return RET_ERROR;
    }
    parameter->set_abstract(abstract_tensor);
    parameter->set_name("graph_input-" + std::to_string(tflite_graph_input));
    parameter->set_name(tensor->name);
    ConverterContext::GetInstance()->AddGraphInputTensorNames(tensor->name);
    nodes_.insert(std::pair(tflite_graph_input, parameter));
  }
  return RET_OK;