From 8669e6a540a80bb0e583182da708780c126602c7 Mon Sep 17 00:00:00 2001 From: zengxianglong Date: Tue, 13 Apr 2021 14:29:02 +0800 Subject: [PATCH] add model to the entrance guard --- mindspore/lite/test/models_caffe.cfg | 1 + mindspore/lite/test/models_caffe_fp16.cfg | 1 + mindspore/lite/test/models_tf_fp16.cfg | 17 +++++++---- mindspore/lite/test/models_tflite.cfg | 10 +++---- mindspore/lite/test/models_tflite_fp16.cfg | 28 +++++++++++-------- .../lite/test/models_with_multiple_inputs.cfg | 3 ++ .../test/models_with_multiple_inputs_fp16.cfg | 9 ++++-- 7 files changed, 44 insertions(+), 25 deletions(-) diff --git a/mindspore/lite/test/models_caffe.cfg b/mindspore/lite/test/models_caffe.cfg index 5f260fdf1e6..c831bac9e27 100644 --- a/mindspore/lite/test/models_caffe.cfg +++ b/mindspore/lite/test/models_caffe.cfg @@ -117,3 +117,4 @@ identify_card_detect ml_2012_ocr_rec_caffe ml_2012_ocr_detection_caffe ml_face_mnet +ml_segmentation_atlanta_1 diff --git a/mindspore/lite/test/models_caffe_fp16.cfg b/mindspore/lite/test/models_caffe_fp16.cfg index 0dec6b74b95..2a83a5d0128 100644 --- a/mindspore/lite/test/models_caffe_fp16.cfg +++ b/mindspore/lite/test/models_caffe_fp16.cfg @@ -115,3 +115,4 @@ ml_2012_ocr_rec_caffe 0.5 ml_lable_model_hebing_device 2 ml_face_sex 0.5 ml_face_mnet 12 +ml_segmentation_atlanta_1 0.5 diff --git a/mindspore/lite/test/models_tf_fp16.cfg b/mindspore/lite/test/models_tf_fp16.cfg index fcae8ccb9f8..b51514a21c3 100644 --- a/mindspore/lite/test/models_tf_fp16.cfg +++ b/mindspore/lite/test/models_tf_fp16.cfg @@ -7,19 +7,21 @@ scan_hms_angle.pb 1.5 scan_hms_detect.pb 2.5 ml_face_openclose.pb;1,32,32,3 0.5 ml_object_detect.pb;1,288,288,3 2 -Q_crnn_screen_slim400w_more_20w.pb 137 +# the inputs of two Q_crnn_screen_slim400w models are between 0-255, but their outputs have small values (e-7). 
+Q_crnn_screen_slim400w_more_20w.pb 72 Q_inception-249970-672-11-16.pb 6.5 -hiai_latin_ocr_1.pb 23 hiai_ssd_mobilenetv2_object.pb 15 hiai_humanDetection.pb 3.5 hiai_PoseEstimation_Pcm.pb 0.5 hiai_cn_recognize_modify_padv2.pb;1,32,512,1 27 hiai_model_normalize_object_scene_ps_20200519.pb;1,224,224,3 17 +# the output of mtk_model_ckpt.pb has small values mtk_model_ckpt.pb 19 mtk_age_gender.pb 0.5 mtk_model_normalize_object_scene_ps_20200519.pb;1,224,224,3 10 mtk_AADB_HADB_MBV2_model.pb;1,224,224,3 5.5 mtk_AADB_HADB_MBV3_model.pb;1,224,224,3 4 +# the output of mtk_face_features_v1.pb has small values mtk_face_features_v1.pb 26 model_normalize_object_scene_ps_20200519.pb;1,224,224,3 10 hiai_AADB_HADB_MBV2_model.pb;1,224,224,3 6 @@ -37,6 +39,11 @@ hiai_cpu_face_gazing.pb 0.5 hiai_cpu_face_emotion.pb 2 hiai_cv_poseEstimation.pb 103 Q_dila-small-mix-full-fineturn-390000-nopixel-nosigmoid.pb 1.5 -Q_crnn_ori_75w_slim_norm.pb 190 -Q_crnn_ori_v2_405001_notrans_nopre.pb 25 -hiai_latin_ocr.pb 12 \ No newline at end of file +# the input of Q_crnn_ori_75w_slim model is between 0-255, but its outputs have small values (e-6). +Q_crnn_ori_75w_slim_norm.pb 37 +# the output of Q_crnn_ori_v2 model has small values (e-4). 
+Q_crnn_ori_v2_405001_notrans_nopre.pb 24 +# the inputs of hiai_latin models are between 0-255 +hiai_latin_ocr.pb 4 +hiai_latin_ocr_1.pb 3.5 +hiai_cpu_face_headpose.pb 4 diff --git a/mindspore/lite/test/models_tflite.cfg b/mindspore/lite/test/models_tflite.cfg index b08f823fd38..22e2cfbaf4f 100644 --- a/mindspore/lite/test/models_tflite.cfg +++ b/mindspore/lite/test/models_tflite.cfg @@ -84,7 +84,6 @@ ml_face_openclose.tflite hiai_face_model_npu.tflite hiai_ctpn_feature_map.tflite hiai_cv_labelDetectorModel_v2.tflite -#hiai_cv_labelDetectorModel_v3.tflite hiai_cv_labelDetectorModel_v4.tflite hiai_dress_detect.tflite hiai_cv_saliencyDetectorModel.tflite @@ -106,8 +105,6 @@ mnasnet_1.0_160_1_metadata_1.tflite mnasnet_1.0_192_1_metadata_1.tflite mnasnet_1.0_224_1_metadata_1.tflite mnasnet_1.0_96_1_metadata_1.tflite -# ml_vision_guide_detection1.tflite -# ml_vision_guide_detection3.tflite lite-model_on_device_vision_classifier_popular_us_products_V1_1.tflite lite-model_on_device_vision_classifier_popular_wine_V1_1.tflite posenet_mobilenet_float_075_1_default_1.tflite @@ -133,6 +130,7 @@ mindspore_text_classification_tflite.tflite ml_text_correction.tflite ml_pic_shopping.tflite ml_vision_guide_detection3_pb2tflite.tflite +ml_vision_guide_detection1_pb2tflite.tflite ml_pic_shopping_pb2tflite.tflite ml_ocr_jk_pb2tflite.tflite ml_ocr_latin_pb2tflite.tflite @@ -161,15 +159,15 @@ unet_mbv2_05_104pts.tflite hiai_AADB_HADB_MBV2_model_f16.tflite hiai_AADB_HADB_MBV2_model_fp32.tflite hiai_detect_curve_model_float32.tflite -#hiai_detectmodel_06_23_960_480_1180700.tflite +hiai_detectmodel_06_23_960_480_1180700.tflite hiai_detectmodel_desnet_256_128_64_32.tflite lite-model_aiy_vision_classifier_food_V1_1.tflite lite-model_disease-classification_1.tflite lite-model_models_mushroom-identification_v1_1.tflite #lite-model_albert_lite_base_squadv1_metadata_1.tflite #lite-model_mobilebert_1_metadata_1.tflite -#smartreply_1_default_1.tflite +smartreply_1_default_1.tflite 
text_classification.tflite Q_detect_fpn_add_inception-1448650.tflite Q_hand_0812_pb2tflite.tflite -#bloom_landmark.tflite +bloom_landmark.tflite diff --git a/mindspore/lite/test/models_tflite_fp16.cfg b/mindspore/lite/test/models_tflite_fp16.cfg index 234fb0f11f0..c5a9282e3a0 100644 --- a/mindspore/lite/test/models_tflite_fp16.cfg +++ b/mindspore/lite/test/models_tflite_fp16.cfg @@ -38,7 +38,7 @@ mnasnet_1.3_224.tflite 12 inception_v3.tflite 3 deeplabv3_257_mv_gpu.tflite 3 multi_person_mobilenet_v1_075_float.tflite 9 -#hiai_vad.tflite 20 +#hiai_vad.tflite ide_label_base.tflite 22 # ide_label_retrained.tflite involves a softmax-like structure whose output channel is 12. # The values in the first few channels are extremely small and casted into 0 in the fp16 subgraph. @@ -88,7 +88,6 @@ hiai_humanDetection.tflite 15 hiai_face_model_npu.tflite 5 hiai_ctpn_feature_map.tflite 10 hiai_cv_labelDetectorModel_v2.tflite 30 -#hiai_cv_labelDetectorModel_v3.tflite 20 hiai_cv_labelDetectorModel_v4.tflite 3 hiai_dress_detect.tflite 3 hiai_cv_saliencyDetectorModel.tflite 3 @@ -110,8 +109,6 @@ mnasnet_1.0_160_1_metadata_1.tflite 6 mnasnet_1.0_192_1_metadata_1.tflite 8 mnasnet_1.0_224_1_metadata_1.tflite 6 mnasnet_1.0_96_1_metadata_1.tflite 6 -# ml_vision_guide_detection1.tflite 20 -# ml_vision_guide_detection3.tflite 20 lite-model_on_device_vision_classifier_popular_us_products_V1_1.tflite 16 lite-model_on_device_vision_classifier_popular_wine_V1_1.tflite 80 posenet_mobilenet_float_075_1_default_1.tflite 45 @@ -141,6 +138,7 @@ ml_text_correction.tflite 1 # fp16: 27.6 - 27.4 = 0.2 #ml_pic_shopping.tflite 0.1 ml_vision_guide_detection3_pb2tflite.tflite 0.5 +ml_vision_guide_detection1_pb2tflite.tflite 0.5 ml_pic_shopping_pb2tflite.tflite 95 ml_ocr_jk_pb2tflite.tflite 0.5 ml_ocr_latin_pb2tflite.tflite 11 @@ -167,21 +165,27 @@ Q_language_model_hrmini_Q4_b4_17w.tflite 3.5 lite-model_aiy_vision_classifier_food_V1_1.tflite 42 lite-model_disease-classification_1.tflite 70 
lite-model_models_mushroom-identification_v1_1.tflite 3 -#smartreply_1_default_1.tflite 0.5 +smartreply_1_default_1.tflite 0.5 text_classification.tflite 0.5 Q_AADB_HADB_MBV2_model.tflite 5 +# the input of Q_convert model is between 0-255 Q_convert.tflite 12 -Q_crnn_ori_75w_slim_norm_pb2tflite.tflite 200 -Q_crnn_ori_v2_405001_notrans_nopre_pb2tflite.tflite 40 -Q_crnn_screen_slim400w_more_20w_pb2tflite.tflite 235 +# the input of Q_crnn_ori_75w_slim model is between 0-255, but its outputs have small values (e-6). +Q_crnn_ori_75w_slim_norm_pb2tflite.tflite 29 +# the output of Q_crnn_ori_v2 model has small values (e-4). +Q_crnn_ori_v2_405001_notrans_nopre_pb2tflite.tflite 36 +# the inputs of two Q_crnn_screen_slim400w models are between 0-255, but their outputs have small values (e-7). +Q_crnn_screen_slim400w_more_20w_pb2tflite.tflite 71 Q_dila-small-mix-full-fineturn-390000-nopixel-nosigmoid_tflite.tflite 1.5 +# the inputs of two Q_focusocr models are between 0-255, but their outputs have small values (e-6). 
+Q_focusocr_cn_recog.tflite 30 +Q_focusocr_jk_recog.tflite 25 Q_inception-249970-672-11-16_pb2tflite.tflite 6 Q_isface.tflite 0.5 Q_landmark.tflite 0.5 Q_new_detect.tflite 3.5 -Q_object_scene.tflite 14 +# the input of Q_object_scene model is between 0-255 +Q_object_scene.tflite 3 Q_pose.tflite 1.5 Q_detect_fpn_add_inception-1448650.tflite 1 -#bloom_landmark.tflite 0.5 +bloom_landmark.tflite 0.5 diff --git a/mindspore/lite/test/models_with_multiple_inputs.cfg b/mindspore/lite/test/models_with_multiple_inputs.cfg index abf36020d47..d18e01ac89b 100644 --- a/mindspore/lite/test/models_with_multiple_inputs.cfg +++ b/mindspore/lite/test/models_with_multiple_inputs.cfg @@ -28,3 +28,6 @@ hiai_cv_labelDetectorModel_v3.tflite;2 ml_tts_vocoder.pb;66 ml_tacotron_decoder_step_stf.tflite;9;1,80:1,256:1,1024:1,1024:1,1024:1,1024:1,8:1,1,256:1 add_uint8.tflite;2 +ml_Heatmap_depth_240180;2 +ml_Heatmap_depth_180240;2 +hiai_nlu_model.pb;3;1,16:1,16:1,16 diff --git a/mindspore/lite/test/models_with_multiple_inputs_fp16.cfg b/mindspore/lite/test/models_with_multiple_inputs_fp16.cfg index 160831e0605..08097ce2e70 100644 --- a/mindspore/lite/test/models_with_multiple_inputs_fp16.cfg +++ b/mindspore/lite/test/models_with_multiple_inputs_fp16.cfg @@ -12,7 +12,12 @@ decoder_step_201217_modified.pb;5 0.5 #encoder_0111_control_flow.pb;4;1:1,44:1:1 10 ml_female_model_step6_noiseout.pb;66 2 ml_male_model_step6_noiseout.pb;66 2.5 +ml_tts_encoder_control_flow.pb;4;1:1,22:1:1 1.5 ml_tts_decoder_control_flow.pb;5 1 ml_tts_decoder.pb;5 117 -hiai_cv_labelDetectorModel_v3.tflite;2 5.5 -ml_tts_vocoder.pb;66 53 \ No newline at end of file +# The input of hiai_cv_labelDetectorModel_v3.tflite is between 0-255. +hiai_cv_labelDetectorModel_v3.tflite;2 2 +ml_tts_vocoder.pb;66 53 +# The outputs of two Heatmap_depth models have small values +ml_Heatmap_depth_240180;2 102 +ml_Heatmap_depth_180240;2 101 \ No newline at end of file