From 2a277636486aeecd78b28210daeeb51243c3f05d Mon Sep 17 00:00:00 2001
From: greatpanc
Date: Fri, 11 Feb 2022 12:43:20 +0800
Subject: [PATCH] avx512 gate

---
 .../kernel_compiler/cpu/nnacl/fp32/exp_fp32.h | 4 +-
 .../test/config/models_caffe_gpu_fp16.cfg | 74 ++++
 .../test/config/models_caffe_gpu_fp32.cfg | 84 ++++
 .../lite/test/config/models_gpu_fp16.cfg | 331 --------------
 .../lite/test/config/models_gpu_fp32.cfg | 416 ------------------
 .../test/config/models_mindspore_gpu_fp16.cfg | 2 +
 .../test/config/models_mindspore_gpu_fp32.cfg | 7 +
 .../lite/test/config/models_onnx_gpu_fp16.cfg | 67 +++
 .../lite/test/config/models_onnx_gpu_fp32.cfg | 77 ++++
 .../lite/test/config/models_tf_gpu_fp16.cfg | 60 +++
 .../lite/test/config/models_tf_gpu_fp32.cfg | 81 ++++
 .../test/config/models_tflite_gpu_fp16.cfg | 155 +++++++
 .../test/config/models_tflite_gpu_fp32.cfg | 188 ++++++++
 mindspore/lite/test/st/run_benchmark_nets.sh | 22 +-
 .../lite/test/st/scripts/run_benchmark_gpu.sh | 50 ++-
 .../lite/test/st/scripts/run_benchmark_x86.sh | 49 ++-
 16 files changed, 903 insertions(+), 764 deletions(-)
 create mode 100644 mindspore/lite/test/config/models_caffe_gpu_fp16.cfg
 create mode 100644 mindspore/lite/test/config/models_caffe_gpu_fp32.cfg
 delete mode 100644 mindspore/lite/test/config/models_gpu_fp16.cfg
 delete mode 100644 mindspore/lite/test/config/models_gpu_fp32.cfg
 create mode 100644 mindspore/lite/test/config/models_mindspore_gpu_fp16.cfg
 create mode 100644 mindspore/lite/test/config/models_mindspore_gpu_fp32.cfg
 create mode 100644 mindspore/lite/test/config/models_onnx_gpu_fp16.cfg
 create mode 100644 mindspore/lite/test/config/models_onnx_gpu_fp32.cfg
 create mode 100644 mindspore/lite/test/config/models_tf_gpu_fp16.cfg
 create mode 100644 mindspore/lite/test/config/models_tf_gpu_fp32.cfg
 create mode 100644 mindspore/lite/test/config/models_tflite_gpu_fp16.cfg
 create mode 100644 mindspore/lite/test/config/models_tflite_gpu_fp32.cfg

diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/exp_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/exp_fp32.h
index cd31ed0296b..b21f3173f76 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/exp_fp32.h
+++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/exp_fp32.h
@@ -66,7 +66,7 @@ static inline MS_FLOAT32X4 simd_exp128_f32(MS_FLOAT32X4 input) {
 #if defined(ENABLE_AVX512)
 static inline void simd_exp512(MS_FLOAT32X16 input, float *dst) {
   static MS_FLOAT32X16 maxv = {88.0f, 88.0f, 88.0f, 88.0f, 88.0f, 88.0f, 88.0f, 88.0f,
-                               98.0f, 88.0f, 88.0f, 88.0f, 88.0f, 88.0f, 88.0f, 88.0f};
+                               88.0f, 88.0f, 88.0f, 88.0f, 88.0f, 88.0f, 88.0f, 88.0f};
   static MS_FLOAT32X16 minv = {-88.0f, -88.0f, -88.0f, -88.0f, -88.0f, -88.0f, -88.0f, -88.0f,
                                -88.0f, -88.0f, -88.0f, -88.0f, -88.0f, -88.0f, -88.0f, -88.0f};
   static MS_FLOAT32X16 param[] = {
@@ -92,7 +92,7 @@ static inline void simd_exp512(MS_FLOAT32X16 input, float *dst) {
 static inline MS_FLOAT32X16 simd_exp512_f32(MS_FLOAT32X16 input) {
   static MS_FLOAT32X16 maxv = {88.0f, 88.0f, 88.0f, 88.0f, 88.0f, 88.0f, 88.0f, 88.0f,
-                               98.0f, 88.0f, 88.0f, 88.0f, 88.0f, 88.0f, 88.0f, 88.0f};
+                               88.0f, 88.0f, 88.0f, 88.0f, 88.0f, 88.0f, 88.0f, 88.0f};
   static MS_FLOAT32X16 minv = {-88.0f, -88.0f, -88.0f, -88.0f, -88.0f, -88.0f, -88.0f, -88.0f,
                                -88.0f, -88.0f, -88.0f, -88.0f, -88.0f, -88.0f, -88.0f, -88.0f};
   static MS_FLOAT32X16 param[] = {
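The two exp_fp32.h hunks above fix a copy-paste slip: lane 8 of the AVX512 kernels' upper clamp vector read 98.0f instead of 88.0f. The bound is 88 because single-precision exp overflows just past ln(FLT_MAX), roughly 88.72, so a lane clamped at 98.0f could still feed the polynomial a value whose exponential is +inf. A minimal scalar sketch of the clamp follows (illustrative C only; exp_clamped is a made-up name, not part of the NNACL SIMD code):

#include <float.h>
#include <math.h>
#include <stdio.h>

/* Scalar model of the per-lane clamp that simd_exp512/simd_exp512_f32
 * apply before their polynomial step. With the stray 98.0f bound, an
 * input such as 90.0f would pass through and expf would overflow. */
static float exp_clamped(float x) {
  const float upper = 88.0f;  /* mirrors maxv in exp_fp32.h */
  const float lower = -88.0f; /* mirrors minv in exp_fp32.h */
  if (x > upper) x = upper;
  if (x < lower) x = lower;
  return expf(x);
}

int main(void) {
  printf("logf(FLT_MAX) = %f\n", logf(FLT_MAX));            /* ~88.72 */
  printf("expf(90.0f) = %f\n", expf(90.0f));                /* inf */
  printf("exp_clamped(90.0f) = %g\n", exp_clamped(90.0f));  /* ~1.65e+38, finite */
  return 0;
}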
diff --git a/mindspore/lite/test/config/models_caffe_gpu_fp16.cfg b/mindspore/lite/test/config/models_caffe_gpu_fp16.cfg
new file mode 100644
index 00000000000..6b3f7856df4
--- /dev/null
+++ b/mindspore/lite/test/config/models_caffe_gpu_fp16.cfg
@@ -0,0 +1,74 @@
+# [first column]:model_name, If you need input shape, please connect it through ';1;' after the model name, where '1' is the input num.
+# [second column]:accuracy limit in arm64
+landmark
+PoseNet_dla_17_x512_tmp
+plat_isface
+mtk_isface
+mtk_landmark
+mtk_pose_tuku
+mtk_2012_ATLANTA_10class_20190614_v41
+mtk_detect-deeper-halfdeeper-mbv1-lastearlySSD-shortcut-400-400_nopostprocess_simplified
+mtk_detect_mbv1_640_480_nopostprocess_simplified
+ml_ocr_detect_20200305;1;1,544,544,3 10
+detection_retinaface_fix 8
+# age_new's precision deteriorates in P50
+age_new 8
+ml_ocr_bank_card_detection_inception_tmp 10
+glasses
+hat
+isface
+ml_bank_detect_0312_tmp 13
+ml_face_div_parsing
+ml_hardware_eyeclose
+Mnet6_0312_extract_pay 6
+pose_3d
+hiai_face_RFB-Epoch-170-no-transpose
+tracking
+detect-deeper-halfdeeper-mbv1-shortcut-400-400_nopostprocess_simplified
+hiai_face_detect_rfb
+hiai_face_isface
+hiai_face_landmark
+hiai_face_pose_tuku
+ml_hand_detection
+ml_ocr_sfz_detect_0325_tmp
+ml_hardware_liveness
+ml_liveness_detect_landmark_tmp
+ml_face_contour
+2012_ATLANTA_1class_20190621_v4.x_nomean
+ml_ocr_sfz_add_final_0325
+ml_hardware_pose
+ml_bank_recog
+#2012_ATLANTA_10class_20190131_v4.0's precision deteriorates in P50
+2012_ATLANTA_10class_20190131_v4.0 10
+mnet 9
+recognition 7
+ml_face_landmark
+model_hebing_3branch 34
+hiai_cv_focusShootOCRModel_07
+hiai_cv_focusShootOCRModel_03 11
+hiai_cv_focusShootOCRModel_01 9
+hiai_face_hat1
+hiai_cv_focusShootOCRModel_04 6
+hiai_cv_focusShootOCRModel_06 11
+hiai_cpu_face_hat
+hiai_video_seg
+hiai_semantic_seg
+hiai_human_seg 28
+hiai_face_recognition_1 7
+hiai_cpu_face_detect
+hiai_cpu_face_attr 34
+hiai_face_attr1 34
+retinaface
+deconvs_model
+ml_location_scene_division 9
+ml_tabel_recog
+6c_seg_nomean_20200610
+ml_video_edit_img_segment
+ml_video_edit_video_segment_gauss_adaptis_part1
+ml_video_edit_Mnet
+ml_video_edit_detect_20211111
+ml_video_edit_MnetN367_extract_1010_pay
+ml_video_edit_person_divison_pic
+ml_video_edit_reid
+ml_video_edit_v10_best_model_nomean_20200723
+ml_face_isface;1:data
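The new per-framework .cfg files (the one above and those that follow) all use the line format spelled out in their header comments: a model name, an optional ';input_num[:input_names][;input_shapes]' suffix, and an optional trailing accuracy limit. The splitter below is a rough, hypothetical C sketch inferred from the entries themselves; it is not code from the run_benchmark scripts:

#include <stdio.h>
#include <string.h>

/* Hypothetical splitter for one benchmark-config line, e.g.
 *   "ml_ocr_detect_20200305;1;1,544,544,3 10"
 * -> model name, input count, input shapes, accuracy limit. Lines
 * starting with '#' are comments; a missing limit means the default. */
static void parse_cfg_line(const char *line) {
  char buf[512];
  snprintf(buf, sizeof(buf), "%s", line);
  if (buf[0] == '#' || buf[0] == '\0') return;
  char *spec = strtok(buf, " ");    /* model spec before any space */
  char *limit = strtok(NULL, " ");  /* optional accuracy limit */
  char *name = strtok(spec, ";");   /* model file name */
  char *inputs = strtok(NULL, ";"); /* optional "num" or "num:names" */
  char *shapes = strtok(NULL, ";"); /* optional input shapes */
  printf("model=%s inputs=%s shapes=%s limit=%s\n", name,
         inputs ? inputs : "-", shapes ? shapes : "-",
         limit ? limit : "default");
}

int main(void) {
  parse_cfg_line("ml_ocr_detect_20200305;1;1,544,544,3 10");
  parse_cfg_line("ml_face_isface;1:data");
  parse_cfg_line("detection_retinaface_fix 8");
  return 0;
}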
diff --git a/mindspore/lite/test/config/models_caffe_gpu_fp32.cfg b/mindspore/lite/test/config/models_caffe_gpu_fp32.cfg
new file mode 100644
index 00000000000..d55893bc51c
--- /dev/null
+++ b/mindspore/lite/test/config/models_caffe_gpu_fp32.cfg
@@ -0,0 +1,84 @@
+# [first column]:model_name, If you need input shape, please connect it through ';1;' after the model name, where '1' is the input num.
+# [second column]:accuracy limit in arm64
+detection_retinaface_fix
+landmark
+PoseNet_dla_17_x512_tmp
+age_new
+plat_isface
+ml_ocr_bank_card_detection_inception_tmp
+ml_ocr_detect_20200305
+hdc_age_medium
+hdc_contour_pose_128
+hdc_emotion
+hdc_fivembnet
+hdc_isface
+hdc_mobilenetface
+hdc_retinaface
+hdc_resnet
+mtk_isface
+mtk_landmark
+mtk_pose_tuku
+mtk_face_recognition_v1
+mtk_2012_ATLANTA_10class_20190614_v41
+mtk_detect-deeper-halfdeeper-mbv1-lastearlySSD-shortcut-400-400_nopostprocess_simplified
+mtk_detect-mbv1-shortcut-400-400_nopostprocess_simplified
+mtk_detect_mbv1_640_480_nopostprocess_simplified
+emotion
+gender_res_large_deploy
+glasses
+hat
+isface
+ml_bank_detect_0312_tmp
+ml_face_div_parsing
+ml_hardware_eyeclose
+Mnet6_0312_extract_pay
+pose_3d
+hiai_face_RFB-Epoch-170-no-transpose
+tracking
+detect-deeper-halfdeeper-mbv1-shortcut-400-400_nopostprocess_simplified
+hiai_face_detect_rfb
+hiai_face_isface
+hiai_face_landmark
+hiai_face_pose_tuku
+ml_hand_detection
+ml_ocr_sfz_detect_0325_tmp
+ml_hardware_liveness
+ml_liveness_detect_landmark_tmp
+ml_face_contour
+2012_ATLANTA_1class_20190621_v4.x_nomean
+ml_ocr_sfz_add_final_0325
+ml_hardware_pose
+ml_bank_recog
+2012_ATLANTA_10class_20190131_v4.0
+mnet
+recognition
+ml_face_landmark
+model_hebing_3branch
+hiai_cv_focusShootOCRModel_07
+hiai_cv_focusShootOCRModel_03
+hiai_cv_focusShootOCRModel_01
+hiai_face_hat1
+hiai_cv_focusShootOCRModel_04
+hiai_cv_focusShootOCRModel_06
+hiai_cpu_face_hat
+hiai_video_seg
+hiai_semantic_seg
+hiai_human_seg
+hiai_face_recognition_1
+hiai_cpu_face_detect
+hiai_cpu_face_attr
+hiai_face_attr1
+retinaface
+deconvs_model
+ml_location_scene_division
+ml_tabel_recog
+6c_seg_nomean_20200610
+ml_video_edit_img_segment
+ml_video_edit_video_segment_gauss_adaptis_part1
+ml_video_edit_Mnet
+ml_video_edit_detect_20211111
+ml_video_edit_MnetN367_extract_1010_pay
+ml_video_edit_person_divison_pic
+ml_video_edit_reid
+ml_video_edit_v10_best_model_nomean_20200723
+ml_face_isface;1:data
diff --git a/mindspore/lite/test/config/models_gpu_fp16.cfg b/mindspore/lite/test/config/models_gpu_fp16.cfg
deleted file mode 100644
index 25111fc2110..00000000000
--- a/mindspore/lite/test/config/models_gpu_fp16.cfg
+++ /dev/null
@@ -1,331 +0,0 @@
-mobilenet_v1_1.0_224.tflite
-mobilenet_v2_1.0_224.tflite
-mtk_age_gender_fp16.tflite
-mtk_isface.tflite
-mtk_landmark.tflite
-mtk_new_detect.tflite
-mtk_pose.tflite
-mtk_model_emotions_0727_nosoftmax.tflite
-landmark
-PoseNet_dla_17_x512_tmp
-plat_isface
-ml_location_lane_counter.onnx 5.5
-Q888_face_recognition.onnx
-Q_dila-small-mix-full-fineturn-390000-nopixel-nosigmoid.pb
-Q_AADB_HADB_MBV2_model.tflite
-Q_dila-small-mix-full-fineturn-390000-nopixel-nosigmoid_tflite.tflite
-Q_inception-249970-672-11-16_pb2tflite.tflite
-Q_isface.tflite
-Q_landmark.tflite
-Q_language_model_hrmini_Q4_b4_17w.tflite
-Q_new_detect.tflite
-Q_object_scene.tflite
-Q_pose.tflite
-Q_face_recognition.onnx
-Q888_iris_detect.onnx
-Q_iMaxDN_RGB_385_p_RGB_RGB_pb2tflite.tflite
-Q_iMaxSR_RGB_385_p_pb2tflite.tflite
-Q_detect_fpn_add_inception-1448650.tflite
-Q888_face_dress_mv3y.tflite
-Q888_HADB_AADB_MBV2_model_fp32.tflite
-Q888_landmark.tflite
-Q888_pose.tflite
-Q888_isface.tflite
-Q888_new_detect.tflite
-Q888_model_normalize_object_scene_ps_20200826_f32_no_softmax.tflite
-Q888_face_emo_dress_mv3_orderd.tflite
-mtk_detect-deeper-halfdeeper-mbv1-shortcut-400-400_nopostprocess_simplified_onnx.onnx
-mtk_detect-mbv1-shortcut-400-400_nopostprocess_simplified_onnx.onnx
-mtk_detect-deeper-halfdeeper-mbv1-lastearlySSD-shortcut-400-400_nopostprocess_simplified_onnx.onnx -inception_v3.pb;1;1,299,299,3 5 -mobilenet_v1_0.25_128_frozen.pb;1;1,128,128,3 5 -ml_face_openclose.pb;1;1,32,32,3 5 -hiai_AADB_HADB_MBV2_model.pb;1;1,224,224,3 5 -mtk_AADB_HADB_MBV2_model.pb;1;1,224,224,3 5 -mtk_AADB_HADB_MBV3_model.pb;1;1,224,224,3 5 -mtk_model_face_dress.pb;1;1,128,128,3 5 -mtk_AADB_HADB_MBV2_model_fp32.tflite -mtk_detect-mbv2-shortcut-400-400-simplified.onnx -mtk_age_gender.pb -mtk_age_gender.tflite -mtk_model_face_dress.tflite -mtk_isface -mtk_landmark -mtk_pose_tuku -mtk_2012_ATLANTA_10class_20190614_v41 -mtk_detect-deeper-halfdeeper-mbv1-lastearlySSD-shortcut-400-400_nopostprocess_simplified -mtk_detect_mbv1_640_480_nopostprocess_simplified -mtk_model_normalize_object_scene_ps_20200519_f16.tflite -mtk_AADB_HADB_MBV2_model_f16.tflite -mtk_model_emotions_0725_fp16.tflite -# Q888_age_gender_orderd.tflite's precision deteriorates in P50 -Q888_age_gender_orderd.tflite 9 -ml_ocr_latin.tflite;1;1,32,512,1 10 -ml_ocr_detect_20200305;1;1,544,544,3 10 -ml_face_3d.onnx -CloudBU_FSRCNN_RTC_8ch_3450_QP9.onnx;1;1,225,225,3 1.5 -CloudBU_rfdn_rtc_x2_ver2_13.onnx;1;1,225,225,3 1.0 -CloudBU_rfdn_rtc_x2_ver2_3450.onnx;1;1,225,225,3 108 -Q888_CV_model_face_dress_mv3y.pb;1:input;1,112,112,3 4 -mtk_AADB_HADB_MBV2_model_fp32.tflite;1:input_0 3 -ml_location_lane_counter0.onnx;1:input -resnet.tflite -squeezenet.tflite -# hiai_cn_recognize_modify_padv2.tflite's precision deteriorates in P50 -hiai_cn_recognize_modify_padv2.tflite 10 -hiai_model_normalize_object_scene_ps_20200519.tflite 18 -inception_v3.tflite -mtk_model_normalize_object_scene_ps_20200826_f32_no_softmax.tflite 29 -mtk_276landmark_0913.tflite 7 -mtk_face_recognition.tflite 8 -mtk_convert_model.tflite -mtk_model_face_dress_fp16.tflite -detection_retinaface_fix 8 -# age_new's precision deteriorates in P50 -age_new 8 -Q_convert.tflite 9 -#Q_crnn_ori_75w_slim_norm_pb2tflite.tflite's precision deteriorates in P50 -Q_crnn_ori_75w_slim_norm_pb2tflite.tflite 23 -#Q_crnn_ori_v2_405001_notrans_nopre_pb2tflite.tflite's precision deteriorates in P50 -Q_crnn_ori_v2_405001_notrans_nopre_pb2tflite.tflite 33 -Q_crnn_screen_slim400w_more_20w_pb2tflite.tflite 31 -Q_focusocr_cn_recog.tflite 24 -Q_focusocr_jk_recog.tflite 14 -matmul.pb -add_uint8.tflite;2 -mtk_face_features_v3.onnx 7 -hdc_Face_Landmark5_MTI_Aesthetic.onnx -mobilenet_v2_1.0_224_frozen.pb;1;1,224,224,3 6 -hiai_model_0909_kd_rot_ps_softmax.pb;1;1,224,224,3 12 -model_normalize_object_scene_ps_20200519.pb;1;1,224,224,3 7 -hiai_model_normalize_object_scene_ps_20200519.pb;1;1,224,224,3 18 -hiai_label_and_video.pb;1;1,224,224,3 16 -tinyyolov2-8.onnx;1;1,416,416,3 11 -emotion-ferplus-8.onnx -#rcnn-ilsvrc13-9.onnx's precision deteriorates in P50(has nan value) -#rcnn-ilsvrc13-9.onnx -shufflenet-v2-10.onnx -squeezenet1.1-7.onnx -ml_table_detection_fp32_tmp.onnx -ml_table_segment.onnx -shufflenet-9.onnx -gts_version-RFB-320_simplified.onnx -mnist-8.onnx -ml_video_edit_judge.onnx 12 -ml_video_edit_vignet.onnx -hdc_mobilenet_1w_class.onnx 22 -ml_edu_kit_hand_detection.onnx -ml_edu_kit_hand_key_position.onnx -ml_2012_ocr_detection_tmp.onnx -ml_video_edit_enhance_update_tmp.onnx -bloom_hongmo_detection_tmp.onnx -ml_ocr_bank_card_detection_inception_tmp 10 -mtk_model_ckpt.pb 17 -Q_inception-249970-672-11-16.pb 6 -#Q_crnn_screen_slim400w_more_20w.pb's precision deteriorates in P50 -Q_crnn_screen_slim400w_more_20w.pb 70 -hiai_ssd_mobilenetv2_object.pb 38 -hiai_humanDetection.pb 13 
-#mtk_face_features_v1.pb's precision deteriorates in P50 -mtk_face_features_v1.pb 17 -#Q_crnn_ori_75w_slim_norm.pb's precision deteriorates in P50 -Q_crnn_ori_75w_slim_norm.pb 30 -Q_crnn_ori_v2_405001_notrans_nopre.pb 23 -ml_location_lane_counter.onnx 4 -gts_detect_5k_tf115.tflite -smartreply.tflite -ml_text_correction.tflite -ml_ocr_jk_pb2tflite.tflite -scan_hms_angle_pb2tflite.tflite -scan_hms_detect_pb2tflite.tflite 16 -ml_face_openclose_tflite.tflite -unet_mbv2_05_104pts.tflite 8 -hiai_AADB_HADB_MBV2_model_f16.tflite -hiai_AADB_HADB_MBV2_model_fp32.tflite -hiai_detect_curve_model_float32.tflite -smartreply_1_default_1.tflite -text_classification.tflite -nasnet_large.pb;1:input;1,331,331,3 -nasnet_mobile.pb;1:input;1,224,224,3 -ml_ocr_jk.pb;1:input_0 -ml_video_edit_enhance.pb;1:lowres_input -scan_hms_angle.pb;1:normalized_input_image_tensor -scan_hms_detect.pb;1:normalized_input_image_tensor 14 -hiai_cn_recognize_modify_padv2.pb;1:input_0;1,32,512,1 14 -hiai_dress_detect.pb;1:data;1,960,960,3 -hiai_ghostnet.pb;1:input -hiai_latin_ocr.pb;1:input_0 -mtk_model_normalize_object_scene_ps_20200519.pb;1:input_0;1,224,224,3 7 -ml_ocr_latin.pb;1:input_0 8 -siteAI_wireless_depress_w.pb;1:x-input;1,36 -siteAI_wireless_restore_w.pb;1:x-input;1,36 -siteAI_trans_nonlinear.pb;1:features_placeholder;1,137 -siteAI_trans_nonlinear40g.pb;1:features_placeholder;1,271 -siteAI_trans_nonlinear134g.pb;1:features_placeholder;1,137 -siteAI_trans_nonlinear134g_nrz.pb;1:features_placeholder;1,182 -ml_video_edit_img_segment_adaptise.pb;2:backbone_features2,w 12 -hiai_transformer_encoder.pb;15:buffer_in_0,buffer_in_1,buffer_in_2,buffer_in_3,buffer_in_4,buffer_in_5,buffer_in_6,buffer_in_7,buffer_in_8,buffer_in_9,buffer_in_10,buffer_in_11,buffer_in_12,buffer_in_13,encoder_in_deploy -fsr_270_mindspore.pb -fsr_360_mindspore.pb -fsr_720_mindspore.pb -mobilenet_v1_0.25_160.tflite;1:input -mobilenet_v1_0.25_192.tflite;1:input -mobilenet_v1_0.25_224.tflite;1:input -mobilenet_v1_0.5_128.tflite;1:input -mobilenet_v1_0.5_192.tflite;1:input -mobilenet_v1_0.5_224.tflite;1:input -mobilenet_v1_0.75_128.tflite;1:input -mobilenet_v1_0.75_160.tflite;1:input -mobilenet_v1_0.75_224.tflite;1:input -mobilenet_v1_1.0_128.tflite;1:input 7 -mobilenet_v1_1.0_192.tflite;1:input 6 -hiai_latin_ocr.tflite;1:input_0 30 -hiai_latin_ocr_1.tflite;1:input_0 13 -siteAI_digcom_g2v_keras.tflite;1:conv2d_1_input -siteAI_trans_nonlinear.tflite;1:features_placeholder -siteAI_trans_tcpclassify.tflite;1:conv2d_1_input -siteAI_wireless_depress_w.tflite;1:x-input 8 -siteAI_wireless_restore_w.tflite;1:x-input -magenta_arbitrary-image-stylization-v1-256_fp16_prediction_1.tflite;1:style_image -hiai_cpu_face_emotion.tflite;1:input_0 -hiai_cpu_face_gazing.tflite;1:input_0 -hiai_cpu_face_headpose.tflite;1:input_0 -hiai_humanDetection.tflite;1:normalized_input_image_tensor 11 -ml_face_openclose.tflite;1:input -hiai_face_model_npu.tflite;1:input_0 -hiai_ctpn_feature_map.tflite;1:input_image -hiai_cv_labelDetectorModel_v2.tflite;1:input_0 17 -hiai_cv_labelDetectorModel_v4.tflite;1:input_0 -hiai_dress_detect.tflite;1:data -hiai_cv_saliencyDetectorModel.tflite;1:image_tensor -hiai_frozen_inference_graph.tflite;1:image_tensor -hiai_ghostnet.tflite;1:input -hiai_label_and_video.tflite;1:input_0 7 -hiai_lm_inference_graph.tflite;1:image_tensor -efficientnet_lite0_fp32_2.tflite;1:images -efficientnet_lite1_fp32_2.tflite;1:images -efficientnet_lite2_fp32_2.tflite;1:images -efficientnet_lite3_fp32_2.tflite;1:images -efficientnet_lite4_fp32_2.tflite;1:images 
-mnasnet_0.50_224_1_metadata_1.tflite;1:input -mnasnet_0.75_224_1_metadata_1.tflite;1:input -mnasnet_1.0_128_1_metadata_1.tflite;1:input -mnasnet_1.0_160_1_metadata_1.tflite;1:input -mnasnet_1.0_192_1_metadata_1.tflite;1:input -mnasnet_1.0_224_1_metadata_1.tflite;1:input -mnasnet_1.0_96_1_metadata_1.tflite;1:input -posenet_mobilenet_float_075_1_default_1.tflite;1:sub_2 39 -deeplabv3_1_default_1.tflite;1:sub_7 -lite-model_arbitrary-image-stylization-inceptionv3_fp16_predict_1.tflite;1:style_image -mindspore_text_classification_tflite.tflite;1:base_input -ml_ocr_latin_pb2tflite.tflite;1:input_0 9 -ml_location.tflite;1:inputs -bloom_new_detect.tflite;1:input -bloom_model_age_gender.tflite;1:input -bloom_isface.tflite;1:data -hiai_object_detect_814.tflite;1:normalized_input_image_tensor 10 -hiai_object_tflite_graph_8bit.tflite;1:normalized_input_image_tensor -lma_tsec_shallow_channels16_ds2.1.1_model-best-f1.tflite;1:inputs -ml_video_edit_img_segment_adaptise_pb2tflite.tflite;2:backbone_features2,w 12 -hiai_cv_labelDetectorModel_v3.tflite;2:input_0,input_1 -ml_headpose_pb2tflite.tflite;3:input_1,batch_normalization_8/batchnorm/add,batch_normalization_1/batchnorm/add;1,64,64,3:16:16 -ml_ei_headpose_pb2tflite.tflite;3:input_1,batch_normalization_8/batchnorm_1/add,batch_normalization_1/batchnorm_1/add;1,64,64,3:16:16 -#lite-model_mobilebert_1_metadata_1.tflite's precision deteriorates in P50 -lite-model_mobilebert_1_metadata_1.tflite;3:input_ids,input_mask,segment_ids 23 -coco_ssd_mobilenet_v1_1.0.tflite -bolt_segment.pb 4 -glasses -hat -isface -ml_bank_detect_0312_tmp 13 -ml_face_div_parsing -ml_hardware_eyeclose -Mnet6_0312_extract_pay 6 -pose_3d -hiai_face_RFB-Epoch-170-no-transpose -tracking -detect-deeper-halfdeeper-mbv1-shortcut-400-400_nopostprocess_simplified -hiai_face_detect_rfb -hiai_face_isface -hiai_face_landmark -hiai_face_pose_tuku -ml_hand_detection -ml_ocr_sfz_detect_0325_tmp -ml_hardware_liveness -ml_liveness_detect_landmark_tmp -ml_face_contour -2012_ATLANTA_1class_20190621_v4.x_nomean -ml_ocr_sfz_add_final_0325 -ml_hardware_pose -ml_bank_recog -#2012_ATLANTA_10class_20190131_v4.0's precision deteriorates in P50 -2012_ATLANTA_10class_20190131_v4.0 10 -mnet 9 -recognition 7 -ml_face_landmark -model_hebing_3branch 34 -hiai_cv_focusShootOCRModel_07 -hiai_cv_focusShootOCRModel_03 11 -hiai_cv_focusShootOCRModel_01 9 -hiai_face_hat1 -hiai_cv_focusShootOCRModel_04 6 -hiai_cv_focusShootOCRModel_06 11 -hiai_cpu_face_hat -hiai_video_seg -hiai_semantic_seg -hiai_human_seg 28 -hiai_face_recognition_1 7 -hiai_cpu_face_detect -hiai_cpu_face_attr 34 -hiai_face_attr1 34 -retinaface -deconvs_model -ml_location_scene_division 9 -ml_tabel_recog -6c_seg_nomean_20200610 -ml_video_edit_img_segment -ml_video_edit_video_segment_gauss_adaptis_part1 -ml_video_edit_Mnet -ml_video_edit_detect_20211111 -ml_video_edit_MnetN367_extract_1010_pay -ml_video_edit_person_divison_pic -ml_video_edit_reid -ml_video_edit_v10_best_model_nomean_20200723 -inception-v2-9.onnx -ml_face_isface;1:data -efficientnet-lite4-11.onnx;1:images:0 -mobilenetv2-7.onnx;1:data 9 -densenet-9.onnx;1:data_0 -squeezenet1.0-9.onnx;1:data_0 -residual_distill_cifar10_bs_1.onnx;1:actual_input -residual_distill_cifar10_bs_32.onnx;1:actual_input -residual_distill_bs_1.onnx;1:actual_input -residual_distill_bs_32.onnx;1:actual_input 20 -crnn_lite_lstm_v2.onnx;1:input;32,32,32,1 -residual_distill_res34_cifar10_bs_1_update.onnx;1:actual_input -residual_distill_res50_cifar10_bs_1_update.onnx;1:actual_input 
-hdc_Image_Aesthetic_MTI_Aesthetic.onnx;1:input -hdc_resnet_1w_class.onnx;1:input.1 -hdc_ocr_detect_tmp.onnx;1:actual_input_1 -ml_facedetector.onnx;1:input -ml_ei_facedetection.onnx;1:input -mtk_emotions-d2012-75.onnx;1:input.1 -mtk_detect_mbv1_640_480_nopostprocess_simplified_onnx.onnx;1:input;1,480,640,3 -mtk_face_features_v2.onnx;1:input;1,256,192,3 -simple_IPS_model_4D_input.onnx;1:pytorch_onnx -rpnt_pdr_conv2d_16_fixed_last.onnx;1:input -hdc_efficientnet_b3_1w_class.onnx;1:input.1 -porseg_tmp.onnx;2:img,prev_mask -hiai_nlu_onnx_model_v1_0.onnx;3:input_ids,segment_ids,position_ids -ml_video_edit_makeup_mobilenetv203.onnx;1:input.1 -Q888_CV_face_recognition_self.onnx;1:input -# ml_video_edit_hair_dyeing_migrate_v2_fix.onnx;4 precision deteriorates in P40 -ml_motion_capture_spin_mobile_mv3_v3_57mm_sim.onnx;5:input,bbox,init_pose,init_shape,init_cam -ml_video_edit_dimming_tech_model_345000_color.onnx;2:input.18,1 -Ireland_gaze_corrector.onnx;3:image,target_angle,strength 12 -unet_model_reconstruct.pb;1:content;1,256,256,3 -ml_video_edit_generate_filter.pb;1:lowres_input -inception_resnet_v2.pb;1:input;1,299,299,3 22 -inception_v4.pb;1:input;1,299,299,3 -mnasnet_1.0_224.pb;1:input -mnasnet_1.3_224.pb;1:input diff --git a/mindspore/lite/test/config/models_gpu_fp32.cfg b/mindspore/lite/test/config/models_gpu_fp32.cfg deleted file mode 100644 index e5bbaceaba3..00000000000 --- a/mindspore/lite/test/config/models_gpu_fp32.cfg +++ /dev/null @@ -1,416 +0,0 @@ -# [first column]:model_name, If you need input shape, please connect it through ';1;' after the model name, where '1' is the input num. -# [second column]:accuracy limit in arm64 -mobilenet_v1_1.0_224.tflite -mobilenet_v2_1.0_224.tflite -resnet.tflite -squeezenet.tflite -mtk_AADB_HADB_MBV2_model_fp32.tflite -hiai_cn_recognize_modify_padv2.tflite -hiai_cv_focusShootOCRModel_08.tflite -hiai_model_normalize_object_scene_ps_20200519.tflite -inception_v3.tflite -mtk_age_gender_fp16.tflite -mtk_isface.tflite -mtk_landmark.tflite -mtk_new_detect.tflite -mtk_pose.tflite -mtk_model_emotions_0727_nosoftmax.tflite -mtk_model_normalize_object_scene_ps_20200826_f32_no_softmax.tflite -mtk_276landmark_0913.tflite -mtk_face_recognition.tflite -mtk_convert_model.tflite -mtk_model_face_dress_fp16.tflite -detection_retinaface_fix -landmark -PoseNet_dla_17_x512_tmp -age_new -plat_isface -Q_hand_0812.pb -Q_dila-small-mix-full-fineturn-390000-nopixel-nosigmoid.pb -Q_AADB_HADB_MBV2_model.tflite -Q_convert.tflite -Q_crnn_ori_75w_slim_norm_pb2tflite.tflite -Q_crnn_ori_v2_405001_notrans_nopre_pb2tflite.tflite -Q_crnn_screen_slim400w_more_20w_pb2tflite.tflite -Q_dila-small-mix-full-fineturn-390000-nopixel-nosigmoid_tflite.tflite -Q_focusocr_cn_recog.tflite -Q_focusocr_jk_recog.tflite -Q_inception-249970-672-11-16_pb2tflite.tflite -Q_isface.tflite -Q_landmark.tflite -Q_language_model_hrmini_Q4_b4_17w.tflite -Q_new_detect.tflite -Q_object_scene.tflite -Q_pose.tflite -matmul.pb -add_uint8.tflite;2 -mtk_face_features_v3.onnx -hdc_Face_Landmark5_MTI_Aesthetic.onnx -inception_v3.pb;1;1,299,299,3 -mobilenet_v1_0.25_128_frozen.pb;1;1,128,128,3 -mobilenet_v2_1.0_224_frozen.pb;1;1,224,224,3 -ml_face_openclose.pb;1;1,32,32,3 -hiai_AADB_HADB_MBV2_model.pb;1;1,224,224,3 -hiai_model_0909_kd_rot_ps_softmax.pb;1;1,224,224,3 -model_normalize_object_scene_ps_20200519.pb;1;1,224,224,3 -mtk_AADB_HADB_MBV2_model.pb;1;1,224,224,3 -mtk_AADB_HADB_MBV3_model.pb;1;1,224,224,3 -mtk_model_face_dress.pb;1;1,128,128,3 -hiai_model_normalize_object_scene_ps_20200519.pb;1;1,224,224,3 
-hiai_label_and_video.pb;1;1,224,224,3 -tinyyolov2-8.onnx;1;1,416,416,3 -mtk_detect-mbv2-shortcut-400-400-simplified.onnx -emotion-ferplus-8.onnx -rcnn-ilsvrc13-9.onnx -shufflenet-v2-10.onnx -squeezenet1.1-7.onnx -ml_table_detection_fp32_tmp.onnx -ml_table_segment.onnx -googlenet-9.onnx -inception-v1-9.onnx -shufflenet-9.onnx -ml_face_3d.onnx -gts_version-RFB-320_simplified.onnx -mnist-8.onnx -ml_video_edit_judge.onnx -ml_video_edit_vignet.onnx -hdc_mobilenet_1w_class.onnx -ml_video_edit_imitate_filter.onnx -ml_edu_kit_hand_detection.onnx -ml_edu_kit_hand_key_position.onnx -mtk_detect-deeper-halfdeeper-mbv1-shortcut-400-400_nopostprocess_simplified_onnx.onnx -mtk_detect-mbv1-shortcut-400-400_nopostprocess_simplified_onnx.onnx -mtk_detect-deeper-halfdeeper-mbv1-lastearlySSD-shortcut-400-400_nopostprocess_simplified_onnx.onnx -ml_2012_ocr_detection_tmp.onnx -ml_video_edit_enhance_update_tmp.onnx -bloom_hongmo_detection_tmp.onnx -Q_face_recognition.onnx -Q888_iris_detect.onnx -ml_ocr_bank_card_detection_inception_tmp -ml_ocr_detect_20200305 -Q_iMaxDN_RGB_385_p_RGB_RGB_pb2tflite.tflite -Q_iMaxSR_RGB_385_p_pb2tflite.tflite -mtk_age_gender.pb -mtk_model_ckpt.pb -Q_inception-249970-672-11-16.pb -Q_crnn_screen_slim400w_more_20w.pb -hiai_ssd_mobilenetv2_object.pb -hiai_humanDetection.pb -mtk_face_features_v1.pb -Q_crnn_ori_75w_slim_norm.pb -Q_crnn_ori_v2_405001_notrans_nopre.pb -bolt_segment.pb -ml_location_lane_counter.onnx 2 -gts_detect_5k_tf115.tflite -smartreply.tflite -ml_text_correction.tflite -ml_ocr_jk_pb2tflite.tflite -scan_hms_angle_pb2tflite.tflite -scan_hms_detect_pb2tflite.tflite -ml_face_openclose_tflite.tflite -unet_mbv2_05_104pts.tflite -hiai_AADB_HADB_MBV2_model_f16.tflite -hiai_AADB_HADB_MBV2_model_fp32.tflite -hiai_detect_curve_model_float32.tflite -hiai_detectmodel_06_23_960_480_1180700.tflite -lite-model_aiy_vision_classifier_food_V1_1.tflite -lite-model_disease-classification_1.tflite -lite-model_models_mushroom-identification_v1_1.tflite -smartreply_1_default_1.tflite -text_classification.tflite -Q_detect_fpn_add_inception-1448650.tflite -Q_hand_0812_pb2tflite.tflite -bloom_landmark.tflite -Q888_face_dress_mv3y.tflite -Q888_HADB_AADB_MBV2_model_fp32.tflite -Q888_landmark.tflite -Q888_pose.tflite -Q888_lapa158_unet_0924.tflite -Q888_isface.tflite -Q888_new_detect.tflite -Q888_model_normalize_object_scene_ps_20200826_f32_no_softmax.tflite -Q888_face_emo_dress_mv3_orderd.tflite -hdc_age_medium -hdc_contour_pose_128 -hdc_emotion -hdc_fivembnet -hdc_isface -hdc_mobilenetface -hdc_retinaface -hdc_resnet -mtk_model_normalize_object_scene_ps_20200519_f32.tflite -hiai_cpu_face_emotion.pb -hiai_cpu_face_gazing.pb -hiai_cpu_face_headpose.pb -hiai_ctpn_feature_map.pb -hiai_cv_focusShootOCRModel_02.pb -hiai_cv_focusShootOCRModel_08.pb -hiai_cv_poseEstimation.pb -hiai_detectmodel_06_23_960_480_1180700.pb -hiai_face_model_npu.pb -hiai_iMaxDN_RGB.pb -hiai_iMaxSR_RGB.pb -hiai_lm_inference_graph.pb -hiai_PoseEstimation_Pcm.pb -hiai_model_0909_kd_rot_ps_softmax.tflite -hiai_chinese_english_recognize_model_float32.tflite -hiai_bigmodel_ghost_2_1_no_normalized_no_trans_tflite.tflite -hiai_bigmodel_ghost_5_1_no_normalized_no_trans_tflite.tflite -hiai_detectmodel_desnet_256_128_64_32.tflite -mtk_AADB_HADB_MBV3_model_fp32.tflite -Q888_face_recognition.onnx -mobilenet_v1_0.25_128.tflite -mobilenet_v1_0.5_160.tflite -mobilenet_v1_0.75_192.tflite -mobilenet_v1_1.0_160.tflite -mtk_model_ckpt.tflite -mtk_age_gender.tflite -mtk_model_face_dress.tflite -mtk_face_features_v1.tflite -mtk_isface -mtk_landmark 
-mtk_pose_tuku -mtk_face_recognition_v1 -mtk_2012_ATLANTA_10class_20190614_v41 -mtk_detect-deeper-halfdeeper-mbv1-lastearlySSD-shortcut-400-400_nopostprocess_simplified -mtk_detect-mbv1-shortcut-400-400_nopostprocess_simplified -mtk_detect_mbv1_640_480_nopostprocess_simplified -densenet.tflite -resnet_v2_101_299.tflite -mnasnet_1.3_224.tflite -deeplabv3_257_mv_gpu.tflite -multi_person_mobilenet_v1_075_float.tflite -ide_label_base.tflite -ml_ei_headpose.tflite -mnist.tflite -mobilenet.tflite -scan_hms_angle1.tflite -scan_hms_detect.tflite -ml_ocr_jk.tflite -nasnet_mobile.tflite -nasnet_large.tflite -model_emotions_0727_nosoftmax.tflite -inception_resnet_v2.tflite -hiai_PoseEstimation_Pcm.tflite -hiai_ssd_mobilenetv2_object.tflite -hiai_cv_focusShootOCRModel_02.tflite -hiai_cv_poseEstimation.tflite -inception_v4.tflite -mtk_model_normalize_object_scene_ps_20200519_f16.tflite -mtk_AADB_HADB_MBV2_model_f16.tflite -mtk_AADB_HADB_MBV3_model_f16.tflite -mtk_model_emotions_0725_fp16.tflite -mtk_face_features_v1_fp16.tflite -Q888_age_gender_orderd.tflite -emotion -gender_res_large_deploy -glasses -hat -isface -ml_bank_detect_0312_tmp -ml_face_div_parsing -ml_hardware_eyeclose -Mnet6_0312_extract_pay -pose_3d -hiai_face_RFB-Epoch-170-no-transpose -tracking -detect-deeper-halfdeeper-mbv1-shortcut-400-400_nopostprocess_simplified -hiai_face_detect_rfb -hiai_face_isface -hiai_face_landmark -hiai_face_pose_tuku -ml_hand_detection -ml_ocr_sfz_detect_0325_tmp -ml_hardware_liveness -ml_liveness_detect_landmark_tmp -ml_face_contour -2012_ATLANTA_1class_20190621_v4.x_nomean -ml_ocr_sfz_add_final_0325 -ml_hardware_pose -ml_bank_recog -2012_ATLANTA_10class_20190131_v4.0 -mnet -recognition -ml_face_landmark -model_hebing_3branch -hiai_cv_focusShootOCRModel_07 -hiai_cv_focusShootOCRModel_03 -hiai_cv_focusShootOCRModel_01 -hiai_face_hat1 -hiai_cv_focusShootOCRModel_04 -hiai_cv_focusShootOCRModel_06 -hiai_cpu_face_hat -hiai_video_seg -hiai_semantic_seg -hiai_human_seg -hiai_face_recognition_1 -hiai_cpu_face_detect -hiai_cpu_face_attr -hiai_face_attr1 -retinaface -deconvs_model -ml_location_scene_division -ml_tabel_recog -6c_seg_nomean_20200610 -ml_video_edit_img_segment -ml_video_edit_video_segment_gauss_adaptis_part1 -ml_video_edit_Mnet -ml_video_edit_detect_20211111 -ml_video_edit_MnetN367_extract_1010_pay -ml_video_edit_person_divison_pic -ml_video_edit_reid -ml_video_edit_v10_best_model_nomean_20200723 -inception-v2-9.onnx -ml_pic_shopping.tflite -ml_ocr_latin.tflite;1;1,32,512,1 10 -CloudBU_FSRCNN_RTC_8ch_3450_QP9.onnx;1;1,225,225,3 -CloudBU_rfdn_rtc_x2_ver2_13.onnx;1;1,225,225,3 -CloudBU_rfdn_rtc_x2_ver2_3450.onnx;1;1,225,225,3 0.7 -hiai_asr_last_e1_cpu_fast_wavenet_batch1_frame1_one_cache_fp32.tflite;2 -hiai_asr_last_e1_cpu_fast_wavenet_batch1_frame1_one_cache.pb;2 -Q888_CV_model_face_emo_dress_mv3.pb;1:img -Q888_CV_model_face_dress_mv3y.pb;1:input;1,112,112,3 -ml_face_isface;1:data -hdc_Face_Emotion_MTI_Aesthetic.onnx;1:input -ml_location_lane_counter0.onnx;1:input -efficientnet-lite4-11.onnx;1:images:0 -mobilenetv2-7.onnx;1:data -densenet-9.onnx;1:data_0 -squeezenet1.0-9.onnx;1:data_0 -residual_distill_cifar10_bs_1.onnx;1:actual_input -residual_distill_cifar10_bs_32.onnx;1:actual_input -residual_distill_bs_1.onnx;1:actual_input -residual_distill_bs_32.onnx;1:actual_input -crnn_lite_lstm_v2.onnx;1:input;32,32,32,1 -psenet_lite_mbv2.onnx;1:input;1,32,32,3 -residual_distill_res34_cifar10_bs_1_update.onnx;1:actual_input -residual_distill_res50_cifar10_bs_1_update.onnx;1:actual_input 
-hdc_Image_Aesthetic_MTI_Aesthetic.onnx;1:input -hdc_resnet_1w_class.onnx;1:input.1 -hdc_ocr_detect_tmp.onnx;1:actual_input_1 -ml_facedetector.onnx;1:input -ml_ei_facedetection.onnx;1:input -mtk_emotions-d2012-75.onnx;1:input.1 -mtk_detect-deeper-halfdeeper-mbv1-lastearlySSD-shortcut-400-400_nopostprocess_simplified_onnx.onnx;1:input -mtk_detect_mbv1_640_480_nopostprocess_simplified_onnx.onnx;1:input;1,480,640,3 -mtk_face_features_v2.onnx;1:input;1,256,192,3 -simple_IPS_model_4D_input.onnx;1:pytorch_onnx -rpnt_pdr_conv2d_16_fixed_last.onnx;1:input -hdc_efficientnet_b3_1w_class.onnx;1:input.1 -porseg_tmp.onnx;2:img,prev_mask -hiai_nlu_onnx_model_v1_0.onnx;3:input_ids,segment_ids,position_ids -ml_video_edit_makeup_mobilenetv203.onnx;1:input.1 -Q888_CV_face_recognition_self.onnx;1:input -# ml_video_edit_hair_dyeing_migrate_v2_fix.onnx's precision deteriorates in P50 -ml_video_edit_hair_dyeing_migrate_v2_fix.onnx;4 9 -ml_motion_capture_spin_mobile_mv3_v3_57mm_sim.onnx;5:input,bbox,init_pose,init_shape,init_cam -ml_video_edit_dimming_tech_model_345000_color.onnx;2:input.18,1 -Ireland_gaze_corrector.onnx;3:image,target_angle,strength 1 -unet_model_reconstruct.pb;1:content;1,256,256,3 -ml_video_edit_generate_filter.pb;1:lowres_input -inception_resnet_v2.pb;1:input;1,299,299,3 -inception_v4.pb;1:input;1,299,299,3 -mnasnet_1.0_224.pb;1:input -mnasnet_1.3_224.pb;1:input -nasnet_large.pb;1:input;1,331,331,3 -nasnet_mobile.pb;1:input;1,224,224,3 -ml_ei_headpose.pb;1:input_1;1,64,64,3 -ml_ocr_jk.pb;1:input_0 -ml_video_edit_enhance.pb;1:lowres_input -scan_hms_angle.pb;1:normalized_input_image_tensor -scan_hms_detect.pb;1:normalized_input_image_tensor -hiai_cn_recognize_modify_padv2.pb;1:input_0;1,32,512,1 -hiai_dress_detect.pb;1:data;1,960,960,3 -hiai_ghostnet.pb;1:input -hiai_latin_ocr.pb;1:input_0 -hiai_latin_ocr_1.pb;1:input_0 -mtk_model_normalize_object_scene_ps_20200519.pb;1:input_0;1,224,224,3 -ml_ocr_latin.pb;1:input_0 -siteAI_wireless_depress_w.pb;1:x-input;1,36 -siteAI_wireless_restore_w.pb;1:x-input;1,36 -siteAI_trans_nonlinear.pb;1:features_placeholder;1,137 -siteAI_trans_nonlinear40g.pb;1:features_placeholder;1,271 -siteAI_trans_nonlinear134g.pb;1:features_placeholder;1,137 -siteAI_trans_nonlinear134g_nrz.pb;1:features_placeholder;1,182 -ml_video_edit_img_segment_adaptise.pb;2:backbone_features2,w -ml_video_edit_video_segment_gauss_adaptis_part2.pb;2:backbone_features2,w -# ml_video_edit_oneclick_adaptis.pb's precision deteriorates in P50 -ml_video_edit_oneclick_adaptis.pb;3:image_input,point_input,coord_features 19 -hiai_transformer_encoder.pb;15:buffer_in_0,buffer_in_1,buffer_in_2,buffer_in_3,buffer_in_4,buffer_in_5,buffer_in_6,buffer_in_7,buffer_in_8,buffer_in_9,buffer_in_10,buffer_in_11,buffer_in_12,buffer_in_13,encoder_in_deploy -fsr_270_mindspore.pb -fsr_360_mindspore.pb -fsr_720_mindspore.pb -mobilenet_v1_0.25_160.tflite;1:input -mobilenet_v1_0.25_192.tflite;1:input -mobilenet_v1_0.25_224.tflite;1:input -mobilenet_v1_0.5_128.tflite;1:input -mobilenet_v1_0.5_192.tflite;1:input -mobilenet_v1_0.5_224.tflite;1:input -mobilenet_v1_0.75_128.tflite;1:input -mobilenet_v1_0.75_160.tflite;1:input -mobilenet_v1_0.75_224.tflite;1:input -mobilenet_v1_1.0_128.tflite;1:input -mobilenet_v1_1.0_192.tflite;1:input -hiai_latin_ocr.tflite;1:input_0 -hiai_latin_ocr_1.tflite;1:input_0 -siteAI_digcom_g2v_keras.tflite;1:conv2d_1_input -siteAI_trans_nonlinear.tflite;1:features_placeholder -siteAI_trans_tcpclassify.tflite;1:conv2d_1_input -siteAI_wireless_depress_w.tflite;1:x-input 
-siteAI_wireless_restore_w.tflite;1:x-input -magenta_arbitrary-image-stylization-v1-256_fp16_prediction_1.tflite;1:style_image -hiai_cpu_face_emotion.tflite;1:input_0 -hiai_cpu_face_gazing.tflite;1:input_0 -hiai_cpu_face_headpose.tflite;1:input_0 -hiai_humanDetection.tflite;1:normalized_input_image_tensor -ml_face_openclose.tflite;1:input -hiai_face_model_npu.tflite;1:input_0 -hiai_ctpn_feature_map.tflite;1:input_image -hiai_cv_labelDetectorModel_v2.tflite;1:input_0 -hiai_cv_labelDetectorModel_v4.tflite;1:input_0 -hiai_dress_detect.tflite;1:data -hiai_cv_saliencyDetectorModel.tflite;1:image_tensor -hiai_frozen_inference_graph.tflite;1:image_tensor -hiai_ghostnet.tflite;1:input -hiai_iMaxDN_RGB.tflite;1:input -hiai_iMaxSR_RGB.tflite;1:input -hiai_label_and_video.tflite;1:input_0 -hiai_lm_inference_graph.tflite;1:image_tensor -efficientnet_lite0_fp32_2.tflite;1:images -efficientnet_lite1_fp32_2.tflite;1:images -efficientnet_lite2_fp32_2.tflite;1:images -efficientnet_lite3_fp32_2.tflite;1:images -efficientnet_lite4_fp32_2.tflite;1:images -mnasnet_0.50_224_1_metadata_1.tflite;1:input -mnasnet_0.75_224_1_metadata_1.tflite;1:input -mnasnet_1.0_128_1_metadata_1.tflite;1:input -mnasnet_1.0_160_1_metadata_1.tflite;1:input -mnasnet_1.0_192_1_metadata_1.tflite;1:input -mnasnet_1.0_224_1_metadata_1.tflite;1:input -mnasnet_1.0_96_1_metadata_1.tflite;1:input -posenet_mobilenet_float_075_1_default_1.tflite;1:sub_2 -deeplabv3_1_default_1.tflite;1:sub_7 -lite-model_deeplabv3-mobilenetv2_dm05-float16_1_default_1.tflite;1:sub_7 -lite-model_deeplabv3-mobilenetv2-float16_1_default_1.tflite;1:sub_7 -lite-model_east-text-detector_fp16_1.tflite;1:input_images -lite-model_arbitrary-image-stylization-inceptionv3_fp16_predict_1.tflite;1:style_image -mindspore_text_classification_tflite.tflite;1:base_input -ml_pic_shopping_pb2tflite.tflite;1:images -ml_ocr_latin_pb2tflite.tflite;1:input_0 -ml_location.tflite;1:inputs -bloom_new_detect.tflite;1:input -bloom_model_age_gender.tflite;1:input -bloom_isface.tflite;1:data -hiai_object_detect_814.tflite;1:normalized_input_image_tensor -hiai_object_tflite_graph_8bit.tflite;1:normalized_input_image_tensor -lma_tsec_shallow_channels16_ds2.1.1_model-best-f1.tflite;1:inputs -lite-model_arbitrary-image-stylization-inceptionv3_fp16_transfer_1.tflite;2:content_image,Conv/BiasAdd -magenta_arbitrary-image-stylization-v1-256_fp16_transfer_1.tflite;2:content_image,mobilenet_conv/Conv/BiasAdd -ml_video_edit_img_segment_adaptise_pb2tflite.tflite;2:backbone_features2,w -ml_video_edit_video_segment_gauss_adaptis_part2_pb2tflite.tflite;2:backbone_features2,w -hiai_cv_labelDetectorModel_v3.tflite;2:input_0,input_1 -ml_headpose_pb2tflite.tflite;3:input_1,batch_normalization_8/batchnorm/add,batch_normalization_1/batchnorm/add;1,64,64,3:16:16 -ml_ei_headpose_pb2tflite.tflite;3:input_1,batch_normalization_8/batchnorm_1/add,batch_normalization_1/batchnorm_1/add;1,64,64,3:16:16 -lite-model_mobilebert_1_metadata_1.tflite;3:input_ids,input_mask,segment_ids -hiai_vad.tflite;2:input,input_cache -coco_ssd_mobilenet_v1_1.0.tflite diff --git a/mindspore/lite/test/config/models_mindspore_gpu_fp16.cfg b/mindspore/lite/test/config/models_mindspore_gpu_fp16.cfg new file mode 100644 index 00000000000..7c69b91860d --- /dev/null +++ b/mindspore/lite/test/config/models_mindspore_gpu_fp16.cfg @@ -0,0 +1,2 @@ +# [first column]:model_name, If you need input shape, please connect it through ';1;' after the model name, where '1' is the input num. 
+# [second column]:accuracy limit in arm64 diff --git a/mindspore/lite/test/config/models_mindspore_gpu_fp32.cfg b/mindspore/lite/test/config/models_mindspore_gpu_fp32.cfg new file mode 100644 index 00000000000..4e17eb36938 --- /dev/null +++ b/mindspore/lite/test/config/models_mindspore_gpu_fp32.cfg @@ -0,0 +1,7 @@ +# [first column]:model_name, If you need input shape, please connect it through ';1;' after the model name, where '1' is the input num. +# [second column]:accuracy limit in arm64 +mobilenetv2.r1.1.mindir 0.5 +mobilenetv2_fzy.mindir 0.5 +mindspore_FaceRecognitionTrack_1011.mindir +mindspore_FaceQualityAssessment_0928_bs256.mindir +# mindspore_nasnet.mindir diff --git a/mindspore/lite/test/config/models_onnx_gpu_fp16.cfg b/mindspore/lite/test/config/models_onnx_gpu_fp16.cfg new file mode 100644 index 00000000000..4352e2e213d --- /dev/null +++ b/mindspore/lite/test/config/models_onnx_gpu_fp16.cfg @@ -0,0 +1,67 @@ +# [first column]:model_name, If you need input shape, please connect it through ';1;' after the model name, where '1' is the input num. +# [second column]:accuracy limit in arm64 +Q888_face_recognition.onnx +Q_face_recognition.onnx +Q888_iris_detect.onnx +mtk_detect-deeper-halfdeeper-mbv1-shortcut-400-400_nopostprocess_simplified_onnx.onnx +mtk_detect-mbv1-shortcut-400-400_nopostprocess_simplified_onnx.onnx +mtk_detect-deeper-halfdeeper-mbv1-lastearlySSD-shortcut-400-400_nopostprocess_simplified_onnx.onnx +mtk_detect-mbv2-shortcut-400-400-simplified.onnx +ml_face_3d.onnx +CloudBU_FSRCNN_RTC_8ch_3450_QP9.onnx;1;1,225,225,3 1.5 +CloudBU_rfdn_rtc_x2_ver2_13.onnx;1;1,225,225,3 1.0 +CloudBU_rfdn_rtc_x2_ver2_3450.onnx;1;1,225,225,3 108 +ml_location_lane_counter0.onnx;1:input +mtk_face_features_v3.onnx 7 +hdc_Face_Landmark5_MTI_Aesthetic.onnx +tinyyolov2-8.onnx;1;1,416,416,3 11 +emotion-ferplus-8.onnx +#rcnn-ilsvrc13-9.onnx's precision deteriorates in P50(has nan value) +#rcnn-ilsvrc13-9.onnx +shufflenet-v2-10.onnx +squeezenet1.1-7.onnx +ml_table_detection_fp32_tmp.onnx +ml_table_segment.onnx +shufflenet-9.onnx +gts_version-RFB-320_simplified.onnx +mnist-8.onnx +ml_video_edit_judge.onnx 12 +ml_video_edit_vignet.onnx +hdc_mobilenet_1w_class.onnx 22 +ml_edu_kit_hand_detection.onnx +ml_edu_kit_hand_key_position.onnx +ml_2012_ocr_detection_tmp.onnx +ml_video_edit_enhance_update_tmp.onnx +bloom_hongmo_detection_tmp.onnx +ml_location_lane_counter.onnx 4 +inception-v2-9.onnx +efficientnet-lite4-11.onnx;1:images:0 +mobilenetv2-7.onnx;1:data 9 +densenet-9.onnx;1:data_0 +squeezenet1.0-9.onnx;1:data_0 +residual_distill_cifar10_bs_1.onnx;1:actual_input +residual_distill_cifar10_bs_32.onnx;1:actual_input +residual_distill_bs_1.onnx;1:actual_input +#residual_distill_bs_32.onnx;1:actual_input 20 +crnn_lite_lstm_v2.onnx;1:input;32,32,32,1 +residual_distill_res34_cifar10_bs_1_update.onnx;1:actual_input +residual_distill_res50_cifar10_bs_1_update.onnx;1:actual_input +hdc_Image_Aesthetic_MTI_Aesthetic.onnx;1:input +hdc_resnet_1w_class.onnx;1:input.1 +hdc_ocr_detect_tmp.onnx;1:actual_input_1 +ml_facedetector.onnx;1:input +ml_ei_facedetection.onnx;1:input +mtk_emotions-d2012-75.onnx;1:input.1 +mtk_detect_mbv1_640_480_nopostprocess_simplified_onnx.onnx;1:input;1,480,640,3 +mtk_face_features_v2.onnx;1:input;1,256,192,3 +simple_IPS_model_4D_input.onnx;1:pytorch_onnx +rpnt_pdr_conv2d_16_fixed_last.onnx;1:input +hdc_efficientnet_b3_1w_class.onnx;1:input.1 +porseg_tmp.onnx;2:img,prev_mask +hiai_nlu_onnx_model_v1_0.onnx;3:input_ids,segment_ids,position_ids 
+ml_video_edit_makeup_mobilenetv203.onnx;1:input.1 +Q888_CV_face_recognition_self.onnx;1:input +ml_video_edit_hair_dyeing_migrate_v2_fix.onnx;4 +ml_motion_capture_spin_mobile_mv3_v3_57mm_sim.onnx;5:input,bbox,init_pose,init_shape,init_cam +ml_video_edit_dimming_tech_model_345000_color.onnx;2:input.18,1 +Ireland_gaze_corrector.onnx;3:image,target_angle,strength 12 diff --git a/mindspore/lite/test/config/models_onnx_gpu_fp32.cfg b/mindspore/lite/test/config/models_onnx_gpu_fp32.cfg new file mode 100644 index 00000000000..d8d19f5f70f --- /dev/null +++ b/mindspore/lite/test/config/models_onnx_gpu_fp32.cfg @@ -0,0 +1,77 @@ +# [first column]:model_name, If you need input shape, please connect it through ';1;' after the model name, where '1' is the input num. +# [second column]:accuracy limit in arm64 +mtk_face_features_v3.onnx +hdc_Face_Landmark5_MTI_Aesthetic.onnx +tinyyolov2-8.onnx;1;1,416,416,3 +mtk_detect-mbv2-shortcut-400-400-simplified.onnx +emotion-ferplus-8.onnx +rcnn-ilsvrc13-9.onnx +shufflenet-v2-10.onnx +squeezenet1.1-7.onnx +ml_table_detection_fp32_tmp.onnx +ml_table_segment.onnx +googlenet-9.onnx +inception-v1-9.onnx +shufflenet-9.onnx +ml_face_3d.onnx +gts_version-RFB-320_simplified.onnx +mnist-8.onnx +ml_video_edit_judge.onnx +ml_video_edit_vignet.onnx +hdc_mobilenet_1w_class.onnx +ml_video_edit_imitate_filter.onnx +ml_edu_kit_hand_detection.onnx +ml_edu_kit_hand_key_position.onnx +mtk_detect-deeper-halfdeeper-mbv1-shortcut-400-400_nopostprocess_simplified_onnx.onnx +mtk_detect-mbv1-shortcut-400-400_nopostprocess_simplified_onnx.onnx +mtk_detect-deeper-halfdeeper-mbv1-lastearlySSD-shortcut-400-400_nopostprocess_simplified_onnx.onnx +ml_2012_ocr_detection_tmp.onnx +ml_video_edit_enhance_update_tmp.onnx +bloom_hongmo_detection_tmp.onnx +Q_face_recognition.onnx +Q888_iris_detect.onnx +ml_location_lane_counter.onnx 2 +Q888_face_recognition.onnx +inception-v2-9.onnx +CloudBU_FSRCNN_RTC_8ch_3450_QP9.onnx;1;1,225,225,3 +CloudBU_rfdn_rtc_x2_ver2_13.onnx;1;1,225,225,3 +CloudBU_rfdn_rtc_x2_ver2_3450.onnx;1;1,225,225,3 0.7 +hdc_Face_Emotion_MTI_Aesthetic.onnx;1:input +ml_location_lane_counter0.onnx;1:input +efficientnet-lite4-11.onnx;1:images:0 +mobilenetv2-7.onnx;1:data +densenet-9.onnx;1:data_0 +squeezenet1.0-9.onnx;1:data_0 +residual_distill_cifar10_bs_1.onnx;1:actual_input +residual_distill_cifar10_bs_32.onnx;1:actual_input +residual_distill_bs_1.onnx;1:actual_input +residual_distill_bs_32.onnx;1:actual_input +crnn_lite_lstm_v2.onnx;1:input;32,32,32,1 +psenet_lite_mbv2.onnx;1:input;1,32,32,3 +residual_distill_res34_cifar10_bs_1_update.onnx;1:actual_input +residual_distill_res50_cifar10_bs_1_update.onnx;1:actual_input +hdc_Image_Aesthetic_MTI_Aesthetic.onnx;1:input +hdc_resnet_1w_class.onnx;1:input.1 +hdc_ocr_detect_tmp.onnx;1:actual_input_1 +ml_facedetector.onnx;1:input +ml_ei_facedetection.onnx;1:input +mtk_emotions-d2012-75.onnx;1:input.1 +mtk_detect-deeper-halfdeeper-mbv1-lastearlySSD-shortcut-400-400_nopostprocess_simplified_onnx.onnx;1:input +mtk_detect_mbv1_640_480_nopostprocess_simplified_onnx.onnx;1:input;1,480,640,3 +mtk_face_features_v2.onnx;1:input;1,256,192,3 +simple_IPS_model_4D_input.onnx;1:pytorch_onnx +rpnt_pdr_conv2d_16_fixed_last.onnx;1:input +hdc_efficientnet_b3_1w_class.onnx;1:input.1 +porseg_tmp.onnx;2:img,prev_mask +hiai_nlu_onnx_model_v1_0.onnx;3:input_ids,segment_ids,position_ids +ml_video_edit_makeup_mobilenetv203.onnx;1:input.1 +Q888_CV_face_recognition_self.onnx;1:input +# ml_video_edit_hair_dyeing_migrate_v2_fix.onnx's precision deteriorates in P50 
+ml_video_edit_hair_dyeing_migrate_v2_fix.onnx;4 9 +ml_motion_capture_spin_mobile_mv3_v3_57mm_sim.onnx;5:input,bbox,init_pose,init_shape,init_cam +ml_video_edit_dimming_tech_model_345000_color.onnx;2:input.18,1 +Ireland_gaze_corrector.onnx;3:image,target_angle,strength 1 +ml_location_lane_counter.onnx;1:input +gender_lstm_scd.onnx;1:input.1 +gender_lstm_vad.onnx;1:input.1 +# ssd-10.onnx;1:image diff --git a/mindspore/lite/test/config/models_tf_gpu_fp16.cfg b/mindspore/lite/test/config/models_tf_gpu_fp16.cfg new file mode 100644 index 00000000000..140eaec2f65 --- /dev/null +++ b/mindspore/lite/test/config/models_tf_gpu_fp16.cfg @@ -0,0 +1,60 @@ +# [first column]:model_name, If you need input shape, please connect it through ';1;' after the model name, where '1' is the input num. +# [second column]:accuracy limit in arm64 +Q_dila-small-mix-full-fineturn-390000-nopixel-nosigmoid.pb +inception_v3.pb;1;1,299,299,3 5 +mobilenet_v1_0.25_128_frozen.pb;1;1,128,128,3 5 +ml_face_openclose.pb;1;1,32,32,3 5 +hiai_AADB_HADB_MBV2_model.pb;1;1,224,224,3 5 +mtk_AADB_HADB_MBV2_model.pb;1;1,224,224,3 5 +mtk_AADB_HADB_MBV3_model.pb;1;1,224,224,3 5 +mtk_model_face_dress.pb;1;1,128,128,3 5 +mtk_age_gender.pb +Q888_CV_model_face_dress_mv3y.pb;1:input;1,112,112,3 4 +matmul.pb +mobilenet_v2_1.0_224_frozen.pb;1;1,224,224,3 6 +hiai_model_0909_kd_rot_ps_softmax.pb;1;1,224,224,3 12 +model_normalize_object_scene_ps_20200519.pb;1;1,224,224,3 7 +hiai_model_normalize_object_scene_ps_20200519.pb;1;1,224,224,3 18 +hiai_label_and_video.pb;1;1,224,224,3 16 +mtk_model_ckpt.pb 17 +Q_inception-249970-672-11-16.pb 6 +#Q_crnn_screen_slim400w_more_20w.pb's precision deteriorates in P50 +Q_crnn_screen_slim400w_more_20w.pb 70 +hiai_ssd_mobilenetv2_object.pb 38 +hiai_humanDetection.pb 13 +#mtk_face_features_v1.pb's precision deteriorates in P50 +mtk_face_features_v1.pb 17 +#Q_crnn_ori_75w_slim_norm.pb's precision deteriorates in P50 +Q_crnn_ori_75w_slim_norm.pb 30 +Q_crnn_ori_v2_405001_notrans_nopre.pb 23 +nasnet_large.pb;1:input;1,331,331,3 +nasnet_mobile.pb;1:input;1,224,224,3 +ml_ocr_jk.pb;1:input_0 +ml_video_edit_enhance.pb;1:lowres_input +scan_hms_angle.pb;1:normalized_input_image_tensor +scan_hms_detect.pb;1:normalized_input_image_tensor 14 +hiai_cn_recognize_modify_padv2.pb;1:input_0;1,32,512,1 14 +hiai_dress_detect.pb;1:data;1,960,960,3 +hiai_ghostnet.pb;1:input +hiai_latin_ocr.pb;1:input_0 +mtk_model_normalize_object_scene_ps_20200519.pb;1:input_0;1,224,224,3 7 +ml_ocr_latin.pb;1:input_0 8 +siteAI_wireless_depress_w.pb;1:x-input;1,36 +siteAI_wireless_restore_w.pb;1:x-input;1,36 +siteAI_trans_nonlinear.pb;1:features_placeholder;1,137 +siteAI_trans_nonlinear40g.pb;1:features_placeholder;1,271 +siteAI_trans_nonlinear134g.pb;1:features_placeholder;1,137 +siteAI_trans_nonlinear134g_nrz.pb;1:features_placeholder;1,182 +ml_video_edit_img_segment_adaptise.pb;2:backbone_features2,w 12 +hiai_transformer_encoder.pb;15:buffer_in_0,buffer_in_1,buffer_in_2,buffer_in_3,buffer_in_4,buffer_in_5,buffer_in_6,buffer_in_7,buffer_in_8,buffer_in_9,buffer_in_10,buffer_in_11,buffer_in_12,buffer_in_13,encoder_in_deploy +fsr_270_mindspore.pb +fsr_360_mindspore.pb +fsr_720_mindspore.pb +unet_model_reconstruct.pb;1:content;1,256,256,3 +ml_video_edit_generate_filter.pb;1:lowres_input +inception_resnet_v2.pb;1:input;1,299,299,3 22 +inception_v4.pb;1:input;1,299,299,3 +mnasnet_1.0_224.pb;1:input +mnasnet_1.3_224.pb;1:input +ml_tts_decoder.pb;5:h_1,c_1,h_0,decoder_inputs_array,c_0 +tensor_dot.pb;1:input;1,217 diff --git 
a/mindspore/lite/test/config/models_tf_gpu_fp32.cfg b/mindspore/lite/test/config/models_tf_gpu_fp32.cfg new file mode 100644 index 00000000000..a6b90376377 --- /dev/null +++ b/mindspore/lite/test/config/models_tf_gpu_fp32.cfg @@ -0,0 +1,81 @@ +# [first column]:model_name, If you need input shape, please connect it through ';1;' after the model name, where '1' is the input num. +# [second column]:accuracy limit in arm64 +Q_hand_0812.pb +Q_dila-small-mix-full-fineturn-390000-nopixel-nosigmoid.pb +matmul.pb +inception_v3.pb;1;1,299,299,3 +mobilenet_v1_0.25_128_frozen.pb;1;1,128,128,3 +mobilenet_v2_1.0_224_frozen.pb;1;1,224,224,3 +ml_face_openclose.pb;1;1,32,32,3 +hiai_AADB_HADB_MBV2_model.pb;1;1,224,224,3 +hiai_model_0909_kd_rot_ps_softmax.pb;1;1,224,224,3 +model_normalize_object_scene_ps_20200519.pb;1;1,224,224,3 +mtk_AADB_HADB_MBV2_model.pb;1;1,224,224,3 +mtk_AADB_HADB_MBV3_model.pb;1;1,224,224,3 +mtk_model_face_dress.pb;1;1,128,128,3 +hiai_model_normalize_object_scene_ps_20200519.pb;1;1,224,224,3 +hiai_label_and_video.pb;1;1,224,224,3 +mtk_age_gender.pb +mtk_model_ckpt.pb +Q_inception-249970-672-11-16.pb +Q_crnn_screen_slim400w_more_20w.pb +hiai_ssd_mobilenetv2_object.pb +hiai_humanDetection.pb +mtk_face_features_v1.pb +Q_crnn_ori_75w_slim_norm.pb +Q_crnn_ori_v2_405001_notrans_nopre.pb +bolt_segment.pb +hiai_cpu_face_emotion.pb +hiai_cpu_face_gazing.pb +hiai_cpu_face_headpose.pb +hiai_ctpn_feature_map.pb +hiai_cv_focusShootOCRModel_02.pb +hiai_cv_focusShootOCRModel_08.pb +hiai_cv_poseEstimation.pb +hiai_detectmodel_06_23_960_480_1180700.pb +hiai_face_model_npu.pb +hiai_iMaxDN_RGB.pb +hiai_iMaxSR_RGB.pb +hiai_lm_inference_graph.pb +hiai_PoseEstimation_Pcm.pb +hiai_asr_last_e1_cpu_fast_wavenet_batch1_frame1_one_cache.pb;2 +Q888_CV_model_face_emo_dress_mv3.pb;1:img +Q888_CV_model_face_dress_mv3y.pb;1:input;1,112,112,3 +unet_model_reconstruct.pb;1:content;1,256,256,3 +ml_video_edit_generate_filter.pb;1:lowres_input +inception_resnet_v2.pb;1:input;1,299,299,3 +inception_v4.pb;1:input;1,299,299,3 +mnasnet_1.0_224.pb;1:input +mnasnet_1.3_224.pb;1:input +nasnet_large.pb;1:input;1,331,331,3 +nasnet_mobile.pb;1:input;1,224,224,3 +ml_ei_headpose.pb;1:input_1;1,64,64,3 +ml_ocr_jk.pb;1:input_0 +ml_video_edit_enhance.pb;1:lowres_input +scan_hms_angle.pb;1:normalized_input_image_tensor +scan_hms_detect.pb;1:normalized_input_image_tensor +hiai_cn_recognize_modify_padv2.pb;1:input_0;1,32,512,1 +hiai_dress_detect.pb;1:data;1,960,960,3 +hiai_ghostnet.pb;1:input +hiai_latin_ocr.pb;1:input_0 +hiai_latin_ocr_1.pb;1:input_0 +mtk_model_normalize_object_scene_ps_20200519.pb;1:input_0;1,224,224,3 +ml_ocr_latin.pb;1:input_0 +siteAI_wireless_depress_w.pb;1:x-input;1,36 +siteAI_wireless_restore_w.pb;1:x-input;1,36 +siteAI_trans_nonlinear.pb;1:features_placeholder;1,137 +siteAI_trans_nonlinear40g.pb;1:features_placeholder;1,271 +siteAI_trans_nonlinear134g.pb;1:features_placeholder;1,137 +siteAI_trans_nonlinear134g_nrz.pb;1:features_placeholder;1,182 +ml_video_edit_img_segment_adaptise.pb;2:backbone_features2,w +ml_video_edit_video_segment_gauss_adaptis_part2.pb;2:backbone_features2,w +# ml_video_edit_oneclick_adaptis.pb's precision deteriorates in P50 +ml_video_edit_oneclick_adaptis.pb;3:image_input,point_input,coord_features 19 +hiai_transformer_encoder.pb;15:buffer_in_0,buffer_in_1,buffer_in_2,buffer_in_3,buffer_in_4,buffer_in_5,buffer_in_6,buffer_in_7,buffer_in_8,buffer_in_9,buffer_in_10,buffer_in_11,buffer_in_12,buffer_in_13,encoder_in_deploy +fsr_270_mindspore.pb +fsr_360_mindspore.pb +fsr_720_mindspore.pb 
+ml_tts_decoder.pb;5:h_1,c_1,h_0,decoder_inputs_array,c_0 +ml_video_edit_shot_selection_opticalFlow.pb;1:input +tensor_dot.pb;1:input;1,217 +g_00730000_female10_frames_tf1.pb;150:mel,resblocks_0_0,resblocks_0_1,up_0,resblocks_1_0,resblocks_1_1,up_1,resblocks_2_0,resblocks_2_1,up_2,resblocks_3_0,resblocks_3_1,up_3,resblocks_4_0,resblocks_4_1,up_4,residuals_0_0_0_0,residuals_0_0_0_1,residuals_0_0_1_0,residuals_0_0_1_1,residuals_0_0_2_0,residuals_0_0_2_1,residuals_0_0_3_0,residuals_0_0_3_1,residuals_0_2_2_0,residuals_0_2_2_1,residuals_0_2_3_0,residuals_0_2_3_1,residuals_1_0_0_0,residuals_1_0_0_1,residuals_1_0_1_0,residuals_1_0_1_1,residuals_1_0_2_0,residuals_1_0_2_1,residuals_1_0_3_0,residuals_1_0_3_1,residuals_1_1_0_0,residuals_1_1_0_1,residuals_1_1_1_0,residuals_1_1_1_1,residuals_1_1_2_0,residuals_1_1_2_1,residuals_1_1_3_0,residuals_1_1_3_1,1_2_0_0,residuals_1_2_0_1,residuals_1_2_1_0,residuals_1_2_1_1,residuals_1_2_2_0,residuals_1_2_2_1,residuals_1_2_3_0,residuals_1_2_3_1,residuals_2_1_0_0,residuals_2_1_0_1,residuals_2_1_1_0,residuals_2_1_1_1,residuals_2_1_2_0,residuals_2_1_2_1,residuals_2_1_3_0,residuals_2_1_3_1,2_2_0_0,residuals_2_2_0_1,residuals_2_2_1_0,residuals_2_2_1_1,residuals_2_2_2_0,residuals_2_2_2_1,residuals_2_2_3_0,residuals_2_2_3_1,residuals_3_0_0_0,residuals_3_0_0_1,residuals_3_0_1_0,residuals_3_0_1_1,residuals_3_0_2_0,residuals_3_0_2_1,residuals_3_0_3_0,residuals_3_0_3_1,residuals_3_2_2_0,residuals_3_2_2_1,residuals_3_2_3_0,residuals_3_2_3_1,residuals_4_0_0_0,residuals_4_0_0_1,residuals_4_0_1_0,residuals_4_0_1_1,residuals_4_0_2_0,residuals_4_0_2_1,residuals_4_0_3_0,residuals_4_0_3_1,residuals_4_2_2_0,residuals_4_2_2_1,residuals_4_2_3_0,residuals_4_2_3_1,cond_up_0,cond_up_1,cond_up_2,cond_up_3,mel_delay_1,mel_delay_2,mel_delay_3,mel_delay_4,res_output_0,res_output_1,res_output_2,conv_noise_stack,conv_pre_stack,conv_post_stack diff --git a/mindspore/lite/test/config/models_tflite_gpu_fp16.cfg b/mindspore/lite/test/config/models_tflite_gpu_fp16.cfg new file mode 100644 index 00000000000..b97c4444197 --- /dev/null +++ b/mindspore/lite/test/config/models_tflite_gpu_fp16.cfg @@ -0,0 +1,155 @@ +# [first column]:model_name, If you need input shape, please connect it through ';1;' after the model name, where '1' is the input num. 
+# [second column]:accuracy limit in arm64 +mobilenet_v1_1.0_224.tflite +mobilenet_v2_1.0_224.tflite +mtk_age_gender_fp16.tflite +mtk_isface.tflite +mtk_landmark.tflite +mtk_new_detect.tflite +mtk_pose.tflite +mtk_model_emotions_0727_nosoftmax.tflite +Q_AADB_HADB_MBV2_model.tflite +Q_dila-small-mix-full-fineturn-390000-nopixel-nosigmoid_tflite.tflite +Q_inception-249970-672-11-16_pb2tflite.tflite +Q_isface.tflite +Q_landmark.tflite +Q_language_model_hrmini_Q4_b4_17w.tflite +Q_new_detect.tflite +Q_object_scene.tflite +Q_pose.tflite +Q_iMaxDN_RGB_385_p_RGB_RGB_pb2tflite.tflite +Q_iMaxSR_RGB_385_p_pb2tflite.tflite +Q_detect_fpn_add_inception-1448650.tflite +Q888_face_dress_mv3y.tflite +Q888_HADB_AADB_MBV2_model_fp32.tflite +Q888_landmark.tflite +Q888_pose.tflite +Q888_isface.tflite +Q888_new_detect.tflite +Q888_model_normalize_object_scene_ps_20200826_f32_no_softmax.tflite +Q888_face_emo_dress_mv3_orderd.tflite +mtk_AADB_HADB_MBV2_model_fp32.tflite +mtk_age_gender.tflite +mtk_model_face_dress.tflite +mtk_model_normalize_object_scene_ps_20200519_f16.tflite +mtk_AADB_HADB_MBV2_model_f16.tflite +mtk_model_emotions_0725_fp16.tflite +# Q888_age_gender_orderd.tflite's precision deteriorates in P50 +Q888_age_gender_orderd.tflite;1:input 9 +ml_ocr_latin.tflite;1;1,32,512,1 10 +mtk_AADB_HADB_MBV2_model_fp32.tflite;1:input_0 3 +resnet.tflite +squeezenet.tflite +# hiai_cn_recognize_modify_padv2.tflite's precision deteriorates in P50 +hiai_cn_recognize_modify_padv2.tflite 10 +hiai_model_normalize_object_scene_ps_20200519.tflite 18 +inception_v3.tflite +mtk_model_normalize_object_scene_ps_20200826_f32_no_softmax.tflite 29 +mtk_276landmark_0913.tflite 7 +mtk_face_recognition.tflite 8 +mtk_convert_model.tflite +mtk_model_face_dress_fp16.tflite +Q_convert.tflite 9 +#Q_crnn_ori_75w_slim_norm_pb2tflite.tflite's precision deteriorates in P50 +Q_crnn_ori_75w_slim_norm_pb2tflite.tflite 23 +#Q_crnn_ori_v2_405001_notrans_nopre_pb2tflite.tflite's precision deteriorates in P50 +Q_crnn_ori_v2_405001_notrans_nopre_pb2tflite.tflite 33 +Q_crnn_screen_slim400w_more_20w_pb2tflite.tflite 31 +Q_focusocr_cn_recog.tflite 24 +Q_focusocr_jk_recog.tflite 14 +add_uint8.tflite;2 +gts_detect_5k_tf115.tflite +smartreply.tflite +ml_text_correction.tflite +ml_ocr_jk_pb2tflite.tflite +scan_hms_angle_pb2tflite.tflite +scan_hms_detect_pb2tflite.tflite 16 +ml_face_openclose_tflite.tflite +unet_mbv2_05_104pts.tflite 8 +hiai_AADB_HADB_MBV2_model_f16.tflite +hiai_AADB_HADB_MBV2_model_fp32.tflite +hiai_detect_curve_model_float32.tflite +smartreply_1_default_1.tflite +text_classification.tflite +mobilenet_v1_0.25_160.tflite;1:input +mobilenet_v1_0.25_192.tflite;1:input +mobilenet_v1_0.25_224.tflite;1:input +mobilenet_v1_0.5_128.tflite;1:input +mobilenet_v1_0.5_192.tflite;1:input +mobilenet_v1_0.5_224.tflite;1:input +mobilenet_v1_0.75_128.tflite;1:input +mobilenet_v1_0.75_160.tflite;1:input +mobilenet_v1_0.75_224.tflite;1:input +mobilenet_v1_1.0_128.tflite;1:input 7 +mobilenet_v1_1.0_192.tflite;1:input 6 +hiai_latin_ocr.tflite;1:input_0 30 +hiai_latin_ocr_1.tflite;1:input_0 13 +siteAI_digcom_g2v_keras.tflite;1:conv2d_1_input +siteAI_trans_nonlinear.tflite;1:features_placeholder +siteAI_trans_tcpclassify.tflite;1:conv2d_1_input +siteAI_wireless_depress_w.tflite;1:x-input 8 +siteAI_wireless_restore_w.tflite;1:x-input +magenta_arbitrary-image-stylization-v1-256_fp16_prediction_1.tflite;1:style_image +hiai_cpu_face_emotion.tflite;1:input_0 +hiai_cpu_face_gazing.tflite;1:input_0 +hiai_cpu_face_headpose.tflite;1:input_0 
+hiai_humanDetection.tflite;1:normalized_input_image_tensor 11 +ml_face_openclose.tflite;1:input +hiai_face_model_npu.tflite;1:input_0 +hiai_ctpn_feature_map.tflite;1:input_image +hiai_cv_labelDetectorModel_v2.tflite;1:input_0 17 +hiai_cv_labelDetectorModel_v4.tflite;1:input_0 +hiai_dress_detect.tflite;1:data +hiai_cv_saliencyDetectorModel.tflite;1:image_tensor +hiai_frozen_inference_graph.tflite;1:image_tensor +hiai_ghostnet.tflite;1:input +hiai_label_and_video.tflite;1:input_0 7 +hiai_lm_inference_graph.tflite;1:image_tensor +efficientnet_lite0_fp32_2.tflite;1:images +efficientnet_lite1_fp32_2.tflite;1:images +efficientnet_lite2_fp32_2.tflite;1:images +efficientnet_lite3_fp32_2.tflite;1:images +efficientnet_lite4_fp32_2.tflite;1:images +mnasnet_0.50_224_1_metadata_1.tflite;1:input +mnasnet_0.75_224_1_metadata_1.tflite;1:input +mnasnet_1.0_128_1_metadata_1.tflite;1:input +mnasnet_1.0_160_1_metadata_1.tflite;1:input +mnasnet_1.0_192_1_metadata_1.tflite;1:input +mnasnet_1.0_224_1_metadata_1.tflite;1:input +mnasnet_1.0_96_1_metadata_1.tflite;1:input +posenet_mobilenet_float_075_1_default_1.tflite;1:sub_2 39 +deeplabv3_1_default_1.tflite;1:sub_7 +lite-model_arbitrary-image-stylization-inceptionv3_fp16_predict_1.tflite;1:style_image +mindspore_text_classification_tflite.tflite;1:base_input +ml_ocr_latin_pb2tflite.tflite;1:input_0 9 +ml_location.tflite;1:inputs +bloom_new_detect.tflite;1:input +bloom_model_age_gender.tflite;1:input +bloom_isface.tflite;1:data +hiai_object_detect_814.tflite;1:normalized_input_image_tensor 10 +hiai_object_tflite_graph_8bit.tflite;1:normalized_input_image_tensor +lma_tsec_shallow_channels16_ds2.1.1_model-best-f1.tflite;1:inputs +ml_video_edit_img_segment_adaptise_pb2tflite.tflite;2:backbone_features2,w 12 +hiai_cv_labelDetectorModel_v3.tflite;2:input_0,input_1 +ml_headpose_pb2tflite.tflite;3:input_1,batch_normalization_8/batchnorm/add,batch_normalization_1/batchnorm/add;1,64,64,3:16:16 +ml_ei_headpose_pb2tflite.tflite;3:input_1,batch_normalization_8/batchnorm_1/add,batch_normalization_1/batchnorm_1/add;1,64,64,3:16:16 +#lite-model_mobilebert_1_metadata_1.tflite's precision deteriorates in P50 +lite-model_mobilebert_1_metadata_1.tflite;3:input_ids,input_mask,segment_ids 23 +coco_ssd_mobilenet_v1_1.0.tflite +bloom_landmark.tflite;1:img +mtk_AADB_HADB_MBV3_model_fp32.tflite;1:input_0 +mobilenet_v1_0.25_128.tflite;1:input +mobilenet_v1_0.5_160.tflite;1:input +mobilenet_v1_0.75_192.tflite;1:input +mobilenet_v1_1.0_160.tflite;1:input +densenet.tflite;1:Placeholder +resnet_v2_101_299.tflite;1:input +deeplabv3_257_mv_gpu.tflite;1:sub_7 +ml_ei_headpose.tflite;1:input_1 +mnist.tflite;1:conv2d_input +mobilenet.tflite;1:conv2d_input +ml_ocr_jk.tflite;1:input_0 +nasnet_mobile.tflite;1:input +nasnet_large.tflite;1:input +model_emotions_0727_nosoftmax.tflite;1:input +inception_v4.tflite;1:input diff --git a/mindspore/lite/test/config/models_tflite_gpu_fp32.cfg b/mindspore/lite/test/config/models_tflite_gpu_fp32.cfg new file mode 100644 index 00000000000..acbdf207acd --- /dev/null +++ b/mindspore/lite/test/config/models_tflite_gpu_fp32.cfg @@ -0,0 +1,188 @@ +# [first column]:model_name, If you need input shape, please connect it through ';1;' after the model name, where '1' is the input num. 
+# [second column]:accuracy limit in arm64 +mobilenet_v1_1.0_224.tflite +mobilenet_v2_1.0_224.tflite +resnet.tflite +squeezenet.tflite +mtk_AADB_HADB_MBV2_model_fp32.tflite +hiai_cn_recognize_modify_padv2.tflite +hiai_cv_focusShootOCRModel_08.tflite +hiai_model_normalize_object_scene_ps_20200519.tflite +inception_v3.tflite +mtk_age_gender_fp16.tflite +mtk_isface.tflite +mtk_landmark.tflite +mtk_new_detect.tflite +mtk_pose.tflite +mtk_model_emotions_0727_nosoftmax.tflite +mtk_model_normalize_object_scene_ps_20200826_f32_no_softmax.tflite +mtk_276landmark_0913.tflite +mtk_face_recognition.tflite +mtk_convert_model.tflite +mtk_model_face_dress_fp16.tflite +Q_AADB_HADB_MBV2_model.tflite +Q_convert.tflite +Q_crnn_ori_75w_slim_norm_pb2tflite.tflite +Q_crnn_ori_v2_405001_notrans_nopre_pb2tflite.tflite +Q_crnn_screen_slim400w_more_20w_pb2tflite.tflite +Q_dila-small-mix-full-fineturn-390000-nopixel-nosigmoid_tflite.tflite +Q_focusocr_cn_recog.tflite +Q_focusocr_jk_recog.tflite +Q_inception-249970-672-11-16_pb2tflite.tflite +Q_isface.tflite +Q_landmark.tflite +Q_language_model_hrmini_Q4_b4_17w.tflite +Q_new_detect.tflite +Q_object_scene.tflite +Q_pose.tflite +add_uint8.tflite;2 +Q_iMaxDN_RGB_385_p_RGB_RGB_pb2tflite.tflite +Q_iMaxSR_RGB_385_p_pb2tflite.tflite +gts_detect_5k_tf115.tflite +smartreply.tflite +ml_text_correction.tflite +ml_ocr_jk_pb2tflite.tflite +scan_hms_angle_pb2tflite.tflite +scan_hms_detect_pb2tflite.tflite +ml_face_openclose_tflite.tflite +unet_mbv2_05_104pts.tflite +hiai_AADB_HADB_MBV2_model_f16.tflite +hiai_AADB_HADB_MBV2_model_fp32.tflite +hiai_detect_curve_model_float32.tflite +hiai_detectmodel_06_23_960_480_1180700.tflite +lite-model_aiy_vision_classifier_food_V1_1.tflite +lite-model_disease-classification_1.tflite +lite-model_models_mushroom-identification_v1_1.tflite +smartreply_1_default_1.tflite +text_classification.tflite +Q_detect_fpn_add_inception-1448650.tflite +Q_hand_0812_pb2tflite.tflite +bloom_landmark.tflite +Q888_face_dress_mv3y.tflite +Q888_HADB_AADB_MBV2_model_fp32.tflite +Q888_landmark.tflite +Q888_pose.tflite +Q888_lapa158_unet_0924.tflite +Q888_isface.tflite +Q888_new_detect.tflite +Q888_model_normalize_object_scene_ps_20200826_f32_no_softmax.tflite +Q888_face_emo_dress_mv3_orderd.tflite +mtk_model_normalize_object_scene_ps_20200519_f32.tflite +hiai_model_0909_kd_rot_ps_softmax.tflite +hiai_chinese_english_recognize_model_float32.tflite +hiai_bigmodel_ghost_2_1_no_normalized_no_trans_tflite.tflite +hiai_bigmodel_ghost_5_1_no_normalized_no_trans_tflite.tflite +hiai_detectmodel_desnet_256_128_64_32.tflite +mtk_AADB_HADB_MBV3_model_fp32.tflite +mobilenet_v1_0.25_128.tflite +mobilenet_v1_0.5_160.tflite +mobilenet_v1_0.75_192.tflite +mobilenet_v1_1.0_160.tflite +mtk_model_ckpt.tflite +mtk_age_gender.tflite +mtk_model_face_dress.tflite +mtk_face_features_v1.tflite +densenet.tflite +resnet_v2_101_299.tflite +mnasnet_1.3_224.tflite +deeplabv3_257_mv_gpu.tflite +multi_person_mobilenet_v1_075_float.tflite +ide_label_base.tflite +ml_ei_headpose.tflite +mnist.tflite +mobilenet.tflite +scan_hms_angle1.tflite +scan_hms_detect.tflite +ml_ocr_jk.tflite +nasnet_mobile.tflite +nasnet_large.tflite +model_emotions_0727_nosoftmax.tflite +inception_resnet_v2.tflite +hiai_PoseEstimation_Pcm.tflite +hiai_ssd_mobilenetv2_object.tflite +hiai_cv_focusShootOCRModel_02.tflite +hiai_cv_poseEstimation.tflite +inception_v4.tflite +mtk_model_normalize_object_scene_ps_20200519_f16.tflite +mtk_AADB_HADB_MBV2_model_f16.tflite +mtk_AADB_HADB_MBV3_model_f16.tflite 
+mtk_model_emotions_0725_fp16.tflite +mtk_face_features_v1_fp16.tflite +Q888_age_gender_orderd.tflite +ml_pic_shopping.tflite +ml_ocr_latin.tflite;1;1,32,512,1 10 +hiai_asr_last_e1_cpu_fast_wavenet_batch1_frame1_one_cache_fp32.tflite;2 +mobilenet_v1_0.25_160.tflite;1:input +mobilenet_v1_0.25_192.tflite;1:input +mobilenet_v1_0.25_224.tflite;1:input +mobilenet_v1_0.5_128.tflite;1:input +mobilenet_v1_0.5_192.tflite;1:input +mobilenet_v1_0.5_224.tflite;1:input +mobilenet_v1_0.75_128.tflite;1:input +mobilenet_v1_0.75_160.tflite;1:input +mobilenet_v1_0.75_224.tflite;1:input +mobilenet_v1_1.0_128.tflite;1:input +mobilenet_v1_1.0_192.tflite;1:input +hiai_latin_ocr.tflite;1:input_0 +hiai_latin_ocr_1.tflite;1:input_0 +siteAI_digcom_g2v_keras.tflite;1:conv2d_1_input +siteAI_trans_nonlinear.tflite;1:features_placeholder +siteAI_trans_tcpclassify.tflite;1:conv2d_1_input +siteAI_wireless_depress_w.tflite;1:x-input +siteAI_wireless_restore_w.tflite;1:x-input +magenta_arbitrary-image-stylization-v1-256_fp16_prediction_1.tflite;1:style_image +hiai_cpu_face_emotion.tflite;1:input_0 +hiai_cpu_face_gazing.tflite;1:input_0 +hiai_cpu_face_headpose.tflite;1:input_0 +hiai_humanDetection.tflite;1:normalized_input_image_tensor +ml_face_openclose.tflite;1:input +hiai_face_model_npu.tflite;1:input_0 +hiai_ctpn_feature_map.tflite;1:input_image +hiai_cv_labelDetectorModel_v2.tflite;1:input_0 +hiai_cv_labelDetectorModel_v4.tflite;1:input_0 +hiai_dress_detect.tflite;1:data +hiai_cv_saliencyDetectorModel.tflite;1:image_tensor +hiai_frozen_inference_graph.tflite;1:image_tensor +hiai_ghostnet.tflite;1:input +hiai_iMaxDN_RGB.tflite;1:input +hiai_iMaxSR_RGB.tflite;1:input +hiai_label_and_video.tflite;1:input_0 +hiai_lm_inference_graph.tflite;1:image_tensor +efficientnet_lite0_fp32_2.tflite;1:images +efficientnet_lite1_fp32_2.tflite;1:images +efficientnet_lite2_fp32_2.tflite;1:images +efficientnet_lite3_fp32_2.tflite;1:images +efficientnet_lite4_fp32_2.tflite;1:images +mnasnet_0.50_224_1_metadata_1.tflite;1:input +mnasnet_0.75_224_1_metadata_1.tflite;1:input +mnasnet_1.0_128_1_metadata_1.tflite;1:input +mnasnet_1.0_160_1_metadata_1.tflite;1:input +mnasnet_1.0_192_1_metadata_1.tflite;1:input +mnasnet_1.0_224_1_metadata_1.tflite;1:input +mnasnet_1.0_96_1_metadata_1.tflite;1:input +posenet_mobilenet_float_075_1_default_1.tflite;1:sub_2 +deeplabv3_1_default_1.tflite;1:sub_7 +lite-model_deeplabv3-mobilenetv2_dm05-float16_1_default_1.tflite;1:sub_7 +lite-model_deeplabv3-mobilenetv2-float16_1_default_1.tflite;1:sub_7 +lite-model_east-text-detector_fp16_1.tflite;1:input_images +lite-model_arbitrary-image-stylization-inceptionv3_fp16_predict_1.tflite;1:style_image +mindspore_text_classification_tflite.tflite;1:base_input +ml_pic_shopping_pb2tflite.tflite;1:images +ml_ocr_latin_pb2tflite.tflite;1:input_0 +ml_location.tflite;1:inputs +bloom_new_detect.tflite;1:input +bloom_model_age_gender.tflite;1:input +bloom_isface.tflite;1:data +hiai_object_detect_814.tflite;1:normalized_input_image_tensor +hiai_object_tflite_graph_8bit.tflite;1:normalized_input_image_tensor +lma_tsec_shallow_channels16_ds2.1.1_model-best-f1.tflite;1:inputs +lite-model_arbitrary-image-stylization-inceptionv3_fp16_transfer_1.tflite;2:content_image,Conv/BiasAdd +magenta_arbitrary-image-stylization-v1-256_fp16_transfer_1.tflite;2:content_image,mobilenet_conv/Conv/BiasAdd +ml_video_edit_img_segment_adaptise_pb2tflite.tflite;2:backbone_features2,w +ml_video_edit_video_segment_gauss_adaptis_part2_pb2tflite.tflite;2:backbone_features2,w 
+hiai_cv_labelDetectorModel_v3.tflite;2:input_0,input_1
+ml_headpose_pb2tflite.tflite;3:input_1,batch_normalization_8/batchnorm/add,batch_normalization_1/batchnorm/add;1,64,64,3:16:16
+ml_ei_headpose_pb2tflite.tflite;3:input_1,batch_normalization_8/batchnorm_1/add,batch_normalization_1/batchnorm_1/add;1,64,64,3:16:16
+lite-model_mobilebert_1_metadata_1.tflite;3:input_ids,input_mask,segment_ids
+hiai_vad.tflite;2:input,input_cache
+coco_ssd_mobilenet_v1_1.0.tflite
diff --git a/mindspore/lite/test/st/run_benchmark_nets.sh b/mindspore/lite/test/st/run_benchmark_nets.sh
index c5e2ed9982f..a51986bd3e1 100644
--- a/mindspore/lite/test/st/run_benchmark_nets.sh
+++ b/mindspore/lite/test/st/run_benchmark_nets.sh
@@ -49,7 +49,8 @@ if [[ $backend == "all" || $backend == "arm32_cpu" || $backend == "arm32_fp32" |
     fi
 fi
 
-if [[ $backend == "all" || $backend == "gpu" || $backend == "gpu_gl_texture" ]]; then
+if [[ $backend == "all" || $backend == "gpu" || $backend == "gpu_onnx_mindir" || $backend == "gpu_tf_caffe" || \
+      $backend == "gpu_tflite" || $backend == "gpu_gl_texture" ]]; then
     sh $cur_path/scripts/run_benchmark_gpu.sh -r $release_path -m $models_path -d $device_id -e $backend -p $fail_not_return
     gpu_status=$?
     if [[ $gpu_status -ne 0 ]]; then
@@ -77,6 +78,16 @@ if [[ $backend == "all" || $backend == "x86-all" || $backend == "x86_onnx" || $b
     fi
 fi
 
+if [[ $backend == "all" || $backend == "x86-all" || $backend == "x86_avx512" || $backend == "x86_avx512_onnx" || $backend == "x86_avx512_tf" || \
+      $backend == "x86_avx512_tflite" || $backend == "x86_avx512_caffe" || $backend == "x86_avx512_mindir" ]]; then
+    sh $cur_path/scripts/run_benchmark_x86.sh -r $release_path -m $models_path -e $backend -p $fail_not_return
+    x86_status=$?
+    if [[ $x86_status -ne 0 ]]; then
+        echo "Run x86 avx512 failed"
+        exit 1
+    fi
+fi
+
 if [[ $backend == "all" || $backend == "codegen" ]]; then
     # run codegen
     sh $cur_path/scripts/run_benchmark_codegen.sh -r $release_path -m $models_path -d $device_id -e $backend
@@ -141,3 +152,13 @@ if [[ $backend == "all" || $backend == "x86_gpu" ]]; then
     exit 1
   fi
 fi
+
+if [[ $backend == "all" || $backend == "x86_ascend310" || $backend == "x86_ascend710" ]]; then
+    sh $cur_path/scripts/ascend/run_ascend.sh -r $release_path -m $models_path -d $device_id -e $backend
+    ascend_status=$?
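+    # A non-zero status from run_ascend.sh aborts the whole suite below.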
+    if [[ ${ascend_status} -ne 0 ]]; then
+        echo "Run ${backend} failed"
+        exit 1
+    fi
+fi
diff --git a/mindspore/lite/test/st/scripts/run_benchmark_gpu.sh b/mindspore/lite/test/st/scripts/run_benchmark_gpu.sh
index 611dd794556..9d6fb58923a 100644
--- a/mindspore/lite/test/st/scripts/run_benchmark_gpu.sh
+++ b/mindspore/lite/test/st/scripts/run_benchmark_gpu.sh
@@ -18,8 +18,18 @@ function Run_Converter() {
     backend=${backend:-"all"}
     if [[ $backend == "gpu_gl_texture" ]]; then
         local cfg_file_list=("$models_gpu_gl_texture_fp32_config" "$models_mindrt_parallel_config" "$models_gpu_weightquant_config" "$cropper_config")
+    elif [[ $backend == "gpu_onnx_mindir" ]]; then
+        local cfg_file_list=("$models_onnx_gpu_fp32_config" "$models_mindspore_gpu_fp32_config")
+    elif [[ $backend == "gpu_tf_caffe" ]]; then
+        local cfg_file_list=("$models_tf_gpu_fp32_config" "$models_caffe_gpu_fp32_config")
+    elif [[ $backend == "gpu_tflite" ]]; then
+        local cfg_file_list=("$models_tflite_gpu_fp32_config")
+    elif [[ $backend == "all" || $backend == "gpu" ]]; then
+        local cfg_file_list=("$models_onnx_gpu_fp32_config" "$models_mindspore_gpu_fp32_config"
+                             "$models_tf_gpu_fp32_config" "$models_caffe_gpu_fp32_config" "$models_tflite_gpu_fp32_config")
     else
-        local cfg_file_list=("$models_gpu_fp32_config")
+        echo "unknown backend: ${backend}"
+        return 1
     fi
     # Convert models:
     # $1:cfgFileList; $2:inModelPath; $3:outModelPath; $4:logFile; $5:resultFile;
@@ -32,16 +42,30 @@ function Run_gpu() {
     backend=${backend:-"all"}
     if [[ $backend == "gpu_gl_texture" ]]; then
         local gpu_cfg_file_list=("$models_gpu_gl_texture_fp32_config" "$models_gpu_weightquant_config")
+    elif [[ $backend == "gpu_onnx_mindir" ]]; then
+        local gpu_cfg_file_list=("$models_mindspore_gpu_fp32_config" "$models_mindspore_gpu_fp16_config"
+                                 "$models_onnx_gpu_fp32_config" "$models_onnx_gpu_fp16_config")
+    elif [[ $backend == "gpu_tf_caffe" ]]; then
+        local gpu_cfg_file_list=("$models_tf_gpu_fp32_config" "$models_tf_gpu_fp16_config"
+                                 "$models_caffe_gpu_fp32_config" "$models_caffe_gpu_fp16_config")
+    elif [[ $backend == "gpu_tflite" ]]; then
+        local gpu_cfg_file_list=("$models_tflite_gpu_fp32_config" "$models_tflite_gpu_fp16_config")
+    elif [[ $backend == "all" || $backend == "gpu" ]]; then
+        local gpu_cfg_file_list=("$models_mindspore_gpu_fp32_config" "$models_mindspore_gpu_fp16_config"
+                                 "$models_onnx_gpu_fp32_config" "$models_onnx_gpu_fp16_config"
+                                 "$models_tf_gpu_fp32_config" "$models_tf_gpu_fp16_config"
+                                 "$models_caffe_gpu_fp32_config" "$models_caffe_gpu_fp16_config"
+                                 "$models_tflite_gpu_fp32_config" "$models_tflite_gpu_fp16_config")
     else
-        local gpu_cfg_file_list=("$models_gpu_fp32_config" "$models_gpu_fp16_config")
+        echo "unknown backend: ${backend}"
+        return 1
     fi
-
+
     # Run converted models:
     # $1:cfgFileList; $2:modelPath; $3:dataPath; $4:logFile; $5:resultFile; $6:platform; $7:processor; $8:phoneId;
     Run_Benchmark "${gpu_cfg_file_list[*]}" .
'/data/local/tmp' $run_gpu_log_file $run_benchmark_result_file 'arm64' 'GPU' $device_id $gpu_fail_not_return } - function Run_mindrt_parallel() { while read line; do model_name=${line} @@ -137,8 +161,16 @@ IFS="-" read -r -a file_name_array <<< "$file_name" version=${file_name_array[2]} # Set models config filepath -models_gpu_fp32_config=${basepath}/../config/models_gpu_fp32.cfg -models_gpu_fp16_config=${basepath}/../config/models_gpu_fp16.cfg +models_caffe_gpu_fp32_config=${basepath}/../config/models_caffe_gpu_fp32.cfg +models_caffe_gpu_fp16_config=${basepath}/../config/models_caffe_gpu_fp16.cfg +models_mindspore_gpu_fp32_config=${basepath}/../config/models_mindspore_gpu_fp32.cfg +models_mindspore_gpu_fp16_config=${basepath}/../config/models_mindspore_gpu_fp16.cfg +models_onnx_gpu_fp32_config=${basepath}/../config/models_onnx_gpu_fp32.cfg +models_onnx_gpu_fp16_config=${basepath}/../config/models_onnx_gpu_fp16.cfg +models_tf_gpu_fp32_config=${basepath}/../config/models_tf_gpu_fp32.cfg +models_tf_gpu_fp16_config=${basepath}/../config/models_tf_gpu_fp16.cfg +models_tflite_gpu_fp32_config=${basepath}/../config/models_tflite_gpu_fp32.cfg +models_tflite_gpu_fp16_config=${basepath}/../config/models_tflite_gpu_fp16.cfg cropper_config=${basepath}/../config/models_cropper.cfg models_gpu_gl_texture_fp32_config=${basepath}/../config/models_gpu_gl_texture_fp32.cfg models_gpu_weightquant_config=${basepath}/../config/models_weightquant_8bit_gpu.cfg @@ -188,7 +220,8 @@ Push_Files $arm64_path "aarch64" $version $benchmark_test_path "adb_push_log.txt backend=${backend:-"all"} isFailed=0 -if [[ $backend == "all" || $backend == "gpu" || $backend == "gpu_gl_texture" ]]; then +if [[ $backend == "all" || $backend == "gpu" || $backend == "gpu_onnx_mindir" || $backend == "gpu_tf_caffe" || \ + $backend == "gpu_tflite" || $backend == "gpu_gl_texture" ]]; then # Run on gpu echo "start Run gpu ..." Run_gpu @@ -214,7 +247,8 @@ if [[ $backend == "all" || $backend == "gpu_gl_texture" || $backend == "cropper" # sleep 1 fi -if [[ $backend == "all" || $backend == "gpu" || $backend == "gpu_gl_texture" ]]; then +if [[ $backend == "all" || $backend == "gpu" || $backend == "gpu_onnx_mindir" || $backend == "gpu_tf_caffe" || \ + $backend == "gpu_tflite" || $backend == "gpu_gl_texture" ]]; then # wait ${Run_gpu_PID} # Run_gpu_status=$? 
if [[ ${Run_gpu_status} != 0 ]];then diff --git a/mindspore/lite/test/st/scripts/run_benchmark_x86.sh b/mindspore/lite/test/st/scripts/run_benchmark_x86.sh index e7a7aab27c2..cbce4d45237 100644 --- a/mindspore/lite/test/st/scripts/run_benchmark_x86.sh +++ b/mindspore/lite/test/st/scripts/run_benchmark_x86.sh @@ -100,6 +100,19 @@ function Run_x86_avx() { Run_Benchmark "${x86_cfg_file_list[*]}" $ms_models_path $models_path $run_x86_avx_log_file $run_benchmark_result_file 'x86_avx' 'CPU' '' $x86_fail_not_return } +# Run on x86 avx512 platform: +function Run_x86_avx512() { + cd ${x86_path}/avx512 || exit 1 + tar -zxf mindspore-lite-${version}-linux-x64.tar.gz || exit 1 + cd ${x86_path}/avx512/mindspore-lite-${version}-linux-x64 || exit 1 + export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./runtime/lib + cp tools/benchmark/benchmark ./ || exit 1 + + # Run converted models: + # $1:cfgFileList; $2:modelPath; $3:dataPath; $4:logFile; $5:resultFile; $6:platform; $7:processor; $8:phoneId; $9:benchmark_mode + Run_Benchmark "${x86_cfg_file_list[*]}" $ms_models_path $models_path $run_x86_avx512_log_file $run_benchmark_result_file 'x86_avx512' 'CPU' '' $x86_fail_not_return +} + # Run on x86 java platform: function Run_x86_java() { cd ${x86_path} || exit 1 @@ -220,9 +233,11 @@ while getopts "r:m:e:p:" opt; do done x86_path=${release_path}/centos_x86 -file_name=$(ls ${x86_path}/*-linux-x64.tar.gz) +cd ${x86_path} +file_name=$(ls *-linux-x64.tar.gz) IFS="-" read -r -a file_name_array <<< "$file_name" version=${file_name_array[2]} +cd - # Set models config filepath models_tflite_parallel_split_config=${basepath}/../config/models_parallel_split.cfg @@ -244,19 +259,21 @@ models_process_only_config=${basepath}/../config/models_process_only.cfg # Prepare the config file list x86_cfg_file_list=() -if [[ $backend == "x86_tflite" ]]; then +if [[ $backend == "x86_tflite" || $backend == "x86_avx512_tflite" ]]; then x86_cfg_file_list=("$models_tflite_config") -elif [[ $backend == "x86_tf" ]]; then +elif [[ $backend == "x86_tf" || $backend == "x86_avx512_tf" ]]; then x86_cfg_file_list=("$models_tf_config") -elif [[ $backend == "x86_caffe" ]]; then +elif [[ $backend == "x86_caffe" || $backend == "x86_avx512_caffe" ]]; then x86_cfg_file_list=("$models_caffe_config") -elif [[ $backend == "x86_onnx" ]]; then +elif [[ $backend == "x86_onnx" || $backend == "x86_avx512_onnx" ]]; then x86_cfg_file_list=("$models_onnx_config") elif [[ $backend == "x86_mindir" ]]; then x86_cfg_file_list=("$models_mindspore_train_config" "$models_posttraining_config" "$models_tflite_awaretraining_config" \ "$models_weightquant_0bit_config" "$models_weightquant_8bit_config" "$models_weightquant_7bit_config" \ "$models_weightquant_0bit_auto_tune_config" "$models_weightquant_8bit_debug_config"\ "$models_weightquant_9bit_config" "$models_process_only_config" "$models_mindspore_config") +elif [[ $backend == "x86_avx512_mindir" ]]; then + x86_cfg_file_list=("$models_mindspore_config") else x86_cfg_file_list=("$models_tf_config" "$models_tflite_config" "$models_caffe_config" "$models_onnx_config" "$models_mindspore_config" \ "$models_mindspore_train_config" "$models_posttraining_config" "$models_tflite_awaretraining_config" \ @@ -306,6 +323,8 @@ run_x86_java_log_file=${basepath}/run_x86_java_log.txt echo 'run x86 java logs: ' > ${run_x86_java_log_file} run_x86_parallel_split_log_file=${basepath}/run_x86_parallel_split_log.txt echo 'run x86 java logs: ' > ${run_x86_parallel_split_log_file} +run_x86_avx512_log_file=${basepath}/run_x86_avx512_log.txt +echo 'run 
x86 avx512 logs: ' > ${run_x86_avx512_log_file}
 backend=${backend:-"all"}
 isFailed=0
@@ -326,6 +345,14 @@ if [[ $backend == "all" || $backend == "x86-all" || $backend == "x86_avx" || $ba
     Run_x86_avx_PID=$!
     sleep 1
 fi
+if [[ $backend == "all" || $backend == "x86-all" || $backend == "x86_avx512" || $backend == "x86_avx512_onnx" || $backend == "x86_avx512_tf" || \
+      $backend == "x86_avx512_tflite" || $backend == "x86_avx512_caffe" || $backend == "x86_avx512_mindir" ]]; then
+    # Run on x86_avx512
+    echo "start Run avx512 $backend..."
+    Run_x86_avx512 &
+    Run_x86_avx512_PID=$!
+    sleep 1
+fi
 if [[ $backend == "all" || $backend == "x86-all" || $backend == "x86_sse" || $backend == "x86_onnx" || $backend == "x86_tf" || \
     $backend == "x86_tflite" || $backend == "x86_caffe" || $backend == "x86_mindir" ]]; then
     # Run on x86_sse
@@ -370,6 +397,16 @@ if [[ $backend == "all" || $backend == "x86-all" || $backend == "x86_avx" || $ba
     isFailed=1
   fi
 fi
+if [[ $backend == "all" || $backend == "x86-all" || $backend == "x86_avx512" || $backend == "x86_avx512_onnx" || $backend == "x86_avx512_tf" || \
+      $backend == "x86_avx512_tflite" || $backend == "x86_avx512_caffe" || $backend == "x86_avx512_mindir" ]]; then
+    wait ${Run_x86_avx512_PID}
+    Run_x86_avx512_status=$?
+    if [[ ${Run_x86_avx512_status} != 0 ]];then
+        echo "Run_x86_avx512 failed"
+        cat ${run_x86_avx512_log_file}
+        isFailed=1
+    fi
+fi
 if [[ $backend == "all" || $backend == "x86-all" || $backend == "x86_sse" || $backend == "x86_onnx" || $backend == "x86_tf" || \
     $backend == "x86_tflite" || $backend == "x86_caffe" || $backend == "x86_mindir" ]]; then
     wait ${Run_x86_sse_PID}
@@ -399,6 +436,6 @@ if [[ $backend == "all" || $backend == "x86-all" || $backend == "x86_parallel_sp
   fi
 fi
 
-echo "Run_x86 and Run_x86_sse and Run_x86_avx and is ended"
+echo "Run_x86 and Run_x86_sse and Run_x86_avx and Run_x86_avx512 are finished"
 Print_Benchmark_Result $run_benchmark_result_file
 exit ${isFailed}
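
For reference, a minimal sketch of driving the new backend gates end to end, assuming run_benchmark_nets.sh parses the same -r/-m/-d/-e options it forwards to the stage scripts above (the release path, models path, and device id below are placeholders, not part of this patch):

    # GPU job restricted to the TFLite model lists (models_tflite_gpu_fp32.cfg / models_tflite_gpu_fp16.cfg):
    sh mindspore/lite/test/st/run_benchmark_nets.sh -r /path/to/release -m /path/to/models -d <device_id> -e gpu_tflite

    # x86 job that unpacks the avx512 package and benchmarks only the ONNX model list:
    sh mindspore/lite/test/st/run_benchmark_nets.sh -r /path/to/release -m /path/to/models -e x86_avx512_onnx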