add models to the entrance guard

zengxianglong 2021-04-02 18:27:53 +08:00
parent ae91575346
commit 2f0faeeae6
11 changed files with 97 additions and 14 deletions

View File

@@ -116,3 +116,4 @@ ml_ocr_identify_card_detect
identify_card_detect
ml_2012_ocr_rec_caffe
ml_2012_ocr_detection_caffe
ml_face_mnet

View File

@@ -112,3 +112,6 @@ ml_ocr_identify_card_detect 2
identify_card_detect 0.5
ml_2012_ocr_detection_caffe 1
ml_2012_ocr_rec_caffe 0.5
ml_lable_model_hebing_device 2
ml_face_sex 0.5
ml_face_mnet 12

View File

@@ -24,3 +24,4 @@ quant_aware_identify_card_detect.onnx
tiny-yolov3-11.onnx;2;1,416,416,3:1,2
# current accuracy for ml_video_edit_art_transfer is 2+%
ml_video_edit_art_transfer.onnx;3
ml_table_detection.onnx
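
A note on the entry syntax used throughout these lists: a model file name may be extended with a semicolon-separated input count and input shapes, where shapes are ':'-separated per input and ','-separated per dimension (e.g. tiny-yolov3-11.onnx;2;1,416,416,3:1,2). A minimal parsing sketch, assuming exactly that layout (parse_entry is a hypothetical helper, not part of the repository):

    # Minimal sketch: parse one guard-list entry such as
    #   "tiny-yolov3-11.onnx;2;1,416,416,3:1,2"
    # Field meanings are inferred from the entries above (assumption).
    def parse_entry(line: str):
        fields = line.strip().split(';')
        model = fields[0]                       # model file name
        n_inputs = int(fields[1]) if len(fields) > 1 else 1
        shapes = []
        if len(fields) > 2:
            # shapes: ':'-separated per input, ','-separated per dimension
            shapes = [[int(d) for d in shape.split(',')]
                      for shape in fields[2].split(':')]
        return model, n_inputs, shapes

    print(parse_entry("tiny-yolov3-11.onnx;2;1,416,416,3:1,2"))
    # -> ('tiny-yolov3-11.onnx', 2, [[1, 416, 416, 3], [1, 2]])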

View File

@@ -59,7 +59,7 @@ ml_facedetector.onnx
ml_ei_facedetection.onnx
ml_video_edit_art_generate.onnx
ml_location_lane_counter.onnx
# ml_location_lane_counter0.onnx
ml_location_lane_counter0.onnx
mtk_emotions-d2012-75.onnx
mtk_detect-mbv1-shortcut-400-400.onnx
mtk_detect-mbv2-shortcut-400-400.onnx
@@ -73,3 +73,6 @@ mtk_face_recognition_v3.onnx
mtk_face_recognition_v2.onnx
ml_2012_ocr_detection.onnx
ml_video_edit_enhance_update.onnx
#Harmony_Voiceprint_resnet18.onnx
bloom_hongmo_detection.onnx
Q_face_recognition.onnx

View File

@@ -56,7 +56,7 @@ ml_facedetector.onnx 3
ml_ei_facedetection.onnx 2
#ml_video_edit_art_generate.onnx #mul operator overflows, not suitable for fp16
ml_location_lane_counter.onnx 6
# ml_location_lane_counter0.onnx 0.5
ml_location_lane_counter0.onnx 0.5
#The encoder and decoder models are used in the ml_asr scene; both have value overflow. Not suitable for fp16.
#But they are added to guard the conversion process.
encoder.onnx;1,32,83 1262
@@ -72,3 +72,7 @@ mtk_face_features_v2.onnx;1,256,192,3 0.5
mtk_face_recognition_v3.onnx 0.5
mtk_face_recognition_v2.onnx 2.5
ml_2012_ocr_detection.onnx 0.5
#Harmony_Voiceprint_resnet18.onnx;1,1,200,40 4.5
bloom_hongmo_detection.onnx 0.5
Q_face_recognition.onnx 2
ml_video_edit_enhance_update.onnx 0.5
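
The trailing number on each entry is the accuracy tolerance the guard accepts for the converted model; entries such as encoder.onnx;1,32,83 1262 additionally carry an input shape before it. A minimal check sketch, assuming the tolerance is a maximum mean relative error in percent (the real benchmark tool may use a different metric; within_threshold is a hypothetical helper):

    # Sketch: gate a model on its accuracy tolerance (assumption: the
    # listed number is the max accepted mean relative error, in percent).
    def within_threshold(expected, actual, threshold_percent):
        errors = [abs(a - e) / (abs(e) + 1e-9)   # relative error per value
                  for e, a in zip(expected, actual)]
        mean_err = 100.0 * sum(errors) / len(errors)
        return mean_err <= threshold_percent

    # e.g. ml_location_lane_counter0.onnx is gated at 0.5 (percent)
    print(within_threshold([1.0, 2.0], [1.001, 2.004], 0.5))  # True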

View File

@@ -58,3 +58,9 @@ Q_inception-249970-672-11-16.pb
Q_dila-small-mix-full-fineturn-390000-nopixel-nosigmoid.pb
Q_crnn_screen_slim400w_more_20w.pb
matmul.pb
hiai_ssd_mobilenetv2_object.pb
hiai_humanDetection.pb
hiai_model_normalize_object_scene_ps_20200519.pb;1,224,224,3
mtk_face_features_v1.pb
Q_crnn_ori_75w_slim_norm.pb
Q_crnn_ori_v2_405001_notrans_nopre.pb

View File

@@ -9,3 +9,34 @@ ml_face_openclose.pb;1,32,32,3 0.5
ml_object_detect.pb;1,288,288,3 2
Q_crnn_screen_slim400w_more_20w.pb 137
Q_inception-249970-672-11-16.pb 6.5
hiai_latin_ocr_1.pb 23
hiai_ssd_mobilenetv2_object.pb 15
hiai_humanDetection.pb 3.5
hiai_PoseEstimation_Pcm.pb 0.5
hiai_cn_recognize_modify_padv2.pb;1,32,512,1 27
hiai_model_normalize_object_scene_ps_20200519.pb;1,224,224,3 17
mtk_model_ckpt.pb 19
mtk_age_gender.pb 0.5
mtk_model_normalize_object_scene_ps_20200519.pb;1,224,224,3 10
mtk_AADB_HADB_MBV2_model.pb;1,224,224,3 5.5
mtk_AADB_HADB_MBV3_model.pb;1,224,224,3 4
mtk_face_features_v1.pb 26
model_normalize_object_scene_ps_20200519.pb;1,224,224,3 10
hiai_AADB_HADB_MBV2_model.pb;1,224,224,3 6
hiai_frozen_inference_graph.pb 8
hiai_lm_inference_graph.pb 0.6
hiai_ghostnet.pb 0.9
hiai_face_model_npu.pb 0.5
hiai_cv_focusShootOCRModel_02.pb 10.5
hiai_label_and_video.pb;1,224,224,3 23
hiai_dress_detect.pb;1,960,960,3 1.5
hiai_iMaxDN_RGB.pb 0.5
hiai_iMaxSR_RGB.pb 3.5
hiai_ctpn_feature_map.pb 6.5
hiai_cpu_face_gazing.pb 0.5
hiai_cpu_face_emotion.pb 2
hiai_cv_poseEstimation.pb 103
Q_dila-small-mix-full-fineturn-390000-nopixel-nosigmoid.pb 1.5
Q_crnn_ori_75w_slim_norm.pb 190
Q_crnn_ori_v2_405001_notrans_nopre.pb 25
hiai_latin_ocr.pb 12
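
Taken together, these name/tolerance lists are what the commit title calls the entrance guard: every listed model must convert and stay within its tolerance for CI to pass. A rough sketch of that gating loop, assuming one "name tolerance" pair per non-comment line (guard and run_benchmark are hypothetical stand-ins for the real scripts):

    # Rough sketch of the gating loop. run_benchmark is a hypothetical
    # stand-in for the real benchmark invocation and is assumed to
    # return the measured error in percent.
    def guard(config_lines, run_benchmark):
        failed = []
        for raw in config_lines:
            line = raw.strip()
            if not line or line.startswith('#'):   # skip comments/blanks
                continue
            name, _, tolerance = line.rpartition(' ')
            if run_benchmark(name) > float(tolerance):
                failed.append(name)
        return failed  # non-empty means the entrance guard rejects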

View File

@@ -4,9 +4,7 @@ hiai_bigmodel_ghost_2_1_no_normalized_no_trans_tflite.tflite
hiai_bigmodel_ghost_5_1_no_normalized_no_trans_tflite.tflite
hiai_cn_recognize_modify_padv2.tflite
hiai_model_normalize_object_scene_ps_20200519.tflite
#hiai_detectmodel_06_23_960_480_1180700.tflite
#hiai_detect_curve_model_float32.tflite
#hiai_detectmodel_desnet_256_128_64_32.tflite
hiai_detectmodel_desnet_256_128_64_32.tflite
mtk_AADB_HADB_MBV2_model_fp32.tflite
mtk_AADB_HADB_MBV3_model_fp32.tflite
mobilenet_v1_0.25_128.tflite
@@ -162,5 +160,16 @@ ml_ei_landmark_pb2tflite.tflite
unet_mbv2_05_104pts.tflite
hiai_AADB_HADB_MBV2_model_f16.tflite
hiai_AADB_HADB_MBV2_model_fp32.tflite
#hiai_cv_labelDetectorModel_v3.tflite
#hiai_detect_curve_model_float32.tflite
hiai_detect_curve_model_float32.tflite
#hiai_detectmodel_06_23_960_480_1180700.tflite
hiai_detectmodel_desnet_256_128_64_32.tflite
lite-model_aiy_vision_classifier_food_V1_1.tflite
lite-model_disease-classification_1.tflite
lite-model_models_mushroom-identification_v1_1.tflite
#lite-model_albert_lite_base_squadv1_metadata_1.tflite
#lite-model_mobilebert_1_metadata_1.tflite
#smartreply_1_default_1.tflite
text_classification.tflite
Q_detect_fpn_add_inception-1448650.tflite
Q_hand_0812_pb2tflite.tflite
#bloom_landmark.tflite

View File

@@ -158,10 +158,30 @@ lite-model_on_device_vision_classifier_landmarks_classifier_oceania_antarctica_V
lite-model_on_device_vision_classifier_landmarks_classifier_europe_V1_1.tflite 32
lite-model_on_device_vision_classifier_landmarks_classifier_south_america_V1_1.tflite 14
ml_ei_landmark_pb2tflite.tflite 2
unet_mbv2_05_104pts.tflite 14.4
hiai_AADB_HADB_MBV2_model_f16.tflite 2.4
hiai_AADB_HADB_MBV2_model_fp32.tflite 4.1
mtk_age_gender_fp16.tflite 25.5
#hiai_cv_labelDetectorModel_v3.tflite 5.1
#hiai_detect_curve_model_float32.tflite 8.98
unet_mbv2_05_104pts.tflite 15
hiai_AADB_HADB_MBV2_model_f16.tflite 2.5
hiai_AADB_HADB_MBV2_model_fp32.tflite 4.5
mtk_age_gender_fp16.tflite 26
hiai_detect_curve_model_float32.tflite 9
Q_language_model_hrmini_Q4_b4_17w.tflite 3.5
lite-model_aiy_vision_classifier_food_V1_1.tflite 42
lite-model_disease-classification_1.tflite 70
lite-model_models_mushroom-identification_v1_1.tflite 3
#smartreply_1_default_1.tflite 0.5
text_classification.tflite 0.5
Q_AADB_HADB_MBV2_model.tflite 5
Q_convert.tflite 12
Q_crnn_ori_75w_slim_norm_pb2tflite.tflite 200
Q_crnn_ori_v2_405001_notrans_nopre_pb2tflite.tflite 40
Q_crnn_screen_slim400w_more_20w_pb2tflite.tflite 235
Q_dila-small-mix-full-fineturn-390000-nopixel-nosigmoid_tflite.tflite 1.5
Q_focusocr_cn_recog.tflite 200
Q_focusocr_jk_recog.tflite 45
Q_inception-249970-672-11-16_pb2tflite.tflite 6
Q_isface.tflite 0.5
Q_landmark.tflite 0.5
Q_new_detect.tflite 3.5
Q_object_scene.tflite 14
Q_pose.tflite 1.5
Q_detect_fpn_add_inception-1448650.tflite 1
#bloom_landmark.tflite 0.5

View File

@@ -1,6 +1,6 @@
lite-model_arbitrary-image-stylization-inceptionv3_fp16_transfer_1.tflite;2
magenta_arbitrary-image-stylization-v1-256_fp16_transfer_1.tflite;2
# albert_lite_base_squadv1_1.tflite;3
albert_lite_base_squadv1_1.tflite;3
mobilebert_1_default_1.tflite;3
porseg_tmp.onnx;2
ml_video_edit_img_segment_adaptise.pb;2
@@ -24,3 +24,6 @@ ml_male_model_step6_noiseout.pb;66
ml_tts_decoder_control_flow.pb;5
ml_tts_decoder.pb;5
ml_tts_encoder_control_flow.pb;4;1:1,22:1:1
hiai_cv_labelDetectorModel_v3.tflite;2
ml_tts_vocoder.pb;66
ml_tacotron_decoder_step_stf.tflite;9;1,80:1,256:1,1024:1,1024:1,1024:1,1024:1,8:1,1,256:1

View File

@@ -14,3 +14,5 @@ ml_female_model_step6_noiseout.pb;66 2
ml_male_model_step6_noiseout.pb;66 2.5
ml_tts_decoder_control_flow.pb;5 1
ml_tts_decoder.pb;5 117
hiai_cv_labelDetectorModel_v3.tflite;2 5.5
ml_tts_vocoder.pb;66 53