forked from mindspore-Ecosystem/mindspore
!22679 [MSLITE] Fix bug of strided slice.
Merge pull request !22679 from wangshaocong/r1.3
Commit: d3bf6af733
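Note on the change: in the two hunks below, StridedSliceInferShape used to overwrite param->num_axes_ and param->in_shape_length_ with the input rank at the top of the routine, before later code had consumed the operator's original attribute values; the fix seeds transfer_buffer.ndim_ directly from in_shape_size and defers the two parameter updates until after the slice inputs have been read and validated (second hunk). The remaining hunks update the model test lists, replacing each Modify_Out_*-prefixed entry with its original model entry.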
@@ -366,12 +366,9 @@ int StridedSliceInferShape(const TensorC *const *inputs, size_t inputs_size, Ten
   InitStridedSliceTransferBuffer(&transfer_buffer);
 
   StridedSliceParameter *param = (StridedSliceParameter *)parameter;
-  param->num_axes_ = (int)(in_shape_size);
-  param->in_shape_length_ = (int)(in_shape_size);
-
   transfer_buffer.ndim_ = 0;
   if (inputs_size == kStridedSliceInputNum) {
-    transfer_buffer.ndim_ = (int)(param->num_axes_);
+    transfer_buffer.ndim_ = (int)(in_shape_size);
     if (transfer_buffer.ndim_ > MAX_SHAPE_SIZE) {
       return NNACL_ERR;
     }
@@ -407,6 +404,11 @@ int StridedSliceInferShape(const TensorC *const *inputs, size_t inputs_size, Ten
   if (ret != NNACL_OK) {
     return ret;
   }
+
+  // update parameter with new input shape
+  param->num_axes_ = (int)(in_shape_size);
+  param->in_shape_length_ = (int)(in_shape_size);
+
   ApplyBeginMask(&transfer_buffer);
   ret = ApplyEndMask(&transfer_buffer, in_shape, MAX_SHAPE_SIZE);
   if (ret != NNACL_OK) {
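To make the ordering hazard concrete, here is a minimal self-contained C sketch of the pattern the two hunks fix: a parameter field is clobbered before the code that still needs its original value runs, versus deferring the update. Param, Buffer, and the infer_* functions are hypothetical stand-ins, not the NNACL types:

    #include <stdio.h>

    typedef struct { int num_axes_; } Param;   /* stand-in for StridedSliceParameter */
    typedef struct { int ndim_; } Buffer;      /* stand-in for the transfer buffer */

    /* Buggy order: the attribute is overwritten before later code consumes it. */
    static int infer_buggy(Param *p, Buffer *b, int in_shape_size) {
      p->num_axes_ = in_shape_size;  /* clobbers the original attribute too early */
      b->ndim_ = p->num_axes_;
      return p->num_axes_;           /* later code sees the input rank, not the attribute */
    }

    /* Fixed order: derive ndim_ from the input shape and update the parameter last. */
    static int infer_fixed(Param *p, Buffer *b, int in_shape_size) {
      b->ndim_ = in_shape_size;
      int original = p->num_axes_;   /* the attribute is still intact here */
      p->num_axes_ = in_shape_size;  /* "update parameter with new input shape" */
      return original;
    }

    int main(void) {
      Param p1 = { .num_axes_ = 2 }, p2 = { .num_axes_ = 2 };  /* op attribute: 2 axes */
      Buffer b1 = { 0 }, b2 = { 0 };
      /* With a 4-D input, only the fixed order still sees the 2-axis attribute. */
      printf("buggy sees %d axes, fixed sees %d axes\n",
             infer_buggy(&p1, &b1, 4), infer_fixed(&p2, &b2, 4));
      return 0;
    }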
@@ -16,5 +16,5 @@ deconvs_model
 # onnx
 ml_2012_ocr_cn.onnx
 # aware_training
-Modify_Out_video_infer2.tflite;;;;aware_training
+video_infer2.tflite;;;;aware_training
 mobilenet_v1_1.0_224_quant.tflite;;;;aware_training

@@ -1,6 +1,6 @@
 mobilenet_v1_1.0_224.tflite
 mobilenet_v2_1.0_224.tflite
-Modify_Out_mtk_age_gender_fp16.tflite
+mtk_age_gender_fp16.tflite
 mtk_isface.tflite
 mtk_landmark.tflite
 mtk_new_detect.tflite

@@ -25,7 +25,7 @@ Q_face_recognition.onnx
 Q888_iris_detect.onnx
 Q_iMaxDN_RGB_385_p_RGB_RGB_pb2tflite.tflite
 Q_iMaxSR_RGB_385_p_pb2tflite.tflite
-Modify_Out_Q_detect_fpn_add_inception-1448650.tflite
+Q_detect_fpn_add_inception-1448650.tflite
 Q888_face_dress_mv3y.tflite
 Q888_HADB_AADB_MBV2_model_fp32.tflite
 Q888_landmark.tflite

@@ -58,4 +58,4 @@ mtk_detect_mbv1_640_480_nopostprocess_simplified
 mtk_model_normalize_object_scene_ps_20200519_f16.tflite
 mtk_AADB_HADB_MBV2_model_f16.tflite
 mtk_model_emotions_0725_fp16.tflite
-Modify_Out_Q888_age_gender_orderd.tflite
+Q888_age_gender_orderd.tflite

@@ -6,10 +6,10 @@ resnet.tflite
 squeezenet.tflite
 mtk_AADB_HADB_MBV2_model_fp32.tflite
 hiai_cn_recognize_modify_padv2.tflite
-Modify_Out_hiai_cv_focusShootOCRModel_08.tflite
+hiai_cv_focusShootOCRModel_08.tflite
 hiai_model_normalize_object_scene_ps_20200519.tflite
 inception_v3.tflite
-Modify_Out_mtk_age_gender_fp16.tflite
+mtk_age_gender_fp16.tflite
 mtk_isface.tflite
 mtk_landmark.tflite
 mtk_new_detect.tflite

@@ -102,7 +102,7 @@ Q_crnn_ori_v2_405001_notrans_nopre.pb
 bolt_segment.pb
 ml_location_lane_counter.onnx 2
 gts_detect_5k_tf115.tflite
-Modify_Out_smartreply.tflite
+smartreply.tflite
 ml_text_correction.tflite
 ml_ocr_jk_pb2tflite.tflite
 scan_hms_angle_pb2tflite.tflite

@@ -111,21 +111,21 @@ ml_face_openclose_tflite.tflite
 unet_mbv2_05_104pts.tflite
 hiai_AADB_HADB_MBV2_model_f16.tflite
 hiai_AADB_HADB_MBV2_model_fp32.tflite
-Modify_Out_hiai_detect_curve_model_float32.tflite
-Modify_Out_hiai_detectmodel_06_23_960_480_1180700.tflite
-Modify_Out_lite-model_aiy_vision_classifier_food_V1_1.tflite
+hiai_detect_curve_model_float32.tflite
+hiai_detectmodel_06_23_960_480_1180700.tflite
+lite-model_aiy_vision_classifier_food_V1_1.tflite
 lite-model_disease-classification_1.tflite
 lite-model_models_mushroom-identification_v1_1.tflite
-Modify_Out_smartreply_1_default_1.tflite
+smartreply_1_default_1.tflite
 text_classification.tflite
-Modify_Out_Q_detect_fpn_add_inception-1448650.tflite
-Modify_Out_Q_hand_0812_pb2tflite.tflite
+Q_detect_fpn_add_inception-1448650.tflite
+Q_hand_0812_pb2tflite.tflite
 bloom_landmark.tflite
 Q888_face_dress_mv3y.tflite
 Q888_HADB_AADB_MBV2_model_fp32.tflite
 Q888_landmark.tflite
 Q888_pose.tflite
-Modify_Out_Q888_lapa158_unet_0924.tflite
+Q888_lapa158_unet_0924.tflite
 Q888_isface.tflite
 Q888_new_detect.tflite
 Q888_model_normalize_object_scene_ps_20200826_f32_no_softmax.tflite

@@ -156,7 +156,7 @@ hiai_model_0909_kd_rot_ps_softmax.tflite
 hiai_chinese_english_recognize_model_float32.tflite
 hiai_bigmodel_ghost_2_1_no_normalized_no_trans_tflite.tflite
 hiai_bigmodel_ghost_5_1_no_normalized_no_trans_tflite.tflite
-Modify_Out_hiai_detectmodel_desnet_256_128_64_32.tflite
+hiai_detectmodel_desnet_256_128_64_32.tflite
 mtk_AADB_HADB_MBV3_model_fp32.tflite
 Q888_face_recognition.onnx
 mobilenet_v1_0.25_128.tflite

@@ -201,7 +201,7 @@ mtk_AADB_HADB_MBV2_model_f16.tflite
 mtk_AADB_HADB_MBV3_model_f16.tflite
 mtk_model_emotions_0725_fp16.tflite
 mtk_face_features_v1_fp16.tflite
-Modify_Out_Q888_age_gender_orderd.tflite
+Q888_age_gender_orderd.tflite
 emotion
 gender_res_large_deploy
 glasses

@@ -88,5 +88,5 @@ ml_video_edit_hair_dyeing_segmodel_v2 0.5
 ml_video_edit_makeup_mobilenetv203.onnx 2
 ml_video_edit_hairline_segmentation;3 0.5
 ml_video_edit_hair_dyeing_migrate_v2.onnx;4 0.5
-Modify_Out_ml_audio_kit_encoder_v5.pb;6:5,2,1,4,6,3;1:1,32:1,32:1,32:1:1,32
+ml_audio_kit_encoder_v5.pb;6:5,2,1,4,6,3;1:1,32:1,32:1,32:1:1,32
 ml_video_edit_hair_dyeing_migrate_v2_fix.onnx;4 1.5

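A note on how to read these list entries (inferred from the entries themselves; the exact field semantics are an assumption, not a documented spec): fields are separated by semicolons — model file, input count (optionally followed by a colon and an input order such as 6:5,2,1,4,6,3), then per-input shapes separated by colons with dimensions separated by commas; a number after a trailing space (0.5, 1.5, ...) appears to be the accuracy tolerance for output comparison. A tiny C sketch of splitting such a line, preserving empty fields (which strtok would drop):

    #include <stdio.h>
    #include <string.h>

    int main(void) {
      /* One entry from the lists above; the field meaning is an assumption. */
      char line[] = "ml_audio_kit_encoder_v5.pb;6:5,2,1,4,6,3;1:1,32:1,32:1,32:1:1,32";
      char *p = line;
      int field = 0;
      while (p != NULL) {
        char *sep = strchr(p, ';');
        if (sep != NULL) *sep = '\0';          /* terminate the current field */
        printf("field %d: %s\n", field++, p);  /* 0: model, 1: input count[:order], 2: shapes */
        p = (sep != NULL) ? sep + 1 : NULL;
      }
      return 0;
    }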
@@ -91,7 +91,7 @@ tacotron_encoder_stf.pb;5;1,62:1,62:1,62:1,62:1;;input_dependent
 female_model_step2_int16_noiseout.pb;66
 ml_female_model_step6_noiseout.pb;66
 ml_male_model_step6_noiseout.pb;66
-Modify_Out_ml_tts_decoder_control_flow.pb;5:5,4,3,1,2
+ml_tts_decoder_control_flow.pb;5:5,4,3,1,2
 ml_tts_decoder.pb;5
 ml_tts_encoder_control_flow.pb;4;1,22:1:1:1;;input_dependent
 ml_tts_vocoder.pb;66

@@ -100,7 +100,7 @@ gts_object_detect_Ics.pb;1;420,630,3;;input_dependent
 hiai_transformer_encoder.pb;15
 decoder_step_nocumsum_v5.pb;13;1,512:1,512:1,512:1,512:1,512:1,127,320:1,1429,2:1,127:1:1,127:1,512:1,80:1,127
 hiai_nlu_model_v2.pb;7;1,5:1,5:1,5:1,98:1,174:1,6:1,5
-Modify_Out_ml_audio_kit_encoder_v5.pb;6:5,2,1,4,6,3;1:1,32:1,32:1,32:1:1,32
+ml_audio_kit_encoder_v5.pb;6:5,2,1,4,6,3;1:1,32:1,32:1,32:1:1,32
 hiai_nlu_model_multi.pb;6;1,32:1,32:1,32:1,74:1,11:1,6
 hiai_nlu_model_single.pb;3;1,32:1,32:1,32
 fsr_270_mindspore.pb

@@ -82,7 +82,7 @@ ml_video_edit_oneclick_adaptis.pb;3 6
 #ml_female_model_step6_noiseout.pb;66 2
 #ml_male_model_step6_noiseout.pb;66 2.5
 ml_tts_encoder_control_flow.pb;4;1,22:1:1:1 1.5
-Modify_Out_ml_tts_decoder_control_flow.pb;5:5,4,3,1,2 1
+ml_tts_decoder_control_flow.pb;5:5,4,3,1,2 1
 #ml_tts_decoder.pb;5 2.5 to open
 #ml_tts_vocoder.pb;66 53
 hiai_transformer_encoder.pb;15 4

@@ -38,7 +38,7 @@ multi_person_mobilenet_v1_075_float.tflite
 ide_label_base.tflite
 ide_label_retrained.tflite
 ml_ei_headpose.tflite
-Modify_Out_ml_ei_landmark.tflite
+ml_ei_landmark.tflite
 mnist.tflite
 mobilenet.tflite
 resnet.tflite

@@ -58,7 +58,7 @@ hiai_cv_focusShootOCRModel_02.tflite
 hiai_cv_poseEstimation.tflite
 inception_v4.tflite
 mtk_model_normalize_object_scene_ps_20200519_f16.tflite
-Modify_Out_mtk_age_gender_fp16.tflite
+mtk_age_gender_fp16.tflite
 mtk_model_face_dress_fp16.tflite
 mtk_AADB_HADB_MBV2_model_f16.tflite
 mtk_AADB_HADB_MBV3_model_f16.tflite

@@ -77,7 +77,7 @@ hiai_cpu_face_emotion.tflite
 hiai_cpu_face_gazing.tflite
 hiai_cpu_face_headpose.tflite
 hiai_humanDetection.tflite
-Modify_Out_hiai_cv_focusShootOCRModel_08.tflite
+hiai_cv_focusShootOCRModel_08.tflite
 ml_face_openclose.tflite
 hiai_face_model_npu.tflite
 hiai_ctpn_feature_map.tflite

@@ -122,13 +122,13 @@ mtk_model_normalize_object_scene_ps_20200826_f32_no_softmax.tflite
 mtk_276landmark_0913.tflite
 mtk_face_recognition.tflite
 mtk_convert_model.tflite
-Modify_Out_smartreply.tflite
+smartreply.tflite
 mindspore_text_classification_tflite.tflite
 # ml_location.tflite
 ml_text_correction.tflite
 ml_pic_shopping.tflite
 ml_vision_guide_detection3_pb2tflite.tflite
-Modify_Out_ml_vision_guide_detection1_pb2tflite.tflite
+ml_vision_guide_detection1_pb2tflite.tflite
 ml_pic_shopping_pb2tflite.tflite
 ml_ocr_jk_pb2tflite.tflite
 ml_ocr_latin_pb2tflite.tflite

@@ -152,27 +152,27 @@ Q_language_model_hrmini_Q4_b4_17w.tflite
 Q_new_detect.tflite
 Q_object_scene.tflite
 Q_pose.tflite
-Modify_Out_ml_ei_landmark_pb2tflite.tflite
+ml_ei_landmark_pb2tflite.tflite
 unet_mbv2_05_104pts.tflite
 hiai_AADB_HADB_MBV2_model_f16.tflite
 hiai_AADB_HADB_MBV2_model_fp32.tflite
-Modify_Out_hiai_detect_curve_model_float32.tflite
-Modify_Out_hiai_detectmodel_06_23_960_480_1180700.tflite
-Modify_Out_hiai_detectmodel_desnet_256_128_64_32.tflite
-Modify_Out_lite-model_aiy_vision_classifier_food_V1_1.tflite
+hiai_detect_curve_model_float32.tflite
+hiai_detectmodel_06_23_960_480_1180700.tflite
+hiai_detectmodel_desnet_256_128_64_32.tflite
+lite-model_aiy_vision_classifier_food_V1_1.tflite
 lite-model_disease-classification_1.tflite
 lite-model_models_mushroom-identification_v1_1.tflite
-Modify_Out_smartreply_1_default_1.tflite
+smartreply_1_default_1.tflite
 text_classification.tflite
-Modify_Out_Q_detect_fpn_add_inception-1448650.tflite
-Modify_Out_Q_hand_0812_pb2tflite.tflite
+Q_detect_fpn_add_inception-1448650.tflite
+Q_hand_0812_pb2tflite.tflite
 bloom_landmark.tflite
-Modify_Out_Q888_age_gender_orderd.tflite
+Q888_age_gender_orderd.tflite
 Q888_face_dress_mv3y.tflite
 Q888_HADB_AADB_MBV2_model_fp32.tflite
 Q888_landmark.tflite
 Q888_pose.tflite
-Modify_Out_Q888_lapa158_unet_0924.tflite
+Q888_lapa158_unet_0924.tflite
 Q888_isface.tflite
 Q888_new_detect.tflite
 Q888_model_normalize_object_scene_ps_20200826_f32_no_softmax.tflite

@@ -180,7 +180,7 @@ Q888_face_emo_dress_mv3_orderd.tflite
 Q_iMaxDN_RGB_385_p_RGB_RGB_pb2tflite.tflite
 Q_iMaxSR_RGB_385_p_pb2tflite.tflite
 bloom_new_detect.tflite
-Modify_Out_bloom_model_age_gender.tflite
+bloom_model_age_gender.tflite
 bloom_isface.tflite
 hiai_object_detect_814.tflite
 hiai_object_tflite_graph_8bit.tflite

@@ -193,11 +193,11 @@ ml_video_edit_img_segment_adaptise_pb2tflite.tflite;2
 ml_video_edit_video_segment_gauss_adaptis_part2_pb2tflite.tflite;2
 hdc_tb_cn_neg.tflite;3 0.5
 hiai_cv_labelDetectorModel_v3.tflite;2
-Modify_Out_ml_tacotron_decoder_step_stf.tflite;9;1,80:1,256:1,1024:1,1024:1,1024:1,1024:1,8:1,1,256:1
+ml_tacotron_decoder_step_stf.tflite;9;1,80:1,256:1,1024:1,1024:1,1024:1,1024:1,8:1,1,256:1
 ml_headpose_pb2tflite.tflite;3;1,64,64,3:16:16
 ml_ei_headpose_pb2tflite.tflite;3;1,64,64,3:16:16
 lite-model_albert_lite_base_squadv1_metadata_1.tflite;3
 lite-model_mobilebert_1_metadata_1.tflite;3
-Modify_Out_hiai_vad.tflite;2
+hiai_vad.tflite;2
 add_uint8.tflite;2
 coco_ssd_mobilenet_v1_1.0.tflite

@@ -1,4 +1,4 @@
-Modify_Out_video_infer2.tflite
+video_infer2.tflite
 mobilenet_v1_0.25_128_quant.tflite
 mobilenet_v1_0.25_160_quant.tflite
 mobilenet_v1_0.25_192_quant.tflite

@@ -49,7 +49,7 @@ ide_label_base.tflite 22
 # dividing 0 in the following operator.
 #ide_label_retrained.tflite
 ml_ei_headpose.tflite 3
-Modify_Out_ml_ei_landmark.tflite 3
+ml_ei_landmark.tflite 3
 mnist.tflite 4
 mobilenet.tflite 0.1
 resnet.tflite 120

@@ -131,7 +131,7 @@ mtk_model_normalize_object_scene_ps_20200826_f32_no_softmax.tflite 22
 mtk_276landmark_0913.tflite 16
 mtk_face_recognition.tflite 8
 mtk_convert_model.tflite 5
-Modify_Out_smartreply.tflite 0.1
+smartreply.tflite 0.1
 mindspore_text_classification_tflite.tflite 9.2 # small output causes big bias
 #ml_location.tflite 0.1
 ml_text_correction.tflite 1

@@ -141,7 +141,7 @@ ml_text_correction.tflite 1
 # fp16: 27.6 - 27.4 = 0.2
 #ml_pic_shopping.tflite 0.1
 ml_vision_guide_detection3_pb2tflite.tflite 0.5
-Modify_Out_ml_vision_guide_detection1_pb2tflite.tflite 0.5
+ml_vision_guide_detection1_pb2tflite.tflite 0.5
 ml_pic_shopping_pb2tflite.tflite 95
 ml_ocr_jk_pb2tflite.tflite 0.5
 ml_ocr_latin_pb2tflite.tflite 11.5

@@ -158,17 +158,17 @@ lite-model_on_device_vision_classifier_landmarks_classifier_asia_V1_1.tflite 25
 lite-model_on_device_vision_classifier_landmarks_classifier_oceania_antarctica_V1_1.tflite 11
 lite-model_on_device_vision_classifier_landmarks_classifier_europe_V1_1.tflite 32
 lite-model_on_device_vision_classifier_landmarks_classifier_south_america_V1_1.tflite 14
-Modify_Out_ml_ei_landmark_pb2tflite.tflite 2
+ml_ei_landmark_pb2tflite.tflite 2
 unet_mbv2_05_104pts.tflite 17
 hiai_AADB_HADB_MBV2_model_f16.tflite 3.5
 hiai_AADB_HADB_MBV2_model_fp32.tflite 4.5
-Modify_Out_mtk_age_gender_fp16.tflite 26
-Modify_Out_hiai_detect_curve_model_float32.tflite 9
+mtk_age_gender_fp16.tflite 26
+hiai_detect_curve_model_float32.tflite 9
 Q_language_model_hrmini_Q4_b4_17w.tflite 3.5
-Modify_Out_lite-model_aiy_vision_classifier_food_V1_1.tflite 47.5
+lite-model_aiy_vision_classifier_food_V1_1.tflite 47.5
 lite-model_disease-classification_1.tflite 70
 lite-model_models_mushroom-identification_v1_1.tflite 5
-Modify_Out_smartreply_1_default_1.tflite 0.5
+smartreply_1_default_1.tflite 0.5
 text_classification.tflite 0.5
 Q_AADB_HADB_MBV2_model.tflite 5
 # the input of Q_convert model is between 0-255

@@ -190,16 +190,16 @@ Q_new_detect.tflite 3.5
 # the input of Q_object_scene model is between 0-255
 Q_object_scene.tflite 3
 Q_pose.tflite 4.1
-Modify_Out_Q_detect_fpn_add_inception-1448650.tflite 1
+Q_detect_fpn_add_inception-1448650.tflite 1
 bloom_landmark.tflite 0.5
 # input data: 0~255
-Modify_Out_Q888_age_gender_orderd.tflite 1.5
+Q888_age_gender_orderd.tflite 1.5
 Q888_face_dress_mv3y.tflite 0.5
 Q888_HADB_AADB_MBV2_model_fp32.tflite 2.5
 Q888_landmark.tflite 0.5
 Q888_pose.tflite 6.1
 # the output contains value less than e-7
-Modify_Out_Q888_lapa158_unet_0924.tflite 19
+Q888_lapa158_unet_0924.tflite 19
 Q888_isface.tflite 1.0
 Q888_new_detect.tflite 1.5
 Q888_model_normalize_object_scene_ps_20200826_f32_no_softmax.tflite 2

@@ -208,7 +208,7 @@ Q888_face_emo_dress_mv3_orderd.tflite 2.5
 Q_iMaxDN_RGB_385_p_RGB_RGB_pb2tflite.tflite 1
 Q_iMaxSR_RGB_385_p_pb2tflite.tflite 5
 bloom_new_detect.tflite 3.5
-Modify_Out_bloom_model_age_gender.tflite 0.5
+bloom_model_age_gender.tflite 0.5
 bloom_isface.tflite 0.5
 # The output values of conv layers range from -e±5 to e±5, which almost reaches the representation limit of fp16. In
 # this range, the fp16 data will has big bias. And the accumulation of this bias lowers the final precision.