fix potential memory leak + remove multi-input cfg files + add models to entrance guard

zengxianglong 2021-07-02 16:12:32 +08:00
parent d33adf825b
commit 0183584803
16 changed files with 107 additions and 115 deletions

View File

@ -61,14 +61,6 @@ void GluCPUKernel::FreeTmpBuffer() {
}
}
GluCPUKernel::~GluCPUKernel() {
FreeTmpBuffer();
if (split_param_.split_sizes_ != nullptr) {
delete[] split_param_.split_sizes_;
split_param_.split_sizes_ = nullptr;
}
}
int GluCPUKernel::Init() {
if (!InferShapeDone()) {
return RET_OK;
@ -82,7 +74,7 @@ int GluCPUKernel::ReSize() {
if (split_param_.split_sizes_ != nullptr) {
delete[] split_param_.split_sizes_;
}
split_param_.split_sizes_ = new int[kSplitNum];
split_param_.split_sizes_ = this->split_sizes_;
memset(split_param_.split_sizes_, 0, kSplitNum * sizeof(int));
auto in_tensor = in_tensors_.front();

View File

@ -38,7 +38,7 @@ class GluCPUKernel : public InnerKernel {
glu_param_ = reinterpret_cast<GluParameter *>(op_parameter_);
split_ptr_.resize(kSplitNum, nullptr);
}
~GluCPUKernel() override;
~GluCPUKernel() override { FreeTmpBuffer(); }
int Init() override;
int ReSize() override;
@ -55,6 +55,7 @@ class GluCPUKernel : public InnerKernel {
void *input_ptr_ = nullptr;
int8_t *sigmoid_ptr_ = nullptr;
std::vector<int8_t *> split_ptr_;
int split_sizes_[kSplitNum];
int thread_n_stride_ = 0;
int usable_thread_num_ = 0;
int num_unit_ = 0;
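
The leak fix above has one moving part: `split_param_.split_sizes_` used to point at a `new int[kSplitNum]` allocated in `ReSize()` and freed in the destructor, and any path that skipped that `delete[]` leaked the buffer. After the change it points at the fixed-size member array `split_sizes_[kSplitNum]`, so the destructor only has to call `FreeTmpBuffer()`. A minimal sketch of the resulting ownership pattern (simplified names and an assumed `kSplitNum`, not the full kernel):

```cpp
#include <cstring>

constexpr int kSplitNum = 2;  // assumed value; GLU splits its input in two

struct SplitParameter {
  int *split_sizes_ = nullptr;  // borrowed pointer after the fix, never freed here
};

class GluKernelSketch {
 public:
  // ReSize() can now run any number of times without allocating:
  // it just re-points the parameter at the member array and zeroes it.
  int ReSize() {
    split_param_.split_sizes_ = split_sizes_;
    std::memset(split_param_.split_sizes_, 0, kSplitNum * sizeof(int));
    return 0;
  }
  // No delete[] needed anywhere: the array is a member, so the old
  // heap allocation (and the leak when its delete[] was skipped) disappears.

 private:
  SplitParameter split_param_;
  int split_sizes_[kSplitNum] = {0};
};

int main() {
  GluKernelSketch kernel;
  kernel.ReSize();
  kernel.ReSize();  // safe to call repeatedly: no allocation, nothing to leak
  return 0;
}
```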

View File

@ -121,3 +121,7 @@ ml_segmentation_atlanta_1
bolt_deploy_color-server
ml_face_emotion
hdc_ocr_recog_horizontal
ml_Heatmap_depth_240180;2
ml_Heatmap_depth_180240;2
ml_video_edit_person_divison_video;2
ml_video_edit_hair_dyeing_segmodel_v2

View File

@ -130,3 +130,7 @@ ml_segmentation_atlanta_1 0.5
bolt_deploy_color-server 0.5
ml_face_emotion 0.5
hdc_ocr_recog_horizontal 0.5
# The outputs of the two Heatmap_depth models have small values
ml_Heatmap_depth_240180;2 10
ml_Heatmap_depth_180240;2 7
ml_video_edit_hair_dyeing_segmodel_v2 1

View File

@ -84,3 +84,5 @@ ml_video_edit_art_transfer.onnx;3 3
ml_video_edit_enhance_update_tmp.onnx 0.5
ml_video_edit_art_generate_20210513.onnx 0.5
ml_video_edit_art_transfer_20210513.onnx;3 0.5
ml_video_edit_hair_dyeing_segmodel_v2 0.5
ml_video_edit_makeup_mobilenetv203.onnx 2

View File

@ -85,3 +85,11 @@ ml_asr_encoder_int8_202103.onnx
rpnt_pdr_conv2d_16_fixed_last.onnx
hdc_efficientnet_b3_1w_class.onnx
yolov5s.onnx
porseg_tmp.onnx;2
hiai_nlu_onnx_model_v1_0.onnx;3
hiai_nlu_onnx_model_v1_1.onnx;3
ml_video_edit_art_transfer_20210513.onnx;3
ml_asr_decoder_202103.onnx;2;1,64,512:1,64
decoder.onnx;2;1,7,512:1,7
ml_video_edit_makeup_mobilenetv203.onnx
ml_video_edit_hair_dyeing_migrate_v2.onnx;4

View File

@ -90,8 +90,15 @@ ssd_mobilenet_v1_10.onnx;1;1,383,640,3 0.5
Harmony_Voiceprint.onnx;1;1,200,40,1 5.5
# A matmul op in the later part of the model produces output values that overflow fp16 (>65504).
#ml_video_edit_art_generate_20210513.onnx nan
ml_asr_encoder_int8_202103.onnx;;;4 2.5
ml_asr_encoder_int8_202103.onnx;;;4 2.1
# The input range of hdc_efficientnet_b3_1w_class.onnx is [-5, 5], and the middle layers produce small
# values (<1e-5); fp16 precision is low in this case.
hdc_efficientnet_b3_1w_class.onnx 18
yolov5s.onnx 2
ml_video_edit_art_transfer.onnx;3 3
decoder.onnx;2;1,7,512:1,7 113
ml_video_edit_art_transfer_20210513.onnx;3 1
ml_asr_decoder_202103.onnx;2;1,64,512:1,64 0.5
ml_video_edit_makeup_mobilenetv203.onnx 4
# The input of ml_video_edit_hair_dyeing_migrate_v2.onnx should be in [0, 1]
ml_video_edit_hair_dyeing_migrate_v2.onnx;4 2.5
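
For context on the fp16 comments above: fp16's largest finite value is 65504 = (2 − 2^-10) × 2^15, and values below 2^-14 ≈ 6.1e-5 are subnormal, where the absolute spacing is fixed at 2^-24, so relative error grows as values shrink. A small standalone C++ sketch (not part of this commit) that prints those limits:

```cpp
#include <cmath>
#include <cstdio>

// Absolute spacing (ulp) of fp16 around x: 2^(e-10) for normal values,
// a fixed 2^-24 once x falls into the subnormal range below 2^-14.
double Fp16Ulp(double x) {
  int e = static_cast<int>(std::floor(std::log2(std::fabs(x))));
  if (e < -14) e = -14;            // subnormal: exponent pinned at the minimum
  return std::ldexp(1.0, e - 10);  // 10 fraction bits
}

int main() {
  std::printf("fp16 max finite value : 65504\n");
  std::printf("fp16 min normal value : %g\n", std::ldexp(1.0, -14));  // ~6.1e-5
  std::printf("spacing near 1e-5     : %g\n", Fp16Ulp(1e-5));         // 2^-24 ~ 6e-8
  std::printf("relative error bound  : %g\n", Fp16Ulp(1e-5) / 1e-5);  // ~0.6%
  return 0;
}
```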

View File

@ -75,3 +75,28 @@ ml_vision_guide_detection2.pb;1;1,320,320,1
ml_tts_encoder.pb;4;1:1,44:1:1;;input_dependent
# encoder_0111_control_flow.pb is the same model as ml_tts_encoder_control_flow.pb
#encoder_0111_control_flow.pb;4;1:1,44:1:1;;input_dependent
ml_video_edit_img_segment_adaptise.pb;2
ml_video_edit_video_segment_gauss_adaptis_part2.pb;2
#fasterrcnn_crop.pb is the same model as gts_object_detect_Ics.pb.
#fasterrcnn_crop.pb;1;420,630,3
#decoder_step_201217.pb is the same model as ml_tts_decoder.pb.
#decoder_step_201217.pb;5
#decoder_step_201217_modified.pb is the same model as ml_tts_decoder_control_flow.pb.
#decoder_step_201217_modified.pb;5
#encoder_0111.pb is the same model as ml_tts_encoder.pb.
#encoder_0111.pb;4;1:1,44:1:1
encoder_201228.pb;3;1:1,22:1;;input_dependent
ml_video_edit_oneclick_adaptis.pb;3
tacotron_encoder_stf.pb;5;1:1,62:1,62:1,62:1,62;;input_dependent
female_model_step2_int16_noiseout.pb;66
ml_female_model_step6_noiseout.pb;66
ml_male_model_step6_noiseout.pb;66
ml_tts_decoder_control_flow.pb;5
ml_tts_decoder.pb;5
ml_tts_encoder_control_flow.pb;4;1:1,22:1:1;;input_dependent
ml_tts_vocoder.pb;66
hiai_nlu_model.pb;3;1,16:1,16:1,16
gts_object_detect_Ics.pb;1;420,630,3;;input_dependent
hiai_transformer_encoder.pb;15
decoder_step_nocumsum_v5.pb;13;1:1,512:1,1429,2:1,127:1,127:1,127:1,127,320:1,80:1,512:1,512:1,512:1,512:1,512
hiai_nlu_model_v2.pb;7;1,5:1,6:1,174:1,98:1,5:1,5:1,5
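
For reference, the cfg line grammar used throughout these files is `model_name;input_num;input_shapes`, with shapes for different inputs separated by `:` and dimensions within one shape by `,` (an optional `;;input_dependent` flag follows). A hypothetical C++ sketch of parsing the shape field, for illustration only:

```cpp
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

// Parse a shape field such as "1:1,44:1:1" into one dimension vector per input.
std::vector<std::vector<int>> ParseShapes(const std::string &field) {
  std::vector<std::vector<int>> shapes;
  std::stringstream per_input(field);
  std::string one;
  while (std::getline(per_input, one, ':')) {  // ':' separates inputs
    std::vector<int> dims;
    std::stringstream per_dim(one);
    std::string d;
    while (std::getline(per_dim, d, ',')) dims.push_back(std::stoi(d));  // ',' separates dims
    shapes.push_back(dims);
  }
  return shapes;
}

int main() {
  for (const auto &shape : ParseShapes("1:1,44:1:1")) {
    for (int d : shape) std::cout << d << ' ';
    std::cout << '\n';
  }
  return 0;
}
```

For example, `1:1,44:1:1` (as in `ml_tts_encoder.pb;4;1:1,44:1:1`) parses as four inputs with shapes [1], [1,44], [1], [1].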

View File

@ -68,3 +68,21 @@ ml_vision_guide_detection2.pb;1;1,320,320,1 1
ml_tts_encoder.pb;4;1:1,44:1:1 9
# encoder_0111_control_flow.pb is the same model as ml_tts_encoder_control_flow.pb
#encoder_0111_control_flow.pb;4;1:1,44:1:1 10
ml_video_edit_video_segment_gauss_adaptis_part2.pb;2 11
ml_video_edit_img_segment_adaptise.pb;2 40
ml_video_edit_person_divison_video;2 38
ml_video_edit_oneclick_adaptis.pb;3 6
#decoder_step_201217.pb is the same model as ml_tts_decoder.pb.
#decoder_step_201217.pb;5 187
#decoder_step_201217_modified.pb is the same model as ml_tts_decoder_control_flow.pb.
#decoder_step_201217_modified.pb;5 0.5
#encoder_0111.pb is the same model as ml_tts_encoder.pb.
#encoder_0111.pb;4;1:1,44:1:1
ml_female_model_step6_noiseout.pb;66 2
ml_male_model_step6_noiseout.pb;66 2.5
ml_tts_encoder_control_flow.pb;4;1:1,22:1:1 1.5
ml_tts_decoder_control_flow.pb;5 1
ml_tts_decoder.pb;5 2.5
ml_tts_vocoder.pb;66 53
hiai_transformer_encoder.pb;15 4
decoder_step_nocumsum_v5.pb;13;1:1,512:1,1429,2:1,127:1,127:1,127:1,127,320:1,80:1,512:1,512:1,512:1,512:1,512 0.5

View File

@ -185,3 +185,18 @@ bloom_isface.tflite
hiai_object_detect_814.tflite
hiai_object_tflite_graph_8bit.tflite
lma_tsec_shallow_channels16_ds2.1.1_model-best-f1.tflite
lite-model_arbitrary-image-stylization-inceptionv3_fp16_transfer_1.tflite;2
magenta_arbitrary-image-stylization-v1-256_fp16_transfer_1.tflite;2
albert_lite_base_squadv1_1.tflite;3
mobilebert_1_default_1.tflite;3
ml_video_edit_img_segment_adaptise_pb2tflite.tflite;2
ml_video_edit_video_segment_gauss_adaptis_part2_pb2tflite.tflite;2
hdc_tb_cn_neg.tflite;3
hiai_cv_labelDetectorModel_v3.tflite;2
ml_tacotron_decoder_step_stf.tflite;9;1,80:1,256:1,1024:1,1024:1,1024:1,1024:1,8:1,1,256:1
ml_headpose_pb2tflite.tflite;3;16:1,64,64,3:16
ml_ei_headpose_pb2tflite.tflite;3;16:1,64,64,3:16
lite-model_albert_lite_base_squadv1_metadata_1.tflite;3
lite-model_mobilebert_1_metadata_1.tflite;3
hiai_vad.tflite;2
add_uint8.tflite;2

View File

@ -213,3 +213,10 @@ bloom_isface.tflite 0.5
# The output values of the conv layers span magnitudes from roughly 1e-5 to 1e5, which almost reaches the
# representation limits of fp16. In this range fp16 values carry a large bias, and the accumulation of that bias
# lowers the final precision.
hiai_object_detect_814.tflite 14
ml_video_edit_video_segment_gauss_adaptis_part2_pb2tflite.tflite;2 11
ml_video_edit_img_segment_adaptise_pb2tflite.tflite;2 0.5
hdc_tb_cn_neg.tflite;3 295
# The input of hiai_cv_labelDetectorModel_v3.tflite is in [0, 255].
hiai_cv_labelDetectorModel_v3.tflite;2 2
ml_headpose_pb2tflite.tflite;3;16:1,64,64,3:16 1
ml_ei_headpose_pb2tflite.tflite;3;16:1,64,64,3:16 0.5

View File

@ -1,51 +0,0 @@
lite-model_arbitrary-image-stylization-inceptionv3_fp16_transfer_1.tflite;2
magenta_arbitrary-image-stylization-v1-256_fp16_transfer_1.tflite;2
albert_lite_base_squadv1_1.tflite;3
mobilebert_1_default_1.tflite;3
porseg_tmp.onnx;2
ml_video_edit_img_segment_adaptise.pb;2
ml_video_edit_img_segment_adaptise_pb2tflite.tflite;2
ml_video_edit_video_segment_gauss_adaptis_part2.pb;2
ml_video_edit_video_segment_gauss_adaptis_part2_pb2tflite.tflite;2
decoder.onnx;2;1,7,512:1,7
#fasterrcnn_crop.pb is the same model as gts_object_detect_Ics.pb.
#fasterrcnn_crop.pb;1;420,630,3
ml_video_edit_person_divison_video;2
hdc_tb_cn_neg.tflite;3
#decoder_step_201217.pb is the same model as ml_tts_decoder.pb.
#decoder_step_201217.pb;5
#decoder_step_201217_modified.pb is the same model as ml_tts_decoder_control_flow.pb.
#decoder_step_201217_modified.pb;5
#encoder_0111.pb is the same model as ml_tts_encoder.pb.
#encoder_0111.pb;4;1:1,44:1:1
encoder_201228.pb;3;1:1,22:1;;input_dependent
ml_video_edit_oneclick_adaptis.pb;3
tacotron_encoder_stf.pb;5;1:1,62:1,62:1,62:1,62;;input_dependent
female_model_step2_int16_noiseout.pb;66
ml_female_model_step6_noiseout.pb;66
ml_male_model_step6_noiseout.pb;66
ml_tts_decoder_control_flow.pb;5
ml_tts_decoder.pb;5
ml_tts_encoder_control_flow.pb;4;1:1,22:1:1;;input_dependent
hiai_cv_labelDetectorModel_v3.tflite;2
ml_tts_vocoder.pb;66
ml_tacotron_decoder_step_stf.tflite;9;1,80:1,256:1,1024:1,1024:1,1024:1,1024:1,8:1,1,256:1
add_uint8.tflite;2
ml_Heatmap_depth_240180;2
ml_Heatmap_depth_180240;2
hiai_nlu_model.pb;3;1,16:1,16:1,16
gts_object_detect_Ics.pb;1;420,630,3;;input_dependent
ml_headpose_pb2tflite.tflite;3;16:1,64,64,3:16
ml_ei_headpose_pb2tflite.tflite;3;16:1,64,64,3:16
hiai_transformer_encoder.pb;15
lite-model_albert_lite_base_squadv1_metadata_1.tflite;3
lite-model_mobilebert_1_metadata_1.tflite;3
hiai_vad.tflite;2
decoder_step_nocumsum_v5.pb;13;1:1,512:1,1429,2:1,127:1,127:1,127:1,127,320:1,80:1,512:1,512:1,512:1,512:1,512
hiai_nlu_model_v2.pb;7;1,5:1,6:1,174:1,98:1,5:1,5:1,5
hiai_nlu_onnx_model_v1_0.onnx;3
hiai_nlu_onnx_model_v1_1.onnx;3
ml_video_edit_art_transfer_20210513.onnx;3
ml_asr_decoder_202103.onnx;2;1,64,512:1,64
ml_audio_kit_encoder_v5.pb;6;1,32:1,32:1,32:1,32:1:1
hiai_nlu_model_v1.pb;3;1,16:1,16:1,16 2.0

View File

@ -1,36 +0,0 @@
# [first column]: model_name;input_bin_number;input_shape (input_bin_number and input_shape may be omitted.)
# [second column]: accuracy limit on arm64
# Columns are separated by a space.
ml_video_edit_video_segment_gauss_adaptis_part2_pb2tflite.tflite;2 11
ml_video_edit_video_segment_gauss_adaptis_part2.pb;2 11
ml_video_edit_img_segment_adaptise.pb;2 40
ml_video_edit_img_segment_adaptise_pb2tflite.tflite;2 0.5
ml_video_edit_person_divison_video;2 38
ml_video_edit_oneclick_adaptis.pb;3 6
hdc_tb_cn_neg.tflite;3 295
#decoder_step_201217.pb is the same model as ml_tts_decoder.pb.
#decoder_step_201217.pb;5 187
#decoder_step_201217_modified.pb is the same model as ml_tts_decoder_control_flow.pb.
#decoder_step_201217_modified.pb;5 0.5
#encoder_0111.pb is the same model as ml_tts_encoder.pb.
#encoder_0111.pb;4;1:1,44:1:1
ml_video_edit_art_transfer.onnx;3 3
decoder.onnx;2;1,7,512:1,7 113
ml_female_model_step6_noiseout.pb;66 2
ml_male_model_step6_noiseout.pb;66 2.5
ml_tts_encoder_control_flow.pb;4;1:1,22:1:1 1.5
ml_tts_decoder_control_flow.pb;5 1
ml_tts_decoder.pb;5 2.5
# The input of hiai_cv_labelDetectorModel_v3.tflite is in [0, 255].
hiai_cv_labelDetectorModel_v3.tflite;2 2
ml_tts_vocoder.pb;66 53
# The outputs of the two Heatmap_depth models have small values
ml_Heatmap_depth_240180;2 10
ml_Heatmap_depth_180240;2 7
ml_headpose_pb2tflite.tflite;3;16:1,64,64,3:16 1
ml_ei_headpose_pb2tflite.tflite;3;16:1,64,64,3:16 0.5
hiai_transformer_encoder.pb;15 4
decoder_step_nocumsum_v5.pb;13;1:1,512:1,1429,2:1,127:1,127:1,127:1,127,320:1,80:1,512:1,512:1,512:1,512:1,512 0.5
ml_video_edit_art_transfer_20210513.onnx;3 1
ml_asr_decoder_202103.onnx;2;1,64,512:1,64 0.5
hiai_nlu_model_v1.pb;3;1,16:1,16:1,16 2.0

View File

@ -6,7 +6,7 @@ function Convert() {
local cfg_file_list=$1
for cfg_file in ${cfg_file_list[*]}; do
while read line; do
if [[ $line == \#* ]]; then
if [[ $line == \#* || $line == "" ]]; then
continue
fi
model_info=${line%% *}
@ -118,7 +118,7 @@ function Run_Benchmark() {
for cfg_file in ${cfg_file_list[*]}; do
while read line; do
line_info=${line}
if [[ $line_info == \#* ]]; then
if [[ $line_info == \#* || $line_info == "" ]]; then
continue
fi
model_info=`echo ${line_info}|awk -F ' ' '{print $1}'`
@ -238,8 +238,8 @@ function Run_Benchmark() {
function MS_PRINT_TESTCASE_START_MSG() {
echo ""
echo -e "-----------------------------------------------------------------------------------------------------------------------------------"
echo -e "env Testcase Result "
echo -e "--- -------- ------ "
echo -e "env Testcase Result "
echo -e "--- -------- ------ "
}
# Print start msg after run testcase
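
The two condition changes above make the cfg readers skip blank lines as well as `#` comments, so a stray empty line is no longer parsed as a model name. A hypothetical C++ rendering of the same skip-then-take-first-field logic (the shell's `${line%% *}`), for illustration:

```cpp
#include <fstream>
#include <iostream>
#include <string>

int main() {
  std::ifstream cfg("models_onnx.cfg");  // hypothetical path, for illustration
  std::string line;
  while (std::getline(cfg, line)) {
    if (line.empty() || line[0] == '#') continue;  // the new blank-line check, plus comments
    // Equivalent of ${line%% *}: everything before the first space
    // (the whole line if there is no accuracy column).
    std::string model_info = line.substr(0, line.find(' '));
    std::cout << model_info << '\n';
  }
  return 0;
}
```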

View File

@ -17,16 +17,15 @@ function Run_Converter() {
local fp32_cfg_file_list=("$models_tf_config" "$models_tflite_config" "$models_caffe_config" "$models_onnx_config" "$models_mindspore_config" \
"$models_mindspore_train_config" "$models_tflite_posttraining_config" "$models_caffe_posttraining_config" \
"$models_tflite_awaretraining_config" "$models_weightquant_config" "$models_weightquant_7bit_config" \
"$models_weightquant_9bit_config" "$models_with_multiple_inputs_config" "$models_for_process_only_config")
"$models_weightquant_9bit_config" "$models_for_process_only_config")
local fp16_cfg_file_list=("$models_onnx_fp16_config" "$models_caffe_fp16_config" "$models_tflite_fp16_config" "$models_tf_fp16_config" \
"$models_multiple_inputs_fp16_config")
local fp16_cfg_file_list=("$models_onnx_fp16_config" "$models_caffe_fp16_config" "$models_tflite_fp16_config" "$models_tf_fp16_config")
# Convert models:
if [[ $1 == "all" || $1 == "arm64_cpu" || $1 == "arm64_fp32" ]]; then
# $1:cfgFileList; $2:inModelPath; $3:outModelPath; $4:logFile; $5:resultFile;
Convert "${fp32_cfg_file_list[*]}" $models_path $ms_models_path $run_converter_log_file $run_converter_result_file
fi
if [[ $1 == "all" || $1 == "arm64_cpu" || $1 == "arm64_fp16" ]]; then
if [[ $1 == "arm64_fp16" ]]; then
Convert "${fp16_cfg_file_list[*]}" $models_path $ms_models_path $run_converter_log_file $run_converter_result_file
fi
}
@ -76,7 +75,7 @@ function Run_arm64() {
# Prepare the config file list
local arm64_cfg_file_list=("$models_tf_config" "$models_tflite_config" "$models_caffe_config" "$models_onnx_config" "$models_mindspore_config" \
"$models_mindspore_train_config" "$models_caffe_posttraining_config" "$models_tflite_awaretraining_config" \
"$models_weightquant_config" "$models_with_multiple_inputs_config" "$models_compatibility_config" "$models_for_process_only_config")
"$models_weightquant_config" "$models_compatibility_config" "$models_for_process_only_config")
# Run converted models:
# $1:cfgFileList; $2:modelPath; $3:dataPath; $4:logFile; $5:resultFile; $6:platform; $7:processor; $8:phoneId;
Run_Benchmark "${arm64_cfg_file_list[*]}" . '/data/local/tmp' $run_arm64_fp32_log_file $run_benchmark_result_file 'arm64' 'CPU' $device_id
@ -85,7 +84,7 @@ function Run_arm64() {
# Run on arm64-fp16 platform:
function Run_arm64_fp16() {
Push_Files $arm64_path "aarch64" $version $benchmark_test_path "adb_push_log.txt" $device_id
local arm64_cfg_file_list=("$models_onnx_fp16_config" "$models_caffe_fp16_config" "$models_tflite_fp16_config" "$models_tf_fp16_config" "$models_multiple_inputs_fp16_config")
local arm64_cfg_file_list=("$models_onnx_fp16_config" "$models_caffe_fp16_config" "$models_tflite_fp16_config" "$models_tf_fp16_config")
# $1:cfgFileList; $2:modelPath; $3:dataPath; $4:logFile; $5:resultFile; $6:platform; $7:processor; $8:phoneId;
Run_Benchmark "${arm64_cfg_file_list[*]}" . '/data/local/tmp' $run_arm64_fp16_log_file $run_benchmark_result_file 'arm64' 'CPU' $device_id
}
@ -225,14 +224,12 @@ models_onnx_fp16_config=${basepath}/../config/models_onnx_fp16.cfg
models_caffe_fp16_config=${basepath}/../config/models_caffe_fp16.cfg
models_tflite_fp16_config=${basepath}/../config/models_tflite_fp16.cfg
models_tf_fp16_config=${basepath}/../config/models_tf_fp16.cfg
models_multiple_inputs_fp16_config=${basepath}/../config/models_with_multiple_inputs_fp16.cfg
models_mindspore_config=${basepath}/../config/models_mindspore.cfg
models_mindspore_train_config=${basepath}/../config/models_mindspore_train.cfg
models_weightquant_7bit_config=${basepath}/../config/models_weightquant_7bit.cfg
models_weightquant_9bit_config=${basepath}/../config/models_weightquant_9bit.cfg
models_weightquant_config=${basepath}/../config/models_weightquant.cfg
models_compatibility_config=${basepath}/../config/models_compatibility.cfg
models_with_multiple_inputs_config=${basepath}/../config/models_with_multiple_inputs.cfg
models_for_process_only_config=${basepath}/../config/models_for_process_only.cfg
models_codegen_config=${basepath}/../config/models_codegen.cfg

View File

@ -53,7 +53,7 @@ function Run_Converter() {
local x86_cfg_file_list=("$models_tf_config" "$models_tflite_config" "$models_caffe_config" "$models_onnx_config" "$models_mindspore_config" \
"$models_mindspore_train_config" "$models_tflite_posttraining_config" "$models_caffe_posttraining_config" \
"$models_tflite_awaretraining_config" "$models_weightquant_config" "$models_weightquant_7bit_config" \
"$models_weightquant_9bit_config" "$models_with_multiple_inputs_config" "$models_for_process_only_config")
"$models_weightquant_9bit_config" "$models_for_process_only_config")
# Convert models:
# $1:cfgFileList; $2:inModelPath; $3:outModelPath; $4:logFile; $5:resultFile;
Convert "${x86_cfg_file_list[*]}" $models_path $ms_models_path $run_converter_log_file $run_converter_result_file
@ -132,7 +132,7 @@ function Run_x86() {
# Prepare the config file list
local x86_cfg_file_list=("$models_tf_config" "$models_tflite_config" "$models_caffe_config" "$models_onnx_config" "$models_mindspore_config" \
"$models_mindspore_train_config" "$models_caffe_posttraining_config" "$models_tflite_awaretraining_config" \
"$models_weightquant_config" "$models_with_multiple_inputs_config" "$models_for_process_only_config")
"$models_weightquant_config" "$models_for_process_only_config")
# Run converted models:
# $1:cfgFileList; $2:modelPath; $3:dataPath; $4:logFile; $5:resultFile; $6:platform; $7:processor; $8:phoneId;
Run_Benchmark "${x86_cfg_file_list[*]}" $ms_models_path $models_path $run_x86_log_file $run_benchmark_result_file 'x86' 'CPU' ''
@ -178,7 +178,7 @@ function Run_x86_sse() {
# Prepare the config file list
local sse_cfg_file_list=("$models_tf_config" "$models_tflite_config" "$models_caffe_config" "$models_onnx_config" "$models_mindspore_config" \
"$models_mindspore_train_config" "$models_caffe_posttraining_config" "$models_tflite_awaretraining_config" \
"$models_weightquant_config" "$models_with_multiple_inputs_config" "$models_for_process_only_config")
"$models_weightquant_config" "$models_for_process_only_config")
# Run converted models:
# $1:cfgFileList; $2:modelPath; $3:dataPath; $4:logFile; $5:resultFile; $6:platform; $7:processor; $8:phoneId;
Run_Benchmark "${sse_cfg_file_list[*]}" $ms_models_path $models_path $run_x86_sse_log_file $run_benchmark_result_file 'x86' 'CPU' ''
@ -224,7 +224,7 @@ function Run_x86_avx() {
# Prepare the config file list
local avx_cfg_file_list=("$models_tf_config" "$models_tflite_config" "$models_caffe_config" "$models_onnx_config" "$models_mindspore_config" \
"$models_mindspore_train_config" "$models_caffe_posttraining_config" "$models_tflite_awaretraining_config" \
"$models_weightquant_config" "$models_with_multiple_inputs_config" "$models_for_process_only_config")
"$models_weightquant_config" "$models_for_process_only_config")
# Run converted models:
# $1:cfgFileList; $2:modelPath; $3:dataPath; $4:logFile; $5:resultFile; $6:platform; $7:processor; $8:phoneId; $9:benchmark_mode
Run_Benchmark "${avx_cfg_file_list[*]}" $ms_models_path $models_path $run_x86_avx_log_file $run_benchmark_result_file 'x86' 'CPU' ''
@ -375,7 +375,6 @@ models_mindspore_train_config=${basepath}/../config/models_mindspore_train.cfg
models_weightquant_7bit_config=${basepath}/../config/models_weightquant_7bit.cfg
models_weightquant_9bit_config=${basepath}/../config/models_weightquant_9bit.cfg
models_weightquant_config=${basepath}/../config/models_weightquant.cfg
models_with_multiple_inputs_config=${basepath}/../config/models_with_multiple_inputs.cfg
models_for_process_only_config=${basepath}/../config/models_for_process_only.cfg
models_codegen_config=${basepath}/../config/models_codegen.cfg
models_codegen_parallel_config=${basepath}/../config/models_codegen_parallel.cfg