diff --git a/mindspore/lite/test/models_caffe_posttraining.cfg b/mindspore/lite/test/models_caffe_posttraining.cfg
new file mode 100644
index 00000000000..100ccc2beec
--- /dev/null
+++ b/mindspore/lite/test/models_caffe_posttraining.cfg
@@ -0,0 +1 @@
+ml_face_mnet
diff --git a/mindspore/lite/test/run_benchmark_nets.sh b/mindspore/lite/test/run_benchmark_nets.sh
index c1ad84e336b..70c526e73ae 100644
--- a/mindspore/lite/test/run_benchmark_nets.sh
+++ b/mindspore/lite/test/run_benchmark_nets.sh
@@ -114,6 +114,23 @@ function Run_Converter() {
         fi
     done < ${models_tflite_posttraining_config}
 
+    # Convert Caffe PostTraining models:
+    while read line; do
+        model_name=${line}
+        if [[ $model_name == \#* ]]; then
+            continue
+        fi
+        echo ${model_name} >> "${run_converter_log_file}"
+        echo 'convert model name: '${model_name}' begin.'
+        echo './converter_lite --fmk=CAFFE --modelFile='${models_path}'/'${model_name}'.prototxt --weightFile='${models_path}'/'${model_name}'.caffemodel --outputFile='${ms_models_path}'/'${model_name}'_posttraining --quantType=PostTraining --configFile='${models_path}'/config.'${model_name} >> "${run_converter_log_file}"
+        ./converter_lite --fmk=CAFFE --modelFile=$models_path/${model_name}.prototxt --weightFile=$models_path/${model_name}.caffemodel --outputFile=${ms_models_path}/${model_name}_posttraining --quantType=PostTraining --configFile=${models_path}/config.${model_name}
+        if [ $? = 0 ]; then
+            converter_result='converter post_training '${model_name}' pass';echo ${converter_result} >> ${run_converter_result_file}
+        else
+            converter_result='converter post_training '${model_name}' failed';echo ${converter_result} >> ${run_converter_result_file};return 1
+        fi
+    done < ${models_caffe_posttraining_config}
+
     # Convert TFLite AwareTraining models:
     while read line; do
         model_name=${line}
@@ -271,6 +288,24 @@ function Run_x86() {
         fi
     done < ${models_tflite_posttraining_config}
 
+    # Run caffe post training quantization converted models:
+    while read line; do
+        model_name=${line}
+        if [[ $model_name == \#* ]]; then
+            continue
+        fi
+        echo ${model_name} >> "${run_x86_log_file}"
+        echo 'cd '${x86_path}'/mindspore-lite-'${version}'-runtime-x86-'${process_unit_x86} >> "${run_x86_log_file}"
+        cd ${x86_path}/mindspore-lite-${version}-runtime-x86-${process_unit_x86} || return 1
+        echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelFile='${ms_models_path}'/'${model_name}'_posttraining.ms --inDataFile=/home/workspace/mindspore_dataset/mslite/quantTraining/ml_face_mnet_calibration_data/20_Family_Group_Family_Group_20_1001.bin --benchmarkDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/'${model_name}'_posttraining.ms.out --numThreads=1 --accuracyThreshold=105' >> "${run_x86_log_file}"
+        export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelFile=${ms_models_path}/${model_name}_posttraining.ms --inDataFile=/home/workspace/mindspore_dataset/mslite/quantTraining/ml_face_mnet_calibration_data/20_Family_Group_Family_Group_20_1001.bin --benchmarkDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/${model_name}_posttraining.ms.out --numThreads=1 --accuracyThreshold=105 >> "${run_x86_log_file}"
+        if [ $? = 0 ]; then
+            run_result='x86: '${model_name}' pass'; echo ${run_result} >> ${run_benchmark_result_file}
+        else
+            run_result='x86: '${model_name}' failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1
+        fi
+    done < ${models_caffe_posttraining_config}
+
     # Run tflite aware training quantization converted models:
     while read line; do
         model_name=${line}
@@ -479,6 +514,35 @@ function Run_arm64() {
         fi
     done < ${models_caffe_config}
 
+    # Run caffe posttraining models:
+    while read line; do
+        model_name=${line}
+        if [[ $model_name == \#* ]]; then
+            continue
+        fi
+        echo ${model_name} >> "${run_arm64_log_file}"
+        echo 'cd /data/local/tmp/benchmark_test' > adb_run_cmd.txt
+        echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelFile='${model_name}'_posttraining.ms --inDataFile=/data/local/tmp/input_output/input/'${model_name}'_posttraining.ms.bin --benchmarkDataFile=/data/local/tmp/input_output/output/'${model_name}'_posttraining.ms.out --numThreads=1 --accuracyThreshold=146' >> "${run_arm64_log_file}"
+        echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelFile='${model_name}'_posttraining.ms --inDataFile=/data/local/tmp/input_output/input/'${model_name}'_posttraining.ms.bin --benchmarkDataFile=/data/local/tmp/input_output/output/'${model_name}'_posttraining.ms.out --numThreads=1 --accuracyThreshold=146' >> adb_run_cmd.txt
+        adb -s ${device_id} shell < adb_run_cmd.txt >> "${run_arm64_log_file}"
+        if [ $? = 0 ]; then
+            run_result='arm64: '${model_name}' pass'; echo ${run_result} >> ${run_benchmark_result_file}
+        else
+            run_result='arm64: '${model_name}' failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1
+        fi
+        # run benchmark test without calib data
+        echo ${model_name} >> "${run_arm64_log_file}"
+        echo 'cd /data/local/tmp/benchmark_test' > adb_run_cmd.txt
+        echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelFile='${model_name}'_posttraining.ms --warmUpLoopCount=1 --loopCount=2' >> "${run_arm64_log_file}"
+        echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelFile='${model_name}'_posttraining.ms --warmUpLoopCount=1 --loopCount=2' >> adb_run_cmd.txt
+        adb -s ${device_id} shell < adb_run_cmd.txt >> "${run_arm64_log_file}"
+        if [ $? = 0 ]; then
+            run_result='arm64: '${model_name}' pass'; echo ${run_result} >> ${run_benchmark_result_file}
+        else
+            run_result='arm64: '${model_name}' failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1
+        fi
+    done < ${models_caffe_posttraining_config}
+
     # Run onnx converted models:
     while read line; do
         model_name=${line%;*}
@@ -819,6 +883,7 @@ models_tflite_config=${basepath}/models_tflite.cfg
 models_caffe_config=${basepath}/models_caffe.cfg
 models_tflite_awaretraining_config=${basepath}/models_tflite_awaretraining.cfg
 models_tflite_posttraining_config=${basepath}/models_tflite_posttraining.cfg
+models_caffe_posttraining_config=${basepath}/models_caffe_posttraining.cfg
 models_tflite_weightquant_config=${basepath}/models_tflite_weightquant.cfg
 models_onnx_config=${basepath}/models_onnx.cfg
 models_fp16_config=${basepath}/models_fp16.cfg