!6209 [MSLITE][Develop] modify test shell: mindspore/lite/test/run_benchmark_nets.sh

Merge pull request !6209 from yangruoqi713/lite
Authored by mindspore-ci-bot on 2020-09-15 15:50:29 +08:00, committed by Gitee
commit 15244de50a
2 changed files with 329 additions and 331 deletions

View File

@@ -0,0 +1,11 @@
age_medium
beard
emotion
glasses
hat
isface
pose_3d
tracking
ml_face_contour
mnet
ml_face_landmark

View File

@@ -1,5 +1,151 @@
#!/bin/bash
# Run converter on x86 platform:
function Run_Converter() {
# Unzip x86 runtime and converter
cd ${x86_path} || exit 1
tar -zxf mindspore-lite-${version}-runtime-x86-${process_unit_x86}.tar.gz || exit 1
tar -zxf mindspore-lite-${version}-converter-ubuntu.tar.gz || exit 1
cd ${x86_path}/mindspore-lite-${version}-converter-ubuntu || exit 1
cp converter/converter_lite ./ || exit 1
export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:./lib/:./third_party/protobuf/lib:./third_party/flatbuffers/lib
# Convert the models
cd ${x86_path}/mindspore-lite-${version}-converter-ubuntu || exit 1
rm -rf ${ms_models_path}
mkdir -p ${ms_models_path}
# Convert tflite models:
while read line; do
model_name=${line}
if [[ $model_name == \#* ]]; then
continue
fi
echo ${model_name} >> "${run_converter_log_file}"
echo './converter_lite --fmk=TFLITE --modelFile='${models_path}'/'${model_name}' --outputFile='${ms_models_path}'/'${model_name}'' >> "${run_converter_log_file}"
./converter_lite --fmk=TFLITE --modelFile=$models_path/${model_name} --outputFile=${ms_models_path}/${model_name}
if [ $? = 0 ]; then
converter_result='converter tflite '${model_name}' pass';echo ${converter_result} >> ${run_converter_result_file}
else
converter_result='converter tflite '${model_name}' failed';echo ${converter_result} >> ${run_converter_result_file};return 1
fi
done < ${models_tflite_config}
# Convert caffe models:
while read line; do
model_name=${line}
if [[ $model_name == \#* ]]; then
continue
fi
echo ${model_name} >> "${run_converter_log_file}"
echo './converter_lite --fmk=CAFFE --modelFile='${models_path}'/'${model_name}'.prototxt --weightFile='${models_path}'/'${model_name}'.caffemodel --outputFile='${ms_models_path}'/'${model_name}'' >> "${run_converter_log_file}"
./converter_lite --fmk=CAFFE --modelFile=${models_path}/${model_name}.prototxt --weightFile=${models_path}/${model_name}.caffemodel --outputFile=${ms_models_path}/${model_name}
if [ $? = 0 ]; then
converter_result='converter caffe '${model_name}' pass';echo ${converter_result} >> ${run_converter_result_file}
else
converter_result='converter caffe '${model_name}' failed';echo ${converter_result} >> ${run_converter_result_file};return 1
fi
done < ${models_caffe_config}
# Convert onnx models:
while read line; do
model_name=${line}
if [[ $model_name == \#* ]]; then
continue
fi
echo ${model_name} >> "${run_converter_log_file}"
echo './converter_lite --fmk=ONNX --modelFile='${models_path}'/'${model_name}' --outputFile='${ms_models_path}'/'${model_name}'' >> "${run_converter_log_file}"
./converter_lite --fmk=ONNX --modelFile=${models_path}/${model_name} --outputFile=${ms_models_path}/${model_name}
if [ $? = 0 ]; then
converter_result='converter onnx '${model_name}' pass';echo ${converter_result} >> ${run_converter_result_file}
else
converter_result='converter onnx '${model_name}' failed';echo ${converter_result} >> ${run_converter_result_file}
fi
done < ${models_onnx_config}
# Convert mindspore models:
while read line; do
model_name=${line}
if [[ $model_name == \#* ]]; then
continue
fi
echo ${model_name} >> "${run_converter_log_file}"
echo './converter_lite --fmk=MS --modelFile='${models_path}'/'${model_name}' --outputFile='${ms_models_path}'/'${model_name}'' >> "${run_converter_log_file}"
./converter_lite --fmk=MS --modelFile=${models_path}/${model_name} --outputFile=${ms_models_path}/${model_name}
if [ $? = 0 ]; then
converter_result='converter mindspore '${model_name}' pass';echo ${converter_result} >> ${run_converter_result_file}
else
converter_result='converter mindspore '${model_name}' failed';echo ${converter_result} >> ${run_converter_result_file};return 1
fi
done < ${models_mindspore_config}
# Convert TFLite PostTraining models:
while read line; do
model_name=${line}
if [[ $model_name == \#* ]]; then
continue
fi
echo ${model_name} >> "${run_converter_log_file}"
echo 'convert model name: '${model_name}' begin.'
echo './converter_lite --fmk=TFLITE --modelFile='${models_path}'/'${model_name}' --outputFile='${ms_models_path}'/'${model_name}_posttraining' --quantType=PostTraining --config_file='${models_path}'/'${model_name}'_posttraining.config' >> "${run_converter_log_file}"
./converter_lite --fmk=TFLITE --modelFile=$models_path/${model_name} --outputFile=${ms_models_path}/${model_name}_posttraining --quantType=PostTraining --config_file=${models_path}/${model_name}_posttraining.config
if [ $? = 0 ]; then
converter_result='converter post_training '${model_name}' pass';echo ${converter_result} >> ${run_converter_result_file}
else
converter_result='converter post_training '${model_name}' failed';echo ${converter_result} >> ${run_converter_result_file};return 1
fi
done < ${models_tflite_posttraining_config}
# Convert TFLite AwareTraining models:
while read line; do
model_name=${line}
if [[ $model_name == \#* ]]; then
continue
fi
echo ${model_name} >> "${run_converter_log_file}"
echo './converter_lite --fmk=TFLITE --modelFile='${models_path}'/'${model_name}' --outputFile='${ms_models_path}'/'${model_name}' --quantType=AwareTraining' >> "${run_converter_log_file}"
./converter_lite --fmk=TFLITE --modelFile=${models_path}/${model_name} --outputFile=${ms_models_path}/${model_name} --quantType=AwareTraining
if [ $? = 0 ]; then
converter_result='converter aware_training '${model_name}' pass';echo ${converter_result} >> ${run_converter_result_file}
else
converter_result='converter aware_training '${model_name}' failed';echo ${converter_result} >> ${run_converter_result_file};return 1
fi
done < ${models_tflite_awaretraining_config}
# Copy fp16 ms models:
while read line; do
model_name=${line%.*}
if [[ $model_name == \#* ]]; then
continue
fi
echo 'cp '${ms_models_path}'/'${model_name}'.ms' ${ms_models_path}'/'${model_name}'.fp16.ms'
cp ${ms_models_path}/${model_name}.ms ${ms_models_path}/${model_name}.fp16.ms
if [ $? = 0 ]; then
converter_result='converter fp16 '${model_name}' pass';echo ${converter_result} >> ${run_converter_result_file}
else
converter_result='converter fp16 '${model_name}' failed';echo ${converter_result} >> ${run_converter_result_file};return 1
fi
done < ${models_fp16_config}
# Convert weightquant models:
while read line; do
model_name=${line}
if [[ $model_name == \#* ]]; then
continue
fi
echo ${model_name} >> "${run_converter_log_file}"
echo './converter_lite --fmk=TFLITE --modelFile='${models_path}'/'${model_name}' --outputFile='${ms_models_path}'/'${model_name}'_weightquant --quantType=WeightQuant --bitNum=8 --quantSize=500 --convWeightQuantChannelThreshold=16' >> "${run_converter_log_file}"
./converter_lite --fmk=TFLITE --modelFile=$models_path/${model_name} --outputFile=${ms_models_path}/${model_name}_weightquant --quantType=WeightQuant --bitNum=8 --quantSize=500 --convWeightQuantChannelThreshold=16
if [ $? = 0 ]; then
converter_result='converter weight_quant '${model_name}' pass';echo ${converter_result} >> ${run_converter_result_file}
else
converter_result='converter weight_quant '${model_name}' failed';echo ${converter_result} >> ${run_converter_result_file};return 1
fi
done < ${models_tflite_weightquant_config}
}
# Run on x86 platform:
function Run_x86() {
# Run tflite converted models:
@@ -9,17 +155,14 @@ function Run_x86() {
continue
fi
echo ${model_name} >> "${run_benchmark_log_file}"
echo 'cd '${convertor_path}'/mindspore-lite-'${version}'-runtime-x86-'${process_unit_x86} >> "${run_benchmark_log_file}"
cd ${convertor_path}/mindspore-lite-${version}-runtime-x86-${process_unit_x86} || return 1
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelPath='${ms_models_path}'/'${model_name}'.ms --inDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/'${model_name}'.ms.bin --calibDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/'${model_name}'.ms.out --warmUpLoopCount=1 --loopCount=1' >> "${run_benchmark_log_file}"
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelPath=${ms_models_path}/${model_name}.ms --inDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/${model_name}.ms.bin --calibDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/${model_name}.ms.out --warmUpLoopCount=1 --loopCount=1 >> "${run_benchmark_log_file}"
echo 'cd '${x86_path}'/mindspore-lite-'${version}'-runtime-x86-'${process_unit_x86} >> "${run_benchmark_log_file}"
cd ${x86_path}/mindspore-lite-${version}-runtime-x86-${process_unit_x86} || return 1
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelPath='${ms_models_path}'/'${model_name}'.ms --inDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/'${model_name}'.ms.bin --calibDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/'${model_name}'.ms.out' >> "${run_benchmark_log_file}"
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelPath=${ms_models_path}/${model_name}.ms --inDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/${model_name}.ms.bin --calibDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/${model_name}.ms.out >> "${run_benchmark_log_file}"
if [ $? = 0 ]; then
run_result='x86: '${model_name}' pass'
echo ${run_result} >> ${run_benchmark_result_file}
else
run_result='x86: '${model_name}' failed'
echo ${run_result} >> ${run_benchmark_result_file}
return 1
run_result='x86: '${model_name}' pass'; echo ${run_result} >> ${run_benchmark_result_file}
else
run_result='x86: '${model_name}' failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1
fi
done < ${models_tflite_config}
@@ -30,17 +173,14 @@ function Run_x86() {
continue
fi
echo ${model_name} >> "${run_benchmark_log_file}"
echo 'cd '${convertor_path}'/mindspore-lite-'${version}'-runtime-x86-'${process_unit_x86} >> "${run_benchmark_log_file}"
cd ${convertor_path}/mindspore-lite-${version}-runtime-x86-${process_unit_x86} || return 1
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelPath='${ms_models_path}'/'${model_name}'.ms --inDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/'${model_name}'.ms.bin --calibDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/'${model_name}'.ms.out --warmUpLoopCount=1 --loopCount=1' >> "${run_benchmark_log_file}"
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelPath=${ms_models_path}/${model_name}.ms --inDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/${model_name}.ms.bin --calibDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/${model_name}.ms.out --warmUpLoopCount=1 --loopCount=1 >> "${run_benchmark_log_file}"
echo 'cd '${x86_path}'/mindspore-lite-'${version}'-runtime-x86-'${process_unit_x86} >> "${run_benchmark_log_file}"
cd ${x86_path}/mindspore-lite-${version}-runtime-x86-${process_unit_x86} || return 1
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelPath='${ms_models_path}'/'${model_name}'.ms --inDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/'${model_name}'.ms.bin --calibDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/'${model_name}'.ms.out' >> "${run_benchmark_log_file}"
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelPath=${ms_models_path}/${model_name}.ms --inDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/${model_name}.ms.bin --calibDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/${model_name}.ms.out >> "${run_benchmark_log_file}"
if [ $? = 0 ]; then
run_result='x86: '${model_name}' pass'
echo ${run_result} >> ${run_benchmark_result_file}
run_result='x86: '${model_name}' pass'; echo ${run_result} >> ${run_benchmark_result_file}
else
run_result='x86: '${model_name}' failed'
echo ${run_result} >> ${run_benchmark_result_file}
return 1
run_result='x86: '${model_name}' failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1
fi
done < ${models_caffe_config}
@@ -51,17 +191,14 @@ function Run_x86() {
continue
fi
echo ${model_name} >> "${run_benchmark_log_file}"
echo 'cd '${convertor_path}'/mindspore-lite-'${version}'-runtime-x86-'${process_unit_x86} >> "${run_benchmark_log_file}"
cd ${convertor_path}/mindspore-lite-${version}-runtime-x86-${process_unit_x86} || return 1
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelPath='${ms_models_path}'/'${model_name}'.ms --inDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/'${model_name}'.ms.bin --calibDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/'${model_name}'.ms.out --warmUpLoopCount=1 --loopCount=1' >> "${run_benchmark_log_file}"
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelPath=${ms_models_path}/${model_name}.ms --inDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/${model_name}.ms.bin --calibDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/${model_name}.ms.out --warmUpLoopCount=1 --loopCount=1 >> "${run_benchmark_log_file}"
echo 'cd '${x86_path}'/mindspore-lite-'${version}'-runtime-x86-'${process_unit_x86} >> "${run_benchmark_log_file}"
cd ${x86_path}/mindspore-lite-${version}-runtime-x86-${process_unit_x86} || return 1
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelPath='${ms_models_path}'/'${model_name}'.ms --inDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/'${model_name}'.ms.bin --calibDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/'${model_name}'.ms.out' >> "${run_benchmark_log_file}"
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelPath=${ms_models_path}/${model_name}.ms --inDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/${model_name}.ms.bin --calibDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/${model_name}.ms.out >> "${run_benchmark_log_file}"
if [ $? = 0 ]; then
run_result='x86: '${model_name}' pass'
echo ${run_result} >> ${run_benchmark_result_file}
run_result='x86: '${model_name}' pass'; echo ${run_result} >> ${run_benchmark_result_file}
else
run_result='x86: '${model_name}' failed'
echo ${run_result} >> ${run_benchmark_result_file}
return 1
run_result='x86: '${model_name}' failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1
fi
done < ${models_onnx_config}
@@ -72,17 +209,14 @@ function Run_x86() {
continue
fi
echo ${model_name} >> "${run_benchmark_log_file}"
echo 'cd '${convertor_path}'/mindspore-lite-'${version}'-runtime-x86-'${process_unit_x86} >> "${run_benchmark_log_file}"
cd ${convertor_path}/mindspore-lite-${version}-runtime-x86-${process_unit_x86} || return 1
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelPath='${ms_models_path}'/'${model_name}'_posttraining.ms --inDataPath=/home/workspace/mindspore_dataset/mslite/quantTraining/mnist_calibration_data/00099.bin --calibDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/'${model_name}'_posttraining.ms.out --warmUpLoopCount=1 --loopCount=1' >> "${run_benchmark_log_file}"
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelPath=${ms_models_path}/${model_name}_posttraining.ms --inDataPath=/home/workspace/mindspore_dataset/mslite/quantTraining/mnist_calibration_data/00099.bin --calibDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/${model_name}_posttraining.ms.out --warmUpLoopCount=1 --loopCount=1 >> "${run_benchmark_log_file}"
echo 'cd '${x86_path}'/mindspore-lite-'${version}'-runtime-x86-'${process_unit_x86} >> "${run_benchmark_log_file}"
cd ${x86_path}/mindspore-lite-${version}-runtime-x86-${process_unit_x86} || return 1
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelPath='${ms_models_path}'/'${model_name}'_posttraining.ms --inDataPath=/home/workspace/mindspore_dataset/mslite/quantTraining/mnist_calibration_data/00099.bin --calibDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/'${model_name}'_posttraining.ms.out' >> "${run_benchmark_log_file}"
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelPath=${ms_models_path}/${model_name}_posttraining.ms --inDataPath=/home/workspace/mindspore_dataset/mslite/quantTraining/mnist_calibration_data/00099.bin --calibDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/${model_name}_posttraining.ms.out >> "${run_benchmark_log_file}"
if [ $? = 0 ]; then
run_result='x86: '${model_name}'_posttraining pass'
echo ${run_result} >> ${run_benchmark_result_file}
run_result='x86: '${model_name}' pass'; echo ${run_result} >> ${run_benchmark_result_file}
else
run_result='x86: '${model_name}'_posttraining failed'
echo ${run_result} >> ${run_benchmark_result_file}
return 1
run_result='x86: '${model_name}' failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1
fi
done < ${models_tflite_posttraining_config}
@@ -93,17 +227,14 @@ function Run_x86() {
continue
fi
echo ${model_name} >> "${run_benchmark_log_file}"
echo 'cd '${convertor_path}'/mindspore-lite-'${version}'-runtime-x86-'${process_unit_x86} >> "${run_benchmark_log_file}"
cd ${convertor_path}/mindspore-lite-${version}-runtime-x86-${process_unit_x86} || return 1
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelPath='${ms_models_path}'/'${model_name}'.ms --inDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/${model_name}.ms.bin --calibDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/'${model_name}'.ms.out --warmUpLoopCount=1 --loopCount=1 --numThreads=1' >> "${run_benchmark_log_file}"
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelPath=${ms_models_path}/${model_name}.ms --inDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/${model_name}.ms.bin --calibDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/${model_name}.ms.out --warmUpLoopCount=1 --loopCount=1 --numThreads=1 >> "${run_benchmark_log_file}"
echo 'cd '${x86_path}'/mindspore-lite-'${version}'-runtime-x86-'${process_unit_x86} >> "${run_benchmark_log_file}"
cd ${x86_path}/mindspore-lite-${version}-runtime-x86-${process_unit_x86} || return 1
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelPath='${ms_models_path}'/'${model_name}'.ms --inDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/'${model_name}'.ms.bin --calibDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/'${model_name}'.ms.out --numThreads=1' >> "${run_benchmark_log_file}"
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelPath=${ms_models_path}/${model_name}.ms --inDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/${model_name}.ms.bin --calibDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/${model_name}.ms.out --numThreads=1 >> "${run_benchmark_log_file}"
if [ $? = 0 ]; then
run_result='x86: '${model_name}'_awaretraining pass'
echo ${run_result} >> ${run_benchmark_result_file}
run_result='x86: '${model_name}' pass'; echo ${run_result} >> ${run_benchmark_result_file}
else
run_result='x86: '${model_name}'_awaretraining failed'
echo ${run_result} >> ${run_benchmark_result_file}
return 1
run_result='x86: '${model_name}' failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1
fi
done < ${models_tflite_awaretraining_config}
@@ -114,17 +245,14 @@ function Run_x86() {
continue
fi
echo ${model_name} >> "${run_benchmark_log_file}"
echo 'cd '${convertor_path}'/mindspore-lite-'${version}'-runtime-x86-'${process_unit_x86} >> "${run_benchmark_log_file}"
cd ${convertor_path}/mindspore-lite-${version}-runtime-x86-${process_unit_x86} || return 1
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelPath='${ms_models_path}'/'${model_name}'.ms --inDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/'${model_name}'.ms.bin --calibDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/'${model_name}'.ms.out --warmUpLoopCount=1 --loopCount=1' >> "${run_benchmark_log_file}"
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelPath=${ms_models_path}/${model_name}.ms --inDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/${model_name}.ms.bin --calibDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/${model_name}.ms.out --warmUpLoopCount=1 --loopCount=1 --accuracyThreshold=1.5 >> "${run_benchmark_log_file}"
echo 'cd '${x86_path}'/mindspore-lite-'${version}'-runtime-x86-'${process_unit_x86} >> "${run_benchmark_log_file}"
cd ${x86_path}/mindspore-lite-${version}-runtime-x86-${process_unit_x86} || return 1
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelPath='${ms_models_path}'/'${model_name}'.ms --inDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/'${model_name}'.ms.bin --calibDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/'${model_name}'.ms.out --accuracyThreshold=1.5' >> "${run_benchmark_log_file}"
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelPath=${ms_models_path}/${model_name}.ms --inDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/${model_name}.ms.bin --calibDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/${model_name}.ms.out --accuracyThreshold=1.5 >> "${run_benchmark_log_file}"
if [ $? = 0 ]; then
run_result='x86: '${model_name}' pass'
echo ${run_result} >> ${run_benchmark_result_file}
run_result='x86: '${model_name}' pass'; echo ${run_result} >> ${run_benchmark_result_file}
else
run_result='x86: '${model_name}' failed'
echo ${run_result} >> ${run_benchmark_result_file}
return 1
run_result='x86: '${model_name}' failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1
fi
done < ${models_mindspore_config}
@@ -135,23 +263,49 @@ function Run_x86() {
continue
fi
echo ${model_name} >> "${run_benchmark_log_file}"
echo 'cd '${convertor_path}'/mindspore-lite-'${version}'-runtime-x86-'${process_unit_x86} >> "${run_benchmark_log_file}"
cd ${convertor_path}/mindspore-lite-${version}-runtime-x86-${process_unit_x86} || return 1
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelPath='${ms_models_path}'/'${model_name}'.ms --inDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/'${model_name}'.ms.bin --calibDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/'${model_name}'.ms.out --warmUpLoopCount=1 --loopCount=1' >> "${run_benchmark_log_file}"
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelPath=${ms_models_path}/${model_name}_weightquant.ms --inDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/${model_name}.ms.bin --calibDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/${model_name}.ms.out --warmUpLoopCount=1 --loopCount=1 >> "${run_benchmark_log_file}"
echo 'cd '${x86_path}'/mindspore-lite-'${version}'-runtime-x86-'${process_unit_x86} >> "${run_benchmark_log_file}"
cd ${x86_path}/mindspore-lite-${version}-runtime-x86-${process_unit_x86} || return 1
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelPath='${ms_models_path}'/'${model_name}'_weightquant.ms --inDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/'${model_name}'.ms.bin --calibDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/'${model_name}'.ms.out' >> "${run_benchmark_log_file}"
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelPath=${ms_models_path}/${model_name}_weightquant.ms --inDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/${model_name}.ms.bin --calibDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/${model_name}.ms.out >> "${run_benchmark_log_file}"
if [ $? = 0 ]; then
run_result='x86: '${model_name}'_weightquant pass'
echo ${run_result} >> ${run_benchmark_result_file}
run_result='x86: '${model_name}' pass'; echo ${run_result} >> ${run_benchmark_result_file}
else
run_result='x86: '${model_name}'_weightquant failed'
echo ${run_result} >> ${run_benchmark_result_file}
return 1
run_result='x86: '${model_name}' failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1
fi
done < ${models_tflite_weightquant_config}
}
# Run on arm64 platform:
function Run_arm64() {
# Unzip arm
cd ${arm_path} || exit 1
tar -zxf mindspore-lite-${version}-runtime-arm64-${process_unit_arm64}.tar.gz || exit 1
# If built with minddata, copy the minddata related libs
cd ${benchmark_test_path} || exit 1
if [ -f ${arm_path}/mindspore-lite-${version}-runtime-arm64-${process_unit_arm64}/lib/libminddata-lite.so ]; then
cp -a ${arm_path}/mindspore-lite-${version}-runtime-arm64-${process_unit_arm64}/third_party/libjpeg-turbo/lib/libjpeg.so ${benchmark_test_path}/libjpeg.so || exit 1
cp -a ${arm_path}/mindspore-lite-${version}-runtime-arm64-${process_unit_arm64}/third_party/libjpeg-turbo/lib/libturbojpeg.so ${benchmark_test_path}/libturbojpeg.so || exit 1
cp -a ${arm_path}/mindspore-lite-${version}-runtime-arm64-${process_unit_arm64}/third_party/opencv/lib/libopencv_core.so ${benchmark_test_path}/libopencv_core.so || exit 1
cp -a ${arm_path}/mindspore-lite-${version}-runtime-arm64-${process_unit_arm64}/third_party/opencv/lib/libopencv_imgcodecs.so ${benchmark_test_path}/libopencv_imgcodecs.so || exit 1
cp -a ${arm_path}/mindspore-lite-${version}-runtime-arm64-${process_unit_arm64}/third_party/opencv/lib/libopencv_imgproc.so ${benchmark_test_path}/libopencv_imgproc.so || exit 1
cp -a ${arm_path}/mindspore-lite-${version}-runtime-arm64-${process_unit_arm64}/lib/libminddata-lite.so ${benchmark_test_path}/libminddata-lite.so || exit 1
fi
cp -a ${arm_path}/mindspore-lite-${version}-runtime-arm64-${process_unit_arm64}/lib/libmindspore-lite.so ${benchmark_test_path}/libmindspore-lite.so || exit 1
cp -a ${arm_path}/mindspore-lite-${version}-runtime-arm64-${process_unit_arm64}/lib/liboptimize.so ${benchmark_test_path}/liboptimize.so || exit 1
cp -a ${arm_path}/mindspore-lite-${version}-runtime-arm64-${process_unit_arm64}/benchmark/benchmark ${benchmark_test_path}/benchmark || exit 1
# adb push all needed files to the phone
adb -s ${device_id} push ${benchmark_test_path} /data/local/tmp/ > adb_push_log.txt
# run adb, run session, check the result:
echo 'cd /data/local/tmp/benchmark_test' > adb_cmd.txt
echo 'cp /data/local/tmp/libc++_shared.so ./' >> adb_cmd.txt
echo 'chmod 777 benchmark' >> adb_cmd.txt
adb -s ${device_id} shell < adb_cmd.txt
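Feeding adb_cmd.txt to adb shell runs the three lines above in a single shell session on the device. As a rough equivalent (a sketch only, not part of the script), the same setup could be issued inline:
adb -s ${device_id} shell "cd /data/local/tmp/benchmark_test; cp /data/local/tmp/libc++_shared.so ./; chmod 777 benchmark"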
# Run tflite converted models:
while read line; do
model_name=${line}
@@ -160,32 +314,24 @@ function Run_arm64() {
fi
echo ${model_name} >> "${run_benchmark_log_file}"
echo 'cd /data/local/tmp/benchmark_test' > adb_run_cmd.txt
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelPath='${model_name}'.ms --inDataPath=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --calibDataPath=/data/local/tmp/input_output/output/'${model_name}'.ms.out --warmUpLoopCount=1 --loopCount=1' >> "${run_benchmark_log_file}"
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelPath='${model_name}'.ms --inDataPath=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --calibDataPath=/data/local/tmp/input_output/output/'${model_name}'.ms.out --warmUpLoopCount=1 --loopCount=1' >> adb_run_cmd.txt
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelPath='${model_name}'.ms --inDataPath=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --calibDataPath=/data/local/tmp/input_output/output/'${model_name}'.ms.out' >> "${run_benchmark_log_file}"
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelPath='${model_name}'.ms --inDataPath=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --calibDataPath=/data/local/tmp/input_output/output/'${model_name}'.ms.out' >> adb_run_cmd.txt
adb -s ${device_id} shell < adb_run_cmd.txt >> "${run_benchmark_log_file}"
if [ $? = 0 ]; then
run_result='arm64: '${model_name}' pass'
echo ${run_result} >> ${run_benchmark_result_file}
run_result='arm64: '${model_name}' pass'; echo ${run_result} >> ${run_benchmark_result_file}
else
run_result='arm64: '${model_name}' failed'
echo ${run_result} >> ${run_benchmark_result_file}
return 1
run_result='arm64: '${model_name}' failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1
fi
# run benchmark test without calib data
#echo ${model_name}
echo 'cd /data/local/tmp/benchmark_test' > adb_run_cmd.txt
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelPath='${model_name}'.ms --warmUpLoopCount=1 --loopCount=2' >> "${run_benchmark_log_file}"
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelPath='${model_name}'.ms --warmUpLoopCount=1 --loopCount=2' >> adb_run_cmd.txt
adb -s ${device_id} shell < adb_run_cmd.txt >> "${run_benchmark_log_file}"
if [ $? = 0 ]; then
run_result='arm64: '${model_name}' pass'
echo ${run_result} >> ${run_benchmark_result_file}
run_result='arm64: '${model_name}' pass'; echo ${run_result} >> ${run_benchmark_result_file}
else
run_result='arm64: '${model_name}' failed'
echo ${run_result} >> ${run_benchmark_result_file}
return 1
run_result='arm64: '${model_name}' failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1
fi
#sleep 1
done < ${models_tflite_config}
# Run caffe converted models:
@@ -196,16 +342,13 @@ function Run_arm64() {
fi
echo ${model_name} >> "${run_benchmark_log_file}"
echo 'cd /data/local/tmp/benchmark_test' > adb_run_cmd.txt
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelPath='${model_name}'.ms --inDataPath=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --calibDataPath=/data/local/tmp/input_output/output/'${model_name}'.ms.out --warmUpLoopCount=1 --loopCount=1' >> "${run_benchmark_log_file}"
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelPath='${model_name}'.ms --inDataPath=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --calibDataPath=/data/local/tmp/input_output/output/'${model_name}'.ms.out --warmUpLoopCount=1 --loopCount=1' >> adb_run_cmd.txt
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelPath='${model_name}'.ms --inDataPath=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --calibDataPath=/data/local/tmp/input_output/output/'${model_name}'.ms.out' >> "${run_benchmark_log_file}"
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelPath='${model_name}'.ms --inDataPath=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --calibDataPath=/data/local/tmp/input_output/output/'${model_name}'.ms.out' >> adb_run_cmd.txt
adb -s ${device_id} shell < adb_run_cmd.txt >> "${run_benchmark_log_file}"
if [ $? = 0 ]; then
run_result='arm64: '${model_name}' pass'
echo ${run_result} >> ${run_benchmark_result_file}
run_result='arm64: '${model_name}' pass'; echo ${run_result} >> ${run_benchmark_result_file}
else
run_result='arm64: '${model_name}' failed'
echo ${run_result} >> ${run_benchmark_result_file}
return 1
run_result='arm64: '${model_name}' failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1
fi
# run benchmark test without calib data
echo ${model_name} >> "${run_benchmark_log_file}"
@@ -214,14 +357,10 @@ function Run_arm64() {
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelPath='${model_name}'.ms --warmUpLoopCount=1 --loopCount=2' >> adb_run_cmd.txt
adb -s ${device_id} shell < adb_run_cmd.txt >> "${run_benchmark_log_file}"
if [ $? = 0 ]; then
run_result='arm64: '${model_name}' pass'
echo ${run_result} >> ${run_benchmark_result_file}
run_result='arm64: '${model_name}' pass'; echo ${run_result} >> ${run_benchmark_result_file}
else
run_result='arm64: '${model_name}' failed'
echo ${run_result} >> ${run_benchmark_result_file}
return 1
run_result='arm64: '${model_name}' failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1
fi
#sleep 1
done < ${models_caffe_config}
# Run onnx converted models:
@@ -232,16 +371,13 @@ function Run_arm64() {
fi
echo ${model_name} >> "${run_benchmark_log_file}"
echo 'cd /data/local/tmp/benchmark_test' > adb_run_cmd.txt
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelPath='${model_name}'.ms --inDataPath=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --calibDataPath=/data/local/tmp/input_output/output/'${model_name}'.ms.out --warmUpLoopCount=1 --loopCount=1' >> "${run_benchmark_log_file}"
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelPath='${model_name}'.ms --inDataPath=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --calibDataPath=/data/local/tmp/input_output/output/'${model_name}'.ms.out --warmUpLoopCount=1 --loopCount=1' >> adb_run_cmd.txt
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelPath='${model_name}'.ms --inDataPath=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --calibDataPath=/data/local/tmp/input_output/output/'${model_name}'.ms.out' >> "${run_benchmark_log_file}"
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelPath='${model_name}'.ms --inDataPath=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --calibDataPath=/data/local/tmp/input_output/output/'${model_name}'.ms.out' >> adb_run_cmd.txt
adb -s ${device_id} shell < adb_run_cmd.txt >> "${run_benchmark_log_file}"
if [ $? = 0 ]; then
run_result='arm64: '${model_name}' pass'
echo ${run_result} >> ${run_benchmark_result_file}
run_result='arm64: '${model_name}' pass'; echo ${run_result} >> ${run_benchmark_result_file}
else
run_result='arm64: '${model_name}' failed'
echo ${run_result} >> ${run_benchmark_result_file}
return 1
run_result='arm64: '${model_name}' failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1
fi
# run benchmark test without calib data
echo ${model_name} >> "${run_benchmark_log_file}"
@@ -250,14 +386,10 @@ function Run_arm64() {
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelPath='${model_name}'.ms --warmUpLoopCount=1 --loopCount=2' >> adb_run_cmd.txt
adb -s ${device_id} shell < adb_run_cmd.txt >> "${run_benchmark_log_file}"
if [ $? = 0 ]; then
run_result='arm64: '${model_name}' pass'
echo ${run_result} >> ${run_benchmark_result_file}
run_result='arm64: '${model_name}' pass'; echo ${run_result} >> ${run_benchmark_result_file}
else
run_result='arm64: '${model_name}' failed'
echo ${run_result} >> ${run_benchmark_result_file}
return 1
run_result='arm64: '${model_name}' failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1
fi
#sleep 1
done < ${models_onnx_config}
# Run fp16 converted models:
@@ -268,32 +400,25 @@ function Run_arm64() {
fi
echo ${model_name} >> "${run_benchmark_log_file}"
echo 'cd /data/local/tmp/benchmark_test' > adb_run_cmd.txt
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelPath='${model_name}'.ms --inDataPath=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --calibDataPath=/data/local/tmp/input_output/output/'${model_name}'.ms.out --warmUpLoopCount=1 --loopCount=1 --fp16Priority=true --accuracyThreshold=5' >> "${run_benchmark_log_file}"
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelPath='${model_name}'.ms --inDataPath=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --calibDataPath=/data/local/tmp/input_output/output/'${model_name}'.ms.out --warmUpLoopCount=1 --loopCount=1 --fp16Priority=true --accuracyThreshold=5' >> adb_run_cmd.txt
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelPath='${model_name}'.ms --inDataPath=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --calibDataPath=/data/local/tmp/input_output/output/'${model_name}'.ms.out --fp16Priority=true --accuracyThreshold=5' >> "${run_benchmark_log_file}"
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelPath='${model_name}'.ms --inDataPath=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --calibDataPath=/data/local/tmp/input_output/output/'${model_name}'.ms.out --fp16Priority=true --accuracyThreshold=5' >> adb_run_cmd.txt
adb -s ${device_id} shell < adb_run_cmd.txt >> "${run_benchmark_log_file}"
if [ $? = 0 ]; then
run_result='arm64: '${model_name}' pass'
echo ${run_result} >> ${run_benchmark_result_file}
run_result='arm64: '${model_name}' pass'; echo ${run_result} >> ${run_benchmark_result_file}
else
run_result='arm64: '${model_name}' failed'
echo ${run_result} >> ${run_benchmark_result_file}
return 1
run_result='arm64: '${model_name}' failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1
fi
# run benchmark test without calib data
echo ${model_name} >> "${run_benchmark_log_file}"
echo 'cd /data/local/tmp/benchmark_test' > adb_run_cmd.txt
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelPath='${model_name}'.ms --warmUpLoopCount=1 --loopCount=2 --fp16Priority=true --accuracyThreshold=5' >> "${run_benchmark_log_file}"
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelPath='${model_name}'.ms --warmUpLoopCount=1 --loopCount=2 --fp16Priority=true --accuracyThreshold=5' >> adb_run_cmd.txt
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelPath='${model_name}'.ms --warmUpLoopCount=1 --loopCount=2 --fp16Priority=true' >> "${run_benchmark_log_file}"
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelPath='${model_name}'.ms --warmUpLoopCount=1 --loopCount=2 --fp16Priority=true' >> adb_run_cmd.txt
adb -s ${device_id} shell < adb_run_cmd.txt >> "${run_benchmark_log_file}"
if [ $? = 0 ]; then
run_result='arm64: '${model_name}' pass'
echo ${run_result} >> ${run_benchmark_result_file}
run_result='arm64: '${model_name}' pass'; echo ${run_result} >> ${run_benchmark_result_file}
else
run_result='arm64: '${model_name}' failed'
echo ${run_result} >> ${run_benchmark_result_file}
return 1
run_result='arm64: '${model_name}' failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1
fi
#sleep 1
done < ${models_fp16_config}
# Run tflite aware training quantization converted models:
@@ -304,32 +429,25 @@ function Run_arm64() {
fi
echo ${model_name} >> "${run_benchmark_log_file}"
echo 'cd /data/local/tmp/benchmark_test' > adb_run_cmd.txt
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelPath='${model_name}'.ms --inDataPath=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --calibDataPath=/data/local/tmp/input_output/output/'${model_name}'.ms.out --warmUpLoopCount=1 --loopCount=1 --fp16Priority=true --accuracyThreshold=5' >> "${run_benchmark_log_file}"
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelPath='${model_name}'.ms --inDataPath=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --calibDataPath=/data/local/tmp/input_output/output/'${model_name}'.ms.out --warmUpLoopCount=1 --loopCount=1 --fp16Priority=true --accuracyThreshold=5' >> adb_run_cmd.txt
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelPath='${model_name}'.ms --inDataPath=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --calibDataPath=/data/local/tmp/input_output/output/'${model_name}'.ms.out' >> "${run_benchmark_log_file}"
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelPath='${model_name}'.ms --inDataPath=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --calibDataPath=/data/local/tmp/input_output/output/'${model_name}'.ms.out' >> adb_run_cmd.txt
adb -s ${device_id} shell < adb_run_cmd.txt >> "${run_benchmark_log_file}"
if [ $? = 0 ]; then
run_result='arm64: '${model_name}' pass'
echo ${run_result} >> ${run_benchmark_result_file}
run_result='arm64: '${model_name}' pass'; echo ${run_result} >> ${run_benchmark_result_file}
else
run_result='arm64: '${model_name}' failed'
echo ${run_result} >> ${run_benchmark_result_file}
return 1
run_result='arm64: '${model_name}' failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1
fi
# run benchmark test without calib data
echo ${model_name} >> "${run_benchmark_log_file}"
echo 'cd /data/local/tmp/benchmark_test' > adb_run_cmd.txt
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelPath='${model_name}'.ms --warmUpLoopCount=1 --loopCount=2 --fp16Priority=true --accuracyThreshold=5' >> "${run_benchmark_log_file}"
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelPath='${model_name}'.ms --warmUpLoopCount=1 --loopCount=2 --fp16Priority=true --accuracyThreshold=5' >> adb_run_cmd.txt
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelPath='${model_name}'.ms --warmUpLoopCount=1 --loopCount=2' >> "${run_benchmark_log_file}"
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelPath='${model_name}'.ms --warmUpLoopCount=1 --loopCount=2' >> adb_run_cmd.txt
adb -s ${device_id} shell < adb_run_cmd.txt >> "${run_benchmark_log_file}"
if [ $? = 0 ]; then
run_result='arm64: '${model_name}' pass'
echo ${run_result} >> ${run_benchmark_result_file}
run_result='arm64: '${model_name}' pass'; echo ${run_result} >> ${run_benchmark_result_file}
else
run_result='arm64: '${model_name}' failed'
echo ${run_result} >> ${run_benchmark_result_file}
return 1
run_result='arm64: '${model_name}' failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1
fi
#sleep 1
done < ${models_tflite_awaretraining_config}
# Run gpu tflite converted models:
@@ -340,16 +458,13 @@ function Run_arm64() {
fi
echo ${model_name} >> "${run_benchmark_log_file}"
echo 'cd /data/local/tmp/benchmark_test' > adb_run_cmd.txt
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --device=GPU --modelPath='${model_name}'.ms --inDataPath=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --calibDataPath=/data/local/tmp/input_output/output/'${model_name}'.ms.out --warmUpLoopCount=1 --loopCount=1' >> "${run_benchmark_log_file}"
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --device=GPU --modelPath='${model_name}'.ms --inDataPath=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --calibDataPath=/data/local/tmp/input_output/output/'${model_name}'.ms.out --warmUpLoopCount=1 --loopCount=1' >> adb_run_cmd.txt
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --device=GPU --modelPath='${model_name}'.ms --inDataPath=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --calibDataPath=/data/local/tmp/input_output/output/'${model_name}'.ms.out' >> "${run_benchmark_log_file}"
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --device=GPU --modelPath='${model_name}'.ms --inDataPath=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --calibDataPath=/data/local/tmp/input_output/output/'${model_name}'.ms.out' >> adb_run_cmd.txt
adb -s ${device_id} shell < adb_run_cmd.txt >> "${run_benchmark_log_file}"
if [ $? = 0 ]; then
run_result='arm64_gpu: '${model_name}' pass'
echo ${run_result} >> ${run_benchmark_result_file}
run_result='arm64_gpu: '${model_name}' pass'; echo ${run_result} >> ${run_benchmark_result_file}
else
run_result='arm64_gpu: '${model_name}' failed'
echo ${run_result} >> ${run_benchmark_result_file}
return 1
run_result='arm64_gpu: '${model_name}' failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1
fi
# run benchmark test without calib data
#echo ${model_name}
@@ -358,14 +473,10 @@ function Run_arm64() {
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --device=GPU --modelPath='${model_name}'.ms --warmUpLoopCount=1 --loopCount=2' >> adb_run_cmd.txt
adb -s ${device_id} shell < adb_run_cmd.txt >> "${run_benchmark_log_file}"
if [ $? = 0 ]; then
run_result='arm64_gpu: '${model_name}' pass'
echo ${run_result} >> ${run_benchmark_result_file}
run_result='arm64_gpu: '${model_name}' pass'; echo ${run_result} >> ${run_benchmark_result_file}
else
run_result='arm64_gpu: '${model_name}' failed'
echo ${run_result} >> ${run_benchmark_result_file}
return 1
run_result='arm64_gpu: '${model_name}' failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1
fi
#sleep 1
done < ${models_tflite_gpu_config}
# Run mindir converted models:
@@ -376,16 +487,13 @@ function Run_arm64() {
fi
echo ${model_name} >> "${run_benchmark_log_file}"
echo 'cd /data/local/tmp/benchmark_test' > adb_run_cmd.txt
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelPath='${model_name}'.ms --inDataPath=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --calibDataPath=/data/local/tmp/input_output/output/'${model_name}'.ms.out --warmUpLoopCount=1 --loopCount=1' >> "${run_benchmark_log_file}"
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelPath='${model_name}'.ms --inDataPath=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --calibDataPath=/data/local/tmp/input_output/output/'${model_name}'.ms.out --warmUpLoopCount=1 --loopCount=1' >> adb_run_cmd.txt
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelPath='${model_name}'.ms --inDataPath=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --calibDataPath=/data/local/tmp/input_output/output/'${model_name}'.ms.out' >> "${run_benchmark_log_file}"
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelPath='${model_name}'.ms --inDataPath=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --calibDataPath=/data/local/tmp/input_output/output/'${model_name}'.ms.out' >> adb_run_cmd.txt
adb -s ${device_id} shell < adb_run_cmd.txt >> "${run_benchmark_log_file}"
if [ $? = 0 ]; then
run_result='arm64: '${model_name}' pass'
echo ${run_result} >> ${run_benchmark_result_file}
run_result='arm64: '${model_name}' pass'; echo ${run_result} >> ${run_benchmark_result_file}
else
run_result='arm64: '${model_name}' failed'
echo ${run_result} >> ${run_benchmark_result_file}
return 1
run_result='arm64: '${model_name}' failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1
fi
# run benchmark test without calib data
echo ${model_name} >> "${run_benchmark_log_file}"
@@ -394,16 +502,11 @@ function Run_arm64() {
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelPath='${model_name}'.ms --warmUpLoopCount=1 --loopCount=2' >> adb_run_cmd.txt
adb -s ${device_id} shell < adb_run_cmd.txt >> "${run_benchmark_log_file}"
if [ $? = 0 ]; then
run_result='arm64: '${model_name}' pass'
echo ${run_result} >> ${run_benchmark_result_file}
run_result='arm64: '${model_name}' pass'; echo ${run_result} >> ${run_benchmark_result_file}
else
run_result='arm64: '${model_name}' failed'
echo ${run_result} >> ${run_benchmark_result_file}
return 1
run_result='arm64: '${model_name}' failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1
fi
#sleep 1
done < ${models_mindspore_config}
}
# Print start msg before run testcase
@@ -419,7 +522,6 @@ function MS_PRINT_TESTCASE_END_MSG() {
echo -e "-----------------------------------------------------------------------------------------------------------------------------------"
}
basepath=$(pwd)
echo ${basepath}
#set -e
@@ -432,8 +534,8 @@ while getopts "a:c:m:d:" opt; do
echo "arm_path is ${OPTARG}"
;;
c)
convertor_path=${OPTARG}
echo "convertor_path is ${OPTARG}"
x86_path=${OPTARG}
echo "x86_path is ${OPTARG}"
;;
m)
models_path=${OPTARG}
@@ -450,41 +552,18 @@ while getopts "a:c:m:d:" opt; do
done
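For reference, the getopts handling above implies an invocation of the following shape. This is only a hypothetical example: the three directories are placeholders, and -d is assumed to carry the adb device id that the script later uses as ${device_id}.
bash run_benchmark_nets.sh -a /path/to/arm64_packages -c /path/to/x86_packages -m /path/to/models -d <adb_device_id>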
mkdir train
mv ${arm_path}/*runtime-*train* ./train
file_name=$(ls ${arm_path}/*runtime-arm64*.tar.gz)
IFS="-" read -r -a file_name_array <<< "$file_name"
version=${file_name_array[2]}
IFS="." read -r -a suffix <<< "${file_name_array[-1]}"
process_unit_arm=${suffix[0]}
process_unit_arm64=${suffix[0]}
file_name=$(ls ${convertor_path}/*runtime-x86*.tar.gz)
file_name=$(ls ${x86_path}/*runtime-x86*.tar.gz)
IFS="-" read -r -a file_name_array <<< "$file_name"
IFS="." read -r -a suffix <<< "${file_name_array[-1]}"
process_unit_x86=${suffix[0]}
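To make the filename parsing above concrete, here is a worked example with a hypothetical x86 package path (it assumes the directory portion of the path contains no extra '-' characters, which would shift the array indices):
# file_name=/packages/mindspore-lite-1.0.0-runtime-x86-cpu.tar.gz    (hypothetical path and version)
# IFS="-" split: (/packages/mindspore  lite  1.0.0  runtime  x86  cpu.tar.gz)
# version=${file_name_array[2]}                 -> 1.0.0
# IFS="." split of "${file_name_array[-1]}":    (cpu  tar  gz)
# process_unit_x86=${suffix[0]}                 -> cpu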
# Unzip arm
cd ${arm_path} || exit 1
tar -zxf mindspore-lite-${version}-runtime-arm64-${process_unit_arm}.tar.gz || exit 1
# Unzip x86 runtime and convertor
cd ${convertor_path} || exit 1
tar -zxf mindspore-lite-${version}-runtime-x86-${process_unit_x86}.tar.gz || exit 1
tar -zxf mindspore-lite-${version}-converter-ubuntu.tar.gz || exit 1
cd ${convertor_path}/mindspore-lite-${version}-converter-ubuntu || exit 1
cp converter/converter_lite ./ || exit 1
export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:./lib/:./third_party/protobuf/lib:./third_party/flatbuffers/lib
# Convert the models
cd ${convertor_path}/mindspore-lite-${version}-converter-ubuntu || exit 1
# Write result to temp file
run_benchmark_result_file=${basepath}/run_benchmark_result.txt
echo ' ' > ${run_benchmark_result_file}
run_benchmark_log_file=${basepath}/run_benchmark_log.txt
echo 'run benchmark logs: ' > ${run_benchmark_log_file}
# Set models config filepath
models_tflite_config=${basepath}/models_tflite.cfg
models_caffe_config=${basepath}/models_caffe.cfg
@@ -495,147 +574,60 @@ models_onnx_config=${basepath}/models_onnx.cfg
models_fp16_config=${basepath}/models_fp16.cfg
models_mindspore_config=${basepath}/models_mindspore.cfg
models_tflite_gpu_config=${basepath}/models_tflite_gpu.cfg
Convert_status=0
rm -rf ${basepath}/ms_models
mkdir -p ${basepath}/ms_models
ms_models_path=${basepath}/ms_models
# Convert tflite models:
while read line; do
model_name=${line}
if [[ $model_name == \#* ]]; then
continue
fi
echo ${model_name} >> "${run_benchmark_log_file}"
echo './converter_lite --fmk=TFLITE --modelFile='${models_path}'/'${model_name}' --outputFile='${ms_models_path}'/'${model_name}'' >> "${run_benchmark_log_file}"
./converter_lite --fmk=TFLITE --modelFile=$models_path/${model_name} --outputFile=${ms_models_path}/${model_name} || Convert_status=$?
done < ${models_tflite_config}
# Write converter result to temp file
run_converter_log_file=${basepath}/run_converter_log.txt
echo ' ' > ${run_converter_log_file}
# Convert caffe models:
while read line; do
model_name=${line}
if [[ $model_name == \#* ]]; then
continue
fi
echo ${model_name} >> "${run_benchmark_log_file}"
#pwd >> ${run_benchmark_log_file}
echo './converter_lite --fmk=CAFFE --modelFile='${models_path}'/'${model_name}'.prototxt --weightFile='${models_path}'/'${model_name}'.caffemodel --outputFile='${ms_models_path}'/'${model_name}'' >> "${run_benchmark_log_file}"
./converter_lite --fmk=CAFFE --modelFile=${models_path}/${model_name}.prototxt --weightFile=${models_path}/${model_name}.caffemodel --outputFile=${ms_models_path}/${model_name} || Convert_status=$?
done < ${models_caffe_config}
run_converter_result_file=${basepath}/run_converter_result.txt
echo ' ' > ${run_converter_result_file}
# Convert onnx models:
while read line; do
model_name=${line}
if [[ $model_name == \#* ]]; then
continue
fi
echo ${model_name} >> "${run_benchmark_log_file}"
#pwd >> ${run_benchmark_log_file}
echo './converter_lite --fmk=ONNX --modelFile='${models_path}'/'${model_name}' --outputFile='${ms_models_path}'/'${model_name}'' >> "${run_benchmark_log_file}"
./converter_lite --fmk=ONNX --modelFile=${models_path}/${model_name} --outputFile=${ms_models_path}/${model_name} || Convert_status=$?
done < ${models_onnx_config}
# Run converter
echo "start Run converter ..."
Run_Converter &
Run_converter_PID=$!
sleep 1
# Convert mindspore models:
while read line; do
model_name=${line}
if [[ $model_name == \#* ]]; then
continue
fi
echo ${model_name} >> "${run_benchmark_log_file}"
pwd >> "${run_benchmark_log_file}"
echo './converter_lite --fmk=MS --modelFile='${models_path}'/'${model_name}' --outputFile='${ms_models_path}'/'${model_name}'' >> "${run_benchmark_log_file}"
./converter_lite --fmk=MS --modelFile=${models_path}/${model_name} --outputFile=${ms_models_path}/${model_name} || Convert_status=$?
done < ${models_mindspore_config}
wait ${Run_converter_PID}
Run_converter_status=$?
# Convert TFLite PostTraining models:
while read line; do
model_name=${line}
if [[ $model_name == \#* ]]; then
continue
fi
echo ${model_name} >> "${run_benchmark_log_file}"
echo 'convert model name: '${model_name}' begin.'
echo './converter_lite --fmk=TFLITE --modelFile='${models_path}'/'${model_name}' --outputFile='${ms_models_path}'/'${model_name}_posttraining' --quantType=PostTraining --config_file='${models_path}'/'${model_name}'_posttraining.config' >> "${run_benchmark_log_file}"
./converter_lite --fmk=TFLITE --modelFile=$models_path/${model_name} --outputFile=${ms_models_path}/${model_name}_posttraining --quantType=PostTraining --config_file=${models_path}/${model_name}_posttraining.config || Convert_status=$?
done < ${models_tflite_posttraining_config}
# Convert TFLite AwareTraining models:
while read line; do
model_name=${line}
if [[ $model_name == \#* ]]; then
continue
fi
echo ${model_name} >> "${run_benchmark_log_file}"
echo './converter_lite --fmk=TFLITE --modelFile='${models_path}'/'${model_name}' --outputFile='${ms_models_path}'/'${model_name}' --quantType=AwareTraining' >> "${run_benchmark_log_file}"
./converter_lite --fmk=TFLITE --modelFile=${models_path}/${model_name} --outputFile=${ms_models_path}/${model_name} --quantType=AwareTraining || Convert_status=$?
done < ${models_tflite_awaretraining_config}
function Print_Converter_Result() {
MS_PRINT_TESTCASE_END_MSG
while read line; do
arr=("${line}")
printf "%-15s %-20s %-90s %-7s\n" ${arr[0]} ${arr[1]} ${arr[2]} ${arr[3]}
done < ${run_converter_result_file}
MS_PRINT_TESTCASE_END_MSG
}
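Each line of ${run_converter_result_file} carries four space-separated fields (the word 'converter', the framework or quant type, the model name, and pass/failed), so the table printed by Print_Converter_Result looks roughly like the sketch below; the model names are hypothetical:
converter       tflite               some_tflite_model       pass
converter       weight_quant         some_tflite_model       failed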
# Copy fp16 ms models:
while read line; do
model_name=${line%.*}
if [[ $model_name == \#* ]]; then
continue
fi
echo 'cp '${ms_models_path}'/'${model_name}'.ms' ${ms_models_path}'/'${model_name}'.fp16.ms'
cp ${ms_models_path}/${model_name}.ms ${ms_models_path}/${model_name}.fp16.ms
done < ${models_fp16_config}
# Convert weightquant models:
while read line; do
model_name=${line}
if [[ $model_name == \#* ]]; then
continue
fi
echo ${model_name} >> "${run_benchmark_log_file}"
echo './converter_lite --fmk=TFLITE --modelFile='${models_path}'/'${model_name}' --outputFile='${ms_models_path}'/'${model_name}'--quantType=WeightQuant --bitNum=8 --quantSize=500 --convWeightQuantChannelThreshold=16' >> "${run_benchmark_log_file}"
./converter_lite --fmk=TFLITE --modelFile=$models_path/${model_name} --outputFile=${ms_models_path}/${model_name}_weightquant --quantType=WeightQuant --bitNum=8 --quantSize=500 --convWeightQuantChannelThreshold=16 || Convert_status=$?
done < ${models_tflite_weightquant_config}
# Check all result and return value
if [[ ${Convert_status} = 0 ]];then
echo "convert is ended"
# Check converter result and return value
if [[ ${Run_converter_status} = 0 ]];then
echo "Run converter success"
Print_Converter_Result &
else
echo "convert failed"
cat ${run_benchmark_log_file}
echo "Run converter failed"
cat ${run_converter_log_file}
Print_Converter_Result &
exit 1
fi
# Push to the arm and run benchmark:
# First: copy the benchmark executable and .so files to the server that is connected to the phone
echo "Push files to the arm and run benchmark"
rm -rf ${basepath}/benchmark_test
mkdir -p ${basepath}/benchmark_test
benchmark_test_path=${basepath}/benchmark_test
cd ${benchmark_test_path} || exit 1
# If build with minddata, copy the minddata related libs
if [ -f ${arm_path}/mindspore-lite-${version}-runtime-arm64-${process_unit_arm}/lib/libminddata-lite.so ]; then
cp -a ${arm_path}/mindspore-lite-${version}-runtime-arm64-${process_unit_arm}/third_party/libjpeg-turbo/lib/libjpeg.so ${benchmark_test_path}/libjpeg.so || exit 1
cp -a ${arm_path}/mindspore-lite-${version}-runtime-arm64-${process_unit_arm}/third_party/libjpeg-turbo/lib/libturbojpeg.so ${benchmark_test_path}/libturbojpeg.so || exit 1
cp -a ${arm_path}/mindspore-lite-${version}-runtime-arm64-${process_unit_arm}/third_party/opencv/lib/libopencv_core.so ${benchmark_test_path}/libopencv_core.so || exit 1
cp -a ${arm_path}/mindspore-lite-${version}-runtime-arm64-${process_unit_arm}/third_party/opencv/lib/libopencv_imgcodecs.so ${benchmark_test_path}/libopencv_imgcodecs.so || exit 1
cp -a ${arm_path}/mindspore-lite-${version}-runtime-arm64-${process_unit_arm}/third_party/opencv/lib/libopencv_imgproc.so ${benchmark_test_path}/libopencv_imgproc.so || exit 1
cp -a ${arm_path}/mindspore-lite-${version}-runtime-arm64-${process_unit_arm}/lib/libminddata-lite.so ${benchmark_test_path}/libminddata-lite.so || exit 1
fi
# Write benchmark result to temp file
run_benchmark_result_file=${basepath}/run_benchmark_result.txt
echo ' ' > ${run_benchmark_result_file}
cp -a ${arm_path}/mindspore-lite-${version}-runtime-arm64-${process_unit_arm}/lib/libmindspore-lite.so ${benchmark_test_path}/libmindspore-lite.so || exit 1
cp -a ${arm_path}/mindspore-lite-${version}-runtime-arm64-${process_unit_arm}/lib/liboptimize.so ${benchmark_test_path}/liboptimize.so || exit 1
cp -a ${arm_path}/mindspore-lite-${version}-runtime-arm64-${process_unit_arm}/benchmark/benchmark ${benchmark_test_path}/benchmark || exit 1
run_benchmark_log_file=${basepath}/run_benchmark_log.txt
echo 'run benchmark logs: ' > ${run_benchmark_log_file}
# Copy the MindSpore models:
echo "Push files to the arm and run benchmark"
benchmark_test_path=${basepath}/benchmark_test
rm -rf ${benchmark_test_path}
mkdir -p ${benchmark_test_path}
cp -a ${ms_models_path}/*.ms ${benchmark_test_path} || exit 1
# Second: adb push all needed files to the phone
adb -s ${device_id} push ${benchmark_test_path} /data/local/tmp/ > adb_push_log.txt
# Third: run adb, run session, check the result:
echo 'cd /data/local/tmp/benchmark_test' > adb_cmd.txt
echo 'cp /data/local/tmp/libc++_shared.so ./' >> adb_cmd.txt
echo 'chmod 777 benchmark' >> adb_cmd.txt
adb -s ${device_id} shell < adb_cmd.txt
# Run on x86
echo "start Run x86 ..."
Run_x86 &
@@ -646,6 +638,7 @@ sleep 1
echo "start Run arm64 ..."
Run_arm64 &
Run_arm64_PID=$!
sleep 1
wait ${Run_x86_PID}
Run_x86_status=$?
@@ -653,29 +646,23 @@ Run_x86_status=$?
wait ${Run_arm64_PID}
Run_arm64_status=$?
# Print all results:
MS_PRINT_TESTCASE_START_MSG
while read line; do
arr=("${line}")
printf "%-10s %-110s %-7s\n" ${arr[0]} ${arr[1]} ${arr[2]}
done < ${run_benchmark_result_file}
MS_PRINT_TESTCASE_END_MSG
# Check all result and return value
if [[ ${Run_x86_status} = 0 ]] && [[ ${Run_arm64_status} = 0 ]];then
echo "Run_x86 and Run_arm64 is ended"
exit 0
else
echo "run failed"
cat ${run_benchmark_log_file}
#print the result table again:
function Print_Benchmark_Result() {
MS_PRINT_TESTCASE_START_MSG
while read line; do
arr=("${line}")
printf "%-10s %-110s %-7s\n" ${arr[0]} ${arr[1]} ${arr[2]}
done < ${run_benchmark_result_file}
MS_PRINT_TESTCASE_END_MSG
}
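Each result line written by Run_x86 and Run_arm64 has three fields (target, model name, pass/failed), so Print_Benchmark_Result prints rows along these lines; the model name is hypothetical:
x86:        some_tflite_model       pass
arm64_gpu:  some_tflite_model       failed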
# Check benchmark result and return value
if [[ ${Run_x86_status} = 0 ]] && [[ ${Run_arm64_status} = 0 ]];then
echo "Run_x86 and Run_arm64 is ended"
Print_Benchmark_Result &
exit 0
else
echo "Run failed"
cat ${run_benchmark_log_file}
Print_Benchmark_Result &
exit 1
fi