forked from mindspore-Ecosystem/mindspore

!4700 add aware quant testcase

Merge pull request !4700 from cjh9368/aware_quant

commit 039d87c5ae
@@ -65,9 +65,6 @@ void MatrixMultiAdd(float *c11, float *c12, float *c21, float *c22, float *x_ptr
 void PostConvFuncComm(const float *src_ptr_, float *out_ptr, const float *bias_ptr, size_t output_channel,
                       size_t plane_size, size_t stride, bool is_relu, bool is_relu6, int size) {
   if (size == 0) {
     return;
   }
   for (int oc = 0; oc < output_channel; oc++) {
     int oc_div = oc / size, oc_mod = oc % size;
     for (int hw = 0; hw < plane_size; hw++) {
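The hunk above is cut off inside the inner loop, and the viewer does not show which three lines the old side lost. For orientation only, here is a minimal sketch of how a post-convolution pass of this shape typically completes, assuming a channel-blocked source layout of block width `size` (4 for a C4 layout) and a plane-major destination; `PostConvFuncCommSketch` and its index math are illustrative, not the committed code:

void PostConvFuncCommSketch(const float *src_ptr_, float *out_ptr, const float *bias_ptr,
                            size_t output_channel, size_t plane_size, size_t stride,
                            bool is_relu, bool is_relu6, int size) {
  if (size == 0) {
    return;
  }
  for (int oc = 0; oc < (int)output_channel; oc++) {
    int oc_div = oc / size, oc_mod = oc % size;  // position of channel oc within its block
    for (int hw = 0; hw < (int)plane_size; hw++) {
      int src_index = oc_div * size * (int)plane_size + hw * size + oc_mod;  // blocked source
      int dst_index = hw * (int)stride + oc;                                 // plane-major dest
      float value = src_ptr_[src_index];
      if (bias_ptr) {
        value += bias_ptr[oc];
      }
      if (is_relu || is_relu6) {
        value = value < 0.0f ? 0.0f : value;  // relu lower clamp
      }
      if (is_relu6) {
        value = value > 6.0f ? 6.0f : value;  // relu6 upper clamp
      }
      out_ptr[dst_index] = value;
    }
  }
}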
@@ -54,7 +54,7 @@ void DepthwiseBorderPixelInt8(int8_t *dst, const int16_t *src, const int16_t *we
       }
       tmp_buffer[c] += bias[c];
       tmp_buffer[c] = RoundingDivideByPOT(
-          SaturatingRoundingDoublingHighMul(tmp_buffer[c] * (1 << (unsigned int)left), multiplier), right);
+          SaturatingRoundingDoublingHighMul(tmp_buffer[c] * (1 << (unsigned int)left), multiplier), -right);
       tmp_buffer[c] += out_zp;
       tmp_buffer[c] = MSMAX(tmp_buffer[c], acc_min);
       tmp_buffer[c] = MSMIN(tmp_buffer[c], acc_max);
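The one-line fix negates the exponent handed to RoundingDivideByPOT. These helper names match the standard gemmlowp fixed-point requantization scheme; assuming the kernel stores the right shift with a non-positive sign convention (my reading of the fix, not stated in the diff), the pieces fit together as in this self-contained sketch:

#include <stdint.h>

// Fixed-point multiply: (a * b * 2) >> 32 with round-to-nearest, saturating the
// single overflow case INT32_MIN * INT32_MIN, as in gemmlowp.
static int32_t SaturatingRoundingDoublingHighMul(int32_t a, int32_t b) {
  if (a == INT32_MIN && b == INT32_MIN) return INT32_MAX;
  int64_t ab = (int64_t)a * (int64_t)b;
  int64_t nudge = ab >= 0 ? (1 << 30) : (1 - (1 << 30));
  return (int32_t)((ab + nudge) / (1ll << 31));
}

// Divide by 2^exponent, rounding to nearest with ties away from zero.
// Assumes 0 <= exponent <= 31.
static int32_t RoundingDivideByPOT(int32_t x, int exponent) {
  int32_t mask = (int32_t)(((int64_t)1 << exponent) - 1);
  int32_t remainder = x & mask;
  int32_t threshold = (mask >> 1) + (x < 0 ? 1 : 0);
  return (x >> exponent) + (remainder > threshold ? 1 : 0);
}

// Requantize one int32 accumulator the way the fixed line does: scale by the
// quantized multiplier, shift down, re-center on the output zero point, clamp.
static int32_t Requantize(int32_t acc, int left, int right, int32_t multiplier,
                          int32_t out_zp, int32_t acc_min, int32_t acc_max) {
  acc = RoundingDivideByPOT(
      SaturatingRoundingDoublingHighMul(acc * (1 << (unsigned int)left), multiplier), -right);
  acc += out_zp;
  acc = acc < acc_min ? acc_min : acc;
  return acc > acc_max ? acc_max : acc;
}

Shifting by a negative exponent is undefined behavior in C, which is presumably why the call site must negate the stored value before passing it in.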
@@ -0,0 +1 @@
+video_infer.tflite
@@ -86,6 +86,27 @@ function Run_x86() {
         fi
     done < ${models_tflite_posttraining_config}
 
+    # Run tflite aware training quantization converted models:
+    while read line; do
+        model_name=${line}
+        if [[ $model_name == \#* ]]; then
+            continue
+        fi
+        echo ${model_name}
+        echo 'cd '${convertor_path}'/MSLite-*-linux_x86_64'
+        cd ${convertor_path}/MSLite-*-linux_x86_64 || return 1
+        echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib;./benchmark/benchmark --modelPath='${ms_models_path}'/'${model_name}'.ms --inDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/'${model_name}'.ms.bin --calibDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/'${model_name}'.ms.out --warmUpLoopCount=1 --loopCount=1 --numThreads=1' || return 1
+        export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib;./benchmark/benchmark --modelPath=${ms_models_path}/${model_name}.ms --inDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/${model_name}.ms.bin --calibDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/${model_name}.ms.out --warmUpLoopCount=1 --loopCount=1 --numThreads=1
+        if [ $? = 0 ]; then
+            run_result='Run_x86: '${model_name}'_awaretraining pass'
+            echo ${run_result} >> ${run_benchmark_result_file}
+        else
+            run_result='Run_x86: '${model_name}'_awaretraining fail <<===========================this is the failed case'
+            echo ${run_result} >> ${run_benchmark_result_file}
+            return 1
+        fi
+    done < ${models_tflite_awaretraining_config}
+
     # Run mindspore converted models:
     while read line; do
         model_name=${line}
@@ -237,6 +258,7 @@ cd ${convertor_path}/MSLite-*-linux_x86_64 || exit 1
 # Set models config filepath
 models_tflite_config=${basepath}/models_tflite.cfg
 models_caffe_config=${basepath}/models_caffe.cfg
+models_tflite_awaretraining_config=${basepath}/models_tflite_awaretraining.cfg
 models_tflite_posttraining_config=${basepath}/models_tflite_posttraining.cfg
 models_onnx_config=${basepath}/models_onnx.cfg
 models_mindspore_config=${basepath}/models_mindspore.cfg
@@ -303,6 +325,17 @@ while read line; do
     ./converter_lite --fmk=TFLITE --modelFile=$models_path/${model_name} --outputFile=${ms_models_path}/${model_name}_posttraining --quantType=PostTraining --config_file=${models_path}/${model_name}_posttraining.config || exit 1
 done < ${models_tflite_posttraining_config}
 
+# Convert TFLite AwareTraining models:
+while read line; do
+    model_name=${line}
+    if [[ $model_name == \#* ]]; then
+        continue
+    fi
+    echo ${model_name}
+    echo './converter_lite --fmk=TFLITE --modelFile='${models_path}'/'${model_name}' --outputFile='${ms_models_path}'/'${model_name}' --quantType=AwareTraining'
+    ./converter_lite --fmk=TFLITE --modelFile=${models_path}/${model_name} --outputFile=${ms_models_path}/${model_name} --quantType=AwareTraining || exit 1
+done < ${models_tflite_awaretraining_config}
+
 # Push to the arm and run benchmark:
 # First:copy benchmark exe and so files to the server which connected to the phone
 rm -rf ${basepath}/benchmark_test
@@ -152,6 +152,9 @@ STATUS TfliteModelParser::ConvertTensor(const std::unique_ptr<tflite::SubGraphT>
   auto isConst = (!tensor_buffer->data.empty());
   if (isConst) {
     CopyConstTensorData(tflite_model_buffer, tflite_tensor.get(), tensor.get());
+  } else if (tensor->dataType == TypeId::kNumberTypeUInt8) {
+    // set in/out tensor to int8 to fit ms-lite op
+    tensor->dataType = TypeId::kNumberTypeInt8;
   }
 
   // set tensor attr
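Retagging a uint8 activation tensor as int8 keeps the same real values only if the data and the zero point shift together by 128; for the non-const tensors handled in this branch the data is produced at runtime, so only the type tag changes here. As a reminder of the underlying identity, a minimal sketch with hypothetical names (QuantParam and ShiftUInt8ToInt8 are illustrative, not the parser's API):

#include <cstdint>
#include <vector>

struct QuantParam {  // hypothetical holder for one tensor's quantization info
  float scale;
  int32_t zeroPoint;
};

// real = scale * (q_u8 - zp_u8) = scale * ((q_u8 - 128) - (zp_u8 - 128)), so
// shifting both the data and the zero point by -128 preserves every real value.
std::vector<int8_t> ShiftUInt8ToInt8(const std::vector<uint8_t> &data, QuantParam *param) {
  std::vector<int8_t> out;
  out.reserve(data.size());
  for (uint8_t v : data) {
    out.push_back(static_cast<int8_t>(static_cast<int32_t>(v) - 128));
  }
  param->zeroPoint -= 128;  // keep scale * (q - zp) unchanged
  return out;
}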