diff --git a/mindspore/lite/src/runtime/kernel/arm/nnacl/fp32/common_func.c b/mindspore/lite/src/runtime/kernel/arm/nnacl/fp32/common_func.c
index 23869d32954b8ba99790bae6ca5df7bfb8027dfa..6500ef8b6e5ad8e698241198bec3972beae28661 100644
--- a/mindspore/lite/src/runtime/kernel/arm/nnacl/fp32/common_func.c
+++ b/mindspore/lite/src/runtime/kernel/arm/nnacl/fp32/common_func.c
@@ -65,9 +65,6 @@ void MatrixMultiAdd(float *c11, float *c12, float *c21, float *c22, float *x_ptr
 
 void PostConvFuncComm(const float *src_ptr_, float *out_ptr, const float *bias_ptr, size_t output_channel,
                       size_t plane_size, size_t stride, bool is_relu, bool is_relu6, int size) {
-  if (size == 0) {
-    return;
-  }
   for (int oc = 0; oc < output_channel; oc++) {
     int oc_div = oc / size, oc_mod = oc % size;
     for (int hw = 0; hw < plane_size; hw++) {
diff --git a/mindspore/lite/src/runtime/kernel/arm/nnacl/int8/conv_depthwise_int8.c b/mindspore/lite/src/runtime/kernel/arm/nnacl/int8/conv_depthwise_int8.c
index 9d8d88acb502bb31249a15e475bc78145245835a..279f1caa013bf6378e7bc297fbd38df7afcb8e58 100644
--- a/mindspore/lite/src/runtime/kernel/arm/nnacl/int8/conv_depthwise_int8.c
+++ b/mindspore/lite/src/runtime/kernel/arm/nnacl/int8/conv_depthwise_int8.c
@@ -54,7 +54,7 @@ void DepthwiseBorderPixelInt8(int8_t *dst, const int16_t *src, const int16_t *we
       }
       tmp_buffer[c] += bias[c];
       tmp_buffer[c] = RoundingDivideByPOT(
-        SaturatingRoundingDoublingHighMul(tmp_buffer[c] * (1 << (unsigned int)left), multiplier), right);
+        SaturatingRoundingDoublingHighMul(tmp_buffer[c] * (1 << (unsigned int)left), multiplier), -right);
       tmp_buffer[c] += out_zp;
       tmp_buffer[c] = MSMAX(tmp_buffer[c], acc_min);
       tmp_buffer[c] = MSMIN(tmp_buffer[c], acc_max);
diff --git a/mindspore/lite/test/models_tflite_awaretraining.cfg b/mindspore/lite/test/models_tflite_awaretraining.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..577417cac35c9c6c3d0b02bbbde20c9a805b495d
--- /dev/null
+++ b/mindspore/lite/test/models_tflite_awaretraining.cfg
@@ -0,0 +1 @@
+video_infer.tflite
diff --git a/mindspore/lite/test/run_benchmark_nets.sh b/mindspore/lite/test/run_benchmark_nets.sh
index 5d6f859c5b99909b9403ffe2aa9ec6b4599b55d5..bf869bc52cf7b59aeb5dda93570f66d7b1b3d3ce 100644
--- a/mindspore/lite/test/run_benchmark_nets.sh
+++ b/mindspore/lite/test/run_benchmark_nets.sh
@@ -86,6 +86,27 @@ function Run_x86() {
         fi
     done < ${models_tflite_posttraining_config}
 
+    # Run tflite aware training quantization converted models:
+    while read line; do
+        model_name=${line}
+        if [[ $model_name == \#* ]]; then
+            continue
+        fi
+        echo ${model_name}
+        echo 'cd '${convertor_path}'/MSLite-*-linux_x86_64'
+        cd ${convertor_path}/MSLite-*-linux_x86_64 || return 1
+        echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib;./benchmark/benchmark --modelPath='${ms_models_path}'/'${model_name}'.ms --inDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/'${model_name}'.ms.bin --calibDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/'${model_name}'.ms.out --warmUpLoopCount=1 --loopCount=1 --numThreads=1' || return 1
+        export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib;./benchmark/benchmark --modelPath=${ms_models_path}/${model_name}.ms --inDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/${model_name}.ms.bin --calibDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/${model_name}.ms.out --warmUpLoopCount=1 --loopCount=1 --numThreads=1
+        if [ $? -eq 0 ]; then
+            run_result='Run_x86: '${model_name}'_awaretraining pass'
+            echo ${run_result} >> ${run_benchmark_result_file}
+        else
+            run_result='Run_x86: '${model_name}'_awaretraining fail <<===========================this is the failed case'
+            echo ${run_result} >> ${run_benchmark_result_file}
+            return 1
+        fi
+    done < ${models_tflite_awaretraining_config}
+
     # Run mindspore converted models:
     while read line; do
         model_name=${line}
@@ -237,6 +258,7 @@ cd ${convertor_path}/MSLite-*-linux_x86_64 || exit 1
 # Set models config filepath
 models_tflite_config=${basepath}/models_tflite.cfg
 models_caffe_config=${basepath}/models_caffe.cfg
+models_tflite_awaretraining_config=${basepath}/models_tflite_awaretraining.cfg
 models_tflite_posttraining_config=${basepath}/models_tflite_posttraining.cfg
 models_onnx_config=${basepath}/models_onnx.cfg
 models_mindspore_config=${basepath}/models_mindspore.cfg
@@ -303,6 +325,17 @@
     ./converter_lite --fmk=TFLITE --modelFile=$models_path/${model_name} --outputFile=${ms_models_path}/${model_name}_posttraining --quantType=PostTraining --config_file=${models_path}/${model_name}_posttraining.config || exit 1
 done < ${models_tflite_posttraining_config}
 
+# Convert TFLite AwareTraining models:
+while read line; do
+    model_name=${line}
+    if [[ $model_name == \#* ]]; then
+        continue
+    fi
+    echo ${model_name}
+    echo './converter_lite --fmk=TFLITE --modelFile='${models_path}'/'${model_name}' --outputFile='${ms_models_path}'/'${model_name}' --quantType=AwareTraining'
+    ./converter_lite --fmk=TFLITE --modelFile=${models_path}/${model_name} --outputFile=${ms_models_path}/${model_name} --quantType=AwareTraining || exit 1
+done < ${models_tflite_awaretraining_config}
+
 # Push to the arm and run benchmark:
 # First:copy benchmark exe and so files to the server which connected to the phone
 rm -rf ${basepath}/benchmark_test
diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_model_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_model_parser.cc
index 978662653c62338af7a4bbca5b9cc79e7d192e0c..c7d6de41c5c52d964cb59a5155cdb8051a9e7410 100644
--- a/mindspore/lite/tools/converter/parser/tflite/tflite_model_parser.cc
+++ b/mindspore/lite/tools/converter/parser/tflite/tflite_model_parser.cc
@@ -152,6 +152,9 @@ STATUS TfliteModelParser::ConvertTensor(const std::unique_ptr
   auto isConst = (!tensor_buffer->data.empty());
   if (isConst) {
     CopyConstTensorData(tflite_model_buffer, tflite_tensor.get(), tensor.get());
+  } else if (tensor->dataType == TypeId::kNumberTypeUInt8) {
+    // set in/out tensor to int8 to fit ms-lite op
+    tensor->dataType = TypeId::kNumberTypeInt8;
   }
   // set tensor attr
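
Note on the `-right` fix in conv_depthwise_int8.c: the patch negates the right-shift argument before handing it to `RoundingDivideByPOT`, which (in the gemmlowp reference semantics) only accepts a non-negative exponent. Below is a minimal, self-contained C sketch of this requantization path. It assumes gemmlowp-style helper semantics and that `right` is stored as a non-positive value by the quantization setup code (consistent with the call site negating it); the concrete numbers in `main` are hypothetical, chosen only to illustrate the math.

```c
#include <stdint.h>
#include <stdio.h>

/* Reference-style saturating rounding doubling high multiply: returns the
 * high 32 bits of 2*a*b with rounding; saturates only when a == b == INT32_MIN. */
static int32_t SaturatingRoundingDoublingHighMul(int32_t a, int32_t b) {
  if (a == INT32_MIN && b == INT32_MIN) {
    return INT32_MAX;
  }
  int64_t ab = (int64_t)a * (int64_t)b;
  int64_t nudge = ab >= 0 ? (1ll << 30) : (1 - (1ll << 30));
  return (int32_t)((ab + nudge) / (1ll << 31));
}

/* Rounding division by 2^exponent; exponent must be >= 0. Because the
 * right shift is assumed to be stored as a non-positive value, the caller
 * must negate it first -- which is exactly the `-right` change above. */
static int32_t RoundingDivideByPOT(int32_t x, int exponent) {
  const int32_t mask = (int32_t)((1ll << exponent) - 1);
  const int32_t remainder = x & mask;
  const int32_t threshold = (mask >> 1) + ((x < 0) ? 1 : 0);
  return (x >> exponent) + ((remainder > threshold) ? 1 : 0);
}

int main(void) {
  /* Hypothetical decomposition of a real rescale factor of 0.1:
   * 0.1 == 0.8 * 2^-3, so multiplier = round(0.8 * 2^31), left = 0, right = -3. */
  int32_t multiplier = (int32_t)(0.8 * 2147483648.0); /* 1717986918 */
  int left = 0, right = -3;
  int32_t acc = 1000; /* raw int32 accumulator */
  int32_t out = RoundingDivideByPOT(
      SaturatingRoundingDoublingHighMul(acc * (1 << (unsigned int)left), multiplier), -right);
  printf("%d\n", out); /* prints 100, i.e. acc * 0.1 */
  return 0;
}
```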
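Note on the tflite_model_parser.cc change: the parser only retags non-const uint8 activation tensors as int8. What makes that retagging value-preserving is the standard affine-quantization identity, real = scale * (q - zero_point): subtracting 128 from both the stored byte and the zero point leaves the represented real number unchanged. The sketch below is a hypothetical illustration of that identity, not MindSpore Lite's actual conversion code; `RemapU8ToI8` and `RemapZeroPoint` are made-up helper names.

```c
#include <assert.h>
#include <stdint.h>

/* real = scale * (q - zero_point); shifting both q and zero_point by -128
 * preserves the represented value, which is why a uint8 tensor can be
 * re-typed as int8 for ms-lite ops. Hypothetical helpers for illustration. */
static inline int8_t RemapU8ToI8(uint8_t q) { return (int8_t)((int32_t)q - 128); }
static inline int32_t RemapZeroPoint(int32_t zp_u8) { return zp_u8 - 128; }

int main(void) {
  const float scale = 0.5f;
  const int32_t zp_u8 = 130;
  const uint8_t q_u8 = 140;                    /* represents 0.5 * (140 - 130) = 5.0 */
  const int8_t q_i8 = RemapU8ToI8(q_u8);       /* 12 */
  const int32_t zp_i8 = RemapZeroPoint(zp_u8); /* 2 */
  assert(scale * (float)(q_u8 - zp_u8) == scale * (float)(q_i8 - zp_i8));
  return 0;
}
```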