Unverified commit 6ad6833d, authored by cc, committed by GitHub

Add test for int16 quantized model (#4387)

* Add test for int16 quantized model, test=develop
Parent b595ea5d
@@ -41,6 +41,7 @@ if (WITH_TESTING)
  lite_download_and_uncompress(${LITE_MODEL_DIR} ${LITE_URL} "lite_naive_model.tar.gz")
  if(LITE_WITH_LIGHT_WEIGHT_FRAMEWORK)
    lite_download_and_uncompress(${LITE_MODEL_DIR} ${LITE_URL} "mobilenet_v1.tar.gz")
    lite_download_and_uncompress(${LITE_MODEL_DIR} ${LITE_URL} "mobilenet_v1_int16.tar.gz")
    lite_download_and_uncompress(${LITE_MODEL_DIR} ${LITE_URL} "mobilenet_v2_relu.tar.gz")
    lite_download_and_uncompress(${LITE_MODEL_DIR} ${LITE_URL} "resnet50.tar.gz")
    lite_download_and_uncompress(${LITE_MODEL_DIR} ${LITE_URL} "inception_v4_simple.tar.gz")
@@ -292,6 +292,14 @@ if(LITE_WITH_LIGHT_WEIGHT_FRAMEWORK AND WITH_TESTING)
    set_target_properties(test_mobilenetv1 PROPERTIES LINK_FLAGS "${LINK_FLAGS}")
  endif()
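  # int16 MobileNetV1 accuracy test; --model_dir points at the archive downloaded in the testing setup above.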
  lite_cc_test(test_mobilenetv1_int16 SRCS mobilenetv1_int16_test.cc
     DEPS ${lite_model_test_DEPS} ${light_lib_DEPS}
     CL_DEPS ${opencl_kernels}
     NPU_DEPS ${npu_kernels} ${npu_bridges}
     ARGS --cl_path=${CMAKE_SOURCE_DIR}/lite/backends/opencl
       --model_dir=${LITE_MODEL_DIR}/mobilenet_v1_int16 SERIAL)
  add_dependencies(test_mobilenetv1_int16 extern_lite_download_mobilenet_v1_int16_tar_gz)
  lite_cc_test(test_mobilenetv2 SRCS mobilenetv2_test.cc
     DEPS ${lite_model_test_DEPS}
     CL_DEPS ${opencl_kernels}
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <gflags/gflags.h>
#include <gtest/gtest.h>
#include <string>
#include <vector>
#include "lite/api/cxx_api.h"
#include "lite/api/light_api.h"
#include "lite/api/paddle_use_kernels.h"
#include "lite/api/paddle_use_ops.h"
#include "lite/api/paddle_use_passes.h"
#include "lite/api/test_helper.h"
#include "lite/core/op_registry.h"
DEFINE_string(optimized_model,
              "/data/local/tmp/int16_model",
              "optimized_model");
DEFINE_int32(N, 1, "input_batch");
DEFINE_int32(C, 3, "input_channel");
DEFINE_int32(H, 224, "input_height");
DEFINE_int32(W, 224, "input_width");
namespace paddle {
namespace lite {

void TestModel(const std::vector<Place>& valid_places,
               const std::string& model_dir) {
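  // Device setup: no core binding, FLAGS_threads worker threads.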
  DeviceInfo::Init();
  DeviceInfo::Global().SetRunMode(lite_api::LITE_POWER_NO_BIND, FLAGS_threads);

  LOG(INFO) << "Optimize model.";
  lite::Predictor cxx_predictor;
  cxx_predictor.Build(model_dir, "", "", valid_places);
  cxx_predictor.SaveModel(FLAGS_optimized_model,
                          paddle::lite_api::LiteModelType::kNaiveBuffer);

  LOG(INFO) << "Load optimized model.";
  lite::LightPredictor predictor(FLAGS_optimized_model + ".nb", false);

  auto* input_tensor = predictor.GetInput(0);
  input_tensor->Resize(DDim(
      std::vector<DDim::value_type>({FLAGS_N, FLAGS_C, FLAGS_H, FLAGS_W})));
  auto* data = input_tensor->mutable_data<float>();
  auto item_size = FLAGS_N * FLAGS_C * FLAGS_H * FLAGS_W;
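  // An all-ones input keeps the run deterministic for the reference check below.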
  for (int i = 0; i < item_size; i++) {
    data[i] = 1.;
  }

  LOG(INFO) << "Predictor run.";
  predictor.Run();
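  // Reference values for the first five outputs given the all-ones input.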
  auto* out = predictor.GetOutput(0);
  const auto* pdata = out->data<float>();
  std::vector<float> ref = {
      0.000191383, 0.000592063, 0.000112282, 6.27426e-05, 0.000127522};
  double eps = 1e-5;
  for (size_t i = 0; i < ref.size(); ++i) {
    EXPECT_NEAR(pdata[i], ref[i], eps);
  }
}
TEST(MobileNetV1_Int16, test_arm) {
  std::vector<Place> valid_places({
      Place{TARGET(kARM), PRECISION(kFloat)},
  });
  std::string model_dir = FLAGS_model_dir;
  TestModel(valid_places, model_dir);
}
} // namespace lite
} // namespace paddle
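
For reference, the same optimize-then-deploy flow can be written against the public C++ API instead of the internal Predictor classes. The sketch below is illustrative only: it assumes the CxxConfig/MobileConfig classes from lite/api/paddle_api.h and the MobileConfig::set_model_from_file helper; the OptimizeThenRun name and the paths are hypothetical, not part of this patch.

// Sketch only: public-API equivalent of the test's optimize-then-run flow.
#include <iostream>
#include <string>
#include <vector>
#include "paddle_api.h"  // public Paddle-Lite API (assumed include path)

using namespace paddle::lite_api;  // NOLINT

void OptimizeThenRun(const std::string& model_dir,  // hypothetical helper
                     const std::string& opt_path) {
  // Step 1: load the full model, run the optimization passes for the given
  // places, and save a NaiveBuffer (.nb) copy.
  CxxConfig cxx_config;
  cxx_config.set_model_dir(model_dir);
  cxx_config.set_valid_places({Place{TARGET(kARM), PRECISION(kFloat)}});
  auto cxx_predictor = CreatePaddlePredictor<CxxConfig>(cxx_config);
  cxx_predictor->SaveOptimizedModel(opt_path, LiteModelType::kNaiveBuffer);

  // Step 2: reload only the optimized artifact with the light runtime.
  MobileConfig mobile_config;
  mobile_config.set_model_from_file(opt_path + ".nb");
  auto predictor = CreatePaddlePredictor<MobileConfig>(mobile_config);

  // Step 3: feed an all-ones 1x3x224x224 input and run, as the test does.
  auto input = predictor->GetInput(0);
  input->Resize({1, 3, 224, 224});
  auto* data = input->mutable_data<float>();
  for (int i = 0; i < 1 * 3 * 224 * 224; ++i) data[i] = 1.f;
  predictor->Run();

  auto output = predictor->GetOutput(0);
  const float* out_data = output->data<float>();
  std::cout << "first output: " << out_data[0] << std::endl;
}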
@@ -466,7 +466,7 @@ function test_arm_android {
    echo "test name: ${test_name}"
    adb_work_dir="/data/local/tmp"
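    # Heavy model tests are skipped in this generic pass; they run as separate per-model subtasks (see the main hunk below).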
-    skip_list=("test_model_parser" "test_mobilenetv1" "test_mobilenetv2" "test_resnet50" "test_inceptionv4" "test_light_api" "test_apis" "test_paddle_api" "test_cxx_api" "test_gen_code" "test_mobilenetv1_int8" "test_subgraph_pass" "test_grid_sampler_image_opencl" "test_lrn_image_opencl" "test_pad2d_image_opencl" "test_transformer_with_mask_fp32_arm")
+    skip_list=("test_model_parser" "test_mobilenetv1" "test_mobilenetv2" "test_resnet50" "test_inceptionv4" "test_light_api" "test_apis" "test_paddle_api" "test_cxx_api" "test_gen_code" "test_mobilenetv1_int8" "test_subgraph_pass" "test_grid_sampler_image_opencl" "test_lrn_image_opencl" "test_pad2d_image_opencl" "test_transformer_with_mask_fp32_arm" "test_mobilenetv1_int16")
    for skip_name in ${skip_list[@]}; do
        [[ $skip_name =~ (^|[[:space:]])$test_name($|[[:space:]]) ]] && echo "skip $test_name" && return
    done
@@ -1251,6 +1251,7 @@ function main {
    build_test_arm_subtask_android
    build_test_arm_subtask_model test_mobilenetv1 mobilenet_v1
    build_test_arm_subtask_model test_mobilenetv1_int8 MobileNetV1_quant
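    # int16 quantized MobileNetV1: uses the mobilenet_v1_int16 archive fetched by CMake.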
    build_test_arm_subtask_model test_mobilenetv1_int16 mobilenet_v1_int16
    build_test_arm_subtask_model test_mobilenetv2 mobilenet_v2_relu
    build_test_arm_subtask_model test_resnet50 resnet50
    build_test_arm_subtask_model test_inceptionv4 inception_v4_simple