Commit e28b5a3c authored by sangoly

Merge branch 'sangoly/open_inception_v4_ut' into 'incubate/lite'

add inception_v4 test && enhance uts

See merge request inference/paddlelite!55
@@ -87,7 +87,6 @@ build:mobile_android:
   dependencies:
     - build:server
 build:mobile_armlinux:
   tags:
     - lite
@@ -108,16 +107,15 @@ build:mobile_armlinux:
   dependencies:
     - build:server
-build:mobile_model_resnet50:
+build:mobile_model_mobilenetv1:
   tags:
     - lite
   stage: build_mobile
   image: $MOBILE_LITE_DOCKER_IMAGE
   script:
-    - export CCACHE_DIR=$CI_PROJECT_DIR/build_mobile_model_resnet50
-    - ./paddle/fluid/lite/tools/build.sh build_test_arm_model_resnet50
+    - export CCACHE_DIR=$CI_PROJECT_DIR/build_mobile_model_mobilenetv1
+    - ./paddle/fluid/lite/tools/build.sh build_test_arm_model_mobilenetv1
   dependencies:
     - build:server
@@ -127,18 +125,17 @@ build:mobile_model_resnet50:
   paths:
     - build.lite.android.armv8.gcc
     - ~/.ccache
-    - $CI_PROJECT_DIR/build_mobile_model_resnet50
+    - $CI_PROJECT_DIR/build_mobile_model_mobilenetv1
-build:mobile_model_mobilenetv1:
+build:mobile_model_mobilenetv2:
   tags:
     - lite
   stage: build_mobile
   image: $MOBILE_LITE_DOCKER_IMAGE
   script:
-    - export CCACHE_DIR=$CI_PROJECT_DIR/build_mobile_model_mobilenetv1
-    - ./paddle/fluid/lite/tools/build.sh build_test_arm_model_mobilenetv1
+    - export CCACHE_DIR=$CI_PROJECT_DIR/build_mobile_model_mobilenetv2
+    - ./paddle/fluid/lite/tools/build.sh build_test_arm_model_mobilenetv2
   dependencies:
     - build:server
@@ -148,17 +145,17 @@ build:mobile_model_mobilenetv1:
   paths:
     - build.lite.android.armv8.gcc
     - ~/.ccache
-    - $CI_PROJECT_DIR/build_mobile_model_mobilenetv1
+    - $CI_PROJECT_DIR/build_mobile_model_mobilenetv2
-build:mobile_model_mobilenetv2:
+build:mobile_model_resnet50:
   tags:
     - lite
   stage: build_mobile
   image: $MOBILE_LITE_DOCKER_IMAGE
   script:
-    - export CCACHE_DIR=$CI_PROJECT_DIR/build_mobile_model_mobilenetv2
-    - ./paddle/fluid/lite/tools/build.sh build_test_arm_model_mobilenetv2
+    - export CCACHE_DIR=$CI_PROJECT_DIR/build_mobile_model_resnet50
+    - ./paddle/fluid/lite/tools/build.sh build_test_arm_model_resnet50
   dependencies:
     - build:server
@@ -168,30 +165,24 @@ build:mobile_model_mobilenetv2:
   paths:
     - build.lite.android.armv8.gcc
     - ~/.ccache
-    - $CI_PROJECT_DIR/build_mobile_model_mobilenetv2
+    - $CI_PROJECT_DIR/build_mobile_model_resnet50
+build:mobile_model_inceptionv4:
+  tags:
+    - lite
+  stage: build_mobile
+  image: $MOBILE_LITE_DOCKER_IMAGE
-#build:mobile_model_inceptionv4:
-#  tags:
-#    - lite
-#  stage: build_mobile
-#  image: $MOBILE_LITE_DOCKER_IMAGE
-#  cache:
-#    key: mobile_thirdparty
-#    paths:
-#      - $MOBILE_LITE_CACHE0
-#      - $MOBILE_LITE_CACHE1
-#      - ~/.ccache
-#  script:
-#    - export CCACHE_DIR=$CI_PROJECT_DIR/build_mobile_model_inceptionv4
-#    - ./paddle/fluid/lite/tools/build.sh build_test_arm_model_inceptionv4
-#
-#  dependencies:
-#    - build:server
-#
-#  cache:
-#    key: mobile_thirdparty
-#    paths:
-#      - $MOBILE_LITE_CACHE0
-#      - $MOBILE_LITE_CACHE1
-#      - ~/.ccache
-#      - $CI_PROJECT_DIR/build_mobile_model_inceptionv4
+  script:
+    - export CCACHE_DIR=$CI_PROJECT_DIR/build_mobile_model_inceptionv4
+    - ./paddle/fluid/lite/tools/build.sh build_test_arm_model_inceptionv4
+  dependencies:
+    - build:server
+  cache:
+    key: mobile_thirdparty
+    paths:
+      - build.lite.android.armv8.gcc
+      - ~/.ccache
+      - $CI_PROJECT_DIR/build_mobile_model_inceptionv4
@@ -200,9 +200,9 @@ if (WITH_TESTING)
   lite_download_and_uncompress(${LITE_MODEL_DIR} ${LITE_URL} "lite_naive_model.tar.gz")
   if(LITE_WITH_LIGHT_WEIGHT_FRAMEWORK)
     lite_download_and_uncompress(${LITE_MODEL_DIR} ${LITE_URL} "mobilenet_v1.tar.gz")
-    lite_download_and_uncompress(${LITE_MODEL_DIR} ${LITE_URL} "mobilenet_v2.tar.gz")
+    lite_download_and_uncompress(${LITE_MODEL_DIR} ${LITE_URL} "mobilenet_v2_relu.tar.gz")
     lite_download_and_uncompress(${LITE_MODEL_DIR} ${LITE_URL} "resnet50.tar.gz")
-    lite_download_and_uncompress(${LITE_MODEL_DIR} ${LITE_URL} "inception_v4.tar.gz")
+    lite_download_and_uncompress(${LITE_MODEL_DIR} ${LITE_URL} "inception_v4_simple.tar.gz")
   endif()
   if(NOT LITE_WITH_LIGHT_WEIGHT_FRAMEWORK)
     lite_download_and_uncompress(${LITE_MODEL_DIR} ${LITE_URL} "GoogleNet_inference.tar.gz")
@@ -74,7 +74,7 @@ if(LITE_WITH_LIGHT_WEIGHT_FRAMEWORK AND WITH_TESTING)
   lite_cc_test(test_mobilenetv2_lite SRCS mobilenetv2_test.cc
     DEPS ${lite_model_test_DEPS}
     ARGS --model_dir=${LITE_MODEL_DIR}/mobilenet_v2 SERIAL)
-  add_dependencies(test_mobilenetv2_lite extern_lite_download_mobilenet_v2_tar_gz)
+  add_dependencies(test_mobilenetv2_lite extern_lite_download_mobilenet_v2_relu_tar_gz)
   lite_cc_test(test_resnet50_lite SRCS resnet50_test.cc
     DEPS ${lite_model_test_DEPS}
@@ -84,7 +84,7 @@ if(LITE_WITH_LIGHT_WEIGHT_FRAMEWORK AND WITH_TESTING)
   lite_cc_test(test_inceptionv4_lite SRCS inceptionv4_test.cc
     DEPS ${lite_model_test_DEPS}
     ARGS --model_dir=${LITE_MODEL_DIR}/inception_v4 SERIAL)
-  add_dependencies(test_inceptionv4_lite extern_lite_download_inception_v4_tar_gz)
+  add_dependencies(test_inceptionv4_lite extern_lite_download_inception_v4_simple_tar_gz)
 endif()
 # These tests needs CLI arguments, and is not supported in ARM CI.
@@ -39,7 +39,8 @@ TEST(InceptionV4, test) {
   auto* input_tensor = predictor.GetInput(0);
   input_tensor->Resize(DDim(std::vector<DDim::value_type>({1, 3, 224, 224})));
   auto* data = input_tensor->mutable_data<float>();
-  for (int i = 0; i < input_tensor->dims().production(); i++) {
+  auto item_size = input_tensor->dims().production();
+  for (int i = 0; i < item_size; i++) {
     data[i] = 1;
   }
@@ -58,16 +59,30 @@ TEST(InceptionV4, test) {
       << ", spend " << (GetCurrentUS() - start) / FLAGS_repeats / 1000.0
       << " ms in average.";
+  // std::vector<float> results({0.00078033, 0.00083865, 0.00060029, 0.00057083,
+  //                             0.00070094, 0.00080584, 0.00044525, 0.00074907,
+  //                             0.00059774, 0.00063654});
+  //
+  std::vector<std::vector<float>> results;
+  // i = 1
+  results.emplace_back(std::vector<float>(
+      {0.0011684548, 0.0010390386, 0.0011301535, 0.0010133048,
+       0.0010259597, 0.0010982729, 0.00093195855, 0.0009141837,
+       0.00096620916, 0.00089982944, 0.0010064574, 0.0010474789,
+       0.0009782845, 0.0009230255, 0.0010548076, 0.0010974824,
+       0.0010612885, 0.00089107914, 0.0010112736, 0.00097655767}));
   auto* out = predictor.GetOutput(0);
-  std::vector<float> results({0.00078033, 0.00083865, 0.00060029, 0.00057083,
-                              0.00070094, 0.00080584, 0.00044525, 0.00074907,
-                              0.00059774, 0.00063654});
-  for (int i = 0; i < results.size(); ++i) {
-    EXPECT_NEAR(out->data<float>()[i], results[i], 1e-5);
-  }
   ASSERT_EQ(out->dims().size(), 2);
   ASSERT_EQ(out->dims()[0], 1);
   ASSERT_EQ(out->dims()[1], 1000);
+  int step = 50;
+  for (int i = 0; i < results.size(); ++i) {
+    for (int j = 0; j < results[i].size(); ++j) {
+      EXPECT_NEAR(out->data<float>()[j * step + (out->dims()[1] * i)],
+                  results[i][j], 1e-6);
+    }
+  }
 }
 #endif
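Note on the rewritten expectation checks (this pattern repeats in all four model tests below): each test now keeps one 20-value reference row per batch item in results and compares it against every 50th entry of the corresponding 1000-class output row, i.e. raw output index j * step + 1000 * i, instead of comparing only the first ten logits. Below is a minimal standalone sketch of that sampling scheme in plain C++; the CheckStrided helper and its parameter names are illustrative assumptions rather than Paddle-Lite API, since the tests inline the same loops around EXPECT_NEAR.

// Standalone sketch of the strided comparison used by the updated tests.
// CheckStrided is a hypothetical helper; only the indexing mirrors the tests.
#include <cassert>
#include <cmath>
#include <cstddef>
#include <vector>

// Compare expected[i][j] against the flattened output at
// j * step + num_classes * i, i.e. every step-th logit of batch row i.
bool CheckStrided(const std::vector<float>& out,
                  const std::vector<std::vector<float>>& expected,
                  int num_classes, int step, float tol) {
  for (std::size_t i = 0; i < expected.size(); ++i) {
    for (std::size_t j = 0; j < expected[i].size(); ++j) {
      float got = out[j * step + num_classes * i];
      if (std::fabs(got - expected[i][j]) > tol) return false;
    }
  }
  return true;
}

int main() {
  const int num_classes = 1000, step = 50;    // matches the tests above
  std::vector<float> out(num_classes, 0.0f);  // one batch row of logits
  std::vector<std::vector<float>> expected(1, std::vector<float>(20));
  for (int j = 0; j < 20; ++j) {              // plant values at sampled positions
    out[j * step] = 0.001f * j;
    expected[0][j] = 0.001f * j;
  }
  assert(CheckStrided(out, expected, num_classes, step, 1e-6f));
  return 0;
}

Spot-checking at a fixed stride covers the whole 1000-logit vector rather than just its head, so a regression anywhere in the output is more likely to exceed the 1e-6 tolerance.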
@@ -36,7 +36,8 @@ void TestModel(const std::vector<Place>& valid_places,
   auto* input_tensor = predictor.GetInput(0);
   input_tensor->Resize(DDim(std::vector<DDim::value_type>({1, 3, 224, 224})));
   auto* data = input_tensor->mutable_data<float>();
-  for (int i = 0; i < input_tensor->dims().production(); i++) {
+  auto item_size = input_tensor->dims().production();
+  for (int i = 0; i < item_size; i++) {
     data[i] = 1;
   }
@@ -55,17 +56,26 @@ void TestModel(const std::vector<Place>& valid_places,
       << ", spend " << (GetCurrentUS() - start) / FLAGS_repeats / 1000.0
       << " ms in average.";
+  std::vector<std::vector<float>> results;
+  // i = 1
+  results.emplace_back(std::vector<float>(
+      {0.00019130898, 9.467885e-05, 0.00015971427, 0.0003650665,
+       0.00026431272, 0.00060884043, 0.0002107942, 0.0015819625,
+       0.0010323516, 0.00010079765, 0.00011006987, 0.0017364529,
+       0.0048292773, 0.0013995157, 0.0018453331, 0.0002428986,
+       0.00020211363, 0.00013668182, 0.0005855956, 0.00025901722}));
   auto* out = predictor.GetOutput(0);
-  std::vector<float> results({1.91308980e-04, 5.92055148e-04, 1.12303176e-04,
-                              6.27335685e-05, 1.27507330e-04, 1.32147351e-03,
-                              3.13812525e-05, 6.52209565e-05, 4.78087313e-05,
-                              2.58822285e-04});
-  for (int i = 0; i < results.size(); ++i) {
-    EXPECT_NEAR(out->data<float>()[i], results[i], 1e-6);
-  }
   ASSERT_EQ(out->dims().size(), 2);
   ASSERT_EQ(out->dims()[0], 1);
   ASSERT_EQ(out->dims()[1], 1000);
+  int step = 50;
+  for (int i = 0; i < results.size(); ++i) {
+    for (int j = 0; j < results[i].size(); ++j) {
+      EXPECT_NEAR(out->data<float>()[j * step + (out->dims()[1] * i)],
+                  results[i][j], 1e-6);
+    }
+  }
 }
 TEST(MobileNetV1, test_arm) {
@@ -39,7 +39,8 @@ TEST(MobileNetV2, test) {
   auto* input_tensor = predictor.GetInput(0);
   input_tensor->Resize(DDim(std::vector<DDim::value_type>({1, 3, 224, 224})));
   auto* data = input_tensor->mutable_data<float>();
-  for (int i = 0; i < input_tensor->dims().production(); i++) {
+  auto item_size = input_tensor->dims().production();
+  for (int i = 0; i < item_size; i++) {
     data[i] = 1;
   }
@@ -58,16 +59,26 @@ TEST(MobileNetV2, test) {
       << ", spend " << (GetCurrentUS() - start) / FLAGS_repeats / 1000.0
       << " ms in average.";
+  std::vector<std::vector<float>> results;
+  // i = 1
+  results.emplace_back(std::vector<float>(
+      {0.00017082224, 5.699624e-05, 0.000260885, 0.00016412718,
+       0.00034818667, 0.00015230637, 0.00032959113, 0.0014772735,
+       0.0009059976, 9.5378724e-05, 5.386537e-05, 0.0006427285,
+       0.0070957416, 0.0016094646, 0.0018807327, 0.00010506048,
+       6.823785e-05, 0.00012269315, 0.0007806194, 0.00022354358}));
   auto* out = predictor.GetOutput(0);
-  std::vector<float> results({0.00097802, 0.00099822, 0.00103093, 0.00100121,
-                              0.00098268, 0.00104065, 0.00099962, 0.00095181,
-                              0.00099694, 0.00099406});
-  for (int i = 0; i < results.size(); ++i) {
-    EXPECT_NEAR(out->data<float>()[i], results[i], 1e-5);
-  }
   ASSERT_EQ(out->dims().size(), 2);
   ASSERT_EQ(out->dims()[0], 1);
   ASSERT_EQ(out->dims()[1], 1000);
+  int step = 50;
+  for (int i = 0; i < results.size(); ++i) {
+    for (int j = 0; j < results[i].size(); ++j) {
+      EXPECT_NEAR(out->data<float>()[j * step + (out->dims()[1] * i)],
+                  results[i][j], 1e-6);
+    }
+  }
 }
 #endif
@@ -39,7 +39,8 @@ TEST(ResNet50, test) {
   auto* input_tensor = predictor.GetInput(0);
   input_tensor->Resize(DDim(std::vector<DDim::value_type>({1, 3, 224, 224})));
   auto* data = input_tensor->mutable_data<float>();
-  for (int i = 0; i < input_tensor->dims().production(); i++) {
+  auto item_size = input_tensor->dims().production();
+  for (int i = 0; i < item_size; i++) {
     data[i] = 1;
   }
@@ -58,17 +59,26 @@ TEST(ResNet50, test) {
       << ", spend " << (GetCurrentUS() - start) / FLAGS_repeats / 1000.0
       << " ms in average.";
+  std::vector<std::vector<float>> results;
+  // i = 1
+  results.emplace_back(std::vector<float>(
+      {0.00024139918, 0.00020566184, 0.00022418296, 0.00041731037,
+       0.0005366107, 0.00016948722, 0.00028638865, 0.0009257241,
+       0.00072681636, 8.531815e-05, 0.0002129998, 0.0021168243,
+       0.006387163, 0.0037145028, 0.0012812682, 0.00045948103,
+       0.00013535398, 0.0002483765, 0.00076759676, 0.0002773295}));
   auto* out = predictor.GetOutput(0);
-  std::vector<float> results({2.41399175e-04, 4.13724629e-04, 2.64324830e-04,
-                              9.68795503e-05, 2.01968738e-04, 8.14945495e-04,
-                              7.45922662e-05, 1.76479152e-04, 7.47223166e-05,
-                              6.06825110e-04});
-  for (int i = 0; i < results.size(); ++i) {
-    EXPECT_NEAR(out->data<float>()[i], results[i], 1e-5);
-  }
   ASSERT_EQ(out->dims().size(), 2);
   ASSERT_EQ(out->dims()[0], 1);
   ASSERT_EQ(out->dims()[1], 1000);
+  int step = 50;
+  for (int i = 0; i < results.size(); ++i) {
+    for (int j = 0; j < results[i].size(); ++j) {
+      EXPECT_NEAR(out->data<float>()[j * step + (out->dims()[1] * i)],
+                  results[i][j], 1e-6);
+    }
+  }
 }
 #endif
@@ -512,7 +512,7 @@ function main {
       shift
       ;;
     build_test_arm_model_mobilenetv2)
-      build_test_arm_subtask_model test_mobilenetv2_lite mobilenet_v2
+      build_test_arm_subtask_model test_mobilenetv2_lite mobilenet_v2_relu
       shift
       ;;
     build_test_arm_model_resnet50)
@@ -520,7 +520,7 @@ function main {
       shift
       ;;
     build_test_arm_model_inceptionv4)
-      build_test_arm_subtask_model test_inceptionv4_lite inception_v4
+      build_test_arm_subtask_model test_inceptionv4_lite inception_v4_simple
       shift
       ;;
     check_style)