未验证 提交 aec2bf7f 编写于 作者: C cc 提交者: GitHub

[CI][C++ demo]Add test lite libs (#3481)

上级 468c1e43
......@@ -369,6 +369,8 @@ if (LITE_WITH_LIGHT_WEIGHT_FRAMEWORK AND LITE_WITH_ARM)
COMMAND cp "${CMAKE_SOURCE_DIR}/lite/demo/cxx/makefiles/test_cv/Makefile.${ARM_TARGET_OS}.${ARM_TARGET_ARCH_ABI}" "${INFER_LITE_PUBLISH_ROOT}/demo/cxx/test_cv/Makefile"
COMMAND cp -r "${CMAKE_SOURCE_DIR}/lite/demo/cxx/mask_detection" "${INFER_LITE_PUBLISH_ROOT}/demo/cxx"
COMMAND cp "${CMAKE_SOURCE_DIR}/lite/demo/cxx/makefiles/mask_detection/Makefile.${ARM_TARGET_OS}.${ARM_TARGET_ARCH_ABI}" "${INFER_LITE_PUBLISH_ROOT}/demo/cxx/mask_detection/Makefile"
COMMAND cp -r "${CMAKE_SOURCE_DIR}/lite/demo/cxx/test_libs" "${INFER_LITE_PUBLISH_ROOT}/demo/cxx"
COMMAND cp "${CMAKE_SOURCE_DIR}/lite/demo/cxx/makefiles/test_libs/Makefile.${ARM_TARGET_OS}.${ARM_TARGET_ARCH_ABI}" "${INFER_LITE_PUBLISH_ROOT}/demo/cxx/test_libs/Makefile"
)
add_dependencies(publish_inference_android_cxx_demos logging gflags)
add_dependencies(publish_inference_cxx_lib publish_inference_android_cxx_demos)
......
# Build the Paddle-Lite C++ library tests for Android armeabi-v7a.
ARM_ABI = arm7
export ARM_ABI

include ../Makefile.def

LITE_ROOT=../../../
THIRD_PARTY_DIR=${LITE_ROOT}/third_party

OPENCV_VERSION=opencv4.1.0

# Prebuilt static OpenCV libraries (link order matters for static linking:
# imgcodecs -> imgproc -> core -> 3rdparty).
OPENCV_LIBS = ../../../third_party/${OPENCV_VERSION}/armeabi-v7a/libs/libopencv_imgcodecs.a \
              ../../../third_party/${OPENCV_VERSION}/armeabi-v7a/libs/libopencv_imgproc.a \
              ../../../third_party/${OPENCV_VERSION}/armeabi-v7a/libs/libopencv_core.a \
              ../../../third_party/${OPENCV_VERSION}/armeabi-v7a/3rdparty/libs/libtegra_hal.a \
              ../../../third_party/${OPENCV_VERSION}/armeabi-v7a/3rdparty/libs/liblibjpeg-turbo.a \
              ../../../third_party/${OPENCV_VERSION}/armeabi-v7a/3rdparty/libs/liblibwebp.a \
              ../../../third_party/${OPENCV_VERSION}/armeabi-v7a/3rdparty/libs/liblibpng.a \
              ../../../third_party/${OPENCV_VERSION}/armeabi-v7a/3rdparty/libs/liblibjasper.a \
              ../../../third_party/${OPENCV_VERSION}/armeabi-v7a/3rdparty/libs/liblibtiff.a \
              ../../../third_party/${OPENCV_VERSION}/armeabi-v7a/3rdparty/libs/libIlmImf.a \
              ../../../third_party/${OPENCV_VERSION}/armeabi-v7a/3rdparty/libs/libtbb.a \
              ../../../third_party/${OPENCV_VERSION}/armeabi-v7a/3rdparty/libs/libcpufeatures.a

OPENCV_INCLUDE = -I../../../third_party/${OPENCV_VERSION}/armeabi-v7a/include

CXX_INCLUDES = $(INCLUDES) ${OPENCV_INCLUDE} -I$(LITE_ROOT)/cxx/include -I${THIRD_PARTY_DIR}/gflags/include

CXX_LIBS = ${OPENCV_LIBS} ${THIRD_PARTY_DIR}/gflags/lib/libgflags.a $(SYSTEM_LIBS)

# Paddle-Lite full/light API in shared and static flavors.
# (fixed typo: SHAPRED -> SHARED; these variables are local to this Makefile)
LITE_FULL_SHARED_LIBS=-L$(LITE_ROOT)/cxx/lib/ -lpaddle_full_api_shared
LITE_FULL_STATIC_LIBS=$(LITE_ROOT)/cxx/lib/libpaddle_api_full_bundled.a
LITE_LIGHT_SHARED_LIBS=-L$(LITE_ROOT)/cxx/lib/ -lpaddle_light_api_shared
LITE_LIGHT_STATIC_LIBS=$(LITE_ROOT)/cxx/lib/libpaddle_api_light_bundled.a

##########
# Download and unpack the prebuilt OpenCV archive once into THIRD_PARTY_DIR.
fetch_opencv:
	@ test -d ${THIRD_PARTY_DIR} || mkdir ${THIRD_PARTY_DIR}
	@ test -e ${THIRD_PARTY_DIR}/${OPENCV_VERSION}.tar.gz || \
	(echo "fetch opencv libs" && \
	wget -P ${THIRD_PARTY_DIR} https://paddle-inference-dist.bj.bcebos.com/${OPENCV_VERSION}.tar.gz)
	@ test -d ${THIRD_PARTY_DIR}/${OPENCV_VERSION} || \
	tar -zxvf ${THIRD_PARTY_DIR}/${OPENCV_VERSION}.tar.gz -C ${THIRD_PARTY_DIR}

# NOTE: SYSROOT_COMPLILE spelling matches the variable defined in ../Makefile.def.
test_helper.o: test_helper.cc
	$(CC) $(SYSROOT_COMPLILE) $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -o test_helper.o -c test_helper.cc

classification_full.o: classification_full.cc
	$(CC) $(SYSROOT_COMPLILE) $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -o classification_full.o -c classification_full.cc

classification_light.o: classification_light.cc
	$(CC) $(SYSROOT_COMPLILE) $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -o classification_light.o -c classification_light.cc

classification_full_shared: fetch_opencv classification_full.o test_helper.o
	$(CC) $(SYSROOT_LINK) $(CXXFLAGS_LINK) classification_full.o test_helper.o -o classification_full_shared $(CXX_LIBS) $(LDFLAGS) ${LITE_FULL_SHARED_LIBS}

classification_full_static: fetch_opencv classification_full.o test_helper.o
	$(CC) $(SYSROOT_LINK) $(CXXFLAGS_LINK) classification_full.o test_helper.o -o classification_full_static ${LITE_FULL_STATIC_LIBS} $(CXX_LIBS) $(LDFLAGS)

classification_light_shared: fetch_opencv classification_light.o test_helper.o
	$(CC) $(SYSROOT_LINK) $(CXXFLAGS_LINK) classification_light.o test_helper.o -o classification_light_shared $(CXX_LIBS) $(LDFLAGS) ${LITE_LIGHT_SHARED_LIBS}

classification_light_static: fetch_opencv classification_light.o test_helper.o
	$(CC) $(SYSROOT_LINK) $(CXXFLAGS_LINK) classification_light.o test_helper.o -o classification_light_static ${LITE_LIGHT_STATIC_LIBS} $(CXX_LIBS) $(LDFLAGS)

######
yolov3_full.o: yolov3_full.cc
	$(CC) $(SYSROOT_COMPLILE) $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -o yolov3_full.o -c yolov3_full.cc

yolov3_light.o: yolov3_light.cc
	$(CC) $(SYSROOT_COMPLILE) $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -o yolov3_light.o -c yolov3_light.cc

yolov3_full_shared: fetch_opencv yolov3_full.o test_helper.o
	$(CC) $(SYSROOT_LINK) $(CXXFLAGS_LINK) yolov3_full.o test_helper.o -o yolov3_full_shared $(CXX_LIBS) $(LDFLAGS) ${LITE_FULL_SHARED_LIBS}

yolov3_full_static: fetch_opencv yolov3_full.o test_helper.o
	$(CC) $(SYSROOT_LINK) $(CXXFLAGS_LINK) yolov3_full.o test_helper.o -o yolov3_full_static ${LITE_FULL_STATIC_LIBS} $(CXX_LIBS) $(LDFLAGS)

yolov3_light_shared: fetch_opencv yolov3_light.o test_helper.o
	$(CC) $(SYSROOT_LINK) $(CXXFLAGS_LINK) yolov3_light.o test_helper.o -o yolov3_light_shared $(CXX_LIBS) $(LDFLAGS) ${LITE_LIGHT_SHARED_LIBS}

# Bug fix: prerequisite was yolov3_full.o although the link line uses yolov3_light.o.
yolov3_light_static: fetch_opencv yolov3_light.o test_helper.o
	$(CC) $(SYSROOT_LINK) $(CXXFLAGS_LINK) yolov3_light.o test_helper.o -o yolov3_light_static ${LITE_LIGHT_STATIC_LIBS} $(CXX_LIBS) $(LDFLAGS)

#####
all: classification_full_shared classification_full_static classification_light_shared classification_light_static yolov3_full_shared yolov3_full_static yolov3_light_shared yolov3_light_static

# Bug fix: two targets used `rm -r` (no -f), which errors when the file is absent.
clean:
	rm -f *.o
	rm -f classification_full_shared
	rm -f classification_full_static
	rm -f classification_light_shared
	rm -f classification_light_static
	rm -f yolov3_full_shared
	rm -f yolov3_full_static
	rm -f yolov3_light_shared
	rm -f yolov3_light_static
# Build the Paddle-Lite C++ library tests for Android arm64-v8a.
ARM_ABI = arm8
export ARM_ABI

include ../Makefile.def

LITE_ROOT=../../../
THIRD_PARTY_DIR=${LITE_ROOT}/third_party

OPENCV_VERSION=opencv4.1.0

# Prebuilt static OpenCV libraries (link order matters for static linking:
# imgcodecs -> imgproc -> core -> 3rdparty).
OPENCV_LIBS = ../../../third_party/${OPENCV_VERSION}/arm64-v8a/libs/libopencv_imgcodecs.a \
              ../../../third_party/${OPENCV_VERSION}/arm64-v8a/libs/libopencv_imgproc.a \
              ../../../third_party/${OPENCV_VERSION}/arm64-v8a/libs/libopencv_core.a \
              ../../../third_party/${OPENCV_VERSION}/arm64-v8a/3rdparty/libs/libtegra_hal.a \
              ../../../third_party/${OPENCV_VERSION}/arm64-v8a/3rdparty/libs/liblibjpeg-turbo.a \
              ../../../third_party/${OPENCV_VERSION}/arm64-v8a/3rdparty/libs/liblibwebp.a \
              ../../../third_party/${OPENCV_VERSION}/arm64-v8a/3rdparty/libs/liblibpng.a \
              ../../../third_party/${OPENCV_VERSION}/arm64-v8a/3rdparty/libs/liblibjasper.a \
              ../../../third_party/${OPENCV_VERSION}/arm64-v8a/3rdparty/libs/liblibtiff.a \
              ../../../third_party/${OPENCV_VERSION}/arm64-v8a/3rdparty/libs/libIlmImf.a \
              ../../../third_party/${OPENCV_VERSION}/arm64-v8a/3rdparty/libs/libtbb.a \
              ../../../third_party/${OPENCV_VERSION}/arm64-v8a/3rdparty/libs/libcpufeatures.a

OPENCV_INCLUDE = -I../../../third_party/${OPENCV_VERSION}/arm64-v8a/include

CXX_INCLUDES = $(INCLUDES) ${OPENCV_INCLUDE} -I$(LITE_ROOT)/cxx/include -I${THIRD_PARTY_DIR}/gflags/include

CXX_LIBS = ${OPENCV_LIBS} ${THIRD_PARTY_DIR}/gflags/lib/libgflags.a $(SYSTEM_LIBS)

# Paddle-Lite full/light API in shared and static flavors.
# (fixed typo: SHAPRED -> SHARED; these variables are local to this Makefile)
LITE_FULL_SHARED_LIBS=-L$(LITE_ROOT)/cxx/lib/ -lpaddle_full_api_shared
LITE_FULL_STATIC_LIBS=$(LITE_ROOT)/cxx/lib/libpaddle_api_full_bundled.a
LITE_LIGHT_SHARED_LIBS=-L$(LITE_ROOT)/cxx/lib/ -lpaddle_light_api_shared
LITE_LIGHT_STATIC_LIBS=$(LITE_ROOT)/cxx/lib/libpaddle_api_light_bundled.a

##########
# Download and unpack the prebuilt OpenCV archive once into THIRD_PARTY_DIR.
fetch_opencv:
	@ test -d ${THIRD_PARTY_DIR} || mkdir ${THIRD_PARTY_DIR}
	@ test -e ${THIRD_PARTY_DIR}/${OPENCV_VERSION}.tar.gz || \
	(echo "fetch opencv libs" && \
	wget -P ${THIRD_PARTY_DIR} https://paddle-inference-dist.bj.bcebos.com/${OPENCV_VERSION}.tar.gz)
	@ test -d ${THIRD_PARTY_DIR}/${OPENCV_VERSION} || \
	tar -zxvf ${THIRD_PARTY_DIR}/${OPENCV_VERSION}.tar.gz -C ${THIRD_PARTY_DIR}

# NOTE: SYSROOT_COMPLILE spelling matches the variable defined in ../Makefile.def.
test_helper.o: test_helper.cc
	$(CC) $(SYSROOT_COMPLILE) $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -o test_helper.o -c test_helper.cc

classification_full.o: classification_full.cc
	$(CC) $(SYSROOT_COMPLILE) $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -o classification_full.o -c classification_full.cc

classification_light.o: classification_light.cc
	$(CC) $(SYSROOT_COMPLILE) $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -o classification_light.o -c classification_light.cc

classification_full_shared: fetch_opencv classification_full.o test_helper.o
	$(CC) $(SYSROOT_LINK) $(CXXFLAGS_LINK) classification_full.o test_helper.o -o classification_full_shared $(CXX_LIBS) $(LDFLAGS) ${LITE_FULL_SHARED_LIBS}

classification_full_static: fetch_opencv classification_full.o test_helper.o
	$(CC) $(SYSROOT_LINK) $(CXXFLAGS_LINK) classification_full.o test_helper.o -o classification_full_static ${LITE_FULL_STATIC_LIBS} $(CXX_LIBS) $(LDFLAGS)

classification_light_shared: fetch_opencv classification_light.o test_helper.o
	$(CC) $(SYSROOT_LINK) $(CXXFLAGS_LINK) classification_light.o test_helper.o -o classification_light_shared $(CXX_LIBS) $(LDFLAGS) ${LITE_LIGHT_SHARED_LIBS}

classification_light_static: fetch_opencv classification_light.o test_helper.o
	$(CC) $(SYSROOT_LINK) $(CXXFLAGS_LINK) classification_light.o test_helper.o -o classification_light_static ${LITE_LIGHT_STATIC_LIBS} $(CXX_LIBS) $(LDFLAGS)

######
yolov3_full.o: yolov3_full.cc
	$(CC) $(SYSROOT_COMPLILE) $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -o yolov3_full.o -c yolov3_full.cc

yolov3_light.o: yolov3_light.cc
	$(CC) $(SYSROOT_COMPLILE) $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -o yolov3_light.o -c yolov3_light.cc

yolov3_full_shared: fetch_opencv yolov3_full.o test_helper.o
	$(CC) $(SYSROOT_LINK) $(CXXFLAGS_LINK) yolov3_full.o test_helper.o -o yolov3_full_shared $(CXX_LIBS) $(LDFLAGS) ${LITE_FULL_SHARED_LIBS}

yolov3_full_static: fetch_opencv yolov3_full.o test_helper.o
	$(CC) $(SYSROOT_LINK) $(CXXFLAGS_LINK) yolov3_full.o test_helper.o -o yolov3_full_static ${LITE_FULL_STATIC_LIBS} $(CXX_LIBS) $(LDFLAGS)

yolov3_light_shared: fetch_opencv yolov3_light.o test_helper.o
	$(CC) $(SYSROOT_LINK) $(CXXFLAGS_LINK) yolov3_light.o test_helper.o -o yolov3_light_shared $(CXX_LIBS) $(LDFLAGS) ${LITE_LIGHT_SHARED_LIBS}

# Bug fix: prerequisite was yolov3_full.o although the link line uses yolov3_light.o.
yolov3_light_static: fetch_opencv yolov3_light.o test_helper.o
	$(CC) $(SYSROOT_LINK) $(CXXFLAGS_LINK) yolov3_light.o test_helper.o -o yolov3_light_static ${LITE_LIGHT_STATIC_LIBS} $(CXX_LIBS) $(LDFLAGS)

#####
all: classification_full_shared classification_full_static classification_light_shared classification_light_static yolov3_full_shared yolov3_full_static yolov3_light_shared yolov3_light_static

# Bug fix: two targets used `rm -r` (no -f), which errors when the file is absent.
clean:
	rm -f *.o
	rm -f classification_full_shared
	rm -f classification_full_static
	rm -f classification_light_shared
	rm -f classification_light_static
	rm -f yolov3_full_shared
	rm -f yolov3_full_static
	rm -f yolov3_light_shared
	rm -f yolov3_light_static
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <gflags/gflags.h>
#include <fstream>
#include <iostream>
#include "paddle_api.h" // NOLINT
#include "test_helper.h" // NOLINT
// Command-line flags for the full-API classification accuracy test.
// model_dir is required; exactly one of img_path / img_txt_path should be set
// (main rejects the case where both are empty).
DEFINE_string(model_dir,
              "",
              "the path of the model, the model and param files is under "
              "model_dir.");
DEFINE_string(model_filename,
              "",
              "the filename of model file. When the model is combined formate, "
              "please set model_file.");
DEFINE_string(param_filename,
              "",
              "the filename of param file, set param_file when the model is "
              "combined formate.");
DEFINE_string(img_path, "", "the path of input image");
DEFINE_string(img_txt_path,
              "",
              "the path of input image, the image is processed "
              " and saved in txt file");
// Ground-truth values for the check in Run(); run.sh passes these explicitly.
DEFINE_double(out_max_value, 0.0, "The max value in output tensor");
DEFINE_double(threshold,
              1e-3,
              "If the max value diff is smaller than threshold, pass test");
DEFINE_int32(out_max_value_index, 65, "The max value index in output tensor");
// Optimize model for ARM CPU.
// If the model is not combined, set model_filename and params_filename as empty
void OptModel(const std::string& load_model_dir,
const std::string& model_filename,
const std::string& params_filename,
const std::string& save_model_path) {
paddle::lite_api::CxxConfig config;
config.set_model_dir(load_model_dir);
if (!model_filename.empty() && !params_filename.empty()) {
config.set_model_file(load_model_dir + "/" + model_filename);
config.set_param_file(load_model_dir + "/" + params_filename);
}
std::vector<paddle::lite_api::Place> vaild_places = {
paddle::lite_api::Place{TARGET(kARM), PRECISION(kFloat)},
paddle::lite_api::Place{TARGET(kARM), PRECISION(kInt32)},
paddle::lite_api::Place{TARGET(kARM), PRECISION(kInt64)},
};
config.set_valid_places(vaild_places);
auto predictor = paddle::lite_api::CreatePaddlePredictor(config);
std::string cmd_str = "rm -rf " + save_model_path;
int ret = system(cmd_str.c_str());
if (ret == 0) {
std::cout << "Delete old optimized model " << save_model_path << std::endl;
}
predictor->SaveOptimizedModel(save_model_path,
paddle::lite_api::LiteModelType::kNaiveBuffer);
std::cout << "Load model from " << load_model_dir << std::endl;
std::cout << "Save optimized model to " << save_model_path << std::endl;
}
void Run(const std::string& model_path,
const std::string& img_path,
const std::string& img_txt_path,
const float out_max_value,
const int out_max_value_index,
const float threshold,
const int height,
const int width) {
// set config and create predictor
paddle::lite_api::MobileConfig config;
config.set_threads(3);
config.set_model_from_file(model_path);
auto predictor = paddle::lite_api::CreatePaddlePredictor(config);
// set input
auto input_tensor = predictor->GetInput(0);
input_tensor->Resize({1, 3, height, width});
auto input_data = input_tensor->mutable_data<float>();
if (img_txt_path.size() > 0) {
std::fstream fs(img_txt_path);
if (!fs.is_open()) {
std::cerr << "Fail to open img txt file:" << img_txt_path << std::endl;
}
int num = 1 * 3 * height * width;
for (int i = 0; i < num; i++) {
fs >> input_data[i];
}
} else {
cv::Mat img = imread(img_path, cv::IMREAD_COLOR);
if (!img.data) {
std::cerr << "Fail to open img:" << img_path << std::endl;
exit(1);
}
float means[3] = {0.485f, 0.456f, 0.406f};
float scales[3] = {0.229f, 0.224f, 0.225f};
process_img(img, width, height, input_data, means, scales);
}
predictor->Run();
auto out_tensor = predictor->GetOutput(0);
auto* out_data = out_tensor->data<float>();
int64_t output_num = ShapeProduction(out_tensor->shape());
float max_value = out_data[0];
int max_index = 0;
for (int i = 0; i < output_num; i++) {
if (max_value < out_data[i]) {
max_value = out_data[i];
max_index = i;
}
}
std::cout << "max_value:" << max_value << std::endl;
std::cout << "max_index:" << max_index << std::endl;
std::cout << "max_value_ground_truth:" << out_max_value << std::endl;
std::cout << "max_index_ground_truth:" << out_max_value_index << std::endl;
if (max_index != out_max_value_index ||
fabs(max_value - out_max_value) > threshold) {
std::cerr << "----------Fail Test.---------- \n\n";
} else {
std::cout << "----------Pass Test.---------- \n\n";
}
}
// Entry point: validate flags, optimize the model with the full API, then
// run the accuracy check on the optimized model via the light API.
int main(int argc, char** argv) {
  // Check inputs
  google::ParseCommandLineFlags(&argc, &argv, true);
  // model_dir is required and at least one image input must be given.
  if (FLAGS_model_dir.empty() ||
      (FLAGS_img_path.empty() && FLAGS_img_txt_path.empty())) {
    std::cerr << "Input error." << std::endl;
    std::cerr
        << "Usage: " << argv[0] << std::endl
        << "--model_dir: the path of not optimized model \n"
           "--model_filename: the model filename of not optimized model \n"
           "--param_filename: the param filename of not optimized model \n"
           "--img_txt_path: the path of input image, the image is processed \n"
           " and saved in txt file \n"
           "--img_path: the path of input image \n"
           "--out_max_value: The max value in output tensor \n"
           "--threshold: If the max value diff is smaller than threshold,\n"
           " pass test. Default 1e-3.\n"
           "--out_max_value_index: The max value index in output tensor \n";
    exit(1);
  }
  // The test image is fed at a fixed 224x224 resolution.
  const int height = 224;
  const int width = 224;
  // Strip a trailing '/' so the "_opt2" suffix attaches cleanly.
  // (model_dir is non-empty here — the empty case was rejected above.)
  std::string model_dir = FLAGS_model_dir;
  if (model_dir.back() == '/') {
    model_dir.pop_back();
  }
  std::string optimized_model_path = model_dir + "_opt2";
  OptModel(FLAGS_model_dir,
           FLAGS_model_filename,
           FLAGS_param_filename,
           optimized_model_path);
  // SaveOptimizedModel writes "<path>.nb"; load that file for the test run.
  std::string run_model_path = optimized_model_path + ".nb";
  // Run test
  Run(run_model_path,
      FLAGS_img_path,
      FLAGS_img_txt_path,
      FLAGS_out_max_value,
      FLAGS_out_max_value_index,
      FLAGS_threshold,
      height,
      width);
  return 0;
}
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <gflags/gflags.h>
#include <fstream>
#include <iostream>
#include "paddle_api.h" // NOLINT
#include "test_helper.h" // NOLINT
// Command-line flags for the light-API classification accuracy test.
// optimized_model_path must point to an already-optimized .nb model.
DEFINE_string(optimized_model_path, "", "the path of optimized model");
DEFINE_string(img_path, "", "the path of input image");
DEFINE_string(img_txt_path,
              "",
              "the path of input image, the image is processed "
              " and saved in txt file");
// Ground-truth values for the check in Run(); run.sh passes these explicitly.
DEFINE_double(out_max_value, 0.0, "The max value in output tensor");
DEFINE_double(threshold,
              1e-3,
              "If the max value diff is smaller than threshold, pass test");
// Default -1 never matches a real argmax, so the flag must be supplied.
DEFINE_int32(out_max_value_index, -1, "The max value index in output tensor");
void Run(const std::string& model_path,
const std::string& img_path,
const std::string& img_txt_path,
const float out_max_value,
const int out_max_value_index,
const float threshold,
const int height,
const int width) {
// set config and create predictor
paddle::lite_api::MobileConfig config;
config.set_threads(3);
config.set_model_from_file(model_path);
auto predictor = paddle::lite_api::CreatePaddlePredictor(config);
// set input
auto input_tensor = predictor->GetInput(0);
input_tensor->Resize({1, 3, height, width});
auto input_data = input_tensor->mutable_data<float>();
if (img_txt_path.size() > 0) {
std::fstream fs(img_txt_path);
if (!fs.is_open()) {
std::cerr << "Fail to open img txt file:" << img_txt_path << std::endl;
}
int num = 1 * 3 * height * width;
for (int i = 0; i < num; i++) {
fs >> input_data[i];
}
} else {
cv::Mat img = imread(img_path, cv::IMREAD_COLOR);
if (!img.data) {
std::cerr << "Fail to open img:" << img_path << std::endl;
exit(1);
}
float means[3] = {0.485f, 0.456f, 0.406f};
float scales[3] = {0.229f, 0.224f, 0.225f};
process_img(img, width, height, input_data, means, scales);
}
predictor->Run();
auto out_tensor = predictor->GetOutput(0);
auto* out_data = out_tensor->data<float>();
int64_t output_num = ShapeProduction(out_tensor->shape());
float max_value = out_data[0];
int max_index = 0;
for (int i = 0; i < output_num; i++) {
if (max_value < out_data[i]) {
max_value = out_data[i];
max_index = i;
}
}
std::cout << "max_value:" << max_value << std::endl;
std::cout << "max_index:" << max_index << std::endl;
std::cout << "max_value_ground_truth:" << out_max_value << std::endl;
std::cout << "max_index_ground_truth:" << out_max_value_index << std::endl;
if (max_index != out_max_value_index ||
fabs(max_value - out_max_value) > threshold) {
std::cerr << "----------Fail Test---------- \n\n";
} else {
std::cout << "----------Pass Test---------- \n\n";
}
}
// Entry point: validate flags, then run the accuracy check directly on the
// already-optimized model (no full-API optimization step here).
int main(int argc, char** argv) {
  // Check inputs
  google::ParseCommandLineFlags(&argc, &argv, true);
  // optimized_model_path is required and at least one image input must be given.
  if (FLAGS_optimized_model_path.empty() ||
      (FLAGS_img_path.empty() && FLAGS_img_txt_path.empty())) {
    std::cerr << "Input error." << std::endl;
    std::cerr
        << "Usage: " << argv[0] << std::endl
        << "--optimized_model_path: the path of optimized model \n"
           "--img_txt_path: the path of input image, the image is processed \n"
           " and saved in txt file \n"
           "--img_path: the path of input image \n"
           "--out_max_value: The max value in output tensor \n"
           "--threshold: If the max value diff is smaller than threshold,\n"
           " pass test. Default 1e-3.\n"
           "--out_max_value_index: The max value index in output tensor \n";
    exit(1);
  }
  // The test image is fed at a fixed 224x224 resolution.
  const int height = 224;
  const int width = 224;
  // Run test
  Run(FLAGS_optimized_model_path,
      FLAGS_img_path,
      FLAGS_img_txt_path,
      FLAGS_out_max_value,
      FLAGS_out_max_value_index,
      FLAGS_threshold,
      height,
      width);
  return 0;
}
# Build all test binaries and assemble a self-contained folder
# (binaries + run script + lite libs + test models/images) to push to the device.
# Fail fast: without set -e a failed `make all` still packaged stale binaries.
set -e

make clean
make all -j

gf=test_lite_lib_files
if [ -d ${gf} ];then
    rm -rf ${gf}
fi
mkdir ${gf}

# Collect the freshly built binaries and the on-device run script.
mv classification_full_shared ${gf}
mv classification_full_static ${gf}
mv classification_light_shared ${gf}
mv classification_light_static ${gf}
mv yolov3_full_shared ${gf}
mv yolov3_full_static ${gf}
mv yolov3_light_shared ${gf}
mv yolov3_light_static ${gf}
cp run.sh ${gf}
make clean

# Ship the Paddle-Lite C++ headers/libs as ${gf}/lite.
cp -r ../../../cxx/ ${gf}
mv ${gf}/cxx ${gf}/lite

# Fetch the test models and images once, then place them as ${gf}/models_imgs
# (move + rename merged into a single mv).
if [ ! -f "test_libs_models_imgs.tgz" ];then
    wget https://paddle-inference-dist.cdn.bcebos.com/PaddleLite/test_libs_models_imgs.tgz
fi
tar zxvf test_libs_models_imgs.tgz
mv test_libs_models_imgs ${gf}/models_imgs
# On-device runner: exercises every packaged binary against the bundled
# models/images. Expected values (out_max_value / out_values) were recorded
# from known-good runs.
# NOTE(review): the binaries print Pass/Fail but appear to always exit 0, so
# this script does not stop on a failed check — confirm if CI greps the output.
export LD_LIBRARY_PATH=$PWD/lite/lib/:${LD_LIBRARY_PATH}
# mobilenetv1
./classification_light_shared \
    --optimized_model_path=models_imgs/models/mobilenetv1.nb \
    --img_txt_path=models_imgs/images/classification.jpg.txt \
    --out_max_value=0.936887 \
    --out_max_value_index=65
./classification_light_static \
    --optimized_model_path=models_imgs/models/mobilenetv1.nb \
    --img_txt_path=models_imgs/images/classification.jpg.txt \
    --out_max_value=0.936887 \
    --out_max_value_index=65
./classification_full_static \
    --model_dir=models_imgs/models/mobilenetv1 \
    --img_txt_path=models_imgs/images/classification.jpg.txt \
    --out_max_value=0.936887 \
    --out_max_value_index=65
./classification_full_shared \
    --model_dir=models_imgs/models/mobilenetv1 \
    --img_txt_path=models_imgs/images/classification.jpg.txt \
    --out_max_value=0.936887 \
    --out_max_value_index=65
# mobilenetv2
./classification_light_shared \
    --optimized_model_path=models_imgs/models/mobilenetv2.nb \
    --img_txt_path=models_imgs/images/classification.jpg.txt \
    --out_max_value=0.868888 \
    --out_max_value_index=65
./classification_light_static \
    --optimized_model_path=models_imgs/models/mobilenetv2.nb \
    --img_txt_path=models_imgs/images/classification.jpg.txt \
    --out_max_value=0.868888 \
    --out_max_value_index=65
./classification_full_static \
    --model_dir=models_imgs/models/mobilenetv2 \
    --img_txt_path=models_imgs/images/classification.jpg.txt \
    --out_max_value=0.868888 \
    --out_max_value_index=65
./classification_full_shared \
    --model_dir=models_imgs/models/mobilenetv2 \
    --img_txt_path=models_imgs/images/classification.jpg.txt \
    --out_max_value=0.868888 \
    --out_max_value_index=65
# yolov3
./yolov3_light_shared \
    --optimized_model_path=models_imgs/models/yolov3_mobilenetv1.nb \
    --img_txt_path=models_imgs/images/yolov3.jpg.txt \
    --out_values=0,0.153605,174.494,199.729,562.075,604.014
./yolov3_light_static \
    --optimized_model_path=models_imgs/models/yolov3_mobilenetv1.nb \
    --img_txt_path=models_imgs/images/yolov3.jpg.txt \
    --out_values=0,0.153605,174.494,199.729,562.075,604.014
./yolov3_full_static \
    --model_dir=models_imgs/models/yolov3_mobilenetv1 \
    --img_txt_path=models_imgs/images/yolov3.jpg.txt \
    --out_values=0,0.153605,174.494,199.729,562.075,604.014
./yolov3_full_shared \
    --model_dir=models_imgs/models/yolov3_mobilenetv1 \
    --img_txt_path=models_imgs/images/yolov3.jpg.txt \
    --out_values=0,0.153605,174.494,199.729,562.075,604.014
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "test_helper.h" // NOLINT
#include <sys/time.h>
#include <time.h>
#include <algorithm>
#include <cstdio>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <numeric>
#include <string>
#include <vector>
#include "opencv2/core.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/imgproc.hpp"
double GetCurrentUS() {
struct timeval time;
gettimeofday(&time, NULL);
return 1e+6 * time.tv_sec + time.tv_usec;
}
// Product of all dimensions of `shape`; an empty shape yields 1 (a scalar).
int64_t ShapeProduction(const std::vector<int64_t>& shape) {
  return std::accumulate(
      shape.begin(), shape.end(), static_cast<int64_t>(1),
      [](int64_t acc, int64_t dim) { return acc * dim; });
}
// Parse a comma-separated list of integers, e.g. "1,3,224,224".
// Each element is parsed from the start of the remaining string; parsing
// stops after the last segment (no trailing comma required).
std::vector<int64_t> GetIntNumsFromStr(const std::string& str) {
  std::vector<int64_t> nums;
  std::string tmp_str = str;
  while (!tmp_str.empty()) {
    // Bug fix: atoi truncated values that do not fit in 32 bits even though
    // the result type is int64_t; atoll parses the full 64-bit range.
    int64_t num = std::atoll(tmp_str.data());
    nums.push_back(num);
    size_t next_offset = tmp_str.find(",");
    if (next_offset == std::string::npos) {
      break;
    } else {
      tmp_str = tmp_str.substr(next_offset + 1);
    }
  }
  return nums;
}
// Parse a comma-separated list of doubles, e.g. "0.1,2.5,-3".
// Each element is parsed (atof) from the start of the remaining string;
// parsing stops after the last segment.
std::vector<double> GetDoubleNumsFromStr(const std::string& str) {
  std::vector<double> values;
  std::string rest = str;
  for (; !rest.empty();) {
    values.push_back(atof(rest.data()));
    const size_t comma = rest.find(",");
    if (comma == std::string::npos) {
      break;
    }
    rest = rest.substr(comma + 1);
  }
  return values;
}
// fill tensor with mean and scale and trans layout: nhwc -> nchw, neon speed up
// `size` is the pixel count (width*height); `din` holds size*3 interleaved
// RGB floats; `dout` receives three planar channels of `size` floats each,
// each element computed as (x - mean[c]) / scale[c].
void neon_mean_scale(
    const float* din, float* dout, int size, float* mean, float* scale) {
  float32x4_t vmean0 = vdupq_n_f32(mean[0]);
  float32x4_t vmean1 = vdupq_n_f32(mean[1]);
  float32x4_t vmean2 = vdupq_n_f32(mean[2]);
  // Multiply by reciprocal instead of dividing per element.
  float32x4_t vscale0 = vdupq_n_f32(1.f / scale[0]);
  float32x4_t vscale1 = vdupq_n_f32(1.f / scale[1]);
  float32x4_t vscale2 = vdupq_n_f32(1.f / scale[2]);
  float* dout_c0 = dout;
  float* dout_c1 = dout + size;
  float* dout_c2 = dout + size * 2;
  int i = 0;
  // Vectorized main loop: de-interleave and normalize 4 pixels per iteration.
  for (; i < size - 3; i += 4) {
    float32x4x3_t vin3 = vld3q_f32(din);
    float32x4_t vsub0 = vsubq_f32(vin3.val[0], vmean0);
    float32x4_t vsub1 = vsubq_f32(vin3.val[1], vmean1);
    float32x4_t vsub2 = vsubq_f32(vin3.val[2], vmean2);
    float32x4_t vs0 = vmulq_f32(vsub0, vscale0);
    float32x4_t vs1 = vmulq_f32(vsub1, vscale1);
    float32x4_t vs2 = vmulq_f32(vsub2, vscale2);
    vst1q_f32(dout_c0, vs0);
    vst1q_f32(dout_c1, vs1);
    vst1q_f32(dout_c2, vs2);
    din += 12;
    dout_c0 += 4;
    dout_c1 += 4;
    dout_c2 += 4;
  }
  // Scalar tail for the remaining (size % 4) pixels.
  // Bug fix: all three channels were previously written through dout_c0,
  // corrupting the end of channel 0 and leaving the c1/c2 tails unwritten.
  for (; i < size; i++) {
    *(dout_c0++) = (*(din++) - mean[0]) / scale[0];
    *(dout_c1++) = (*(din++) - mean[1]) / scale[1];
    *(dout_c2++) = (*(din++) - mean[2]) / scale[2];
  }
}
// Process img and set it as input: convert BGR -> RGB, resize to
// (width, height), scale pixels to [0, 1] floats, then normalize with
// means/scales into planar CHW order at dest_data via neon_mean_scale.
void process_img(const cv::Mat& img,
                 int width,
                 int height,
                 float* dest_data,
                 float* means,
                 float* scales) {
  cv::Mat rgb;
  cv::cvtColor(img, rgb, cv::COLOR_BGR2RGB);
  cv::resize(rgb, rgb, cv::Size(width, height), 0.f, 0.f);
  cv::Mat float_img;
  rgb.convertTo(float_img, CV_32FC3, 1 / 255.f);
  const float* src = reinterpret_cast<const float*>(float_img.data);
  neon_mean_scale(src, dest_data, width * height, means, scales);
}
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
#include <vector>
#include "opencv2/core.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/imgproc.hpp"

// Shared helpers for the lite-lib test demos.

// Current wall-clock time in microseconds (gettimeofday based).
double GetCurrentUS();
// Product of all dims of `shape`; empty shape -> 1.
int64_t ShapeProduction(const std::vector<int64_t>& shape);
// Parse comma-separated integers, e.g. "1,3,224,224".
std::vector<int64_t> GetIntNumsFromStr(const std::string& str);
// Parse comma-separated doubles, e.g. "0.1,2.5".
std::vector<double> GetDoubleNumsFromStr(const std::string& str);
// Normalize (x - mean[c]) / scale[c] and convert interleaved RGB (HWC, `size`
// pixels in din) to planar CHW (dout).
void neon_mean_scale(
    const float* din, float* dout, int size, float* mean, float* scale);
// BGR->RGB + resize + [0,1] scaling + mean/scale normalization into dst_data.
void process_img(const cv::Mat& img,
                 int width,
                 int height,
                 float* dst_data,
                 float* means,
                 float* scales);
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <gflags/gflags.h>
#include <fstream>
#include <iostream>
#include "paddle_api.h" // NOLINT
#include "test_helper.h" // NOLINT
// Command-line flags for the full-API yolov3 test.
// model_dir is required; exactly one of img_path / img_txt_path should be set.
DEFINE_string(model_dir,
              "",
              "the path of the model, the model and param files is under "
              "model_dir.");
DEFINE_string(model_filename,
              "",
              "the filename of model file. When the model is combined formate, "
              "please set model_file.");
DEFINE_string(param_filename,
              "",
              "the filename of param file, set param_file when the model is "
              "combined formate.");
DEFINE_string(img_path, "", "the path of input image");
DEFINE_string(img_txt_path,
              "",
              "the path of input image, the image is processed "
              " and saved in txt file");
// NOTE(review): the help text mentions "colon and comma", but run.sh passes
// comma-separated values only and GetDoubleNumsFromStr splits on ',' —
// confirm whether colon support was ever intended.
DEFINE_string(out_values,
              "",
              "The output values, separated by colon and comma");
DEFINE_double(threshold,
              1e-3,
              "If the output value diff is smaller than threshold, pass test");
void OptModel(const std::string& load_model_dir,
const std::string& model_filename,
const std::string& params_filename,
const std::string& save_model_path) {
paddle::lite_api::CxxConfig config;
config.set_model_dir(load_model_dir);
if (!model_filename.empty() && !params_filename.empty()) {
config.set_model_file(load_model_dir + "/" + model_filename);
config.set_param_file(load_model_dir + "/" + params_filename);
}
std::vector<paddle::lite_api::Place> vaild_places = {
paddle::lite_api::Place{TARGET(kARM), PRECISION(kFloat)},
paddle::lite_api::Place{TARGET(kARM), PRECISION(kInt32)},
paddle::lite_api::Place{TARGET(kARM), PRECISION(kInt64)},
};
config.set_valid_places(vaild_places);
auto predictor = paddle::lite_api::CreatePaddlePredictor(config);
std::string cmd_str = "rm -rf " + save_model_path;
int ret = system(cmd_str.c_str());
if (ret == 0) {
std::cout << "Delete old optimized model " << save_model_path << std::endl;
}
predictor->SaveOptimizedModel(save_model_path,
paddle::lite_api::LiteModelType::kNaiveBuffer);
std::cout << "Load model from " << load_model_dir << std::endl;
std::cout << "Save optimized model to " << save_model_path << std::endl;
}
// Run inference with an optimized (.nb) model and compare the leading
// outputs against the expected ground-truth values.
//
// Args:
//   model_path:   path to the optimized naive-buffer model.
//   img_path:     raw image path (used when img_txt_path is empty).
//   img_txt_path: preprocessed image txt path (takes precedence).
//   out_values:   expected output values; only the first
//                 min(output_num, out_values.size()) entries are compared.
//   threshold:    max allowed absolute diff per element.
//   height/width: network input resolution.
void Run(const std::string& model_path,
         const std::string& img_path,
         const std::string& img_txt_path,
         const std::vector<double>& out_values,
         const float threshold,
         const int height,
         const int width) {
  // set config and create predictor
  paddle::lite_api::MobileConfig config;
  config.set_threads(3);
  config.set_model_from_file(model_path);
  auto predictor = paddle::lite_api::CreatePaddlePredictor(config);
  // set input
  auto input_tensor = predictor->GetInput(0);
  input_tensor->Resize({1, 3, height, width});
  auto input_data = input_tensor->mutable_data<float>();
  if (img_txt_path.size() > 0) {
    std::fstream fs(img_txt_path);
    if (!fs.is_open()) {
      std::cerr << "Fail to open img txt file:" << img_txt_path << std::endl;
      // Bug fix: previously execution continued and read from the failed
      // stream, filling the input tensor with garbage. Abort instead,
      // matching the image-path branch below.
      exit(1);
    }
    int num = 1 * 3 * height * width;
    for (int i = 0; i < num; i++) {
      fs >> input_data[i];
    }
  } else {
    cv::Mat img = imread(img_path, cv::IMREAD_COLOR);
    if (!img.data) {
      std::cerr << "Fail to open img:" << img_path << std::endl;
      exit(1);
    }
    // ImageNet-style normalization constants passed to process_img.
    float means[3] = {0.485f, 0.456f, 0.406f};
    float scales[3] = {0.229f, 0.224f, 0.225f};
    process_img(img, width, height, input_data, means, scales);
  }
  // Second input carries the input shape (height, width).
  auto shape_tensor = predictor->GetInput(1);
  shape_tensor->Resize({1, 2});
  auto* shape_data = shape_tensor->mutable_data<int>();
  shape_data[0] = height;
  shape_data[1] = width;
  predictor->Run();
  // Compare outputs against ground truth; fix the signed/unsigned
  // comparison by using int64_t for the loop index.
  auto out_tensor = predictor->GetOutput(0);
  auto* out_data = out_tensor->data<float>();
  int64_t output_num = ShapeProduction(out_tensor->shape());
  bool is_pass = true;
  for (int64_t i = 0;
       i < output_num && i < static_cast<int64_t>(out_values.size());
       i++) {
    std::cout << "id:" << i << " out_data:" << out_data[i]
              << " gt_data:" << out_values[i] << std::endl;
    if (fabs(out_data[i] - out_values[i]) > threshold) {
      is_pass = false;
    }
  }
  if (is_pass) {
    std::cout << "----------Pass test---------- \n\n";
  } else {
    std::cout << "----------Fail test---------- \n\n";
  }
}
int main(int argc, char** argv) {
// Check inputs
google::ParseCommandLineFlags(&argc, &argv, true);
if (FLAGS_model_dir.empty() ||
(FLAGS_img_path.empty() && FLAGS_img_txt_path.empty())) {
std::cerr << "Input error." << std::endl;
std::cerr
<< "Usage: " << argv[0] << std::endl
<< "--model_dir: the path of not optimized model \n"
"--model_filename: the model filename of not optimized model \n"
"--param_filename: the param filename of not optimized model \n"
"--img_txt_path: the path of input image, the image is processed \n"
" and saved in txt file \n"
"--img_path: the path of input image \n"
"--out_values: The output values, separated by colon and comma.\n"
"--threshold: If the out value diff is smaller than threshold,\n"
" pass test. Default 1e-3.\n";
exit(1);
}
const int height = 608;
const int width = 608;
std::vector<double> out_values = GetDoubleNumsFromStr(FLAGS_out_values);
std::string model_dir = FLAGS_model_dir;
if (model_dir.back() == '/') {
model_dir.pop_back();
}
std::string optimized_model_path = model_dir + "_opt2";
OptModel(FLAGS_model_dir,
FLAGS_model_filename,
FLAGS_param_filename,
optimized_model_path);
std::string run_model_path = optimized_model_path + ".nb";
// Run test
Run(run_model_path,
FLAGS_img_path,
FLAGS_img_txt_path,
out_values,
FLAGS_threshold,
height,
width);
return 0;
}
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <gflags/gflags.h>
#include <fstream>
#include <iostream>
#include "paddle_api.h" // NOLINT
#include "test_helper.h" // NOLINT
// Path to an already-optimized (naive-buffer) model, loaded directly.
DEFINE_string(optimized_model_path, "", "the path of the optimized model");
// Path to a raw input image; it is decoded and preprocessed with OpenCV.
DEFINE_string(img_path, "", "the path of input image");
// Path to a txt file containing an already-preprocessed image
// (1*3*height*width float values, read sequentially).
DEFINE_string(img_txt_path,
              "",
              "the path of input image, the image is processed "
              " and saved in txt file");
// Expected output values used as ground truth for the accuracy check.
DEFINE_string(out_values,
              "",
              "The output values, separated by colon and comma");
// Maximum allowed absolute difference between actual and expected outputs.
DEFINE_double(threshold,
              1e-3,
              "If the output value diff is smaller than threshold, pass test");
// Run inference with an optimized (.nb) model and compare the leading
// outputs against the expected ground-truth values.
//
// Args:
//   model_path:   path to the optimized naive-buffer model.
//   img_path:     raw image path (used when img_txt_path is empty).
//   img_txt_path: preprocessed image txt path (takes precedence).
//   out_values:   expected output values; only the first
//                 min(output_num, out_values.size()) entries are compared.
//   threshold:    max allowed absolute diff per element.
//   height/width: network input resolution.
void Run(const std::string& model_path,
         const std::string& img_path,
         const std::string& img_txt_path,
         const std::vector<double>& out_values,
         const float threshold,
         const int height,
         const int width) {
  // set config and create predictor
  paddle::lite_api::MobileConfig config;
  config.set_threads(3);
  config.set_model_from_file(model_path);
  auto predictor = paddle::lite_api::CreatePaddlePredictor(config);
  // set input
  auto input_tensor = predictor->GetInput(0);
  input_tensor->Resize({1, 3, height, width});
  auto input_data = input_tensor->mutable_data<float>();
  if (img_txt_path.size() > 0) {
    std::fstream fs(img_txt_path);
    if (!fs.is_open()) {
      std::cerr << "Fail to open img txt file:" << img_txt_path << std::endl;
      // Bug fix: previously execution continued and read from the failed
      // stream, filling the input tensor with garbage. Abort instead,
      // matching the image-path branch below.
      exit(1);
    }
    int num = 1 * 3 * height * width;
    for (int i = 0; i < num; i++) {
      fs >> input_data[i];
    }
  } else {
    cv::Mat img = imread(img_path, cv::IMREAD_COLOR);
    if (!img.data) {
      std::cerr << "Fail to open img:" << img_path << std::endl;
      exit(1);
    }
    // ImageNet-style normalization constants passed to process_img.
    float means[3] = {0.485f, 0.456f, 0.406f};
    float scales[3] = {0.229f, 0.224f, 0.225f};
    process_img(img, width, height, input_data, means, scales);
  }
  // Second input carries the input shape (height, width).
  auto shape_tensor = predictor->GetInput(1);
  shape_tensor->Resize({1, 2});
  auto* shape_data = shape_tensor->mutable_data<int>();
  shape_data[0] = height;
  shape_data[1] = width;
  predictor->Run();
  // Compare outputs against ground truth; fix the signed/unsigned
  // comparison by using int64_t for the loop index.
  auto out_tensor = predictor->GetOutput(0);
  auto* out_data = out_tensor->data<float>();
  int64_t output_num = ShapeProduction(out_tensor->shape());
  bool is_pass = true;
  for (int64_t i = 0;
       i < output_num && i < static_cast<int64_t>(out_values.size());
       i++) {
    std::cout << "id:" << i << " out_data:" << out_data[i]
              << " gt_data:" << out_values[i] << std::endl;
    if (fabs(out_data[i] - out_values[i]) > threshold) {
      is_pass = false;
    }
  }
  if (is_pass) {
    std::cout << "----------Pass test---------- \n\n";
  } else {
    std::cout << "----------Fail test---------- \n\n";
  }
}
int main(int argc, char** argv) {
// Check inputs
google::ParseCommandLineFlags(&argc, &argv, true);
if (FLAGS_optimized_model_path.empty() ||
(FLAGS_img_path.empty() && FLAGS_img_txt_path.empty())) {
std::cerr << "Input error." << std::endl;
std::cerr
<< "Usage: " << argv[0] << std::endl
<< "--optimized_model_path: the path of optimized model \n"
"--img_txt_path: the path of input image, the image is processed \n"
" and saved in txt file \n"
"--img_path: the path of input image \n"
"--out_values: The output values, separated by colon and comma.\n"
"--threshold: If the out value diff is smaller than threshold,\n"
" pass test. Default 1e-3.\n";
exit(1);
}
const int height = 608;
const int width = 608;
std::vector<double> out_values = GetDoubleNumsFromStr(FLAGS_out_values);
// Run test
Run(FLAGS_optimized_model_path,
FLAGS_img_path,
FLAGS_img_txt_path,
out_values,
FLAGS_threshold,
height,
width);
return 0;
}
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册