Unverified · Commit a8d497a3 authored by Jason, committed by GitHub

Merge pull request #60 from jiangjiajun/secure_cpp

Secure cpp
@@ -5,12 +5,29 @@ option(WITH_MKL "Compile demo with MKL/OpenBlas support,defaultuseMKL."
 option(WITH_GPU "Compile demo with GPU/CPU, default use CPU." ON)
 option(WITH_STATIC_LIB "Compile demo with static/shared library, default use static." OFF)
 option(WITH_TENSORRT "Compile demo with TensorRT." OFF)
+option(WITH_ENCRYPTION "Compile demo with encryption tool." OFF)
 
-SET(TENSORRT_DIR "" CACHE PATH "Compile demo with TensorRT")
+SET(TENSORRT_DIR "" CACHE PATH "Location of libraries")
 SET(PADDLE_DIR "" CACHE PATH "Location of libraries")
 SET(OPENCV_DIR "" CACHE PATH "Location of libraries")
+SET(ENCRYPTION_DIR "" CACHE PATH "Location of libraries")
 SET(CUDA_LIB "" CACHE PATH "Location of libraries")
 
+if (NOT WIN32)
+    set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
+    set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
+    set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/demo)
+else()
+    set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/paddlex_inference)
+    set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/paddlex_inference)
+    set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/paddlex_inference)
+endif()
+
+if (NOT WIN32)
+    SET(YAML_BUILD_TYPE ON CACHE BOOL "yaml build shared library.")
+else()
+    SET(YAML_BUILD_TYPE OFF CACHE BOOL "yaml build shared library.")
+endif()
 include(cmake/yaml-cpp.cmake)
 include_directories("${CMAKE_SOURCE_DIR}/")
@@ -27,6 +44,11 @@ macro(safe_set_static_flag)
   endforeach(flag_var)
 endmacro()
 
+if (WITH_ENCRYPTION)
+  add_definitions(-DWITH_ENCRYPTION=${WITH_ENCRYPTION})
+endif()
+
 if (WITH_MKL)
   ADD_DEFINITIONS(-DUSE_MKL)
 endif()
@@ -183,6 +205,7 @@ else()
   set(DEPS ${DEPS}
       ${MATH_LIB} ${MKLDNN_LIB}
       glog gflags_static libprotobuf zlibstatic xxhash libyaml-cppmt)
   set(DEPS ${DEPS} libcmt shlwapi)
   if (EXISTS "${PADDLE_DIR}/third_party/install/snappy/lib")
     set(DEPS ${DEPS} snappy)
@@ -207,21 +230,35 @@ if(WITH_GPU)
     endif()
 endif()
 
+if(WITH_ENCRYPTION)
+  if(NOT WIN32)
+    include_directories("${ENCRYPTION_DIR}/include")
+    link_directories("${ENCRYPTION_DIR}/lib")
+    set(DEPS ${DEPS} ${ENCRYPTION_DIR}/lib/libpmodel-decrypt${CMAKE_SHARED_LIBRARY_SUFFIX})
+  else()
+    message(FATAL_ERROR "Encryption tool does not support Windows")
+  endif()
+endif()
+
 if (NOT WIN32)
   set(EXTERNAL_LIB "-ldl -lrt -lgomp -lz -lm -lpthread")
   set(DEPS ${DEPS} ${EXTERNAL_LIB})
 endif()
 
 set(DEPS ${DEPS} ${OpenCV_LIBS})
-add_executable(classifier src/classifier.cpp src/transforms.cpp src/paddlex.cpp)
+add_library(paddlex_inference SHARED src/visualize.cpp src/transforms.cpp src/paddlex.cpp)
+ADD_DEPENDENCIES(paddlex_inference ext-yaml-cpp)
+target_link_libraries(paddlex_inference ${DEPS})
+
+add_executable(classifier demo/classifier.cpp src/transforms.cpp src/paddlex.cpp)
 ADD_DEPENDENCIES(classifier ext-yaml-cpp)
 target_link_libraries(classifier ${DEPS})
 
-add_executable(detector src/detector.cpp src/transforms.cpp src/paddlex.cpp src/visualize.cpp)
+add_executable(detector demo/detector.cpp src/transforms.cpp src/paddlex.cpp src/visualize.cpp)
 ADD_DEPENDENCIES(detector ext-yaml-cpp)
 target_link_libraries(detector ${DEPS})
 
-add_executable(segmenter src/segmenter.cpp src/transforms.cpp src/paddlex.cpp src/visualize.cpp)
+add_executable(segmenter demo/segmenter.cpp src/transforms.cpp src/paddlex.cpp src/visualize.cpp)
 ADD_DEPENDENCIES(segmenter ext-yaml-cpp)
 target_link_libraries(segmenter ${DEPS})
@@ -252,3 +289,14 @@ if (WIN32 AND WITH_MKL)
   )
 endif()
 
+file(COPY "${CMAKE_SOURCE_DIR}/include/paddlex/visualize.h"
+     DESTINATION "${CMAKE_BINARY_DIR}/include/")
+file(COPY "${CMAKE_SOURCE_DIR}/include/paddlex/config_parser.h"
+     DESTINATION "${CMAKE_BINARY_DIR}/include/")
+file(COPY "${CMAKE_SOURCE_DIR}/include/paddlex/transforms.h"
+     DESTINATION "${CMAKE_BINARY_DIR}/include/")
+file(COPY "${CMAKE_SOURCE_DIR}/include/paddlex/results.h"
+     DESTINATION "${CMAKE_BINARY_DIR}/include/")
+file(COPY "${CMAKE_SOURCE_DIR}/include/paddlex/paddlex.h"
+     DESTINATION "${CMAKE_BINARY_DIR}/include/")
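These copies, together with the new `paddlex_inference` shared library target above, stage the public headers under `${CMAKE_BINARY_DIR}/include/` so the build tree can be consumed without the full source checkout. A minimal sketch of a downstream caller, assuming that staged layout and linking against `libpaddlex_inference`; the file name `app.cpp`, the model path, and the image path are placeholders:

```cpp
// app.cpp -- hypothetical consumer of the staged build tree, compiled with
//   -I<build>/include -L<build>/lib -lpaddlex_inference
#include <opencv2/opencv.hpp>

#include "paddlex.h"  // staged copy of include/paddlex/paddlex.h

int main() {
  PaddleX::Model model;
  // Plaintext model; the new trailing key parameter defaults to "".
  model.Init("/path/to/inference_model");

  cv::Mat im = cv::imread("/path/to/test.jpeg", 1);
  PaddleX::DetResult result;  // declared in the staged results.h
  model.predict(im, &result);
  return 0;
}
```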
@@ -14,7 +14,7 @@ ExternalProject_Add(
     -DYAML_CPP_INSTALL=OFF
     -DYAML_CPP_BUILD_CONTRIB=OFF
     -DMSVC_SHARED_RT=OFF
-    -DBUILD_SHARED_LIBS=OFF
+    -DBUILD_SHARED_LIBS=${YAML_BUILD_TYPE}
     -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
     -DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}
     -DCMAKE_CXX_FLAGS_DEBUG=${CMAKE_CXX_FLAGS_DEBUG}
...
@@ -25,6 +25,7 @@ DEFINE_string(model_dir, "", "Path of inference model");
 DEFINE_bool(use_gpu, false, "Inferring with GPU or CPU");
 DEFINE_bool(use_trt, false, "Inferring with TensorRT");
 DEFINE_int32(gpu_id, 0, "GPU card id");
+DEFINE_string(key, "", "Key of the encrypted model");
 DEFINE_string(image, "", "Path of test image file");
 DEFINE_string(image_list, "", "Path of test image list file");
@@ -43,7 +44,7 @@ int main(int argc, char** argv) {
   // Load the model
   PaddleX::Model model;
-  model.Init(FLAGS_model_dir, FLAGS_use_gpu, FLAGS_use_trt, FLAGS_gpu_id);
+  model.Init(FLAGS_model_dir, FLAGS_use_gpu, FLAGS_use_trt, FLAGS_gpu_id, FLAGS_key);
   // Run prediction
   if (FLAGS_image_list != "") {
...
@@ -26,6 +26,7 @@ DEFINE_string(model_dir, "", "Path of inference model");
 DEFINE_bool(use_gpu, false, "Inferring with GPU or CPU");
 DEFINE_bool(use_trt, false, "Inferring with TensorRT");
 DEFINE_int32(gpu_id, 0, "GPU card id");
+DEFINE_string(key, "", "Key of the encrypted model");
 DEFINE_string(image, "", "Path of test image file");
 DEFINE_string(image_list, "", "Path of test image list file");
 DEFINE_string(save_dir, "output", "Path to save visualized image");
@@ -45,7 +46,7 @@ int main(int argc, char** argv) {
   // Load the model
   PaddleX::Model model;
-  model.Init(FLAGS_model_dir, FLAGS_use_gpu, FLAGS_use_trt, FLAGS_gpu_id);
+  model.Init(FLAGS_model_dir, FLAGS_use_gpu, FLAGS_use_trt, FLAGS_gpu_id, FLAGS_key);
   auto colormap = PaddleX::GenerateColorMap(model.labels.size());
   std::string save_dir = "output";
@@ -74,7 +75,7 @@ int main(int argc, char** argv) {
       // Visualize
       cv::Mat vis_img =
-          PaddleX::VisualizeDet(im, result, model.labels, colormap, 0.5);
+          PaddleX::Visualize(im, result, model.labels, colormap, 0.5);
       std::string save_path =
           PaddleX::generate_save_path(FLAGS_save_dir, image_path);
       cv::imwrite(save_path, vis_img);
@@ -97,7 +98,7 @@ int main(int argc, char** argv) {
     // Visualize
     cv::Mat vis_img =
-        PaddleX::VisualizeDet(im, result, model.labels, colormap, 0.5);
+        PaddleX::Visualize(im, result, model.labels, colormap, 0.5);
     std::string save_path =
         PaddleX::generate_save_path(FLAGS_save_dir, FLAGS_image);
     cv::imwrite(save_path, vis_img);
...
@@ -26,6 +26,7 @@ DEFINE_string(model_dir, "", "Path of inference model");
 DEFINE_bool(use_gpu, false, "Inferring with GPU or CPU");
 DEFINE_bool(use_trt, false, "Inferring with TensorRT");
 DEFINE_int32(gpu_id, 0, "GPU card id");
+DEFINE_string(key, "", "Key of the encrypted model");
 DEFINE_string(image, "", "Path of test image file");
 DEFINE_string(image_list, "", "Path of test image list file");
 DEFINE_string(save_dir, "output", "Path to save visualized image");
@@ -45,7 +46,7 @@ int main(int argc, char** argv) {
   // Load the model
   PaddleX::Model model;
-  model.Init(FLAGS_model_dir, FLAGS_use_gpu, FLAGS_use_trt, FLAGS_gpu_id);
+  model.Init(FLAGS_model_dir, FLAGS_use_gpu, FLAGS_use_trt, FLAGS_gpu_id, FLAGS_key);
   auto colormap = PaddleX::GenerateColorMap(model.labels.size());
   // Run prediction
@@ -62,7 +63,7 @@ int main(int argc, char** argv) {
       model.predict(im, &result);
       // Visualize
       cv::Mat vis_img =
-          PaddleX::VisualizeSeg(im, result, model.labels, colormap);
+          PaddleX::Visualize(im, result, model.labels, colormap);
       std::string save_path =
           PaddleX::generate_save_path(FLAGS_save_dir, image_path);
       cv::imwrite(save_path, vis_img);
@@ -74,7 +75,7 @@ int main(int argc, char** argv) {
     cv::Mat im = cv::imread(FLAGS_image, 1);
     model.predict(im, &result);
     // Visualize
-    cv::Mat vis_img = PaddleX::VisualizeSeg(im, result, model.labels, colormap);
+    cv::Mat vis_img = PaddleX::Visualize(im, result, model.labels, colormap);
     std::string save_path =
         PaddleX::generate_save_path(FLAGS_save_dir, FLAGS_image);
     cv::imwrite(save_path, vis_img);
...
@@ -28,9 +28,14 @@
 #include "paddle_inference_api.h"  // NOLINT
 
-#include "include/paddlex/config_parser.h"
-#include "include/paddlex/results.h"
-#include "include/paddlex/transforms.h"
+#include "config_parser.h"
+#include "results.h"
+#include "transforms.h"
+
+#ifdef WITH_ENCRYPTION
+#include "paddle_model_decrypt.h"
+#include "model_code.h"
+#endif
 
 namespace PaddleX {
@@ -39,14 +44,16 @@ class Model {
   void Init(const std::string& model_dir,
             bool use_gpu = false,
             bool use_trt = false,
-            int gpu_id = 0) {
-    create_predictor(model_dir, use_gpu, use_trt, gpu_id);
+            int gpu_id = 0,
+            std::string key = "") {
+    create_predictor(model_dir, use_gpu, use_trt, gpu_id, key);
   }
 
   void create_predictor(const std::string& model_dir,
                         bool use_gpu = false,
                         bool use_trt = false,
-                        int gpu_id = 0);
+                        int gpu_id = 0,
+                        std::string key = "");
 
   bool load_config(const std::string& model_dir);
...
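Because the new `key` parameter is defaulted, every existing call site keeps compiling; encrypted deployments simply pass the key through. A minimal sketch of both call styles (the paths and the key string are placeholders):

```cpp
#include "paddlex.h"

void LoadModels() {
  // Plaintext model: the defaulted key ("") leaves behaviour unchanged.
  PaddleX::Model plain_model;
  plain_model.Init("/path/to/inference_model");

  // Encrypted model: the key produced by the encryption tool is forwarded
  // to create_predictor, which decrypts the model in memory.
  PaddleX::Model secure_model;
  secure_model.Init("/path/to/encrypted_model", /*use_gpu=*/false,
                    /*use_trt=*/false, /*gpu_id=*/0,
                    "placeholder-key");
}
```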
@@ -46,13 +46,13 @@ namespace PaddleX {
 // Generate visualization colormap for each class
 std::vector<int> GenerateColorMap(int num_class);
 
-cv::Mat VisualizeDet(const cv::Mat& img,
+cv::Mat Visualize(const cv::Mat& img,
                      const DetResult& results,
                      const std::map<int, std::string>& labels,
                      const std::vector<int>& colormap,
                      float threshold = 0.5);
 
-cv::Mat VisualizeSeg(const cv::Mat& img,
+cv::Mat Visualize(const cv::Mat& img,
                      const SegResult& result,
                      const std::map<int, std::string>& labels,
                      const std::vector<int>& colormap);
...
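Merging `VisualizeDet` and `VisualizeSeg` into a single overloaded `Visualize` makes the demos read identically for both tasks: overload resolution selects the implementation from the static type of the result argument. A small illustrative sketch under that assumption (the `RenderDet`/`RenderSeg` wrappers are hypothetical, not part of this PR):

```cpp
#include <map>
#include <string>
#include <vector>
#include <opencv2/opencv.hpp>

#include "results.h"
#include "visualize.h"

// The compiler picks the overload from the result type: DetResult gets the
// extra score threshold, SegResult does not.
cv::Mat RenderDet(const cv::Mat& im, const PaddleX::DetResult& det,
                  const std::map<int, std::string>& labels,
                  const std::vector<int>& colormap) {
  return PaddleX::Visualize(im, det, labels, colormap, 0.5);
}

cv::Mat RenderSeg(const cv::Mat& im, const PaddleX::SegResult& seg,
                  const std::map<int, std::string>& labels,
                  const std::vector<int>& colormap) {
  return PaddleX::Visualize(im, seg, labels, colormap);
}
```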
@@ -16,6 +16,11 @@ CUDA_LIB=/path/to/cuda/lib/
 # Path to the CUDNN lib
 CUDNN_LIB=/path/to/cudnn/lib/
 
+# Whether to load an encrypted model
+WITH_ENCRYPTION=OFF
+# Path to the encryption tool
+ENCRYPTION_DIR=/path/to/encryption_tool/
+
 # OpenCV path; no change needed when using the bundled prebuilt version
 OPENCV_DIR=$(pwd)/deps/opencv3gcc4.8/
 sh $(pwd)/scripts/bootstrap.sh
@@ -28,10 +33,12 @@ cmake .. \
     -DWITH_GPU=${WITH_GPU} \
     -DWITH_MKL=${WITH_MKL} \
     -DWITH_TENSORRT=${WITH_TENSORRT} \
+    -DWITH_ENCRYPTION=${WITH_ENCRYPTION} \
     -DTENSORRT_DIR=${TENSORRT_DIR} \
     -DPADDLE_DIR=${PADDLE_DIR} \
     -DWITH_STATIC_LIB=${WITH_STATIC_LIB} \
     -DCUDA_LIB=${CUDA_LIB} \
     -DCUDNN_LIB=${CUDNN_LIB} \
+    -DENCRYPTION_DIR=${ENCRYPTION_DIR} \
     -DOPENCV_DIR=${OPENCV_DIR}
 make
@@ -19,7 +19,8 @@ namespace PaddleX {
 void Model::create_predictor(const std::string& model_dir,
                              bool use_gpu,
                              bool use_trt,
-                             int gpu_id) {
+                             int gpu_id,
+                             std::string key) {
   // Load the config file
   if (!load_config(model_dir)) {
     std::cerr << "Parse file 'model.yml' failed!" << std::endl;
@@ -28,7 +29,14 @@ void Model::create_predictor(const std::string& model_dir,
   paddle::AnalysisConfig config;
   std::string model_file = model_dir + OS_PATH_SEP + "__model__";
   std::string params_file = model_dir + OS_PATH_SEP + "__params__";
-  config.SetModel(model_file, params_file);
+#ifdef WITH_ENCRYPTION
+  if (key != "") {
+    paddle_security_load_model(&config, key.c_str(),
+                               model_file.c_str(), params_file.c_str());
+  }
+#endif
+  if (key == "") {
+    config.SetModel(model_file, params_file);
+  }
   if (use_gpu) {
     config.EnableUseGpu(100, gpu_id);
   } else {
...
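For clarity, the branch above does the following: with `WITH_ENCRYPTION` defined and a non-empty key, `paddle_security_load_model` (from the encryption SDK's `paddle_model_decrypt.h`) decrypts and registers the model; with an empty key, the plaintext `config.SetModel` path runs. Note that a non-empty key without `WITH_ENCRYPTION` configures neither. A hedged restatement of that logic as a standalone helper, which additionally falls back to plaintext in that last case (`ConfigureModel` is illustrative, not a new API in this PR):

```cpp
#include <string>

#include "paddle_inference_api.h"
#ifdef WITH_ENCRYPTION
#include "paddle_model_decrypt.h"
#endif

// Sketch of the selection logic in create_predictor: decrypt in memory when
// a key is supplied and encryption support is compiled in, otherwise load
// the plaintext model files.
void ConfigureModel(paddle::AnalysisConfig* config,
                    const std::string& model_file,
                    const std::string& params_file,
                    const std::string& key) {
#ifdef WITH_ENCRYPTION
  if (!key.empty()) {
    paddle_security_load_model(config, key.c_str(),
                               model_file.c_str(), params_file.c_str());
    return;  // the decrypted model is already set on the config
  }
#endif
  config->SetModel(model_file, params_file);
}
```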
@@ -31,7 +31,7 @@ std::vector<int> GenerateColorMap(int num_class) {
   return colormap;
 }
 
-cv::Mat VisualizeDet(const cv::Mat& img,
+cv::Mat Visualize(const cv::Mat& img,
                      const DetResult& result,
                      const std::map<int, std::string>& labels,
                      const std::vector<int>& colormap,
@@ -105,7 +105,7 @@ cv::Mat VisualizeDet(const cv::Mat& img,
   return vis_img;
 }
 
-cv::Mat VisualizeSeg(const cv::Mat& img,
+cv::Mat Visualize(const cv::Mat& img,
                      const SegResult& result,
                      const std::map<int, std::string>& labels,
                      const std::vector<int>& colormap) {
...
@@ -95,7 +95,7 @@ make
 ```
 
 ### Step5: Prediction and visualization
-After a successful build, the demo executables are `build/detector`, `build/classifier`, and `build/segmenter`; pick the one matching your model type. The main command-line parameters are described below:
+After a successful build, the demo executables are `build/demo/detector`, `build/demo/classifier`, and `build/demo/segmenter`; pick the one matching your model type. The main command-line parameters are described below:
 
 | Parameter | Description |
 | ---- | ---- |
@@ -116,7 +116,7 @@ make
 Test the image `/path/to/xiaoduxiong.jpeg` without using the `GPU`:
 ```shell
-./build/detector --model_dir=/path/to/inference_model --image=/path/to/xiaoduxiong.jpeg --save_dir=output
+./build/demo/detector --model_dir=/path/to/inference_model --image=/path/to/xiaoduxiong.jpeg --save_dir=output
 ```
 The `visualized prediction results` are saved under the directory set by the `save_dir` parameter.
@@ -131,6 +131,6 @@ make
 /path/to/images/xiaoduxiongn.jpeg
 ```
 ```shell
-./build/detector --model_dir=/path/to/models/inference_model --image_list=/root/projects/images_list.txt --use_gpu=1 --save_dir=output
+./build/demo/detector --model_dir=/path/to/models/inference_model --image_list=/root/projects/images_list.txt --use_gpu=1 --save_dir=output
 ```
 The `visualized prediction results` are saved under the directory set by the `save_dir` parameter.
@@ -106,7 +106,7 @@ d:
 cd D:\projects\PaddleX\deploy\cpp\out\build\x64-Release
 ```
 
-After a successful build, the demo entry programs are `detector`, `classifier`, and `segmenter`; pick the one matching your model type. The main command-line parameters are described below:
+After a successful build, the demo entry programs are `demo\detector`, `demo\classifier`, and `demo\segmenter`; pick the one matching your model type. The main command-line parameters are described below:
 
 | Parameter | Description |
 | ---- | ---- |
...