Commit a8d497a3 (PaddlePaddle / PaddleX)

Merge pull request #60 from jiangjiajun/secure_cpp

Secure cpp

Authored by Jason on May 18, 2020; committed via GitHub on May 18, 2020.
Parents: fe0a2f22, 5917a0c9

Showing 12 changed files with 101 additions and 28 deletions (+101, -28).
| File | Changes |
| ---- | ---- |
| deploy/cpp/CMakeLists.txt | +52 -4 |
| deploy/cpp/cmake/yaml-cpp.cmake | +1 -1 |
| deploy/cpp/demo/classifier.cpp | +2 -1 |
| deploy/cpp/demo/detector.cpp | +4 -3 |
| deploy/cpp/demo/segmenter.cpp | +4 -3 |
| deploy/cpp/include/paddlex/paddlex.h | +13 -6 |
| deploy/cpp/include/paddlex/visualize.h | +2 -2 |
| deploy/cpp/scripts/build.sh | +7 -0 |
| deploy/cpp/src/paddlex.cpp | +10 -2 |
| deploy/cpp/src/visualize.cpp | +2 -2 |
| docs/tutorials/deploy/deploy_cpp_linux.md | +3 -3 |
| docs/tutorials/deploy/deploy_cpp_win_vs2019.md | +1 -1 |
deploy/cpp/CMakeLists.txt

```diff
@@ -5,12 +5,29 @@ option(WITH_MKL "Compile demo with MKL/OpenBlas support,defaultuseMKL."
 option(WITH_GPU "Compile demo with GPU/CPU, default use CPU." ON)
 option(WITH_STATIC_LIB "Compile demo with static/shared library, default use static." OFF)
 option(WITH_TENSORRT "Compile demo with TensorRT." OFF)
+option(WITH_ENCRYPTION "Compile demo with encryption tool." OFF)
 
-SET(TENSORRT_DIR "" CACHE PATH "Compile demo with TensorRT")
+SET(TENSORRT_DIR "" CACHE PATH "Location of libraries")
 SET(PADDLE_DIR "" CACHE PATH "Location of libraries")
 SET(OPENCV_DIR "" CACHE PATH "Location of libraries")
+SET(ENCRYPTION_DIR "" CACHE PATH "Location of libraries")
 SET(CUDA_LIB "" CACHE PATH "Location of libraries")
 
+if (NOT WIN32)
+    set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
+    set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
+    set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/demo)
+else()
+    set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/paddlex_inference)
+    set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/paddlex_inference)
+    set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/paddlex_inference)
+endif()
+
+if (NOT WIN32)
+    SET(YAML_BUILD_TYPE ON CACHE BOOL "yaml build shared library.")
+else()
+    SET(YAML_BUILD_TYPE OFF CACHE BOOL "yaml build shared library.")
+endif()
 include(cmake/yaml-cpp.cmake)
 
 include_directories("${CMAKE_SOURCE_DIR}/")
@@ -27,6 +44,11 @@ macro(safe_set_static_flag)
     endforeach(flag_var)
 endmacro()
 
+if (WITH_ENCRYPTION)
+    add_definitions(-DWITH_ENCRYPTION=${WITH_ENCRYPTION})
+endif()
+
 if (WITH_MKL)
     ADD_DEFINITIONS(-DUSE_MKL)
 endif()
@@ -183,6 +205,7 @@ else()
     set(DEPS ${DEPS} ${MATH_LIB} ${MKLDNN_LIB} glog gflags_static libprotobuf zlibstatic xxhash libyaml-cppmt)
     set(DEPS ${DEPS} libcmt shlwapi)
     if (EXISTS "${PADDLE_DIR}/third_party/install/snappy/lib")
         set(DEPS ${DEPS} snappy)
@@ -207,21 +230,35 @@ if(WITH_GPU)
     endif()
 endif()
 
+if(WITH_ENCRYPTION)
+    if (NOT WIN32)
+        include_directories("${ENCRYPTION_DIR}/include")
+        link_directories("${ENCRYPTION_DIR}/lib")
+        set(DEPS ${DEPS} ${ENCRYPTION_DIR}/lib/libpmodel-decrypt${CMAKE_SHARED_LIBRARY_SUFFIX})
+    else()
+        message(FATAL_ERROR "Encryption Tool don't support WINDOWS")
+    endif()
+endif()
+
 if (NOT WIN32)
     set(EXTERNAL_LIB "-ldl -lrt -lgomp -lz -lm -lpthread")
     set(DEPS ${DEPS} ${EXTERNAL_LIB})
 endif()
 set(DEPS ${DEPS} ${OpenCV_LIBS})
-add_executable(classifier src/classifier.cpp src/transforms.cpp src/paddlex.cpp)
+add_library(paddlex_inference SHARED src/visualize src/transforms.cpp src/paddlex.cpp)
+ADD_DEPENDENCIES(paddlex_inference ext-yaml-cpp)
+target_link_libraries(paddlex_inference ${DEPS})
+
+add_executable(classifier demo/classifier.cpp src/transforms.cpp src/paddlex.cpp)
 ADD_DEPENDENCIES(classifier ext-yaml-cpp)
 target_link_libraries(classifier ${DEPS})
 
-add_executable(detector src/detector.cpp src/transforms.cpp src/paddlex.cpp src/visualize.cpp)
+add_executable(detector demo/detector.cpp src/transforms.cpp src/paddlex.cpp src/visualize.cpp)
 ADD_DEPENDENCIES(detector ext-yaml-cpp)
 target_link_libraries(detector ${DEPS})
 
-add_executable(segmenter src/segmenter.cpp src/transforms.cpp src/paddlex.cpp src/visualize.cpp)
+add_executable(segmenter demo/segmenter.cpp src/transforms.cpp src/paddlex.cpp src/visualize.cpp)
 ADD_DEPENDENCIES(segmenter ext-yaml-cpp)
 target_link_libraries(segmenter ${DEPS})
@@ -252,3 +289,14 @@ if (WIN32 AND WITH_MKL)
         )
 endif()
 
+file(COPY "${CMAKE_SOURCE_DIR}/include/paddlex/visualize.h"
+     DESTINATION "${CMAKE_BINARY_DIR}/include/")
+file(COPY "${CMAKE_SOURCE_DIR}/include/paddlex/config_parser.h"
+     DESTINATION "${CMAKE_BINARY_DIR}/include/")
+file(COPY "${CMAKE_SOURCE_DIR}/include/paddlex/transforms.h"
+     DESTINATION "${CMAKE_BINARY_DIR}/include/")
+file(COPY "${CMAKE_SOURCE_DIR}/include/paddlex/results.h"
+     DESTINATION "${CMAKE_BINARY_DIR}/include/")
+file(COPY "${CMAKE_SOURCE_DIR}/include/paddlex/paddlex.h"
+     DESTINATION "${CMAKE_BINARY_DIR}/include/")
```
deploy/cpp/cmake/yaml-cpp.cmake

```diff
@@ -14,7 +14,7 @@ ExternalProject_Add(
     -DYAML_CPP_INSTALL=OFF
     -DYAML_CPP_BUILD_CONTRIB=OFF
     -DMSVC_SHARED_RT=OFF
-    -DBUILD_SHARED_LIBS=OFF
+    -DBUILD_SHARED_LIBS=${YAML_BUILD_TYPE}
     -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
     -DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}
     -DCMAKE_CXX_FLAGS_DEBUG=${CMAKE_CXX_FLAGS_DEBUG}
```
deploy/cpp/src/classifier.cpp → deploy/cpp/demo/classifier.cpp

```diff
@@ -25,6 +25,7 @@ DEFINE_string(model_dir, "", "Path of inference model");
 DEFINE_bool(use_gpu, false, "Infering with GPU or CPU");
 DEFINE_bool(use_trt, false, "Infering with TensorRT");
 DEFINE_int32(gpu_id, 0, "GPU card id");
+DEFINE_string(key, "", "key of encryption");
 DEFINE_string(image, "", "Path of test image file");
 DEFINE_string(image_list, "", "Path of test image list file");
@@ -43,7 +44,7 @@ int main(int argc, char** argv) {
   // Load the model
   PaddleX::Model model;
-  model.Init(FLAGS_model_dir, FLAGS_use_gpu, FLAGS_use_trt, FLAGS_gpu_id);
+  model.Init(FLAGS_model_dir, FLAGS_use_gpu, FLAGS_use_trt, FLAGS_gpu_id, FLAGS_key);
   // Run prediction
   if (FLAGS_image_list != "") {
```
deploy/cpp/src/detector.cpp → deploy/cpp/demo/detector.cpp

```diff
@@ -26,6 +26,7 @@ DEFINE_string(model_dir, "", "Path of inference model");
 DEFINE_bool(use_gpu, false, "Infering with GPU or CPU");
 DEFINE_bool(use_trt, false, "Infering with TensorRT");
 DEFINE_int32(gpu_id, 0, "GPU card id");
+DEFINE_string(key, "", "key of encryption");
 DEFINE_string(image, "", "Path of test image file");
 DEFINE_string(image_list, "", "Path of test image list file");
 DEFINE_string(save_dir, "output", "Path to save visualized image");
@@ -45,7 +46,7 @@ int main(int argc, char** argv) {
   // Load the model
   PaddleX::Model model;
-  model.Init(FLAGS_model_dir, FLAGS_use_gpu, FLAGS_use_trt, FLAGS_gpu_id);
+  model.Init(FLAGS_model_dir, FLAGS_use_gpu, FLAGS_use_trt, FLAGS_gpu_id, FLAGS_key);
   auto colormap = PaddleX::GenerateColorMap(model.labels.size());
   std::string save_dir = "output";
@@ -74,7 +75,7 @@ int main(int argc, char** argv) {
       // Visualize
-      cv::Mat vis_img = PaddleX::VisualizeDet(im, result, model.labels, colormap, 0.5);
+      cv::Mat vis_img = PaddleX::Visualize(im, result, model.labels, colormap, 0.5);
       std::string save_path = PaddleX::generate_save_path(FLAGS_save_dir, image_path);
       cv::imwrite(save_path, vis_img);
@@ -97,7 +98,7 @@ int main(int argc, char** argv) {
     // Visualize
-    cv::Mat vis_img = PaddleX::VisualizeDet(im, result, model.labels, colormap, 0.5);
+    cv::Mat vis_img = PaddleX::Visualize(im, result, model.labels, colormap, 0.5);
     std::string save_path = PaddleX::generate_save_path(FLAGS_save_dir, FLAGS_image);
     cv::imwrite(save_path, vis_img);
```
deploy/cpp/src/segmenter.cpp → deploy/cpp/demo/segmenter.cpp

```diff
@@ -26,6 +26,7 @@ DEFINE_string(model_dir, "", "Path of inference model");
 DEFINE_bool(use_gpu, false, "Infering with GPU or CPU");
 DEFINE_bool(use_trt, false, "Infering with TensorRT");
 DEFINE_int32(gpu_id, 0, "GPU card id");
+DEFINE_string(key, "", "key of encryption");
 DEFINE_string(image, "", "Path of test image file");
 DEFINE_string(image_list, "", "Path of test image list file");
 DEFINE_string(save_dir, "output", "Path to save visualized image");
@@ -45,7 +46,7 @@ int main(int argc, char** argv) {
   // Load the model
   PaddleX::Model model;
-  model.Init(FLAGS_model_dir, FLAGS_use_gpu, FLAGS_use_trt, FLAGS_gpu_id);
+  model.Init(FLAGS_model_dir, FLAGS_use_gpu, FLAGS_use_trt, FLAGS_gpu_id, FLAGS_key);
   auto colormap = PaddleX::GenerateColorMap(model.labels.size());
   // Run prediction
@@ -62,7 +63,7 @@ int main(int argc, char** argv) {
       model.predict(im, &result);
       // Visualize
-      cv::Mat vis_img = PaddleX::VisualizeSeg(im, result, model.labels, colormap);
+      cv::Mat vis_img = PaddleX::Visualize(im, result, model.labels, colormap);
       std::string save_path = PaddleX::generate_save_path(FLAGS_save_dir, image_path);
       cv::imwrite(save_path, vis_img);
@@ -74,7 +75,7 @@ int main(int argc, char** argv) {
     cv::Mat im = cv::imread(FLAGS_image, 1);
     model.predict(im, &result);
     // Visualize
-    cv::Mat vis_img = PaddleX::VisualizeSeg(im, result, model.labels, colormap);
+    cv::Mat vis_img = PaddleX::Visualize(im, result, model.labels, colormap);
     std::string save_path = PaddleX::generate_save_path(FLAGS_save_dir, FLAGS_image);
     cv::imwrite(save_path, vis_img);
```
deploy/cpp/include/paddlex/paddlex.h

```diff
@@ -28,9 +28,14 @@
 #include "paddle_inference_api.h"  // NOLINT
 
-#include "include/paddlex/config_parser.h"
-#include "include/paddlex/results.h"
-#include "include/paddlex/transforms.h"
+#include "config_parser.h"
+#include "results.h"
+#include "transforms.h"
+
+#ifdef WITH_ENCRYPTION
+#include "paddle_model_decrypt.h"
+#include "model_code.h"
+#endif
 
 namespace PaddleX {
@@ -39,14 +44,16 @@ class Model {
   void Init(const std::string& model_dir,
             bool use_gpu = false,
             bool use_trt = false,
-            int gpu_id = 0) {
-    create_predictor(model_dir, use_gpu, use_trt, gpu_id);
+            int gpu_id = 0,
+            std::string key = "") {
+    create_predictor(model_dir, use_gpu, use_trt, gpu_id, key);
   }
 
   void create_predictor(const std::string& model_dir,
                         bool use_gpu = false,
                         bool use_trt = false,
-                        int gpu_id = 0);
+                        int gpu_id = 0,
+                        std::string key = "");
 
   bool load_config(const std::string& model_dir);
```
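The new `key` parameter threads an optional decryption key from the demo flags down to the predictor while keeping existing call sites unchanged, since it defaults to an empty string. Below is a minimal caller sketch; the header include path, model directories, and key string are illustrative assumptions, not taken from the repository.

```cpp
#include <string>

#include "include/paddlex/paddlex.h"  // assumed include path for the deploy headers

int main() {
  // Plain (unencrypted) model: omit the key, so behavior matches the old API.
  PaddleX::Model model;
  model.Init("/path/to/inference_model", /*use_gpu=*/false, /*use_trt=*/false, /*gpu_id=*/0);

  // Encrypted model: pass the decryption key as the new fifth argument.
  // This path only takes effect when the code was built with WITH_ENCRYPTION.
  PaddleX::Model encrypted_model;
  encrypted_model.Init("/path/to/encrypted_model", false, false, 0, "my-secret-key");
  return 0;
}
```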
deploy/cpp/include/paddlex/visualize.h

```diff
@@ -46,13 +46,13 @@ namespace PaddleX {
 // Generate visualization colormap for each class
 std::vector<int> GenerateColorMap(int num_class);
 
-cv::Mat VisualizeDet(const cv::Mat& img,
+cv::Mat Visualize(const cv::Mat& img,
                      const DetResult& results,
                      const std::map<int, std::string>& labels,
                      const std::vector<int>& colormap,
                      float threshold = 0.5);
 
-cv::Mat VisualizeSeg(const cv::Mat& img,
+cv::Mat Visualize(const cv::Mat& img,
                      const SegResult& result,
                      const std::map<int, std::string>& labels,
                      const std::vector<int>& colormap);
```
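With `VisualizeDet` and `VisualizeSeg` collapsed into two `Visualize` overloads, a call site selects the implementation purely by the result type it passes. A small sketch under that assumption follows; the include paths and output file names are illustrative.

```cpp
#include <map>
#include <string>
#include <vector>

#include <opencv2/opencv.hpp>

#include "include/paddlex/results.h"    // assumed include paths
#include "include/paddlex/visualize.h"

void visualize_both(const cv::Mat& im,
                    const PaddleX::DetResult& det_result,
                    const PaddleX::SegResult& seg_result,
                    const std::map<int, std::string>& labels) {
  std::vector<int> colormap =
      PaddleX::GenerateColorMap(static_cast<int>(labels.size()));

  // Resolves to the DetResult overload, which also takes a confidence threshold.
  cv::Mat det_vis = PaddleX::Visualize(im, det_result, labels, colormap, 0.5);

  // Resolves to the SegResult overload, which has no threshold parameter.
  cv::Mat seg_vis = PaddleX::Visualize(im, seg_result, labels, colormap);

  cv::imwrite("det_vis.jpg", det_vis);
  cv::imwrite("seg_vis.jpg", seg_vis);
}
```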
deploy/cpp/scripts/build.sh

```diff
@@ -16,6 +16,11 @@ CUDA_LIB=/path/to/cuda/lib/
 # Path to the CUDNN libs
 CUDNN_LIB=/path/to/cudnn/lib/
+# Whether to load an encrypted model
+WITH_ENCRYPTION=OFF
+# Path to the encryption tool
+ENCRYPTION_DIR=/path/to/encryption_tool/
 # Path to OpenCV; no change needed when using the bundled pre-built version
 OPENCV_DIR=$(pwd)/deps/opencv3gcc4.8/
 
 sh $(pwd)/scripts/bootstrap.sh
@@ -28,10 +33,12 @@ cmake .. \
     -DWITH_GPU=${WITH_GPU} \
     -DWITH_MKL=${WITH_MKL} \
     -DWITH_TENSORRT=${WITH_TENSORRT} \
+    -DWITH_ENCRYPTION=${WITH_ENCRYPTION} \
     -DTENSORRT_DIR=${TENSORRT_DIR} \
     -DPADDLE_DIR=${PADDLE_DIR} \
     -DWITH_STATIC_LIB=${WITH_STATIC_LIB} \
     -DCUDA_LIB=${CUDA_LIB} \
     -DCUDNN_LIB=${CUDNN_LIB} \
+    -DENCRYPTION_DIR=${ENCRYPTION_DIR} \
     -DOPENCV_DIR=${OPENCV_DIR}
 make
```
deploy/cpp/src/paddlex.cpp

```diff
@@ -19,7 +19,8 @@ namespace PaddleX {
 void Model::create_predictor(const std::string& model_dir,
                              bool use_gpu,
                              bool use_trt,
-                             int gpu_id) {
+                             int gpu_id,
+                             std::string key) {
   // Read the config file
   if (!load_config(model_dir)) {
     std::cerr << "Parse file 'model.yml' failed!" << std::endl;
@@ -28,7 +29,14 @@ void Model::create_predictor(const std::string& model_dir,
   paddle::AnalysisConfig config;
   std::string model_file = model_dir + OS_PATH_SEP + "__model__";
   std::string params_file = model_dir + OS_PATH_SEP + "__params__";
-  config.SetModel(model_file, params_file);
+#ifdef WITH_ENCRYPTION
+  if (key != "") {
+    paddle_security_load_model(&config, key.c_str(), model_file.c_str(), params_file.c_str());
+  }
+#endif
+  if (key == "") {
+    config.SetModel(model_file, params_file);
+  }
   if (use_gpu) {
     config.EnableUseGpu(100, gpu_id);
   } else {
```
deploy/cpp/src/visualize.cpp

```diff
@@ -31,7 +31,7 @@ std::vector<int> GenerateColorMap(int num_class) {
   return colormap;
 }
 
-cv::Mat VisualizeDet(const cv::Mat& img,
+cv::Mat Visualize(const cv::Mat& img,
                      const DetResult& result,
                      const std::map<int, std::string>& labels,
                      const std::vector<int>& colormap,
@@ -105,7 +105,7 @@ cv::Mat VisualizeDet(const cv::Mat& img,
   return vis_img;
 }
 
-cv::Mat VisualizeSeg(const cv::Mat& img,
+cv::Mat Visualize(const cv::Mat& img,
                      const SegResult& result,
                      const std::map<int, std::string>& labels,
                      const std::vector<int>& colormap) {
```
docs/tutorials/deploy/deploy_cpp_linux.md

````diff
@@ -95,7 +95,7 @@ make
 ```
 
 ### Step 5: Prediction and visualization
-After a successful build, the prediction demo executables are `build/detector`, `build/classifer`, and `build/segmenter`. Choose the one that matches your model type; the main command-line parameters are described below:
+After a successful build, the prediction demo executables are `build/demo/detector`, `build/demo/classifer`, and `build/demo/segmenter`. Choose the one that matches your model type; the main command-line parameters are described below:
 
 | Parameter | Description |
 | ---- | ---- |
@@ -116,7 +116,7 @@ make
 Test the image `/path/to/xiaoduxiong.jpeg` without the `GPU`:
 ```shell
-./build/detector --model_dir=/path/to/inference_model --image=/path/to/xiaoduxiong.jpeg --save_dir=output
+./build/demo/detector --model_dir=/path/to/inference_model --image=/path/to/xiaoduxiong.jpeg --save_dir=output
 ```
 The visualized prediction result for the image is saved under the directory set by the `save_dir` parameter.
@@ -131,6 +131,6 @@ make
 /path/to/images/xiaoduxiongn.jpeg
 ```
 ```shell
-./build/detector --model_dir=/path/to/models/inference_model --image_list=/root/projects/images_list.txt --use_gpu=1 --save_dir=output
+./build/demo/detector --model_dir=/path/to/models/inference_model --image_list=/root/projects/images_list.txt --use_gpu=1 --save_dir=output
 ```
 The visualized prediction results for the images are saved under the directory set by the `save_dir` parameter.
````
docs/tutorials/deploy/deploy_cpp_win_vs2019.md

````diff
@@ -106,7 +106,7 @@ d:
 cd D:\projects\PaddleX\deploy\cpp\out\build\x64-Release
 ```
 
-After a successful build, the prediction demo entry programs are `detector`, `classifer`, and `segmenter`. Choose the one that matches your model type; the main command-line parameters are described below:
+After a successful build, the prediction demo entry programs are `demo\detector`, `demo\classifer`, and `demo\segmenter`. Choose the one that matches your model type; the main command-line parameters are described below:
 
 | Parameter | Description |
 | ---- | ---- |
````