Commit a35e7f4b
Authored Oct 16, 2018 by Tao Luo
adjust demo_ci with fluid_inference_install_dir
test=develop
Parent: fc63aa72
Showing 7 changed files with 18 additions and 12 deletions (+18 -12)
paddle/fluid/inference/api/demo_ci/CMakeLists.txt         +3 -3
paddle/fluid/inference/api/demo_ci/run.sh                 +5 -4
paddle/fluid/inference/api/demo_ci/simple_on_word2vec.cc  +1 -1
paddle/fluid/inference/api/demo_ci/trt_mobilenet_demo.cc  +1 -1
paddle/fluid/inference/api/demo_ci/utils.h                +1 -1
paddle/fluid/inference/api/demo_ci/vis_demo.cc            +1 -1
paddle/scripts/paddle_build.sh                            +6 -1
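All seven changes point demo_ci at the self-contained package under build/fluid_inference_install_dir instead of the full build/fluid_install_dir. A minimal sketch of the layout the new paths assume, inferred from the hunks below rather than from an authoritative listing:

# Layout assumed by the new paths (inferred from this commit; abridged)
build/fluid_inference_install_dir/
├── paddle/
│   ├── include/paddle_inference_api.h   # new include path in the demos
│   └── lib/libpaddle_fluid.{a,so}       # new link_directories/DEPS path
└── third_party/install/{mklml,glog,gflags,protobuf}/lib/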
paddle/fluid/inference/api/demo_ci/CMakeLists.txt
@@ -77,7 +77,7 @@ endif(NOT WIN32)
 link_directories("${PADDLE_LIB}/third_party/install/protobuf/lib")
 link_directories("${PADDLE_LIB}/third_party/install/glog/lib")
 link_directories("${PADDLE_LIB}/third_party/install/gflags/lib")
-link_directories("${PADDLE_LIB}/paddle/fluid/inference")
+link_directories("${PADDLE_LIB}/paddle/lib")

 add_executable(${DEMO_NAME} ${DEMO_NAME}.cc)
@@ -97,10 +97,10 @@ endif()
 # Note: libpaddle_inference_api.so/a must put before libpaddle_fluid.so/a
 if(WITH_STATIC_LIB)
-  set(DEPS ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX})
+  set(DEPS ${PADDLE_LIB}/paddle/lib/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX})
 else()
-  set(DEPS ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid${CMAKE_SHARED_LIBRARY_SUFFIX})
+  set(DEPS ${PADDLE_LIB}/paddle/lib/libpaddle_fluid${CMAKE_SHARED_LIBRARY_SUFFIX})
 endif()

 if (NOT WIN32)
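With the library now under ${PADDLE_LIB}/paddle/lib, configuring a single demo by hand looks roughly like this. A sketch, not part of the commit: the install path is a placeholder, and the flag values simply mirror the options run.sh passes below.

# Hypothetical manual configure of one demo; the path is a placeholder.
PADDLE_LIB=$HOME/Paddle/build/fluid_inference_install_dir
mkdir -p build && cd build
cmake .. -DPADDLE_LIB=${PADDLE_LIB} \
         -DDEMO_NAME=vis_demo \
         -DWITH_MKL=ON \
         -DWITH_GPU=OFF \
         -DWITH_STATIC_LIB=ON
make -j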
paddle/fluid/inference/api/demo_ci/run.sh
@@ -5,12 +5,13 @@ TEST_GPU_CPU=$3 # test both GPU/CPU mode or only CPU mode
 DATA_DIR=$4 # dataset
 TENSORRT_INCLUDE_DIR=$5 # TensorRT header file dir, defalut to /usr/local/TensorRT/include
 TENSORRT_LIB_DIR=$6 # TensorRT lib file dir, default to /usr/local/TensorRT/lib
+inference_install_dir=${PADDLE_ROOT}/build/fluid_inference_install_dir

 cd `dirname $0`
 current_dir=`pwd`
 if [ $2 == ON ]; then
   # You can export yourself if move the install path
-  MKL_LIB=${PADDLE_ROOT}/build/fluid_install_dir/third_party/install/mklml/lib
+  MKL_LIB=${inference_install_dir}/third_party/install/mklml/lib
   export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:${MKL_LIB}
 fi
 if [ $3 == ON ]; then
@@ -55,7 +56,7 @@ cd build
 for WITH_STATIC_LIB in ON OFF; do
   # -----simple_on_word2vec-----
   rm -rf *
-  cmake .. -DPADDLE_LIB=${PADDLE_ROOT}/build/fluid_install_dir/ \
+  cmake .. -DPADDLE_LIB=${inference_install_dir} \
    -DWITH_MKL=$TURN_ON_MKL \
    -DDEMO_NAME=simple_on_word2vec \
    -DWITH_GPU=$TEST_GPU_CPU \
@@ -75,7 +76,7 @@ for WITH_STATIC_LIB in ON OFF; do
   fi
   # ---------vis_demo---------
   rm -rf *
-  cmake .. -DPADDLE_LIB=${PADDLE_ROOT}/build/fluid_install_dir/ \
+  cmake .. -DPADDLE_LIB=${inference_install_dir} \
    -DWITH_MKL=$TURN_ON_MKL \
    -DDEMO_NAME=vis_demo \
    -DWITH_GPU=$TEST_GPU_CPU \
@@ -98,7 +99,7 @@ for WITH_STATIC_LIB in ON OFF; do
   # --------tensorrt mobilenet------
   if [ $USE_TENSORRT == ON -a $TEST_GPU_CPU == ON ]; then
     rm -rf *
-    cmake .. -DPADDLE_LIB=${PADDLE_ROOT}/build/fluid_install_dir/ \
+    cmake .. -DPADDLE_LIB=${inference_install_dir} \
      -DWITH_MKL=$TURN_ON_MKL \
      -DDEMO_NAME=trt_mobilenet_demo \
      -DWITH_GPU=$TEST_GPU_CPU \
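run.sh keeps its six positional arguments; only the install directory derived from ${PADDLE_ROOT} changed. A hypothetical manual invocation, mirroring the test_fluid_lib call updated at the end of this commit (all paths are placeholders):

# $1=PADDLE_ROOT  $2=WITH_MKL  $3=TEST_GPU_CPU  $4=DATA_DIR  $5,$6=TensorRT dirs
./run.sh $HOME/Paddle ON OFF $HOME/Paddle/build/inference_demo \
         /usr/local/TensorRT/include /usr/local/TensorRT/lib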
paddle/fluid/inference/api/demo_ci/simple_on_word2vec.cc
@@ -23,7 +23,7 @@ limitations under the License. */
 #include <memory>
 #include <thread>  //NOLINT
-#include "paddle/fluid/inference/paddle_inference_api.h"
+#include "paddle/include/paddle_inference_api.h"

 DEFINE_string(dirname, "", "Directory of the inference model.");
 DEFINE_bool(use_gpu, false, "Whether use gpu.");
paddle/fluid/inference/api/demo_ci/trt_mobilenet_demo.cc
@@ -18,7 +18,7 @@ limitations under the License. */
 #include <gflags/gflags.h>
 #include <glog/logging.h>  // use glog instead of CHECK to avoid importing other paddle header files.
-#include "paddle/fluid/inference/demo_ci/utils.h"
+#include "utils.h"  // NOLINT

 DECLARE_double(fraction_of_gpu_memory_to_use);
 DEFINE_string(modeldir, "", "Directory of the inference model.");
paddle/fluid/inference/api/demo_ci/utils.h
@@ -18,7 +18,7 @@
 #include <iostream>
 #include <string>
 #include <vector>
-#include "paddle/fluid/inference/paddle_inference_api.h"
+#include "paddle/include/paddle_inference_api.h"

 namespace paddle {
 namespace demo {
paddle/fluid/inference/api/demo_ci/vis_demo.cc
@@ -18,7 +18,7 @@ limitations under the License. */
 #include <gflags/gflags.h>
 #include <glog/logging.h>  // use glog instead of CHECK to avoid importing other paddle header files.
-#include "paddle/fluid/inference/demo_ci/utils.h"
+#include "utils.h"  // NOLINT
 #ifdef PADDLE_WITH_CUDA
 DECLARE_double(fraction_of_gpu_memory_to_use);
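The four source edits are the same mechanical switch: the demos now include the packaged header paddle/include/paddle_inference_api.h (presumably resolved against the install root) plus the local utils.h, instead of source-tree paths. A hypothetical compile of one demo without CMake, with placeholder paths and an abridged dependency list:

# Hypothetical manual compile; every path is a placeholder and the
# library list is abridged (MKL, protobuf, etc. omitted).
PADDLE_LIB=$HOME/Paddle/build/fluid_inference_install_dir
g++ -std=c++11 vis_demo.cc -o vis_demo \
    -I"${PADDLE_LIB}" \
    -L"${PADDLE_LIB}/paddle/lib" -lpaddle_fluid \
    -L"${PADDLE_LIB}/third_party/install/glog/lib" -lglog \
    -L"${PADDLE_LIB}/third_party/install/gflags/lib" -lgflags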
paddle/scripts/paddle_build.sh
@@ -659,6 +659,7 @@ function gen_fluid_lib() {
 EOF
       cmake .. -DWITH_DISTRIBUTE=OFF
       make -j `nproc` fluid_lib_dist
+      make -j `nproc` inference_lib_dist
     fi
 }
@@ -672,6 +673,8 @@ EOF
       cd ${PADDLE_ROOT}/build
       cp -r fluid_install_dir fluid
       tar -czf fluid.tgz fluid
+      cp -r fluid_inference_install_dir fluid_inference
+      tar -czf fluid_inference.tgz fluid_inference
     fi
 }
@@ -683,7 +686,9 @@ function test_fluid_lib() {
 ========================================
 EOF
     cd ${PADDLE_ROOT}/paddle/fluid/inference/api/demo_ci
-    ./run.sh ${PADDLE_ROOT} ${WITH_MKL:-ON} ${WITH_GPU:-OFF} ${INFERENCE_DEMO_INSTALL_DIR} ${TENSORRT_INCLUDE_DIR:-/usr/local/TensorRT/include} ${TENSORRT_LIB_DIR:-/usr/local/TensorRT/lib}
+    ./run.sh ${PADDLE_ROOT} ${WITH_MKL:-ON} ${WITH_GPU:-OFF} ${INFERENCE_DEMO_INSTALL_DIR} \
+             ${TENSORRT_INCLUDE_DIR:-/usr/local/TensorRT/include} \
+             ${TENSORRT_LIB_DIR:-/usr/local/TensorRT/lib}
     ./clean.sh
 fi
 }
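gen_fluid_lib now builds and packages both distributions, so the standalone inference archive can be consumed directly. A sketch, assuming fluid_inference.tgz came from the build step above:

# Sketch: unpack the new standalone archive produced by gen_fluid_lib.
tar -xzf fluid_inference.tgz
ls fluid_inference/paddle/lib       # expect libpaddle_fluid.a / .so
ls fluid_inference/paddle/include   # expect paddle_inference_api.h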