Commit af1e54ac in Crayon鑫 / Paddle (a fork of PaddlePaddle / Paddle)
Authored Jul 17, 2018 by Luo Tao
Parent: 369dfb3d

fix compiler error after move

Showing 13 changed files with 32 additions and 34 deletions (+32 −34)
CMakeLists.txt                                                    +0  −4
paddle/fluid/inference/CMakeLists.txt                             +2  −1
paddle/fluid/inference/api/CMakeLists.txt                         +12 −12
paddle/fluid/inference/api/api.cc                                 +1  −1
paddle/fluid/inference/api/api_anakin_engine.cc                   +3  −2
paddle/fluid/inference/api/api_anakin_engine.h                    +3  −2
paddle/fluid/inference/api/api_anakin_engine_tester.cc            +1  −1
paddle/fluid/inference/api/api_impl.cc                            +2  −2
paddle/fluid/inference/api/api_impl.h                             +1  −1
paddle/fluid/inference/api/api_tensorrt_subgraph_engine.cc        +2  −2
paddle/fluid/inference/api/test_api.cc                            +1  −2
paddle/fluid/inference/api/test_api_impl.cc                       +2  −2
paddle/fluid/inference/api/test_api_tensorrt_subgraph_engine.cc   +2  −2
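
The hunks that follow are the mechanical fallout of a directory move: sources that previously lived in paddle/contrib/inference under paddle_inference_api*-style names now live in paddle/fluid/inference/api under api*-style names, so every #include and every CMake SRCS or LINK_FLAGS entry that spelled the old path has to be rewritten. A minimal sketch of the include migration (the grouping is illustrative; the two paths themselves are taken verbatim from the hunks below):

    // Old layout, removed by the move:
    // #include "paddle/contrib/inference/paddle_inference_api.h"

    // New layout, introduced by this commit:
    #include "paddle/fluid/inference/api/api.h"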
CMakeLists.txt
@@ -282,7 +282,3 @@ if(WITH_DOC)
   find_python_module(recommonmark REQUIRED)
   add_subdirectory(doc)
 endif()
-
-if(WITH_CONTRIB)
-  add_subdirectory(paddle/contrib)
-endif()
paddle/fluid/inference/CMakeLists.txt
@@ -5,7 +5,7 @@ if (TENSORRT_FOUND)
   add_subdirectory(tensorrt)
 endif()
-set(FLUID_CORE_MODULES proto_desc memory lod_tensor executor)
+set(FLUID_CORE_MODULES proto_desc memory lod_tensor executor)
 # TODO(panyx0718): Should this be called paddle_fluid_inference_api_internal?
 cc_library(paddle_fluid_api
 ...
(the removed and added set(...) lines render identically here; the page was displaying the diff with whitespace changes hidden, so the difference is presumably whitespace-only)

@@ -38,3 +38,4 @@ if(WITH_TESTING)
   # both tests/book and analysis depends the models that generated by python/paddle/fluid/tests/book
   add_subdirectory(tests/book)
 endif()
+add_subdirectory(api)
paddle/fluid/inference/api/CMakeLists.txt
@@ -43,21 +43,21 @@ function(inference_api_test TARGET_NAME)
 endfunction(inference_api_test)

 cc_library(paddle_inference_api
-    SRCS paddle_inference_api.cc paddle_inference_api_impl.cc
+    SRCS api.cc api_impl.cc
     DEPS ${FLUID_CORE_MODULES} ${GLOB_OP_LIB})

 if(NOT APPLE)
-  set(LINK_FLAGS "-Wl,--retain-symbols-file ${CMAKE_CURRENT_SOURCE_DIR}/paddle_inference_api.sym")
+  set(LINK_FLAGS "-Wl,--retain-symbols-file ${CMAKE_CURRENT_SOURCE_DIR}/api.sym")
   set_target_properties(paddle_inference_api PROPERTIES LINK_FLAGS "${LINK_FLAGS}")
 endif()

 # Here the shared library doesn't depend on other fluid libraries, or double free will occur.
 cc_library(paddle_inference_api_shared SHARED
-    SRCS paddle_inference_api.cc paddle_inference_api_impl.cc)
+    SRCS api.cc api_impl.cc)
 add_dependencies(paddle_inference_api_shared ${FLUID_CORE_MODULES} ${GLOB_OP_LIB})
 set_target_properties(paddle_inference_api_shared PROPERTIES OUTPUT_NAME paddle_inference_api)

 if(NOT APPLE)
-  set(LINK_FLAGS "-Wl,--version-script ${CMAKE_CURRENT_SOURCE_DIR}/paddle_inference_api.map")
+  set(LINK_FLAGS "-Wl,--version-script ${CMAKE_CURRENT_SOURCE_DIR}/api.map")
   set_target_properties(paddle_inference_api_shared PROPERTIES LINK_FLAGS "${LINK_FLAGS}")
   FILE(WRITE ${CMAKE_CURRENT_BINARY_DIR}/check_symbol.cmake
     "execute_process(COMMAND bash -c \"${CMAKE_CURRENT_SOURCE_DIR}/check_symbol.sh"
 ...

@@ -73,32 +73,32 @@ if(NOT APPLE)
 endif()

 cc_test(test_paddle_inference_api
-    SRCS test_paddle_inference_api.cc
+    SRCS test_api.cc
     DEPS paddle_inference_api)

-inference_api_test(test_paddle_inference_api_impl
+inference_api_test(test_api_impl
     ARGS test_word2vec test_image_classification)

 if(WITH_GPU AND TENSORRT_FOUND)
   cc_library(paddle_inference_tensorrt_subgraph_engine
-    SRCS paddle_inference_api_tensorrt_subgraph_engine.cc
-    DEPS paddle_inference_api analysis tensorrt_engine paddle_inference_api paddle_fluid_api)
+    SRCS api_tensorrt_subgraph_engine.cc
+    DEPS paddle_inference_api analysis tensorrt_engine paddle_fluid_api)

-  inference_api_test(test_paddle_inference_api_tensorrt_subgraph_engine ARGS test_word2vec)
+  inference_api_test(test_api_tensorrt_subgraph_engine ARGS test_word2vec)
 endif()

 if (WITH_ANAKIN) # only needed in CI
   # Due to Anakin do not have official library releases and the versions of protobuf and cuda do not match Paddle's,
   # so anakin library will not be merged to our official inference library. To use anakin prediction API, one need to
   # compile the libinference_anakin_api.a and compile with anakin.so.
-  nv_library(inference_anakin_api SRCS paddle_inference_api.cc paddle_inference_api_anakin_engine.cc)
-  nv_library(inference_anakin_api_shared SHARED SRCS paddle_inference_api.cc paddle_inference_api_anakin_engine.cc)
+  nv_library(inference_anakin_api SRCS api.cc api_anakin_engine.cc)
+  nv_library(inference_anakin_api_shared SHARED SRCS api.cc api_anakin_engine.cc)
   target_compile_options(inference_anakin_api BEFORE PUBLIC ${ANAKIN_COMPILE_EXTRA_FLAGS})
   target_compile_options(inference_anakin_api_shared BEFORE PUBLIC ${ANAKIN_COMPILE_EXTRA_FLAGS})
   target_link_libraries(inference_anakin_api anakin anakin_saber_common)
   target_link_libraries(inference_anakin_api_shared anakin anakin_saber_common)
   if (WITH_TESTING)
-    cc_test(inference_anakin_test SRCS paddle_inference_api_anakin_engine_tester.cc
+    cc_test(inference_anakin_test SRCS api_anakin_engine_tester.cc
       ARGS --model=${ANAKIN_INSTALL_DIR}/mobilenet_v2.anakin.bin
       DEPS inference_anakin_api)
     target_compile_options(inference_anakin_test BEFORE PUBLIC ${ANAKIN_COMPILE_EXTRA_FLAGS})
 ...
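
One context line in this hunk deserves a gloss: "Here the shared library doesn't depend on other fluid libraries, or double free will occur." The hazard it guards against is general C++/ELF behavior rather than anything Paddle-specific: if the same global objects are statically linked into both paddle_inference_api.so and the executable that loads it, two copies of each global exist, and under default symbol interposition both copies' static destructors can end up running against the single interposed definition. A contrived sketch of that failure mode, with illustrative names only (not Paddle code):

    // globals.cc -- imagine this object file linked into BOTH a shared
    // library and the executable that loads it.
    #include <vector>
    std::vector<int> g_registry(16);  // a global that owns a heap allocation

    // At process exit, the destructor calls registered by the .so and by the
    // executable can both bind to the one interposed g_registry, so
    // ~vector() runs twice on the same buffer: a classic "double free".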
paddle/fluid/inference/api/api.cc
@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */

-#include "paddle/contrib/inference/paddle_inference_api.h"
+#include "paddle/fluid/inference/api/api.h"

 namespace paddle {
 ...
paddle/fluid/inference/api/api_anakin_engine.cc
@@ -12,8 +12,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+#include "paddle/fluid/inference/api/api_anakin_engine.h"
 #include <cuda.h>
-#include "paddle/contrib/inference/paddle_inference_api_anakin_engine.h"
+#include <vector>

 namespace paddle {
 ...

@@ -110,6 +111,6 @@ std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
   std::unique_ptr<PaddlePredictor> x(
       new PaddleInferenceAnakinPredictor(config));
   return x;
-};
+}

 }  // namespace paddle
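
The second hunk here is a cleanliness fix rather than a path fix: the specialization of CreatePaddlePredictor ended in `};`, and a stray semicolon after a function body, while tolerated by C++11, is flagged by stricter toolchains (for example clang's -Wextra-semi) and by linters. The same `};` to `}` change appears again in api_impl.cc below. A minimal illustration (not Paddle code):

    void f() {};  // stray ';' after the function body -- flagged by -Wextra-semi
    void g() {}   // clean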
paddle/fluid/inference/api/api_anakin_engine.h
@@ -19,7 +19,8 @@ limitations under the License. */

 #pragma once

-#include "paddle/contrib/inference/paddle_inference_api.h"
+#include <vector>
+#include "paddle/fluid/inference/api/api.h"

 // from anakin
 #include "framework/core/net/net.h"
 ...

@@ -31,7 +32,7 @@ class PaddleInferenceAnakinPredictor : public PaddlePredictor {
  public:
   PaddleInferenceAnakinPredictor() {}

-  PaddleInferenceAnakinPredictor(const AnakinConfig& config);
+  explicit PaddleInferenceAnakinPredictor(const AnakinConfig& config);

   // NOTE Unlike the native engine, the buffers of anakin engine's output_data
   // should be allocated first.
 ...
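
The second hunk adds `explicit` to the single-argument constructor, which is what cpplint's runtime/explicit rule demands: without it, an AnakinConfig could silently convert into a predictor at any call boundary. A minimal sketch with illustrative types (not Paddle's real classes):

    struct Config {};
    struct Predictor {
      explicit Predictor(const Config& config) {}
    };

    // Predictor p = Config{};  // rejected once the constructor is explicit
    Predictor p{Config{}};      // fine: construction must now be spelled out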
paddle/fluid/inference/api/api_anakin_engine_tester.cc
@@ -16,7 +16,7 @@ limitations under the License. */
 #include <glog/logging.h>
 #include <gtest/gtest.h>

-#include "paddle/contrib/inference/paddle_inference_api.h"
+#include "paddle/fluid/inference/api/api.h"

 DEFINE_string(model, "", "Directory of the inference model.");
 ...
paddle/fluid/inference/api/api_impl.cc
@@ -21,7 +21,7 @@ limitations under the License. */
 #include <utility>
 #include <vector>

-#include "paddle/contrib/inference/paddle_inference_api_impl.h"
+#include "paddle/fluid/inference/api/api_impl.h"

 namespace paddle {
 namespace {
 ...

@@ -105,7 +105,7 @@ NativePaddlePredictor::~NativePaddlePredictor() {
   PADDLE_ENFORCE_NOT_NULL(scope_, "Should have parent scope!");
   scope_->DeleteScope(sub_scope_);
 }
-};
+}

 bool NativePaddlePredictor::Run(const std::vector<PaddleTensor> &inputs,
                                 std::vector<PaddleTensor> *output_data) {
 ...
paddle/fluid/inference/api/api_impl.h
@@ -19,7 +19,7 @@
 #include <string>
 #include <vector>

-#include "paddle/contrib/inference/paddle_inference_api.h"
+#include "paddle/fluid/inference/api/api.h"
 #include "paddle/fluid/framework/ddim.h"
 #include "paddle/fluid/framework/lod_tensor.h"
 ...
paddle/fluid/inference/api/api_tensorrt_subgraph_engine.cc
@@ -12,9 +12,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include "paddle/contrib/inference/paddle_inference_api.h"
-#include "paddle/contrib/inference/paddle_inference_api_impl.h"
+#include "paddle/fluid/inference/api/api.h"
 #include "paddle/fluid/inference/analysis/analyzer.h"
+#include "paddle/fluid/inference/api/api_impl.h"
 #include "paddle/fluid/inference/utils/singleton.h"

 namespace paddle {
 ...
paddle/fluid/inference/api/test_api.cc
@@ -12,10 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */

-#include "paddle/contrib/inference/paddle_inference_api.h"
-
 #include <glog/logging.h>
 #include <gtest/gtest.h>
+#include "paddle/fluid/inference/api/api.h"

 namespace paddle {
 ...
paddle/fluid/inference/api/test_api_impl.cc
@@ -15,10 +15,10 @@ limitations under the License. */
 #include <glog/logging.h>
 #include <gtest/gtest.h>
-#include <thread>
+#include <thread>  // NOLINT
 #include "gflags/gflags.h"
-#include "paddle/contrib/inference/paddle_inference_api_impl.h"
+#include "paddle/fluid/inference/api/api_impl.h"
 #include "paddle/fluid/inference/tests/test_helper.h"

 DEFINE_string(dirname, "", "Directory of the inference model.");
 ...
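
Besides the path rewrite, this file gains a `// NOLINT` on the <thread> include. The marker is cpplint's suppression syntax: Google's cpplint reports <thread> as an unapproved C++11 header under its build/c++11 rule, and the trailing comment silences that single diagnostic without disabling the check elsewhere:

    #include <thread>  // NOLINT  -- suppresses cpplint's build/c++11 warning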
paddle/fluid/inference/api/test_api_tensorrt_subgraph_engine.cc
@@ -15,7 +15,7 @@
 #include <gflags/gflags.h>
 #include <glog/logging.h>
 #include <gtest/gtest.h>
-#include "paddle/contrib/inference/paddle_inference_api.h"
+#include "paddle/fluid/inference/api/api.h"

 namespace paddle {
 ...

@@ -61,4 +61,4 @@ void Main(bool use_gpu) {
 TEST(paddle_inference_api_tensorrt_subgraph_engine, main) { Main(true); }

-}  // namespace paddle
\ No newline at end of file
+}  // namespace paddle

(the only change in this last hunk is adding the newline that was missing at end of file)