Unverified commit 848aca7a
Authored on Jul 15, 2020 by Wilber; committed via GitHub on Jul 15, 2020
[CI] [Lite-Subgraph] CI add lite subgraph check. (#25346)
Parent: e65c5b8e
Showing 7 changed files with 19 additions and 8 deletions (+19, -8):
CMakeLists.txt                                       +3  -1
cmake/external/lite.cmake                            +1  -0
paddle/fluid/inference/lite/CMakeLists.txt           +2  -1
paddle/fluid/inference/lite/test_tensor_utils.cc     +5  -5
paddle/fluid/operators/lite/ut_helper.h              +2  -1
paddle/scripts/paddle_build.sh                       +2  -0
python/setup.py.in                                   +4  -0
CMakeLists.txt
@@ -168,6 +168,9 @@ if(WITH_BRPC_RDMA)
     endif()
 endif()
 
+# lite subgraph compilation depends on CUDNN_ROOT,
+# so include(cudnn) needs to be in front of include(third_party/lite)
+include(cudnn)              # set cudnn libraries, must before configure
 include(third_party)        # download, build, install third_party
 
 if(WITH_DISTRIBUTE)
@@ -187,7 +190,6 @@ if(NOT WIN32)
 endif()
 
 include(flags)              # set paddle compile flags
-include(cudnn)              # set cudnn libraries, must before configure
 if(WITH_GPU)
   include(cuda)
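The comment added above states the constraint: the lite external project reads CUDNN_ROOT at configure time, so include(cudnn) has to run before include(third_party), which pulls in third_party/lite. A minimal standalone sketch of that ordering rule, using stand-in commands rather than Paddle's real cudnn/third_party modules:

# Hypothetical, self-contained CMakeLists.txt illustrating why the defining
# include() must come before the consuming one: include() files are evaluated
# strictly in order, so a variable read before it is set is simply empty.
cmake_minimum_required(VERSION 3.10)
project(include_order_demo NONE)

set(CUDNN_ROOT "/usr/local/cudnn" CACHE PATH "cuDNN install prefix")  # stands in for include(cudnn)

# stands in for include(third_party) -> third_party/lite, which reads CUDNN_ROOT
if(CUDNN_ROOT)
  message(STATUS "lite would be configured with CUDNN_ROOT=${CUDNN_ROOT}")
else()
  message(FATAL_ERROR "CUDNN_ROOT is empty; include(cudnn) must run first")
endif()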
cmake/external/lite.cmake
@@ -93,6 +93,7 @@ function(external_lite_static_libs alias path)
 endfunction()
 
 external_lite_static_libs(lite_full_static ${LITE_BINARY_DIR}/inference_lite_lib/cxx/lib/libpaddle_full_api_shared.so)
+set(LITE_SHARED_LIB ${LITE_BINARY_DIR}/inference_lite_lib/cxx/lib/libpaddle_full_api_shared.so)
 add_definitions(-DPADDLE_WITH_LITE)
 add_definitions(-DLITE_WITH_LOG)
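The new LITE_SHARED_LIB variable records the path of libpaddle_full_api_shared.so that the Python packaging step later references as '${LITE_SHARED_LIB}' (see the python/setup.py.in diff below). As a hedged illustration of how such a configure-time value typically reaches a .in template; this is not Paddle's actual python/CMakeLists.txt:

# Illustrative only: configure_file() replaces ${VAR} and @VAR@ placeholders in a
# template with values known at configure time, which is how a path such as
# LITE_SHARED_LIB can end up inside a generated setup.py.
set(LITE_SHARED_LIB "${LITE_BINARY_DIR}/inference_lite_lib/cxx/lib/libpaddle_full_api_shared.so")  # assumes LITE_BINARY_DIR is already set
configure_file(
  "${CMAKE_CURRENT_SOURCE_DIR}/setup.py.in"   # template containing ${LITE_SHARED_LIB}
  "${CMAKE_CURRENT_BINARY_DIR}/setup.py")     # generated file with the real path filled in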
paddle/fluid/inference/lite/CMakeLists.txt
 cc_library(lite_op_teller SRCS op_teller.cc DEPS lite_full_static framework_proto device_context boost xxhash)
 cc_library(lite_engine SRCS engine.cc DEPS lite_full_static framework_proto)
-cc_library(lite_tensor_utils SRCS tensor_utils.cc DEPS memcpy lite_full_static framework_proto boost)
+cc_library(lite_tensor_utils SRCS tensor_utils.cc DEPS memcpy lite_full_static framework_proto boost device_context)
 cc_test(test_lite_engine SRCS test_engine.cc DEPS lite_engine protobuf framework_proto glog gtest analysis)
 cc_test(test_lite_tensor_utils SRCS test_tensor_utils.cc DEPS lite_engine lite_tensor_utils)
paddle/fluid/inference/lite/test_tensor_utils.cc
@@ -30,7 +30,7 @@ TEST(LiteEngineOp, GetNativePlace) {
   platform::Place GetNativePlace(const TargetType& type, int id = 0);
   EXPECT_TRUE(platform::is_cpu_place(GetNativePlace(TargetType::kHost)));
   EXPECT_TRUE(platform::is_gpu_place(GetNativePlace(TargetType::kCUDA)));
-  ASSERT_DEATH(GetNativePlace(TargetType::kUnk), "");
+  EXPECT_ANY_THROW(GetNativePlace(TargetType::kUnk));
 }
 
 TEST(LiteEngineOp, GetLiteTargetType) {
@@ -48,8 +48,8 @@ TEST(LiteEngineOp, GetLitePrecisionType) {
             PrecisionType::kInt8);
   ASSERT_EQ(GetLitePrecisionType(framework::proto::VarType_Type_INT32),
             PrecisionType::kInt32);
-  ASSERT_DEATH(
-      GetLitePrecisionType(framework::proto::VarType_Type_SELECTED_ROWS), "");
+  EXPECT_ANY_THROW(
+      GetLitePrecisionType(framework::proto::VarType_Type_SELECTED_ROWS));
 }
 
 TEST(LiteEngineOp, GetNativePrecisionType) {
@@ -62,7 +62,7 @@ TEST(LiteEngineOp, GetNativePrecisionType) {
             framework::proto::VarType_Type_INT8);
   ASSERT_EQ(GetNativePrecisionType(PrecisionType::kInt32),
             framework::proto::VarType_Type_INT32);
-  ASSERT_DEATH(GetNativePrecisionType(PrecisionType::kUnk), "");
+  EXPECT_ANY_THROW(GetNativePrecisionType(PrecisionType::kUnk));
 }
 
 TEST(LiteEngineOp, GetNativeLayoutType) {
@@ -70,7 +70,7 @@ TEST(LiteEngineOp, GetNativeLayoutType) {
   framework::DataLayout GetNativeLayoutType(const DataLayoutType& type);
   ASSERT_EQ(GetNativeLayoutType(DataLayoutType::kNCHW),
             framework::DataLayout::kNCHW);
-  ASSERT_DEATH(GetNativeLayoutType(DataLayoutType::kNHWC), "");
+  EXPECT_ANY_THROW(GetNativeLayoutType(DataLayoutType::kNHWC));
 }
 
 void test_tensor_copy(const platform::DeviceContext& ctx) {
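Every negative-path assertion in this file changes from ASSERT_DEATH, which expects the checked statement to kill a forked process, to EXPECT_ANY_THROW, which expects it to raise a C++ exception; this matches the error-reporting style used by PADDLE_THROW in the ut_helper.h change below. A small self-contained gtest sketch of the pattern, with a hypothetical function standing in for the Paddle converters:

// Hypothetical illustration, not Paddle code: an unsupported input is reported
// by throwing, so the matching gtest assertion is EXPECT_ANY_THROW.
#include <stdexcept>
#include "gtest/gtest.h"

namespace {
int ToNativeId(int tag) {
  if (tag < 0) {
    throw std::runtime_error("unsupported tag");  // error path throws instead of aborting
  }
  return tag * 10;
}
}  // namespace

TEST(AssertionStyle, ThrowingErrorsUseExpectAnyThrow) {
  EXPECT_EQ(ToNativeId(2), 20);
  // A death test (ASSERT_DEATH) only fits code that crashes the process;
  // for a throwing error path, EXPECT_ANY_THROW states the expectation directly.
  EXPECT_ANY_THROW(ToNativeId(-1));
}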
paddle/fluid/operators/lite/ut_helper.h
@@ -23,6 +23,7 @@
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/tensor_util.h"
 #include "paddle/fluid/inference/analysis/helper.h"
+#include "paddle/fluid/platform/errors.h"
 
 namespace paddle {
 namespace inference {
@@ -98,7 +99,7 @@ void CreateTensor(framework::Scope* scope, const std::string& name,
 #ifdef PADDLE_WITH_CUDA
     place = platform::CUDAPlace(0);
 #else
-    PADDLE_THROW(platform::errors::PreconditionNetMet(
+    PADDLE_THROW(platform::errors::PreconditionNotMet(
         "You must define PADDLE_WITH_CUDA for using CUDAPlace."));
 #endif
   } else {
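Besides adding the paddle/fluid/platform/errors.h include, the hunk fixes the misspelled error builder name (PreconditionNetMet becomes PreconditionNotMet). The surrounding pattern, guarding a GPU-only code path and throwing a descriptive precondition error when the binary was built without CUDA, can be sketched with plain standard C++ in place of the PADDLE_THROW macro (hypothetical code, not Paddle's):

// Sketch of the guarded-feature pattern in CreateTensor(): requesting a CUDA
// placement from a build without CUDA support raises a descriptive error
// instead of silently falling back. std::logic_error stands in for
// PADDLE_THROW(platform::errors::PreconditionNotMet(...)).
#include <iostream>
#include <stdexcept>

enum class Place { kCPU, kCUDA };

Place ResolvePlace(bool want_gpu) {
  if (want_gpu) {
#ifdef PADDLE_WITH_CUDA
    return Place::kCUDA;
#else
    throw std::logic_error(
        "You must define PADDLE_WITH_CUDA for using CUDAPlace.");
#endif
  }
  return Place::kCPU;
}

int main() {
  try {
    ResolvePlace(/*want_gpu=*/true);
    std::cout << "CUDA build: CUDAPlace selected\n";
  } catch (const std::logic_error& e) {
    std::cout << "CPU-only build: " << e.what() << "\n";  // the precondition error
  }
  return 0;
}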
paddle/scripts/paddle_build.sh
@@ -213,6 +213,7 @@ function cmake_base() {
         -DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX:-/paddle/build}
         -DWITH_GRPC=${grpc_flag}
         -DWITH_LITE=${WITH_LITE:-OFF}
+        -DLITE_GIT_TAG=develop
     ========================================
 EOF
     # Disable UNITTEST_USE_VIRTUALENV in docker because
@@ -241,6 +242,7 @@ EOF
         -DPY_VERSION=${PY_VERSION:-2.7} \
         -DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX:-/paddle/build} \
         -DWITH_GRPC=${grpc_flag} \
+        -DLITE_GIT_TAG=develop \
         -DWITH_LITE=${WITH_LITE:-OFF};
     build_error=$?
     if [ "$build_error" != 0 ];then
         exit 7;
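The build script now forwards -DLITE_GIT_TAG=develop along with the WITH_LITE toggle (default OFF) to CMake, so the CI job can switch the lite subgraph build on per run. A hedged example of what the resulting configure call looks like with the feature enabled; only -DWITH_LITE and -DLITE_GIT_TAG come from the diff above, the other flags are placeholders:

#!/usr/bin/env bash
# Illustrative configure invocation; real CI goes through paddle_build.sh.
# -DCMAKE_BUILD_TYPE and -DWITH_GPU below are placeholders, not values from this commit.
set -e

WITH_LITE=${WITH_LITE:-OFF}   # same default the script uses
LITE_GIT_TAG=develop          # presumably the Paddle-Lite revision fetched by cmake/external/lite.cmake

cmake .. \
  -DCMAKE_BUILD_TYPE=Release \
  -DWITH_GPU=ON \
  -DWITH_LITE=${WITH_LITE} \
  -DLITE_GIT_TAG=${LITE_GIT_TAG}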
python/setup.py.in
@@ -272,6 +272,10 @@ else:
     shutil.copy('${OPENBLAS_SHARED_LIB}', libs_path)
     package_data['paddle.libs'] += ['openblas' + ext_name]
 
+if '${WITH_LITE}' == 'ON':
+    shutil.copy('${LITE_SHARED_LIB}', libs_path)
+    package_data['paddle.libs']+=['libpaddle_full_api_shared' + ext_name]
+
 if '${WITH_PSLIB}' == 'ON':
     shutil.copy('${PSLIB_LIB}', libs_path)
     if os.path.exists('${PSLIB_VERSION_PY}'):
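When the generated setup.py runs with WITH_LITE=ON, it copies the library recorded in LITE_SHARED_LIB into libs_path and lists libpaddle_full_api_shared under package_data['paddle.libs'], so the shared object ships inside the wheel. A small hedged check one could run after installing such a wheel, assuming the extra libraries land in a paddle/libs directory as the package_data entry suggests:

# Post-install sanity check (assumption: bundled libraries are installed under
# paddle/libs next to the package, as the 'paddle.libs' package_data implies).
import os
import paddle

libs_dir = os.path.join(os.path.dirname(paddle.__file__), "libs")
bundled = sorted(os.listdir(libs_dir))
print(bundled)
assert any(name.startswith("libpaddle_full_api_shared") for name in bundled), \
    "lite shared library not packaged; was the wheel built with WITH_LITE=ON?"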