Unverified commit 848aca7a, authored by W Wilber, committed by GitHub

[CI] [Lite-Subgraph] CI add lite subgraph check. (#25346)

Parent e65c5b8e
@@ -168,6 +168,9 @@ if(WITH_BRPC_RDMA)
     endif()
 endif()
+# lite subgraph compilation depends on CUDNN_ROOT,
+# so include(cudnn) needs to be in front of include(third_party/lite)
+include(cudnn)        # set cudnn libraries, must before configure
 include(third_party)  # download, build, install third_party
 if(WITH_DISTRIBUTE)
@@ -187,7 +190,6 @@ if(NOT WIN32)
 endif()
 include(flags)        # set paddle compile flags
-include(cudnn)        # set cudnn libraries, must before configure
 if(WITH_GPU)
     include(cuda)
...
@@ -93,6 +93,7 @@ function(external_lite_static_libs alias path)
 endfunction()
 external_lite_static_libs(lite_full_static ${LITE_BINARY_DIR}/inference_lite_lib/cxx/lib/libpaddle_full_api_shared.so)
+set(LITE_SHARED_LIB ${LITE_BINARY_DIR}/inference_lite_lib/cxx/lib/libpaddle_full_api_shared.so)
 add_definitions(-DPADDLE_WITH_LITE)
 add_definitions(-DLITE_WITH_LOG)
 cc_library(lite_op_teller SRCS op_teller.cc DEPS lite_full_static framework_proto device_context boost xxhash)
 cc_library(lite_engine SRCS engine.cc DEPS lite_full_static framework_proto)
-cc_library(lite_tensor_utils SRCS tensor_utils.cc DEPS memcpy lite_full_static framework_proto boost)
+cc_library(lite_tensor_utils SRCS tensor_utils.cc DEPS memcpy lite_full_static framework_proto boost device_context)
 cc_test(test_lite_engine SRCS test_engine.cc DEPS lite_engine protobuf framework_proto glog gtest analysis)
 cc_test(test_lite_tensor_utils SRCS test_tensor_utils.cc DEPS lite_engine lite_tensor_utils)
@@ -30,7 +30,7 @@ TEST(LiteEngineOp, GetNativePlace) {
   platform::Place GetNativePlace(const TargetType& type, int id = 0);
   EXPECT_TRUE(platform::is_cpu_place(GetNativePlace(TargetType::kHost)));
   EXPECT_TRUE(platform::is_gpu_place(GetNativePlace(TargetType::kCUDA)));
-  ASSERT_DEATH(GetNativePlace(TargetType::kUnk), "");
+  EXPECT_ANY_THROW(GetNativePlace(TargetType::kUnk));
 }
 TEST(LiteEngineOp, GetLiteTargetType) {
@@ -48,8 +48,8 @@ TEST(LiteEngineOp, GetLitePrecisionType) {
            PrecisionType::kInt8);
   ASSERT_EQ(GetLitePrecisionType(framework::proto::VarType_Type_INT32),
            PrecisionType::kInt32);
-  ASSERT_DEATH(
-      GetLitePrecisionType(framework::proto::VarType_Type_SELECTED_ROWS), "");
+  EXPECT_ANY_THROW(
+      GetLitePrecisionType(framework::proto::VarType_Type_SELECTED_ROWS));
 }
 TEST(LiteEngineOp, GetNativePrecisionType) {
@@ -62,7 +62,7 @@ TEST(LiteEngineOp, GetNativePrecisionType) {
            framework::proto::VarType_Type_INT8);
   ASSERT_EQ(GetNativePrecisionType(PrecisionType::kInt32),
            framework::proto::VarType_Type_INT32);
-  ASSERT_DEATH(GetNativePrecisionType(PrecisionType::kUnk), "");
+  EXPECT_ANY_THROW(GetNativePrecisionType(PrecisionType::kUnk));
 }
 TEST(LiteEngineOp, GetNativeLayoutType) {
@@ -70,7 +70,7 @@ TEST(LiteEngineOp, GetNativeLayoutType) {
   framework::DataLayout GetNativeLayoutType(const DataLayoutType& type);
   ASSERT_EQ(GetNativeLayoutType(DataLayoutType::kNCHW),
            framework::DataLayout::kNCHW);
-  ASSERT_DEATH(GetNativeLayoutType(DataLayoutType::kNHWC), "");
+  EXPECT_ANY_THROW(GetNativeLayoutType(DataLayoutType::kNHWC));
 }
 void test_tensor_copy(const platform::DeviceContext& ctx) {
...
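Note on the test changes above: ASSERT_DEATH runs the statement in a forked child process and expects it to crash, whereas EXPECT_ANY_THROW only requires that a C++ exception escapes the statement. The switch matches error handling that throws (as PADDLE_THROW does) rather than aborting the process. A minimal, self-contained gtest sketch of the new style; the ToNative helper here is hypothetical and not part of this patch:

    #include <stdexcept>
    #include "gtest/gtest.h"

    // Hypothetical converter that throws on unsupported input, mirroring the
    // GetNative*/GetLite* helpers after they switched from aborting to throwing.
    int ToNative(int type) {
      if (type < 0) throw std::invalid_argument("unsupported type");
      return type;
    }

    TEST(Converter, RejectsUnsupportedType) {
      EXPECT_EQ(ToNative(1), 1);
      // Passes if any C++ exception propagates out of the statement;
      // no child process is forked, unlike ASSERT_DEATH(ToNative(-1), "").
      EXPECT_ANY_THROW(ToNative(-1));
    }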
@@ -23,6 +23,7 @@
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/tensor_util.h"
 #include "paddle/fluid/inference/analysis/helper.h"
+#include "paddle/fluid/platform/errors.h"
 namespace paddle {
 namespace inference {
@@ -98,7 +99,7 @@ void CreateTensor(framework::Scope* scope, const std::string& name,
 #ifdef PADDLE_WITH_CUDA
     place = platform::CUDAPlace(0);
 #else
-    PADDLE_THROW(platform::errors::PreconditionNetMet(
+    PADDLE_THROW(platform::errors::PreconditionNotMet(
         "You must define PADDLE_WITH_CUDA for using CUDAPlace."));
 #endif
   } else {
...
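The hunk above fixes a typo in the error factory name (PreconditionNetMet -> PreconditionNotMet) and adds the platform/errors.h include that declares it. Since PADDLE_THROW raises a C++ exception carrying the formatted message, guarded paths like this are exactly what the EXPECT_ANY_THROW tests above exercise. A minimal sketch of the guard pattern, with a hypothetical stand-in for the macro (the real macro also records file/line metadata):

    #include <stdexcept>
    #include <string>

    // Hypothetical stand-in for PADDLE_THROW(platform::errors::PreconditionNotMet(...)).
    [[noreturn]] void ThrowPreconditionNotMet(const std::string& msg) {
      throw std::runtime_error("PreconditionNotMet: " + msg);
    }

    int SelectDeviceId() {
    #ifdef PADDLE_WITH_CUDA
      return 0;  // first visible CUDA device
    #else
      // Without CUDA compiled in, fail loudly rather than silently
      // falling back to the wrong place.
      ThrowPreconditionNotMet("You must define PADDLE_WITH_CUDA for using CUDAPlace.");
    #endif
    }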
@@ -213,6 +213,7 @@ function cmake_base() {
         -DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX:-/paddle/build}
         -DWITH_GRPC=${grpc_flag}
         -DWITH_LITE=${WITH_LITE:-OFF}
+        -DLITE_GIT_TAG=develop
 ========================================
 EOF
     # Disable UNITTEST_USE_VIRTUALENV in docker because
@@ -241,6 +242,7 @@ EOF
         -DPY_VERSION=${PY_VERSION:-2.7} \
         -DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX:-/paddle/build} \
         -DWITH_GRPC=${grpc_flag} \
+        -DLITE_GIT_TAG=develop \
         -DWITH_LITE=${WITH_LITE:-OFF};build_error=$?
     if [ "$build_error" != 0 ];then
         exit 7;
...
@@ -272,6 +272,10 @@ else:
     shutil.copy('${OPENBLAS_SHARED_LIB}', libs_path)
     package_data['paddle.libs'] += ['openblas' + ext_name]
+if '${WITH_LITE}' == 'ON':
+    shutil.copy('${LITE_SHARED_LIB}', libs_path)
+    package_data['paddle.libs']+=['libpaddle_full_api_shared' + ext_name]
 if '${WITH_PSLIB}' == 'ON':
     shutil.copy('${PSLIB_LIB}', libs_path)
     if os.path.exists('${PSLIB_VERSION_PY}'):
...