From e9961bc3180a5035988b3a94815daf2450ae841d Mon Sep 17 00:00:00 2001
From: Wilber
Date: Tue, 1 Dec 2020 13:42:19 +0800
Subject: [PATCH] update lite api. (#29225)

---
 cmake/external/lite.cmake                          | 10 +++++++---
 paddle/fluid/inference/lite/engine.cc              |  3 +--
 paddle/fluid/inference/lite/test_engine.cc         |  3 ++-
 paddle/fluid/operators/lite/lite_engine_op_test.cc |  3 ++-
 paddle/fluid/operators/lite/ut_helper.h            | 10 ++++++++++
 5 files changed, 22 insertions(+), 7 deletions(-)

diff --git a/cmake/external/lite.cmake b/cmake/external/lite.cmake
index bc5442c7ab7..e2f3d8dc7f2 100644
--- a/cmake/external/lite.cmake
+++ b/cmake/external/lite.cmake
@@ -36,7 +36,7 @@ if (NOT LITE_SOURCE_DIR OR NOT LITE_BINARY_DIR)
   set(LITE_INSTALL_DIR ${THIRD_PARTY_PATH}/install/lite)
 
   if(NOT LITE_GIT_TAG)
-    set(LITE_GIT_TAG 42ab4d559f6659edfc35040fb30fdcec3dc3f8aa)
+    set(LITE_GIT_TAG release/v2.7)
   endif()
 
   if(NOT CUDA_ARCH_NAME)
@@ -84,10 +84,8 @@ if (NOT LITE_SOURCE_DIR OR NOT LITE_BINARY_DIR)
       ${EXTERNAL_OPTIONAL_ARGS}
       ${LITE_OPTIONAL_ARGS}
     )
-    set(LITE_OUTPUT_BIN_DIR inference_lite_lib.armlinux.armv8)
   else()
     set(LITE_BUILD_COMMAND $(MAKE) publish_inference -j)
-    set(LITE_OUTPUT_BIN_DIR inference_lite_lib)
     set(LITE_OPTIONAL_ARGS -DWITH_MKL=ON
                            -DLITE_WITH_CUDA=${WITH_GPU}
                            -DWITH_MKLDNN=OFF
@@ -135,6 +133,12 @@ if (NOT LITE_SOURCE_DIR OR NOT LITE_BINARY_DIR)
 
 endif()
 
+if (WITH_ARM)
+  set(LITE_OUTPUT_BIN_DIR inference_lite_lib.armlinux.armv8)
+else()
+  set(LITE_OUTPUT_BIN_DIR inference_lite_lib)
+endif()
+
 message(STATUS "Paddle-lite BINARY_DIR: ${LITE_BINARY_DIR}")
 message(STATUS "Paddle-lite SOURCE_DIR: ${LITE_SOURCE_DIR}")
 include_directories(${LITE_SOURCE_DIR})
diff --git a/paddle/fluid/inference/lite/engine.cc b/paddle/fluid/inference/lite/engine.cc
index b8f6104780f..e8ec67d6f0b 100644
--- a/paddle/fluid/inference/lite/engine.cc
+++ b/paddle/fluid/inference/lite/engine.cc
@@ -55,8 +55,7 @@ paddle::lite_api::PaddlePredictor* EngineManager::Create(
 #ifdef PADDLE_WITH_ARM
   set_threads.set_threads(cfg.cpu_math_library_num_threads);
 #else
-  lite_cxx_config.set_x86_math_library_num_threads(
-      cfg.cpu_math_library_num_threads);
+  lite_cxx_config.set_x86_math_num_threads(cfg.cpu_math_library_num_threads);
 #endif
 
 #ifdef LITE_SUBGRAPH_WITH_XPU
diff --git a/paddle/fluid/inference/lite/test_engine.cc b/paddle/fluid/inference/lite/test_engine.cc
index 1ac33f6bbf9..e1286e7bc88 100644
--- a/paddle/fluid/inference/lite/test_engine.cc
+++ b/paddle/fluid/inference/lite/test_engine.cc
@@ -28,6 +28,7 @@ namespace inference {
 namespace lite {
 
 using inference::lite::AddTensorToBlockDesc;
+using paddle::inference::lite::AddFetchListToBlockDesc;
 using inference::lite::CreateTensor;
 using inference::lite::serialize_params;
 
@@ -64,7 +65,7 @@ void make_fake_model(std::string* model, std::string* param) {
   AddTensorToBlockDesc(block_, "x", std::vector<int64_t>({2, 4}), true);
   AddTensorToBlockDesc(block_, "y", std::vector<int64_t>({2, 4}), true);
   AddTensorToBlockDesc(block_, "z", std::vector<int64_t>({2, 4}), false);
-  AddTensorToBlockDesc(block_, "out", std::vector<int64_t>({2, 4}), false);
+  AddFetchListToBlockDesc(block_, "out");
 
   *block_->add_ops() = *feed0->Proto();
   *block_->add_ops() = *feed1->Proto();
diff --git a/paddle/fluid/operators/lite/lite_engine_op_test.cc b/paddle/fluid/operators/lite/lite_engine_op_test.cc
index 76c963ac652..14088351cc8 100644
--- a/paddle/fluid/operators/lite/lite_engine_op_test.cc
+++ b/paddle/fluid/operators/lite/lite_engine_op_test.cc
@@ -25,6 +25,7 @@ USE_NO_KERNEL_OP(lite_engine)
 
 using paddle::inference::lite::AddTensorToBlockDesc;
+using paddle::inference::lite::AddFetchListToBlockDesc;
 using paddle::inference::lite::CreateTensor;
 using paddle::inference::lite::serialize_params;
 
 namespace paddle {
@@ -60,7 +61,7 @@ TEST(LiteEngineOp, engine_op) {
   AddTensorToBlockDesc(block_, "x", std::vector<int64_t>({2, 4}), true);
   AddTensorToBlockDesc(block_, "y", std::vector<int64_t>({2, 4}), true);
   AddTensorToBlockDesc(block_, "z", std::vector<int64_t>({2, 4}), false);
-  AddTensorToBlockDesc(block_, "out", std::vector<int64_t>({2, 4}), false);
+  AddFetchListToBlockDesc(block_, "out");
   *block_->add_ops() = *feed1->Proto();
   *block_->add_ops() = *feed0->Proto();
   *block_->add_ops() = *elt_add->Proto();
diff --git a/paddle/fluid/operators/lite/ut_helper.h b/paddle/fluid/operators/lite/ut_helper.h
index b549af81d8d..93ebc55a5b8 100644
--- a/paddle/fluid/operators/lite/ut_helper.h
+++ b/paddle/fluid/operators/lite/ut_helper.h
@@ -41,6 +41,16 @@ void AddTensorToBlockDesc(framework::proto::BlockDesc* block,
   desc.SetPersistable(persistable);
   *var = *desc.Proto();
 }
+
+void AddFetchListToBlockDesc(framework::proto::BlockDesc* block,
+                             const std::string& name) {
+  using framework::proto::VarType;
+  auto* var = block->add_vars();
+  framework::VarDesc desc(name);
+  desc.SetType(VarType::FETCH_LIST);
+  *var = *desc.Proto();
+}
+
 void serialize_params(std::string* str, framework::Scope* scope,
                       const std::vector<std::string>& params) {
   std::ostringstream os;
-- 
GitLab