Unverified commit e9961bc3, authored by Wilber, committed by GitHub

update lite api. (#29225)

Parent 7e8e3bab
......
@@ -36,7 +36,7 @@ if (NOT LITE_SOURCE_DIR OR NOT LITE_BINARY_DIR)
   set(LITE_INSTALL_DIR ${THIRD_PARTY_PATH}/install/lite)
   if(NOT LITE_GIT_TAG)
-    set(LITE_GIT_TAG 42ab4d559f6659edfc35040fb30fdcec3dc3f8aa)
+    set(LITE_GIT_TAG release/v2.7)
   endif()
   if(NOT CUDA_ARCH_NAME)
......
@@ -84,10 +84,8 @@ if (NOT LITE_SOURCE_DIR OR NOT LITE_BINARY_DIR)
       ${EXTERNAL_OPTIONAL_ARGS}
       ${LITE_OPTIONAL_ARGS}
     )
-    set(LITE_OUTPUT_BIN_DIR inference_lite_lib.armlinux.armv8)
   else()
     set(LITE_BUILD_COMMAND $(MAKE) publish_inference -j)
-    set(LITE_OUTPUT_BIN_DIR inference_lite_lib)
     set(LITE_OPTIONAL_ARGS -DWITH_MKL=ON
                            -DLITE_WITH_CUDA=${WITH_GPU}
                            -DWITH_MKLDNN=OFF
......
@@ -135,6 +133,12 @@ if (NOT LITE_SOURCE_DIR OR NOT LITE_BINARY_DIR)
 endif()
+if (WITH_ARM)
+  set(LITE_OUTPUT_BIN_DIR inference_lite_lib.armlinux.armv8)
+else()
+  set(LITE_OUTPUT_BIN_DIR inference_lite_lib)
+endif()
+
 message(STATUS "Paddle-lite BINARY_DIR: ${LITE_BINARY_DIR}")
 message(STATUS "Paddle-lite SOURCE_DIR: ${LITE_SOURCE_DIR}")
 include_directories(${LITE_SOURCE_DIR})
......
@@ -55,8 +55,7 @@ paddle::lite_api::PaddlePredictor* EngineManager::Create(
 #ifdef PADDLE_WITH_ARM
   set_threads.set_threads(cfg.cpu_math_library_num_threads);
 #else
-  lite_cxx_config.set_x86_math_library_num_threads(
-      cfg.cpu_math_library_num_threads);
+  lite_cxx_config.set_x86_math_num_threads(cfg.cpu_math_library_num_threads);
 #endif
 #ifdef LITE_SUBGRAPH_WITH_XPU
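
For readers tracking the rename above: a minimal sketch, not taken from this commit, of how the two thread-count setters are selected when building a Lite `CxxConfig` directly. It assumes the Paddle-Lite release/v2.7 headers that this commit pins, where `set_x86_math_library_num_threads` became `set_x86_math_num_threads`; the function name `MakeLiteConfig` is illustrative.

```cpp
// Sketch only, assuming Paddle-Lite release/v2.7 headers; MakeLiteConfig is
// an illustrative name, not part of the commit.
#include "lite/api/paddle_api.h"

paddle::lite_api::CxxConfig MakeLiteConfig(int math_threads) {
  paddle::lite_api::CxxConfig config;
#ifdef PADDLE_WITH_ARM
  // On ARM, the generic thread setter inherited from ConfigBase is used.
  config.set_threads(math_threads);
#else
  // On x86, the setter was renamed in v2.7
  // (previously set_x86_math_library_num_threads).
  config.set_x86_math_num_threads(math_threads);
#endif
  return config;
}
```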
......
@@ -28,6 +28,7 @@ namespace inference {
 namespace lite {
 using inference::lite::AddTensorToBlockDesc;
+using paddle::inference::lite::AddFetchListToBlockDesc;
 using inference::lite::CreateTensor;
 using inference::lite::serialize_params;
......
@@ -64,7 +65,7 @@ void make_fake_model(std::string* model, std::string* param) {
   AddTensorToBlockDesc(block_, "x", std::vector<int64_t>({2, 4}), true);
   AddTensorToBlockDesc(block_, "y", std::vector<int64_t>({2, 4}), true);
   AddTensorToBlockDesc(block_, "z", std::vector<int64_t>({2, 4}), false);
-  AddTensorToBlockDesc(block_, "out", std::vector<int64_t>({2, 4}), false);
+  AddFetchListToBlockDesc(block_, "out");
   *block_->add_ops() = *feed0->Proto();
   *block_->add_ops() = *feed1->Proto();
......
@@ -25,6 +25,7 @@
 USE_NO_KERNEL_OP(lite_engine)
 using paddle::inference::lite::AddTensorToBlockDesc;
+using paddle::inference::lite::AddFetchListToBlockDesc;
 using paddle::inference::lite::CreateTensor;
 using paddle::inference::lite::serialize_params;
 namespace paddle {
......
@@ -60,7 +61,7 @@ TEST(LiteEngineOp, engine_op) {
   AddTensorToBlockDesc(block_, "x", std::vector<int64_t>({2, 4}), true);
   AddTensorToBlockDesc(block_, "y", std::vector<int64_t>({2, 4}), true);
   AddTensorToBlockDesc(block_, "z", std::vector<int64_t>({2, 4}), false);
-  AddTensorToBlockDesc(block_, "out", std::vector<int64_t>({2, 4}), false);
+  AddFetchListToBlockDesc(block_, "out");
   *block_->add_ops() = *feed1->Proto();
   *block_->add_ops() = *feed0->Proto();
   *block_->add_ops() = *elt_add->Proto();
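
Why the tests switch "out" from a plain tensor to a fetch list: the trailing fetch op writes its result into a FETCH_LIST variable, so declaring "out" as an ordinary LOD_TENSOR no longer matches what the runtime stores there. A minimal sketch, with illustrative names, of how such a test could read the result back afterwards; it assumes the code sits inside namespace paddle, like the test itself, and that FeedFetchList is still std::vector<LoDTensor> in this era of the codebase.

```cpp
// Sketch only; "scope" and "out" follow the test's naming, the rest is
// illustrative and not part of the commit.
#include "paddle/fluid/framework/feed_fetch_type.h"
#include "paddle/fluid/framework/scope.h"

const framework::LoDTensor& FetchResult(const framework::Scope& scope) {
  // The fetch op appended the output tensor at column 0 of the fetch list.
  auto& fetch_list = scope.FindVar("out")->Get<framework::FeedFetchList>();
  return fetch_list.at(0);
}
```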
......
@@ -41,6 +41,16 @@ void AddTensorToBlockDesc(framework::proto::BlockDesc* block,
   desc.SetPersistable(persistable);
   *var = *desc.Proto();
 }
+
+void AddFetchListToBlockDesc(framework::proto::BlockDesc* block,
+                             const std::string& name) {
+  using framework::proto::VarType;
+  auto* var = block->add_vars();
+  framework::VarDesc desc(name);
+  desc.SetType(VarType::FETCH_LIST);
+  *var = *desc.Proto();
+}
+
 void serialize_params(std::string* str, framework::Scope* scope,
                       const std::vector<std::string>& params) {
   std::ostringstream os;
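
As used in the two tests above, the new helper pairs with a fetch op whose output points at the FETCH_LIST variable. A condensed sketch of that pattern, not verbatim from the tests: the op wiring mirrors what the tests build, and the "col" attribute selects the column in the fetch list.

```cpp
// Condensed illustration; assumes the same framework headers the tests use.
framework::ProgramDesc program;
auto* block_ = program.Proto()->mutable_blocks(0);

AddFetchListToBlockDesc(block_, "out");  // declare "out" as FETCH_LIST
framework::OpDesc fetch_op("fetch", {{"X", {"z"}}}, {{"Out", {"out"}}}, {});
fetch_op.SetAttr("col", 0);              // result is written at column 0
*block_->add_ops() = *fetch_op.Proto();
```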
......