Unverified · Commit 72064172 authored by 石晓伟, committed by GitHub

supports xpu runtime, test=develop (#25554)

* update ResetHolder, test=develop

* add TensorShare for lite engine, test=develop

* tensor data changed from copying to sharing, test=develop

* supports xpu runtime, test=develop

* fix code styles, test=develop
Parent dfb3ae1b
@@ -25,7 +25,7 @@ if (NOT LITE_SOURCE_DIR OR NOT LITE_BINARY_DIR)
   set(LITE_INSTALL_DIR ${THIRD_PARTY_PATH}/install/lite)
   if(NOT LITE_GIT_TAG)
-    set(LITE_GIT_TAG ab8af5c4b4dc5b40217633e0aa436315912d7b53)
+    set(LITE_GIT_TAG 42ab4d559f6659edfc35040fb30fdcec3dc3f8aa)
   endif()
   if(NOT CUDA_ARCH_NAME)
@@ -83,7 +83,7 @@ message(STATUS "Paddle-lite SOURCE_DIR: ${LITE_SOURCE_DIR}")
 include_directories(${LITE_SOURCE_DIR})
 include_directories(${LITE_BINARY_DIR})
 
-function(external_lite_static_libs alias path)
+function(external_lite_libs alias path)
   add_library(${alias} SHARED IMPORTED GLOBAL)
   SET_PROPERTY(TARGET ${alias} PROPERTY IMPORTED_LOCATION
                ${path})
@@ -92,8 +92,16 @@ function(external_lite_static_libs alias path)
   endif()
 endfunction()
 
-external_lite_static_libs(lite_full_static ${LITE_BINARY_DIR}/inference_lite_lib/cxx/lib/libpaddle_full_api_shared.so)
+external_lite_libs(lite_full_static ${LITE_BINARY_DIR}/inference_lite_lib/cxx/lib/libpaddle_full_api_shared.so)
 set(LITE_SHARED_LIB ${LITE_BINARY_DIR}/inference_lite_lib/cxx/lib/libpaddle_full_api_shared.so)
 
+if(XPU_SDK_ROOT)
+  include_directories("${XPU_SDK_ROOT}/XTDK/include")
+  include_directories("${XPU_SDK_ROOT}/XTCL/include")
+  add_definitions(-DPADDLE_WITH_XPU)
+  LINK_DIRECTORIES("${XPU_SDK_ROOT}/XTDK/shlib/")
+  LINK_DIRECTORIES("${XPU_SDK_ROOT}/XTDK/runtime/shlib/")
+endif()
+
 add_definitions(-DPADDLE_WITH_LITE)
 add_definitions(-DLITE_WITH_LOG)
@@ -108,8 +108,15 @@ const DDim& Tensor::dims() const { return dims_; }
 int64_t Tensor::numel() const { return product(dims_); }
 
 void Tensor::ResetHolder(std::shared_ptr<memory::Allocation> holder) {
+  PADDLE_ENFORCE_EQ(
+      offset_, 0,
+      platform::errors::Fatal(
+          "Only the offset is supported to zero when the holder is reset."));
   if (holder_) {
-    PADDLE_ENFORCE_EQ(numel() * SizeOfType(type()), holder->size());
+    PADDLE_ENFORCE_LE(
+        numel() * SizeOfType(type()) + offset_, holder->size(),
+        paddle::platform::errors::InvalidArgument(
+            "The size of Holder is not enough to store the Tensor."));
   }
   holder_ = holder;
 }
......
@@ -200,6 +200,10 @@ struct Argument {
   DECL_ARGUMENT_FIELD(lite_ops_filter, LiteOpsFilter, std::vector<std::string>);
   DECL_ARGUMENT_FIELD(lite_precision_mode, LitePrecisionMode,
                       AnalysisConfig::Precision);
+  DECL_ARGUMENT_FIELD(lite_zero_copy, LiteZeroCopy, bool);
+
+  DECL_ARGUMENT_FIELD(use_xpu, UseXpu, bool);
+  DECL_ARGUMENT_FIELD(xpu_l3_workspace_size, XpuL3WorkspaceSize, int);
 
   // Memory optimized related.
   DECL_ARGUMENT_FIELD(enable_memory_optim, EnableMemoryOptim, bool);
......
@@ -146,6 +146,10 @@ void IRPassManager::CreatePasses(Argument *argument,
       pass->Set("predictor_id", new int(argument->predictor_id()));
       pass->Set("enable_int8", new bool(enable_int8));
       pass->Set("use_gpu", new bool(argument->use_gpu()));
+      pass->Set("zero_copy", new bool(argument->lite_zero_copy()));
+      pass->Set("use_xpu", new bool(argument->use_xpu()));
+      pass->Set("xpu_l3_workspace_size",
+                new int(argument->xpu_l3_workspace_size()));
     }
     disable_logs_ = argument->disable_logs();
     if (pass_name == "fc_fuse_pass") {
......
@@ -242,16 +242,33 @@ void LiteSubgraphPass::SetUpEngine(
   bool use_gpu = Get<bool>("use_gpu");
   bool enable_int8 = Get<bool>("enable_int8");
-  lite_api::TargetType target_type = use_gpu ? TARGET(kCUDA) : TARGET(kX86);
+  bool use_xpu = Get<bool>("use_xpu");
+  int xpu_l3_workspace_size = Get<int>("xpu_l3_workspace_size");
+
+  lite_api::TargetType target_type;
+  if (use_gpu) {
+    target_type = TARGET(kCUDA);
+  } else if (use_xpu) {
+    target_type = TARGET(kXPU);
+  } else {
+    target_type = TARGET(kX86);
+  }
+
   paddle::lite_api::PrecisionType precision_type =
-      enable_int8 ? PRECISION(kInt8) : PRECISION(kInt64);
+      enable_int8 ? PRECISION(kInt8) : PRECISION(kFloat);
+
   serialize_params(&config.param, scope, repetitive_params);
   config.model = program->Proto()->SerializeAsString();
   config.valid_places = {
+      // Notice: The ordering here determines the device where the
+      // input tensor of the Lite engine is located, and then affects
+      // whether tensor sharing is feasible.
       paddle::lite::Place({target_type, precision_type}),
+      paddle::lite::Place({target_type, PRECISION(kInt64)}),
       paddle::lite::Place({target_type, PRECISION(kFloat)}),
       paddle::lite::Place({TARGET(kHost), PRECISION(kFloat)}),
   };
+  config.xpu_l3_workspace_size = xpu_l3_workspace_size;
   if (dump_model) {
     lite::StrToBinaryFile("./model.bin", config.model);
     lite::StrToBinaryFile("./param.bin", config.param);
@@ -283,6 +300,7 @@ void LiteSubgraphPass::BuildOperator(
   op_desc->SetAttr("engine_key", unique_key);
   op_desc->SetAttr("enable_int8", Get<bool>("enable_int8"));
   op_desc->SetAttr("use_gpu", Get<bool>("use_gpu"));
+  op_desc->SetAttr("zero_copy", Get<bool>("zero_copy"));
 }
 
 void LiteSubgraphPass::ApplyImpl(framework::ir::Graph* graph) const {
......
@@ -88,6 +88,12 @@ void AnalysisConfig::DisableFCPadding() {
   Update();
 }
 
+void AnalysisConfig::EnableXpu(int l3_workspace_size) {
+  use_xpu_ = true;
+  xpu_l3_workspace_size_ = l3_workspace_size;
+  Update();
+}
+
 AnalysisConfig::AnalysisConfig(const AnalysisConfig &other) {
 #define CP_MEMBER(member__) member__ = other.member__;
@@ -132,6 +138,10 @@ AnalysisConfig::AnalysisConfig(const AnalysisConfig &other) {
   CP_MEMBER(lite_precision_mode_);
   CP_MEMBER(lite_passes_filter_);
   CP_MEMBER(lite_ops_filter_);
+  CP_MEMBER(lite_zero_copy_);
+
+  CP_MEMBER(use_xpu_);
+  CP_MEMBER(xpu_l3_workspace_size_);
 
   // profile related.
   CP_MEMBER(with_profile_);
@@ -344,6 +354,22 @@ void AnalysisConfig::Update() {
     }
   }
 
+  if (use_xpu_) {
+#ifndef PADDLE_WITH_XPU
+    PADDLE_THROW(platform::errors::Unavailable(
+        "You tried to use an XPU device, but Paddle was not compiled "
+        "with XPU-runtime."));
+#endif
+    if (!use_lite_) {
+      LOG(WARNING) << "Because XPU currently only works in Paddle-Lite "
+                      "subgraph mode, please make sure you have enabled it.";
+    }
+    PADDLE_ENFORCE_EQ(use_gpu_, false,
+                      platform::errors::Unavailable(
+                          "Currently, XPU and GPU cannot be enabled in the "
+                          "same analysis configuration."));
+  }
+
   if (ir_debug_) {
     pass_builder()->TurnOnDebug();
   }
@@ -387,6 +413,8 @@ std::string AnalysisConfig::SerializeInfoCache() {
   ss << cpu_math_library_num_threads_;
 
   ss << use_lite_;
+  ss << use_xpu_;
+  ss << xpu_l3_workspace_size_;
 
   ss << thread_local_stream_;
@@ -464,13 +492,14 @@ void AnalysisConfig::DisableGlogInfo() {
 }
 
 void AnalysisConfig::EnableLiteEngine(
-    AnalysisConfig::Precision precision_mode,
+    AnalysisConfig::Precision precision_mode, bool zero_copy,
     const std::vector<std::string> &passes_filter,
     const std::vector<std::string> &ops_filter) {
   use_lite_ = true;
   lite_precision_mode_ = precision_mode;
   lite_passes_filter_ = passes_filter;
   lite_ops_filter_ = ops_filter;
+  lite_zero_copy_ = zero_copy;
   Update();
 }
......
@@ -465,6 +465,9 @@ void AnalysisPredictor::PrepareArgument() {
     argument_.SetLitePrecisionMode(config_.lite_precision_mode_);
     argument_.SetLitePassesFilter(config_.lite_passes_filter_);
     argument_.SetLiteOpsFilter(config_.lite_ops_filter_);
+    argument_.SetLiteZeroCopy(config_.lite_zero_copy_);
+    argument_.SetUseXpu(config_.use_xpu_);
+    argument_.SetXpuL3WorkspaceSize(config_.xpu_l3_workspace_size_);
     LOG(INFO) << "Lite subgraph engine is enabled";
   }
......
@@ -176,6 +176,8 @@ struct PD_INFER_DECL AnalysisConfig {
   ///
   ///
   void DisableGpu();
+
+  void EnableXpu(int l3_workspace_size = 0xfffc00);
   ///
   /// \brief A boolean state telling whether the GPU is turned on.
   ///
@@ -319,6 +321,7 @@ struct PD_INFER_DECL AnalysisConfig {
   ///
   void EnableLiteEngine(
       AnalysisConfig::Precision precision_mode = Precision::kFloat32,
+      bool zero_copy = false,
       const std::vector<std::string>& passes_filter = {},
       const std::vector<std::string>& ops_filter = {});
@@ -579,8 +582,11 @@ struct PD_INFER_DECL AnalysisConfig {
   std::vector<std::string> lite_passes_filter_;
   std::vector<std::string> lite_ops_filter_;
   Precision lite_precision_mode_;
+  bool lite_zero_copy_;
 
   bool thread_local_stream_{false};
+  bool use_xpu_{false};
+  int xpu_l3_workspace_size_;
 
   // mkldnn related.
   int mkldnn_cache_capacity_{0};
......
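For reference, a minimal sketch of how the new user-facing options in AnalysisConfig above can be combined from C++ (assuming a build with the XPU SDK; the model path and workspace size below are placeholders, not part of this diff):

#include "paddle/fluid/inference/api/paddle_inference_api.h"

int main() {
  paddle::AnalysisConfig config;
  config.SetModel("./mobilenet_v1");  // placeholder model directory
  config.DisableGpu();                // XPU and GPU cannot be enabled together
  config.EnableXpu(/*l3_workspace_size=*/0xfffc00);
  // zero_copy = true lets the Lite engine share tensor buffers instead of copying them.
  config.EnableLiteEngine(paddle::AnalysisConfig::Precision::kFloat32,
                          /*zero_copy=*/true);
  auto predictor = paddle::CreatePaddlePredictor(config);
  return 0;
}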
+if(XPU_SDK_ROOT)
+  set(XPU_DEPS xpuapi xpurt)
+endif()
+
 cc_library(lite_op_teller SRCS op_teller.cc DEPS lite_full_static framework_proto device_context boost xxhash)
-cc_library(lite_engine SRCS engine.cc DEPS lite_full_static framework_proto)
+cc_library(lite_engine SRCS engine.cc DEPS lite_full_static framework_proto ${XPU_DEPS})
 cc_library(lite_tensor_utils SRCS tensor_utils.cc DEPS memcpy lite_full_static framework_proto boost device_context)
 cc_test(test_lite_engine SRCS test_engine.cc DEPS lite_engine protobuf framework_proto glog gtest analysis)
 cc_test(test_lite_tensor_utils SRCS test_tensor_utils.cc DEPS lite_engine lite_tensor_utils)
@@ -16,8 +16,11 @@
 #define LITE_WITH_CUDA 1
 #endif
 
-#include "paddle/fluid/inference/lite/engine.h"
+#ifdef PADDLE_WITH_XPU
+#define LITE_WITH_XPU 1
+#endif
 
+#include "paddle/fluid/inference/lite/engine.h"
 #include "lite/api/paddle_use_passes.h"
 
 namespace paddle {
@@ -39,10 +42,17 @@ paddle::lite::Predictor* EngineManager::Get(const std::string& name) const {
 paddle::lite::Predictor* EngineManager::Create(const std::string& name,
                                                const EngineConfig& cfg) {
-  auto* p = new paddle::lite::Predictor();
+  if (cfg.valid_places.front().target == TARGET(kCUDA)) {
 #ifdef PADDLE_WITH_CUDA
     paddle::lite::Env<TARGET(kCUDA)>::Init();
 #endif
+  } else if (cfg.valid_places.front().target == TARGET(kXPU)) {
+#ifdef PADDLE_WITH_XPU
+    paddle::lite::TargetWrapper<TARGET(kXPU)>::workspace_l3_size_per_thread =
+        cfg.xpu_l3_workspace_size;
+#endif
+  }
+  auto* p = new paddle::lite::Predictor();
   p->Build("", cfg.model, cfg.param, cfg.valid_places, cfg.neglected_passes,
            cfg.model_type, cfg.model_from_memory);
   engines_[name].reset(p);
......
@@ -26,6 +26,7 @@
 #include "lite/api/paddle_place.h"
 #include "lite/core/context.h"
 #include "lite/core/device_info.h"
+#include "lite/core/memory.h"
 #include "lite/core/op_registry.h"
 #include "lite/core/tensor.h"
 #pragma GCC diagnostic pop
@@ -42,6 +43,7 @@ struct EngineConfig {
   std::vector<std::string> neglected_passes;
   lite_api::LiteModelType model_type{lite_api::LiteModelType::kProtobuf};
   bool model_from_memory{true};
+  size_t xpu_l3_workspace_size;
 };
 
 class EngineManager {
......
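As a rough illustration of the extended EngineConfig (a sketch only: the engine key, model/param buffers, and workspace size are placeholders, and error handling is omitted), a caller targeting XPU might fill it like this before handing it to the EngineManager, mirroring what LiteSubgraphPass::SetUpEngine does above:

#include <string>

#include "paddle/fluid/inference/lite/engine.h"
#include "paddle/fluid/inference/utils/singleton.h"

// Sketch: build an XPU-targeted Lite engine from in-memory model/param buffers.
void CreateXpuLiteEngine(const std::string& model_buffer,
                         const std::string& param_buffer) {
  paddle::inference::lite::EngineConfig config;
  config.model = model_buffer;
  config.param = param_buffer;
  // The first place decides where the engine's input tensors live.
  config.valid_places = {
      paddle::lite::Place({TARGET(kXPU), PRECISION(kFloat)}),
      paddle::lite::Place({TARGET(kHost), PRECISION(kFloat)}),
  };
  config.xpu_l3_workspace_size = 0xfffc00;  // placeholder workspace size
  paddle::inference::Singleton<paddle::inference::lite::EngineManager>::Global()
      .Create("xpu_engine_key", config);    // placeholder engine key
}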
@@ -14,8 +14,10 @@
 
 #include "paddle/fluid/inference/lite/tensor_utils.h"
 #include <map>
+#include <memory>
 #include "paddle/fluid/framework/data_type.h"
 #include "paddle/fluid/inference/lite/engine.h"
+#include "paddle/fluid/memory/allocation/allocator.h"
 
 namespace paddle {
 namespace inference {
@@ -46,6 +48,9 @@ platform::Place GetNativePlace(const TargetType& type, int id = 0) {
       return platform::CPUPlace();
     case TargetType::kCUDA:
       return platform::CUDAPlace(id);
+    case TargetType::kXPU:
+      LOG(ERROR) << "No corresponding device for XPU yet.";
+      return platform::Place();
     default:
       PADDLE_THROW(
           platform::errors::Unavailable("Unsupported target type. Now only "
@@ -191,6 +196,31 @@ void TensorCopyAsync(framework::LoDTensor* dst, const paddle::lite::Tensor& src,
   VLOG(3) << "[Lite memory size] Bytes = " << src.memory_size();
 }
 
+template <>
+void TensorDataShare(paddle::lite::Tensor* dst, framework::LoDTensor* src) {
+  const size_t bytes =
+      static_cast<size_t>(src->numel()) * framework::SizeOfType(src->type());
+  auto buf = std::make_shared<paddle::lite::Buffer>(paddle::lite::Buffer(
+      src->data<void>(), GetLiteTargetType(src->place()), src->memory_size()));
+  dst->Resize(framework::vectorize(src->dims()));
+  dst->set_precision(GetLitePrecisionType(src->type()));
+  SetLoD(dst->mutable_lod(), src->lod());
+  dst->ResetBuffer(buf, bytes);
+}
+
+template <>
+void TensorDataShare(framework::LoDTensor* dst, paddle::lite::Tensor* src) {
+  constexpr framework::proto::VarType::Type dtype =
+      framework::proto::VarType_Type_FP32;
+  void* src_raw_data = src->raw_data();
+  std::shared_ptr<memory::allocation::Allocation> holder(
+      new memory::allocation::Allocation(src_raw_data, src->memory_size(),
+                                         GetNativePlace(src->target())));
+  dst->Resize(paddle::framework::make_ddim(src->dims().Vectorize()));
+  SetLoD(dst->mutable_lod(), src->lod());
+  dst->ResetHolderWithType(holder, dtype);
+}
+
 }  // namespace utils
 }  // namespace lite
 }  // namespace inference
......
@@ -26,6 +26,21 @@ template <typename DstTensor, typename SrcTensor>
 void TensorCopyAsync(DstTensor* dst, const SrcTensor& src,
                      const platform::DeviceContext& ctx);
 
+template <typename DstTensor, typename SrcTensor>
+void TensorDataShare(DstTensor* dst, SrcTensor* src);
+
+template <typename DstTensor, typename SrcTensor>
+void TensorCopy(DstTensor* dst, SrcTensor* src,
+                const platform::DeviceContext& ctx, bool shared = true) {
+  if (shared) {
+    VLOG(3) << "TensorDataShare is running";
+    TensorDataShare(dst, src);
+  } else {
+    VLOG(3) << "TensorCopyAsync is running";
+    TensorCopyAsync(dst, *src, ctx);
+  }
+}
+
 }  // namespace utils
 }  // namespace lite
 }  // namespace inference
......
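The new TensorCopy wrapper above simply dispatches between the zero-copy path and the existing asynchronous copy. A minimal call-site sketch (the helper name is hypothetical; tensor and context setup are assumed, as in LiteEngineOp further below):

#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/inference/lite/tensor_utils.h"

namespace paddle {
namespace inference {
namespace lite {

// Sketch: feed one fluid LoDTensor into a Lite input tensor. With zero_copy
// enabled the buffer is shared via TensorDataShare; otherwise the data is
// copied asynchronously through TensorCopyAsync.
void FeedInput(paddle::lite::Tensor* dst, framework::LoDTensor* src,
               const platform::DeviceContext& ctx, bool zero_copy) {
  utils::TensorCopy(dst, src, ctx, /*shared=*/zero_copy);
}

}  // namespace lite
}  // namespace inference
}  // namespace paddle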
@@ -77,7 +77,7 @@ void test_tensor_copy(const platform::DeviceContext& ctx) {
   // Create LoDTensor.
   std::vector<float> vector({1, 2, 3, 4});
   framework::LoDTensor lod_tensor;
-  framework::TensorFromVector(vector, &lod_tensor);
+  framework::TensorFromVector(vector, ctx, &lod_tensor);
   framework::LoD lod({{0, 2, 4}});
   lod_tensor.Resize({4, 1});
   lod_tensor.set_lod(lod);
@@ -94,7 +94,26 @@
   }
 #endif
   std::vector<float> result;
-  TensorToVector(lod_tensor_n, &result);
+  TensorToVector(lod_tensor_n, ctx, &result);
+  ASSERT_EQ(result, vector);
+  ASSERT_EQ(lod_tensor_n.lod(), lod_tensor.lod());
+}
+
+void test_tensor_share(const platform::DeviceContext& ctx) {
+  std::vector<float> vector({1, 2, 3, 4});
+  framework::LoDTensor lod_tensor;
+  framework::TensorFromVector(vector, ctx, &lod_tensor);
+  framework::LoD lod({{0, 2, 4}});
+  lod_tensor.Resize({4, 1});
+  lod_tensor.set_lod(lod);
+  // Create lite::Tensor and share.
+  paddle::lite::Tensor lite_tensor;
+  TensorDataShare(&lite_tensor, &lod_tensor);
+  // Copy to LoDTensor.
+  framework::LoDTensor lod_tensor_n;
+  TensorCopyAsync(&lod_tensor_n, lite_tensor, ctx);
+  std::vector<float> result;
+  TensorToVector(lod_tensor_n, ctx, &result);
   ASSERT_EQ(result, vector);
   ASSERT_EQ(lod_tensor_n.lod(), lod_tensor.lod());
 }
@@ -110,6 +129,17 @@ TEST(LiteEngineOp, TensorCopyAsync) {
 #endif
 }
 
+TEST(LiteEngineOp, TensorShare) {
+  auto* ctx_cpu =
+      platform::DeviceContextPool::Instance().Get(platform::CPUPlace());
+  test_tensor_share(*ctx_cpu);
+#ifdef PADDLE_WITH_CUDA
+  auto* ctx_gpu =
+      platform::DeviceContextPool::Instance().Get(platform::CUDAPlace(0));
+  test_tensor_share(*ctx_gpu);
+#endif
+}
+
 }  // namespace utils
 }  // namespace lite
 }  // namespace inference
......
@@ -42,6 +42,7 @@ class LiteEngineOp : public framework::OperatorBase {
   paddle::lite::Predictor *engine_;
   framework::proto::VarType::Type precision_;
   bool use_gpu_;
+  bool zero_copy_;
 
  public:
   LiteEngineOp(const std::string &type,
@@ -60,6 +61,7 @@ class LiteEngineOp : public framework::OperatorBase {
       precision_ = framework::proto::VarType_Type_FP32;
     }
     use_gpu_ = Attr<bool>("use_gpu");
+    zero_copy_ = Attr<bool>("zero_copy");
   }
 
  protected:
@@ -73,13 +75,13 @@ class LiteEngineOp : public framework::OperatorBase {
     const platform::DeviceContext *ctx =
         platform::DeviceContextPool::Instance().Get(dev_place);
     for (size_t i = 0; i < in_names_.size(); i++) {
-      const framework::LoDTensor &src_t =
+      framework::LoDTensor src_t =
           inference::analysis::GetFromScope<framework::LoDTensor>(scope,
                                                                    in_names_[i]);
       paddle::lite::Tensor *dst_t = engine_->GetInput(i);
-      VLOG(3) << "[Copy] fluid -> lite (" << in_names_[i] << " -> "
+      VLOG(3) << "== fluid -> lite (" << in_names_[i] << " -> "
              << engine_->GetInputNames()[i] << ")";
-      inference::lite::utils::TensorCopyAsync(dst_t, src_t, *ctx);
+      inference::lite::utils::TensorCopy(dst_t, &src_t, *ctx, zero_copy_);
     }
 #ifdef PADDLE_WITH_CUDA
     if (platform::is_gpu_place(dev_place)) {
@@ -91,13 +93,13 @@ class LiteEngineOp : public framework::OperatorBase {
     engine_->Run();
     VLOG(3) << "lite engine run done";
     for (size_t i = 0; i < out_names_.size(); i++) {
-      const paddle::lite::Tensor &src_t = *(engine_->GetOutput(i));
+      paddle::lite::Tensor src_t = *(engine_->GetOutput(i));
       framework::LoDTensor *dst_t =
           &inference::analysis::GetFromScope<framework::LoDTensor>(
               scope, out_names_[i]);
-      VLOG(3) << "[Copy] lite -> fluid (" << out_names_[i] << " -> "
+      VLOG(3) << "== lite -> fluid (" << out_names_[i] << " -> "
              << engine_->GetOutputNames()[i] << ")";
-      inference::lite::utils::TensorCopyAsync(dst_t, src_t, *ctx);
+      inference::lite::utils::TensorCopy(dst_t, &src_t, *ctx, zero_copy_);
     }
 #ifdef PADDLE_WITH_CUDA
     if (platform::is_gpu_place(dev_place)) {
......
@@ -100,6 +100,7 @@ TEST(LiteEngineOp, engine_op) {
   engine_op_desc.SetAttr("engine_key", engine_key);
   engine_op_desc.SetAttr("enable_int8", false);
   engine_op_desc.SetAttr("use_gpu", true);
+  engine_op_desc.SetAttr("zero_copy", true);
   engine_op_desc.SetBlockAttr("sub_block", &block_desc);
   inference::Singleton<inference::lite::EngineManager>::Global().Create(
       engine_key, config);
......
@@ -433,6 +433,7 @@ void BindAnalysisConfig(py::module *m) {
            py::arg("disable_trt_plugin_fp16") = false)
       .def("tensorrt_engine_enabled", &AnalysisConfig::tensorrt_engine_enabled)
       .def("enable_lite_engine", &AnalysisConfig::EnableLiteEngine,
+           py::arg("zero_copy") = false,
            py::arg("precision_mode") = AnalysisConfig::Precision::kFloat32,
            py::arg("passes_filter") = std::vector<std::string>(),
            py::arg("ops_filter") = std::vector<std::string>())
......