diff --git a/cmake/external/lite.cmake b/cmake/external/lite.cmake
index 6c837c97089b7f3997bc1f669cd9e71976cb6980..2617bdd834d1867eb6c2c40dac944aefe0f2a783 100644
--- a/cmake/external/lite.cmake
+++ b/cmake/external/lite.cmake
@@ -44,7 +44,7 @@ if (NOT LITE_SOURCE_DIR OR NOT LITE_BINARY_DIR)
     ${LITE_PROJECT}
     ${EXTERNAL_PROJECT_LOG_ARGS}
     GIT_REPOSITORY "https://github.com/PaddlePaddle/Paddle-Lite.git"
-    GIT_TAG 922ace19a45f30075618f71428523e7a2d5898d6
+    GIT_TAG 0f875ef367bd2dbfa2e557eb2a2fc841bacdf6cf
     PREFIX ${LITE_SOURCES_DIR}
     UPDATE_COMMAND ""
     BUILD_COMMAND ${LITE_BUILD_COMMAND}
diff --git a/paddle/fluid/inference/lite/op_teller.cc b/paddle/fluid/inference/lite/op_teller.cc
index fd7a5da7cec6f12d39bd06e70deed5989ef5dc43..c5f1eccc3334a109aa0a6a21ae8c189d42e18447 100644
--- a/paddle/fluid/inference/lite/op_teller.cc
+++ b/paddle/fluid/inference/lite/op_teller.cc
@@ -28,7 +28,7 @@ namespace lite {
 struct SimpleOpTeller : public Teller {
   SimpleOpTeller() {
     const std::map<std::string, std::string>& op2path =
-        OpKernelInfoCollector::Global().GetOp2PathDict();
+        paddle::lite::GetOp2PathDict();
     auto is_non_inst = [](const std::string& op) -> bool {
       const std::vector<std::string> ops = {"feed", "fetch", "while"};
       return std::find(ops.begin(), ops.end(), op) != ops.end();
diff --git a/paddle/fluid/inference/lite/tensor_utils.cc b/paddle/fluid/inference/lite/tensor_utils.cc
index 9f361d563b054652f44ded5ab015d3d9cf683ee0..8e7ebd3119e976555b6281a73601723f48dcf12a 100644
--- a/paddle/fluid/inference/lite/tensor_utils.cc
+++ b/paddle/fluid/inference/lite/tensor_utils.cc
@@ -157,7 +157,10 @@ void TensorCopyAsync(paddle::lite::Tensor* dst, const framework::LoDTensor& src,
   dst->Resize(framework::vectorize(src.dims()));
   const void* src_data = src.data<void>();
   void* dst_data = dst->mutable_data(bytes);
+  VLOG(3) << "[CopyAsync fluid -> lite] Bytes = " << bytes << ", src = " << &src
+          << ", dst = " << dst << ", src_type = " << src.type();
   MemoryCopyAsync(dst_place, dst_data, src_place, src_data, bytes, ctx);
+  VLOG(3) << "[Lite memory size] Bytes = " << dst->memory_size();
 }
 
 template <>
@@ -172,7 +175,10 @@ void TensorCopyAsync(framework::LoDTensor* dst, const paddle::lite::Tensor& src,
   const void* src_data = src.raw_data();
   // When Lite is ready, the source type needs to be modified here.
   void* dst_data = dst->mutable_data(dst_place, dst->type());
+  VLOG(3) << "[CopyAsync lite -> fluid] Bytes = " << bytes << ", src = " << &src
+          << ", dst = " << dst << ", src_type = " << dst->type();
   MemoryCopyAsync(dst_place, dst_data, src_place, src_data, bytes, ctx);
+  VLOG(3) << "[Lite memory size] Bytes = " << src.memory_size();
 }
 
 }  // namespace utils
diff --git a/paddle/fluid/operators/lite/lite_engine_op.h b/paddle/fluid/operators/lite/lite_engine_op.h
index 62bbef663235a7a930ede47cc3f8a31921276796..3b48615338f729a56db133a2072ceea5e8e94b22 100644
--- a/paddle/fluid/operators/lite/lite_engine_op.h
+++ b/paddle/fluid/operators/lite/lite_engine_op.h
@@ -77,7 +77,8 @@ class LiteEngineOp : public framework::OperatorBase {
           inference::analysis::GetFromScope<framework::LoDTensor>(scope,
                                                                   in_names_[i]);
       paddle::lite::Tensor *dst_t = engine_->GetInput(i);
-      VLOG(3) << "fluid -> lite: " << in_names_[i];
+      VLOG(3) << "[Copy] fluid -> lite (" << in_names_[i] << " -> "
+              << engine_->GetInputNames()[i] << ")";
       inference::lite::utils::TensorCopyAsync(dst_t, src_t, *ctx);
     }
 #ifdef PADDLE_WITH_CUDA
@@ -94,7 +95,8 @@ class LiteEngineOp : public framework::OperatorBase {
       framework::LoDTensor *dst_t =
           &inference::analysis::GetFromScope<framework::LoDTensor>(
               scope, out_names_[i]);
-      VLOG(3) << "lite -> fluid: " << out_names_[i];
+      VLOG(3) << "[Copy] lite -> fluid (" << out_names_[i] << " -> "
+              << engine_->GetOutputNames()[i] << ")";
       inference::lite::utils::TensorCopyAsync(dst_t, src_t, *ctx);
     }
 #ifdef PADDLE_WITH_CUDA
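
Note: the VLOG(3) lines added above all follow one pattern: log the byte count, the source and destination addresses, and the element type before the copy is issued, then log the destination's actual allocation afterwards, so a size or type mismatch between the fluid and Lite tensors is visible in the trace. Below is a minimal self-contained sketch of that pattern in plain C++; std::memcpy and std::cerr stand in for Paddle's MemoryCopyAsync and VLOG, and the Buffer type and copy_with_logging helper are hypothetical, not part of Paddle.

```cpp
#include <cstring>
#include <iostream>
#include <vector>

// Hypothetical tensor-like buffer; paddle::lite::Tensor and
// framework::LoDTensor play this role in the patch above.
struct Buffer {
  std::vector<char> data;
  size_t memory_size() const { return data.size(); }
};

// Log the transfer parameters before the copy and the destination's
// real allocation after it, mirroring the instrumented TensorCopyAsync.
void copy_with_logging(Buffer* dst, const Buffer& src) {
  const size_t bytes = src.memory_size();
  dst->data.resize(bytes);
  std::cerr << "[CopyAsync] Bytes = " << bytes << ", src = " << &src
            << ", dst = " << dst << "\n";
  std::memcpy(dst->data.data(), src.data.data(), bytes);  // stand-in for MemoryCopyAsync
  std::cerr << "[Memory size] Bytes = " << dst->memory_size() << "\n";
}

int main() {
  Buffer src{std::vector<char>(64, 0)};
  Buffer dst;
  copy_with_logging(&dst, src);
  return 0;
}
```

Since Paddle logs through glog, the new messages only show up when verbose logging is at level 3 or higher, e.g. when running with GLOG_v=3 in the environment.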