Unverified · Commit 041ea14c · Authored by Yuanle Liu · Committed by GitHub

Fix libpaddle_inference.so symbol conflicts with other .so (gflags) (#50787)

Parent commit: bda59b1b
...@@ -154,14 +154,8 @@ set_target_properties(paddle_inference_shared PROPERTIES OUTPUT_NAME ...@@ -154,14 +154,8 @@ set_target_properties(paddle_inference_shared PROPERTIES OUTPUT_NAME
paddle_inference) paddle_inference)
if(NOT APPLE AND NOT WIN32) if(NOT APPLE AND NOT WIN32)
# TODO(liuyiqun): Temporarily disable the link flag because it is not support on Mac. # TODO(liuyiqun): Temporarily disable the link flag because it is not support on Mac.
if(WITH_CUSTOM_DEVICE) set(LINK_FLAGS
set(LINK_FLAGS "-Wl,--version-script ${CMAKE_CURRENT_SOURCE_DIR}/paddle_inference.map")
"-Wl,--version-script ${CMAKE_CURRENT_SOURCE_DIR}/paddle_inference_custom_device.map"
)
else()
set(LINK_FLAGS
"-Wl,--version-script ${CMAKE_CURRENT_SOURCE_DIR}/paddle_inference.map")
endif()
set_target_properties(paddle_inference_shared PROPERTIES LINK_FLAGS set_target_properties(paddle_inference_shared PROPERTIES LINK_FLAGS
"${LINK_FLAGS}") "${LINK_FLAGS}")
# check symbol hidden # check symbol hidden
......
...@@ -378,17 +378,16 @@ void Tensor::CopyToCpuImpl(T *data, ...@@ -378,17 +378,16 @@ void Tensor::CopyToCpuImpl(T *data,
auto *t_data = tensor->data<T>(); auto *t_data = tensor->data<T>();
auto t_place = tensor->place(); auto t_place = tensor->place();
phi::DenseTensor out;
auto mem_allocation =
std::make_shared<paddle::memory::allocation::Allocation>(
static_cast<void *>(data),
ele_num * sizeof(T),
paddle::platform::CPUPlace());
out.ResetHolder(mem_allocation);
if (paddle::platform::is_cpu_place(t_place)) { if (paddle::platform::is_cpu_place(t_place)) {
#ifdef PADDLE_WITH_MKLDNN #ifdef PADDLE_WITH_MKLDNN
if (tensor->layout() == phi::DataLayout::ONEDNN) if (tensor->layout() == phi::DataLayout::ONEDNN) {
phi::DenseTensor out;
auto mem_allocation =
std::make_shared<paddle::memory::allocation::Allocation>(
static_cast<void *>(data),
ele_num * sizeof(T),
paddle::platform::CPUPlace());
out.ResetHolder(mem_allocation);
phi::funcs::TransDataLayoutFromOneDNN( phi::funcs::TransDataLayoutFromOneDNN(
tensor->layout(), tensor->layout(),
phi::OneDNNContext::tls().get_cur_paddle_data_layout(), phi::OneDNNContext::tls().get_cur_paddle_data_layout(),
...@@ -396,8 +395,9 @@ void Tensor::CopyToCpuImpl(T *data, ...@@ -396,8 +395,9 @@ void Tensor::CopyToCpuImpl(T *data,
&out, &out,
paddle::platform::CPUPlace(), paddle::platform::CPUPlace(),
true); true);
else } else {
std::memcpy(static_cast<void *>(data), t_data, ele_num * sizeof(T)); std::memcpy(static_cast<void *>(data), t_data, ele_num * sizeof(T));
}
#else #else
std::memcpy(static_cast<void *>(data), t_data, ele_num * sizeof(T)); std::memcpy(static_cast<void *>(data), t_data, ele_num * sizeof(T));
#endif #endif
...@@ -871,17 +871,16 @@ void InternalUtils::CopyToCpuWithIoStream(paddle_infer::Tensor *t, ...@@ -871,17 +871,16 @@ void InternalUtils::CopyToCpuWithIoStream(paddle_infer::Tensor *t,
auto *t_data = tensor->data<T>(); auto *t_data = tensor->data<T>();
auto t_place = tensor->place(); auto t_place = tensor->place();
phi::DenseTensor out;
auto mem_allocation =
std::make_shared<paddle::memory::allocation::Allocation>(
static_cast<void *>(data),
ele_num * sizeof(T),
paddle::platform::CPUPlace());
out.ResetHolder(mem_allocation);
if (paddle::platform::is_cpu_place(t_place)) { if (paddle::platform::is_cpu_place(t_place)) {
#ifdef PADDLE_WITH_MKLDNN #ifdef PADDLE_WITH_MKLDNN
if (tensor->layout() == phi::DataLayout::ONEDNN) if (tensor->layout() == phi::DataLayout::ONEDNN) {
phi::DenseTensor out;
auto mem_allocation =
std::make_shared<paddle::memory::allocation::Allocation>(
static_cast<void *>(data),
ele_num * sizeof(T),
paddle::platform::CPUPlace());
out.ResetHolder(mem_allocation);
phi::funcs::TransDataLayoutFromOneDNN( phi::funcs::TransDataLayoutFromOneDNN(
tensor->layout(), tensor->layout(),
phi::OneDNNContext::tls().get_cur_paddle_data_layout(), phi::OneDNNContext::tls().get_cur_paddle_data_layout(),
...@@ -889,8 +888,9 @@ void InternalUtils::CopyToCpuWithIoStream(paddle_infer::Tensor *t, ...@@ -889,8 +888,9 @@ void InternalUtils::CopyToCpuWithIoStream(paddle_infer::Tensor *t,
&out, &out,
paddle::platform::CPUPlace(), paddle::platform::CPUPlace(),
true); true);
else } else {
std::memcpy(static_cast<void *>(data), t_data, ele_num * sizeof(T)); std::memcpy(static_cast<void *>(data), t_data, ele_num * sizeof(T));
}
#else #else
std::memcpy(static_cast<void *>(data), t_data, ele_num * sizeof(T)); std::memcpy(static_cast<void *>(data), t_data, ele_num * sizeof(T));
#endif #endif
......
...@@ -73,6 +73,7 @@ ...@@ -73,6 +73,7 @@
*Pass*; *Pass*;
*profile*; *profile*;
*phi*; *phi*;
PD_*;
*cinn*; *cinn*;
local: local:
*; *;
......
/*
 * GNU ld version script controlling symbol visibility of libpaddle_inference.so
 * (presumably the custom-device variant, paddle_inference_custom_device.map,
 * referenced from the CMake diff above -- TODO confirm filename).
 *
 * Everything matched under "global:" is exported; everything else falls to
 * "local: *;" and is hidden. Hiding internal symbols (notably gflags'
 * FLAGS_* registrations) is what prevents symbol clashes when another .so
 * in the same process also links gflags.
 */
{
global:
/* C++ (mangled) symbols that form the public inference API surface. */
extern "C++" {
*paddle_infer::GetVersion*;
*paddle_infer::UpdateDllFlag*;
*paddle_infer::experimental::InternalUtils*;
*paddle_infer::Tensor*;
*paddle_infer::Predictor*;
*paddle_infer::CreatePredictor*;
*paddle_infer::GetTrtCompileVersion*;
*paddle_infer::GetTrtRuntimeVersion*;
*paddle_infer::GetNumBytesOfDataType*;
*paddle_infer::ConvertToMixedPrecision*;
*paddle_infer::contrib::TensorUtils*;
*paddle_infer::contrib::Status*;
*paddle_infer::services::PredictorPool*;
*paddle_infer::LayoutConvert*;
*paddle::experimental*;
*paddle::internal*;
*paddle::get_version*;
*paddle::LiteNNAdapterConfig*;
*paddle::AnalysisConfig::*;
*paddle::PaddlePredictor::*;
*paddle::CreatePaddlePredictor*;
*paddle::NativePaddlePredictor*;
*paddle::AnalysisPredictor*;
*paddle::PaddleDtypeSize*;
*paddle::ZeroCopyTensor*;
*paddle::*Strategy*;
*paddle::NativeConfig*;
*paddle::PaddleBuf*;
*paddle::PaddleTensor*;
*paddle::UpdateDllFlag*;
*paddle::MakeCipher*;
*paddle::DistConfig*;
*paddle::DefaultGPUPlace*;
*paddle::ResourceManager*;
*paddle::GPUContextResource*;
*paddle::CPUContextResource*;
*paddle::OpMetaInfoBuilder*;
*paddle::CustomOpKernelContext*;
/* The unit tests need the following symbols; we should modify all the
   unit tests so that such symbols can be hidden again. */
/* Open question: exporting these grows the unit-test binaries from
   ~50M to ~80M -- why? */
*paddle::detail*;
*paddle::imperative*;
*paddle::detailv3*;
*paddle::memory*;
*paddle::string*;
*paddle::operators*;
*paddle::distributed*;
/* Narrower alternatives kept for reference, currently superseded by the
   broad *paddle::distributed* pattern above: */
/* *paddle::distributed::FleetWrapper*; */
/* *paddle::distributed::TensorTable*; */
/* *paddle::distributed::TableManager*; */
/* *paddle::inference*; */
*paddle::inference::ReadBinaryFile*;
*paddle::platform*;
/* *paddle::platform::GetExportedFlagInfoMap*; */
/* *paddle::framework*; */
*paddle::framework::InterpreterCore*;
*paddle::framework::Executor*;
*paddle::framework::proto*;
};
/* The following wildcard exports need to be reconsidered: they are broad
   and match many symbols outside the intended API. */
*Pass*;
*profile*;
*phi*;
/* NOTE(review): unlike the plain paddle_inference.map hunk seen earlier in
   this commit, this script still exports FLAGS_* (gflags-registered flag
   variables) -- presumably required by custom-device plugins; confirm this
   does not reintroduce the gflags symbol conflict this commit fixes. */
*FLAGS_*;
/* PD_* covers the C API entry points, which are unmangled. */
PD_*;
*cinn*;
local:
/* Hide everything not explicitly exported above. */
*;
};
...@@ -20,7 +20,8 @@ limitations under the License. */ ...@@ -20,7 +20,8 @@ limitations under the License. */
#include <vector> #include <vector>
#include "paddle/utils/blank.h" #include "paddle/utils/blank.h"
#include "paddle/utils/variant.h"
DECLARE_int32(call_stack_level);
namespace egr { namespace egr {
class EagerVariable; class EagerVariable;
...@@ -88,6 +89,8 @@ using NameTensorMap = NameVarMap<egr::EagerVariable>; ...@@ -88,6 +89,8 @@ using NameTensorMap = NameVarMap<egr::EagerVariable>;
namespace phi { namespace phi {
namespace enforce { namespace enforce {
int GetCallStackLevel() { return FLAGS_call_stack_level; }
template <typename T> template <typename T>
static std::string ReplaceComplexTypeStr(std::string str, static std::string ReplaceComplexTypeStr(std::string str,
const std::string& type_name) { const std::string& type_name) {
......
...@@ -101,8 +101,6 @@ limitations under the License. */ ...@@ -101,8 +101,6 @@ limitations under the License. */
#include "paddle/utils/variant.h" #include "paddle/utils/variant.h"
DECLARE_int32(call_stack_level);
namespace phi { namespace phi {
class ErrorSummary; class ErrorSummary;
} // namespace phi } // namespace phi
...@@ -235,6 +233,7 @@ struct BinaryCompareMessageConverter<false> { ...@@ -235,6 +233,7 @@ struct BinaryCompareMessageConverter<false> {
}; };
} // namespace details } // namespace details
int GetCallStackLevel();
std::string GetCurrentTraceBackString(bool for_signal = false); std::string GetCurrentTraceBackString(bool for_signal = false);
std::string SimplifyErrorTypeFormat(const std::string& str); std::string SimplifyErrorTypeFormat(const std::string& str);
...@@ -243,7 +242,7 @@ static std::string GetErrorSumaryString(StrType&& what, ...@@ -243,7 +242,7 @@ static std::string GetErrorSumaryString(StrType&& what,
const char* file, const char* file,
int line) { int line) {
std::ostringstream sout; std::ostringstream sout;
if (FLAGS_call_stack_level > 1) { if (GetCallStackLevel() > 1) {
sout << "\n----------------------\nError Message " sout << "\n----------------------\nError Message "
"Summary:\n----------------------\n"; "Summary:\n----------------------\n";
} }
...@@ -270,7 +269,7 @@ template <typename StrType> ...@@ -270,7 +269,7 @@ template <typename StrType>
static std::string GetTraceBackString(StrType&& what, static std::string GetTraceBackString(StrType&& what,
const char* file, const char* file,
int line) { int line) {
if (FLAGS_call_stack_level > 1) { if (GetCallStackLevel() > 1) {
// FLAGS_call_stack_level>1 means showing c++ call stack // FLAGS_call_stack_level>1 means showing c++ call stack
return GetCurrentTraceBackString() + GetErrorSumaryString(what, file, line); return GetCurrentTraceBackString() + GetErrorSumaryString(what, file, line);
} else { } else {
...@@ -317,7 +316,7 @@ struct EnforceNotMet : public std::exception { ...@@ -317,7 +316,7 @@ struct EnforceNotMet : public std::exception {
} }
const char* what() const noexcept override { const char* what() const noexcept override {
if (FLAGS_call_stack_level > 1) { if (GetCallStackLevel() > 1) {
return err_str_.c_str(); return err_str_.c_str();
} else { } else {
return simple_err_str_.c_str(); return simple_err_str_.c_str();
...@@ -331,7 +330,7 @@ struct EnforceNotMet : public std::exception { ...@@ -331,7 +330,7 @@ struct EnforceNotMet : public std::exception {
const std::string& simple_error_str() const { return simple_err_str_; } const std::string& simple_error_str() const { return simple_err_str_; }
void set_error_str(std::string str) { void set_error_str(std::string str) {
if (FLAGS_call_stack_level > 1) { if (GetCallStackLevel() > 1) {
err_str_ = str; err_str_ = str;
} else { } else {
simple_err_str_ = str; simple_err_str_ = str;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
To comment, please register.