From 7c7d3d61723bb99c7337e38b213f30866d2d3338 Mon Sep 17 00:00:00 2001
From: JiabinYang
Date: Fri, 31 Aug 2018 00:40:01 +0800
Subject: [PATCH] Fix mac

---
 .../fluid/framework/ir/attention_lstm_fuse_pass.cc | 12 ++++++------
 paddle/fluid/inference/CMakeLists.txt              |  2 +-
 paddle/fluid/inference/analysis/argument.h         |  2 +-
 paddle/fluid/inference/api/api_impl.cc             | 10 ++++++++++
 paddle/fluid/inference/api/helper.h                |  1 +
 5 files changed, 19 insertions(+), 8 deletions(-)

diff --git a/paddle/fluid/framework/ir/attention_lstm_fuse_pass.cc b/paddle/fluid/framework/ir/attention_lstm_fuse_pass.cc
index 2876de88f..c82bbda57 100644
--- a/paddle/fluid/framework/ir/attention_lstm_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/attention_lstm_fuse_pass.cc
@@ -216,11 +216,11 @@ void PrepareLSTMWeight(const LoDTensor& W_forget_w0,
   float* out_data = out->mutable_data<float>(platform::CPUPlace());
   std::array<const float*, 4> tensors(
-      {W_forget_w0.data<float>(), W_input_w0.data<float>(),
-       W_output_w0.data<float>(), W_cell_w0.data<float>()});
+      {{W_forget_w0.data<float>(), W_input_w0.data<float>(),
+        W_output_w0.data<float>(), W_cell_w0.data<float>()}});
   std::array<const float*, 4> tensors1(
-      {W_forget_w1.data<float>(), W_input_w1.data<float>(),
-       W_output_w1.data<float>(), W_cell_w1.data<float>()});
+      {{W_forget_w1.data<float>(), W_input_w1.data<float>(),
+        W_output_w1.data<float>(), W_cell_w1.data<float>()}});

   for (int row = 0; row < D; row++) {
     for (int col = 0; col < 4; col++) {
@@ -243,8 +243,8 @@ void PrepareLSTMBias(const LoDTensor& B_forget, const LoDTensor& B_input,
                      const LoDTensor& B_output, const LoDTensor& B_cell,
                      LoDTensor* out) {
   std::array<const float*, 4> tensors(
-      {B_forget.data<float>(), B_input.data<float>(), B_output.data<float>(),
-       B_cell.data<float>()});
+      {{B_forget.data<float>(), B_input.data<float>(), B_output.data<float>(),
+        B_cell.data<float>()}});

   PADDLE_ENFORCE_EQ(B_forget.dims().size(), 1);
   int D = B_forget.dims()[0];
diff --git a/paddle/fluid/inference/CMakeLists.txt b/paddle/fluid/inference/CMakeLists.txt
index a4f6364ae..36d729e83 100644
--- a/paddle/fluid/inference/CMakeLists.txt
+++ b/paddle/fluid/inference/CMakeLists.txt
@@ -17,7 +17,7 @@ get_property(fluid_modules GLOBAL PROPERTY FLUID_MODULES)
 # paddle_fluid_origin exclude inference api interface
 cc_library(paddle_fluid_origin DEPS ${fluid_modules} paddle_fluid_api)

-if(NOT APPLE)
+if(APPLE)
   add_subdirectory(api)
 endif()
diff --git a/paddle/fluid/inference/analysis/argument.h b/paddle/fluid/inference/analysis/argument.h
index 4401d5c5a..59fc305a0 100644
--- a/paddle/fluid/inference/analysis/argument.h
+++ b/paddle/fluid/inference/analysis/argument.h
@@ -66,7 +66,7 @@ struct Argument {
     PADDLE_ENFORCE_NOT_NULL(data);
     PADDLE_ENFORCE(!attrs_.count(key), "duplicate attr called %s", key);
     attrs_[key] = data;
-    attr_deleters_[key] = [data, key, this]() {
+    attr_deleters_[key] = [data, key]() {
       VLOG(3) << "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx";
       VLOG(3) << "argument delete attr: " << key;
       delete data;
diff --git a/paddle/fluid/inference/api/api_impl.cc b/paddle/fluid/inference/api/api_impl.cc
index 32a691b81..20505fbfa 100644
--- a/paddle/fluid/inference/api/api_impl.cc
+++ b/paddle/fluid/inference/api/api_impl.cc
@@ -175,8 +175,13 @@ std::unique_ptr<PaddlePredictor> NativePaddlePredictor::Clone() {
     LOG(ERROR) << "fail to call Init";
     return nullptr;
   }
+#ifdef __clang__
+  // fix clang compile error
+  return cls;
+#else
   // fix manylinux compile error.
   return std::move(cls);
+#endif
 }

 bool NativePaddlePredictor::SetFeed(const std::vector<PaddleTensor> &inputs,
@@ -310,7 +315,12 @@ std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
   if (!dynamic_cast<NativePaddlePredictor *>(predictor.get())->Init(nullptr)) {
     return nullptr;
   }
+#ifdef __clang__
+  // fix clang compile error
+  return predictor;
+#else
   return std::move(predictor);
+#endif
 }

 }  // namespace paddle
diff --git a/paddle/fluid/inference/api/helper.h b/paddle/fluid/inference/api/helper.h
index e44b1b74b..3895f18e6 100644
--- a/paddle/fluid/inference/api/helper.h
+++ b/paddle/fluid/inference/api/helper.h
@@ -16,6 +16,7 @@
 #include
 #include
+#include
 #include
 #include
 #include
-- 
GitLab
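
Note on the #ifdef __clang__ guards in api_impl.cc: both hunks return a local std::unique_ptr from a function. Returning such a local is already a move, and clang (with warnings treated as errors) can reject an explicit std::move() there as pessimizing or redundant, while the comment in the patch indicates the older manylinux toolchain needed the explicit std::move to compile. The following is a minimal standalone sketch of that pattern only; Predictor, NativePredictor, and Clone are illustrative names, not identifiers from this patch.

#include <memory>
#include <utility>

struct Predictor {
  virtual ~Predictor() = default;
};
struct NativePredictor : Predictor {};

// Sketch of the return pattern guarded by #ifdef __clang__ in api_impl.cc.
// Returning the local unique_ptr by value is an implicit move; the explicit
// std::move branch is kept only for the older (manylinux) toolchain.
std::unique_ptr<Predictor> Clone() {
  std::unique_ptr<Predictor> cls(new NativePredictor);
#ifdef __clang__
  return cls;             // implicit move of the local
#else
  return std::move(cls);  // explicit move for the older compiler
#endif
}

int main() {
  std::unique_ptr<Predictor> p = Clone();
  return p ? 0 : 1;
}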