From f7eafca828ccce4f9acc278de7778576c1f33051 Mon Sep 17 00:00:00 2001
From: flame
Date: Sat, 15 Feb 2020 09:41:23 +0800
Subject: [PATCH] remove python inference warning (#22602)

---
 paddle/fluid/framework/naive_executor.cc | 10 ----------
 paddle/fluid/inference/api/api_impl.cc   | 11 +++++++++++
 2 files changed, 11 insertions(+), 10 deletions(-)

diff --git a/paddle/fluid/framework/naive_executor.cc b/paddle/fluid/framework/naive_executor.cc
index a18f9df9a9..9c1638f407 100644
--- a/paddle/fluid/framework/naive_executor.cc
+++ b/paddle/fluid/framework/naive_executor.cc
@@ -41,16 +41,6 @@ void NaiveExecutor::Prepare(Scope *scope, const ProgramDesc &program_desc,
 }
 
 void NaiveExecutor::Run() {
-#ifndef PADDLE_ON_INFERENCE
-  LOG_FIRST_N(WARNING, 5) << "The NaiveExecutor can not work properly if the "
-                             "cmake flag ON_INFER is not set.";
-  LOG_FIRST_N(WARNING, 5) << "Unlike the training phase, all the scopes and "
-                             "variables will be reused to save the allocation "
-                             "overhead.";
-  LOG_FIRST_N(WARNING, 5) << "Please re-compile the inference library by "
-                             "setting the cmake flag ON_INFER=ON if you are "
-                             "running Paddle Inference";
-#endif  // PADDLE_ON_INFERENCE
   for (auto &op : ops_) {
     VLOG(4) << std::this_thread::get_id() << " run "
             << op->DebugStringEx(scope_) << " on scope " << scope_;
diff --git a/paddle/fluid/inference/api/api_impl.cc b/paddle/fluid/inference/api/api_impl.cc
index 628817c6f4..84b367336d 100644
--- a/paddle/fluid/inference/api/api_impl.cc
+++ b/paddle/fluid/inference/api/api_impl.cc
@@ -12,6 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
 
+#include <glog/logging.h>
 #include <algorithm>
 #include <map>
 #include <set>
@@ -131,6 +132,16 @@ NativePaddlePredictor::~NativePaddlePredictor() {
 bool NativePaddlePredictor::Run(const std::vector<PaddleTensor> &inputs,
                                 std::vector<PaddleTensor> *output_data,
                                 int batch_size) {
+#ifndef PADDLE_ON_INFERENCE
+  LOG_FIRST_N(WARNING, 5) << "The NaiveExecutor can not work properly if the "
+                             "cmake flag ON_INFER is not set.";
+  LOG_FIRST_N(WARNING, 5) << "Unlike the training phase, all the scopes and "
+                             "variables will be reused to save the allocation "
+                             "overhead.";
+  LOG_FIRST_N(WARNING, 5) << "Please re-compile the inference library by "
+                             "setting the cmake flag ON_INFER=ON if you are "
+                             "running Paddle Inference";
+#endif  // PADDLE_ON_INFERENCE
   if (UNLIKELY(config_.cpu_math_library_num_threads() > 1)) {
     paddle::platform::SetNumThreads(config_.cpu_math_library_num_threads());
   }
-- 
GitLab
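
For reference: LOG_FIRST_N is a glog macro that emits a log statement only the first N times execution reaches it, which is why the relocated warnings above fire at most five times per process. Below is a minimal standalone sketch of that behavior, assuming glog is installed and linked; the main() harness and message text are illustrative and not part of this patch or of Paddle.

#include <glog/logging.h>

int main(int argc, char* argv[]) {
  google::InitGoogleLogging(argv[0]);
  FLAGS_logtostderr = true;  // send output to stderr instead of log files

  for (int i = 0; i < 10; ++i) {
    // Only the first 5 of these 10 iterations log anything; google::COUNTER
    // expands to the occurrence count inside the *_N logging macros.
    LOG_FIRST_N(WARNING, 5) << "example warning, occurrence "
                            << google::COUNTER;
  }
  return 0;
}

Because the block is wrapped in #ifndef PADDLE_ON_INFERENCE, it is compiled in only when the library was built without the ON_INFER CMake flag. The observable effect of the patch, matching its subject line, is that the warning no longer fires on every NaiveExecutor::Run() (e.g. when the executor is driven from Python), but only when inference goes through NativePaddlePredictor::Run().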