diff --git a/paddle/fluid/inference/tests/book/test_inference_nlp.cc b/paddle/fluid/inference/tests/book/test_inference_nlp.cc
index a0e83a17058a4edcb8f23f23ce155e644ae0cf3b..def62318154f5964064f0df470ccf593b684ad33 100644
--- a/paddle/fluid/inference/tests/book/test_inference_nlp.cc
+++ b/paddle/fluid/inference/tests/book/test_inference_nlp.cc
@@ -104,9 +104,9 @@ void ThreadRunInfer(
     const int tid, paddle::framework::Scope* scope,
     const std::vector<std::vector<const paddle::framework::LoDTensor*>>& jobs) {
   // maybe framework:ProgramDesc is not thread-safe
+  paddle::platform::CPUPlace place;
+  paddle::framework::Executor executor(place);
   auto& sub_scope = scope->NewScope();
-  auto place = paddle::platform::CPUPlace();
-  auto executor = paddle::framework::Executor(place);
   auto inference_program =
       paddle::inference::Load(&executor, scope, FLAGS_model_path);
@@ -183,8 +183,8 @@ TEST(inference, nlp) {
     stop_ms = GetCurrentMs();
   } else {
     // 1. Define place, executor, scope
-    auto place = paddle::platform::CPUPlace();
-    auto executor = paddle::framework::Executor(place);
+    paddle::platform::CPUPlace place;
+    paddle::framework::Executor executor(place);
 
     // 2. Initialize the inference_program and load parameters
     std::unique_ptr<paddle::framework::ProgramDesc> inference_program;
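
Note on the declaration style: the change from `auto executor = paddle::framework::Executor(place);` to `paddle::framework::Executor executor(place);` is not purely cosmetic. Before C++17, the `auto x = T(...)` form is copy-initialization and requires a usable copy or move constructor even when the compiler elides the actual copy, so it fails to compile for types that disable copying. Direct-initialization carries no such requirement. A minimal standalone sketch of the distinction, using a hypothetical non-copyable `Executor` stand-in (not Paddle's actual class):

    // direct_init_sketch.cc -- illustrates why direct-initialization is
    // preferred for types that may be non-copyable (hypothetical types,
    // not the real paddle::framework ones).
    struct Place {};

    class Executor {
     public:
      explicit Executor(const Place& p) : place_(p) {}
      // Deleting the copy constructor also suppresses the implicit move
      // constructor, so this type can be neither copied nor moved.
      Executor(const Executor&) = delete;
      Executor& operator=(const Executor&) = delete;

     private:
      Place place_;
    };

    int main() {
      Place place;
      Executor executor(place);  // OK: constructed in place, no copy/move needed
      // auto executor2 = Executor(place);  // ill-formed before C++17:
      //                                    // copy-initialization needs a copy
      //                                    // or move constructor
      return 0;
    }

(Under C++17's guaranteed copy elision both spellings compile even for non-movable types, but direct-initialization stays valid under every standard the project might target.)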