diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc
index c18ba049c8f107f17afd7a8e08af6b3657cfd56d..7fc407ebc94edffd7ac0883b91f2dff084363e07 100644
--- a/paddle/framework/executor.cc
+++ b/paddle/framework/executor.cc
@@ -56,9 +56,7 @@ void Executor::Run(const ProgramDesc& pdesc, Scope* scope) {
   auto& block = pdesc.blocks(0);
   auto& device = device_contexts_[0];
 
-  // TODO(tonyyang-svail):
-  //   - runs on a new local scope
-  // Scope& local_scope = scope->NewScope();
+  Scope& local_scope = scope->NewScope();
 
   for (auto& var : block.vars()) {
     scope->NewVar(var.name());
@@ -67,7 +65,7 @@ void Executor::Run(const ProgramDesc& pdesc, Scope* scope) {
   for (auto& op_desc : block.ops()) {
     auto op = paddle::framework::OpRegistry::CreateOp(op_desc);
     std::cout << op->DebugString() << std::endl;
-    op->Run(*scope, *device);
+    op->Run(local_scope, *device);
   }
 
   // TODO(tonyyang-svail): need to test gpu device
diff --git a/paddle/framework/executor_test.cc b/paddle/framework/executor_test.cc
index 82f9bd6f2dd9d2ab6aa324bfa3b36132c8402615..bf6c1dffc135b47a6e2273eb649c37adc601c84b 100644
--- a/paddle/framework/executor_test.cc
+++ b/paddle/framework/executor_test.cc
@@ -131,7 +131,7 @@ template <typename T>
 void set_feed_variable(const std::vector<std::vector<T>>& inputs) {
   typedef std::vector<paddle::framework::Tensor> FeedInputs;
   // Tensors in feed value variable will only be in CPUPlace
-  Variable* g_feed_value = GetScope()->FindVar("feed_value");
+  Variable* g_feed_value = GetGlobalScope()->FindVar("feed_value");
   FeedInputs& feed_inputs = *(g_feed_value->GetMutable<FeedInputs>());
   auto size = inputs.size();
   feed_inputs.resize(size);
@@ -146,7 +146,7 @@ template <typename T>
 std::vector<std::vector<T>> get_fetch_variable() {
   typedef std::vector<paddle::framework::Tensor> FetchOutputs;
   // Tensors in fetch value variable will only be in CPUPlace
-  Variable* g_fetch_value = GetScope()->FindVar("fetch_value");
+  Variable* g_fetch_value = GetGlobalScope()->FindVar("fetch_value");
   FetchOutputs& fetch_outputs = *(g_fetch_value->GetMutable<FetchOutputs>());
 
   auto size = fetch_outputs.size();
@@ -252,7 +252,7 @@ TEST_F(ExecutorTesterRandom, CPU) {
   paddle::memory::Used(cpu_place);
 
   Executor* executor = new Executor(places);
-  executor->Run(pdesc_, GetScope());
+  executor->Run(pdesc_, GetGlobalScope());
   std::vector<std::vector<float>> result = get_fetch_variable<float>();
   for (auto& vec : result) {
     for (auto& num : vec) {
@@ -281,7 +281,7 @@ TEST_F(ExecutorTesterFeed, CPU) {
     // need to set feed variable before Executor::Run
     std::cout << "start mini-batch " << i << std::endl;
     set_feed_variable<float>(inputs_);
-    executor->Run(pdesc_, GetScope());
+    executor->Run(pdesc_, GetGlobalScope());
     std::vector<std::vector<float>> result = get_fetch_variable<float>();
     for (auto& vec : result) {
       for (auto& num : vec) {
@@ -309,7 +309,7 @@ TEST_F(ExecutorTesterRandom, GPU) {
   paddle::memory::Used(gpu_place);
 
   Executor* executor = new Executor(places);
-  executor->Run(pdesc_, GetScope());
+  executor->Run(pdesc_, GetGlobalScope());
 
   delete executor;
 }
@@ -333,7 +333,7 @@ TEST_F(ExecutorTesterFeed, GPU) {
     // need to set feed variable before Executor::Run
     std::cout << "start mini-batch " << i << std::endl;
     set_feed_variable<float>(inputs_);
-    executor->Run(pdesc_, GetScope());
+    executor->Run(pdesc_, GetGlobalScope());
     std::vector<std::vector<float>> result = get_fetch_variable<float>();
     for (auto& vec : result) {
       for (auto& num : vec) {
diff --git a/paddle/framework/scope.cc b/paddle/framework/scope.cc
index b6a9d7fbc29c58fc986b910f9b510f25e2e42ad9..2a0d9bbf334907caf5fb0e4d3d793ef0e23e058f 100644
--- a/paddle/framework/scope.cc
+++ b/paddle/framework/scope.cc
@@ -66,7 +66,7 @@ void Scope::DropKids() {
 
 std::once_flag feed_variable_flag;
 
-framework::Scope* GetScope() {
+framework::Scope* GetGlobalScope() {
   static std::unique_ptr<framework::Scope> g_scope{nullptr};
   std::call_once(feed_variable_flag, [&]() {
     g_scope.reset(new framework::Scope());
diff --git a/paddle/framework/scope.h b/paddle/framework/scope.h
index 96f3ae875b22a5b31ea7683b3140da56ee48aa88..319d291efec95d463f085cb03b1c06c0a637d8d9 100644
--- a/paddle/framework/scope.h
+++ b/paddle/framework/scope.h
@@ -73,7 +73,7 @@ class Scope {
   DISABLE_COPY_AND_ASSIGN(Scope);
 };
 
-framework::Scope* GetScope();
+framework::Scope* GetGlobalScope();
 
 }  // namespace framework
 }  // namespace paddle
diff --git a/paddle/operators/feed_op.cc b/paddle/operators/feed_op.cc
index f2c498e2e2ccb26e29e9d1364ec0aca4fbe105f7..b9e43be9666ecfd7139cd933f3b4666d467f641b 100644
--- a/paddle/operators/feed_op.cc
+++ b/paddle/operators/feed_op.cc
@@ -27,7 +27,7 @@ class FeedOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output should be not null.");
     int col = ctx->Attrs().Get<int>("col");
     framework::Variable* g_feed_variable =
-        framework::GetScope()->FindVar("feed_value");
+        framework::GetGlobalScope()->FindVar("feed_value");
 
     const FeedInputs& tensors = g_feed_variable->Get<FeedInputs>();
 
diff --git a/paddle/operators/feed_op.h b/paddle/operators/feed_op.h
index cf93b6f434582f3453ec49d4ad39f18d7c001f37..de8ec6ff61fbaf427a029b907210892824ac35e7 100644
--- a/paddle/operators/feed_op.h
+++ b/paddle/operators/feed_op.h
@@ -19,17 +19,15 @@ limitations under the License. */
 namespace paddle {
 namespace operators {
 
-using Tensor = framework::Tensor;
-
 template <typename T>
 class FeedKernel : public framework::OpKernel {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
     typedef std::vector<framework::Tensor> FeedInputs;
-    Tensor* out = ctx.Output<Tensor>("Out");
+    framework::Tensor* out = ctx.Output<framework::Tensor>("Out");
     out->mutable_data<T>(ctx.GetPlace());
     framework::Variable* g_feed_variable =
-        framework::GetScope()->FindVar("feed_value");
+        framework::GetGlobalScope()->FindVar("feed_value");
     int col = ctx.template Attr<int>("col");
     const FeedInputs& tensors = g_feed_variable->Get<FeedInputs>();
     out->CopyFrom<T>(tensors[col], ctx.GetPlace());
diff --git a/paddle/operators/fetch_op.cc b/paddle/operators/fetch_op.cc
index 4b6b3ca85ac203e47400f54e985b6f645db1508a..7bde4953cdeed4b4f1cb7d4fcd18acea4968b378 100644
--- a/paddle/operators/fetch_op.cc
+++ b/paddle/operators/fetch_op.cc
@@ -27,7 +27,7 @@ class FetchOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE(ctx->HasInput("Input"), "Input should be not null.");
     int col = ctx->Attrs().Get<int>("col");
     framework::Variable* g_fetch_variable =
-        framework::GetScope()->FindVar("fetch_value");
+        framework::GetGlobalScope()->FindVar("fetch_value");
 
     FetchOutputs* tensors = g_fetch_variable->GetMutable<FetchOutputs>();
     if (tensors->size() < static_cast<size_t>(col + 1)) {
diff --git a/paddle/operators/fetch_op.h b/paddle/operators/fetch_op.h
index e8d5e3a9c00f44f1c774b5aa4525f54cca1bc1b7..3bec9c9974ae811a914964b9953d388996bccc44 100644
--- a/paddle/operators/fetch_op.h
+++ b/paddle/operators/fetch_op.h
@@ -19,17 +19,15 @@ limitations under the License. */
 namespace paddle {
 namespace operators {
 
-using Tensor = framework::Tensor;
-
 template <typename T>
 class FetchKernel : public framework::OpKernel {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
     typedef std::vector<framework::Tensor> FetchOutputs;
-    const Tensor* input = ctx.Input<Tensor>("Input");
+    const framework::Tensor* input = ctx.Input<framework::Tensor>("Input");
     int col = ctx.template Attr<int>("col");
     framework::Variable* g_fetch_variable =
-        framework::GetScope()->FindVar("fetch_value");
+        framework::GetGlobalScope()->FindVar("fetch_value");
     FetchOutputs* tensors = g_fetch_variable->GetMutable<FetchOutputs>();
     (*tensors)[col].mutable_data<T>(platform::CPUPlace());
     (*tensors)[col].CopyFrom<T>(*input, platform::CPUPlace());
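
For orientation, a minimal usage sketch (not part of the patch) of how the renamed framework::GetGlobalScope() and Executor::Run() fit together after this change; the RunProgram helper, the include paths, and the single-tensor feed are illustrative assumptions, not code taken from this diff.

#include <vector>

#include "paddle/framework/executor.h"  // assumed header locations for this sketch
#include "paddle/framework/scope.h"
#include "paddle/framework/tensor.h"

namespace fw = paddle::framework;

// Hypothetical helper: stage one feed tensor in the global scope, then run the
// program. After this change Executor::Run() executes the block's ops inside a
// child scope created via scope->NewScope(), so per-run variables no longer
// live directly in the scope that is passed in.
void RunProgram(const fw::ProgramDesc& pdesc, fw::Executor* executor) {
  typedef std::vector<fw::Tensor> FeedInputs;  // assumed to match feed_op.h

  // Feed/fetch ops resolve "feed_value"/"fetch_value" through GetGlobalScope().
  fw::Variable* feed_var = fw::GetGlobalScope()->FindVar("feed_value");
  FeedInputs& feed_inputs = *(feed_var->GetMutable<FeedInputs>());
  feed_inputs.resize(1);
  // ... copy the mini-batch data into feed_inputs[0] here ...

  executor->Run(pdesc, fw::GetGlobalScope());
}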