diff --git a/paddle/fluid/framework/details/fetch_op_handle.cc b/paddle/fluid/framework/details/fetch_op_handle.cc
index b1c9dd0d15223f7d1bf6ea44144589f1de927e3e..224e8e1f6efd7a894591ac51c929517cae7539ce 100644
--- a/paddle/fluid/framework/details/fetch_op_handle.cc
+++ b/paddle/fluid/framework/details/fetch_op_handle.cc
@@ -48,17 +48,18 @@ void FetchOpHandle::RunImpl() {
   WaitInputVarGenerated(platform::CPUPlace());
 
   tensors_.resize(inputs_.size());
-  auto *var_handle = static_cast<VarHandle *>(inputs_[0]);
-  auto &var_name = var_handle->name_;
   platform::CPUPlace cpu;
   auto &scopes = *local_scopes_;
 
-  for (size_t i = 0; i < scopes.size(); ++i) {
-    auto &scope = scopes[i];
-    auto *var =
-        scope->FindVar(kLocalExecScopeName)->Get<Scope *>()->FindVar(var_name);
+  for (size_t i = 0; i < inputs_.size(); ++i) {
+    auto *var_handle = static_cast<VarHandle *>(inputs_[i]);
+    auto &scope = scopes.at(var_handle->scope_idx_);
+    auto *var = scope->FindVar(kLocalExecScopeName)
+                    ->Get<Scope *>()
+                    ->FindVar(var_handle->name_);
     PADDLE_ENFORCE_NOT_NULL(var, "Cannot find variable %s in execution scope",
-                            var_name);
+                            var_handle->name_);
+
     auto &t = var->Get<framework::LoDTensor>();
     if (platform::is_gpu_place(t.place())) {
 #ifdef PADDLE_WITH_CUDA
diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor.py b/python/paddle/fluid/tests/unittests/test_parallel_executor.py
index 6dc016487fd81a9292f94042a20b7356bc50abe1..056f9e1781997aa1586d972874b652d5b725fe3f 100644
--- a/python/paddle/fluid/tests/unittests/test_parallel_executor.py
+++ b/python/paddle/fluid/tests/unittests/test_parallel_executor.py
@@ -775,7 +775,7 @@ class TestCRFModel(unittest.TestCase):
         build_strategy = fluid.BuildStrategy()
         build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce
         self.check_network_convergence(
-            is_sparse=False, build_strategy=build_strategy)
+            is_sparse=True, build_strategy=build_strategy)
 
     def test_update_dense_parameter_reduce(self):
         build_strategy = fluid.BuildStrategy()
@@ -849,8 +849,7 @@ class TestFetchOp(unittest.TestCase):
             assert not math.isnan(np.sum(ret[i])) and \
                    not math.isinf(np.sum(ret[i]))
 
-    @unittest.skip("this test is buggy")
-    def test_feed(self):
+    def test_fetch_op(self):
         tst_reader = paddle.batch(flowers.test(use_xmap=False), batch_size=16)
         tst_reader_iter = tst_reader()
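
Note: the renamed test drives the multi-variable fetch path that the C++ change fixes. Each fetch input is now resolved in the local scope recorded on its own VarHandle (scope_idx_) instead of reusing the name of inputs_[0] across every local scope. Below is a minimal sketch of the usage pattern the test relies on, assuming a program already built under fluid.program_guard with a loss op, a DataFeeder named feeder, and a data batch named batch; these names are illustrative and not part of the patch.

    import numpy as np
    import paddle.fluid as fluid

    # Illustrative sketch; `loss`, `feeder`, and `batch` are assumed to have been
    # built beforehand, the same way TestFetchOp.parallel_exe() builds them.
    pe = fluid.ParallelExecutor(use_cuda=True, loss_name=loss.name)

    # Fetch several distinct variables in a single run(); with the fix, every
    # FetchOp input is looked up in the scope indexed by its own scope_idx_.
    block = fluid.default_main_program().global_block()
    fetch_list = [p.name for p in block.all_parameters()]
    fetch_list.append(loss.name)

    ret = pe.run(fetch_list, feed=feeder.feed(batch))
    for value in ret:
        assert np.isfinite(np.sum(np.array(value)))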