diff --git a/python/paddle/hapi/model.py b/python/paddle/hapi/model.py
index 99e8acd2b0b934d2fd666472a8a6fa79c29277f6..137ca186d7946a426b263b6b902e101be4744135 100644
--- a/python/paddle/hapi/model.py
+++ b/python/paddle/hapi/model.py
@@ -621,6 +621,7 @@ class DynamicGraphAdapter(object):
 
         self._input_info = None
         if self._nranks > 1:
+            dist.init_parallel_env()
             stradegy = fluid.dygraph.parallel.ParallelStrategy()
             stradegy.nranks = ParallelEnv().nranks
             stradegy.local_rank = ParallelEnv().local_rank
@@ -888,7 +889,6 @@ class Model(object):
 
         # init backend
         if fluid.in_dygraph_mode():
-            dist.init_parallel_env()
             self._adapter = DynamicGraphAdapter(self)
         else:
             self._adapter = StaticGraphAdapter(self)
@@ -943,6 +943,7 @@ class Model(object):
             self._update_inputs()
         return loss
 
+    @paddle.no_grad()
     def eval_batch(self, inputs, labels=None):
         """
         Run one evaluating step on a batch of data.
@@ -994,6 +995,7 @@ class Model(object):
             self._update_inputs()
         return loss
 
+    @paddle.no_grad()
     def predict_batch(self, inputs):
         """
         Run one predicting step on a batch of data.
diff --git a/python/paddle/tests/dist_hapi_mnist_dynamic.py b/python/paddle/tests/dist_hapi_mnist_dynamic.py
index 46d02789402b22263cfbd8cbdfeb6d66a5de900d..eab34a6dafbc354a24aa51e93a9fec9efc3b3cee 100644
--- a/python/paddle/tests/dist_hapi_mnist_dynamic.py
+++ b/python/paddle/tests/dist_hapi_mnist_dynamic.py
@@ -61,7 +61,6 @@ class TestDistTraning(unittest.TestCase):
 
     def test_static_multiple_gpus(self):
         device = set_device('gpu')
-        fluid.enable_dygraph(device)
 
         im_shape = (-1, 1, 28, 28)
         batch_size = 128