diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec
index 6479e30f6ab1ab23ea0cbc076134e0112aa158c2..20817d52bf723754f71cfb20b9e1437bfd76fee4 100644
--- a/paddle/fluid/API.spec
+++ b/paddle/fluid/API.spec
@@ -526,7 +526,6 @@ paddle.reader.buffered (ArgSpec(args=['reader', 'size'], varargs=None, keywords=
 paddle.reader.compose (ArgSpec(args=[], varargs='readers', keywords='kwargs', defaults=None), ('document', '884291104e1c3f37f33aae44b7deeb0d'))
 paddle.reader.chain (ArgSpec(args=[], varargs='readers', keywords=None, defaults=None), ('document', 'd22c34e379a53901ae67a6bca7f4def4'))
 paddle.reader.shuffle (ArgSpec(args=['reader', 'buf_size'], varargs=None, keywords=None, defaults=None), ('document', 'e42ea6fee23ce26b23cb142cd1d6522d'))
-paddle.reader.ComposeNotAligned.__init__
 paddle.reader.firstn (ArgSpec(args=['reader', 'n'], varargs=None, keywords=None, defaults=None), ('document', 'c5bb8f7dd4f917f1569a368aab5b8aad'))
 paddle.reader.xmap_readers (ArgSpec(args=['mapper', 'reader', 'process_num', 'buffer_size', 'order'], varargs=None, keywords=None, defaults=(False,)), ('document', '283bc0b8a0e26ae186b8b9bee4aec560'))
 paddle.reader.PipeReader.__init__ (ArgSpec(args=['self', 'command', 'bufsize', 'file_type'], varargs=None, keywords=None, defaults=(8192, 'plain')), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
diff --git a/python/paddle/fluid/compiler.py b/python/paddle/fluid/compiler.py
index 504bfb4103b08ee9f35c8facf60b3698136649e1..86155180094c848b82fbe2829947bfbdb4d4d3fb 100644
--- a/python/paddle/fluid/compiler.py
+++ b/python/paddle/fluid/compiler.py
@@ -149,8 +149,7 @@ class CompiledProgram(object):
             self._places = [_place_obj(p) for p in places]
         else:
             self._places = None
-        self._build_strategy.is_distribution = framework.is_pserver_mode(
-            self._program)
+        self._build_strategy.is_distribution = _is_pserver_mode(self._program)
         return self

     def with_inference_optimize(self, config):
@@ -191,7 +190,7 @@ class CompiledProgram(object):
             assert scope is not None, ""
             self._local_scopes = []

-        self._exec_strategy.use_cuda = isinstance(self._place, core.CUDAPlace)
+        self._exec_strategy.use_cuda = use_cuda
         has_set_place = (self._places is not None)
         if has_set_place:
             desire_place = _place_obj(self._place)
diff --git a/python/paddle/fluid/parallel_executor.py b/python/paddle/fluid/parallel_executor.py
index 517418da1cf2f745ee5578e3c2b118394db7fae7..6702fc808b121d80fe555412e2cc7f673d6d8389 100644
--- a/python/paddle/fluid/parallel_executor.py
+++ b/python/paddle/fluid/parallel_executor.py
@@ -99,7 +99,8 @@ class ParallelExecutor(object):
         build_strategy.num_trainers = num_trainers
         build_strategy.trainer_id = trainer_id

-        self._places = compiler.get_available_places(use_cuda)
+        self._places = framework.cuda_places(
+        ) if use_cuda else framework.cpu_places()
         self._scope = scope if scope is not None else executor.global_scope()

         main_program = main_program if main_program is not None \
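
For reference, a minimal sketch (not part of the patch) of the device-list selection that the `parallel_executor.py` hunk switches to. It assumes a Paddle 1.x `paddle.fluid` install; the `use_cuda` flag here is an illustrative stand-in for the constructor argument.

```python
# Minimal sketch, assuming a Paddle 1.x `paddle.fluid` install.
# It mirrors the expression introduced in parallel_executor.py:
# take every visible GPU when use_cuda is True, otherwise build a
# list of CPUPlaces (cpu_places() typically honors the CPU_NUM env var).
import paddle.fluid as fluid

use_cuda = False  # illustrative flag; in the patch it is the ctor argument

places = fluid.cuda_places() if use_cuda else fluid.cpu_places()
print(places)  # e.g. [CPUPlace, ...] or [CUDAPlace(0), CUDAPlace(1), ...]
```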