#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import core
import multiprocessing
import framework
import executor
import warnings

__all__ = ['ParallelExecutor']


class ParallelExecutor(object):
    def __init__(self,
                 use_cuda,
                 loss_name=None,
                 main_program=None,
                 num_threads=None,
                 allow_op_delay=False,
                 share_vars_from=None,
                 use_default_grad_scale=True,
                 balance_parameter_opt_between_cards=False,
                 num_trainers=1,
                 trainer_id=0):
        """
        ParallelExecutor can run program in parallel.

        Args:
            use_cuda(bool): Whether to use CUDA or not.
            loss_name(str, default None): The loss name, which must be set
                during training.
            main_program(Program, default None): The program that needs to be
                run; if not provided, default_main_program will be used.
            num_threads(int, default None): How many threads are used for
                training.
            allow_op_delay(bool, default False): Whether to delay and buffer
                some operators together for scheduling or not, which may
                improve performance in some cases, default False.
            share_vars_from(ParallelExecutor, default None): If provided,
                it will share variables from the specified ParallelExecutor.
            use_default_grad_scale(bool, default True): If set True, a default
                scale value equal to `1./device_count` would be multiplied to
                gradients of each device and scaled gradients would be
                aggregated. Otherwise, a customized scale value should be fed
                to the network.
            balance_parameter_opt_between_cards(bool, default False): Whether
                to update the gradients of different parameters on different
                cards. Currently, it is not recommended.
            num_trainers(int, default 1): If greater than 1, NCCL will be
                initialized with multiple ranks, and each node should have the
                same number of GPUs. Distributed training will then be
                enabled.
            trainer_id(int, default 0): Must be used together with num_trainers.
                trainer_id is the "rank" of the current node and starts from 0.

        Returns:
            A ParallelExecutor object.

        Raises:
            TypeError: If share_vars_from is provided but is not a
                ParallelExecutor object.

        Examples:
            .. code-block:: python

              train_exe = fluid.ParallelExecutor(
                  use_cuda=True, loss_name=loss.name)
              test_exe = fluid.ParallelExecutor(
                  use_cuda=True,
                  main_program=test_program,
                  share_vars_from=train_exe)

              train_loss, = train_exe.run([loss.name], feed=feed_dict)
              test_loss, = test_exe.run([loss.name], feed=feed_dict)
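
            For multi-node training, also set `num_trainers` and
            `trainer_id`. A sketch (it assumes two nodes with the same
            number of GPUs and a working NCCL setup):

            .. code-block:: python

              dist_exe = fluid.ParallelExecutor(
                  use_cuda=True,
                  loss_name=loss.name,
                  num_trainers=2,  # total number of nodes
                  trainer_id=0)    # rank of this node, starting from 0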
        """

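        # self._places holds type-erased core.Place objects handed to the C++
        # executor; self._act_places keeps the concrete CUDAPlace/CPUPlace
        # objects used when feeding per-device tensors.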
        self._places = []
        self._act_places = []
        if use_cuda:
            for i in xrange(core.get_cuda_device_count()):
                p = core.Place()
                self._act_places.append(core.CUDAPlace(i))
                p.set_place(self._act_places[-1])
                self._places.append(p)
        else:
            for i in xrange(multiprocessing.cpu_count()):
                p = core.Place()
                self._act_places.append(core.CPUPlace())
                p.set_place(self._act_places[-1])
                self._places.append(p)
        assert self._places, "no place for execution"

        if num_threads is None:
            if use_cuda:
                # Experiments on se-resnext show that too many threads hurt
                # performance. Worth tuning for other models in the future.
                num_threads = len(self._places) * 2
            else:
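                # len(self._places) equals cpu_count() here, so this
                # effectively resolves to cpu_count() threads.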
                num_threads = min(
                    len(self._places) * 2, multiprocessing.cpu_count())

        main = main_program or framework.default_main_program()
        scope = executor.global_scope()

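        # If another ParallelExecutor supplies variables (e.g. a test
        # executor reusing a train executor's parameters), reuse its
        # per-device local scopes so both see the same variables.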
        if share_vars_from and not isinstance(share_vars_from,
                                              ParallelExecutor):
            raise TypeError("share_vars_from must be ParallelExecutor.")
        local_scopes = share_vars_from.executor.local_scopes(
        ) if share_vars_from else []

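        # Persistable variables (e.g. parameters), excluding RAW internals;
        # bcast_params() broadcasts exactly this set across devices.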
        self.persistable_vars = [
            v.name
            for v in filter(
                lambda var: var.persistable and var.type != core.VarDesc.VarType.RAW,
                main.list_vars())
        ]

        self.executor = core.ParallelExecutor(
            num_threads,
            bool(use_cuda),  # use_event
            self._places,
            set([
                p.name for p in main.global_block().iter_parameters()
                if not p.stop_gradient
            ]),
            set(self.persistable_vars),
            main.desc,
            loss_name if loss_name else '',
            scope,
            local_scopes,
            allow_op_delay,
            use_default_grad_scale,
            balance_parameter_opt_between_cards,
            num_trainers,
            trainer_id)
        self.scope = scope

    def run(self, fetch_list, feed=None, feed_dict=None):
        """
        Run a parallel executor with fetch_list.

        The feed parameter can be a dict or a list. If feed is a dict, the
        feed data will be split into multiple devices. If feed is a list, we
        assume the data has already been split into multiple devices, and each
        element in the list will be copied to its device directly.

        For example, if the feed is a dict:
        >>> exe = ParallelExecutor()
        >>> # the image will be split across the devices. If there are two devices,
        >>> # each device will process an image with shape (24, 1, 28, 28)
        >>> exe.run(feed={'image': numpy.random.random(size=(48, 1, 28, 28))})

        For example, if the feed is a list:
        >>> exe = ParallelExecutor()
        >>> # each device will process each element in the list.
        >>> # the 1st device will process an image with shape (48, 1, 28, 28)
        >>> # the 2nd device will process an image with shape (32, 1, 28, 28)
        >>> #
        >>> # you can use exe.device_count to get the device number.
        >>> exe.run(feed=[{"image": numpy.random.random(size=(48, 1, 28, 28))},
        >>>               {"image": numpy.random.random(size=(32, 1, 28, 28))},
        >>>              ])
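
        Multiple variables can be fetched in a single call. A sketch,
        assuming `loss` and `acc` are variables built elsewhere in the
        network and `feed_data` is a dict prepared as above:
        >>> loss_val, acc_val = exe.run(fetch_list=[loss.name, acc.name],
        >>>                             feed=feed_data)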

        Args:
            fetch_list(list): The fetched variable names
            feed(list|dict|None): The feed variables. If the feed is a dict,
                tensors in that dict will be split across the devices. If the
                feed is a list, each element of the list will be copied to
                its corresponding device.
            feed_dict: Alias for feed parameter, for backward compatibility.
                This parameter is deprecated.

        Returns: fetched result list.

        """
        if feed is None and feed_dict is not None:
            feed = feed_dict
            warnings.warn("`feed_dict` is deprecated. Please use `feed=`")

        if isinstance(feed, dict):
            feed_tensor_dict = dict()
            for feed_name in feed:
                feed_tensor = feed[feed_name]
                if not isinstance(feed_tensor, core.LoDTensor):
                    feed_tensor = core.LoDTensor()
                    # always set to CPU place, since the tensor needs to be
                    # split, and splitting is fast on CPU
                    feed_tensor.set(feed[feed_name], core.CPUPlace())
                feed_tensor_dict[feed_name] = feed_tensor

            self.executor.feed_and_split_tensor_into_local_scopes(
                feed_tensor_dict)
        elif isinstance(feed, (list, tuple)):
            if len(feed) != len(self._act_places):
                raise ValueError(
                    "When feed is a list, it should have the same length "
                    "as the number of places")

            res = list()

            for i, each in enumerate(feed):
                if not isinstance(each, dict):
                    raise TypeError(
                        "Each element of feed list should be a dict")
                res_dict = dict()
                for feed_name in each:
                    tensor = each[feed_name]
                    if not isinstance(tensor, core.LoDTensor):
                        tmp = core.LoDTensor()
                        tmp.set(tensor, self._act_places[i])
                        tensor = tmp
                    res_dict[feed_name] = tensor
                res.append(res_dict)
            self.executor.feed_tensors_into_local_scopes(res)

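        # All fetched variables are packed into a single LoDTensorArray under
        # a temporary variable in the global scope, then unpacked into a
        # Python list below.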
        fetch_var_name = '@FETCHED_VAR_NAME@'
        self.executor.run(fetch_list, fetch_var_name)
        arr = self.scope.find_var(fetch_var_name).get_lod_tensor_array()
        return [arr[i] for i in range(len(arr))]

    def bcast_params(self):
        """
        Broadcast this executor's persistable variables (e.g. parameters)
        across all devices.
        """
        self.executor.bcast_params(set(self.persistable_vars))

    @property
    def device_count(self):
        """The number of devices (places) this executor runs on."""
        return len(self._act_places)