#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import core
import multiprocessing
import framework
import executor
import warnings

__all__ = ['ParallelExecutor']


class ParallelExecutor(object):
    def __init__(self,
                 use_cuda,
                 loss_name=None,
                 main_program=None,
                 num_threads=None,
                 allow_op_delay=False,
                 share_vars_from=None,
                 use_default_grad_scale=True,
                 num_nodes=0,
                 trainer_id=0):
        """
        ParallelExecutor can run program in parallel.

        Args:
            use_cuda(bool): Whether to use CUDA or not.
            loss_name(str, default None): The loss name must set in training.
            main_program(Program, default None): The program that need to run,
                if not provided, then default_main_program will be used.
            num_threads(int, default None): How many threads are used for
                training.
            allow_op_delay(bool, default False): Whether to delay and buffer
                some operators together for scheduling or not, which may
C
chengduoZH 已提交
48
                improve performance in some cases, default False.
            share_vars_from(ParallelExecutor, default None): If provided,
                it will share variables from the specified ParallelExecutor.
            use_default_grad_scale(bool, default True): If set to True, a
                default scale value equal to `1./device_count` will be
                multiplied with the gradients of each device, and the scaled
                gradients will be aggregated. Otherwise, a customized scale
                value should be fed to the network.
            num_nodes(int, default 0): If greater than 0, NCCL will be
                initialized across multiple nodes, and each node should have
                the same number of GPUs. Distributed training will then be
                enabled.
            trainer_id(int, default 0): Must be used together with num_nodes.
                It is the rank of the current node, starting from 0.

        Returns:
            A ParallelExecutor object.

        Raises:
            TypeError: If share_vars_from is provided but is not a
                ParallelExecutor object.

        Examples:
            .. code-block:: python

              train_exe = fluid.ParallelExecutor(
                  use_cuda=True, loss_name=loss.name)
              test_exe = fluid.ParallelExecutor(
                  use_cuda=True,
                  main_program=test_program,
                  share_vars_from=train_exe)

              train_loss, = train_exe.run([loss.name], feed=feed_dict)
              test_loss, = test_exe.run([loss.name], feed=feed_dict)
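
              # A sketch of a distributed setup (the concrete values are
              # illustrative): with num_nodes > 0, NCCL is initialized
              # across nodes and trainer_id gives this node's rank.
              dist_exe = fluid.ParallelExecutor(
                  use_cuda=True,
                  loss_name=loss.name,
                  num_nodes=2,
                  trainer_id=0)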
        """

        self._places = []
        self._act_places = []
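        # _act_places holds the concrete CUDAPlace/CPUPlace objects, while
        # _places holds the generic core.Place wrappers handed to the C++
        # executor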
        if use_cuda:
            for i in xrange(core.get_cuda_device_count()):
                p = core.Place()
                self._act_places.append(core.CUDAPlace(i))
                p.set_place(self._act_places[-1])
                self._places.append(p)
        else:
            for i in xrange(multiprocessing.cpu_count()):
                p = core.Place()
                self._act_places.append(core.CPUPlace())
                p.set_place(self._act_places[-1])
                self._places.append(p)
        assert self._places, "no place for execution"

        if num_threads is None:
            if use_cuda:
                # Experiments on se-resnext show that too many threads hurt
                # performance. Worth tuning for other models in the future.
                num_threads = len(self._places) * 2
            else:
                num_threads = min(
                    len(self._places) * 2, multiprocessing.cpu_count())

        main = main_program or framework.default_main_program()
        scope = executor.global_scope()

        if share_vars_from and not isinstance(share_vars_from,
                                              ParallelExecutor):
            raise TypeError("share_vars_from must be ParallelExecutor.")
        local_scopes = share_vars_from.executor.local_scopes() \
            if share_vars_from else []

        self.persistable_vars = [
            v.name for v in main.list_vars()
            if v.persistable and v.type != core.VarDesc.VarType.RAW
        ]

        self.executor = core.ParallelExecutor(
            num_threads,
            use_cuda,  # use_event
            self._places,
            set([
                p.name for p in main.global_block().iter_parameters()
                if not p.stop_gradient
            ]),
            set(self.persistable_vars),
            main.desc,
            loss_name if loss_name else '',
            scope,
            local_scopes,
            allow_op_delay,
            use_default_grad_scale,
            num_nodes,
            trainer_id)
        self.scope = scope

    def run(self, fetch_list, feed=None, feed_dict=None):
        """
        Run the parallel executor with fetch_list.

        The feed parameter can be a dict or a list. If feed is a dict, the
        feed data will be split onto multiple devices. If feed is a list, we
        assume the data has already been split onto multiple devices, and
        each element in the list will be copied to its device directly.

        For example, if the feed is a dict:
        >>> exe = ParallelExecutor()
        >>> # the image will be split across the devices. If there are two
        >>> # devices, each device will process an image with shape (24, 1, 28, 28)
        >>> exe.run(feed={'image': numpy.random.random(size=(48, 1, 28, 28))})

        For example, if the feed is a list:
        >>> exe = ParallelExecutor()
        >>> # each device will process each element in the list.
        >>> # the 1st device will process an image with shape (48, 1, 28, 28)
        >>> # the 2nd device will process an image with shape (32, 1, 28, 28)
        >>> #
        >>> # you can use exe.device_count to get the number of devices.
        >>> exe.run(feed=[{"image": numpy.random.random(size=(48, 1, 28, 28))},
        >>>               {"image": numpy.random.random(size=(32, 1, 28, 28))},
        >>>              ])


        Args:
            fetch_list(list): The fetched variable names
            feed(list|dict|None): The feed variables. If the feed is a dict,
                tensors in that dict will be split onto each device. If the
                feed is a list, each element of the list will be copied
                directly to its corresponding device.
            feed_dict: Alias for feed parameter, for backward compatibility.
                This parameter is deprecated.

        Returns: fetched result list.

        """
        if feed is None and feed_dict is not None:
            feed = feed_dict
            warnings.warn(
                "The `feed_dict` parameter is deprecated. Please use `feed=` instead.",
                DeprecationWarning)

        if isinstance(feed, dict):
            feed_tensor_dict = dict()
            for feed_name in feed:
                feed_tensor = feed[feed_name]
                if not isinstance(feed_tensor, core.LoDTensor):
                    feed_tensor = core.LoDTensor()
                    # always set to CPU place, since the tensor needs to be
                    # split, and splitting is fast on CPU
                    feed_tensor.set(feed[feed_name], core.CPUPlace())
                feed_tensor_dict[feed_name] = feed_tensor

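            # feed_and_split_tensor_into_local_scopes splits each tensor
            # across the devices and places the pieces into the per-device
            # local scopes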
            self.executor.feed_and_split_tensor_into_local_scopes(
                feed_tensor_dict)
        elif isinstance(feed, (list, tuple)):
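            # the feed is assumed to be pre-split: element i of the list
            # holds the tensors for device i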
            if len(feed) != len(self._act_places):
                raise ValueError(
                    "If feed is a list, it should have the same length as the places"
                )

            res = list()

            for i, each in enumerate(feed):
                if not isinstance(each, dict):
                    raise TypeError(
                        "Each element of feed list should be a dict")
                res_dict = dict()
                for feed_name in each:
                    tensor = each[feed_name]
                    if not isinstance(tensor, core.LoDTensor):
                        tmp = core.LoDTensor()
                        tmp.set(tensor, self._act_places[i])
                        tensor = tmp
                    res_dict[feed_name] = tensor
                res.append(res_dict)
            self.executor.feed_tensors_into_local_scopes(res)

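        # fetch results are gathered into a single LoDTensorArray variable
        # in the scope, one tensor per fetched name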
        fetch_var_name = '@FETCHED_VAR_NAME@'
        self.executor.run(fetch_list, fetch_var_name)
        arr = self.scope.find_var(fetch_var_name).get_lod_tensor_array()
        return [arr[i] for i in range(len(arr))]

    def bcast_params(self):
        """Broadcast the persistable variables to all devices."""
        self.executor.bcast_params(set(self.persistable_vars))

    @property
    def device_count(self):
        """The number of devices (places) the executor runs on."""
        return len(self._act_places)