#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function
import multiprocessing
from . import core
from . import framework
from . import executor
from .. import compat as cpt
import warnings
import sys
import six
import os

__all__ = ['ParallelExecutor', 'ExecutionStrategy', 'BuildStrategy']

ExecutionStrategy = core.ParallelExecutor.ExecutionStrategy
BuildStrategy = core.ParallelExecutor.BuildStrategy


class ParallelExecutor(object):
    """
    ParallelExecutor can run a program in parallel across multiple devices.

    Args:
        use_cuda (bool): Whether to use CUDA or not.
        loss_name (str): The loss name, which must be set during training.
            Default None.
        main_program (Program): The program that needs to be run; if not
            provided, default_main_program will be used. Default None.
        share_vars_from(ParallelExecutor): If provided, it will share variables
            from the specified ParallelExecutor. Default None.
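        exec_strategy(ExecutionStrategy): Execution options such as the number
            of threads to use; if None, a default ExecutionStrategy is created.
            Default None.
        build_strategy(BuildStrategy): Options that control how the parallel
            graph is built; if None, a default BuildStrategy is created.
            Default None.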
        num_trainers(int): If greater than 1, NCCL will be initialized with
            multiple ranks of nodes; each node should have the same number of
            GPUs. Distributed training will then be enabled. Default 1.
        trainer_id(int): Must be used together with num_trainers. trainer_id is
            the "rank" of the current node, starting from 0. Default 0.
        scope(Scope): The scope to run with; defaults to fluid.global_scope().

    Returns:
        ParallelExecutor: The initialized ParallelExecutor object.

    Raises:
        TypeError: If share_vars_from is provided but is not a ParallelExecutor
            object.

    Examples:
        .. code-block:: python

          train_exe = fluid.ParallelExecutor(use_cuda=True, loss_name=loss.name)
          test_exe = fluid.ParallelExecutor(use_cuda=True,
                                            main_program=test_program,
                                            share_vars_from=train_exe)

          train_loss, = train_exe.run([loss.name], feed=feed_dict)
          test_loss, = test_exe.run([loss.name], feed=feed_dict)
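
          # A supplementary sketch (not from the original docs): the strategy
          # objects exported by this module can be configured and passed in
          # through exec_strategy/build_strategy.
          exec_strategy = fluid.ExecutionStrategy()
          exec_strategy.num_threads = 4
          build_strategy = fluid.BuildStrategy()
          tuned_exe = fluid.ParallelExecutor(
              use_cuda=True,
              loss_name=loss.name,
              exec_strategy=exec_strategy,
              build_strategy=build_strategy)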
    """

    def __init__(self,
                 use_cuda,
                 loss_name=None,
                 main_program=None,
                 share_vars_from=None,
                 exec_strategy=None,
                 build_strategy=None,
                 num_trainers=1,
                 trainer_id=0,
                 scope=None,
                 **kwargs):
        if len(kwargs) != 0:
            err_msg = ""
            for key in kwargs:
                if key in dir(ExecutionStrategy):
                    err_msg += \
                        "Setting {0} by constructor is deprecated. Use " \
                        "strategy=ExecutionStrategy(); strategy.{0}=xxx; " \
                        "pe=ParallelExecutor(exec_strategy=strategy) " \
                        "instead.\n ".format(key)
                elif key in dir(BuildStrategy):
                    err_msg += \
                        "Setting {0} by constructor is deprecated. Use " \
                        "strategy=BuildStrategy(); See help(" \
                        "paddle.fluid.ParallelExecutor.BuildStrategy) \n".format(
                            key)
                else:
                    err_msg += "Setting {0} by constructor is deprecated. Use strategy.\n".format(
                        key)
            raise ValueError(err_msg)

        self._places = []
        self._act_places = []
        if use_cuda:
            for i in six.moves.range(core.get_cuda_device_count()):
                p = core.Place()
                self._act_places.append(core.CUDAPlace(i))
                p.set_place(self._act_places[-1])
                self._places.append(p)
        else:
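            # CPU execution: create one place per core, honoring the CPU_NUM
            # environment variable when set, otherwise the host's CPU count.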
            cpu_num = int(
                os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
            for i in six.moves.range(cpu_num):
                p = core.Place()
                self._act_places.append(core.CPUPlace())
                p.set_place(self._act_places[-1])
                self._places.append(p)
        assert self._places, "no place for execution"

        if exec_strategy is None:
            exec_strategy = ExecutionStrategy()
        exec_strategy.use_cuda = use_cuda

        if exec_strategy.num_threads == 0:
            if use_cuda:
                # Experiments on se-resnext show that too many threads hurt
                # performance. Worth tuning for other models in the future.
                exec_strategy.num_threads = len(self._places) * 4
            else:
                cpu_num = int(
                    os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
                exec_strategy.num_threads = cpu_num * 2

        # Set the number of threads to 1 under the nccl2 distributed
        # environment to make sure all GPUs run ops in the same order.
        if num_trainers > 1:
            assert (use_cuda)
            # FIXME(gongwb): avoid this set.
            exec_strategy.num_threads = 1

        if build_strategy is None:
            build_strategy = BuildStrategy()

        main = main_program
        main = main if main else framework.default_main_program()
        if scope is None:
            scope = executor.global_scope()

        if share_vars_from and not isinstance(share_vars_from,
                                              ParallelExecutor):
            raise TypeError("share_vars_from must be ParallelExecutor.")

        local_scopes = share_vars_from.executor.local_scopes(
        ) if share_vars_from else []

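        # Collect the names of persistable variables (excluding RAW-type
        # variables) from the main program; they are passed to the C++
        # ParallelExecutor below.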
        self.persistable_vars = [
            v.name for v in [
                var for var in main.list_vars()
                if var.persistable and var.type != core.VarDesc.VarType.RAW
            ]
        ]

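        # Build the underlying C++ ParallelExecutor with the device places, the
        # names of parameters that require gradients, and the persistable
        # variable names.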
        self.executor = core.ParallelExecutor(
            self._places,
            set([
                cpt.to_text(p.name)
                for p in main.global_block().iter_parameters()
                if not p.stop_gradient
            ]),
            set(cpt.to_text(var) for var in self.persistable_vars), main.desc,
            cpt.to_text(loss_name)
            if loss_name else six.u(''), scope, local_scopes, exec_strategy,
            build_strategy, num_trainers, trainer_id)
        self.scope = scope

    def run(self, fetch_list, feed=None, feed_dict=None, return_numpy=True):
        """
        Run a parallel executor with fetch_list.

        The feed parameter can be a dict or a list. If feed is a dict, the
        feed data will be split across multiple devices. If feed is a list, we
        assume the data has already been split across multiple devices, and
        each element in the list will be copied to a device directly.

        For example, if the feed is a dict:

        >>> exe = ParallelExecutor()
        >>> # the image will be split across devices. If there are two devices,
        >>> # each device will process an image with shape (24, 1, 28, 28)
        >>> exe.run(feed={'image': numpy.random.random(size=(48, 1, 28, 28))})

        For example, if the feed is a list:

        >>> exe = ParallelExecutor()
        >>> # each device will process each element in the list.
        >>> # the 1st device will process an image with shape (48, 1, 28, 28)
        >>> # the 2nd device will process an image with shape (32, 1, 28, 28)
        >>> #
        >>> # you can use exe.device_count to get the number of devices.
        >>> exe.run(feed=[{"image": numpy.random.random(size=(48, 1, 28, 28))},
        >>>               {"image": numpy.random.random(size=(32, 1, 28, 28))},
        >>>              ])

        Args:
            fetch_list(list): The fetched variable names
            feed(list|dict|None): The feed variables. If the feed is a dict,
                tensors in that dict will be split across the devices. If
                the feed is a list, each element of the list will be copied
                to the corresponding device. Default None.
            feed_dict: Alias for feed parameter, for backward compatibility.
                This parameter has been deprecated. Default None.
            return_numpy(bool): Whether to convert the fetched tensors to numpy
                arrays. Default: True.

        Returns:
            List: The fetched result list.

        Raises:
            ValueError: If the feed is a list but its length is not equal to the
                number of active places, or if any of its elements is not a dict.

        NOTES:
            1. If the feed is a dict, the number of samples it feeds to
               ParallelExecutor must be bigger than the number of active places.
               Otherwise, an exception will be thrown from the C++ side. Pay
               special attention to whether the last batch of the dataset is
               bigger than the number of active places.
            2. If there is more than one active place, the fetch result for each
               variable is a list, and each element of this list is the result
               from the respective active place.

        Examples:
            .. code-block:: python

                pe = fluid.ParallelExecutor(use_cuda=use_cuda,
                                            loss_name=avg_cost.name,
                                            main_program=fluid.default_main_program())
                loss = pe.run(feed=feeder.feed(cur_batch),
                              fetch_list=[avg_cost.name])
        """
        if feed is None and feed_dict is not None:
            feed = feed_dict
            print(
                "`feed_dict` is deprecated. Please use `feed=`",
                file=sys.stderr)

        if isinstance(feed, dict):
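            # feed is a single dict: convert entries to LoDTensors on the CPU
            # and let the executor split them across the local scopes.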
            feed_tensor_dict = dict()
            for feed_name in feed:
                feed_tensor = feed[feed_name]
                if not isinstance(feed_tensor, core.LoDTensor):
                    feed_tensor = core.LoDTensor()
                    # always set to CPU place, since the tensor needs to be
                    # split, and splitting is fast on CPU
                    feed_tensor.set(feed[feed_name], core.CPUPlace())
                feed_tensor_dict[feed_name] = feed_tensor

            self.executor.feed_and_split_tensor_into_local_scopes(
                feed_tensor_dict)
        elif isinstance(feed, list) or isinstance(feed, tuple):
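            # feed is a per-device list: each element must be a dict whose
            # tensors are copied directly to the corresponding place.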
            if len(feed) != len(self._act_places):
                raise ValueError(
                    "Feed a list of tensors; the list should be the same "
                    "size as the number of places")

            res = list()

            for i, each in enumerate(feed):
                if not isinstance(each, dict):
                    raise TypeError(
                        "Each element of feed list should be a dict")
                res_dict = dict()
                for feed_name in each:
                    tensor = each[feed_name]
                    if not isinstance(tensor, core.LoDTensor):
                        tmp = core.LoDTensor()
                        tmp.set(tensor, self._act_places[i])
                        tensor = tmp
                    res_dict[feed_name] = tensor
                res.append(res_dict)
            self.executor.feed_tensors_into_local_scopes(res)

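        # Run the executor; fetched results are gathered into a LoDTensorArray
        # stored in the scope under a temporary variable name.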
        fetch_var_name = '@FETCHED_VAR_NAME@'
        self.executor.run(fetch_list, fetch_var_name)
        arr = self.scope.find_var(fetch_var_name).get_lod_tensor_array()

        if return_numpy:
            return executor.as_numpy(arr)

        return [arr[i] for i in range(len(arr))]

    @property
    def device_count(self):
        return len(self._act_places)