#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function
import multiprocessing
from . import core
from . import framework
from . import executor
from .. import compat as cpt
import warnings
import sys
import six
import os

__all__ = ['ParallelExecutor', 'ExecutionStrategy', 'BuildStrategy']

ExecutionStrategy = core.ParallelExecutor.ExecutionStrategy
BuildStrategy = core.ParallelExecutor.BuildStrategy


class ParallelExecutor(object):
    """
    ParallelExecutor is designed for data parallelism: the input data is
    distributed across several nodes, and every node runs the program on its
    share of the data in parallel. If you use ParallelExecutor to run the
    current program on GPU, a node is a GPU device, and ParallelExecutor will
    automatically use the GPU devices available on the current machine. If you
    use ParallelExecutor to run the current program on CPU, a node is a CPU
    device, and you can set the number of CPU devices through the 'CPU_NUM'
    environment variable, for example 'CPU_NUM=4'. If that variable is not
    set, ParallelExecutor calls `multiprocessing.cpu_count` to get the number
    of CPUs in the system.

    Args:
        use_cuda (bool): Whether to use CUDA or not.
        loss_name (str): The name of the loss variable; it must be set when
            training. Default None.
        main_program (Program): The program to run; if not provided,
            default_main_program will be used. Default None.
        share_vars_from(ParallelExecutor): If provided, it will share variables
            from the specified ParallelExecutor. Default None.
        exec_strategy(ExecutionStrategy): exec_strategy is used to control how
            to run the program in ParallelExecutor, for example, how many
            threads are used to execute the program and after how many
            iterations the temporary variables generated during execution are
            cleaned up. For more information, please refer to
            fluid.ExecutionStrategy. Default None.
        build_strategy(BuildStrategy): build_strategy is used to control how
            to build the SSA graph in ParallelExecutor by setting its
            properties, for example reduce_strategy and
            gradient_scale_strategy. For more information, please refer to
            fluid.BuildStrategy. Default None.
        num_trainers(int): If greater than 1, NCCL will be initialized with
            multiple ranks of nodes; each node should have the same number of
            GPUs. Distributed training will then be enabled. Default 1.
        trainer_id(int): Must be used together with num_trainers. trainer_id is the
            "rank" of current node starts from 0. Default 0.
        scope(Scope): The scope to run with; defaults to fluid.global_scope().

    Returns:
        ParallelExecutor: The initialized ParallelExecutor object.

    Raises:
        TypeError: If share_vars_from is provided but is not a ParallelExecutor
            object.

    Examples:
        .. code-block:: python

          train_exe = fluid.ParallelExecutor(use_cuda=True, loss_name=loss.name)
          test_exe = fluid.ParallelExecutor(use_cuda=True,
                                            main_program=test_program,
                                            share_vars_from=train_exe)

          train_loss, = train_exe.run([loss.name], feed=feed_dict)
          test_loss, = test_exe.run([loss.name], feed=feed_dict)
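
          # CPU variant (a sketch; assumes the same `loss` as above and that
          # `os` is imported). The number of CPU "devices" comes from the
          # CPU_NUM environment variable, e.g. os.environ['CPU_NUM'] = '4'.
          cpu_exe = fluid.ParallelExecutor(use_cuda=False, loss_name=loss.name)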
    """

    def __init__(self,
                 use_cuda,
                 loss_name=None,
                 main_program=None,
                 share_vars_from=None,
                 exec_strategy=None,
                 build_strategy=None,
                 num_trainers=1,
                 trainer_id=0,
                 scope=None):
        # step1: get places; the places are also used in run().
        self._places = []
        if use_cuda:
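            # FLAGS_selected_gpus, if set, is expected to be a comma-separated
            # list of GPU ids (e.g. "0,1"); otherwise all visible CUDA devices
            # are used.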
            gpus_env = os.getenv("FLAGS_selected_gpus")
            if gpus_env:
                gpus = [int(s) for s in gpus_env.split(",")]
            else:
                gpus = [
                    i for i in six.moves.range(core.get_cuda_device_count())
                ]
            self._places = [core.CUDAPlace(i) for i in gpus]
        else:
            cpu_num = int(
                os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
            self._places = [core.CPUPlace() for _ in six.moves.range(cpu_num)]
        assert self._places, "no place for execution"

        # step2: init exec_strategy
        if exec_strategy is None:
            exec_strategy = ExecutionStrategy()
        exec_strategy.use_cuda = use_cuda
        if exec_strategy.num_threads == 0:
            if use_cuda:
                # Experiments on se-resnext show that too many threads hurt
                # performance. Worth tuning for other models in the future.
                exec_strategy.num_threads = len(self._places) * 4
            else:
                cpu_num = int(
                    os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
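                # Heuristic default: two threads per CPU "device".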
                exec_strategy.num_threads = cpu_num * 2

        # step3: init build_strategy
        if build_strategy is None:
            build_strategy = BuildStrategy()
        build_strategy.num_trainers = num_trainers
        build_strategy.trainer_id = trainer_id

        # step4: get main_program, scope, local_scopes
        main = main_program if main_program \
            else framework.default_main_program()
        scope = scope if scope is not None else executor.global_scope()

        if share_vars_from and not isinstance(share_vars_from,
                                              ParallelExecutor):
            raise TypeError("share_vars_from must be ParallelExecutor.")

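        # Reusing the local scopes of share_vars_from is what makes the two
        # executors share variables.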
        local_scopes = share_vars_from.executor.local_scopes()\
            if share_vars_from else []

        # step5: check trainers_endpoints; it is used for distributed training.
        trainers_endpoints = main._trainers_endpoints
        if num_trainers > 1 and trainers_endpoints:
            assert num_trainers == len(
                trainers_endpoints), "num_trainers == len(trainers_endpoints)"
            build_strategy.trainers_endpoints = trainers_endpoints

        # step6: get persistable_vars, places. persistable_vars
        # need to be broadcast to the other local scopes.
        persistable_vars = set([
            cpt.to_text(v.name) for v in [
                var for var in main.list_vars()
                if var.persistable and var.type != core.VarDesc.VarType.RAW
            ]
        ])

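        # Wrap each typed place (CUDAPlace / CPUPlace) into a generic
        # core.Place, which is what the C++ ParallelExecutor expects.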
        def place_obj(place):
            p = core.Place()
            p.set_place(place)
            return p

        places = list(map(place_obj, self._places))

        # step7: init ParallelExecutor
        self.executor = core.ParallelExecutor(
            places, persistable_vars, main.desc,
            cpt.to_text(loss_name)
            if loss_name else six.u(''), scope, local_scopes, exec_strategy,
            build_strategy, num_trainers, trainer_id)

        self.scope = scope

    def run(self, fetch_list, feed=None, feed_dict=None, return_numpy=True):
        """
        Run a parallel executor with fetch_list.

        The feed parameter can be a dict or a list. If feed is a dict, the
        feed data will be split among the multiple devices. If feed is a
        list, we assume the data has already been split among the devices,
        and each element in the list will be copied directly to its device.

        For example, if the feed is a dict:

        >>> exe = ParallelExecutor()
        >>> # the image will be split among the devices. If there are two
        >>> # devices, each device will process an image with shape
        >>> # (24, 1, 28, 28)
        >>> exe.run(feed={'image': numpy.random.random(size=(48, 1, 28, 28))})

        For example, if the feed is a list:

        >>> exe = ParallelExecutor()
        >>> # each device will process its corresponding element in the list.
        >>> # the 1st device will process an image with shape (48, 1, 28, 28)
        >>> # the 2nd device will process an image with shape (32, 1, 28, 28)
        >>> #
        >>> # you can use exe.device_count to get the number of devices.
        >>> exe.run(feed=[{"image": numpy.random.random(size=(48, 1, 28, 28))},
        >>>               {"image": numpy.random.random(size=(32, 1, 28, 28))},
        >>>              ])

        Args:
            fetch_list(list): The fetched variable names
            feed(list|dict|None): The feed variables. If the feed is a dict,
                tensors in that dict will be split onto each device. If the
                feed is a list, each element of the list will be copied
                to each device. Default None.
            feed_dict: Alias for feed parameter, for backward compatibility.
                This parameter has been deprecated. Default None.
            return_numpy(bool): Whether to convert the fetched tensors to numpy.
                Default: True.

        Returns:
            List: The fetched result list.

        Raises:
            ValueError: If the feed is a list but its length is not equal to
                the number of active places, or if one of its elements is not
                a dict.

        NOTES:
            1. If the feed's type is dict, the amount of data fed to
               ParallelExecutor must be bigger than the number of active
               places; otherwise an exception will be thrown from the C++
               side. Pay special attention to whether the last batch of the
               dataset is bigger than the number of active places.
            2. If there is more than one active place, the fetch result for
               each variable is a list, and each element of this list is the
               value of the variable on the respective active place.

        Examples:
            .. code-block:: python

                pe = fluid.ParallelExecutor(use_cuda=use_cuda,
                                            loss_name=avg_cost.name,
                                            main_program=fluid.default_main_program())
                loss = pe.run(feed=feeder.feed(cur_batch),
                              fetch_list=[avg_cost.name])
        """
        if feed is None and feed_dict is not None:
            feed = feed_dict
            print(
                "`feed_dict` is deprecated. Please use `feed=`",
                file=sys.stderr)

        if isinstance(feed, dict):
            feed_tensor_dict = dict()
            for feed_name in feed:
                feed_tensor = feed[feed_name]
                if not isinstance(feed_tensor, core.LoDTensor):
                    feed_tensor = core.LoDTensor()
                    # always set to CPU place, since the tensor needs to be
                    # split, and splitting is fast on CPU
                    feed_tensor.set(feed[feed_name], core.CPUPlace())
                feed_tensor_dict[feed_name] = feed_tensor

            self.executor.feed_and_split_tensor_into_local_scopes(
                feed_tensor_dict)
        elif isinstance(feed, list) or isinstance(feed, tuple):
            if len(feed) != len(self._places):
                raise ValueError(
                    "If feed is a list, it should have the same length as the places"
                )

            res = list()

            for i, each in enumerate(feed):
                if not isinstance(each, dict):
                    raise TypeError(
                        "Each element of feed list should be a dict")
                res_dict = dict()
                for feed_name in each:
                    tensor = each[feed_name]
                    if not isinstance(tensor, core.LoDTensor):
                        tmp = core.LoDTensor()
                        tmp.set(tensor, self._places[i])
                        tensor = tmp
                    res_dict[feed_name] = tensor
                res.append(res_dict)
            self.executor.feed_tensors_into_local_scopes(res)

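        # All fetched tensors are written into a single LoDTensorArray
        # variable under this reserved name and then read back from the scope.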
        fetch_var_name = '@FETCHED_VAR_NAME@'
        self.executor.run(fetch_list, fetch_var_name)
        arr = self.scope.find_var(fetch_var_name).get_lod_tensor_array()

        if return_numpy:
            return executor.as_numpy(arr)

        return [arr[i] for i in range(len(arr))]

    @property
    def device_count(self):
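        """The number of devices (places) this executor runs on."""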
        return len(self._places)