#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import core
import multiprocessing
import framework
import executor
import warnings

__all__ = ['ParallelExecutor']


class ParallelExecutor(object):
    def __init__(self,
                 use_cuda,
                 loss_name=None,
                 main_program=None,
                 num_threads=None,
                 allow_op_delay=False,
                 share_vars_from=None,
                 use_default_grad_scale=True):
        """
        ParallelExecutor can run program in parallel.

        Args:
            use_cuda(bool): Whether to use CUDA or not.
            loss_name(str, default None): The name of the loss variable; it
                must be set for training.
            main_program(Program, default None): The program to run; if not
                provided, default_main_program will be used.
            num_threads(int, default None): How many threads are used for
                training.
            allow_op_delay(bool, default False): Whether to delay and buffer
                some operators together for scheduling, which may improve
                performance in some cases. Default False.
            share_vars_from(ParallelExecutor, default None): If provided,
                it will share variables from the specified ParallelExecutor.
            use_default_grad_scale(bool, default True): If set to True, a
                default scale value equal to `1./device_count` (e.g. 0.25
                for 4 devices) will be multiplied into the gradients of each
                device, and the scaled gradients will then be aggregated.
                Otherwise, a customized scale value should be fed into the
                network.

        Returns:
            A ParallelExecutor object.

        Raises:
            TypeError: If share_vars_from is provided but is not a
                ParallelExecutor object.

        Examples:
            .. code-block:: python

              train_exe = fluid.ParallelExecutor(
                  use_cuda=True, loss_name=loss.name)
              test_exe = fluid.ParallelExecutor(
                  use_cuda=True,
                  main_program=test_program,
                  share_vars_from=train_exe)

              train_loss, = train_exe.run([loss.name], feed=feed_dict)
              test_loss, = test_exe.run([loss.name], feed=feed_dict)
        """

        self._places = []
        self._act_places = []
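        # For each available device, create a framework Place handle wrapping
        # the concrete CUDAPlace/CPUPlace where computation will run.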
        if use_cuda:
            for i in xrange(core.get_cuda_device_count()):
                p = core.Place()
                self._act_places.append(core.CUDAPlace(i))
                p.set_place(self._act_places[-1])
                self._places.append(p)
        else:
            for i in xrange(multiprocessing.cpu_count()):
                p = core.Place()
                self._act_places.append(core.CPUPlace())
                p.set_place(self._act_places[-1])
                self._places.append(p)
        assert self._places, "no place for execution"

        if num_threads is None:
            if use_cuda:
                # Experiments on se-resnext show that too many threads hurt
                # performance. Worth tuning for other models in the future.
                num_threads = len(self._places)
            else:
                num_threads = min(
                    len(self._places) * 2, multiprocessing.cpu_count())

        main = main_program if main_program else framework.default_main_program()
        scope = executor.global_scope()

        if share_vars_from and not isinstance(share_vars_from,
                                              ParallelExecutor):
            raise TypeError("share_vars_from must be a ParallelExecutor.")
        local_scopes = (share_vars_from.executor.local_scopes()
                        if share_vars_from else [])

        self.persistable_vars = [
            v.name for v in main.list_vars()
            if v.persistable and v.type != core.VarDesc.VarType.RAW
        ]

        self.executor = core.ParallelExecutor(
            num_threads,
            bool(use_cuda),  # use_event
            self._places,
            set([
                p.name for p in main.global_block().iter_parameters()
                if not p.stop_gradient
            ]),
            set(self.persistable_vars),
            main.desc,
            loss_name if loss_name else '',
            scope,
            local_scopes,
            allow_op_delay,
            use_default_grad_scale)
        self.scope = scope

    def run(self, fetch_list, feed=None, feed_dict=None):
        """
        Run a parallel executor with fetch_list.

        The feed parameter can be a dict or a list. If feed is a dict, the
        feed data will be split across the devices. If feed is a list, we
        assume the data has already been split across the devices, and each
        element in the list will be copied to its device directly.

        For example, if the feed is a dict:
        >>> exe = ParallelExecutor()
        >>> # the image will be split across the devices. If there are two
        >>> # devices, each device will process an image with shape (24, 1, 28, 28)
        >>> exe.run(feed={'image': numpy.random.random(size=(48, 1, 28, 28))})

        For example, if the feed is a list:
        >>> exe = ParallelExecutor()
        >>> # each device will process each element in the list.
        >>> # the 1st device will process an image with shape (48, 1, 28, 28)
        >>> # the 2nd device will process an image with shape (32, 1, 28, 28)
        >>> #
        >>> # you can use exe.device_count to get the number of devices.
        >>> exe.run(feed=[{"image": numpy.random.random(size=(48, 1, 28, 28))},
        >>>               {"image": numpy.random.random(size=(32, 1, 28, 28))},
        >>>              ])


        Args:
            fetch_list(list): The fetched variable names
            feed(list|dict|None): The feed variables. If the feed is a dict,
                tensors in that dict will be split across the devices. If
                the feed is a list, each element of the list will be copied
                to its device.
            feed_dict: Alias for feed parameter, for backward compatibility.
                This parameter is deprecated.

        Returns: fetched result list.

        """
        if feed is None and feed_dict is not None:
            feed = feed_dict
            warnings.warn(
                "`feed_dict` is deprecated. Please use `feed=` instead.")

        if isinstance(feed, dict):
            feed_tensor_dict = dict()
            for feed_name in feed:
                feed_tensor = feed[feed_name]
                if not isinstance(feed_tensor, core.LoDTensor):
                    feed_tensor = core.LoDTensor()
                    # Always set the tensor to CPU place, since it needs to
                    # be split later; splitting is fast on CPU.
                    feed_tensor.set(feed[feed_name], core.CPUPlace())
                feed_tensor_dict[feed_name] = feed_tensor

            self.executor.feed_and_split_tensor_into_local_scopes(
                feed_tensor_dict)
        elif isinstance(feed, (list, tuple)):
            if len(feed) != len(self._act_places):
                raise ValueError(
                    "When feed is a list, its length must match the number "
                    "of places")

            res = list()

            for i, each in enumerate(feed):
                if not isinstance(each, dict):
                    raise TypeError(
                        "Each element of feed list should be a dict")
                res_dict = dict()
                for feed_name in each:
                    tensor = each[feed_name]
                    if not isinstance(tensor, core.LoDTensor):
                        tmp = core.LoDTensor()
                        tmp.set(tensor, self._act_places[i])
                        tensor = tmp
                    res_dict[feed_name] = tensor
                res.append(res_dict)
            self.executor.feed_tensors_into_local_scopes(res)

        fetch_var_name = '@FETCHED_VAR_NAME@'
        self.executor.run(fetch_list, fetch_var_name)
        arr = self.scope.find_var(fetch_var_name).get_lod_tensor_array()
        return [arr[i] for i in range(len(arr))]

    def bcast_params(self):
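        """
        Broadcast the persistable variables (e.g. parameters) collected at
        construction time across all devices.
        """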
        self.executor.bcast_params(set(self.persistable_vars))

    @property
    def device_count(self):
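        """
        The number of devices (places) this executor runs on.
        """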
        return len(self._act_places)