#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import numpy as np
import contextlib
import six
from .framework import Program, default_main_program, Variable
from . import core
from .executor import global_scope, Executor
from paddle.fluid.proto import data_feed_pb2
from google.protobuf import text_format
from . import io
from .data_feed_desc import DataFeedDesc
from .trainer_desc import TrainerDesc, MultiTrainer, DistMultiTrainer
from .distributed import ps_instance
from .contrib.utils import hdfs_utils as hdfs

__all__ = ['AsyncExecutor']


class AsyncExecutor(object):
    """
    An asynchronous Executor in Python. By exploiting the power of
    multi-core processors and data queueing, AsyncExecutor decouples data
    reading and data consuming, each running in multiple threads in parallel.

    Instead of reading data on the Python side, AsyncExecutor accepts a
    training file list, which is retrieved in C++; training inputs are then
    read, parsed and fed to the training network within C++ code.

    AsyncExecutor is in active development and the API might change in the near
    future.

    Example:
        >>> data_feed = fluid.DataFeedDesc('data.proto')
        >>> startup_program = fluid.default_startup_program()
        >>> main_program = fluid.default_main_program()
        >>> filelist = ["train_data/part-%d" % i for i in range(100)]
        >>> thread_num = len(filelist) // 4
        >>>
        >>> place = fluid.CPUPlace()
        >>> async_executor = fluid.AsyncExecutor(place)
        >>>
        >>> async_executor.run_startup_program(startup_program)
        >>>
        >>> epoch = 10
        >>> for i in range(epoch):
        >>>     async_executor.run(main_program,
        >>>                        data_feed,
        >>>                        filelist,
        >>>                        thread_num,
        >>>                        [acc],
        >>>                        debug=False)
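
    Example (distributed; an illustrative sketch, assuming ``dist_desc`` is
    a prepared parameter-server protobuf message and that the returned
    instance exposes role helpers such as ``is_server()``):
        >>> async_executor = fluid.AsyncExecutor()
        >>> instance = async_executor.config_distributed_nodes()
        >>> if instance.is_server():
        >>>     async_executor.init_server(dist_desc)
        >>> else:
        >>>     async_executor.init_worker(dist_desc, startup_program)
        >>>     async_executor.run(main_program, data_feed, filelist,
        >>>                        thread_num, [acc])
        >>>     async_executor.stop()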

    Args:
        place(fluid.CPUPlace|None): indicates on which device the executor
                                    runs. Only CPUPlace is supported

    Note:
        For debugging a complicated network on parallel GPUs, you can test it
        on this executor. It takes exactly the same arguments and is expected
        to produce the same results.

    Note: Only running on CPUPlace is supported.
    """

    def __init__(self, place=None, run_mode=""):
        """
        Init.

        Example:
            >>> place = fluid.CPUPlace()
            >>> async_executor = fluid.AsyncExecutor(place)

        Args:
            place(Place): CPUPlace. Only the CPU device is supported.
            run_mode(str): default is empty string.
        """
        if place is None:
            place = core.CPUPlace()
        if not isinstance(place, core.CPUPlace):
            raise ValueError("AsyncExecutor only supports CPU device")

        p = core.Place()
        p.set_place(place)

        scope = global_scope()
        self.executor = core.AsyncExecutor(scope, p)
        self.instance = None

    def run(self, program, data_feed, filelist, thread_num, fetch, debug=False):
        """
        Run program by this AsyncExecutor.

        Example:
            >>> place = fluid.CPUPlace()
            >>> async_executor = fluid.AsyncExecutor(place)
            >>> async_executor.run(default_main_program(),
                                   my_data_feed_desc,
                                   ["a.txt", "b.txt"],
                                   thread_num=2,
                                   fetch=[acc])

        Args:
            program(Program): the program that needs to run; if not provided,
                              default_main_program will be used.
            data_feed(DataFeedDesc): A DataFeedDesc object
            filelist(str|list): a file or a list of files
            thread_num(int): number of concurrent training threads.
            fetch(str|list): the var name or a list of var names to inspect
            debug(bool): When set to True, fetch vars will be printed to
                         standard output after each minibatch
        """
        if program is None:
            program = default_main_program()
        program_desc = program.desc

        if data_feed is None:
            raise ValueError('ValueError: data_feed should be provided')

        if filelist is None:
            raise ValueError('ValueError: filelist should be provided')

        if isinstance(filelist, str):
            filelist = [filelist]

        if not isinstance(thread_num, int) or thread_num < 1:
            raise TypeError('TypeError: thread_num should be a positive integer')

        is_local = self.instance is None
        trainer = None
        if is_local:
            trainer = MultiTrainer()
        else:
            trainer = DistMultiTrainer()
        trainer.gen_trainer_desc(
            dataset=data_feed, fleet_desc=self.dist_desc, worker="downpour")
        trainer.set_thread(thread_num)
        trainer.set_filelist(filelist)
        trainer.set_data_feed(data_feed)
        if not is_local:
            trainer.set_program_config(self.dist_desc, str(id(program)))
        with open("trainer_desc.proto", "w") as fout:
            fout.write(trainer._desc())
        # define a trainer and a device_worker here
        self.executor.run_from_files(program_desc,
                                     trainer._desc(), debug)

    '''
    def run(self,
            program,
            data_feed,
            filelist,
            thread_num,
            fetch,
            mode="",
            debug=False):
        """
        Run program by this AsyncExecutor. The training dataset is given by
        filelist. Users can also inspect certain variables by naming them in
        the parameter :code:`fetch`, like in fluid.Executor. Unlike
        fluid.Executor, however, AsyncExecutor doesn't return fetched
        variables; instead, it dumps the values of each fetched variable to
        standard output.

        The dataset runs on multiple threads; within each thread a
        thread-local scope is created, and all OPs are created in that scope.
        Parameters are updated by all the OPs simultaneously.

        Args:
            program(Program): the program that needs to run; if not provided,
                              default_main_program will be used.
            data_feed(DataFeedDesc): A DataFeedDesc object
            filelist(str): a file containing the training dataset file list
            thread_num(int): number of concurrent training threads. See
                             :code:`Note` for how to set this properly
            fetch(str|list): the var name or a list of var names to inspect
            mode(str): run mode of this interface
            debug(bool): When set to True, fetch vars will be printed to
                         standard output after each minibatch

        Note:
            the executor will run all operators in the program, not only
            the operators that the fetch_list depends on.

        Note:
            AsyncExecutor runs on multiple threads, each bound to a CPU
            core. To achieve the best performance, it's suggested to set the
            thread num equal to or slightly less than the number of CPU
            cores.
        """
        if program is None:
            program = default_main_program()
        program_desc = program.desc

        if data_feed is None:
            raise ValueError('ValueError: data_feed should be provided')

        if filelist is None:
            raise ValueError('ValueError: filelist should be provided')

        if isinstance(filelist, str):
            filelist = [filelist]

        if not isinstance(thread_num, int):
            raise TypeError('TypeError: thread_num should be a positive number')

        if fetch is not None:
            if isinstance(fetch, Variable):
                fetch = [fetch]
            fetch_var_names = [var.name for var in fetch]
            for fetch_var in fetch:
                shape = fetch_var.shape
                if shape[-1] != 1:
                    raise AssertionError(
                        "%s: Fetch variable has wrong shape. Only variables "
                        "with the last dimension of size 1 are supported." %
                        (fetch_var.name))

        self.executor.run_from_files(program_desc,
                                     data_feed.desc(), filelist, thread_num,
                                     fetch_var_names, mode, debug,
                                     str(id(program_desc)))
    '''

    def download_data(self,
                      afs_path,
                      local_path,
                      fs_default_name,
                      ugi,
                      file_cnt,
                      hadoop_home="$HADOOP_HOME",
                      process_num=12):
        """
        download_data is a default download method for distributed training;
        a user can also download data without using this method.

        Example:
            >>> exe = fluid.AsyncExecutor()
            >>> exe.download_data("/xxx/xxx/xx/",
            >>>                   "./data",
            >>>                   "afs://xxx.xxx.xxx.xxx:9901",
            >>>                   "xxx,yyy", file_cnt=1)

        Args:
            afs_path(str): afs_path defined by users
            local_path(str): download data path
            fs_default_name(str): file system server address
            ugi(str): hadoop ugi
            file_cnt(int): the number of files to download; a user can set
                           this for debugging
            hadoop_home(str): hadoop home path
            process_num(int): number of download processes
        """
        if self.instance is None:
            raise ValueError('instance is None, please run '
                             'config_distributed_nodes to init instance')

        configs = {"fs.default.name": fs_default_name, "hadoop.job.ugi": ugi}

        client = hdfs.HDFSClient(hadoop_home, configs)
        downloads = hdfs.multi_download(
            client,
            afs_path,
            local_path,
            self.instance.get_worker_index(),
            self.instance.get_node_cnt() // 2,
            multi_processes=process_num)
        self.instance.barrier_worker()  # wait for download_data

    def get_instance(self):
        """
        Get the current node's instance so that users can do operations
        in a distributed setting.
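
        Example (an illustrative sketch; assumes config_distributed_nodes
        has already been called):
            >>> instance = async_executor.get_instance()
            >>> worker_index = instance.get_worker_index()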
        """
        if self.instance is None:
            raise ValueError(
                'instance is None, please run config_distributed_nodes '
                'to init instance')
        return self.instance

    def config_distributed_nodes(self):
        """
        If a user needs to run a distributed AsyncExecutor, he or she needs
        to do a global configuration first, so that information about the
        current process can be obtained.
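
        Example (an illustrative sketch; run on every node of the job):
            >>> exe = fluid.AsyncExecutor()
            >>> instance = exe.config_distributed_nodes()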
        """
        self.instance = ps_instance.PaddlePSInstance(1, 2)
        return self.instance

    def stop(self):
        """
        At the end of the process, users should call stop to stop the
        servers and to synchronize all workers.
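
        Example (an illustrative sketch; assumes a worker process that was
        set up via config_distributed_nodes):
            >>> exe.stop()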
        """
        if self.instance is None:
            raise ValueError(
                'instance is None, please run config_distributed_nodes '
                'to init instance')
        self.instance.barrier_worker()  # workers do all things
        if self.instance.is_first_worker():
            self.executor.stop_server()
        self.instance.barrier_worker()  # sync
        self.instance.barrier_all()
        self.instance.finalize()

    def init_server(self, dist_desc):
        """
        Initialize the server on the current node if the current process is
        a server.

        Args:
            dist_desc: a protobuf message that describes how to init
                       a worker and a server
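
        Example (an illustrative sketch; assumes ``dist_desc`` was prepared
        by the caller and that the instance exposes an ``is_server()``
        helper):
            >>> if instance.is_server():
            >>>     exe.init_server(dist_desc)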
        """
        if self.instance is None:
            raise ValueError(
                'instance is None, please run config_distributed_nodes '
                'to init instance')
        self.dist_desc_str = text_format.MessageToString(dist_desc)
        self.dist_desc = dist_desc
        self.executor.init_server(self.dist_desc_str, self.instance._rankid)
        ip = self.executor.start_server()
        self.instance.set_ip(ip)
        self.instance.barrier_all()  # wait for all servers to start
        ips = self.instance.gather_ips()
        self.executor.gather_servers(ips, self.instance.get_node_cnt())
        self.instance.barrier_all()  # wait for all workers to start

    def init_worker(self, dist_desc, startup_program):
        """
        Initialize the worker on the current node if the current process is
        a worker.

        Args:
            dist_desc: a protobuf message that describes how to init
                       a worker and a server
            startup_program(fluid.Program): startup program of current process
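
        Example (an illustrative sketch; assumes ``dist_desc`` and
        ``startup_program`` were prepared by the caller and that the instance
        exposes an ``is_worker()`` helper):
            >>> if instance.is_worker():
            >>>     exe.init_worker(dist_desc, startup_program)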
        """
        if self.instance is None:
            raise ValueError(
                'instance is None, please run config_distributed_nodes '
                'to init instance')

        self.dist_desc_str = text_format.MessageToString(dist_desc)
        self.dist_desc = dist_desc
        place = core.CPUPlace()
        executor = Executor(place)
        if isinstance(startup_program, list):
            for sp in startup_program:
                executor.run(sp)
        else:
            executor.run(startup_program)

        self.instance.barrier_all()  # wait for all servers to start
        ips = self.instance.gather_ips()
        self.executor.init_worker(self.dist_desc_str, ips,
                                  self.instance.get_node_cnt(),
                                  self.instance._rankid)
        self.instance.barrier_all()  # wait for all workers to start
        if self.instance.is_first_worker():
            self.executor.init_model()
        self.instance.barrier_worker()  # wait for init_model to finish

    def init_model(self):
        """
        init_model command that can be invoked from one of the workers;
        model parameters are initialized in servers.
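
        Example (an illustrative sketch; typically only the first worker
        calls this):
            >>> if instance.is_first_worker():
            >>>     exe.init_model()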
        """
        if self.instance is None:
            raise ValueError(
                'instance is None, please run config_distributed_nodes '
                'to init instance')
        self.executor.init_model()

    def save_model(self, save_path):
        """
        save_model command that can be invoked from one of the workers;
        model parameters are saved in servers and uploaded to save_path on
        the file system.

        Args:
            save_path(str): the path on the file system to which the model
                            is saved
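
        Example (an illustrative sketch; the path is a placeholder whose
        scheme depends on the configured file system):
            >>> exe.save_model("/xxx/xxx/model")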
        """
        if self.instance is None:
            raise ValueError(
                'instance is None, please run config_distributed_nodes '
                'to init instance')
        self.executor.save_model(save_path)