#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import contextlib
import os

import core

import data_feeder
import executor
import framework
import io
# the name 'optimizer' is also used inside Trainer.__init__, so rename the module to opt_module
import optimizer as opt_module
import parallel_executor
from transpiler import distribute_transpiler

__all__ = [
    'Trainer', 'BeginEpochEvent', 'EndEpochEvent', 'BeginStepEvent',
    'EndStepEvent', 'CheckpointConfig'
]


class BeginEpochEvent(object):
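    """
    The beginning of a training epoch.

    Args:
        epoch_id(int): The current epoch ID.
    """
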
    def __init__(self, epoch_id):
        self.epoch = epoch_id


class EndEpochEvent(object):
    """
    The end of a training epoch.

    Args:
        epoch_id(int): The current epoch ID.
    """

    def __init__(self, epoch_id):
        self.epoch = epoch_id


class BeginStepEvent(object):
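    """
    The beginning of a training step.

    Args:
        epoch_id(int): The current epoch ID.
        step_id(int): The current step ID.
    """
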
    def __init__(self, epoch_id, step_id):
        self.epoch = epoch_id
        self.step = step_id
        # The event handler may set this to False to skip fetching metrics
        # for the corresponding EndStepEvent.
        self.fetch_metrics = True


class EndStepEvent(object):
    """
    The end of a training step.

    Args:
        epoch_id(int): The current epoch ID.
        step_id(int): The current step ID.
        metrics(list): A list of fetched tensors. The order of this list is the
            same as the order of the outputs returned by :code:`train_func`.
    """

    def __init__(self, epoch_id, step_id, metrics):
        self.epoch = epoch_id
        self.step = step_id
        self.metrics = metrics


class CheckpointConfig(object):
    """
    Parameter object for :code:`fluid.io.save_checkpoint` and
    :code:`fluid.Trainer`. Used to configure how to save checkpoints.

    Args:
        checkpoint_dir(str): Directory path to save checkpoints. Default is the
            current directory.

        max_num_checkpoints(int): The maximum number of local checkpoints to keep.
        epoch_interval(int): Save a checkpoint every this number of epochs.
        step_interval(int): Save a checkpoint every this number of steps.

    Examples:
        >>> config = fluid.CheckpointConfig("./checkpoints")
        >>> trainer = fluid.Trainer(train_func=train_program,
        >>>                         place=place,
        >>>                         optimizer_func=optimizer_func,
        >>>                         checkpoint_config=config)
        >>> trainer.train(...)
    """

    def __init__(self,
                 checkpoint_dir=None,
                 max_num_checkpoints=3,
                 epoch_interval=1,
                 step_interval=10):
        if checkpoint_dir is None:
            self.checkpoint_dir = os.getcwd()
        else:
            self.checkpoint_dir = checkpoint_dir

        self.max_num_checkpoints = max_num_checkpoints

        if epoch_interval < 1:
            self.epoch_interval = 1
        else:
            self.epoch_interval = epoch_interval

        if step_interval < 1:
            self.step_interval = 10
        else:
            self.step_interval = step_interval

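        # The following fields are runtime state managed by the Trainer
        # (e.g. filled in when resuming from a checkpoint), not user options.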
        self.epoch_id = 0
        self.step_id = 0
        self.load_serial = None
        self.is_pserver = False


def check_and_get_place(place):
    """
    Check the type of place, or get the default place.

    Args:
        place(None|core.CUDAPlace|core.CPUPlace): the place that the trainer will be executed on.

    Raises:
        TypeError: if the type of place is mismatched.

    Returns:
        The original place if it is not None.
        If fluid is compiled with CUDA, returns CUDAPlace(0) by default;
        otherwise returns CPUPlace by default.
    """
    if place is None:
        if core.is_compiled_with_cuda():
            return core.CUDAPlace(0)
        else:
            return core.CPUPlace()
    else:
        if not isinstance(place, core.CUDAPlace) and not isinstance(
                place, core.CPUPlace):
            raise TypeError("Place should be either CUDAPlace or CPUPlace")
        return place


class Trainer(object):
    """

    Args:
        train_func(callable): A function which will return loss. The loss must be a scalar.
        optimizer_func(callable): A function that returns an Optimizer object.
        param_path(str): A directory from which to load saved parameters, if given.
        place: The device place of this trainer.
        parallel(bool): Whether to train with a ParallelExecutor. Default False.
        checkpoint_config(CheckpointConfig): Configuration of checkpoint saving
            and loading. Default None.
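
    Examples:
        A minimal usage sketch. :code:`train_program`, :code:`optimizer_func`,
        :code:`event_handler` and the feed variable names below are user-defined
        assumptions, not part of this module:

        >>> place = fluid.CPUPlace()
        >>> trainer = fluid.Trainer(train_func=train_program,
        >>>                         optimizer_func=optimizer_func,
        >>>                         place=place)
        >>> trainer.train(num_epochs=1,
        >>>               event_handler=event_handler,
        >>>               reader=train_reader,
        >>>               feed_order=['img', 'label'])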
    """

    def __init__(self,
                 train_func,
                 optimizer_func,
                 param_path=None,
                 place=None,
                 parallel=False,
                 checkpoint_config=None):
        self.__stop = False
        self.parallel = parallel
        # 1. we need to generate a framework.Program by calling
        # program_func. Reference: fluid.program_guard in
        # test_word2vec.py

        # config for checkpoint
        # only chief worker will save variables
        self.trainer_id = 0
        self.checkpoint_cfg = checkpoint_config
        if self.checkpoint_cfg:
            assert isinstance(self.checkpoint_cfg, CheckpointConfig)
            serial = io.get_latest_checkpoint_serial(
                self.checkpoint_cfg.checkpoint_dir)
            self.checkpoint_cfg.load_serial = serial if serial >= 0 else None

        self.scope = core.Scope()

        self.startup_program = framework.Program()
        self.train_program = framework.Program()

        with framework.program_guard(self.train_program, self.startup_program):
            program_func_outs = train_func()
            self.train_func_outputs = program_func_outs if isinstance(
                program_func_outs, list) else [program_func_outs]
            self.test_program = self.train_program.clone(for_test=True)

            # The first element of program_func_outs is loss.
            loss = self.train_func_outputs[0]

            optimizer = optimizer_func()
            if not isinstance(optimizer, opt_module.Optimizer):
                raise TypeError(
                    "The optimizer should be an instance of Optimizer")
            optimize_ops, params_grads = optimizer.minimize(loss)

        self.place = check_and_get_place(place)

        self._dist_transpile_if_necessary(optimize_ops, params_grads)

        # 2. move the default_main_program to self.program and run the
        # default_startup program on an empty core.Scope()
        # Run startup program
        with self._prog_and_scope_guard():
            exe = executor.Executor(place)
            exe.run(self.startup_program)

        if self.checkpoint_cfg and self.checkpoint_cfg.load_serial:
            with self._prog_and_scope_guard():
                exe = executor.Executor(place)
                io.load_checkpoint(exe, self.checkpoint_cfg.checkpoint_dir,
                                   self.checkpoint_cfg.load_serial,
                                   self.startup_program)

            if not self.checkpoint_cfg.is_pserver:
                epoch_id, step_id = io.load_trainer_args(
                    self.checkpoint_cfg.checkpoint_dir,
                    self.checkpoint_cfg.load_serial, self.trainer_id,
                    self._get_checkpoint_load_args())
                self.checkpoint_cfg.epoch_id = int(epoch_id)
                self.checkpoint_cfg.step_id = int(step_id)

        if param_path and os.path.isdir(param_path):
            # load params from param_path into scope
            io.load_persist_vars_without_grad(
                exe, dirname=param_path, program=self.startup_program)

    def _transpile_nccl2_dist(self):
        # PADDLE_TRAINER_IPS
        if "PADDLE_TRAINER_IPS" not in os.environ:
            self.nccl_id_var = None
        else:
            self.trainer_id = int(os.getenv("PADDLE_TRAINER_ID"))
            port = os.getenv("PADDLE_PSERVER_PORT")
            worker_ips = os.getenv("PADDLE_TRAINER_IPS")
            worker_endpoints = []
            for ip in worker_ips.split(","):
                worker_endpoints.append(':'.join([ip, port]))
            self.num_trainers = len(worker_endpoints)
            current_endpoint = os.getenv("POD_IP") + ":" + port
            worker_endpoints.remove(current_endpoint)
            # TODO(wuyi): use self.nccl_id_var, self.num_trainers and self.trainer_id
            # in ParallelExecutor to start
            # distributed training using NCCL2
            self.nccl_id_var = self.startup_program.global_block().create_var(
                name="NCCLID", persistable=True, type=core.VarDesc.VarType.RAW)
            self.startup_program.global_block().append_op(
                type="gen_nccl_id",
                inputs={},
                outputs={"NCCLID": self.nccl_id_var},
                attrs={
                    "endpoint": current_endpoint,
                    "endpoint_list": worker_endpoints,
                    "trainer_id": self.trainer_id
                })

    def _dist_transpile_if_necessary(self, optimize_ops, params_grads):
        self._transpile_nccl2_dist()
        if self.nccl_id_var is not None:
            return

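        # A hedged sketch of the environment expected for parameter-server
        # training; the concrete values below are illustrative only:
        #   PADDLE_TRAINING_ROLE=TRAINER (or PSERVER)
        #   PADDLE_PSERVER_PORT=6174
        #   PADDLE_PSERVER_IPS=192.168.0.2,192.168.0.3
        #   PADDLE_TRAINERS=2
        #   PADDLE_TRAINER_ID=0
        #   PADDLE_CURRENT_IP=192.168.0.4 (pserver only)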
        if "PADDLE_TRAINING_ROLE" not in os.environ:
            return

        # the port of all pservers, needed by both trainer and pserver
        port = os.getenv("PADDLE_PSERVER_PORT", "6174")
        # comma separated ips of all pservers, needed by trainer and
        # pserver
        pserver_ips = os.getenv("PADDLE_PSERVER_IPS", "")
        eplist = []
        for ip in pserver_ips.split(","):
            eplist.append(':'.join([ip, port]))
        pserver_endpoints = ",".join(eplist)
        # total number of workers/trainers in the job, needed by
        # trainer and pserver
        trainers = int(os.getenv("PADDLE_TRAINERS"))
        # the IP of the local machine, needed by pserver only
        current_endpoint = os.getenv("PADDLE_CURRENT_IP", "") + ":" + port
        # the unique trainer id, starting from 0, needed by trainer
        # only
        self.trainer_id = int(os.getenv("PADDLE_TRAINER_ID", "0"))

        # the role, should be either PSERVER or TRAINER
        training_role = os.getenv("PADDLE_TRAINING_ROLE")
        with self._prog_and_scope_guard():
            t = distribute_transpiler.DistributeTranspiler()
            t.transpile(
                self.trainer_id, pservers=pserver_endpoints, trainers=trainers)
            if training_role == "PSERVER":
                if self.checkpoint_cfg:
                    self.is_pserver = True

                self.train_program = t.get_pserver_program(current_endpoint)
                self.startup_program = t.get_startup_program(current_endpoint,
                                                             self.train_program)
            elif training_role == "TRAINER":
                self.train_program = t.get_trainer_program()
            else:
                raise ValueError(
                    'PADDLE_TRAINING_ROLE environment variable must be either TRAINER or PSERVER'
                )

    def stop(self):
        """
        Stop training: the training loop will exit before running the next step.
        """
        self.__stop = True

    def train(self, num_epochs, event_handler, reader=None, feed_order=None):
        """
        Train the model.

        Args:
            num_epochs: The number of epochs. An epoch will process all data in the reader.
            event_handler: The event handler. A function with type (event:Event)->void.
            reader: A reader that yields the training data.
            feed_order: Feeding order of the reader. None will follow the
                defining order in the program.

        Returns:
            None
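
        Examples:
            A hedged sketch of an event handler; :code:`fluid.EndStepEvent` is
            exported by this module, and the printed format is only illustrative:

            >>> def event_handler(event):
            >>>     if isinstance(event, fluid.EndStepEvent):
            >>>         print("epoch %d, step %d, metrics %s" % (
            >>>             event.epoch, event.step, event.metrics))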
        """
        training_role = os.getenv("PADDLE_TRAINING_ROLE", "")
        if training_role == "PSERVER":
            with self._prog_and_scope_guard():
                exe = executor.Executor(self.place)
                exe.run()
                return
        if self.parallel:
            self._train_by_parallel_executor(num_epochs, event_handler, reader,
                                             feed_order)
        else:
            self._train_by_executor(num_epochs, event_handler, reader,
                                    feed_order)

    def test(self, reader, feed_order):
        """
        Test the model on the given test data.

        Args:
            reader: The reader that yields test data.
            feed_order: Feeding order of the reader. None will follow the
                defining order in the program.

        Returns:
            A list with the mean value of each fetched output over all test data.
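
        Examples:
            A hedged usage sketch; :code:`test_reader` and the feed variable
            names are assumptions:

            >>> metrics = trainer.test(reader=test_reader,
            >>>                        feed_order=['img', 'label'])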
        """

        return self._test_by_executor(reader, feed_order,
                                      self.train_func_outputs)

    def save_params(self, param_path):
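        """
        Save all persistable parameters of the train program into the directory
        :code:`param_path`.

        A hedged usage sketch (the directory name is only an example):

        >>> trainer.save_params("./my_paddle_model")
        """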
        # reference: save_persistables in io.py
        with self._prog_and_scope_guard():
            exe = executor.Executor(self.place)
            io.save_persistables(exe, dirname=param_path)

    @contextlib.contextmanager
    def _prog_and_scope_guard(self):
        with framework.program_guard(
                main_program=self.train_program,
                startup_program=self.startup_program):
            with executor.scope_guard(self.scope):
                yield

    def _train_by_executor(self, num_epochs, event_handler, reader, feed_order):
        """
        Train by Executor and single device.

        Args:
            num_epochs:
            event_handler:
            reader:
            feed_order:

        Returns:

        """
        with self._prog_and_scope_guard():
            feed_var_list = build_feed_var_list(self.train_program, feed_order)
            feeder = data_feeder.DataFeeder(
                feed_list=feed_var_list, place=self.place)
            exe = executor.Executor(self.place)
            reader = feeder.decorate_reader(reader, multi_devices=False)
            self._train_by_any_executor(event_handler, exe, num_epochs, reader)

    def _train_by_any_executor(self, event_handler, exe, num_epochs, reader):
        if self.checkpoint_cfg:
            epochs = [
                epoch_id for epoch_id in range(num_epochs)
                if epoch_id >= self.checkpoint_cfg.epoch_id
            ]
        else:
            epochs = [epoch_id for epoch_id in range(num_epochs)]

        for epoch_id in epochs:
            event_handler(BeginEpochEvent(epoch_id))
            for step_id, data in enumerate(reader()):
                if self.__stop:
                    if self.checkpoint_cfg:
                        self._clean_checkpoint()
                    return

                # when resuming from a checkpoint, skip the steps that were
                # already trained in the checkpointed epoch
                if self.checkpoint_cfg and self.checkpoint_cfg.load_serial \
                    and self.checkpoint_cfg.step_id >= step_id and self.checkpoint_cfg.epoch_id == epoch_id:
                    continue

                begin_event = BeginStepEvent(epoch_id, step_id)
                event_handler(begin_event)
                if begin_event.fetch_metrics:
                    metrics = exe.run(feed=data,
                                      fetch_list=[
                                          var.name
                                          for var in self.train_func_outputs
                                      ])
                else:
                    metrics = exe.run(feed=data, fetch_list=[])

                if self.checkpoint_cfg:
                    self._save_checkpoint(epoch_id, step_id)
                event_handler(EndStepEvent(epoch_id, step_id, metrics))
            event_handler(EndEpochEvent(epoch_id))
        if self.checkpoint_cfg:
            self._clean_checkpoint()

    def _test_by_executor(self, reader, feed_order, fetch_list):
        with executor.scope_guard(self.scope):
            feed_var_list = build_feed_var_list(self.test_program, feed_order)
            feeder = data_feeder.DataFeeder(
                feed_list=feed_var_list, place=self.place)
            exe = executor.Executor(self.place)
            accumulated = len(fetch_list) * [0]
            count = 0
            for data in reader():
                outs = exe.run(program=self.test_program,
                               feed=feeder.feed(data),
                               fetch_list=fetch_list)
                accumulated = [x[0] + x[1][0] for x in zip(accumulated, outs)]
                count += 1

            return [x / count for x in accumulated]

    def _train_by_parallel_executor(self, num_epochs, event_handler, reader,
                                    feed_order):
        with self._prog_and_scope_guard():
            pe = self._get_or_create_parallel_executor()
            feed_var_list = build_feed_var_list(self.train_program, feed_order)
            feeder = data_feeder.DataFeeder(
                feed_list=feed_var_list, place=self.place)
            reader = feeder.decorate_reader(reader, multi_devices=True)
            self._train_by_any_executor(event_handler, pe, num_epochs, reader)

    def _get_parallel_executor(self):
        return getattr(self, 'parallel_executor', None)

    def _get_or_create_parallel_executor(self):
        if self._get_parallel_executor() is None:
            self.parallel_executor = parallel_executor.ParallelExecutor(
                use_cuda=isinstance(self.place, core.CUDAPlace),
                loss_name=self.train_func_outputs[0].name)
        return self._get_parallel_executor()

    def _clean_checkpoint(self):
        assert self.checkpoint_cfg
        io.clean_checkpoint(checkpoint_dir=self.checkpoint_cfg.checkpoint_dir)

    def _get_checkpoint_load_args(self):
        """
        epoch_id and step_id are runtime arguments; they are not variables, so they are loaded independently.
        """
        return ["epoch_id", "step_id"]

    def _get_checkpoint_save_args(self, epoch_id, step_id):
        """
        epoch_id and step_id are runtime arguments; they are not variables, so they are saved independently.
        """
        trainer_args = {}
        trainer_args["epoch_id"] = epoch_id
        trainer_args["step_id"] = step_id
        return trainer_args

    def _save_checkpoint(self, epoch_id, step_id):
        assert self.checkpoint_cfg

        if epoch_id % self.checkpoint_cfg.epoch_interval == 0 and \
                step_id % self.checkpoint_cfg.step_interval == 0:
            exe = executor.Executor(self.place)
            io.save_checkpoint(
                executor=exe,
                checkpoint_dir=self.checkpoint_cfg.checkpoint_dir,
                trainer_id=self.trainer_id,
                trainer_args=self._get_checkpoint_save_args(epoch_id, step_id),
                main_program=self.train_program,
                max_num_checkpoints=self.checkpoint_cfg.max_num_checkpoints)


def build_feed_var_list(program, feed_order):
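    """
    Build the list of feed variables of :code:`program` according to
    :code:`feed_order`, which may be a list of variable names or a dict mapping
    each variable name to its position in the feed list.

    A hedged sketch of both accepted forms (the variable names are hypothetical):

    >>> feed_vars = build_feed_var_list(program, ['img', 'label'])
    >>> feed_vars = build_feed_var_list(program, {'img': 0, 'label': 1})
    """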
    if not isinstance(program, framework.Program):
        raise TypeError("The 'program' should be an object of Program")

    if isinstance(feed_order, list):
        feed_var_list = [
            program.global_block().var(var_name) for var_name in feed_order
        ]
    else:
        if not isinstance(feed_order, dict):
            raise TypeError(
                "The 'feed_order' should be either None, list or dict.")
        if sorted(feed_order.values()) != range(len(feed_order)):
            raise ValueError(
                "The values of 'feed_order' should be a permutation of [0, len(feed_order))"
            )
        sorted_pair_list = sorted(feed_order.items(), key=lambda item: item[1])
        feed_var_list = [
            program.global_block().var(pair[0]) for pair in sorted_pair_list
        ]
    return feed_var_list