#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import contextlib
import os

import core

import data_feeder
import executor
import framework
import io
# The optimizer module shares its name with the optimizer parameter of
# Trainer.__init__, so import it as opt_module.
import optimizer as opt_module
import parallel_executor
from transpiler import distribute_transpiler

__all__ = [
    'Trainer', 'BeginEpochEvent', 'EndEpochEvent', 'BeginStepEvent',
    'EndStepEvent', 'CheckpointConfig'
]


class BeginEpochEvent(object):
    def __init__(self, epoch_id):
        self.epoch = epoch_id


class EndEpochEvent(object):
    def __init__(self, epoch_id):
        self.epoch = epoch_id


class BeginStepEvent(object):
    def __init__(self, epoch_id, step_id):
        self.epoch = epoch_id
        self.step = step_id
        self.fetch_metrics = True


class EndStepEvent(object):
    def __init__(self, epoch_id, step_id, metrics):
        self.epoch = epoch_id
        self.step = step_id
        self.metrics = metrics
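
# During Trainer.train(), the events above are passed to the user-supplied
# event_handler. A minimal illustrative handler (a sketch; the printing and
# the fetch period are arbitrary choices, not part of this module):
#
#     def event_handler(event):
#         if isinstance(event, BeginStepEvent):
#             # only fetch metrics every 100 steps
#             event.fetch_metrics = event.step % 100 == 0
#         elif isinstance(event, EndStepEvent):
#             print("epoch %d, step %d, metrics %s" %
#                   (event.epoch, event.step, event.metrics))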


class CheckpointConfig(object):
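    """
    Configuration of periodic checkpoint saving during training.

    Args:
        checkpoint_dir(str): The directory to save checkpoints in. Defaults to
            the current working directory.
        max_num_checkpoints(int): The maximum number of checkpoints to keep.
        epoch_interval(int): Save a checkpoint every epoch_interval epochs.
        step_interval(int): Save a checkpoint every step_interval steps within
            a saving epoch.

    A minimal usage sketch (the directory name is a placeholder):

        config = CheckpointConfig(
            checkpoint_dir="/tmp/ckpt", max_num_checkpoints=3, step_interval=100)
        trainer = Trainer(..., checkpoint_config=config)
    """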
    def __init__(self,
                 checkpoint_dir=None,
                 max_num_checkpoints=3,
                 epoch_interval=1,
                 step_interval=10):
        if checkpoint_dir is None:
            self.checkpoint_dir = os.getcwd()
        else:
            self.checkpoint_dir = checkpoint_dir

        self.max_num_checkpoints = max_num_checkpoints

        if epoch_interval < 1:
            self.epoch_interval = 1
        else:
            self.epoch_interval = epoch_interval

        if step_interval < 1:
            self.step_interval = 10
        else:
            self.step_interval = step_interval

        self._epoch_id = 0
        self._step_id = 0
        self._load_serial = None


def check_and_get_place(place):
    """
    Check the type of place or get the default place
    Args:
        place(None|core.CUDAPlace|core.CPUPlace): the place that trainer will be executed on.

    Raises:
        TypeError: if place is neither None nor an instance of core.CUDAPlace
            or core.CPUPlace.

    Returns:
        The original place if it is not None; otherwise CUDAPlace(0) if fluid
        is compiled with CUDA, else CPUPlace.
    """
    if place is None:
        if core.is_compiled_with_cuda():
            return core.CUDAPlace(0)
        else:
            return core.CPUPlace()
    else:
        if not isinstance(place, core.CUDAPlace) and not isinstance(
                place, core.CPUPlace):
            raise TypeError("Place should be either CUDAPlace or CPUPlace")
        return place


class Trainer(object):
    """

    Args:
Q
Qiao Longfei 已提交
117
        train_func(callable): A function which will return loss. The loss must be a scalar.
Y
Yu Yang 已提交
118 119 120 121
        optimizer(optimizer.Optimizer): The optimizer should be an instance of Optimizer
        place: The device place of this trainer.
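
    A minimal usage sketch (assuming import paddle.fluid as fluid at the call
    site; the network below is a hypothetical placeholder, not part of this
    module):

        def train_program():
            img = fluid.layers.data(name='img', shape=[784], dtype='float32')
            label = fluid.layers.data(name='label', shape=[1], dtype='int64')
            prediction = fluid.layers.fc(input=img, size=10, act='softmax')
            loss = fluid.layers.mean(
                fluid.layers.cross_entropy(input=prediction, label=label))
            return loss

        trainer = Trainer(
            train_func=train_program,
            optimizer=fluid.optimizer.SGD(learning_rate=0.001),
            place=fluid.CUDAPlace(0))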
    """

    def __init__(self,
                 train_func,
                 optimizer,
                 param_path=None,
                 place=None,
                 parallel=False,
                 checkpoint_config=None):
        self.__stop = False
        self.parallel = parallel
        # 1. we need to generate a framework.Program by calling
        # program_func. Reference: fluid.program_guard in
        # test_word2vec.py
        if not isinstance(optimizer, opt_module.Optimizer):
            raise TypeError("The optimizer should be an instance of Optimizer")

        # config for checkpoint
        # only chief worker will save variables
        self.trainer_id = 0
        self.chief = True
        self.checkpoint = checkpoint_config
        if self.checkpoint and \
            not isinstance(self.checkpoint, CheckpointConfig):
            raise TypeError(
                "The checkpoint_config should be an instance of CheckpointConfig"
            )
        if self.checkpoint:
            self.checkpoint._load_serial = io.need_load_checkpoint(
                self.checkpoint.checkpoint_dir)

        self.scope = core.Scope()

        self.startup_program = framework.Program()
        self.train_program = framework.Program()

        with framework.program_guard(self.train_program, self.startup_program):
            program_func_outs = train_func()
            self.train_func_outputs = program_func_outs if isinstance(
                program_func_outs, list) else [program_func_outs]
            self.test_program = self.train_program.clone()
            if not isinstance(optimizer, opt_module.Optimizer):
                raise TypeError(
                    "The optimizer should be an instance of Optimizer")
            # The first element of program_func_outs is the loss.
            loss = self.train_func_outputs[0]
            optimize_ops, params_grads = optimizer.minimize(loss)

        self.place = check_and_get_place(place)

        self._dist_transpile_if_necessary(optimize_ops, params_grads)

        # 2. move the default_main_program to self.program and run the
        # default_startup program on an empty core.Scope()
        # Run startup program
        with self._prog_and_scope_guard():
            exe = executor.Executor(place)
            exe.run(self.startup_program)

        if self.checkpoint and self.checkpoint._load_serial:
            exe = executor.Executor(place)
            io.load_checkpoint(exe, self.checkpoint.checkpoint_dir,
                               self.checkpoint._load_serial,
                               self.startup_program)

            epoch_id, step_id = io.load_trainer_args(
                self.checkpoint.checkpoint_dir, self.checkpoint._load_serial,
                self.trainer_id, ["epoch_id", "step_id"])
            self.checkpoint._epoch_id = int(epoch_id)
            self.checkpoint._step_id = int(step_id)

        if param_path and os.path.isdir(param_path):
            # load params from param_path into scope
            io.load_persist_vars_without_grad(
                exe,
                dirname=param_path,
                program=self.startup_program,
                nest=False)

    def _transpile_nccl2_dist(self):
        # NCCL2 distributed mode is used only when PADDLE_TRAINER_IPS is set.
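        # A hypothetical environment for this mode (all values are placeholders):
        #   PADDLE_TRAINER_IPS=192.168.1.2,192.168.1.3
        #   PADDLE_PSERVER_PORT=6174
        #   PADDLE_TRAINER_ID=0
        #   POD_IP=192.168.1.2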
        if "PADDLE_TRAINER_IPS" not in os.environ:
            self.nccl_id_var = None
        else:
            self.trainer_id = int(os.getenv("PADDLE_TRAINER_ID"))
            self.chief = self.trainer_id == 0
            port = os.getenv("PADDLE_PSERVER_PORT")
            worker_ips = os.getenv("PADDLE_TRAINER_IPS")
            worker_endpoints = []
            for ip in worker_ips.split(","):
                worker_endpoints.append(':'.join([ip, port]))
            self.num_trainers = len(worker_endpoints)
            current_endpoint = os.getenv("POD_IP") + ":" + port
            worker_endpoints.remove(current_endpoint)
            # TODO(wuyi): use self.nccl_id_var, self.num_trainers and self.trainer_id
            # in ParallelExecutor to start
            # distributed training using NCCL2
            self.nccl_id_var = self.startup_program.global_block().create_var(
                name="NCCLID", persistable=True, type=core.VarDesc.VarType.RAW)
            self.startup_program.global_block().append_op(
                type="gen_nccl_id",
                inputs={},
                outputs={"NCCLID": self.nccl_id_var},
                attrs={
                    "endpoint": current_endpoint,
                    "endpoint_list": worker_endpoints,
                    "trainer_id": self.trainer_id
                })

    def _dist_transpile_if_necessary(self, optimize_ops, params_grads):
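        # Configure distributed training from environment variables. For
        # reference, a hypothetical parameter-server style environment looks
        # like this (all values are placeholders):
        #   PADDLE_TRAINING_ROLE=TRAINER   (or PSERVER)
        #   PADDLE_PSERVER_IPS=192.168.1.2,192.168.1.3
        #   PADDLE_PSERVER_PORT=6174
        #   PADDLE_TRAINERS=2
        #   PADDLE_CURRENT_IP=192.168.1.2
        #   PADDLE_TRAINER_ID=0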
        self._transpile_nccl2_dist()
        if self.nccl_id_var is not None:
            return

        if "PADDLE_TRAINING_ROLE" not in os.environ:
            return

        # the port of all pservers, needed by both trainer and pserver
        port = os.getenv("PADDLE_PSERVER_PORT", "6174")
        # comma separated ips of all pservers, needed by trainer and
        # pserver
        pserver_ips = os.getenv("PADDLE_PSERVER_IPS", "")
        eplist = []
        for ip in pserver_ips.split(","):
            eplist.append(':'.join([ip, port]))
        pserver_endpoints = ",".join(eplist)
        # total number of workers/trainers in the job, needed by
        # trainer and pserver
        trainers = int(os.getenv("PADDLE_TRAINERS"))
        # the IP of the local machine, needed by pserver only
        current_endpoint = os.getenv("PADDLE_CURRENT_IP", "") + ":" + port
        # the unique trainer id, starting from 0, needed by trainer
        # only
        trainer_id = int(os.getenv("PADDLE_TRAINER_ID", "0"))
        self.trainer_id = trainer_id
        self.chief = self.trainer_id == 0
        # the role, should be either PSERVER or TRAINER
        training_role = os.getenv("PADDLE_TRAINING_ROLE")
        with self._prog_and_scope_guard():
            t = distribute_transpiler.DistributeTranspiler()
            t.transpile(
                trainer_id, pservers=pserver_endpoints, trainers=trainers)
            if training_role == "PSERVER":
                self.train_program = t.get_pserver_program(current_endpoint)
                self.startup_program = t.get_startup_program(current_endpoint,
                                                             self.train_program)
            elif training_role == "TRAINER":
                self.train_program = t.get_trainer_program()
            else:
                raise ValueError(
                    'PADDLE_TRAINING_ROLE environment variable must be either TRAINER or PSERVER'
                )

    def stop(self):
        """
        Stop training; the train loop exits before running the next step.
        """
        self.__stop = True

    def train(self, num_epochs, event_handler, reader=None, feed_order=None):
        """
        Train the model.

        Args:
            num_epochs: The number of epochs to train. Each epoch goes through
                all the data yielded by the reader once.
            event_handler: The event handler, a callable with signature
                (event) -> None that is called at the begin and end of every
                epoch and every step.
            reader: A reader that yields batches of training data.
            feed_order: Feeding order of the reader. None means the variables
                will be fed in the order they are defined in the program.

        Returns:
            None
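
        Examples:
            A minimal sketch (assuming import paddle; event_handler,
            train_reader, and the feed names are hypothetical placeholders):

                trainer.train(
                    num_epochs=10,
                    event_handler=event_handler,
                    reader=paddle.batch(train_reader, batch_size=64),
                    feed_order=['img', 'label'])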
        """
        training_role = os.getenv("PADDLE_TRAINING_ROLE", "")
        if training_role == "PSERVER":
            with self._prog_and_scope_guard():
                exe = executor.Executor(self.place)
                exe.run()
                return
        if self.parallel:
            self._train_by_parallel_executor(num_epochs, event_handler, reader,
                                             feed_order)
        else:
            self._train_by_executor(num_epochs, event_handler, reader,
                                    feed_order)

    def test(self, reader, feed_order):
        """
        Test the model on the given test data and return the metrics averaged
        over all batches.

        Args:
            reader: The reader that yields test data.
            feed_order: Feeding order of the reader. None means the variables
                will be fed in the order they are defined in the program.
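
        Examples:
            A minimal sketch (assuming import paddle; test_reader and the feed
            names are hypothetical placeholders):

                avg_metrics = trainer.test(
                    reader=paddle.batch(test_reader, batch_size=64),
                    feed_order=['img', 'label'])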
        """

        return self._test_by_executor(reader, feed_order,
                                      self.train_func_outputs)

    def save_params(self, param_path):
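        """Save all persistable variables of the train program into param_path."""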
        # reference: save_persistables in io.py
        with self._prog_and_scope_guard():
            exe = executor.Executor(self.place)
            io.save_persistables(exe, dirname=param_path)

    @contextlib.contextmanager
    def _prog_and_scope_guard(self):
        with framework.program_guard(
                main_program=self.train_program,
                startup_program=self.startup_program):
            with executor.scope_guard(self.scope):
                yield

    def _train_by_executor(self, num_epochs, event_handler, reader, feed_order):
        """
        Train by Executor and single device.

        Args:
            num_epochs: The number of epochs to train.
            event_handler: The event handler callback.
            reader: A reader that yields batches of training data.
            feed_order: Feeding order of the reader.

        Returns:
            None
        """
        with self._prog_and_scope_guard():
            feed_var_list = build_feed_var_list(self.train_program, feed_order)
            feeder = data_feeder.DataFeeder(
                feed_list=feed_var_list, place=self.place)
            exe = executor.Executor(self.place)
            reader = feeder.decorate_reader(reader, multi_devices=False)
            self._train_by_any_executor(event_handler, exe, num_epochs, reader)

    def _train_by_any_executor(self, event_handler, exe, num_epochs, reader):
        epochs = [
            epoch_id for epoch_id in range(num_epochs)
            if not self.checkpoint or epoch_id >= self.checkpoint._epoch_id
        ]
        for epoch_id in epochs:
            event_handler(BeginEpochEvent(epoch_id))
            for step_id, data in enumerate(reader()):
                if self.__stop:
                    self._clean_checkpoint()
                    return

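                # When resuming from a checkpoint, skip steps of this epoch that
                # were already completed before the checkpoint was saved.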
                if self.checkpoint and self.checkpoint._load_serial \
                    and self.checkpoint._step_id >= step_id and self.checkpoint._epoch_id == epoch_id:
                    continue

                begin_event = BeginStepEvent(epoch_id, step_id)
                event_handler(begin_event)
                if begin_event.fetch_metrics:
                    metrics = exe.run(feed=data,
                                      fetch_list=[
                                          var.name
                                          for var in self.train_func_outputs
                                      ])
                else:
                    metrics = exe.run(feed=data, fetch_list=[])

                event_handler(EndStepEvent(epoch_id, step_id, metrics))
                self._save_checkpoint(epoch_id, step_id)
            event_handler(EndEpochEvent(epoch_id))
        self._clean_checkpoint()

    def _test_by_executor(self, reader, feed_order, fetch_list):
        with executor.scope_guard(self.scope):
            feed_var_list = build_feed_var_list(self.test_program, feed_order)
            feeder = data_feeder.DataFeeder(
                feed_list=feed_var_list, place=self.place)
            exe = executor.Executor(self.place)
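            # Accumulate each fetched metric over all test batches; their means
            # are returned at the end.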
            accumulated = len(fetch_list) * [0]
            count = 0
            for data in reader():
                outs = exe.run(program=self.test_program,
                               feed=feeder.feed(data),
                               fetch_list=fetch_list)
                accumulated = [x[0] + x[1][0] for x in zip(accumulated, outs)]
                count += 1

            return [x / count for x in accumulated]

    def _train_by_parallel_executor(self, num_epochs, event_handler, reader,
                                    feed_order):
        with self._prog_and_scope_guard():
            pe = self._get_or_create_parallel_executor()
            feed_var_list = build_feed_var_list(self.train_program, feed_order)
            feeder = data_feeder.DataFeeder(
                feed_list=feed_var_list, place=self.place)
            reader = feeder.decorate_reader(reader, multi_devices=True)
            self._train_by_any_executor(event_handler, pe, num_epochs, reader)

    def _get_parallel_executor(self):
        return getattr(self, 'parallel_executor', None)

    def _get_or_create_parallel_executor(self):
        if self._get_parallel_executor() is None:
            self.parallel_executor = parallel_executor.ParallelExecutor(
                use_cuda=isinstance(self.place, core.CUDAPlace),
                loss_name=self.train_func_outputs[0].name)
        return self._get_parallel_executor()

    def _clean_checkpoint(self):
        if not self.checkpoint:
            return
        io.clean_checkpoint(checkpoint_dir=self.checkpoint.checkpoint_dir)

    def _save_checkpoint(self, epoch_id, step_id):
        if not self.checkpoint:
            return

        if epoch_id % self.checkpoint.epoch_interval == 0 and step_id % self.checkpoint.step_interval == 0:
            trainer_args = {}
            trainer_args["epoch_id"] = epoch_id
            trainer_args["step_id"] = step_id

            exe = executor.Executor(self.place)
            io.save_checkpoint(
                executor=exe,
                checkpoint_dir=self.checkpoint.checkpoint_dir,
                trainer_id=self.trainer_id,
                is_chief=self.chief,
                trainer_args=trainer_args,
                main_program=self.train_program,
                max_num_checkpoints=self.checkpoint.max_num_checkpoints)


def build_feed_var_list(program, feed_order):
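    """Resolve feed_order into a list of Variables from the program's global block.

    feed_order may be a list of variable names (fed in that order) or a dict
    mapping variable names to their positions in the feed list.
    """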
    if not isinstance(program, framework.Program):
        raise TypeError("The 'program' should be an object of Program")

    if isinstance(feed_order, list):
        feed_var_list = [
            program.global_block().var(var_name) for var_name in feed_order
        ]
    else:
        if not isinstance(feed_order, dict):
            raise TypeError(
                "The 'feed_order' should be either None, list or dict.")
        if not sorted(feed_order.values()) == range(len(feed_order)):
            raise ValueError(
                "The values of 'feed_order' should be a permutation of [0, len(feed_order))"
            )
        sorted_pair_list = sorted(feed_order.items(), key=lambda item: item[1])
        feed_var_list = [
            program.global_block().var(pair[0]) for pair in sorted_pair_list
        ]
    return feed_var_list