#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import contextlib
import os

import core

import data_feeder
import executor
import framework
import io
# The optimizer module shares its name with the `optimizer` parameter of
# Trainer.__init__, so import it as opt_module to avoid shadowing.
import optimizer as opt_module
import parallel_executor
from transpiler import distribute_transpiler

__all__ = [
    'Trainer', 'BeginEpochEvent', 'EndEpochEvent', 'BeginStepEvent',
    'EndStepEvent', 'CheckpointConfig'
]


class BeginEpochEvent(object):
    def __init__(self, epoch_id):
        self.epoch = epoch_id


class EndEpochEvent(object):
    def __init__(self, epoch_id):
        self.epoch = epoch_id


class BeginStepEvent(object):
    def __init__(self, epoch_id, step_id):
        self.epoch = epoch_id
        self.step = step_id
        self.fetch_metrics = True


class EndStepEvent(object):
    def __init__(self, epoch_id, step_id, metrics):
        self.epoch = epoch_id
        self.step = step_id
        self.metrics = metrics


class CheckpointConfig(object):
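    """
    Configuration of checkpoint saving during training.

    Args:
        checkpoint_dir(str|None): The directory that checkpoints are saved
            to. Defaults to the current working directory when None.
        max_num_checkpoints(int): The maximum number of checkpoints to keep.
        epoch_interval(int): A checkpoint is only saved when the epoch id is
            divisible by this interval.
        step_interval(int): A checkpoint is only saved when the step id is
            divisible by this interval.
    """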
    def __init__(self,
                 checkpoint_dir=None,
                 max_num_checkpoints=3,
                 epoch_interval=1,
                 step_interval=10):
        if checkpoint_dir is None:
            self.checkpoint_dir = os.getcwd()
        else:
            self.checkpoint_dir = checkpoint_dir

        self.max_num_checkpoints = max_num_checkpoints

        if epoch_interval < 1:
            self.epoch_interval = 1
        else:
            self.epoch_interval = epoch_interval

        if step_interval < 1:
            self.step_interval = 10
        else:
            self.step_interval = step_interval


def check_and_get_place(place):
    """
    Check the type of place or get the default place.

    Args:
        place(None|core.CUDAPlace|core.CPUPlace): the place on which the
            trainer will be executed.

    Raises:
        TypeError: if place is neither a CUDAPlace nor a CPUPlace.

    Returns:
        The original place if it is not None.
        If fluid is compiled with CUDA, returns CUDAPlace(0) by default;
        otherwise returns CPUPlace by default.
    """
    if place is None:
        if core.is_compiled_with_cuda():
            return core.CUDAPlace(0)
        else:
            return core.CPUPlace()
    else:
        if not isinstance(place, core.CUDAPlace) and not isinstance(
                place, core.CPUPlace):
            raise TypeError("Place should be either CUDAPlace or CPUPlace")
        return place


class Trainer(object):
    """

    Args:
        train_func(callable): A function that returns the loss. The loss
            must be a scalar.
        optimizer(optimizer.Optimizer): An instance of Optimizer used to
            minimize the loss.
        place: The device place on which this trainer runs.
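
    Examples:
        An illustrative sketch of typical usage; train_program, train_reader,
        event_handler and the feed names below are user-defined placeholders,
        and it is assumed the package is imported as
        "import paddle.fluid as fluid":

            trainer = fluid.Trainer(
                train_func=train_program,
                optimizer=fluid.optimizer.SGD(learning_rate=0.001),
                place=fluid.CUDAPlace(0))
            trainer.train(
                num_epochs=1,
                event_handler=event_handler,
                reader=train_reader,
                feed_order=['img', 'label'])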
    """

    def __init__(self,
                 train_func,
                 optimizer,
                 param_path=None,
                 place=None,
                 parallel=False,
                 checkpoint_config=None):
        self.__stop = False
        self.parallel = parallel
        # 1. we need to generate a framework.Program by calling
        # program_func. Reference: fluid.program_guard in
        # test_word2vec.py
        if not isinstance(optimizer, opt_module.Optimizer):
            raise TypeError("The optimizer should be an instance of Optimizer")

        # config for checkpoint
        # only chief worker will save variables
        self.chief = True
        self.checkpoint = checkpoint_config
        if self.checkpoint and \
            not isinstance(self.checkpoint, CheckpointConfig):
            raise TypeError(
                "The checkpoint_config shoule be an instance of CheckpointConfig"
            )

        self.scope = core.Scope()

        self.startup_program = framework.Program()
        self.train_program = framework.Program()

        with framework.program_guard(self.train_program, self.startup_program):
            program_func_outs = train_func()
            self.train_func_outputs = program_func_outs if isinstance(
                program_func_outs, list) else [program_func_outs]
            self.test_program = self.train_program.clone()
            if not isinstance(optimizer, opt_module.Optimizer):
                raise TypeError(
                    "The optimizer should be an instance of Optimizer")
            # The first element of program_func_outs is loss.
            loss = self.train_func_outputs[0]
            optimize_ops, params_grads = optimizer.minimize(loss)

        self.place = check_and_get_place(place)

        self._dist_transpile_if_necessary(optimize_ops, params_grads)

        # 2. move the default_main_program to self.program and run the
        # default_startup program on an empty core.Scope()
        # Run startup program
        with self._prog_and_scope_guard():
            exe = executor.Executor(place)
            exe.run(self.startup_program)

        if self.checkpoint:
            exe = executor.Executor(place)
            io.load_checkpoint(exe, self.checkpoint.checkpoint_dir,
                               self.startup_program)

        if param_path:
            # load params from param_path into scope
            io.load_persist_vars_without_grad(
                exe, dirname=param_path, program=self.startup_program)

    def _transpile_nccl2_dist(self):
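        """
        Prepare NCCL2-based distributed training if the PADDLE_TRAINER_IPS
        environment variable is set; otherwise leave self.nccl_id_var as
        None. Also reads PADDLE_TRAINER_ID, PADDLE_PSERVER_PORT and POD_IP
        from the environment.
        """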
        # PADDLE_TRAINER_IPS
        if "PADDLE_TRAINER_IPS" not in os.environ:
            self.nccl_id_var = None
        else:
            self.trainer_id = int(os.getenv("PADDLE_TRAINER_ID"))
            self.chief = self.trainer_id == 0
            port = os.getenv("PADDLE_PSERVER_PORT")
            worker_ips = os.getenv("PADDLE_TRAINER_IPS")
            worker_endpoints = []
            for ip in worker_ips.split(","):
                worker_endpoints.append(':'.join([ip, port]))
            self.num_trainers = len(worker_endpoints)
            current_endpoint = os.getenv("POD_IP") + ":" + port
            worker_endpoints.remove(current_endpoint)
            # TODO(wuyi): use self.nccl_id_var, self.num_trainers and self.trainer_id
            # in ParallelExecutor to start
            # distributed training using NCCL2
            self.nccl_id_var = self.startup_program.global_block().create_var(
                name="NCCLID", persistable=True, type=core.VarDesc.VarType.RAW)
            self.startup_program.global_block().append_op(
                type="gen_nccl_id",
                inputs={},
                outputs={"NCCLID": self.nccl_id_var},
                attrs={
                    "endpoint": current_endpoint,
                    "endpoint_list": worker_endpoints,
                    "trainer_id": self.trainer_id
                })

    def _dist_transpile_if_necessary(self, optimize_ops, params_grads):
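        """
        Transpile the train and startup programs for distributed training
        when the corresponding environment variables are set. NCCL2 mode
        (PADDLE_TRAINER_IPS) takes precedence; otherwise, when
        PADDLE_TRAINING_ROLE is set, the parameter-server transpiler is used
        with PADDLE_PSERVER_PORT, PADDLE_PSERVER_IPS, PADDLE_TRAINERS,
        PADDLE_CURRENT_IP and PADDLE_TRAINER_ID.
        """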
        self._transpile_nccl2_dist()
        if self.nccl_id_var is not None:
            return

        if "PADDLE_TRAINING_ROLE" not in os.environ:
            return

        # the port of all pservers, needed by both trainer and pserver
        port = os.getenv("PADDLE_PSERVER_PORT", "6174")
        # comma separated ips of all pservers, needed by trainer and
        # pserver
        pserver_ips = os.getenv("PADDLE_PSERVER_IPS", "")
        eplist = []
        for ip in pserver_ips.split(","):
            eplist.append(':'.join([ip, port]))
        pserver_endpoints = ",".join(eplist)
        # total number of workers/trainers in the job, needed by
        # trainer and pserver
        trainers = int(os.getenv("PADDLE_TRAINERS"))
        # the IP of the local machine, needed by pserver only
        current_endpoint = os.getenv("PADDLE_CURRENT_IP", "") + ":" + port
        # the unique trainer id, starting from 0, needed by trainer
        # only
        trainer_id = int(os.getenv("PADDLE_TRAINER_ID", "0"))
        self.chief = trainer_id == 0
        # the role, should be either PSERVER or TRAINER
        training_role = os.getenv("PADDLE_TRAINING_ROLE")
        with self._prog_and_scope_guard():
            t = distribute_transpiler.DistributeTranspiler()
            t.transpile(
                trainer_id, pservers=pserver_endpoints, trainers=trainers)
            if training_role == "PSERVER":
                self.train_program = t.get_pserver_program(current_endpoint)
                self.startup_program = t.get_startup_program(current_endpoint,
                                                             self.train_program)
            elif training_role == "TRAINER":
                self.train_program = t.get_trainer_program()
            else:
                raise ValueError(
                    'PADDLE_TRAINING_ROLE environment variable must be either TRAINER or PSERVER'
                )

    def stop(self):
        """
        Stop training.
        """
        self.__stop = True

    def train(self, num_epochs, event_handler, reader=None, feed_order=None):
        """
        Train the model.

        Args:
            num_epochs: The number of epochs to train. Each epoch processes
                all of the data yielded by reader.
            event_handler: The event handler. A function with type (ev:Event)->void.
            reader: A reader that yields the training data.
            feed_order: Feeding order of reader. None will follow the defining
                order in the program.

        Returns:
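
        Examples:
            A minimal illustrative event handler; the handler name and the
            printed format are placeholders, not part of this module:

                def event_handler(event):
                    if isinstance(event, EndStepEvent):
                        print("epoch %d, step %d, metrics %s" %
                              (event.epoch, event.step, event.metrics))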

        """
        training_role = os.getenv("PADDLE_TRAINING_ROLE", "")
        if training_role == "PSERVER":
            with self._prog_and_scope_guard():
                exe = executor.Executor(self.place)
                exe.run()
                return
        if self.parallel:
            self._train_by_parallel_executor(num_epochs, event_handler, reader,
                                             feed_order)
        else:
            self._train_by_executor(num_epochs, event_handler, reader,
                                    feed_order)

    def test(self, reader, feed_order):
        """
        Test the model on given test data

        Args:
            reader: The reader that yields test data.
            feed_order: Feeding order of reader. None will follow the defining
                order in the program.

        Returns:
            A list with the mean value of each fetched metric, averaged over
            the test batches.
        """

        return self._test_by_executor(reader, feed_order,
                                      self.train_func_outputs)

    def save_params(self, param_path):
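        """
        Save the persistable variables of the train program to the given
        param_path directory.
        """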
        # reference: save_persistables in io.py
        with self._prog_and_scope_guard():
            exe = executor.Executor(self.place)
            io.save_persistables(exe, dirname=param_path)

    @contextlib.contextmanager
    def _prog_and_scope_guard(self):
        with framework.program_guard(
                main_program=self.train_program,
                startup_program=self.startup_program):
            with executor.scope_guard(self.scope):
                yield

    def _train_by_executor(self, num_epochs, event_handler, reader, feed_order):
        """
        Train by Executor and single device.

        Args:
            num_epochs: The number of epochs to train.
            event_handler: The event handler callback.
            reader: A reader that yields the training data.
            feed_order: Feeding order of reader.

        Returns:

        """
        with self._prog_and_scope_guard():
            feed_var_list = build_feed_var_list(self.train_program, feed_order)
            feeder = data_feeder.DataFeeder(
                feed_list=feed_var_list, place=self.place)
            exe = executor.Executor(self.place)
            reader = feeder.decorate_reader(reader, multi_devices=False)
            self._train_by_any_executor(event_handler, exe, num_epochs, reader)

    def _train_by_any_executor(self, event_handler, exe, num_epochs, reader):
        for epoch_id in range(num_epochs):
            event_handler(BeginEpochEvent(epoch_id))
            for step_id, data in enumerate(reader()):
                if self.__stop:
                    return
                begin_event = BeginStepEvent(epoch_id, step_id)
                event_handler(begin_event)
                if begin_event.fetch_metrics:
                    metrics = exe.run(feed=data,
                                      fetch_list=[
                                          var.name
                                          for var in self.train_func_outputs
                                      ])
                else:
                    metrics = exe.run(feed=data, fetch_list=[])

                event_handler(EndStepEvent(epoch_id, step_id, metrics))
                self._save_checkpoint(epoch_id, step_id)
            event_handler(EndEpochEvent(epoch_id))

    def _test_by_executor(self, reader, feed_order, fetch_list):
        with executor.scope_guard(self.scope):
            feed_var_list = build_feed_var_list(self.test_program, feed_order)
            feeder = data_feeder.DataFeeder(
                feed_list=feed_var_list, place=self.place)
            exe = executor.Executor(self.place)
            accumulated = len(fetch_list) * [0]
            count = 0
            for data in reader():
                outs = exe.run(program=self.test_program,
                               feed=feeder.feed(data),
                               fetch_list=fetch_list)
                accumulated = [x[0] + x[1][0] for x in zip(accumulated, outs)]
                count += 1

            return [x / count for x in accumulated]

    def _train_by_parallel_executor(self, num_epochs, event_handler, reader,
                                    feed_order):
        with self._prog_and_scope_guard():
            pe = self._get_or_create_parallel_executor()
            feed_var_list = build_feed_var_list(self.train_program, feed_order)
            feeder = data_feeder.DataFeeder(
                feed_list=feed_var_list, place=self.place)
            reader = feeder.decorate_reader(reader, multi_devices=True)
            self._train_by_any_executor(event_handler, pe, num_epochs, reader)

    def _get_parallel_executor(self):
        return getattr(self, 'parallel_executor', None)

    def _get_or_create_parallel_executor(self):
        if self._get_parallel_executor() is None:
            self.parallel_executor = parallel_executor.ParallelExecutor(
                use_cuda=isinstance(self.place, core.CUDAPlace),
                loss_name=self.train_func_outputs[0].name)
        return self._get_parallel_executor()

    def _save_checkpoint(self, epoch_id, step_id):
        if not self.checkpoint or not self.chief:
            return

        if epoch_id % self.checkpoint.epoch_interval == 0 and step_id % self.checkpoint.step_interval == 0:
            exe = executor.Executor(self.place)
            io.save_checkpoint(
                executor=exe,
                checkpoint_dir=self.checkpoint.checkpoint_dir,
                max_num_checkpoints=self.checkpoint.max_num_checkpoints,
                main_program=self.train_program)


def build_feed_var_list(program, feed_order):
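    """
    Resolve feed_order into a list of feed variables taken from the
    program's global block.

    feed_order may be a list of variable names, or a dict mapping variable
    names to their positions in the feed list; for example (names here are
    illustrative): ['img', 'label'] or {'img': 0, 'label': 1}.
    """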
    if not isinstance(program, framework.Program):
        raise TypeError("The 'program' should be an object of Program")

    if isinstance(feed_order, list):
        feed_var_list = [
            program.global_block().var(var_name) for var_name in feed_order
        ]
    else:
        if not isinstance(feed_order, dict):
            raise TypeError(
                "The 'feed_order' should be either None, list or dict.")
        if not sorted(feed_order.values()) == range(len(feed_order)):
            raise ValueError(
                "The values of 'feed_order' should be a permutation of [0, len(feed_order))"
            )
        sorted_pair_list = sorted(feed_order.items(), key=lambda item: item[1])
        feed_var_list = [
            program.global_block().var(pair[0]) for pair in sorted_pair_list
        ]
    return feed_var_list