#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import contextlib
import os

import core

import data_feeder
import executor
import framework
import io
# The name `optimizer` is reused as a local variable inside Trainer.__init__,
# so import the module as opt_module to avoid the name clash.
import optimizer as opt_module
import parallel_executor
from transpiler import distribute_transpiler

__all__ = [
    'Trainer',
    'BeginEpochEvent',
    'EndEpochEvent',
    'BeginStepEvent',
    'EndStepEvent',
]


class BeginEpochEvent(object):
    def __init__(self, epoch_id):
        self.epoch = epoch_id


class EndEpochEvent(object):
    def __init__(self, epoch_id):
        self.epoch = epoch_id


class BeginStepEvent(object):
    def __init__(self, epoch_id, step_id):
        self.epoch = epoch_id
        self.step = step_id
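        # The event handler may set this to False to skip fetching metrics
        # for this step (see Trainer._train_by_any_executor).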
        self.fetch_metrics = True


class EndStepEvent(object):
    def __init__(self, epoch_id, step_id, metrics):
        self.epoch = epoch_id
        self.step = step_id
        self.metrics = metrics
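
# Sketch of a user-supplied event handler (hypothetical user code, not part of
# this module). Trainer.train calls it with the event objects defined above:
#
#     def event_handler(event):
#         if isinstance(event, EndStepEvent):
#             print("epoch=%d step=%d metrics=%s" %
#                   (event.epoch, event.step, event.metrics))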


def check_and_get_place(place):
    """
    Check the type of place, or get the default place.

    Args:
        place(None|core.CUDAPlace|core.CPUPlace): the place on which the
            trainer will be executed.

    Raises:
        TypeError: if place is neither a CUDAPlace nor a CPUPlace.

    Returns:
        The given place if it is not None. Otherwise CUDAPlace(0) if fluid is
        compiled with CUDA, or CPUPlace by default.
    """
    if place is None:
        if core.is_compiled_with_cuda():
            return core.CUDAPlace(0)
        else:
            return core.CPUPlace()
    else:
        if not isinstance(place, core.CUDAPlace) and not isinstance(
                place, core.CPUPlace):
            raise TypeError("Place should be either CUDAPlace or CPUPlace")
        return place


class Trainer(object):
    """

    Args:
        train_func(callable): A function that builds the training program and
            returns the loss, or a list whose first element is the loss. The
            loss must be a scalar.
        optimizer_func(callable): A function that returns an Optimizer object.
        place: The device place of this trainer.
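
    Example (a minimal sketch; `network`, `event_handler`, `train_reader` and
    the feed names are hypothetical user code, and `fluid` is assumed to be
    `paddle.fluid`):

        def train_func():
            return network()  # build the model and return a scalar loss

        def optimizer_func():
            return fluid.optimizer.SGD(learning_rate=0.001)

        trainer = Trainer(train_func=train_func, optimizer_func=optimizer_func)
        trainer.train(num_epochs=1, event_handler=event_handler,
                      reader=train_reader, feed_order=['x', 'y'])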
    """

    def __init__(self,
                 train_func,
                 optimizer_func,
                 param_path=None,
                 place=None,
                 parallel=False):
        self.__stop = False
        self.parallel = parallel
        # 1. We need to generate a framework.Program by calling
        # train_func. Reference: fluid.program_guard in
        # test_word2vec.py

        self.scope = core.Scope()

        self.startup_program = framework.Program()
        self.train_program = framework.Program()

        with framework.program_guard(self.train_program, self.startup_program):
            program_func_outs = train_func()
            self.train_func_outputs = program_func_outs if isinstance(
                program_func_outs, list) else [program_func_outs]
            self.test_program = self.train_program.clone()

            # The first element of program_func_outs is the loss.
            loss = self.train_func_outputs[0]

            optimizer = optimizer_func()
            if not isinstance(optimizer, opt_module.Optimizer):
                raise TypeError(
                    "The optimizer should be an instance of Optimizer")
            optimize_ops, params_grads = optimizer.minimize(loss)

        self.place = check_and_get_place(place)

        self._dist_transpile_if_necessary(optimize_ops, params_grads)

        # 2. move the default_main_program to self.program and run the
        # default_startup program on an empty core.Scope()
        # Run startup program
        with self._prog_and_scope_guard():
            exe = executor.Executor(self.place)
            exe.run(self.startup_program)

        if param_path:
            # load params from param_path into scope
            io.load_persistables(exe, dirname=param_path)

    def _transpile_nccl2_dist(self):
        # NCCL2 distributed mode is enabled only when PADDLE_TRAINER_IPS is
        # set in the environment.
        if "PADDLE_TRAINER_IPS" not in os.environ:
            self.nccl_id_var = None
        else:
            self.trainer_id = int(os.getenv("PADDLE_TRAINER_ID"))
            port = os.getenv("PADDLE_PSERVER_PORT")
            worker_ips = os.getenv("PADDLE_TRAINER_IPS")
            worker_endpoints = []
            for ip in worker_ips.split(","):
                worker_endpoints.append(':'.join([ip, port]))
            self.num_trainers = len(worker_endpoints)
            current_endpoint = os.getenv("POD_IP") + ":" + port
            worker_endpoints.remove(current_endpoint)
            # TODO(wuyi): use self.nccl_id_var, self.num_trainers and self.trainer_id
            # in ParallelExecutor to start
            # distributed training using NCCL2
            self.nccl_id_var = self.startup_program.global_block().create_var(
                name="NCCLID", persistable=True, type=core.VarDesc.VarType.RAW)
            self.startup_program.global_block().append_op(
                type="gen_nccl_id",
                inputs={},
                outputs={"NCCLID": self.nccl_id_var},
                attrs={
                    "endpoint": current_endpoint,
                    "endpoint_list": worker_endpoints,
                    "trainer_id": self.trainer_id
                })
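
    # Environment sketch for the NCCL2 branch above (the values are
    # hypothetical; an external launcher is expected to export them):
    #   PADDLE_TRAINER_IPS=192.168.0.1,192.168.0.2
    #   PADDLE_TRAINER_ID=0
    #   PADDLE_PSERVER_PORT=6174
    #   POD_IP=192.168.0.1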

    def _dist_transpile_if_necessary(self, optimize_ops, params_grads):
        self._transpile_nccl2_dist()
        if self.nccl_id_var is not None:
            return

        if "PADDLE_TRAINING_ROLE" not in os.environ:
            return

        # the port of all pservers, needed by both trainer and pserver
        port = os.getenv("PADDLE_PSERVER_PORT", "6174")
        # comma separated ips of all pservers, needed by trainer and
        # pserver
        pserver_ips = os.getenv("PADDLE_PSERVER_IPS", "")
        eplist = []
        for ip in pserver_ips.split(","):
            eplist.append(':'.join([ip, port]))
        pserver_endpoints = ",".join(eplist)
        # total number of workers/trainers in the job, needed by
        # trainer and pserver
        trainers = int(os.getenv("PADDLE_TRAINERS"))
        # the IP of the local machine, needed by pserver only
        current_endpoint = os.getenv("PADDLE_CURRENT_IP", "") + ":" + port
        # the unique trainer id, starting from 0, needed by trainer
        # only
        trainer_id = int(os.getenv("PADDLE_TRAINER_ID", "0"))
        # the role, should be either PSERVER or TRAINER
        training_role = os.getenv("PADDLE_TRAINING_ROLE")
        with self._prog_and_scope_guard():
            t = distribute_transpiler.DistributeTranspiler()
            t.transpile(
                trainer_id, pservers=pserver_endpoints, trainers=trainers)
            if training_role == "PSERVER":
                self.train_program = t.get_pserver_program(current_endpoint)
                self.startup_program = t.get_startup_program(current_endpoint,
                                                             self.train_program)
            elif training_role == "TRAINER":
                self.train_program = t.get_trainer_program()
            else:
                raise ValueError(
                    'PADDLE_TRAINING_ROLE environment variable must be either TRAINER or PSERVER'
                )
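
    # Environment sketch for the parameter-server mode above (the values are
    # hypothetical):
    #   PADDLE_TRAINING_ROLE=TRAINER        # or PSERVER
    #   PADDLE_PSERVER_IPS=192.168.0.10
    #   PADDLE_PSERVER_PORT=6174
    #   PADDLE_TRAINERS=2
    #   PADDLE_TRAINER_ID=0
    #   PADDLE_CURRENT_IP=192.168.0.10      # used by the pserver only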

    def stop(self):
        """
        Stop training: the training loop exits before running the next step.
        """
        self.__stop = True

    def train(self, num_epochs, event_handler, reader=None, feed_order=None):
        """
        Train the model.

        Args:
            num_epochs: The number of epochs. An epoch processes all the data
                yielded by the reader once.
            event_handler: The event handler, a function of type
                (event) -> None that is called with the Begin/End Epoch/Step
                events defined above.
            reader: A reader that yields the training data.
            feed_order: Feeding order of the reader. None follows the defining
                order in the program.

        Returns:
            None
        """
        training_role = os.getenv("PADDLE_TRAINING_ROLE", "")
        if training_role == "PSERVER":
            with self._prog_and_scope_guard():
                exe = executor.Executor(self.place)
                exe.run()
                return
        if self.parallel:
            self._train_by_parallel_executor(num_epochs, event_handler, reader,
                                             feed_order)
        else:
            self._train_by_executor(num_epochs, event_handler, reader,
                                    feed_order)

    def test(self, reader, feed_order):
        """
        Test the model on the given test data.

        Args:
            reader: The reader that yields test data.
            feed_order: Feeding order of the reader. None follows the defining
                order in the program.

        Returns:
            The outputs of the training function (the loss first), averaged
            over all test batches.
        """

        return self._test_by_executor(reader, feed_order,
                                      self.train_func_outputs)

    def save_params(self, param_path):
        # reference: save_persistables in io.py
        with self._prog_and_scope_guard():
            exe = executor.Executor(self.place)
            io.save_persistables(exe, dirname=param_path)

    @contextlib.contextmanager
    def _prog_and_scope_guard(self):
        with framework.program_guard(
                main_program=self.train_program,
                startup_program=self.startup_program):
            with executor.scope_guard(self.scope):
                yield

    def _train_by_executor(self, num_epochs, event_handler, reader, feed_order):
        """
        Train by Executor and single device.

        Args:
            num_epochs:
            event_handler:
            reader:
            feed_order:

        Returns:

        """
        with self._prog_and_scope_guard():
            feed_var_list = build_feed_var_list(self.train_program, feed_order)
            feeder = data_feeder.DataFeeder(
                feed_list=feed_var_list, place=self.place)
            exe = executor.Executor(self.place)
            reader = feeder.decorate_reader(reader, multi_devices=False)
            self._train_by_any_executor(event_handler, exe, num_epochs, reader)

    def _train_by_any_executor(self, event_handler, exe, num_epochs, reader):
        for epoch_id in range(num_epochs):
            event_handler(BeginEpochEvent(epoch_id))
            for step_id, data in enumerate(reader()):
                if self.__stop:
                    return
                begin_event = BeginStepEvent(epoch_id, step_id)
                event_handler(begin_event)
                if begin_event.fetch_metrics:
                    metrics = exe.run(feed=data,
                                      fetch_list=[
                                          var.name
                                          for var in self.train_func_outputs
                                      ])
                else:
                    metrics = exe.run(feed=data, fetch_list=[])
                event_handler(EndStepEvent(epoch_id, step_id, metrics))
            event_handler(EndEpochEvent(epoch_id))

    def _test_by_executor(self, reader, feed_order, fetch_list):
        with executor.scope_guard(self.scope):
            feed_var_list = build_feed_var_list(self.test_program, feed_order)
            feeder = data_feeder.DataFeeder(
                feed_list=feed_var_list, place=self.place)
            exe = executor.Executor(self.place)
            accumulated = len(fetch_list) * [0]
            count = 0
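            # Run the test program batch by batch, summing the first element
            # of each fetched output so per-batch averages can be returned.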
            for data in reader():
                outs = exe.run(program=self.test_program,
                               feed=feeder.feed(data),
                               fetch_list=fetch_list)
                accumulated = [x[0] + x[1][0] for x in zip(accumulated, outs)]
                count += 1

            return [x / count for x in accumulated]

    def _train_by_parallel_executor(self, num_epochs, event_handler, reader,
                                    feed_order):
        with self._prog_and_scope_guard():
            pe = self._get_or_create_parallel_executor()
            feed_var_list = build_feed_var_list(self.train_program, feed_order)
            feeder = data_feeder.DataFeeder(
                feed_list=feed_var_list, place=self.place)
            reader = feeder.decorate_reader(reader, multi_devices=True)
            self._train_by_any_executor(event_handler, pe, num_epochs, reader)

    def _get_parallel_executor(self):
        return getattr(self, 'parallel_executor', None)

    def _get_or_create_parallel_executor(self):
        if self._get_parallel_executor() is None:
            self.parallel_executor = parallel_executor.ParallelExecutor(
                use_cuda=isinstance(self.place, core.CUDAPlace),
                loss_name=self.train_func_outputs[0].name)
        return self._get_parallel_executor()


def build_feed_var_list(program, feed_order):
    if not isinstance(program, framework.Program):
        raise TypeError("The 'program' should be an object of Program")

    if isinstance(feed_order, list):
        feed_var_list = [
            program.global_block().var(var_name) for var_name in feed_order
        ]
    else:
        if not isinstance(feed_order, dict):
            raise TypeError(
                "The 'feed_order' should be either None, list or dict.")
        if sorted(feed_order.values()) != range(len(feed_order)):
            raise ValueError(
                "The values of 'feed_order' should be a permutation of [0, len(feed_order))"
            )
        sorted_pair_list = sorted(feed_order.items(), key=lambda item: item[1])
        feed_var_list = [
            program.global_block().var(pair[0]) for pair in sorted_pair_list
        ]
    return feed_var_list
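
# build_feed_var_list usage sketch (the variable names are hypothetical):
#   feed_order=['img', 'label']        -> feed in the given list order
#   feed_order={'img': 0, 'label': 1}  -> feed in ascending order of the values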