#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import core
import framework
import executor
import data_feeder
import contextlib
import io
import transpiler

# The optimizer module shares its name with the `optimizer` argument of
# Trainer.__init__, so import it as opt_module to avoid shadowing it there.
import optimizer as opt_module
from transpiler import distribute_transpiler

__all__ = [
    'Trainer',
    'BeginEpochEvent',
    'EndEpochEvent',
    'BeginStepEvent',
    'EndStepEvent',
]


class BeginEpochEvent(object):
    def __init__(self, epoch_id):
        self.epoch = epoch_id


class EndEpochEvent(object):
    def __init__(self, epoch_id):
        self.epoch = epoch_id


class BeginStepEvent(object):
    def __init__(self, epoch_id, step_id):
        self.epoch = epoch_id
        self.step = step_id


class EndStepEvent(object):
    def __init__(self, epoch_id, step_id):
        self.epoch = epoch_id
        self.step = step_id
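

# A minimal event_handler sketch (illustrative only). Trainer.train invokes
# the handler with the event objects defined above:
#
#     def my_event_handler(event):
#         if isinstance(event, BeginEpochEvent):
#             print("starting epoch %d" % event.epoch)
#         elif isinstance(event, EndStepEvent):
#             print("finished step %d of epoch %d" % (event.step, event.epoch))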


class Trainer(object):
    """

    Args:
        program_func(callable): A function that builds the network and
            returns its loss. The loss must be a scalar.
        optimizer(optimizer.Optimizer): The optimizer should be an instance of Optimizer.
        param_path(str): If given, persistable parameters are loaded from this
            directory after the startup program runs.
        place: The device place of this trainer.
    """

    def __init__(self, program_func, optimizer, param_path=None, place=None):
        # 1. we need to generate a framework.Program by calling
        # program_func. Reference: fluid.program_guard in
        # test_word2vec.py
        self.scope = core.Scope()

        self.startup_program = framework.Program()
        self.train_program = framework.Program()

        with framework.program_guard(self.train_program, self.startup_program):
            loss = program_func()
            if not isinstance(optimizer, opt_module.Optimizer):
                raise TypeError(
                    "The optimizer should be an instance of Optimizer")

            optimize_ops, params_grads = optimizer.minimize(loss)

        self.place = Trainer._check_and_get_place(place)

        self.dist_transpile_if_necessary(optimize_ops, params_grads)

        # 2. move the default_main_program to self.train_program and run the
        # default_startup program on the trainer's (initially empty) scope
        # Run startup program
        with self._prog_and_scope_guard():
            exe = executor.Executor(self.place)
            exe.run(self.startup_program)

        if param_path:
            # load params from param_path into scope
            io.load_persistables(exe, dirname=param_path)

    def dist_transpile_if_necessary(self, optimize_ops, params_grads):
        if "PADDLE_TRAINING_ROLE" not in os.environ:
            return

        # the port of all pservers, needed by both trainer and pserver
        port = os.getenv("PADDLE_PSERVER_PORT", "6174")
        # comma separated ips of all pservers, needed by trainer and
        # pserver
        pserver_ips = os.getenv("PADDLE_PSERVER_IPS", "")
        eplist = []
        for ip in pserver_ips.split(","):
            eplist.append(':'.join([ip, port]))
        pserver_endpoints = ",".join(eplist)
        # total number of workers/trainers in the job, needed by
        # trainer and pserver
        trainers = int(os.getenv("PADDLE_TRAINERS"))
        # the IP of the local machine, needed by pserver only
        current_endpoint = os.getenv("PADDLE_CURRENT_IP", "") + ":" + port
        # the unique trainer id, starting from 0, needed by trainer
        # only
        trainer_id = int(os.getenv("PADDLE_TRAINER_ID", "0"))
        # the role, should be either PSERVER or TRAINER
        training_role = os.getenv("PADDLE_TRAINING_ROLE")
        with self._prog_and_scope_guard():
            t = distribute_transpiler.DistributeTranspiler()
            t.transpile(
                trainer_id, pservers=pserver_endpoints, trainers=trainers)
            if training_role == "PSERVER":
                self.train_program = t.get_pserver_program(current_endpoint)
                self.startup_program = t.get_startup_program(current_endpoint,
                                                             self.train_program)
            elif training_role == "TRAINER":
                self.train_program = t.get_trainer_program()
            else:
                raise ValueError(
                    'PADDLE_TRAINING_ROLE environment variable must be either TRAINER or PSERVER'
                )
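
    # Illustrative environment for distributed training (all values below are
    # made-up examples):
    #
    #     PADDLE_TRAINING_ROLE=TRAINER      # or PSERVER
    #     PADDLE_PSERVER_PORT=6174
    #     PADDLE_PSERVER_IPS=192.168.0.2,192.168.0.3
    #     PADDLE_TRAINERS=4
    #     PADDLE_CURRENT_IP=192.168.0.2     # used by pservers only
    #     PADDLE_TRAINER_ID=0               # used by trainers only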

    def train(self,
              num_epochs,
              event_handler,
              reader=None,
              parallel=False,
              feed_order=None):
        """
        Train the model.

        Args:
            num_epochs: The number of epochs. Each epoch processes all data in the reader.
            event_handler: The event handler, a function with type (ev:Event)->void.
            reader: A callable that returns an iterator over mini-batches of training data.
            parallel: True to train with multiple CPUs or GPUs (not implemented yet).
            feed_order: Feeding order of the reader. If None, the data variables are fed
                in the order they are defined in the program.

        Returns:

        """
        if parallel:
            raise NotImplementedError(
                "Parallel Executor version of trainer is not implemented")

        training_role = os.getenv("PADDLE_TRAINING_ROLE", "")
        if training_role == "PSERVER":
            with self._prog_and_scope_guard():
                exe = executor.Executor(self.place)
                exe.run()
                return

        self._train_by_executor(num_epochs, event_handler, reader, feed_order)

    def test(self, reader):
        pass

    def save_params(self, param_path):
        # reference: save_persistables in io.py
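        # Illustrative usage (the directory below is a made-up example):
        #     trainer.save_params("/tmp/my_model_params")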
        with self._prog_and_scope_guard():
            exe = executor.Executor(self.place)
            io.save_persistables(exe, dirname=param_path)

    @staticmethod
    def _check_and_get_place(place):
        """
        Check the type of place or get the default place
        Args:
            place(None|core.CUDAPlace|core.CPUPlace): the place that trainer will be executed on.

        Raises:
            TypeError: if place is neither core.CUDAPlace nor core.CPUPlace.

        Returns:
            The original place if it is not None.
            If fluid is compiled with CUDA, returns CUDAPlace(0) by default;
            otherwise returns CPUPlace by default.
        """
        if place is None:
            if core.is_compiled_with_cuda():
                return core.CUDAPlace(0)
            else:
                return core.CPUPlace()
        else:
            if not isinstance(place, core.CUDAPlace) and not isinstance(
                    place, core.CPUPlace):
                raise TypeError("Place should be either CUDAPlace or CPUPlace")
            return place

    @contextlib.contextmanager
    def _prog_and_scope_guard(self):
        with framework.program_guard(
                main_program=self.train_program,
                startup_program=self.startup_program):
            with executor.scope_guard(self.scope):
                yield

    def _train_by_executor(self, num_epochs, event_handler, reader, feed_order):
        """
        Train by Executor and single device.

        Args:
            num_epochs: The number of epochs to train.
            event_handler: Callback invoked with Begin/End Epoch/Step events.
            reader: A callable that returns an iterator over mini-batches.
            feed_order: Names of the variables to feed, or None to feed all
                data variables in definition order.

        Returns:

        """
        with self._prog_and_scope_guard():
            exe = executor.Executor(self.place)
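            # When feed_order is None, feed every data variable (is_data=True)
            # declared in the global block, in definition order; otherwise
            # look up the named variables explicitly.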
            if feed_order is None:
                feed_var_list = [
                    var
                    for var in self.train_program.global_block(
                    ).vars.itervalues()
                    if hasattr(var, 'is_data') and var.is_data
                ]
            else:
                feed_var_list = [
                    self.train_program.global_block().var(var_name)
                    for var_name in feed_order
                ]

            feeder = data_feeder.DataFeeder(
                feed_list=feed_var_list, place=self.place)
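            # Event order per epoch: BeginEpochEvent, then for every mini-batch
            # BeginStepEvent -> one executor iteration -> EndStepEvent, and
            # finally EndEpochEvent.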
            for epoch_id in range(num_epochs):
                event_handler(BeginEpochEvent(epoch_id))
                for step_id, data in enumerate(reader()):
                    event_handler(BeginStepEvent(epoch_id, step_id))
                    exe.run(feed=feeder.feed(data), fetch_list=[])
                    event_handler(EndStepEvent(epoch_id, step_id))
                event_handler(EndEpochEvent(epoch_id))