# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import

import inspect
import os
import pickle
from collections import OrderedDict

import numpy as np

from paddle import fluid
from paddle.fluid.framework import in_dygraph_mode, Variable
from paddle.fluid.executor import global_scope
from paddle.fluid.io import is_belong_to_optimizer
from paddle.fluid.dygraph.base import to_variable

__all__ = ['Model', 'Loss', 'CrossEntropy', 'Input']


class Input(fluid.dygraph.Layer):
    def __init__(self, shape=None, dtype=None, name=None):
        super(Input, self).__init__()
        self.shape = shape
        self.dtype = dtype
        self.name = name

    def forward(self):
        return fluid.data(self.name, shape=self.shape, dtype=self.dtype)
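
# A minimal usage sketch (the names and shapes below are hypothetical):
# `Input` declarations describe the feed slots that `StaticGraphAdapter`
# later materializes through `fluid.data` by calling `forward()`:
#
#     image = Input(shape=[None, 784], dtype='float32', name='image')
#     label = Input(shape=[None, 1], dtype='int64', name='label')
#     model = SomeModel(inputs=[image], labels=[label])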


def to_list(value):
    if isinstance(value, (list, tuple)):
        return value
    return [value]


def to_numpy(var):
    assert isinstance(var, (Variable, fluid.core.VarBase)), "not a variable"
    if isinstance(var, fluid.core.VarBase):
        return var.numpy()
    t = global_scope().find_var(var.name).get_tensor()
    return np.array(t)


class Loss(object):
    def __init__(self, average=True):
        super(Loss, self).__init__()
        self.average = average

    def forward(self, outputs, labels):
        raise NotImplementedError()

    def __call__(self, outputs, labels):
        labels = to_list(labels)
        if in_dygraph_mode():
            labels = [to_variable(l) for l in labels]
        losses = to_list(self.forward(to_list(outputs), labels))
        if self.average:
            losses = [fluid.layers.reduce_mean(l) for l in losses]
        else:
            losses = [fluid.layers.reduce_sum(l) for l in losses]
        return losses


class CrossEntropy(Loss):
    def __init__(self, average=True):
        super(CrossEntropy, self).__init__(average)

    def forward(self, outputs, labels):
        return [
            fluid.layers.cross_entropy(o, l) for o, l in zip(outputs, labels)
        ]
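
# A minimal sketch (hypothetical, not part of this module) of how a custom
# loss plugs into the `Loss` protocol: subclass it and implement `forward`
# over lists of outputs/labels; `__call__` handles list coercion and the
# mean/sum reduction selected by `average`.
#
#     class AbsoluteError(Loss):
#         def forward(self, outputs, labels):
#             return [
#                 fluid.layers.abs(o - l) for o, l in zip(outputs, labels)
#             ]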


class StaticGraphAdapter(object):
    def __init__(self, model):
        super(StaticGraphAdapter, self).__init__()
        self.model = model
        # with `_build_once` gone, parameters are now created in `__init__`
        # so we need to keep track of the parameters already created
        self._startup_prog = fluid.default_startup_program()
        self._orig_prog = fluid.default_main_program()

        self._label_vars = {}  # label variables
        self._input_vars = {}  # input variables
        self._endpoints = {}
        self._loss_endpoint = None
        self._executor = None
        self._progs = {}
        self._compiled_progs = {}

        self._lazy_load_optimizer = None

    @property
    def mode(self):
        return self.model.mode

    @mode.setter
    def mode(self, value):
        self.model.mode = value

    def train(self, inputs, labels=None, device='CPU', device_ids=None):
        assert self.model._optimizer, \
            "model not ready, please call `model.prepare()` first"
        self.mode = 'train'
        return self._run(inputs, labels, device, device_ids)

    def eval(self, inputs, labels=None, device='CPU', device_ids=None):
        self.mode = 'eval'
        return self._run(inputs, labels, device, device_ids)

    def test(self, inputs, device='CPU', device_ids=None):
        self.mode = 'test'
        return self._run(inputs, None, device, device_ids)

    def parameters(self, *args, **kwargs):
        return None

    def save(self, path):
        def _save(state, path):
            if not state:
                return
            state = {
                k: to_numpy(v) if isinstance(v, Variable) else v
                for k, v in state.items()
            }
            with open(path, 'wb') as f:
                pickle.dump(state, f)

        base = os.path.basename(path)
        assert base != "", "path should be of 'dirname/filename' format"
        param_path = path + ".pdparams"
        _save(self.model.state_dict(), param_path)
        prog = self._progs.get('train', None)
        if prog is None or self.model._optimizer is None:
            return
        # XXX `optimizer.state_dict()` only works in dygraph mode
        optim_path = path + ".pdopt"
        optim = {
            p.name: p
            for p in filter(is_belong_to_optimizer, prog.list_vars())
        }
        if not optim:
            return
        # HACK this is contrived, optimizer state is not the same for
        # static/dynamic graph mode
        optim['__static_graph_only__'] = True
        _save(optim, optim_path)

    def load(self, path):
        def _load(path):
            if not os.path.exists(path):
                return
            with open(path, 'rb') as f:
                return pickle.load(f)

        param_path = path + ".pdparams"
        param_state = _load(param_path)
        assert param_state, "failed to load parameters, please check path"

        if self._executor is None:
            executor = fluid.Executor(fluid.CPUPlace())._default_executor
        else:
            executor = self._executor._default_executor

        fluid.core._create_loaded_parameter(
            list(self.model.state_dict().values()), global_scope(), executor)

        for key, var in self.model.state_dict().items():
            assert key in param_state, \
                "parameter [{}] is not found in model file [{}]".format(
                    key, param_path)
            self._set_var(var, param_state[key])

        # FIXME what if a different optimizer is used?
        if not self.model._optimizer:
            return
        optim_path = path + ".pdopt"
        optim_state = _load(optim_path)
        if optim_state is None:
            return
        assert '__static_graph_only__' in optim_state, \
            "optimizer saved in dygraph mode is not usable in static graph"

        if self._executor is not None:
            self._load_optimizer(optim_state)
        else:
            self._lazy_load_optimizer = optim_state

    def _load_optimizer(self, state):
        prog = self._progs.get('train', None)
        optim = list(filter(is_belong_to_optimizer, prog.list_vars()))
        if not optim:
            return

        fluid.core._create_loaded_parameter(optim,
                                            global_scope(),
                                            self._executor._default_executor)

        for var in optim:
            assert var.name in state, \
                "variable [{}] is not in optimizer state file".format(var.name)
            self._set_var(var, state[var.name])

    def _set_var(self, var, ndarray):
        t = global_scope().find_var(var.name).get_tensor()
        p = t._place()
        if p.is_cpu_place():
            place = fluid.CPUPlace()
        elif p.is_cuda_pinned_place():
            place = fluid.CUDAPinnedPlace()
        else:
            p = fluid.core.Place()
            p.set_place(t._place())
            place = fluid.CUDAPlace(p.gpu_device_id())

        t.set(ndarray, place)

    def _run(self, inputs, labels=None, device='CPU', device_ids=None):
        inputs = to_list(inputs)
        if labels is not None:
            labels = to_list(labels)
        assert len(inputs) == len(self.model._inputs), "number of inputs" \
            + " does not match number of arguments of `forward` method"

        if self._progs.get(self.mode, None) is None:
            if self.model._inputs is None:
                raise ValueError("The inputs of Model must not be None.")
            self._input_vars = [
                k.forward() for k in to_list(self.model._inputs)
            ]
            self._make_program(self._input_vars)

        compiled_prog = self._compile_and_initialize(self._progs[self.mode],
                                                     device, device_ids)

        feed = {}
        input_names = [v.name for v in self._input_vars]
        for idx, n in enumerate(input_names):
            # train and test may take different arguments
            if inputs[idx] is not None:
                feed[n] = inputs[idx]
        if labels is not None:
            for idx, v in enumerate(self._label_vars[self.mode]):
                feed[v.name] = labels[idx]

        endpoints = self._endpoints[self.mode]
        # copy the list: appending in place with `+=` would grow the cached
        # endpoint output list with loss variables on every call
        fetch_list = list(endpoints['output'])
        if 'loss' in endpoints:
            fetch_list += endpoints['loss']
        num_output = len(endpoints['output'])
        out = self._executor.run(compiled_prog,
                                 feed=feed,
                                 fetch_list=fetch_list)
        if self.mode == 'test':
            return out[:num_output]
        else:
            return out[:num_output], out[num_output:]

    def _make_program(self, inputs):
        prog = self._orig_prog.clone()
        if self.mode == 'train' and self.model._optimizer._learning_rate_map:
            # HACK workaround learning rate map issue
            lr_var = self.model._optimizer._learning_rate_map[self._orig_prog]
            self.model._optimizer._learning_rate_map[prog] = lr_var
        losses = []
        with fluid.program_guard(prog, self._startup_prog):
            outputs = to_list(self.model.forward(*inputs))
            if self.mode != 'test':
                losses = self._get_loss(outputs)
                if self.mode == 'train':
                    self._loss_endpoint = fluid.layers.sum(losses)
                    self.model._optimizer.minimize(self._loss_endpoint)
        if self.mode != 'train':  # clone again to put it in test mode
            prog = prog.clone(for_test=True)
        self._progs[self.mode] = prog
        self._endpoints[self.mode] = {
            'output': outputs,
            'loss': losses
        } if self.model._loss_function else {
            'output': outputs
        }

    def _get_loss(self, outputs):
        if self.model._loss_function and self.model._loss:
            raise ValueError(
                "Do not set loss by model.set_loss() and "
                "loss_function in model.prepare() at the same time.")
        if self.model._loss_function is not None:
            if self.model._labels is None:
                raise ValueError("The labels of Model must not be None.")
            label_vars = [k.forward() for k in to_list(self.model._labels)]
            self._label_vars[self.mode] = label_vars
            losses = self.model._loss_function(outputs, label_vars)
        else:
            assert self.model._loss
            losses = to_list(self.model._loss)
        return losses

    def _compile_and_initialize(self, prog, device='CPU', device_ids=None):
        compiled_prog = self._compiled_progs.get(self.mode, None)
        if compiled_prog is not None:
            return compiled_prog

        places = [
            fluid.CUDAPlace(i) if device.lower() == 'gpu' else fluid.CPUPlace()
            for i in device_ids
        ]

        # XXX *ALL WEIGHTS* should be initialized upon model construction,
        # even if `forward()` may run different code paths for different
        # modes; therefore the startup program only needs to run once
        if self._executor is None:
            self._executor = fluid.Executor(places[0])
            # XXX incremental initialization
            uninitialized = []
            for var_py in self._startup_prog.list_vars():
                var = fluid.global_scope().find_var(var_py.name)
                if var and var.get_tensor()._is_initialized():
                    continue
                uninitialized.append(var_py)
            if uninitialized:
                startup_prog = self._startup_prog._prune(uninitialized)
                self._executor.run(startup_prog)

            if self.mode == 'train' and self._lazy_load_optimizer:
                self._load_optimizer(self._lazy_load_optimizer)
                self._lazy_load_optimizer = None

        compiled_prog = fluid.CompiledProgram(prog)
        if len(device_ids) > 1:
            loss_name = None
            if self.mode == 'train' and self._loss_endpoint is not None:
                loss_name = self._loss_endpoint.name

            share_vars_from = None
            if self.mode == 'eval' and 'train' in self._compiled_progs:
                share_vars_from = self._compiled_progs['train']
            # HACK invalidate the eval program if it was compiled before the
            # train program; quite hackish, OTOH, it is generally uncommon
            # that the eval program will be run before the train program
            if self.mode == 'train' and 'eval' in self._compiled_progs:
                del self._compiled_progs['eval']

            compiled_prog = compiled_prog.with_data_parallel(
                loss_name=loss_name,
                places=places,
                share_vars_from=share_vars_from)

        self._compiled_progs[self.mode] = compiled_prog
        return compiled_prog


class DynamicGraphAdapter(object):
    def __init__(self, model):
        super(DynamicGraphAdapter, self).__init__()
        self.model = model

    @property
    def mode(self):
        return self.model.mode

    @mode.setter
    def mode(self, value):
        self.model.mode = value

    # TODO multi device in dygraph mode not implemented at present
    def train(self, inputs, labels=None, device='CPU', device_ids=None):
        assert self.model._optimizer, \
            "model not ready, please call `model.prepare()` first"
        super(Model, self.model).train()
        self.mode = 'train'
        inputs = to_list(inputs)
        if labels is not None:
            labels = to_list(labels)
        outputs = self.model.forward(*[to_variable(x) for x in inputs])
        losses = self._get_loss(outputs, labels)
        final_loss = fluid.layers.sum(losses)
        final_loss.backward()
        self.model._optimizer.minimize(final_loss)
        self.model.clear_gradients()
        return [to_numpy(o) for o in to_list(outputs)], \
            [to_numpy(l) for l in losses]

    def eval(self, inputs, labels=None, device='CPU', device_ids=None):
        super(Model, self.model).eval()
        self.mode = 'eval'
        inputs = to_list(inputs)
        if labels is not None:
            labels = to_list(labels)
        outputs = self.model.forward(*[to_variable(x) for x in inputs])
        losses = self._get_loss(outputs, labels)
        return [to_numpy(o) for o in to_list(outputs)], \
            [to_numpy(l) for l in losses]

    def test(self, inputs, device='CPU', device_ids=None):
        super(Model, self.model).eval()
        self.mode = 'test'
        inputs = [to_variable(x) for x in to_list(inputs)]
        outputs = self.model.forward(*inputs)
        return [to_numpy(o) for o in to_list(outputs)]

    def _get_loss(self, outputs, labels):
        if self.model._loss_function and self.model._loss:
            raise ValueError(
                "Do not set loss by model.set_loss() and "
                "loss_function in model.prepare() at the same time.")
        if self.model._loss_function is not None:
            return self.model._loss_function(outputs, labels)
        else:
            return to_list(self.model._loss)

    def parameters(self, *args, **kwargs):
        return super(Model, self.model).parameters(*args, **kwargs)

    def save(self, path):
        params = self.model.state_dict()
        fluid.save_dygraph(params, path)
        if self.model._optimizer is None:
            return
        if self.model._optimizer.state_dict():
            optim = self.model._optimizer.state_dict()
            fluid.save_dygraph(optim, path)

    def load(self, path):
        params, optim = fluid.load_dygraph(path)
        self.model.set_dict(params)
        if self.model._optimizer is None or optim is None:
            return
        self.model._optimizer.set_dict(optim)


class Model(fluid.dygraph.Layer):
    def __init__(self, inputs=None, labels=None):
        super(Model, self).__init__(self.__class__.__name__)
        self.mode = 'train'
        self._inputs = inputs
        self._labels = labels
        self._loss_function = None
        self._loss_weights = None
        self._loss = None
        self._optimizer = None
        if in_dygraph_mode():
            self._adapter = DynamicGraphAdapter(self)
        else:
            self._adapter = StaticGraphAdapter(self)

    def train(self, *args, **kwargs):
        return self._adapter.train(*args, **kwargs)

    def eval(self, *args, **kwargs):
        return self._adapter.eval(*args, **kwargs)

    def test(self, *args, **kwargs):
        return self._adapter.test(*args, **kwargs)

    def save(self, *args, **kwargs):
        return self._adapter.save(*args, **kwargs)

    def load(self, *args, **kwargs):
        return self._adapter.load(*args, **kwargs)

    def prepare(self, optimizer, loss_function=None):
        self._optimizer = optimizer
        if loss_function:
            if not isinstance(loss_function, Loss):
                raise TypeError(
                    "'loss_function' must be an instance of a 'Loss' subclass")
        self._loss_function = loss_function

    def parameters(self, *args, **kwargs):
        return self._adapter.parameters(*args, **kwargs)

    def set_loss(self, loss):
        if loss and self._loss_function:
            raise ValueError(
                "Do not set loss by model.set_loss() and "
                "loss_function in model.prepare() at the same time.")
        if not isinstance(loss, (Variable, fluid.core.VarBase)):
            raise TypeError("loss type should be a Variable or VarBase.")
        self._loss = loss
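
# End-to-end usage sketch. `MNIST`, the shapes, and the optimizer settings
# below are assumptions for illustration, not part of this module; the same
# driver code covers both graph modes because `Model.__init__` picks the
# matching adapter at construction time.
#
#     inputs = [Input(shape=[None, 1, 28, 28], dtype='float32', name='image')]
#     labels = [Input(shape=[None, 1], dtype='int64', name='label')]
#     model = MNIST(inputs=inputs, labels=labels)
#     optim = fluid.optimizer.SGD(learning_rate=1e-3,
#                                 parameter_list=model.parameters())
#     model.prepare(optim, CrossEntropy())
#     for image, label in batches:  # numpy arrays
#         outputs, losses = model.train(image, label,
#                                       device='CPU', device_ids=[0])
#     model.save('checkpoints/mnist')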