# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modified from chainer(https://github.com/chainer/chainer)
from typing import Dict
from typing import Optional

import paddle
from paddle.io import DataLoader
from paddle.io import DistributedBatchSampler
from paddle.nn import Layer
from paddle.optimizer import Optimizer
from paddle.optimizer.lr import LRScheduler

from paddlespeech.s2t.training.reporter import report
from paddlespeech.s2t.training.timer import Timer  # assumed home of the Timer context manager used below
from paddlespeech.s2t.training.updaters.updater import UpdaterBase
from paddlespeech.s2t.training.updaters.updater import UpdaterState
from paddlespeech.s2t.utils.log import Log

__all__ = ["StandardUpdater"]

logger = Log(__name__).getlog()


class StandardUpdater(UpdaterBase):
    """An example of over-simplification. Things may not be that simple, but
    you can subclass it to fit your need.
    """

    def __init__(self,
                 model: Layer,
                 optimizer: Optimizer,
                 scheduler: LRScheduler,
                 dataloader: DataLoader,
                 init_state: Optional[UpdaterState]=None):
        super().__init__(init_state)
        # it is designed to hold multiple models
        models = {"main": model}
        self.models: Dict[str, Layer] = models
        self.model = model

        # it is designed to hold multiple optimizers
        optimizers = {"main": optimizer}
        self.optimizer = optimizer
        self.optimizers: Dict[str, Optimizer] = optimizers

        # it is designed to hold multiple schedulers
        schedulers = {"main": scheduler}
        self.scheduler = scheduler
        self.schedulers: Dict[str, LRScheduler] = schedulers

        # dataloaders
        self.dataloader = dataloader

        self.train_iterator = iter(dataloader)

    def update(self):
        # We increase the iteration index after updating and before any
        # extension is executed. Here are the reasons.

        # 1. Snapshotting (as well as other extensions, like the visualizer)
        #    is executed after a step of updating.
        # 2. We do not increase the iteration index after the extensions
        #    because we prefer a consistent resume behavior: when loading
        #    from a `snapshot_iter_100.pdz`, the next step to train is
        #    naturally `101`. If the iteration index were increased after
        #    the extensions (including snapshotting), a `snapshot_iter_99`
        #    would be loaded instead, and an extra increment of the iteration
        #    index would be needed before training to avoid redoing iteration
        #    `99`, which was already done before snapshotting.
        # 3. Thus the iteration index represents "how many steps have been
        #    done so far".
        # NOTE: use report to capture the correct value. If you want to
        # report the learning rate used for a step, you must report it before
        # the learning rate scheduler's step() has been called. In paddle's
        # convention, we do not use an extension to change the learning rate,
        # so if you want to report it, do it in the updater.

        # Then here comes the next question: when is the proper time to
        # increase the epoch index? Since all extensions are executed after
        # updating, right after updating is also the proper time to increase
        # the epoch index.
        # 1. If we increased the epoch index before updating, an extension
        #    triggered on epochs would miss the correct timing; it could only
        #    fire after one extra update.
        # 2. Theoretically, the epoch index should be increased as soon as an
        #    epoch is done, so it is increased after updating.
        # 3. Thus the epoch index represents "how many epochs have been done
        #    so far", and it starts from 0.

        # switch to training mode
        for model in self.models.values():
            model.train()

        # training for a step is implemented here
        with Timer("data time cost:{}"):
            batch = self.read_batch()
        with Timer("step time cost:{}"):
            self.update_core(batch)

        self.state.iteration += 1
        if self.updates_per_epoch is not None:
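            # If the dataloader has no __len__, updates_per_epoch is None and
            # the epoch index is never advanced inside update().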
            if self.state.iteration % self.updates_per_epoch == 0:
                self.state.epoch += 1

    def update_core(self, batch):
        """A simple case for a training step. Basic assumptions are:
        Single model;
        Single optimizer;
        Single scheduler, and update learning rate each step;
        A batch from the dataloader is just the input of the model;
        The model returns a single loss, or a dict containing several losses.
        Parameters are updated at every batch; no gradient accumulation.
        """
        loss = self.model(*batch)

        if isinstance(loss, paddle.Tensor):
            loss_dict = {"main": loss}
        else:
            # Dict[str, Tensor]
            loss_dict = loss
            if "main" not in loss_dict:
                main_loss = 0
                for loss_item in loss.values():
                    main_loss += loss_item
                loss_dict["main"] = main_loss

        for name, loss_item in loss_dict.items():
            report(name, float(loss_item))

        # The standard step cycle: clear stale gradients, backpropagate the
        # aggregate loss, apply the parameter update, then advance the
        # learning rate schedule once per step.
        self.optimizer.clear_grad()
        loss_dict["main"].backward()
        self.optimizer.step()
        self.scheduler.step()

    @property
    def updates_per_epoch(self):
        """Number of steps per epoch, 
        determined by the length of the dataloader."""
        length_of_dataloader = None
        try:
            length_of_dataloader = len(self.dataloader)
        except TypeError:
            logger.debug("This dataloader has no __len__.")
        return length_of_dataloader

    def new_epoch(self):
        """Start a new epoch."""
        # NOTE: batch samplers for distributed training should subclass
        # DistributedBatchSampler and implement the `set_epoch` method
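        # `set_epoch` reseeds the sampler's shuffle, so each epoch visits the
        # data in a new order that stays consistent across distributed workers.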
        if hasattr(self.dataloader, "batch_sampler"):
            batch_sampler = self.dataloader.batch_sampler
            if isinstance(batch_sampler, DistributedBatchSampler):
                batch_sampler.set_epoch(self.state.epoch)
        self.train_iterator = iter(self.dataloader)

    def read_batch(self):
        """Read a batch from the data loader, auto renew when data is exhausted."""
        try:
            batch = next(self.train_iterator)
        except StopIteration:
            self.new_epoch()
            batch = next(self.train_iterator)
        return batch

    def state_dict(self):
        """State dict of a Updater, model, optimizers/schedulers 
        and updater state are included."""
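        # Keys follow the registry names, e.g. "main_params" and
        # "main_optimizer" for the default single-model setup.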
        state_dict = super().state_dict()
        for name, model in self.models.items():
            state_dict[f"{name}_params"] = model.state_dict()
        for name, optim in self.optimizers.items():
            state_dict[f"{name}_optimizer"] = optim.state_dict()
        return state_dict

    def set_state_dict(self, state_dict):
        """Set state dict for a Updater. Parameters of models, states for
        optimizers/schedulers and UpdaterState are restored."""
        for name, model in self.models.items():
            model.set_state_dict(state_dict[f"{name}_params"])
        for name, optim in self.optimizers.items():
            optim.set_state_dict(state_dict[f"{name}_optimizer"])
        super().set_state_dict(state_dict)
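

if __name__ == "__main__":
    # A minimal smoke-test sketch of how the pieces fit together. The toy
    # dataset and model below are illustrative assumptions, not part of the
    # PaddleSpeech API.
    import numpy as np
    from paddle.io import Dataset

    class ToyDataset(Dataset):
        """Sixteen random (feature, target) pairs for a linear-regression toy."""

        def __len__(self):
            return 16

        def __getitem__(self, idx):
            x = np.random.rand(4).astype("float32")
            y = np.random.rand(1).astype("float32")
            return x, y

    class ToyModel(Layer):
        """Returns the loss directly, matching StandardUpdater's assumption."""

        def __init__(self):
            super().__init__()
            self.linear = paddle.nn.Linear(4, 1)

        def forward(self, x, y):
            return paddle.nn.functional.mse_loss(self.linear(x), y)

    model = ToyModel()
    scheduler = paddle.optimizer.lr.ExponentialDecay(
        learning_rate=1e-3, gamma=0.99)
    optimizer = paddle.optimizer.Adam(
        learning_rate=scheduler, parameters=model.parameters())
    dataloader = DataLoader(ToyDataset(), batch_size=4)

    updater = StandardUpdater(model, optimizer, scheduler, dataloader)
    for _ in range(8):  # 8 steps = 2 toy epochs of 4 batches each
        updater.update()
    logger.info("iteration: %d, epoch: %d" %
                (updater.state.iteration, updater.state.epoch))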