#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import time
import copy

import logging
import paddle

from paddle.distributed import ParallelEnv

from ..datasets.builder import build_dataloader
from ..models.builder import build_model
from ..utils.visual import tensor2img, save_image
from ..utils.filesystem import makedirs, save, load
from ..utils.timer import TimeAverager
from ..metric.psnr_ssim import calculate_psnr, calculate_ssim


class Trainer:
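    """Generic training and evaluation loop driven by a config object.

    Builds the train dataloader and model from ``cfg``, runs the epoch and
    iteration loops with periodic logging, visualization, validation and
    checkpointing, and provides ``resume``/``load`` helpers for restoring
    network and optimizer state.
    """
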
    def __init__(self, cfg):

        # build train dataloader
        self.train_dataloader = build_dataloader(cfg.dataset.train)

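        # record how many iterations make up one epoch so the lr scheduler
        # can be built with the correct step_per_epoch value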
        if 'lr_scheduler' in cfg.optimizer:
            cfg.optimizer.lr_scheduler.step_per_epoch = len(
                self.train_dataloader)

        # build model
        self.model = build_model(cfg)
        # multiple gpus prepare
        if ParallelEnv().nranks > 1:
            self.distributed_data_parallel()

        self.logger = logging.getLogger(__name__)

        # base config
        self.output_dir = cfg.output_dir
        self.epochs = cfg.epochs
        self.start_epoch = 1
        self.current_epoch = 1
        self.batch_id = 0
        self.weight_interval = cfg.snapshot_config.interval
        self.log_interval = cfg.log_config.interval
        self.visual_interval = cfg.log_config.visiual_interval
        self.validate_interval = -1
        if cfg.get('validate', None) is not None:
            self.validate_interval = cfg.validate.get('interval', -1)
        self.cfg = cfg

        self.local_rank = ParallelEnv().local_rank

        # time count
        self.time_count = {}
        self.best_metric = {}

    def distributed_data_parallel(self):
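        # wrap every sub-network with DataParallel so gradients are
        # synchronized across the devices reported by ParallelEnv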
        strategy = paddle.distributed.prepare_context()
        for net_name, net in self.model.nets.items():
            self.model.nets[net_name] = paddle.DataParallel(net, strategy)

    def train(self):
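        """Run the main training loop over ``self.epochs`` epochs.

        Each iteration feeds a batch to the model and updates its parameters;
        logging, visualization, validation and checkpoint saving happen at the
        configured intervals.
        """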
        reader_cost_averager = TimeAverager()
        batch_cost_averager = TimeAverager()

        for epoch in range(self.start_epoch, self.epochs + 1):
            self.current_epoch = epoch
            start_time = step_start_time = time.time()
            for i, data in enumerate(self.train_dataloader):
                reader_cost_averager.record(time.time() - step_start_time)

                self.batch_id = i
                # unpack data from dataset and apply preprocessing
                # data input should be dict
                self.model.set_input(data)
                self.model.optimize_parameters()

                batch_cost_averager.record(time.time() - step_start_time,
                                           num_samples=self.cfg.get(
                                               'batch_size', 1))
                if i % self.log_interval == 0:
                    self.data_time = reader_cost_averager.get_average()
                    self.step_time = batch_cost_averager.get_average()
                    self.ips = batch_cost_averager.get_ips_average()
                    self.print_log()

                    reader_cost_averager.reset()
                    batch_cost_averager.reset()

                if i % self.visual_interval == 0:
                    self.visual('visual_train')

                step_start_time = time.time()

            self.logger.info('train one epoch time: {}'.format(time.time() -
                                                               start_time))
            if self.validate_interval > -1 and epoch % self.validate_interval == 0:
                self.validate()
            self.model.lr_scheduler.step()
            if epoch % self.weight_interval == 0:
                self.save(epoch, 'weight', keep=-1)
            self.save(epoch)

    def validate(self):
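        """Evaluate the model on the validation set.

        Accumulates the metrics enabled in the config (PSNR and/or SSIM) over
        all validation samples, saves visual results and logs the averages.
        """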
        if not hasattr(self, 'val_dataloader'):
            self.val_dataloader = build_dataloader(self.cfg.dataset.val,
                                                   is_train=False)

        metric_result = {}

        for i, data in enumerate(self.val_dataloader):
            self.batch_id = i

            self.model.set_input(data)
            self.model.test()

            visual_results = {}
            current_paths = self.model.get_image_paths()
            current_visuals = self.model.get_current_visuals()

            for j in range(len(current_paths)):
                short_path = os.path.basename(current_paths[j])
                basename = os.path.splitext(short_path)[0]
                for k, img_tensor in current_visuals.items():
                    name = '%s_%s' % (basename, k)
                    visual_results.update({name: img_tensor[j]})
                if 'psnr' in self.cfg.validate.metrics:
                    if 'psnr' not in metric_result:
                        metric_result['psnr'] = calculate_psnr(
                            tensor2img(current_visuals['output'][j], (0., 1.)),
                            tensor2img(current_visuals['gt'][j], (0., 1.)),
                            **self.cfg.validate.metrics.psnr)
                    else:
                        metric_result['psnr'] += calculate_psnr(
                            tensor2img(current_visuals['output'][j], (0., 1.)),
                            tensor2img(current_visuals['gt'][j], (0., 1.)),
                            **self.cfg.validate.metrics.psnr)
                if 'ssim' in self.cfg.validate.metrics:
                    if 'ssim' not in metric_result:
                        metric_result['ssim'] = calculate_ssim(
                            tensor2img(current_visuals['output'][j], (0., 1.)),
                            tensor2img(current_visuals['gt'][j], (0., 1.)),
                            **self.cfg.validate.metrics.ssim)
                    else:
                        metric_result['ssim'] += calculate_ssim(
                            tensor2img(current_visuals['output'][j], (0., 1.)),
                            tensor2img(current_visuals['gt'][j], (0., 1.)),
                            **self.cfg.validate.metrics.ssim)

            self.visual('visual_val', visual_results=visual_results)

            if i % self.log_interval == 0:
                self.logger.info('val iter: [%d/%d]' %
                                 (i, len(self.val_dataloader)))

        for metric_name in metric_result.keys():
            metric_result[metric_name] /= len(self.val_dataloader.dataset)

        self.logger.info('Epoch {} validate end: {}'.format(
            self.current_epoch, metric_result))

    def test(self):
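        """Run inference on the test set and save the visual results."""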
        if not hasattr(self, 'test_dataloader'):
            self.test_dataloader = build_dataloader(self.cfg.dataset.test,
                                                    is_train=False)

        # data[0]: img, data[1]: img path index
        # test batch size must be 1
        for i, data in enumerate(self.test_dataloader):
            self.batch_id = i

            self.model.set_input(data)
            self.model.test()

            visual_results = {}
            current_paths = self.model.get_image_paths()
            current_visuals = self.model.get_current_visuals()

            for j in range(len(current_paths)):
                short_path = os.path.basename(current_paths[j])
                basename = os.path.splitext(short_path)[0]
                for k, img_tensor in current_visuals.items():
                    name = '%s_%s' % (basename, k)
                    visual_results.update({name: img_tensor[j]})

            self.visual('visual_test', visual_results=visual_results)

            if i % self.log_interval == 0:
                self.logger.info('Test iter: [%d/%d]' %
                                 (i, len(self.test_dataloader)))

    def print_log(self):
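        # assemble a single log line: epoch/iteration, learning rate, current
        # losses, timing statistics and throughput (ips)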
        losses = self.model.get_current_losses()
        message = 'Epoch: %d, iters: %d ' % (self.current_epoch, self.batch_id)

        message += '%s: %.6f ' % ('lr', self.current_learning_rate)

        for k, v in losses.items():
            message += '%s: %.3f ' % (k, v)

        if hasattr(self, 'step_time'):
            message += 'batch_cost: %.5f sec ' % self.step_time

        if hasattr(self, 'data_time'):
            message += 'reader_cost: %.5f sec ' % self.data_time

        if hasattr(self, 'ips'):
            message += 'ips: %.5f images/s' % self.ips

        # print the message
        self.logger.info(message)

    @property
    def current_learning_rate(self):
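        # returns the learning rate of the first optimizer only; when several
        # optimizers exist they are assumed to share the same schedule here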
        for optimizer in self.model.optimizers.values():
            return optimizer.get_lr()

    def visual(self, results_dir, visual_results=None):
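        """Convert the given (or current) visual tensors to images and save
        them under ``<output_dir>/<results_dir>``."""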
        self.model.compute_visuals()

        if visual_results is None:
            visual_results = self.model.get_current_visuals()

        if self.cfg.is_train:
            msg = 'epoch%.3d_' % self.current_epoch
        else:
            msg = ''

        makedirs(os.path.join(self.output_dir, results_dir))
        min_max = self.cfg.get('min_max', None)
        if min_max is None:
            min_max = (-1., 1.)
        for label, image in visual_results.items():
            image_numpy = tensor2img(image, min_max)
            img_path = os.path.join(self.output_dir, results_dir,
                                    msg + '%s.png' % (label))
            save_image(image_numpy, img_path)

    def save(self, epoch, name='checkpoint', keep=1):
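        """Save networks (plus epoch and optimizer state for full checkpoints)
        to ``epoch_<epoch>_<name>.pkl`` in the output directory. Only rank 0
        writes; when ``keep > 0`` the checkpoint from ``keep`` epochs earlier
        is removed."""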
        if self.local_rank != 0:
            return

        assert name in ['checkpoint', 'weight']

        state_dicts = {}
        save_filename = 'epoch_%s_%s.pkl' % (epoch, name)
        save_path = os.path.join(self.output_dir, save_filename)
        for net_name, net in self.model.nets.items():
            state_dicts[net_name] = net.state_dict()

        if name == 'weight':
            save(state_dicts, save_path)
            return

        state_dicts['epoch'] = epoch

        for opt_name, opt in self.model.optimizers.items():
            state_dicts[opt_name] = opt.state_dict()

        save(state_dicts, save_path)

        if keep > 0:
            try:
                checkpoint_name_to_be_removed = os.path.join(
                    self.output_dir, 'epoch_%s_%s.pkl' % (epoch - keep, name))
                if os.path.exists(checkpoint_name_to_be_removed):
                    os.remove(checkpoint_name_to_be_removed)

            except Exception as e:
                self.logger.info('remove old checkpoints error: {}'.format(e))

    def resume(self, checkpoint_path):
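        """Restore networks, optimizers and the start epoch from a full
        checkpoint produced by ``save``."""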
        state_dicts = load(checkpoint_path)
        if state_dicts.get('epoch', None) is not None:
            self.start_epoch = state_dicts['epoch'] + 1

        for net_name, net in self.model.nets.items():
            net.set_state_dict(state_dicts[net_name])

        for opt_name, opt in self.model.optimizers.items():
            opt.set_state_dict(state_dicts[opt_name])

    def load(self, weight_path):
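        """Load network weights only (no optimizer or epoch state)."""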
        state_dicts = load(weight_path)

        for net_name, net in self.model.nets.items():
            net.set_state_dict(state_dicts[net_name])
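

# Minimal usage sketch (illustrative only): ``cfg`` is assumed to be an
# attribute-style config with the sections read above (dataset, optimizer,
# log_config, snapshot_config, ...); the config loader and file path below
# are hypothetical.
#
#     cfg = get_config('configs/example.yaml')
#     trainer = Trainer(cfg)
#     trainer.train()
#
#     # or run inference from previously saved weights:
#     # trainer.load('output_dir/epoch_100_weight.pkl')
#     # trainer.test()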