# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

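"""Training entry script: parse command-line arguments, build the dataset, model,
and optimizer, then launch the training loop.
"""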
import argparse

import paddle.fluid as fluid
from paddle.fluid.dygraph.parallel import ParallelEnv

from dygraph.datasets import DATASETS
import dygraph.transforms as T
#from dygraph.models import MODELS
from dygraph.cvlibs import manager
from dygraph.utils import get_environ_info
from dygraph.utils import logger
from dygraph.core import train


def parse_args():
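    """Parse command-line arguments for model training."""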
    parser = argparse.ArgumentParser(description='Model training')

    # params of model
    parser.add_argument(
        '--model_name',
        dest='model_name',
        help='Model type for training, which is one of {}'.format(
            str(list(manager.MODELS.components_dict.keys()))),
        type=str,
        default='UNet')

    # params of dataset
    parser.add_argument(
        '--dataset',
        dest='dataset',
        help="The dataset you want to train, which is one of {}".format(
            str(list(DATASETS.keys()))),
        type=str,
        default='OpticDiscSeg')
    parser.add_argument(
        '--dataset_root',
        dest='dataset_root',
        help="dataset root directory",
        type=str,
        default=None)

    # params of training
    parser.add_argument(
        "--input_size",
        dest="input_size",
        help="The image size for net inputs.",
        nargs=2,
        default=[512, 512],
        type=int)
    parser.add_argument(
        '--iters',
        dest='iters',
        help='Number of iterations for training',
        type=int,
        default=10000)
    parser.add_argument(
        '--batch_size',
        dest='batch_size',
        help='Mini batch size on each GPU or CPU',
        type=int,
        default=2)
    parser.add_argument(
        '--learning_rate',
        dest='learning_rate',
        help='Learning rate',
        type=float,
        default=0.01)
    parser.add_argument(
        '--pretrained_model',
        dest='pretrained_model',
        help='The path of the pretrained model',
        type=str,
        default=None)
    parser.add_argument(
        '--resume_model',
        dest='resume_model',
        help='The path of the model to resume training from',
        type=str,
        default=None)
    parser.add_argument(
        '--save_interval_iters',
        dest='save_interval_iters',
        help='The interval (in iters) at which to save a model snapshot',
        type=int,
        default=5)
    parser.add_argument(
        '--save_dir',
        dest='save_dir',
        help='The directory for saving the model snapshot',
        type=str,
        default='./output')
    parser.add_argument(
        '--num_workers',
        dest='num_workers',
        help='Number of workers for the data loader',
        type=int,
        default=0)
    parser.add_argument(
        '--do_eval',
        dest='do_eval',
        help='Eval while training',
        action='store_true')
    parser.add_argument(
        '--log_iters',
        dest='log_iters',
        help='Display logging information at every log_iters',
        default=10,
        type=int)
    parser.add_argument(
        '--use_vdl',
        dest='use_vdl',
        help='Whether to record the data to VisualDL during training',
        action='store_true')

    return parser.parse_args()


def main(args):
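    # Collect and log environment information (e.g. whether Paddle was compiled
    # with CUDA and which GPUs are in use).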
    env_info = get_environ_info()
    info = ['{}: {}'.format(k, v) for k, v in env_info.items()]
    info = '\n'.join(['\n', format('Environment Information', '-^48s')] + info +
                     ['-' * 48])
    logger.info(info)

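    # Train on the GPU assigned to this process when Paddle is compiled with CUDA
    # and GPUs are in use; otherwise fall back to the CPU.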
    places = fluid.CUDAPlace(ParallelEnv().dev_id) \
        if env_info['Paddle compiled with cuda'] and env_info['GPUs used'] \
        else fluid.CPUPlace()

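    # Look up the dataset class by name and fail early on an unknown dataset.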
    if args.dataset not in DATASETS:
        raise Exception('`--dataset` is invalid. It should be one of {}'.format(
            str(list(DATASETS.keys()))))
    dataset = DATASETS[args.dataset]

    with fluid.dygraph.guard(places):
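        # Everything below runs in dygraph (imperative) mode on the selected place.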
        # Create the training dataset and its augmentation transforms
        train_transforms = T.Compose([
            T.RandomHorizontalFlip(0.5),
            T.ResizeStepScaling(0.5, 2.0, 0.25),
            T.RandomPaddingCrop(args.input_size),
            T.RandomDistort(),
            T.Normalize(),
        ])
        train_dataset = dataset(
            dataset_root=args.dataset_root,
            transforms=train_transforms,
            mode='train')

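        # Build the validation dataset only when --do_eval is set.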
        eval_dataset = None
        if args.do_eval:
            eval_transforms = T.Compose(
                [T.Padding((2049, 1025)),
                 T.Normalize()])
            eval_dataset = dataset(
                dataset_root=args.dataset_root,
                transforms=eval_transforms,
                mode='val')

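        # Instantiate the model from the registry with the dataset's number of classes.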
        model = manager.MODELS[args.model_name](
            num_classes=train_dataset.num_classes)

        # Create the optimizer
        # TODO: this may be one less than len(loader)
        num_iters_each_epoch = len(train_dataset) // (
            args.batch_size * ParallelEnv().nranks)
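        # Decay the learning rate polynomially (power 0.9) from the initial value
        # down to 0 over args.iters steps.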
        lr_decay = fluid.layers.polynomial_decay(
            args.learning_rate, args.iters, end_learning_rate=0, power=0.9)
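        # SGD with momentum and L2 weight decay (regularization_coeff=4e-5).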
        optimizer = fluid.optimizer.Momentum(
            lr_decay,
            momentum=0.9,
            parameter_list=model.parameters(),
            regularization=fluid.regularizer.L2Decay(regularization_coeff=4e-5))

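        # Launch the training loop with checkpointing, optional in-training
        # evaluation, and optional VisualDL logging.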
        train(
            model,
            train_dataset,
            places=places,
            eval_dataset=eval_dataset,
            optimizer=optimizer,
            save_dir=args.save_dir,
            iters=args.iters,
            batch_size=args.batch_size,
            resume_model=args.resume_model,
            save_interval_iters=args.save_interval_iters,
            log_iters=args.log_iters,
            num_classes=train_dataset.num_classes,
            num_workers=args.num_workers,
            use_vdl=args.use_vdl)

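# Example invocation (all flags are defined above; the values are illustrative):
#   python deeplabv3p.py --model_name UNet --dataset OpticDiscSeg \
#       --iters 10000 --batch_size 2 --do_eval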

if __name__ == '__main__':
    args = parse_args()
    main(args)