train.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os, sys
# add python path of PaddleDetection to sys.path
parent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 2)))
if parent_path not in sys.path:
    sys.path.append(parent_path)

import time
# ignore numba warning
import warnings
warnings.filterwarnings('ignore')
import random
import datetime
import numpy as np
from collections import deque
import paddle
from paddle import fluid
from ppdet.core.workspace import load_config, merge_config, create
from ppdet.data.reader import create_reader
from ppdet.utils.stats import TrainingStats
from ppdet.utils.check import check_gpu, check_version, check_config
from ppdet.utils.cli import ArgsParser
from ppdet.utils.checkpoint import load_dygraph_ckpt, save_dygraph_ckpt
import paddle.distributed as dist
import logging
FORMAT = '%(asctime)s-%(levelname)s: %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
logger = logging.getLogger(__name__)


def parse_args():
    parser = ArgsParser()
    parser.add_argument(
        "-ckpt_type",
        default='pretrain',
        type=str,
        help="Loading Checkpoints only support 'pretrain', 'finetune', 'resume'."
    )
    parser.add_argument(
        "--fp16",
        action='store_true',
        default=False,
        help="Enable mixed precision training.")
    parser.add_argument(
        "--loss_scale",
        default=8.,
        type=float,
        help="Mixed precision training loss scale.")
    parser.add_argument(
        "--eval",
        action='store_true',
        default=False,
        help="Whether to perform evaluation in train")
    parser.add_argument(
        "--output_eval",
        default=None,
        type=str,
        help="Evaluation directory, default is current directory.")
    parser.add_argument(
        "--use_tb",
        action='store_true',
        default=False,
        help="Whether to record training data to Tensorboard.")
    parser.add_argument(
        '--tb_log_dir',
        type=str,
        default="tb_log_dir/scalar",
        help='Tensorboard logging directory for scalar.')
    parser.add_argument(
        "--enable_ce",
        action='store_true',
        default=False,
        help="If set, enable the continuous evaluation job. "
        "This flag is only used for internal tests.")
    parser.add_argument(
        "--use_gpu", action='store_true', default=False, help="data parallel")

    parser.add_argument(
        '--is_profiler',
        type=int,
        default=0,
        help='Profiler switch: set to 1 to enable profiling (used for benchmarking).')

    args = parser.parse_args()
    return args


def run():
    FLAGS = parse_args()

    cfg = load_config(FLAGS.config)
    merge_config(FLAGS.opt)
    check_config(cfg)
    check_gpu(cfg.use_gpu)
    check_version()
    env = os.environ

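    # distributed training is detected via the environment variables set by the
    # paddle.distributed launcher; each trainer gets its own seed so shuffling differs per process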
    FLAGS.dist = 'PADDLE_TRAINER_ID' in env and 'PADDLE_TRAINERS_NUM' in env
    if FLAGS.dist:
        trainer_id = int(env['PADDLE_TRAINER_ID'])
        local_seed = (99 + trainer_id)
        random.seed(local_seed)
        np.random.seed(local_seed)

    if FLAGS.enable_ce:
        random.seed(0)
        np.random.seed(0)

    # Model
    main_arch = cfg.architecture
    model = create(cfg.architecture)

    # Optimizer
    lr = create('LearningRate')()
    optimizer = create('OptimizerBuilder')(lr, model.parameters())

    # Init Model & Optimzer   
    model = load_dygraph_ckpt(
        model,
        optimizer,
        cfg.pretrain_weights,
        ckpt_type=FLAGS.ckpt_type,
        load_static_weights=cfg.get('load_static_weights', False))

    # Parallel Model 
    if dist.ParallelEnv().nranks > 1:
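        # initialize the collective (multi-process) environment and wrap the model
        # so that gradients are synchronized across devices during backward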
        strategy = paddle.distributed.init_parallel_env()
        model = paddle.DataParallel(model, strategy)

    # Data Reader 
    start_iter = 0
    if cfg.use_gpu:
        devices_num = fluid.core.get_cuda_device_count()
    else:
        devices_num = int(os.environ.get('CPU_NUM', 1))

    train_reader = create_reader(
        cfg.TrainReader, (cfg.max_iters - start_iter), cfg, devices_num=1)

    time_stat = deque(maxlen=cfg.log_iter)
    start_time = time.time()
    end_time = time.time()
    # Run Train 
    for iter_id, data in enumerate(train_reader()):

        start_time = end_time
        end_time = time.time()
        time_stat.append(end_time - start_time)
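        # average cost of the last cfg.log_iter iterations, used for the ETA and ips estimates below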
        time_cost = np.mean(time_stat)
        eta_sec = (cfg.max_iters - iter_id) * time_cost
        eta = str(datetime.timedelta(seconds=int(eta_sec)))

        # Model Forward
        model.train()
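        # the reader yields a flat list of tensors; the field names from inputs_def tell the model how to unpack it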
        outputs = model(data, cfg['TrainReader']['inputs_def']['fields'],
                        'train')

        # Model Backward
        loss = outputs['loss']
        if dist.ParallelEnv().nranks > 1:
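            # in data-parallel mode, scale the loss and manually allreduce gradients,
            # as required by paddle.DataParallel in dygraph mode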
            loss = model.scale_loss(loss)
            loss.backward()
            model.apply_collective_grads()
        else:
            loss.backward()
        optimizer.step()
        curr_lr = optimizer.get_lr()
        lr.step()
        optimizer.clear_grad()

        if dist.ParallelEnv().nranks < 2 or dist.ParallelEnv().local_rank == 0:
            # Log state 
            if iter_id == 0:
                train_stats = TrainingStats(cfg.log_iter, outputs.keys())
            train_stats.update(outputs)
            logs = train_stats.log()
            if iter_id % cfg.log_iter == 0:
                ips = float(cfg['TrainReader']['batch_size']) / time_cost
                strs = 'iter: {}, lr: {:.6f}, {}, eta: {}, batch_cost: {:.5f} sec, ips: {:.5f} images/sec'.format(
                    iter_id, curr_lr, logs, eta, time_cost, ips)
                logger.info(strs)
            # Save Stage 
            if iter_id > 0 and iter_id % int(
                    cfg.snapshot_iter) == 0 or iter_id == cfg.max_iters - 1:
                cfg_name = os.path.basename(FLAGS.config).split('.')[0]
                save_name = str(
                    iter_id) if iter_id != cfg.max_iters - 1 else "model_final"
                save_dir = os.path.join(cfg.save_dir, cfg_name)
                save_dygraph_ckpt(model, optimizer, save_dir, save_name)


def main():
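    # launch one training process per device; each process executes run()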
    dist.spawn(run)


if __name__ == "__main__":
    main()