import os
import torch
import torch.backends.cudnn as cudnn
import torch.optim as optim
import numpy as np

from nets.ssd import get_ssd
from nets.ssd_training import Generator, MultiBoxLoss
from utils.config import Config

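# Exponential learning-rate decay: lr * gamma**step.
# For example, with lr=1e-4 and gamma=0.95, the call made at epoch 10
# lowers the rate to 1e-4 * 0.95**10 ≈ 5.99e-5.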
def adjust_learning_rate(optimizer, lr, gamma, step):
    lr = lr * (gamma ** (step))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr

if __name__ == "__main__":
    os.makedirs("logs", exist_ok=True)  # checkpoints are written here each epoch
    Batch_size = 4
    # ------------------------------------#
    #   Train with part of the weights frozen first,
    #   then unfreeze and train all of them.
    #   Use a larger learning rate first,
    #   then a smaller one.
    # ------------------------------------#
    lr = 1e-4          # learning rate while the backbone is frozen
    freeze_lr = 1e-5   # smaller rate used after unfreezing
    Cuda = True
    Start_iter = 0     # epoch to resume from
    Freeze_epoch = 25  # epochs [Start_iter, Freeze_epoch) run with the backbone frozen
    Epoch = 50         # epochs [Freeze_epoch, Epoch) run fully unfrozen

    model = get_ssd("train", Config["num_classes"])

    print('Loading weights into state dict...')
    model_dict = model.state_dict()
    pretrained_dict = torch.load("model_data/ssd_weights.pth",
                                 map_location='cuda' if Cuda else 'cpu')
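    # Keep only the pretrained tensors whose shapes match the current model,
    # so a checkpoint trained with a different class count still loads partially.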
    pretrained_dict = {k: v for k, v in pretrained_dict.items()
                       if k in model_dict and np.shape(model_dict[k]) == np.shape(v)}
    model_dict.update(pretrained_dict)
    model.load_state_dict(model_dict)
    print('Finished!')

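    # Put the model in training mode (enables dropout / batch-norm updates).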
    net = model.train()
    if Cuda:
        net = torch.nn.DataParallel(model)
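        # benchmark mode lets cuDNN auto-tune conv algorithms for fixed-size inputs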
        cudnn.benchmark = True
        net = net.cuda()

    annotation_path = '2007_train.txt'
    with open(annotation_path) as f:
        lines = f.readlines()
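    # Shuffle with a fixed seed so the ordering is reproducible across runs,
    # then re-seed from the system clock.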
    np.random.seed(10101)
    np.random.shuffle(lines)
    np.random.seed(None)
    num_train = len(lines)

    gen = Generator(Batch_size, lines,
                    (Config["min_dim"], Config["min_dim"]), Config["num_classes"]).generate()
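    # Generator.generate() is assumed to yield (images, targets) batches
    # endlessly, so next(gen) in the loops below never exhausts.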

    criterion = MultiBoxLoss(Config['num_classes'], 0.5, True, 0, True, 3, 0.5,
                             False, Cuda)
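    # The positional arguments above follow the classic ssd.pytorch MultiBoxLoss
    # signature: (num_classes, overlap_thresh, prior_for_matching, bkg_label,
    # neg_mining, neg_pos, neg_overlap, encode_target, use_gpu).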
    epoch_size = num_train // Batch_size

    if True:
        # ------------------------------------#
        #   Train with the VGG backbone frozen
        # ------------------------------------#
        for param in model.vgg.parameters():
            param.requires_grad = False
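        # Frozen parameters get no gradients, so Adam below effectively
        # updates only the extra SSD layers and heads.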

        optimizer = optim.Adam(net.parameters(), lr=lr)
        for epoch in range(Start_iter, Freeze_epoch):
            if epoch % 10 == 0:
                adjust_learning_rate(optimizer, lr, 0.95, epoch)
            loc_loss = 0
            conf_loss = 0
            for iteration in range(epoch_size):
                images, targets = next(gen)
                if Cuda:
                    images = torch.from_numpy(images).float().cuda()
                    targets = [torch.from_numpy(ann).float().cuda() for ann in targets]
                else:
                    images = torch.from_numpy(images).float()
                    targets = [torch.from_numpy(ann).float() for ann in targets]
                # Forward pass
                out = net(images)
                # Zero the gradients
                optimizer.zero_grad()
                # Compute localization and confidence losses
                loss_l, loss_c = criterion(out, targets)
                loss = loss_l + loss_c
                # Backward pass and parameter update
                loss.backward()
                optimizer.step()
                # Accumulate the running losses
                loc_loss += loss_l.item()
                conf_loss += loss_c.item()

                print('\nEpoch: ' + str(epoch + 1) + '/' + str(Freeze_epoch))
                print('iter: ' + str(iteration) + '/' + str(epoch_size) +
                      ' || Loc_Loss: %.4f || Conf_Loss: %.4f ||' % (loc_loss / (iteration + 1), conf_loss / (iteration + 1)), end=' ')

            print('Saving state, epoch:', str(epoch + 1))
            torch.save(model.state_dict(), 'logs/Epoch%d-Loc%.4f-Conf%.4f.pth' %
                       ((epoch + 1), loc_loss / (iteration + 1), conf_loss / (iteration + 1)))

    if True:
        # ------------------------------------#
        #   Unfreeze everything and keep training
        # ------------------------------------#
        for param in model.vgg.parameters():
            param.requires_grad = True
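        # Rebuild the optimizer so the newly unfrozen backbone parameters are
        # updated too, now at the lower learning rate.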

        optimizer = optim.Adam(net.parameters(), lr=freeze_lr)
        for epoch in range(Freeze_epoch, Epoch):
            if epoch % 10 == 0:
                adjust_learning_rate(optimizer, freeze_lr, 0.95, epoch)
            loc_loss = 0
            conf_loss = 0
            for iteration in range(epoch_size):
                images, targets = next(gen)
                if Cuda:
                    images = torch.from_numpy(images).float().cuda()
                    targets = [torch.from_numpy(ann).float().cuda() for ann in targets]
                else:
                    images = torch.from_numpy(images).float()
                    targets = [torch.from_numpy(ann).float() for ann in targets]
                # Forward pass
                out = net(images)
                # Zero the gradients
                optimizer.zero_grad()
                # Compute localization and confidence losses
                loss_l, loss_c = criterion(out, targets)
                loss = loss_l + loss_c
                # Backward pass and parameter update
                loss.backward()
                optimizer.step()
                # Accumulate the running losses
                loc_loss += loss_l.item()
                conf_loss += loss_c.item()

                print('\nEpoch: ' + str(epoch + 1) + '/' + str(Epoch))
                print('iter: ' + str(iteration) + '/' + str(epoch_size) +
                      ' || Loc_Loss: %.4f || Conf_Loss: %.4f ||' % (loc_loss / (iteration + 1), conf_loss / (iteration + 1)), end=' ')

            print('Saving state, epoch:', str(epoch + 1))
            torch.save(model.state_dict(), 'logs/Epoch%d-Loc%.4f-Conf%.4f.pth' %
                       ((epoch + 1), loc_loss / (iteration + 1), conf_loss / (iteration + 1)))