Unverified · Commit fbfce974, authored by: B Bubbliiiing, committed by: GitHub

Update train.py

Parent e0ee93eb
@@ -10,9 +10,26 @@ import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
from utils.config import Config
from nets.yolo_training import YOLOLoss,Generator
from nets.yolo3 import YoloBody
from nets.yolo4 import YoloBody
#---------------------------------------------------#
# Get classes and anchor boxes
#---------------------------------------------------#
def get_classes(classes_path):
'''loads the classes'''
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
def get_anchors(anchors_path):
'''loads the anchors from a file'''
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
return np.array(anchors).reshape([-1,3,2])[::-1,:,:]
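For reference, a minimal sketch of what get_anchors returns, assuming the usual 9-anchor, comma-separated yolo_anchors.txt layout; the values below are illustrative only, not taken from this repository:

```python
import numpy as np

# Hypothetical single line of model_data/yolo_anchors.txt (width,height pairs in pixels):
line = "12,16, 19,36, 40,28, 36,75, 76,55, 72,146, 142,110, 192,243, 459,401"

anchors = np.array([float(x) for x in line.split(',')]).reshape([-1, 3, 2])[::-1, :, :]
print(anchors.shape)  # (3, 3, 2): three detection scales with three anchors each
print(anchors[0])     # largest anchors come first because of the [::-1] reversal
```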
def fit_ont_epoch(net,yolo_losses,epoch,epoch_size,epoch_size_val,gen,genval,Epoch,cuda):
total_loss = 0
@@ -72,16 +89,42 @@ def fit_ont_epoch(net,yolo_losses,epoch,epoch_size,epoch_size_val,gen,genval,Epo
print('Saving state, iter:', str(epoch+1))
torch.save(model.state_dict(), 'logs/Epoch%d-Total_Loss%.4f-Val_Loss%.4f.pth'%((epoch+1),total_loss/(epoch_size+1),val_loss/(epoch_size_val+1)))
if __name__ == "__main__":
# Parameter initialization
annotation_path = '2007_train.txt'
model = YoloBody(Config)
#-------------------------------#
# Input shape size
# Use 416x416 if GPU memory is small
# Use 608x608 if GPU memory is large
#-------------------------------#
input_shape = (416,416)
#-------------------------------#
# Settings for training tricks
#-------------------------------#
Cosine_lr = False
mosaic = True
# Whether to use CUDA
Cuda = True
smoooth_label = 0
annotation_path = '2007_train.txt'
#-------------------------------#
# Get anchor boxes and classes
#-------------------------------#
anchors_path = 'model_data/yolo_anchors.txt'
classes_path = 'model_data/voc_classes.txt'
class_names = get_classes(classes_path)
anchors = get_anchors(anchors_path)
num_classes = len(class_names)
# Create the model
model = YoloBody(len(anchors[0]),num_classes)
model_path = "model_data/yolo4_weights.pth"
# Speed up model training
print('Loading weights into state dict...')
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model_dict = model.state_dict()
pretrained_dict = torch.load("model_data/yolo_weights.pth", map_location=device)
pretrained_dict = torch.load(model_path, map_location=device)
pretrained_dict = {k: v for k, v in pretrained_dict.items() if np.shape(model_dict[k]) == np.shape(v)}
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
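The dictionary comprehension above keeps only pretrained tensors whose shapes match the new model, so layers whose shape depends on num_classes (typically the final prediction heads) are simply left at their initialization. A hedged variant of the same pattern that also guards against keys missing from the model and reports what was loaded (model_path, device, model_dict as defined above):

```python
pretrained_dict = torch.load(model_path, map_location=device)
# Keep only tensors that exist in the new model and have identical shapes.
matched = {k: v for k, v in pretrained_dict.items()
           if k in model_dict and np.shape(model_dict[k]) == np.shape(v)}
print('Loaded %d / %d tensors from %s' % (len(matched), len(model_dict), model_path))
model_dict.update(matched)
model.load_state_dict(model_dict)
```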
@@ -97,8 +140,8 @@ if __name__ == "__main__":
# Build the loss functions
yolo_losses = []
for i in range(3):
yolo_losses.append(YOLOLoss(np.reshape(Config["yolo"]["anchors"],[-1,2]),
Config["yolo"]["classes"], (Config["img_w"], Config["img_h"]), Cuda))
yolo_losses.append(YOLOLoss(np.reshape(anchors,[-1,2]),num_classes, \
(input_shape[1], input_shape[0]), smoooth_label, Cuda))
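smoooth_label (the repository's spelling) is forwarded into YOLOLoss; it is set to 0 here, so classification targets stay one-hot. If it were non-zero, the usual label-smoothing rule would soften the targets. A minimal sketch of that standard rule, not the repo's exact implementation:

```python
import numpy as np

def smooth_labels(y_true, label_smoothing, num_classes):
    # Pull one-hot targets toward a uniform distribution over the classes.
    return y_true * (1.0 - label_smoothing) + label_smoothing / num_classes

one_hot = np.eye(20)[3]                       # class 3 of the 20 VOC classes
print(smooth_labels(one_hot, 0.01, 20)[:5])   # [0.0005 0.0005 0.0005 0.9905 0.0005]
```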
# 0.1 for validation, 0.9 for training
val_split = 0.1
@@ -110,23 +153,24 @@ if __name__ == "__main__":
num_val = int(len(lines)*val_split)
num_train = len(lines) - num_val
if True:
# Using a learning rate of 1e-3 at the start makes convergence faster
lr = 1e-3
Batch_size = 8
Batch_size = 4
Init_Epoch = 0
Freeze_Epoch = 25
optimizer = optim.Adam(net.parameters(),lr)
lr_scheduler = optim.lr_scheduler.StepLR(optimizer,step_size=1,gamma=0.95)
optimizer = optim.Adam(net.parameters(),lr,weight_decay=5e-4)
if Cosine_lr:
lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=5, eta_min=1e-5)
else:
lr_scheduler = optim.lr_scheduler.StepLR(optimizer,step_size=1,gamma=0.9)
gen = Generator(Batch_size, lines[:num_train],
(Config["img_h"], Config["img_w"])).generate()
(input_shape[0], input_shape[1])).generate(mosaic = mosaic)
gen_val = Generator(Batch_size, lines[num_train:],
(Config["img_h"], Config["img_w"])).generate()
(input_shape[0], input_shape[1])).generate(mosaic = False)
epoch_size = num_train//Batch_size
epoch_size = max(1, num_train//Batch_size)
epoch_size_val = num_val//Batch_size
#------------------------------------#
# Freeze part of the network for training
@@ -137,21 +181,25 @@ if __name__ == "__main__":
for epoch in range(Init_Epoch,Freeze_Epoch):
fit_ont_epoch(net,yolo_losses,epoch,epoch_size,epoch_size_val,gen,gen_val,Freeze_Epoch,Cuda)
lr_scheduler.step()
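The Cosine_lr flag above swaps the fixed per-epoch decay (StepLR with gamma=0.9) for cosine annealing between lr and eta_min over T_max epochs. A small self-contained comparison of the two schedules; a dummy parameter stands in for net.parameters() and the values are illustrative only:

```python
import torch
import torch.optim as optim

dummy = [torch.nn.Parameter(torch.zeros(1))]
for use_cosine in (False, True):
    optimizer = optim.Adam(dummy, lr=1e-3, weight_decay=5e-4)
    if use_cosine:
        scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=5, eta_min=1e-5)
    else:
        scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9)
    lrs = []
    for _ in range(5):
        optimizer.step()   # a real training step would go here
        scheduler.step()
        lrs.append(optimizer.param_groups[0]['lr'])
    print('cosine' if use_cosine else 'step  ', ['%.2e' % v for v in lrs])
```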
if True:
lr = 1e-4
Batch_size = 4
Batch_size = 2
Freeze_Epoch = 25
Unfreeze_Epoch = 50
optimizer = optim.Adam(net.parameters(),lr)
lr_scheduler = optim.lr_scheduler.StepLR(optimizer,step_size=1,gamma=0.95)
optimizer = optim.Adam(net.parameters(),lr,weight_decay=5e-4)
if Cosine_lr:
lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=5, eta_min=1e-5)
else:
lr_scheduler = optim.lr_scheduler.StepLR(optimizer,step_size=1,gamma=0.9)
gen = Generator(Batch_size, lines[:num_train],
(Config["img_h"], Config["img_w"])).generate()
(input_shape[0], input_shape[1])).generate(mosaic = mosaic)
gen_val = Generator(Batch_size, lines[num_train:],
(Config["img_h"], Config["img_w"])).generate()
(input_shape[0], input_shape[1])).generate(mosaic = False)
epoch_size = num_train//Batch_size
epoch_size = max(1, num_train//Batch_size)
epoch_size_val = num_val//Batch_size
#------------------------------------#
# Training after unfreezing
......