Unverified · Commit 2867a346, authored by Bubbliiiing, committed by GitHub

Update train.py

Parent 89c236d8
...
@@ -84,7 +84,7 @@ if __name__ == "__main__":
         param.requires_grad = False
     optimizer = optim.Adam(net.parameters(), lr=lr)
-    lr_scheduler = optim.lr_scheduler.StepLR(optimizer,step_size=1,gamma=0.9)
+    lr_scheduler = optim.lr_scheduler.StepLR(optimizer,step_size=1,gamma=0.95)
     for epoch in range(Start_iter,Freeze_epoch):
         with tqdm(total=epoch_size,desc=f'Epoch {epoch + 1}/{Freeze_epoch}',postfix=dict,mininterval=0.3) as pbar:
             loc_loss = 0
@@ -131,7 +131,7 @@ if __name__ == "__main__":
         param.requires_grad = True
     optimizer = optim.Adam(net.parameters(), lr=freeze_lr)
-    lr_scheduler = optim.lr_scheduler.StepLR(optimizer,step_size=1,gamma=0.9)
+    lr_scheduler = optim.lr_scheduler.StepLR(optimizer,step_size=1,gamma=0.95)
     for epoch in range(Freeze_epoch,Epoch):
         with tqdm(total=epoch_size,desc=f'Epoch {epoch + 1}/{Freeze_epoch}',postfix=dict,mininterval=0.3) as pbar:
             loc_loss = 0
...
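The only functional change is the StepLR decay factor: with `step_size=1`, the scheduler multiplies the learning rate by `gamma` after every epoch, so raising `gamma` from 0.9 to 0.95 makes the learning rate decay more slowly in both the frozen and unfrozen training phases. A minimal sketch of the effect (standalone; the toy parameter and starting `lr` are placeholders, not values from train.py):

```python
import torch
import torch.optim as optim

# Toy parameter so the optimizer has something to manage (placeholder, not from train.py).
param = torch.nn.Parameter(torch.zeros(1))
optimizer = optim.Adam([param], lr=1e-3)

# step_size=1: multiply the lr by gamma after every epoch, as in this commit.
lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.95)

for epoch in range(5):
    optimizer.step()       # training batches would run here
    lr_scheduler.step()    # lr -> lr * 0.95
    print(epoch, optimizer.param_groups[0]['lr'])

# After 5 epochs: 1e-3 * 0.95**5 ≈ 7.7e-4, versus 1e-3 * 0.9**5 ≈ 5.9e-4
# with the old gamma=0.9, i.e. the learning rate stays higher for longer.
```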