提交 5912ba53 编写于 作者: H huangyuxin

fix log_interval and lr when resuming training, test=asr

上级 219b6dd5
...@@ -289,6 +289,7 @@ class Trainer(): ...@@ -289,6 +289,7 @@ class Trainer():
float) else f"{v}" float) else f"{v}"
msg += "," msg += ","
msg = msg[:-1] # remove the last "," msg = msg[:-1] # remove the last ","
if (batch_index + 1) % self.config.log_interval == 0:
logger.info(msg) logger.info(msg)
data_start_time = time.time() data_start_time = time.time()
except Exception as e: except Exception as e:
...@@ -316,10 +317,10 @@ class Trainer(): ...@@ -316,10 +317,10 @@ class Trainer():
self.visualizer.add_scalar( self.visualizer.add_scalar(
tag='eval/lr', value=self.lr_scheduler(), step=self.epoch) tag='eval/lr', value=self.lr_scheduler(), step=self.epoch)
# after epoch
self.save(tag=self.epoch, infos={'val_loss': cv_loss})
# step lr every epoch # step lr every epoch
self.lr_scheduler.step() self.lr_scheduler.step()
# after epoch
self.save(tag=self.epoch, infos={'val_loss': cv_loss})
self.new_epoch() self.new_epoch()
def run(self): def run(self):
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册