Commit 2b67448e authored by Hui Zhang

fix log

Parent 1e37e2cc
@@ -55,7 +55,7 @@ class DeepSpeech2Trainer(Trainer):
losses_np = {
'train_loss': float(loss),
}
msg += "time: {:>.3f}s, ".format(iteration_time)
msg += "train time: {:>.3f}s, ".format(iteration_time)
msg += "batch size: {}, ".format(self.config.data.batch_size)
msg += ', '.join('{}: {:>.6f}'.format(k, v)
for k, v in losses_np.items())
......
@@ -96,7 +96,7 @@ class U2Trainer(Trainer):
iteration_time = time.time() - start
if (batch_index + 1) % train_conf.log_interval == 0:
msg += "time: {:>.3f}s, ".format(iteration_time)
msg += "train time: {:>.3f}s, ".format(iteration_time)
msg += "batch size: {}, ".format(self.config.data.batch_size)
msg += "accum: {}, ".format(train_conf.accum_grad)
msg += ', '.join('{}: {:>.6f}'.format(k, v)
@@ -177,7 +177,7 @@ class U2Trainer(Trainer):
msg += "batch : {}/{}, ".format(batch_index + 1,
len(self.train_loader))
msg += "lr: {:>.8f}, ".format(self.lr_scheduler())
msg += "dataloader time: {:>.3f}s, ".format(dataload_time)
msg += "data time: {:>.3f}s, ".format(dataload_time)
self.train_batch(batch_index, batch, msg)
data_start_time = time.time()
except Exception as e:
@@ -275,6 +275,7 @@ class U2Trainer(Trainer):
if self.parallel:
model = paddle.DataParallel(model)
logger.info(f"{model}")
+ layer_tools.print_params(model, logger.info)
train_config = config.training
......
@@ -194,7 +194,7 @@ class Trainer():
msg += "batch : {}/{}, ".format(batch_index + 1,
len(self.train_loader))
msg += "lr: {:>.8f}, ".format(self.lr_scheduler())
msg += "dataloader time: {:>.3f}s, ".format(dataload_time)
msg += "data time: {:>.3f}s, ".format(dataload_time)
self.train_batch(batch_index, batch, msg)
data_start_time = time.time()
except Exception as e:
......
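
For orientation, below is a minimal sketch of the timed training loop these hunks touch. The names run_epoch, train_loader, train_batch, log_interval and batch_size are hypothetical stand-ins for the actual Trainer attributes (self.train_loader, self.train_batch, train_conf.log_interval, config.data.batch_size), not the real PaddleSpeech API.

# Minimal sketch of the timed training loop whose log fields this commit
# renames: "time" -> "train time", "dataloader time" -> "data time".
import time

def run_epoch(train_loader, train_batch, log_interval=100, batch_size=32):
    # train_loader: any iterable of batches with len(); train_batch: one
    # optimizer step returning a scalar loss. Both are hypothetical here.
    data_start_time = time.time()
    for batch_index, batch in enumerate(train_loader):
        dataload_time = time.time() - data_start_time  # time spent fetching data
        msg = "Train: "
        msg += "batch : {}/{}, ".format(batch_index + 1, len(train_loader))
        msg += "data time: {:>.3f}s, ".format(dataload_time)

        start = time.time()
        loss = train_batch(batch)              # forward/backward/step
        iteration_time = time.time() - start   # time spent on the step itself

        if (batch_index + 1) % log_interval == 0:
            msg += "train time: {:>.3f}s, ".format(iteration_time)
            msg += "batch size: {}, ".format(batch_size)
            msg += "train_loss: {:>.6f}".format(loss)
            print(msg)

        data_start_time = time.time()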