diff --git a/deepspeech/exps/deepspeech2/model.py b/deepspeech/exps/deepspeech2/model.py
index e21a03f6f60333204f196bbd53dde1f11614c329..ca9fff2b053e7f1b6eb7391c88125a016d94e342 100644
--- a/deepspeech/exps/deepspeech2/model.py
+++ b/deepspeech/exps/deepspeech2/model.py
@@ -55,7 +55,7 @@ class DeepSpeech2Trainer(Trainer):
         losses_np = {
             'train_loss': float(loss),
         }
-        msg += "time: {:>.3f}s, ".format(iteration_time)
+        msg += "train time: {:>.3f}s, ".format(iteration_time)
         msg += "batch size: {}, ".format(self.config.data.batch_size)
         msg += ', '.join('{}: {:>.6f}'.format(k, v)
                          for k, v in losses_np.items())
diff --git a/deepspeech/exps/u2/model.py b/deepspeech/exps/u2/model.py
index 58076e4be4f5625cd2720f58d5c53c9832e2da90..9948c30ed54610cbc6562650b967d60ea5eea3c9 100644
--- a/deepspeech/exps/u2/model.py
+++ b/deepspeech/exps/u2/model.py
@@ -96,7 +96,7 @@ class U2Trainer(Trainer):
         iteration_time = time.time() - start
 
         if (batch_index + 1) % train_conf.log_interval == 0:
-            msg += "time: {:>.3f}s, ".format(iteration_time)
+            msg += "train time: {:>.3f}s, ".format(iteration_time)
             msg += "batch size: {}, ".format(self.config.data.batch_size)
             msg += "accum: {}, ".format(train_conf.accum_grad)
             msg += ', '.join('{}: {:>.6f}'.format(k, v)
@@ -177,7 +177,7 @@ class U2Trainer(Trainer):
                     msg += "batch : {}/{}, ".format(batch_index + 1,
                                                     len(self.train_loader))
                     msg += "lr: {:>.8f}, ".format(self.lr_scheduler())
-                    msg += "dataloader time: {:>.3f}s, ".format(dataload_time)
+                    msg += "data time: {:>.3f}s, ".format(dataload_time)
                     self.train_batch(batch_index, batch, msg)
                     data_start_time = time.time()
             except Exception as e:
@@ -275,6 +275,7 @@ class U2Trainer(Trainer):
         if self.parallel:
             model = paddle.DataParallel(model)
 
+        logger.info(f"{model}")
         layer_tools.print_params(model, logger.info)
 
         train_config = config.training
diff --git a/deepspeech/training/trainer.py b/deepspeech/training/trainer.py
index 128432aa94e1c0571bdeaa1712afa1a211e3d36d..c8b3fc4d890df128a4542fd23cd25a6b55fbd044 100644
--- a/deepspeech/training/trainer.py
+++ b/deepspeech/training/trainer.py
@@ -194,7 +194,7 @@ class Trainer():
                     msg += "batch : {}/{}, ".format(batch_index + 1,
                                                     len(self.train_loader))
                     msg += "lr: {:>.8f}, ".format(self.lr_scheduler())
-                    msg += "dataloader time: {:>.3f}s, ".format(dataload_time)
+                    msg += "data time: {:>.3f}s, ".format(dataload_time)
                     self.train_batch(batch_index, batch, msg)
                     data_start_time = time.time()
             except Exception as e: