From bf6d9ef06f93918375550cae42f67c6f5bc731f4 Mon Sep 17 00:00:00 2001 From: ShenYuhan Date: Fri, 7 Aug 2020 16:28:21 +0800 Subject: [PATCH] add visualdl for parakeet --- examples/clarinet/synthesis.py | 1 - examples/clarinet/train.py | 4 +- examples/clarinet/utils.py | 1 - examples/deepvoice3/synthesize.py | 1 - examples/deepvoice3/train.py | 4 +- examples/fastspeech/synthesis.py | 4 +- examples/fastspeech/train.py | 6 +-- examples/transformer_tts/synthesis.py | 7 ++- examples/transformer_tts/train_transformer.py | 51 +++++++++++++++---- examples/transformer_tts/train_vocoder.py | 6 +-- examples/waveflow/train.py | 11 ++-- examples/waveflow/waveflow.py | 20 ++++---- examples/wavenet/synthesis.py | 1 - examples/wavenet/train.py | 4 +- setup.py | 3 +- 15 files changed, 77 insertions(+), 47 deletions(-) diff --git a/examples/clarinet/synthesis.py b/examples/clarinet/synthesis.py index 32ed860..185a7ac 100644 --- a/examples/clarinet/synthesis.py +++ b/examples/clarinet/synthesis.py @@ -21,7 +21,6 @@ import random from tqdm import tqdm import pickle import numpy as np -from tensorboardX import SummaryWriter import paddle.fluid.dygraph as dg from paddle import fluid diff --git a/examples/clarinet/train.py b/examples/clarinet/train.py index fcfbd8d..ef7a93f 100644 --- a/examples/clarinet/train.py +++ b/examples/clarinet/train.py @@ -21,7 +21,7 @@ import random from tqdm import tqdm import pickle import numpy as np -from tensorboardX import SummaryWriter +from visualdl import LogWriter import paddle.fluid.dygraph as dg from paddle import fluid @@ -179,7 +179,7 @@ if __name__ == "__main__": checkpoint_dir = os.path.join(args.output, "checkpoints") state_dir = os.path.join(args.output, "states") log_dir = os.path.join(args.output, "log") - writer = SummaryWriter(log_dir) + writer = LogWriter(log_dir) if args.checkpoint is not None: iteration = io.load_parameters( diff --git a/examples/clarinet/utils.py b/examples/clarinet/utils.py index 1cbc1b6..1e1c46a 100644 --- 
a/examples/clarinet/utils.py +++ b/examples/clarinet/utils.py @@ -15,7 +15,6 @@ from __future__ import division import os import soundfile as sf -from tensorboardX import SummaryWriter from collections import OrderedDict from paddle import fluid diff --git a/examples/deepvoice3/synthesize.py b/examples/deepvoice3/synthesize.py index 1f311fe..3540d6e 100644 --- a/examples/deepvoice3/synthesize.py +++ b/examples/deepvoice3/synthesize.py @@ -11,7 +11,6 @@ from paddle import fluid from paddle.fluid import layers as F from paddle.fluid import dygraph as dg from paddle.fluid.io import DataLoader -from tensorboardX import SummaryWriter import soundfile as sf from parakeet.data import SliceDataset, DataCargo, PartialyRandomizedSimilarTimeLengthSampler, SequentialSampler diff --git a/examples/deepvoice3/train.py b/examples/deepvoice3/train.py index 76ced55..b43941d 100644 --- a/examples/deepvoice3/train.py +++ b/examples/deepvoice3/train.py @@ -9,7 +9,7 @@ from paddle import fluid from paddle.fluid import layers as F from paddle.fluid import dygraph as dg from paddle.fluid.io import DataLoader -from tensorboardX import SummaryWriter +from visualdl import LogWriter from parakeet.models.deepvoice3 import Encoder, Decoder, PostNet, SpectraNet from parakeet.data import SliceDataset, DataCargo, PartialyRandomizedSimilarTimeLengthSampler, SequentialSampler @@ -181,7 +181,7 @@ if __name__ == "__main__": global global_step global_step = 1 global writer - writer = SummaryWriter() + writer = LogWriter() print("[Training] tensorboard log and checkpoints are save in {}".format( writer.logdir)) train(args, config) \ No newline at end of file diff --git a/examples/fastspeech/synthesis.py b/examples/fastspeech/synthesis.py index dde776f..9ff4ef7 100644 --- a/examples/fastspeech/synthesis.py +++ b/examples/fastspeech/synthesis.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import os -from tensorboardX import SummaryWriter +from visualdl import LogWriter from scipy.io.wavfile import write from collections import OrderedDict import argparse @@ -78,7 +78,7 @@ def synthesis(text_input, args): if not os.path.exists(args.output): os.mkdir(args.output) - writer = SummaryWriter(os.path.join(args.output, 'log')) + writer = LogWriter(os.path.join(args.output, 'log')) model = FastSpeech(cfg['network'], num_mels=cfg['audio']['num_mels']) # Load parameters. diff --git a/examples/fastspeech/train.py b/examples/fastspeech/train.py index e575d0f..389e0bf 100644 --- a/examples/fastspeech/train.py +++ b/examples/fastspeech/train.py @@ -22,7 +22,7 @@ from ruamel import yaml from tqdm import tqdm from matplotlib import cm from collections import OrderedDict -from tensorboardX import SummaryWriter +from visualdl import LogWriter import paddle.fluid.dygraph as dg import paddle.fluid.layers as layers import paddle.fluid as fluid @@ -69,8 +69,8 @@ def main(args): if not os.path.exists(args.output): os.mkdir(args.output) - writer = SummaryWriter(os.path.join(args.output, - 'log')) if local_rank == 0 else None + writer = LogWriter(os.path.join(args.output, + 'log')) if local_rank == 0 else None model = FastSpeech(cfg['network'], num_mels=cfg['audio']['num_mels']) model.train() diff --git a/examples/transformer_tts/synthesis.py b/examples/transformer_tts/synthesis.py index effbffd..0cbb853 100644 --- a/examples/transformer_tts/synthesis.py +++ b/examples/transformer_tts/synthesis.py @@ -16,7 +16,7 @@ from scipy.io.wavfile import write import numpy as np from tqdm import tqdm from matplotlib import cm -from tensorboardX import SummaryWriter +from visualdl import LogWriter from ruamel import yaml from pathlib import Path import argparse @@ -81,7 +81,7 @@ def synthesis(text_input, args): if not os.path.exists(args.output): os.mkdir(args.output) - writer = SummaryWriter(os.path.join(args.output, 'log')) + writer = LogWriter(os.path.join(args.output, 'log')) 
fluid.enable_dygraph(place) with fluid.unique_name.guard(): @@ -121,8 +121,7 @@ def synthesis(text_input, args): writer.add_image( 'Attention_%d_0' % global_step, x, - i * 4 + j, - dataformats="HWC") + i * 4 + j) if args.vocoder == 'griffin-lim': #synthesis use griffin-lim diff --git a/examples/transformer_tts/train_transformer.py b/examples/transformer_tts/train_transformer.py index 299676c..a0ca16b 100644 --- a/examples/transformer_tts/train_transformer.py +++ b/examples/transformer_tts/train_transformer.py @@ -13,7 +13,7 @@ # limitations under the License. import os from tqdm import tqdm -from tensorboardX import SummaryWriter +from visualdl import LogWriter from collections import OrderedDict import argparse from pprint import pprint @@ -29,6 +29,41 @@ from parakeet.models.transformer_tts import TransformerTTS from parakeet.utils import io +def add_scalars(self, main_tag, tag_scalar_dict, step, walltime=None): + """Add scalars to vdl record file. + Args: + main_tag (string): The parent name for the tags + tag_scalar_dict (dict): Key-value pair storing the tag and corresponding values + step (int): Step of scalars + walltime (float): Wall time of scalars. 
+ Example: + for index in range(1, 101): + writer.add_scalars(main_tag="train", tag_scalar_dict={"loss": index*0.2}, step=index) + writer.add_scalars(main_tag="train", tag_scalar_dict={"lr": index*0.5}, step=index) + """ + import time + from visualdl.writer.record_writer import RecordFileWriter + from visualdl.component.base_component import scalar + + fw_logdir = self.logdir + walltime = round(time.time()) if walltime is None else walltime + for tag, value in tag_scalar_dict.items(): + tag = os.path.join(fw_logdir, main_tag, tag) + if '%' in tag: + raise RuntimeError("% can't appear in tag!") + if tag in self._all_writers: + fw = self._all_writers[tag] + else: + fw = RecordFileWriter( + logdir=tag, + max_queue_size=self._max_queue, + flush_secs=self._flush_secs, + filename_suffix=self._filename_suffix) + self._all_writers.update({tag: fw}) + fw.add_record( + scalar(tag=main_tag, value=value, step=step, walltime=walltime)) + + def add_config_options_to_parser(parser): parser.add_argument("--config", type=str, help="path of the config file") parser.add_argument("--use_gpu", type=int, default=0, help="device to use") @@ -62,8 +97,9 @@ def main(args): if not os.path.exists(args.output): os.mkdir(args.output) - writer = SummaryWriter(os.path.join(args.output, - 'log')) if local_rank == 0 else None + writer = LogWriter(os.path.join(args.output, + 'log')) if local_rank == 0 else None + LogWriter.add_scalars = add_scalars fluid.enable_dygraph(place) network_cfg = cfg['network'] @@ -162,8 +198,7 @@ def main(args): writer.add_image( 'Attention_%d_0' % global_step, x, - i * 4 + j, - dataformats="HWC") + i * 4 + j) for i, prob in enumerate(attn_enc): for j in range(cfg['network']['encoder_num_head']): @@ -173,8 +208,7 @@ def main(args): writer.add_image( 'Attention_enc_%d_0' % global_step, x, - i * 4 + j, - dataformats="HWC") + i * 4 + j) for i, prob in enumerate(attn_dec): for j in range(cfg['network']['decoder_num_head']): @@ -184,8 +218,7 @@ def main(args): writer.add_image( 'Attention_dec_%d_0' % global_step, x, - i
* 4 + j, - dataformats="HWC") + i * 4 + j) if parallel: loss = model.scale_loss(loss) diff --git a/examples/transformer_tts/train_vocoder.py b/examples/transformer_tts/train_vocoder.py index 37e9398..4b95f31 100644 --- a/examples/transformer_tts/train_vocoder.py +++ b/examples/transformer_tts/train_vocoder.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from tensorboardX import SummaryWriter +from visualdl import LogWriter import os from tqdm import tqdm from pathlib import Path @@ -60,8 +60,8 @@ def main(args): if not os.path.exists(args.output): os.mkdir(args.output) - writer = SummaryWriter(os.path.join(args.output, - 'log')) if local_rank == 0 else None + writer = LogWriter(os.path.join(args.output, + 'log')) if local_rank == 0 else None fluid.enable_dygraph(place) model = Vocoder(cfg['train']['batch_size'], cfg['vocoder']['hidden_size'], diff --git a/examples/waveflow/train.py b/examples/waveflow/train.py index a033369..dd3e7b7 100644 --- a/examples/waveflow/train.py +++ b/examples/waveflow/train.py @@ -22,7 +22,8 @@ import argparse import numpy as np import paddle.fluid.dygraph as dg from paddle import fluid -from tensorboardX import SummaryWriter +from visualdl import LogWriter + import utils from parakeet.utils import io @@ -78,8 +79,8 @@ def train(config): os.makedirs(checkpoint_dir) # Create tensorboard logger. - tb = SummaryWriter(os.path.join(run_dir, "logs")) \ - if rank == 0 else None + vdl = LogWriter(os.path.join(run_dir, "logs")) \ + if rank == 0 else None # Configurate device place = fluid.CUDAPlace(rank) if use_gpu else fluid.CPUPlace() @@ -94,7 +95,7 @@ def train(config): print("Random Seed: ", seed) # Build model. 
- model = WaveFlow(config, checkpoint_dir, parallel, rank, nranks, tb) + model = WaveFlow(config, checkpoint_dir, parallel, rank, nranks, vdl) iteration = model.build() while iteration < config.max_iterations: @@ -113,7 +114,7 @@ def train(config): # Close TensorBoard. if rank == 0: - tb.close() + vdl.close() if __name__ == "__main__": diff --git a/examples/waveflow/waveflow.py b/examples/waveflow/waveflow.py index 23c558e..a41a784 100644 --- a/examples/waveflow/waveflow.py +++ b/examples/waveflow/waveflow.py @@ -42,7 +42,7 @@ class WaveFlow(): rank (int, optional): the rank of the process in a multi-process scenario. Defaults to 0. nranks (int, optional): the total number of processes. Defaults to 1. - tb_logger (obj, optional): logger to visualize metrics. + vdl_logger (obj, optional): logger to visualize metrics. Defaults to None. Returns: @@ -55,13 +55,13 @@ class WaveFlow(): parallel=False, rank=0, nranks=1, - tb_logger=None): + vdl_logger=None): self.config = config self.checkpoint_dir = checkpoint_dir self.parallel = parallel self.rank = rank self.nranks = nranks - self.tb_logger = tb_logger + self.vdl_logger = vdl_logger self.dtype = "float16" if config.use_fp16 else "float32" def build(self, training=True): @@ -161,8 +161,8 @@ class WaveFlow(): load_time - start_time, graph_time - load_time) print(log) - tb = self.tb_logger - tb.add_scalar("Train-Loss-Rank-0", loss_val, iteration) + vdl_writer = self.vdl_logger + vdl_writer.add_scalar("Train-Loss-Rank-0", loss_val, iteration) @dg.no_grad def valid_step(self, iteration): @@ -175,7 +175,7 @@ class WaveFlow(): None """ self.waveflow.eval() - tb = self.tb_logger + vdl_writer = self.vdl_logger total_loss = [] sample_audios = [] @@ -188,10 +188,12 @@ class WaveFlow(): # Visualize latent z and scale log_s. 
if self.rank == 0 and i == 0: - tb.add_histogram("Valid-Latent_z", valid_z.numpy(), iteration) + vdl_writer.add_histogram("Valid-Latent_z", valid_z.numpy(), + iteration) for j, valid_log_s in enumerate(valid_log_s_list): hist_name = "Valid-{}th-Flow-Log_s".format(j) - tb.add_histogram(hist_name, valid_log_s.numpy(), iteration) + vdl_writer.add_histogram(hist_name, valid_log_s.numpy(), + iteration) valid_loss = self.criterion(valid_outputs) total_loss.append(float(valid_loss.numpy())) @@ -202,7 +204,7 @@ class WaveFlow(): log = "Test | Rank: {} AvgLoss: {:<8.3f} Time {:<8.3f}".format( self.rank, loss_val, total_time) print(log) - tb.add_scalar("Valid-Avg-Loss", loss_val, iteration) + vdl_writer.add_scalar("Valid-Avg-Loss", loss_val, iteration) @dg.no_grad def infer(self, iteration): diff --git a/examples/wavenet/synthesis.py b/examples/wavenet/synthesis.py index 3dc9f80..a1d13f4 100644 --- a/examples/wavenet/synthesis.py +++ b/examples/wavenet/synthesis.py @@ -17,7 +17,6 @@ import os import ruamel.yaml import argparse from tqdm import tqdm -from tensorboardX import SummaryWriter from paddle import fluid fluid.require_version('1.8.0') import paddle.fluid.dygraph as dg diff --git a/examples/wavenet/train.py b/examples/wavenet/train.py index 328a811..d211b06 100644 --- a/examples/wavenet/train.py +++ b/examples/wavenet/train.py @@ -17,7 +17,7 @@ import os import ruamel.yaml import argparse import tqdm -from tensorboardX import SummaryWriter +from visualdl import LogWriter from paddle import fluid fluid.require_version('1.8.0') import paddle.fluid.dygraph as dg @@ -154,7 +154,7 @@ if __name__ == "__main__": eval_interval = train_config["eval_interval"] checkpoint_dir = os.path.join(args.output, "checkpoints") log_dir = os.path.join(args.output, "log") - writer = SummaryWriter(log_dir) + writer = LogWriter(log_dir) # load parameters and optimizer, and update iterations done so far if args.checkpoint is not None: diff --git a/setup.py b/setup.py index 7d329ea..ceada72 
100644 --- a/setup.py +++ b/setup.py @@ -57,8 +57,7 @@ setup_info = dict( 'numba==0.47.0', 'tqdm==4.19.8', 'matplotlib', - 'tensorboardX', - 'tensorboard', + 'visualdl', 'scipy', 'ruamel.yaml', 'pandas', -- GitLab