diff --git a/synthesizer/inference.py b/synthesizer/inference.py
index cbade5f589e1513d0e3d3788c0814cc69aef7c8a..5a51a03e302ef006864b700fe0eb3c0f698810f5 100644
--- a/synthesizer/inference.py
+++ b/synthesizer/inference.py
@@ -2,6 +2,7 @@ from synthesizer.tacotron2 import Tacotron2
 from synthesizer.hparams import hparams
 from multiprocess.pool import Pool  # You're free to use either one
 #from multiprocessing import Pool  #
+from multiprocess.context import SpawnContext
 from synthesizer import audio
 from pathlib import Path
 from typing import Union, List
@@ -97,16 +98,16 @@ class Synthesizer:
             # Low memory inference mode: load the model upon every request. The model has to be
             # loaded in a separate process to be able to release GPU memory (a simple workaround
             # to tensorflow's intricacies)
-            specs, alignments = Pool(1).starmap(Synthesizer._one_shot_synthesize_spectrograms,
-                                                [(self.checkpoint_fpath, embeddings, texts)])[0]
+            specs, alignments = Pool(1, context=SpawnContext()).starmap(Synthesizer._one_shot_synthesize_spectrograms,
+                                                                        [(self.checkpoint_fpath, embeddings, texts, self._seed)])[0]
         return (specs, alignments) if return_alignments else specs
 
     @staticmethod
-    def _one_shot_synthesize_spectrograms(checkpoint_fpath, embeddings, texts):
+    def _one_shot_synthesize_spectrograms(checkpoint_fpath, embeddings, texts, seed):
         # Load the model and forward the inputs
         tf.compat.v1.reset_default_graph()
-        model = Tacotron2(checkpoint_fpath, hparams, seed=self._seed)
+        model = Tacotron2(checkpoint_fpath, hparams, seed=seed)
         specs, alignments = model.my_synthesize(embeddings, texts)
 
         # Detach the outputs (not doing so will cause the process to hang)
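
A minimal, self-contained sketch (not part of the patch) of the pattern this change relies on: running a GPU-bound job through a one-worker Pool built from a SpawnContext, so the model lives in a fresh child process and its GPU memory is released when that process exits. The heavy_gpu_job function below is a hypothetical stand-in for Synthesizer._one_shot_synthesize_spectrograms; as in the patch, the seed is passed as an explicit argument, since a @staticmethod has no self to read self._seed from (the bug this diff fixes).

    from multiprocess.pool import Pool
    from multiprocess.context import SpawnContext

    def heavy_gpu_job(checkpoint_fpath, inputs, seed):
        # Hypothetical stand-in for the real worker: load the model, run
        # inference, and return plain Python/NumPy objects only. Returning
        # live framework tensors can keep the child process (and its GPU
        # memory) pinned, which is what the "detach the outputs" comment
        # in the patched file is about.
        return [x * 2 for x in inputs]  # placeholder computation

    if __name__ == "__main__":
        # Pool(1, context=SpawnContext()) starts one clean child process
        # instead of forking the (possibly already GPU-initialized) parent.
        results = Pool(1, context=SpawnContext()).starmap(
            heavy_gpu_job, [("model.ckpt", [1, 2, 3], 1234)])[0]
        print(results)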