diff --git a/examples/aishell3/tts3/run.sh b/examples/aishell3/tts3/run.sh
index b5da076b2d0e817d893303ad38b254bef62bfacb..8dcecaa03709ac6e2862c4bf0952e0a239062a82 100755
--- a/examples/aishell3/tts3/run.sh
+++ b/examples/aishell3/tts3/run.sh
@@ -43,10 +43,7 @@ fi
 
 if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
     # install paddle2onnx
-    version=$(echo `pip list |grep "paddle2onnx"` |awk -F" " '{print $2}')
-    if [[ -z "$version" || ${version} != '1.0.0' ]]; then
-        pip install paddle2onnx==1.0.0
-    fi
+    pip install paddle2onnx --upgrade
     ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx fastspeech2_aishell3
     # considering the balance between speed and quality, we recommend that you use hifigan as vocoder
     ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx pwgan_aishell3
diff --git a/examples/canton/tts3/run.sh b/examples/canton/tts3/run.sh
index 3a3dfe0a587814e6745bc18f98082385baeb3269..acfc502239b6569e9d29cb240e91464a30c0f12d 100755
--- a/examples/canton/tts3/run.sh
+++ b/examples/canton/tts3/run.sh
@@ -46,10 +46,7 @@ fi
 # we have only tested the following models so far
 if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
     # install paddle2onnx
-    version=$(echo `pip list |grep "paddle2onnx"` |awk -F" " '{print $2}')
-    if [[ -z "$version" || ${version} != '1.0.0' ]]; then
-        pip install paddle2onnx==1.0.0
-    fi
+    pip install paddle2onnx --upgrade
     ../../csmsc/tts3/local/paddle2onnx.sh ${train_output_path} inference inference_onnx fastspeech2_canton
     # considering the balance between speed and quality, we recommend that you use hifigan as vocoder
     # ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx pwgan_csmsc
diff --git a/examples/csmsc/tts2/run.sh b/examples/csmsc/tts2/run.sh
index 6279ec5799cb32bf9b1d6d8bc2a45da871c501cd..5732ea3c774574f409074cafe24d6e0f806da1b6 100755
--- a/examples/csmsc/tts2/run.sh
+++ b/examples/csmsc/tts2/run.sh
@@ -45,10 +45,7 @@ fi
 # we have only tested the following models so far
 if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
     # install paddle2onnx
-    version=$(echo `pip list |grep "paddle2onnx"` |awk -F" " '{print $2}')
-    if [[ -z "$version" || ${version} != '1.0.0' ]]; then
-        pip install paddle2onnx==1.0.0
-    fi
+    pip install paddle2onnx --upgrade
     ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx speedyspeech_csmsc
     # considering the balance between speed and quality, we recommend that you use hifigan as vocoder
     ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx pwgan_csmsc
diff --git a/examples/csmsc/tts3/run.sh b/examples/csmsc/tts3/run.sh
index dd8c9f3e685d1888c71fbd23bd61c5a17eae0e00..a7b4e4239ba9a7c7983cf5676758964db0099e50 100755
--- a/examples/csmsc/tts3/run.sh
+++ b/examples/csmsc/tts3/run.sh
@@ -45,10 +45,7 @@ fi
 # we have only tested the following models so far
 if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
     # install paddle2onnx
-    version=$(echo `pip list |grep "paddle2onnx"` |awk -F" " '{print $2}')
-    if [[ -z "$version" || ${version} != '1.0.0' ]]; then
-        pip install paddle2onnx==1.0.0
-    fi
+    pip install paddle2onnx --upgrade
     ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx fastspeech2_csmsc
     # considering the balance between speed and quality, we recommend that you use hifigan as vocoder
     ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx pwgan_csmsc
diff --git a/examples/csmsc/tts3/run_cnndecoder.sh b/examples/csmsc/tts3/run_cnndecoder.sh
index 96b446c529d04fb49951d0a4235d0d2f83df7e8a..f356f31335c98a1874a360f71c97f885c4fdf832 100755
--- a/examples/csmsc/tts3/run_cnndecoder.sh
+++ b/examples/csmsc/tts3/run_cnndecoder.sh
@@ -58,10 +58,7 @@ fi
 # paddle2onnx non streaming
 if [ ${stage} -le 7 ] && [ ${stop_stage} -ge 7 ]; then
     # install paddle2onnx
-    version=$(echo `pip list |grep "paddle2onnx"` |awk -F" " '{print $2}')
-    if [[ -z "$version" || ${version} != '1.0.0' ]]; then
-        pip install paddle2onnx==1.0.0
-    fi
+    pip install paddle2onnx --upgrade
     ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx fastspeech2_csmsc
     # considering the balance between speed and quality, we recommend that you use hifigan as vocoder
     ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx pwgan_csmsc
@@ -77,10 +74,7 @@ fi
 # paddle2onnx streaming
 if [ ${stage} -le 9 ] && [ ${stop_stage} -ge 9 ]; then
     # install paddle2onnx
-    version=$(echo `pip list |grep "paddle2onnx"` |awk -F" " '{print $2}')
-    if [[ -z "$version" || ${version} != '1.0.0' ]]; then
-        pip install paddle2onnx==1.0.0
-    fi
+    pip install paddle2onnx --upgrade
     # streaming acoustic model
     ./local/paddle2onnx.sh ${train_output_path} inference_streaming inference_onnx_streaming fastspeech2_csmsc_am_encoder_infer
     ./local/paddle2onnx.sh ${train_output_path} inference_streaming inference_onnx_streaming fastspeech2_csmsc_am_decoder
diff --git a/examples/csmsc/vits/run.sh b/examples/csmsc/vits/run.sh
index f2c5d452f94256259ca089b27a9ed7d56a223aa8..03c59702b7d8dec462ce518a50dd898cae92819e 100755
--- a/examples/csmsc/vits/run.sh
+++ b/examples/csmsc/vits/run.sh
@@ -45,10 +45,7 @@ fi
 
 # # we have only tested the following models so far
 # if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
 #     # install paddle2onnx
-#     version=$(echo `pip list |grep "paddle2onnx"` |awk -F" " '{print $2}')
-#     if [[ -z "$version" || ${version} != '1.0.0' ]]; then
-#         pip install paddle2onnx==1.0.0
-#     fi
+#     pip install paddle2onnx --upgrade
 #     ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx vits_csmsc
 # fi
diff --git a/examples/ljspeech/tts3/run.sh b/examples/ljspeech/tts3/run.sh
index aacd4cc03a84d2b15382de09bf4fc7d8f5863d78..0d8da920cbeb2798310e8008e74a0ed778a14216 100755
--- a/examples/ljspeech/tts3/run.sh
+++ b/examples/ljspeech/tts3/run.sh
@@ -45,10 +45,7 @@ fi
 # we have only tested the following models so far
 if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
     # install paddle2onnx
-    version=$(echo `pip list |grep "paddle2onnx"` |awk -F" " '{print $2}')
-    if [[ -z "$version" || ${version} != '1.0.0' ]]; then
-        pip install paddle2onnx==1.0.0
-    fi
+    pip install paddle2onnx --upgrade
     ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx fastspeech2_ljspeech
     # considering the balance between speed and quality, we recommend that you use hifigan as vocoder
     ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx pwgan_ljspeech
diff --git a/examples/vctk/tts3/run.sh b/examples/vctk/tts3/run.sh
index a112b94b7bddea3fbdb775dc9dc87dda4d95cfe8..76307bd5f32b76263ffe82c43803e5521113072e 100755
--- a/examples/vctk/tts3/run.sh
+++ b/examples/vctk/tts3/run.sh
@@ -43,10 +43,7 @@ fi
 
 if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
     # install paddle2onnx
-    version=$(echo `pip list |grep "paddle2onnx"` |awk -F" " '{print $2}')
-    if [[ -z "$version" || ${version} != '1.0.0' ]]; then
-        pip install paddle2onnx==1.0.0
-    fi
+    pip install paddle2onnx --upgrade
     ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx fastspeech2_vctk
     # considering the balance between speed and quality, we recommend that you use hifigan as vocoder
     ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx pwgan_vctk
diff --git a/examples/zh_en_tts/tts3/run.sh b/examples/zh_en_tts/tts3/run.sh
index 12f99081af877515e978ae30e77532137571fb17..a4d86480b34a5be87f5eb216a92428e9fb8a79e0 100755
--- a/examples/zh_en_tts/tts3/run.sh
+++ b/examples/zh_en_tts/tts3/run.sh
@@ -46,10 +46,7 @@ fi
 
 if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
     # install paddle2onnx
-    version=$(echo `pip list |grep "paddle2onnx"` |awk -F" " '{print $2}')
-    if [[ -z "$version" || ${version} != '1.0.0' ]]; then
-        pip install paddle2onnx==1.0.0
-    fi
+    pip install paddle2onnx --upgrade
     ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx fastspeech2_mix
     # considering the balance between speed and quality, we recommend that you use hifigan as vocoder
     ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx pwgan_aishell3