From 2fdf51ebd2586f86965b6b76a6fd8bd16cab295c Mon Sep 17 00:00:00 2001
From: WeberJulian
Date: Fri, 6 Oct 2023 17:39:04 +0200
Subject: [PATCH] 2nd version of the tokenizer fix

---
 TTS/tts/layers/xtts/tokenizer.py | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/TTS/tts/layers/xtts/tokenizer.py b/TTS/tts/layers/xtts/tokenizer.py
index 9fa094ecb4..a279528925 100644
--- a/TTS/tts/layers/xtts/tokenizer.py
+++ b/TTS/tts/layers/xtts/tokenizer.py
@@ -223,9 +223,11 @@ def preprocess_text(self, txt, lang):
             results = kks.convert(txt)
             txt = " ".join([result["kana"] for result in results])
             txt = basic_cleaners(txt)
-        # elif lang == "en":
-        #     txt = english_cleaners(txt)
-        # English cleaner remove the language tag [en]
+        elif lang == "en":
+            if txt[:4] == "[en]":
+                txt = txt[4:]
+            txt = english_cleaners(txt)
+            txt = "[en]" + txt
         elif lang == "ar":
             txt = arabic_cleaners(txt)
         elif lang == "zh-cn":
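
Note on the change: the English cleaner strips bracketed symbols, so passing "[en]Hello" through it would also delete the language tag that later stages expect. The patched branch therefore removes the tag, runs the cleaner, and re-attaches the tag afterwards. Below is a minimal, self-contained sketch of that pattern. `toy_english_cleaners` is a hypothetical stand-in that only mimics the bracket stripping; it is not the project's actual english_cleaners, and `preprocess_en` simply mirrors the patched `elif lang == "en"` branch for illustration.

import re

def toy_english_cleaners(txt):
    # Hypothetical stand-in: the real cleaner also lowercases, expands
    # abbreviations, etc.; here we only reproduce the bracket removal
    # that motivated the fix.
    return re.sub(r"[\[\]]", "", txt.lower())

def preprocess_en(txt):
    # Same logic as the patched branch in preprocess_text().
    if txt[:4] == "[en]":
        txt = txt[4:]                 # drop the tag so the cleaner cannot remove it
    txt = toy_english_cleaners(txt)   # clean the bare text
    txt = "[en]" + txt                # restore the tag expected downstream
    return txt

print(preprocess_en("[en]Hello, World!"))  # -> "[en]hello, world!"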