diff --git a/examples/nlp/duplex_text_normalization/duplex_text_normalization_infer.py b/examples/nlp/duplex_text_normalization/duplex_text_normalization_infer.py
index 4cf25e12fc89..6bcc69de7db9 100644
--- a/examples/nlp/duplex_text_normalization/duplex_text_normalization_infer.py
+++ b/examples/nlp/duplex_text_normalization/duplex_text_normalization_infer.py
@@ -50,7 +50,6 @@
 from typing import List
 
 from helpers import DECODER_MODEL, TAGGER_MODEL, instantiate_model_and_trainer
-from nemo_text_processing.text_normalization.data_loader_utils import post_process_punct
 from nn_wfst.en.electronic.normalize import ElectronicNormalizer
 from nn_wfst.en.whitelist.normalize import WhitelistNormalizer
 from omegaconf import DictConfig, OmegaConf
@@ -60,6 +59,15 @@
 from nemo.core.config import hydra_runner
 from nemo.utils import logging
 
+try:
+    from nemo_text_processing.text_normalization.data_loader_utils import post_process_punct
+except (ImportError, ModuleNotFoundError):
+    raise ModuleNotFoundError(
+        "The package `nemo_text_processing` was not installed in this environment. Please refer to"
+        " https://github.com/NVIDIA/NeMo-text-processing and install this package before using "
+        "this script"
+    )
+
 
 @hydra_runner(config_path="conf", config_name="duplex_tn_config")
 def main(cfg: DictConfig) -> None:
diff --git a/examples/nlp/duplex_text_normalization/nn_wfst/en/electronic/normalize.py b/examples/nlp/duplex_text_normalization/nn_wfst/en/electronic/normalize.py
index e0d83b42222d..a1f8caa7d959 100644
--- a/examples/nlp/duplex_text_normalization/nn_wfst/en/electronic/normalize.py
+++ b/examples/nlp/duplex_text_normalization/nn_wfst/en/electronic/normalize.py
@@ -12,8 +12,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from nemo_text_processing.text_normalization.normalize import Normalizer
-from nemo_text_processing.text_normalization.token_parser import TokenParser
+try:
+    from nemo_text_processing.text_normalization.normalize import Normalizer
+    from nemo_text_processing.text_normalization.token_parser import TokenParser
+except (ImportError, ModuleNotFoundError):
+    raise ModuleNotFoundError(
+        "The package `nemo_text_processing` was not installed in this environment. Please refer to"
+        " https://github.com/NVIDIA/NeMo-text-processing and install this package before using "
+        "this script"
+    )
 
 from nemo.collections.common.tokenizers.moses_tokenizers import MosesProcessor
 
@@ -21,7 +28,7 @@
 class ElectronicNormalizer(Normalizer):
     """
     Normalizer for ELECTRONIC.
-
+
     Args:
         input_case: accepting either "lower_cased" or "cased" input.
         lang: language
diff --git a/examples/nlp/duplex_text_normalization/nn_wfst/en/electronic/tokenize_and_classify.py b/examples/nlp/duplex_text_normalization/nn_wfst/en/electronic/tokenize_and_classify.py
index 59a9d9784038..9e0c284d84b0 100644
--- a/examples/nlp/duplex_text_normalization/nn_wfst/en/electronic/tokenize_and_classify.py
+++ b/examples/nlp/duplex_text_normalization/nn_wfst/en/electronic/tokenize_and_classify.py
@@ -15,18 +15,25 @@
 
 import os
 
-import pynini
-from nemo_text_processing.text_normalization.en.graph_utils import (
-    NEMO_WHITE_SPACE,
-    GraphFst,
-    delete_extra_space,
-    delete_space,
-    generator_main,
-)
-from nemo_text_processing.text_normalization.en.taggers.electronic import ElectronicFst
-from nemo_text_processing.text_normalization.en.taggers.punctuation import PunctuationFst
-from nemo_text_processing.text_normalization.en.taggers.word import WordFst
-from pynini.lib import pynutil
+try:
+    import pynini
+    from nemo_text_processing.text_normalization.en.graph_utils import (
+        NEMO_WHITE_SPACE,
+        GraphFst,
+        delete_extra_space,
+        delete_space,
+        generator_main,
+    )
+    from nemo_text_processing.text_normalization.en.taggers.electronic import ElectronicFst
+    from nemo_text_processing.text_normalization.en.taggers.punctuation import PunctuationFst
+    from nemo_text_processing.text_normalization.en.taggers.word import WordFst
+    from pynini.lib import pynutil
+except (ImportError, ModuleNotFoundError):
+    raise ModuleNotFoundError(
+        "The package `nemo_text_processing` was not installed in this environment. Please refer to"
+        " https://github.com/NVIDIA/NeMo-text-processing and install this package before using "
+        "this script"
+    )
 
 from nemo.utils import logging
 
@@ -34,9 +41,9 @@
 class ClassifyFst(GraphFst):
     """
     Final class that composes all other classification grammars. This class can process an entire sentence including punctuation.
-    For deployment, this grammar will be compiled and exported to OpenFst Finate State Archiv (FAR) File. 
+    For deployment, this grammar will be compiled and exported to OpenFst Finite State Archive (FAR) File.
     More details to deployment at NeMo/tools/text_processing_deployment.
-
+
     Args:
         input_case: accepting either "lower_cased" or "cased" input.
         deterministic: if True will provide a single transduction option,
diff --git a/examples/nlp/duplex_text_normalization/nn_wfst/en/electronic/verbalize.py b/examples/nlp/duplex_text_normalization/nn_wfst/en/electronic/verbalize.py
index 6366942d34c8..7236be7a1994 100644
--- a/examples/nlp/duplex_text_normalization/nn_wfst/en/electronic/verbalize.py
+++ b/examples/nlp/duplex_text_normalization/nn_wfst/en/electronic/verbalize.py
@@ -12,15 +12,21 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-
-from nemo_text_processing.text_normalization.en.graph_utils import GraphFst
-from nemo_text_processing.text_normalization.en.verbalizers.electronic import ElectronicFst
+try:
+    from nemo_text_processing.text_normalization.en.graph_utils import GraphFst
+    from nemo_text_processing.text_normalization.en.verbalizers.electronic import ElectronicFst
+except (ImportError, ModuleNotFoundError):
+    raise ModuleNotFoundError(
+        "The package `nemo_text_processing` was not installed in this environment. Please refer to"
+        " https://github.com/NVIDIA/NeMo-text-processing and install this package before using "
+        "this script"
+    )
 
 
 class VerbalizeFst(GraphFst):
     """
     Composes other verbalizer grammars.
-    For deployment, this grammar will be compiled and exported to OpenFst Finate State Archiv (FAR) File. 
+    For deployment, this grammar will be compiled and exported to OpenFst Finite State Archive (FAR) File.
     More details to deployment at NeMo/tools/text_processing_deployment.
 
     Args:
diff --git a/examples/nlp/duplex_text_normalization/nn_wfst/en/electronic/verbalize_final.py b/examples/nlp/duplex_text_normalization/nn_wfst/en/electronic/verbalize_final.py
index 4d5d716bd01e..b2cc69ca9e09 100644
--- a/examples/nlp/duplex_text_normalization/nn_wfst/en/electronic/verbalize_final.py
+++ b/examples/nlp/duplex_text_normalization/nn_wfst/en/electronic/verbalize_final.py
@@ -12,12 +12,18 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-
-import pynini
-from nemo_text_processing.text_normalization.en.graph_utils import GraphFst, delete_extra_space, delete_space
-from nemo_text_processing.text_normalization.en.verbalizers.word import WordFst
-from nn_wfst.en.electronic.verbalize import VerbalizeFst
-from pynini.lib import pynutil
+try:
+    import pynini
+    from nemo_text_processing.text_normalization.en.graph_utils import GraphFst, delete_extra_space, delete_space
+    from nemo_text_processing.text_normalization.en.verbalizers.word import WordFst
+    from nn_wfst.en.electronic.verbalize import VerbalizeFst
+    from pynini.lib import pynutil
+except (ImportError, ModuleNotFoundError):
+    raise ModuleNotFoundError(
+        "The package `nemo_text_processing` was not installed in this environment. Please refer to"
+        " https://github.com/NVIDIA/NeMo-text-processing and install this package before using "
+        "this script"
+    )
 
 
 class VerbalizeFinalFst(GraphFst):
diff --git a/examples/nlp/duplex_text_normalization/nn_wfst/en/whitelist/normalize.py b/examples/nlp/duplex_text_normalization/nn_wfst/en/whitelist/normalize.py
index 4109109ec83a..cfb4bef5d1c3 100644
--- a/examples/nlp/duplex_text_normalization/nn_wfst/en/whitelist/normalize.py
+++ b/examples/nlp/duplex_text_normalization/nn_wfst/en/whitelist/normalize.py
@@ -12,8 +12,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from nemo_text_processing.text_normalization.normalize import Normalizer
-from nemo_text_processing.text_normalization.token_parser import TokenParser
+try:
+    from nemo_text_processing.text_normalization.normalize import Normalizer
+    from nemo_text_processing.text_normalization.token_parser import TokenParser
+except (ImportError, ModuleNotFoundError):
+    raise ModuleNotFoundError(
+        "The package `nemo_text_processing` was not installed in this environment. Please refer to"
+        " https://github.com/NVIDIA/NeMo-text-processing and install this package before using "
+        "this script"
+    )
 
 from nemo.collections.common.tokenizers.moses_tokenizers import MosesProcessor
 
@@ -21,7 +28,7 @@
 class WhitelistNormalizer(Normalizer):
     """
     Normalizer for WHITELIST.
-
+
     Args:
         input_case: accepting either "lower_cased" or "cased" input.
         lang: language
diff --git a/examples/nlp/duplex_text_normalization/nn_wfst/en/whitelist/tokenize_and_classify.py b/examples/nlp/duplex_text_normalization/nn_wfst/en/whitelist/tokenize_and_classify.py
index 712812fa8190..c2d69e765bb4 100644
--- a/examples/nlp/duplex_text_normalization/nn_wfst/en/whitelist/tokenize_and_classify.py
+++ b/examples/nlp/duplex_text_normalization/nn_wfst/en/whitelist/tokenize_and_classify.py
@@ -15,18 +15,25 @@
 
 import os
 
-import pynini
-from nemo_text_processing.text_normalization.en.graph_utils import (
-    NEMO_WHITE_SPACE,
-    GraphFst,
-    delete_extra_space,
-    delete_space,
-    generator_main,
-)
-from nemo_text_processing.text_normalization.en.taggers.punctuation import PunctuationFst
-from nemo_text_processing.text_normalization.en.taggers.whitelist import WhiteListFst
-from nemo_text_processing.text_normalization.en.taggers.word import WordFst
-from pynini.lib import pynutil
+try:
+    import pynini
+    from nemo_text_processing.text_normalization.en.graph_utils import (
+        NEMO_WHITE_SPACE,
+        GraphFst,
+        delete_extra_space,
+        delete_space,
+        generator_main,
+    )
+    from nemo_text_processing.text_normalization.en.taggers.punctuation import PunctuationFst
+    from nemo_text_processing.text_normalization.en.taggers.whitelist import WhiteListFst
+    from nemo_text_processing.text_normalization.en.taggers.word import WordFst
+    from pynini.lib import pynutil
+except (ImportError, ModuleNotFoundError):
+    raise ModuleNotFoundError(
+        "The package `nemo_text_processing` was not installed in this environment. Please refer to"
+        " https://github.com/NVIDIA/NeMo-text-processing and install this package before using "
+        "this script"
+    )
 
 from nemo.utils import logging
 
@@ -34,9 +41,9 @@
 class ClassifyFst(GraphFst):
     """
     Final class that composes all other classification grammars. This class can process an entire sentence including punctuation.
-    For deployment, this grammar will be compiled and exported to OpenFst Finate State Archiv (FAR) File. 
+    For deployment, this grammar will be compiled and exported to OpenFst Finite State Archive (FAR) File.
     More details to deployment at NeMo/tools/text_processing_deployment.
-
+
     Args:
         input_case: accepting either "lower_cased" or "cased" input.
         deterministic: if True will provide a single transduction option,
diff --git a/examples/nlp/duplex_text_normalization/nn_wfst/en/whitelist/verbalize.py b/examples/nlp/duplex_text_normalization/nn_wfst/en/whitelist/verbalize.py
index e85f067acf96..c647a142ef8c 100644
--- a/examples/nlp/duplex_text_normalization/nn_wfst/en/whitelist/verbalize.py
+++ b/examples/nlp/duplex_text_normalization/nn_wfst/en/whitelist/verbalize.py
@@ -12,15 +12,21 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-
-from nemo_text_processing.text_normalization.en.graph_utils import GraphFst
-from nemo_text_processing.text_normalization.en.verbalizers.whitelist import WhiteListFst
+try:
+    from nemo_text_processing.text_normalization.en.graph_utils import GraphFst
+    from nemo_text_processing.text_normalization.en.verbalizers.whitelist import WhiteListFst
+except (ImportError, ModuleNotFoundError):
+    raise ModuleNotFoundError(
+        "The package `nemo_text_processing` was not installed in this environment. Please refer to"
+        " https://github.com/NVIDIA/NeMo-text-processing and install this package before using "
+        "this script"
+    )
 
 
 class VerbalizeFst(GraphFst):
     """
     Composes other verbalizer grammars.
-    For deployment, this grammar will be compiled and exported to OpenFst Finate State Archiv (FAR) File. 
+    For deployment, this grammar will be compiled and exported to OpenFst Finite State Archive (FAR) File.
     More details to deployment at NeMo/tools/text_processing_deployment.
 
     Args:
diff --git a/examples/nlp/duplex_text_normalization/nn_wfst/en/whitelist/verbalize_final.py b/examples/nlp/duplex_text_normalization/nn_wfst/en/whitelist/verbalize_final.py
index 4d5d716bd01e..550a8a85d797 100644
--- a/examples/nlp/duplex_text_normalization/nn_wfst/en/whitelist/verbalize_final.py
+++ b/examples/nlp/duplex_text_normalization/nn_wfst/en/whitelist/verbalize_final.py
@@ -13,11 +13,18 @@
 # limitations under the License.
 
 
-import pynini
-from nemo_text_processing.text_normalization.en.graph_utils import GraphFst, delete_extra_space, delete_space
-from nemo_text_processing.text_normalization.en.verbalizers.word import WordFst
-from nn_wfst.en.electronic.verbalize import VerbalizeFst
-from pynini.lib import pynutil
+try:
+    import pynini
+    from nemo_text_processing.text_normalization.en.graph_utils import GraphFst, delete_extra_space, delete_space
+    from nemo_text_processing.text_normalization.en.verbalizers.word import WordFst
+    from nn_wfst.en.electronic.verbalize import VerbalizeFst
+    from pynini.lib import pynutil
+except (ImportError, ModuleNotFoundError):
+    raise ModuleNotFoundError(
+        "The package `nemo_text_processing` was not installed in this environment. Please refer to"
+        " https://github.com/NVIDIA/NeMo-text-processing and install this package before using "
+        "this script"
+    )
 
 
 class VerbalizeFinalFst(GraphFst):
diff --git a/requirements/requirements_tts.txt b/requirements/requirements_tts.txt
index bb330aaf2e58..9536faec8c78 100644
--- a/requirements/requirements_tts.txt
+++ b/requirements/requirements_tts.txt
@@ -4,7 +4,8 @@ jieba
 kornia
 librosa
 matplotlib
-nemo_text_processing
+# pynini does not currently support aarch, disable nemo_text_processing for now
+nemo_text_processing; 'arm' not in platform_machine and 'aarch' not in platform_machine
 nltk
 pandas
 pypinyin
diff --git a/scripts/dataset_processing/tts/hui_acg/get_data.py b/scripts/dataset_processing/tts/hui_acg/get_data.py
index dfde19f33f57..668d532f321a 100644
--- a/scripts/dataset_processing/tts/hui_acg/get_data.py
+++ b/scripts/dataset_processing/tts/hui_acg/get_data.py
@@ -21,9 +21,17 @@
 
 import pandas as pd
 from joblib import Parallel, delayed
-from nemo_text_processing.text_normalization.normalize import Normalizer
 from tqdm import tqdm
 
+try:
+    from nemo_text_processing.text_normalization.normalize import Normalizer
+except (ImportError, ModuleNotFoundError):
+    raise ModuleNotFoundError(
+        "The package `nemo_text_processing` was not installed in this environment. Please refer to"
+        " https://github.com/NVIDIA/NeMo-text-processing and install this package before using "
+        "this script"
+    )
+
 from nemo.utils import logging
 
 # full corpus.
diff --git a/scripts/dataset_processing/tts/ljspeech/get_data.py b/scripts/dataset_processing/tts/ljspeech/get_data.py
index c8aeed5dbfca..8007b5a0f05a 100644
--- a/scripts/dataset_processing/tts/ljspeech/get_data.py
+++ b/scripts/dataset_processing/tts/ljspeech/get_data.py
@@ -20,9 +20,17 @@
 
 import sox
 import wget
-from nemo_text_processing.text_normalization.normalize import Normalizer
 from tqdm import tqdm
 
+try:
+    from nemo_text_processing.text_normalization.normalize import Normalizer
+except (ImportError, ModuleNotFoundError):
+    raise ModuleNotFoundError(
+        "The package `nemo_text_processing` was not installed in this environment. Please refer to"
+        " https://github.com/NVIDIA/NeMo-text-processing and install this package before using "
+        "this script"
+    )
+
 
 def get_args():
     parser = argparse.ArgumentParser(description='Download LJSpeech and create manifests with predefined split')
diff --git a/scripts/dataset_processing/tts/preprocess_text.py b/scripts/dataset_processing/tts/preprocess_text.py
index 580a84a02d6f..6afab42a1d6b 100644
--- a/scripts/dataset_processing/tts/preprocess_text.py
+++ b/scripts/dataset_processing/tts/preprocess_text.py
@@ -32,10 +32,18 @@
 
 from hydra.utils import instantiate
 from joblib import Parallel, delayed
-from nemo_text_processing.text_normalization.normalize import Normalizer
 from omegaconf import OmegaConf
 from tqdm import tqdm
 
+try:
+    from nemo_text_processing.text_normalization.normalize import Normalizer
+except (ImportError, ModuleNotFoundError):
+    raise ModuleNotFoundError(
+        "The package `nemo_text_processing` was not installed in this environment. Please refer to"
+        " https://github.com/NVIDIA/NeMo-text-processing and install this package before using "
+        "this script"
+    )
+
 from nemo.collections.asr.parts.utils.manifest_utils import read_manifest, write_manifest
diff --git a/scripts/dataset_processing/tts/sfbilingual/get_data.py b/scripts/dataset_processing/tts/sfbilingual/get_data.py
index bb38a6d127ba..806f9882a9f4 100755
--- a/scripts/dataset_processing/tts/sfbilingual/get_data.py
+++ b/scripts/dataset_processing/tts/sfbilingual/get_data.py
@@ -20,9 +20,17 @@
 from pathlib import Path
 
 import numpy as np
-from nemo_text_processing.text_normalization.normalize import Normalizer
 from opencc import OpenCC
 
+try:
+    from nemo_text_processing.text_normalization.normalize import Normalizer
+except (ImportError, ModuleNotFoundError):
+    raise ModuleNotFoundError(
+        "The package `nemo_text_processing` was not installed in this environment. Please refer to"
+        " https://github.com/NVIDIA/NeMo-text-processing and install this package before using "
+        "this script"
+    )
+
 
 def get_args():
     parser = argparse.ArgumentParser(
diff --git a/scripts/dataset_processing/tts/thorsten_neutral/get_data.py b/scripts/dataset_processing/tts/thorsten_neutral/get_data.py
index 9422c0cd5498..d49d362064fd 100644
--- a/scripts/dataset_processing/tts/thorsten_neutral/get_data.py
+++ b/scripts/dataset_processing/tts/thorsten_neutral/get_data.py
@@ -32,9 +32,17 @@
 from pathlib import Path
 
 from joblib import Parallel, delayed
-from nemo_text_processing.text_normalization.normalize import Normalizer
 from tqdm import tqdm
 
+try:
+    from nemo_text_processing.text_normalization.normalize import Normalizer
+except (ImportError, ModuleNotFoundError):
+    raise ModuleNotFoundError(
+        "The package `nemo_text_processing` was not installed in this environment. Please refer to"
+        " https://github.com/NVIDIA/NeMo-text-processing and install this package before using "
+        "this script"
+    )
+
 from nemo.utils import logging
 
 # Thorsten Müller published two neural voice datasets, 21.02 and 22.10.
diff --git a/tests/collections/asr/test_text_to_text_dataset.py b/tests/collections/asr/test_text_to_text_dataset.py
index bc7a0a9d01dd..92205de41a1b 100644
--- a/tests/collections/asr/test_text_to_text_dataset.py
+++ b/tests/collections/asr/test_text_to_text_dataset.py
@@ -20,9 +20,17 @@
 
 import pytest
 from hydra.utils import instantiate
-from nemo_text_processing.text_normalization.normalize import Normalizer
 from omegaconf import OmegaConf
 
+try:
+    from nemo_text_processing.text_normalization.normalize import Normalizer
+except (ImportError, ModuleNotFoundError):
+    raise ModuleNotFoundError(
+        "The package `nemo_text_processing` was not installed in this environment. Please refer to"
+        " https://github.com/NVIDIA/NeMo-text-processing and install this package before using "
+        "this script"
+    )
+
 from nemo.collections.asr.data.text_to_text import TextToTextDataset, TextToTextItem, TextToTextIterableDataset
 from nemo.collections.common import tokenizers
diff --git a/tools/ctc_segmentation/requirements.txt b/tools/ctc_segmentation/requirements.txt
index f010b225a66e..bb51e49a0c87 100644
--- a/tools/ctc_segmentation/requirements.txt
+++ b/tools/ctc_segmentation/requirements.txt
@@ -1,3 +1,4 @@
 ctc_segmentation==1.7.1
-nemo_text_processing==0.1.6rc0
+# pynini does not currently support aarch, disable nemo_text_processing for now
+nemo_text_processing==0.1.6rc0; 'arm' not in platform_machine and 'aarch' not in platform_machine
 num2words
diff --git a/tutorials/asr/ASR_TTS_Tutorial.ipynb b/tutorials/asr/ASR_TTS_Tutorial.ipynb
index 267c84bca9d2..067c007ea3df 100644
--- a/tutorials/asr/ASR_TTS_Tutorial.ipynb
+++ b/tutorials/asr/ASR_TTS_Tutorial.ipynb
@@ -183,7 +183,14 @@
     "from nemo.collections.tts.models import FastPitchModel, SpectrogramEnhancerModel\n",
     "from nemo.utils.notebook_utils import download_an4\n",
     "\n",
-    "from nemo_text_processing.text_normalization.normalize import Normalizer"
+    "try:\n",
+    "    from nemo_text_processing.text_normalization.normalize import Normalizer\n",
+    "except ModuleNotFoundError:\n",
+    "    raise ModuleNotFoundError(\n",
+    "        \"The package `nemo_text_processing` was not installed in this environment. Please refer to\"\n",
+    "        \" https://github.com/NVIDIA/NeMo-text-processing and install this package before using \"\n",
+    "        \"this script\"\n",
+    "    )"
    ]
   },
   {
diff --git a/tutorials/tts/FastPitch_MixerTTS_Training.ipynb b/tutorials/tts/FastPitch_MixerTTS_Training.ipynb
index 9eac34c499d8..a2e26d17ed9b 100644
--- a/tutorials/tts/FastPitch_MixerTTS_Training.ipynb
+++ b/tutorials/tts/FastPitch_MixerTTS_Training.ipynb
@@ -198,8 +198,15 @@
    "source": [
     "from nemo.collections.tts.g2p.models.en_us_arpabet import EnglishG2p\n",
     "from nemo.collections.tts.data.dataset import TTSDataset\n",
-    "from nemo_text_processing.text_normalization.normalize import Normalizer\n",
-    "from nemo.collections.common.tokenizers.text_to_speech.tts_tokenizers import EnglishPhonemesTokenizer, EnglishCharsTokenizer"
+    "from nemo.collections.common.tokenizers.text_to_speech.tts_tokenizers import EnglishPhonemesTokenizer, EnglishCharsTokenizer\n",
+    "try:\n",
+    "    from nemo_text_processing.text_normalization.normalize import Normalizer\n",
+    "except ModuleNotFoundError:\n",
+    "    raise ModuleNotFoundError(\n",
+    "        \"The package `nemo_text_processing` was not installed in this environment. Please refer to\"\n",
+    "        \" https://github.com/NVIDIA/NeMo-text-processing and install this package before using \"\n",
+    "        \"this script\"\n",
+    "    )"
    ]
   },
  {
diff --git a/tutorials/tts/NeMo_TTS_Primer.ipynb b/tutorials/tts/NeMo_TTS_Primer.ipynb
index dea4a8936053..f891b7f11594 100644
--- a/tutorials/tts/NeMo_TTS_Primer.ipynb
+++ b/tutorials/tts/NeMo_TTS_Primer.ipynb
@@ -240,7 +240,14 @@
    },
    "outputs": [],
    "source": [
-    "from nemo_text_processing.text_normalization.normalize import Normalizer\n",
+    "try:\n",
+    "    from nemo_text_processing.text_normalization.normalize import Normalizer\n",
+    "except ModuleNotFoundError:\n",
+    "    raise ModuleNotFoundError(\n",
+    "        \"The package `nemo_text_processing` was not installed in this environment. Please refer to\"\n",
+    "        \" https://github.com/NVIDIA/NeMo-text-processing and install this package before using \"\n",
+    "        \"this script\"\n",
+    "    )\n",
     "\n",
     "text_normalizer = Normalizer(input_case=\"cased\", lang=\"en\")"
    ]
@@ -777,7 +784,7 @@
     "While raw audio shows amplitude versus time and is useful for easily recording and listening, it is not optimal when it comes to processing.\n",
     "\n",
     "For processing, it is usually preferable to represent the audio as a **spectrogram** which shows frequency versus time. Specifically, we:\n",
-    "\n",
+    "\n",
     "1. Group together audio samples into a much smaller set of time buckets, called **audio frames**. An audio frame will usually bucket around 50ms of audio.\n",
     "2. For each audio frame, use the [Fast Fourier transform](https://en.wikipedia.org/wiki/Fast_Fourier_transform) (**FFT**) to calculate the magnitude (ie. energy, amplitude or \"loudness\") and phase (which we don't use) of each frequency bin. We refer to the magnitudes of the frequency bins as a spectrogram\n",
     "3. Map the original frequency bins onto the [mel scale](https://en.wikipedia.org/wiki/Mel_scale), using overlapped [triangular filters](https://en.wikipedia.org/wiki/Window_function#Triangular_window) to create mel filterbanks.\n",