forked from NVIDIA/DeepLearningExamples
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Adding FastPitch/PyT (modified version of FastSpeech)
Showing 82 changed files with 33,021 additions and 0 deletions.
@@ -0,0 +1,9 @@
*~
*.pyc
__pycache__
output
LJSpeech-1.1*
runs*
pretrained_models

.git
@@ -0,0 +1,9 @@
*.swp
*.swo
*.pyc
__pycache__
scripts_joc/
runs*/
notebooks/
LJSpeech-1.1/
output*
@@ -0,0 +1,7 @@
ARG FROM_IMAGE_NAME=nvcr.io/nvidia/pytorch:20.03-py3
FROM ${FROM_IMAGE_NAME}

ADD requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
WORKDIR /workspace/fastpitch
COPY . .
@@ -0,0 +1,29 @@
BSD 3-Clause License

Copyright (c) 2020, NVIDIA Corporation
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.

* Neither the name of the copyright holder nor the names of its
  contributors may be used to endorse or promote products derived from
  this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
PyTorch/SpeechSynthesis/FastPitch/common/audio_processing.py (120 additions, 0 deletions)
@@ -0,0 +1,120 @@
# *****************************************************************************
#  Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
#  Redistribution and use in source and binary forms, with or without
#  modification, are permitted provided that the following conditions are met:
#      * Redistributions of source code must retain the above copyright
#        notice, this list of conditions and the following disclaimer.
#      * Redistributions in binary form must reproduce the above copyright
#        notice, this list of conditions and the following disclaimer in the
#        documentation and/or other materials provided with the distribution.
#      * Neither the name of the NVIDIA CORPORATION nor the
#        names of its contributors may be used to endorse or promote products
#        derived from this software without specific prior written permission.
#
#  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
#  ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
#  WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#  DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
#  DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
#  (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#  LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
#  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
#  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************

import torch
import numpy as np
from scipy.signal import get_window
import librosa.util as librosa_util


def window_sumsquare(window, n_frames, hop_length=200, win_length=800,
                     n_fft=800, dtype=np.float32, norm=None):
    """
    # from librosa 0.6
    Compute the sum-square envelope of a window function at a given hop length.
    This is used to estimate modulation effects induced by windowing
    observations in short-time Fourier transforms.

    Parameters
    ----------
    window : string, tuple, number, callable, or list-like
        Window specification, as in `get_window`
    n_frames : int > 0
        The number of analysis frames
    hop_length : int > 0
        The number of samples to advance between frames
    win_length : [optional]
        The length of the window function. By default, this matches `n_fft`.
    n_fft : int > 0
        The length of each analysis frame.
    dtype : np.dtype
        The data type of the output

    Returns
    -------
    wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))`
        The sum-squared envelope of the window function
    """
    if win_length is None:
        win_length = n_fft

    n = n_fft + hop_length * (n_frames - 1)
    x = np.zeros(n, dtype=dtype)

    # Compute the squared window at the desired length
    win_sq = get_window(window, win_length, fftbins=True)
    win_sq = librosa_util.normalize(win_sq, norm=norm)**2
    win_sq = librosa_util.pad_center(win_sq, n_fft)

    # Fill the envelope
    for i in range(n_frames):
        sample = i * hop_length
        x[sample:min(n, sample + n_fft)] += win_sq[:max(0, min(n_fft, n - sample))]
    return x


def griffin_lim(magnitudes, stft_fn, n_iters=30):
    """
    PARAMS
    ------
    magnitudes: spectrogram magnitudes
    stft_fn: STFT class with transform (STFT) and inverse (ISTFT) methods
    """
    angles = np.angle(np.exp(2j * np.pi * np.random.rand(*magnitudes.size())))
    angles = angles.astype(np.float32)
    angles = torch.autograd.Variable(torch.from_numpy(angles))
    signal = stft_fn.inverse(magnitudes, angles).squeeze(1)

    for i in range(n_iters):
        _, angles = stft_fn.transform(signal)
        signal = stft_fn.inverse(magnitudes, angles).squeeze(1)
    return signal


def dynamic_range_compression(x, C=1, clip_val=1e-5):
    """
    PARAMS
    ------
    C: compression factor
    """
    return torch.log(torch.clamp(x, min=clip_val) * C)


def dynamic_range_decompression(x, C=1):
    """
    PARAMS
    ------
    C: compression factor used to compress
    """
    return torch.exp(x) / C
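The helpers above are small enough to exercise in isolation. Below is a minimal usage sketch (not part of the commit): it assumes torch, numpy, scipy and a librosa with the 0.6-era util API are installed, and that the file is importable as common.audio_processing, matching the import path used by the other modules in this diff.

# Usage sketch only -- not part of the commit. Assumes the module path
# common.audio_processing and a librosa 0.6-era API, as noted above.
import torch

from common.audio_processing import (
    window_sumsquare, dynamic_range_compression, dynamic_range_decompression)

# Sum-square envelope of a Hann window over 10 analysis frames; an STFT
# inverse divides by this to undo the modulation introduced by windowing.
env = window_sumsquare('hann', n_frames=10, hop_length=256, win_length=1024,
                       n_fft=1024)
print(env.shape)  # (1024 + 256 * 9,) == (3328,)

# Log-compression and its inverse round-trip a fake magnitude spectrogram.
mag = torch.rand(2, 80, 50) + 1e-3            # values safely above clip_val
comp = dynamic_range_compression(mag)         # log(clamp(x, min=1e-5) * C)
recon = dynamic_range_decompression(comp)     # exp(x) / C
print(torch.allclose(mag, recon, atol=1e-5))  # True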
@@ -0,0 +1,126 @@
# *****************************************************************************
#  Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
#  Redistribution and use in source and binary forms, with or without
#  modification, are permitted provided that the following conditions are met:
#      * Redistributions of source code must retain the above copyright
#        notice, this list of conditions and the following disclaimer.
#      * Redistributions in binary form must reproduce the above copyright
#        notice, this list of conditions and the following disclaimer in the
#        documentation and/or other materials provided with the distribution.
#      * Neither the name of the NVIDIA CORPORATION nor the
#        names of its contributors may be used to endorse or promote products
#        derived from this software without specific prior written permission.
#
#  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
#  ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
#  WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#  DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
#  DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
#  (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#  LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
#  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
#  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************

import torch
import torch.nn.functional as F
from librosa.filters import mel as librosa_mel_fn
from common.audio_processing import dynamic_range_compression, dynamic_range_decompression
from common.stft import STFT


class LinearNorm(torch.nn.Module):
    def __init__(self, in_dim, out_dim, bias=True, w_init_gain='linear'):
        super(LinearNorm, self).__init__()
        self.linear_layer = torch.nn.Linear(in_dim, out_dim, bias=bias)

        torch.nn.init.xavier_uniform_(
            self.linear_layer.weight,
            gain=torch.nn.init.calculate_gain(w_init_gain))

    def forward(self, x):
        return self.linear_layer(x)


class ConvNorm(torch.nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
                 padding=None, dilation=1, bias=True, w_init_gain='linear',
                 batch_norm=False):
        super(ConvNorm, self).__init__()
        if padding is None:
            assert(kernel_size % 2 == 1)
            padding = int(dilation * (kernel_size - 1) / 2)

        self.conv = torch.nn.Conv1d(in_channels, out_channels,
                                    kernel_size=kernel_size, stride=stride,
                                    padding=padding, dilation=dilation,
                                    bias=bias)
        self.norm = torch.nn.BatchNorm1d(out_channels) if batch_norm else None

        torch.nn.init.xavier_uniform_(
            self.conv.weight,
            gain=torch.nn.init.calculate_gain(w_init_gain))

    def forward(self, signal):
        if self.norm is None:
            return self.conv(signal)
        else:
            return self.norm(self.conv(signal))


class ConvReLUNorm(torch.nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size=1, dropout=0.0):
        super(ConvReLUNorm, self).__init__()
        self.conv = torch.nn.Conv1d(in_channels, out_channels,
                                    kernel_size=kernel_size,
                                    padding=(kernel_size // 2))
        self.norm = torch.nn.LayerNorm(out_channels)
        self.dropout = torch.nn.Dropout(dropout)

    def forward(self, signal):
        out = F.relu(self.conv(signal))
        out = self.norm(out.transpose(1, 2)).transpose(1, 2)
        return self.dropout(out)


class TacotronSTFT(torch.nn.Module):
    def __init__(self, filter_length=1024, hop_length=256, win_length=1024,
                 n_mel_channels=80, sampling_rate=22050, mel_fmin=0.0,
                 mel_fmax=8000.0):
        super(TacotronSTFT, self).__init__()
        self.n_mel_channels = n_mel_channels
        self.sampling_rate = sampling_rate
        self.stft_fn = STFT(filter_length, hop_length, win_length)
        mel_basis = librosa_mel_fn(
            sampling_rate, filter_length, n_mel_channels, mel_fmin, mel_fmax)
        mel_basis = torch.from_numpy(mel_basis).float()
        self.register_buffer('mel_basis', mel_basis)

    def spectral_normalize(self, magnitudes):
        output = dynamic_range_compression(magnitudes)
        return output

    def spectral_de_normalize(self, magnitudes):
        output = dynamic_range_decompression(magnitudes)
        return output

    def mel_spectrogram(self, y):
        """Computes mel-spectrograms from a batch of waves
        PARAMS
        ------
        y: Variable(torch.FloatTensor) with shape (B, T) in range [-1, 1]

        RETURNS
        -------
        mel_output: torch.FloatTensor of shape (B, n_mel_channels, T)
        """
        assert(torch.min(y.data) >= -1)
        assert(torch.max(y.data) <= 1)

        magnitudes, phases = self.stft_fn.transform(y)
        magnitudes = magnitudes.data
        mel_output = torch.matmul(self.mel_basis, magnitudes)
        mel_output = self.spectral_normalize(mel_output)
        return mel_output
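For context, here is a minimal sketch of how TacotronSTFT is typically driven (not part of the commit). It assumes this file is importable as common.layers (an assumed path), that common/stft.py from the same commit provides the STFT class imported above, and that a pre-0.10 librosa with positional mel-filterbank arguments is installed, as in the NGC container pinned in the Dockerfile.

# Usage sketch only -- not part of the commit. The module path common.layers
# is an assumption; adjust it to wherever this file lives in the repository.
import torch

from common.layers import TacotronSTFT

stft = TacotronSTFT(filter_length=1024, hop_length=256, win_length=1024,
                    n_mel_channels=80, sampling_rate=22050,
                    mel_fmin=0.0, mel_fmax=8000.0)

# One second of fake audio in [-1, 1], batch size 1 (shape (B, T)).
wav = torch.empty(1, 22050).uniform_(-1.0, 1.0)

with torch.no_grad():
    mel = stft.mel_spectrogram(wav)  # log-compressed mel spectrogram

print(mel.shape)  # (1, 80, T) with T roughly 22050 / 256 frames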