
Commit

Format code (#1298)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
github-actions[bot] authored Sep 21, 2023
1 parent 39b7582 commit 1b7aa52
Showing 11 changed files with 515 additions and 341 deletions.
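
All of the hunks shown below are pure reformatting: string literals move to double quotes, long expressions are wrapped across lines, and spacing is normalized, which is consistent with an automated formatter such as Black. The commit message does not name the tool, so the following snippet for reproducing this kind of cleanup locally is only a sketch under that assumption:

import subprocess

# Re-run the formatter over the files touched here (assumes Black, which the
# commit itself does not state; the paths are the ones shown in this diff).
subprocess.run(["black", "gui_v1.py", "infer/lib/infer_pack/"], check=True)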
32 changes: 22 additions & 10 deletions gui_v1.py
@@ -16,6 +16,7 @@
logger = logging.getLogger(__name__)
stream_latency = -1


class Harvest(multiprocessing.Process):
def __init__(self, inp_q, opt_q):
multiprocessing.Process.__init__(self)
@@ -100,7 +101,7 @@ class GUI:
def __init__(self) -> None:
self.config = GUIConfig()
self.flag_vc = False
self.function = 'vc'
self.function = "vc"
self.delay_time = 0
self.launcher()

@@ -116,7 +117,7 @@ def load(self):
if data["sg_input_device"] not in input_devices:
data["sg_input_device"] = input_devices[sd.default.device[0]]
if data["sg_output_device"] not in output_devices:
data["sg_output_device"] = output_devices[sd.default.device[1]]
data["sg_output_device"] = output_devices[sd.default.device[1]]
except:
with open("configs/config.json", "w") as j:
data = {
@@ -364,7 +365,7 @@ def launcher(self):
key="im",
default=False,
enable_events=True,
),
),
sg.Radio(
i18n("输出变声"),
"function",
@@ -439,7 +440,12 @@ def event_handler(self):
global stream_latency
while stream_latency < 0:
time.sleep(0.01)
self.delay_time = stream_latency + values["block_time"] + values["crossfade_length"] + 0.01
self.delay_time = (
stream_latency
+ values["block_time"]
+ values["crossfade_length"]
+ 0.01
)
if values["I_noise_reduce"]:
self.delay_time += values["crossfade_length"]
self.window["delay_time"].update(int(self.delay_time * 1000))
@@ -464,7 +470,9 @@ def event_handler(self):
elif event == "I_noise_reduce":
self.config.I_noise_reduce = values["I_noise_reduce"]
if stream_latency > 0:
self.delay_time += (1 if values["I_noise_reduce"] else -1) * values["crossfade_length"]
self.delay_time += (
1 if values["I_noise_reduce"] else -1
) * values["crossfade_length"]
self.window["delay_time"].update(int(self.delay_time * 1000))
elif event == "O_noise_reduce":
self.config.O_noise_reduce = values["O_noise_reduce"]
@@ -646,7 +654,7 @@ def audio_callback(
self.block_frame_16k :
].clone()
# input noise reduction and resampling
if self.config.I_noise_reduce and self.function == 'vc':
if self.config.I_noise_reduce and self.function == "vc":
input_wav = self.input_wav[
-self.crossfade_frame - self.block_frame - 2 * self.zc :
]
@@ -670,10 +678,12 @@
self.input_wav[-self.block_frame - 2 * self.zc :]
)[160:]
# infer
if self.function == 'vc':
if self.function == "vc":
f0_extractor_frame = self.block_frame_16k + 800
if self.config.f0method == "rmvpe":
f0_extractor_frame = 5120 * ((f0_extractor_frame - 1) // 5120 + 1) - 160
f0_extractor_frame = (
5120 * ((f0_extractor_frame - 1) // 5120 + 1) - 160
)
infer_wav = self.rvc.infer(
self.input_wav_res,
self.input_wav_res[-f0_extractor_frame:].cpu().numpy(),
@@ -691,7 +701,9 @@
-self.crossfade_frame - self.sola_search_frame - self.block_frame :
].clone()
# output noise reduction
if (self.config.O_noise_reduce and self.function == 'vc') or (self.config.I_noise_reduce and self.function == 'im'):
if (self.config.O_noise_reduce and self.function == "vc") or (
self.config.I_noise_reduce and self.function == "im"
):
self.output_buffer[: -self.block_frame] = self.output_buffer[
self.block_frame :
].clone()
@@ -700,7 +712,7 @@
infer_wav.unsqueeze(0), self.output_buffer.unsqueeze(0)
).squeeze(0)
# volume envelop mixing
if self.config.rms_mix_rate < 1 and self.function == 'vc':
if self.config.rms_mix_rate < 1 and self.function == "vc":
rms1 = librosa.feature.rms(
y=self.input_wav_res[-160 * infer_wav.shape[0] // self.zc :]
.cpu()
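
For context on the gui_v1.py hunks above: the reformatted expression in event_handler() estimates the end-to-end monitoring delay as the reported stream latency plus the block time, the crossfade length, and a fixed 10 ms margin, with one extra crossfade window when input noise reduction is enabled. A standalone sketch of that arithmetic (the helper function and the example numbers are illustrative, not part of the commit):

def estimated_delay_seconds(
    stream_latency: float,
    block_time: float,
    crossfade_length: float,
    input_noise_reduce: bool,
) -> float:
    # Mirrors the expression in event_handler(): latency + block + crossfade
    # + 0.01 s, plus one more crossfade window when input noise reduction is on.
    delay = stream_latency + block_time + crossfade_length + 0.01
    if input_noise_reduce:
        delay += crossfade_length
    return delay

# 0.10 s latency, 0.25 s block, 0.05 s crossfade, noise reduction enabled:
# 0.10 + 0.25 + 0.05 + 0.01 + 0.05 = 0.46 s, displayed by the GUI as 460 ms.
print(estimated_delay_seconds(0.10, 0.25, 0.05, True))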
113 changes: 63 additions & 50 deletions infer/lib/infer_pack/attentions.py
@@ -23,7 +23,7 @@ def __init__(
window_size=10,
**kwargs
):
super(Encoder,self).__init__()
super(Encoder, self).__init__()
self.hidden_channels = hidden_channels
self.filter_channels = filter_channels
self.n_heads = n_heads
@@ -62,9 +62,10 @@ def __init__(
def forward(self, x, x_mask):
attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
x = x * x_mask
zippep=zip(self.attn_layers,self.norm_layers_1,\
self.ffn_layers,self.norm_layers_2)
for attn_layers,norm_layers_1,ffn_layers,norm_layers_2 in zippep:
zippep = zip(
self.attn_layers, self.norm_layers_1, self.ffn_layers, self.norm_layers_2
)
for attn_layers, norm_layers_1, ffn_layers, norm_layers_2 in zippep:
y = attn_layers(x, x, attn_mask)
y = self.drop(y)
x = norm_layers_1(x + y)
@@ -89,7 +90,7 @@ def __init__(
proximal_init=True,
**kwargs
):
super(Decoder,self).__init__()
super(Decoder, self).__init__()
self.hidden_channels = hidden_channels
self.filter_channels = filter_channels
self.n_heads = n_heads
@@ -175,7 +176,7 @@ def __init__(
proximal_bias=False,
proximal_init=False,
):
super(MultiHeadAttention,self).__init__()
super(MultiHeadAttention, self).__init__()
assert channels % n_heads == 0

self.channels = channels
@@ -216,20 +217,28 @@ def __init__(
self.conv_k.weight.copy_(self.conv_q.weight)
self.conv_k.bias.copy_(self.conv_q.bias)

def forward(self, x:torch.Tensor, c:torch.Tensor, attn_mask:Optional[torch.Tensor]=None):
def forward(
self, x: torch.Tensor, c: torch.Tensor, attn_mask: Optional[torch.Tensor] = None
):
q = self.conv_q(x)
k = self.conv_k(c)
v = self.conv_v(c)

x, _= self.attention(q, k, v,mask=attn_mask)
x, _ = self.attention(q, k, v, mask=attn_mask)

x = self.conv_o(x)
return x

def attention(self, query:torch.Tensor, key:torch.Tensor, value:torch.Tensor, mask:Optional[torch.Tensor]=None):
def attention(
self,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
mask: Optional[torch.Tensor] = None,
):
# reshape [b, d, t] -> [b, n_h, t, d_k]
b, d, t_s= key.size()
t_t=query.size(2)
b, d, t_s = key.size()
t_t = query.size(2)
query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
@@ -296,17 +305,17 @@ def _matmul_with_relative_keys(self, x, y):
ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
return ret

def _get_relative_embeddings(self, relative_embeddings, length:int):
def _get_relative_embeddings(self, relative_embeddings, length: int):
max_relative_position = 2 * self.window_size + 1
# Pad first before slice to avoid using cond ops.
pad_length:int = max(length - (self.window_size + 1), 0)
pad_length: int = max(length - (self.window_size + 1), 0)
slice_start_position = max((self.window_size + 1) - length, 0)
slice_end_position = slice_start_position + 2 * length - 1
if pad_length > 0:
padded_relative_embeddings = F.pad(
relative_embeddings,
# commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]),
[0, 0, pad_length, pad_length,0,0]
[0, 0, pad_length, pad_length, 0, 0],
)
else:
padded_relative_embeddings = relative_embeddings
@@ -322,17 +331,18 @@ def _relative_position_to_absolute_position(self, x):
"""
batch, heads, length, _ = x.size()
# Concat columns of pad to shift from relative to absolute indexing.
x = F.pad(x,
# commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]])
[0,1,0,0,0,0,0,0]
)
x = F.pad(
x,
# commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]])
[0, 1, 0, 0, 0, 0, 0, 0],
)

# Concat extra elements so to add up to shape (len+1, 2*len-1).
x_flat = x.view([batch, heads, length * 2 * length])
x_flat = F.pad(
x_flat,
x_flat,
# commons.convert_pad_shape([[0, 0], [0, 0], [0, int(length) - 1]])
[0, int(length) - 1, 0,0,0,0]
[0, int(length) - 1, 0, 0, 0, 0],
)

# Reshape and slice out the padded elements.
@@ -349,20 +359,21 @@ def _absolute_position_to_relative_position(self, x):
batch, heads, length, _ = x.size()
# padd along column
x = F.pad(
x,
x,
# commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, int(length) - 1]])
[0, int(length) - 1,0,0,0,0,0,0]
[0, int(length) - 1, 0, 0, 0, 0, 0, 0],
)
x_flat = x.view([batch, heads, int(length**2) + int(length * (length - 1))])
# add 0's in the beginning that will skew the elements after reshape
x_flat = F.pad(x_flat,
# commons.convert_pad_shape([[0, 0], [0, 0], [int(length), 0]])
[length,0,0,0,0,0]
)
x_flat = F.pad(
x_flat,
# commons.convert_pad_shape([[0, 0], [0, 0], [int(length), 0]])
[length, 0, 0, 0, 0, 0],
)
x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
return x_final

def _attention_bias_proximal(self, length:int):
def _attention_bias_proximal(self, length: int):
"""Bias for self-attention to encourage attention to close positions.
Args:
length: an integer scalar.
@@ -382,18 +393,18 @@ def __init__(
filter_channels,
kernel_size,
p_dropout=0.0,
activation:str=None,
activation: str = None,
causal=False,
):
super(FFN,self).__init__()
super(FFN, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.filter_channels = filter_channels
self.kernel_size = kernel_size
self.p_dropout = p_dropout
self.activation = activation
self.causal = causal
self.is_activation = True if activation=="gelu" else False
self.is_activation = True if activation == "gelu" else False
# if causal:
# self.padding = self._causal_padding
# else:
@@ -402,45 +413,47 @@ def __init__(
self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
self.drop = nn.Dropout(p_dropout)
def padding(self,x:torch.Tensor,x_mask:torch.Tensor)->torch.Tensor:

def padding(self, x: torch.Tensor, x_mask: torch.Tensor) -> torch.Tensor:
if self.causal:
padding=self._causal_padding(x * x_mask)
padding = self._causal_padding(x * x_mask)
else:
padding=self._same_padding(x * x_mask)
padding = self._same_padding(x * x_mask)
return padding
def forward(self, x:torch.Tensor, x_mask:torch.Tensor):
x = self.conv_1(self.padding(x,x_mask))

def forward(self, x: torch.Tensor, x_mask: torch.Tensor):
x = self.conv_1(self.padding(x, x_mask))
if self.is_activation:
x = x * torch.sigmoid(1.702 * x)
else:
x = torch.relu(x)
x = self.drop(x)

x = self.conv_2(self.padding(x,x_mask))
x = self.conv_2(self.padding(x, x_mask))
return x * x_mask

def _causal_padding(self, x):
if self.kernel_size == 1:
return x
pad_l:int = self.kernel_size - 1
pad_r:int = 0
pad_l: int = self.kernel_size - 1
pad_r: int = 0
# padding = [[0, 0], [0, 0], [pad_l, pad_r]]
x = F.pad(x,
# commons.convert_pad_shape(padding)
[pad_l, pad_r,0,0,0,0]
)
x = F.pad(
x,
# commons.convert_pad_shape(padding)
[pad_l, pad_r, 0, 0, 0, 0],
)
return x

def _same_padding(self, x):
if self.kernel_size == 1:
return x
pad_l:int = (self.kernel_size - 1) // 2
pad_r:int = self.kernel_size // 2
pad_l: int = (self.kernel_size - 1) // 2
pad_r: int = self.kernel_size // 2
# padding = [[0, 0], [0, 0], [pad_l, pad_r]]
x = F.pad(x,
# commons.convert_pad_shape(padding)
[pad_l, pad_r,0,0,0,0]
)
x = F.pad(
x,
# commons.convert_pad_shape(padding)
[pad_l, pad_r, 0, 0, 0, 0],
)
return x
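
The FFN hunks above replace the commented-out commons.convert_pad_shape(...) calls with the equivalent flat pad lists that F.pad takes directly. A minimal sketch of the two padding modes, written as standalone functions rather than the FFN methods in the diff (the tensor shape is made up for the example):

import torch
import torch.nn.functional as F


def causal_padding(x: torch.Tensor, kernel_size: int) -> torch.Tensor:
    if kernel_size == 1:
        return x
    # All padding on the left, so the convolution never sees future frames.
    return F.pad(x, [kernel_size - 1, 0, 0, 0, 0, 0])


def same_padding(x: torch.Tensor, kernel_size: int) -> torch.Tensor:
    if kernel_size == 1:
        return x
    # Split the padding so the output length matches the input length.
    return F.pad(x, [(kernel_size - 1) // 2, kernel_size // 2, 0, 0, 0, 0])


x = torch.zeros(1, 8, 100)  # [batch, channels, time]
print(causal_padding(x, 5).shape)  # torch.Size([1, 8, 104])
print(same_padding(x, 5).shape)  # torch.Size([1, 8, 104])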
5 changes: 3 additions & 2 deletions infer/lib/infer_pack/commons.py
@@ -119,7 +119,8 @@ def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
# pad_shape = [item for sublist in l for item in sublist]
# return pad_shape

def convert_pad_shape(pad_shape:List[List[int]])->List[int]:

def convert_pad_shape(pad_shape: List[List[int]]) -> List[int]:
return torch.tensor(pad_shape).flip(0).reshape(-1).int().tolist()


@@ -128,7 +129,7 @@ def shift_1d(x):
return x


def sequence_mask(length:torch.Tensor, max_length:Optional[int]=None):
def sequence_mask(length: torch.Tensor, max_length: Optional[int] = None):
if max_length is None:
max_length = length.max()
x = torch.arange(max_length, dtype=length.dtype, device=length.device)
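
convert_pad_shape(), shown in the commons.py hunk above, reverses the per-dimension [left, right] pairs and flattens them into the order torch.nn.functional.pad expects (last dimension first). A small self-contained check of the equivalence that the inline comments in attentions.py rely on (the example tensor is illustrative):

import torch
import torch.nn.functional as F
from typing import List


def convert_pad_shape(pad_shape: List[List[int]]) -> List[int]:
    # Same body as the version in infer/lib/infer_pack/commons.py.
    return torch.tensor(pad_shape).flip(0).reshape(-1).int().tolist()


# The commented-out call and the literal list next to it describe the same
# padding, e.g. for the relative-to-absolute position shift:
assert convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]) == [0, 1, 0, 0, 0, 0, 0, 0]

x = torch.zeros(2, 4, 3, 5)
y = F.pad(x, [0, 1, 0, 0, 0, 0, 0, 0])  # pads only the last dimension, on the right
print(y.shape)  # torch.Size([2, 4, 3, 6])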
(The diff for the remaining 8 changed files did not load and is not shown.)
