WIP: support rvc-webui; pitch-less is not supported yet

This commit is contained in:
wataru 2023-04-24 03:25:31 +09:00
parent d3823183d6
commit acfb7b601a
4 changed files with 135 additions and 29 deletions

View File

@@ -32,7 +32,7 @@ import pyworld as pw
from voice_changer.RVC.custom_vc_infer_pipeline import VC
from infer_pack.models import SynthesizerTrnMs256NSFsid, SynthesizerTrnMs256NSFsid_nono
from .models import SynthesizerTrnMsNSFsid as SynthesizerTrnMs768NSFsid
from .const import RVC_MODEL_TYPE_NORMAL, RVC_MODEL_TYPE_PITCH_LESS, RVC_MODEL_TYPE_WEBUI_256_NORMAL, RVC_MODEL_TYPE_WEBUI_768_NORMAL, RVC_MODEL_TYPE_UNKNOWN
from .const import RVC_MODEL_TYPE_NORMAL, RVC_MODEL_TYPE_PITCHLESS, RVC_MODEL_TYPE_WEBUI_256_NORMAL, RVC_MODEL_TYPE_WEBUI_768_NORMAL, RVC_MODEL_TYPE_WEBUI_256_PITCHLESS, RVC_MODEL_TYPE_WEBUI_768_PITCHLESS, RVC_MODEL_TYPE_UNKNOWN
from fairseq import checkpoint_utils
providers = ['OpenVINOExecutionProvider', "CUDAExecutionProvider", "DmlExecutionProvider", "CPUExecutionProvider"]
@@ -162,39 +162,34 @@ class RVC:
[1025, 32, 192, 192, 768, 2, 6, 3, 0, '1', [3, 7, 11], [[1, 3, 5], [1, 3, 5], [1, 3, 5]], [10, 6, 2, 2, 2], 512, [16, 16, 4, 4, 4], 109, 256, 768, 48000]
18: original, 19: rvc-webui
(2-1) Original normal-or-pitchless detection: judged from the shape of the config's upsamplingrate
normal
[1025, 32, 192, 192, 768, 2, 6, 3, 0, '1', [3, 7, 11], [[1, 3, 5], [1, 3, 5], [1, 3, 5]], [10, 6, 2, 2, 2], 512, [16, 16, 4, 4, 4], 109, 256, 48000]
pitchless
[1025, 32, 192, 192, 768, 2, 6, 3, 0, '1', [3, 7, 11], [[1, 3, 5], [1, 3, 5], [1, 3, 5]], [10, 10, 2, 2], 512, [16, 16, 4, 4], 109, 256, 40000]
Judged by the number of entries in the 12th element (upsamplingrate): 4: pitchless, 5: normal
(2-1) Original normal-or-pitchless detection: judged by cpt["f0"]
0: pitchless, 1: normal
(2-2) rvc-webui (256 or 768) x (normal or pitchless) detection: 256 or 768 is judged by the 17th element
256 x normal
[1025, 32, 192, 192, 768, 2, 6, 3, 0, '1', [3, 7, 11], [[1, 3, 5], [1, 3, 5], [1, 3, 5]], [10, 6, 2, 2, 2], 512, [16, 16, 4, 4, 4], 109, 256, 256, 48000]
256 x pitchless
[1025, 32, 192, 192, 768, 2, 6, 3, 0, '1', [3, 7, 11], [[1, 3, 5], [1, 3, 5], [1, 3, 5]], [10, 6, 2, 2, 2], 512, [16, 16, 4, 4, 4], 109, 256, 256, 48000]
768 x normal
[1025, 32, 192, 192, 768, 2, 6, 3, 0, '1', [3, 7, 11], [[1, 3, 5], [1, 3, 5], [1, 3, 5]], [10, 6, 2, 2, 2], 512, [16, 16, 4, 4, 4], 109, 256, 768, 48000]
768 x pitchless
[1025, 32, 192, 192, 768, 2, 6, 3, 0, '1', [3, 7, 11], [[1, 3, 5], [1, 3, 5], [1, 3, 5]], [10, 6, 2, 2, 2], 512, [16, 16, 4, 4, 4], 109, 256, 768, 48000]
(2-2) rvc-webui (256 or 768) x (normal or pitchless) detection: 256 or 768 is judged by the 17th element; normal or pitchless is judged by cpt["f0"]
'''
print("config shape:::::", cpt["config"])
# print("config shape:1::::", cpt["config"], cpt["f0"])
# print("config shape:2::::", (cpt).keys)
config_len = len(cpt["config"])
upsamplingRateDims = len(cpt["config"][12])
if config_len == 18 and upsamplingRateDims == 4:
print("[Voice Changer] RVC Model Type: RVC_MODEL_TYPE_PITCH_LESS")
self.settings.modelSlots[slot].modelType = RVC_MODEL_TYPE_PITCH_LESS
elif config_len == 18 and upsamplingRateDims == 5:
if config_len == 18 and cpt["f0"] == 0:
print("[Voice Changer] RVC Model Type: RVC_MODEL_TYPE_PITCHLESS")
self.settings.modelSlots[slot].modelType = RVC_MODEL_TYPE_PITCHLESS
elif config_len == 18 and cpt["f0"] == 1:
print("[Voice Changer] RVC Model Type: RVC_MODEL_TYPE_NORMAL")
self.settings.modelSlots[slot].modelType = RVC_MODEL_TYPE_NORMAL
elif config_len == 19:
print("PARAMS:::::::::", cpt["params"])
embedding = cpt["config"][17]
if embedding == 256:
if embedding == 256 and cpt["f0"] == 0:
print("[Voice Changer] RVC Model Type: RVC_MODEL_TYPE_WEBUI_256_PITCHLESS")
self.settings.modelSlots[slot].modelType = RVC_MODEL_TYPE_WEBUI_256_PITCHLESS
elif embedding == 256 and cpt["f0"] == 1:
print("[Voice Changer] RVC Model Type: RVC_MODEL_TYPE_WEBUI_256_NORMAL")
self.settings.modelSlots[slot].modelType = RVC_MODEL_TYPE_WEBUI_256_NORMAL
elif embedding == 768 and cpt["f0"] == 0:
print("[Voice Changer] RVC Model Type: RVC_MODEL_TYPE_WEBUI_768_PITCHLESS")
self.settings.modelSlots[slot].modelType = RVC_MODEL_TYPE_WEBUI_768_PITCHLESS
else:
print("[Voice Changer] RVC Model Type: RVC_MODEL_TYPE_WEBUI_768_NORMAL")
self.settings.modelSlots[slot].modelType = RVC_MODEL_TYPE_WEBUI_768_NORMAL
@@ -206,10 +201,12 @@ class RVC:
if self.settings.modelSlots[slot].modelType == RVC_MODEL_TYPE_NORMAL:
net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=self.is_half)
elif self.settings.modelSlots[slot].modelType == RVC_MODEL_TYPE_PITCH_LESS:
elif self.settings.modelSlots[slot].modelType == RVC_MODEL_TYPE_PITCHLESS:
net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
elif self.settings.modelSlots[slot].modelType == RVC_MODEL_TYPE_WEBUI_256_NORMAL or self.settings.modelSlots[slot].modelType == RVC_MODEL_TYPE_WEBUI_768_NORMAL:
net_g = SynthesizerTrnMs768NSFsid(**cpt["params"], is_half=self.is_half)
elif self.settings.modelSlots[slot].modelType == RVC_MODEL_TYPE_WEBUI_256_PITCHLESS or self.settings.modelSlots[slot].modelType == RVC_MODEL_TYPE_WEBUI_768_PITCHLESS:
net_g = SynthesizerTrnMs768NSFsid(**cpt["params"], is_half=self.is_half)
else:
print("unknwon")

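For reference, the detection rules spelled out in the docstring above condense into a small standalone helper. This is a minimal sketch, not code from this commit: detect_model_type is an illustrative name, cpt is assumed to be a checkpoint dict loaded with torch.load, and the values of RVC_MODEL_TYPE_WEBUI_768_PITCHLESS and RVC_MODEL_TYPE_UNKNOWN are assumptions, since the const.py diff below does not show them.

```python
# Sketch of the checkpoint-type detection above (illustrative, not from this commit).
# cpt is assumed to be a dict loaded via torch.load(path, map_location="cpu").
RVC_MODEL_TYPE_NORMAL = 0
RVC_MODEL_TYPE_PITCHLESS = 1
RVC_MODEL_TYPE_WEBUI_256_NORMAL = 2
RVC_MODEL_TYPE_WEBUI_256_PITCHLESS = 3
RVC_MODEL_TYPE_WEBUI_768_NORMAL = 4
RVC_MODEL_TYPE_WEBUI_768_PITCHLESS = 5  # assumed; not shown in the const.py diff
RVC_MODEL_TYPE_UNKNOWN = -1  # assumed; not shown in the const.py diff


def detect_model_type(cpt: dict) -> int:
    """Classify an RVC checkpoint by config length, f0 flag, and embedding dim."""
    config_len = len(cpt["config"])
    if config_len == 18:  # original RVC checkpoint
        return RVC_MODEL_TYPE_NORMAL if cpt["f0"] == 1 else RVC_MODEL_TYPE_PITCHLESS
    if config_len == 19:  # rvc-webui checkpoint; config[17] is the embedding dim
        if cpt["config"][17] == 256:
            return RVC_MODEL_TYPE_WEBUI_256_NORMAL if cpt["f0"] == 1 else RVC_MODEL_TYPE_WEBUI_256_PITCHLESS
        return RVC_MODEL_TYPE_WEBUI_768_NORMAL if cpt["f0"] == 1 else RVC_MODEL_TYPE_WEBUI_768_PITCHLESS
    return RVC_MODEL_TYPE_UNKNOWN
```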
View File

@@ -1,5 +1,5 @@
RVC_MODEL_TYPE_NORMAL = 0
RVC_MODEL_TYPE_PITCH_LESS = 1
RVC_MODEL_TYPE_PITCHLESS = 1
RVC_MODEL_TYPE_WEBUI_256_NORMAL = 2
RVC_MODEL_TYPE_WEBUI_256_PITCHLESS = 3
RVC_MODEL_TYPE_WEBUI_768_NORMAL = 4

View File

@@ -10,7 +10,7 @@ import pyworld
import os
import traceback
import faiss
from .const import RVC_MODEL_TYPE_NORMAL, RVC_MODEL_TYPE_PITCH_LESS, RVC_MODEL_TYPE_WEBUI_256_NORMAL, RVC_MODEL_TYPE_WEBUI_768_NORMAL
from .const import RVC_MODEL_TYPE_NORMAL, RVC_MODEL_TYPE_PITCHLESS, RVC_MODEL_TYPE_WEBUI_256_NORMAL, RVC_MODEL_TYPE_WEBUI_768_NORMAL, RVC_MODEL_TYPE_WEBUI_256_PITCHLESS, RVC_MODEL_TYPE_WEBUI_768_PITCHLESS
class VC(object):
@@ -94,7 +94,7 @@ class VC(object):
assert feats.dim() == 1, feats.dim()
feats = feats.view(1, -1)
padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False)
if modelType == RVC_MODEL_TYPE_NORMAL or modelType == RVC_MODEL_TYPE_PITCH_LESS or modelType == RVC_MODEL_TYPE_WEBUI_256_NORMAL:
if modelType == RVC_MODEL_TYPE_NORMAL or modelType == RVC_MODEL_TYPE_PITCHLESS or modelType == RVC_MODEL_TYPE_WEBUI_256_NORMAL or modelType == RVC_MODEL_TYPE_WEBUI_256_PITCHLESS:
inputs = {
"source": feats.to(self.device),
"padding_mask": padding_mask,
@@ -109,9 +109,11 @@
t0 = ttime()
with torch.no_grad():
logits = model.extract_features(**inputs)
if modelType == RVC_MODEL_TYPE_NORMAL or modelType == RVC_MODEL_TYPE_PITCH_LESS or modelType == RVC_MODEL_TYPE_WEBUI_256_NORMAL:
if modelType == RVC_MODEL_TYPE_NORMAL or modelType == RVC_MODEL_TYPE_PITCHLESS or modelType == RVC_MODEL_TYPE_WEBUI_256_NORMAL or modelType == RVC_MODEL_TYPE_WEBUI_256_PITCHLESS:
print("-------------------------256")
feats = model.final_proj(logits[0])
else:
print("-------------------------768")
feats = logits[0]
if (isinstance(index, type(None)) == False and isinstance(big_npy, type(None)) == False and index_rate != 0):
@@ -136,10 +138,11 @@
p_len = torch.tensor([p_len], device=self.device).long()
with torch.no_grad():
if modelType == RVC_MODEL_TYPE_NORMAL or modelType == RVC_MODEL_TYPE_WEBUI_256_NORMAL or modelType == RVC_MODEL_TYPE_WEBUI_768_NORMAL:
if modelType == RVC_MODEL_TYPE_NORMAL or modelType == RVC_MODEL_TYPE_WEBUI_256_NORMAL or modelType == RVC_MODEL_TYPE_WEBUI_768_NORMAL or modelType == RVC_MODEL_TYPE_WEBUI_256_PITCHLESS or modelType == RVC_MODEL_TYPE_WEBUI_768_PITCHLESS:
audio1 = (net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0] * 32768).data.cpu().float().numpy().astype(np.int16)
else:
audio1 = (net_g.infer(feats, p_len, sid)[0][0, 0] * 32768).data.cpu().float().numpy().astype(np.int16)
# audio1 = (net_g.infer(feats, p_len, None, pitchf, sid)[0][0, 0] * 32768).data.cpu().float().numpy().astype(np.int16)
del feats, p_len, padding_mask
torch.cuda.empty_cache()

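The pipeline changes in this file reduce to two branch points: 256-dim model types project HuBERT features through model.final_proj while 768-dim webui types consume the extractor output directly, and only the original pitchless type calls net_g.infer without pitch. Below is a condensed sketch of that routing, not code from this commit; the faiss index lookup between the two steps is omitted, and the import path for the constants is assumed.

```python
import numpy as np
import torch

from voice_changer.RVC.const import (  # module path assumed from the imports above
    RVC_MODEL_TYPE_NORMAL, RVC_MODEL_TYPE_PITCHLESS,
    RVC_MODEL_TYPE_WEBUI_256_NORMAL, RVC_MODEL_TYPE_WEBUI_256_PITCHLESS,
)


def route_inference(model, net_g, inputs, modelType, p_len, pitch, pitchf, sid):
    """Condensed sketch of the branching in this hunk (illustrative).

    model: fairseq HuBERT feature extractor; net_g: one of the synthesizers.
    The faiss index retrieval between the two steps is omitted here.
    """
    emb_256_types = (RVC_MODEL_TYPE_NORMAL, RVC_MODEL_TYPE_PITCHLESS,
                     RVC_MODEL_TYPE_WEBUI_256_NORMAL, RVC_MODEL_TYPE_WEBUI_256_PITCHLESS)
    with torch.no_grad():
        logits = model.extract_features(**inputs)
        # 256-dim types project HuBERT features; 768-dim (webui) types use them as-is.
        feats = model.final_proj(logits[0]) if modelType in emb_256_types else logits[0]
        # WIP state: only the original pitchless type takes the no-pitch call; the
        # webui pitchless types still go through the pitch-aware call, matching
        # "pitch-less is not supported yet" in the commit message.
        if modelType == RVC_MODEL_TYPE_PITCHLESS:
            out = net_g.infer(feats, p_len, sid)[0][0, 0]
        else:
            out = net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0]
    return (out * 32768).data.cpu().float().numpy().astype(np.int16)
```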
View File

@@ -111,6 +111,7 @@ class SynthesizerTrnMsNSFsid(nn.Module):
n_layers,
kernel_size,
p_dropout,
# f0=False,
)
self.dec = GeneratorNSF(
inter_channels,
@@ -168,3 +169,108 @@ class SynthesizerTrnMsNSFsid(nn.Module):
z = self.flow(z_p, x_mask, g=g, reverse=True)
o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)
return o, x_mask, (z, z_p, m_p, logs_p)
class SynthesizerTrnMs256NSFSidNono(nn.Module):
def __init__(
self,
spec_channels,
segment_size,
inter_channels,
hidden_channels,
filter_channels,
n_heads,
n_layers,
kernel_size,
p_dropout,
resblock,
resblock_kernel_sizes,
resblock_dilation_sizes,
upsample_rates,
upsample_initial_channel,
upsample_kernel_sizes,
spk_embed_dim,
gin_channels,
emb_channels,
sr=None,
**kwargs
):
super().__init__()
self.spec_channels = spec_channels
self.inter_channels = inter_channels
self.hidden_channels = hidden_channels
self.filter_channels = filter_channels
self.n_heads = n_heads
self.n_layers = n_layers
self.kernel_size = kernel_size
self.p_dropout = p_dropout
self.resblock = resblock
self.resblock_kernel_sizes = resblock_kernel_sizes
self.resblock_dilation_sizes = resblock_dilation_sizes
self.upsample_rates = upsample_rates
self.upsample_initial_channel = upsample_initial_channel
self.upsample_kernel_sizes = upsample_kernel_sizes
self.segment_size = segment_size
self.gin_channels = gin_channels
self.emb_channels = emb_channels
# self.hop_length = hop_length#
self.spk_embed_dim = spk_embed_dim
self.enc_p = TextEncoder256(
inter_channels,
hidden_channels,
filter_channels,
emb_channels,
n_heads,
n_layers,
kernel_size,
p_dropout,
f0=False,
)
self.dec = Generator(
inter_channels,
resblock,
resblock_kernel_sizes,
resblock_dilation_sizes,
upsample_rates,
upsample_initial_channel,
upsample_kernel_sizes,
gin_channels=gin_channels,
)
self.enc_q = PosteriorEncoder(
spec_channels,
inter_channels,
hidden_channels,
5,
1,
16,
gin_channels=gin_channels,
)
self.flow = ResidualCouplingBlock(
inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
)
self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
def remove_weight_norm(self):
self.dec.remove_weight_norm()
self.flow.remove_weight_norm()
self.enc_q.remove_weight_norm()
def forward(self, phone, phone_lengths, y, y_lengths, ds):  # ds is the speaker id, shape [bs, 1]
g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1]; the trailing 1 broadcasts over t
m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
z_p = self.flow(z, y_mask, g=g)
z_slice, ids_slice = commons.rand_slice_segments(
z, y_lengths, self.segment_size
)
o = self.dec(z_slice, g=g)
return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
def infer(self, phone, phone_lengths, sid, max_len=None):
g = self.emb_g(sid).unsqueeze(-1)
m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
z = self.flow(z_p, x_mask, g=g, reverse=True)
o = self.dec((z * x_mask)[:, :, :max_len], g=g)
return o, x_mask, (z, z_p, m_p, logs_p)
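A minimal smoke-test sketch for the SynthesizerTrnMs256NSFSidNono class added above, not code from this commit. The constructor arguments follow the 256 x pitchless config layout quoted in the detection notes, and the input shapes are assumptions based on how RVC feeds TextEncoder256.

```python
import torch

# Assumed import path for the class defined in this file.
from voice_changer.RVC.models import SynthesizerTrnMs256NSFSidNono

# Constructor args mirror the 256 x pitchless webui config quoted earlier:
# spec/segment/encoder params, upsample params, spk_embed_dim=109,
# gin_channels=256, emb_channels=256, sr=48000.
net_g = SynthesizerTrnMs256NSFSidNono(
    1025, 32, 192, 192, 768, 2, 6, 3, 0, "1",
    [3, 7, 11], [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
    [10, 6, 2, 2, 2], 512, [16, 16, 4, 4, 4],
    109, 256, 256, 48000,
)
net_g.eval()

phone = torch.randn(1, 100, 256)         # (batch, frames, emb_channels); layout assumed
phone_lengths = torch.LongTensor([100])
sid = torch.LongTensor([0])              # speaker id, must be < spk_embed_dim
with torch.no_grad():
    audio, x_mask, _ = net_g.infer(phone, phone_lengths, sid)
print(audio.shape)  # expected (1, 1, samples)
```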