WIP: pitch extractor

parent 6a09338af5
commit 02eadeeaa3
@@ -17,8 +17,6 @@ from voice_changer.RVC.embedder.Embedder import Embedder
 from voice_changer.common.VolumeExtractor import VolumeExtractor
 from torchaudio.transforms import Resample

 from voice_changer.utils.Timer import Timer


 class Pipeline(object):
     embedder: Embedder
@@ -144,23 +142,15 @@ class Pipeline(object):
                 silence_front=silence_front,
             )

             pitch = torch.tensor(pitch[-n_frames:], device=self.device).unsqueeze(0).long()
         except IndexError as e:  # NOQA
             # print(e)
             raise NotEnoughDataExtimateF0()
         print("[EMBEDDER EXTRACT:audio:4:]", audio_t.shape)

         # f0 = self.f0ex.extract_f0(audio_pad, key=4, sr=44100)
         # print("[Pitch_f0]", f0)

         # tensor shape adjustment
         feats = audio16k.squeeze()
         if feats.dim() == 2:  # double channels
             feats = feats.mean(-1)
         feats = feats.view(1, -1)
         print("[EMBEDDER EXTRACT:audio:5:]", audio_t.shape)

         print("[EMBEDDER EXTRACT:::]", feats.shape)

         # embedding
         with autocast(enabled=self.isHalf):
@@ -177,39 +167,6 @@ class Pipeline(object):
                 raise e
         feats = F.interpolate(feats.permute(0, 2, 1), size=int(n_frames), mode='nearest').permute(0, 2, 1)

         if protect < 0.5:
             feats0 = feats.clone()

         # # pitch size adjustment
         # p_len = audio_pad.shape[0] // self.window
         # feats_len = feats.shape[1]
         # if feats.shape[1] < p_len:
         #     p_len = feats_len
         #     pitch = pitch[:, :feats_len]
         #     pitchf = pitchf[:, :feats_len]

         # pitch = pitch[:, -feats_len:]
         # pitchf = pitchf[:, -feats_len:]
         # p_len = torch.tensor([feats_len], device=self.device).long()

         # print("----------plen::1:", p_len)

         # If pitch estimation fails (pitchf=0), blend in the features from before the index search.
         # How pitchff is built is questionable, but it matches the upstream implementation, so it is kept as-is.
         # https://github.com/w-okada/voice-changer/pull/276#issuecomment-1571336929
         # if protect < 0.5:
         #     pitchff = pitchf.clone()
         #     pitchff[pitchf > 0] = 1
         #     pitchff[pitchf < 1] = protect
         #     pitchff = pitchff.unsqueeze(-1)
         #     feats = feats * pitchff + feats0 * (1 - pitchff)
         #     feats = feats.to(feats0.dtype)

         # # apply silent front for inference
         # if type(self.inferencer) in [OnnxRVCInferencer, OnnxRVCInferencerNono]:
         #     npyOffset = math.floor(silence_front * 16000) // 360  # 160x2 = 360
         #     feats = feats[:, npyOffset * 2 :, :]  # NOQA

         # run inference
         try:
             with torch.no_grad():
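Note: the F.interpolate(..., mode='nearest') call above stretches the embedder output along the time axis so that the feature frame count matches the pitch frame count n_frames. A minimal sketch of that alignment with hypothetical shapes (the real feats comes from the embedder, not torch.randn):

import torch
import torch.nn.functional as F

# Hypothetical shapes: 100 embedder frames of 256 channels, stretched to 162 pitch frames.
feats = torch.randn(1, 100, 256)  # (batch, frames, channels)
n_frames = 162

# F.interpolate resamples the last dimension, so move time to dim -1,
# duplicate frames by nearest-neighbor lookup, then restore (batch, frames, channels).
feats = F.interpolate(feats.permute(0, 2, 1), size=n_frames, mode="nearest").permute(0, 2, 1)
print(feats.shape)  # torch.Size([1, 162, 256])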
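Note: the commented-out protect block above blends the pre-search features back in wherever the pitch estimate is zero, per the linked PR discussion. A standalone sketch of that blend, assuming feats/feats0 are the searched and pre-search feature tensors and pitchf is the per-frame F0 (the function name blend_protected is hypothetical):

import torch

def blend_protected(feats, feats0, pitchf, protect):
    # Mirrors the commented-out block: voiced frames (pitchf > 0) keep the
    # searched features; unvoiced frames fall back toward feats0 by `protect`.
    if protect >= 0.5:  # upstream convention: protect >= 0.5 disables blending
        return feats
    pitchff = pitchf.clone()
    pitchff[pitchf > 0] = 1
    pitchff[pitchf < 1] = protect
    pitchff = pitchff.unsqueeze(-1)  # (batch, frames, 1) for broadcasting
    feats = feats * pitchff + feats0 * (1 - pitchff)
    return feats.to(feats0.dtype)

# Hypothetical usage: 1 batch, 4 frames, 2 feature dims; frames 0 and 2 are unvoiced.
feats = torch.ones(1, 4, 2)
feats0 = torch.zeros(1, 4, 2)
pitchf = torch.tensor([[0.0, 220.0, 0.0, 440.0]])
print(blend_protected(feats, feats0, pitchf, protect=0.3))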
@@ -1,6 +1,5 @@
 import pyworld
 import numpy as np
 import scipy.signal as signal
 from const import PitchExtractorType
 import torch
 from voice_changer.RVC.pitchExtractor.PitchExtractor import PitchExtractor
@@ -11,40 +10,31 @@ class HarvestPitchExtractor(PitchExtractor):
     def __init__(self):
         super().__init__()
         self.pitchExtractorType: PitchExtractorType = "harvest"
         self.f0_min = 50
         self.f0_max = 1100
         self.sapmle_rate = 16000
         self.uv_interp = True

     def extract(self, audio: torch.Tensor, pitchf, f0_up_key, sr, window, silence_front=0):
         audio = audio.detach().cpu().numpy()
         f0_min = 50
         f0_max = 1100
         # f0_mel_min = 1127 * np.log(1 + f0_min / 700)
         # f0_mel_max = 1127 * np.log(1 + f0_max / 700)

         f0 = self.extract2(audio, uv_interp=True, hop_size=window, silence_front=silence_front)
         f0 = f0 * 2 ** (float(f0_up_key) / 12)

         # pitchf[-f0.shape[0]:] = f0[:pitchf.shape[0]]
         return f0

     def extract2(self, audio, uv_interp, hop_size: int, silence_front=0):  # audio: 1d numpy array
         n_frames = int(len(audio) // hop_size) + 1

-        start_frame = int(silence_front * self.sapmle_rate / window)
-        real_silence_front = start_frame * window / self.sapmle_rate
-        audio = audio[int(np.round(real_silence_front * self.sapmle_rate)):]
+        start_frame = int(silence_front * 16000 / hop_size)
+        real_silence_front = start_frame * hop_size / 16000
+        audio = audio[int(np.round(real_silence_front * 16000)):]
         f0, _ = pyworld.harvest(
             audio.astype('double'),
             16000,
             f0_floor=50,
             f0_ceil=1100,
-            frame_period=(1000 * window / self.sapmle_rate))
-        pitchf[-f0.shape[0]:] = f0[:pitchf.shape[0]]
-        f0 = pitchf
+            frame_period=(1000 * hop_size / 16000))
+        f0 = np.pad(f0.astype('float'), (start_frame, n_frames - len(f0) - start_frame))

-        if self.uv_interp:
+        if uv_interp:
             uv = f0 == 0
             if len(f0[~uv]) > 0:
                 f0[uv] = np.interp(np.where(uv)[0], np.where(~uv)[0], f0[~uv])
             f0[f0 < 50] = 50

-        f0 = f0 * 2 ** (float(f0_up_key) / 12)

         return f0
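Note: putting the new extract/extract2 path together end to end: harvest F0 at 16 kHz, pad back to the full frame count, interpolate across unvoiced gaps, then apply the semitone shift f0 * 2 ** (f0_up_key / 12), so f0_up_key=12 doubles the pitch, one octave up. A self-contained sketch under those assumptions (the function name harvest_f0 is hypothetical):

import numpy as np
import pyworld

SR = 16000  # the diff hardcodes a 16 kHz analysis rate

def harvest_f0(audio, hop_size, f0_up_key=0, silence_front=0.0, uv_interp=True):
    n_frames = int(len(audio) // hop_size) + 1

    # Skip a leading silent region, quantized to whole frames.
    start_frame = int(silence_front * SR / hop_size)
    real_silence_front = start_frame * hop_size / SR
    audio = audio[int(np.round(real_silence_front * SR)):]

    f0, _ = pyworld.harvest(
        audio.astype(np.double), SR,
        f0_floor=50, f0_ceil=1100,
        frame_period=1000 * hop_size / SR)
    # Left-pad the skipped silent frames and right-pad up to n_frames.
    f0 = np.pad(f0.astype(np.float64), (start_frame, n_frames - len(f0) - start_frame))

    if uv_interp:
        uv = f0 == 0
        if len(f0[~uv]) > 0:
            # Fill unvoiced gaps by linear interpolation between voiced frames.
            f0[uv] = np.interp(np.where(uv)[0], np.where(~uv)[0], f0[~uv])
        f0[f0 < 50] = 50  # clamp to the f0 floor

    return f0 * 2 ** (float(f0_up_key) / 12)

# Hypothetical usage: one second of noise, 10 ms hop, shifted up one octave.
audio = np.random.randn(SR).astype(np.float32)
print(harvest_f0(audio, hop_size=160, f0_up_key=12).shape)  # (101,)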