WIP: refactoring

wataru 2023-04-28 13:59:28 +09:00
parent c96609640d
commit 4ac4a225a7


@@ -1,6 +1,10 @@
 import sys
 import os
+
+from voice_changer.utils.LoadModelParams import LoadModelParams
+from voice_changer.utils.VoiceChangerModel import AudioInOut
+
-if sys.platform.startswith('darwin'):
+if sys.platform.startswith("darwin"):
     baseDir = [x for x in sys.path if x.endswith("Contents/MacOS")]
     if len(baseDir) != 1:
         print("baseDir should be only one ", baseDir)
@@ -17,16 +21,26 @@ import torch
 import onnxruntime
 import pyworld as pw

-from models import SynthesizerTrn
-from voice_changer.MMVCv15.client_modules import convert_continuos_f0, spectrogram_torch, get_hparams_from_file, load_checkpoint
+from models import SynthesizerTrn  # type:ignore
+from voice_changer.MMVCv15.client_modules import (
+    convert_continuos_f0,
+    spectrogram_torch,
+    get_hparams_from_file,
+    load_checkpoint,
+)

 from Exceptions import NoModeLoadedException, ONNXInputArgumentException

-providers = ['OpenVINOExecutionProvider', "CUDAExecutionProvider", "DmlExecutionProvider", "CPUExecutionProvider"]
+providers = [
+    "OpenVINOExecutionProvider",
+    "CUDAExecutionProvider",
+    "DmlExecutionProvider",
+    "CPUExecutionProvider",
+]


 @dataclass
-class MMVCv15Settings():
+class MMVCv15Settings:
     gpu: int = 0
     srcId: int = 0
     dstId: int = 101
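The providers list is ordered by priority; onnxruntime will use the first entries that are actually available in the installed build. A quick way to check what the current environment supports, using the standard onnxruntime API:

    import onnxruntime

    # Intersect the preferred order with what this build actually provides.
    available = onnxruntime.get_available_providers()
    usable = [p for p in providers if p in available]
    print("usable providers, in priority order:", usable)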
@@ -46,6 +60,8 @@ class MMVCv15Settings():
 class MMVCv15:
+    audio_buffer: AudioInOut | None = None
+
     def __init__(self):
         self.settings = MMVCv15Settings()
         self.net_g = None
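Declaring audio_buffer at class level means the attribute always exists, so later code can test its value with "is not None" instead of hasattr; the same pattern replaces the hasattr check in generate_input further down. A minimal illustration of why hasattr no longer works as a sentinel once the attribute is declared:

    class Example:
        buf: list | None = None  # always present, defaults to None

    e = Example()
    assert hasattr(e, "buf")  # True even before anything is assigned
    if e.buf is not None:     # so test the value, not the attribute's existence
        pass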
@@ -53,13 +69,12 @@ class MMVCv15:
         self.gpu_num = torch.cuda.device_count()

-    def loadModel(self, props):
-        self.settings.configFile = props["files"]["configFilename"]
+    def loadModel(self, props: LoadModelParams):
+        self.settings.configFile = props.files.configFilename
         self.hps = get_hparams_from_file(self.settings.configFile)

-        self.settings.pyTorchModelFile = props["files"]["pyTorchModelFilename"]
-        self.settings.onnxModelFile = props["files"]["onnxModelFilename"]
+        self.settings.pyTorchModelFile = props.files.pyTorchModelFilename
+        self.settings.onnxModelFile = props.files.onnxModelFilename

         # Create the PyTorch model
         self.net_g = SynthesizerTrn(
@ -78,20 +93,19 @@ class MMVCv15:
requires_grad_pe=self.hps.requires_grad.pe, requires_grad_pe=self.hps.requires_grad.pe,
requires_grad_flow=self.hps.requires_grad.flow, requires_grad_flow=self.hps.requires_grad.flow,
requires_grad_text_enc=self.hps.requires_grad.text_enc, requires_grad_text_enc=self.hps.requires_grad.text_enc,
requires_grad_dec=self.hps.requires_grad.dec requires_grad_dec=self.hps.requires_grad.dec,
) )
if self.settings.pyTorchModelFile != None: if self.settings.pyTorchModelFile is not None:
self.net_g.eval() self.net_g.eval()
load_checkpoint(self.settings.pyTorchModelFile, self.net_g, None) load_checkpoint(self.settings.pyTorchModelFile, self.net_g, None)
# ONNXモデル生成 # ONNXモデル生成
self.onxx_input_length = 8192 self.onxx_input_length = 8192
if self.settings.onnxModelFile != None: if self.settings.onnxModelFile is not None:
ort_options = onnxruntime.SessionOptions() ort_options = onnxruntime.SessionOptions()
ort_options.intra_op_num_threads = 8 ort_options.intra_op_num_threads = 8
self.onnx_session = onnxruntime.InferenceSession( self.onnx_session = onnxruntime.InferenceSession(
self.settings.onnxModelFile, self.settings.onnxModelFile, providers=providers
providers=providers
) )
inputs_info = self.onnx_session.get_inputs() inputs_info = self.onnx_session.get_inputs()
for i in inputs_info: for i in inputs_info:
@@ -100,23 +114,39 @@ class MMVCv15:
                     self.onxx_input_length = i.shape[2]
         return self.get_info()

-    def update_settings(self, key: str, val: any):
-        if key == "onnxExecutionProvider" and self.settings.onnxModelFile != "" and self.settings.onnxModelFile != None:
+    def update_settings(self, key: str, val: int | float | str):
+        if (
+            key == "onnxExecutionProvider"
+            and self.settings.onnxModelFile != ""
+            and self.settings.onnxModelFile is not None
+        ):
             if val == "CUDAExecutionProvider":
                 if self.settings.gpu < 0 or self.settings.gpu >= self.gpu_num:
                     self.settings.gpu = 0
-                provider_options = [{'device_id': self.settings.gpu}]
-                self.onnx_session.set_providers(providers=[val], provider_options=provider_options)
+                provider_options = [{"device_id": self.settings.gpu}]
+                self.onnx_session.set_providers(
+                    providers=[val], provider_options=provider_options
+                )
             else:
                 self.onnx_session.set_providers(providers=[val])
         elif key in self.settings.intData:
-            setattr(self.settings, key, int(val))
-            if key == "gpu" and val >= 0 and val < self.gpu_num and self.settings.onnxModelFile != "" and self.settings.onnxModelFile != None:
+            val = int(val)
+            setattr(self.settings, key, val)
+            if (
+                key == "gpu"
+                and val >= 0
+                and val < self.gpu_num
+                and self.settings.onnxModelFile != ""
+                and self.settings.onnxModelFile is not None
+            ):
                 providers = self.onnx_session.get_providers()
                 print("Providers:", providers)
                 if "CUDAExecutionProvider" in providers:
-                    provider_options = [{'device_id': self.settings.gpu}]
-                    self.onnx_session.set_providers(providers=["CUDAExecutionProvider"], provider_options=provider_options)
+                    provider_options = [{"device_id": self.settings.gpu}]
+                    self.onnx_session.set_providers(
+                        providers=["CUDAExecutionProvider"],
+                        provider_options=provider_options,
+                    )
         elif key in self.settings.floatData:
             setattr(self.settings, key, float(val))
         elif key in self.settings.strData:
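The intData branch now coerces val before the comparisons. That matters because val can arrive as a string (the new signature allows int | float | str): the old code passed int(val) to setattr but then compared the raw val with integers, which raises in Python 3. A minimal illustration of the failure mode:

    val = "1"        # e.g. a value parsed from a web request
    # val >= 0       # TypeError: '>=' not supported between 'str' and 'int'
    val = int(val)
    assert val >= 0  # fine once coerced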
@@ -129,11 +159,15 @@ class MMVCv15:
     def get_info(self):
         data = asdict(self.settings)
-        data["onnxExecutionProviders"] = self.onnx_session.get_providers(
-        ) if self.settings.onnxModelFile != "" and self.settings.onnxModelFile != None else []
+        data["onnxExecutionProviders"] = (
+            self.onnx_session.get_providers()
+            if self.settings.onnxModelFile != ""
+            and self.settings.onnxModelFile is not None
+            else []
+        )
         files = ["configFile", "pyTorchModelFile", "onnxModelFile"]
         for f in files:
-            if data[f] != None and os.path.exists(data[f]):
+            if data[f] is not None and os.path.exists(data[f]):
                 data[f] = os.path.basename(data[f])
             else:
                 data[f] = ""
@ -141,36 +175,58 @@ class MMVCv15:
return data return data
def get_processing_sampling_rate(self): def get_processing_sampling_rate(self):
if hasattr(self, "hps") == False: if hasattr(self, "hps") is False:
raise NoModeLoadedException("config") raise NoModeLoadedException("config")
return self.hps.data.sampling_rate return self.hps.data.sampling_rate
def _get_f0(self, detector: str, newData: any): def _get_f0(self, detector: str, newData: AudioInOut):
audio_norm_np = newData.astype(np.float64) audio_norm_np = newData.astype(np.float64)
if detector == "dio": if detector == "dio":
_f0, _time = pw.dio(audio_norm_np, self.hps.data.sampling_rate, frame_period=5.5) _f0, _time = pw.dio(
audio_norm_np, self.hps.data.sampling_rate, frame_period=5.5
)
f0 = pw.stonemask(audio_norm_np, _f0, _time, self.hps.data.sampling_rate) f0 = pw.stonemask(audio_norm_np, _f0, _time, self.hps.data.sampling_rate)
else: else:
f0, t = pw.harvest(audio_norm_np, self.hps.data.sampling_rate, frame_period=5.5, f0_floor=71.0, f0_ceil=1000.0) f0, t = pw.harvest(
f0 = convert_continuos_f0(f0, int(audio_norm_np.shape[0] / self.hps.data.hop_length)) audio_norm_np,
self.hps.data.sampling_rate,
frame_period=5.5,
f0_floor=71.0,
f0_ceil=1000.0,
)
f0 = convert_continuos_f0(
f0, int(audio_norm_np.shape[0] / self.hps.data.hop_length)
)
f0 = torch.from_numpy(f0.astype(np.float32)) f0 = torch.from_numpy(f0.astype(np.float32))
return f0 return f0
def _get_spec(self, newData: any): def _get_spec(self, newData: AudioInOut):
audio = torch.FloatTensor(newData) audio = torch.FloatTensor(newData)
audio_norm = audio.unsqueeze(0) # unsqueeze audio_norm = audio.unsqueeze(0) # unsqueeze
spec = spectrogram_torch(audio_norm, self.hps.data.filter_length, spec = spectrogram_torch(
self.hps.data.sampling_rate, self.hps.data.hop_length, self.hps.data.win_length, audio_norm,
center=False) self.hps.data.filter_length,
self.hps.data.sampling_rate,
self.hps.data.hop_length,
self.hps.data.win_length,
center=False,
)
spec = torch.squeeze(spec, 0) spec = torch.squeeze(spec, 0)
return spec return spec
def generate_input(self, newData: any, inputSize: int, crossfadeSize: int, solaSearchFrame: int = 0): def generate_input(
self,
newData: AudioInOut,
inputSize: int,
crossfadeSize: int,
solaSearchFrame: int = 0,
):
newData = newData.astype(np.float32) / self.hps.data.max_wav_value newData = newData.astype(np.float32) / self.hps.data.max_wav_value
if hasattr(self, "audio_buffer"): if self.audio_buffer is not None:
self.audio_buffer = np.concatenate([self.audio_buffer, newData], 0) # 過去のデータに連結 self.audio_buffer = np.concatenate(
[self.audio_buffer, newData], 0
) # 過去のデータに連結
else: else:
self.audio_buffer = newData self.audio_buffer = newData
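_get_f0 chooses between pyworld's two estimators: dio is fast and gets refined with stonemask, while harvest is slower but generally more accurate. A self-contained sketch of the same calls on dummy audio:

    import numpy as np
    import pyworld as pw

    sr = 24000
    x = np.random.randn(sr).astype(np.float64)  # 1 s of noise; pyworld expects float64

    # dio: coarse, fast F0 track, then stonemask refinement
    _f0, _t = pw.dio(x, sr, frame_period=5.5)
    f0_dio = pw.stonemask(x, _f0, _t, sr)

    # harvest: slower, typically smoother F0 track
    f0_harvest, _ = pw.harvest(x, sr, frame_period=5.5, f0_floor=71.0, f0_ceil=1000.0)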
@@ -179,13 +235,16 @@ class MMVCv15:
         if convertSize < 8192:
             convertSize = 8192
         if convertSize % self.hps.data.hop_length != 0:  # pad, since the model's output hop size would otherwise truncate
-            convertSize = convertSize + (self.hps.data.hop_length - (convertSize % self.hps.data.hop_length))
+            convertSize = convertSize + (
+                self.hps.data.hop_length - (convertSize % self.hps.data.hop_length)
+            )

         # ONNX uses a fixed input length
         if self.settings.framework == "ONNX":
             convertSize = self.onxx_input_length

-        self.audio_buffer = self.audio_buffer[-1 * convertSize:]  # extract only the portion to be converted
+        convertOffset = -1 * convertSize
+        self.audio_buffer = self.audio_buffer[convertOffset:]  # extract only the portion to be converted

         f0 = self._get_f0(self.settings.f0Detector, self.audio_buffer)  # torch
         f0 = (f0 * self.settings.f0Factor).unsqueeze(0).unsqueeze(0)
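The hop-length padding above rounds convertSize up to the next multiple of hop_length. A worked example with illustrative numbers (the real values come from self.hps.data):

    hop_length = 128
    convertSize = 8200
    if convertSize % hop_length != 0:
        convertSize += hop_length - (convertSize % hop_length)
    assert convertSize == 8320 and convertSize % hop_length == 0  # 8320 = 65 * 128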
@@ -194,7 +253,7 @@ class MMVCv15:
         return [spec, f0, sid]

     def _onnx_inference(self, data):
-        if self.settings.onnxModelFile == "" and self.settings.onnxModelFile == None:
+        if self.settings.onnxModelFile == "" and self.settings.onnxModelFile is None:
             print("[Voice Changer] No ONNX session.")
             raise NoModeLoadedException("ONNX")
@@ -204,23 +263,30 @@ class MMVCv15:
         sid_tgt1 = torch.LongTensor([self.settings.dstId])
         sin, d = self.net_g.make_sin_d(f0)
         (d0, d1, d2, d3) = d
-        audio1 = self.onnx_session.run(
-            ["audio"],
-            {
-                "specs": spec.numpy(),
-                "lengths": spec_lengths.numpy(),
-                "sin": sin.numpy(),
-                "d0": d0.numpy(),
-                "d1": d1.numpy(),
-                "d2": d2.numpy(),
-                "d3": d3.numpy(),
-                "sid_src": sid_src.numpy(),
-                "sid_tgt": sid_tgt1.numpy()
-            })[0][0, 0] * self.hps.data.max_wav_value
+        audio1 = (
+            self.onnx_session.run(
+                ["audio"],
+                {
+                    "specs": spec.numpy(),
+                    "lengths": spec_lengths.numpy(),
+                    "sin": sin.numpy(),
+                    "d0": d0.numpy(),
+                    "d1": d1.numpy(),
+                    "d2": d2.numpy(),
+                    "d3": d3.numpy(),
+                    "sid_src": sid_src.numpy(),
+                    "sid_tgt": sid_tgt1.numpy(),
+                },
+            )[0][0, 0]
+            * self.hps.data.max_wav_value
+        )
         return audio1

     def _pyTorch_inference(self, data):
-        if self.settings.pyTorchModelFile == "" or self.settings.pyTorchModelFile == None:
+        if (
+            self.settings.pyTorchModelFile == ""
+            or self.settings.pyTorchModelFile is None
+        ):
             print("[Voice Changer] No pyTorch session.")
             raise NoModeLoadedException("pytorch")
@@ -237,7 +303,12 @@ class MMVCv15:
         sid_src = sid_src.to(dev)
         sid_target = torch.LongTensor([self.settings.dstId]).to(dev)

-        audio1 = self.net_g.to(dev).voice_conversion(spec, spec_lengths, f0, sid_src, sid_target)[0, 0].data * self.hps.data.max_wav_value
+        audio1 = (
+            self.net_g.to(dev)
+            .voice_conversion(spec, spec_lengths, f0, sid_src, sid_target)[0, 0]
+            .data
+            * self.hps.data.max_wav_value
+        )
         result = audio1.float().cpu().numpy()
         return result
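Both inference paths multiply the model output by max_wav_value: the network emits waveforms roughly in [-1, 1], and the multiplication restores integer PCM scale (for 16-bit audio max_wav_value would be 32768.0; the actual value comes from the loaded config). A minimal sketch:

    import numpy as np

    max_wav_value = 32768.0  # illustrative 16-bit scale
    audio_float = np.array([-0.5, 0.0, 0.5], dtype=np.float32)
    pcm = audio_float * max_wav_value  # back to PCM amplitude range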
@@ -257,7 +328,7 @@ class MMVCv15:
             del self.onnx_session

         remove_path = os.path.join("MMVC_Client_v15", "python")
-        sys.path = [x for x in sys.path if x.endswith(remove_path) == False]
+        sys.path = [x for x in sys.path if x.endswith(remove_path) is False]

         for key in list(sys.modules):
             val = sys.modules.get(key)
@@ -266,5 +337,5 @@ class MMVCv15:
                 if file_path.find(remove_path + os.path.sep) >= 0:
                     print("remove", key, file_path)
                     sys.modules.pop(key)
-            except Exception as e:
+            except:  # type:ignore
                 pass