WIP: DML onnx inferencer

This commit is contained in:
parent f1683cb4a8
commit d45467a3c4

@@ -102,10 +102,11 @@ class MMVCv13:
        return self.get_info()

    def getOnnxExecutionProvider(self):
        if self.settings.gpu >= 0:
        availableProviders = onnxruntime.get_available_providers()
        if self.settings.gpu >= 0 and "CUDAExecutionProvider" in availableProviders:
            return ["CUDAExecutionProvider"], [{"device_id": self.settings.gpu}]
        elif "DmlExecutionProvider" in onnxruntime.get_available_providers():
            return ["DmlExecutionProvider"], []
        elif self.settings.gpu >= 0 and "DmlExecutionProvider" in availableProviders:
            return ["DmlExecutionProvider"], [{}]
        else:
            return ["CPUExecutionProvider"], [
                {

@@ -125,10 +125,11 @@ class MMVCv15:
        return self.get_info()

    def getOnnxExecutionProvider(self):
        if self.settings.gpu >= 0:
        availableProviders = onnxruntime.get_available_providers()
        if self.settings.gpu >= 0 and "CUDAExecutionProvider" in availableProviders:
            return ["CUDAExecutionProvider"], [{"device_id": self.settings.gpu}]
        elif "DmlExecutionProvider" in onnxruntime.get_available_providers():
            return ["DmlExecutionProvider"], []
        elif self.settings.gpu >= 0 and "DmlExecutionProvider" in availableProviders:
            return ["DmlExecutionProvider"], [{}]
        else:
            return ["CPUExecutionProvider"], [
                {

@@ -41,13 +1,6 @@ from const import RVC_MAX_SLOT_NUM, RVC_MODEL_DIRNAME, SAMPLES_JSONS, UPLOAD_DIR
import shutil
import json

providers = [
    "OpenVINOExecutionProvider",
    "CUDAExecutionProvider",
    "DmlExecutionProvider",
    "CPUExecutionProvider",
]


class RVC:
    initialLoad: bool = True

@@ -193,19 +186,8 @@ class RVC:
            setattr(self.settings, key, val)

            if key == "gpu":
                dev = self.deviceManager.getDevice(val)
                half = self.deviceManager.halfPrecisionAvailable(val)
                self.prepareModel(self.settings.modelSlotIndex)

                # Rebuild the pipeline when half-precision availability changes
                if self.pipeline is not None and self.pipeline.isHalf == half:
                    print(
                        "USE EXISTING PIPELINE",
                        half,
                    )
                    self.pipeline.setDevice(dev)
                else:
                    print("CHAGE TO NEW PIPELINE", half)
                    self.prepareModel(self.settings.modelSlotIndex)
            if key == "enableDirectML":
                if self.pipeline is not None and val == 0:
                    self.pipeline.setDirectMLEnable(False)

@@ -1,4 +1,5 @@
import torch
import onnxruntime


class DeviceManager(object):

@@ -26,6 +27,21 @@ class DeviceManager(object):
            dev = torch.device("cuda", index=id)
        return dev

    def getOnnxExecutionProvider(self, gpu: int):
        availableProviders = onnxruntime.get_available_providers()
        if gpu >= 0 and "CUDAExecutionProvider" in availableProviders:
            return ["CUDAExecutionProvider"], [{"device_id": gpu}]
        elif gpu >= 0 and "DmlExecutionProvider" in availableProviders:
            return ["DmlExecutionProvider"], [{}]
        else:
            return ["CPUExecutionProvider"], [
                {
                    "intra_op_num_threads": 8,
                    "execution_mode": onnxruntime.ExecutionMode.ORT_PARALLEL,
                    "inter_op_num_threads": 8,
                }
            ]

    def halfPrecisionAvailable(self, id: int):
        if self.gpu_num == 0:
            return False

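For reference, a minimal sketch (not part of the commit) of how the provider/option pair returned by getOnnxExecutionProvider is meant to be consumed; the model path and gpu index below are placeholders:

import onnxruntime

from voice_changer.RVC.deviceManager.DeviceManager import DeviceManager

# Pick providers for GPU 0; falls back to the CPU provider options when gpu < 0
# or when CUDA/DML are not available in this onnxruntime build.
providers, provider_options = DeviceManager.get_instance().getOnnxExecutionProvider(0)

# The two lists are passed through to onnxruntime side by side.
session = onnxruntime.InferenceSession(
    "model.onnx",  # placeholder path
    providers=providers,
    provider_options=provider_options,
)
print(session.get_providers())  # effective provider order after onnxruntime resolves availability
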
@@ -1,21 +1,18 @@
from typing import Any, Protocol

import torch
from torch import device

from const import EnumInferenceTypes
import onnxruntime


class Inferencer(Protocol):
    inferencerType: EnumInferenceTypes = EnumInferenceTypes.pyTorchRVC
    file: str
    isHalf: bool = True
    dev: device

    # inferencerType: EnumInferenceTypes = EnumInferenceTypes.pyTorchRVC
    # file: str
    # isHalf: bool = True
    # dev: device | None
    # onnxProviders: list[str] | None
    # onnxProviderOptions: Any | None
    model: onnxruntime.InferenceSession | Any | None = None

    def loadModel(self, file: str, dev: device, isHalf: bool = True):
    def loadModel(self, file: str, gpu: int):
        ...

    def infer(

@@ -28,27 +25,18 @@ class Inferencer(Protocol):
    ) -> torch.Tensor:
        ...

    def setProps(
        self,
        inferencerType: EnumInferenceTypes,
        file: str,
        dev: device,
        isHalf: bool = True,
    ):
        self.inferencerType = inferencerType
        self.file = file
        self.isHalf = isHalf
        self.dev = dev

    def setHalf(self, isHalf: bool):
        self.isHalf = isHalf
        if self.model is not None and isHalf:
            self.model = self.model.half()
        elif self.model is not None and isHalf is False:
            self.model = self.model.float()

    def setDevice(self, dev: device):
        self.dev = dev
        if self.model is not None:
            self.model = self.model.to(self.dev)
        return self
    # def setProps(
    #     self,
    #     inferencerType: EnumInferenceTypes,
    #     file: str,
    #     dev: device | None,
    #     onnxProviders: list[str] | None,
    #     onnxProviderOptions: Any | None,
    #     isHalf: bool = True,
    # ):
    #     self.inferencerType = inferencerType
    #     self.file = file
    #     self.isHalf = isHalf
    #     self.dev = dev
    #     self.onnxProviders = onnxProviders
    #     self.onnxProviderOptions = onnxProviderOptions

@@ -1,5 +1,3 @@
from torch import device

from const import EnumInferenceTypes
from voice_changer.RVC.inferencer.Inferencer import Inferencer
from voice_changer.RVC.inferencer.OnnxRVCInferencer import OnnxRVCInferencer

@@ -17,54 +15,60 @@ class InferencerManager:

    @classmethod
    def getInferencer(
        cls, inferencerType: EnumInferenceTypes, file: str, isHalf: bool, dev: device
        cls,
        inferencerType: EnumInferenceTypes,
        file: str,
        gpu: int,
    ) -> Inferencer:
        cls.currentInferencer = cls.loadInferencer(inferencerType, file, isHalf, dev)
        cls.currentInferencer = cls.loadInferencer(inferencerType, file, gpu)
        return cls.currentInferencer

    @classmethod
    def loadInferencer(
        cls, inferencerType: EnumInferenceTypes, file: str, isHalf: bool, dev: device
        cls,
        inferencerType: EnumInferenceTypes,
        file: str,
        gpu: int,
    ) -> Inferencer:
        if (
            inferencerType == EnumInferenceTypes.pyTorchRVC
            or inferencerType == EnumInferenceTypes.pyTorchRVC.value
        ):
            return RVCInferencer().loadModel(file, dev, isHalf)
            return RVCInferencer().loadModel(file, gpu)
        elif (
            inferencerType == EnumInferenceTypes.pyTorchRVCNono
            or inferencerType == EnumInferenceTypes.pyTorchRVCNono.value
        ):
            return RVCInferencerNono().loadModel(file, dev, isHalf)
            return RVCInferencerNono().loadModel(file, gpu)
        elif (
            inferencerType == EnumInferenceTypes.pyTorchRVCv2
            or inferencerType == EnumInferenceTypes.pyTorchRVCv2.value
        ):
            return RVCInferencerv2().loadModel(file, dev, isHalf)
            return RVCInferencerv2().loadModel(file, gpu)
        elif (
            inferencerType == EnumInferenceTypes.pyTorchRVCv2Nono
            or inferencerType == EnumInferenceTypes.pyTorchRVCv2Nono.value
        ):
            return RVCInferencerv2Nono().loadModel(file, dev, isHalf)
            return RVCInferencerv2Nono().loadModel(file, gpu)
        elif (
            inferencerType == EnumInferenceTypes.pyTorchWebUI
            or inferencerType == EnumInferenceTypes.pyTorchWebUI.value
        ):
            return WebUIInferencer().loadModel(file, dev, isHalf)
            return WebUIInferencer().loadModel(file, gpu)
        elif (
            inferencerType == EnumInferenceTypes.pyTorchWebUINono
            or inferencerType == EnumInferenceTypes.pyTorchWebUINono.value
        ):
            return WebUIInferencerNono().loadModel(file, dev, isHalf)
            return WebUIInferencerNono().loadModel(file, gpu)
        elif (
            inferencerType == EnumInferenceTypes.onnxRVC
            or inferencerType == EnumInferenceTypes.onnxRVC.value
        ):
            return OnnxRVCInferencer().loadModel(file, dev, isHalf)
            return OnnxRVCInferencer().loadModel(file, gpu)
        elif (
            inferencerType == EnumInferenceTypes.onnxRVCNono
            or inferencerType == EnumInferenceTypes.onnxRVCNono.value
        ):
            return OnnxRVCInferencerNono().loadModel(file, dev, isHalf)
            return OnnxRVCInferencerNono().loadModel(file, gpu)
        else:
            raise RuntimeError("[Voice Changer] Inferencer not found", inferencerType)

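A small usage sketch of the new signature, assuming an ONNX RVC slot; the file path and gpu index are placeholders rather than values from the commit:

from const import EnumInferenceTypes
from voice_changer.RVC.inferencer.InferencerManager import InferencerManager

# Device and half-precision are now resolved inside the inferencers from the gpu index,
# so callers pass only the model type, the model file, and the gpu index.
inferencer = InferencerManager.getInferencer(
    EnumInferenceTypes.onnxRVC,
    "model_dir/model.onnx",  # placeholder path
    0,  # gpu index; a negative value selects the CPU execution provider
)
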
@@ -1,20 +1,20 @@
import torch
from torch import device
import onnxruntime
from const import EnumInferenceTypes
from voice_changer.RVC.deviceManager.DeviceManager import DeviceManager
from voice_changer.RVC.inferencer.Inferencer import Inferencer
import numpy as np

providers = ["CPUExecutionProvider"]


class OnnxRVCInferencer(Inferencer):
    def loadModel(self, file: str, dev: device, isHalf: bool = True):
        super().setProps(EnumInferenceTypes.onnxRVC, file, dev, isHalf)
        # ort_options = onnxruntime.SessionOptions()
        # ort_options.intra_op_num_threads = 8
    def loadModel(self, file: str, gpu: int):
        (
            onnxProviders,
            onnxProviderOptions,
        ) = DeviceManager.get_instance().getOnnxExecutionProvider(gpu)

        onnx_session = onnxruntime.InferenceSession(file, providers=providers)
        onnx_session = onnxruntime.InferenceSession(
            file, providers=onnxProviders, provider_options=onnxProviderOptions
        )

        # check half-precision
        first_input_type = onnx_session.get_inputs()[0].type

@@ -24,7 +24,6 @@ class OnnxRVCInferencer(Inferencer):
            self.isHalf = True

        self.model = onnx_session
        self.setDevice(dev)
        return self

    def infer(

@@ -66,36 +65,3 @@ class OnnxRVCInferencer(Inferencer):
        )

        return torch.tensor(np.array(audio1))

    def setHalf(self, isHalf: bool):
        self.isHalf = isHalf
        pass
        # raise RuntimeError("half-precision is not changable.", self.isHalf)

    def setDevice(self, dev: device):
        index = dev.index
        type = dev.type
        if type == "cpu":
            self.model.set_providers(providers=["CPUExecutionProvider"])
        elif type == "cuda":
            provider_options = [{"device_id": index}]
            self.model.set_providers(
                providers=["CUDAExecutionProvider"],
                provider_options=provider_options,
            )
        else:
            self.model.set_providers(providers=["CPUExecutionProvider"])

        return self

    def setDirectMLEnable(self, enable: bool):
        if "DmlExecutionProvider" not in onnxruntime.get_available_providers():
            print("[Voice Changer] DML is not available.")
            return

        if enable:
            self.model.set_providers(
                providers=["DmlExecutionProvider", "CPUExecutionProvider"]
            )
        else:
            self.model.set_providers(providers=["CPUExecutionProvider"])

@@ -1,32 +1,10 @@
import torch
from torch import device
import onnxruntime
from const import EnumInferenceTypes
import numpy as np

from voice_changer.RVC.inferencer.OnnxRVCInferencer import OnnxRVCInferencer

providers = ["CPUExecutionProvider"]


class OnnxRVCInferencerNono(OnnxRVCInferencer):
    def loadModel(self, file: str, dev: device, isHalf: bool = True):
        super().setProps(EnumInferenceTypes.onnxRVC, file, dev, isHalf)
        # ort_options = onnxruntime.SessionOptions()
        # ort_options.intra_op_num_threads = 8

        onnx_session = onnxruntime.InferenceSession(file, providers=providers)

        # check half-precision
        first_input_type = onnx_session.get_inputs()[0].type
        if first_input_type == "tensor(float)":
            self.isHalf = False
        else:
            self.isHalf = True

        self.model = onnx_session
        return self

    def infer(
        self,
        feats: torch.Tensor,

@@ -1,7 +1,6 @@
import torch
from torch import device

from const import EnumInferenceTypes
from voice_changer.RVC.deviceManager.DeviceManager import DeviceManager
from voice_changer.RVC.inferencer.Inferencer import Inferencer
from infer_pack.models import (  # type:ignore
    SynthesizerTrnMs256NSFsid,

@@ -9,9 +8,10 @@ from infer_pack.models import (  # type:ignore


class RVCInferencer(Inferencer):
    def loadModel(self, file: str, dev: device, isHalf: bool = True):
        super().setProps(EnumInferenceTypes.pyTorchRVC, file, dev, isHalf)
        print("load inf", file)
    def loadModel(self, file: str, gpu: int):
        dev = DeviceManager.get_instance().getDevice(gpu)
        isHalf = DeviceManager.get_instance().halfPrecisionAvailable(gpu)

        cpt = torch.load(file, map_location="cpu")
        model = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=isHalf)

@@ -1,7 +1,6 @@
import torch
from torch import device

from const import EnumInferenceTypes
from voice_changer.RVC.deviceManager.DeviceManager import DeviceManager
from voice_changer.RVC.inferencer.Inferencer import Inferencer
from infer_pack.models import (  # type:ignore
    SynthesizerTrnMs256NSFsid_nono,

@@ -9,8 +8,10 @@ from infer_pack.models import (  # type:ignore


class RVCInferencerNono(Inferencer):
    def loadModel(self, file: str, dev: device, isHalf: bool = True):
        super().setProps(EnumInferenceTypes.pyTorchRVCNono, file, dev, isHalf)
    def loadModel(self, file: str, gpu: int):
        dev = DeviceManager.get_instance().getDevice(gpu)
        isHalf = DeviceManager.get_instance().halfPrecisionAvailable(gpu)

        cpt = torch.load(file, map_location="cpu")
        model = SynthesizerTrnMs256NSFsid_nono(*cpt["config"], is_half=isHalf)

@@ -1,7 +1,5 @@
import torch
from torch import device

from const import EnumInferenceTypes
from voice_changer.RVC.deviceManager.DeviceManager import DeviceManager
from voice_changer.RVC.inferencer.Inferencer import Inferencer
from infer_pack.models import (  # type:ignore
    SynthesizerTrnMs768NSFsid,

@@ -9,9 +7,10 @@ from infer_pack.models import (  # type:ignore


class RVCInferencerv2(Inferencer):
    def loadModel(self, file: str, dev: device, isHalf: bool = True):
        super().setProps(EnumInferenceTypes.pyTorchRVCv2, file, dev, isHalf)
        print("load inf", file)
    def loadModel(self, file: str, gpu: int):
        dev = DeviceManager.get_instance().getDevice(gpu)
        isHalf = DeviceManager.get_instance().halfPrecisionAvailable(gpu)

        cpt = torch.load(file, map_location="cpu")
        model = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=isHalf)

@@ -1,7 +1,6 @@
import torch
from torch import device

from const import EnumInferenceTypes
from voice_changer.RVC.deviceManager.DeviceManager import DeviceManager
from voice_changer.RVC.inferencer.Inferencer import Inferencer
from infer_pack.models import (  # type:ignore
    SynthesizerTrnMs768NSFsid_nono,

@@ -9,8 +8,10 @@ from infer_pack.models import (  # type:ignore


class RVCInferencerv2Nono(Inferencer):
    def loadModel(self, file: str, dev: device, isHalf: bool = True):
        super().setProps(EnumInferenceTypes.pyTorchRVCv2Nono, file, dev, isHalf)
    def loadModel(self, file: str, gpu: int):
        dev = DeviceManager.get_instance().getDevice(gpu)
        isHalf = DeviceManager.get_instance().halfPrecisionAvailable(gpu)

        cpt = torch.load(file, map_location="cpu")
        model = SynthesizerTrnMs768NSFsid_nono(*cpt["config"], is_half=isHalf)

@@ -1,14 +1,15 @@
import torch
from torch import device
from voice_changer.RVC.deviceManager.DeviceManager import DeviceManager

from const import EnumInferenceTypes
from voice_changer.RVC.inferencer.Inferencer import Inferencer
from .models import SynthesizerTrnMsNSFsid


class WebUIInferencer(Inferencer):
    def loadModel(self, file: str, dev: device, isHalf: bool = True):
        super().setProps(EnumInferenceTypes.pyTorchWebUI, file, dev, isHalf)
    def loadModel(self, file: str, gpu: int):
        dev = DeviceManager.get_instance().getDevice(gpu)
        isHalf = DeviceManager.get_instance().halfPrecisionAvailable(gpu)

        cpt = torch.load(file, map_location="cpu")
        model = SynthesizerTrnMsNSFsid(**cpt["params"], is_half=isHalf)

@@ -1,14 +1,15 @@
import torch
from torch import device

from const import EnumInferenceTypes
from voice_changer.RVC.deviceManager.DeviceManager import DeviceManager
from voice_changer.RVC.inferencer.Inferencer import Inferencer
from .models import SynthesizerTrnMsNSFsidNono


class WebUIInferencerNono(Inferencer):
    def loadModel(self, file: str, dev: device, isHalf: bool = True):
        super().setProps(EnumInferenceTypes.pyTorchWebUINono, file, dev, isHalf)
    def loadModel(self, file: str, gpu: int):
        dev = DeviceManager.get_instance().getDevice(gpu)
        isHalf = DeviceManager.get_instance().halfPrecisionAvailable(gpu)

        cpt = torch.load(file, map_location="cpu")
        model = SynthesizerTrnMsNSFsidNono(**cpt["params"], is_half=isHalf)

@@ -59,14 +59,6 @@ class Pipeline(object):
        self.sr = 16000
        self.window = 160

        self.device = device
        self.isHalf = isHalf

    def setDevice(self, device: torch.device):
        self.device = device
        self.embedder.setDevice(device)
        self.inferencer.setDevice(device)

    def setDirectMLEnable(self, enable: bool):
        if hasattr(self.inferencer, "setDirectMLEnable"):
            self.inferencer.setDirectMLEnable(enable)

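A caller-side sketch of how the enableDirectML flag from RVC.update_settings is expected to reach this hook; the helper below is illustrative, not part of the commit (the commit itself only wires the disable path):

def apply_enable_directml(pipeline, val: int) -> None:
    # Mirrors the handling in RVC.update_settings: the value comes from the client settings,
    # 0 disables DML, and Pipeline.setDirectMLEnable forwards to the inferencer when supported.
    if pipeline is None:
        return
    pipeline.setDirectMLEnable(val != 0)
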
@@ -13,18 +13,11 @@ from voice_changer.RVC.pitchExtractor.PitchExtractorManager import PitchExtracto
def createPipeline(modelSlot: ModelSlot, gpu: int, f0Detector: str):
    dev = DeviceManager.get_instance().getDevice(gpu)
    half = DeviceManager.get_instance().halfPrecisionAvailable(gpu)
    # # Determine the file name (Inferencer)
    # inferencerFilename = (
    #     modelSlot.onnxModelFile if modelSlot.isONNX else modelSlot.pyTorchModelFile
    # )

    # Create the Inferencer
    try:
        inferencer = InferencerManager.getInferencer(
            modelSlot.modelType,
            modelSlot.modelFile,
            half,
            dev,
            modelSlot.modelType, modelSlot.modelFile, gpu
        )
    except Exception as e:
        print("[Voice Changer] exception! loading inferencer", e)

@@ -162,10 +162,11 @@ class SoVitsSvc40:
        return self.get_info()

    def getOnnxExecutionProvider(self):
        if self.settings.gpu >= 0:
        availableProviders = onnxruntime.get_available_providers()
        if self.settings.gpu >= 0 and "CUDAExecutionProvider" in availableProviders:
            return ["CUDAExecutionProvider"], [{"device_id": self.settings.gpu}]
        elif "DmlExecutionProvider" in onnxruntime.get_available_providers():
            return ["DmlExecutionProvider"], []
        elif self.settings.gpu >= 0 and "DmlExecutionProvider" in availableProviders:
            return ["DmlExecutionProvider"], [{}]
        else:
            return ["CPUExecutionProvider"], [
                {

@@ -139,10 +139,11 @@ class SoVitsSvc40v2:
        return self.get_info()

    def getOnnxExecutionProvider(self):
        if self.settings.gpu >= 0:
        availableProviders = onnxruntime.get_available_providers()
        if self.settings.gpu >= 0 and "CUDAExecutionProvider" in availableProviders:
            return ["CUDAExecutionProvider"], [{"device_id": self.settings.gpu}]
        elif "DmlExecutionProvider" in onnxruntime.get_available_providers():
            return ["DmlExecutionProvider"], []
        elif self.settings.gpu >= 0 and "DmlExecutionProvider" in availableProviders:
            return ["DmlExecutionProvider"], [{}]
        else:
            return ["CPUExecutionProvider"], [
                {

@@ -29,13 +29,6 @@ import time
import sounddevice as sd
import librosa

providers = [
    "OpenVINOExecutionProvider",
    "CUDAExecutionProvider",
    "DmlExecutionProvider",
    "CPUExecutionProvider",
]

STREAM_INPUT_FILE = os.path.join(TMP_DIR, "in.wav")
STREAM_OUTPUT_FILE = os.path.join(TMP_DIR, "out.wav")

@@ -379,7 +372,7 @@ class VoiceChanger:
        else:
            ret = self.voiceChanger.update_settings(key, val)
            if ret is False:
                print(f"{key} is not mutable variable or unknown variable!")
                print(f"({key} is not mutable variable or unknown variable)")
        return self.get_info()

    def _generate_strength(self, crossfadeSize: int):