From d45467a3c4d504c7d119a828d0d778b33f860fe0 Mon Sep 17 00:00:00 2001 From: wataru Date: Mon, 29 May 2023 17:34:35 +0900 Subject: [PATCH] WIP: DML onnx inferencer --- server/voice_changer/MMVCv13/MMVCv13.py | 7 ++- server/voice_changer/MMVCv15/MMVCv15.py | 7 ++- server/voice_changer/RVC/RVC.py | 20 +------ .../RVC/deviceManager/DeviceManager.py | 16 ++++++ .../RVC/inferencer/Inferencer.py | 56 ++++++++----------- .../RVC/inferencer/InferencerManager.py | 30 +++++----- .../RVC/inferencer/OnnxRVCInferencer.py | 52 +++-------------- .../RVC/inferencer/OnnxRVCInferencerNono.py | 22 -------- .../RVC/inferencer/RVCInferencer.py | 10 ++-- .../RVC/inferencer/RVCInferencerNono.py | 9 +-- .../RVC/inferencer/RVCInferencerv2.py | 11 ++-- .../RVC/inferencer/RVCInferencerv2Nono.py | 9 +-- .../RVC/inferencer/WebUIInferencer.py | 9 +-- .../RVC/inferencer/WebUIInferencerNono.py | 9 +-- server/voice_changer/RVC/pipeline/Pipeline.py | 8 --- .../RVC/pipeline/PipelineGenerator.py | 9 +-- .../voice_changer/SoVitsSvc40/SoVitsSvc40.py | 7 ++- .../SoVitsSvc40v2/SoVitsSvc40v2.py | 7 ++- server/voice_changer/VoiceChanger.py | 9 +-- 19 files changed, 113 insertions(+), 194 deletions(-) diff --git a/server/voice_changer/MMVCv13/MMVCv13.py b/server/voice_changer/MMVCv13/MMVCv13.py index 3f1bd59a..331a78e2 100644 --- a/server/voice_changer/MMVCv13/MMVCv13.py +++ b/server/voice_changer/MMVCv13/MMVCv13.py @@ -102,10 +102,11 @@ class MMVCv13: return self.get_info() def getOnnxExecutionProvider(self): - if self.settings.gpu >= 0: + availableProviders = onnxruntime.get_available_providers() + if self.settings.gpu >= 0 and "CUDAExecutionProvider" in availableProviders: return ["CUDAExecutionProvider"], [{"device_id": self.settings.gpu}] - elif "DmlExecutionProvider" in onnxruntime.get_available_providers(): - return ["DmlExecutionProvider"], [] + elif self.settings.gpu >= 0 and "DmlExecutionProvider" in availableProviders: + return ["DmlExecutionProvider"], [{}] else: return ["CPUExecutionProvider"], [ { diff --git a/server/voice_changer/MMVCv15/MMVCv15.py b/server/voice_changer/MMVCv15/MMVCv15.py index 3e3338f9..eb7188fc 100644 --- a/server/voice_changer/MMVCv15/MMVCv15.py +++ b/server/voice_changer/MMVCv15/MMVCv15.py @@ -125,10 +125,11 @@ class MMVCv15: return self.get_info() def getOnnxExecutionProvider(self): - if self.settings.gpu >= 0: + availableProviders = onnxruntime.get_available_providers() + if self.settings.gpu >= 0 and "CUDAExecutionProvider" in availableProviders: return ["CUDAExecutionProvider"], [{"device_id": self.settings.gpu}] - elif "DmlExecutionProvider" in onnxruntime.get_available_providers(): - return ["DmlExecutionProvider"], [] + elif self.settings.gpu >= 0 and "DmlExecutionProvider" in availableProviders: + return ["DmlExecutionProvider"], [{}] else: return ["CPUExecutionProvider"], [ { diff --git a/server/voice_changer/RVC/RVC.py b/server/voice_changer/RVC/RVC.py index 4bbe4dad..5ba64067 100644 --- a/server/voice_changer/RVC/RVC.py +++ b/server/voice_changer/RVC/RVC.py @@ -41,13 +41,6 @@ from const import RVC_MAX_SLOT_NUM, RVC_MODEL_DIRNAME, SAMPLES_JSONS, UPLOAD_DIR import shutil import json -providers = [ - "OpenVINOExecutionProvider", - "CUDAExecutionProvider", - "DmlExecutionProvider", - "CPUExecutionProvider", -] - class RVC: initialLoad: bool = True @@ -193,19 +186,8 @@ class RVC: setattr(self.settings, key, val) if key == "gpu": - dev = self.deviceManager.getDevice(val) - half = self.deviceManager.halfPrecisionAvailable(val) + self.prepareModel(self.settings.modelSlotIndex) - # 
rebuild the pipeline when half-precision availability changes
-            if self.pipeline is not None and self.pipeline.isHalf == half:
-                print(
-                    "USE EXISTING PIPELINE",
-                    half,
-                )
-                self.pipeline.setDevice(dev)
-            else:
-                print("CHAGE TO NEW PIPELINE", half)
-                self.prepareModel(self.settings.modelSlotIndex)
         if key == "enableDirectML":
             if self.pipeline is not None and val == 0:
                 self.pipeline.setDirectMLEnable(False)
diff --git a/server/voice_changer/RVC/deviceManager/DeviceManager.py b/server/voice_changer/RVC/deviceManager/DeviceManager.py
index 9075e99a..4f5cfdd1 100644
--- a/server/voice_changer/RVC/deviceManager/DeviceManager.py
+++ b/server/voice_changer/RVC/deviceManager/DeviceManager.py
@@ -1,4 +1,5 @@
 import torch
+import onnxruntime
 
 
 class DeviceManager(object):
@@ -26,6 +27,21 @@ class DeviceManager(object):
             dev = torch.device("cuda", index=id)
         return dev
 
+    def getOnnxExecutionProvider(self, gpu: int):
+        availableProviders = onnxruntime.get_available_providers()
+        if gpu >= 0 and "CUDAExecutionProvider" in availableProviders:
+            return ["CUDAExecutionProvider"], [{"device_id": gpu}]
+        elif gpu >= 0 and "DmlExecutionProvider" in availableProviders:
+            return ["DmlExecutionProvider"], [{}]
+        else:
+            return ["CPUExecutionProvider"], [
+                {
+                    "intra_op_num_threads": 8,
+                    "execution_mode": onnxruntime.ExecutionMode.ORT_PARALLEL,
+                    "inter_op_num_threads": 8,
+                }
+            ]
+
     def halfPrecisionAvailable(self, id: int):
         if self.gpu_num == 0:
             return False
diff --git a/server/voice_changer/RVC/inferencer/Inferencer.py b/server/voice_changer/RVC/inferencer/Inferencer.py
index 46738dc5..0bc880c2 100644
--- a/server/voice_changer/RVC/inferencer/Inferencer.py
+++ b/server/voice_changer/RVC/inferencer/Inferencer.py
@@ -1,21 +1,18 @@
 from typing import Any, Protocol
-
 import torch
-from torch import device
-
-from const import EnumInferenceTypes
 import onnxruntime
 
 
 class Inferencer(Protocol):
-    inferencerType: EnumInferenceTypes = EnumInferenceTypes.pyTorchRVC
-    file: str
-    isHalf: bool = True
-    dev: device
-
+    # inferencerType: EnumInferenceTypes = EnumInferenceTypes.pyTorchRVC
+    # file: str
+    # isHalf: bool = True
+    # dev: device | None
+    # onnxProviders: list[str] | None
+    # onnxProviderOptions: Any | None
     model: onnxruntime.InferenceSession | Any | None = None
 
-    def loadModel(self, file: str, dev: device, isHalf: bool = True):
+    def loadModel(self, file: str, gpu: int):
         ...
 
     def infer(
         self,
     ) -> torch.Tensor:
         ...
- def setProps( - self, - inferencerType: EnumInferenceTypes, - file: str, - dev: device, - isHalf: bool = True, - ): - self.inferencerType = inferencerType - self.file = file - self.isHalf = isHalf - self.dev = dev - - def setHalf(self, isHalf: bool): - self.isHalf = isHalf - if self.model is not None and isHalf: - self.model = self.model.half() - elif self.model is not None and isHalf is False: - self.model = self.model.float() - - def setDevice(self, dev: device): - self.dev = dev - if self.model is not None: - self.model = self.model.to(self.dev) - return self + # def setProps( + # self, + # inferencerType: EnumInferenceTypes, + # file: str, + # dev: device | None, + # onnxProviders: list[str] | None, + # onnxProviderOptions: Any | None, + # isHalf: bool = True, + # ): + # self.inferencerType = inferencerType + # self.file = file + # self.isHalf = isHalf + # self.dev = dev + # self.onnxProviders = onnxProviders + # self.onnxProviderOptions = onnxProviderOptions diff --git a/server/voice_changer/RVC/inferencer/InferencerManager.py b/server/voice_changer/RVC/inferencer/InferencerManager.py index 94dc4cfe..bc42772c 100644 --- a/server/voice_changer/RVC/inferencer/InferencerManager.py +++ b/server/voice_changer/RVC/inferencer/InferencerManager.py @@ -1,5 +1,3 @@ -from torch import device - from const import EnumInferenceTypes from voice_changer.RVC.inferencer.Inferencer import Inferencer from voice_changer.RVC.inferencer.OnnxRVCInferencer import OnnxRVCInferencer @@ -17,54 +15,60 @@ class InferencerManager: @classmethod def getInferencer( - cls, inferencerType: EnumInferenceTypes, file: str, isHalf: bool, dev: device + cls, + inferencerType: EnumInferenceTypes, + file: str, + gpu: int, ) -> Inferencer: - cls.currentInferencer = cls.loadInferencer(inferencerType, file, isHalf, dev) + cls.currentInferencer = cls.loadInferencer(inferencerType, file, gpu) return cls.currentInferencer @classmethod def loadInferencer( - cls, inferencerType: EnumInferenceTypes, file: str, isHalf: bool, dev: device + cls, + inferencerType: EnumInferenceTypes, + file: str, + gpu: int, ) -> Inferencer: if ( inferencerType == EnumInferenceTypes.pyTorchRVC or inferencerType == EnumInferenceTypes.pyTorchRVC.value ): - return RVCInferencer().loadModel(file, dev, isHalf) + return RVCInferencer().loadModel(file, gpu) elif ( inferencerType == EnumInferenceTypes.pyTorchRVCNono or inferencerType == EnumInferenceTypes.pyTorchRVCNono.value ): - return RVCInferencerNono().loadModel(file, dev, isHalf) + return RVCInferencerNono().loadModel(file, gpu) elif ( inferencerType == EnumInferenceTypes.pyTorchRVCv2 or inferencerType == EnumInferenceTypes.pyTorchRVCv2.value ): - return RVCInferencerv2().loadModel(file, dev, isHalf) + return RVCInferencerv2().loadModel(file, gpu) elif ( inferencerType == EnumInferenceTypes.pyTorchRVCv2Nono or inferencerType == EnumInferenceTypes.pyTorchRVCv2Nono.value ): - return RVCInferencerv2Nono().loadModel(file, dev, isHalf) + return RVCInferencerv2Nono().loadModel(file, gpu) elif ( inferencerType == EnumInferenceTypes.pyTorchWebUI or inferencerType == EnumInferenceTypes.pyTorchWebUI.value ): - return WebUIInferencer().loadModel(file, dev, isHalf) + return WebUIInferencer().loadModel(file, gpu) elif ( inferencerType == EnumInferenceTypes.pyTorchWebUINono or inferencerType == EnumInferenceTypes.pyTorchWebUINono.value ): - return WebUIInferencerNono().loadModel(file, dev, isHalf) + return WebUIInferencerNono().loadModel(file, gpu) elif ( inferencerType == EnumInferenceTypes.onnxRVC or inferencerType == 
EnumInferenceTypes.onnxRVC.value ): - return OnnxRVCInferencer().loadModel(file, dev, isHalf) + return OnnxRVCInferencer().loadModel(file, gpu) elif ( inferencerType == EnumInferenceTypes.onnxRVCNono or inferencerType == EnumInferenceTypes.onnxRVCNono.value ): - return OnnxRVCInferencerNono().loadModel(file, dev, isHalf) + return OnnxRVCInferencerNono().loadModel(file, gpu) else: raise RuntimeError("[Voice Changer] Inferencer not found", inferencerType) diff --git a/server/voice_changer/RVC/inferencer/OnnxRVCInferencer.py b/server/voice_changer/RVC/inferencer/OnnxRVCInferencer.py index 9ad8c284..79e451b4 100644 --- a/server/voice_changer/RVC/inferencer/OnnxRVCInferencer.py +++ b/server/voice_changer/RVC/inferencer/OnnxRVCInferencer.py @@ -1,20 +1,20 @@ import torch -from torch import device import onnxruntime -from const import EnumInferenceTypes +from voice_changer.RVC.deviceManager.DeviceManager import DeviceManager from voice_changer.RVC.inferencer.Inferencer import Inferencer import numpy as np -providers = ["CPUExecutionProvider"] - class OnnxRVCInferencer(Inferencer): - def loadModel(self, file: str, dev: device, isHalf: bool = True): - super().setProps(EnumInferenceTypes.onnxRVC, file, dev, isHalf) - # ort_options = onnxruntime.SessionOptions() - # ort_options.intra_op_num_threads = 8 + def loadModel(self, file: str, gpu: int): + ( + onnxProviders, + onnxProviderOptions, + ) = DeviceManager.get_instance().getOnnxExecutionProvider(gpu) - onnx_session = onnxruntime.InferenceSession(file, providers=providers) + onnx_session = onnxruntime.InferenceSession( + file, providers=onnxProviders, provider_options=onnxProviderOptions + ) # check half-precision first_input_type = onnx_session.get_inputs()[0].type @@ -24,7 +24,6 @@ class OnnxRVCInferencer(Inferencer): self.isHalf = True self.model = onnx_session - self.setDevice(dev) return self def infer( @@ -66,36 +65,3 @@ class OnnxRVCInferencer(Inferencer): ) return torch.tensor(np.array(audio1)) - - def setHalf(self, isHalf: bool): - self.isHalf = isHalf - pass - # raise RuntimeError("half-precision is not changable.", self.isHalf) - - def setDevice(self, dev: device): - index = dev.index - type = dev.type - if type == "cpu": - self.model.set_providers(providers=["CPUExecutionProvider"]) - elif type == "cuda": - provider_options = [{"device_id": index}] - self.model.set_providers( - providers=["CUDAExecutionProvider"], - provider_options=provider_options, - ) - else: - self.model.set_providers(providers=["CPUExecutionProvider"]) - - return self - - def setDirectMLEnable(self, enable: bool): - if "DmlExecutionProvider" not in onnxruntime.get_available_providers(): - print("[Voice Changer] DML is not available.") - return - - if enable: - self.model.set_providers( - providers=["DmlExecutionProvider", "CPUExecutionProvider"] - ) - else: - self.model.set_providers(providers=["CPUExecutionProvider"]) diff --git a/server/voice_changer/RVC/inferencer/OnnxRVCInferencerNono.py b/server/voice_changer/RVC/inferencer/OnnxRVCInferencerNono.py index cdd3b8ea..201a205c 100644 --- a/server/voice_changer/RVC/inferencer/OnnxRVCInferencerNono.py +++ b/server/voice_changer/RVC/inferencer/OnnxRVCInferencerNono.py @@ -1,32 +1,10 @@ import torch -from torch import device -import onnxruntime -from const import EnumInferenceTypes import numpy as np from voice_changer.RVC.inferencer.OnnxRVCInferencer import OnnxRVCInferencer -providers = ["CPUExecutionProvider"] - class OnnxRVCInferencerNono(OnnxRVCInferencer): - def loadModel(self, file: str, dev: device, isHalf: 
bool = True): - super().setProps(EnumInferenceTypes.onnxRVC, file, dev, isHalf) - # ort_options = onnxruntime.SessionOptions() - # ort_options.intra_op_num_threads = 8 - - onnx_session = onnxruntime.InferenceSession(file, providers=providers) - - # check half-precision - first_input_type = onnx_session.get_inputs()[0].type - if first_input_type == "tensor(float)": - self.isHalf = False - else: - self.isHalf = True - - self.model = onnx_session - return self - def infer( self, feats: torch.Tensor, diff --git a/server/voice_changer/RVC/inferencer/RVCInferencer.py b/server/voice_changer/RVC/inferencer/RVCInferencer.py index d62f89bb..b35c18c5 100644 --- a/server/voice_changer/RVC/inferencer/RVCInferencer.py +++ b/server/voice_changer/RVC/inferencer/RVCInferencer.py @@ -1,7 +1,6 @@ import torch -from torch import device -from const import EnumInferenceTypes +from voice_changer.RVC.deviceManager.DeviceManager import DeviceManager from voice_changer.RVC.inferencer.Inferencer import Inferencer from infer_pack.models import ( # type:ignore SynthesizerTrnMs256NSFsid, @@ -9,9 +8,10 @@ from infer_pack.models import ( # type:ignore class RVCInferencer(Inferencer): - def loadModel(self, file: str, dev: device, isHalf: bool = True): - super().setProps(EnumInferenceTypes.pyTorchRVC, file, dev, isHalf) - print("load inf", file) + def loadModel(self, file: str, gpu: int): + dev = DeviceManager.get_instance().getDevice(gpu) + isHalf = DeviceManager.get_instance().halfPrecisionAvailable(gpu) + cpt = torch.load(file, map_location="cpu") model = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=isHalf) diff --git a/server/voice_changer/RVC/inferencer/RVCInferencerNono.py b/server/voice_changer/RVC/inferencer/RVCInferencerNono.py index f30d9531..8294c058 100644 --- a/server/voice_changer/RVC/inferencer/RVCInferencerNono.py +++ b/server/voice_changer/RVC/inferencer/RVCInferencerNono.py @@ -1,7 +1,6 @@ import torch -from torch import device -from const import EnumInferenceTypes +from voice_changer.RVC.deviceManager.DeviceManager import DeviceManager from voice_changer.RVC.inferencer.Inferencer import Inferencer from infer_pack.models import ( # type:ignore SynthesizerTrnMs256NSFsid_nono, @@ -9,8 +8,10 @@ from infer_pack.models import ( # type:ignore class RVCInferencerNono(Inferencer): - def loadModel(self, file: str, dev: device, isHalf: bool = True): - super().setProps(EnumInferenceTypes.pyTorchRVCNono, file, dev, isHalf) + def loadModel(self, file: str, gpu: int): + dev = DeviceManager.get_instance().getDevice(gpu) + isHalf = DeviceManager.get_instance().halfPrecisionAvailable(gpu) + cpt = torch.load(file, map_location="cpu") model = SynthesizerTrnMs256NSFsid_nono(*cpt["config"], is_half=isHalf) diff --git a/server/voice_changer/RVC/inferencer/RVCInferencerv2.py b/server/voice_changer/RVC/inferencer/RVCInferencerv2.py index 38a10e77..49dd0c02 100644 --- a/server/voice_changer/RVC/inferencer/RVCInferencerv2.py +++ b/server/voice_changer/RVC/inferencer/RVCInferencerv2.py @@ -1,7 +1,5 @@ import torch -from torch import device - -from const import EnumInferenceTypes +from voice_changer.RVC.deviceManager.DeviceManager import DeviceManager from voice_changer.RVC.inferencer.Inferencer import Inferencer from infer_pack.models import ( # type:ignore SynthesizerTrnMs768NSFsid, @@ -9,9 +7,10 @@ from infer_pack.models import ( # type:ignore class RVCInferencerv2(Inferencer): - def loadModel(self, file: str, dev: device, isHalf: bool = True): - super().setProps(EnumInferenceTypes.pyTorchRVCv2, file, dev, isHalf) - 
print("load inf", file) + def loadModel(self, file: str, gpu: int): + dev = DeviceManager.get_instance().getDevice(gpu) + isHalf = DeviceManager.get_instance().halfPrecisionAvailable(gpu) + cpt = torch.load(file, map_location="cpu") model = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=isHalf) diff --git a/server/voice_changer/RVC/inferencer/RVCInferencerv2Nono.py b/server/voice_changer/RVC/inferencer/RVCInferencerv2Nono.py index 773ebe44..2b4a746f 100644 --- a/server/voice_changer/RVC/inferencer/RVCInferencerv2Nono.py +++ b/server/voice_changer/RVC/inferencer/RVCInferencerv2Nono.py @@ -1,7 +1,6 @@ import torch -from torch import device -from const import EnumInferenceTypes +from voice_changer.RVC.deviceManager.DeviceManager import DeviceManager from voice_changer.RVC.inferencer.Inferencer import Inferencer from infer_pack.models import ( # type:ignore SynthesizerTrnMs768NSFsid_nono, @@ -9,8 +8,10 @@ from infer_pack.models import ( # type:ignore class RVCInferencerv2Nono(Inferencer): - def loadModel(self, file: str, dev: device, isHalf: bool = True): - super().setProps(EnumInferenceTypes.pyTorchRVCv2Nono, file, dev, isHalf) + def loadModel(self, file: str, gpu: int): + dev = DeviceManager.get_instance().getDevice(gpu) + isHalf = DeviceManager.get_instance().halfPrecisionAvailable(gpu) + cpt = torch.load(file, map_location="cpu") model = SynthesizerTrnMs768NSFsid_nono(*cpt["config"], is_half=isHalf) diff --git a/server/voice_changer/RVC/inferencer/WebUIInferencer.py b/server/voice_changer/RVC/inferencer/WebUIInferencer.py index 01fb5608..a069c398 100644 --- a/server/voice_changer/RVC/inferencer/WebUIInferencer.py +++ b/server/voice_changer/RVC/inferencer/WebUIInferencer.py @@ -1,14 +1,15 @@ import torch -from torch import device +from voice_changer.RVC.deviceManager.DeviceManager import DeviceManager -from const import EnumInferenceTypes from voice_changer.RVC.inferencer.Inferencer import Inferencer from .models import SynthesizerTrnMsNSFsid class WebUIInferencer(Inferencer): - def loadModel(self, file: str, dev: device, isHalf: bool = True): - super().setProps(EnumInferenceTypes.pyTorchWebUI, file, dev, isHalf) + def loadModel(self, file: str, gpu: int): + dev = DeviceManager.get_instance().getDevice(gpu) + isHalf = DeviceManager.get_instance().halfPrecisionAvailable(gpu) + cpt = torch.load(file, map_location="cpu") model = SynthesizerTrnMsNSFsid(**cpt["params"], is_half=isHalf) diff --git a/server/voice_changer/RVC/inferencer/WebUIInferencerNono.py b/server/voice_changer/RVC/inferencer/WebUIInferencerNono.py index 044a0fcf..4e02fe20 100644 --- a/server/voice_changer/RVC/inferencer/WebUIInferencerNono.py +++ b/server/voice_changer/RVC/inferencer/WebUIInferencerNono.py @@ -1,14 +1,15 @@ import torch -from torch import device -from const import EnumInferenceTypes +from voice_changer.RVC.deviceManager.DeviceManager import DeviceManager from voice_changer.RVC.inferencer.Inferencer import Inferencer from .models import SynthesizerTrnMsNSFsidNono class WebUIInferencerNono(Inferencer): - def loadModel(self, file: str, dev: device, isHalf: bool = True): - super().setProps(EnumInferenceTypes.pyTorchWebUINono, file, dev, isHalf) + def loadModel(self, file: str, gpu: int): + dev = DeviceManager.get_instance().getDevice(gpu) + isHalf = DeviceManager.get_instance().halfPrecisionAvailable(gpu) + cpt = torch.load(file, map_location="cpu") model = SynthesizerTrnMsNSFsidNono(**cpt["params"], is_half=isHalf) diff --git a/server/voice_changer/RVC/pipeline/Pipeline.py 
b/server/voice_changer/RVC/pipeline/Pipeline.py
index 8c4364e5..582727a3 100644
--- a/server/voice_changer/RVC/pipeline/Pipeline.py
+++ b/server/voice_changer/RVC/pipeline/Pipeline.py
@@ -59,14 +59,6 @@ class Pipeline(object):
         self.sr = 16000
         self.window = 160
 
-        self.device = device
-        self.isHalf = isHalf
-
-    def setDevice(self, device: torch.device):
-        self.device = device
-        self.embedder.setDevice(device)
-        self.inferencer.setDevice(device)
-
     def setDirectMLEnable(self, enable: bool):
         if hasattr(self.inferencer, "setDirectMLEnable"):
             self.inferencer.setDirectMLEnable(enable)
diff --git a/server/voice_changer/RVC/pipeline/PipelineGenerator.py b/server/voice_changer/RVC/pipeline/PipelineGenerator.py
index 6a8d7691..6479cbe8 100644
--- a/server/voice_changer/RVC/pipeline/PipelineGenerator.py
+++ b/server/voice_changer/RVC/pipeline/PipelineGenerator.py
@@ -13,18 +13,11 @@ from voice_changer.RVC.pitchExtractor.PitchExtractorManager import PitchExtracto
 def createPipeline(modelSlot: ModelSlot, gpu: int, f0Detector: str):
     dev = DeviceManager.get_instance().getDevice(gpu)
     half = DeviceManager.get_instance().halfPrecisionAvailable(gpu)
-    # # Determine the filename (Inferencer)
-    # inferencerFilename = (
-    #     modelSlot.onnxModelFile if modelSlot.isONNX else modelSlot.pyTorchModelFile
-    # )
 
     # Create the Inferencer
     try:
         inferencer = InferencerManager.getInferencer(
-            modelSlot.modelType,
-            modelSlot.modelFile,
-            half,
-            dev,
+            modelSlot.modelType, modelSlot.modelFile, gpu
         )
     except Exception as e:
         print("[Voice Changer] exception! loading inferencer", e)
diff --git a/server/voice_changer/SoVitsSvc40/SoVitsSvc40.py b/server/voice_changer/SoVitsSvc40/SoVitsSvc40.py
index 752bb095..12acb110 100644
--- a/server/voice_changer/SoVitsSvc40/SoVitsSvc40.py
+++ b/server/voice_changer/SoVitsSvc40/SoVitsSvc40.py
@@ -162,10 +162,11 @@ class SoVitsSvc40:
         return self.get_info()
 
     def getOnnxExecutionProvider(self):
-        if self.settings.gpu >= 0:
+        availableProviders = onnxruntime.get_available_providers()
+        if self.settings.gpu >= 0 and "CUDAExecutionProvider" in availableProviders:
             return ["CUDAExecutionProvider"], [{"device_id": self.settings.gpu}]
-        elif "DmlExecutionProvider" in onnxruntime.get_available_providers():
-            return ["DmlExecutionProvider"], []
+        elif self.settings.gpu >= 0 and "DmlExecutionProvider" in availableProviders:
+            return ["DmlExecutionProvider"], [{}]
         else:
             return ["CPUExecutionProvider"], [
                 {
diff --git a/server/voice_changer/SoVitsSvc40v2/SoVitsSvc40v2.py b/server/voice_changer/SoVitsSvc40v2/SoVitsSvc40v2.py
index 8f871727..e264d2c3 100644
--- a/server/voice_changer/SoVitsSvc40v2/SoVitsSvc40v2.py
+++ b/server/voice_changer/SoVitsSvc40v2/SoVitsSvc40v2.py
@@ -139,10 +139,11 @@ class SoVitsSvc40v2:
         return self.get_info()
 
     def getOnnxExecutionProvider(self):
-        if self.settings.gpu >= 0:
+        availableProviders = onnxruntime.get_available_providers()
+        if self.settings.gpu >= 0 and "CUDAExecutionProvider" in availableProviders:
             return ["CUDAExecutionProvider"], [{"device_id": self.settings.gpu}]
-        elif "DmlExecutionProvider" in onnxruntime.get_available_providers():
-            return ["DmlExecutionProvider"], []
+        elif self.settings.gpu >= 0 and "DmlExecutionProvider" in availableProviders:
+            return ["DmlExecutionProvider"], [{}]
         else:
             return ["CPUExecutionProvider"], [
                 {
diff --git a/server/voice_changer/VoiceChanger.py b/server/voice_changer/VoiceChanger.py
index 31a04f71..411cfe78 100755
--- a/server/voice_changer/VoiceChanger.py
+++ b/server/voice_changer/VoiceChanger.py
@@ -29,13 +29,6 @@ import time
 import sounddevice as sd
 import librosa
 
-providers = [
-    "OpenVINOExecutionProvider",
-    "CUDAExecutionProvider",
-    "DmlExecutionProvider",
-    "CPUExecutionProvider",
-]
-
 STREAM_INPUT_FILE = os.path.join(TMP_DIR, "in.wav")
 STREAM_OUTPUT_FILE = os.path.join(TMP_DIR, "out.wav")
 
@@ -379,7 +372,7 @@ class VoiceChanger:
             else:
                 ret = self.voiceChanger.update_settings(key, val)
                 if ret is False:
-                    print(f"{key} is not mutable variable or unknown variable!")
+                    print(f"{key} is not a mutable or known variable")
         return self.get_info()
 
     def _generate_strength(self, crossfadeSize: int):
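
Note: every ONNX call site in this patch now resolves its execution provider
through the new DeviceManager.getOnnxExecutionProvider() instead of a
hard-coded module-level provider list. Below is a minimal sketch of the
intended call flow; the model path "model.onnx" and the gpu index are
placeholders for illustration, not part of this patch:

    import onnxruntime
    from voice_changer.RVC.deviceManager.DeviceManager import DeviceManager

    gpu = 0  # any negative value falls through to CPUExecutionProvider
    providers, options = DeviceManager.get_instance().getOnnxExecutionProvider(gpu)
    session = onnxruntime.InferenceSession(
        "model.onnx",  # placeholder path
        providers=providers,
        provider_options=options,
    )
    print(session.get_providers())  # confirm which provider was actually bound

Selection order is CUDA, then DirectML, then CPU: with a DirectML build of
onnxruntime (the onnxruntime-directml package), "DmlExecutionProvider" appears
in onnxruntime.get_available_providers(), so gpu >= 0 selects DML on machines
without CUDA.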