commit f0fbf58258
parent 4bbcbd66bc

    hubert
@@ -42,6 +42,7 @@ def setupArgParser():
     parser.add_argument("--cluster", type=str, help="path to cluster model")
     parser.add_argument("--hubert", type=str, help="path to hubert model")
     parser.add_argument("--internal", type=strtobool, default=False, help="convert the various paths to the mac app internal paths")
+    parser.add_argument("--useHubertOnnx", type=strtobool, default=False, help="use hubert onnx")
 
     return parser
 
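Note: --useHubertOnnx goes through strtobool like the existing --internal flag, so it takes string values on the command line. A minimal sketch of how the new flag parses (only the new argument is shown; the surrounding parser setup is assumed):

    from distutils.util import strtobool
    import argparse

    # Sketch: just the new flag, with the same strtobool conversion used in setupArgParser().
    parser = argparse.ArgumentParser()
    parser.add_argument("--useHubertOnnx", type=strtobool, default=False, help="use hubert onnx")

    args = parser.parse_args(["--useHubertOnnx", "true"])
    print(args.useHubertOnnx)  # 1 ("yes"/"on"/"1" also parse as true; "false"/"no"/"0" as false)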
@@ -86,6 +87,8 @@ MODEL = args.m if args.m != None else None
 ONNX_MODEL = args.o if args.o != None else None
 HUBERT_MODEL = args.hubert if args.hubert != None else None  # hubert is downloaded by the user and placed in the extracted folder.
 CLUSTER_MODEL = args.cluster if args.cluster != None else None
+USE_HUBERT_ONNX = args.useHubertOnnx
+
 if args.internal and hasattr(sys, "_MEIPASS"):
     print("use internal path")
     if CONFIG != None:
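Because strtobool returns an int (1 or 0) rather than a bool, USE_HUBERT_ONNX holds an int whenever the flag is given; the later `if useHubertOnnx == True:` check in SoVitsSvc40 still passes because 1 == True in Python. A quick check of that assumption:

    from distutils.util import strtobool

    USE_HUBERT_ONNX = strtobool("true")
    print(USE_HUBERT_ONNX, USE_HUBERT_ONNX == True)  # 1 True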
@@ -122,7 +125,7 @@ if args.colab == True:
     os.environ["colab"] = "True"
 
 if __name__ == 'MMVCServerSIO':
-    voiceChangerManager = VoiceChangerManager.get_instance({"hubert": HUBERT_MODEL})
+    voiceChangerManager = VoiceChangerManager.get_instance({"hubert": HUBERT_MODEL, "useHubertOnnx": USE_HUBERT_ONNX})
     if CONFIG and (MODEL or ONNX_MODEL):
         if MODEL_TYPE == "MMVCv15" or MODEL_TYPE == "MMVCv13":
             voiceChangerManager.loadModel(CONFIG, MODEL, ONNX_MODEL, None)
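The params dict built here reaches the voice changer as self.params (see the SoVitsSvc40 hunks below). An illustrative sketch of that pass-through, assuming a simple singleton: the class body and the model path are placeholders, only the "hubert"/"useHubertOnnx" keys come from the diff.

    class VoiceChangerManagerSketch:
        _instance = None

        def __init__(self, params: dict):
            # kept for later lookups such as params["hubert"] and params["useHubertOnnx"]
            self.params = params

        @classmethod
        def get_instance(cls, params: dict):
            if cls._instance is None:
                cls._instance = cls(params)
            return cls._instance

    manager = VoiceChangerManagerSketch.get_instance({"hubert": "models/hubert.pt", "useHubertOnnx": True})
    print(manager.params["useHubertOnnx"])  # True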
@@ -74,19 +74,25 @@ class SoVitsSvc40:
 
         # hubert model
         try:
-            # if sys.platform.startswith('darwin'):
-            #     vec_path = os.path.join(sys._MEIPASS, "hubert/checkpoint_best_legacy_500.pt")
-            # else:
-            #     vec_path = "hubert/checkpoint_best_legacy_500.pt"
-            vec_path = self.params["hubert"]
+            hubert_path = self.params["hubert"]
+            useHubertOnnx = self.params["useHubertOnnx"]
+            self.useHubertOnnx = useHubertOnnx
 
-            models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
-                [vec_path],
-                suffix="",
-            )
-            model = models[0]
-            model.eval()
-            self.hubert_model = model.cpu()
+            if useHubertOnnx == True:
+                ort_options = onnxruntime.SessionOptions()
+                ort_options.intra_op_num_threads = 8
+                self.hubert_onnx = onnxruntime.InferenceSession(
+                    "model_hubert/hubert_simple.onnx",
+                    providers=['TensorrtExecutionProvider', 'CUDAExecutionProvider', 'CPUExecutionProvider']
+                )
+            else:
+                models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
+                    [hubert_path],
+                    suffix="",
+                )
+                model = models[0]
+                model.eval()
+                self.hubert_model = model.cpu()
         except Exception as e:
             print("EXCEPTION during loading hubert/contentvec model", e)
 
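The ONNX branch above assumes the exported HuBERT model exposes an "audio" input and a "units" output (the names used during inference in the next hunk). A small sketch for verifying that against the file before relying on it; passing ort_options via sess_options is done here for illustration, while the hunk itself only creates the options object:

    import onnxruntime

    ort_options = onnxruntime.SessionOptions()
    ort_options.intra_op_num_threads = 8
    sess = onnxruntime.InferenceSession(
        "model_hubert/hubert_simple.onnx",      # path taken from the hunk above
        sess_options=ort_options,
        providers=["CPUExecutionProvider"],
    )
    print([i.name for i in sess.get_inputs()])   # expected: ["audio"]
    print([o.name for o in sess.get_outputs()])  # expected: ["units"]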
@@ -204,39 +210,21 @@ class SoVitsSvc40:
         else:
             dev = torch.device("cuda", index=self.settings.gpu)
 
-        self.hubert_model = self.hubert_model.to(dev)
-        wav16k_tensor = wav16k_tensor.to(dev)
+        if hasattr(self, "hubert_onnx"):
+            c = self.hubert_onnx.run(
+                ["units"],
+                {
+                    "audio": wav16k_numpy.reshape(1, -1),
+                })
+            c = torch.from_numpy(np.array(c)).squeeze(0).transpose(1, 2)
+        else:
+            self.hubert_model = self.hubert_model.to(dev)
+            wav16k_tensor = wav16k_tensor.to(dev)
+            c = utils.get_hubert_content(self.hubert_model, wav_16k_tensor=wav16k_tensor)
+
         uv = uv.to(dev)
         f0 = f0.to(dev)
-
-        import time
-        start = time.time()
-        for i in range(10):
-            c = utils.get_hubert_content(self.hubert_model, wav_16k_tensor=wav16k_tensor)
-        end = time.time()
-        elapse = end - start
-        print("torch time", elapse, elapse / 10)
-
-        import onnxruntime
-        ort_options = onnxruntime.SessionOptions()
-        ort_options.intra_op_num_threads = 8
-        if not hasattr(self, "hubert_onnx"):
-            self.hubert_onnx = onnxruntime.InferenceSession(
-                "model_hubert/hubert_simple.onnx",
-                # providers=['TensorrtExecutionProvider', 'CUDAExecutionProvider', 'CPUExecutionProvider']
-                providers=['CPUExecutionProvider']
-            )
-
-        start = time.time()
-        for i in range(10):
-            c_onnx = utils.get_hubert_content2(self.hubert_onnx, wav16k_numpy)
-        end = time.time()
-        elapse = end - start
-        print("onnx time", elapse, elapse / 10)
-
-        print("torch units:", c)
-        print("onnx units:", c_onnx)
 
         c = utils.repeat_expand_2d(c.squeeze(0), f0.shape[1])
 
         if self.settings.clusterInferRatio != 0 and hasattr(self, "cluster_model") and self.cluster_model != None:
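During inference the ONNX path feeds the 16 kHz waveform as a (1, n_samples) array and reshapes the returned units to what utils.repeat_expand_2d expects. A standalone sketch of that call, assuming the model file from the diff is available; the one-second silent waveform is only for illustration:

    import numpy as np
    import onnxruntime
    import torch

    sess = onnxruntime.InferenceSession("model_hubert/hubert_simple.onnx",
                                        providers=["CPUExecutionProvider"])
    wav16k_numpy = np.zeros(16000, dtype=np.float32)

    # run() returns a list with one array per requested output name
    out = sess.run(["units"], {"audio": wav16k_numpy.reshape(1, -1)})

    # np.array(out) has shape (1, 1, frames, dims); squeeze/transpose mirrors the
    # diff's torch.from_numpy(np.array(c)).squeeze(0).transpose(1, 2)
    c = torch.from_numpy(np.array(out)).squeeze(0).transpose(1, 2)
    print(c.shape)  # (1, dims, frames)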