WIP: improve model selector (MMVCv15)

This commit is contained in:
wataru 2023-05-08 18:58:51 +09:00
parent 19e70606c8
commit 270ffb6459
8 changed files with 192 additions and 68 deletions

View File

@ -32,22 +32,32 @@
],
"modelSetting": [
{
"name": "modelUploader",
"name": "modelUploaderv2",
"options": {}
},
{
"name": "commonFileSelect",
"options": {
"showConfig": true,
"showOnnx": true,
"showPyTorch": true,
"showCorrespondence": true,
"showPyTorchCluster": false,
"showPyTorchEnableCheckBox": true,
"defaultEnablePyTorch": false
"title": "Config(.json)",
"acceptExtentions": ["json"],
"fileKind": "mmvcv15Config"
}
},
{
"name": "framework",
"name": "commonFileSelect",
"options": {
"showFramework": true
"title": "Model(.pt,.pth,.onxx)",
"acceptExtentions": ["pt", "pth", "onnx"],
"fileKind": "mmvcv15Model"
}
},
{
"name": "correspondenceSelectRow2",
"options": {}
},
{
"name": "modelUploadButtonRow2",
"options": {}
}
],
"lab": [],

File diff suppressed because one or more lines are too long

View File

@ -32,22 +32,32 @@
],
"modelSetting": [
{
"name": "modelUploader",
"name": "modelUploaderv2",
"options": {}
},
{
"name": "commonFileSelect",
"options": {
"showConfig": true,
"showOnnx": true,
"showPyTorch": true,
"showCorrespondence": true,
"showPyTorchCluster": false,
"showPyTorchEnableCheckBox": true,
"defaultEnablePyTorch": false
"title": "Config(.json)",
"acceptExtentions": ["json"],
"fileKind": "mmvcv15Config"
}
},
{
"name": "framework",
"name": "commonFileSelect",
"options": {
"showFramework": true
"title": "Model(.pt,.pth,.onxx)",
"acceptExtentions": ["pt", "pth", "onnx"],
"fileKind": "mmvcv15Model"
}
},
{
"name": "correspondenceSelectRow2",
"options": {}
},
{
"name": "modelUploadButtonRow2",
"options": {}
}
],
"lab": [],

View File

@ -51,6 +51,7 @@ import { IOBufferRow, IOBufferRowProps } from "./components/411_IOBufferRow"
import { CommonFileSelectRow, CommonFileSelectRowProps } from "./components/301-e_CommonFileSelectRow"
import { ModelUploadButtonRow2, ModelUploadButtonRow2Props } from "./components/301-f_ModelUploadButtonRow"
import { ModelUploaderRowv2, ModelUploaderRowv2Props } from "./components/301_ModelUploaderRowv2"
import { CorrespondenceSelectRow2, CorrespondenceSelectRow2Props } from "./components/301-g_CorrespondenceSelectRow2"
export const catalog: { [key: string]: (props: any) => JSX.Element } = {}
@ -87,6 +88,7 @@ const initialize = () => {
addToCatalog("modelSamplingRate", (props: ModelSamplingRateRowProps) => { return <ModelSamplingRateRow {...props} /> })
addToCatalog("commonFileSelect", (props: CommonFileSelectRowProps) => { return <CommonFileSelectRow {...props} /> })
addToCatalog("modelUploadButtonRow2", (props: ModelUploadButtonRow2Props) => { return <ModelUploadButtonRow2 {...props} /> })
addToCatalog("correspondenceSelectRow2", (props: CorrespondenceSelectRow2Props) => { return <CorrespondenceSelectRow2 {...props} /> })

View File

@ -12,6 +12,8 @@ export type CommonFileSelectRowProps = {
export const Filekinds = {
"mmvcv13Config": "mmvcv13Config",
"mmvcv13Model": "mmvcv13Model",
"mmvcv15Config": "mmvcv15Config",
"mmvcv15Model": "mmvcv15Model",
"ddspSvcModel": "ddspSvcModel",
"ddspSvcModelConfig": "ddspSvcModelConfig",
"ddspSvcDiffusion": "ddspSvcDiffusion",

View File

@ -0,0 +1,56 @@
import React, { useMemo } from "react"
import { fileSelector, Correspondence } from "@dannadori/voice-changer-client-js"
import { useAppState } from "../../../001_provider/001_AppStateProvider"

// No configurable options yet; the empty props type keeps this row's signature
// uniform with the other catalog components (`(props) => JSX.Element`).
export type CorrespondenceSelectRow2Props = {
}

// Screen row for loading / clearing a speaker-correspondence file.
// Expected file format: one "sid|correspondence|dirname" triple per line;
// lines that do not split into exactly 3 "|"-separated fields are warned
// about on the console and dropped.
export const CorrespondenceSelectRow2 = (_props: CorrespondenceSelectRow2Props) => {
    const appState = useAppState()
    // Rendered output is memoized on the client setting and its updater so the
    // row refreshes whenever the stored correspondences change.
    const CorrespondenceSelectRow = useMemo(() => {
        // Summary text shown in the row: the dirnames of the currently loaded
        // correspondences, or "" when none are loaded.
        const correspondenceFileText = appState.clientSetting.clientSetting.correspondences ? JSON.stringify(appState.clientSetting.clientSetting.correspondences.map(x => { return x.dirname })) : ""
        const onCorrespondenceFileLoadClicked = async () => {
            // NOTE(review): fileSelector("") appears to open the picker with no
            // extension filter — confirm that is intended for this file kind.
            const file = await fileSelector("")
            const correspondenceText = await file.text()
            const cors = correspondenceText.split("\n").map(line => {
                const items = line.split("|")
                if (items.length != 3) {
                    // Malformed line: keep going, but leave a trace for the user.
                    console.warn("Invalid Correspondence Line:", line)
                    return null
                } else {
                    const cor: Correspondence = {
                        sid: Number(items[0]),
                        correspondence: Number(items[1]),
                        dirname: items[2]
                    }
                    return cor
                }
            }).filter(x => { return x != null }) as Correspondence[]
            console.log("recogninzed corresponding lines:", cors)
            appState.clientSetting.updateClientSetting({ ...appState.clientSetting.clientSetting, correspondences: cors })
        }
        const onCorrespondenceFileClearClicked = () => {
            // Reset to an empty list (not undefined) so consumers see a stable shape.
            appState.clientSetting.updateClientSetting({ ...appState.clientSetting.clientSetting, correspondences: [] })
        }
        return (
            <div className="body-row split-3-3-4 left-padding-1 guided">
                <div className="body-item-title left-padding-2">Correspondence</div>
                <div className="body-item-text">
                    <div>{correspondenceFileText}</div>
                </div>
                <div className="body-button-container">
                    <div className="body-button" onClick={onCorrespondenceFileLoadClicked}>select</div>
                    <div className="body-button left-margin-1" onClick={onCorrespondenceFileClearClicked}>clear</div>
                </div>
            </div>
        )
    }, [appState.clientSetting.clientSetting, appState.clientSetting.updateClientSetting])
    return CorrespondenceSelectRow
}

View File

@ -27,6 +27,8 @@ export type FileUploadSetting = {
mmvcv13Config: ModelData | null
mmvcv13Model: ModelData | null
mmvcv15Config: ModelData | null
mmvcv15Model: ModelData | null
ddspSvcModel: ModelData | null
ddspSvcModelConfig: ModelData | null
@ -52,6 +54,8 @@ const InitialFileUploadSetting: FileUploadSetting = {
mmvcv13Config: null,
mmvcv13Model: null,
mmvcv15Config: null,
mmvcv15Model: null,
ddspSvcModel: null,
ddspSvcModelConfig: null,
@ -229,6 +233,15 @@ export const useServerSetting = (props: UseServerSettingProps): ServerSettingSta
alert("モデルファイルを指定する必要があります。")
return
}
} else if (props.clientType == "MMVCv15") {
if (!fileUploadSettings[slot].mmvcv15Config) {
alert("Configファイルを指定する必要があります。")
return
}
if (!fileUploadSettings[slot].mmvcv15Model) {
alert("モデルファイルを指定する必要があります。")
return
}
} else if (props.clientType == "DDSP-SVC") {
if (!fileUploadSettings[slot].ddspSvcModel) {
alert("DDSPモデルを指定する必要があります。")
@ -321,17 +334,22 @@ export const useServerSetting = (props: UseServerSettingProps): ServerSettingSta
}
// MMVCv13
const mmvcv13Models = [fileUploadSetting.mmvcv13Config, fileUploadSetting.mmvcv13Model].filter(x => { return x != null }) as ModelData[]
for (let i = 0; i < mmvcv13Models.length; i++) {
if (!mmvcv13Models[i].data) {
mmvcv13Models[i].data = await mmvcv13Models[i].file!.arrayBuffer()
mmvcv13Models[i].filename = await mmvcv13Models[i].file!.name
const normalModels = [
fileUploadSetting.mmvcv13Config,
fileUploadSetting.mmvcv13Model,
fileUploadSetting.mmvcv15Config,
fileUploadSetting.mmvcv15Model
].filter(x => { return x != null }) as ModelData[]
for (let i = 0; i < normalModels.length; i++) {
if (!normalModels[i].data) {
normalModels[i].data = await normalModels[i].file!.arrayBuffer()
normalModels[i].filename = await normalModels[i].file!.name
}
}
for (let i = 0; i < mmvcv13Models.length; i++) {
const progRate = 1 / mmvcv13Models.length
for (let i = 0; i < normalModels.length; i++) {
const progRate = 1 / normalModels.length
const progOffset = 100 * i * progRate
await _uploadFile(mmvcv13Models[i], (progress: number, _end: boolean) => {
await _uploadFile(normalModels[i], (progress: number, _end: boolean) => {
setUploadProgress(progress * progRate + progOffset)
})
}
@ -359,6 +377,9 @@ export const useServerSetting = (props: UseServerSettingProps): ServerSettingSta
files: {
mmvcv13Config: fileUploadSetting.mmvcv13Config?.filename || "",
mmvcv13Models: fileUploadSetting.mmvcv13Model?.filename || "",
mmvcv15Config: fileUploadSetting.mmvcv15Config?.filename || "",
mmvcv15Models: fileUploadSetting.mmvcv15Model?.filename || "",
ddspSvcModel: fileUploadSetting.ddspSvcModel?.filename ? "ddsp_mod/" + fileUploadSetting.ddspSvcModel?.filename : "",
ddspSvcModelConfig: fileUploadSetting.ddspSvcModelConfig?.filename ? "ddsp_mod/" + fileUploadSetting.ddspSvcModelConfig?.filename : "",
ddspSvcDiffusion: fileUploadSetting.ddspSvcDiffusion?.filename ? "ddsp_diff/" + fileUploadSetting.ddspSvcDiffusion?.filename : "",
@ -433,6 +454,8 @@ export const useServerSetting = (props: UseServerSettingProps): ServerSettingSta
mmvcv13Config: fileUploadSetting.mmvcv13Config ? { data: fileUploadSetting.mmvcv13Config.data, filename: fileUploadSetting.mmvcv13Config.filename } : null,
mmvcv13Model: fileUploadSetting.mmvcv13Model ? { data: fileUploadSetting.mmvcv13Model.data, filename: fileUploadSetting.mmvcv13Model.filename } : null,
mmvcv15Config: fileUploadSetting.mmvcv15Config ? { data: fileUploadSetting.mmvcv15Config.data, filename: fileUploadSetting.mmvcv15Config.filename } : null,
mmvcv15Model: fileUploadSetting.mmvcv15Model ? { data: fileUploadSetting.mmvcv15Model.data, filename: fileUploadSetting.mmvcv15Model.filename } : null,
ddspSvcModel: fileUploadSetting.ddspSvcModel ? { data: fileUploadSetting.ddspSvcModel.data, filename: fileUploadSetting.ddspSvcModel.filename } : null,
ddspSvcModelConfig: fileUploadSetting.ddspSvcModelConfig ? { data: fileUploadSetting.ddspSvcModelConfig.data, filename: fileUploadSetting.ddspSvcModelConfig.filename } : null,

View File

@ -70,11 +70,18 @@ class MMVCv15:
self.gpu_num = torch.cuda.device_count()
def loadModel(self, props: LoadModelParams):
self.settings.configFile = props.files.configFilename
params = props.params
self.settings.configFile = params["files"]["mmvcv15Config"]
self.hps = get_hparams_from_file(self.settings.configFile)
self.settings.pyTorchModelFile = props.files.pyTorchModelFilename
self.settings.onnxModelFile = props.files.onnxModelFilename
modelFile = params["files"]["mmvcv15Models"]
if modelFile.endswith(".onnx"):
self.settings.pyTorchModelFile = None
self.settings.onnxModelFile = modelFile
else:
self.settings.pyTorchModelFile = modelFile
self.settings.onnxModelFile = None
# PyTorchモデル生成
self.net_g = SynthesizerTrn(
@ -102,10 +109,11 @@ class MMVCv15:
# ONNXモデル生成
self.onxx_input_length = 8192
if self.settings.onnxModelFile is not None:
ort_options = onnxruntime.SessionOptions()
ort_options.intra_op_num_threads = 8
providers, options = self.getOnnxExecutionProvider()
self.onnx_session = onnxruntime.InferenceSession(
self.settings.onnxModelFile, providers=providers
self.settings.onnxModelFile,
providers=providers,
provider_options=options,
)
inputs_info = self.onnx_session.get_inputs()
for i in inputs_info:
@ -114,39 +122,41 @@ class MMVCv15:
self.onxx_input_length = i.shape[2]
return self.get_info()
def getOnnxExecutionProvider(self):
if self.settings.gpu >= 0:
return ["CUDAExecutionProvider"], [{"device_id": self.settings.gpu}]
elif "DmlExecutionProvider" in onnxruntime.get_available_providers():
return ["DmlExecutionProvider"], []
else:
return ["CPUExecutionProvider"], [
{
"intra_op_num_threads": 8,
"execution_mode": onnxruntime.ExecutionMode.ORT_PARALLEL,
"inter_op_num_threads": 8,
}
]
def isOnnx(self):
if self.settings.onnxModelFile is not None:
return True
else:
return False
def update_settings(self, key: str, val: int | float | str):
if (
key == "onnxExecutionProvider"
and self.settings.onnxModelFile != ""
and self.settings.onnxModelFile is not None
):
if val == "CUDAExecutionProvider":
if self.settings.gpu < 0 or self.settings.gpu >= self.gpu_num:
self.settings.gpu = 0
provider_options = [{"device_id": self.settings.gpu}]
self.onnx_session.set_providers(
providers=[val], provider_options=provider_options
)
else:
self.onnx_session.set_providers(providers=[val])
elif key in self.settings.intData:
if key in self.settings.intData:
val = int(val)
setattr(self.settings, key, val)
if (
key == "gpu"
and val >= 0
and val < self.gpu_num
and self.settings.onnxModelFile != ""
and self.settings.onnxModelFile is not None
):
providers = self.onnx_session.get_providers()
print("Providers:", providers)
if "CUDAExecutionProvider" in providers:
provider_options = [{"device_id": self.settings.gpu}]
self.onnx_session.set_providers(
providers=["CUDAExecutionProvider"],
provider_options=provider_options,
)
if key == "gpu" and self.isOnnx():
providers, options = self.getOnnxExecutionProvider()
self.onnx_session = onnxruntime.InferenceSession(
self.settings.onnxModelFile,
providers=providers,
provider_options=options,
)
inputs_info = self.onnx_session.get_inputs()
for i in inputs_info:
if i.name == "sin":
self.onxx_input_length = i.shape[2]
elif key in self.settings.floatData:
setattr(self.settings, key, float(val))
elif key in self.settings.strData:
@ -314,7 +324,7 @@ class MMVCv15:
def inference(self, data):
try:
if self.settings.framework == "ONNX":
if self.isOnnx():
audio = self._onnx_inference(data)
else:
audio = self._pyTorch_inference(data)