WIP: support rvc-webui; ONNX export is not supported yet
This commit is contained in:
parent 11a536b03f / commit cdceae9cf1
@@ -43,6 +43,7 @@
"showFeature": false,
"showIndex": false,
"showHalfPrecision": false,
"showPyTorchEnableCheckBox": true,
"defaultEnablePyTorch": true,

"showOnnxExportButton": false
@@ -40,6 +40,7 @@
"showCorrespondence": false,
"showPyTorchCluster": false,

"showPyTorchEnableCheckBox": true,
"defaultEnablePyTorch": false
}
},
@@ -39,7 +39,7 @@
"showPyTorch": true,
"showCorrespondence": true,
"showPyTorchCluster": false,

"showPyTorchEnableCheckBox": true,
"defaultEnablePyTorch": false
}
},
22  client/demo/dist/assets/gui_settings/RVC.json  (vendored)
@@ -36,6 +36,10 @@
{
"name": "onnxExport",
"options": {}
},
{
"name": "onnxExecutor",
"options": {}
}
],
"modelSetting": [

@@ -43,29 +47,23 @@
"name": "modelUploader",
"options": {
"showModelSlot": true,
"showFrameworkSelector": false,
"showConfig": false,
"showOnnx": true,
"showPyTorch": true,
"oneModelFileType": true,
"showOnnx": false,
"showPyTorch": false,
"showCorrespondence": false,
"showPyTorchCluster": false,

"showFeature": true,
"showIndex": true,
"showHalfPrecision": true,
"showPyTorchEnableCheckBox": false,
"defaultEnablePyTorch": true,
"onlySelectedFramework": true,

"showDefaultTune": true
}
},
{
"name": "framework",
"options": {
"showFramework": true
}
},
{
"name": "modelSamplingRate",
"options": {}
}
],
"deviceSetting": [
183  client/demo/dist/assets/gui_settings/RVC_CLASSIC.json  (vendored, Normal file)
@@ -0,0 +1,183 @@
{
    "type": "demo",
    "id": "RVC",
    "front": {
        "title": [
            {
                "name": "title",
                "options": {
                    "mainTitle": "Realtime Voice Changer Client",
                    "subTitle": "for RVC",
                    "lineNum": 1
                }
            },
            {
                "name": "clearSetting",
                "options": {}
            }
        ],
        "serverControl": [
            {
                "name": "startButton",
                "options": {}
            },
            {
                "name": "performance",
                "options": {}
            },
            {
                "name": "serverInfo",
                "options": {}
            },
            {
                "name": "modelSwitch",
                "options": {}
            },
            {
                "name": "onnxExport",
                "options": {}
            }
        ],
        "modelSetting": [
            {
                "name": "modelUploader",
                "options": {
                    "showModelSlot": true,
                    "showConfig": false,
                    "showOnnx": true,
                    "showPyTorch": true,
                    "showCorrespondence": false,
                    "showPyTorchCluster": false,

                    "showFeature": true,
                    "showIndex": true,
                    "showHalfPrecision": true,
                    "defaultEnablePyTorch": true,

                    "showDefaultTune": true
                }
            },
            {
                "name": "framework",
                "options": {
                    "showFramework": true
                }
            },
            {
                "name": "modelSamplingRate",
                "options": {}
            }
        ],
        "deviceSetting": [
            {
                "name": "audioInput",
                "options": {}
            },
            {
                "name": "audioOutput",
                "options": {}
            }
        ],
        "qualityControl": [
            {
                "name": "noiseControl",
                "options": {}
            },
            {
                "name": "gainControl",
                "options": {}
            },
            {
                "name": "f0Detector",
                "options": {
                    "detectors": ["pm", "harvest"]
                }
            },
            {
                "name": "divider",
                "options": {}
            },
            {
                "name": "analyzer",
                "options": {}
            }
        ],
        "speakerSetting": [
            {
                "name": "dstId",
                "options": {
                    "showF0": true,
                    "useServerInfo": false
                }
            },
            {
                "name": "tune",
                "options": {}
            },
            {
                "name": "indexRatio",
                "options": {}
            },
            {
                "name": "silentThreshold",
                "options": {}
            }
        ],
        "converterSetting": [
            {
                "name": "inputChunkNum",
                "options": {}
            },
            {
                "name": "extraDataLength",
                "options": {}
            },
            {
                "name": "gpu",
                "options": {}
            }
        ],
        "advancedSetting": [
            {
                "name": "protocol",
                "options": {}
            },
            {
                "name": "crossFadeOverlapSize",
                "options": {}
            },
            {
                "name": "crossFadeOffsetRate",
                "options": {}
            },
            {
                "name": "crossFadeEndRate",
                "options": {}
            },
            {
                "name": "trancateNumThreshold",
                "options": {}
            },
            {
                "name": "rvcQuality",
                "options": {}
            },
            {
                "name": "silenceFront",
                "options": {}
            }
        ]
    },

    "dialogs": {
        "license": [
            {
                "title": "Retrieval-based-Voice-Conversion-WebUI",
                "auther": "liujing04",
                "contact": "",
                "url": "https://github.com/liujing04/Retrieval-based-Voice-Conversion-WebUI",
                "license": "MIT"
            }
        ]
    }
}
@@ -39,7 +39,7 @@
"showPyTorch": true,
"showCorrespondence": false,
"showPyTorchCluster": true,

"showPyTorchEnableCheckBox": true,
"defaultEnablePyTorch": true
}
},
@@ -39,7 +39,7 @@
"showPyTorch": true,
"showCorrespondence": false,
"showPyTorchCluster": true,

"showPyTorchEnableCheckBox": true,
"defaultEnablePyTorch": true
}
},
57  client/demo/dist/index.js  (vendored)
File diff suppressed because one or more lines are too long
@@ -43,6 +43,7 @@
"showFeature": false,
"showIndex": false,
"showHalfPrecision": false,
"showPyTorchEnableCheckBox": true,
"defaultEnablePyTorch": true,

"showOnnxExportButton": false
@@ -40,6 +40,7 @@
"showCorrespondence": false,
"showPyTorchCluster": false,

"showPyTorchEnableCheckBox": true,
"defaultEnablePyTorch": false
}
},
@@ -39,7 +39,7 @@
"showPyTorch": true,
"showCorrespondence": true,
"showPyTorchCluster": false,

"showPyTorchEnableCheckBox": true,
"defaultEnablePyTorch": false
}
},
@@ -36,6 +36,10 @@
{
"name": "onnxExport",
"options": {}
},
{
"name": "onnxExecutor",
"options": {}
}
],
"modelSetting": [

@@ -43,29 +47,23 @@
"name": "modelUploader",
"options": {
"showModelSlot": true,
"showFrameworkSelector": false,
"showConfig": false,
"showOnnx": true,
"showPyTorch": true,
"oneModelFileType": true,
"showOnnx": false,
"showPyTorch": false,
"showCorrespondence": false,
"showPyTorchCluster": false,

"showFeature": true,
"showIndex": true,
"showHalfPrecision": true,
"showPyTorchEnableCheckBox": false,
"defaultEnablePyTorch": true,
"onlySelectedFramework": true,

"showDefaultTune": true
}
},
{
"name": "framework",
"options": {
"showFramework": true
}
},
{
"name": "modelSamplingRate",
"options": {}
}
],
"deviceSetting": [
183  client/demo/public/assets/gui_settings/RVC_CLASSIC.json  (Normal file)
@@ -0,0 +1,183 @@
{
    "type": "demo",
    "id": "RVC",
    "front": {
        "title": [
            {
                "name": "title",
                "options": {
                    "mainTitle": "Realtime Voice Changer Client",
                    "subTitle": "for RVC",
                    "lineNum": 1
                }
            },
            {
                "name": "clearSetting",
                "options": {}
            }
        ],
        "serverControl": [
            {
                "name": "startButton",
                "options": {}
            },
            {
                "name": "performance",
                "options": {}
            },
            {
                "name": "serverInfo",
                "options": {}
            },
            {
                "name": "modelSwitch",
                "options": {}
            },
            {
                "name": "onnxExport",
                "options": {}
            }
        ],
        "modelSetting": [
            {
                "name": "modelUploader",
                "options": {
                    "showModelSlot": true,
                    "showConfig": false,
                    "showOnnx": true,
                    "showPyTorch": true,
                    "showCorrespondence": false,
                    "showPyTorchCluster": false,

                    "showFeature": true,
                    "showIndex": true,
                    "showHalfPrecision": true,
                    "defaultEnablePyTorch": true,

                    "showDefaultTune": true
                }
            },
            {
                "name": "framework",
                "options": {
                    "showFramework": true
                }
            },
            {
                "name": "modelSamplingRate",
                "options": {}
            }
        ],
        "deviceSetting": [
            {
                "name": "audioInput",
                "options": {}
            },
            {
                "name": "audioOutput",
                "options": {}
            }
        ],
        "qualityControl": [
            {
                "name": "noiseControl",
                "options": {}
            },
            {
                "name": "gainControl",
                "options": {}
            },
            {
                "name": "f0Detector",
                "options": {
                    "detectors": ["pm", "harvest"]
                }
            },
            {
                "name": "divider",
                "options": {}
            },
            {
                "name": "analyzer",
                "options": {}
            }
        ],
        "speakerSetting": [
            {
                "name": "dstId",
                "options": {
                    "showF0": true,
                    "useServerInfo": false
                }
            },
            {
                "name": "tune",
                "options": {}
            },
            {
                "name": "indexRatio",
                "options": {}
            },
            {
                "name": "silentThreshold",
                "options": {}
            }
        ],
        "converterSetting": [
            {
                "name": "inputChunkNum",
                "options": {}
            },
            {
                "name": "extraDataLength",
                "options": {}
            },
            {
                "name": "gpu",
                "options": {}
            }
        ],
        "advancedSetting": [
            {
                "name": "protocol",
                "options": {}
            },
            {
                "name": "crossFadeOverlapSize",
                "options": {}
            },
            {
                "name": "crossFadeOffsetRate",
                "options": {}
            },
            {
                "name": "crossFadeEndRate",
                "options": {}
            },
            {
                "name": "trancateNumThreshold",
                "options": {}
            },
            {
                "name": "rvcQuality",
                "options": {}
            },
            {
                "name": "silenceFront",
                "options": {}
            }
        ]
    },

    "dialogs": {
        "license": [
            {
                "title": "Retrieval-based-Voice-Conversion-WebUI",
                "auther": "liujing04",
                "contact": "",
                "url": "https://github.com/liujing04/Retrieval-based-Voice-Conversion-WebUI",
                "license": "MIT"
            }
        ]
    }
}
@@ -39,7 +39,7 @@
"showPyTorch": true,
"showCorrespondence": false,
"showPyTorchCluster": true,

"showPyTorchEnableCheckBox": true,
"defaultEnablePyTorch": true
}
},
@@ -39,7 +39,7 @@
"showPyTorch": true,
"showCorrespondence": false,
"showPyTorchCluster": true,

"showPyTorchEnableCheckBox": true,
"defaultEnablePyTorch": true
}
},
@@ -19,7 +19,7 @@ export const ClientSelector = () => {
if (ua.indexOf("mac os x") !== -1) {
return ["MMVCv13", "MMVCv15", "so-vits-svc-40", "RVC"] as ClientType[]
} else {
return ["MMVCv13", "MMVCv15", "so-vits-svc-40", "so-vits-svc-40v2", "RVC", "DDSP-SVC"] as ClientType[]
return ["MMVCv13", "MMVCv15", "so-vits-svc-40", "so-vits-svc-40v2", "RVC", "DDSP-SVC", "RVC_CLASSIC_GUI"] as ClientType[]
}
}, [])
@@ -42,6 +42,7 @@ import { DstIdRow2, DstIdRow2Props } from "./components/602v2_DstIdRow2"
import { SilenceFrontRow, SilenceFrontRowProps } from "./components/812_SilenceFrontRow"
import { ModelSwitchRow, ModelSwitchRowProps } from "./components/204_ModelSwitchRow"
import { ONNXExportRow, ONNXExportRowProps } from "./components/205_ONNXExportRow"
import { ONNXExecutorRow, ONNXExecutorRowProps } from "./components/206_ONNXExecutorRow"

export const catalog: { [key: string]: (props: any) => JSX.Element } = {}

@@ -68,6 +69,7 @@ const initialize = () => {
addToCatalog("serverInfo", (props: ServerInfoRowProps) => { return <ServerInfoRow {...props} /> })
addToCatalog("modelSwitch", (props: ModelSwitchRowProps) => { return <ModelSwitchRow {...props} /> })
addToCatalog("onnxExport", (props: ONNXExportRowProps) => { return <ONNXExportRow {...props} /> })
addToCatalog("onnxExecutor", (props: ONNXExecutorRowProps) => { return <ONNXExecutorRow {...props} /> })
@@ -1,3 +1,4 @@
import { Framework } from "@dannadori/voice-changer-client-js"
import React, { useMemo } from "react"
import { useAppState } from "../../../001_provider/001_AppStateProvider"

@@ -9,20 +10,23 @@ export const ModelSwitchRow = (_props: ModelSwitchRowProps) => {
const appState = useAppState()

const modelSwitchRow = useMemo(() => {
const slot = appState.serverSetting.serverSetting.modelSlotIndex

const onSwitchModelClicked = (index: number) => {

appState.serverSetting.updateServerSettings({ ...appState.serverSetting.serverSetting, modelSlotIndex: index })
const onSwitchModelClicked = (index: number, filename: string) => {
const framework: Framework = filename.endsWith(".onnx") ? "ONNX" : "PyTorch"
console.log("Framework:::", filename, framework)

appState.serverSetting.updateServerSettings({ ...appState.serverSetting.serverSetting, modelSlotIndex: index, framework: framework })
}
let filename = ""
const modelOptions = appState.serverSetting.serverSetting.modelSlots.map((x, index) => {
const className = index == slot ? "body-button-active left-margin-1" : "body-button left-margin-1"
let filename = ""
if (x.pyTorchModelFile && x.pyTorchModelFile.length > 0) {
filename = x.pyTorchModelFile.replace(/^.*[\\\/]/, '')
return <div key={index} className="body-button left-margin-1" onClick={() => { onSwitchModelClicked(index) }}>{filename}</div>
return <div key={index} className={className} onClick={() => { onSwitchModelClicked(index, filename) }}>{filename}</div>
} else if (x.onnxModelFile && x.onnxModelFile.length > 0) {
filename = x.onnxModelFile.replace(/^.*[\\\/]/, '')
return <div key={index} className="body-button left-margin-1" onClick={() => { onSwitchModelClicked(index) }}>{filename}</div>
return <div key={index} className={className} onClick={() => { onSwitchModelClicked(index, filename) }}>{filename}</div>
} else {
return <div key={index} ></div>
}
@@ -12,6 +12,10 @@ export const ONNXExportRow = (_props: ONNXExportRowProps) => {
const guiState = useGuiState()

const onnxExporthRow = useMemo(() => {
if (appState.serverSetting.serverSetting.framework != "PyTorch") {
return <></>
}

const onnxExportButtonAction = async () => {

if (guiState.isConverting) {
@@ -0,0 +1,43 @@
import { OnnxExecutionProvider, OnnxExporterInfo } from "@dannadori/voice-changer-client-js"
import React, { useMemo } from "react"
import { useAppState } from "../../../001_provider/001_AppStateProvider"
import { useGuiState } from "../001_GuiStateProvider"


export type ONNXExecutorRowProps = {
}

export const ONNXExecutorRow = (_props: ONNXExecutorRowProps) => {
    const appState = useAppState()

    const onnxExecutorRow = useMemo(() => {
        if (appState.serverSetting.serverSetting.framework != "ONNX") {
            return <></>
        }
        const onOnnxExecutionProviderChanged = async (val: OnnxExecutionProvider) => {
            appState.serverSetting.updateServerSettings({ ...appState.serverSetting.serverSetting, onnxExecutionProvider: val })
        }

        return (
            <div className="body-row split-3-7 left-padding-1">
                <div className="body-item-title left-padding-2">OnnxExecutionProvider</div>
                <div className="body-select-container">
                    <select className="body-select" value={appState.serverSetting.serverSetting.onnxExecutionProvider} onChange={(e) => {
                        onOnnxExecutionProviderChanged(e.target.value as OnnxExecutionProvider)
                    }}>
                        {
                            Object.values(OnnxExecutionProvider).map(x => {
                                return <option key={x} value={x}>{x}</option>
                            })
                        }
                    </select>
                </div>
            </div>

        )
    }, [appState.getInfo, appState.serverSetting.serverSetting])

    return onnxExecutorRow
}
@@ -0,0 +1,73 @@
import React, { useMemo } from "react"
import { fileSelector } from "@dannadori/voice-changer-client-js"
import { useAppState } from "../../../001_provider/001_AppStateProvider"
import { useGuiState } from "../001_GuiStateProvider"


export const ModelSelectRow = () => {
    const appState = useAppState()
    const guiState = useGuiState()


    const onnxSelectRow = useMemo(() => {
        const slot = guiState.modelSlotNum
        const fileUploadSetting = appState.serverSetting.fileUploadSettings[slot]
        if (!fileUploadSetting) {
            return <></>
        }

        const onnxModelFilenameText = fileUploadSetting.onnxModel?.filename || fileUploadSetting.onnxModel?.file?.name || ""
        const pyTorchFilenameText = fileUploadSetting.pyTorchModel?.filename || fileUploadSetting.pyTorchModel?.file?.name || ""
        const modelFilenameText = onnxModelFilenameText + pyTorchFilenameText

        const onModelFileLoadClicked = async () => {
            const file = await fileSelector("")
            if (file.name.endsWith(".onnx") == false && file.name.endsWith(".pth") == false) {
                alert("モデルファイルの拡張子は.onnxか.pthである必要があります。(Extension of the model file should be .onnx or .pth.)")
                return
            }
            if (file.name.endsWith(".onnx") == true) {
                appState.serverSetting.setFileUploadSetting(slot, {
                    ...appState.serverSetting.fileUploadSettings[slot],
                    onnxModel: {
                        file: file
                    },
                    pyTorchModel: null
                })
                return
            }
            if (file.name.endsWith(".pth") == true) {
                appState.serverSetting.setFileUploadSetting(slot, {
                    ...appState.serverSetting.fileUploadSettings[slot],
                    pyTorchModel: {
                        file: file
                    },
                    onnxModel: null
                })
                return
            }
        }
        const onModelFileClearClicked = () => {
            appState.serverSetting.setFileUploadSetting(slot, {
                ...appState.serverSetting.fileUploadSettings[slot],
                onnxModel: null,
                pyTorchModel: null
            })
        }

        return (
            <div className="body-row split-3-3-4 left-padding-1 guided">
                <div className="body-item-title left-padding-2">Model(.onnx or .pth)</div>
                <div className="body-item-text">
                    <div>{modelFilenameText}</div>
                </div>
                <div className="body-button-container">
                    <div className="body-button" onClick={onModelFileLoadClicked}>select</div>
                    <div className="body-button left-margin-1" onClick={onModelFileClearClicked}>clear</div>
                </div>
            </div>
        )
    }, [appState.serverSetting.fileUploadSettings, appState.serverSetting.setFileUploadSetting, guiState.modelSlotNum])

    return onnxSelectRow
}
@@ -3,13 +3,21 @@ import { fileSelector } from "@dannadori/voice-changer-client-js"
import { useAppState } from "../../../001_provider/001_AppStateProvider"
import { useGuiState } from "../001_GuiStateProvider"

export const ONNXSelectRow = () => {
type ONNXSelectRowProps = {
onlyWhenSelected: boolean
}

export const ONNXSelectRow = (props: ONNXSelectRowProps) => {
const appState = useAppState()
const guiState = useGuiState()

const onnxSelectRow = useMemo(() => {
const slot = guiState.modelSlotNum
if (props.onlyWhenSelected && appState.serverSetting.fileUploadSettings[slot]?.framework != "ONNX") {
return <></>
}

const onnxModelFilenameText = appState.serverSetting.fileUploadSettings[slot]?.onnxModel?.filename || appState.serverSetting.fileUploadSettings[slot]?.onnxModel?.file?.name || ""
const onOnnxFileLoadClicked = async () => {
const file = await fileSelector("")
@@ -3,15 +3,24 @@ import { fileSelector } from "@dannadori/voice-changer-client-js"
import { useAppState } from "../../../001_provider/001_AppStateProvider"
import { useGuiState } from "../001_GuiStateProvider"

export type PyTorchSelectRow = {
export type PyTorchSelectRowProps = {
onlyWhenSelected: boolean
}

export const PyTorchSelectRow = (_props: PyTorchSelectRow) => {
export const PyTorchSelectRow = (props: PyTorchSelectRowProps) => {
const appState = useAppState()
const guiState = useGuiState()

const pyTorchSelectRow = useMemo(() => {
if (guiState.showPyTorchModelUpload == false) {
return <></>
}
const slot = guiState.modelSlotNum
if (props.onlyWhenSelected && appState.serverSetting.fileUploadSettings[slot]?.framework != "PyTorch") {
return <></>
}

const pyTorchFilenameText = appState.serverSetting.fileUploadSettings[slot]?.pyTorchModel?.filename || appState.serverSetting.fileUploadSettings[slot]?.pyTorchModel?.file?.name || ""
const onPyTorchFileLoadClicked = async () => {
const file = await fileSelector("")
@@ -9,6 +9,11 @@ export const HalfPrecisionRow = () => {

const halfPrecisionSelectRow = useMemo(() => {
const slot = guiState.modelSlotNum
const fileUploadSetting = appState.serverSetting.fileUploadSettings[slot]
if (!fileUploadSetting) {
return <></>
}
const currentValue = fileUploadSetting ? fileUploadSetting.isHalf : true
const onHalfPrecisionChanged = () => {
appState.serverSetting.setFileUploadSetting(slot, {
...appState.serverSetting.fileUploadSettings[slot],

@@ -16,16 +21,13 @@ export const HalfPrecisionRow = () => {
})
}

const currentVal = appState.serverSetting.fileUploadSettings[slot] ? appState.serverSetting.fileUploadSettings[slot].isHalf : true
return (
<div className="body-row split-3-3-4 left-padding-1 guided">
<div className="body-item-title left-padding-2">-</div>
<div className="body-item-text">
<div></div>
<input type="checkbox" checked={currentValue} onChange={() => onHalfPrecisionChanged()} /> half-precision
</div>
<div className="body-button-container">
<input type="checkbox" checked={currentVal} onChange={() => onHalfPrecisionChanged()} /> half-precision
</div>
</div>
)
@@ -27,7 +27,7 @@ export const ModelUploadButtonRow = () => {
</div>
<div className="body-button-container">
<div className={uploadButtonClassName} onClick={uploadButtonAction}>{uploadButtonLabel}</div>
<div>{uploadedText}</div>
<div className="body-item-text-em" >{uploadedText}</div>
</div>
</div>
@@ -8,6 +8,10 @@ export const DefaultTuneRow = () => {
const defaultTuneRow = useMemo(() => {
const slot = guiState.modelSlotNum
const fileUploadSetting = appState.serverSetting.fileUploadSettings[slot]
if (!fileUploadSetting) {
return <></>
}
const currentValue = fileUploadSetting.defaultTune

const onDefaultTuneChanged = (val: number) => {
appState.serverSetting.setFileUploadSetting(slot, {

@@ -20,10 +24,10 @@ export const DefaultTuneRow = () => {
<div className="body-row split-3-2-1-4 left-padding-1 guided">
<div className="body-item-title left-padding-2 ">Default Tune</div>
<div>
<input type="range" className="body-item-input-slider" min="-50" max="50" step="1" value={fileUploadSetting?.defaultTune || 0} onChange={(e) => {
<input type="range" className="body-item-input-slider" min="-50" max="50" step="1" value={currentValue} onChange={(e) => {
onDefaultTuneChanged(Number(e.target.value))
}}></input>
<span className="body-item-input-slider-val">{fileUploadSetting?.defaultTune || 0}</span>
<span className="body-item-input-slider-val">{currentValue}</span>
</div>
<div>
</div>
@@ -0,0 +1,42 @@
import { Framework } from "@dannadori/voice-changer-client-js"
import React, { useMemo } from "react"
import { useAppState } from "../../../001_provider/001_AppStateProvider"
import { useGuiState } from "../001_GuiStateProvider"

export const FrameworkSelectorRow = () => {
    const appState = useAppState()
    const guiState = useGuiState()
    const frameworkSelectorRow = useMemo(() => {
        const slot = guiState.modelSlotNum
        const fileUploadSetting = appState.serverSetting.fileUploadSettings[slot]
        const currentValue = fileUploadSetting?.framework || Framework.PyTorch

        const onFrameworkChanged = (val: Framework) => {
            appState.serverSetting.setFileUploadSetting(slot, {
                ...appState.serverSetting.fileUploadSettings[slot],
                framework: val
            })
        }
        return (
            <div className="body-row split-3-7 left-padding-1 guided">
                <div className="body-item-title left-padding-2">Framework</div>
                <div className="body-input-container">
                    <div className="body-select-container">
                        <select className="body-select" value={currentValue} onChange={(e) => {
                            onFrameworkChanged(e.target.value as Framework)
                        }}>
                            {
                                Object.values(Framework).map(x => {
                                    return <option key={x} value={x}>{x}</option>
                                })
                            }
                        </select>
                    </div>

                </div>
            </div>
        )
    }, [appState.serverSetting.fileUploadSettings, appState.serverSetting.setFileUploadSetting, guiState.modelSlotNum])

    return frameworkSelectorRow
}
@@ -1,6 +1,7 @@
import React, { useMemo, useEffect } from "react"
import { useGuiState } from "../001_GuiStateProvider"
import { ConfigSelectRow } from "./301-1_ConfigSelectRow"
import { ModelSelectRow } from "./301-2-5_ModelSelectRow copy"
import { ONNXSelectRow } from "./301-2_ONNXSelectRow"
import { PyTorchSelectRow } from "./301-3_PyTorchSelectRow"
import { CorrespondenceSelectRow } from "./301-4_CorrespondenceSelectRow"

@@ -11,9 +12,11 @@ import { HalfPrecisionRow } from "./301-8_HalfPrescisionRow"
import { ModelUploadButtonRow } from "./301-9_ModelUploadButtonRow"
import { ModelSlotRow } from "./301-a_ModelSlotRow"
import { DefaultTuneRow } from "./301-c_DefaultTuneRow"
import { FrameworkSelectorRow } from "./301-d_FrameworkSelector"

export type ModelUploaderRowProps = {
showModelSlot: boolean
showFrameworkSelector: boolean
showConfig: boolean
showOnnx: boolean
showPyTorch: boolean

@@ -26,7 +29,10 @@ export type ModelUploaderRowProps = {
showDescription: boolean
showDefaultTune: boolean

showPyTorchEnableCheckBox: boolean
defaultEnablePyTorch: boolean
onlySelectedFramework: boolean
oneModelFileType: boolean

showOnnxExportButton: boolean
}

@@ -38,6 +44,15 @@ export const ModelUploaderRow = (props: ModelUploaderRowProps) => {
}, [])

const modelUploaderRow = useMemo(() => {
const pytorchEnableCheckBox = props.showPyTorchEnableCheckBox ?
<div>
<input type="checkbox" checked={guiState.showPyTorchModelUpload} onChange={(e) => {
guiState.setShowPyTorchModelUpload(e.target.checked)
}} /> enable PyTorch
</div>
:
<></>

return (
<>
<div className="body-row split-3-3-4 left-padding-1 guided">

@@ -46,17 +61,17 @@ export const ModelUploaderRow = (props: ModelUploaderRowProps) => {
<div></div>
</div>
<div className="body-item-text">
<div>
<input type="checkbox" checked={guiState.showPyTorchModelUpload} onChange={(e) => {
guiState.setShowPyTorchModelUpload(e.target.checked)
}} /> enable PyTorch
</div>
{pytorchEnableCheckBox}
</div>
</div>
{props.showModelSlot ? <ModelSlotRow /> : <></>}
{props.showFrameworkSelector ? <FrameworkSelectorRow /> : <></>}
{props.showConfig ? <ConfigSelectRow /> : <></>}
{props.showOnnx ? <ONNXSelectRow /> : <></>}
{props.showPyTorch && guiState.showPyTorchModelUpload ? <PyTorchSelectRow /> : <></>}

{props.oneModelFileType ? <ModelSelectRow /> : <></>}
{props.showOnnx ? <ONNXSelectRow onlyWhenSelected={props.onlySelectedFramework} /> : <></>}
{props.showPyTorch ? <PyTorchSelectRow onlyWhenSelected={props.onlySelectedFramework} /> : <></>}

{props.showCorrespondence ? <CorrespondenceSelectRow /> : <></>}
{props.showPyTorchCluster ? <PyTorchClusterSelectRow /> : <></>}
{props.showFeature ? <FeatureSelectRow /> : <></>}
@@ -1,5 +1,5 @@
import React, { useMemo } from "react"
import { fileSelector, ModelSamplingRate } from "@dannadori/voice-changer-client-js"
import { ModelSamplingRate } from "@dannadori/voice-changer-client-js"
import { useAppState } from "../../../001_provider/001_AppStateProvider"

export type ModelSamplingRateRowProps = {
@@ -535,6 +535,14 @@ body {
color: rgb(30, 30, 30);
font-size: 0.7rem;
}
.body-item-text-em {
color: rgb(250, 30, 30);
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
font-weight: 700;
}

.body-input-container {
display: flex;
}
@@ -1,5 +1,5 @@
import { useState, useMemo, useEffect } from "react"
import { VoiceChangerServerSetting, ServerInfo, ServerSettingKey, INDEXEDDB_KEY_SERVER, INDEXEDDB_KEY_MODEL_DATA, ClientType, DefaultServerSetting_MMVCv13, DefaultServerSetting_MMVCv15, DefaultServerSetting_so_vits_svc_40v2, DefaultServerSetting_so_vits_svc_40, DefaultServerSetting_so_vits_svc_40_c, DefaultServerSetting_RVC, OnnxExporterInfo, DefaultServerSetting_DDSP_SVC, MAX_MODEL_SLOT_NUM } from "../const"
import { VoiceChangerServerSetting, ServerInfo, ServerSettingKey, INDEXEDDB_KEY_SERVER, INDEXEDDB_KEY_MODEL_DATA, ClientType, DefaultServerSetting_MMVCv13, DefaultServerSetting_MMVCv15, DefaultServerSetting_so_vits_svc_40v2, DefaultServerSetting_so_vits_svc_40, DefaultServerSetting_so_vits_svc_40_c, DefaultServerSetting_RVC, OnnxExporterInfo, DefaultServerSetting_DDSP_SVC, MAX_MODEL_SLOT_NUM, Framework } from "../const"
import { VoiceChangerClient } from "../VoiceChangerClient"
import { useIndexedDB } from "./useIndexedDB"

@@ -22,6 +22,7 @@ export type FileUploadSetting = {
isHalf: boolean
uploaded: boolean
defaultTune: number
framework: Framework
params: string

}

@@ -38,6 +39,7 @@ const InitialFileUploadSetting: FileUploadSetting = {
isHalf: true,
uploaded: false,
defaultTune: 0,
framework: Framework.PyTorch,
params: "{}"
}

@@ -322,6 +324,7 @@ export const useServerSetting = (props: UseServerSettingProps): ServerSettingSta
isHalf: fileUploadSetting.isHalf, // not used as a cache value; it is overwritten by the GUI
uploaded: false, // when loaded from the cache it has not been uploaded yet
defaultTune: fileUploadSetting.defaultTune,
framework: fileUploadSetting.framework,
params: fileUploadSetting.params
}
setItem(`${INDEXEDDB_KEY_MODEL_DATA}_${slot}`, saveData)
@@ -31,8 +31,9 @@ import pyworld as pw

from voice_changer.RVC.custom_vc_infer_pipeline import VC
from infer_pack.models import SynthesizerTrnMs256NSFsid, SynthesizerTrnMs256NSFsid_nono
from .models import SynthesizerTrnMsNSFsid as SynthesizerTrnMs768NSFsid
# from .const import RVC_MODEL_TYPE_NORMAL, RVC_MODEL_TYPE_PITCHLESS, RVC_MODEL_TYPE_WEBUI_256_NORMAL, RVC_MODEL_TYPE_WEBUI_768_NORMAL, RVC_MODEL_TYPE_WEBUI_256_PITCHLESS, RVC_MODEL_TYPE_WEBUI_768_PITCHLESS, RVC_MODEL_TYPE_UNKNOWN
from .models import SynthesizerTrnMsNSFsid as SynthesizerTrnMsNSFsid_webui
from .models import SynthesizerTrnMsNSFsidNono as SynthesizerTrnMsNSFsidNono_webui

from .const import RVC_MODEL_TYPE_RVC, RVC_MODEL_TYPE_WEBUI
from fairseq import checkpoint_utils
providers = ['OpenVINOExecutionProvider', "CUDAExecutionProvider", "DmlExecutionProvider", "CPUExecutionProvider"]

@@ -101,14 +102,10 @@ class RVC:
self.feature_file = None
self.index_file = None

# self.net_g2 = None
# self.onnx_session2 = None
# self.feature_file2 = None
# self.index_file2 = None

self.gpu_num = torch.cuda.device_count()
self.prevVol = 0
self.params = params

self.mps_enabled: bool = getattr(torch.backends, "mps", None) is not None and torch.backends.mps.is_available()
self.currentSlot = -1
print("RVC initialization: ", params)

@@ -142,11 +139,10 @@ class RVC:
except Exception as e:
print("EXCEPTION during loading hubert/contentvec model", e)

# self.switchModel(self.slot)
if self.initialLoad:
self.prepareModel(self.tmp_slot)
self.slot = self.tmp_slot
self.currentSlot = self.slot
self.settings.modelSlotIndex = self.tmp_slot
self.currentSlot = self.settings.modelSlotIndex
self.switchModel()
self.initialLoad = False
@@ -158,6 +154,7 @@ class RVC:
onnxModelFile = self.settings.modelSlots[slot].onnxModelFile
# Create the PyTorch model
if pyTorchModelFile != None and pyTorchModelFile != "":
print("[Voice Changer] Loading Pytorch Model...")
cpt = torch.load(pyTorchModelFile, map_location="cpu")
'''
(1) Distinguish original RVC vs rvc-webui models ⇒ shape of the whole config

@@ -176,7 +173,6 @@ class RVC:
# print("config shape:1::::", cpt["config"], cpt["f0"])
# print("config shape:2::::", (cpt).keys)
config_len = len(cpt["config"])
upsamplingRateDims = len(cpt["config"][12])
if config_len == 18:
self.settings.modelSlots[slot].modelType = RVC_MODEL_TYPE_RVC
self.settings.modelSlots[slot].embChannels = 256
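For orientation, a minimal sketch (not the repository's code) of the checkpoint-type rule this hunk applies; cpt is the checkpoint dict loaded by torch.load above, and the constant values below are placeholders for the ones imported from the .const module at the top of this file:

# Sketch only, under the assumptions stated above.
RVC_MODEL_TYPE_RVC = 0      # placeholder value
RVC_MODEL_TYPE_WEBUI = 1    # placeholder value

def detect_model_type(cpt: dict) -> int:
    # Original RVC checkpoints carry 18 entries in cpt["config"] (256-dim content features);
    # this commit treats any other config shape as an rvc-webui checkpoint.
    return RVC_MODEL_TYPE_RVC if len(cpt["config"]) == 18 else RVC_MODEL_TYPE_WEBUI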
@@ -188,43 +184,19 @@ class RVC:

self.settings.modelSamplingRate = cpt["config"][-1]

# if config_len == 18 and cpt["f0"] == 0:
# print("[Voice Changer] RVC Model Type: RVC_MODEL_TYPE_PITCHLESS")
# self.settings.modelSlots[slot].modelType = RVC_MODEL_TYPE_PITCHLESS
# elif config_len == 18 and cpt["f0"] == 1:
# print("[Voice Changer] RVC Model Type: RVC_MODEL_TYPE_NORMAL")
# self.settings.modelSlots[slot].modelType = RVC_MODEL_TYPE_NORMAL
# elif config_len == 19:
# print("PARAMS:::::::::", cpt["params"])
# embedding = cpt["config"][17]
# if embedding == 256 and cpt["f0"] == 0:
# print("[Voice Changer] RVC Model Type: RVC_MODEL_TYPE_WEBUI_256_PITCHLESS")
# self.settings.modelSlots[slot].modelType = RVC_MODEL_TYPE_WEBUI_256_PITCHLESS
# elif embedding == 256 and cpt["f0"] == 1:
# print("[Voice Changer] RVC Model Type: RVC_MODEL_TYPE_WEBUI_256_NORMAL")
# self.settings.modelSlots[slot].modelType = RVC_MODEL_TYPE_WEBUI_256_NORMAL
# elif embedding == 768 and cpt["f0"] == 0:
# print("[Voice Changer] RVC Model Type: RVC_MODEL_TYPE_WEBUI_768_PITCHLESS")
# self.settings.modelSlots[slot].modelType = RVC_MODEL_TYPE_WEBUI_768_PITCHLESS
# else:
# print("[Voice Changer] RVC Model Type: RVC_MODEL_TYPE_WEBUI_768_NORMAL")
# self.settings.modelSlots[slot].modelType = RVC_MODEL_TYPE_WEBUI_768_NORMAL
# else:
# print("[Voice Changer] RVC Model Type: UNKNOWN")
# self.settings.modelSlots[slot].modelType = RVC_MODEL_TYPE_UNKNOWN

if self.settings.modelSlots[slot].modelType == RVC_MODEL_TYPE_RVC and self.settings.modelSlots[slot].f0 == True:
net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=self.is_half)
elif self.settings.modelSlots[slot].modelType == RVC_MODEL_TYPE_RVC and self.settings.modelSlots[slot].f0 == False:
net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
elif self.settings.modelSlots[slot].modelType == RVC_MODEL_TYPE_WEBUI and self.settings.modelSlots[slot].f0 == True:
net_g = SynthesizerTrnMs768NSFsid(**cpt["params"], is_half=self.is_half)
net_g = SynthesizerTrnMsNSFsid_webui(**cpt["params"], is_half=self.is_half)
elif self.settings.modelSlots[slot].modelType == RVC_MODEL_TYPE_WEBUI and self.settings.modelSlots[slot].f0 == False:
######################
# TBD
######################
print("webui non-f0 is not supported yet")
net_g = SynthesizerTrnMs768NSFsid(**cpt["params"], is_half=self.is_half)
net_g = SynthesizerTrnMsNSFsidNono_webui(**cpt["params"], is_half=self.is_half)

else:
print("unknwon")
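The branch above can be read as a (modelType, f0) to synthesizer-class table. A sketch, assuming the imports added at the top of RVC.py; the dict name is illustrative and not part of the repository:

# Original RVC checkpoints are constructed from positional args (*cpt["config"]),
# rvc-webui checkpoints from keyword args (**cpt["params"]).
SYNTH_BY_TYPE_AND_F0 = {
    (RVC_MODEL_TYPE_RVC, True): SynthesizerTrnMs256NSFsid,
    (RVC_MODEL_TYPE_RVC, False): SynthesizerTrnMs256NSFsid_nono,
    (RVC_MODEL_TYPE_WEBUI, True): SynthesizerTrnMsNSFsid_webui,
    (RVC_MODEL_TYPE_WEBUI, False): SynthesizerTrnMsNSFsidNono_webui,  # the commit marks this path TBD
}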
@@ -234,10 +206,12 @@ class RVC:
net_g = net_g.half()
self.next_net_g = net_g
else:
print("[Voice Changer] Skip Loading Pytorch Model...")
self.next_net_g = None

# Create the ONNX model
if onnxModelFile != None and onnxModelFile != "":
print("[Voice Changer] Loading ONNX Model...")
self.next_onnx_session = ModelWrapper(onnxModelFile)
self.settings.modelSlots[slot].samplingRateOnnx = self.next_onnx_session.getSamplingRate()
self.settings.modelSlots[slot].f0Onnx = self.next_onnx_session.getF0()

@@ -249,15 +223,17 @@ class RVC:
self.settings.modelSlots[slot].f0 = self.settings.modelSlots[slot].f0Onnx

else:
print("[Voice Changer] Skip Loading ONNX Model...")
self.next_onnx_session = None

self.next_feature_file = self.settings.modelSlots[slot].featureFile
self.next_index_file = self.settings.modelSlots[slot].indexFile
self.next_trans = self.settings.modelSlots[slot].defaultTrans

print("[Voice Changer] Prepare done.",)
return self.get_info()

def switchModel(self):
print("[Voice Changer] Switching model..",)
# del self.net_g
# del self.onnx_session
self.net_g = self.next_net_g

@@ -267,6 +243,7 @@ class RVC:
self.settings.tran = self.next_trans
self.next_net_g = None
self.next_onnx_session = None
print("[Voice Changer] Switching model..done",)

def update_settings(self, key: str, val: any):
if key == "onnxExecutionProvider" and self.onnx_session != None:

@@ -285,7 +262,6 @@ class RVC:
print("Onnx is not enabled. Please load model.")
return False
elif key in self.settings.intData:
setattr(self.settings, key, int(val))
if key == "gpu" and val >= 0 and val < self.gpu_num and self.onnx_session != None:
providers = self.onnx_session.get_providers()
print("Providers:", providers)

@@ -296,7 +272,7 @@ class RVC:
# self.switchModel(int(val))
self.tmp_slot = int(val)
self.prepareModel(self.tmp_slot)
self.slot = self.tmp_slot
setattr(self.settings, key, int(val))
elif key in self.settings.floatData:
setattr(self.settings, key, float(val))
elif key in self.settings.strData:

@@ -389,7 +365,7 @@ class RVC:

def _pyTorch_inference(self, data):
if hasattr(self, "net_g") == False or self.net_g == None:
print("[Voice Changer] No pyTorch session.")
print("[Voice Changer] No pyTorch session.", hasattr(self, "net_g"), self.net_g)
raise NoModeLoadedException("pytorch")

if self.settings.gpu < 0 or (self.gpu_num == 0 and self.mps_enabled == False):

@@ -437,12 +413,13 @@ class RVC:
return result

def inference(self, data):
if hasattr(self, "slot") == False:
print("[Voice Changer] No model uploaded.")
raise NoModeLoadedException("model_common")
# if self.settings.modelSlotIndex < -1:
# print("[Voice Changer] No model uploaded.")
# raise NoModeLoadedException("model_common")

if self.currentSlot != self.slot:
self.currentSlot = self.slot
if self.currentSlot != self.settings.modelSlotIndex:
print(f"Switch model {self.currentSlot} -> {self.settings.modelSlotIndex}")
self.currentSlot = self.settings.modelSlotIndex
self.switchModel()

if self.settings.framework == "ONNX":
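The hunk above keys the deferred switch on settings.modelSlotIndex instead of self.slot. A hypothetical helper showing the same pattern (the method name is illustrative, not from the repository):

def _ensure_active_slot(self):
    # Switch only when the requested slot differs from the active one; assumes
    # prepareModel() has already filled next_net_g / next_onnx_session for that slot.
    if self.currentSlot != self.settings.modelSlotIndex:
        print(f"Switch model {self.currentSlot} -> {self.settings.modelSlotIndex}")
        self.currentSlot = self.settings.modelSlotIndex
        self.switchModel()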
@@ -474,7 +451,7 @@ class RVC:
print("[Voice Changer] export2onnx, No pyTorch session.")
return {"status": "ng", "path": f""}

pyTorchModelFile = self.settings.modelSlots[self.slot].pyTorchModelFile  # slot (not currentSlot) so export can run before inference
pyTorchModelFile = self.settings.modelSlots[self.settings.modelSlotIndex].pyTorchModelFile  # slot (not currentSlot) so export can run before inference

if pyTorchModelFile == None:
print("[Voice Changer] export2onnx, No pyTorch filepath.")

@@ -488,10 +465,10 @@ class RVC:
metadata = {
"application": "VC_CLIENT",
"version": "1",
"ModelType": self.settings.modelSlots[self.slot].modelType,
"samplingRate": self.settings.modelSlots[self.slot].samplingRate,
"f0": self.settings.modelSlots[self.slot].f0,
"embChannels": self.settings.modelSlots[self.slot].embChannels,
"ModelType": self.settings.modelSlots[self.settings.modelSlotIndex].modelType,
"samplingRate": self.settings.modelSlots[self.settings.modelSlotIndex].samplingRate,
"f0": self.settings.modelSlots[self.settings.modelSlotIndex].f0,
"embChannels": self.settings.modelSlots[self.settings.modelSlotIndex].embChannels,
}

if torch.cuda.device_count() > 0:
@@ -3,7 +3,7 @@ import torch
from torch import nn
import numpy as np

from infer_pack.models import sr2sr, GeneratorNSF, PosteriorEncoder, ResidualCouplingBlock
from infer_pack.models import sr2sr, GeneratorNSF, PosteriorEncoder, ResidualCouplingBlock, Generator
from infer_pack import commons, attentions


@@ -111,7 +111,6 @@ class SynthesizerTrnMsNSFsid(nn.Module):
            n_layers,
            kernel_size,
            p_dropout,
            # f0=False,
        )
        self.dec = GeneratorNSF(
            inter_channels,

@@ -169,3 +168,108 @@ class SynthesizerTrnMsNSFsid(nn.Module):
        z = self.flow(z_p, x_mask, g=g, reverse=True)
        o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)
        return o, x_mask, (z, z_p, m_p, logs_p)


class SynthesizerTrnMsNSFsidNono(nn.Module):
    def __init__(
        self,
        spec_channels,
        segment_size,
        inter_channels,
        hidden_channels,
        filter_channels,
        n_heads,
        n_layers,
        kernel_size,
        p_dropout,
        resblock,
        resblock_kernel_sizes,
        resblock_dilation_sizes,
        upsample_rates,
        upsample_initial_channel,
        upsample_kernel_sizes,
        spk_embed_dim,
        gin_channels,
        emb_channels,
        sr=None,
        **kwargs
    ):
        super().__init__()
        self.spec_channels = spec_channels
        self.inter_channels = inter_channels
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.resblock = resblock
        self.resblock_kernel_sizes = resblock_kernel_sizes
        self.resblock_dilation_sizes = resblock_dilation_sizes
        self.upsample_rates = upsample_rates
        self.upsample_initial_channel = upsample_initial_channel
        self.upsample_kernel_sizes = upsample_kernel_sizes
        self.segment_size = segment_size
        self.gin_channels = gin_channels
        self.emb_channels = emb_channels
        # self.hop_length = hop_length#
        self.spk_embed_dim = spk_embed_dim
        self.enc_p = TextEncoder(
            inter_channels,
            hidden_channels,
            filter_channels,
            emb_channels,
            n_heads,
            n_layers,
            kernel_size,
            p_dropout,
            f0=False,
        )
        self.dec = Generator(
            inter_channels,
            resblock,
            resblock_kernel_sizes,
            resblock_dilation_sizes,
            upsample_rates,
            upsample_initial_channel,
            upsample_kernel_sizes,
            gin_channels=gin_channels,
        )
        self.enc_q = PosteriorEncoder(
            spec_channels,
            inter_channels,
            hidden_channels,
            5,
            1,
            16,
            gin_channels=gin_channels,
        )
        self.flow = ResidualCouplingBlock(
            inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
        )
        self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
        print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)

    def remove_weight_norm(self):
        self.dec.remove_weight_norm()
        self.flow.remove_weight_norm()
        self.enc_q.remove_weight_norm()

    def forward(self, phone, phone_lengths, y, y_lengths, ds):  # ds is the speaker id, [bs, 1]
        g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1]; the trailing 1 is the time axis, broadcast
        m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
        z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
        z_p = self.flow(z, y_mask, g=g)
        z_slice, ids_slice = commons.rand_slice_segments(
            z, y_lengths, self.segment_size
        )
        o = self.dec(z_slice, g=g)
        return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)

    def infer(self, phone, phone_lengths, sid, max_len=None):
        g = self.emb_g(sid).unsqueeze(-1)
        m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
        z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
        z = self.flow(z_p, x_mask, g=g, reverse=True)
        o = self.dec((z * x_mask)[:, :, :max_len], g=g)
        return o, x_mask, (z, z_p, m_p, logs_p)