bugfix: analyzer
parent bde3bc5933
commit b7b1e583ee
client/demo/dist/index.js (vendored)
File diff suppressed because one or more lines are too long
@@ -14,12 +14,18 @@ export type StateControls = {
type FrontendManagerState = {
    stateControls: StateControls
    isConverting: boolean,
    isAnalyzing: boolean
};

export type FrontendManagerStateAndMethod = FrontendManagerState & {
    setIsConverting: (val: boolean) => void
    setIsAnalyzing: (val: boolean) => void
}

export const useFrontendManager = (): FrontendManagerStateAndMethod => {
    const [isConverting, setIsConverting] = useState<boolean>(false)
    const [isAnalyzing, setIsAnalyzing] = useState<boolean>(false)

    // (1) Controller Switch
    const openServerControlCheckbox = useStateControlCheckbox(OpenServerControlCheckbox);
@@ -51,7 +57,11 @@ export const useFrontendManager = (): FrontendManagerStateAndMethod => {
            openSpeakerSettingCheckbox,
            openConverterSettingCheckbox,
            openAdvancedSettingCheckbox
        }
    },
    isConverting,
    setIsConverting,
    isAnalyzing,
    setIsAnalyzing
};
return returnValue;
};
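A note on how these new flags are meant to be consumed: any component can read the shared converting/analyzing state through the app state instead of keeping a local copy. The sketch below is hypothetical (the component name and import path are assumptions, not part of this commit); it only illustrates reading the state added above.

import React from "react";
import { useAppState } from "./001_provider/001_AppStateProvider"; // hypothetical path

// Hypothetical consumer: reads the shared flags instead of holding local state.
export const StatusIndicator = () => {
    const { frontendManagerState } = useAppState();
    const { isConverting, isAnalyzing } = frontendManagerState;
    // Derive a single status string from the two shared flags.
    const status = isAnalyzing ? "analyzing..." : isConverting ? "converting" : "idle";
    return <div>{status}</div>;
};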
@@ -4,7 +4,6 @@ import { AnimationTypes, HeaderButton, HeaderButtonProps } from "./components/10

export const useServerControl = () => {
    const appState = useAppState()
    const [isStarted, setIsStarted] = useState<boolean>(false)
    const [startWithAudioContextCreate, setStartWithAudioContextCreate] = useState<boolean>(false)

    const accodionButton = useMemo(() => {
@@ -23,7 +22,7 @@ export const useServerControl = () => {
        if (!startWithAudioContextCreate) {
            return
        }
        setIsStarted(true)
        appState.frontendManagerState.setIsConverting(true)
        appState.clientSetting.start()
    }, [startWithAudioContextCreate])
@@ -36,16 +35,16 @@ export const useServerControl = () => {
            })
            setStartWithAudioContextCreate(true)
        } else {
            setIsStarted(true)
            appState.frontendManagerState.setIsConverting(true)
            await appState.clientSetting.start()
        }
    }
    const onStopClicked = async () => {
        setIsStarted(false)
        appState.frontendManagerState.setIsConverting(false)
        await appState.clientSetting.stop()
    }
    const startClassName = isStarted ? "body-button-active" : "body-button-stanby"
    const stopClassName = isStarted ? "body-button-stanby" : "body-button-active"
    const startClassName = appState.frontendManagerState.isConverting ? "body-button-active" : "body-button-stanby"
    const stopClassName = appState.frontendManagerState.isConverting ? "body-button-stanby" : "body-button-active"

    return (
        <div className="body-row split-3-2-2-3 left-padding-1 guided">
@@ -60,7 +59,7 @@ export const useServerControl = () => {
                </div>
            </div>
        )
    }, [isStarted, appState.clientSetting.start, appState.clientSetting.stop])
    }, [appState.frontendManagerState.isConverting, appState.clientSetting.start, appState.clientSetting.stop])

    const performanceRow = useMemo(() => {
        return (
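The dependency-array change in the hunk above is the heart of this part of the fix: the memoized row now re-renders when the shared isConverting flag flips, instead of tracking the removed local isStarted state. A reduced sketch of the pattern (the variable name is hypothetical):

// A memoized value must list every external value it reads in its dependency array,
// otherwise it keeps rendering a stale flag after the state changes elsewhere.
const startRow = useMemo(() => {
    const label = appState.frontendManagerState.isConverting ? "Stop" : "Start";
    return <div>{label}</div>;
}, [appState.frontendManagerState.isConverting]); // shared flag listed as a dependency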
@@ -134,6 +134,14 @@ export const useQualityControl = (): QualityControlState => {
    const onRecordStopClicked = async () => {
        setRecording(false)
        await appState.serverSetting.setRecordIO(0)
    }
    const onRecordAnalizeClicked = async () => {
        if (appState.frontendManagerState.isConverting) {
            alert("please stop voice conversion. 解析処理と音声変換を同時に行うことはできません。音声変化をストップしてください。")
            return
        }
        appState.frontendManagerState.setIsAnalyzing(true)
        await appState.serverSetting.setRecordIO(2)
        // set spectrogram (dio)
        const imageDio = document.getElementById("body-image-container-img-dio") as HTMLImageElement
        imageDio.src = "/tmp/analyze-dio.png?" + new Date().getTime()
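The timestamp appended to the image URL above is a cache-busting step: /tmp/analyze-dio.png is rewritten on the server, so the browser must be forced to re-fetch the same URL. A small helper along these lines could factor the pattern out; it is purely illustrative and not part of this commit:

// Hypothetical helper: reload an <img> whose source file was regenerated on the server.
const refreshImage = (elementId: string, src: string) => {
    const img = document.getElementById(elementId) as HTMLImageElement | null;
    if (!img) {
        return;
    }
    // A timestamp query string defeats the browser cache for an otherwise unchanged URL.
    img.src = `${src}?${new Date().getTime()}`;
};

// Usage mirroring the lines above:
refreshImage("body-image-container-img-dio", "/tmp/analyze-dio.png");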
@@ -157,10 +165,15 @@ export const useQualityControl = (): QualityControlState => {
        wavOutput.controls = true
        // @ts-ignore
        wavOutput.setSinkId(audioOutputForGUI)
        appState.frontendManagerState.setIsAnalyzing(false)
    }

    const startClassName = recording ? "body-button-active" : "body-button-stanby"
    const stopClassName = recording ? "body-button-stanby" : "body-button-active"
    const analyzeClassName = appState.frontendManagerState.isAnalyzing ? "body-button-active" : "body-button-stanby"
    const analyzeLabel = appState.frontendManagerState.isAnalyzing ? "wait..." : "Analyze"

    return (
        <>
@@ -176,6 +189,7 @@ export const useQualityControl = (): QualityControlState => {
            <div className="body-button-container">
                <div onClick={onRecordStartClicked} className={startClassName}>Start</div>
                <div onClick={onRecordStopClicked} className={stopClassName}>Stop</div>
                <div onClick={onRecordAnalizeClicked} className={analyzeClassName}>{analyzeLabel}</div>
            </div>
        </div>
@@ -238,7 +252,7 @@ export const useQualityControl = (): QualityControlState => {
            </div>
        </>
    )
    }, [appState.serverSetting.setting.recordIO, appState.serverSetting.setRecordIO, outputAudioDeviceInfo, audioOutputForGUI])
    }, [appState.serverSetting.setting.recordIO, appState.serverSetting.setRecordIO, outputAudioDeviceInfo, audioOutputForGUI, appState.frontendManagerState.isAnalyzing, appState.frontendManagerState.isConverting])

    const QualityControlContent = useMemo(() => {
        return (
@@ -1,8 +1,10 @@
import os, shutil
import os
import shutil
from fastapi import UploadFile

# UPLOAD_DIR = "model_upload_dir"


def upload_file(upload_dirname: str, file: UploadFile, filename: str):
    if file and filename:
        fileobj = file.file
@@ -13,10 +15,11 @@ def upload_file(upload_dirname:str, file:UploadFile, filename: str):
        return {"status": "OK", "msg": f"uploaded files {filename} "}
    return {"status": "ERROR", "msg": "uploaded file is not found."}


def concat_file_chunks(upload_dirname: str, filename: str, chunkNum: int, dest_dirname: str):
    target_file_name = os.path.join(dest_dirname, filename)
    if os.path.exists(target_file_name):
        os.unlink(target_file_name)
        os.remove(target_file_name)
    with open(target_file_name, "ab") as target_file:
        for i in range(chunkNum):
            chunkName = f"{filename}_{i}"
@@ -24,7 +27,6 @@ def concat_file_chunks(upload_dirname:str, filename:str, chunkNum:int, dest_dirn
            stored_chunk_file = open(chunk_file_path, 'rb')
            target_file.write(stored_chunk_file.read())
            stored_chunk_file.close()
            os.unlink(chunk_file_path)
            os.remove(chunk_file_path)
    target_file.close()
    return {"status": "OK", "msg": f"concat files {target_file_name} "}
@@ -2,12 +2,13 @@
from fastapi.responses import FileResponse
import os


def mod_get_model(modelFile: str):
    modelPath = os.path.join("MMVC_Trainer/logs", modelFile)
    return FileResponse(path=modelPath)


def mod_delete_model(modelFile: str):
    modelPath = os.path.join("MMVC_Trainer/logs", modelFile)
    os.unlink(modelPath)
    os.remove(modelPath)
    return {"Model deleted": f"{modelFile}"}
@@ -131,14 +131,14 @@ class VoiceChanger():
        mock_stream_out = MockStream(24000)
        stream_output_file = os.path.join(TMP_DIR, "out.wav")
        if os.path.exists(stream_output_file):
            os.unlink(stream_output_file)
            os.remove(stream_output_file)
        mock_stream_out.open_outputfile(stream_output_file)
        self.stream_out = mock_stream_out

        mock_stream_in = MockStream(24000)
        stream_input_file = os.path.join(TMP_DIR, "in.wav")
        if os.path.exists(stream_input_file):
            os.unlink(stream_input_file)
            os.remove(stream_input_file)
        mock_stream_in.open_outputfile(stream_input_file)
        self.stream_in = mock_stream_in
@@ -235,6 +235,8 @@ class VoiceChanger():
        if key == "recordIO" and val == 1:
            self._setupRecordIO()
        if key == "recordIO" and val == 0:
            pass
        if key == "recordIO" and val == 2:
            try:
                stream_input_file = os.path.join(TMP_DIR, "in.wav")
                analyze_file_dio = os.path.join(TMP_DIR, "analyze-dio.png")