diff --git a/client/demo/src/css/App.css b/client/demo/src/css/App.css
index fd5d32e9..80f20e0f 100644
--- a/client/demo/src/css/App.css
+++ b/client/demo/src/css/App.css
@@ -131,6 +131,59 @@ body {
         width: 40%;
     }
 }
+.split-3-2-2-2-1{
+    display: flex;
+    width: 100%;
+    justify-content: center;
+    margin: 1px 0px 1px 0px;
+    & > div:nth-child(1) {
+        left: 0px;
+        width: 30%;
+    }
+    & > div:nth-child(2) {
+        left: 30%;
+        width: 20%;
+    }
+    & > div:nth-child(3) {
+        left: 50%;
+        width: 20%;
+    }
+    & > div:nth-child(4) {
+        left: 70%;
+        width: 20%;
+    }
+    & > div:nth-child(5) {
+        left: 90%;
+        width: 10%;
+    }
+}
+.split-3-1-1-1-4{
+    display: flex;
+    width: 100%;
+    justify-content: center;
+    margin: 1px 0px 1px 0px;
+    & > div:nth-child(1) {
+        left: 0px;
+        width: 30%;
+    }
+    & > div:nth-child(2) {
+        left: 30%;
+        width: 10%;
+    }
+    & > div:nth-child(3) {
+        left: 40%;
+        width: 10%;
+    }
+    & > div:nth-child(4) {
+        left: 50%;
+        width: 10%;
+    }
+    & > div:nth-child(5) {
+        left: 60%;
+        width: 40%;
+    }
+
+}
 .underline {
     border-bottom: 3px solid #333;
 }
@@ -165,12 +218,14 @@ body {
     .body-button-container {
         display: flex;
         flex-direction: row;
+        align-items: center;
         .body-button {
             user-select: none;
             border: solid 1px #333;
             border-radius: 2px;
             padding: 2px;
             cursor:pointer;
+            vertical-align:middle;
         }
         .body-button-active {
             user-select: none;
diff --git a/client/demo/src/index.tsx b/client/demo/src/index.tsx
index 721c2940..687ef912 100644
--- a/client/demo/src/index.tsx
+++ b/client/demo/src/index.tsx
@@ -2,16 +2,22 @@ import * as React from "react";
 import { createRoot } from "react-dom/client";
 import "./css/App.css"
 import { useEffect, useMemo, useRef, useState } from "react";
-import { VoiceChnagerClient } from "@dannadori/voice-changer-client-js"
+import { VoiceChnagerClient, createDummyMediaStream } from "@dannadori/voice-changer-client-js"
 import { useMicrophoneOptions } from "./options_microphone";
 
 const container = document.getElementById("app")!;
 const root = createRoot(container);
 
 const App = () => {
-    const { component: microphoneSettingComponent, options: microphoneOptions, params: microphoneParams, isStarted } = useMicrophoneOptions()
+
+    const audioContextRef = useRef()
     const voiceChangerClientRef = useRef(null)
     const [clientInitialized, setClientInitialized] = useState(false)
+    const [bufferingTime, setBufferingTime] = useState(0)
+    const [responseTime, setResponseTime] = useState(0)
+    const [volume, setVolume] = useState(0)
+
+    const { component: microphoneSettingComponent, options: microphoneOptions, params: microphoneParams, isStarted } = useMicrophoneOptions(audioContextRef.current)
 
     const onClearSettingClicked = async () => {
         //@ts-ignore
@@ -25,16 +31,24 @@ const App = () => {
 
     useEffect(() => {
         const initialized = async () => {
-            const ctx = new AudioContext()
-            voiceChangerClientRef.current = new VoiceChnagerClient(ctx, true, {
-                notifySendBufferingTime: (val: number) => { console.log(`buf:${val}`) },
-                notifyResponseTime: (val: number) => { console.log(`res:${val}`) },
+            audioContextRef.current = new AudioContext()
+            voiceChangerClientRef.current = new VoiceChnagerClient(audioContextRef.current, true, {
+                notifySendBufferingTime: (val: number) => {
+                    setBufferingTime(val)
+                },
+                notifyResponseTime: (val: number) => {
+                    setResponseTime(val)
+                },
                 notifyException: (mes: string) => {
                     if (mes.length > 0) {
                         console.log(`error:${mes}`)
                     }
                 }
-            }, { notifyVolume: (vol: number) => { } })
+            }, {
+                notifyVolume: (vol: number) => {
+                    setVolume(vol)
+                }
+            })
             await voiceChangerClientRef.current.isInitialized()
             setClientInitialized(true)
@@ -46,10 +60,9 @@ const App = () => {
     }, [])
 
     useEffect(() => {
-        console.log("START!!!", isStarted)
         const start = async () => {
             if (!voiceChangerClientRef.current || !clientInitialized) {
-                console.log("client is not initialized")
+                // console.log("client is not initialized")
                 return
             }
             // if (!microphoneOptions.audioInputDeviceId || microphoneOptions.audioInputDeviceId.length == 0) {
@@ -62,7 +75,7 @@ const App = () => {
         }
         const stop = async () => {
             if (!voiceChangerClientRef.current || !clientInitialized) {
-                console.log("client is not initialized")
+                // console.log("client is not initialized")
                 return
             }
             voiceChangerClientRef.current.stop()
@@ -74,29 +87,29 @@ const App = () => {
         }
     }, [isStarted])
 
-    // useEffect(() => {
-    //     if (!voiceChangerClientRef.current || !clientInitialized) {
-    //         console.log("client is not initialized")
-    //         return
-    //     }
-    //     voiceChangerClientRef.current.setServerUrl(microphoneOptions.mmvcServerUrl, microphoneOptions.protocol, false)
-    // }, [microphoneOptions.mmvcServerUrl, microphoneOptions.protocol])
 
     useEffect(() => {
         const changeInput = async () => {
             if (!voiceChangerClientRef.current || !clientInitialized) {
-                console.log("client is not initialized")
+                // console.log("client is not initialized")
                 return
             }
-            await voiceChangerClientRef.current.setup(microphoneOptions.audioInputDeviceId!, microphoneOptions.bufferSize, microphoneOptions.forceVfDisable)
+            if (!microphoneOptions.audioInput || microphoneOptions.audioInput == "none") {
+                const ms = createDummyMediaStream(audioContextRef.current!)
+                await voiceChangerClientRef.current.setup(ms, microphoneOptions.bufferSize, microphoneOptions.forceVfDisable)
+
+            } else {
+                await voiceChangerClientRef.current.setup(microphoneOptions.audioInput, microphoneOptions.bufferSize, microphoneOptions.forceVfDisable)
+            }
+
         }
         changeInput()
-    }, [microphoneOptions.audioInputDeviceId!, microphoneOptions.bufferSize, microphoneOptions.forceVfDisable])
+    }, [microphoneOptions.audioInput, microphoneOptions.bufferSize, microphoneOptions.forceVfDisable])
 
     useEffect(() => {
         if (!voiceChangerClientRef.current || !clientInitialized) {
-            console.log("client is not initialized")
+            // console.log("client is not initialized")
            return
         }
         voiceChangerClientRef.current.setInputChunkNum(microphoneOptions.inputChunkNum)
@@ -104,7 +117,7 @@ const App = () => {
 
     useEffect(() => {
         if (!voiceChangerClientRef.current || !clientInitialized) {
-            console.log("client is not initialized")
+            // console.log("client is not initialized")
            return
         }
         voiceChangerClientRef.current.setVoiceChangerMode(microphoneOptions.voiceChangerMode)
@@ -131,7 +144,19 @@ const App = () => {
         )
     }, [])
-
+    const performanceRow = useMemo(() => {
+        return (
+            <>
+                <div className="split-3-1-1-1-4">
+                    <div>monitor:</div>
+                    <div>vol(db):{volume.toFixed(4)}</div>
+                    <div>buf(ms):{bufferingTime}</div>
+                    <div>res(ms):{responseTime}</div>
+                    <div></div>
+                </div>
+            </>
+        )
+    }, [volume, bufferingTime, responseTime])
 
     return (
@@ -140,6 +165,7 @@ const App = () => {
                 {clearRow}
+                {performanceRow}
                 {microphoneSettingComponent}
diff --git a/client/demo/src/options_microphone.tsx b/client/demo/src/options_microphone.tsx
index b4026f43..1963ffd7 100644
--- a/client/demo/src/options_microphone.tsx
+++ b/client/demo/src/options_microphone.tsx
@@ -1,7 +1,7 @@
 import * as React from "react";
 import { useEffect, useMemo, useState } from "react";
 import { CHROME_EXTENSION } from "./const";
-import { DefaultVoiceChangerRequestParamas, VoiceChangerOptions, VoiceChangerRequestParamas, DefaultVoiceChangerOptions, SampleRate, BufferSize, VoiceChangerMode, Protocol } from "@dannadori/voice-changer-client-js"
+import { DefaultVoiceChangerRequestParamas, VoiceChangerOptions, VoiceChangerRequestParamas, DefaultVoiceChangerOptions, SampleRate, BufferSize, VoiceChangerMode, Protocol, fileSelectorAsDataURL, createDummyMediaStream } from "@dannadori/voice-changer-client-js"
 
 
 const reloadDevices = async () => {
@@ -20,7 +20,15 @@ const reloadDevices = async () => {
         label: "none",
         toJSON: () => { }
     })
-    return audioInputs
+    audioInputs.push({
+        deviceId: "file",
+        groupId: "file",
+        kind: "audioinput",
+        label: "file",
+        toJSON: () => { }
+    })
+    const audioOutputs = mediaDeviceInfos.filter(x => { return x.kind == "audiooutput" })
+    return [audioInputs, audioOutputs]
 }
 
 
@@ -31,11 +39,23 @@ export type MicrophoneOptionsComponent = {
     isStarted: boolean
 }
 
-export const useMicrophoneOptions = (): MicrophoneOptionsComponent => {
+
+export const useMicrophoneOptions = (audioContext?: AudioContext): MicrophoneOptionsComponent => {
     // GUI Info
-    const [audioDeviceInfo, setAudioDeviceInfo] = useState<MediaDeviceInfo[]>([])
+    const [inputAudioDeviceInfo, setInputAudioDeviceInfo] = useState<MediaDeviceInfo[]>([])
+    const [outputAudioDeviceInfo, setOutputAudioDeviceInfo] = useState<MediaDeviceInfo[]>([])
     const [editSpeakerTargetId, setEditSpeakerTargetId] = useState(0)
     const [editSpeakerTargetName, setEditSpeakerTargetName] = useState("")
+    const [audioInput, setAudioInput] = useState("none")
+    const audioOutputRef = React.useRef("")
+    const [audioOutput, _setAudioOutput] = useState("none")
+    const setAudioOutput = (id: string) => {
+        audioOutputRef.current = id
+        _setAudioOutput(audioOutputRef.current)
+        const audio = document.getElementById("audio-output") as HTMLAudioElement
+        //@ts-ignore
+        audio.setSinkId(audioOutputRef.current)
+    }
 
     // const [options, setOptions] = useState(InitMicrophoneOptionsState)
     const [params, setParams] = useState(DefaultVoiceChangerRequestParamas)
@@ -45,7 +65,8 @@ export const useMicrophoneOptions = (): MicrophoneOptionsComponent => {
     useEffect(() => {
         const initialize = async () => {
             const audioInfo = await reloadDevices()
-            setAudioDeviceInfo(audioInfo)
+            setInputAudioDeviceInfo(audioInfo[0])
+            setOutputAudioDeviceInfo(audioInfo[1])
 
             if (CHROME_EXTENSION) {
                 //@ts-ignore
@@ -78,7 +99,6 @@ export const useMicrophoneOptions = (): MicrophoneOptionsComponent => {
         }
         const startClassName = isStarted ? "body-button-active" : "body-button-stanby"
         const stopClassName = isStarted ? "body-button-stanby" : "body-button-active"
-        console.log("ClassName", startClassName, stopClassName)
 
         return (
@@ -95,9 +115,113 @@ export const useMicrophoneOptions = (): MicrophoneOptionsComponent => {
 
     }, [isStarted])
 
-    const setAudioInputDeviceId = async (deviceId: string) => {
-        setOptions({ ...options, audioInputDeviceId: deviceId })
-    }
+    const audioInputRow = useMemo(() => {
+        return (
+            <div>
+                <div>AudioInput</div>
+                <div>
+                    <select value={audioInput} onChange={(e) => { setAudioInput(e.target.value) }}>
+                        {
+                            inputAudioDeviceInfo.map(x => {
+                                return <option value={x.deviceId} key={x.deviceId}>{x.label}</option>
+                            })
+                        }
+                    </select>
+                </div>
+            </div>
+        )
+    }, [inputAudioDeviceInfo, audioInput])
+
+    const audioMediaInputRow = useMemo(() => {
+        console.log("GEN:audioMediaInputRow1")
+        if (audioInput != "file") {
+            console.log("GEN:audioMediaInputRow2")
+            return <></>
+        }
+        console.log("GEN:audioMediaInputRow3")
+
+        const onFileLoadClicked = async () => {
+            const url = await fileSelectorAsDataURL("")
+            const audio = document.getElementById("body-audio-converted") as HTMLAudioElement
+            audio.src = url
+            // audio.volume = 0.0
+            // audio.onplay = () => {
+            //     //@ts-ignore
+            //     const ms = audio.captureStream()
+            //     setOptions({ ...options, audioInput: ms })
+            // }
+            await audio.play()
+            const src = audioContext!.createMediaElementSource(audio);
+            const dst = audioContext!.createMediaStreamDestination()
+            src.connect(dst)
+            setOptions({ ...options, audioInput: dst.stream })
+
+
+            const audio_org = document.getElementById("body-audio-original") as HTMLAudioElement
+            audio_org.src = url
+            audio_org.pause()
+
+            audio_org.onplay = () => {
+                console.log(audioOutputRef.current)
+                // @ts-ignore
+                audio_org.setSinkId(audioOutputRef.current)
+            }
+        }
+
+        return (
+            <div>
+                <div>
+                    <div>
+                        org: <audio id="body-audio-original" controls></audio>
+                    </div>
+                    <div>
+                        cnv: <audio id="body-audio-converted" controls></audio>
+                    </div>
+                </div>
+                <div className="body-button-container">
+                    <div className="body-button" onClick={onFileLoadClicked}>load</div>
+                </div>
+            </div>
+        )
+    }, [audioInput, audioOutput])
+    console.log("GEN:audioMediaInputRow3")
+    useEffect(() => {
+        if (!audioContext) {
+            return
+        }
+        if (audioInput == "none") {
+            const ms = createDummyMediaStream(audioContext)
+            setOptions({ ...options, audioInput: ms })
+        } else if (audioInput == "file") {
+            // const audio = document.getElementById("body-audio") as HTMLAudioElement
+            // //@ts-ignore
+            // const ms = audio.captureStream()
+            // setOptions({ ...options, audioInput: ms })
+        } else {
+            setOptions({ ...options, audioInput: audioInput })
+        }
+    }, [audioContext, audioInput])
+
+    const audioOutputRow = useMemo(() => {
+        return (
+            <div>
+                <div>AudioOutput</div>
+                <div>
+                    <select value={audioOutput} onChange={(e) => { setAudioOutput(e.target.value) }}>
+                        {
+                            outputAudioDeviceInfo.map(x => {
+                                return <option value={x.deviceId} key={x.deviceId}>{x.label}</option>
+                            })
+                        }
+                    </select>
+                    <audio id="audio-output"></audio>
+                </div>
+            </div>
+        )
+    }, [outputAudioDeviceInfo, audioOutput])
+
 
     const onSetServerClicked = async () => {
         const input = document.getElementById("mmvc-server-url") as HTMLInputElement
@@ -197,19 +321,9 @@ export const useMicrophoneOptions = (): MicrophoneOptionsComponent => {
-                <div>Microphone</div>
-                <div>
-                    <select onChange={(e) => { setAudioInputDeviceId(e.target.value) }}>
-                        {audioDeviceInfo.map(x => { return <option value={x.deviceId} key={x.deviceId}>{x.label}</option> })}
-                    </select>
-                </div>
+                {audioInputRow}
+                {audioMediaInputRow}
+                {audioOutputRow}
                 <div>Sample Rate</div>
@@ -339,7 +453,7 @@ export const useMicrophoneOptions = (): MicrophoneOptionsComponent => {
         )
-    }, [audioDeviceInfo, editSpeakerTargetId, editSpeakerTargetName, startButtonRow, params, options])
+    }, [inputAudioDeviceInfo, outputAudioDeviceInfo, editSpeakerTargetId, editSpeakerTargetName, startButtonRow, audioInputRow, audioMediaInputRow, audioOutputRow, params, options])
 
     return {
         component: settings,
diff --git a/client/lib/src/AudioStreamer.ts b/client/lib/src/AudioStreamer.ts
index ced063a5..acb4d928 100644
--- a/client/lib/src/AudioStreamer.ts
+++ b/client/lib/src/AudioStreamer.ts
@@ -187,8 +187,9 @@ export class AudioStreamer extends Duplex {
 
     private sendBuffer = async (newBuffer: Uint8Array) => {
         if (this.serverUrl.length == 0) {
-            console.error("no server url")
-            throw "no server url"
+            console.warn("no server url")
+            return
+            // throw "no server url"
         }
         const timestamp = Date.now()
         // console.log("REQUEST_MESSAGE:", [this.gpu, this.srcId, this.dstId, timestamp, newBuffer.buffer])
diff --git a/client/lib/src/VoiceChangerClient.ts b/client/lib/src/VoiceChangerClient.ts
index 3b18199b..44c86f54 100644
--- a/client/lib/src/VoiceChangerClient.ts
+++ b/client/lib/src/VoiceChangerClient.ts
@@ -118,6 +118,7 @@ export class VoiceChnagerClient {
         // create mic stream
         if (this.micStream) {
             console.log("DESTROY!!!!!!!!!!!!!!!!!!!")
+            this.micStream.unpipe()
             // this.micStream.stop()
             this.micStream.destroy()
             this.micStream = null
@@ -139,9 +140,12 @@ export class VoiceChnagerClient {
             console.log("VF disabled")
             this.micStream.setStream(this.currentMediaStream) // input device -> mic stream
         }
-        this.micStream.pipe(this.audioStreamer!) // mic stream -> audio streamer
-
-
+        this.micStream.pipe(this.audioStreamer) // mic stream -> audio streamer
+        if (!this._isVoiceChanging) {
+            this.micStream.pauseRecording()
+        } else {
+            this.micStream.playRecording()
+        }
     }
 
     get stream(): MediaStream {
         return this.currentMediaStreamAudioDestinationNode.stream
diff --git a/client/lib/src/const.ts b/client/lib/src/const.ts
index 35445da8..c7abaafa 100644
--- a/client/lib/src/const.ts
+++ b/client/lib/src/const.ts
@@ -16,8 +16,7 @@ export type VoiceChangerRequestParamas = {
 }
 
 export type VoiceChangerOptions = {
-    audioInputDeviceId: string | null,
-    mediaStream: MediaStream | null,
+    audioInput: string | MediaStream | null,
     mmvcServerUrl: string,
     protocol: Protocol,
     sampleRate: SampleRate, // 48000Hz
@@ -77,8 +76,7 @@ export const DefaultVoiceChangerRequestParamas: VoiceChangerRequestParamas = {
 }
 
 export const DefaultVoiceChangerOptions: VoiceChangerOptions = {
-    audioInputDeviceId: null,
-    mediaStream: null,
+    audioInput: null,
     mmvcServerUrl: "https://192.168.0.3:18888/test",
     protocol: "sio",
     sampleRate: 48000,
diff --git a/client/lib/src/index.ts b/client/lib/src/index.ts
index 61f5973b..f2c09c06 100644
--- a/client/lib/src/index.ts
+++ b/client/lib/src/index.ts
@@ -1,2 +1,3 @@
 export * from "./const"
 export * from "./VoiceChangerClient"
+export * from "./util"
\ No newline at end of file
diff --git a/client/lib/src/util.ts b/client/lib/src/util.ts
index 0db9d58b..205720aa 100644
--- a/client/lib/src/util.ts
+++ b/client/lib/src/util.ts
@@ -10,3 +10,48 @@ export const createDummyMediaStream = (audioContext: AudioContext) => {
     oscillatorNode.start();
     return dummyOutputNode.stream;
 };
+
+export const fileSelector = async (regex: string) => {
+    const fileInput = document.createElement("input");
+    fileInput.type = "file";
+    const p = new Promise<File>((resolve, reject) => {
+        fileInput.onchange = (e) => {
+            if (e.target instanceof HTMLInputElement == false) {
+                console.log("invalid target!", e.target)
+                reject("invalid target")
+                return null
+            }
+            const target = e.target as HTMLInputElement
+            if (!target.files || target.files.length == 0) {
+                reject("no file selected")
+                return null
+            }
+
+            if (regex != "" && !target.files[0].type.match(regex)) {
+                reject(`not target file type ${target.files[0].type}`);
+                return null
+            }
+            resolve(target.files[0])
+            return null
+        };
+        fileInput.click();
+    });
+    return await p
+}
+
+export const fileSelectorAsDataURL = async (regex: string) => {
+    const f = await fileSelector(regex)
+    if (!f) {
+        return f
+    }
+
+    const url = await new Promise<string>((resolve) => {
+        const reader = new FileReader();
+        reader.onload = () => {
+            console.log("load data", reader.result as string);
+            resolve(reader.result as string);
+        };
+        reader.readAsDataURL(f);
+    })
+    return url
+}