gpu update
This commit is contained in:
parent 1363b1d07f
commit 07a14e6bcd
@@ -131,6 +131,59 @@ body {
            width: 40%;
        }
    }
    .split-3-2-2-2-1 {
        display: flex;
        width: 100%;
        justify-content: center;
        margin: 1px 0px 1px 0px;
        & > div:nth-child(1) {
            left: 0px;
            width: 30%;
        }
        & > div:nth-child(2) {
            left: 30%;
            width: 20%;
        }
        & > div:nth-child(3) {
            left: 50%;
            width: 20%;
        }
        & > div:nth-child(4) {
            left: 70%;
            width: 20%;
        }
        & > div:nth-child(5) {
            left: 90%;
            width: 10%;
        }
    }
    .split-3-1-1-1-4 {
        display: flex;
        width: 100%;
        justify-content: center;
        margin: 1px 0px 1px 0px;
        & > div:nth-child(1) {
            left: 0px;
            width: 30%;
        }
        & > div:nth-child(2) {
            left: 30%;
            width: 10%;
        }
        & > div:nth-child(3) {
            left: 40%;
            width: 10%;
        }
        & > div:nth-child(4) {
            left: 50%;
            width: 10%;
        }
        & > div:nth-child(5) {
            left: 60%;
            width: 40%;
        }

    }
    .underline {
        border-bottom: 3px solid #333;
    }
@@ -165,12 +218,14 @@ body {
    .body-button-container {
        display: flex;
        flex-direction: row;
        align-items: center;
        .body-button {
            user-select: none;
            border: solid 1px #333;
            border-radius: 2px;
            padding: 2px;
            cursor: pointer;
            vertical-align: middle;
        }
        .body-button-active {
            user-select: none;
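The new split classes drive the row layouts used by the React components later in this commit. As a quick reference, a minimal sketch of a five-column row using split-3-1-1-1-4; the labels and values are placeholders, and the real markup is in the performanceRow hunk further down:

import * as React from "react";

const PerformanceRowSketch = () => (
    <div className="body-row split-3-1-1-1-4 left-padding-1 highlight">
        <div className="body-item-title">monitor:</div>      {/* 30% column */}
        <div className="body-item-text">vol(db): 0.0000</div> {/* 10% column */}
        <div className="body-item-text">buf(ms): 0</div>      {/* 10% column */}
        <div className="body-item-text">res(ms): 0</div>      {/* 10% column */}
        <div className="body-item-text"></div>                {/* 40% column */}
    </div>
)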
@@ -2,16 +2,22 @@ import * as React from "react";
import { createRoot } from "react-dom/client";
import "./css/App.css"
import { useEffect, useMemo, useRef, useState } from "react";
import { VoiceChnagerClient } from "@dannadori/voice-changer-client-js"
import { VoiceChnagerClient, createDummyMediaStream } from "@dannadori/voice-changer-client-js"
import { useMicrophoneOptions } from "./options_microphone";
const container = document.getElementById("app")!;
const root = createRoot(container);

const App = () => {
    const { component: microphoneSettingComponent, options: microphoneOptions, params: microphoneParams, isStarted } = useMicrophoneOptions()


    const audioContextRef = useRef<AudioContext>()
    const voiceChangerClientRef = useRef<VoiceChnagerClient | null>(null)
    const [clientInitialized, setClientInitialized] = useState<boolean>(false)
    const [bufferingTime, setBufferingTime] = useState<number>(0)
    const [responseTime, setResponseTime] = useState<number>(0)
    const [volume, setVolume] = useState<number>(0)

    const { component: microphoneSettingComponent, options: microphoneOptions, params: microphoneParams, isStarted } = useMicrophoneOptions(audioContextRef.current)

    const onClearSettingClicked = async () => {
        //@ts-ignore
@@ -25,16 +31,24 @@ const App = () => {

    useEffect(() => {
        const initialized = async () => {
            const ctx = new AudioContext()
            voiceChangerClientRef.current = new VoiceChnagerClient(ctx, true, {
                notifySendBufferingTime: (val: number) => { console.log(`buf:${val}`) },
                notifyResponseTime: (val: number) => { console.log(`res:${val}`) },
            audioContextRef.current = new AudioContext()
            voiceChangerClientRef.current = new VoiceChnagerClient(audioContextRef.current, true, {
                notifySendBufferingTime: (val: number) => {
                    setBufferingTime(val)
                },
                notifyResponseTime: (val: number) => {
                    setResponseTime(val)
                },
                notifyException: (mes: string) => {
                    if (mes.length > 0) {
                        console.log(`error:${mes}`)
                    }
                }
            }, { notifyVolume: (vol: number) => { } })
            }, {
                notifyVolume: (vol: number) => {
                    setVolume(vol)
                }
            })
            await voiceChangerClientRef.current.isInitialized()
            setClientInitialized(true)
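From the call site above, the two option objects passed to the VoiceChnagerClient constructor carry the callback shapes sketched below. These are inferred from the arguments in this hunk, not taken from the library's exported declarations, and the type names are made up for illustration:

// Inferred from the constructor call above; the library's own type names may differ.
type StatusCallbacks = {
    notifySendBufferingTime: (val: number) => void   // buffering time in ms, shown in the monitor row
    notifyResponseTime: (val: number) => void        // server round-trip time in ms
    notifyException: (mes: string) => void           // non-empty message on error
}
type VolumeCallbacks = {
    notifyVolume: (vol: number) => void              // output volume, displayed as vol(db)
}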
@@ -46,10 +60,9 @@ const App = () => {
    }, [])

    useEffect(() => {
        console.log("START!!!", isStarted)
        const start = async () => {
            if (!voiceChangerClientRef.current || !clientInitialized) {
                console.log("client is not initialized")
                // console.log("client is not initialized")
                return
            }
            // if (!microphoneOptions.audioInputDeviceId || microphoneOptions.audioInputDeviceId.length == 0) {
@@ -62,7 +75,7 @@ const App = () => {
        }
        const stop = async () => {
            if (!voiceChangerClientRef.current || !clientInitialized) {
                console.log("client is not initialized")
                // console.log("client is not initialized")
                return
            }
            voiceChangerClientRef.current.stop()
@@ -74,29 +87,29 @@ const App = () => {
        }
    }, [isStarted])

    // useEffect(() => {
    //     if (!voiceChangerClientRef.current || !clientInitialized) {
    //         console.log("client is not initialized")
    //         return
    //     }
    //     voiceChangerClientRef.current.setServerUrl(microphoneOptions.mmvcServerUrl, microphoneOptions.protocol, false)
    // }, [microphoneOptions.mmvcServerUrl, microphoneOptions.protocol])

    useEffect(() => {
        const changeInput = async () => {
            if (!voiceChangerClientRef.current || !clientInitialized) {
                console.log("client is not initialized")
                // console.log("client is not initialized")
                return
            }
            await voiceChangerClientRef.current.setup(microphoneOptions.audioInputDeviceId!, microphoneOptions.bufferSize, microphoneOptions.forceVfDisable)
            if (!microphoneOptions.audioInput || microphoneOptions.audioInput == "none") {
                const ms = createDummyMediaStream(audioContextRef.current!)
                await voiceChangerClientRef.current.setup(ms, microphoneOptions.bufferSize, microphoneOptions.forceVfDisable)

            } else {
                await voiceChangerClientRef.current.setup(microphoneOptions.audioInput, microphoneOptions.bufferSize, microphoneOptions.forceVfDisable)
            }

        }
        changeInput()
    }, [microphoneOptions.audioInputDeviceId!, microphoneOptions.bufferSize, microphoneOptions.forceVfDisable])
    }, [microphoneOptions.audioInput, microphoneOptions.bufferSize, microphoneOptions.forceVfDisable])


    useEffect(() => {
        if (!voiceChangerClientRef.current || !clientInitialized) {
            console.log("client is not initialized")
            // console.log("client is not initialized")
            return
        }
        voiceChangerClientRef.current.setInputChunkNum(microphoneOptions.inputChunkNum)
@@ -104,7 +117,7 @@ const App = () => {

    useEffect(() => {
        if (!voiceChangerClientRef.current || !clientInitialized) {
            console.log("client is not initialized")
            // console.log("client is not initialized")
            return
        }
        voiceChangerClientRef.current.setVoiceChangerMode(microphoneOptions.voiceChangerMode)
@@ -131,7 +144,19 @@ const App = () => {
            </>
        )
    }, [])

    const performanceRow = useMemo(() => {
        return (
            <>
                <div className="body-row split-3-1-1-1-4 left-padding-1 highlight">
                    <div className="body-item-title">monitor:</div>
                    <div className="body-item-text">vol(db):{volume.toFixed(4)}</div>
                    <div className="body-item-text">buf(ms):{bufferingTime}</div>
                    <div className="body-item-text">res(ms):{responseTime}</div>
                    <div className="body-item-text"></div>
                </div>
            </>
        )
    }, [volume, bufferingTime, responseTime])
    return (
        <div className="body">
            <div className="body-row">
@@ -140,6 +165,7 @@ const App = () => {
                </div>
            </div>
            {clearRow}
            {performanceRow}
            {microphoneSettingComponent}
            <div>
                <audio id="audio-output"></audio>
@@ -1,7 +1,7 @@
import * as React from "react";
import { useEffect, useMemo, useState } from "react";
import { CHROME_EXTENSION } from "./const";
import { DefaultVoiceChangerRequestParamas, VoiceChangerOptions, VoiceChangerRequestParamas, DefaultVoiceChangerOptions, SampleRate, BufferSize, VoiceChangerMode, Protocol } from "@dannadori/voice-changer-client-js"
import { DefaultVoiceChangerRequestParamas, VoiceChangerOptions, VoiceChangerRequestParamas, DefaultVoiceChangerOptions, SampleRate, BufferSize, VoiceChangerMode, Protocol, fileSelectorAsDataURL, createDummyMediaStream } from "@dannadori/voice-changer-client-js"


const reloadDevices = async () => {
@@ -20,7 +20,15 @@ const reloadDevices = async () => {
        label: "none",
        toJSON: () => { }
    })
    return audioInputs
    audioInputs.push({
        deviceId: "file",
        groupId: "file",
        kind: "audioinput",
        label: "file",
        toJSON: () => { }
    })
    const audioOutputs = mediaDeviceInfos.filter(x => { return x.kind == "audiooutput" })
    return [audioInputs, audioOutputs]
}

@@ -31,11 +39,23 @@ export type MicrophoneOptionsComponent = {
    isStarted: boolean
}

export const useMicrophoneOptions = (): MicrophoneOptionsComponent => {

export const useMicrophoneOptions = (audioContext?: AudioContext): MicrophoneOptionsComponent => {
    // GUI Info
    const [audioDeviceInfo, setAudioDeviceInfo] = useState<MediaDeviceInfo[]>([])
    const [inputAudioDeviceInfo, setInputAudioDeviceInfo] = useState<MediaDeviceInfo[]>([])
    const [outputAudioDeviceInfo, setOutputAudioDeviceInfo] = useState<MediaDeviceInfo[]>([])
    const [editSpeakerTargetId, setEditSpeakerTargetId] = useState<number>(0)
    const [editSpeakerTargetName, setEditSpeakerTargetName] = useState<string>("")
    const [audioInput, setAudioInput] = useState<string>("none")
    const audioOutputRef = React.useRef<string>("")
    const [audioOutput, _setAudioOutput] = useState<string>("none")
    const setAudioOutput = (id: string) => {
        audioOutputRef.current = id
        _setAudioOutput(audioOutputRef.current)
        const audio = document.getElementById("audio-output") as HTMLAudioElement
        //@ts-ignore
        audio.setSinkId(audioOutputRef.current)
    }

    // const [options, setOptions] = useState<MicrophoneOptionsState>(InitMicrophoneOptionsState)
    const [params, setParams] = useState<VoiceChangerRequestParamas>(DefaultVoiceChangerRequestParamas)
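setAudioOutput above relies on HTMLMediaElement.setSinkId, which is absent from some TypeScript DOM lib versions (hence the //@ts-ignore) and not implemented in every browser. A hedged sketch of the same routing with feature detection; routeAudioOutput is an illustrative helper, not part of this commit:

const routeAudioOutput = async (audio: HTMLAudioElement, deviceId: string) => {
    if ("setSinkId" in audio) {
        // @ts-ignore - setSinkId may be missing from the bundled DOM typings
        await audio.setSinkId(deviceId)
    } else {
        console.warn("setSinkId is not supported in this browser")
    }
}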
@@ -45,7 +65,8 @@ export const useMicrophoneOptions = (): MicrophoneOptionsComponent => {
    useEffect(() => {
        const initialize = async () => {
            const audioInfo = await reloadDevices()
            setAudioDeviceInfo(audioInfo)
            setInputAudioDeviceInfo(audioInfo[0])
            setOutputAudioDeviceInfo(audioInfo[1])

            if (CHROME_EXTENSION) {
                //@ts-ignore
@@ -78,7 +99,6 @@ export const useMicrophoneOptions = (): MicrophoneOptionsComponent => {
        }
        const startClassName = isStarted ? "body-button-active" : "body-button-stanby"
        const stopClassName = isStarted ? "body-button-stanby" : "body-button-active"
        console.log("ClassName", startClassName, stopClassName)

        return (
            <div className="body-row split-3-3-4 left-padding-1">
@@ -95,9 +115,113 @@ export const useMicrophoneOptions = (): MicrophoneOptionsComponent => {
    }, [isStarted])


    const setAudioInputDeviceId = async (deviceId: string) => {
        setOptions({ ...options, audioInputDeviceId: deviceId })
    const audioInputRow = useMemo(() => {
        return (
            <div className="body-row split-3-7 left-padding-1 highlight">
                <div className="body-item-title">AudioInput</div>
                <div className="body-select-container">
                    <select className="body-select" value={audioInput} onChange={(e) => { setAudioInput(e.target.value) }}>
                        {
                            inputAudioDeviceInfo.map(x => {
                                return <option key={x.deviceId} value={x.deviceId}>{x.label}</option>
                            })
                        }
                    </select>
                </div>
            </div>
        )
    }, [inputAudioDeviceInfo, audioInput])

    const audioMediaInputRow = useMemo(() => {
        console.log("GEN:audioMediaInputRow1")
        if (audioInput != "file") {
            console.log("GEN:audioMediaInputRow2")
            return <></>
        }
        console.log("GEN:audioMediaInputRow3")

        const onFileLoadClicked = async () => {
            const url = await fileSelectorAsDataURL("")
            const audio = document.getElementById("body-audio-converted") as HTMLAudioElement
            audio.src = url
            // audio.volume = 0.0
            // audio.onplay = () => {
            //     //@ts-ignore
            //     const ms = audio.captureStream()
            //     setOptions({ ...options, audioInput: ms })
            // }
            await audio.play()
            const src = audioContext!.createMediaElementSource(audio);
            const dst = audioContext!.createMediaStreamDestination()
            src.connect(dst)
            setOptions({ ...options, audioInput: dst.stream })


            const audio_org = document.getElementById("body-audio-original") as HTMLAudioElement
            audio_org.src = url
            audio_org.pause()

            audio_org.onplay = () => {
                console.log(audioOutputRef.current)
                // @ts-ignore
                audio_org.setSinkId(audioOutputRef.current)
            }
        }

        return (
            <div className="body-row split-3-3-4 left-padding-1 highlight">
                <div className="body-item-title"></div>
                <div className="body-item-text">
                    <div>
                        org:<audio id="body-audio-original" controls></audio>
                    </div>
                    <div>
                        cnv:<audio id="body-audio-converted" controls></audio>
                    </div>
                </div>
                <div className="body-button-container">
                    <div className="body-button" onClick={onFileLoadClicked}>load</div>
                </div>
            </div>
        )
    }, [audioInput, audioOutput])
    console.log("GEN:audioMediaInputRow3")
    useEffect(() => {
        if (!audioContext) {
            return
        }
        if (audioInput == "none") {
            const ms = createDummyMediaStream(audioContext)
            setOptions({ ...options, audioInput: ms })
        } else if (audioInput == "file") {
            // const audio = document.getElementById("body-audio") as HTMLAudioElement
            // //@ts-ignore
            // const ms = audio.captureStream()
            // setOptions({ ...options, audioInput: ms })
        } else {
            setOptions({ ...options, audioInput: audioInput })
        }
    }, [audioContext, audioInput])


    const audioOutputRow = useMemo(() => {
        return (
            <div className="body-row split-3-7 left-padding-1 highlight">
                <div className="body-item-title">AudioOutput</div>
                <div className="body-select-container">
                    <select className="body-select" value={audioOutput} onChange={(e) => { setAudioOutput(e.target.value) }}>
                        {
                            outputAudioDeviceInfo.map(x => {
                                return <option key={x.deviceId} value={x.deviceId}>{x.label}</option>
                            })
                        }
                    </select>
                    <audio hidden id="body-output-audio"></audio>
                </div>
            </div>
        )
    }, [outputAudioDeviceInfo, audioOutput])


    const onSetServerClicked = async () => {
        const input = document.getElementById("mmvc-server-url") as HTMLInputElement
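onFileLoadClicked above routes a playing audio element into a MediaStream so a local file can stand in for a microphone. A distilled, self-contained sketch of that routing; the function name is illustrative, and note that createMediaElementSource can only be attached to a given element once:

const audioElementToStream = (ctx: AudioContext, audio: HTMLAudioElement): MediaStream => {
    const src = ctx.createMediaElementSource(audio)  // <audio> element -> Web Audio graph
    const dst = ctx.createMediaStreamDestination()   // Web Audio graph -> MediaStream
    src.connect(dst)
    return dst.stream                                // feed this stream to the voice changer
}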
@@ -197,19 +321,9 @@ export const useMicrophoneOptions = (): MicrophoneOptionsComponent => {
                    </select>
                </div>
            </div>

            <div className="body-row split-3-7 left-padding-1 highlight">
                <div className="body-item-title">Microphone</div>
                <div className="body-select-container">
                    <select className="body-select" value={options.audioInputDeviceId || "none"} onChange={(e) => { setAudioInputDeviceId(e.target.value) }}>
                        {
                            audioDeviceInfo.map(x => {
                                return <option key={x.deviceId} value={x.deviceId}>{x.label}</option>
                            })
                        }
                    </select>
                </div>
            </div>
            {audioInputRow}
            {audioMediaInputRow}
            {audioOutputRow}

            <div className="body-row split-3-7 left-padding-1 highlight">
                <div className="body-item-title">Sample Rate</div>
@@ -339,7 +453,7 @@ export const useMicrophoneOptions = (): MicrophoneOptionsComponent => {
                </div>
            </>
        )
    }, [audioDeviceInfo, editSpeakerTargetId, editSpeakerTargetName, startButtonRow, params, options])
    }, [inputAudioDeviceInfo, outputAudioDeviceInfo, editSpeakerTargetId, editSpeakerTargetName, startButtonRow, audioInputRow, audioMediaInputRow, audioOutputRow, params, options])

    return {
        component: settings,
@@ -187,8 +187,9 @@ export class AudioStreamer extends Duplex {

    private sendBuffer = async (newBuffer: Uint8Array) => {
        if (this.serverUrl.length == 0) {
            console.error("no server url")
            throw "no server url"
            console.warn("no server url")
            return
            // throw "no server url"
        }
        const timestamp = Date.now()
        // console.log("REQUEST_MESSAGE:", [this.gpu, this.srcId, this.dstId, timestamp, newBuffer.buffer])
@@ -118,6 +118,7 @@ export class VoiceChnagerClient {
        // create mic stream
        if (this.micStream) {
            console.log("DESTROY!!!!!!!!!!!!!!!!!!!")
            this.micStream.unpipe()
            // this.micStream.stop()
            this.micStream.destroy()
            this.micStream = null
@@ -139,9 +140,12 @@ export class VoiceChnagerClient {
            console.log("VF disabled")
            this.micStream.setStream(this.currentMediaStream) // input device -> mic stream
        }
        this.micStream.pipe(this.audioStreamer!) // mic stream -> audio streamer


        this.micStream.pipe(this.audioStreamer) // mic stream -> audio streamer
        if (!this._isVoiceChanging) {
            this.micStream.pauseRecording()
        } else {
            this.micStream.playRecording()
        }
    }
    get stream(): MediaStream {
        return this.currentMediaStreamAudioDestinationNode.stream
@@ -16,8 +16,7 @@ export type VoiceChangerRequestParamas = {
}

export type VoiceChangerOptions = {
    audioInputDeviceId: string | null,
    mediaStream: MediaStream | null,
    audioInput: string | MediaStream | null,
    mmvcServerUrl: string,
    protocol: Protocol,
    sampleRate: SampleRate, // 48000Hz
@@ -77,8 +76,7 @@ export const DefaultVoiceChangerRequestParamas: VoiceChangerRequestParamas = {
}

export const DefaultVoiceChangerOptions: VoiceChangerOptions = {
    audioInputDeviceId: null,
    mediaStream: null,
    audioInput: null,
    mmvcServerUrl: "https://192.168.0.3:18888/test",
    protocol: "sio",
    sampleRate: 48000,
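With this change the former audioInputDeviceId / mediaStream pair collapses into a single audioInput field that can be a device id string, the "none"/"file" pseudo-devices, or an already-prepared MediaStream. A hedged sketch of how a consumer might normalize that union before calling setup, mirroring the branching in index.tsx above; resolveAudioInput is a hypothetical helper, not part of the library:

import { createDummyMediaStream } from "@dannadori/voice-changer-client-js"

const resolveAudioInput = (
    input: string | MediaStream | null,
    ctx: AudioContext
): string | MediaStream => {
    if (!input || input === "none") {
        return createDummyMediaStream(ctx)  // silent placeholder stream instead of a device
    }
    return input                            // a device id string or a ready MediaStream
}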
@@ -1,2 +1,3 @@
export * from "./const"
export * from "./VoiceChangerClient"
export * from "./util"
@@ -10,3 +10,48 @@ export const createDummyMediaStream = (audioContext: AudioContext) => {
    oscillatorNode.start();
    return dummyOutputNode.stream;
};

export const fileSelector = async (regex: string) => {
    const fileInput = document.createElement("input");
    fileInput.type = "file";
    const p = new Promise<File>((resolve, reject) => {
        fileInput.onchange = (e) => {
            if (e.target instanceof HTMLInputElement == false) {
                console.log("invalid target!", e.target)
                reject("invalid target")
                return null
            }
            const target = e.target as HTMLInputElement
            if (!target.files || target.files.length == 0) {
                reject("no file selected")
                return null
            }

            if (regex != "" && target.files[0].type.match(regex)) {
                reject(`not target file type ${target.files[0].type}`);
                return null
            }
            resolve(target.files[0])
            return null
        };
        fileInput.click();
    });
    return await p
}

export const fileSelectorAsDataURL = async (regex: string) => {
    const f = await fileSelector(regex)
    if (!f) {
        return f
    }

    const url = await new Promise<string>((resolve) => {
        const reader = new FileReader();
        reader.onload = () => {
            console.log("load data", reader.result as string);
            resolve(reader.result as string);
        };
        reader.readAsDataURL(f);
    })
    return url
}
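Usage sketch for the new helpers: prompt for a file and preview it in an audio element, following the call in options_microphone.tsx above. The empty-string argument skips the type filter, as in the commit; the element id is illustrative:

import { fileSelectorAsDataURL } from "@dannadori/voice-changer-client-js"

const loadAudioFile = async () => {
    const url = await fileSelectorAsDataURL("")  // opens the browser file picker
    const audio = document.getElementById("preview-audio") as HTMLAudioElement
    audio.src = url                              // data URL of the selected file
    await audio.play()
}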