WIP: store setting 3

This commit is contained in:
wataru 2023-01-29 14:41:44 +09:00
parent dd7036cd08
commit ac967fdea1
9 changed files with 118 additions and 46 deletions

File diff suppressed because one or more lines are too long

View File

@ -9,7 +9,7 @@
"version": "1.0.0",
"license": "ISC",
"dependencies": {
"@dannadori/voice-changer-client-js": "^1.0.57",
"@dannadori/voice-changer-client-js": "^1.0.58",
"react": "^18.2.0",
"react-dom": "^18.2.0"
},
@ -3211,9 +3211,9 @@
}
},
"node_modules/@dannadori/voice-changer-client-js": {
"version": "1.0.57",
"resolved": "https://registry.npmjs.org/@dannadori/voice-changer-client-js/-/voice-changer-client-js-1.0.57.tgz",
"integrity": "sha512-JJl4WedfJGZLMsvHFbUQiCi6HavkH7P5JSGQcSI4C8iAh4DmUON/0/R2STlhasxlMiqlFPVqHpqLK/tQTapU8g==",
"version": "1.0.58",
"resolved": "https://registry.npmjs.org/@dannadori/voice-changer-client-js/-/voice-changer-client-js-1.0.58.tgz",
"integrity": "sha512-grdhyhYbAlJScIvYqNga9yD+sCexDL7WZ8oy3Jb6164lV3i+ceUQaSHMDEyhN/p4G73vcwp3QN4ROj1huCl28A==",
"dependencies": {
"@types/readable-stream": "^2.3.15",
"amazon-chime-sdk-js": "^3.10.0",
@ -13296,9 +13296,9 @@
}
},
"@dannadori/voice-changer-client-js": {
"version": "1.0.57",
"resolved": "https://registry.npmjs.org/@dannadori/voice-changer-client-js/-/voice-changer-client-js-1.0.57.tgz",
"integrity": "sha512-JJl4WedfJGZLMsvHFbUQiCi6HavkH7P5JSGQcSI4C8iAh4DmUON/0/R2STlhasxlMiqlFPVqHpqLK/tQTapU8g==",
"version": "1.0.58",
"resolved": "https://registry.npmjs.org/@dannadori/voice-changer-client-js/-/voice-changer-client-js-1.0.58.tgz",
"integrity": "sha512-grdhyhYbAlJScIvYqNga9yD+sCexDL7WZ8oy3Jb6164lV3i+ceUQaSHMDEyhN/p4G73vcwp3QN4ROj1huCl28A==",
"requires": {
"@types/readable-stream": "^2.3.15",
"amazon-chime-sdk-js": "^3.10.0",

View File

@ -48,7 +48,7 @@
"webpack-dev-server": "^4.11.1"
},
"dependencies": {
"@dannadori/voice-changer-client-js": "^1.0.57",
"@dannadori/voice-changer-client-js": "^1.0.58",
"react": "^18.2.0",
"react-dom": "^18.2.0"
}

View File

@ -22,7 +22,10 @@ export const useServerSettingArea = (props: UseServerSettingProps): ServerSettin
}
props.clientState.serverSetting.setFileUploadSetting({
...props.clientState.serverSetting.fileUploadSetting,
pyTorchModel: file
pyTorchModel: {
data: await file.arrayBuffer(),
filename: file.name
}
})
}
const onPyTorchFileClearClicked = () => {
@ -39,7 +42,10 @@ export const useServerSettingArea = (props: UseServerSettingProps): ServerSettin
}
props.clientState.serverSetting.setFileUploadSetting({
...props.clientState.serverSetting.fileUploadSetting,
configFile: file
configFile: {
data: await file.arrayBuffer(),
filename: file.name
}
})
}
const onConfigFileClearClicked = () => {
@ -56,7 +62,10 @@ export const useServerSettingArea = (props: UseServerSettingProps): ServerSettin
}
props.clientState.serverSetting.setFileUploadSetting({
...props.clientState.serverSetting.fileUploadSetting,
onnxModel: file
onnxModel: {
data: await file.arrayBuffer(),
filename: file.name
}
})
}
const onOnnxFileClearClicked = () => {
@ -91,7 +100,7 @@ export const useServerSettingArea = (props: UseServerSettingProps): ServerSettin
<div className="body-row split-3-3-4 left-padding-1 guided">
<div className="body-item-title left-padding-2">Config(.json)</div>
<div className="body-item-text">
<div>{props.clientState.serverSetting.fileUploadSetting.configFile?.name}</div>
<div>{props.clientState.serverSetting.fileUploadSetting.configFile?.filename}</div>
</div>
<div className="body-button-container">
<div className="body-button" onClick={onConfigFileLoadClicked}>select</div>
@ -101,7 +110,7 @@ export const useServerSettingArea = (props: UseServerSettingProps): ServerSettin
<div className="body-row split-3-3-4 left-padding-1 guided">
<div className="body-item-title left-padding-2">Onnx(.onnx)</div>
<div className="body-item-text">
<div>{props.clientState.serverSetting.fileUploadSetting.onnxModel?.name}</div>
<div>{props.clientState.serverSetting.fileUploadSetting.onnxModel?.filename}</div>
</div>
<div className="body-button-container">
<div className="body-button" onClick={onOnnxFileLoadClicked}>select</div>
@ -113,7 +122,7 @@ export const useServerSettingArea = (props: UseServerSettingProps): ServerSettin
<div className="body-row split-3-3-4 left-padding-1 guided">
<div className="body-item-title left-padding-2">PyTorch(.pth)</div>
<div className="body-item-text">
<div>{props.clientState.serverSetting.fileUploadSetting.pyTorchModel?.name}</div>
<div>{props.clientState.serverSetting.fileUploadSetting.pyTorchModel?.filename}</div>
</div>
<div className="body-button-container">
<div className="body-button" onClick={onPyTorchFileLoadClicked}>select</div>

View File

@ -1,12 +1,12 @@
{
"name": "@dannadori/voice-changer-client-js",
"version": "1.0.57",
"version": "1.0.58",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@dannadori/voice-changer-client-js",
"version": "1.0.57",
"version": "1.0.58",
"license": "ISC",
"dependencies": {
"@types/readable-stream": "^2.3.15",

View File

@ -1,6 +1,6 @@
{
"name": "@dannadori/voice-changer-client-js",
"version": "1.0.57",
"version": "1.0.58",
"description": "",
"main": "dist/index.js",
"directories": {

View File

@ -3,7 +3,7 @@ import { ServerInfo, ServerSettingKey } from "./const";
type FileChunk = {
hash: number,
chunk: Blob
chunk: ArrayBuffer
}
export class ServerConfigurator {
private serverUrl = ""
@ -42,16 +42,16 @@ export class ServerConfigurator {
return info
}
uploadFile = async (file: File, onprogress: (progress: number, end: boolean) => void) => {
uploadFile = async (buf: ArrayBuffer, filename: string, onprogress: (progress: number, end: boolean) => void) => {
const url = this.serverUrl + "/upload_file"
onprogress(0, false)
const size = 1024 * 1024;
const fileChunks: FileChunk[] = [];
let index = 0; // index値
for (let cur = 0; cur < file.size; cur += size) {
for (let cur = 0; cur < buf.byteLength; cur += size) {
fileChunks.push({
hash: index++,
chunk: file.slice(cur, cur + size),
chunk: buf.slice(cur, cur + size),
});
}
@ -68,8 +68,8 @@ export class ServerConfigurator {
}
const p = new Promise<void>((resolve) => {
const formData = new FormData();
formData.append("file", chunk.chunk);
formData.append("filename", `${file.name}_${chunk.hash}`);
formData.append("file", new Blob([chunk.chunk]));
formData.append("filename", `${filename}_${chunk.hash}`);
const request = new Request(url, {
method: 'POST',
body: formData,
@ -91,11 +91,11 @@ export class ServerConfigurator {
return chunkNum
}
concatUploadedFile = async (file: File, chunkNum: number) => {
concatUploadedFile = async (filename: string, chunkNum: number) => {
const url = this.serverUrl + "/concat_uploaded_file"
await new Promise<void>((resolve) => {
const formData = new FormData();
formData.append("filename", file.name);
formData.append("filename", filename);
formData.append("filenameChunkNum", "" + chunkNum);
const request = new Request(url, {
method: 'POST',
@ -108,13 +108,13 @@ export class ServerConfigurator {
})
}
loadModel = async (configFile: File, pyTorchModelFile: File | null, onnxModelFile: File | null) => {
loadModel = async (configFilename: string, pyTorchModelFilename: string | null, onnxModelFilename: string | null) => {
const url = this.serverUrl + "/load_model"
const info = new Promise<ServerInfo>(async (resolve) => {
const formData = new FormData();
formData.append("pyTorchModelFilename", pyTorchModelFile?.name || "-");
formData.append("onnxModelFilename", onnxModelFile?.name || "-");
formData.append("configFilename", configFile.name);
formData.append("pyTorchModelFilename", pyTorchModelFilename || "-");
formData.append("onnxModelFilename", onnxModelFilename || "-");
formData.append("configFilename", configFilename);
const request = new Request(url, {
method: 'POST',
body: formData,

View File

@ -236,14 +236,14 @@ export class VoiceChangerClient {
}
// Configurator Method
uploadFile = (file: File, onprogress: (progress: number, end: boolean) => void) => {
return this.configurator.uploadFile(file, onprogress)
uploadFile = (buf: ArrayBuffer, filename: string, onprogress: (progress: number, end: boolean) => void) => {
return this.configurator.uploadFile(buf, filename, onprogress)
}
concatUploadedFile = (file: File, chunkNum: number) => {
return this.configurator.concatUploadedFile(file, chunkNum)
concatUploadedFile = (filename: string, chunkNum: number) => {
return this.configurator.concatUploadedFile(filename, chunkNum)
}
loadModel = (configFile: File, pyTorchModelFile: File | null, onnxModelFile: File | null) => {
return this.configurator.loadModel(configFile, pyTorchModelFile, onnxModelFile)
loadModel = (configFilename: string, pyTorchModelFilename: string | null, onnxModelFilename: string | null) => {
return this.configurator.loadModel(configFilename, pyTorchModelFilename, onnxModelFilename)
}
updateServerSettings = (key: ServerSettingKey, val: string) => {
return this.configurator.updateSettings(key, val)

View File

@ -4,16 +4,30 @@ import { VoiceChangerClient } from "../VoiceChangerClient"
import { useIndexedDB } from "./useIndexedDB"
export type FileUploadSetting = {
pyTorchModel: File | null
configFile: File | null
onnxModel: File | null
// export type FileUploadSetting = {
// pyTorchModel: File | null
// configFile: File | null
// onnxModel: File | null
// }
type ModelData = {
data: ArrayBuffer
filename: string
}
export type FileUploadSetting = {
pyTorchModel: ModelData | null
onnxModel: ModelData | null
configFile: ModelData | null
}
const InitialFileUploadSetting: FileUploadSetting = {
pyTorchModel: null,
configFile: null,
onnxModel: null,
}
export type UseServerSettingProps = {
voiceChangerClient: VoiceChangerClient | null
}
@ -185,10 +199,10 @@ export const useServerSetting = (props: UseServerSettingProps): ServerSettingSta
// (e) モデルアップロード
const _uploadFile = useMemo(() => {
return async (file: File, onprogress: (progress: number, end: boolean) => void) => {
return async (modelData: ModelData, onprogress: (progress: number, end: boolean) => void) => {
if (!props.voiceChangerClient) return
const num = await props.voiceChangerClient.uploadFile(file, onprogress)
const res = await props.voiceChangerClient.concatUploadedFile(file, num)
const num = await props.voiceChangerClient.uploadFile(modelData.data, modelData.filename, onprogress)
const res = await props.voiceChangerClient.concatUploadedFile(modelData.filename, num)
console.log("uploaded", num, res)
}
}, [props.voiceChangerClient])
@ -203,9 +217,12 @@ export const useServerSetting = (props: UseServerSettingProps): ServerSettingSta
return
}
if (!props.voiceChangerClient) return
setUploadProgress(0)
setIsUploading(true)
const models = [fileUploadSetting.pyTorchModel, fileUploadSetting.onnxModel].filter(x => { return x != null }) as File[]
const models = [fileUploadSetting.onnxModel, fileUploadSetting.pyTorchModel].filter(x => { return x != null }) as ModelData[]
for (let i = 0; i < models.length; i++) {
const progRate = 1 / models.length
const progOffset = 100 * i * progRate
@ -219,13 +236,59 @@ export const useServerSetting = (props: UseServerSettingProps): ServerSettingSta
console.log(progress, end)
})
await props.voiceChangerClient.loadModel(fileUploadSetting.configFile, fileUploadSetting.pyTorchModel, fileUploadSetting.onnxModel)
await props.voiceChangerClient.loadModel(fileUploadSetting.configFile.filename, fileUploadSetting.pyTorchModel?.filename || null, fileUploadSetting.onnxModel?.filename || null)
setUploadProgress(0)
setIsUploading(false)
reloadServerInfo()
}
}, [fileUploadSetting, props.voiceChangerClient])
// const _uploadFile = useMemo(() => {
// return async (file: File, onprogress: (progress: number, end: boolean) => void) => {
// if (!props.voiceChangerClient) return
// const num = await props.voiceChangerClient.uploadFile(file, onprogress)
// const res = await props.voiceChangerClient.concatUploadedFile(file, num)
// console.log("uploaded", num, res)
// }
// }, [props.voiceChangerClient])
// const loadModel = useMemo(() => {
// return async () => {
// if (!fileUploadSetting.pyTorchModel && !fileUploadSetting.onnxModel) {
// alert("PyTorchモデルとONNXモデルのどちらか一つ以上指定する必要があります。")
// return
// }
// if (!fileUploadSetting.configFile) {
// alert("Configファイルを指定する必要があります。")
// return
// }
// if (!props.voiceChangerClient) return
// setUploadProgress(0)
// setIsUploading(true)
// const models = [fileUploadSetting.pyTorchModel, fileUploadSetting.onnxModel].filter(x => { return x != null }) as File[]
// for (let i = 0; i < models.length; i++) {
// const progRate = 1 / models.length
// const progOffset = 100 * i * progRate
// await _uploadFile(models[i], (progress: number, _end: boolean) => {
// // console.log(progress * progRate + progOffset, end, progRate,)
// setUploadProgress(progress * progRate + progOffset)
// })
// }
// await _uploadFile(fileUploadSetting.configFile, (progress: number, end: boolean) => {
// console.log(progress, end)
// })
// await props.voiceChangerClient.loadModel(fileUploadSetting.configFile, fileUploadSetting.pyTorchModel, fileUploadSetting.onnxModel)
// setUploadProgress(0)
// setIsUploading(false)
// reloadServerInfo()
// }
// }, [fileUploadSetting, props.voiceChangerClient])
const reloadServerInfo = useMemo(() => {
return async () => {
if (!props.voiceChangerClient) return