diff --git a/demo/MMVCServerSIO.py b/demo/MMVCServerSIO.py
index 14b3a7b2..80b8d20d 100755
--- a/demo/MMVCServerSIO.py
+++ b/demo/MMVCServerSIO.py
@@ -20,9 +20,23 @@ from datetime import datetime
 import torch
 import numpy as np
 
+
 from mods.ssl import create_self_signed_cert
 from mods.VoiceChanger import VoiceChanger
-# from mods.Whisper import Whisper
+
+
+# File Uploader
+from mods.FileUploader import upload_file, concat_file_chunks
+
+# Trainer Rest Internal
+from mods.Trainer_Speakers import mod_get_speakers
+from mods.Trainer_Speaker import mod_delete_speaker
+from mods.Trainer_Speaker_Voices import mod_get_speaker_voices
+from mods.Trainer_Speaker_Voice import mod_get_speaker_voice
+from mods.Trainer_MultiSpeakerSetting import mod_get_multi_speaker_setting, mod_post_multi_speaker_setting
+from mods.Trainer_Models import mod_get_models
+from mods.Trainer_Model import mod_get_model, mod_delete_model
+from mods.Trainer_Training import mod_post_pre_training, mod_post_start_training, mod_post_stop_training, mod_get_related_files, mod_get_tail_training_log
 
 class UvicornSuppressFilter(logging.Filter):
     def filter(self, record):
@@ -131,6 +145,24 @@ args = parser.parse_args()
 
 printMessage(f"Phase name:{__name__}", level=2)
 thisFilename = os.path.basename(__file__)[:-3]
+from typing import Callable, List
+from fastapi import Body, FastAPI, HTTPException, Request, Response
+from fastapi.exceptions import RequestValidationError
+from fastapi.routing import APIRoute
+class ValidationErrorLoggingRoute(APIRoute):
+    def get_route_handler(self) -> Callable:
+        original_route_handler = super().get_route_handler()
+
+        async def custom_route_handler(request: Request) -> Response:
+            try:
+                return await original_route_handler(request)
+            except Exception as exc:
+                print("Exception", request.url, str(exc))
+                body = await request.body()
+                detail = {"errors": exc.errors(), "body": body.decode()}
+                raise HTTPException(status_code=422, detail=detail)
+
+        return custom_route_handler
 
 if __name__ == thisFilename or args.colab == True:
     printMessage(f"PHASE3:{__name__}", level=2)
@@ -139,6 +171,7 @@ if __name__ == thisFilename or args.colab == True:
     MODEL = args.m
 
     app_fastapi = FastAPI()
+    app_fastapi.router.route_class = ValidationErrorLoggingRoute
     app_fastapi.add_middleware(
         CORSMiddleware,
         allow_origins=["*"],
@@ -149,6 +182,10 @@ if __name__ == thisFilename or args.colab == True:
 
     app_fastapi.mount("/front", StaticFiles(directory="../frontend/dist", html=True), name="static")
 
+    app_fastapi.mount("/trainer", StaticFiles(directory="../frontend/dist", html=True), name="static")
+
+    app_fastapi.mount("/recorder", StaticFiles(directory="../frontend/dist", html=True), name="static")
+
     sio = socketio.AsyncServer(
         async_mode='asgi',
         cors_allowed_origins='*'
@@ -178,36 +215,20 @@ if __name__ == thisFilename or args.colab == True:
         return {"result": "Index"}
 
-    UPLOAD_DIR = "model_upload_dir"
+    ############
+    # File Uploader
+    # ##########
+    UPLOAD_DIR = "upload_dir"
     os.makedirs(UPLOAD_DIR, exist_ok=True)
 
-    # Can colab receive post request "ONLY" at root path?
-    @app_fastapi.post("/upload_model_file")
-    async def upload_file(configFile:UploadFile = File(...), modelFile: UploadFile = File(...)):
-        if configFile and modelFile:
-            for file in [modelFile, configFile]:
-                filename = file.filename
-                fileobj = file.file
-                upload_dir = open(os.path.join(UPLOAD_DIR, filename),'wb+')
-                shutil.copyfileobj(fileobj, upload_dir)
-                upload_dir.close()
-            namespace.loadModel(os.path.join(UPLOAD_DIR, configFile.filename), os.path.join(UPLOAD_DIR, modelFile.filename))
-            return {"uploaded files": f"{configFile.filename}, {modelFile.filename} "}
-        return {"Error": "uploaded file is not found."}
-
+    MODEL_DIR = "/MMVC_Trainer/logs"
+    os.makedirs(MODEL_DIR, exist_ok=True)
 
     @app_fastapi.post("/upload_file")
     async def post_upload_file(
         file:UploadFile = File(...),
         filename: str = Form(...)
     ):
-
-        if file and filename:
-            fileobj = file.file
-            upload_dir = open(os.path.join(UPLOAD_DIR, filename),'wb+')
-            shutil.copyfileobj(fileobj, upload_dir)
-            upload_dir.close()
-            return {"uploaded files": f"{filename} "}
-        return {"Error": "uploaded file is not found."}
+        return upload_file(UPLOAD_DIR, file, filename)
 
     @app_fastapi.post("/load_model")
     async def post_load_model(
@@ -216,33 +237,40 @@ if __name__ == thisFilename or args.colab == True:
         configFilename: str = Form(...)
     ):
 
-        target_file_name = modelFilename
-        with open(os.path.join(UPLOAD_DIR, target_file_name), "ab") as target_file:
-            for i in range(modelFilenameChunkNum):
-                filename = f"{modelFilename}_{i}"
-                chunk_file_path = os.path.join(UPLOAD_DIR,filename)
-                stored_chunk_file = open(chunk_file_path, 'rb')
-                target_file.write(stored_chunk_file.read())
-                stored_chunk_file.close()
-                os.unlink(chunk_file_path)
-            target_file.close()
-        print(f'File saved to: {target_file_name}')
+        modelFilePath = concat_file_chunks(UPLOAD_DIR, modelFilename, modelFilenameChunkNum, UPLOAD_DIR)
+        print(f'File saved to: {modelFilePath}')
+        configFilePath = os.path.join(UPLOAD_DIR, configFilename)
-        print(f'Load: {configFilename}, {target_file_name}')
-        namespace.loadModel(os.path.join(UPLOAD_DIR, configFilename), os.path.join(UPLOAD_DIR, target_file_name))
-        return {"File saved to": f"{target_file_name}"}
+        namespace.loadModel(configFilePath, modelFilePath)
+        return {"load": f"{modelFilePath}, {configFilePath}"}
+
+    @app_fastapi.post("/load_model_for_train")
+    async def post_load_model_for_train(
+        modelGFilename: str = Form(...),
+        modelGFilenameChunkNum: int = Form(...),
+        modelDFilename: str = Form(...),
+        modelDFilenameChunkNum: int = Form(...),
+    ):
+
+
+        modelGFilePath = concat_file_chunks(UPLOAD_DIR, modelGFilename, modelGFilenameChunkNum, MODEL_DIR)
+        modelDFilePath = concat_file_chunks(UPLOAD_DIR, modelDFilename, modelDFilenameChunkNum, MODEL_DIR)
+        return {"File saved": f"{modelGFilePath}, {modelDFilePath}"}
 
+    @app_fastapi.post("/extract_voices")
+    async def post_extract_voices(
+        zipFilename: str = Form(...),
+        zipFileChunkNum: int = Form(...),
+    ):
+        zipFilePath = concat_file_chunks(UPLOAD_DIR, zipFilename, zipFileChunkNum, UPLOAD_DIR)
+        shutil.unpack_archive(zipFilePath, "/MMVC_Trainer/dataset/textful/")
+        return {"Zip file unpacked": f"{zipFilePath}"}
 
-    @app_fastapi.get("/transcribe")
-    def get_transcribe():
-        try:
-            namespace.transcribe()
-        except Exception as e:
-            print("TRANSCRIBE PROCESSING!!!! EXCEPTION!!!", e)
-            print(traceback.format_exc())
-            return str(e)
+    ############
+    # Voice Changer
+    # ##########
 
     @app_fastapi.post("/test")
     async def post_test(voice:VoiceModel):
         try:
@@ -284,6 +312,68 @@ if __name__ == thisFilename or args.colab == True:
             return str(e)
 
 
+    # Trainer REST API. Colab seems to accept POST requests only at paths directly under the root, so this is "REST-style".
+    @app_fastapi.get("/get_speakers")
+    async def get_speakers():
+        return mod_get_speakers()
+
+    @app_fastapi.delete("/delete_speaker")
+    async def delete_speaker(speaker:str= Form(...)):
+        return mod_delete_speaker(speaker)
+
+    @app_fastapi.get("/get_speaker_voices")
+    async def get_speaker_voices(speaker:str):
+        return mod_get_speaker_voices(speaker)
+
+    @app_fastapi.get("/get_speaker_voice")
+    async def get_speaker_voice(speaker:str, voice:str):
+        return mod_get_speaker_voice(speaker, voice)
+
+
+    @app_fastapi.get("/get_multi_speaker_setting")
+    async def get_multi_speaker_setting():
+        return mod_get_multi_speaker_setting()
+
+    @app_fastapi.post("/post_multi_speaker_setting")
+    async def post_multi_speaker_setting(setting: str = Form(...)):
+        return mod_post_multi_speaker_setting(setting)
+
+    @app_fastapi.get("/get_models")
+    async def get_models():
+        return mod_get_models()
+
+    @app_fastapi.get("/get_model")
+    async def get_model(model:str):
+        return mod_get_model(model)
+
+    @app_fastapi.delete("/delete_model")
+    async def delete_model(model:str= Form(...)):
+        return mod_delete_model(model)
+
+
+    @app_fastapi.post("/post_pre_training")
+    async def post_pre_training(batch:int= Form(...)):
+        return mod_post_pre_training(batch)
+
+    @app_fastapi.post("/post_start_training")
+    async def post_start_training():
+        print("POST START TRAINING..")
+        return mod_post_start_training()
+
+    @app_fastapi.post("/post_stop_training")
+    async def post_stop_training():
+        print("POST STOP TRAINING..")
+        return mod_post_stop_training()
+
+    @app_fastapi.get("/get_related_files")
+    async def get_related_files():
+        return mod_get_related_files()
+
+    @app_fastapi.get("/get_tail_training_log")
+    async def get_tail_training_log(num:int):
+        return mod_get_tail_training_log(num)
+
+
 if __name__ == '__mp_main__':
     printMessage(f"PHASE2:{__name__}", level=2)
diff --git a/demo/mods/FileUploader.py b/demo/mods/FileUploader.py
new file mode 100755
index 00000000..74d04a9f
--- /dev/null
+++ b/demo/mods/FileUploader.py
@@ -0,0 +1,27 @@
+import os, shutil
+from fastapi import UploadFile
+
+# UPLOAD_DIR = "model_upload_dir"
+
+def upload_file(upload_dirname:str, file:UploadFile, filename: str):
+    if file and filename:
+        fileobj = file.file
+        upload_dir = open(os.path.join(upload_dirname, filename),'wb+')
+        shutil.copyfileobj(fileobj, upload_dir)
+        upload_dir.close()
+        return {"uploaded files": f"{filename} "}
+    return {"Error": "uploaded file is not found."}
+
+def concat_file_chunks(upload_dirname:str, filename:str, chunkNum:int, dest_dirname:str):
+    target_file_name = os.path.join(dest_dirname, filename)
+    with open(target_file_name, "ab") as target_file:
+        for i in range(chunkNum):
+            chunkName = f"{filename}_{i}"
+            chunk_file_path = os.path.join(upload_dirname, chunkName)
+            stored_chunk_file = open(chunk_file_path, 'rb')
+            target_file.write(stored_chunk_file.read())
+            stored_chunk_file.close()
+            os.unlink(chunk_file_path)
+        target_file.close()
+    return target_file_name
+
diff --git a/demo/mods/Trainer_Model.py b/demo/mods/Trainer_Model.py
new file mode 100755
index 00000000..5aeeac4d
--- /dev/null
+++ b/demo/mods/Trainer_Model.py
@@ -0,0 +1,13 @@
+
+from fastapi.responses import FileResponse
+import os
+
+def mod_get_model(modelFile:str):
+    modelPath = os.path.join("/MMVC_Trainer/logs", modelFile)
+    return FileResponse(path=modelPath)
+
+def mod_delete_model(modelFile:str):
+    modelPath = os.path.join("/MMVC_Trainer/logs", modelFile)
+    os.unlink(modelPath)
+    return {"Model deleted": f"{modelFile}"}
+
diff --git a/demo/mods/Trainer_Models.py b/demo/mods/Trainer_Models.py
new file mode 100755
index 00000000..7f7ebdf4
--- /dev/null
+++ b/demo/mods/Trainer_Models.py
@@ -0,0 +1,21 @@
+
+from fastapi.responses import JSONResponse
+from fastapi.encoders import jsonable_encoder
+from trainer_mods.files import get_file_list
+import os
+
+def mod_get_models():
+    gModels = get_file_list(f'/MMVC_Trainer/logs/G*.pth')
+    dModels = get_file_list(f'/MMVC_Trainer/logs/D*.pth')
+    models = []
+    models.extend(gModels)
+    models.extend(dModels)
+    models = [ os.path.basename(x) for x in models]
+
+    models = sorted(models)
+    data = {
+        "models":models
+    }
+    json_compatible_item_data = jsonable_encoder(data)
+    return JSONResponse(content=json_compatible_item_data)
+
diff --git a/demo/mods/Trainer_MultiSpeakerSetting.py b/demo/mods/Trainer_MultiSpeakerSetting.py
new file mode 100755
index 00000000..ef573ca7
--- /dev/null
+++ b/demo/mods/Trainer_MultiSpeakerSetting.py
@@ -0,0 +1,26 @@
+from fastapi.responses import JSONResponse
+from fastapi.encoders import jsonable_encoder
+import os
+
+MULTI_SPEAKER_SETTING_PATH = "/MMVC_Trainer/dataset/multi_speaker_correspondence.txt"
+def mod_get_multi_speaker_setting():
+    data = {}
+    if os.path.isfile(MULTI_SPEAKER_SETTING_PATH) == False:
+        with open(MULTI_SPEAKER_SETTING_PATH, "w") as f:
+            f.write("")
+            f.flush()
+            f.close()
+
+    with open(MULTI_SPEAKER_SETTING_PATH, "r") as f:
+        setting = f.read()
+        data["multi_speaker_setting"] = setting
+    json_compatible_item_data = jsonable_encoder(data)
+    return JSONResponse(content=json_compatible_item_data)
+
+
+def mod_post_multi_speaker_setting(setting:str):
+    with open(MULTI_SPEAKER_SETTING_PATH, "w") as f:
+        f.write(setting)
+        f.flush()
+        f.close()
+    return {"Write Multispeaker setting": f"{setting}"}
\ No newline at end of file
diff --git a/demo/mods/Trainer_Speaker.py b/demo/mods/Trainer_Speaker.py
new file mode 100755
index 00000000..0aea5ae3
--- /dev/null
+++ b/demo/mods/Trainer_Speaker.py
@@ -0,0 +1,15 @@
+import shutil
+from mods.Trainer_MultiSpeakerSetting import MULTI_SPEAKER_SETTING_PATH
+
+def mod_delete_speaker(speaker:str):
+    shutil.rmtree(f"/MMVC_Trainer/dataset/textful/{speaker}")
+
+    with open(MULTI_SPEAKER_SETTING_PATH, "r") as f:
+        setting = f.readlines()
+
+    filtered = filter(lambda x: x.startswith(f"{speaker}|")==False, setting)
+    with open(MULTI_SPEAKER_SETTING_PATH, "w") as f:
+        f.writelines(list(filtered))
+        f.flush()
+        f.close()
+    return {"Speaker deleted": f"{speaker}"}
\ No newline at end of file
diff --git a/demo/mods/Trainer_Speaker_Voice.py b/demo/mods/Trainer_Speaker_Voice.py
new file mode 100755
index 00000000..27ffd6d3
--- /dev/null
+++ b/demo/mods/Trainer_Speaker_Voice.py
@@ -0,0 +1,28 @@
+from fastapi.responses import JSONResponse
+from fastapi.encoders import jsonable_encoder
+import os, base64
+
+def mod_get_speaker_voice(speaker:str, voice:str):
+    wav_file = f'/MMVC_Trainer/dataset/textful/{speaker}/wav/{voice}.wav'
+    text_file = f'/MMVC_Trainer/dataset/textful/{speaker}/text/{voice}.txt'
+    readable_text_file = f'/MMVC_Trainer/dataset/textful/{speaker}/readable_text/{voice}.txt'
+
+    data = {}
+    if os.path.exists(wav_file):
+        with open(wav_file, "rb") as f:
+            wav_data = f.read()
+        wav_data_base64 = base64.b64encode(wav_data).decode('utf-8')
+        data["wav"] = wav_data_base64
+
+
+    if os.path.exists(text_file):
+        with open(text_file, "r") as f:
+            text_data = f.read()
+        data["text"] = text_data
+
+    if os.path.exists(readable_text_file):
+        with open(readable_text_file, "r") as f:
+            text_data = f.read()
+        data["readable_text"] = text_data
+    json_compatible_item_data = jsonable_encoder(data)
+    return JSONResponse(content=json_compatible_item_data)
diff --git a/demo/mods/Trainer_Speaker_Voices.py b/demo/mods/Trainer_Speaker_Voices.py
new file mode 100755
index 00000000..c83e68a8
--- /dev/null
+++ b/demo/mods/Trainer_Speaker_Voices.py
@@ -0,0 +1,22 @@
+from fastapi.responses import JSONResponse
+from fastapi.encoders import jsonable_encoder
+from trainer_mods.files import get_file_list
+import os
+
+def mod_get_speaker_voices(speaker:str):
+    voices = get_file_list(f'/MMVC_Trainer/dataset/textful/{speaker}/wav/*.wav')
+
+    texts = get_file_list(f'/MMVC_Trainer/dataset/textful/{speaker}/text/*.txt')
+
+    readable_texts = get_file_list(f'/MMVC_Trainer/dataset/textful/{speaker}/readable_text/*.txt')
+
+    items = voices
+    items.extend(texts)
+    items.extend(readable_texts)
+    items = [ os.path.splitext(os.path.basename(x))[0] for x in items]
+    items = sorted(set(items))
+    data = {
+        "voices":items
+    }
+    json_compatible_item_data = jsonable_encoder(data)
+    return JSONResponse(content=json_compatible_item_data)
\ No newline at end of file
diff --git a/demo/mods/Trainer_Speakers.py b/demo/mods/Trainer_Speakers.py
new file mode 100755
index 00000000..752dcad5
--- /dev/null
+++ b/demo/mods/Trainer_Speakers.py
@@ -0,0 +1,15 @@
+from fastapi.responses import JSONResponse
+from fastapi.encoders import jsonable_encoder
+from trainer_mods.files import get_dir_list
+import os
+# Create is implemented in FileUploader.
+
+def mod_get_speakers():
+    os.makedirs("/MMVC_Trainer/dataset/textful", exist_ok=True)
+    speakers = get_dir_list("/MMVC_Trainer/dataset/textful/")
+
+    data = {
+        "speakers":sorted(speakers)
+    }
+    json_compatible_item_data = jsonable_encoder(data)
+    return JSONResponse(content=json_compatible_item_data)
diff --git a/demo/mods/Trainer_Training.py b/demo/mods/Trainer_Training.py
new file mode 100755
index 00000000..a58dcbbc
--- /dev/null
+++ b/demo/mods/Trainer_Training.py
@@ -0,0 +1,167 @@
+import subprocess,os
+from trainer_mods.files import get_file_list
+from fastapi.responses import JSONResponse
+from fastapi.encoders import jsonable_encoder
+
+LOG_DIR = "/MMVC_Trainer/info"
+train_proc = None
+
+SUCCESS = 0
+ERROR = -1
+### Submodule for Pre train
+def sync_exec(cmd:str, log_path:str):
+    shortCmdStr = cmd[:20]
+    try:
+        with open(log_path, 'w') as log_file:
+            proc = subprocess.run(cmd, shell=True, text=True, stdout=log_file, stderr=log_file, cwd="/MMVC_Trainer")
+            print(f"{shortCmdStr} returncode:{proc.returncode}")
+            if proc.returncode != 0:
+                print(f"{shortCmdStr} exception:")
+                return (ERROR, f"returncode:{proc.returncode}")
+    except Exception as e:
+        print(f"{shortCmdStr} exception:", str(e))
+        return (ERROR, str(e))
+    return (SUCCESS, "success")
+
+def sync_exec_with_stdout(cmd:str, log_path:str):
+    shortCmdStr = cmd[:20]
+    try:
+        with open(log_path, 'w') as log_file:
+            proc = subprocess.run(cmd, shell=True, text=True, stdout=subprocess.PIPE,
+                                  stderr=log_file, cwd="/MMVC_Trainer")
+            print(f"STDOUT{shortCmdStr}",proc.stdout)
+    except Exception as e:
+        print(f"{shortCmdStr} exception:", str(e))
+        return (ERROR, str(e))
+    return (SUCCESS, proc.stdout)
+
+
+def create_dataset():
+    cmd = "python3 create_dataset_jtalk.py -f train_config -s 24000 -m dataset/multi_speaker_correspondence.txt"
+    log_file = os.path.join(LOG_DIR, "log_create_dataset_jtalk.txt")
+    res = sync_exec(cmd, log_file)
+    return res
+
+def set_batch_size(batch:int):
+    cmd = "sed -i 's/\"batch_size\": [0-9]*/\"batch_size\": " + str(batch) + "/' /MMVC_Trainer/configs/baseconfig.json"
+    log_file = os.path.join(LOG_DIR, "log_set_batch_size.txt")
+    res = sync_exec(cmd, log_file)
+    return res
+
+def set_dummy_device_count():
+    cmd = 'sed -ie "s/torch.cuda.device_count()/1/" /MMVC_Trainer/train_ms.py'
+    log_file = os.path.join(LOG_DIR, "log_set_dummy_device_count.txt")
+    res = sync_exec(cmd, log_file)
+    return res
+
+### Submodule for Train
+def exec_training():
+    global train_proc
+    log_file = os.path.join(LOG_DIR, "training.txt")
+
+    # Check whether training is already running (avoid starting it twice)
+    if train_proc != None:
+        status = train_proc.poll()
+        if status != None:
+            print("Training has ended.", status)
+            train_proc = None
+        else:
+            print("Training has already started.")
+            return (ERROR, "Training has already started")
+
+    try:
+        with open(log_file, 'w') as log_file:
+            cmd = 'python3 train_ms.py -c configs/train_config.json -m ./'
+            print("exec:",cmd)
+            train_proc = subprocess.Popen("exec "+cmd, shell=True, text=True, stdout=log_file, stderr=log_file, cwd="/MMVC_Trainer")
+            print("Training started")
+            print(f"returncode:{train_proc.returncode}")
+    except Exception as e:
+        print("start training exception:", str(e))
+        return (ERROR, str(e))
+
+    return (SUCCESS, "success")
+
+def stop_training():
+    global train_proc
+    if train_proc == None:
+        print("Training has not started.")
+        return (ERROR, "Training has not started.")
+
+    status = train_proc.poll()
+    if status != None:
+        print("Training has already ended.", status)
+        train_proc = None
+        return (ERROR, "Training has already ended. " + str(status))
+    else:
+        train_proc.kill()
+        print("Training has stopped.")
+        return (SUCCESS, "success")
+
+### Main
+def mod_post_pre_training(batch:int):
+    res = set_batch_size(batch)
+    if res[0] == ERROR:
+        return {"result":"failed", "detail": f"Preprocess(set_batch_size) failed. {res[1]}"}
+
+    res = set_dummy_device_count()
+    if res[0] == ERROR:
+        return {"result":"failed", "detail": f"Preprocess(set_dummy_device_count) failed. {res[1]}"}
+
+    res = create_dataset()
+    if res[0] == ERROR:
+        return {"result":"failed", "detail": f"Preprocess failed(create_dataset). {res[1]}"}
+
+    return {"result":"success", "detail": f"Preprocess succeeded. {res[1]}"}
+
+
+def mod_post_start_training():
+    res = exec_training()
+    if res[0] == ERROR:
+        return {"result":"failed", "detail": f"Start training failed. {res[1]}"}
+
+    return {"result":"success", "detail": f"Start training succeeded. {res[1]}"}
+
+def mod_post_stop_training():
+    res = stop_training()
+    if res[0] == ERROR:
+        return {"result":"failed", "detail": f"Stop training failed. {res[1]}"}
+
+    return {"result":"success", "detail": f"Stop training succeeded. {res[1]}"}
+
+### DEBUG
+def mod_get_related_files():
+    files = get_file_list(os.path.join(LOG_DIR,"*"))
+    files.extend([
+        "/MMVC_Trainer/dataset/multi_speaker_correspondence.txt",
+        "/MMVC_Trainer/train_ms.py",
+    ])
+    files.extend(
+        get_file_list("/MMVC_Trainer/configs/*")
+    )
+
+    res = []
+    for f in files:
+        size = os.path.getsize(f)
+        data = ""
+        if size < 1024*1024:
+            with open(f, "r") as input:
+                data = input.read()
+
+        res.append({
+            "name":f,
+            "size":size,
+            "data":data
+        })
+
+    json_compatible_item_data = jsonable_encoder(res)
+    return JSONResponse(content=json_compatible_item_data)
+
+def mod_get_tail_training_log(num:int):
+    training_log_file = os.path.join(LOG_DIR, "training.txt")
+    res = sync_exec(f"cat {training_log_file} | sed -e 's/.*\r//' > /tmp/out","/dev/null")
+    cmd = f'tail -n {num} /tmp/out'
+    res = sync_exec_with_stdout(cmd, "/dev/null")
+    if res[0] == ERROR:
+        return {"result":"failed", "detail": f"Tail training log failed. {res[1]}"}
+    return {"result":"success", "detail":res[1]}
diff --git a/demo/trainer_mods/files.py b/demo/trainer_mods/files.py
new file mode 100755
index 00000000..6610b7cd
--- /dev/null
+++ b/demo/trainer_mods/files.py
@@ -0,0 +1,19 @@
+import os,glob
+
+
+def get_file_list(top_dir):
+    for root, dirs, files in os.walk(top_dir):
+        for dir in dirs:
+            dirPath = os.path.join(root, dir)
+            print(f'dirPath = {dirPath}')
+
+        for file in files:
+            filePath = os.path.join(root, file)
+            print(f'filePath = {filePath}')
+
+
+def get_dir_list(top_dir):
+    return os.listdir(top_dir)
+
+def get_file_list(top_dir):
+    return glob.glob(top_dir)
\ No newline at end of file
diff --git a/frontend/dist/assets/setting_recorder.json b/frontend/dist/assets/setting_recorder.json
new file mode 100755
index 00000000..125429c4
--- /dev/null
+++ b/frontend/dist/assets/setting_recorder.json
@@ -0,0 +1,4 @@
+{
+    "app_title": "recorder",
+    "majar_mode": "docker"
+}
diff --git a/frontend/dist/assets/setting_trainer.json b/frontend/dist/assets/setting_trainer.json
new file mode 100755
index 00000000..5e3fc7b6
--- /dev/null
+++ b/frontend/dist/assets/setting_trainer.json
@@ -0,0 +1,4 @@
+{
+    "app_title": "trainer",
+    "majar_mode": "docker"
+}
diff --git a/frontend/dist/index.html b/frontend/dist/index.html
index a333ab9c..cbd093f7 100755
--- a/frontend/dist/index.html
+++ b/frontend/dist/index.html
@@ -1 +1 @@
-