improve: directml device id

This commit is contained in:
w-okada 2023-07-18 09:43:17 +09:00
parent 87a10c8e11
commit 257d99e355
5 changed files with 65 additions and 12 deletions

File diff suppressed because one or more lines are too long

View File

@@ -33,6 +33,12 @@ export const ConvertArea = (props: ConvertProps) => {
const onClassName = serverSetting.serverSetting.gpu == 0 ? "config-sub-area-button-active" : "config-sub-area-button";
const offClassName = serverSetting.serverSetting.gpu == 0 ? "config-sub-area-button" : "config-sub-area-button-active";
const cpuClassName = serverSetting.serverSetting.gpu == -1 ? "config-sub-area-button-active" : "config-sub-area-button";
const gpu0ClassName = serverSetting.serverSetting.gpu == 0 ? "config-sub-area-button-active" : "config-sub-area-button";
const gpu1ClassName = serverSetting.serverSetting.gpu == 1 ? "config-sub-area-button-active" : "config-sub-area-button";
const gpu2ClassName = serverSetting.serverSetting.gpu == 2 ? "config-sub-area-button-active" : "config-sub-area-button";
const gpu3ClassName = serverSetting.serverSetting.gpu == 3 ? "config-sub-area-button-active" : "config-sub-area-button";
const gpuSelect =
edition.indexOf("onnxdirectML-cuda") >= 0 ? (
<div className="config-sub-area-control">
@@ -43,23 +49,56 @@ export const ConvertArea = (props: ConvertProps) => {
onClick={async () => {
await serverSetting.updateServerSettings({
...serverSetting.serverSetting,
gpu: 0,
gpu: -1,
});
}}
className={onClassName}
className={cpuClassName}
>
on
cpu
</div>
<div
onClick={async () => {
await serverSetting.updateServerSettings({
...serverSetting.serverSetting,
gpu: -1,
gpu: 0,
});
}}
className={offClassName}
className={gpu0ClassName}
>
off
0
</div>
<div
onClick={async () => {
await serverSetting.updateServerSettings({
...serverSetting.serverSetting,
gpu: 1,
});
}}
className={gpu1ClassName}
>
1
</div>
<div
onClick={async () => {
await serverSetting.updateServerSettings({
...serverSetting.serverSetting,
gpu: 2,
});
}}
className={gpu2ClassName}
>
2
</div>
<div
onClick={async () => {
await serverSetting.updateServerSettings({
...serverSetting.serverSetting,
gpu: 3,
});
}}
className={gpu3ClassName}
>
3
</div>
</div>
</div>

View File

@@ -11,7 +11,7 @@ class ModelSlotManager:
def __init__(self, model_dir: str):
self.model_dir = model_dir
self.modelSlots = loadAllSlotInfo(self.model_dir)
print("MODEL SLOT INFO-------------->>>>>", self.modelSlots)
print("[MODEL SLOT INFO]", self.modelSlots)
@classmethod
def get_instance(cls, model_dir: str):

View File

@@ -25,16 +25,30 @@ class DeviceManager(object):
elif self.mps_enabled:
dev = torch.device("mps")
else:
if id < self.gpu_num:
dev = torch.device("cuda", index=id)
else:
print("[Voice Changer] device detection error, fallback to cpu")
dev = torch.device("cpu")
return dev
def getOnnxExecutionProvider(self, gpu: int):
availableProviders = onnxruntime.get_available_providers()
devNum = torch.cuda.device_count()
if gpu >= 0 and "CUDAExecutionProvider" in availableProviders and devNum > 0:
if gpu < devNum: # ひとつ前のif文で弾いてもよいが、エラーの解像度を上げるため一段下げ。
return ["CUDAExecutionProvider"], [{"device_id": gpu}]
else:
print("[Voice Changer] device detection error, fallback to cpu")
return ["CPUExecutionProvider"], [
{
"intra_op_num_threads": 8,
"execution_mode": onnxruntime.ExecutionMode.ORT_PARALLEL,
"inter_op_num_threads": 8,
}
]
elif gpu >= 0 and "DmlExecutionProvider" in availableProviders:
return ["DmlExecutionProvider"], [{}]
return ["DmlExecutionProvider"], [{"device_id": gpu}]
else:
return ["CPUExecutionProvider"], [
{

View File

@@ -30,7 +30,7 @@ def createPipeline(modelSlot: RVCModelSlot, gpu: int, f0Detector: str):
dev,
)
except Exception as e:
print("[Voice Changer] exception! loading embedder", e)
print("[Voice Changer] exception! loading embedder", e, dev)
traceback.print_exc()
# pitchExtractor