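"""Runtime configuration singleton.

Detects the best available compute device (CUDA, XPU, MPS, or CPU),
decides whether half precision is safe, and derives audio-chunking
parameters from available GPU memory.
"""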
import argparse
import os
import sys
import json
import shutil
from multiprocessing import cpu_count

import torch

try:
    import intel_extension_for_pytorch as ipex  # pylint: disable=import-error, unused-import

    if torch.xpu.is_available():
        from infer.modules.ipex import ipex_init

        ipex_init()
except Exception:  # pylint: disable=broad-exception-caught
    pass

import logging

logger = logging.getLogger(__name__)
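# Training configs bundled per model version and sample rate. They are copied
# into configs/inuse/ on first use so edits never touch the shipped originals.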
version_config_list = [
    "v1/32k.json",
    "v1/40k.json",
    "v1/48k.json",
    "v2/48k.json",
    "v2/32k.json",
]
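# Decorating a class with singleton_variable replaces it with a factory that
# builds the instance once and returns the same object on every later call.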
def singleton_variable(func):
    def wrapper(*args, **kwargs):
        if wrapper.instance is None:
            wrapper.instance = func(*args, **kwargs)
        return wrapper.instance

    wrapper.instance = None
    return wrapper
@singleton_variable
class Config:
    def __init__(self):
        self.device = "cuda:0"
        self.is_half = True
        self.use_jit = False
        self.n_cpu = 0
        self.gpu_name = None
        self.json_config = self.load_config_json()
        self.gpu_mem = None
        self.instead = ""
        self.preprocess_per = 3.7
        self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config()

    @staticmethod
    def load_config_json() -> dict:
        d = {}
        for config_file in version_config_list:
            p = f"configs/inuse/{config_file}"
            if not os.path.exists(p):
                shutil.copy(f"configs/{config_file}", p)
            with open(p, "r") as f:
                d[config_file] = json.load(f)
        return d
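    # Probe MPS by actually moving a tensor onto the device; is_available()
    # can report True on builds where the backend still fails at runtime.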
    @staticmethod
    def has_mps() -> bool:
        if not torch.backends.mps.is_available():
            return False
        try:
            torch.zeros(1).to(torch.device("mps"))
            return True
        except Exception:
            return False

    @staticmethod
    def has_xpu() -> bool:
        return hasattr(torch, "xpu") and torch.xpu.is_available()
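    # Force fp32: flip fp16_run off in the in-memory configs and mirror the
    # change to the JSON files on disk.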
    def use_fp32_config(self):
        for config_file in version_config_list:
            self.json_config[config_file]["train"]["fp16_run"] = False
            with open(f"configs/inuse/{config_file}", "r") as f:
                strr = f.read().replace("true", "false")
            with open(f"configs/inuse/{config_file}", "w") as f:
                f.write(strr)
            logger.info("overwrite %s", config_file)
        self.preprocess_per = 3.0
        logger.info("overwrite preprocess_per to %s", self.preprocess_per)
    def device_config(self) -> tuple:
        if torch.cuda.is_available():
            if self.has_xpu():
                self.device = self.instead = "xpu:0"
                self.is_half = True
            i_device = int(self.device.split(":")[-1])
            self.gpu_name = torch.cuda.get_device_name(i_device)
            if (
                ("16" in self.gpu_name and "V100" not in self.gpu_name.upper())
                or "P40" in self.gpu_name.upper()
                or "P10" in self.gpu_name.upper()
                or "1060" in self.gpu_name
                or "1070" in self.gpu_name
                or "1080" in self.gpu_name
            ):
                # 10-/16-series and P40/P10 cards have poor fp16 support.
                logger.info("Found GPU %s, forcing fp32", self.gpu_name)
                self.is_half = False
                self.use_fp32_config()
            else:
                logger.info("Found GPU %s", self.gpu_name)
            # Total VRAM in GiB; the +0.4 nudges cards that report just
            # under their nominal size up to the expected whole number.
            self.gpu_mem = int(
                torch.cuda.get_device_properties(i_device).total_memory
                / 1024
                / 1024
                / 1024
                + 0.4
            )
            if self.gpu_mem <= 4:
                self.preprocess_per = 3.0
        elif self.has_mps():
            logger.info("No supported Nvidia GPU found, using MPS instead")
            self.device = self.instead = "mps"
            self.is_half = False
            self.use_fp32_config()
        else:
            logger.info("No supported Nvidia GPU found, falling back to CPU")
            self.device = self.instead = "cpu"
            self.is_half = False
            self.use_fp32_config()
        if self.n_cpu == 0:
            self.n_cpu = cpu_count()
        if self.is_half:
            # Configuration for ~6 GB of VRAM
            x_pad = 3
            x_query = 10
            x_center = 60
            x_max = 65
        else:
            # Configuration for ~5 GB of VRAM
            x_pad = 1
            x_query = 6
            x_center = 38
            x_max = 41
        if self.gpu_mem is not None and self.gpu_mem <= 4:
            # Tighter limits for cards with 4 GB or less
            x_pad = 1
            x_query = 5
            x_center = 30
            x_max = 32
        if self.instead:
            logger.info(f"Use {self.instead} instead")
        # On the portable Windows runtime: if the installed onnxruntime lacks
        # the CUDA provider DLL, set it aside as the DirectML build and
        # promote the bundled CUDA build in its place.
        if not os.path.exists(
            r"runtime\Lib\site-packages\onnxruntime\capi\onnxruntime_providers_cuda.dll"
        ):
            try:
                os.rename(
                    r"runtime\Lib\site-packages\onnxruntime",
                    r"runtime\Lib\site-packages\onnxruntime-dml",
                )
            except OSError:
                pass
            try:
                os.rename(
                    r"runtime\Lib\site-packages\onnxruntime-cuda",
                    r"runtime\Lib\site-packages\onnxruntime",
                )
            except OSError:
                pass
        logger.info(
            "Half-precision floating-point: %s, device: %s",
            self.is_half,
            self.device,
        )
return x_pad, x_query, x_center, x_max
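

# Minimal usage sketch (not part of the original module): Config is a
# singleton, so every call returns the same configured instance.
if __name__ == "__main__":
    config = Config()
    print(config.device, config.is_half)
    print(config.x_pad, config.x_query, config.x_center, config.x_max)
    assert Config() is config  # singleton_variable caches the first instance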