# mistral-v3 / app_backup.py
# NOTE: the lines below are Hugging Face file-viewer metadata left over from a
# copy-paste (uploader: rodrigomasini, commit 3f05e5b verified, 5.55 kB).
# They are not part of the program and are kept here only as a comment.
#   "Update app_backup.py" / "raw" / "history blame"
# Contact us:
import platform
import sys
import shutil
import os
import datetime
import subprocess
import gradio as gr
import spaces
@spaces.GPU(duration=120)
def check_python():
    """Return a one-line description of the running Python interpreter.

    If the interpreter is not CPython 3.9/3.10/3.11, an incompatibility
    warning is appended to the returned string.
    """
    supported_minors = [9, 10, 11]
    python_info = f"Python {platform.python_version()} on {platform.system()}"
    if not (
        int(sys.version_info.major) == 3
        and int(sys.version_info.minor) in supported_minors
    ):
        # Fix: the old message interpolated the raw list and printed
        # "required 3.[9, 10, 11]"; render the supported versions readably.
        supported = " / ".join(f"3.{minor}" for minor in supported_minors)
        python_info += (
            f"\nIncompatible Python version: "
            f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}"
            f" required {supported}"
        )
    return python_info
def check_torch():
    """Build a multi-line report on GPU toolkits and the installed torch.

    Detects NVIDIA / AMD / Intel toolkits from well-known binaries and paths
    (works even before torch is imported), then reports the torch version,
    backend, and any visible GPUs. Returns the report as a string, or an
    error string if torch cannot be imported.
    """
    torch_info = ""
    # Toolkit detection: look for vendor tools on PATH or at default locations.
    if shutil.which('nvidia-smi') is not None or os.path.exists(
        os.path.join(
            os.environ.get('SystemRoot') or r'C:\Windows',
            'System32',
            'nvidia-smi.exe',
        )
    ):
        torch_info += 'NVIDIA toolkit detected\n'
    elif shutil.which('rocminfo') is not None or os.path.exists('/opt/rocm/bin/rocminfo'):
        torch_info += 'AMD toolkit detected\n'
    elif (shutil.which('sycl-ls') is not None
            or os.environ.get('ONEAPI_ROOT') is not None
            or os.path.exists('/opt/intel/oneapi')):
        torch_info += 'Intel OneAPI toolkit detected\n'
    else:
        torch_info += 'Using CPU-only Torch\n'
    try:
        import torch
        # Fix: keep a sentinel so a failed IPEX import cannot cause a
        # NameError later when torch.xpu is available (native-XPU builds).
        ipex = None
        try:
            import intel_extension_for_pytorch as ipex
            if torch.xpu.is_available():
                from library.ipex import ipex_init
                ipex_init()
                os.environ.setdefault('NEOReadDebugKeys', '1')
                os.environ.setdefault('ClDeviceGlobalMemSizeAvailablePercent', '100')
        except Exception:
            pass
        torch_info += f'Torch {torch.__version__}\n'
        if not torch.cuda.is_available():
            torch_info += 'Torch reports CUDA not available\n'
        else:
            # Fix: the XPU branch used to be nested under `torch.version.cuda`,
            # so Intel backends were never reported unless a CUDA version
            # string happened to be present. Check XPU first, independently.
            if hasattr(torch, "xpu") and torch.xpu.is_available():
                ipex_version = ipex.__version__ if ipex is not None else 'unknown'
                torch_info += f'Torch backend: Intel IPEX OneAPI {ipex_version}\n'
            elif torch.version.cuda:
                torch_info += f'Torch backend: NVIDIA CUDA {torch.version.cuda} cuDNN {torch.backends.cudnn.version() if torch.backends.cudnn.is_available() else "N/A"}\n'
            elif torch.version.hip:
                torch_info += f'Torch backend: AMD ROCm HIP {torch.version.hip}\n'
            else:
                torch_info += 'Unknown Torch backend\n'
            for device in [torch.cuda.device(i) for i in range(torch.cuda.device_count())]:
                if hasattr(torch, "xpu") and torch.xpu.is_available():
                    torch_info += f'Torch detected GPU: {torch.xpu.get_device_name(device)} VRAM {round(torch.xpu.get_device_properties(device).total_memory / 1024 / 1024)} Compute Units {torch.xpu.get_device_properties(device).max_compute_units}\n'
                else:
                    torch_info += f'Torch detected GPU: {torch.cuda.get_device_name(device)} VRAM {round(torch.cuda.get_device_properties(device).total_memory / 1024 / 1024)} Arch {torch.cuda.get_device_capability(device)} Cores {torch.cuda.get_device_properties(device).multi_processor_count}\n'
        return torch_info
    except Exception as e:
        return f'Could not load torch: {e}'
def get_installed_packages():
    """Return installed (name, version) pairs from ``pip freeze``.

    Lines containing "WARNING" or without an ``==`` pin are skipped.
    The result is sorted case-insensitively by package name.
    """
    freeze_output = subprocess.getoutput("pip freeze")
    packages = []
    for line in freeze_output.splitlines():
        if "WARNING" in line or '==' not in line:
            continue
        # Fix: partition() splits on the first '==' only, so a line with more
        # than one '==' (e.g. a PEP 440 '===' pin) can no longer raise
        # ValueError the way a bare two-target unpack of split('==') did.
        pkg_name, _, pkg_version = line.partition('==')
        packages.append((pkg_name, pkg_version))
    packages.sort(key=lambda item: item[0].lower())
    return packages
def get_installed_packages_version():
    """Return (name, version) pairs parsed from ``pip freeze``, unsorted.

    Skips warning lines and anything not pinned with ``==``; pairs are kept
    in the order pip reports them.
    """
    freeze_text = subprocess.getoutput("pip freeze")
    versioned = []
    for entry in freeze_text.splitlines():
        if "WARNING" in entry or '==' not in entry:
            continue
        name, version = entry.split('==')
        versioned.append((name, version))
    return versioned
def match_packages_with_versions():
    """Match entries in requirements.txt against installed packages.

    Reads requirements.txt from the directory of this file and returns a
    list of (requirement_name, version) tuples, where version is the
    installed version string or "Not installed".
    """
    import re  # local import so this fix is self-contained

    requirements_file = os.path.join(os.path.dirname(__file__), "requirements.txt")
    with open(requirements_file, 'r') as f:
        raw_lines = f.read().splitlines()
    requirements = []
    for raw in raw_lines:
        req = raw.strip()
        if not req or req.startswith('#'):
            continue
        # Fix: the old code only stripped '==', so entries like
        # "numpy>=1.24", "pkg~=1.0", "pkg[extra]" or "pkg; marker" kept their
        # specifier and never matched. Cut at the first specifier character.
        name = re.split(r'[<>=!~\[;]', req, maxsplit=1)[0].strip()
        if name:
            requirements.append(name)
    installed_dict = {pkg: version for pkg, version in get_installed_packages_version()}
    return [(req, installed_dict.get(req, "Not installed")) for req in requirements]
def display_info():
    """Assemble the full system report shown in the Gradio textbox."""
    timestamp = datetime.datetime.now().strftime('%m/%d/%Y-%H:%M:%S')
    sections = [
        f"Machine local date and time: {timestamp}",
        check_python(),
        check_torch(),
        f"Installed packages:\n{get_installed_packages()}",
        f"Matched packages with versions:\n{match_packages_with_versions()}",
    ]
    return "\n\n".join(sections)
# Minimal UI: a single button that fills the textbox with the system report.
with gr.Blocks() as demo:
    report_box = gr.Textbox(lines=20, label="System Information")
    check_button = gr.Button("Check System")
    check_button.click(display_info, inputs=[], outputs=[report_box])
demo.launch()