import os
import gc
import shutil
import psutil
import torch
def is_disk_full(min_free_space_in_GB=10):
    """Return True if free space on the root filesystem is below the threshold."""
    total, used, free = shutil.disk_usage("/")
    free_gb = free / (1024 ** 3)
    if free_gb >= min_free_space_in_GB:
        print(f"enough space available ({free_gb:.2f} GB)")
        return False
    else:
        print("clean up!")
        return True
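
A minimal usage sketch for the disk check; CACHE_DIR is a hypothetical path standing in for wherever your Space keeps cached model weights:

CACHE_DIR = "/data/models"  # hypothetical cache location, adjust for your setup

if is_disk_full(min_free_space_in_GB=20):
    # Reclaim space by removing cached weights before the next download.
    shutil.rmtree(CACHE_DIR, ignore_errors=True)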
def release_model(model=None, label='Model'):
    """
    Releases CPU and GPU memory used by a model or pipeline.

    Args:
        model: The object to delete (e.g., model, pipeline).
        label: String label for log output.

    Note:
        `del model` below only drops this function's local reference;
        the caller should also delete or reassign its own reference so
        the object can actually be garbage-collected.
    """
    using_cuda = torch.cuda.is_available()
    was_cuda = False

    # CPU memory before
    process = psutil.Process(os.getpid())
    mem_cpu_before = process.memory_info().rss / 1e6  # MB
    if using_cuda:
        # memory_allocated() returns bytes, so convert to MB for logging
        mem_gpu_before = torch.cuda.memory_allocated() / 1e6
        print(f"\n[{label}] GPU memory before release: {mem_gpu_before:.2f} MB")
    print(f"[{label}] CPU memory before release: {mem_cpu_before:.2f} MB")

    # Try to detect whether the model was on CUDA
    if model is not None:
        try:
            if hasattr(model, 'parameters'):
                was_cuda = any(p.is_cuda for p in model.parameters())
        except Exception as e:
            print(f"[{label}] Could not check device: {e}")

    del model

    # Garbage collection and cache clearing
    gc.collect()
    if using_cuda:
        if was_cuda:
            torch.cuda.empty_cache()
        else:
            print(f"[{label}] ⚠️ Model was not using CUDA, but CUDA is available.")

    # CPU memory after
    mem_cpu_after = process.memory_info().rss / 1e6  # MB
    print(f"[{label}] CPU memory after release: {mem_cpu_after:.2f} MB")
    if using_cuda:
        mem_gpu_after = torch.cuda.memory_allocated() / 1e6
        print(f"[{label}] GPU memory after release: {mem_gpu_after:.2f} MB\n")
    else:
        print(f"[{label}] CUDA not available — GPU memory not tracked.\n")