Update app.py

app.py CHANGED
@@ -38,18 +38,30 @@ if np.__version__ != "1.23.5":
 if not torch.__version__.startswith(("2.1.0", "2.3.1")):
     print(f"WARNING: PyTorch version {torch.__version__} may not be compatible. Expected torch==2.1.0 or 2.3.1.")
 
-# 1) DEVICE SETUP
+# 1) DEVICE SETUP WITH ENHANCED DEBUGGING
+print("Debugging GPU and CUDA setup...")
+print(f"PyTorch CUDA available: {torch.cuda.is_available()}")
+if torch.cuda.is_available():
+    print(f"CUDA device count: {torch.cuda.device_count()}")
+    print(f"CUDA current device: {torch.cuda.current_device()}")
+    print(f"CUDA device name: {torch.cuda.get_device_name(0)}")
+    print(f"CUDA version: {torch.version.cuda}")
+else:
+    print("WARNING: CUDA is not available. Checking for potential issues...")
+    print(f"CUDA library loaded: {torch._C._cuda_getCompiledVersion() > 0}")
+    print("Please ensure the Hugging Face Space is configured to use a GPU runtime.")
+    print("Falling back to CPU rendering. Note: Performance may be significantly slower.")
+
 device = "cuda" if torch.cuda.is_available() else "cpu"
 if device != "cuda":
-    print("
-    sys.exit(1)
-print(f"CUDA is available. Using GPU: {torch.cuda.get_device_name(0)}")
+    print("WARNING: Running on CPU. This is a fallback due to CUDA unavailability. GPU rendering is strongly recommended for performance.")
 
 # Pre-run memory cleanup
-torch.cuda.empty_cache()
-gc.collect()
-torch.cuda.ipc_collect()
-torch.cuda.synchronize()
+if device == "cuda":
+    torch.cuda.empty_cache()
+    gc.collect()
+    torch.cuda.ipc_collect()
+    torch.cuda.synchronize()
 
 # 2) LOAD MUSICGEN INTO VRAM
 try:
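The substantive change in this hunk is twofold: the old code hard-exited via sys.exit(1) when CUDA was missing, while the new code logs diagnostics and falls back to CPU; and the memory cleanup is now guarded behind device == "cuda". A minimal standalone sketch of that guard pattern, assuming only torch and gc (the cleanup_gpu_memory helper name is hypothetical; the app inlines these calls):

import gc
import torch

device = "cuda" if torch.cuda.is_available() else "cpu"

def cleanup_gpu_memory() -> None:
    """Hypothetical helper mirroring the patch's inlined cleanup block."""
    if device == "cuda":          # CUDA calls like synchronize() fail without a GPU, so guard them
        torch.cuda.empty_cache()  # return cached allocator blocks to the driver
        gc.collect()              # free Python-side references that keep tensors alive
        torch.cuda.ipc_collect()  # reclaim memory held by CUDA IPC handles
        torch.cuda.synchronize()  # wait for in-flight kernels to finish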
@@ -67,13 +79,17 @@ except Exception as e:
 # 3) RESOURCE MONITORING FUNCTION
 def print_resource_usage(stage: str):
     print(f"--- {stage} ---")
-    print(f"GPU Memory Allocated: {torch.cuda.memory_allocated() / (1024**3):.2f} GB")
-    print(f"GPU Memory Reserved: {torch.cuda.memory_reserved() / (1024**3):.2f} GB")
+    if device == "cuda":
+        print(f"GPU Memory Allocated: {torch.cuda.memory_allocated() / (1024**3):.2f} GB")
+        print(f"GPU Memory Reserved: {torch.cuda.memory_reserved() / (1024**3):.2f} GB")
     print(f"CPU Memory Used: {psutil.virtual_memory().percent}%")
     print("---------------")
 
-# Check available GPU memory
+# Check available GPU memory if using CUDA
 def check_vram_availability(required_gb=3.5):
+    if device != "cuda":
+        print("Skipping VRAM check as running on CPU.")
+        return True
     total_vram = torch.cuda.get_device_properties(0).total_memory / (1024**3)
     allocated_vram = torch.cuda.memory_allocated() / (1024**3)
     available_vram = total_vram - allocated_vram
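The hunk ends before check_vram_availability returns, so the comparison against required_gb in the sketch below is an assumption about the unshown tail of the function:

import torch

device = "cuda" if torch.cuda.is_available() else "cpu"

def check_vram_availability(required_gb: float = 3.5) -> bool:
    # New in this patch: on CPU there is no VRAM to check, so report and short-circuit.
    if device != "cuda":
        print("Skipping VRAM check as running on CPU.")
        return True
    total_vram = torch.cuda.get_device_properties(0).total_memory / (1024**3)
    allocated_vram = torch.cuda.memory_allocated() / (1024**3)
    available_vram = total_vram - allocated_vram
    # Assumed tail (not visible in the diff): succeed only if enough VRAM is free.
    return available_vram >= required_gb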
@@ -237,7 +253,7 @@ def generate_music(instrumental_prompt: str, cfg_scale: float, top_k: int, top_p
 
         for i in range(num_chunks):
             chunk_prompt = instrumental_prompt
-            print(f"Generating chunk {i+1}/{num_chunks} on
+            print(f"Generating chunk {i+1}/{num_chunks} on {device} (prompt: {chunk_prompt})...")
             musicgen_model.set_generation_params(
                 duration=generation_duration,
                 use_sampling=True,
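For context, set_generation_params is audiocraft's documented way to configure MusicGen decoding before each generation. A condensed sketch of the chunk loop as it reads after the patch; the temperature parameter and the generate call are assumptions, since the diff truncates the parameter list and does not show the generation step:

for i in range(num_chunks):
    chunk_prompt = instrumental_prompt
    print(f"Generating chunk {i+1}/{num_chunks} on {device} (prompt: {chunk_prompt})...")
    musicgen_model.set_generation_params(
        duration=generation_duration,  # seconds of audio per chunk
        use_sampling=True,             # sample from the distribution rather than greedy decode
        top_k=top_k,                   # sampling knobs passed through from the Gradio UI
        top_p=top_p,
        temperature=temperature,       # assumed: not visible in the hunk
        cfg_coef=cfg_scale,            # classifier-free guidance weight
    )
    wav = musicgen_model.generate([chunk_prompt])  # assumed call; returns batched waveforms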
@@ -274,10 +290,11 @@ def generate_music(instrumental_prompt: str, cfg_scale: float, top_k: int, top_p
             os.unlink(temp_wav_path)
             audio_segments.append(segment)
 
-            torch.cuda.empty_cache()
-            gc.collect()
-            torch.cuda.ipc_collect()
-            torch.cuda.synchronize()
+            if device == "cuda":
+                torch.cuda.empty_cache()
+                gc.collect()
+                torch.cuda.ipc_collect()
+                torch.cuda.synchronize()
             time.sleep(0.5)
             print_resource_usage(f"After Chunk {i+1} Generation")
 
@@ -311,10 +328,11 @@ def generate_music(instrumental_prompt: str, cfg_scale: float, top_k: int, top_p
     except Exception as e:
         return None, f"❌ Generation failed: {e}"
     finally:
-        torch.cuda.empty_cache()
-        gc.collect()
-        torch.cuda.ipc_collect()
-        torch.cuda.synchronize()
+        if device == "cuda":
+            torch.cuda.empty_cache()
+            gc.collect()
+            torch.cuda.ipc_collect()
+            torch.cuda.synchronize()
 
 # Function to clear inputs
 def clear_inputs():
|