Update app.py
app.py CHANGED
@@ -1,24 +1,36 @@
 #!/usr/bin/env python3
-# GhostAI Music Generator Hugging Face Spaces GPU
-
-import ...
+# GhostAI Music Generator Hugging Face Spaces GPU-Compatible
+
+import spaces  # <--- Must be imported FIRST before torch and CUDA
+
+import os
+import sys
+import gc
+import warnings
+import tempfile
+import torch
+import torchaudio
+import numpy as np
+import gradio as gr
 from pydub import AudioSegment
 from audiocraft.models import MusicGen
 from huggingface_hub import login
-import spaces  # Hugging Face spaces decorator
 
 warnings.filterwarnings("ignore")
 
+# Hugging Face token auth
 HF_TOKEN = os.getenv("HF_TOKEN")
 if not HF_TOKEN:
     sys.exit("ERROR: HF_TOKEN not set.")
 login(HF_TOKEN)
 
+# Device setup
 device = "cuda" if torch.cuda.is_available() else "cpu"
 print(f"Running on {device.upper()}")
 
 os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:128"
 
+# Load MusicGen model explicitly on GPU
 musicgen = MusicGen.get_pretrained("medium")
 musicgen.lm.to(device)
 musicgen.set_generation_params(duration=10)
@@ -28,15 +40,15 @@ def clean_resources():
     torch.cuda.empty_cache()
     gc.collect()
 
-@spaces.GPU #
+@spaces.GPU  # <-- Correct GPU decorator for HF
 def generate_music(prompt, cfg, top_k, top_p, temp, total_len, chunk_len, crossfade):
     if not prompt.strip():
         return None, "⚠️ Enter a valid prompt."
 
     sample_rate = musicgen.sample_rate
     segments = []
-
     chunks = max(1, total_len // chunk_len)
+
     for _ in range(chunks):
         with torch.no_grad():
             audio = musicgen.generate(