Spaces: Running on Zero
Update app.py
app.py CHANGED

@@ -41,6 +41,8 @@ def load_model():
     model, model_config = get_pretrained_model("ford442/stable-audio-open-1.0")
     print("Model loaded successfully.")
     return model, model_config
+
+model, model_config = load_model()
 
 # Function to set up, generate, and process the audio
 @spaces.GPU(duration=60) # Allocate GPU only when this function is called
@@ -56,7 +58,6 @@ def generate_audio(prompt, seconds_total=30, steps=100, cfg_scale=7):
     print(f"Hugging Face token: {hf_token}")
 
     # Use pre-loaded model and configuration
-    model, model_config = load_model()
     sample_rate = model_config["sample_rate"]
     sample_size = model_config["sample_size"]
 
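In short, the commit removes the load_model() call from inside generate_audio and runs it once at module level instead, so the weights are loaded at startup rather than on every request, and the GPU is only held inside the decorated handler. Below is a minimal sketch of the resulting app.py structure; the imports (spaces, stable_audio_tools' get_pretrained_model) are assumed since the hunks do not show them, and the rest of generate_audio is elided.

import spaces
from stable_audio_tools import get_pretrained_model  # assumed import, not shown in the hunks


def load_model():
    # Download and build the model once; on ZeroGPU this runs on CPU at startup.
    model, model_config = get_pretrained_model("ford442/stable-audio-open-1.0")
    print("Model loaded successfully.")
    return model, model_config


# Pre-load at module import so the GPU-decorated handler only does inference.
model, model_config = load_model()


# Function to set up, generate, and process the audio
@spaces.GPU(duration=60)  # Allocate GPU only when this function is called
def generate_audio(prompt, seconds_total=30, steps=100, cfg_scale=7):
    # Use pre-loaded model and configuration
    sample_rate = model_config["sample_rate"]
    sample_size = model_config["sample_size"]
    # ... diffusion sampling and audio post-processing omitted (rest of app.py) ...

On ZeroGPU, module-level code executes without a GPU, so keeping model construction outside the @spaces.GPU function avoids spending part of the 60-second GPU allocation on loading weights.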