This pull request fixes the Space by using a reachable model.

#1
Files changed (1) hide show
  1. app.py +1 -1
app.py CHANGED
@@ -18,7 +18,7 @@ client = InferenceClient("meta-llama/Meta-Llama-3.1-8B-Instruct", token=os.envir
18
 
19
  # Load models
20
  device = "cuda" if torch.cuda.is_available() else "cpu"
21
- model, model_config = get_pretrained_model("stabilityai/stable-audio-open-1.0")
22
  sample_rate = model_config["sample_rate"]
23
  sample_size = model_config["sample_size"]
24
  model = model.to(device)
 
18
 
19
  # Load models
20
  device = "cuda" if torch.cuda.is_available() else "cpu"
21
+ model, model_config = get_pretrained_model("chaowenguo/stable-audio-open-1.0")
22
  sample_rate = model_config["sample_rate"]
23
  sample_size = model_config["sample_size"]
24
  model = model.to(device)