m-ric HF Staff committed on
Commit
5f17aa3
·
verified ·
1 Parent(s): 0d89a98

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -11,7 +11,7 @@ PODCAST_SUBJECT = "The future of AI and its impact on society"
11
 
12
  # Initialize the inference client
13
  client = InferenceClient("meta-llama/Llama-3.3-70B-Instruct", provider="cerebras", token=os.getenv("HF_TOKEN"))
14
- model = Dia.from_pretrained("nari-labs/Dia-1.6B", compute_dtype="float16")
15
 
16
  # Queue for audio streaming
17
  audio_queue = queue.Queue()
@@ -50,7 +50,7 @@ def process_audio_chunks(podcast_text):
50
  if stop_signal.is_set():
51
  break
52
 
53
- raw_audio = model.generate(chunk, use_torch_compile=True, verbose=False, seed=42)
54
  audio_chunk = np.array(raw_audio, dtype=np.float32)
55
  audio_queue.put((sample_rate, audio_chunk))
56
 
 
11
 
12
  # Initialize the inference client
13
  client = InferenceClient("meta-llama/Llama-3.3-70B-Instruct", provider="cerebras", token=os.getenv("HF_TOKEN"))
14
+ model = Dia.from_pretrained("nari-labs/Dia-1.6B", compute_dtype="float16", seed=42)
15
 
16
  # Queue for audio streaming
17
  audio_queue = queue.Queue()
 
50
  if stop_signal.is_set():
51
  break
52
 
53
+ raw_audio = model.generate(chunk, use_torch_compile=True, verbose=False)
54
  audio_chunk = np.array(raw_audio, dtype=np.float32)
55
  audio_queue.put((sample_rate, audio_chunk))
56