m-ric (HF Staff) committed
Commit d065cac · verified · 1 parent: df2821e

Update app.py

Files changed (1):
  1. app.py +6 -6
app.py CHANGED
@@ -4,6 +4,7 @@ import os
 import gradio as gr
 from dia.model import Dia
 from huggingface_hub import InferenceClient
+import numpy as np
 
 # Hardcoded podcast subject
 PODCAST_SUBJECT = "The future of AI and its impact on society"
@@ -44,13 +45,14 @@ def split_podcast_into_chunks(podcast_text, chunk_size=3):
 
 def process_audio_chunks(podcast_text):
     chunks = split_podcast_into_chunks(podcast_text)
-
+    sample_rate = 36000  # Modified from https://huggingface.co/spaces/nari-labs/Dia-1.6B/blob/main/app.py has 44100
     for chunk in chunks:
         if stop_signal.is_set():
             break
 
-        audio_chunk = model.generate(chunk, use_torch_compile=True, verbose=False)
-        audio_queue.put(audio_chunk)
+        raw_audio = model.generate(chunk, use_torch_compile=True, verbose=False)
+        audio_chunk = np.array(raw_audio, dtype=np.float32)
+        audio_queue.put((sample_rate, audio_chunk))
 
     audio_queue.put(None)
 
@@ -63,8 +65,6 @@ def stream_audio_generator(podcast_text):
     gen_thread = threading.Thread(target=process_audio_chunks, args=(podcast_text,))
     gen_thread.start()
 
-    sample_rate = 32000  # Modified from https://huggingface.co/spaces/nari-labs/Dia-1.6B/blob/main/app.py has 44100
-
     try:
         while True:
             # Get next chunk from queue
@@ -75,7 +75,7 @@ def stream_audio_generator(podcast_text):
                 break
 
             # Yield the audio chunk with sample rate
-            yield (sample_rate, chunk)
+            yield chunk
 
     except Exception as e:
         print(f"Error in streaming: {e}")