m-ric HF Staff committed on
Commit
f0bd7e5
·
verified ·
1 Parent(s): d065cac

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -10,7 +10,7 @@ import numpy as np
10
  PODCAST_SUBJECT = "The future of AI and its impact on society"
11
 
12
  # Initialize the inference client
13
- client = InferenceClient("Qwen/Qwen2.5-Coder-32B-Instruct", provider="together", token=os.getenv("HF_TOKEN"))
14
  model = Dia.from_pretrained("nari-labs/Dia-1.6B", compute_dtype="float16")
15
 
16
  # Queue for audio streaming
@@ -45,12 +45,12 @@ def split_podcast_into_chunks(podcast_text, chunk_size=3):
45
 
46
  def process_audio_chunks(podcast_text):
47
  chunks = split_podcast_into_chunks(podcast_text)
48
- sample_rate = 36000 # Modified from https://huggingface.co/spaces/nari-labs/Dia-1.6B/blob/main/app.py has 44100
49
  for chunk in chunks:
50
  if stop_signal.is_set():
51
  break
52
 
53
- raw_audio = model.generate(chunk, use_torch_compile=True, verbose=False)
54
  audio_chunk = np.array(raw_audio, dtype=np.float32)
55
  audio_queue.put((sample_rate, audio_chunk))
56
 
 
10
  PODCAST_SUBJECT = "The future of AI and its impact on society"
11
 
12
  # Initialize the inference client
13
+ client = InferenceClient("meta-llama/Llama-3.3-70B-Instruct", provider="cerebras", token=os.getenv("HF_TOKEN"))
14
  model = Dia.from_pretrained("nari-labs/Dia-1.6B", compute_dtype="float16")
15
 
16
  # Queue for audio streaming
 
45
 
46
  def process_audio_chunks(podcast_text):
47
  chunks = split_podcast_into_chunks(podcast_text)
48
+ sample_rate = 39000 # Modified from https://huggingface.co/spaces/nari-labs/Dia-1.6B/blob/main/app.py has 44100
49
  for chunk in chunks:
50
  if stop_signal.is_set():
51
  break
52
 
53
+ raw_audio = model.generate(chunk, use_torch_compile=True, verbose=False, seed=42)
54
  audio_chunk = np.array(raw_audio, dtype=np.float32)
55
  audio_queue.put((sample_rate, audio_chunk))
56