m-ric (HF Staff) committed
Commit 119a653 · Parent: 856cb19

Improve description

Files changed (2)
  1. README.md +2 -2
  2. app.py +8 -6
README.md CHANGED
@@ -1,6 +1,6 @@
 ---
 title: Open NotebookLM
-emoji: 🐢
+emoji: 🎙️
 colorFrom: yellow
 colorTo: red
 sdk: gradio
@@ -9,7 +9,7 @@ app_file: app.py
 pinned: true
 header: mini
 license: apache-2.0
-short_description: Generate a podcast where 2 hosts discuss your document.
+short_description: Have your document discussed by 2 hosts in a captivating podcast.
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py CHANGED
@@ -61,10 +61,10 @@ def generate_podcast_script(subject: str, steering_question: str | None = None)
 CUDA_AVAILABLE = torch.cuda.is_available()
 
 kmodel = KModel(repo_id='hexgrad/Kokoro-82M').to("cuda" if CUDA_AVAILABLE else "cpu").eval()
-kpipeline = KPipeline(lang_code="a") # English voices
+kpipeline = KPipeline(lang_code="b") # English voices
 
-MALE_VOICE = "am_daniel"
-FEMALE_VOICE = "af_emma"
+MALE_VOICE = "bm_daniel"
+FEMALE_VOICE = "bf_emma"
 
 # Pre‑warm voices to avoid first‑call latency
 for v in (MALE_VOICE, FEMALE_VOICE):
@@ -119,10 +119,12 @@ def generate_podcast(url: str, pdf_path: str, topic: str):
     print(f"PROCESSED '{utterance}' in {int(t1-t0)} seconds. {audio_numpy.shape}")
 
 demo = gr.Interface(
-    title="Open NotebookLM",
-    description=f"""Generates a podcast discussion between two hosts about the materials of your choice. Based on [Kokoro](https://huggingface.co/hexgrad/Kokoro-82M), and uses elements from a NotebookLM app by [Gabriel Chua](https://huggingface.co/spaces/gabrielchua/open-notebooklm).
+    title="Open NotebookLM 🎙️",
+    description=f"""Generates a podcast discussion between two hosts about the materials of your choice.
 
-If you do not specify any source materials below, the podcast will be about the top trending [Daily paper](https://huggingface.co/papers/), '**{list(top_papers.keys())[0]}**'""",
+If you do not specify any source materials below, the podcast will be about the top trending [Daily paper](https://huggingface.co/papers/), '**{list(top_papers.keys())[0]}**'
+
+Based on [Kokoro TTS](https://huggingface.co/hexgrad/Kokoro-82M), lightning-fast inference for [Llama-3.3-70B](meta-llama/Llama-3.3-70B-Instruct) by Cerebras, and uses elements from a NotebookLM app by [Gabriel Chua](https://huggingface.co/spaces/gabrielchua/open-notebooklm).""",
     fn=generate_podcast,
     inputs=[
         gr.Textbox(
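
For reference, the voice change above switches Kokoro from American English (lang_code="a", voices prefixed am_/af_) to British English (lang_code="b", voices prefixed bm_/bf_). Below is a minimal sketch of the documented Kokoro pipeline usage with the new voices; the Space itself drives KModel directly with a pre-warmed pipeline, and the sample text and output filenames here are made up for illustration.

```python
# Minimal sketch, not the Space's actual code: synthesize one utterance with the
# new British English voices via the documented Kokoro pipeline API.
import soundfile as sf
from kokoro import KPipeline

pipeline = KPipeline(lang_code="b")  # "b" = British English ("a" = American English)

text = "Welcome back to the show."  # hypothetical sample utterance
for i, (graphemes, phonemes, audio) in enumerate(pipeline(text, voice="bf_emma")):
    # Kokoro yields (graphemes, phonemes, audio) chunks; audio is 24 kHz.
    sf.write(f"segment_{i}.wav", audio, 24000)
```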