HaiderAUT committed
Commit c172b12 · verified · 1 Parent(s): 50d2a40

Update app.py

Files changed (1)
  1. app.py +41 -20
app.py CHANGED
@@ -1,19 +1,17 @@
  # =============================================================
- # Hugging Face Space – Lecture → Multilingual Podcast Generator
+ # Hugging Face Space – Lecture → Podcast Generator (User‑selectable Languages)
  # =============================================================
- # * **Text generation** – SmolAgents `HfApiModel` running the remote
- #   Qwen/Qwen2.5‑Coder‑32B‑Instruct model.
- # * **Speech synthesis** – `huggingface_hub.InferenceClient.text_to_speech`
- #   (serverless) with open models per language – no heavy local
- #   downloads.
- # * Outputs five FLAC files (English, Bangla, Chinese, Urdu, Nepali).
+ # * **Text generation** – SmolAgents `HfApiModel` (Qwen/Qwen2.5‑Coder‑32B‑Instruct).
+ # * **Speech synthesis** – `huggingface_hub.InferenceClient.text_to_speech`.
+ # * Users pick which languages to generate (English, Bangla, Chinese,
+ #   Urdu, Nepali). Unselected languages are skipped.
  # -----------------------------------------------------------------

  import os
  import tempfile
  import textwrap
  from pathlib import Path
- from typing import List, Dict
+ from typing import List, Dict, Tuple, Optional

  import gradio as gr
  from huggingface_hub import InferenceClient
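The removed comment block above names the two remote backends, and the next hunk's context line shows `client = InferenceClient(token=os.getenv("HF_TOKEN", None))`. As a rough sketch only (the file's actual wiring of `llm` is not visible in these hunks), the setup presumably looks something like this, assuming smolagents' `HfApiModel` chat interface:

```python
import os

from huggingface_hub import InferenceClient
from smolagents import HfApiModel

# Serverless client reused later for every text_to_speech call
# (matches the context line in the hunk header below).
client = InferenceClient(token=os.getenv("HF_TOKEN", None))

# Remote chat model named in the header comment.
model = HfApiModel(model_id="Qwen/Qwen2.5-Coder-32B-Instruct")

def llm(prompt: str) -> str:
    # Hypothetical thin wrapper so the pipeline can call llm(prompt); depending
    # on the smolagents version the call returns a ChatMessage or a plain string.
    result = model([{"role": "user", "content": prompt}])
    return getattr(result, "content", result)
```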
@@ -41,12 +39,14 @@ client = InferenceClient(token=os.getenv("HF_TOKEN", None))
  LANG_INFO: Dict[str, Dict[str, str]] = {
      "en": {"name": "English", "tts_model": "facebook/mms-tts-eng"},
      "bn": {"name": "Bangla", "tts_model": "facebook/mms-tts-ben"},
-     # MMS lacks mainstream Mandarin — fallback to an open Chinese TTS
      "zh": {"name": "Chinese", "tts_model": "myshell-ai/MeloTTS-Chinese"},
      "ur": {"name": "Urdu", "tts_model": "facebook/mms-tts-urd-script_arabic"},
      "ne": {"name": "Nepali", "tts_model": "facebook/mms-tts-npi"},
  }

+ # Helper map: name ➜ code
+ LANG_CODE_BY_NAME = {info["name"]: code for code, info in LANG_INFO.items()}
+
  PROMPT_TEMPLATE = textwrap.dedent(
      """
      You are producing a lively two‑host educational podcast in {lang_name}.
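Because the new `gr.CheckboxGroup` (added further down) returns language display names rather than codes, the commit introduces `LANG_CODE_BY_NAME` to translate them back. A small self-contained illustration, with the dictionary trimmed to two of the entries shown above:

```python
# Values mirror the LANG_INFO entries in the hunk above (trimmed for brevity).
LANG_INFO = {
    "en": {"name": "English", "tts_model": "facebook/mms-tts-eng"},
    "bn": {"name": "Bangla", "tts_model": "facebook/mms-tts-ben"},
}
LANG_CODE_BY_NAME = {info["name"]: code for code, info in LANG_INFO.items()}

selected_lang_names = ["Bangla", "English"]   # what gr.CheckboxGroup hands back
selected_codes = [LANG_CODE_BY_NAME[name] for name in selected_lang_names]
assert selected_codes == ["bn", "en"]
```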
@@ -77,14 +77,25 @@ def truncate_text(text: str, limit: int = TOKEN_LIMIT) -> str:
  # Main pipeline
  # ------------------------------------------------------------------

- def generate_podcast(pdf: gr.File) -> List[gr.Audio]:
-     """Generate multilingual podcast from a lecture PDF."""
+ def generate_podcast(pdf: gr.File, selected_lang_names: List[str]) -> List[Optional[Tuple[str, None]]]:
+     """Generate podcast audio files for chosen languages. Returns a list
+     aligned with LANG_INFO order; unselected languages yield None."""
+     # Ensure at least one language selected
+     if not selected_lang_names:
+         return [None] * len(LANG_INFO)
+
+     selected_codes = [LANG_CODE_BY_NAME[name] for name in selected_lang_names]
+
      with tempfile.TemporaryDirectory() as tmpdir:
          raw_text = extract_pdf_text(pdf.name)
          lecture_text = truncate_text(raw_text)
-         outputs: List[tuple] = []
+         outputs: List[Optional[Tuple[str, None]]] = []

          for code, info in LANG_INFO.items():
+             if code not in selected_codes:
+                 outputs.append(None)
+                 continue
+
              # 1️⃣ Draft dialogue in the target language
              prompt = PROMPT_TEMPLATE.format(lang_name=info["name"], content=lecture_text)
              dialogue: str = llm(prompt)
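The per-language speech-synthesis step sits between the hunks and is therefore not shown in this diff. Based on the `InferenceClient.text_to_speech` API named in the header comment, that step presumably resembles the sketch below; the helper name, file name, and `.flac` extension are assumptions, not the file's actual code.

```python
from pathlib import Path
from huggingface_hub import InferenceClient

def synthesize(client: InferenceClient, dialogue: str, tts_model: str, out_dir: str, code: str) -> str:
    """Sketch of the per-language synthesis step (assumed, not shown in the diff)."""
    audio_bytes = client.text_to_speech(dialogue, model=tts_model)  # serverless TTS call
    flac_path = Path(out_dir) / f"podcast_{code}.flac"              # hypothetical file name
    flac_path.write_bytes(audio_bytes)
    return str(flac_path)  # gr.Audio(type="filepath") accepts a plain path string
```

Inside the loop this would be used roughly as `outputs.append(synthesize(client, dialogue, info["tts_model"], tmpdir, code))`.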
@@ -102,21 +113,31 @@ def generate_podcast(pdf: gr.File) -> List[gr.Audio]:
  # Gradio interface
  # ------------------------------------------------------------------

+ language_choices = [info["name"] for info in LANG_INFO.values()]
+
+ inputs = [
+     gr.File(label="Upload Lecture PDF", file_types=[".pdf"]),
+     gr.CheckboxGroup(
+         choices=language_choices,
+         value=["English"],
+         label="Select podcast language(s) to generate",
+     ),
+ ]
+
  audio_components = [
-     gr.Audio(label=f"{info['name']} Podcast", type="filepath")
-     for info in LANG_INFO.values()
+     gr.Audio(label=f"{info['name']} Podcast", type="filepath") for info in LANG_INFO.values()
  ]

  iface = gr.Interface(
      fn=generate_podcast,
-     inputs=gr.File(label="Upload Lecture PDF", file_types=[".pdf"]),
+     inputs=inputs,
      outputs=audio_components,
-     title="Lecture → Multilingual Podcast Generator",
+     title="Lecture → Podcast Generator (Choose Languages)",
      description=(
-         "Upload a lecture PDF and receive a two‑host audio podcast in five "
-         "languages (English, Bangla, Chinese, Urdu, Nepali). Dialogue is "
-         "crafted by Qwen‑32B; speech is synthesized on‑the‑fly using the "
-         "Hugging Face Inference API — no heavy downloads or GPUs required."
+         "Upload a lecture PDF, choose your desired languages, and receive a "
+         "two‑host audio podcast. Dialogue is crafted by Qwen‑32B; speech is "
+         "synthesized on‑the‑fly using the Hugging Face Inference API — "
+         "no heavy downloads or GPUs required."
      ),
  )
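`gr.Interface` maps the returned list positionally onto the fixed set of `gr.Audio` components, which is why unselected languages must still contribute a `None` entry. The launch call is also outside the hunks shown here; a typical closing would be:

```python
if __name__ == "__main__":
    iface.launch()  # standard Gradio entry point; assumed, not shown in this diff
```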
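For a quick check outside the UI, the callback can be exercised directly. The upload stub below is hypothetical and only mimics the `.name` attribute that `generate_podcast` reads from the Gradio file object:

```python
class FakeUpload:
    """Hypothetical stand-in for the object gr.File passes to the callback."""
    name = "lecture.pdf"  # assumed local path to a lecture PDF

results = generate_podcast(FakeUpload(), ["English", "Nepali"])
# `results` has one slot per LANG_INFO entry, in definition order;
# the Bangla, Chinese and Urdu slots are None because they were not selected.
```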