PuristanLabs1 committed on
Commit
2a4c58e
·
verified ·
1 Parent(s): 49deb85

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -17
app.py CHANGED
@@ -61,8 +61,6 @@ def fetch_and_display_content(url):
61
 
62
  # Add detected language to metadata
63
  metadata["Detected Language"] = detected_lang.upper()
64
- #return cleaned_text, detected_lang, gr.update(visible=True), gr.update(visible=True)
65
- #return cleaned_text, metadata, gr.update(visible=True), gr.update(visible=True), gr.update(visible=True)
66
  return cleaned_text, metadata, detected_lang, gr.update(visible=True), gr.update(visible=True), gr.update(visible=True), gr.update(visible=True)
67
 
68
  ### 2️⃣ Cleaning Function
@@ -168,7 +166,9 @@ def extract_entities_with_stanza(text, chunk_size=1000):
168
  doc = nlp(chunk)
169
  for ent in doc.ents:
170
  entities.append({"text": ent.text, "type": ent.type})
171
-
 
 
172
  return entities
173
 
174
  ### 4️⃣ TTS Functionality (KokoroTTS)
@@ -252,11 +252,11 @@ with gr.Blocks() as demo:
252
  url_input = gr.Textbox(label="Enter URL", placeholder="https://example.com/article")
253
 
254
  voice_selection = gr.Dropdown(AVAILABLE_VOICES, label="Select Voice", value="af_bella")
255
-
256
- process_text_button = gr.Button("Fetch Text & Detect Language")
257
- process_summary_button = gr.Button("Summarize Text", visible=False)
258
- process_audio_button = gr.Button("Generate Audio", visible=False)
259
- process_ner_button = gr.Button("Extract Entities", visible=True) # ✅ New button for NER
260
 
261
  # Layout: Two adjacent columns (Text and Metadata)
262
  with gr.Row():
@@ -264,19 +264,17 @@ with gr.Blocks() as demo:
264
  metadata_output = gr.JSON(label="Article Metadata", visible=False) # Displays metadata
265
 
266
 
267
- #extracted_text = gr.Markdown(label="Extracted Content")
268
-
269
  detected_lang = gr.Textbox(label="Detected Language", visible=False)
270
  summary_output = gr.Textbox(label="Summary", visible=True, interactive=False)
271
  full_audio_output = gr.Audio(label="Generated Audio", visible=True)
272
- ner_output = gr.JSON(label="Extracted Entities", visible=True) # ✅ New output for NER
273
 
274
  # Step 1: Fetch Text & Detect Language First
275
  process_text_button.click(
276
  fetch_and_display_content,
277
  inputs=[url_input],
278
 
279
- outputs=[extracted_text, metadata_output, detected_lang, process_summary_button, process_audio_button, extracted_text, metadata_output]
280
  )
281
 
282
  process_summary_button.click(hierarchical_summarization, inputs=[extracted_text], outputs=[summary_output])
@@ -284,9 +282,6 @@ with gr.Blocks() as demo:
284
  # Step 2: Generate Audio After Text & Language Are Displayed
285
  process_audio_button.click(
286
  generate_audio_kokoro,
287
- #inputs=[extracted_text, detected_language],
288
- #inputs=[extracted_text, metadata_output, voice_selection],
289
- #inputs=[extracted_text, metadata_output["Detected Language"], voice_selection],
290
  inputs=[extracted_text, detected_lang, voice_selection],
291
  outputs=[full_audio_output]
292
  )
@@ -297,6 +292,4 @@ with gr.Blocks() as demo:
297
  outputs=[ner_output]
298
  )
299
 
300
- #process_button.click(process_url, inputs=[url_input], outputs=[extracted_text, detected_language, full_audio_output])
301
-
302
  demo.launch()
 
61
 
62
  # Add detected language to metadata
63
  metadata["Detected Language"] = detected_lang.upper()
 
 
64
  return cleaned_text, metadata, detected_lang, gr.update(visible=True), gr.update(visible=True), gr.update(visible=True), gr.update(visible=True)
65
 
66
  ### 2️⃣ Cleaning Function
 
166
  doc = nlp(chunk)
167
  for ent in doc.ents:
168
  entities.append({"text": ent.text, "type": ent.type})
169
+
170
+ formatted_entities = "\n".join([f"{i+1}: {ent['text']} --> {ent['type']}" for i, ent in enumerate(entities)])
171
+ return formatted_entities
172
  return entities
173
 
174
  ### 4️⃣ TTS Functionality (KokoroTTS)
 
252
  url_input = gr.Textbox(label="Enter URL", placeholder="https://example.com/article")
253
 
254
  voice_selection = gr.Dropdown(AVAILABLE_VOICES, label="Select Voice", value="af_bella")
255
+ with gr.Row():
256
+ process_text_button = gr.Button("Fetch Text & Detect Language",scale = 1)
257
+ process_summary_button = gr.Button("Summarize Text", visible=False,scale = 1)
258
+ process_audio_button = gr.Button("Generate Audio", visible=False,scale = 1)
259
+ process_ner_button = gr.Button("Extract Entities", visible=False,scale = 1) # ✅ New button for NER
260
 
261
  # Layout: Two adjacent columns (Text and Metadata)
262
  with gr.Row():
 
264
  metadata_output = gr.JSON(label="Article Metadata", visible=False) # Displays metadata
265
 
266
 
 
 
267
  detected_lang = gr.Textbox(label="Detected Language", visible=False)
268
  summary_output = gr.Textbox(label="Summary", visible=True, interactive=False)
269
  full_audio_output = gr.Audio(label="Generated Audio", visible=True)
270
+ ner_output = gr.Textbox(label="Extracted Entities", visible=False, interactive=False) # ✅ New output for NER
271
 
272
  # Step 1: Fetch Text & Detect Language First
273
  process_text_button.click(
274
  fetch_and_display_content,
275
  inputs=[url_input],
276
 
277
+ outputs=[extracted_text, metadata_output, detected_lang, process_summary_button, process_audio_button,process_ner_button, extracted_text, metadata_output]
278
  )
279
 
280
  process_summary_button.click(hierarchical_summarization, inputs=[extracted_text], outputs=[summary_output])
 
282
  # Step 2: Generate Audio After Text & Language Are Displayed
283
  process_audio_button.click(
284
  generate_audio_kokoro,
 
 
 
285
  inputs=[extracted_text, detected_lang, voice_selection],
286
  outputs=[full_audio_output]
287
  )
 
292
  outputs=[ner_output]
293
  )
294
 
 
 
295
  demo.launch()