Update app.py

app.py CHANGED
@@ -48,7 +48,7 @@ def check_hallucination(assertion,citation):
     output = response.json()
     output = output[0][0]["score"]
 
-    return f"**
+    return f"**hallucination score:** {output}"
 
 # Define the API parameters
 VAPI_URL = "https://api-inference.huggingface.co/models/vectara/hallucination_evaluation_model"

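For context, `check_hallucination` (the hunk above) reads a score out of the Hugging Face Inference API response and now formats it as Markdown. A minimal sketch of how such a helper could call the Vectara hallucination evaluation model; the request headers, token name, and payload format are assumptions, not the Space's exact code:

```python
import os
import requests

VAPI_URL = "https://api-inference.huggingface.co/models/vectara/hallucination_evaluation_model"

def check_hallucination(assertion, citation):
    # Token variable name is an assumption; the Space may read it differently.
    headers = {"Authorization": f"Bearer {os.environ.get('HF_TOKEN', '')}"}
    # Pairing assertion and citation in a single input string is also an assumption.
    payload = {"inputs": f"{assertion} [SEP] {citation}"}

    response = requests.post(VAPI_URL, headers=headers, json=payload)
    output = response.json()
    # The model returns a nested list of label/score dicts; take the first score,
    # matching lines 48-49 in the hunk above.
    output = output[0][0]["score"]

    return f"**hallucination score:** {output}"
```
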
@@ -372,6 +372,7 @@ def process_summary_with_stablemed(summary):
 
 # Main function to handle the Gradio interface logic
 
+
 def process_and_query(input_language=None, audio_input=None, image_input=None, text_input=None):
     try:
         # Initialize the conditional variables

@@ -385,14 +386,12 @@ def process_and_query(input_language=None, audio_input=None, image_input=None, t
         # Process audio input
         if audio_input is not None:
             audio_text = process_speech(input_language, audio_input)
-
-            combined_text += "\n" + audio_text
+            combined_text += "\n\n**Audio Input:**\n" + audio_text
 
         # Process image input
         if image_input is not None:
-            image_text = process_image(image_input)
-
-            combined_text += "\n" + image_text
+            image_text = process_image(image_input)
+            combined_text += "\n\n**Image Input:**\n" + image_text
 
         # Check if combined text is empty
         if not combined_text.strip():

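The change above swaps the bare `"\n"` separators for bold Markdown headings per modality. A small self-contained example of what `combined_text` ends up containing (the audio and image strings are made up):

```python
combined_text = ""

# Hypothetical outputs from process_speech() and process_image().
audio_text = "Patient reports a persistent headache."
image_text = "X-ray shows no visible fracture."

combined_text += "\n\n**Audio Input:**\n" + audio_text
combined_text += "\n\n**Image Input:**\n" + image_text

print(combined_text)
# Two leading blank lines, then:
# **Audio Input:**
# Patient reports a persistent headache.
#
# **Image Input:**
# X-ray shows no visible fracture.
```
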
@@ -400,14 +399,11 @@ def process_and_query(input_language=None, audio_input=None, image_input=None, t
 
         # Use the text to query Vectara
         vectara_response_json = query_vectara(combined_text)
-        print("Vectara Response:", vectara_response_json) # Debug print
 
         # Parse the Vectara response
         vectara_response = json.loads(vectara_response_json)
         summary = vectara_response.get('summary', 'No summary available')
         sources_info = vectara_response.get('sources', [])
-        print("Summary:", summary) # Debug print
-        print("Sources Info:", sources_info) # Debug print
 
         # Format Vectara response in Markdown
         markdown_output = "### Vectara Response Summary\n"

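The three debug prints go away here; the parsing itself is unchanged. For reference, a hedged sketch of the JSON string this code expects from `query_vectara`: only the `summary` and `sources` keys are actually read, and every other field shown is an assumption:

```python
import json

# Assumed response shape; process_and_query only touches 'summary' and 'sources'.
vectara_response_json = json.dumps({
    "summary": "Short answer synthesized from the matched documents.",
    "sources": [
        {"text": "Matched passage from the corpus...", "score": 0.87},  # field names assumed
    ],
})

vectara_response = json.loads(vectara_response_json)
summary = vectara_response.get('summary', 'No summary available')
sources_info = vectara_response.get('sources', [])
print(summary, len(sources_info))
```
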
@@ -421,13 +417,11 @@ def process_and_query(input_language=None, audio_input=None, image_input=None, t
         markdown_output += "\n### Original Image Description\n"
         markdown_output += image_description + "\n"
 
-        # Process the summary with
+        # Process the summary with OpenAI
         final_response = process_summary_with_stablemed(summary)
-        print("Final Response:", final_response) # Debug print
 
         # Evaluate hallucination
         hallucination_label = evaluate_hallucination(final_response, summary)
-        print("Hallucination Label:", hallucination_label) # Debug print
 
         # Add final response and hallucination label to Markdown output
         markdown_output += "\n### Processed Summary with StableMed\n"

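`evaluate_hallucination` is not defined in this diff. Assuming it wraps the same Vectara scorer as `check_hallucination`, the remaining work is mapping a score to a label; a self-contained sketch of that mapping (the 0.5 threshold and the label wording are assumptions):

```python
def score_to_label(score: float, threshold: float = 0.5) -> str:
    # For this model, scores near 1.0 indicate the response is consistent
    # with the source text; the cut-off used here is an assumption.
    return "consistent" if score >= threshold else "hallucinated"

print(score_to_label(0.92))  # consistent
print(score_to_label(0.13))  # hallucinated
```
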
@@ -435,10 +429,9 @@ def process_and_query(input_language=None, audio_input=None, image_input=None, t
         markdown_output += "\n### Hallucination Evaluation\n"
         markdown_output += f"* **Label**: {hallucination_label}\n"
 
-        return markdown_output
+        return markdown_output, hallucination_label
     except Exception as e:
-
-        return "Error occurred during processing."
+        return f"Error occurred during processing: {e}", "No hallucination evaluation"
 
 
 

@@ -577,15 +570,15 @@ def create_interface():
 
     with gr.Accordion("Use Voice", open=False) as voice_accordion:
         audio_input = gr.Audio(label="Speak", type="filepath", sources="microphone")
-        audio_output = gr.Markdown(label="Output text")
+        audio_output = gr.Markdown(label="Output text") # Markdown component for audio
 
     with gr.Accordion("Use a Picture", open=False) as picture_accordion:
         image_input = gr.Image(label="Upload image")
-        image_output = gr.Markdown(label="Output text")
+        image_output = gr.Markdown(label="Output text") # Markdown component for image
 
     with gr.Accordion("MultiMed", open=False) as multimend_accordion:
         text_input = gr.Textbox(label="Use Text", lines=3)
-        text_output = gr.Markdown(label="Output text")
+        text_output = gr.Markdown(label="Output text") # Markdown component for text
 
     text_button = gr.Button("Use MultiMed")
     hallucination_output = gr.Label(label="Hallucination Evaluation")

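Since `process_and_query` now returns a `(markdown_output, hallucination_label)` pair, the button's event handler needs two output components, such as `text_output` and `hallucination_output` above. The `.click` wiring is not part of this diff, so here is only a minimal, self-contained sketch with a stand-in function:

```python
import gradio as gr

def process_and_query_stub(text_input=None):
    # Stand-in that mimics the new two-value return of process_and_query.
    return f"### Echo\n{text_input}", "consistent"

with gr.Blocks() as demo:
    text_input = gr.Textbox(label="Use Text", lines=3)
    text_output = gr.Markdown(label="Output text")
    hallucination_output = gr.Label(label="Hallucination Evaluation")
    text_button = gr.Button("Use MultiMed")
    # The two return values map onto the two output components, in order.
    text_button.click(fn=process_and_query_stub, inputs=[text_input],
                      outputs=[text_output, hallucination_output])

# demo.launch()  # uncomment to run locally
```
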
@@ -597,4 +590,4 @@ def create_interface():
     return iface
 
 iface = create_interface()
-iface.launch(show_error=True, debug=True, share=True)
+iface.launch(show_error=True, debug=True, share=True)