Spaces:
Running
Running
File size: 1,160 Bytes
05d9728 f02b4db 8ef1537 f02b4db 8ef1537 f02b4db 05d9728 f02b4db 05d9728 f02b4db 05d9728 8ef1537 05d9728 cb3dd0a |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 |
import gradio as gr
from transformers import pipeline
# Load the question-answering model once at import time. On any failure,
# fall back to None so the UI can still start and report the problem to
# the user instead of crashing the whole app.
try:
    print("Loading the model...")
    qa_pipeline = pipeline("question-answering", model="distilbert-base-uncased-distilled-squad")
    print("Model loaded successfully.")
except Exception as e:
    # Deliberate broad catch: model download/load can fail in many ways
    # (network, disk, version mismatch); answer_question surfaces the error.
    print(f"Error loading model: {e}")
    qa_pipeline = None
def answer_question(context: str, question: str) -> str:
    """Answer *question* from *context* using the module-level QA pipeline.

    Returns the extracted answer string, or a human-readable error message
    when the model failed to load or an input is blank (returning text
    rather than raising keeps the Gradio UI responsive).
    """
    if qa_pipeline is None:
        return "Error: Model not loaded."
    # Guard against blank inputs before invoking the pipeline.
    if not context.strip() or not question.strip():
        return "Error: Please provide both a context and a question."
    result = qa_pipeline(question=question, context=context)
    return result['answer']
# Assemble the Gradio UI: a passage box and a question box feed
# answer_question, whose string result is shown as plain text.
context_input = gr.Textbox(lines=7, label="Context (Enter the passage)")
question_input = gr.Textbox(lines=2, label="Question")

interface = gr.Interface(
    fn=answer_question,
    inputs=[context_input, question_input],
    outputs="text",
    title="Question Answering Model",
    description="Ask a question based on the given context.",
)
# Print a message before launching the app to confirm it's starting
print("Launching the Gradio interface...")
# Launch the interface — starts serving the Gradio app
# (NOTE(review): launch() presumably blocks here; nothing runs after it).
interface.launch()
|