import gradio as gr
from transformers import pipeline
# Load the Flan-T5-Large model and tokenizer via a pipeline.
# If you're on a GPU Space, device=0 will place it on GPU.
# If you're on CPU only, leave device=-1.
model_name = "google/flan-t5-large"
pipe = pipeline("text2text-generation", model=model_name, device=-1)
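# Optional sketch (not part of the original Space): select the device automatically
# instead of hard-coding device=-1. This assumes torch is installed, which it is
# whenever the transformers pipeline runs on the PyTorch backend. Uncomment to use.
#
# import torch
# device = 0 if torch.cuda.is_available() else -1
# pipe = pipeline("text2text-generation", model=model_name, device=device)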
def judge_ecolinguistics(pairs_text):
    """
    Takes a multiline string of question–answer pairs and returns a model-generated
    score from 1 to 10 for each pair, along with a brief explanation.
    """
    # Construct a single prompt that instructs the model to score each Q&A pair.
    prompt = f"""
You are an ecolinguistics judge. You evaluate answers based on how thoroughly
they address ecological concerns, clarity of expression, and factual correctness.
Below is a set of question–answer pairs:
{pairs_text}
Please provide, for each pair, a single numerical score from 1 to 10 and a brief explanation.
"""
    # Use the pipeline to generate a response.
    response = pipe(prompt, max_length=512, truncation=True)[0]["generated_text"]
    return response
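
# Quick local check (illustrative only; the sample pair below is hypothetical and
# assumes the pipeline above has loaded successfully). Uncomment to run outside Gradio.
#
# sample = (
#     "Q1: What is an ecological niche?\n"
#     "A1: It is the role a species plays in its environment."
# )
# print(judge_ecolinguistics(sample))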
# Build the Gradio interface.
demo = gr.Interface(
    fn=judge_ecolinguistics,
    inputs=gr.Textbox(
        lines=10,
        label="Enter Your Question–Answer Pairs",
        placeholder="Example:\nQ1: What is an ecological niche?\nA1: It is the role a species plays in its environment.\n\nQ2: How does deforestation affect the climate?\nA2: It can reduce carbon sequestration and disrupt rainfall patterns.\n"
    ),
    outputs="text",
    title="Ecolinguistics Q&A Scorer (Flan-T5-Large)",
    description=(
        "Paste multiple question–answer pairs. The model will assign a score from 1–10 "
        "to each answer, considering ecological relevance and clarity. "
        "It will also provide a brief rationale for its scoring."
    )
)
if __name__ == "__main__":
    demo.launch()