import json

import gradio as gr
from transformers import pipeline
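
# Load Qwen2.5-3B as a Transformers text-generation pipeline.
# device=0 places the model on the first GPU; pass device=-1 to run on CPU instead.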
model_name = "Qwen/Qwen2.5-3B"
pipe = pipeline("text-generation", model=model_name, device=0)
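
# Load the GTA test queries; run_model indexes into this list and reads each entry's "question" field.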
with open("gta_test.json", "r", encoding="utf-8") as f:
    gta_data = json.load(f)


def run_model(input_text, use_gta_index):
    # Either look up a GTA question by its numeric index or treat the input as a free-form question.
    if use_gta_index:
        try:
            index = int(input_text)
            question = gta_data[index]["question"]
        except Exception as e:
            return f"❌ Invalid index: {e}"
    else:
        question = input_text

    # Sampled generation; the pipeline's generated_text includes the prompt followed by the completion.
    output = pipe(question, max_new_tokens=256, do_sample=True)
    return f"**Question:** {question}\n\n**Response:**\n{output[0]['generated_text']}"


with gr.Blocks() as demo:
    gr.Markdown("# 🤖 GTA Reasoning with Qwen2.5-3B")
    gr.Markdown("Use a GTA query by index or enter your own.")
    with gr.Row():
        input_text = gr.Textbox(label="Enter a question or GTA index (0–228)")
        use_index = gr.Checkbox(label="Treat input as GTA index", value=False)
    run_btn = gr.Button("Generate")
    output_md = gr.Markdown()

    run_btn.click(run_model, inputs=[input_text, use_index], outputs=output_md)

demo.launch()