mharkey commited on
Commit
7aab66d
·
verified ·
1 Parent(s): 6a02f5c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +14 -15
app.py CHANGED
@@ -1,37 +1,36 @@
1
  import gradio as gr
2
- import json
3
  from transformers import pipeline
 
4
 
5
- # Load Qwen2.5-3B (should fit in 16GB with CPU)
6
  model_name = "Qwen/Qwen2.5-3B"
7
  pipe = pipeline("text-generation", model=model_name, device=0)
8
 
9
- # βœ… Load local GTA dataset (test set)
10
- with open("gta_test.json", "r", encoding="utf-8") as f:
11
- gta_data = json.load(f)
12
 
13
- def run_model(input_text, use_gta_index):
14
- if use_gta_index:
15
  try:
16
- index = int(input_text)
17
- question = gta_data[index]["question"]
18
  except Exception as e:
19
- return f"❌ Invalid index: {e}"
20
  else:
21
- question = input_text
22
 
23
  output = pipe(question, max_new_tokens=256, do_sample=True)
24
  return f"**Question:** {question}\n\n**Response:**\n{output[0]['generated_text']}"
25
 
26
  with gr.Blocks() as demo:
27
- gr.Markdown("# πŸ€– GTA Reasoning with Qwen2.5-3B")
28
- gr.Markdown("Use a GTA query by index or enter your own.")
29
  with gr.Row():
30
- input_text = gr.Textbox(label="Enter a question or GTA index (0–228)")
31
  use_index = gr.Checkbox(label="Treat input as GTA index", value=False)
32
  run_btn = gr.Button("Generate")
33
  output_md = gr.Markdown()
34
 
35
- run_btn.click(run_model, inputs=[input_text, use_index], outputs=output_md)
36
 
37
  demo.launch()
 
1
  import gradio as gr
 
2
  from transformers import pipeline
3
+ from datasets import load_dataset
4
 
5
# Model choice: the 3B variant keeps memory usage small enough for this Space.
model_name = "Qwen/Qwen2.5-3B"
pipe = pipeline("text-generation", model=model_name, device=0)

# Pull the GTA benchmark's test split from the Hugging Face Hub.
gta = load_dataset("Jize1/GTA", split="test")
 
11
 
12
def run_model(input_text, use_gta_idx):
    """Answer either a free-form question or a question looked up by GTA index.

    Args:
        input_text: The user's question, or — when ``use_gta_idx`` is True —
            a decimal index into the GTA test split.
        use_gta_idx: If True, interpret ``input_text`` as a dataset index.

    Returns:
        A Markdown string containing the resolved question and the model's
        generated response, or an error message for an invalid index.
    """
    if use_gta_idx:
        try:
            # Strip whitespace so inputs like " 5 " parse, mirroring the
            # free-text branch below.
            idx = int(input_text.strip())
            question = gta[idx]["dialogs"][0]["content"].strip()
        # Only catch what this lookup can actually raise: a non-numeric
        # input (ValueError), an out-of-range index (IndexError), or an
        # unexpected record shape (KeyError/TypeError). A broad `except
        # Exception` would mislabel unrelated bugs as "Invalid index".
        except (ValueError, IndexError, KeyError, TypeError) as e:
            return f"❌ Invalid index (0–{len(gta)-1}): {e}"
    else:
        question = input_text.strip()

    # do_sample=True: responses are intentionally non-deterministic.
    output = pipe(question, max_new_tokens=256, do_sample=True)
    return f"**Question:** {question}\n\n**Response:**\n{output[0]['generated_text']}"
24
 
25
# Assemble the interface: one textbox doubles as either a raw question or a
# dataset index, with a checkbox choosing the interpretation.
# NOTE(review): the diff's flattened indentation does not show whether the
# button/output live inside the Row — confirm layout against the deployed app.
with gr.Blocks() as demo:
    gr.Markdown("# πŸ€– GTA Reasoning Demo (Qwen2.5‑3B + GTA Dataset)")
    gr.Markdown("Enter your own question or a GTA index (0–228).")
    with gr.Row():
        input_text = gr.Textbox(label="Your input or GTA index")
        use_index = gr.Checkbox(label="Treat input as GTA index", value=False)
    run_btn = gr.Button("Generate")
    output_md = gr.Markdown()

    # Wire the button to the generation function.
    run_btn.click(fn=run_model, inputs=[input_text, use_index], outputs=[output_md])

demo.launch()