mharkey committed
Commit 5ed5886 · verified · 1 Parent(s): 1368c13

Update app.py

Files changed (1):
  1. app.py +25 -16
app.py CHANGED
@@ -2,30 +2,39 @@ import gradio as gr
 from transformers import pipeline
 from datasets import load_dataset
 
-# Load model from Hugging Face Hub
-model_name = "Qwen/Qwen1.5-7B-Chat"  # or any other LLM
-pipe = pipeline("text-generation", model=model_name)
+# Choose your model here - change if needed
+model_name = "Qwen/Qwen1.5-7B-Chat"
 
-# Optionally load GTA queries
+# Load the text generation pipeline
+pipe = pipeline("text-generation", model=model_name, device=0)
+
+# Load GTA dataset (optional)
 gta_data = load_dataset("open-compass/GTA", split="test")
 
-def run_model(user_query, use_sample):
-    if use_sample:
-        question = gta_data[int(user_query)]["question"]
+# Inference function
+def run_model(input_text, use_gta_index):
+    if use_gta_index:
+        try:
+            index = int(input_text)
+            question = gta_data[index]["question"]
+        except Exception as e:
+            return f"❌ Error: Invalid index - {str(e)}"
     else:
-        question = user_query
+        question = input_text
 
     output = pipe(question, max_new_tokens=256, do_sample=True)
-    return f"**Input**: {question}\n\n**Output**:\n{output[0]['generated_text']}"
+    return f"**Question:** {question}\n\n**Response:**\n{output[0]['generated_text']}"
 
+# Gradio UI
 with gr.Blocks() as demo:
-    gr.Markdown("## 🧠 GTA Tool Reasoning Demo with Hugging Face Models")
+    gr.Markdown("# 🤖 GTA-style Reasoning with Hugging Face Models")
+    gr.Markdown("Enter a question or choose a sample from the GTA benchmark.")
     with gr.Row():
-        user_input = gr.Textbox(label="Enter your query or GTA index (if using sample)")
-        use_sample = gr.Checkbox(label="Use sample from GTA dataset (enter index)", value=False)
-    run_button = gr.Button("Run")
-    output = gr.Markdown()
+        input_text = gr.Textbox(label="Enter a question or GTA index (e.g., 0–228)")
+        use_sample = gr.Checkbox(label="Use as GTA index", value=False)
+    run_button = gr.Button("Generate")
+    output_text = gr.Markdown()
 
-    run_button.click(run_model, inputs=[user_input, use_sample], outputs=output)
+    run_button.click(run_model, inputs=[input_text, use_sample], outputs=output_text)
 
 demo.launch()
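
One caveat on this change: `pipeline(..., device=0)` pins generation to the first CUDA GPU, so the app will fail to start on CPU-only hardware (such as a free Space). A minimal sketch of a device fallback, assuming the same model and `transformers` pipeline API as the commit; the `device` variable is illustrative and not part of the committed code:

```python
import torch
from transformers import pipeline

model_name = "Qwen/Qwen1.5-7B-Chat"

# device=0 selects the first CUDA GPU; device=-1 runs on CPU.
# Fall back to CPU when no GPU is visible so the app still launches.
device = 0 if torch.cuda.is_available() else -1
pipe = pipeline("text-generation", model=model_name, device=device)
```

Note that a 7B chat model will be very slow on CPU; a smaller checkpoint may be a better fit for CPU-only hardware.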