rdave88 committed on
Commit
9f59898
·
verified ·
1 Parent(s): 92174e9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +44 -47
app.py CHANGED
@@ -1,55 +1,52 @@
1
  import gradio as gr
2
- from model_tools import extract_task, scrape_huggingface_models
3
-
4
# Final agent with Gradio UI
def run_agent(user_query: str):
    """
    Turn a free-text request into a markdown table of matching models.

    Extracts the canonical ML task from *user_query*, fetches candidate
    models for it, and renders them as a markdown table. Failures are
    reported as markdown strings rather than raised, since the result is
    displayed directly in the Gradio UI.
    """
    try:
        # 1. Canonical ML task (e.g. "text-classification")
        task = extract_task(user_query)

        # 2. Candidate models for that task
        models = scrape_huggingface_models(task)

        if not models:
            return f"❌ No models found for task `{task}`. Try refining your query."

        # 3. Markdown table: header first, then one row per model
        parts = [
            f"### 🔍 Models for task: `{task}`\n",
            "| Model Name | Task | Architecture |",
            "|------------|------|---------------|",
        ]
        for entry in models:
            model_id = entry.get("model_name", "unknown")
            model_task = entry.get("task", "unknown")
            model_arch = entry.get("architecture", "unknown")
            parts.append(
                f"| [{model_id}](https://huggingface.co/{model_id}) | {model_task} | {model_arch} |"
            )
        parts.append("")  # the rendered table ends with a trailing newline

        return "\n".join(parts)
    except Exception as e:
        return f"❌ Error: {str(e)}"
35
 
36
# Gradio interface for deployment
def gradio_ui():
    """Build and return the Gradio Blocks app for the model-finder agent."""
    with gr.Blocks() as demo:
        gr.Markdown("# Hugging Face Model Finder Agent")
        gr.Markdown("Enter a task description, and I'll find suitable ML models for you!")

        # Free-text task description typed by the user.
        query_box = gr.Textbox(
            label="Describe the ML Task",
            placeholder="e.g., 'I need a text summarization model'",
            lines=2,
        )

        # Markdown area that receives the agent's answer.
        results_md = gr.Markdown()

        # Pressing Enter in the textbox runs the agent.
        query_box.submit(run_agent, inputs=query_box, outputs=results_md)

    return demo
52
 
53
# Run the Gradio interface (will run locally, and can be deployed to Spaces)
if __name__ == "__main__":
    # launch() blocks and serves the UI until interrupted.
    gradio_ui().launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import gradio as gr
2
+ import requests
3
+ from bs4 import BeautifulSoup
4
+ from transformers import pipeline
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
 
6
# Small instruction-following model used by extract_task() to turn a
# free-text request into a task name. Loaded once at import time
# (downloads the checkpoint on first run).
task_extractor = pipeline("text2text-generation", model="google/flan-t5-small")
 
 
 
7
 
8
# Simulated LLM task extraction (replace with real call if local)
def extract_task(user_input):
    """Map a free-text request to a Hugging Face pipeline-tag-style task name.

    Args:
        user_input: Natural-language description of what the user wants.

    Returns:
        A lowercase task name (e.g. "text-classification") suitable for use
        as a ``pipeline_tag`` query parameter.
    """
    prompt = f"Classify the following ML task: {user_input}. Just reply with the task name."
    result = task_extractor(prompt, max_new_tokens=10)
    task = result[0]["generated_text"].strip().lower()
    # Bug fix: the model often answers with spaces ("text classification")
    # or a trailing period, which breaks the pipeline_tag URL downstream.
    # Pipeline tags are hyphen-separated, so normalise to that form.
    task = "-".join(task.rstrip(".").split())
    return task
14
 
 
 
 
 
15
 
16
# Scrape Hugging Face models by task
def get_models_for_task(task):
    """Scrape huggingface.co for up to five model names matching *task*.

    Args:
        task: Pipeline-tag task name, e.g. "text-classification".

    Returns:
        A non-empty list of strings: model names on success, otherwise a
        one-element placeholder or error message (the caller joins the
        list for display, so errors are kept in-band).
    """
    url = f"https://huggingface.co/models?pipeline_tag={task}"
    headers = {"User-Agent": "Mozilla/5.0"}
    try:
        # Bug fix: without a timeout a stalled connection hangs the
        # Gradio worker forever; fail fast instead.
        res = requests.get(url, headers=headers, timeout=10)
        # Bug fix: surface HTTP errors (404/5xx) instead of silently
        # parsing an error page and reporting "No models found".
        res.raise_for_status()
        soup = BeautifulSoup(res.text, "html.parser")

        results = []
        # NOTE(review): matching on Tailwind utility classes is fragile —
        # this selector breaks whenever huggingface.co restyles its list.
        for a in soup.find_all("a", class_="flex items-center gap-2"):
            # NOTE(review): split("/")[-1] drops the org namespace
            # ("google/flan-t5-small" -> "flan-t5-small"); confirm whether
            # full repo ids are wanted before changing display behavior.
            model_name = a.get("href", "").strip("/").split("/")[-1]
            if model_name:
                results.append(model_name)
            if len(results) >= 5:  # cap the suggestions at five
                break

        return results if results else ["No models found"]

    except Exception as e:
        # Keep the error in-band: the UI displays whatever list we return.
        return [f"Error: {str(e)}"]
36
+
37
# Combined predict function
def predict(user_input):
    """End-to-end handler: free text -> task name -> model suggestions."""
    inferred_task = extract_task(user_input)
    suggestions = get_models_for_task(inferred_task)
    lines = [f"🧠 Task: {inferred_task}", "📦 Models:", *suggestions]
    return "\n".join(lines)
42
+
43
# Gradio UI
# Wires the input textbox and button to predict(); demo.launch() starts
# the server (Spaces runs this module as its entry point).
with gr.Blocks() as demo:
    gr.Markdown("## 🤖 ML Task → Hugging Face Model Finder")
    with gr.Row():
        input_box = gr.Textbox(label="Describe your ML task")
        submit_btn = gr.Button("🔍 Find Models")
    output_box = gr.Textbox(label="Suggested Models", lines=10)
    # Button click runs the full pipeline and writes into the output box.
    submit_btn.click(predict, inputs=input_box, outputs=output_box)

demo.launch()