# Hugging Face Space (status: Running) — page header captured during copy/paste.
import gradio as gr
import requests
from bs4 import BeautifulSoup
from transformers import pipeline

# Small instruction-tuned model used to map a free-text description to an ML task name.
# Loaded once at import time so the Gradio handler doesn't re-load it per request.
task_extractor = pipeline("text2text-generation", model="google/flan-t5-small")
# LLM task extraction — replace with a real hosted-LLM call if preferred.
def extract_task(user_input):
    """Classify *user_input* into an ML task name using flan-t5-small.

    Returns the model's answer stripped and lower-cased, e.g.
    "text-classification". NOTE(review): the output is free text and is not
    guaranteed to be a valid Hugging Face pipeline tag — confirm downstream.
    """
    prompt = f"Classify the following ML task: {user_input}. Just reply with the task name."
    # Tight generation budget: we only want a short label, not a sentence.
    result = task_extractor(prompt, max_new_tokens=10)
    return result[0]["generated_text"].strip().lower()
# Scrape Hugging Face models by task
def get_models_for_task(task):
    """Return up to 5 model names listed under *task* on huggingface.co.

    Best-effort by design: on any network/HTTP/parse failure this returns a
    one-element list describing the error instead of raising, so the Gradio
    output box always gets something to display.
    """
    url = f"https://huggingface.co/models?pipeline_tag={task}"
    headers = {"User-Agent": "Mozilla/5.0"}
    try:
        # Timeout so a hung request cannot freeze the UI indefinitely.
        res = requests.get(url, headers=headers, timeout=10)
        # Turn HTTP error statuses into exceptions handled by the except below.
        res.raise_for_status()
        soup = BeautifulSoup(res.text, "html.parser")
        results = []
        # NOTE(review): this CSS class is an implementation detail of the hub
        # page and may break without notice; the huggingface_hub API
        # (list_models) would be a more robust alternative.
        for a in soup.find_all("a", class_="flex items-center gap-2"):
            model_name = a.get("href", "").strip("/").split("/")[-1]
            if model_name:
                results.append(model_name)
            if len(results) >= 5:
                break
        return results if results else ["No models found"]
    except Exception as e:
        # Surface the failure in the output box rather than crashing the app.
        return [f"Error: {str(e)}"]
# Combined predict function: description -> task -> model suggestions.
def predict(user_input):
    """Run the full pipeline and format a plain-text result for the output box."""
    task = extract_task(user_input)
    models = get_models_for_task(task)
    # Emoji restored from mojibake in the original paste ("π§"/"π¦").
    return f"🧠 Task: {task}\n📦 Models:\n" + "\n".join(models)
# Gradio UI — one input textbox, a button, and a multiline output textbox.
# (Emoji restored from mojibake in the original paste.)
with gr.Blocks() as demo:
    gr.Markdown("## 🤖 ML Task → Hugging Face Model Finder")
    with gr.Row():
        input_box = gr.Textbox(label="Describe your ML task")
        submit_btn = gr.Button("🔍 Find Models")
    output_box = gr.Textbox(label="Suggested Models", lines=10)
    # Wire the button to the pipeline: textbox in, textbox out.
    submit_btn.click(predict, inputs=input_box, outputs=output_box)

demo.launch()