import gradio as gr
from model_tools import extract_task, scrape_huggingface_models
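# Assumed interface of model_tools (inferred from how the helpers are used below):
#   extract_task(user_query: str) -> str: maps a natural-language query to a task slug
#   scrape_huggingface_models(task: str) -> list[dict]: returns model metadata entries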

# Final agent with Gradio UI
def run_agent(user_query: str):
    """
    Given a user query, extracts the ML task, finds relevant models, and formats results in markdown.
    This function is used for Gradio UI interaction.
    """
    try:
        # 1. Extract the standard ML task (e.g., "text-classification")
        task = extract_task(user_query)

        # 2. Get relevant models for the task
        models = scrape_huggingface_models(task)

        if not models:
            return f"❌ No models found for task `{task}`. Try refining your query."

        # 3. Format response as a markdown table
        response = f"### 🔍 Models for task: `{task}`\n\n"
        response += "| Model Name | Task | Architecture |\n"
        response += "|------------|------|---------------|\n"

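        # Each model entry is expected to be a dict exposing "model_name",
        # "task", and "architecture"; missing keys fall back to "unknown".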
        for model in models:
            name = model.get("model_name", "unknown")
            task_name = model.get("task", "unknown")
            arch = model.get("architecture", "unknown")
            response += f"| [{name}](https://huggingface.co/{name}) | {task_name} | {arch} |\n"

        return response

    except Exception as e:
        return f"❌ Error: {str(e)}"

# Gradio interface for deployment
def gradio_ui():
    with gr.Blocks() as demo:
        gr.Markdown("# Hugging Face Model Finder Agent")
        gr.Markdown("Enter a task description, and I'll find suitable ML models for you!")

        # User input for task description
        user_input = gr.Textbox(label="Describe the ML Task", placeholder="e.g., 'I need a text summarization model'", lines=2)

        # Output for model search results
        output = gr.Markdown()

        # Connect the input/output to the agent
        user_input.submit(run_agent, inputs=user_input, outputs=output)

    return demo

# Launch the Gradio interface (runs locally; the same app can be deployed to Spaces)
if __name__ == "__main__":
    gradio_ui().launch()
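
# Note: on Hugging Face Spaces with the Gradio SDK, this file is typically saved as app.py
# alongside a requirements.txt listing `gradio`; the Space then runs it automatically.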