from opentelemetry.sdk.trace.export import SimpleSpanProcessor
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
from openinference.instrumentation.smolagents import SmolagentsInstrumentor
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry import trace
from evaluator import Evaluator
from runner import Runner
from settings import Settings
import os
import pandas as pd
import gradio as gr
import logging
logging.basicConfig(level=logging.INFO, force=True)
logger = logging.getLogger(__name__)
settings = Settings()
evaluator = Evaluator(settings)
runner = Runner(settings)


# Create a TracerProvider for OpenTelemetry
trace_provider = TracerProvider()

# Add a SimpleSpanProcessor with the OTLPSpanExporter to send traces
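# (with no endpoint configured here, the exporter reads the standard
# OTEL_EXPORTER_OTLP_ENDPOINT / OTEL_EXPORTER_OTLP_TRACES_ENDPOINT environment
# variables and otherwise defaults to http://localhost:4318/v1/traces)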
trace_provider.add_span_processor(SimpleSpanProcessor(OTLPSpanExporter()))

# Set the global default tracer provider
trace.set_tracer_provider(trace_provider)
tracer = trace.get_tracer(__name__)
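# `tracer` is not used elsewhere in this file, but it can wrap custom spans
# around your own steps, e.g.:
#   with tracer.start_as_current_span("run_agent"):
#       ...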

# Instrument smolagents with the configured provider
SmolagentsInstrumentor().instrument(tracer_provider=trace_provider)


def run(test_mode=False) -> pd.DataFrame:
    if test_mode:
        questions = [evaluator.get_one_question()]
        # questions = [evaluator.get_one_question(task_id='8e867cd7-cff9-4e6c-867a-ff5ddc2550be')]
        # questions = [evaluator.get_one_question('3f57289b-8c60-48be-bd80-01f8099ca449')]
        # questions = [evaluator.get_one_question('cca530fc-4052-43b2-b130-b30968d8aa44')]
    else:
        questions = evaluator.get_questions()

    return runner.run_agent(questions)


def submit():
    # Forward whatever the evaluator returns so it can be shown in the status box.
    return evaluator.submit_answers()

# --- Build Gradio Interface using Blocks ---
with gr.Blocks() as demo:
    gr.Markdown("# Basic Agent Evaluation Runner")
    gr.Markdown(
        """
        **Instructions:**

        1.  Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc.
        2.  Log in to your Hugging Face account using the button below. This uses your HF username for submission.
        3.  Use 'Get One Answer' or 'Run Full Evaluation' to fetch questions and run your agent, then click 'Submit All Answers' to submit the answers and see the score.

        ---
        **Disclaimers:**
        Once you click the "Submit All Answers" button, it can take quite some time (this is how long the agent needs to go through all the questions).
        This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance, to work around the long submission delay, you could cache the answers and submit them in a separate action, or even answer the questions asynchronously.
        """
    )

    gr.LoginButton()

    run_one_button = gr.Button("Get One Answer")
    run_all_button = gr.Button("Run Full Evaluation")
    submit_button = gr.Button("Submit All Answers")

    status_output = gr.Textbox(
        label="Run Status / Submission Result", lines=5, interactive=False)
    results_table = gr.DataFrame(
        label="Questions and Agent Answers", wrap=True)

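    # The hidden Checkbox components below only exist to pass a constant
    # test_mode value into run(); visible=False keeps them out of the UI.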
    run_one_button.click(
        fn=run, inputs=[gr.Checkbox(value=True, visible=False)],
        outputs=[results_table]
    )
    run_all_button.click(
        fn=run, inputs=[gr.Checkbox(value=False, visible=False)],
        outputs=[results_table]
    )
    submit_button.click(
        fn=submit,
        outputs=[status_output]
    )

if __name__ == "__main__":
    print("\n" + "-"*30 + " App Starting " + "-"*30)
    # Check for SPACE_HOST and SPACE_ID at startup for information
    space_host_startup = os.getenv("SPACE_HOST")
    space_id_startup = os.getenv("SPACE_ID")  # Get SPACE_ID at startup

    if space_host_startup:
        print(f"✅ SPACE_HOST found: {space_host_startup}")
        print(
            f"   Runtime URL should be: https://{space_host_startup}.hf.space")
    else:
        print("ℹ️  SPACE_HOST environment variable not found (running locally?).")

    if space_id_startup:  # Print repo URLs if SPACE_ID is found
        print(f"✅ SPACE_ID found: {space_id_startup}")
        print(f"   Repo URL: https://huggingface.co/spaces/{space_id_startup}")
        print(
            f"   Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
    else:
        print("ℹ️  SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")

    print("-"*(60 + len(" App Starting ")) + "\n")

    print("Launching Gradio Interface for Basic Agent Evaluation...")
    demo.launch(debug=True, share=False)