Spaces:
Sleeping
Sleeping
| import os | |
| import gradio as gr | |
| import requests | |
| # import math | |
| # import inspect | |
| import pandas as pd | |
| import datetime | |
| # from dotenv import load_dotenv | |
| from langchain.tools import tool | |
| # from langchain_community.tools import get_all_tools | |
| # from typing import TypedDict, Annotated | |
| # from langgraph.graph.message import add_messages | |
| # from langchain_core.messages import AnyMessage, HumanMessage, AIMessage | |
| # from langgraph.prebuilt import ToolNode | |
| # from langgraph.graph import START, StateGraph, END, Graph | |
| # from langgraph.prebuilt import tools_condition | |
| from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace | |
| from langchain.agents import initialize_agent, AgentType | |
| # from langchain_community.llms import HuggingFaceHub | |
| # from langchain_community.chat_models import ChatHuggingFace | |
| # import openai | |
| # from openai import OpenAI | |
| # from langchain_openai import ChatOpenAI | |
## # Load environment variables from .env file
# --- Constants ---
# Scoring server for the HF Agents course (questions + answer submission).
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
# Load the environment variables
# load_dotenv()
# Secrets are read from the process environment (Space settings), not .env;
# either may be None locally, in which case the dependent tools return "error".
HF_ACCESS_KEY = os.getenv('HF_ACCESS_KEY')      # Hugging Face Inference API token
WEATHER_API_KEY = os.getenv('WEATHER_API_KEY')  # OpenWeatherMap API key
# OPENAI_MODEL = os.getenv('OPENAI_MODEL') #'gpt-3.5-turbo-0613'
# OPENAI_KEY = os.getenv('OPENAI_KEY')
# client = OpenAI(api_key = OPENAI_KEY)
########## ----- DEFINING TOOLS -----##########
# --- TOOL 1: Web Search Tool (DuckDuckGo) ---
def search_tool(query: str) -> str:
    """Answer general knowledge or current events queries using DuckDuckGo.

    Returns the first sentence of the best available Instant Answer field,
    "no_answer" when none is populated, or "error" on any failure.
    """
    try:
        # BUG FIX: the query was interpolated into the URL unescaped, so
        # spaces/&/# in multi-word queries produced a malformed request.
        # Let requests percent-encode it via params=.
        resp = requests.get(
            "https://api.duckduckgo.com/",
            params={"q": query, "format": "json", "no_html": 1},
            timeout=20,
        )
        resp.raise_for_status()
        data = resp.json()
        # Check the richest field first; each may be an empty string.
        for key in ["AbstractText", "Answer", "Definition"]:
            if data.get(key):
                return data[key].split(".")[0]
        return "no_answer"
    except Exception:
        # Best-effort tool: the agent treats "error" as a sentinel value.
        return "error"
| # when you use the @tool decorator from langchain.tools, the tool.name and tool.description are automatically extracted from your function | |
| # tool.name is set to the function name (e.g., `search_tool`), and | |
| # tool.description is set to the docstring of the function (the triple-quoted string right under def ...) (e.g., "Answer general knowledge or current events queries using DuckDuckGo."). | |
# --- TOOL 2: Weather Tool (OpenWeatherMap) ---
def get_weather(city: str) -> str:
    """Get current temperature in Celsius for a city.

    Returns the rounded temperature as a string, or "error" on failure.
    """
    # BUG FIX: the original read api_key from the environment and then
    # ignored it, interpolating the module-level global instead (and
    # re-imported os locally for no reason). Read at call time, falling
    # back to the value captured at import.
    api_key = os.environ.get("WEATHER_API_KEY", WEATHER_API_KEY)
    try:
        # params= percent-encodes the city name (e.g. "São Paulo").
        resp = requests.get(
            "https://api.openweathermap.org/data/2.5/weather",
            params={"q": city, "appid": api_key, "units": "metric"},
            timeout=20,
        )
        resp.raise_for_status()
        data = resp.json()
        return str(round(data["main"]["temp"]))
    except Exception:
        return "error"
# --- TOOL 3: Calculator Tool ---
def calculator(expression: str) -> str:
    """Evaluate math expressions.

    Only digits, ``+-*/().`` and spaces are accepted; any other character,
    or any evaluation failure, yields "error".
    """
    permitted = set("0123456789+-*/(). ")
    try:
        if any(ch not in permitted for ch in expression):
            return "error"
        # Builtins are stripped so no names can resolve inside eval;
        # the whitelist above already excludes letters and quotes.
        value = eval(expression, {"__builtins__": None}, {})
        return str(value)
    except Exception:
        return "error"
# --- TOOL 4: Unit Conversion Tool ---
def convert_units(args: str) -> str:
    """
    Convert between metric and imperial units (length, mass, temperature).
    Input format: '<value> <from_unit> to <to_unit>', e.g. '10 meters to feet'
    """
    # Supported (from, to) unit pairs and their conversion functions.
    factors = {
        ("meters", "feet"): lambda v: v * 3.28084,
        ("feet", "meters"): lambda v: v / 3.28084,
        ("kg", "lb"): lambda v: v * 2.20462,
        ("lb", "kg"): lambda v: v / 2.20462,
        ("celsius", "fahrenheit"): lambda v: v * 9/5 + 32,
        ("fahrenheit", "celsius"): lambda v: (v - 32) * 5/9,
    }
    try:
        tokens = args.lower().split()
        amount = float(tokens[0])
        # tokens[2] is expected to be "to"; any malformed input raises
        # (IndexError/ValueError) and falls through to "error".
        convert = factors.get((tokens[1], tokens[3]))
        if convert is None:
            return "error"
        return str(round(convert(amount), 2))
    except Exception:
        return "error"
# --- TOOL 5: Date & Time Tool ---
def get_time(input: str) -> str:
    """Get current UTC time as HH:MM.

    The argument is ignored; single-input signature is required for
    agent tool dispatch.
    """
    # BUG FIX: datetime.datetime.utc() does not exist and raised
    # AttributeError on every call; use an aware "now" in UTC.
    return datetime.datetime.now(datetime.timezone.utc).strftime("%H:%M")
def get_date(input: str) -> str:
    """Get current date as YYYY-MM-DD (UTC).

    The argument is ignored; single-input signature is required for
    agent tool dispatch.
    """
    # BUG FIX: datetime.datetime.utc() does not exist and raised
    # AttributeError on every call; use an aware "now" in UTC.
    return datetime.datetime.now(datetime.timezone.utc).strftime("%Y-%m-%d")
# --- TOOL 6: Wikipedia Summary Tool ---
def wikipedia_summary(query: str) -> str:
    """Get a short summary of a topic from Wikipedia.

    Returns the first sentence of the page extract, "no_answer" when the
    page has no extract, or "error" on any failure.
    """
    from urllib.parse import quote
    # BUG FIX: only spaces were mapped to underscores; reserved characters
    # ('?', '&', '#', '%', ...) in a title broke the REST path segment.
    # Percent-encode the underscored title fully.
    title = quote(query.replace(" ", "_"), safe="")
    url = f"https://en.wikipedia.org/api/rest_v1/page/summary/{title}"
    try:
        resp = requests.get(url, timeout=20)
        resp.raise_for_status()
        data = resp.json()
        return data.get("extract", "no_answer").split(".")[0]
    except Exception:
        return "error"
# --- TOOL 7: Dictionary Tool ---
def dictionary_lookup(word: str) -> str:
    """Get the definition of an English word.

    Returns the first definition of the first meaning, or "error" on
    any network/parse failure.
    """
    endpoint = f"https://api.dictionaryapi.dev/api/v2/entries/en/{word}"
    try:
        response = requests.get(endpoint, timeout=20)
        response.raise_for_status()
        entries = response.json()
        first_meaning = entries[0]["meanings"][0]
        return first_meaning["definitions"][0]["definition"]
    except Exception:
        return "error"
# --- TOOL 8: Currency Conversion Tool ---
def currency_convert(args: str) -> str:
    """
    Convert an amount from one currency to another.
    Input format: '<amount> <from_currency> to <to_currency>', e.g. '100 USD to EUR'
    """
    try:
        tokens = args.upper().split()
        amount = float(tokens[0])
        # tokens[2] is expected to be "to"; malformed input raises and
        # falls through to "error" before any network call is made.
        from_currency, to_currency = tokens[1], tokens[3]
        url = f"https://api.exchangerate.host/convert?from={from_currency}&to={to_currency}&amount={amount}"
        response = requests.get(url, timeout=20)
        response.raise_for_status()
        payload = response.json()
        return str(round(payload["result"], 2))
    except Exception:
        return "error"
# --- TOOL 9: Image Captioning Tool ---
def image_caption(image_url: str) -> str:
    """Generate a descriptive caption for an image given its URL.

    Calls the HF Inference API (BLIP base); returns the generated
    caption, "no_caption" when absent, or "error" on failure.
    """
    endpoint = "https://api-inference.huggingface.co/models/Salesforce/blip-image-captioning-base"
    auth = {"Authorization": f"Bearer {HF_ACCESS_KEY}"}
    body = {"inputs": image_url}
    try:
        response = requests.post(endpoint, headers=auth, json=body, timeout=30)
        response.raise_for_status()
        result = response.json()
        # The API returns a list of generations on success, a dict otherwise.
        if isinstance(result, list):
            return result[0]["generated_text"]
        return result.get("generated_text", "no_caption")
    except Exception:
        return "error"
# --- TOOL 10: Optical Character Recognition (OCR) Tool ---
def ocr_image(image_url: str) -> str:
    """Extract text from an image given its URL.

    Uses the HF Inference API document-QA model with a fixed "what text"
    question; returns the answer, "no_text_found" when absent, or
    "error" on failure.
    """
    endpoint = "https://api-inference.huggingface.co/models/impira/layoutlm-document-qa"
    auth = {"Authorization": f"Bearer {HF_ACCESS_KEY}"}
    body = {"inputs": {"image": image_url, "question": "What text is in the image?"}}
    try:
        response = requests.post(endpoint, headers=auth, json=body, timeout=30)
        response.raise_for_status()
        result = response.json()
        return result.get("answer", "no_text_found")
    except Exception:
        return "error"
# --- TOOL 11: Image Classification Tool ---
def classify_image(image_url: str) -> str:
    """Classify the main object or scene in an image given its URL.

    Calls the HF Inference API (ViT base); returns the top label,
    "no_label" when absent, or "error" on failure.
    """
    endpoint = "https://api-inference.huggingface.co/models/google/vit-base-patch16-224"
    auth = {"Authorization": f"Bearer {HF_ACCESS_KEY}"}
    body = {"inputs": image_url}
    try:
        response = requests.post(endpoint, headers=auth, json=body, timeout=30)
        response.raise_for_status()
        result = response.json()
        # Classification returns a ranked list of {label, score} on success.
        if isinstance(result, list):
            return result[0]["label"]
        return result.get("label", "no_label")
    except Exception:
        return "error"
##-- Tool Discovery ---
# NOTE(review): these functions are plain callables — the @tool decorator
# imported at the top of the file is never applied — so they expose
# __name__/__doc__ rather than .name/.description. initialize_agent may
# also expect BaseTool objects; confirm whether @tool should be applied.
tools_list = [
    search_tool,
    get_weather,
    calculator,
    convert_units,
    get_time,
    get_date,
    wikipedia_summary,
    dictionary_lookup,
    currency_convert,
    image_caption,
    ocr_image,
    classify_image,
]
# BUG FIX: the original read tool.name / tool.description, attributes that
# only exist on @tool-decorated objects; on plain functions this raised
# AttributeError at module import. Fall back to __name__/__doc__ so the
# description list builds in both cases.
tool_descriptions = "\n".join(
    f"- {getattr(t, 'name', t.__name__)}: {getattr(t, 'description', t.__doc__)}"
    for t in tools_list
)
## --
# --- System Prompt for the Agent ---
# Prepended (together with the generated tool list) to every question in
# run_and_submit_all; instructs the model to output only the final answer.
system_prompt = f"""
You are an intelligent assistant with access to the following tools:
{tool_descriptions}
For every question, you must do your internal reasoning using the Thought → Action → Observation → Answer process, but your output to the user should be ONLY the final answer as a single value (number, string, or comma-separated list), with no extra explanation, thoughts, actions, or observations.
**Your output must be only the answer. Do not include any reasoning, tool calls, or explanations.**
Examples:
Q: What is 7 * (3 + 2)?
Your Output: 35
Q: What’s the weather in Tokyo?
Your Output: 22
Q: What is the capital of France?
Your Output: Paris
Q: Convert 10 meters to feet.
Your Output: 32.81
Instructions:
- Always do your internal reasoning (Thought → Action → Observation → Answer) before producing the answer, but DO NOT show this reasoning to the user.
- Use a tool only if necessary, and don't use multiple tools in a call. Don't use a tool if you can answer directly.
- Your output must be a single value (number, string, or comma-separated list) with no extra explanation or formatting.
- Be concise and accurate.
"""
## --- Initialize Hugging Face Model ---
# Generate the chat interface, including the tools
# Remote HF Inference endpoint; requires HF_ACCESS_KEY to be set.
llm = HuggingFaceEndpoint(
    repo_id="Qwen/Qwen2.5-Coder-32B-Instruct",
    huggingfacehub_api_token=HF_ACCESS_KEY,
    # model_kwargs={'prompt': system_prompt}
    # system_prompt=system_prompt,
)
# Chat wrapper around the raw endpoint; currently unused by the agent below.
chat_llm = ChatHuggingFace(llm=llm)
'''
llm = ChatOpenAI(
    openai_api_key=OPENAI_KEY,
    model_name=OPENAI_MODEL,
    temperature=0.1
)
'''
# chat = ChatHuggingFace(llm=llm, verbose=True)
# tools = [search_tool, fetch_weather]
# chat_with_tools = chat.bind_tools(tools)
# ReAct agent over the tool list. NOTE(review): tools_list holds plain
# functions, not @tool-decorated BaseTool objects — confirm initialize_agent
# accepts them; handle_parsing_errors keeps malformed LLM output non-fatal.
agent = initialize_agent(
    tools=tools_list,
    llm=llm,
    # llm=chat_llm,
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    # agent_kwargs={"system_message": system_prompt},
    verbose=True,
    handle_parsing_errors=True
)
## --
def run_and_submit_all( profile: gr.OAuthProfile | None):
    """
    Fetches all questions, runs the BasicAgent on them, submits all answers,
    and displays the results.

    Returns a (status_message, results_dataframe_or_None) tuple suitable for
    the two Gradio outputs wired to the Run button.
    """
    # --- Determine HF Space Runtime URL and Repo URL ---
    space_id = os.getenv("SPACE_ID") # Get the SPACE_ID for sending link to the code
    # Gradio injects the OAuth profile when the user is logged in; bail out
    # early otherwise since the scoring server needs a username.
    if profile:
        username= f"{profile.username}"
        print(f"User logged in: {username}")
    else:
        print("User not logged in.")
        return "Please Login to Hugging Face with the button.", None
    api_url = DEFAULT_API_URL
    questions_url = f"{api_url}/questions"
    submit_url = f"{api_url}/submit"
    """
    # 1. Instantiate Agent ( modify this part to create your agent)
    try:
        agent = BasicAgent()
    except Exception as e:
        print(f"Error instantiating agent: {e}")
        return f"Error initializing agent: {e}", None
    # In the case of an app running as a hugging Face space, this link points toward your codebase ( usefull for others so please keep it public)
    """
    # Public link to this Space's code, included in the submission payload.
    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
    print(agent_code)
    # 2. Fetch Questions
    print(f"Fetching questions from: {questions_url}")
    try:
        response = requests.get(questions_url, timeout=15)
        response.raise_for_status()
        questions_data = response.json()
        if not questions_data:
            print("Fetched questions list is empty.")
            return "Fetched questions list is empty or invalid format.", None
        print(f"Fetched {len(questions_data)} questions.")
    except requests.exceptions.RequestException as e:
        print(f"Error fetching questions: {e}")
        return f"Error fetching questions: {e}", None
    except requests.exceptions.JSONDecodeError as e:
        print(f"Error decoding JSON response from questions endpoint: {e}")
        print(f"Response text: {response.text[:500]}")
        return f"Error decoding server response for questions: {e}", None
    except Exception as e:
        print(f"An unexpected error occurred fetching questions: {e}")
        return f"An unexpected error occurred fetching questions: {e}", None
    # 3. Run your Agent
    results_log = []        # rows for the UI table (includes failures)
    answers_payload = []    # successful answers only, for submission
    print(f"Running agent on {len(questions_data)} questions...")
    for item in questions_data:
        task_id = item.get("task_id")
        question_text = item.get("question")
        if not task_id or question_text is None:
            print(f"Skipping item with missing task_id or question: {item}")
            continue
        try:
            # The system prompt (tool list + answer-format rules) is
            # prepended to every question before invoking the agent.
            full_prompt = f"{system_prompt}\n Input Question: {question_text}"
            submitted_answer = agent.run(full_prompt)
            # submitted_answer = agent.run(question_text)
            answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
        except Exception as e:
            # A per-question failure is logged in the table but not submitted.
            print(f"Error running agent on task {task_id}: {e}")
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
    if not answers_payload:
        print("Agent did not produce any answers to submit.")
        return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
    # 4. Prepare Submission
    submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
    status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
    print(status_update)
    # 5. Submit
    print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
    try:
        response = requests.post(submit_url, json=submission_data, timeout=60)
        response.raise_for_status()
        result_data = response.json()
        final_status = (
            f"Submission Successful!\n"
            f"User: {result_data.get('username')}\n"
            f"Overall Score: {result_data.get('score', 'N/A')}% "
            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
            f"Message: {result_data.get('message', 'No message received.')}"
        )
        print("Submission successful.")
        results_df = pd.DataFrame(results_log)
        return final_status, results_df
    except requests.exceptions.HTTPError as e:
        # Surface the server's error detail when the response body has one.
        error_detail = f"Server responded with status {e.response.status_code}."
        try:
            error_json = e.response.json()
            error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
        except requests.exceptions.JSONDecodeError:
            error_detail += f" Response: {e.response.text[:500]}"
        status_message = f"Submission Failed: {error_detail}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except requests.exceptions.Timeout:
        status_message = "Submission Failed: The request timed out."
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except requests.exceptions.RequestException as e:
        status_message = f"Submission Failed: Network error - {e}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except Exception as e:
        status_message = f"An unexpected error occurred during submission: {e}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
# --- Build Gradio Interface using Blocks ---
with gr.Blocks() as demo:
    gr.Markdown("# Basic Agent Evaluation Runner")
    gr.Markdown(
        """
        **Instructions:**
        1. Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc ...
        2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
        3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.
        ---
        **Disclaimers:**
        Once clicking on the "submit button, it can take quite some time ( this is the time for the agent to go through all the questions).
        This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance for the delay process of the submit button, a solution could be to cache the answers and submit in a seperate action or even to answer the questions in async.
        """
    )
    gr.LoginButton()
    # login_btn = gr.LoginButton()
    # login_btn.activate()
    run_button = gr.Button("Run Evaluation & Submit All Answers")
    status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
    # Removed max_rows=10 from DataFrame constructor
    results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
    # No inputs= listed: presumably Gradio injects the OAuthProfile argument
    # from the login state based on the handler's annotation — TODO confirm.
    run_button.click(
        fn=run_and_submit_all,
        outputs=[status_output, results_table]
    )
if __name__ == "__main__":
    print("\n" + "-"*30 + " App Starting " + "-"*30)
    # Check for SPACE_HOST and SPACE_ID at startup for information
    # Both are set by the HF Spaces runtime; absence implies a local run.
    space_host_startup = os.getenv("SPACE_HOST")
    space_id_startup = os.getenv("SPACE_ID") # Get SPACE_ID at startup
    if space_host_startup:
        print(f"✅ SPACE_HOST found: {space_host_startup}")
        print(f"   Runtime URL should be: https://{space_host_startup}.hf.space")
    else:
        print("ℹ️  SPACE_HOST environment variable not found (running locally?).")
    if space_id_startup: # Print repo URLs if SPACE_ID is found
        print(f"✅ SPACE_ID found: {space_id_startup}")
        print(f"   Repo URL: https://huggingface.co/spaces/{space_id_startup}")
        print(f"   Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
    else:
        print("ℹ️  SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")
    print("-"*(60 + len(" App Starting ")) + "\n")
    print("Launching Gradio Interface for Basic Agent Evaluation...")
    # Launch the Gradio app
    demo.launch(debug=True, share=True) #share=True