{ "cells": [ { "cell_type": "markdown", "id": "e6c1f8cc-eee9-4e7f-b82e-69d681533cb3", "metadata": { "jp-MarkdownHeadingCollapsed": true }, "source": [ "# Smolagents" ] }, { "cell_type": "code", "execution_count": 73, "id": "fb226289-30c0-40db-aac1-942cb525db9e", "metadata": { "execution": { "iopub.execute_input": "2025-06-01T14:34:19.514878Z", "iopub.status.busy": "2025-06-01T14:34:19.510625Z", "iopub.status.idle": "2025-06-01T14:34:19.528988Z", "shell.execute_reply": "2025-06-01T14:34:19.526394Z", "shell.execute_reply.started": "2025-06-01T14:34:19.514772Z" } }, "outputs": [], "source": [ "from dotenv import load_dotenv, find_dotenv\n", "import requests\n", "import logging\n", "from pprint import pprint" ] }, { "cell_type": "code", "execution_count": null, "id": "dd4de9e5-774e-4312-bc1f-2c22ab909ed0", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": 75, "id": "2fa88e7e-f2d2-462a-abbf-718838d626ff", "metadata": { "execution": { "iopub.execute_input": "2025-06-01T14:26:03.504931Z", "iopub.status.busy": "2025-06-01T14:26:03.504315Z", "iopub.status.idle": "2025-06-01T14:26:03.514488Z", "shell.execute_reply": "2025-06-01T14:26:03.513300Z", "shell.execute_reply.started": "2025-06-01T14:26:03.504872Z" } }, "outputs": [], "source": [ "_ = load_dotenv(find_dotenv(raise_error_if_not_found=True), override=True)" ] }, { "cell_type": "code", "execution_count": null, "id": "413da286", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": 28, "id": "7a960624-355b-4934-a741-fb880ad3ca37", "metadata": { "execution": { "iopub.execute_input": "2025-06-14T12:04:06.703540Z", "iopub.status.busy": "2025-06-14T12:04:06.702825Z", "iopub.status.idle": "2025-06-14T12:04:06.714241Z", "shell.execute_reply": "2025-06-14T12:04:06.711667Z", "shell.execute_reply.started": "2025-06-14T12:04:06.703473Z" } }, "outputs": [], "source": [ "DEFAULT_API_URL = \"https://agents-course-unit4-scoring.hf.space\"\n", "api_url = DEFAULT_API_URL\n", "questions_url = f\"{api_url}/questions\"\n", "submit_url = f\"{api_url}/submit\"\n", "task_id = 1\n", "files_url = f\"{api_url}/files/{task_id}\"" ] }, { "cell_type": "code", "execution_count": 4, "id": "0e50a6a9-c765-4a04-b922-b519029eeb4e", "metadata": { "execution": { "iopub.execute_input": "2025-06-01T14:26:04.334778Z", "iopub.status.busy": "2025-06-01T14:26:04.333497Z", "iopub.status.idle": "2025-06-01T14:26:04.348588Z", "shell.execute_reply": "2025-06-01T14:26:04.345835Z", "shell.execute_reply.started": "2025-06-01T14:26:04.334650Z" } }, "outputs": [], "source": [ "logging.basicConfig(level=logging.INFO)\n", "logger = logging.getLogger(__name__)" ] }, { "cell_type": "code", "execution_count": 5, "id": "1c970d6d-4efb-4b7a-846f-474fc0980247", "metadata": { "execution": { "iopub.execute_input": "2025-06-01T10:48:58.063267Z", "iopub.status.busy": "2025-06-01T10:48:58.062574Z", "iopub.status.idle": "2025-06-01T10:48:58.174339Z", "shell.execute_reply": "2025-06-01T10:48:58.172895Z", "shell.execute_reply.started": "2025-06-01T10:48:58.063196Z" } }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "INFO:__main__:Fetching questions from: https://agents-course-unit4-scoring.hf.space/questions\n" ] } ], "source": [ "logger.info(f\"Fetching questions from: {questions_url}\")\n", "def fetch_all_questions():\n", " try:\n", " response = requests.get(questions_url, timeout=15)\n", " response.raise_for_status()\n", " questions_data = response.json()\n", " if not questions_data:\n", " logger.info(\"Fetched 
questions list is empty.\")\n", "            return []\n", "        logger.info(f\"Fetched {len(questions_data)} questions.\")\n", "        return questions_data\n", "    except requests.exceptions.JSONDecodeError as e:\n", "        # JSONDecodeError subclasses RequestException, so it must be caught first.\n", "        logger.info(f\"Error decoding JSON response from questions endpoint: {e}\")\n", "        logger.info(f\"Response text: {response.text[:500]}\")\n", "        return None\n", "    except requests.exceptions.RequestException as e:\n", "        logger.info(f\"Error fetching questions: {e}\")\n", "        return None\n", "    except Exception as e:\n", "        logger.info(f\"An unexpected error occurred fetching questions: {e}\")\n", "        return None" ] }, { "cell_type": "code", "execution_count": 6, "id": "da7f2e1c-c3b2-4b02-b0c4-d810308bf0ed", "metadata": { "execution": { "iopub.execute_input": "2025-06-01T10:48:58.177220Z", "iopub.status.busy": "2025-06-01T10:48:58.176564Z", "iopub.status.idle": "2025-06-01T10:48:59.444547Z", "shell.execute_reply": "2025-06-01T10:48:59.442665Z", "shell.execute_reply.started": "2025-06-01T10:48:58.177157Z" } }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "INFO:__main__:Fetched 20 questions.\n" ] } ], "source": [ "all_questions = fetch_all_questions()" ] }, { "cell_type": "code", "execution_count": 3, "id": "5dbca3aa", "metadata": {}, "outputs": [], "source": [ "import pickle" ] }, { "cell_type": "code", "execution_count": 4, "id": "e0cdf079", "metadata": {}, "outputs": [], "source": [ "# with open(\"all_questions.pkl\", \"wb\") as f:\n", "#     pickle.dump(all_questions, file=f, protocol=pickle.HIGHEST_PROTOCOL)" ] }, { "cell_type": "code", "execution_count": 5, "id": "d925836a", "metadata": {}, "outputs": [], "source": [ "with open(\"all_questions.pkl\", \"rb\") as f:\n", "    all_questions = pickle.load(f)" ] }, { "cell_type": "code", "execution_count": 6, "id": "cbace3c9-0939-49dc-b253-7abbe3fe7f47", "metadata": { "execution": { "iopub.execute_input": "2025-06-01T10:48:59.448094Z", "iopub.status.busy": "2025-06-01T10:48:59.447412Z", "iopub.status.idle": "2025-06-01T10:48:59.472532Z", "shell.execute_reply": "2025-06-01T10:48:59.468068Z", "shell.execute_reply.started": "2025-06-01T10:48:59.448025Z" } }, "outputs": [ { "data": { "text/plain": [ "{'task_id': '8e867cd7-cff9-4e6c-867a-ff5ddc2550be',\n", " 'question': 'How many studio albums were published by Mercedes Sosa between 2000 and 2009 (included)? You can use the latest 2022 version of english wikipedia.',\n", " 'Level': '1',\n", " 'file_name': ''}" ] }, "execution_count": 6, "metadata": {}, "output_type": "execute_result" } ], "source": [ "all_questions[0]" ] },
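{ "cell_type": "markdown", "id": "submit-sketch-md", "metadata": {}, "source": [ "`submit_url` is defined above but never exercised in this section. Below is a minimal sketch of how answers might be posted to the scoring endpoint. The payload shape (`username`, `agent_code`, and `answers` as a list of `task_id`/`submitted_answer` pairs) is an assumption based on the course's submission template; verify it against the scoring API docs before relying on it." ] }, { "cell_type": "code", "execution_count": null, "id": "submit-sketch-code", "metadata": {}, "outputs": [], "source": [ "def submit_answers(username: str, agent_code: str, answers: list[dict]) -> dict:\n", "    \"\"\"Sketch: POST answers to the scoring endpoint.\n", "\n", "    Assumes `answers` is a list of {\"task_id\": ..., \"submitted_answer\": ...} dicts;\n", "    adjust the payload if the API schema differs.\n", "    \"\"\"\n", "    payload = {\"username\": username, \"agent_code\": agent_code, \"answers\": answers}\n", "    response = requests.post(submit_url, json=payload, timeout=60)\n", "    response.raise_for_status()\n", "    return response.json()\n", "\n", "# Example (not run here):\n", "# submit_answers(\n", "#     \"my-hf-username\",\n", "#     \"https://huggingface.co/spaces/my-hf-username/my-agent/tree/main\",\n", "#     [{\"task_id\": q[\"task_id\"], \"submitted_answer\": \"42\"} for q in all_questions[:1]],\n", "# )" ] },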
{ "cell_type": "code", "execution_count": 49, "id": "cff113b0-b918-454b-9518-721d3d5a90cc", "metadata": { "execution": { "iopub.execute_input": "2025-06-01T14:43:21.225829Z", "iopub.status.busy": "2025-06-01T14:43:21.224574Z", "iopub.status.idle": "2025-06-01T14:43:21.246142Z", "shell.execute_reply": "2025-06-01T14:43:21.245088Z", "shell.execute_reply.started": "2025-06-01T14:43:21.225765Z" } }, "outputs": [], "source": [ "from pathlib import Path\n", "from typing import Optional\n", "\n", "def download_file(url: str, save_as: Optional[str] = None) -> None:\n", "    \"\"\"\n", "    Download a file from a given URL and save it locally with the correct extension.\n", "\n", "    Args:\n", "        url (str): The URL to fetch the file.\n", "        save_as (Optional[str]): Optional custom filename. If None, the filename is extracted from headers.\n", "\n", "    Returns:\n", "        None\n", "    \"\"\"\n", "    # Skip the download if a file with the same stem already exists locally.\n", "    existing_stems = {p.stem for p in Path.cwd().iterdir()}\n", "    file_name = Path(url).stem\n", "    if file_name in existing_stems:\n", "        print(f\"File: {file_name} already exists.\")\n", "        return\n", "\n", "    response = requests.get(url, stream=True, timeout=30)\n", "\n", "    if response.status_code == 200:\n", "        # Extract filename from Content-Disposition header if available\n", "        content_disposition = response.headers.get(\"Content-Disposition\")\n", "        if content_disposition:\n", "            filename = content_disposition.split(\"filename=\")[-1].strip('\"')\n", "        else:\n", "            # Fall back to an extension derived from the Content-Type header\n", "            content_type = response.headers.get(\"Content-Type\", \"application/octet-stream\")\n", "            extension = content_type.split(\"/\")[-1]\n", "            filename = f\"downloaded_file.{extension}\"\n", "\n", "        # Use custom filename if provided\n", "        if save_as:\n", "            filename = save_as\n", "\n", "        # Save the file locally\n", "        with open(filename, \"wb\") as file:\n", "            for chunk in response.iter_content(chunk_size=8192):\n", "                file.write(chunk)\n", "\n", "        print(f\"File downloaded successfully: {filename}\")\n", "    else:\n", "        print(f\"Failed to download file. HTTP Status: {response.status_code}\")\n" ] }, { "cell_type": "code", "execution_count": 50, "id": "3f0e8447", "metadata": {}, "outputs": [], "source": [ "\n", "# # Example usage\n", "# file_url = \"https://agents-course-unit4-scoring.hf.space/files/99c9cc74-fdc8-46c6-8f8d-3ce2d3bfeea3\"\n", "# download_file(file_url)\n" ] }, { "cell_type": "code", "execution_count": 51, "id": "99d68405", "metadata": {}, "outputs": [], "source": [ "# Path(\"'cca530fc-4052-43b2-b130-b30968d8aa44.png'\").stem" ] }, { "cell_type": "code", "execution_count": 52, "id": "5571f89c-ccc8-445c-acbc-09a8c1656027", "metadata": {}, "outputs": [], "source": [ "# files_to_download = [i['file_name'] for i in all_questions if i['file_name']]\n", "# for f in files_to_download:\n", "#     file_url = f\"https://agents-course-unit4-scoring.hf.space/files/{Path(f).stem}\"\n", "#     download_file(url=file_url)" ] }, { "cell_type": "code", "execution_count": null, "id": "66e5d5d7-72e0-4b78-8e8a-9f538b08b2a2", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": 53, "id": "c703d39d-6f11-49f2-877f-8210a31ba792", "metadata": { "execution": { "iopub.execute_input": "2025-06-01T14:16:20.289766Z", "iopub.status.busy": "2025-06-01T14:16:20.288219Z", "iopub.status.idle": "2025-06-01T14:16:20.302767Z", "shell.execute_reply": "2025-06-01T14:16:20.299487Z", "shell.execute_reply.started": "2025-06-01T14:16:20.289703Z" } }, "outputs": [], "source": [ "# from smolagents import AgentAudio, AgentImage" ] }, { "cell_type": "code", "execution_count": 54, "id": "17185388-4faf-4e4c-ba50-7724ea8592c2", "metadata": { "execution": { "iopub.execute_input": "2025-06-01T14:15:59.666336Z", "iopub.status.busy": "2025-06-01T14:15:59.665657Z", "iopub.status.idle": "2025-06-01T14:16:00.610811Z", "shell.execute_reply": "2025-06-01T14:16:00.607919Z", "shell.execute_reply.started": "2025-06-01T14:15:59.666276Z" } }, "outputs": [], "source": [ "# AgentAudio(\"99c9cc74-fdc8-46c6-8f8d-3ce2d3bfeea3.mp3\").to_raw()" ] }, { "cell_type": "code", "execution_count": 55, "id": "eb1352f5", "metadata": {}, "outputs": [], "source": [ "# AgentImage(\"cca530fc-4052-43b2-b130-b30968d8aa44.png\")" ] },
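{ "cell_type": "markdown", "id": "stt-sketch-md", "metadata": {}, "source": [ "The cells below reference `stt_tool`, which is never defined in this section. Here is a minimal sketch of such a speech-to-text tool, assuming the `InferenceClient.automatic_speech_recognition` endpoint from `huggingface_hub`; the model id (`openai/whisper-large-v3`) and the `bill_to` value are placeholder assumptions to adapt." ] }, { "cell_type": "code", "execution_count": null, "id": "stt-sketch-code", "metadata": {}, "outputs": [], "source": [ "from smolagents import Tool\n", "\n", "\n", "class HFSpeechToTextTool(Tool):\n", "    name = \"stt_tool\"\n", "    description = \"Transcribes a local audio file to text using a Hugging Face ASR model.\"\n", "    inputs = {\n", "        \"audio_path\": {\n", "            \"type\": \"string\",\n", "            \"description\": \"Path to the audio file to transcribe.\",\n", "        }\n", "    }\n", "    output_type = \"string\"\n", "\n", "    def forward(self, audio_path: str) -> str:\n", "        from huggingface_hub import InferenceClient\n", "\n", "        client = InferenceClient(provider=\"auto\", bill_to=\"VitalNest\")  # placeholder billing org\n", "        # automatic_speech_recognition accepts a local file path or raw bytes.\n", "        result = client.automatic_speech_recognition(\n", "            audio_path, model=\"openai/whisper-large-v3\"\n", "        )\n", "        return result.text\n", "\n", "\n", "stt_tool = HFSpeechToTextTool()" ] },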
"source": [ "# stt_tool(\"99c9cc74-fdc8-46c6-8f8d-3ce2d3bfeea3.mp3\")" ] }, { "cell_type": "code", "execution_count": 58, "id": "e65adf7c", "metadata": {}, "outputs": [], "source": [ "class HFImageDescriptionTool(Tool):\n", " name = \"image_description\"\n", " description = \"\"\"\n", " This tool uses a Hugging Face vision-language model to describe the content of an image.\n", " It returns a detailed description of the provided image file.\n", " \"\"\"\n", " inputs = {\n", " \"image_path\": {\n", " \"type\": \"string\",\n", " \"description\": \"Path to the image file to be described.\",\n", " }\n", " }\n", " output_type = \"string\"\n", "\n", " def forward(self, image_path: str) -> str:\n", " from huggingface_hub import InferenceClient\n", " from PIL import Image\n", " import base64\n", " from io import BytesIO\n", "\n", " def encode_image_to_base64(image_path):\n", " image = Image.open(image_path)\n", " buffered = BytesIO()\n", " image.save(buffered, format=\"JPEG\")\n", " return base64.b64encode(buffered.getvalue()).decode(\"utf-8\")\n", "\n", " client = InferenceClient(\n", " provider=\"auto\",\n", " bill_to=\"VitalNest\",\n", " )\n", " messages = [\n", " {\n", " \"role\": \"user\",\n", " \"content\": [\n", " {\"type\": \"text\", \"text\": \"Describe this image in detail.\"},\n", " {\n", " \"type\": \"image_url\",\n", " \"image_url\": {\n", " \"url\": f\"data:image/jpeg;base64,{\n", " encode_image_to_base64(image_path=image_path)\n", " }\"\n", " },\n", " },\n", " ],\n", " }\n", " ]\n", " completion = client.chat.completions.create(\n", " model=\"Qwen/Qwen2.5-VL-7B-Instruct\",\n", " messages=messages,\n", " temperature=0.1,\n", " max_tokens=10000,\n", " )\n", " return completion.choices[0].message.content\n", "\n", "\n", "image_description_tool = HFImageDescriptionTool()\n" ] }, { "cell_type": "code", "execution_count": null, "id": "a7a94aae", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "INFO:wikipediaapi:Wikipedia: language=en, user_agent: WikiAssistant (merlin@example.com) (Wikipedia-API/0.8.1; https://github.com/martin-majlis/Wikipedia-API/), extract_format=1\n" ] } ], "source": [ "ultimate_agent = CodeAgent(\n", " tools=[\n", " FinalAnswerTool(),\n", " PythonInterpreterTool(),\n", " VisitWebpageTool(),\n", " DuckDuckGoSearchTool(),\n", " WikipediaSearchTool(user_agent=\"WikiAssistant (merlin@example.com)\"),\n", " stt_tool,\n", " image_description_tool,\n", " ],\n", " additional_authorized_imports=[\"os\", \"json\", \"pandas\", \"PIL\"],\n", " model=InferenceClientModel(\n", " # model_id=\"Qwen/Qwen3-30B-A3B\",\n", " # model_id=\"Qwen/Qwen3-235B-A22B\",\n", " bill_to=\"VitalNest\",\n", " temperature=0.1,\n", " ),\n", " max_steps=10,\n", " planning_interval=2,\n", " verbosity_level=0,\n", " add_base_tools=True,\n", " name=\"Versatile_Multi_Agent\",\n", " description=\"\"\"You are a general AI assistant.\"\"\",\n", ")" ] }, { "cell_type": "markdown", "id": "f3739d3e-d652-415c-94e7-3c10f27c9fe7", "metadata": { "execution": { "iopub.execute_input": "2025-06-01T12:00:26.517556Z", "iopub.status.busy": "2025-06-01T12:00:26.515902Z", "iopub.status.idle": "2025-06-01T12:00:26.530150Z", "shell.execute_reply": "2025-06-01T12:00:26.527326Z", "shell.execute_reply.started": "2025-06-01T12:00:26.517475Z" }, "scrolled": true }, "source": [ "ultimate_agent.prompt_templates[\"system_prompt\"] = ultimate_agent.prompt_templates[\"system_prompt\"][:8834] + \"\"\"\n", "\\n\\n\\n ALWAYS follow this ```Answer format```:\\nALWAYS Report your thoughts, and finish your 
answer with the following template:\\nANSWER: [YOUR ANSWER].\\nYOUR ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings.\\nIf you are asked for a number, don't use comma to write your number neither use units such as $ or percent sign unless specified otherwise. If you are asked for a string, don't use articles, neither abbreviations(e.g. for cities), and write the digits in plain text unless specified otherwise.\\nIf you are asked for a comma separated list, apply the above rules depending of whether the element to be put in the list is a number or a string.\\n\\nNow Begin!\n", "\"\"\"" ] }, { "cell_type": "markdown", "id": "ebd88c4d-f05b-46a5-9a43-be69d1bdaa37", "metadata": { "execution": { "iopub.execute_input": "2025-06-01T12:03:44.420453Z", "iopub.status.busy": "2025-06-01T12:03:44.419861Z", "iopub.status.idle": "2025-06-01T12:03:44.445366Z", "shell.execute_reply": "2025-06-01T12:03:44.444175Z", "shell.execute_reply.started": "2025-06-01T12:03:44.420396Z" }, "scrolled": true }, "source": [ "ultimate_agent.system_prompt = ultimate_agent.initialize_system_prompt()" ] }, { "cell_type": "markdown", "id": "6931a7a4-a1c7-4211-b2d0-da67b1e277f4", "metadata": { "execution": { "iopub.execute_input": "2025-06-01T12:09:40.240675Z", "iopub.status.busy": "2025-06-01T12:09:40.239413Z", "iopub.status.idle": "2025-06-01T12:09:40.275581Z", "shell.execute_reply": "2025-06-01T12:09:40.273862Z", "shell.execute_reply.started": "2025-06-01T12:09:40.240554Z" }, "scrolled": true }, "source": [ "pprint(ultimate_agent.system_prompt)" ] }, { "cell_type": "code", "execution_count": 83, "id": "5731e0bc-bb28-4d52-a683-01c21677d018", "metadata": { "execution": { "iopub.execute_input": "2025-06-01T14:26:18.224798Z", "iopub.status.busy": "2025-06-01T14:26:18.223444Z", "iopub.status.idle": "2025-06-01T14:26:18.233689Z", "shell.execute_reply": "2025-06-01T14:26:18.230869Z", "shell.execute_reply.started": "2025-06-01T14:26:18.224744Z" } }, "outputs": [], "source": [ "custom_instructions = \"\"\"\n", "\\n\\n\\n ALWAYS follow this ```Answer format```:\\nALWAYS Report your thoughts, and finish your answer with the following template:\\nANSWER: [YOUR ANSWER].\\nYOUR ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings.\\nIf you are asked for a number, don't use comma to write your number neither use units such as $ or percent sign unless specified otherwise. If you are asked for a string, don't use articles, neither abbreviations(e.g. for cities), and write the digits in plain text unless specified otherwise.\\nIf you are asked for a comma separated list, apply the above rules depending of whether the element to be put in the list is a number or a string.\n", "\"\"\"\n", "\n", "# Set in pre_messages (recommended, so it's always seen by the model before answering)\n", "ultimate_agent.prompt_templates[\"final_answer\"][\"pre_messages\"] = custom_instructions\n" ] }, { "cell_type": "code", "execution_count": 84, "id": "7ceec021", "metadata": {}, "outputs": [], "source": [ "curr_task = [i for i in all_questions if i['task_id']=='1f975693-876d-457b-a649-393859e79bf3']" ] }, { "cell_type": "code", "execution_count": 85, "id": "70325cad", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "[{'task_id': '1f975693-876d-457b-a649-393859e79bf3',\n", " 'question': \"Hi, I was out sick from my classes on Friday, so I'm trying to figure out what I need to study for my Calculus mid-term next week. 
My friend from class sent me an audio recording of Professor Willowbrook giving out the recommended reading for the test, but my headphones are broken :(\\n\\nCould you please listen to the recording for me and tell me the page numbers I'm supposed to go over? I've attached a file called Homework.mp3 that has the recording. Please provide just the page numbers as a comma-delimited list. And please provide the list in ascending order.\",\n", "  'Level': '1',\n", "  'file_name': '1f975693-876d-457b-a649-393859e79bf3.mp3'}]" ] }, "execution_count": 85, "metadata": {}, "output_type": "execute_result" } ], "source": [ "curr_task" ] }, { "cell_type": "code", "execution_count": null, "id": "c8c80f80-5af2-48f2-af99-25ece66d9176", "metadata": { "execution": { "iopub.execute_input": "2025-06-01T13:09:30.874873Z", "iopub.status.busy": "2025-06-01T13:09:30.872836Z", "iopub.status.idle": "2025-06-01T13:13:40.564236Z", "shell.execute_reply": "2025-06-01T13:13:40.562630Z", "shell.execute_reply.started": "2025-06-01T13:09:30.874777Z" }, "scrolled": true }, "outputs": [], "source": [ "ultimate_agent.run(\n", "    task=curr_task[0][\"question\"]\n", "    + \" If there's a valid file_url, then the file is saved locally by the same name as the file_url.\"\n", "    \" Only the file_url should be provided to tools as a string: 'xyz.filename' solely without any additional args or kwargs.\",\n", "    additional_args={\"file_url\": curr_task[0][\"file_name\"]} if curr_task[0][\"file_name\"] else None,\n", ")\n", "# ultimate_agent.run(task=all_questions[1]['question'] + \" If there's a valid file_url, then the file is saved locally by the same name as the file_url.\",\n", "#                    additional_args={\"file_url\": all_questions[1]['file_name']} if all_questions[1]['file_name'] else None\n", "# )" ] }, { "cell_type": "code", "execution_count": 26, "id": "6f9e9709-41bf-48ab-b0f1-72ece35b251d", "metadata": { "execution": { "iopub.execute_input": "2025-06-01T13:04:37.953256Z", "iopub.status.busy": "2025-06-01T13:04:37.951156Z", "iopub.status.idle": "2025-06-01T13:04:37.972233Z", "shell.execute_reply": "2025-06-01T13:04:37.971246Z", "shell.execute_reply.started": "2025-06-01T13:04:37.953169Z" }, "scrolled": true }, "outputs": [ { "data": { "text/plain": [ "[{'task_id': '8e867cd7-cff9-4e6c-867a-ff5ddc2550be',\n", "  'question': 'How many studio albums were published by Mercedes Sosa between 2000 and 2009 (included)? You can use the latest 2022 version of english wikipedia.',\n", "  'Level': '1',\n", "  'file_name': ''},\n", " {'task_id': 'a1e91b78-d3d8-4675-bb8d-62741b4b68a6',\n", "  'question': 'In the video https://www.youtube.com/watch?v=L1vXCYZAYYM, what is the highest number of bird species to be on camera simultaneously?',\n", "  'Level': '1',\n", "  'file_name': ''},\n", " {'task_id': '2d83110e-a098-4ebb-9987-066c06fa42d0',\n", "  'question': '.rewsna eht sa \"tfel\" drow eht fo etisoppo eht etirw ,ecnetnes siht dnatsrednu uoy fI',\n", "  'Level': '1',\n", "  'file_name': ''},\n", " {'task_id': 'cca530fc-4052-43b2-b130-b30968d8aa44',\n", "  'question': \"Review the chess position provided in the image. It is black's turn. Provide the correct next move for black which guarantees a win. 
Please provide your response in algebraic notation.\",\n", " 'Level': '1',\n", " 'file_name': 'cca530fc-4052-43b2-b130-b30968d8aa44.png'},\n", " {'task_id': '4fc2f1ae-8625-45b5-ab34-ad4433bc21f8',\n", " 'question': 'Who nominated the only Featured Article on English Wikipedia about a dinosaur that was promoted in November 2016?',\n", " 'Level': '1',\n", " 'file_name': ''},\n", " {'task_id': '6f37996b-2ac7-44b0-8e68-6d28256631b4',\n", " 'question': 'Given this table defining * on the set S = {a, b, c, d, e}\\n\\n|*|a|b|c|d|e|\\n|---|---|---|---|---|---|\\n|a|a|b|c|b|d|\\n|b|b|c|a|e|c|\\n|c|c|a|b|b|a|\\n|d|b|e|b|e|d|\\n|e|d|b|a|d|c|\\n\\nprovide the subset of S involved in any possible counter-examples that prove * is not commutative. Provide your answer as a comma separated list of the elements in the set in alphabetical order.',\n", " 'Level': '1',\n", " 'file_name': ''},\n", " {'task_id': '9d191bce-651d-4746-be2d-7ef8ecadb9c2',\n", " 'question': 'Examine the video at https://www.youtube.com/watch?v=1htKBjuUWec.\\n\\nWhat does Teal\\'c say in response to the question \"Isn\\'t that hot?\"',\n", " 'Level': '1',\n", " 'file_name': ''},\n", " {'task_id': 'cabe07ed-9eca-40ea-8ead-410ef5e83f91',\n", " 'question': \"What is the surname of the equine veterinarian mentioned in 1.E Exercises from the chemistry materials licensed by Marisa Alviar-Agnew & Henry Agnew under the CK-12 license in LibreText's Introductory Chemistry materials as compiled 08/21/2023?\",\n", " 'Level': '1',\n", " 'file_name': ''},\n", " {'task_id': '3cef3a44-215e-4aed-8e3b-b1e3f08063b7',\n", " 'question': \"I'm making a grocery list for my mom, but she's a professor of botany and she's a real stickler when it comes to categorizing things. I need to add different foods to different categories on the grocery list, but if I make a mistake, she won't buy anything inserted in the wrong category. Here's the list I have so far:\\n\\nmilk, eggs, flour, whole bean coffee, Oreos, sweet potatoes, fresh basil, plums, green beans, rice, corn, bell pepper, whole allspice, acorns, broccoli, celery, zucchini, lettuce, peanuts\\n\\nI need to make headings for the fruits and vegetables. Could you please create a list of just the vegetables from my list? If you could do that, then I can figure out how to categorize the rest of the list into the appropriate categories. But remember that my mom is a real stickler, so make sure that no botanical fruits end up on the vegetable list, or she won't get them when she's at the store. Please alphabetize the list of vegetables, and place each item in a comma separated list.\",\n", " 'Level': '1',\n", " 'file_name': ''},\n", " {'task_id': '99c9cc74-fdc8-46c6-8f8d-3ce2d3bfeea3',\n", " 'question': 'Hi, I\\'m making a pie but I could use some help with my shopping list. I have everything I need for the crust, but I\\'m not sure about the filling. I got the recipe from my friend Aditi, but she left it as a voice memo and the speaker on my phone is buzzing so I can\\'t quite make out what she\\'s saying. Could you please listen to the recipe and list all of the ingredients that my friend described? I only want the ingredients for the filling, as I have everything I need to make my favorite pie crust. I\\'ve attached the recipe as Strawberry pie.mp3.\\n\\nIn your response, please only list the ingredients, not any measurements. 
So if the recipe calls for \"a pinch of salt\" or \"two cups of ripe strawberries\" the ingredients on the list would be \"salt\" and \"ripe strawberries\".\\n\\nPlease format your response as a comma separated list of ingredients. Also, please alphabetize the ingredients.',\n", " 'Level': '1',\n", " 'file_name': '99c9cc74-fdc8-46c6-8f8d-3ce2d3bfeea3.mp3'},\n", " {'task_id': '305ac316-eef6-4446-960a-92d80d542f82',\n", " 'question': 'Who did the actor who played Ray in the Polish-language version of Everybody Loves Raymond play in Magda M.? Give only the first name.',\n", " 'Level': '1',\n", " 'file_name': ''},\n", " {'task_id': 'f918266a-b3e0-4914-865d-4faa564f1aef',\n", " 'question': 'What is the final numeric output from the attached Python code?',\n", " 'Level': '1',\n", " 'file_name': 'f918266a-b3e0-4914-865d-4faa564f1aef.py'},\n", " {'task_id': '3f57289b-8c60-48be-bd80-01f8099ca449',\n", " 'question': 'How many at bats did the Yankee with the most walks in the 1977 regular season have that same season?',\n", " 'Level': '1',\n", " 'file_name': ''},\n", " {'task_id': '1f975693-876d-457b-a649-393859e79bf3',\n", " 'question': \"Hi, I was out sick from my classes on Friday, so I'm trying to figure out what I need to study for my Calculus mid-term next week. My friend from class sent me an audio recording of Professor Willowbrook giving out the recommended reading for the test, but my headphones are broken :(\\n\\nCould you please listen to the recording for me and tell me the page numbers I'm supposed to go over? I've attached a file called Homework.mp3 that has the recording. Please provide just the page numbers as a comma-delimited list. And please provide the list in ascending order.\",\n", " 'Level': '1',\n", " 'file_name': '1f975693-876d-457b-a649-393859e79bf3.mp3'},\n", " {'task_id': '840bfca7-4f7b-481a-8794-c560c340185d',\n", " 'question': 'On June 6, 2023, an article by Carolyn Collins Petersen was published in Universe Today. This article mentions a team that produced a paper about their observations, linked at the bottom of the article. Find this paper. Under what NASA award number was the work performed by R. G. Arendt supported by?',\n", " 'Level': '1',\n", " 'file_name': ''},\n", " {'task_id': 'bda648d7-d618-4883-88f4-3466eabd860e',\n", " 'question': \"Where were the Vietnamese specimens described by Kuznetzov in Nedoshivina's 2010 paper eventually deposited? Just give me the city name without abbreviations.\",\n", " 'Level': '1',\n", " 'file_name': ''},\n", " {'task_id': 'cf106601-ab4f-4af9-b045-5295fe67b37d',\n", " 'question': \"What country had the least number of athletes at the 1928 Summer Olympics? If there's a tie for a number of athletes, return the first in alphabetical order. Give the IOC country code as your answer.\",\n", " 'Level': '1',\n", " 'file_name': ''},\n", " {'task_id': 'a0c07678-e491-4bbc-8f0b-07405144218f',\n", " 'question': \"Who are the pitchers with the number before and after Taishō Tamai's number as of July 2023? Give them to me in the form Pitcher Before, Pitcher After, use their last names only, in Roman characters.\",\n", " 'Level': '1',\n", " 'file_name': ''},\n", " {'task_id': '7bd855d8-463d-4ed5-93ca-5fe35145f733',\n", " 'question': 'The attached Excel file contains the sales of menu items for a local fast-food chain. What were the total sales that the chain made from food (not including drinks)? 
Express your answer in USD with two decimal places.',\n", "  'Level': '1',\n", "  'file_name': '7bd855d8-463d-4ed5-93ca-5fe35145f733.xlsx'},\n", " {'task_id': '5a0c1adf-205e-4841-a666-7c3ef95def9d',\n", "  'question': 'What is the first name of the only Malko Competition recipient from the 20th Century (after 1977) whose nationality on record is a country that no longer exists?',\n", "  'Level': '1',\n", "  'file_name': ''}]" ] }, "execution_count": 26, "metadata": {}, "output_type": "execute_result" } ], "source": [ "all_questions[:]" ] }, { "cell_type": "markdown", "id": "b3d4fe5b-23be-4617-8738-374846281e94", "metadata": {}, "source": [ "## Inference client" ] }, { "cell_type": "code", "execution_count": null, "id": "1dc4b8b6", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": 31, "id": "ef147340-064a-4c3d-b7b8-bf47cae4a99b", "metadata": { "execution": { "iopub.execute_input": "2025-06-01T13:18:06.730006Z", "iopub.status.busy": "2025-06-01T13:18:06.729139Z", "iopub.status.idle": "2025-06-01T13:18:06.744206Z", "shell.execute_reply": "2025-06-01T13:18:06.739120Z", "shell.execute_reply.started": "2025-06-01T13:18:06.729935Z" } }, "outputs": [], "source": [ "from huggingface_hub import InferenceClient" ] }, { "cell_type": "code", "execution_count": 32, "id": "3a24479e-bad1-448e-996f-0bd178b1f8f0", "metadata": { "execution": { "iopub.execute_input": "2025-06-01T13:56:28.271646Z", "iopub.status.busy": "2025-06-01T13:56:28.270762Z", "iopub.status.idle": "2025-06-01T13:56:29.364865Z", "shell.execute_reply": "2025-06-01T13:56:29.356268Z", "shell.execute_reply.started": "2025-06-01T13:56:28.271610Z" }, "scrolled": true }, "outputs": [], "source": [ "client = InferenceClient(\n", "    provider=\"auto\",\n", "    # provider=\"hyperbolic\",\n", "    bill_to=\"VitalNest\",\n", ")\n" ] }, { "cell_type": "code", "execution_count": 38, "id": "47b538d3", "metadata": {}, "outputs": [], "source": [ "from PIL import Image\n", "import base64\n", "from io import BytesIO" ] }, { "cell_type": "code", "execution_count": null, "id": "e71b9d7f", "metadata": {}, "outputs": [], "source": [ "def encode_image_to_base64(image_path):\n", "    \"\"\"\n", "    Encodes an image file to a base64 string.\n", "\n", "    Args:\n", "        image_path (str): Path to the image file.\n", "\n", "    Returns:\n", "        str: Base64 encoded image string.\n", "    \"\"\"\n", "    # Convert to RGB so PNGs with an alpha channel can be saved as JPEG.\n", "    image = Image.open(image_path).convert(\"RGB\")\n", "    buffered = BytesIO()\n", "    image.save(buffered, format=\"JPEG\")\n", "    return base64.b64encode(buffered.getvalue()).decode(\"utf-8\")\n" ] }, { "cell_type": "code", "execution_count": null, "id": "a154704f", "metadata": {}, "outputs": [], "source": [ "_ = encode_image_to_base64(\"cca530fc-4052-43b2-b130-b30968d8aa44.png\")" ] }, { "cell_type": "code", "execution_count": null, "id": "482b163f", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "'The image depicts a chessboard with the pieces arranged in a specific position. Here is a detailed description:\\n\\n1. 
**White Pieces:**\\n - The white king is located at the top left corner of the board, on the h-file.\\n - The white queen is not visible in this image.\\n - The white rook is on the e-file, on the e5 square.\\n - The white knight is on the d-file, on the d5 square.\\n - The white bishop is on the c-file, on the c4 square.\\n - The white rook is on the a-file, on the a2 square.\\n - The white pawn is on the a-file, on the a7 square.\\n - The white pawn is on the b-file, on the b7 square.\\n - The white pawn is on the c-file, on the c7 square.\\n - The white pawn is on the d-file, on the d7 square.\\n - The white pawn is on the e-file, on the e7 square.\\n - The white pawn is on the f-file, on the f7 square.\\n - The white pawn is on the g-file, on the g7 square.\\n - The white pawn is on the h-file, on the h7 square.\\n\\n2. **Black Pieces:**\\n - The black king is located at the bottom right corner of the board, on the a-file.\\n - The black queen is not visible in this image.\\n - The black rook is on the d-file, on the d8 square.\\n - The black knight is on the e-file, on the e5 square.\\n - The black bishop is on the c-file, on the c8 square.\\n - The black pawn is on the a-file, on the a6 square.\\n - The black pawn is on the b-file, on the b6 square.\\n - The black pawn is on the c-file, on the c6 square.\\n - The black pawn is on the d-file, on the d6 square.\\n - The black pawn is on the e-file, on the e6 square.\\n - The black pawn is on the f-file, on the f6 square.\\n - The black pawn is on the g-file, on the g6 square.\\n - The black pawn is on the h-file, on the h6 square.\\n\\nThe board is divided into light and dark squares, which is typical for a chessboard. The pieces are arranged in a way that suggests a specific position in a chess game, possibly a mid-game or endgame scenario. The black king is in a position that could be considered vulnerable, as it is not protected by any other piece. The white king is also in a position that could be considered vulnerable, but it is not under immediate threat. The black rook and knight are positioned in a way that could potentially attack the white king, but the white king is not in check. The white rook and bishop are not in a position to attack the black king directly, but they could potentially move to a position where they could attack the black king. The black pawns are all on the sixth rank, which is typical for a mid-game position. The white pawns are all on the seventh rank, which is also typical for a mid-game position. The black queen is not visible in this image, so it is not possible to determine its position or potential threats. 
The white queen is not visible in this image, so it is not possible to determine its position or potential threats.'" ] }, "execution_count": 45, "metadata": {}, "output_type": "execute_result" } ], "source": [ "messages = [\n", " {\n", " \"role\": \"user\",\n", " \"content\": [\n", " {\"type\": \"text\", \"text\": \"Describe this image in detail.\"},\n", " {\n", " \"type\": \"image_url\",\n", " \"image_url\": {\n", " \"url\": f\"data:image/jpeg;base64,{encode_image_to_base64(\"cca530fc-4052-43b2-b130-b30968d8aa44.png\")}\"\n", " },\n", " },\n", " ],\n", " }\n", "]\n", "completion = client.chat.completions.create(\n", " model=\"Qwen/Qwen2.5-VL-7B-Instruct\",\n", " messages=messages,\n", " temperature=0.1,\n", " max_tokens=10_000,\n", ")\n", "answer = completion.choices[0].message.content" ] }, { "cell_type": "code", "execution_count": 46, "id": "f27b49d4", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "('The image depicts a chessboard with the pieces arranged in a specific '\n", " 'position. Here is a detailed description:\\n'\n", " '\\n'\n", " '1. **White Pieces:**\\n'\n", " ' - The white king is located at the top left corner of the board, on the '\n", " 'h-file.\\n'\n", " ' - The white queen is not visible in this image.\\n'\n", " ' - The white rook is on the e-file, on the e5 square.\\n'\n", " ' - The white knight is on the d-file, on the d5 square.\\n'\n", " ' - The white bishop is on the c-file, on the c4 square.\\n'\n", " ' - The white rook is on the a-file, on the a2 square.\\n'\n", " ' - The white pawn is on the a-file, on the a7 square.\\n'\n", " ' - The white pawn is on the b-file, on the b7 square.\\n'\n", " ' - The white pawn is on the c-file, on the c7 square.\\n'\n", " ' - The white pawn is on the d-file, on the d7 square.\\n'\n", " ' - The white pawn is on the e-file, on the e7 square.\\n'\n", " ' - The white pawn is on the f-file, on the f7 square.\\n'\n", " ' - The white pawn is on the g-file, on the g7 square.\\n'\n", " ' - The white pawn is on the h-file, on the h7 square.\\n'\n", " '\\n'\n", " '2. **Black Pieces:**\\n'\n", " ' - The black king is located at the bottom right corner of the board, on '\n", " 'the a-file.\\n'\n", " ' - The black queen is not visible in this image.\\n'\n", " ' - The black rook is on the d-file, on the d8 square.\\n'\n", " ' - The black knight is on the e-file, on the e5 square.\\n'\n", " ' - The black bishop is on the c-file, on the c8 square.\\n'\n", " ' - The black pawn is on the a-file, on the a6 square.\\n'\n", " ' - The black pawn is on the b-file, on the b6 square.\\n'\n", " ' - The black pawn is on the c-file, on the c6 square.\\n'\n", " ' - The black pawn is on the d-file, on the d6 square.\\n'\n", " ' - The black pawn is on the e-file, on the e6 square.\\n'\n", " ' - The black pawn is on the f-file, on the f6 square.\\n'\n", " ' - The black pawn is on the g-file, on the g6 square.\\n'\n", " ' - The black pawn is on the h-file, on the h6 square.\\n'\n", " '\\n'\n", " 'The board is divided into light and dark squares, which is typical for a '\n", " 'chessboard. The pieces are arranged in a way that suggests a specific '\n", " 'position in a chess game, possibly a mid-game or endgame scenario. The black '\n", " 'king is in a position that could be considered vulnerable, as it is not '\n", " 'protected by any other piece. The white king is also in a position that '\n", " 'could be considered vulnerable, but it is not under immediate threat. 
The '\n", " 'black rook and knight are positioned in a way that could potentially attack '\n", " 'the white king, but the white king is not in check. The white rook and '\n", " 'bishop are not in a position to attack the black king directly, but they '\n", " 'could potentially move to a position where they could attack the black king. '\n", " 'The black pawns are all on the sixth rank, which is typical for a mid-game '\n", " 'position. The white pawns are all on the seventh rank, which is also typical '\n", " 'for a mid-game position. The black queen is not visible in this image, so it '\n", " 'is not possible to determine its position or potential threats. The white '\n", " 'queen is not visible in this image, so it is not possible to determine its '\n", " 'position or potential threats.')\n" ] } ], "source": [ "pprint(answer)" ] }, { "cell_type": "code", "execution_count": null, "id": "bc9cdb75", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "3cc66ac7", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "f6c2e83c", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", "id": "c3b5f100-893f-405a-920f-a32461ac7277", "metadata": { "jp-MarkdownHeadingCollapsed": true }, "source": [ "# Async Task" ] }, { "cell_type": "code", "execution_count": 64, "id": "2581df7f-63d2-4d06-9caf-92afc92eb01c", "metadata": {}, "outputs": [], "source": [ "from concurrent.futures import ThreadPoolExecutor, as_completed, ProcessPoolExecutor" ] }, { "cell_type": "code", "execution_count": 65, "id": "205f5f19", "metadata": {}, "outputs": [], "source": [ "questions = all_questions[:5]" ] }, { "cell_type": "code", "execution_count": 66, "id": "2c846349", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "[{'task_id': '8e867cd7-cff9-4e6c-867a-ff5ddc2550be',\n", " 'question': 'How many studio albums were published by Mercedes Sosa between 2000 and 2009 (included)? You can use the latest 2022 version of english wikipedia.',\n", " 'Level': '1',\n", " 'file_name': ''},\n", " {'task_id': 'a1e91b78-d3d8-4675-bb8d-62741b4b68a6',\n", " 'question': 'In the video https://www.youtube.com/watch?v=L1vXCYZAYYM, what is the highest number of bird species to be on camera simultaneously?',\n", " 'Level': '1',\n", " 'file_name': ''},\n", " {'task_id': '2d83110e-a098-4ebb-9987-066c06fa42d0',\n", " 'question': '.rewsna eht sa \"tfel\" drow eht fo etisoppo eht etirw ,ecnetnes siht dnatsrednu uoy fI',\n", " 'Level': '1',\n", " 'file_name': ''},\n", " {'task_id': 'cca530fc-4052-43b2-b130-b30968d8aa44',\n", " 'question': \"Review the chess position provided in the image. It is black's turn. Provide the correct next move for black which guarantees a win. 
Please provide your response in algebraic notation.\",\n", "  'Level': '1',\n", "  'file_name': 'cca530fc-4052-43b2-b130-b30968d8aa44.png'},\n", " {'task_id': '4fc2f1ae-8625-45b5-ab34-ad4433bc21f8',\n", "  'question': 'Who nominated the only Featured Article on English Wikipedia about a dinosaur that was promoted in November 2016?',\n", "  'Level': '1',\n", "  'file_name': ''}]" ] }, "execution_count": 66, "metadata": {}, "output_type": "execute_result" } ], "source": [ "all_questions[:5]" ] }, { "cell_type": "code", "execution_count": 67, "id": "7ee3373c", "metadata": {}, "outputs": [], "source": [ "def run_agents_in_parallel(task, additional_args):\n", "    res = ultimate_agent.run(task=task, additional_args=additional_args)\n", "    return res\n", "\n", "\n", "def process_question(q):\n", "    task = (\n", "        q[\"question\"]\n", "        + \" If there's a valid file_url, then the file is saved locally by the same name as the file_url.\"\n", "        \" Only the file_url should be provided to tools as a string: 'xyz.filename' solely without any additional args or kwargs.\"\n", "    )\n", "    additional_args = {\"file_url\": q[\"file_name\"]} if q[\"file_name\"] else None\n", "    return run_agents_in_parallel(task, additional_args)" ] },
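{ "cell_type": "markdown", "id": "parallel-sketch-md", "metadata": {}, "source": [ "Before the parallel run below, here is a minimal sketch of how `process_question` might be fanned out with the `ThreadPoolExecutor` imported above; the worker count and the per-task error handling are assumptions. Note that sharing a single `ultimate_agent` across threads also shares its memory, which is one plausible source of the malformed-role errors visible in the output that follows." ] }, { "cell_type": "code", "execution_count": null, "id": "parallel-sketch-code", "metadata": {}, "outputs": [], "source": [ "# Sketch: run the first five questions concurrently and collect answers per task_id.\n", "results = {}\n", "with ThreadPoolExecutor(max_workers=5) as executor:\n", "    future_to_q = {executor.submit(process_question, q): q for q in questions}\n", "    for future in as_completed(future_to_q):\n", "        q = future_to_q[future]\n", "        try:\n", "            results[q[\"task_id\"]] = future.result()\n", "        except Exception as exc:\n", "            results[q[\"task_id\"]] = f\"ERROR: {exc}\"\n", "\n", "# results now maps task_id -> agent answer (or an error string)." ] },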
{ "cell_type": "code", "execution_count": 75, "id": "de2b07dd", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "\u001b[92m18:31:23 - LiteLLM:INFO\u001b[0m: utils.py:3101 - \n", "LiteLLM completion() model= gpt-4.1; provider = openai\n", "INFO:LiteLLM:\n", "LiteLLM completion() model= gpt-4.1; provider = openai\n", "\u001b[92m18:31:23 - LiteLLM:INFO\u001b[0m: utils.py:3101 - \n", "LiteLLM completion() model= gpt-4.1; provider = openai\n", "\u001b[92m18:31:23 - LiteLLM:INFO\u001b[0m: utils.py:3101 - \n", "LiteLLM completion() model= gpt-4.1; provider = openai\n", "INFO:LiteLLM:\n", "LiteLLM completion() model= gpt-4.1; provider = openai\n", "INFO:LiteLLM:\n", "LiteLLM completion() model= gpt-4.1; provider = openai\n", "\u001b[92m18:31:23 - LiteLLM:INFO\u001b[0m: utils.py:3101 - \n", "LiteLLM completion() model= gpt-4.1; provider = openai\n", "INFO:LiteLLM:\n", "LiteLLM completion() model= gpt-4.1; provider = openai\n", "\u001b[92m18:31:23 - LiteLLM:INFO\u001b[0m: utils.py:3101 - \n", "LiteLLM completion() model= gpt-4.1; provider = openai\n", "INFO:LiteLLM:\n", "LiteLLM completion() model= gpt-4.1; provider = openai\n", "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", "\u001b[92m18:31:37 - LiteLLM:INFO\u001b[0m: utils.py:1215 - Wrapper: Completed Call, calling success_handler\n", "INFO:LiteLLM:Wrapper: Completed Call, calling success_handler\n", "\u001b[92m18:31:37 - LiteLLM:INFO\u001b[0m: cost_calculator.py:655 - selected model name for cost calculation: openai/gpt-4.1-2025-04-14\n", "INFO:LiteLLM:selected model name for cost calculation: openai/gpt-4.1-2025-04-14\n", "\u001b[92m18:31:37 - LiteLLM:INFO\u001b[0m: utils.py:3101 - \n", "LiteLLM completion() model= gpt-4.1; provider = openai\n", "INFO:LiteLLM:\n", "LiteLLM completion() model= gpt-4.1; provider = openai\n", "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", "\u001b[92m18:31:37 - LiteLLM:INFO\u001b[0m: utils.py:1215 - Wrapper: Completed Call, calling success_handler\n", "INFO:LiteLLM:Wrapper: Completed Call, calling success_handler\n", "\u001b[92m18:31:37 - LiteLLM:INFO\u001b[0m: cost_calculator.py:655 - selected model name for cost calculation: openai/gpt-4.1-2025-04-14\n", "INFO:LiteLLM:selected model name for cost calculation: openai/gpt-4.1-2025-04-14\n", "\u001b[92m18:31:37 - LiteLLM:INFO\u001b[0m: utils.py:3101 - \n", "LiteLLM completion() model= gpt-4.1; provider = openai\n", "INFO:LiteLLM:\n", "LiteLLM completion() model= gpt-4.1; provider = openai\n", "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", "\u001b[92m18:31:39 - LiteLLM:INFO\u001b[0m: utils.py:1215 - Wrapper: Completed Call, calling success_handler\n", "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", "INFO:LiteLLM:Wrapper: Completed Call, calling success_handler\n", "\u001b[92m18:31:39 - LiteLLM:INFO\u001b[0m: cost_calculator.py:655 - selected model name for cost calculation: openai/gpt-4.1-2025-04-14\n", "INFO:LiteLLM:selected model name for cost calculation: openai/gpt-4.1-2025-04-14\n", "\u001b[92m18:31:39 - LiteLLM:INFO\u001b[0m: utils.py:1215 - Wrapper: Completed Call, calling success_handler\n", "INFO:LiteLLM:Wrapper: Completed Call, calling success_handler\n", "\u001b[92m18:31:39 - LiteLLM:INFO\u001b[0m: cost_calculator.py:655 - selected model name for cost calculation: openai/gpt-4.1-2025-04-14\n", "INFO:LiteLLM:selected model name for cost calculation: openai/gpt-4.1-2025-04-14\n", "\u001b[92m18:31:39 - LiteLLM:INFO\u001b[0m: cost_calculator.py:655 - selected model name for cost calculation: openai/gpt-4.1-2025-04-14\n", "INFO:LiteLLM:selected model name for cost calculation: openai/gpt-4.1-2025-04-14\n", "\u001b[92m18:31:39 - LiteLLM:INFO\u001b[0m: cost_calculator.py:655 - selected model name for cost calculation: openai/gpt-4.1-2025-04-14\n", "INFO:LiteLLM:selected model name for cost calculation: openai/gpt-4.1-2025-04-14\n", "\u001b[92m18:31:39 - LiteLLM:INFO\u001b[0m: utils.py:3101 - \n", "LiteLLM completion() model= gpt-4.1; provider = openai\n", "INFO:LiteLLM:\n", "LiteLLM completion() model= gpt-4.1; provider = openai\n", "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", "\u001b[92m18:31:39 - LiteLLM:INFO\u001b[0m: utils.py:1215 - Wrapper: Completed Call, calling success_handler\n", "INFO:LiteLLM:Wrapper: Completed Call, calling success_handler\n", "\u001b[92m18:31:39 - LiteLLM:INFO\u001b[0m: cost_calculator.py:655 - selected model name for cost calculation: openai/gpt-4.1-2025-04-14\n", "INFO:LiteLLM:selected model name for cost calculation: openai/gpt-4.1-2025-04-14\n", "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", "\u001b[92m18:31:39 - LiteLLM:INFO\u001b[0m: utils.py:1215 - Wrapper: Completed Call, calling success_handler\n", "INFO:LiteLLM:Wrapper: Completed Call, calling success_handler\n", "\u001b[92m18:31:39 - LiteLLM:INFO\u001b[0m: cost_calculator.py:655 - selected model name for cost calculation: openai/gpt-4.1-2025-04-14\n", "INFO:LiteLLM:selected model name for cost calculation: openai/gpt-4.1-2025-04-14\n", "\u001b[92m18:31:39 - LiteLLM:INFO\u001b[0m: cost_calculator.py:655 - selected model name for cost calculation: openai/gpt-4.1-2025-04-14\n", "\u001b[92m18:31:39 - LiteLLM:INFO\u001b[0m: utils.py:3101 - \n", "LiteLLM completion() model= gpt-4.1; provider = openai\n", "INFO:LiteLLM:selected model name for cost calculation: openai/gpt-4.1-2025-04-14\n", "INFO:LiteLLM:\n", "LiteLLM completion() model= gpt-4.1; provider = openai\n", "\u001b[92m18:31:39 - LiteLLM:INFO\u001b[0m: cost_calculator.py:655 - selected model name for cost calculation: openai/gpt-4.1-2025-04-14\n", "INFO:LiteLLM:selected model 
name for cost calculation: openai/gpt-4.1-2025-04-14\n", "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", "\u001b[92m18:31:40 - LiteLLM:INFO\u001b[0m: utils.py:1215 - Wrapper: Completed Call, calling success_handler\n", "INFO:LiteLLM:Wrapper: Completed Call, calling success_handler\n", "\u001b[92m18:31:40 - LiteLLM:INFO\u001b[0m: cost_calculator.py:655 - selected model name for cost calculation: openai/gpt-4.1-2025-04-14\n", "INFO:LiteLLM:selected model name for cost calculation: openai/gpt-4.1-2025-04-14\n", "\u001b[92m18:31:40 - LiteLLM:INFO\u001b[0m: utils.py:3101 - \n", "LiteLLM completion() model= gpt-4.1; provider = openai\n", "INFO:LiteLLM:\n", "LiteLLM completion() model= gpt-4.1; provider = openai\n", "INFO:primp:response: https://lite.duckduckgo.com/lite/ 200\n", "\u001b[92m18:31:40 - LiteLLM:INFO\u001b[0m: utils.py:3101 - \n", "LiteLLM completion() model= gpt-4.1; provider = openai\n", "INFO:LiteLLM:\n", "LiteLLM completion() model= gpt-4.1; provider = openai\n", "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 400 Bad Request\"\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "\n", "\u001b[1;31mGive Feedback / Get Help: https://github.com/BerriAI/litellm/issues/new\u001b[0m\n", "LiteLLM.Info: If you need to debug this error, use `litellm._turn_on_debug()'.\n", "\n" ] }, { "data": { "text/html": [ "
Error in generating model output:\n", "litellm.BadRequestError: OpenAIException - Invalid value: 'tool-call'. Supported values are: 'system', 'assistant',\n", "'user', 'function', 'tool', and 'developer'.\n", "\n" ], "text/plain": [ "\u001b[1;31mError in generating model output:\u001b[0m\n", "\u001b[1;31mlitellm.BadRequestError: OpenAIException - Invalid value: \u001b[0m\u001b[32m'tool-call'\u001b[0m\u001b[1;31m. Supported values are: \u001b[0m\u001b[32m'system'\u001b[0m\u001b[1;31m, \u001b[0m\u001b[32m'assistant'\u001b[0m\u001b[1;31m,\u001b[0m\n", "\u001b[32m'user'\u001b[0m\u001b[1;31m, \u001b[0m\u001b[32m'function'\u001b[0m\u001b[1;31m, \u001b[0m\u001b[32m'tool'\u001b[0m\u001b[1;31m, and \u001b[0m\u001b[32m'developer'\u001b[0m\u001b[1;31m.\u001b[0m\n" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:primp:response: https://html.duckduckgo.com/html 200\n", "\u001b[92m18:31:41 - LiteLLM:INFO\u001b[0m: utils.py:3101 - \n", "LiteLLM completion() model= gpt-4.1; provider = openai\n", "INFO:LiteLLM:\n", "LiteLLM completion() model= gpt-4.1; provider = openai\n", "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 400 Bad Request\"\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "\n", "\u001b[1;31mGive Feedback / Get Help: https://github.com/BerriAI/litellm/issues/new\u001b[0m\n", "LiteLLM.Info: If you need to debug this error, use `litellm._turn_on_debug()'.\n", "\n" ] }, { "data": { "text/html": [ "
Error in generating model output:\n", "litellm.BadRequestError: OpenAIException - Invalid value: 'tool-call'. Supported values are: 'system', 'assistant',\n", "'user', 'function', 'tool', and 'developer'.\n", "\n" ], "text/plain": [ "\u001b[1;31mError in generating model output:\u001b[0m\n", "\u001b[1;31mlitellm.BadRequestError: OpenAIException - Invalid value: \u001b[0m\u001b[32m'tool-call'\u001b[0m\u001b[1;31m. Supported values are: \u001b[0m\u001b[32m'system'\u001b[0m\u001b[1;31m, \u001b[0m\u001b[32m'assistant'\u001b[0m\u001b[1;31m,\u001b[0m\n", "\u001b[32m'user'\u001b[0m\u001b[1;31m, \u001b[0m\u001b[32m'function'\u001b[0m\u001b[1;31m, \u001b[0m\u001b[32m'tool'\u001b[0m\u001b[1;31m, and \u001b[0m\u001b[32m'developer'\u001b[0m\u001b[1;31m.\u001b[0m\n" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", "\u001b[92m18:31:42 - LiteLLM:INFO\u001b[0m: utils.py:1215 - Wrapper: Completed Call, calling success_handler\n", "INFO:LiteLLM:Wrapper: Completed Call, calling success_handler\n", "\u001b[92m18:31:42 - LiteLLM:INFO\u001b[0m: cost_calculator.py:655 - selected model name for cost calculation: openai/gpt-4.1-2025-04-14\n", "INFO:LiteLLM:selected model name for cost calculation: openai/gpt-4.1-2025-04-14\n", "\u001b[92m18:31:42 - LiteLLM:INFO\u001b[0m: cost_calculator.py:655 - selected model name for cost calculation: openai/gpt-4.1-2025-04-14\n", "INFO:LiteLLM:selected model name for cost calculation: openai/gpt-4.1-2025-04-14\n", "\u001b[92m18:31:42 - LiteLLM:INFO\u001b[0m: cost_calculator.py:655 - selected model name for cost calculation: openai/gpt-4.1-2025-04-14\n", "INFO:LiteLLM:selected model name for cost calculation: openai/gpt-4.1-2025-04-14\n", "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", "\u001b[92m18:31:42 - LiteLLM:INFO\u001b[0m: utils.py:1215 - Wrapper: Completed Call, calling success_handler\n", "INFO:LiteLLM:Wrapper: Completed Call, calling success_handler\n", "\u001b[92m18:31:42 - LiteLLM:INFO\u001b[0m: cost_calculator.py:655 - selected model name for cost calculation: openai/gpt-4.1-2025-04-14\n", "INFO:LiteLLM:selected model name for cost calculation: openai/gpt-4.1-2025-04-14\n", "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", "\u001b[92m18:31:43 - LiteLLM:INFO\u001b[0m: utils.py:1215 - Wrapper: Completed Call, calling success_handler\n", "INFO:LiteLLM:Wrapper: Completed Call, calling success_handler\n", "\u001b[92m18:31:43 - LiteLLM:INFO\u001b[0m: cost_calculator.py:655 - selected model name for cost calculation: openai/gpt-4.1-2025-04-14\n", "INFO:LiteLLM:selected model name for cost calculation: openai/gpt-4.1-2025-04-14\n", "\u001b[92m18:31:43 - LiteLLM:INFO\u001b[0m: cost_calculator.py:655 - selected model name for cost calculation: openai/gpt-4.1-2025-04-14\n", "INFO:LiteLLM:selected model name for cost calculation: openai/gpt-4.1-2025-04-14\n", "\u001b[92m18:31:43 - LiteLLM:INFO\u001b[0m: cost_calculator.py:655 - selected model name for cost calculation: openai/gpt-4.1-2025-04-14\n", "INFO:LiteLLM:selected model name for cost calculation: openai/gpt-4.1-2025-04-14\n", "INFO:primp:response: https://html.duckduckgo.com/html 200\n", "\u001b[92m18:31:44 - LiteLLM:INFO\u001b[0m: utils.py:3101 - \n", "LiteLLM completion() model= gpt-4.1; provider = openai\n", "INFO:LiteLLM:\n", "LiteLLM completion() model= gpt-4.1; 
provider = openai\n", "INFO:primp:response: https://html.duckduckgo.com/html 200\n", "\u001b[92m18:31:44 - LiteLLM:INFO\u001b[0m: utils.py:3101 - \n", "LiteLLM completion() model= gpt-4.1; provider = openai\n", "INFO:LiteLLM:\n", "LiteLLM completion() model= gpt-4.1; provider = openai\n", "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 400 Bad Request\"\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "\n", "\u001b[1;31mGive Feedback / Get Help: https://github.com/BerriAI/litellm/issues/new\u001b[0m\n", "LiteLLM.Info: If you need to debug this error, use `litellm._turn_on_debug()'.\n", "\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 400 Bad Request\"\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "\n", "\u001b[1;31mGive Feedback / Get Help: https://github.com/BerriAI/litellm/issues/new\u001b[0m\n", "LiteLLM.Info: If you need to debug this error, use `litellm._turn_on_debug()'.\n", "\n" ] }, { "data": { "text/html": [ "
Error in generating model output:\n", "litellm.BadRequestError: OpenAIException - Invalid value: 'tool-call'. Supported values are: 'system', 'assistant',\n", "'user', 'function', 'tool', and 'developer'.\n", "\n" ], "text/plain": [ "\u001b[1;31mError in generating model output:\u001b[0m\n", "\u001b[1;31mlitellm.BadRequestError: OpenAIException - Invalid value: \u001b[0m\u001b[32m'tool-call'\u001b[0m\u001b[1;31m. Supported values are: \u001b[0m\u001b[32m'system'\u001b[0m\u001b[1;31m, \u001b[0m\u001b[32m'assistant'\u001b[0m\u001b[1;31m,\u001b[0m\n", "\u001b[32m'user'\u001b[0m\u001b[1;31m, \u001b[0m\u001b[32m'function'\u001b[0m\u001b[1;31m, \u001b[0m\u001b[32m'tool'\u001b[0m\u001b[1;31m, and \u001b[0m\u001b[32m'developer'\u001b[0m\u001b[1;31m.\u001b[0m\n" ] }, "metadata": {}, "output_type": "display_data" }, { "ename": "AgentGenerationError", "evalue": "Error in generating model output:\nlitellm.BadRequestError: OpenAIException - Invalid value: 'tool-call'. Supported values are: 'system', 'assistant', 'user', 'function', 'tool', and 'developer'.", "output_type": "error", "traceback": [ "\u001b[31m---------------------------------------------------------------------------\u001b[39m", "\u001b[31mBadRequestError\u001b[39m Traceback (most recent call last)", "\u001b[36mFile \u001b[39m\u001b[32m~/repos/unit_4_GAIA_challenge/.venv/lib/python3.12/site-packages/litellm/llms/openai/openai.py:725\u001b[39m, in \u001b[36mOpenAIChatCompletion.completion\u001b[39m\u001b[34m(self, model_response, timeout, optional_params, litellm_params, logging_obj, model, messages, print_verbose, api_key, api_base, api_version, dynamic_params, azure_ad_token, acompletion, logger_fn, headers, custom_prompt_dict, client, organization, custom_llm_provider, drop_params)\u001b[39m\n\u001b[32m 724\u001b[39m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[32m--> \u001b[39m\u001b[32m725\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m e\n\u001b[32m 726\u001b[39m \u001b[38;5;28;01mexcept\u001b[39;00m OpenAIError \u001b[38;5;28;01mas\u001b[39;00m e:\n", "\u001b[36mFile \u001b[39m\u001b[32m~/repos/unit_4_GAIA_challenge/.venv/lib/python3.12/site-packages/litellm/llms/openai/openai.py:653\u001b[39m, in \u001b[36mOpenAIChatCompletion.completion\u001b[39m\u001b[34m(self, model_response, timeout, optional_params, litellm_params, logging_obj, model, messages, print_verbose, api_key, api_base, api_version, dynamic_params, azure_ad_token, acompletion, logger_fn, headers, custom_prompt_dict, client, organization, custom_llm_provider, drop_params)\u001b[39m\n\u001b[32m 639\u001b[39m logging_obj.pre_call(\n\u001b[32m 640\u001b[39m \u001b[38;5;28minput\u001b[39m=messages,\n\u001b[32m 641\u001b[39m api_key=openai_client.api_key,\n\u001b[32m (...)\u001b[39m\u001b[32m 647\u001b[39m },\n\u001b[32m 648\u001b[39m )\n\u001b[32m 650\u001b[39m (\n\u001b[32m 651\u001b[39m headers,\n\u001b[32m 652\u001b[39m response,\n\u001b[32m--> \u001b[39m\u001b[32m653\u001b[39m ) = \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mmake_sync_openai_chat_completion_request\u001b[49m\u001b[43m(\u001b[49m\n\u001b[32m 654\u001b[39m \u001b[43m \u001b[49m\u001b[43mopenai_client\u001b[49m\u001b[43m=\u001b[49m\u001b[43mopenai_client\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 655\u001b[39m \u001b[43m \u001b[49m\u001b[43mdata\u001b[49m\u001b[43m=\u001b[49m\u001b[43mdata\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 656\u001b[39m \u001b[43m \u001b[49m\u001b[43mtimeout\u001b[49m\u001b[43m=\u001b[49m\u001b[43mtimeout\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 
657\u001b[39m \u001b[43m \u001b[49m\u001b[43mlogging_obj\u001b[49m\u001b[43m=\u001b[49m\u001b[43mlogging_obj\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 658\u001b[39m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 660\u001b[39m logging_obj.model_call_details[\u001b[33m\"\u001b[39m\u001b[33mresponse_headers\u001b[39m\u001b[33m\"\u001b[39m] = headers\n", "\u001b[36mFile \u001b[39m\u001b[32m~/repos/unit_4_GAIA_challenge/.venv/lib/python3.12/site-packages/litellm/litellm_core_utils/logging_utils.py:149\u001b[39m, in \u001b[36mtrack_llm_api_timing.