{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "## Welcome to the Second Lab - Week 1, Day 3\n", "\n", "Today we will work with lots of models! This is a way to get comfortable with APIs." ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [], "source": [ "# Start with imports - ask ChatGPT to explain any package that you don't know\n", "\n", "import os\n", "import json\n", "import asyncio\n", "from dotenv import load_dotenv\n", "from openai import OpenAI, AsyncOpenAI\n", "from anthropic import AsyncAnthropic\n", "from pydantic import BaseModel" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Always remember to do this!\n", "load_dotenv(override=True)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Print the key prefixes to help with any debugging\n", "\n", "OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')\n", "ANTHROPIC_API_KEY = os.getenv('ANTHROPIC_API_KEY')\n", "GOOGLE_API_KEY = os.getenv('GOOGLE_API_KEY')\n", "DEEPSEEK_API_KEY = os.getenv('DEEPSEEK_API_KEY')\n", "GROQ_API_KEY = os.getenv('GROQ_API_KEY')\n", "\n", "if OPENAI_API_KEY:\n", " print(f\"OpenAI API Key exists and begins {OPENAI_API_KEY[:8]}\")\n", "else:\n", " print(\"OpenAI API Key not set\")\n", " \n", "if ANTHROPIC_API_KEY:\n", " print(f\"Anthropic API Key exists and begins {ANTHROPIC_API_KEY[:7]}\")\n", "else:\n", " print(\"Anthropic API Key not set (and this is optional)\")\n", "\n", "if GOOGLE_API_KEY:\n", " print(f\"Google API Key exists and begins {GOOGLE_API_KEY[:2]}\")\n", "else:\n", " print(\"Google API Key not set (and this is optional)\")\n", "\n", "if DEEPSEEK_API_KEY:\n", " print(f\"DeepSeek API Key exists and begins {DEEPSEEK_API_KEY[:3]}\")\n", "else:\n", " print(\"DeepSeek API Key not set (and this is optional)\")\n", "\n", "if GROQ_API_KEY:\n", " print(f\"Groq API Key exists and begins {GROQ_API_KEY[:4]}\")\n", "else:\n", " print(\"Groq API Key not set (and this is optional)\")" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [], "source": [ "request = \"Please come up with a challenging, nuanced question that I can ask a number of LLMs to evaluate their intelligence. 
\"\n", "request += \"Answer only with the question, no explanation.\"\n", "messages = [{\"role\": \"user\", \"content\": request}]" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "print(messages)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "openai = AsyncOpenAI()\n", "response = await openai.chat.completions.create(\n", " model=\"gpt-4o-mini\",\n", " messages=messages,\n", ")\n", "question = response.choices[0].message.content\n", "print(question)\n" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [], "source": [ "# Define Pydantic model for storing LLM results\n", "class LLMResult(BaseModel):\n", " model: str\n", " answer: str\n" ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [], "source": [ "results: list[LLMResult] = []\n", "messages = [{\"role\": \"user\", \"content\": question}]" ] }, { "cell_type": "code", "execution_count": 9, "metadata": {}, "outputs": [], "source": [ "# The API we know well\n", "async def openai_answer() -> None:\n", "\n", " if OPENAI_API_KEY is None:\n", " return None\n", " \n", " print(\"OpenAI starting!\")\n", " model_name = \"gpt-4o-mini\"\n", "\n", " try:\n", " response = await openai.chat.completions.create(model=model_name, messages=messages)\n", " answer = response.choices[0].message.content\n", " results.append(LLMResult(model=model_name, answer=answer))\n", " except Exception as e:\n", " print(f\"Error with OpenAI: {e}\")\n", " return None\n", "\n", " print(\"OpenAI done!\")" ] }, { "cell_type": "code", "execution_count": 10, "metadata": {}, "outputs": [], "source": [ "# Anthropic has a slightly different API, and Max Tokens is required\n", "\n", "async def anthropic_answer() -> None:\n", "\n", " if ANTHROPIC_API_KEY is None:\n", " return None\n", " \n", " print(\"Anthropic starting!\")\n", " model_name = \"claude-3-7-sonnet-latest\"\n", "\n", " claude = AsyncAnthropic()\n", " try:\n", " response = await claude.messages.create(model=model_name, messages=messages, max_tokens=1000)\n", " answer = response.content[0].text\n", " results.append(LLMResult(model=model_name, answer=answer))\n", " except Exception as e:\n", " print(f\"Error with Anthropic: {e}\")\n", " return None\n", "\n", " print(\"Anthropic done!\")" ] }, { "cell_type": "code", "execution_count": 11, "metadata": {}, "outputs": [], "source": [ "async def google_answer() -> None:\n", "\n", " if GOOGLE_API_KEY is None:\n", " return None\n", " \n", " print(\"Google starting!\")\n", " model_name = \"gemini-2.0-flash\"\n", "\n", " gemini = AsyncOpenAI(api_key=GOOGLE_API_KEY, base_url=\"https://generativelanguage.googleapis.com/v1beta/openai/\")\n", " try:\n", " response = await gemini.chat.completions.create(model=model_name, messages=messages)\n", " answer = response.choices[0].message.content\n", " results.append(LLMResult(model=model_name, answer=answer))\n", " except Exception as e:\n", " print(f\"Error with Google: {e}\")\n", " return None\n", "\n", " print(\"Google done!\")" ] }, { "cell_type": "code", "execution_count": 12, "metadata": {}, "outputs": [], "source": [ "async def deepseek_answer() -> None:\n", "\n", " if DEEPSEEK_API_KEY is None:\n", " return None\n", " \n", " print(\"DeepSeek starting!\")\n", " model_name = \"deepseek-chat\"\n", "\n", " deepseek = AsyncOpenAI(api_key=DEEPSEEK_API_KEY, base_url=\"https://api.deepseek.com/v1\")\n", " try:\n", " response = await deepseek.chat.completions.create(model=model_name, 
{ "cell_type": "code", "execution_count": 9, "metadata": {}, "outputs": [], "source": [ "# The API we know well\n", "async def openai_answer() -> None:\n", "\n", "    if OPENAI_API_KEY is None:\n", "        return None\n", "\n", "    print(\"OpenAI starting!\")\n", "    model_name = \"gpt-4o-mini\"\n", "\n", "    try:\n", "        response = await openai.chat.completions.create(model=model_name, messages=messages)\n", "        answer = response.choices[0].message.content\n", "        results.append(LLMResult(model=model_name, answer=answer))\n", "    except Exception as e:\n", "        print(f\"Error with OpenAI: {e}\")\n", "        return None\n", "\n", "    print(\"OpenAI done!\")" ] },
{ "cell_type": "code", "execution_count": 10, "metadata": {}, "outputs": [], "source": [ "# Anthropic has a slightly different API, and max_tokens is required\n", "\n", "async def anthropic_answer() -> None:\n", "\n", "    if ANTHROPIC_API_KEY is None:\n", "        return None\n", "\n", "    print(\"Anthropic starting!\")\n", "    model_name = \"claude-3-7-sonnet-latest\"\n", "\n", "    claude = AsyncAnthropic()\n", "    try:\n", "        response = await claude.messages.create(model=model_name, messages=messages, max_tokens=1000)\n", "        answer = response.content[0].text\n", "        results.append(LLMResult(model=model_name, answer=answer))\n", "    except Exception as e:\n", "        print(f\"Error with Anthropic: {e}\")\n", "        return None\n", "\n", "    print(\"Anthropic done!\")" ] },
{ "cell_type": "code", "execution_count": 11, "metadata": {}, "outputs": [], "source": [ "async def google_answer() -> None:\n", "\n", "    if GOOGLE_API_KEY is None:\n", "        return None\n", "\n", "    print(\"Google starting!\")\n", "    model_name = \"gemini-2.0-flash\"\n", "\n", "    gemini = AsyncOpenAI(api_key=GOOGLE_API_KEY, base_url=\"https://generativelanguage.googleapis.com/v1beta/openai/\")\n", "    try:\n", "        response = await gemini.chat.completions.create(model=model_name, messages=messages)\n", "        answer = response.choices[0].message.content\n", "        results.append(LLMResult(model=model_name, answer=answer))\n", "    except Exception as e:\n", "        print(f\"Error with Google: {e}\")\n", "        return None\n", "\n", "    print(\"Google done!\")" ] },
{ "cell_type": "code", "execution_count": 12, "metadata": {}, "outputs": [], "source": [ "async def deepseek_answer() -> None:\n", "\n", "    if DEEPSEEK_API_KEY is None:\n", "        return None\n", "\n", "    print(\"DeepSeek starting!\")\n", "    model_name = \"deepseek-chat\"\n", "\n", "    deepseek = AsyncOpenAI(api_key=DEEPSEEK_API_KEY, base_url=\"https://api.deepseek.com/v1\")\n", "    try:\n", "        response = await deepseek.chat.completions.create(model=model_name, messages=messages)\n", "        answer = response.choices[0].message.content\n", "        results.append(LLMResult(model=model_name, answer=answer))\n", "    except Exception as e:\n", "        print(f\"Error with DeepSeek: {e}\")\n", "        return None\n", "\n", "    print(\"DeepSeek done!\")" ] },
{ "cell_type": "code", "execution_count": 13, "metadata": {}, "outputs": [], "source": [ "async def groq_answer() -> None:\n", "\n", "    if GROQ_API_KEY is None:\n", "        return None\n", "\n", "    print(\"Groq starting!\")\n", "    model_name = \"llama-3.3-70b-versatile\"\n", "\n", "    groq = AsyncOpenAI(api_key=GROQ_API_KEY, base_url=\"https://api.groq.com/openai/v1\")\n", "    try:\n", "        response = await groq.chat.completions.create(model=model_name, messages=messages)\n", "        answer = response.choices[0].message.content\n", "        results.append(LLMResult(model=model_name, answer=answer))\n", "    except Exception as e:\n", "        print(f\"Error with Groq: {e}\")\n", "        return None\n", "\n", "    print(\"Groq done!\")\n" ] },
{ "cell_type": "markdown", "metadata": {}, "source": [ "## For the next cell, we will use Ollama\n", "\n", "Ollama runs a local web service that provides an OpenAI-compatible endpoint, and runs models locally using high-performance C++ code.\n", "\n", "If you don't have Ollama, install it by visiting https://ollama.com, then press Download and follow the instructions.\n", "\n", "After it's installed, you should be able to visit http://localhost:11434 and see the message \"Ollama is running\".\n", "\n", "You might need to restart Cursor (and maybe reboot). Then open a Terminal (control+\\`) and run `ollama serve`\n", "\n", "Useful Ollama commands (run these in the terminal, or with an exclamation mark in this notebook):\n", "\n", "`ollama pull <model_name>` downloads a model locally  \n", "`ollama ls` lists all the models you've downloaded  \n", "`ollama rm <model_name>` deletes the specified model from your downloads" ] },
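{ "cell_type": "markdown", "metadata": {}, "source": [ "Before pulling a model, you can sanity-check that the server is up. The next cell is a minimal sketch using the `requests` library (assumed to be available in your environment); it fetches the root page, which should print \"Ollama is running\"." ] },
{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Quick health check - the Ollama root endpoint replies \"Ollama is running\" when the server is up\n", "# (sketch, assuming the requests library is installed)\n", "import requests\n", "\n", "try:\n", "    print(requests.get(\"http://localhost:11434\").text)\n", "except requests.exceptions.ConnectionError:\n", "    print(\"Ollama doesn't seem to be running - try `ollama serve` in a terminal\")" ] },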
\n", " \n", " \n", "

Super important - ignore me at your peril!

\n", " The model called llama3.3 is FAR too large for home computers - it's not intended for personal computing and will consume all your resources! Stick with the nicely sized llama3.2 or llama3.2:1b and if you want larger, try llama3.1 or smaller variants of Qwen, Gemma, Phi or DeepSeek. See the the Ollama models page for a full list of models and sizes.\n", " \n", "
" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "!ollama pull llama3.2" ] }, { "cell_type": "code", "execution_count": 15, "metadata": {}, "outputs": [], "source": [ "async def ollama_answer() -> None:\n", " model_name = \"llama3.2\"\n", "\n", " print(\"Ollama starting!\")\n", " ollama = AsyncOpenAI(base_url='http://localhost:11434/v1', api_key='ollama')\n", " try:\n", " response = await ollama.chat.completions.create(model=model_name, messages=messages)\n", " answer = response.choices[0].message.content\n", " results.append(LLMResult(model=model_name, answer=answer))\n", " except Exception as e:\n", " print(f\"Error with Ollama: {e}\")\n", " return None\n", "\n", " print(\"Ollama done!\") " ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "async def gather_answers():\n", " tasks = [\n", " openai_answer(),\n", " anthropic_answer(),\n", " google_answer(),\n", " deepseek_answer(),\n", " groq_answer(),\n", " ollama_answer()\n", " ]\n", " await asyncio.gather(*tasks)\n", "\n", "await gather_answers()" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "together = \"\"\n", "competitors = []\n", "answers = []\n", "\n", "for res in results:\n", " competitor = res.model\n", " answer = res.answer\n", " competitors.append(competitor)\n", " answers.append(answer)\n", " together += f\"# Response from competitor {competitor}\\n\\n\"\n", " together += answer + \"\\n\\n\"\n", "\n", "print(f\"Number of competitors: {len(results)}\")\n", "print(together)\n" ] }, { "cell_type": "code", "execution_count": 18, "metadata": {}, "outputs": [], "source": [ "judge = f\"\"\"You are judging a competition between {len(results)} competitors.\n", "Each model has been given this question:\n", "\n", "{question}\n", "\n", "Your job is to evaluate each response for clarity and strength of argument, and rank them in order of best to worst.\n", "Respond with JSON, and only JSON, with the following format:\n", "{{\"results\": [\"best competitor number\", \"second best competitor number\", \"third best competitor number\", ...]}}\n", "\n", "Here are the responses from each competitor:\n", "\n", "{together}\n", "\n", "Now respond with the JSON with the ranked order of the competitors, nothing else. 
{ "cell_type": "code", "execution_count": 18, "metadata": {}, "outputs": [], "source": [ "judge = f\"\"\"You are judging a competition between {len(results)} competitors.\n", "Each model has been given this question:\n", "\n", "{question}\n", "\n", "Your job is to evaluate each response for clarity and strength of argument, and rank them in order of best to worst.\n", "Respond with JSON, and only JSON, with the following format:\n", "{{\"results\": [\"best competitor name\", \"second best competitor name\", \"third best competitor name\", ...]}}\n", "Use the model name shown in each competitor's header.\n", "\n", "Here are the responses from each competitor:\n", "\n", "{together}\n", "\n", "Now respond with only the JSON containing the ranked order of the competitors, nothing else. Do not include markdown formatting or code blocks.\"\"\"\n" ] },
{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "print(judge)" ] },
{ "cell_type": "code", "execution_count": 20, "metadata": {}, "outputs": [], "source": [ "judge_messages = [{\"role\": \"user\", \"content\": judge}]" ] },
{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Judgement time!\n", "\n", "openai = OpenAI()\n", "response = openai.chat.completions.create(\n", "    model=\"o3-mini\",\n", "    messages=judge_messages,\n", ")\n", "judgement = response.choices[0].message.content\n", "print(judgement)\n" ] },
{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# OK let's turn this into results!\n", "\n", "results_dict = json.loads(judgement)\n", "ranks = results_dict[\"results\"]\n", "for index, comp in enumerate(ranks):\n", "    print(f\"Rank {index+1}: {comp}\")" ] } ], "metadata": { "kernelspec": { "display_name": ".venv", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.12.11" } }, "nbformat": 4, "nbformat_minor": 2 }