diff --git a/.gitattributes b/.gitattributes index a6344aac8c09253b3b630fb776ae94478aa0275b..9218a2cb9bfa6ecfdf95f1c21c2d389fbcde0764 100644 --- a/.gitattributes +++ b/.gitattributes @@ -33,3 +33,18 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text *.zip filter=lfs diff=lfs merge=lfs -text *.zst filter=lfs diff=lfs merge=lfs -text *tfevents* filter=lfs diff=lfs merge=lfs -text +SETUP-PC.pdf filter=lfs diff=lfs merge=lfs -text +SETUP-linux.pdf filter=lfs diff=lfs merge=lfs -text +SETUP-mac.pdf filter=lfs diff=lfs merge=lfs -text +business.jpg filter=lfs diff=lfs merge=lfs -text +important.jpg filter=lfs diff=lfs merge=lfs -text +resources.jpg filter=lfs diff=lfs merge=lfs -text +thankyou.jpg filter=lfs diff=lfs merge=lfs -text +voyage.jpg filter=lfs diff=lfs merge=lfs -text +week2/community-contributions/chatbot_conversation_robots.jpg filter=lfs diff=lfs merge=lfs -text +week4/community-contributions/ai_code_converter/screenshots/codeXchange_3.png filter=lfs diff=lfs merge=lfs -text +week5/community-contributions/docuSeekAI/docuseek2.png filter=lfs diff=lfs merge=lfs -text +week5/community-contributions/docuSeekAI/docuseek3.png filter=lfs diff=lfs merge=lfs -text +week5/community-contributions/docuSeekAI/docuseek4.png filter=lfs diff=lfs merge=lfs -text +week8/community_contributions/images/gui.png filter=lfs diff=lfs merge=lfs -text +week8/community_contributions/images/metrics.png filter=lfs diff=lfs merge=lfs -text diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..053b56be811a7341e72427562c12f889aa0f05e0 --- /dev/null +++ b/.gitignore @@ -0,0 +1,184 @@ +# Github's default gitignore for Python + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. 
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide +.pdm.toml + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ +llms/ +llms.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +.idea/ + +# Added this to ignore models downloaded from HF +model_cache/ +# Ignore finder files +.DS_Store +/.DS_Store + +# Ignore Chroma vector database +vector_db/ +products_vectorstore/ + +# And ignore any pickle files made during the course +*.pkl + +# ignore gradio private files +.gradio +/.gradio + +# ignore diagnostics reports +**/report.txt + +# ignore optimized C++ code from being checked into repo +week4/optimized +week4/simple diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..5c8dab12400082550a8019ede9ac0f9ac76efae1 --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2024 Ed Donner + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/README.md b/README.md index 5152a8136673ff2f5716b28f7f4b5ad969eb474c..f7706974bee305130f356421aaefad89f16961e7 100644 --- a/README.md +++ b/README.md @@ -1,12 +1,130 @@ --- -title: Llm Engineering -emoji: 😻 -colorFrom: purple -colorTo: red +title: llm_engineering +app_file: turo_assist.py sdk: gradio sdk_version: 5.29.0 -app_file: app.py -pinned: false --- +# LLM Engineering - Master AI and LLMs -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference +## Your 8 week journey to proficiency starts today + + + +I'm so happy you're joining me on this path. We'll be building immensely satisfying projects in the coming weeks. Some will be easy, some will be challenging, many will ASTOUND you! The projects build on each other so you develop deeper and deeper expertise each week. 
One thing's for sure: you're going to have a lot of fun along the way. + +### Before you begin + +I'm here to help you be most successful with your learning! If you hit any snafus, or if you have any ideas on how I can improve the course, please do reach out in the platform or by emailing me directly (ed@edwarddonner.com). It's always great to connect with people on LinkedIn to build up the community - you'll find me here: +https://www.linkedin.com/in/eddonner/ +And this is new to me, but I'm also trying out X/Twitter at [@edwarddonner](https://x.com/edwarddonner) - if you're on X, please show me how it's done 😂 + +Resources to accompany the course, including the slides and useful links, are here: +https://edwarddonner.com/2024/11/13/llm-engineering-resources/ + +## Instant Gratification instructions for Week 1, Day 1 - with Llama 3.2 **not** Llama 3.3! + +### Important note: see my warning about Llama3.3 below - it's too large for home computers! Stick with llama3.2! Several students have missed this warning... + +We will start the course by installing Ollama so you can see results immediately! +1. Download and install Ollama from https://ollama.com noting that on a PC you might need to have administrator permissions for the install to work properly +2. On a PC, start a Command prompt / Powershell (Press Win + R, type `cmd`, and press Enter). On a Mac, start a Terminal (Applications > Utilities > Terminal). +3. Run `ollama run llama3.2` or for smaller machines try `ollama run llama3.2:1b` - **please note** steer clear of Meta's latest model llama3.3 because at 70B parameters that's way too large for most home computers! +4. If this doesn't work: you may need to run `ollama serve` in another Powershell (Windows) or Terminal (Mac), and try step 3 again. On a PC, you may need to be running in an Admin instance of Powershell. +5. And if that doesn't work on your box, I've set this up on the cloud. This is on Google Colab, which will need you to have a Google account to sign in, but is free: https://colab.research.google.com/drive/1-_f5XZPsChvfU1sJ0QqCePtIuc55LSdu?usp=sharing + +Any problems, please contact me! + +## Then, Setup instructions + +After we do the Ollama quick project, and after I introduce myself and the course, we get to work with the full environment setup. + +Hopefully I've done a decent job of making these guides bulletproof - but please contact me right away if you hit roadblocks: + +- PC people please follow the instructions in [SETUP-PC.md](SETUP-PC.md) +- Mac people please follow the instructions in [SETUP-mac.md](SETUP-mac.md) +- Linux people please follow the instructions in [SETUP-linux.md](SETUP-linux.md) + +There are also PDF versions of the setup instructions in this folder if you'd prefer. + +### An important point on API costs (which are optional! No need to spend if you don't wish) + +During the course, I'll suggest you try out the leading models at the forefront of progress, known as the Frontier models. I'll also suggest you run open-source models using Google Colab. These services have some charges, but I'll keep costs minimal - like, a few cents at a time. And I'll provide alternatives if you'd prefer not to use them. + +Please do monitor your API usage to ensure you're comfortable with spend; I've included links below. There's no need to spend anything more than a couple of dollars for the entire course. 
Some AI providers such as OpenAI require a minimum credit like \$5 or local equivalent; we should only spend a fraction of it, and you'll have plenty of opportunity to put it to good use in your own projects. During Week 7 you have an option to spend a bit more if you're enjoying the process - I spend about \$10 myself and the results make me very happy indeed! But it's not necessary in the least; the important part is that you focus on learning. + +### Free alternative to Paid APIs + +Early in the course, I show you an alternative if you'd rather not spend anything on APIs: +Any time that we have code like: +`openai = OpenAI()` +You can use this as a direct replacement: +`openai = OpenAI(base_url='http://localhost:11434/v1', api_key='ollama')` +And also replace model names like **gpt-4o-mini** with **llama3.2**. +For week 1 day 1, you can find this in week1/solutions/day1_with_ollama.ipynb. + +Below is a full example: + +``` +# You need to do this one time on your computer +!ollama pull llama3.2 + +from openai import OpenAI +MODEL = "llama3.2" +openai = OpenAI(base_url="http://localhost:11434/v1", api_key="ollama") + +response = openai.chat.completions.create( + model=MODEL, + messages=[{"role": "user", "content": "What is 2 + 2?"}] +) + +print(response.choices[0].message.content) +``` + +### How this Repo is organized + +There are folders for each of the "weeks", representing modules of the class, culminating in a powerful autonomous Agentic AI solution in Week 8 that draws on many of the prior weeks. +Follow the setup instructions above, then open the Week 1 folder and prepare for joy. + +### The most important part + +The mantra of the course is: the best way to learn is by **DOING**. I don't type all the code during the course; I execute it for you to see the results. You should work along with me or after each lecture, running each cell, inspecting the objects to get a detailed understanding of what's happening. Then tweak the code and make it your own. There are juicy challenges for you throughout the course. I'd love it if you wanted to submit a Pull Request for your code (instructions [here](https://chatgpt.com/share/677a9cb5-c64c-8012-99e0-e06e88afd293)) and I can make your solutions available to others so we share in your progress; as an added benefit, you'll be recognized in GitHub for your contribution to the repo. While the projects are enjoyable, they are first and foremost designed to be _educational_, teaching you business skills that can be put into practice in your work. + +## Starting in Week 3, we'll also be using Google Colab for running with GPUs + +You should be able to use the free tier or minimal spend to complete all the projects in the class. I personally signed up for Colab Pro+ and I'm loving it - but it's not required. 
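+ +One more note on the "Free alternative to Paid APIs" section above before we move on. If you'd rather not edit every cell by hand each time you switch between the paid API and the free local Ollama endpoint, here is a minimal sketch of that idea - the `get_client_and_model` helper and the `USE_OLLAMA` flag are illustrative names for this README, not something the course code defines: + +``` +import os +from openai import OpenAI + +# Illustrative flag: set USE_OLLAMA=1 in your environment (or .env) to run locally for free +USE_OLLAMA = os.getenv("USE_OLLAMA", "0") == "1" + +def get_client_and_model(): +    # Return an OpenAI-compatible client and a sensible default model name +    if USE_OLLAMA: +        # Ollama exposes an OpenAI-compatible endpoint on localhost:11434 +        return OpenAI(base_url="http://localhost:11434/v1", api_key="ollama"), "llama3.2" +    return OpenAI(), "gpt-4o-mini" + +openai, MODEL = get_client_and_model() +response = openai.chat.completions.create( +    model=MODEL, +    messages=[{"role": "user", "content": "What is 2 + 2?"}] +) +print(response.choices[0].message.content) +``` + +Then every cell can carry on using `openai` and `MODEL` exactly as before.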
+ +Learn about Google Colab and set up a Google account (if you don't already have one) [here](https://colab.research.google.com/) + +The colab links are in the Week folders and also here: +- For week 3 day 1, this Google Colab shows what [colab can do](https://colab.research.google.com/drive/1DjcrYDZldAXKJ08x1uYIVCtItoLPk1Wr?usp=sharing) +- For week 3 day 2, here is a colab for the HuggingFace [pipelines API](https://colab.research.google.com/drive/1aMaEw8A56xs0bRM4lu8z7ou18jqyybGm?usp=sharing) +- For week 3 day 3, here's the colab on [Tokenizers](https://colab.research.google.com/drive/1WD6Y2N7ctQi1X9wa6rpkg8UfyA4iSVuz?usp=sharing) +- For week 3 day 4, we go to a colab with HuggingFace [models](https://colab.research.google.com/drive/1hhR9Z-yiqjUe7pJjVQw4c74z_V3VchLy?usp=sharing) +- For week 3 day 5, we return to colab to make our [Meeting Minutes product](https://colab.research.google.com/drive/1KSMxOCprsl1QRpt_Rq0UqCAyMtPqDQYx?usp=sharing) +- For week 7, we will use these Colab books: [Day 1](https://colab.research.google.com/drive/15rqdMTJwK76icPBxNoqhI7Ww8UM-Y7ni?usp=sharing) | [Day 2](https://colab.research.google.com/drive/1T72pbfZw32fq-clQEp-p8YQ4_qFKv4TP?usp=sharing) | [Days 3 and 4](https://colab.research.google.com/drive/1csEdaECRtjV_1p9zMkaKKjCpYnltlN3M?usp=sharing) | [Day 5](https://colab.research.google.com/drive/1igA0HF0gvQqbdBD4GkcK3GpHtuDLijYn?usp=sharing) + +### Monitoring API charges + +You can keep your API spend very low throughout this course; you can monitor spend at the dashboards: [here](https://platform.openai.com/usage) for OpenAI, [here](https://console.anthropic.com/settings/cost) for Anthropic and [here](https://console.cloud.google.com/apis/api/generativelanguage.googleapis.com/cost) for Google Gemini. + +The charges for the exercsies in this course should always be quite low, but if you'd prefer to keep them minimal, then be sure to always choose the cheapest versions of models: +1. For OpenAI: Always use model `gpt-4o-mini` in the code instead of `gpt-4o` +2. For Anthropic: Always use model `claude-3-haiku-20240307` in the code instead of the other Claude models +3. During week 7, look out for my instructions for using the cheaper dataset + +Please do message me or email me at ed@edwarddonner.com if this doesn't work or if I can help with anything. I can't wait to hear how you get on. + +
+ ![]() |
+
+ Other resources+ I've put together this webpage with useful resources for the course. This includes links to all the slides.+ https://edwarddonner.com/2024/11/13/llm-engineering-resources/ + Please keep this bookmarked, and I'll continue to add more useful links there over time. + + |
+
\n",
+ " ![]() | \n",
+ " \n",
+ " This project is provided as an extra resource\n", + " It will make most sense after completing Week 7, and might trigger some ideas for your own projects.\n", + " This is provided without a detailed walk-through; the output from the colab has been saved (see last cell) so you can review the results if you have any problems running yourself.\n", + " \n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Do not use for actual trading decisions!!\n", + " It hopefully goes without saying: this project will generate toy trading code that is over-simplified and untrusted.Please do not make actual trading decisions based on this!\n", + " | \n",
+ "
You can download your to-do list by clicking the link below:
" + ], + "text/plain": [ + "You can download your to-do list by clicking the link below:
\"))\n", + "display(FileLink(result))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f3d0a44e-bca4-4944-8593-1761c2f73a70", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/week1/community-contributions/day-1-youtube-video-summary.ipynb b/week1/community-contributions/day-1-youtube-video-summary.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..de33d0fe42cf3600ae02e8ed75ed71f8cc4d137e --- /dev/null +++ b/week1/community-contributions/day-1-youtube-video-summary.ipynb @@ -0,0 +1,173 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 78, + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import os\n", + "from dotenv import load_dotenv\n", + "from IPython.display import Markdown, display\n", + "from openai import OpenAI" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!pip install youtube_transcript_api" + ] + }, + { + "cell_type": "code", + "execution_count": 79, + "metadata": {}, + "outputs": [], + "source": [ + "from youtube_transcript_api import YouTubeTranscriptApi" + ] + }, + { + "cell_type": "code", + "execution_count": 80, + "metadata": {}, + "outputs": [], + "source": [ + "# Load environment variables in a file called .env\n", + "\n", + "load_dotenv(override=True)\n", + "api_key = os.getenv('OPENAI_API_KEY')" + ] + }, + { + "cell_type": "code", + "execution_count": 92, + "metadata": {}, + "outputs": [], + "source": [ + "import re\n", + "\n", + "class YouTubeWebLink:\n", + " def __init__(self, url):\n", + " self.url = url\n", + " self.video_id = self.get_video_id(url)\n", + " self.set_openai_client()\n", + " self.set_system_prompt()\n", + "\n", + " def get_video_id(self, url):\n", + " \"\"\" extract youtube video id from url with regular expression \"\"\"\n", + " regex = r\"(?:v=|be/)([a-zA-Z0-9_-]{11})\"\n", + " match = re.search(regex, url)\n", + " if match:\n", + " return match.group(1)\n", + " else:\n", + " raise ValueError(\"Probably not a YouTube URL\")\n", + " \n", + " def set_openai_client(self):\n", + " self.openai = OpenAI()\n", + " \n", + " def set_system_prompt(self, system_prompt=None):\n", + " \"\"\" set system prompt from youtube video \"\"\"\n", + " self.system_prompt = \"\"\"\n", + " You are a skilled explainer and storyteller who specializes in summarizing YouTube video transcripts in a way that's both engaging and informative. 
\n", + " Your task is to:\n", + " - Capture key points and main ideas of the video\n", + " - Structure your summary with in clear sections\n", + " - Include important details, facts, and figures mentioned\n", + " - Never end your summary with a \"Conclusion\" section\n", + " - Keep the summary short and easy to understand\n", + " - Always format your response in markdown for better readability\n", + " \"\"\" if system_prompt is None else system_prompt\n", + "\n", + " def get_transcript(self):\n", + " \"\"\" get transcript from youtube video \"\"\"\n", + " try:\n", + " print('Fetching video transcript...')\n", + " transcript = YouTubeTranscriptApi.get_transcript(self.video_id)\n", + " return \" \".join([item['text'] for item in transcript])\n", + " except Exception as e:\n", + " print(f\"Error fetching transcript: {e}\")\n", + " return None\n", + " \n", + " def get_summary_from_transcript(self, transcript):\n", + " \"\"\" summarize text using openai \"\"\"\n", + " try:\n", + " print('Summarizing video...')\n", + " response = self.openai.chat.completions.create(\n", + " model=\"gpt-4o-mini\",\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": self.system_prompt},\n", + " {\"role\": \"user\", \"content\": f\"Summarize the following YouTube video transcript:\\n\\n{transcript}\"}\n", + " ]\n", + " )\n", + " return response.choices[0].message.content\n", + " except Exception as e:\n", + " print(f\"Error summarizing text: {e}\")\n", + " return None\n", + "\n", + " def display_summary(self):\n", + " \"\"\" summarize youtube video \"\"\"\n", + " transcript = self.get_transcript()\n", + " summary = self.get_summary_from_transcript(transcript)\n", + " display(Markdown(summary))\n" + ] + }, + { + "cell_type": "code", + "execution_count": 93, + "metadata": {}, + "outputs": [], + "source": [ + "# video link and share link of same youtube video\n", + "test_url_1 = \"https://www.youtube.com/watch?v=nYy-umCNKPQ&list=PLWHe-9GP9SMMdl6SLaovUQF2abiLGbMjs\"\n", + "test_url_2 = \"https://youtu.be/nYy-umCNKPQ?si=ILVrQlKT0W71G5pU\"\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Test that we get same id\n", + "video1, video2 = YouTubeWebLink(test_url_1), YouTubeWebLink(test_url_2)\n", + "video1.video_id, video2.video_id" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "video1.display_summary()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "llms", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.12" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/week1/community-contributions/day01_email_subject_line_en-fr.ipynb b/week1/community-contributions/day01_email_subject_line_en-fr.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..9b272d206b06bb3a0ee4a5a02820ca6e9c30b636 --- /dev/null +++ b/week1/community-contributions/day01_email_subject_line_en-fr.ipynb @@ -0,0 +1,126 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "d25b0aef-3e5e-4026-90ee-2b373bf262b7", + "metadata": {}, + "outputs": [], + "source": [ + "# Step 0: Import libraries and load environment variables\n", + "import os\n", + "from dotenv import load_dotenv\n", + "from 
IPython.display import Markdown, display\n", + "from openai import OpenAI\n", + "\n", + "load_dotenv(override=True)\n", + "api_key = os.getenv(\"OPENAI_API_KEY\")\n", + "\n", + "if not api_key:\n", + " print(\"No API key was found!\")\n", + "elif not api_key.startswith(\"sk-proj-\"):\n", + " print(\"An API key was found, but it does not start with 'sk-proj-'! Please ensure you are using the right key.\")\n", + "elif api_key.strip() != api_key:\n", + " print(\"An API key was found, but it looks like it might have space or tab characters at the start or end! Please remove them.\")\n", + "else:\n", + " print(\"API key found and looks good so far!\")\n", + "\n", + "# Step 1: Create prompts\n", + "print(\"[INFO] Creating system prompt ...\")\n", + "system_prompt = \"You are an assistant that analyzes the contents of \\\n", + " email texts and suggests short subject lines for the email based \\\n", + " on the requested tone and language. Respond in markdown.\"\n", + "\n", + "print(\"[INFO] Creating user prompt ...\")\n", + "user_prompt = \"\"\"\n", + " The text below is an e-mail text for which you are required to \\\n", + " provide subject lines. Please provide two snarky, two funny, and \\\n", + " two formal short subject lines for the email text. Each of the six \\\n", + " subject lines should be presented in both English and French \\\n", + " languages, making a total of 12 subject lines. Please provide your \\\n", + " answer in markdown.\\\n", + " \n", + " \\n\\n\n", + " \n", + " Welcome to arXiv!\n", + "\n", + " Thank you for creating an account and joining the arXiv community. We look\n", + " forward to receiving your contribution.\n", + "\n", + " Help Pages\n", + " An overview on how to navigate and use arXiv can be found here:\n", + " https://arxiv.org/help\n", + " https://arxiv.org/about\n", + "\n", + " If you would like to know more about the submission process, please go here:\n", + " https://arxiv.org/help/submit\n", + "\n", + " Before Submitting to arXiv\n", + " The arXiv.org e-print archive is fully automated and processes nearly\n", + " 1,000 new submissions per day. To help us keep the process running smoothly\n", + " and efficiently please check your submission carefully for mistakes, typos\n", + " and layout issues. Once you have submitted your work please check your account\n", + " frequently for verification messages and other communication from arXiv.\n", + "\n", + " Contacting arXiv\n", + " We have provided extensive help pages to guide you through the process and\n", + " to answer the most common questions. 
If you have problems with the submission\n", + " process please contact us here:\n", + " https://arxiv.org/help/contact\n", + " We aim to assist submitters within one business day, but during times of high\n", + " volume or maintenance work we may be slightly delayed in our response.\n", + "\n", + " Thank you for your cooperation.\n", + "\"\"\"\n", + "\n", + "# Step 2: Make messages list\n", + "print(\"[INFO] Making messages list ...\")\n", + "messages = [\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + "]\n", + "\n", + "# Step 3: Call OpenAI\n", + "print(\"[INFO] Calling OpenAI ...\")\n", + "openai = OpenAI()\n", + "response = openai.chat.completions.create(\n", + " model=\"gpt-4o-mini\",\n", + " messages=messages\n", + " )\n", + "\n", + "# Step 4: Print result\n", + "print(\"[INFO] Print result ...\")\n", + "display(Markdown(response.choices[0].message.content))\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b0a6676e-fb43-4725-9389-2acd74c13c4e", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.8" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/week1/community-contributions/day02_ollama_website_summarizer.ipynb b/week1/community-contributions/day02_ollama_website_summarizer.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..dc4dfbc645b1fe9b5f8b5c098831bdefcf5710d9 --- /dev/null +++ b/week1/community-contributions/day02_ollama_website_summarizer.ipynb @@ -0,0 +1,129 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "d25b0aef-3e5e-4026-90ee-2b373bf262b7", + "metadata": {}, + "outputs": [], + "source": [ + "# Step 0: Import Libraries\n", + "from bs4 import BeautifulSoup\n", + "from IPython.display import Markdown, display\n", + "import ollama\n", + "from openai import OpenAI\n", + "import requests\n", + "\n", + "# Step 1: Set Constants and Variables\n", + "print(\"[INFO] Setting constants and variable ...\")\n", + "WEBSITE_URL = \"https://arxiv.org/\"\n", + "MODEL = \"llama3.2\"\n", + "approaches = [\"local-call\", \"python-package\", \"openai-python-library\"]\n", + "approach = approaches[2]\n", + "\n", + "# Step 1: Scrape Website\n", + "print(\"[INFO] Scraping website ...\")\n", + "url_response = requests.get(\n", + " url=WEBSITE_URL,\n", + " headers={\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"}\n", + " )\n", + "soup = BeautifulSoup(\n", + " markup=url_response.content,\n", + " features=\"html.parser\"\n", + " )\n", + "website_title = soup.title.string if soup.title else \"No title found!!!\"\n", + "for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n", + " irrelevant.decompose()\n", + "website_text = soup.body.get_text(\n", + " separator=\"\\n\",\n", + " strip=True\n", + " )\n", + "\n", + "# Step 2: Create Prompts\n", + "print(\"[INFO] Creating system prompt ...\")\n", + "system_prompt = \"You are an assistant that analyzes the contents of a \\\n", + " website and provides a short summary, ignoring text that might be \\\n", + " 
navigation related. Respond in markdown.\"\n", + "\n", + "print(\"[INFO] Creating user prompt ...\")\n", + "user_prompt = f\"You are looking at a website titled {website_title}\"\n", + "user_prompt += \"\\nBased on the contents of the website, please provide \\\n", + " a short summary of this website in markdown. If the website \\\n", + " includes news or announcements, summarize them, too. The contents \\\n", + " of this website are as follows:\\n\\n\"\n", + "user_prompt += website_text\n", + "\n", + "# Step 3: Make Messages List\n", + "print(\"[INFO] Making messages list ...\")\n", + "messages = [\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + "]\n", + "\n", + "# Step 4: Call Model and Print Results\n", + "if approach == \"local-call\":\n", + " response = requests.post(\n", + " url=\"http://localhost:11434/api/chat\",\n", + " json={\n", + " \"model\": MODEL,\n", + " \"messages\": messages,\n", + " \"stream\": False\n", + " },\n", + " headers={\"Content-Type\": \"application/json\"}\n", + " )\n", + " print(\"[INFO] Printing result ...\")\n", + " display(Markdown(response.json()[\"message\"][\"content\"]))\n", + "elif approach == \"python-package\":\n", + " response = ollama.chat(\n", + " model=MODEL,\n", + " messages=messages,\n", + " stream=False\n", + " )\n", + " print(\"[INFO] Printing result ...\")\n", + " display(Markdown(response[\"message\"][\"content\"]))\n", + "elif approach == \"openai-python-library\":\n", + " ollama_via_openai = OpenAI(\n", + " base_url=\"http://localhost:11434/v1\",\n", + " api_key=\"ollama\"\n", + " )\n", + " response = ollama_via_openai.chat.completions.create(\n", + " model=MODEL,\n", + " messages=messages\n", + " )\n", + " print(\"[INFO] Printing result ...\")\n", + " display(Markdown(response.choices[0].message.content))\n", + "else:\n", + " raise ValueError(f\"[INFO] Invalid approach! 
Please select an approach from {approaches} and try again.\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b0a6676e-fb43-4725-9389-2acd74c13c4e", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.8" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/week1/community-contributions/day1-Groq API.ipynb b/week1/community-contributions/day1-Groq API.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..3838097dfcfb4d8e7e1a1ff2e876667b2d00320e --- /dev/null +++ b/week1/community-contributions/day1-Groq API.ipynb @@ -0,0 +1,530 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "d15d8294-3328-4e07-ad16-8a03e9bbfdb9", + "metadata": {}, + "source": [ + "## DAY1 LLM Project with GROQ!\n", + "\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4e2a9393-7767-488e-a8bf-27c12dca35bd", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import os\n", + "import requests\n", + "from dotenv import load_dotenv\n", + "from bs4 import BeautifulSoup\n", + "from IPython.display import Markdown, display\n", + "from groq import Groq\n", + "\n", + "# If you get an error running this cell, then please head over to the troubleshooting notebook!" + ] + }, + { + "cell_type": "markdown", + "id": "5d899ad6-1428-481b-b308-750308d80442", + "metadata": {}, + "source": [ + "If you are getting the error ModuleNotFoundError: No module named 'groq', follow the steps below.\n", + "\n", + "1. Activate the llms environment from Anaconda, so that (llms) is showing in your prompt, as this is the environment where the package will get installed. Run pip install here. \n", + "\n", + "(base) PS C:\\Users\\test\\OneDrive\\Desktop\\AI\\projects\\llm_engineering> conda activate llms\n", + "(llms) PS C:\\Users\\test\\OneDrive\\Desktop\\AI\\projects\\llm_engineering> pip install groq\n", + "\n", + "\n", + "2. After you install a new package, you'd need to restart the Kernel in jupyter lab for each notebook (Kernel >> Restart Kernel and Clear Values Of All Outputs).\n", + "\n", + "You can also run this command in jupyter lab to see whether it's installed:\n", + "\n", + "!pip show groq\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "99c0c3c9-fa5e-405e-8453-2a557dc60c09", + "metadata": {}, + "outputs": [], + "source": [ + "!pip show groq" + ] + }, + { + "cell_type": "markdown", + "id": "6900b2a8-6384-4316-8aaa-5e519fca4254", + "metadata": {}, + "source": [ + "# Connecting to GROQ\n", + "\n", + "The next cell is where we load in the environment variables in your `.env` file and connect to GROQ.\n", + "\n", + "Your .env file should have the entry below\n", + "\n", + "GROQ_API_KEY=gsk_xxxxxx\n", + "\n", + "GROQ keys can be configured by logging in at the link below\n", + "https://console.groq.com/keys\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7b87cadb-d513-4303-baee-a37b6f938e4d", + "metadata": {}, + "outputs": [], + "source": [ + "# Load environment variables in a file called .env\n", + "\n", + "load_dotenv(override=True)\n", + "api_key = os.getenv('GROQ_API_KEY')\n", + "\n", + "# Check the key\n", + "\n", + "if not api_key:\n", + "    print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n", + "elif not api_key.startswith(\"gsk_\"):\n", + "    print(\"An API key was found, but it doesn't start with gsk_; please check you're using the right key - see troubleshooting notebook\")\n", + "elif api_key.strip() != api_key:\n", + "    print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n", + "else:\n", + "    print(\"API key found and looks good so far!\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "019974d9-f3ad-4a8a-b5f9-0a3719aea2d3", + "metadata": {}, + "outputs": [], + "source": [ + "groq = Groq()" + ] + }, + { + "cell_type": "markdown", + "id": "442fc84b-0815-4f40-99ab-d9a5da6bda91", + "metadata": {}, + "source": [ + "# Let's make a quick call to a Frontier model to get started, as a preview!" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a58394bf-1e45-46af-9bfd-01e24da6f49a", + "metadata": {}, + "outputs": [], + "source": [ + "# To give you a preview -- calling Groq with these messages is this easy. Any problems, head over to the Troubleshooting notebook.\n", + "\n", + "message = \"Hello, GPT! This is my first ever message to you! 
Hi!\"\n", + "response = groq.chat.completions.create(model=\"llama-3.3-70b-versatile\", messages=[{\"role\":\"user\", \"content\":message}])\n", + "print(response.choices[0].message.content)" + ] + }, + { + "cell_type": "markdown", + "id": "2aa190e5-cb31-456a-96cc-db109919cd78", + "metadata": {}, + "source": [ + "## OK onwards with our first project" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c5e793b2-6775-426a-a139-4848291d0463", + "metadata": {}, + "outputs": [], + "source": [ + "# A class to represent a Webpage\n", + "# If you're not familiar with Classes, check out the \"Intermediate Python\" notebook\n", + "\n", + "# Some websites need you to use proper headers when fetching them:\n", + "headers = {\n", + " \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n", + "}\n", + "\n", + "class Website:\n", + "\n", + " def __init__(self, url):\n", + " \"\"\"\n", + " Create this Website object from the given url using the BeautifulSoup library\n", + " \"\"\"\n", + " self.url = url\n", + " response = requests.get(url, headers=headers)\n", + " soup = BeautifulSoup(response.content, 'html.parser')\n", + " self.title = soup.title.string if soup.title else \"No title found\"\n", + " for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n", + " irrelevant.decompose()\n", + " self.text = soup.body.get_text(separator=\"\\n\", strip=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2ef960cf-6dc2-4cda-afb3-b38be12f4c97", + "metadata": {}, + "outputs": [], + "source": [ + "# Let's try one out. Change the website and add print statements to follow along.\n", + "\n", + "ed = Website(\"https://edwarddonner.com\")\n", + "print(ed.title)\n", + "print(ed.text)" + ] + }, + { + "cell_type": "markdown", + "id": "6a478a0c-2c53-48ff-869c-4d08199931e1", + "metadata": {}, + "source": [ + "## Types of prompts\n", + "\n", + "You may know this already - but if not, you will get very familiar with it!\n", + "\n", + "Models like GPT4o have been trained to receive instructions in a particular way.\n", + "\n", + "They expect to receive:\n", + "\n", + "**A system prompt** that tells them what task they are performing and what tone they should use\n", + "\n", + "**A user prompt** -- the conversation starter that they should reply to" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "abdb8417-c5dc-44bc-9bee-2e059d162699", + "metadata": {}, + "outputs": [], + "source": [ + "# Define our system prompt - you can experiment with this later, changing the last sentence to 'Respond in markdown in Spanish.\"\n", + "\n", + "system_prompt = \"You are an assistant that analyzes the contents of a website \\\n", + "and provides a short summary, ignoring text that might be navigation related. \\\n", + "Respond in markdown.\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f0275b1b-7cfe-4f9d-abfa-7650d378da0c", + "metadata": {}, + "outputs": [], + "source": [ + "# A function that writes a User Prompt that asks for summaries of websites:\n", + "\n", + "def user_prompt_for(website):\n", + " user_prompt = f\"You are looking at a website titled {website.title}\"\n", + " user_prompt += \"\\nThe contents of this website is as follows; \\\n", + "please provide a short summary of this website in markdown. 
\\\n", + "If it includes news or announcements, then summarize these too.\\n\\n\"\n", + "    user_prompt += website.text\n", + "    return user_prompt" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "26448ec4-5c00-4204-baec-7df91d11ff2e", + "metadata": {}, + "outputs": [], + "source": [ + "print(user_prompt_for(ed))" + ] + }, + { + "cell_type": "markdown", + "id": "ea211b5f-28e1-4a86-8e52-c0b7677cadcc", + "metadata": {}, + "source": [ + "## Messages\n", + "\n", + "Similar to OpenAI, the GROQ API shares this structure:\n", + "\n", + "```\n", + "[\n", + "    {\"role\": \"system\", \"content\": \"system message goes here\"},\n", + "    {\"role\": \"user\", \"content\": \"user message goes here\"}\n", + "]\n", + "```\n", + "\n", + "To give you a preview, the next 2 cells make a rather simple call - we won't stretch the mighty GPT (yet!)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f25dcd35-0cd0-4235-9f64-ac37ed9eaaa5", + "metadata": {}, + "outputs": [], + "source": [ + "messages = [\n", + "    {\"role\": \"system\", \"content\": \"You are a snarky assistant\"},\n", + "    {\"role\": \"user\", \"content\": \"What is 2 + 2?\"}\n", + "]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "21ed95c5-7001-47de-a36d-1d6673b403ce", + "metadata": {}, + "outputs": [], + "source": [ + "# To give you a preview -- calling Groq with system and user messages:\n", + "\n", + "response = groq.chat.completions.create(model=\"llama-3.3-70b-versatile\", messages=messages)\n", + "print(response.choices[0].message.content)" + ] + }, + { + "cell_type": "markdown", + "id": "d06e8d78-ce4c-4b05-aa8e-17050c82bb47", + "metadata": {}, + "source": [ + "## And now let's build useful messages for LLAMA3.3, using a function" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0134dfa4-8299-48b5-b444-f2a8c3403c88", + "metadata": {}, + "outputs": [], + "source": [ + "# See how this function creates exactly the format above\n", + "\n", + "def messages_for(website):\n", + "    return [\n", + "        {\"role\": \"system\", \"content\": system_prompt},\n", + "        {\"role\": \"user\", \"content\": user_prompt_for(website)}\n", + "    ]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "36478464-39ee-485c-9f3f-6a4e458dbc9c", + "metadata": {}, + "outputs": [], + "source": [ + "# Try this out, and then try for a few more websites\n", + "\n", + "messages_for(ed)" + ] + }, + { + "cell_type": "markdown", + "id": "16f49d46-bf55-4c3e-928f-68fc0bf715b0", + "metadata": {}, + "source": [ + "## Time to bring it together - the API for GROQ is very simple!"
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "905b9919-aba7-45b5-ae65-81b3d1d78e34", + "metadata": {}, + "outputs": [], + "source": [ + "# And now: call the GROQ API\n", + "\n", + "def summarize(url):\n", + " website = Website(url)\n", + " response = groq.chat.completions.create(\n", + " model = \"llama-3.3-70b-versatile\",\n", + " messages = messages_for(website)\n", + " )\n", + " return response.choices[0].message.content" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "05e38d41-dfa4-4b20-9c96-c46ea75d9fb5", + "metadata": {}, + "outputs": [], + "source": [ + "summarize(\"https://edwarddonner.com\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3d926d59-450e-4609-92ba-2d6f244f1342", + "metadata": {}, + "outputs": [], + "source": [ + "# A function to display this nicely in the Jupyter output, using markdown\n", + "\n", + "def display_summary(url):\n", + " summary = summarize(url)\n", + " display(Markdown(summary))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3018853a-445f-41ff-9560-d925d1774b2f", + "metadata": {}, + "outputs": [], + "source": [ + "display_summary(\"https://edwarddonner.com\")" + ] + }, + { + "cell_type": "markdown", + "id": "b3bcf6f4-adce-45e9-97ad-d9a5d7a3a624", + "metadata": {}, + "source": [ + "# Let's try more websites\n", + "\n", + "Note that this will only work on websites that can be scraped using this simplistic approach.\n", + "\n", + "Websites that are rendered with Javascript, like React apps, won't show up. See the community-contributions folder for a Selenium implementation that gets around this. You'll need to read up on installing Selenium (ask ChatGPT!)\n", + "\n", + "Also Websites protected with CloudFront (and similar) may give 403 errors - many thanks Andy J for pointing this out.\n", + "\n", + "But many websites will work just fine!" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "45d83403-a24c-44b5-84ac-961449b4008f", + "metadata": {}, + "outputs": [], + "source": [ + "display_summary(\"https://cnn.com\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "75e9fd40-b354-4341-991e-863ef2e59db7", + "metadata": {}, + "outputs": [], + "source": [ + "display_summary(\"https://anthropic.com\")" + ] + }, + { + "cell_type": "markdown", + "id": "c951be1a-7f1b-448f-af1f-845978e47e2c", + "metadata": {}, + "source": [ + "\n",
+ " ![]() | \n",
+ " \n",
+ " Business applications\n", + " In this exercise, you experienced calling the Cloud API of a Frontier Model (a leading model at the frontier of AI) for the first time. We will be using APIs like OpenAI at many stages in the course, in addition to building our own LLMs.\n", + "\n", + "More specifically, we've applied this to Summarization - a classic Gen AI use case to make a summary. This can be applied to any business vertical - summarizing the news, summarizing financial performance, summarizing a resume in a cover letter - the applications are limitless. Consider how you could apply Summarization in your business, and try prototyping a solution.\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Before you continue - now try yourself\n", + " Use the cell below to make your own simple commercial example. Stick with the summarization use case for now. Here's an idea: write something that will take the contents of an email, and will suggest an appropriate short subject line for the email. That's the kind of feature that might be built into a commercial email tool.\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Business applications\n", + " In this exercise, you experienced calling the Cloud API of a Frontier Model (a leading model at the frontier of AI) for the first time. We will be using APIs like OpenAI at many stages in the course, in addition to building our own LLMs.\n", + "\n", + "More specifically, we've applied this to Summarization - a classic Gen AI use case to make a summary. This can be applied to any business vertical - summarizing the news, summarizing financial performance, summarizing a resume in a cover letter - the applications are limitless. Consider how you could apply Summarization in your business, and try prototyping a solution.\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Before you continue - now try yourself\n", + " Use the cell below to make your own simple commercial example. Stick with the summarization use case for now. Here's an idea: write something that will take the contents of an email, and will suggest an appropriate short subject line for the email. That's the kind of feature that might be built into a commercial email tool.\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Please read - important note\n", + " The way I collaborate with you may be different to other courses you've taken. I prefer not to type code while you watch. Rather, I execute Jupyter Labs, like this, and give you an intuition for what's going on. My suggestion is that you do this with me, either at the same time, or (perhaps better) right afterwards. Add print statements to understand what's going on, and then come up with your own variations. If you have a Github account, use this to showcase your variations. Not only is this essential practice, but it demonstrates your skills to others, including perhaps future clients or employers...\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Business value of these exercises\n", + " A final thought. While I've designed these notebooks to be educational, I've also tried to make them enjoyable. We'll do fun things like have LLMs tell jokes and argue with each other. But fundamentally, my goal is to teach skills you can apply in business. I'll explain business implications as we go, and it's worth keeping this in mind: as you build experience with models and techniques, think of ways you could put this into action at work today. Please do contact me if you'd like to discuss more or if you have ideas to bounce off me.\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Business applications\n", + " In this exercise, you experienced calling the Cloud API of a Frontier Model (a leading model at the frontier of AI) for the first time. We will be using APIs like OpenAI at many stages in the course, in addition to building our own LLMs.\n", + "\n", + "More specifically, we've applied this to Summarization - a classic Gen AI use case to make a summary. This can be applied to any business vertical - summarizing the news, summarizing financial performance, summarizing a resume in a cover letter - the applications are limitless. Consider how you could apply Summarization in your business, and try prototyping a solution.\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Before you continue - now try yourself\n", + " Use the cell below to make your own simple commercial example. Stick with the summarization use case for now. Here's an idea: write something that will take the contents of an email, and will suggest an appropriate short subject line for the email. That's the kind of feature that might be built into a commercial email tool.\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Please read - important note\n", + " The way I collaborate with you may be different to other courses you've taken. I prefer not to type code while you watch. Rather, I execute Jupyter Labs, like this, and give you an intuition for what's going on. My suggestion is that you carefully execute this yourself, after watching the lecture. Add print statements to understand what's going on, and then come up with your own variations. If you have a Github account, use this to showcase your variations. Not only is this essential practice, but it demonstrates your skills to others, including perhaps future clients or employers...\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Treat these labs as a resource\n", + " I push updates to the code regularly. When people ask questions or have problems, I incorporate it in the code, adding more examples or improved commentary. As a result, you'll notice that the code below isn't identical to the videos. Everything from the videos is here; but in addition, I've added more steps and better explanations, and occasionally added new models like DeepSeek. Consider this like an interactive book that accompanies the lectures.\n", + " \n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Business value of these exercises\n", + " A final thought. While I've designed these notebooks to be educational, I've also tried to make them enjoyable. We'll do fun things like have LLMs tell jokes and argue with each other. But fundamentally, my goal is to teach skills you can apply in business. I'll explain business implications as we go, and it's worth keeping this in mind: as you build experience with models and techniques, think of ways you could put this into action at work today. Please do contact me if you'd like to discuss more or if you have ideas to bounce off me.\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Business applications\n", + " In this exercise, you experienced calling the Cloud API of a Frontier Model (a leading model at the frontier of AI) for the first time. We will be using APIs like OpenAI at many stages in the course, in addition to building our own LLMs.\n", + "\n", + "More specifically, we've applied this to Summarization - a classic Gen AI use case to make a summary. This can be applied to any business vertical - summarizing the news, summarizing financial performance, summarizing a resume in a cover letter - the applications are limitless. Consider how you could apply Summarization in your business, and try prototyping a solution.\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Before you continue - now try yourself\n", + " Use the cell below to make your own simple commercial example. Stick with the summarization use case for now. Here's an idea: write something that will take the contents of an email, and will suggest an appropriate short subject line for the email. That's the kind of feature that might be built into a commercial email tool.\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Please read - important note\n", + " The way I collaborate with you may be different to other courses you've taken. I prefer not to type code while you watch. Rather, I execute Jupyter Labs, like this, and give you an intuition for what's going on. My suggestion is that you do this with me, either at the same time, or (perhaps better) right afterwards. Add print statements to understand what's going on, and then come up with your own variations. If you have a Github account, use this to showcase your variations. Not only is this essential practice, but it demonstrates your skills to others, including perhaps future clients or employers...\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Business value of these exercises\n", + " A final thought. While I've designed these notebooks to be educational, I've also tried to make them enjoyable. We'll do fun things like have LLMs tell jokes and argue with each other. But fundamentally, my goal is to teach skills you can apply in business. I'll explain business implications as we go, and it's worth keeping this in mind: as you build experience with models and techniques, think of ways you could put this into action at work today. Please do contact me if you'd like to discuss more or if you have ideas to bounce off me.\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Business applications\n", + " In this exercise, you experienced calling the Cloud API of a Frontier Model (a leading model at the frontier of AI) for the first time. We will be using APIs like OpenAI at many stages in the course, in addition to building our own LLMs.\n", + "\n", + "More specifically, we've applied this to Summarization - a classic Gen AI use case to make a summary. This can be applied to any business vertical - summarizing the news, summarizing financial performance, summarizing a resume in a cover letter - the applications are limitless. Consider how you could apply Summarization in your business, and try prototyping a solution.\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Before you continue - now try yourself\n", + " Use the cell below to make your own simple commercial example. Stick with the summarization use case for now. Here's an idea: write something that will take the contents of an email, and will suggest an appropriate short subject line for the email. That's the kind of feature that might be built into a commercial email tool.\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Please read - important note\n", + " The way I collaborate with you may be different to other courses you've taken. I prefer not to type code while you watch. Rather, I execute Jupyter Labs, like this, and give you an intuition for what's going on. My suggestion is that you do this with me, either at the same time, or (perhaps better) right afterwards. Add print statements to understand what's going on, and then come up with your own variations. If you have a Github account, use this to showcase your variations. Not only is this essential practice, but it demonstrates your skills to others, including perhaps future clients or employers...\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Business value of these exercises\n", + " A final thought. While I've designed these notebooks to be educational, I've also tried to make them enjoyable. We'll do fun things like have LLMs tell jokes and argue with each other. But fundamentally, my goal is to teach skills you can apply in business. I'll explain business implications as we go, and it's worth keeping this in mind: as you build experience with models and techniques, think of ways you could put this into action at work today. Please do contact me if you'd like to discuss more or if you have ideas to bounce off me.\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Business applications\n", + " In this exercise, you experienced calling the Cloud API of a Frontier Model (a leading model at the frontier of AI) for the first time. We will be using APIs like OpenAI at many stages in the course, in addition to building our own LLMs.\n", + "\n", + "More specifically, we've applied this to Summarization - a classic Gen AI use case to make a summary. This can be applied to any business vertical - summarizing the news, summarizing financial performance, summarizing a resume in a cover letter - the applications are limitless. Consider how you could apply Summarization in your business, and try prototyping a solution.\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Before you continue - now try yourself\n", + " Use the cell below to make your own simple commercial example. Stick with the summarization use case for now. Here's an idea: write something that will take the contents of an email, and will suggest an appropriate short subject line for the email. That's the kind of feature that might be built into a commercial email tool.\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Please read - important note\n", + " The way I collaborate with you may be different to other courses you've taken. I prefer not to type code while you watch. Rather, I execute Jupyter Labs, like this, and give you an intuition for what's going on. My suggestion is that you do this with me, either at the same time, or (perhaps better) right afterwards. Add print statements to understand what's going on, and then come up with your own variations. If you have a Github account, use this to showcase your variations. Not only is this essential practice, but it demonstrates your skills to others, including perhaps future clients or employers...\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Business value of these exercises\n", + " A final thought. While I've designed these notebooks to be educational, I've also tried to make them enjoyable. We'll do fun things like have LLMs tell jokes and argue with each other. But fundamentally, my goal is to teach skills you can apply in business. I'll explain business implications as we go, and it's worth keeping this in mind: as you build experience with models and techniques, think of ways you could put this into action at work today. Please do contact me if you'd like to discuss more or if you have ideas to bounce off me.\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Business applications\n", + " In this exercise, you experienced calling the Cloud API of a Frontier Model (a leading model at the frontier of AI) for the first time. We will be using APIs like OpenAI at many stages in the course, in addition to building our own LLMs.\n", + "\n", + "More specifically, we've applied this to Summarization - a classic Gen AI use case to make a summary. This can be applied to any business vertical - summarizing the news, summarizing financial performance, summarizing a resume in a cover letter - the applications are limitless. Consider how you could apply Summarization in your business, and try prototyping a solution.\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Before you continue - now try yourself\n", + " Use the cell below to make your own simple commercial example. Stick with the summarization use case for now. Here's an idea: write something that will take the contents of an email, and will suggest an appropriate short subject line for the email. That's the kind of feature that might be built into a commercial email tool.\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Please read - important note\n", + " The way I collaborate with you may be different to other courses you've taken. I prefer not to type code while you watch. Rather, I execute Jupyter Labs, like this, and give you an intuition for what's going on. My suggestion is that you carefully execute this yourself, after watching the lecture. Add print statements to understand what's going on, and then come up with your own variations. If you have a Github account, use this to showcase your variations. Not only is this essential practice, but it demonstrates your skills to others, including perhaps future clients or employers...\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Treat these labs as a resource\n", + " I push updates to the code regularly. When people ask questions or have problems, I incorporate it in the code, adding more examples or improved commentary. As a result, you'll notice that the code below isn't identical to the videos. Everything from the videos is here; but in addition, I've added more steps and better explanations, and occasionally added new models like DeepSeek. Consider this like an interactive book that accompanies the lectures.\n", + " \n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Business value of these exercises\n", + " A final thought. While I've designed these notebooks to be educational, I've also tried to make them enjoyable. We'll do fun things like have LLMs tell jokes and argue with each other. But fundamentally, my goal is to teach skills you can apply in business. I'll explain business implications as we go, and it's worth keeping this in mind: as you build experience with models and techniques, think of ways you could put this into action at work today. Please do contact me if you'd like to discuss more or if you have ideas to bounce off me.\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Business applications\n", + " In this exercise, you experienced calling the Cloud API of a Frontier Model (a leading model at the frontier of AI) for the first time. We will be using APIs like OpenAI at many stages in the course, in addition to building our own LLMs.\n", + "\n", + "More specifically, we've applied this to Summarization - a classic Gen AI use case to make a summary. This can be applied to any business vertical - summarizing the news, summarizing financial performance, summarizing a resume in a cover letter - the applications are limitless. Consider how you could apply Summarization in your business, and try prototyping a solution.\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Before you continue - now try yourself\n", + " Use the cell below to make your own simple commercial example. Stick with the summarization use case for now. Here's an idea: write something that will take the contents of an email, and will suggest an appropriate short subject line for the email. That's the kind of feature that might be built into a commercial email tool.\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Just before we get to the assignment --\n", + " I thought I'd take a second to point you at this page of useful resources for the course. This includes links to all the slides.\n", + " https://edwarddonner.com/2024/11/13/llm-engineering-resources/ \n", + " Please keep this bookmarked, and I'll continue to add more useful links there over time.\n", + " \n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Just before we get to the assignment --\n", + " I thought I'd take a second to point you at this page of useful resources for the course. This includes links to all the slides.\n", + " https://edwarddonner.com/2024/11/13/llm-engineering-resources/ \n", + " Please keep this bookmarked, and I'll continue to add more useful links there over time.\n", + " \n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Just before we get to the assignment --\n", + " I thought I'd take a second to point you at this page of useful resources for the course. This includes links to all the slides.\n", + " https://edwarddonner.com/2024/11/13/llm-engineering-resources/ \n", + " Please keep this bookmarked, and I'll continue to add more useful links there over time.\n", + " \n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Just before we get to the assignment --\n", + " I thought I'd take a second to point you at this page of useful resources for the course. This includes links to all the slides.\n", + " https://edwarddonner.com/2024/11/13/llm-engineering-resources/ \n", + " Please keep this bookmarked, and I'll continue to add more useful links there over time.\n", + " \n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Just before we get to the assignment --\n", + " I thought I'd take a second to point you at this page of useful resources for the course. This includes links to all the slides.\n", + " https://edwarddonner.com/2024/11/13/llm-engineering-resources/ \n", + " Please keep this bookmarked, and I'll continue to add more useful links there over time.\n", + " \n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Just before we get to the assignment --\n", + " I thought I'd take a second to point you at this page of useful resources for the course. This includes links to all the slides.\n", + " https://edwarddonner.com/2024/11/13/llm-engineering-resources/ \n", + " Please keep this bookmarked, and I'll continue to add more useful links there over time.\n", + " \n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Business applications\n", + " In this exercise we extended the Day 1 code to make multiple LLM calls, and generate a document.\n", + "\n", + "This is perhaps the first example of Agentic AI design patterns, as we combined multiple calls to LLMs. This will feature more in Week 2, and then we will return to Agentic AI in a big way in Week 8 when we build a fully autonomous Agent solution.\n", + "\n", + "Generating content in this way is one of the very most common Use Cases. As with summarization, this can be applied to any business vertical. Write marketing content, generate a product tutorial from a spec, create personalized email content, and so much more. Explore how you can apply content generation to your business, and try making yourself a proof-of-concept prototype. See what other students have done in the community-contributions folder -- so many valuable projects -- it's wild!\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Before you move to Week 2 (which is tons of fun)\n", + " Please see the week1 EXERCISE notebook for your challenge for the end of week 1. This will give you some essential practice working with Frontier APIs, and prepare you well for Week 2.\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " A reminder on 3 useful resources\n", + " 1. The resources for the course are available here.\n", + " 2. I'm on LinkedIn here and I love connecting with people taking the course! \n", + " 3. I'm trying out X/Twitter and I'm at @edwarddonner and hoping people will teach me how it's done.. \n", + " \n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Finally! I have a special request for you\n", + " \n", + " My editor tells me that it makes a MASSIVE difference when students rate this course on Udemy - it's one of the main ways that Udemy decides whether to show it to others. If you're able to take a minute to rate this, I'd be so very grateful! And regardless - always please reach out to me at ed@edwarddonner.com if I can help at any point.\n", + " \n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Business applications\n", + " In this exercise we extended the Day 1 code to make multiple LLM calls, and generate a document.\n", + "\n", + "This is perhaps the first example of Agentic AI design patterns, as we combined multiple calls to LLMs. This will feature more in Week 2, and then we will return to Agentic AI in a big way in Week 8 when we build a fully autonomous Agent solution.\n", + "\n", + "Generating content in this way is one of the very most common Use Cases. As with summarization, this can be applied to any business vertical. Write marketing content, generate a product tutorial from a spec, create personalized email content, and so much more. Explore how you can apply content generation to your business, and try making yourself a proof-of-concept prototype.\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Before you move to Week 2 (which is tons of fun)\n", + " Please see the week1 EXERCISE notebook for your challenge for the end of week 1. This will give you some essential practice working with Frontier APIs, and prepare you well for Week 2.\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " A reminder on 2 useful resources\n", + " 1. The resources for the course are available here.\n", + " 2. I'm on LinkedIn here and I love connecting with people taking the course!\n", + " \n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Finally! I have a special request for you\n", + " \n", + " My editor tells me that it makes a MASSIVE difference when students rate this course on Udemy - it's one of the main ways that Udemy decides whether to show it to others. If you're able to take a minute to rate this, I'd be so very grateful! And regardless - always please reach out to me at ed@edwarddonner.com if I can help at any point.\n", + " \n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Business applications\n", + " In this exercise we extended the Day 1 code to make multiple LLM calls, and generate a document.\n", + "\n", + "This is perhaps the first example of Agentic AI design patterns, as we combined multiple calls to LLMs. This will feature more in Week 2, and then we will return to Agentic AI in a big way in Week 8 when we build a fully autonomous Agent solution.\n", + "\n", + "Generating content in this way is one of the very most common Use Cases. As with summarization, this can be applied to any business vertical. Write marketing content, generate a product tutorial from a spec, create personalized email content, and so much more. Explore how you can apply content generation to your business, and try making yourself a proof-of-concept prototype.\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Before you move to Week 2 (which is tons of fun)\n", + " Please see the week1 EXERCISE notebook for your challenge for the end of week 1. This will give you some essential practice working with Frontier APIs, and prepare you well for Week 2.\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " A reminder on 2 useful resources\n", + " 1. The resources for the course are available here.\n", + " 2. I'm on LinkedIn here and I love connecting with people taking the course!\n", + " \n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Please read - important note\n", + " The way I collaborate with you may be different to other courses you've taken. I prefer not to type code while you watch. Rather, I execute Jupyter Labs, like this, and give you an intuition for what's going on. My suggestion is that you carefully execute this yourself, after watching the lecture. Add print statements to understand what's going on, and then come up with your own variations. If you have a Github account, use this to showcase your variations. Not only is this essential practice, but it demonstrates your skills to others, including perhaps future clients or employers...\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Treat these labs as a resource\n", + " I push updates to the code regularly. When people ask questions or have problems, I incorporate it in the code, adding more examples or improved commentary. As a result, you'll notice that the code below isn't identical to the videos. Everything from the videos is here; but in addition, I've added more steps and better explanations, and occasionally added new models like DeepSeek. Consider this like an interactive book that accompanies the lectures.\n", + " \n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Business value of these exercises\n", + " A final thought. While I've designed these notebooks to be educational, I've also tried to make them enjoyable. We'll do fun things like have LLMs tell jokes and argue with each other. But fundamentally, my goal is to teach skills you can apply in business. I'll explain business implications as we go, and it's worth keeping this in mind: as you build experience with models and techniques, think of ways you could put this into action at work today. Please do contact me if you'd like to discuss more or if you have ideas to bounce off me.\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Business applications\n", + " In this exercise, you experienced calling the Cloud API of a Frontier Model (a leading model at the frontier of AI) for the first time. We will be using APIs like OpenAI at many stages in the course, in addition to building our own LLMs.\n", + "\n", + "More specifically, we've applied this to Summarization - a classic Gen AI use case to make a summary. This can be applied to any business vertical - summarizing the news, summarizing financial performance, summarizing a resume in a cover letter - the applications are limitless. Consider how you could apply Summarization in your business, and try prototyping a solution.\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Before you continue - now try yourself\n", + " Use the cell below to make your own simple commercial example. Stick with the summarization use case for now. Here's an idea: write something that will take the contents of an email, and will suggest an appropriate short subject line for the email. That's the kind of feature that might be built into a commercial email tool.\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Please read - important note\n", + " The way I collaborate with you may be different to other courses you've taken. I prefer not to type code while you watch. Rather, I execute Jupyter Labs, like this, and give you an intuition for what's going on. My suggestion is that you carefully execute this yourself, after watching the lecture. Add print statements to understand what's going on, and then come up with your own variations. If you have a Github account, use this to showcase your variations. Not only is this essential practice, but it demonstrates your skills to others, including perhaps future clients or employers...\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Treat these labs as a resource\n", + " I push updates to the code regularly. When people ask questions or have problems, I incorporate it in the code, adding more examples or improved commentary. As a result, you'll notice that the code below isn't identical to the videos. Everything from the videos is here; but in addition, I've added more steps and better explanations, and occasionally added new models like DeepSeek. Consider this like an interactive book that accompanies the lectures.\n", + " \n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Business value of these exercises\n", + " A final thought. While I've designed these notebooks to be educational, I've also tried to make them enjoyable. We'll do fun things like have LLMs tell jokes and argue with each other. But fundamentally, my goal is to teach skills you can apply in business. I'll explain business implications as we go, and it's worth keeping this in mind: as you build experience with models and techniques, think of ways you could put this into action at work today. Please do contact me if you'd like to discuss more or if you have ideas to bounce off me.\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Business applications\n", + " In this exercise, you experienced calling the Cloud API of a Frontier Model (a leading model at the frontier of AI) for the first time. We will be using APIs like OpenAI at many stages in the course, in addition to building our own LLMs.\n", + "\n", + "More specifically, we've applied this to Summarization - a classic Gen AI use case to make a summary. This can be applied to any business vertical - summarizing the news, summarizing financial performance, summarizing a resume in a cover letter - the applications are limitless. Consider how you could apply Summarization in your business, and try prototyping a solution.\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Before you continue - now try yourself\n", + " Use the cell below to make your own simple commercial example. Stick with the summarization use case for now. Here's an idea: write something that will take the contents of an email, and will suggest an appropriate short subject line for the email. That's the kind of feature that might be built into a commercial email tool.\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Just before we get to the assignment --\n", + " I thought I'd take a second to point you at this page of useful resources for the course. This includes links to all the slides.\n", + " https://edwarddonner.com/2024/11/13/llm-engineering-resources/ \n", + " Please keep this bookmarked, and I'll continue to add more useful links there over time.\n", + " \n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Business applications\n", + " In this exercise we extended the Day 1 code to make multiple LLM calls, and generate a document.\n", + "\n", + "This is perhaps the first example of Agentic AI design patterns, as we combined multiple calls to LLMs. This will feature more in Week 2, and then we will return to Agentic AI in a big way in Week 8 when we build a fully autonomous Agent solution.\n", + "\n", + "Generating content in this way is one of the very most common Use Cases. As with summarization, this can be applied to any business vertical. Write marketing content, generate a product tutorial from a spec, create personalized email content, and so much more. Explore how you can apply content generation to your business, and try making yourself a proof-of-concept prototype. See what other students have done in the community-contributions folder -- so many valuable projects -- it's wild!\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Before you move to Week 2 (which is tons of fun)\n", + " Please see the week1 EXERCISE notebook for your challenge for the end of week 1. This will give you some essential practice working with Frontier APIs, and prepare you well for Week 2.\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " A reminder on 3 useful resources\n", + " 1. The resources for the course are available here.\n", + " 2. I'm on LinkedIn here and I love connecting with people taking the course! \n", + " 3. I'm trying out X/Twitter and I'm at @edwarddonner and hoping people will teach me how it's done.. \n", + " \n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Finally! I have a special request for you\n", + " \n", + " My editor tells me that it makes a MASSIVE difference when students rate this course on Udemy - it's one of the main ways that Udemy decides whether to show it to others. If you're able to take a minute to rate this, I'd be so very grateful! And regardless - always please reach out to me at ed@edwarddonner.com if I can help at any point.\n", + " \n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Please read - important note\n", + " The way I collaborate with you may be different to other courses you've taken. I prefer not to type code while you watch. Rather, I execute Jupyter Labs, like this, and give you an intuition for what's going on. My suggestion is that you carefully execute this yourself, after watching the lecture. Add print statements to understand what's going on, and then come up with your own variations. If you have a Github account, use this to showcase your variations. Not only is this essential practice, but it demonstrates your skills to others, including perhaps future clients or employers...\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Treat these labs as a resource\n", + " I push updates to the code regularly. When people ask questions or have problems, I incorporate it in the code, adding more examples or improved commentary. As a result, you'll notice that the code below isn't identical to the videos. Everything from the videos is here; but in addition, I've added more steps and better explanations, and occasionally added new models like DeepSeek. Consider this like an interactive book that accompanies the lectures.\n", + " \n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Business value of these exercises\n", + " A final thought. While I've designed these notebooks to be educational, I've also tried to make them enjoyable. We'll do fun things like have LLMs tell jokes and argue with each other. But fundamentally, my goal is to teach skills you can apply in business. I'll explain business implications as we go, and it's worth keeping this in mind: as you build experience with models and techniques, think of ways you could put this into action at work today. Please do contact me if you'd like to discuss more or if you have ideas to bounce off me.\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Business applications\n", + " In this exercise, you experienced calling the Cloud API of a Frontier Model (a leading model at the frontier of AI) for the first time. We will be using APIs like OpenAI at many stages in the course, in addition to building our own LLMs.\n", + "\n", + "More specifically, we've applied this to Summarization - a classic Gen AI use case to make a summary. This can be applied to any business vertical - summarizing the news, summarizing financial performance, summarizing a resume in a cover letter - the applications are limitless. Consider how you could apply Summarization in your business, and try prototyping a solution.\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Before you continue - now try yourself\n", + " Use the cell below to make your own simple commercial example. Stick with the summarization use case for now. Here's an idea: write something that will take the contents of an email, and will suggest an appropriate short subject line for the email. That's the kind of feature that might be built into a commercial email tool.\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Important Note - Please read me\n", + " I'm continually improving these labs, adding more examples and exercises.\n", + " At the start of each week, it's worth checking you have the latest code.\n", + " First do a git pull and merge your changes as needed. Any problems? Try asking ChatGPT to clarify how to merge - or contact me! \n", + " After you've pulled the code, from the llm_engineering directory, in an Anaconda prompt (PC) or Terminal (Mac), run: \n", + " conda env update --f environment.yml \n", + " Or if you used virtualenv rather than Anaconda, then run this from your activated environment in a Powershell (PC) or Terminal (Mac): \n", + " pip install -r requirements.txt \n",
+ " Then restart the kernel (Kernel menu >> Restart Kernel and Clear Outputs Of All Cells) to pick up the changes.\n", + " \n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Reminder about the resources page\n", + " Here's a link to resources for the course. This includes links to all the slides.\n", + " https://edwarddonner.com/2024/11/13/llm-engineering-resources/ \n", + " Please keep this bookmarked, and I'll continue to add more useful links there over time.\n", + " \n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Before you continue\n", + " \n", + " Be sure you understand how the conversation above is working, and in particular how themessages list is being populated. Add print statements as needed. Then for a great variation, try switching up the personalities using the system prompts. Perhaps one can be pessimistic, and one optimistic?\n", + " \n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Business relevance\n", + " This structure of a conversation, as a list of messages, is fundamental to the way we build conversational AI assistants and how they are able to keep the context during a conversation. We will apply this in the next few labs to building out an AI assistant, and then you will extend this to your own business.\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Important Note - Please read me\n", + " I'm continually improving these labs, adding more examples and exercises.\n", + " At the start of each week, it's worth checking you have the latest code.\n", + " First do a git pull and merge your changes as needed. Any problems? Try asking ChatGPT to clarify how to merge - or contact me! \n", + " After you've pulled the code, from the llm_engineering directory, in an Anaconda prompt (PC) or Terminal (Mac), run: \n", + " conda env update --f environment.yml \n", + " Or if you used virtualenv rather than Anaconda, then run this from your activated environment in a Powershell (PC) or Terminal (Mac): \n", + " pip install -r requirements.txt \n",
+ " Then restart the kernel (Kernel menu >> Restart Kernel and Clear Outputs Of All Cells) to pick up the changes.\n", + " \n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Reminder about the resources page\n", + " Here's a link to resources for the course. This includes links to all the slides.\n", + " https://edwarddonner.com/2024/11/13/llm-engineering-resources/ \n", + " Please keep this bookmarked, and I'll continue to add more useful links there over time.\n", + " \n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Before you continue\n", + " \n", + " Be sure you understand how the conversation above is working, and in particular how themessages list is being populated. Add print statements as needed. Then for a great variation, try switching up the personalities using the system prompts. Perhaps one can be pessimistic, and one optimistic?\n", + " \n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Business relevance\n", + " This structure of a conversation, as a list of messages, is fundamental to the way we build conversational AI assistants and how they are able to keep the context during a conversation. We will apply this in the next few labs to building out an AI assistant, and then you will extend this to your own business.\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Important Note - Please read me\n", + " I'm continually improving these labs, adding more examples and exercises.\n", + " At the start of each week, it's worth checking you have the latest code.\n", + " First do a git pull and merge your changes as needed. Any problems? Try asking ChatGPT to clarify how to merge - or contact me! \n", + " After you've pulled the code, from the llm_engineering directory, in an Anaconda prompt (PC) or Terminal (Mac), run: \n", + " conda env update --f environment.yml \n", + " Or if you used virtualenv rather than Anaconda, then run this from your activated environment in a Powershell (PC) or Terminal (Mac): \n", + " pip install -r requirements.txt \n",
+ " Then restart the kernel (Kernel menu >> Restart Kernel and Clear Outputs Of All Cells) to pick up the changes.\n", + " \n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Reminder about the resources page\n", + " Here's a link to resources for the course. This includes links to all the slides.\n", + " https://edwarddonner.com/2024/11/13/llm-engineering-resources/ \n", + " Please keep this bookmarked, and I'll continue to add more useful links there over time.\n", + " \n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Before you continue\n", + " \n", + " Be sure you understand how the conversation above is working, and in particular how themessages list is being populated. Add print statements as needed. Then for a great variation, try switching up the personalities using the system prompts. Perhaps one can be pessimistic, and one optimistic?\n", + " \n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Business relevance\n", + " This structure of a conversation, as a list of messages, is fundamental to the way we build conversational AI assistants and how they are able to keep the context during a conversation. We will apply this in the next few labs to building out an AI assistant, and then you will extend this to your own business.\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Important Note - Please read me\n", + " I'm continually improving these labs, adding more examples and exercises.\n", + " At the start of each week, it's worth checking you have the latest code.\n", + " First do a git pull and merge your changes as needed. Any problems? Try asking ChatGPT to clarify how to merge - or contact me! \n", + " After you've pulled the code, from the llm_engineering directory, in an Anaconda prompt (PC) or Terminal (Mac), run: \n", + " conda env update --f environment.yml --prune \n", + " Or if you used virtualenv rather than Anaconda, then run this from your activated environment in a Powershell (PC) or Terminal (Mac): \n", + " pip install -r requirements.txt \n",
+ " Then restart the kernel (Kernel menu >> Restart Kernel and Clear Outputs Of All Cells) to pick up the changes.\n", + " \n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Reminder about the resources page\n", + " Here's a link to resources for the course. This includes links to all the slides.\n", + " https://edwarddonner.com/2024/11/13/llm-engineering-resources/ \n", + " Please keep this bookmarked, and I'll continue to add more useful links there over time.\n", + " \n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Before you continue\n", + " \n", + " Be sure you understand how the conversation above is working, and in particular how themessages list is being populated. Add print statements as needed. Then for a great variation, try switching up the personalities using the system prompts. Perhaps one can be pessimistic, and one optimistic?\n", + " \n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Business relevance\n", + " This structure of a conversation, as a list of messages, is fundamental to the way we build conversational AI assistants and how they are able to keep the context during a conversation. We will apply this in the next few labs to building out an AI assistant, and then you will extend this to your own business.\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Important Note - Please read me\n", + " I'm continually improving these labs, adding more examples and exercises.\n", + " At the start of each week, it's worth checking you have the latest code.\n", + " First do a git pull and merge your changes as needed. Any problems? Try asking ChatGPT to clarify how to merge - or contact me! \n", + " After you've pulled the code, from the llm_engineering directory, in an Anaconda prompt (PC) or Terminal (Mac), run: \n", + " conda env update --f environment.yml \n", + " Or if you used virtualenv rather than Anaconda, then run this from your activated environment in a Powershell (PC) or Terminal (Mac): \n", + " pip install -r requirements.txt \n",
+ " Then restart the kernel (Kernel menu >> Restart Kernel and Clear Outputs Of All Cells) to pick up the changes.\n", + " \n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Reminder about the resources page\n", + " Here's a link to resources for the course. This includes links to all the slides.\n", + " https://edwarddonner.com/2024/11/13/llm-engineering-resources/ \n", + " Please keep this bookmarked, and I'll continue to add more useful links there over time.\n", + " \n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Before you continue\n", + " \n", + " Be sure you understand how the conversation above is working, and in particular how themessages list is being populated. Add print statements as needed. Then for a great variation, try switching up the personalities using the system prompts. Perhaps one can be pessimistic, and one optimistic?\n", + " \n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Business relevance\n", + " This structure of a conversation, as a list of messages, is fundamental to the way we build conversational AI assistants and how they are able to keep the context during a conversation. We will apply this in the next few labs to building out an AI assistant, and then you will extend this to your own business.\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Important Note - Please read me\n", + " I'm continually improving these labs, adding more examples and exercises.\n", + " At the start of each week, it's worth checking you have the latest code.\n", + " First do a git pull and merge your changes as needed. Any problems? Try asking ChatGPT to clarify how to merge - or contact me! \n", + " After you've pulled the code, from the llm_engineering directory, in an Anaconda prompt (PC) or Terminal (Mac), run: \n", + " conda env update --f environment.yml \n", + " Or if you used virtualenv rather than Anaconda, then run this from your activated environment in a Powershell (PC) or Terminal (Mac): \n", + " pip install -r requirements.txt \n",
+ " Then restart the kernel (Kernel menu >> Restart Kernel and Clear Outputs Of All Cells) to pick up the changes.\n", + " \n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Reminder about the resources page\n", + " Here's a link to resources for the course. This includes links to all the slides.\n", + " https://edwarddonner.com/2024/11/13/llm-engineering-resources/ \n", + " Please keep this bookmarked, and I'll continue to add more useful links there over time.\n", + " \n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Before you continue\n", + " \n", + " Be sure you understand how the conversation above is working, and in particular how themessages list is being populated. Add print statements as needed. Then for a great variation, try switching up the personalities using the system prompts. Perhaps one can be pessimistic, and one optimistic?\n", + " \n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Business relevance\n", + " This structure of a conversation, as a list of messages, is fundamental to the way we build conversational AI assistants and how they are able to keep the context during a conversation. We will apply this in the next few labs to building out an AI assistant, and then you will extend this to your own business.\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Important Note - Please read me\n", + " I'm continually improving these labs, adding more examples and exercises.\n", + " At the start of each week, it's worth checking you have the latest code.\n", + " First do a git pull and merge your changes as needed. Any problems? Try asking ChatGPT to clarify how to merge - or contact me! \n", + " After you've pulled the code, from the llm_engineering directory, in an Anaconda prompt (PC) or Terminal (Mac), run: \n", + " conda env update --f environment.yml \n", + " Or if you used virtualenv rather than Anaconda, then run this from your activated environment in a Powershell (PC) or Terminal (Mac): \n", + " pip install -r requirements.txt \n",
+ " Then restart the kernel (Kernel menu >> Restart Kernel and Clear Outputs Of All Cells) to pick up the changes.\n", + " \n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Reminder about the resources page\n", + " Here's a link to resources for the course. This includes links to all the slides.\n", + " https://edwarddonner.com/2024/11/13/llm-engineering-resources/ \n", + " Please keep this bookmarked, and I'll continue to add more useful links there over time.\n", + " \n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Before you continue\n", + " \n", + " Be sure you understand how the conversation above is working, and in particular how themessages list is being populated. Add print statements as needed. Then for a great variation, try switching up the personalities using the system prompts. Perhaps one can be pessimistic, and one optimistic?\n", + " \n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Business relevance\n", + " This structure of a conversation, as a list of messages, is fundamental to the way we build conversational AI assistants and how they are able to keep the context during a conversation. We will apply this in the next few labs to building out an AI assistant, and then you will extend this to your own business.\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Before you read the next few cells\n", + " \n", + " Try to do this yourself - go back to the company brochure in week1, day5 and add a Gradio UI to the end. Then come and look at the solution.\n", + " \n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Before you read the next few cells\n", + " \n", + " Try to do this yourself - go back to the company brochure in week1, day5 and add a Gradio UI to the end. Then come and look at the solution.\n", + " \n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Business Applications\n", + " Conversational Assistants are of course a hugely common use case for Gen AI, and the latest frontier models are remarkably good at nuanced conversation. And Gradio makes it easy to have a user interface. Another crucial skill we covered is how to use prompting to provide context, information and examples.\n", + "\n", + "Consider how you could apply an AI Assistant to your business, and make yourself a prototype. Use the system prompt to give context on your business, and set the tone for the LLM.\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Business Applications\n", + " Conversational Assistants are of course a hugely common use case for Gen AI, and the latest frontier models are remarkably good at nuanced conversation. And Gradio makes it easy to have a user interface. Another crucial skill we covered is how to use prompting to provide context, information and examples.\n", + "\n", + "Consider how you could apply an AI Assistant to your business, and make yourself a prototype. Use the system prompt to give context on your business, and set the tone for the LLM.\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Business Applications\n", + " Conversational Assistants are of course a hugely common use case for Gen AI, and the latest frontier models are remarkably good at nuanced conversation. And Gradio makes it easy to have a user interface. Another crucial skill we covered is how to use prompting to provide context, information and examples.\n", + "\n", + "Consider how you could apply an AI Assistant to your business, and make yourself a prototype. Use the system prompt to give context on your business, and set the tone for the LLM.\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " Business Applications\n", + " Conversational Assistants are of course a hugely common use case for Gen AI, and the latest frontier models are remarkably good at nuanced conversation. And Gradio makes it easy to have a user interface. Another crucial skill we covered is how to use prompting to provide context, information and examples.\n", + "\n", + "Consider how you could apply an AI Assistant to your business, and make yourself a prototype. Use the system prompt to give context on your business, and set the tone for the LLM.\n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " I have a special request for you\n", + " \n", + " My editor tells me that it makes a HUGE difference when students rate this course on Udemy - it's one of the main ways that Udemy decides whether to show it to others. If you're able to take a minute to rate this, I'd be so very grateful! And regardless - always please reach out to me at ed@edwarddonner.com if I can help at any point.\n", + " \n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " I have a special request for you\n", + " \n", + " My editor tells me that it makes a HUGE difference when students rate this course on Udemy - it's one of the main ways that Udemy decides whether to show it to others. If you're able to take a minute to rate this, I'd be so very grateful! And regardless - always please reach out to me at ed@edwarddonner.com if I can help at any point.\n", + " \n", + " | \n",
+ "
\n",
+ " ![]() | \n",
+ " \n",
+ " I have a special request for you\n", + " \n", + " My editor tells me that it makes a HUGE difference when students rate this course on Udemy - it's one of the main ways that Udemy decides whether to show it to others. If you're able to take a minute to rate this, I'd be so very grateful! And regardless - always please reach out to me at ed@edwarddonner.com if I can help at any point.\n", + " \n", + " | \n",
+ "