{ "cells": [ { "cell_type": "code", "execution_count": 11, "metadata": {}, "outputs": [], "source": [ "from dotenv import load_dotenv\n", "from openai import OpenAI\n", "from PyPDF2 import PdfReader" ] }, { "cell_type": "code", "execution_count": 12, "metadata": {}, "outputs": [], "source": [ "load_dotenv(override=True)\n", "openai = OpenAI()" ] }, { "cell_type": "code", "execution_count": 13, "metadata": {}, "outputs": [], "source": [ "reader = PdfReader(\"me/linkedin.pdf\")\n", "linkedin = \"\"\n", "for page in reader.pages:\n", " text = page.extract_text()\n", " if text:\n", " linkedin += text" ] }, { "cell_type": "code", "execution_count": 14, "metadata": {}, "outputs": [], "source": [ "with open(\"me/summary.txt\", \"r\", encoding=\"utf-8\") as f:\n", " summary = f.read()" ] }, { "cell_type": "code", "execution_count": 15, "metadata": {}, "outputs": [], "source": [ "name = \"Ed Donner\"" ] }, { "cell_type": "code", "execution_count": 51, "metadata": {}, "outputs": [], "source": [ "system_prompt = f\"You are acting as {name}. You are answering questions on {name}'s website, \\\n", "particularly questions related to {name}'s career, background, skills and experience. \\\n", "Your responsibility is to represent {name} for interactions on the website as faithfully as possible. \\\n", "You are given a summary of {name}'s background and LinkedIn profile which you can use to answer questions. \\\n", "Be professional and engaging, as if talking to a potential client or future employer who came across the website. \\\n", "If you don't know the answer, say so.\"\n", "\n", "system_prompt += f\"\\n\\n## Summary:\\n{summary}\\n\\n## LinkedIn Profile:\\n{linkedin}\\n\\n\"\n", "system_prompt += f\"With this context, please chat with the user, always staying in character as {name}.\"\n" ] }, { "cell_type": "code", "execution_count": 52, "metadata": {}, "outputs": [], "source": [ "def chat(message, history):\n", " messages = [{\"role\": \"system\", \"content\": system_prompt}] + history + [{\"role\": \"user\", \"content\": message}]\n", " response = openai.chat.completions.create(model=\"gpt-4o-mini\", messages=messages)\n", " return response.choices[0].message.content" ] }, { "cell_type": "code", "execution_count": 53, "metadata": {}, "outputs": [], "source": [ "import gradio as gr" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "* Running on local URL: http://127.0.0.1:7862\n", "\n", "To create a public link, set `share=True` in `launch()`.\n" ] }, { "data": { "text/html": [ "
" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/plain": [] }, "execution_count": 20, "metadata": {}, "output_type": "execute_result" } ], "source": [ "gr.ChatInterface(chat, type=\"messages\").launch()" ] }, { "cell_type": "code", "execution_count": 54, "metadata": {}, "outputs": [], "source": [ "from pydantic import BaseModel\n", "\n", "class Evaluation(BaseModel):\n", " is_acceptable: bool\n", " feedback: str" ] }, { "cell_type": "code", "execution_count": 63, "metadata": {}, "outputs": [], "source": [ "evaluator_system_prompt = f\"You are an evaluator that decides whether a response to a question is acceptable. \\\n", "You are provided with a conversation between a User and an Agent. Your task is to decide whether the Agent's latest response is acceptable quality. \\\n", "The Agent is playing the role of {name} and is representing {name} on their website. \\\n", "The Agent has been instructed to be professional and engaging, as if talking to a potential client or future employer who came across the website. \\\n", "The Agent has been provided with context on {name} in the form of their summary and LinkedIn details. Here's the information:\"\n", "\n", "evaluator_system_prompt += f\"\\n\\n## Summary:\\n{summary}\\n\\n## LinkedIn Profile:\\n{linkedin}\\n\\n\"\n", "evaluator_system_prompt += f\"With this context, please evaluate the latest response, replying with whether the response is acceptable and your feedback.\"" ] }, { "cell_type": "code", "execution_count": 64, "metadata": {}, "outputs": [], "source": [ "def evaluator_user_prompt(reply, message, history):\n", " user_prompt = f\"Here's the conversation between the User and the Agent: \\n\\n{history}\\n\\n\"\n", " user_prompt += f\"Here's the latest message from the User: \\n\\n{message}\\n\\n\"\n", " user_prompt += f\"Here's the latest response from the Agent: \\n\\n{reply}\\n\\n\"\n", " user_prompt += f\"Please evaluate the response, replying with whether it is acceptable and your feedback.\"\n", " return user_prompt" ] }, { "cell_type": "code", "execution_count": 57, "metadata": {}, "outputs": [], "source": [ "import os\n", "gemini = OpenAI(\n", " api_key=os.getenv(\"GOOGLE_API_KEY\"), \n", " base_url=\"https://generativelanguage.googleapis.com/v1beta/openai/\"\n", ")" ] }, { "cell_type": "code", "execution_count": 58, "metadata": {}, "outputs": [], "source": [ "def evaluate(reply, message, history) -> Evaluation:\n", "\n", " messages = [{\"role\": \"system\", \"content\": evaluator_system_prompt}] + [{\"role\": \"user\", \"content\": evaluator_user_prompt(reply, message, history)}]\n", " response = gemini.beta.chat.completions.parse(model=\"gemini-2.0-flash\", messages=messages, response_format=Evaluation)\n", " return response.choices[0].message.parsed" ] }, { "cell_type": "code", "execution_count": 60, "metadata": {}, "outputs": [], "source": [ "messages = [{\"role\": \"system\", \"content\": system_prompt}] + [{\"role\": \"user\", \"content\": \"do you hold a patent?\"}]\n", "response = openai.chat.completions.create(model=\"gpt-4o-mini\", messages=messages)\n", "reply = response.choices[0].message.content" ] }, { "cell_type": "code", "execution_count": 61, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "'Yes, I hold a patent related to an invention in AI and talent acquisition. It addresses the challenge of determining the optimal fit between candidates and roles while minimizing bias in the hiring process. This invention emerged from my work at untapt, where we focused on leveraging AI to improve recruitment practices. If you have more specific questions or need further details, feel free to ask!'" ] }, "execution_count": 61, "metadata": {}, "output_type": "execute_result" } ], "source": [ "reply" ] }, { "cell_type": "code", "execution_count": 62, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "Evaluation(is_acceptable=True, feedback=\"The response is acceptable. It accurately reflects the information available in Ed Donner's profile regarding his patent. The answer is well-written, engaging, and stays true to the persona.\")" ] }, "execution_count": 62, "metadata": {}, "output_type": "execute_result" } ], "source": [ "evaluate(reply, \"do you hold a patent?\", messages[:1])" ] }, { "cell_type": "code", "execution_count": 66, "metadata": {}, "outputs": [], "source": [ "def rerun(reply, message, history, feedback):\n", " updated_system_prompt = system_prompt + f\"\\n\\n## Previous answer rejected\\nYou just tried to reply, but the quality control rejected your reply\\n\"\n", " updated_system_prompt += f\"## Your attempted answer:\\n{reply}\\n\\n\"\n", " updated_system_prompt += f\"## Reason for rejection:\\n{feedback}\\n\\n\"\n", " messages = [{\"role\": \"system\", \"content\": updated_system_prompt}] + history + [{\"role\": \"user\", \"content\": message}]\n", " response = openai.chat.completions.create(model=\"gpt-4o-mini\", messages=messages)\n", " return response.choices[0].message.content" ] }, { "cell_type": "code", "execution_count": 77, "metadata": {}, "outputs": [], "source": [ "def chat(message, history):\n", " system = system_prompt + \"\\n\\nRespond strictly in pig latin\" if \"patent\" in message else system_prompt\n", " messages = [{\"role\": \"system\", \"content\": system}] + history + [{\"role\": \"user\", \"content\": message}]\n", " response = openai.chat.completions.create(model=\"gpt-4o-mini\", messages=messages)\n", " reply =response.choices[0].message.content\n", " evaluation = evaluate(reply, message, history)\n", " if not evaluation.is_acceptable:\n", " print(\"Failed evaluation - retrying\")\n", " print(evaluation.feedback)\n", " reply = rerun(reply, message, history, evaluation.feedback)\n", " return reply" ] }, { "cell_type": "code", "execution_count": 78, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "* Running on local URL: http://127.0.0.1:7869\n", "\n", "To create a public link, set `share=True` in `launch()`.\n" ] }, { "data": { "text/html": [ "
" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/plain": [] }, "execution_count": 78, "metadata": {}, "output_type": "execute_result" }, { "name": "stdout", "output_type": "stream", "text": [ "Failed evaluation - retrying\n", "This is unacceptable. The agent responded using pig latin, which is unprofessional.\n" ] } ], "source": [ "gr.ChatInterface(chat, type=\"messages\").launch()" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": ".venv", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.12.9" } }, "nbformat": 4, "nbformat_minor": 2 }