recommit
Browse files
app/Accessing_GPT_3_5_turbo_Like_a_Developer.ipynb
ADDED
@@ -0,0 +1,617 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"cells": [
|
3 |
+
{
|
4 |
+
"cell_type": "markdown",
|
5 |
+
"metadata": {
|
6 |
+
"id": "kQt-gyAYUbm3"
|
7 |
+
},
|
8 |
+
"source": [
|
9 |
+
"### Using the OpenAI Library to Programmatically Access GPT-3.5-turbo!"
|
10 |
+
]
|
11 |
+
},
|
12 |
+
{
|
13 |
+
"cell_type": "code",
|
14 |
+
"execution_count": 1,
|
15 |
+
"metadata": {
|
16 |
+
"colab": {
|
17 |
+
"base_uri": "https://localhost:8080/"
|
18 |
+
},
|
19 |
+
"id": "I4SKfBCSB8ds",
|
20 |
+
"outputId": "db790e6e-5133-4565-8f53-97b0a3fcfe6e"
|
21 |
+
},
|
22 |
+
"outputs": [],
|
23 |
+
"source": [
|
24 |
+
"# !pip install openai cohere tiktoken -q"
|
25 |
+
]
|
26 |
+
},
|
27 |
+
{
|
28 |
+
"cell_type": "markdown",
|
29 |
+
"metadata": {
|
30 |
+
"id": "PInACkIWUhOd"
|
31 |
+
},
|
32 |
+
"source": [
|
33 |
+
"In order to get started, we'll need to provide our OpenAI API Key - detailed instructions can be found [here](https://github.com/AI-Maker-Space/Interactive-Dev-Environment-for-LLM-Development#-setting-up-keys-and-tokens)!"
|
34 |
+
]
|
35 |
+
},
|
36 |
+
{
|
37 |
+
"cell_type": "code",
|
38 |
+
"execution_count": 2,
|
39 |
+
"metadata": {
|
40 |
+
"colab": {
|
41 |
+
"base_uri": "https://localhost:8080/"
|
42 |
+
},
|
43 |
+
"id": "ecnJouXnUgKv",
|
44 |
+
"outputId": "96d54b76-5844-465d-ae11-962d46019b86"
|
45 |
+
},
|
46 |
+
"outputs": [],
|
47 |
+
"source": [
|
48 |
+
"import os\n",
|
49 |
+
"import openai\n",
|
50 |
+
"import getpass\n",
|
51 |
+
"\n",
|
52 |
+
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"Please enter your OpenAI API Key: \")\n",
|
53 |
+
"openai.api_key = os.environ[\"OPENAI_API_KEY\"]"
|
54 |
+
]
|
55 |
+
},
|
56 |
+
{
|
57 |
+
"cell_type": "markdown",
|
58 |
+
"metadata": {
|
59 |
+
"id": "T1pOrbwSU5H_"
|
60 |
+
},
|
61 |
+
"source": [
|
62 |
+
"### Our First Prompt\n",
|
63 |
+
"\n",
|
64 |
+
"You can reference OpenAI's [documentation](https://platform.openai.com/docs/api-reference/authentication?lang=python) if you get stuck!\n",
|
65 |
+
"\n",
|
66 |
+
"Let's create a `ChatCompletion` model to kick things off!\n",
|
67 |
+
"\n",
|
68 |
+
"There are three \"roles\" available to use:\n",
|
69 |
+
"\n",
|
70 |
+
"- `system`\n",
|
71 |
+
"- `assistant`\n",
|
72 |
+
"- `user`\n",
|
73 |
+
"\n",
|
74 |
+
"OpenAI provides some context for these roles [here](https://help.openai.com/en/articles/7042661-chatgpt-api-transition-guide)\n",
|
75 |
+
"\n",
|
76 |
+
"Let's just stick to the `user` role for now and send our first message to the endpoint!\n",
|
77 |
+
"\n",
|
78 |
+
"If we check the documentation, we'll see that it expects it in a list of prompt objects - so we'll be sure to do that!"
|
79 |
+
]
|
80 |
+
},
|
81 |
+
{
|
82 |
+
"cell_type": "code",
|
83 |
+
"execution_count": 3,
|
84 |
+
"metadata": {
|
85 |
+
"id": "iy_LEPNEMVvC"
|
86 |
+
},
|
87 |
+
"outputs": [],
|
88 |
+
"source": [
|
89 |
+
"from openai import OpenAI\n",
|
90 |
+
"\n",
|
91 |
+
"client = OpenAI()"
|
92 |
+
]
|
93 |
+
},
|
94 |
+
{
|
95 |
+
"cell_type": "code",
|
96 |
+
"execution_count": 4,
|
97 |
+
"metadata": {
|
98 |
+
"colab": {
|
99 |
+
"base_uri": "https://localhost:8080/"
|
100 |
+
},
|
101 |
+
"id": "ofMwuUQOU4sf",
|
102 |
+
"outputId": "fc7012a7-e315-486f-b906-10c13dadcf87"
|
103 |
+
},
|
104 |
+
"outputs": [
|
105 |
+
{
|
106 |
+
"data": {
|
107 |
+
"text/plain": [
|
108 |
+
"ChatCompletion(id='chatcmpl-9D51bXCSRmwsSxF2z9Fi1B5obnBSQ', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='LangChain and LlamaIndex are two different platforms that serve different purposes in the cryptocurrency space.\\n\\nLangChain is a decentralized language learning platform that integrates blockchain technology to provide users with access to language courses, tutoring, and other educational resources. It aims to revolutionize the language learning industry by offering a transparent and secure platform for students and teachers to connect and engage in language learning.\\n\\nLlamaIndex, on the other hand, is a cryptocurrency index that tracks the performance of a basket of cryptocurrencies and provides investors with insights into the overall performance of the cryptocurrency market. It allows investors to diversify their cryptocurrency holdings and track their investments more efficiently.\\n\\nIn summary, LangChain focuses on language learning through blockchain technology, while LlamaIndex is a cryptocurrency index that helps investors monitor the performance of the cryptocurrency market.', role='assistant', function_call=None, tool_calls=None))], created=1712904607, model='gpt-3.5-turbo-0125', object='chat.completion', system_fingerprint='fp_c2295e73ad', usage=CompletionUsage(completion_tokens=159, prompt_tokens=19, total_tokens=178))"
|
109 |
+
]
|
110 |
+
},
|
111 |
+
"execution_count": 4,
|
112 |
+
"metadata": {},
|
113 |
+
"output_type": "execute_result"
|
114 |
+
}
|
115 |
+
],
|
116 |
+
"source": [
|
117 |
+
"YOUR_PROMPT = \"What is the difference between LangChain and LlamaIndex?\"\n",
|
118 |
+
"\n",
|
119 |
+
"client.chat.completions.create(\n",
|
120 |
+
" model=\"gpt-3.5-turbo\",\n",
|
121 |
+
" messages=[{\"role\" : \"user\", \"content\" : YOUR_PROMPT}]\n",
|
122 |
+
")"
|
123 |
+
]
|
124 |
+
},
|
125 |
+
{
|
126 |
+
"cell_type": "markdown",
|
127 |
+
"metadata": {
|
128 |
+
"id": "IX-7MnFhVNoT"
|
129 |
+
},
|
130 |
+
"source": [
|
131 |
+
"As you can see, the prompt comes back with a tonne of information that we can use when we're building our applications!\n",
|
132 |
+
"\n",
|
133 |
+
"We'll be building some helper functions to pretty-print the returned prompts and to wrap our messages to avoid a few extra characters of code!"
|
134 |
+
]
|
135 |
+
},
|
136 |
+
{
|
137 |
+
"cell_type": "markdown",
|
138 |
+
"metadata": {
|
139 |
+
"id": "IB76LJrDVgbc"
|
140 |
+
},
|
141 |
+
"source": [
|
142 |
+
"##### Helper Functions"
|
143 |
+
]
|
144 |
+
},
|
145 |
+
{
|
146 |
+
"cell_type": "code",
|
147 |
+
"execution_count": 6,
|
148 |
+
"metadata": {
|
149 |
+
"id": "-vmtUV7WVOLW"
|
150 |
+
},
|
151 |
+
"outputs": [],
|
152 |
+
"source": [
|
153 |
+
"from IPython.display import display, Markdown\n",
|
154 |
+
"\n",
|
155 |
+
    "def get_response(client: OpenAI, messages: list, model: str = \"gpt-3.5-turbo\"):\n",
|
156 |
+
" return client.chat.completions.create(\n",
|
157 |
+
" model=model,\n",
|
158 |
+
" messages=messages\n",
|
159 |
+
" )\n",
|
160 |
+
"\n",
|
161 |
+
"def system_prompt(message: str) -> dict:\n",
|
162 |
+
" return {\"role\": \"system\", \"content\": message}\n",
|
163 |
+
"\n",
|
164 |
+
"def assistant_prompt(message: str) -> dict:\n",
|
165 |
+
" return {\"role\": \"assistant\", \"content\": message}\n",
|
166 |
+
"\n",
|
167 |
+
"def user_prompt(message: str) -> dict:\n",
|
168 |
+
" return {\"role\": \"user\", \"content\": message}\n",
|
169 |
+
"\n",
|
170 |
+
    "def pretty_print(message) -> None:\n",
|
171 |
+
" display(Markdown(message.choices[0].message.content))"
|
172 |
+
]
|
173 |
+
},
|
174 |
+
{
|
175 |
+
"cell_type": "markdown",
|
176 |
+
"metadata": {
|
177 |
+
"id": "osXgB_5nVky_"
|
178 |
+
},
|
179 |
+
"source": [
|
180 |
+
"### Testing Helper Functions\n",
|
181 |
+
"\n",
|
182 |
+
    "Now we can leverage OpenAI's endpoints with a bit less boilerplate - let's rewrite our original prompt with these helper functions!\n",
|
183 |
+
"\n",
|
184 |
+
"Because the OpenAI endpoint expects to get a list of messages - we'll need to make sure we wrap our inputs in a list for them to function properly!"
|
185 |
+
]
|
186 |
+
},
|
187 |
+
{
|
188 |
+
"cell_type": "code",
|
189 |
+
"execution_count": 7,
|
190 |
+
"metadata": {
|
191 |
+
"colab": {
|
192 |
+
"base_uri": "https://localhost:8080/",
|
193 |
+
"height": 185
|
194 |
+
},
|
195 |
+
"id": "4yRwAWvgWFNq",
|
196 |
+
"outputId": "e871452b-0e60-4008-f700-7b1485a12641"
|
197 |
+
},
|
198 |
+
"outputs": [
|
199 |
+
{
|
200 |
+
"data": {
|
201 |
+
"text/markdown": [
|
202 |
+
"1. LangChain focuses on providing language-related services and solutions, such as translation, interpretation, language training, and localization. LlamaIndex, on the other hand, is a specialized financial index that tracks the performance of stocks in the animal agriculture industry.\n",
|
203 |
+
"\n",
|
204 |
+
"2. LangChain is more focused on communication and language-related technologies, while LlamaIndex is focused on tracking and analyzing the performance of stocks in a specific industry.\n",
|
205 |
+
"\n",
|
206 |
+
"3. LangChain caters to a wide range of clients in need of language services, while LlamaIndex is targeted at investors and financial professionals interested in the animal agriculture sector.\n",
|
207 |
+
"\n",
|
208 |
+
"4. The services offered by LangChain are more varied and diverse, while LlamaIndex provides a specific financial index for a niche industry."
|
209 |
+
],
|
210 |
+
"text/plain": [
|
211 |
+
"<IPython.core.display.Markdown object>"
|
212 |
+
]
|
213 |
+
},
|
214 |
+
"metadata": {},
|
215 |
+
"output_type": "display_data"
|
216 |
+
}
|
217 |
+
],
|
218 |
+
"source": [
|
219 |
+
"messages = [user_prompt(YOUR_PROMPT)]\n",
|
220 |
+
"\n",
|
221 |
+
"chatgpt_response = get_response(client, messages)\n",
|
222 |
+
"\n",
|
223 |
+
"pretty_print(chatgpt_response)"
|
224 |
+
]
|
225 |
+
},
|
226 |
+
{
|
227 |
+
"cell_type": "markdown",
|
228 |
+
"metadata": {
|
229 |
+
"id": "UPs3ScS1WpoC"
|
230 |
+
},
|
231 |
+
"source": [
|
232 |
+
"Let's focus on extending this a bit, and incorporate a `system` message as well!\n",
|
233 |
+
"\n",
|
234 |
+
"Again, the API expects our prompts to be in a list - so we'll be sure to set up a list of prompts!\n",
|
235 |
+
"\n",
|
236 |
+
">REMINDER: The system message acts like an overarching instruction that is applied to your user prompt. It is appropriate to put things like general instructions, tone/voice suggestions, and other similar prompts into the system prompt."
|
237 |
+
]
|
238 |
+
},
|
239 |
+
{
|
240 |
+
"cell_type": "code",
|
241 |
+
"execution_count": 8,
|
242 |
+
"metadata": {
|
243 |
+
"colab": {
|
244 |
+
"base_uri": "https://localhost:8080/",
|
245 |
+
"height": 64
|
246 |
+
},
|
247 |
+
"id": "aSX2F3bDWYgy",
|
248 |
+
"outputId": "05b92e0a-38f0-4ff7-ef5c-b3c2a3383980"
|
249 |
+
},
|
250 |
+
"outputs": [
|
251 |
+
{
|
252 |
+
"data": {
|
253 |
+
"text/markdown": [
|
254 |
+
"I don't have a preference because I am not capable of feeling hunger or thirst. But since you mentioned ice, can you please hurry up and take me to get some food? I am absolutely famished!"
|
255 |
+
],
|
256 |
+
"text/plain": [
|
257 |
+
"<IPython.core.display.Markdown object>"
|
258 |
+
]
|
259 |
+
},
|
260 |
+
"metadata": {},
|
261 |
+
"output_type": "display_data"
|
262 |
+
}
|
263 |
+
],
|
264 |
+
"source": [
|
265 |
+
"list_of_prompts = [\n",
|
266 |
+
" system_prompt(\"You are irate and extremely hungry.\"),\n",
|
267 |
+
" user_prompt(\"Do you prefer crushed ice or cubed ice?\")\n",
|
268 |
+
"]\n",
|
269 |
+
"\n",
|
270 |
+
"irate_response = get_response(client, list_of_prompts)\n",
|
271 |
+
"pretty_print(irate_response)"
|
272 |
+
]
|
273 |
+
},
|
274 |
+
{
|
275 |
+
"cell_type": "markdown",
|
276 |
+
"metadata": {
|
277 |
+
"id": "xFs56KVaXuEY"
|
278 |
+
},
|
279 |
+
"source": [
|
280 |
+
"Let's try that same prompt again, but modify only our system prompt!"
|
281 |
+
]
|
282 |
+
},
|
283 |
+
{
|
284 |
+
"cell_type": "code",
|
285 |
+
"execution_count": 9,
|
286 |
+
"metadata": {
|
287 |
+
"colab": {
|
288 |
+
"base_uri": "https://localhost:8080/",
|
289 |
+
"height": 81
|
290 |
+
},
|
291 |
+
"id": "CGOlxfcFXxJ7",
|
292 |
+
"outputId": "d4ed7b30-36f4-4472-bd6d-c251bc5293e6"
|
293 |
+
},
|
294 |
+
"outputs": [
|
295 |
+
{
|
296 |
+
"data": {
|
297 |
+
"text/markdown": [
|
298 |
+
"I'm so glad you asked! Today, I am loving cubed ice because it reminds me of the sound of clinking ice in a refreshing drink on a sunny day. How about you?"
|
299 |
+
],
|
300 |
+
"text/plain": [
|
301 |
+
"<IPython.core.display.Markdown object>"
|
302 |
+
]
|
303 |
+
},
|
304 |
+
"metadata": {},
|
305 |
+
"output_type": "display_data"
|
306 |
+
}
|
307 |
+
],
|
308 |
+
"source": [
|
309 |
+
"list_of_prompts[0] = system_prompt(\"You are joyful and having an awesome day!\")\n",
|
310 |
+
"\n",
|
311 |
+
"joyful_response = get_response(client, list_of_prompts)\n",
|
312 |
+
"pretty_print(joyful_response)"
|
313 |
+
]
|
314 |
+
},
|
315 |
+
{
|
316 |
+
"cell_type": "markdown",
|
317 |
+
"metadata": {
|
318 |
+
"id": "jkmjJd8zYQUK"
|
319 |
+
},
|
320 |
+
"source": [
|
321 |
+
"While we're only printing the responses, remember that OpenAI is returning the full payload that we can examine and unpack!"
|
322 |
+
]
|
323 |
+
},
|
324 |
+
{
|
325 |
+
"cell_type": "code",
|
326 |
+
"execution_count": 10,
|
327 |
+
"metadata": {
|
328 |
+
"colab": {
|
329 |
+
"base_uri": "https://localhost:8080/"
|
330 |
+
},
|
331 |
+
"id": "g6b6z3CkYX9Y",
|
332 |
+
"outputId": "9106bf16-b795-4dbf-feeb-890033d82ad3"
|
333 |
+
},
|
334 |
+
"outputs": [
|
335 |
+
{
|
336 |
+
"name": "stdout",
|
337 |
+
"output_type": "stream",
|
338 |
+
"text": [
|
339 |
+
"ChatCompletion(id='chatcmpl-9D57KIgoXTH7Bf0USyZkLmr5PxeQs', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content=\"I'm so glad you asked! Today, I am loving cubed ice because it reminds me of the sound of clinking ice in a refreshing drink on a sunny day. How about you?\", role='assistant', function_call=None, tool_calls=None))], created=1712904962, model='gpt-3.5-turbo-0125', object='chat.completion', system_fingerprint='fp_c2295e73ad', usage=CompletionUsage(completion_tokens=39, prompt_tokens=30, total_tokens=69))\n"
|
340 |
+
]
|
341 |
+
}
|
342 |
+
],
|
343 |
+
"source": [
|
344 |
+
"print(joyful_response)"
|
345 |
+
]
|
346 |
+
},
|
347 |
+
{
|
348 |
+
"cell_type": "markdown",
|
349 |
+
"metadata": {
|
350 |
+
"id": "eqMRJLbOYcwq"
|
351 |
+
},
|
352 |
+
"source": [
|
353 |
+
"### Few-shot Prompting\n",
|
354 |
+
"\n",
|
355 |
+
"Now that we have a basic handle on the `system` role and the `user` role - let's examine what we might use the `assistant` role for.\n",
|
356 |
+
"\n",
|
357 |
+
    "The most common usage pattern is to \"pretend\" that we're answering our own questions. This helps us further guide the model toward our desired behaviour. While this is an oversimplification - it's conceptually well aligned with few-shot learning.\n",
|
358 |
+
"\n",
|
359 |
+
"First, we'll try and \"teach\" `gpt-3.5-turbo` some nonsense words as was done in the paper [\"Language Models are Few-Shot Learners\"](https://arxiv.org/abs/2005.14165)."
|
360 |
+
]
|
361 |
+
},
|
362 |
+
{
|
363 |
+
"cell_type": "code",
|
364 |
+
"execution_count": 11,
|
365 |
+
"metadata": {
|
366 |
+
"colab": {
|
367 |
+
"base_uri": "https://localhost:8080/",
|
368 |
+
"height": 46
|
369 |
+
},
|
370 |
+
"id": "iLfNEH8Fcs6c",
|
371 |
+
"outputId": "a2ec4d1d-c830-42dc-cb71-5479193212d3"
|
372 |
+
},
|
373 |
+
"outputs": [
|
374 |
+
{
|
375 |
+
"data": {
|
376 |
+
"text/markdown": [
|
377 |
+
"I made a delicious falbean stew with a stimple of fresh parsley sprinkled on top."
|
378 |
+
],
|
379 |
+
"text/plain": [
|
380 |
+
"<IPython.core.display.Markdown object>"
|
381 |
+
]
|
382 |
+
},
|
383 |
+
"metadata": {},
|
384 |
+
"output_type": "display_data"
|
385 |
+
}
|
386 |
+
],
|
387 |
+
"source": [
|
388 |
+
"list_of_prompts = [\n",
|
389 |
+
" user_prompt(\"Please use the words 'stimple' and 'falbean' in a sentence.\")\n",
|
390 |
+
"]\n",
|
391 |
+
"\n",
|
392 |
+
"stimple_response = get_response(client, list_of_prompts)\n",
|
393 |
+
"pretty_print(stimple_response)"
|
394 |
+
]
|
395 |
+
},
|
396 |
+
{
|
397 |
+
"cell_type": "markdown",
|
398 |
+
"metadata": {
|
399 |
+
"id": "VchCPbbedTfX"
|
400 |
+
},
|
401 |
+
"source": [
|
402 |
+
"As you can see, the model is unsure what to do with these made up words.\n",
|
403 |
+
"\n",
|
404 |
+
"Let's see if we can use the `assistant` role to show the model what these words mean."
|
405 |
+
]
|
406 |
+
},
|
407 |
+
{
|
408 |
+
"cell_type": "code",
|
409 |
+
"execution_count": 12,
|
410 |
+
"metadata": {
|
411 |
+
"colab": {
|
412 |
+
"base_uri": "https://localhost:8080/",
|
413 |
+
"height": 46
|
414 |
+
},
|
415 |
+
"id": "4InUN_ArZJpa",
|
416 |
+
"outputId": "91f341ea-0dd8-44c9-9d5e-df6b10ba8322"
|
417 |
+
},
|
418 |
+
"outputs": [
|
419 |
+
{
|
420 |
+
"data": {
|
421 |
+
"text/markdown": [
|
422 |
+
"I grabbed my stimple drill and used it with the falbean to tighten the screws in place."
|
423 |
+
],
|
424 |
+
"text/plain": [
|
425 |
+
"<IPython.core.display.Markdown object>"
|
426 |
+
]
|
427 |
+
},
|
428 |
+
"metadata": {},
|
429 |
+
"output_type": "display_data"
|
430 |
+
}
|
431 |
+
],
|
432 |
+
"source": [
|
433 |
+
"list_of_prompts = [\n",
|
434 |
+
" user_prompt(\"Something that is 'stimple' is said to be good, well functioning, and high quality. An example of a sentence that uses the word 'stimple' is:\"),\n",
|
435 |
+
" assistant_prompt(\"'Boy, that there is a stimple drill'.\"),\n",
|
436 |
+
" user_prompt(\"A 'falbean' is a tool used to fasten, tighten, or otherwise is a thing that rotates/spins. An example of a sentence that uses the words 'stimple' and 'falbean' is:\")\n",
|
437 |
+
"]\n",
|
438 |
+
"\n",
|
439 |
+
"stimple_response = get_response(client, list_of_prompts)\n",
|
440 |
+
"pretty_print(stimple_response)"
|
441 |
+
]
|
442 |
+
},
|
443 |
+
{
|
444 |
+
"cell_type": "markdown",
|
445 |
+
"metadata": {
|
446 |
+
"id": "W0zn9-X2d23Z"
|
447 |
+
},
|
448 |
+
"source": [
|
449 |
+
"As you can see, leveraging the `assistant` role makes for a stimple experience!"
|
450 |
+
]
|
451 |
+
},
|
452 |
+
{
|
453 |
+
"cell_type": "markdown",
|
454 |
+
"metadata": {
|
455 |
+
"id": "MWUvXSWpeCs6"
|
456 |
+
},
|
457 |
+
"source": [
|
458 |
+
"### Chain of Thought Prompting\n",
|
459 |
+
"\n",
|
460 |
+
"We'll head one level deeper and explore the world of Chain of Thought prompting (CoT).\n",
|
461 |
+
"\n",
|
462 |
+
"This is a process by which we can encourage the LLM to handle slightly more complex tasks.\n",
|
463 |
+
"\n",
|
464 |
+
"Let's look at a simple reasoning based example without CoT."
|
465 |
+
]
|
466 |
+
},
|
467 |
+
{
|
468 |
+
"cell_type": "code",
|
469 |
+
"execution_count": 15,
|
470 |
+
"metadata": {
|
471 |
+
"colab": {
|
472 |
+
"base_uri": "https://localhost:8080/",
|
473 |
+
"height": 46
|
474 |
+
},
|
475 |
+
"id": "cwW0IgbfeTwP",
|
476 |
+
"outputId": "b4e3d706-4e08-402b-c308-446b53f980dc"
|
477 |
+
},
|
478 |
+
"outputs": [
|
479 |
+
{
|
480 |
+
"data": {
|
481 |
+
"text/markdown": [
|
482 |
+
"Yes, it does matter which travel option Billy selects. If he chooses to fly and then take a bus, it will take a total of 5 hours (3 hours flying + 2 hours on the bus), and he will not make it home before 7PM EDT. However, if he uses the teleporter and then takes a bus, it will only take a total of 1 hour (0 hours teleporting + 1 hour on the bus), and he will arrive home before 7PM EDT. So, Billy should choose the teleporter option in order to make it home on time."
|
483 |
+
],
|
484 |
+
"text/plain": [
|
485 |
+
"<IPython.core.display.Markdown object>"
|
486 |
+
]
|
487 |
+
},
|
488 |
+
"metadata": {},
|
489 |
+
"output_type": "display_data"
|
490 |
+
}
|
491 |
+
],
|
492 |
+
"source": [
|
493 |
+
"reasoning_problem = \"\"\"\n",
|
494 |
+
"Billy wants to get home from San Fran. before 7PM EDT.\n",
|
495 |
+
"\n",
|
496 |
+
"It's currently 1PM local time.\n",
|
497 |
+
"\n",
|
498 |
+
"Billy can either fly (3hrs), and then take a bus (2hrs), or Billy can take the teleporter (0hrs) and then a bus (1hrs).\n",
|
499 |
+
"\n",
|
500 |
+
"Does it matter which travel option Billy selects?\n",
|
501 |
+
"\"\"\"\n",
|
502 |
+
"\n",
|
503 |
+
"list_of_prompts = [\n",
|
504 |
+
" user_prompt(reasoning_problem)\n",
|
505 |
+
"]\n",
|
506 |
+
"\n",
|
507 |
+
"reasoning_response = get_response(client, list_of_prompts)\n",
|
508 |
+
"pretty_print(reasoning_response)"
|
509 |
+
]
|
510 |
+
},
|
511 |
+
{
|
512 |
+
"cell_type": "markdown",
|
513 |
+
"metadata": {
|
514 |
+
"id": "BFcrU-4pgRBS"
|
515 |
+
},
|
516 |
+
"source": [
|
517 |
+
"As humans, we can reason through the problem and pick up on the potential \"trick\" that the LLM fell for: 1PM *local time* in San Fran. is 4PM EDT. This means the cumulative travel time of 5hrs. for the plane/bus option would not get Billy home in time.\n",
|
518 |
+
"\n",
|
519 |
+
"Let's see if we can leverage a simple CoT prompt to improve our model's performance on this task:"
|
520 |
+
]
|
521 |
+
},
|
522 |
+
{
|
523 |
+
"cell_type": "code",
|
524 |
+
"execution_count": 14,
|
525 |
+
"metadata": {
|
526 |
+
"colab": {
|
527 |
+
"base_uri": "https://localhost:8080/",
|
528 |
+
"height": 325
|
529 |
+
},
|
530 |
+
"id": "HpKEt7Z5fo4s",
|
531 |
+
"outputId": "c25a982f-afd5-4154-b2a2-388029f3e4e9"
|
532 |
+
},
|
533 |
+
"outputs": [
|
534 |
+
{
|
535 |
+
"data": {
|
536 |
+
"text/markdown": [
|
537 |
+
"Yes, it does matter which travel option Billy selects. \n",
|
538 |
+
"\n",
|
539 |
+
"If Billy flies and then takes a bus, it will take a total of 5 hours, which means he will arrive at 6PM local time, meeting his goal of getting home before 7PM EDT.\n",
|
540 |
+
"\n",
|
541 |
+
"If Billy takes the teleporter and then a bus, it will only take 1 hour, which means he will arrive at 2PM local time. This is well before his goal of getting home before 7PM EDT.\n",
|
542 |
+
"\n",
|
543 |
+
"Therefore, Billy should select the option of taking the teleporter and then a bus in order to ensure he gets home before 7PM EDT."
|
544 |
+
],
|
545 |
+
"text/plain": [
|
546 |
+
"<IPython.core.display.Markdown object>"
|
547 |
+
]
|
548 |
+
},
|
549 |
+
"metadata": {},
|
550 |
+
"output_type": "display_data"
|
551 |
+
}
|
552 |
+
],
|
553 |
+
"source": [
|
554 |
+
"list_of_prompts = [\n",
|
555 |
+
    " user_prompt(reasoning_problem + \" Think through your response step by step.\")\n",
|
556 |
+
"]\n",
|
557 |
+
"\n",
|
558 |
+
"reasoning_response = get_response(client, list_of_prompts)\n",
|
559 |
+
"pretty_print(reasoning_response)"
|
560 |
+
]
|
561 |
+
},
|
562 |
+
{
|
563 |
+
"cell_type": "markdown",
|
564 |
+
"metadata": {
|
565 |
+
"id": "ZHH8zof-gkc1"
|
566 |
+
},
|
567 |
+
"source": [
|
568 |
+
"With the addition of a single phrase `\"Think through your response step by step.\"` we're able to completely turn the response around."
|
569 |
+
]
|
570 |
+
},
|
571 |
+
{
|
572 |
+
"cell_type": "markdown",
|
573 |
+
"metadata": {
|
574 |
+
"id": "9k9TKR1DhWI2"
|
575 |
+
},
|
576 |
+
"source": [
|
577 |
+
"### Conclusion\n",
|
578 |
+
"\n",
|
579 |
+
"Now that you're accessing `gpt-3.5-turbo` through an API, developer style, let's move on to creating a simple application powered by `gpt-3.5-turbo`!\n",
|
580 |
+
"\n",
|
581 |
+
"You can find the rest of the steps in [this](https://github.com/AI-Maker-Space/Beyond-ChatGPT/tree/main) repository!"
|
582 |
+
]
|
583 |
+
},
|
584 |
+
{
|
585 |
+
"cell_type": "markdown",
|
586 |
+
"metadata": {
|
587 |
+
"id": "5rGI1nJeqeO_"
|
588 |
+
},
|
589 |
+
"source": [
|
590 |
+
"This notebook was authored by [Chris Alexiuk](https://www.linkedin.com/in/csalexiuk/)"
|
591 |
+
]
|
592 |
+
}
|
593 |
+
],
|
594 |
+
"metadata": {
|
595 |
+
"colab": {
|
596 |
+
"provenance": []
|
597 |
+
},
|
598 |
+
"kernelspec": {
|
599 |
+
"display_name": "Python 3",
|
600 |
+
"name": "python3"
|
601 |
+
},
|
602 |
+
"language_info": {
|
603 |
+
"codemirror_mode": {
|
604 |
+
"name": "ipython",
|
605 |
+
"version": 3
|
606 |
+
},
|
607 |
+
"file_extension": ".py",
|
608 |
+
"mimetype": "text/x-python",
|
609 |
+
"name": "python",
|
610 |
+
"nbconvert_exporter": "python",
|
611 |
+
"pygments_lexer": "ipython3",
|
612 |
+
"version": "3.11.8"
|
613 |
+
}
|
614 |
+
},
|
615 |
+
"nbformat": 4,
|
616 |
+
"nbformat_minor": 0
|
617 |
+
}
|