Delete 4_lab4.ipynb

4_lab4.ipynb — DELETED (+0 −445)

The content of the deleted notebook follows.
## The first big project - Professionally You!

### And, Tool use.

### But first: introducing Pushover

Pushover is a nifty tool for sending Push Notifications to your phone.

It's super easy to set up and install!

Simply visit https://pushover.net/, sign up for a free account, and create your API keys.

As student Ron pointed out (thank you Ron!), there are actually 2 tokens to create in Pushover:
1. The User token, which you get from the home page of Pushover
2. The Application token, which you get by going to https://pushover.net/apps/build and creating an app

(This is so you can choose to organize your push notifications into different apps in the future.)

Add to your `.env` file:

```
PUSHOVER_USER=put_your_user_token_here
PUSHOVER_TOKEN=put_the_application_level_token_here
```

And install the Pushover app on your phone.
```python
# imports

from dotenv import load_dotenv
from openai import OpenAI
import json
import os
import requests
from pypdf import PdfReader
import gradio as gr
```
```python
# The usual start

load_dotenv(override=True)
openai = OpenAI()
```
```python
# For pushover

pushover_user = os.getenv("PUSHOVER_USER")
pushover_token = os.getenv("PUSHOVER_TOKEN")
pushover_url = "https://api.pushover.net/1/messages.json"
```
```python
def push(message):
    print(f"Push: {message}")
    payload = {"user": pushover_user, "token": pushover_token, "message": message}
    requests.post(pushover_url, data=payload)
```
```python
push("HEY!!")
```
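The `push` function above fires and forgets. If notifications don't seem to arrive, here is an optional hardened variant, a minimal sketch assuming the same `pushover_*` variables above (`push_checked` is an illustrative name, not part of the original notebook): it adds a timeout and surfaces HTTP errors such as bad tokens.

```python
# Optional variant of push() that surfaces HTTP errors and avoids hanging forever.
def push_checked(message, timeout=10):
    print(f"Push: {message}")
    payload = {"user": pushover_user, "token": pushover_token, "message": message}
    response = requests.post(pushover_url, data=payload, timeout=timeout)
    response.raise_for_status()  # raises if Pushover rejected the request (e.g. bad tokens)
    return response.json()
```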
```python
def record_user_details(email, name="Name not provided", notes="not provided"):
    push(f"Recording interest from {name} with email {email} and notes {notes}")
    return {"recorded": "ok"}
```
```python
def record_unknown_question(question):
    push(f"Recording {question} asked that I couldn't answer")
    return {"recorded": "ok"}
```
```python
record_user_details_json = {
    "name": "record_user_details",
    "description": "Use this tool to record that a user is interested in being in touch and provided an email address",
    "parameters": {
        "type": "object",
        "properties": {
            "email": {
                "type": "string",
                "description": "The email address of this user"
            },
            "name": {
                "type": "string",
                "description": "The user's name, if they provided it"
            },
            "notes": {
                "type": "string",
                "description": "Any additional information about the conversation that's worth recording to give context"
            }
        },
        "required": ["email"],
        "additionalProperties": False
    }
}
```
```python
record_unknown_question_json = {
    "name": "record_unknown_question",
    "description": "Always use this tool to record any question that couldn't be answered as you didn't know the answer",
    "parameters": {
        "type": "object",
        "properties": {
            "question": {
                "type": "string",
                "description": "The question that couldn't be answered"
            }
        },
        "required": ["question"],
        "additionalProperties": False
    }
}
```
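Writing these dictionaries by hand is fine for two tools, but it gets repetitive as you add more. As an optional sketch (not part of the original lab), you could derive a schema of this shape from a function's signature, assuming every parameter is a plain string; `tool_schema` and `param_descriptions` are illustrative names.

```python
# A sketch: build an OpenAI-style function schema from a function signature,
# treating every parameter as a string. Parameters without defaults become required.
import inspect

def tool_schema(fn, description, param_descriptions=None):
    param_descriptions = param_descriptions or {}
    sig = inspect.signature(fn)
    properties = {
        name: {"type": "string", "description": param_descriptions.get(name, name)}
        for name in sig.parameters
    }
    required = [name for name, p in sig.parameters.items()
                if p.default is inspect.Parameter.empty]
    return {
        "name": fn.__name__,
        "description": description,
        "parameters": {
            "type": "object",
            "properties": properties,
            "required": required,
            "additionalProperties": False,
        },
    }

# For example, this reproduces the shape of record_user_details_json:
# tool_schema(record_user_details,
#             "Use this tool to record that a user is interested in being in touch and provided an email address",
#             {"email": "The email address of this user"})
```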
```python
tools = [{"type": "function", "function": record_user_details_json},
         {"type": "function", "function": record_unknown_question_json}]
```
```python
tools
```
```python
# This function can take a list of tool calls, and run them. This is the IF statement!!

def handle_tool_calls(tool_calls):
    results = []
    for tool_call in tool_calls:
        tool_name = tool_call.function.name
        arguments = json.loads(tool_call.function.arguments)
        print(f"Tool called: {tool_name}", flush=True)

        # THE BIG IF STATEMENT!!!

        if tool_name == "record_user_details":
            result = record_user_details(**arguments)
        elif tool_name == "record_unknown_question":
            result = record_unknown_question(**arguments)

        results.append({"role": "tool", "content": json.dumps(result), "tool_call_id": tool_call.id})
    return results
```
```python
globals()["record_unknown_question"]("this is a really hard question")
```
```python
# This is a more elegant way that avoids the IF statement.

def handle_tool_calls(tool_calls):
    results = []
    for tool_call in tool_calls:
        tool_name = tool_call.function.name
        arguments = json.loads(tool_call.function.arguments)
        print(f"Tool called: {tool_name}", flush=True)
        tool = globals().get(tool_name)
        result = tool(**arguments) if tool else {}
        results.append({"role": "tool", "content": json.dumps(result), "tool_call_id": tool_call.id})
    return results
```
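Looking tools up in `globals()` works, but it will happily call any top-level function the model names. A minimal sketch of a stricter alternative, assuming the two tool functions above (`TOOL_FUNCTIONS` is an illustrative name): an explicit registry so only functions you list can be dispatched.

```python
# A stricter variant: dispatch only to functions explicitly registered as tools.
TOOL_FUNCTIONS = {
    "record_user_details": record_user_details,
    "record_unknown_question": record_unknown_question,
}

def handle_tool_calls(tool_calls):
    results = []
    for tool_call in tool_calls:
        tool_name = tool_call.function.name
        arguments = json.loads(tool_call.function.arguments)
        print(f"Tool called: {tool_name}", flush=True)
        tool = TOOL_FUNCTIONS.get(tool_name)
        result = tool(**arguments) if tool else {"error": f"unknown tool {tool_name}"}
        results.append({"role": "tool", "content": json.dumps(result), "tool_call_id": tool_call.id})
    return results
```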
```python
reader = PdfReader("me/linkedin.pdf")
linkedin = ""
for page in reader.pages:
    text = page.extract_text()
    if text:
        linkedin += text

with open("me/summary.txt", "r", encoding="utf-8") as f:
    summary = f.read()

name = "Ed Donner"
```
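If you haven't created your own `me/linkedin.pdf` and `me/summary.txt` yet, the cell above will fail. A small optional sketch (not part of the original notebook) that falls back to empty context so you can keep going while you prepare those files:

```python
# Optional fallback: use empty context if the "me" files aren't there yet.
from pathlib import Path
from pypdf import PdfReader

linkedin = ""
if Path("me/linkedin.pdf").exists():
    reader = PdfReader("me/linkedin.pdf")
    linkedin = "".join(page.extract_text() or "" for page in reader.pages)

summary = ""
if Path("me/summary.txt").exists():
    summary = Path("me/summary.txt").read_text(encoding="utf-8")
```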
```python
system_prompt = f"You are acting as {name}. You are answering questions on {name}'s website, \
particularly questions related to {name}'s career, background, skills and experience. \
Your responsibility is to represent {name} for interactions on the website as faithfully as possible. \
You are given a summary of {name}'s background and LinkedIn profile which you can use to answer questions. \
Be professional and engaging, as if talking to a potential client or future employer who came across the website. \
If you don't know the answer to any question, use your record_unknown_question tool to record the question that you couldn't answer, even if it's about something trivial or unrelated to career. \
If the user is engaging in discussion, try to steer them towards getting in touch via email; ask for their email and record it using your record_user_details tool. "

system_prompt += f"\n\n## Summary:\n{summary}\n\n## LinkedIn Profile:\n{linkedin}\n\n"
system_prompt += f"With this context, please chat with the user, always staying in character as {name}."
```
```python
def chat(message, history):
    messages = [{"role": "system", "content": system_prompt}] + history + [{"role": "user", "content": message}]
    done = False
    while not done:

        # This is the call to the LLM - see that we pass in the tools json

        response = openai.chat.completions.create(model="gpt-4o-mini", messages=messages, tools=tools)

        finish_reason = response.choices[0].finish_reason

        # If the LLM wants to call a tool, we do that!

        if finish_reason == "tool_calls":
            message = response.choices[0].message
            tool_calls = message.tool_calls
            results = handle_tool_calls(tool_calls)
            messages.append(message)
            messages.extend(results)
        else:
            done = True
    return response.choices[0].message.content
```
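You can exercise the tool-calling loop before wiring up a UI. For example, asking something the context can't answer will typically trigger the `record_unknown_question` tool and a push notification (a quick optional check, not part of the original notebook):

```python
# Quick smoke test without Gradio: an off-topic question should get recorded as unknown.
print(chat("What is your favourite prime number?", []))
```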
```python
gr.ChatInterface(chat, type="messages").launch()
```
## And now for deployment

This code is in `app.py`

We will deploy to HuggingFace Spaces. Thank you student Robert M for improving these instructions.

Before you start: remember to update the files in the "me" directory - your LinkedIn profile and summary.txt - so that it talks about you!
Also check that there's no README file within the 1_foundations directory. If there is one, please delete it. The deploy process creates a new README file in this directory for you.

1. Visit https://huggingface.co and set up an account
2. From the Avatar menu on the top right, choose Access Tokens. Choose "Create New Token". Give it WRITE permissions.
3. Take this token and add it to your .env file: `HF_TOKEN=hf_xxx` - and see the note below if this token doesn't seem to get picked up during deployment
4. From the 1_foundations folder, enter: `uv run gradio deploy`. If for some reason this still wants you to enter your HF token, interrupt it with ctrl+c and run this instead: `uv run dotenv -f ../.env run -- uv run gradio deploy`, which forces all your keys to be set as environment variables
5. Follow its instructions: name it "career_conversation", specify app.py, choose cpu-basic as the hardware, say Yes to needing to supply secrets, provide your OpenAI API key, your Pushover user and token, and say "no" to github actions.

#### Extra note about the HuggingFace token

A couple of students have mentioned that HuggingFace doesn't detect their token, even though it's in the .env file. Here are things to try:
1. Restart Cursor
2. Rerun load_dotenv(override=True) and use a new terminal (the + button on the top right of the Terminal)
3. In the Terminal, run this before the gradio deploy (PowerShell syntax): `$env:HF_TOKEN = "hf_XXXX"`

Thank you James and Martins for these tips.

#### More about these secrets:

If you're confused by what's going on with these secrets: it just wants you to enter the key name and value for each of your secrets -- so you would enter:
`OPENAI_API_KEY`
Followed by:
`sk-proj-...`

And if you don't want to set secrets this way, or something goes wrong with it, it's no problem - you can change your secrets later:
1. Log in to the HuggingFace website
2. Go to your profile screen via the Avatar menu on the top right
3. Select the Space you deployed
4. Click on the Settings wheel on the top right
5. You can scroll down to change your secrets, delete the space, etc.

#### And now you should be deployed!

Here is mine: https://huggingface.co/spaces/ed-donner/Career_Conversation

I just got a push notification that a student asked me how they can become President of their country 😂😂

For more information on deployment:

https://www.gradio.app/guides/sharing-your-app#hosting-on-hf-spaces

To delete your Space in the future:
1. Log in to HuggingFace
2. From the Avatar menu, select your profile
3. Click on the Space itself and select the settings wheel on the top right
4. Scroll to the Delete section at the bottom
5. ALSO: delete the README file that Gradio may have created inside this 1_foundations folder (otherwise it won't ask you the questions the next time you do a gradio deploy)
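Before running `uv run gradio deploy`, it can save a round trip to confirm that the secrets you're about to supply are actually loaded in your environment. A small optional check (key names taken from this lab's `.env` and the deploy prompts):

```python
# Optional pre-flight check that the secrets the Space will need are set locally.
import os

for key in ["OPENAI_API_KEY", "PUSHOVER_USER", "PUSHOVER_TOKEN", "HF_TOKEN"]:
    print(f"{key}: {'set' if os.getenv(key) else 'MISSING'}")
```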
<table style="margin: 0; text-align: left; width:100%">
    <tr>
        <td style="width: 150px; height: 150px; vertical-align: middle;">
            <img src="../assets/exercise.png" width="150" height="150" style="display: block;" />
        </td>
        <td>
            <h2 style="color:#ff7800;">Exercise</h2>
            <span style="color:#ff7800;">• First and foremost, deploy this for yourself! It's a real, valuable tool - the future resume.<br/>
            • Next, improve the resources - add better context about yourself. If you know RAG, then add a knowledge base about you.<br/>
            • Add in more tools! For example, a SQL database with common Q&A that the LLM can read from and write to (see the sketch below).<br/>
            • Bring in the Evaluator from the last lab, and add other Agentic patterns.
            </span>
        </td>
    </tr>
</table>
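For the SQL idea in the exercise above, here is one possible shape for it: a minimal sketch using the standard-library `sqlite3`. The table name, file name, and function names are illustrative, and you would still need to write matching tool schemas and register these functions as tools.

```python
# A sketch of a simple Q&A store the LLM could read from and write to via tools.
import sqlite3

conn = sqlite3.connect("qa.db")
conn.execute("CREATE TABLE IF NOT EXISTS qa (question TEXT PRIMARY KEY, answer TEXT)")
conn.commit()

def lookup_answer(question):
    row = conn.execute("SELECT answer FROM qa WHERE question = ?", (question,)).fetchone()
    return {"answer": row[0]} if row else {"answer": None}

def store_answer(question, answer):
    conn.execute("INSERT OR REPLACE INTO qa (question, answer) VALUES (?, ?)", (question, answer))
    conn.commit()
    return {"stored": "ok"}
```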
<table style="margin: 0; text-align: left; width:100%">
    <tr>
        <td style="width: 150px; height: 150px; vertical-align: middle;">
            <img src="../assets/business.png" width="150" height="150" style="display: block;" />
        </td>
        <td>
            <h2 style="color:#00bfff;">Commercial implications</h2>
            <span style="color:#00bfff;">Aside from the obvious (your career alter-ego), this has business applications in any situation where you need an AI assistant with domain expertise and an ability to interact with the real world.
            </span>
        </td>
    </tr>
</table>
*(Notebook metadata: ".venv" Python 3 kernel, Python 3.12.9, nbformat 4.2.)*