{
"cells": [
{
"cell_type": "markdown",
"id": "fe12c203-e6a6-452c-a655-afb8a03a4ff5",
"metadata": {},
"source": [
"# End of week 1 exercise Solution Ollama with streaming\n",
"\n",
"A tool that takes a technical question, and responds with an explanation."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c1070317-3ed9-4659-abe3-828943230e03",
"metadata": {},
"outputs": [],
"source": [
"# Imports\n",
"\n",
"import ollama\n",
"import requests\n",
"from IPython.display import Markdown, display"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4a456906-915a-4bfd-bb9d-57e505c5093f",
"metadata": {},
"outputs": [],
"source": [
"# Constants\n",
"\n",
"MODEL_LLAMA = 'llama3.2'\n",
"MODEL_LLAMA1b = \"llama3.2:1b\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3f0d0137-52b0-47a8-81a8-11a90a010798",
"metadata": {},
"outputs": [],
"source": [
"# Environment\n",
"\n",
"system_prompt = \"\"\"\n",
"You are an assistant that takes a technical question and respond with an explanation.\n",
"\"\"\"\n",
"\n",
"question = \"\"\"\n",
"Please explain what this code does and why:\n",
"yield from {book.get(\"author\") for book in books if book.get(\"author\")}\n",
"\"\"\"\n",
"\n",
"question2 = \"\"\"\n",
"What is the purpose of using yield from in the following code, and how does it differ from a standard for loop with yield?\n",
"yield from {book.get(\"author\") for book in books if book.get(\"author\")}\n",
"\"\"\"\n",
"\n",
"user_prompt = \"Answer these two questions in detail please, Question1:\" + question + \"Question2:\" + question2\n",
"\n",
"def message():\n",
" return [\n",
" {\"role\": \"system\", \"content\": system_prompt},\n",
" {\"role\": \"user\", \"content\": user_prompt}\n",
" ]"
]
},
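{
"cell_type": "markdown",
"id": "7d3b9a21-4c5e-4f8a-9b2d-6e1f0a3c8d47",
"metadata": {},
"source": [
"As a quick sanity check (an addition, not part of the original exercise), the cell below runs the expression the questions ask about against a small made-up `books` list, so the model's explanation can be compared with the actual behaviour."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e8a4f6b2-1d3c-4e7a-8f9b-2c5d6a0e1b34",
"metadata": {},
"outputs": [],
"source": [
"# Illustrative sketch only: `books` is a hypothetical sample, not data from the exercise\n",
"books = [\n",
"    {\"title\": \"Book A\", \"author\": \"Alice\"},\n",
"    {\"title\": \"Book B\", \"author\": \"Bob\"},\n",
"    {\"title\": \"Book C\"},  # no author key\n",
"    {\"title\": \"Book D\", \"author\": \"Alice\"},  # duplicate author\n",
"]\n",
"\n",
"def unique_authors():\n",
"    # The set comprehension collects each distinct, non-missing author;\n",
"    # yield from then yields them one at a time from that set\n",
"    yield from {book.get(\"author\") for book in books if book.get(\"author\")}\n",
"\n",
"print(sorted(unique_authors()))  # ['Alice', 'Bob']"
]
},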
{
"cell_type": "code",
"execution_count": null,
"id": "8f7c8ea8-4082-4ad0-8751-3301adcf6538",
"metadata": {},
"outputs": [],
"source": [
"# Llama 3.2 answer, with streaming\n",
"\n",
"def llama():\n",
" response = ollama.chat(\n",
" model = MODEL_LLAMA,\n",
" messages = message(),\n",
" stream =True\n",
" )\n",
" full_response = \"\"\n",
" display_handle = display(Markdown(\"\"), display_id=True)\n",
" for chunk in response:\n",
" content = chunk.get(\"message\", {}).get(\"content\", \"\")\n",
" if content:\n",
" full_response += content\n",
" display_handle.update(Markdown(full_response))\n",
"llama()"
]
},
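{
"cell_type": "markdown",
"id": "c4d8e2f6-7a1b-4c3d-9e5f-0a2b4c6d8e1a",
"metadata": {},
"source": [
"An optional variant (an addition, not part of the original solution): the same streaming call, but printed to stdout so it also works outside Jupyter. It assumes the same dict-style chunks used above."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f1e2d3c4-b5a6-4978-8c7d-6e5f4a3b2c1d",
"metadata": {},
"outputs": [],
"source": [
"# Hedged sketch: stream to stdout instead of rendering Markdown\n",
"def llama_plain():\n",
"    stream = ollama.chat(model=MODEL_LLAMA, messages=message(), stream=True)\n",
"    for chunk in stream:\n",
"        # Each chunk carries an incremental piece of the assistant message\n",
"        print(chunk.get(\"message\", {}).get(\"content\", \"\"), end=\"\", flush=True)\n",
"\n",
"# llama_plain()  # uncomment to run"
]
},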
{
"cell_type": "code",
"execution_count": null,
"id": "342a470c-9aab-4051-ad21-514dceec76eb",
"metadata": {},
"outputs": [],
"source": [
"# Llama 3.2:1b answer\n",
"\n",
"def llama():\n",
" response = ollama.chat(\n",
" model = MODEL_LLAMA1b,\n",
" messages = message()\n",
" )\n",
" return display(Markdown(response['message']['content']))\n",
"\n",
"llama()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.7"
}
},
"nbformat": 4,
"nbformat_minor": 5
}