Spaces:
Runtime error
Runtime error
Upload folder using huggingface_hub
Browse files- .gitignore +5 -0
- README.md +3 -9
- Vector_Database_Generator.ipynb +740 -0
- main.py +194 -0
.gitignore
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
documents/
|
2 |
+
|
3 |
+
documents/CV/
|
4 |
+
|
5 |
+
.env
|
README.md
CHANGED
@@ -1,12 +1,6 @@
|
|
1 |
---
|
2 |
-
title:
|
3 |
-
|
4 |
-
colorFrom: blue
|
5 |
-
colorTo: purple
|
6 |
sdk: gradio
|
7 |
-
sdk_version: 5.
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
---
|
11 |
-
|
12 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
1 |
---
|
2 |
+
title: Djallels_Portfolio
|
3 |
+
app_file: main.py
|
|
|
|
|
4 |
sdk: gradio
|
5 |
+
sdk_version: 5.25.0
|
|
|
|
|
6 |
---
|
|
|
|
Vector_Database_Generator.ipynb
ADDED
@@ -0,0 +1,740 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"cells": [
|
3 |
+
{
|
4 |
+
"cell_type": "code",
|
5 |
+
"execution_count": 143,
|
6 |
+
"metadata": {},
|
7 |
+
"outputs": [],
|
8 |
+
"source": [
|
9 |
+
"from dotenv import load_dotenv\n",
|
10 |
+
"import os\n",
|
11 |
+
"load_dotenv(override=True)\n",
|
12 |
+
"chroma_client=os.environ[\"CHROMA_DB_CLIEN\"]\n",
|
13 |
+
"chroma_token=os.environ[\"CHROMA_TOKEN\"]"
|
14 |
+
]
|
15 |
+
},
|
16 |
+
{
|
17 |
+
"cell_type": "code",
|
18 |
+
"execution_count": 2,
|
19 |
+
"metadata": {},
|
20 |
+
"outputs": [],
|
21 |
+
"source": [
|
22 |
+
"import chromadb\n",
|
23 |
+
"from chromadb.config import Settings\n",
|
24 |
+
"client = chromadb.HttpClient(host=chroma_client, port=8000, settings=Settings(\n",
|
25 |
+
" chroma_client_auth_provider=\"chromadb.auth.token_authn.TokenAuthClientProvider\",\n",
|
26 |
+
" chroma_client_auth_credentials=chroma_token\n",
|
27 |
+
" ))\n"
|
28 |
+
]
|
29 |
+
},
|
30 |
+
{
|
31 |
+
"cell_type": "code",
|
32 |
+
"execution_count": 3,
|
33 |
+
"metadata": {},
|
34 |
+
"outputs": [],
|
35 |
+
"source": [
|
36 |
+
"from sentence_transformers import SentenceTransformer\n",
|
37 |
+
"model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')"
|
38 |
+
]
|
39 |
+
},
|
40 |
+
{
|
41 |
+
"cell_type": "code",
|
42 |
+
"execution_count": 4,
|
43 |
+
"metadata": {},
|
44 |
+
"outputs": [],
|
45 |
+
"source": [
|
46 |
+
"collection = client.create_collection(\"all-my-projects\")\n"
|
47 |
+
]
|
48 |
+
},
|
49 |
+
{
|
50 |
+
"cell_type": "code",
|
51 |
+
"execution_count": 36,
|
52 |
+
"metadata": {},
|
53 |
+
"outputs": [
|
54 |
+
{
|
55 |
+
"name": "stdout",
|
56 |
+
"output_type": "stream",
|
57 |
+
"text": [
|
58 |
+
"done with : 12\n"
|
59 |
+
]
|
60 |
+
}
|
61 |
+
],
|
62 |
+
"source": [
|
63 |
+
"document_path=\".\\documents\"\n",
|
64 |
+
"\n",
|
65 |
+
"for i in range(1,13):\n",
|
66 |
+
" with open(f\"{document_path}\\{i}.text\",\"r\") as f:\n",
|
67 |
+
" document=[f.read()]\n",
|
68 |
+
" vectors=model.encode(document).astype(float).tolist()\n",
|
69 |
+
" id = f\"id_{i}\"\n",
|
70 |
+
" ids=[id]\n",
|
71 |
+
" metadatas=[{\"type\":\"project by djallel\"}]\n",
|
72 |
+
" collection.add(\n",
|
73 |
+
" ids=ids,\n",
|
74 |
+
" documents=document,\n",
|
75 |
+
" embeddings=vectors,\n",
|
76 |
+
" metadatas=metadatas,\n",
|
77 |
+
" )\n",
|
78 |
+
"print(\"done with :\", i)\n",
|
79 |
+
"\n",
|
80 |
+
" \n"
|
81 |
+
]
|
82 |
+
},
|
83 |
+
{
|
84 |
+
"cell_type": "code",
|
85 |
+
"execution_count": 121,
|
86 |
+
"metadata": {},
|
87 |
+
"outputs": [],
|
88 |
+
"source": [
|
89 |
+
"def make_context(similars):\n",
|
90 |
+
" if len(similars)==0:\n",
|
91 |
+
" return \"\"\n",
|
92 |
+
" message = \"To provide some context, here are some projects done by djallel that might be related to the question that you need to answer.\\n\\n\"\n",
|
93 |
+
" for similar in similars:\n",
|
94 |
+
" message += f\"Potentially related projects:\\n{similar}\\n\\n\"\n",
|
95 |
+
" return message"
|
96 |
+
]
|
97 |
+
},
|
98 |
+
{
|
99 |
+
"cell_type": "code",
|
100 |
+
"execution_count": 43,
|
101 |
+
"metadata": {},
|
102 |
+
"outputs": [],
|
103 |
+
"source": [
|
104 |
+
"def vector(question):\n",
|
105 |
+
" return model.encode([question])"
|
106 |
+
]
|
107 |
+
},
|
108 |
+
{
|
109 |
+
"cell_type": "code",
|
110 |
+
"execution_count": 109,
|
111 |
+
"metadata": {},
|
112 |
+
"outputs": [],
|
113 |
+
"source": [
|
114 |
+
"def find_similars(question):\n",
|
115 |
+
" results = collection.query(query_embeddings=vector(question).astype(float).tolist(), n_results=5,include=['documents',\"distances\"])\n",
|
116 |
+
" documents = results['documents'][0][:]\n",
|
117 |
+
" distances=results['distances'][0][:]\n",
|
118 |
+
" filtered_documents = [\n",
|
119 |
+
" doc for doc, dist in zip(documents, distances) if dist < 1.7\n",
|
120 |
+
"]\n",
|
121 |
+
" return filtered_documents"
|
122 |
+
]
|
123 |
+
},
|
124 |
+
{
|
125 |
+
"cell_type": "code",
|
126 |
+
"execution_count": 115,
|
127 |
+
"metadata": {},
|
128 |
+
"outputs": [
|
129 |
+
{
|
130 |
+
"data": {
|
131 |
+
"text/plain": [
|
132 |
+
"True"
|
133 |
+
]
|
134 |
+
},
|
135 |
+
"execution_count": 115,
|
136 |
+
"metadata": {},
|
137 |
+
"output_type": "execute_result"
|
138 |
+
}
|
139 |
+
],
|
140 |
+
"source": [
|
141 |
+
"len(find_similars(\"Dance dinner launch hello\"))==0"
|
142 |
+
]
|
143 |
+
},
|
144 |
+
{
|
145 |
+
"cell_type": "code",
|
146 |
+
"execution_count": null,
|
147 |
+
"metadata": {},
|
148 |
+
"outputs": [],
|
149 |
+
"source": [
|
150 |
+
"matches=[]"
|
151 |
+
]
|
152 |
+
},
|
153 |
+
{
|
154 |
+
"cell_type": "code",
|
155 |
+
"execution_count": 73,
|
156 |
+
"metadata": {},
|
157 |
+
"outputs": [],
|
158 |
+
"source": [
|
159 |
+
"from openai import OpenAI\n",
|
160 |
+
"openai = OpenAI()"
|
161 |
+
]
|
162 |
+
},
|
163 |
+
{
|
164 |
+
"cell_type": "code",
|
165 |
+
"execution_count": 80,
|
166 |
+
"metadata": {},
|
167 |
+
"outputs": [],
|
168 |
+
"source": [
|
169 |
+
"from pypdf import PdfReader\n",
|
170 |
+
"import gradio as gr\n",
|
171 |
+
"reader = PdfReader(\"documents/CV/CV.pdf\")\n",
|
172 |
+
"cv = \"\"\n",
|
173 |
+
"for page in reader.pages:\n",
|
174 |
+
" text = page.extract_text()\n",
|
175 |
+
" if text:\n",
|
176 |
+
" cv += text\n",
|
177 |
+
"\n",
|
178 |
+
"\n",
|
179 |
+
"\n",
|
180 |
+
"name = \"Djallel BRAHMIA\""
|
181 |
+
]
|
182 |
+
},
|
183 |
+
{
|
184 |
+
"cell_type": "code",
|
185 |
+
"execution_count": 84,
|
186 |
+
"metadata": {},
|
187 |
+
"outputs": [],
|
188 |
+
"source": [
|
189 |
+
"system_prompt = f\"You are acting as {name}. You are answering questions on {name}'s website, \\\n",
|
190 |
+
"particularly questions related to {name}'s career, background, skills and experience. \\\n",
|
191 |
+
"Your responsibility is to represent {name} for interactions on the website as faithfully as possible. \\\n",
|
192 |
+
"You are given a summary of {name}'s background and LinkedIn profile which you can use to answer questions. \\\n",
|
193 |
+
"Be professional and engaging, as if talking to a potential client or future employer who came across the website. \\\n",
|
194 |
+
" \"\n",
|
195 |
+
"system_prompt += f\"\\n\\## CV:\\n{cv}\\n\\n\"\n",
|
196 |
+
"\n",
|
197 |
+
"system_prompt += f\"With this context, please chat with the user, always staying in character as {name}.\"\n"
|
198 |
+
]
|
199 |
+
},
|
200 |
+
{
|
201 |
+
"cell_type": "code",
|
202 |
+
"execution_count": 131,
|
203 |
+
"metadata": {},
|
204 |
+
"outputs": [],
|
205 |
+
"source": [
|
206 |
+
"\n",
|
207 |
+
"record_unknown_question_json = {\n",
|
208 |
+
" \"name\": \"record_unknown_question\",\n",
|
209 |
+
" \"description\": \"Always use this tool to record any question that couldn't be answered as you didn't know the answer\",\n",
|
210 |
+
" \"parameters\": {\n",
|
211 |
+
" \"type\": \"object\",\n",
|
212 |
+
" \"properties\": {\n",
|
213 |
+
" \"question\": {\n",
|
214 |
+
" \"type\": \"string\",\n",
|
215 |
+
" \"description\": \"The question that couldn't be answered\"\n",
|
216 |
+
" },\n",
|
217 |
+
" },\n",
|
218 |
+
" \"required\": [\"question\"],\n",
|
219 |
+
" \"additionalProperties\": False\n",
|
220 |
+
" }\n",
|
221 |
+
"}"
|
222 |
+
]
|
223 |
+
},
|
224 |
+
{
|
225 |
+
"cell_type": "code",
|
226 |
+
"execution_count": 144,
|
227 |
+
"metadata": {},
|
228 |
+
"outputs": [],
|
229 |
+
"source": [
|
230 |
+
"import requests\n",
|
231 |
+
"\n",
|
232 |
+
"def push(text):\n",
|
233 |
+
" requests.post(\n",
|
234 |
+
" \"https://api.pushover.net/1/messages.json\",\n",
|
235 |
+
" data={\n",
|
236 |
+
" \"token\": os.getenv(\"PUSHOVER_TOKEN\"),\n",
|
237 |
+
" \"user\": os.getenv(\"PUSHOVER_USER\"),\n",
|
238 |
+
" \"message\": text,\n",
|
239 |
+
" }\n",
|
240 |
+
" )"
|
241 |
+
]
|
242 |
+
},
|
243 |
+
{
|
244 |
+
"cell_type": "code",
|
245 |
+
"execution_count": 145,
|
246 |
+
"metadata": {},
|
247 |
+
"outputs": [],
|
248 |
+
"source": [
|
249 |
+
"push(\"test\")"
|
250 |
+
]
|
251 |
+
},
|
252 |
+
{
|
253 |
+
"cell_type": "code",
|
254 |
+
"execution_count": 146,
|
255 |
+
"metadata": {},
|
256 |
+
"outputs": [],
|
257 |
+
"source": [
|
258 |
+
"def record_unknown_question(question):\n",
|
259 |
+
" push(f\"Recording {question}\")\n",
|
260 |
+
" return {\"recorded\": \"ok\"}"
|
261 |
+
]
|
262 |
+
},
|
263 |
+
{
|
264 |
+
"cell_type": "code",
|
265 |
+
"execution_count": 148,
|
266 |
+
"metadata": {},
|
267 |
+
"outputs": [],
|
268 |
+
"source": [
|
269 |
+
"\n",
|
270 |
+
"tools = [{\"type\": \"function\", \"function\": record_unknown_question_json}]"
|
271 |
+
]
|
272 |
+
},
|
273 |
+
{
|
274 |
+
"cell_type": "code",
|
275 |
+
"execution_count": 150,
|
276 |
+
"metadata": {},
|
277 |
+
"outputs": [],
|
278 |
+
"source": [
|
279 |
+
"def handle_tool_call(tool_calls):\n",
|
280 |
+
" results = []\n",
|
281 |
+
" for tool_call in tool_calls:\n",
|
282 |
+
" tool_name = tool_call.function.name\n",
|
283 |
+
" arguments = json.loads(tool_call.function.arguments)\n",
|
284 |
+
" print(f\"Tool called: {tool_name}\", flush=True)\n",
|
285 |
+
" tool = globals().get(tool_name)\n",
|
286 |
+
" result = tool(**arguments) if tool else {}\n",
|
287 |
+
" results.append({\"role\": \"tool\",\"content\": json.dumps(result),\"tool_call_id\": tool_call.id})\n",
|
288 |
+
" return results"
|
289 |
+
]
|
290 |
+
},
|
291 |
+
{
|
292 |
+
"cell_type": "code",
|
293 |
+
"execution_count": 128,
|
294 |
+
"metadata": {},
|
295 |
+
"outputs": [],
|
296 |
+
"source": [
|
297 |
+
"def chat(message,history):\n",
|
298 |
+
" similars=find_similars(message)\n",
|
299 |
+
"\n",
|
300 |
+
" message+=make_context(similars)\n",
|
301 |
+
" print(message)\n",
|
302 |
+
" messages = [{\"role\": \"system\", \"content\": system_prompt}]+history + [{\"role\": \"user\", \"content\": message}]\n",
|
303 |
+
"\n",
|
304 |
+
" \n",
|
305 |
+
" # This is the call to the LLM - see that we pass in the tools json\n",
|
306 |
+
"\n",
|
307 |
+
" response = openai.chat.completions.create(model=\"gpt-4o-mini\", messages=messages)\n",
|
308 |
+
"\n",
|
309 |
+
" \n",
|
310 |
+
" # If the LLM wants to call a tool, we do that!\n",
|
311 |
+
" \n",
|
312 |
+
" return response.choices[0].message.content"
|
313 |
+
]
|
314 |
+
},
|
315 |
+
{
|
316 |
+
"cell_type": "code",
|
317 |
+
"execution_count": 129,
|
318 |
+
"metadata": {},
|
319 |
+
"outputs": [
|
320 |
+
{
|
321 |
+
"name": "stdout",
|
322 |
+
"output_type": "stream",
|
323 |
+
"text": [
|
324 |
+
"did you worked on any vr projects? To provide some context, here are some projects done by djallel that might be related to the question that you need to answer.\n",
|
325 |
+
"\n",
|
326 |
+
"Potentially related projects:\n",
|
327 |
+
"🕶️ VR Hanoi Tower Game – Built with WebXR & React\n",
|
328 |
+
"🎮 Project Overview\n",
|
329 |
+
"VR Hanoi Tower is a fully interactive virtual reality adaptation of the classic Tower of Hanoi puzzle game. Developed using WebXR and React, this immersive experience allows users to play directly in their web browser using VR devices like Oculus Quest or supported desktop headsets.\n",
|
330 |
+
"\n",
|
331 |
+
"Designed for both fun and educational use, the game challenges players to move a stack of discs between rods following the traditional rules of Hanoi, now reimagined in a 3D virtual space.\n",
|
332 |
+
"\n",
|
333 |
+
"🧠 Key Features\n",
|
334 |
+
"🕹️ Interactive 3D Environment\n",
|
335 |
+
"Play the Hanoi Tower puzzle in an immersive VR space using motion controls or click-based interactions.\n",
|
336 |
+
"\n",
|
337 |
+
"🌐 WebXR Integration\n",
|
338 |
+
"Seamlessly supports VR headsets through WebXR APIs – no installations required.\n",
|
339 |
+
"\n",
|
340 |
+
"⚛️ React + Context API for State Management\n",
|
341 |
+
"Smooth and efficient handling of game state, disc positions, and move history using modern React patterns.\n",
|
342 |
+
"\n",
|
343 |
+
"📏 Dynamic Game Logic\n",
|
344 |
+
"Handles disc stacking rules, legal move validation, and move counters.\n",
|
345 |
+
"\n",
|
346 |
+
"🖼️ Responsive UI Overlay\n",
|
347 |
+
"In-game HUD for move tracking, restart button, and user feedback.\n",
|
348 |
+
"\n",
|
349 |
+
"🧰 Tech Stack\n",
|
350 |
+
"Technology\tRole\n",
|
351 |
+
"React\tUI and application logic\n",
|
352 |
+
"WebXR\tVR rendering and device input\n",
|
353 |
+
"Three.js\t3D scene, camera, and object management\n",
|
354 |
+
"React Context API\tGame state management\n",
|
355 |
+
"React Three Fiber (optional)\tDeclarative Three.js for React (if used)\n",
|
356 |
+
"\n",
|
357 |
+
"✨ What I Learned\n",
|
358 |
+
"Building interactive VR applications with WebXR and Three.js\n",
|
359 |
+
"\n",
|
360 |
+
"Managing complex state transitions in React using ContextProvider\n",
|
361 |
+
"\n",
|
362 |
+
"Integrating VR input events into browser-based applications\n",
|
363 |
+
"\n",
|
364 |
+
"Translating abstract logic (Hanoi rules) into 3D environments\n",
|
365 |
+
"\n",
|
366 |
+
"Optimizing rendering and performance for VR platforms\n",
|
367 |
+
"\n",
|
368 |
+
"🚀 Future Enhancements\n",
|
369 |
+
"Add audio feedback and ambient soundscapes for immersive experience\n",
|
370 |
+
"\n",
|
371 |
+
"Implement level selection (3–8 discs)\n",
|
372 |
+
"\n",
|
373 |
+
"Add leaderboards or time tracking for performance scoring\n",
|
374 |
+
"\n",
|
375 |
+
"Add multiplayer co-op mode via WebRTC or WebSockets\n",
|
376 |
+
"\n",
|
377 |
+
"Deploy via WebXR-compatible static hosting (e.g., Vercel, Netlify)\n",
|
378 |
+
"\n",
|
379 |
+
"\n",
|
380 |
+
"\n",
|
381 |
+
"Potentially related projects:\n",
|
382 |
+
"Project Title:\n",
|
383 |
+
"Mapty – Running Route Tracker with GPS and Local Storage\n",
|
384 |
+
"\n",
|
385 |
+
"Project Description:\n",
|
386 |
+
"Mapty is a GPS-based web application built using Vanilla JavaScript that allows runners and athletes to track their physical activity, monitor distance covered, and view mapped routes in real-time. Designed as a lightweight and offline-friendly tool, it leverages the browser’s Geolocation API and local storage to offer persistent tracking without requiring a backend.\n",
|
387 |
+
"\n",
|
388 |
+
"This app is ideal for runners, joggers, or cyclists who want a visual overview of their sessions, including rest points, route history, and total distances, all displayed directly on an interactive map.\n",
|
389 |
+
"\n",
|
390 |
+
"Key Features:\n",
|
391 |
+
"📍 GPS Location Tracking\n",
|
392 |
+
"Utilizes the Geolocation API to automatically detect and log the user’s current location on a map.\n",
|
393 |
+
"\n",
|
394 |
+
"🏃 Workout & Distance Logging\n",
|
395 |
+
"Allows users to record workouts, including type (running/cycling), distance, duration, and pace.\n",
|
396 |
+
"\n",
|
397 |
+
"🗺️ Interactive Map with Routes\n",
|
398 |
+
"Visualizes the route taken during each workout using Leaflet.js, including waypoints, start/end locations, and rest points.\n",
|
399 |
+
"\n",
|
400 |
+
"🧾 Session History with Local Storage\n",
|
401 |
+
"All workout data is persisted using the browser’s local storage, so users can view their full session history even after refreshing or closing the app.\n",
|
402 |
+
"\n",
|
403 |
+
"🔄 Chemin Suivi (Route Tracing)\n",
|
404 |
+
"Tracks the exact path followed during each run, allowing users to retrace their steps visually on the map.\n",
|
405 |
+
"\n",
|
406 |
+
"Technology Stack:\n",
|
407 |
+
"Vanilla JavaScript (ES6+): Core language used for logic and UI interactions\n",
|
408 |
+
"\n",
|
409 |
+
"Leaflet.js: Open-source JavaScript library for mobile-friendly interactive maps\n",
|
410 |
+
"\n",
|
411 |
+
"HTML5 & CSS3: For structured layout and responsive design\n",
|
412 |
+
"\n",
|
413 |
+
"Geolocation API: To access the user's real-time GPS coordinates\n",
|
414 |
+
"\n",
|
415 |
+
"Browser Local Storage: For saving workouts and route data offline\n",
|
416 |
+
"\n",
|
417 |
+
"Responsibilities & Achievements:\n",
|
418 |
+
"Built the complete frontend application with no frameworks, focusing on clean code and performance.\n",
|
419 |
+
"\n",
|
420 |
+
"Integrated the Geolocation API and Leaflet.js for dynamic map rendering.\n",
|
421 |
+
"\n",
|
422 |
+
"Developed a data persistence layer using local storage to simulate real-world offline-first behavior.\n",
|
423 |
+
"\n",
|
424 |
+
"Designed a user-friendly UI to make tracking workouts intuitive and informative\n",
|
425 |
+
"\n",
|
426 |
+
"\n"
|
427 |
+
]
|
428 |
+
},
|
429 |
+
{
|
430 |
+
"data": {
|
431 |
+
"text/plain": [
|
432 |
+
"'Yes, I have worked on a virtual reality project titled **VR Hanoi Tower Game**, which is a fully interactive adaptation of the classic Tower of Hanoi puzzle game. This project leverages WebXR and React, allowing users to immerse themselves in a 3D environment where they can interact with the game using VR devices such as Oculus Quest or supported desktop headsets.\\n\\n### Project Overview:\\nThe **VR Hanoi Tower Game** is designed to offer both fun and educational opportunities, challenging players to strategically move discs between rods according to the traditional game rules, reimagined within a virtual space.\\n\\n### Key Features:\\n- **Interactive 3D Environment**: The game presents a fully interactive experience where users can utilize motion controls or click-based interactions.\\n- **WebXR Integration**: By using the WebXR API, players can engage in the game directly within their web browsers without requiring any installations.\\n- **Dynamic Game Logic**: The application implements robust game mechanics to handle stacking rules, legal moves, and track move history.\\n- **Responsive UI Overlay**: In-game user interface provides feedback, including move tracking and options to restart the game.\\n\\n### Technical Stack:\\n- **React**: For user interface and application logic.\\n- **WebXR**: For rendering and device input in the VR environment.\\n- **Three.js**: For 3D scene management, cameras, and object manipulation.\\n\\n### Learning Outcomes:\\nBuilding this VR application allowed me to gain hands-on experience in creating interactive VR experiences, managing complex state transitions within React, and optimizing performance for VR platforms.\\n\\n### Future Enhancements:\\nI also have ideas for future enhancements, including integrating audio feedback, implementing level selection, and exploring multiplayer options.\\n\\nIf you have any specific questions or would like to learn more about this project or my experiences with VR technology, feel free 
to ask!'"
|
433 |
+
]
|
434 |
+
},
|
435 |
+
"execution_count": 129,
|
436 |
+
"metadata": {},
|
437 |
+
"output_type": "execute_result"
|
438 |
+
}
|
439 |
+
],
|
440 |
+
"source": [
|
441 |
+
"chat(\"did you worked on any vr projects? \",[])"
|
442 |
+
]
|
443 |
+
},
|
444 |
+
{
|
445 |
+
"cell_type": "code",
|
446 |
+
"execution_count": 130,
|
447 |
+
"metadata": {},
|
448 |
+
"outputs": [
|
449 |
+
{
|
450 |
+
"name": "stdout",
|
451 |
+
"output_type": "stream",
|
452 |
+
"text": [
|
453 |
+
"* Running on local URL: http://127.0.0.1:7866\n",
|
454 |
+
"\n",
|
455 |
+
"To create a public link, set `share=True` in `launch()`.\n"
|
456 |
+
]
|
457 |
+
},
|
458 |
+
{
|
459 |
+
"data": {
|
460 |
+
"text/html": [
|
461 |
+
"<div><iframe src=\"http://127.0.0.1:7866/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
|
462 |
+
],
|
463 |
+
"text/plain": [
|
464 |
+
"<IPython.core.display.HTML object>"
|
465 |
+
]
|
466 |
+
},
|
467 |
+
"metadata": {},
|
468 |
+
"output_type": "display_data"
|
469 |
+
},
|
470 |
+
{
|
471 |
+
"data": {
|
472 |
+
"text/plain": []
|
473 |
+
},
|
474 |
+
"execution_count": 130,
|
475 |
+
"metadata": {},
|
476 |
+
"output_type": "execute_result"
|
477 |
+
},
|
478 |
+
{
|
479 |
+
"name": "stdout",
|
480 |
+
"output_type": "stream",
|
481 |
+
"text": [
|
482 |
+
"Did you done anything interesting in IA and machine learning?To provide some context, here are some projects done by djallel that might be related to the question that you need to answer.\n",
|
483 |
+
"\n",
|
484 |
+
"Potentially related projects:\n",
|
485 |
+
"🧠 ML Toolkit – A Customizable Machine Learning Library in Python\n",
|
486 |
+
"📌 Project Overview\n",
|
487 |
+
"ML Toolkit is a Python-based, modular machine learning library built from scratch to provide a flexible and transparent learning and experimentation environment. It supports a wide range of supervised learning algorithms, including both regression and classification models. Designed with extensibility and clarity in mind, it enables users to understand core ML concepts through hands-on model development and testing.\n",
|
488 |
+
"\n",
|
489 |
+
"This toolkit is ideal for those looking to deepen their understanding of machine learning by working directly with algorithmic implementations—without relying on high-level libraries like scikit-learn or TensorFlow.\n",
|
490 |
+
"\n",
|
491 |
+
"🧩 Implemented Models\n",
|
492 |
+
"🔍 Classification Algorithms:\n",
|
493 |
+
"Support Vector Machine (SVM)\n",
|
494 |
+
"\n",
|
495 |
+
"K-Nearest Neighbors (KNN) Classifier\n",
|
496 |
+
"\n",
|
497 |
+
"Gaussian Naive Bayes\n",
|
498 |
+
"\n",
|
499 |
+
"Multinomial Naive Bayes\n",
|
500 |
+
"\n",
|
501 |
+
"Decision Tree Classifier\n",
|
502 |
+
"\n",
|
503 |
+
"Non-Linear Classifier\n",
|
504 |
+
"\n",
|
505 |
+
"📈 Regression Algorithms:\n",
|
506 |
+
"K-Nearest Neighbors Regressor\n",
|
507 |
+
"\n",
|
508 |
+
"Decision Tree Regressor\n",
|
509 |
+
"\n",
|
510 |
+
"Non-Linear Regression\n",
|
511 |
+
"\n",
|
512 |
+
"⚙️ Core Features\n",
|
513 |
+
"✅ Fully Modular Architecture\n",
|
514 |
+
"Easily extend or swap components such as models, optimizers, and loss functions.\n",
|
515 |
+
"\n",
|
516 |
+
"🧠 Manual Gradient Computation\n",
|
517 |
+
"Gain an in-depth understanding of how gradient-based optimization works.\n",
|
518 |
+
"\n",
|
519 |
+
"🔧 Custom Training Loops\n",
|
520 |
+
"Fine-tune learning rates, batch sizes, and convergence conditions.\n",
|
521 |
+
"\n",
|
522 |
+
"🧪 No External ML Libraries\n",
|
523 |
+
"Built entirely in Python with NumPy for linear algebra operations.\n",
|
524 |
+
"\n",
|
525 |
+
"🧰 Tech Stack\n",
|
526 |
+
"Tool\tRole\n",
|
527 |
+
"Python 3\tCore development language\n",
|
528 |
+
"NumPy\tMatrix operations & math utils\n",
|
529 |
+
"Matplotlib*\t(Optional) For plotting and visualization\n",
|
530 |
+
"\n",
|
531 |
+
"\n",
|
532 |
+
"\n",
|
533 |
+
"Potentially related projects:\n",
|
534 |
+
"🏕️ The Wild Oasis – Cabin Booking & Management System\n",
|
535 |
+
"📌 Project Overview\n",
|
536 |
+
"The Wild Oasis is a modern web-based application built to simplify and streamline the management of cabin reservations and staff operations. It offers a secure, intuitive, and real-time experience for accommodation providers, combining powerful backend functionality with a sleek and responsive frontend.\n",
|
537 |
+
"\n",
|
538 |
+
"This project was an opportunity to deepen my expertise in React and Supabase, and to gain hands-on experience in building a complete full-stack solution from scratch.\n",
|
539 |
+
"\n",
|
540 |
+
"🌟 Key Features\n",
|
541 |
+
"👥 User & Role Management\n",
|
542 |
+
"Add new employees or modify existing user profiles.\n",
|
543 |
+
"\n",
|
544 |
+
"Role-based access and secured authentication with Supabase Auth.\n",
|
545 |
+
"\n",
|
546 |
+
"Email verification for account activation.\n",
|
547 |
+
"\n",
|
548 |
+
"🏡 Cabin Management\n",
|
549 |
+
"Add, update, and delete cabin details including capacity and pricing.\n",
|
550 |
+
"\n",
|
551 |
+
"Manage extra services (e.g., breakfast) for more flexible booking options.\n",
|
552 |
+
"\n",
|
553 |
+
"📅 Reservation System\n",
|
554 |
+
"Bookings include check-in/check-out management.\n",
|
555 |
+
"\n",
|
556 |
+
"View detailed booking info and attach extras.\n",
|
557 |
+
"\n",
|
558 |
+
"Ensure smooth daily operations with easy-to-navigate UI.\n",
|
559 |
+
"\n",
|
560 |
+
"📊 Interactive Dashboard\n",
|
561 |
+
"Daily summary of arrivals and departures.\n",
|
562 |
+
"\n",
|
563 |
+
"Visualize data using graphs showing occupancy rates and income trends.\n",
|
564 |
+
"\n",
|
565 |
+
"Gain quick insight into the business’s performance at a glance.\n",
|
566 |
+
"\n",
|
567 |
+
"🔐 Security & Privacy\n",
|
568 |
+
"Authentication and database security handled via Supabase.\n",
|
569 |
+
"\n",
|
570 |
+
"All sensitive operations are secured with role-based access and real-time sync.\n",
|
571 |
+
"\n",
|
572 |
+
"🧰 Tech Stack\n",
|
573 |
+
"Tech\tDescription\n",
|
574 |
+
"React\tComponent-based UI development\n",
|
575 |
+
"Vite\tLightweight and fast build tool\n",
|
576 |
+
"Styled Components\tCSS-in-JS for modular and scoped styles\n",
|
577 |
+
"React Router\tClient-side routing\n",
|
578 |
+
"React Query\tData fetching, caching, and synchronization\n",
|
579 |
+
"Supabase\tBackend-as-a-service: Auth, DB, Realtime\n",
|
580 |
+
"\n",
|
581 |
+
"🧪 Demo Login Credentials\n",
|
582 |
+
"text\n",
|
583 |
+
"Copy\n",
|
584 |
+
"Edit\n",
|
585 |
+
"email: [email protected]\n",
|
586 |
+
"password: Passwd1234\n",
|
587 |
+
"After logging in, new users can register and verify their accounts via confirmation email.\n",
|
588 |
+
"\n",
|
589 |
+
"🎓 What I Learned\n",
|
590 |
+
"🔧 React\n",
|
591 |
+
"Component Architecture: Improved modular thinking and reusability of code.\n",
|
592 |
+
"\n",
|
593 |
+
"State Management with Props: Gained a deeper understanding of dynamic UI development.\n",
|
594 |
+
"\n",
|
595 |
+
"🛠 Supabase\n",
|
596 |
+
"Data Handling: CRUD operations and seamless integration with front-end.\n",
|
597 |
+
"\n",
|
598 |
+
"Realtime Sync: Leveraged Supabase’s real-time capabilities to keep UI in sync.\n",
|
599 |
+
"\n",
|
600 |
+
"🔍 Development Practices\n",
|
601 |
+
"Complex App Design: Designed a full-stack, extensible, and scalable application.\n",
|
602 |
+
"\n",
|
603 |
+
"Debugging & Optimization: Learned to profile, debug, and enhance app performance.\n",
|
604 |
+
"\n",
|
605 |
+
"\n",
|
606 |
+
"\n",
|
607 |
+
"Potentially related projects:\n",
|
608 |
+
"Project Title:\n",
|
609 |
+
"CaisseApp – Cash Register and Daily Transaction Management System\n",
|
610 |
+
"\n",
|
611 |
+
"Project Description:\n",
|
612 |
+
"CaisseApp is a mobile application developed for UPS Connexlog Algeria to streamline and secure daily cash register operations. Designed for use by both desk agents and the finance team, the app allows agents to record all cash transactions throughout the day and automatically calculate the total amount in the register (\"caisse\") at closing time.\n",
|
613 |
+
"\n",
|
614 |
+
"In parallel, finance managers and administrators have real-time access to view and monitor cash registers across different branches or agents. This enhances transparency, accountability, and improves financial oversight across the organization.\n",
|
615 |
+
"\n",
|
616 |
+
"The app is built using Flutter, offering a responsive and reliable experience across mobile devices.\n",
|
617 |
+
"\n",
|
618 |
+
"Key Features:\n",
|
619 |
+
"💰 Daily Cash Register Tracking\n",
|
620 |
+
"Desk agents can input income and expense transactions, and the app automatically calculates the net amount in the register.\n",
|
621 |
+
"\n",
|
622 |
+
"🧾 Transaction History and Logs\n",
|
623 |
+
"Displays a clear, timestamped summary of all transactions for the day (e.g., payments, refunds, adjustments).\n",
|
624 |
+
"\n",
|
625 |
+
"👥 Role-Based Access\n",
|
626 |
+
"\n",
|
627 |
+
"Desk agents: Record and view only their own transactions.\n",
|
628 |
+
"\n",
|
629 |
+
"Finance team / Managers: View and monitor all agents’ cash activity in real time.\n",
|
630 |
+
"\n",
|
631 |
+
"📊 Real-Time Dashboard for Finance\n",
|
632 |
+
"Finance managers can see the current status of all caisses, monitor for discrepancies, and download reports if needed.\n",
|
633 |
+
"\n",
|
634 |
+
"🔒 Secure Data Handling\n",
|
635 |
+
"Ensures that transaction data is secure, centralized, and accessible only to authorized personnel.\n",
|
636 |
+
"\n",
|
637 |
+
"📱 Mobile-First Design\n",
|
638 |
+
"Built in Flutter for ease of use on Android and iOS devices.\n",
|
639 |
+
"\n",
|
640 |
+
"Technology Stack:\n",
|
641 |
+
"Flutter: Cross-platform mobile development\n",
|
642 |
+
"\n",
|
643 |
+
"Backend Server: Express.js (Node.js) for data storage and real-time synchronization\n",
|
644 |
+
"\n",
|
645 |
+
"Role-Based Logic: Custom implementation for secure access control\n",
|
646 |
+
"\n",
|
647 |
+
"Local/Remote Sync: Can operate in offline mode and sync with server when connected\n",
|
648 |
+
"\n",
|
649 |
+
"Responsibilities & Achievements:\n",
|
650 |
+
"Designed and developed the full app architecture and user interface.\n",
|
651 |
+
"\n",
|
652 |
+
"Implemented secure transaction logging and caisse calculation logic.\n",
|
653 |
+
"\n",
|
654 |
+
"Created role-based access workflows for agent vs. manager use cases.\n",
|
655 |
+
"\n",
|
656 |
+
"Improved financial tracking accuracy and reduced manual reconciliation errors.\n",
|
657 |
+
"\n",
|
658 |
+
"Potentially related projects:\n",
|
659 |
+
"Project Title:\n",
|
660 |
+
"IoT System for Air Quality – Real-Time Indoor Pollution Monitoring and Forecasting\n",
|
661 |
+
"\n",
|
662 |
+
"Detailed Summary:\n",
|
663 |
+
"This project involves the design and implementation of a complete IoT system dedicated to real-time monitoring and prediction of indoor air quality, with a specific focus on PM2.5 fine particulate matter concentration. The main goal is to offer a low-cost, autonomous, and intelligent solution capable of continuously measuring air quality and predicting pollution peaks using machine learning models.\n",
|
664 |
+
"\n",
|
665 |
+
"Technical Architecture:\n",
|
666 |
+
"Physical Sensors (ESP32 + PM2.5 sensors):\n",
|
667 |
+
"Microcontrollers (ESP32) are equipped with air quality sensors to collect environmental data, including PM2.5 concentration, temperature, and humidity.\n",
|
668 |
+
"\n",
|
669 |
+
"Raspberry Pi (IoT Gateway):\n",
|
670 |
+
"A Raspberry Pi serves as the central gateway, aggregating data sent wirelessly from the ESP32 devices and preparing it for processing.\n",
|
671 |
+
"\n",
|
672 |
+
"Node-RED:\n",
|
673 |
+
"A flow-based visual development tool used for data stream processing, database integration, real-time dashboard visualization, and deployment of logic flows.\n",
|
674 |
+
"\n",
|
675 |
+
"Mosquitto MQTT Broker:\n",
|
676 |
+
"A lightweight MQTT message broker facilitates efficient communication between the sensors and the gateway, using the publish/subscribe model.\n",
|
677 |
+
"\n",
|
678 |
+
"Docker Compose:\n",
|
679 |
+
"The entire software infrastructure is containerized using Docker Compose, ensuring modularity, portability, and ease of deployment.\n",
|
680 |
+
"\n",
|
681 |
+
"Node.js Backend:\n",
|
682 |
+
"A backend server built with Node.js handles data ingestion, storage, and provides RESTful APIs for data access and integration.\n",
|
683 |
+
"\n",
|
684 |
+
"Machine Learning with TensorFlow, Scikit-learn, and Pandas:\n",
|
685 |
+
"\n",
|
686 |
+
"Collected data is cleaned, analyzed, and preprocessed using Pandas.\n",
|
687 |
+
"\n",
|
688 |
+
"Several machine learning models were implemented and compared to predict future PM2.5 levels, including linear regression, random forests, and neural networks.\n",
|
689 |
+
"\n",
|
690 |
+
"Training and evaluation were performed both locally on the Raspberry Pi and on development machines.\n",
|
691 |
+
"\n",
|
692 |
+
"The best-performing model was deployed in production to provide real-time predictive insights.\n",
|
693 |
+
"\n",
|
694 |
+
"Scientific Publication:\n",
|
695 |
+
"This work led to the publication of a research paper titled:\n",
|
696 |
+
"“Study and Comparison of Machine Learning Models for Air PM 2.5 Concentration Prediction,”\n",
|
697 |
+
"which presents a comprehensive comparative analysis of various ML models applied to air pollution forecasting.\n",
|
698 |
+
"\n",
|
699 |
+
"Key Features:\n",
|
700 |
+
"Real-time dashboard with live air quality indicators via Node-RED.\n",
|
701 |
+
"\n",
|
702 |
+
"Automatic data logging to a local or remote database.\n",
|
703 |
+
"\n",
|
704 |
+
"Alert system for pollution threshold exceedance.\n",
|
705 |
+
"\n",
|
706 |
+
"Short-term air quality forecasting, updated regularly.\n",
|
707 |
+
"\n",
|
708 |
+
"Modular, open-source, and reproducible architecture via Docker Compose.\n",
|
709 |
+
"\n",
|
710 |
+
"\n"
|
711 |
+
]
|
712 |
+
}
|
713 |
+
],
|
714 |
+
"source": [
|
715 |
+
"gr.ChatInterface(chat, type=\"messages\").launch()"
|
716 |
+
]
|
717 |
+
}
|
718 |
+
],
|
719 |
+
"metadata": {
|
720 |
+
"kernelspec": {
|
721 |
+
"display_name": "llms",
|
722 |
+
"language": "python",
|
723 |
+
"name": "python3"
|
724 |
+
},
|
725 |
+
"language_info": {
|
726 |
+
"codemirror_mode": {
|
727 |
+
"name": "ipython",
|
728 |
+
"version": 3
|
729 |
+
},
|
730 |
+
"file_extension": ".py",
|
731 |
+
"mimetype": "text/x-python",
|
732 |
+
"name": "python",
|
733 |
+
"nbconvert_exporter": "python",
|
734 |
+
"pygments_lexer": "ipython3",
|
735 |
+
"version": "3.11.12"
|
736 |
+
}
|
737 |
+
},
|
738 |
+
"nbformat": 4,
|
739 |
+
"nbformat_minor": 2
|
740 |
+
}
|
main.py
ADDED
@@ -0,0 +1,194 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from dotenv import load_dotenv
|
2 |
+
import os
|
3 |
+
import chromadb
|
4 |
+
from chromadb.config import Settings
|
5 |
+
from openai import OpenAI
|
6 |
+
load_dotenv(override=True)
|
7 |
+
from sentence_transformers import SentenceTransformer
|
8 |
+
from pypdf import PdfReader
|
9 |
+
import requests
|
10 |
+
from pydantic import BaseModel
|
11 |
+
import gradio as gr
|
12 |
+
import json
|
13 |
+
def handle_tool_call(tool_calls):
    """Execute each requested tool call and package the results as tool-role messages.

    Unknown tool names resolve to an empty dict rather than raising, so the
    conversation loop keeps going even if the model hallucinates a tool.
    """
    replies = []
    for call in tool_calls:
        fn_name = call.function.name
        fn_args = json.loads(call.function.arguments)
        print(f"Tool called: {fn_name}", flush=True)
        # Dispatch by name against module-level functions.
        fn = globals().get(fn_name)
        outcome = fn(**fn_args) if fn else {}
        replies.append({
            "role": "tool",
            "content": json.dumps(outcome),
            "tool_call_id": call.id,
        })
    return replies
|
23 |
+
class Evaluation(BaseModel):
    """Structured verdict produced by the evaluator LLM.

    Used as the `response_format` schema for an OpenAI structured-output call,
    so the field names (and their order) are part of the API contract.
    """
    # Whether the agent's latest reply passed quality control.
    is_acceptable: bool
    # Reviewer feedback explaining the verdict; fed back into a retry on rejection.
    feedback: str
|
26 |
+
# Shared sentence-embedding model used to vectorize questions for Chroma similarity queries.
model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
|
27 |
+
|
28 |
+
def push(text):
    """Send a Pushover notification with the given text.

    Best-effort: the notification is a side channel, so network failures or
    bad credentials are logged and swallowed instead of crashing the chat
    reply. A timeout is set because `requests` otherwise waits indefinitely.
    """
    try:
        response = requests.post(
            "https://api.pushover.net/1/messages.json",
            data={
                "token": os.getenv("PUSHOVER_TOKEN"),
                "user": os.getenv("PUSHOVER_USER"),
                "message": text,
            },
            timeout=10,
        )
        response.raise_for_status()
    except requests.RequestException as exc:
        print(f"Pushover notification failed: {exc}", flush=True)
|
37 |
+
|
38 |
+
|
39 |
+
def record_user_details(email, name="Name not provided", notes="not provided"):
    """Notify the owner that a visitor wants to stay in touch (tool target)."""
    details = f"Recording {name} with email {email} and notes {notes}"
    push(details)
    return {"recorded": "ok"}
|
42 |
+
|
43 |
+
def record_unknown_question(question):
    """Notify the owner about a question the agent could not answer (tool target)."""
    notification = f"Recording {question}"
    push(notification)
    return {"recorded": "ok"}
|
46 |
+
|
47 |
+
# OpenAI function-calling schema: capture a visitor's contact details (email required).
record_user_details_json = {
    "name": "record_user_details",
    "description": "Use this tool to record that a user is interested in being in touch and provided an email address",
    "parameters": {
        "type": "object",
        "properties": {
            "email": {
                "type": "string",
                "description": "The email address of this user",
            },
            "name": {
                "type": "string",
                "description": "The user's name, if they provided it",
            },
            "notes": {
                "type": "string",
                "description": "Any additional information about the conversation that's worth recording to give context",
            },
        },
        "required": ["email"],
        "additionalProperties": False,
    },
}
|
71 |
+
|
72 |
+
# OpenAI function-calling schema: log any question the agent could not answer.
record_unknown_question_json = {
    "name": "record_unknown_question",
    "description": "Always use this tool to record any question that couldn't be answered as you didn't know the answer",
    "parameters": {
        "type": "object",
        "properties": {
            "question": {
                "type": "string",
                "description": "The question that couldn't be answered",
            },
        },
        "required": ["question"],
        "additionalProperties": False,
    },
}
|
87 |
+
|
88 |
+
# Tool registry handed to the OpenAI chat-completions call.
tools = [
    {"type": "function", "function": record_user_details_json},
    {"type": "function", "function": record_unknown_question_json},
]
|
90 |
+
|
91 |
+
class Me:
|
92 |
+
def __init__(self, name, cv_path):
|
93 |
+
self.name = name
|
94 |
+
self.chroma_client = chromadb.HttpClient(host=os.environ["CHROMA_DB_CLIEN"], port=8000, settings=Settings(
|
95 |
+
chroma_client_auth_provider="chromadb.auth.token_authn.TokenAuthClientProvider",
|
96 |
+
chroma_client_auth_credentials=os.environ["CHROMA_TOKEN"]
|
97 |
+
))
|
98 |
+
self.collection = self.chroma_client.get_collection(name="all-my-projects")
|
99 |
+
self.model = model
|
100 |
+
self.openai = OpenAI()
|
101 |
+
self.cv = ""
|
102 |
+
reader = PdfReader(cv_path)
|
103 |
+
for page in reader.pages:
|
104 |
+
text = page.extract_text()
|
105 |
+
if text:
|
106 |
+
self.cv += text
|
107 |
+
self.system_prompt = f"You are acting as {self.name}. You are answering questions on {self.name}'s website, \
|
108 |
+
particularly questions related to {name}'s career, background, skills and experience. \
|
109 |
+
Your responsibility is to represent {name} for interactions on the website as faithfully as possible. \
|
110 |
+
You are given a summary of {name}'s background and LinkedIn profile which you can use to answer questions. \
|
111 |
+
Be professional and engaging, as if talking to a potential client or future employer who came across the website. \
|
112 |
+
"
|
113 |
+
self.system_prompt += f"\n\## CV:\n{self.cv}\n\n"
|
114 |
+
|
115 |
+
self.system_prompt += f"With this context, please chat with the user, always staying in character as {self.name}."
|
116 |
+
|
117 |
+
self.gemini = OpenAI(
|
118 |
+
api_key=os.getenv("GOOGLE_API_KEY"),
|
119 |
+
base_url="https://generativelanguage.googleapis.com/v1beta/openai/")
|
120 |
+
self.evaluator_system_prompt = f"You are an evaluator that decides whether a response to a question is acceptable. \
|
121 |
+
You are provided with a conversation between a User and an Agent. Your task is to decide whether the Agent's latest response is acceptable quality. \
|
122 |
+
The Agent is playing the role of {name} and is representing {self.name} on their website. \
|
123 |
+
The Agent has been instructed to be professional and engaging, as if talking to a potential client or future employer who came across the website. \
|
124 |
+
The Agent has been provided with context on {self.name} in the form of their summary and LinkedIn details. Here's the information:"
|
125 |
+
|
126 |
+
self.evaluator_system_prompt += f"## CV:\n{self.cv}\n\n"
|
127 |
+
self.evaluator_system_prompt += f"With this context, please evaluate the latest response, replying with whether the response is acceptable and your feedback."
|
128 |
+
|
129 |
+
|
130 |
+
|
131 |
+
def evaluator_user_prompt(self, reply, message, history):
|
132 |
+
user_prompt = f"Here's the conversation between the User and the Agent: \n\n{history}\n\n"
|
133 |
+
user_prompt += f"Here's the latest message from the User: \n\n{message}\n\n"
|
134 |
+
user_prompt += f"Here's the latest response from the Agent: \n\n{reply}\n\n"
|
135 |
+
user_prompt += f"Please evaluate the response, replying with whether it is acceptable and your feedback."
|
136 |
+
return user_prompt
|
137 |
+
def evaluate(self, reply, message, history) -> Evaluation:
|
138 |
+
|
139 |
+
messages = [{"role": "system", "content": self.evaluator_system_prompt}] + [{"role": "user", "content": self.evaluator_user_prompt(reply, message, history)}]
|
140 |
+
response = self.gemini.beta.chat.completions.parse(model="gemini-2.0-flash", messages=messages, response_format=Evaluation)
|
141 |
+
return response.choices[0].message.parsed
|
142 |
+
def embed(self, text):
|
143 |
+
return self.model.encode(text)
|
144 |
+
|
145 |
+
def find_similars(self, question):
|
146 |
+
results = self.collection.query(query_embeddings=self.embed(question).astype(float).tolist(), n_results=5,include=['documents',"distances"])
|
147 |
+
documents = results['documents'][0][:]
|
148 |
+
distances=results['distances'][0][:]
|
149 |
+
filtered_documents = [
|
150 |
+
doc for doc, dist in zip(documents, distances) if dist < 1.7
|
151 |
+
]
|
152 |
+
return filtered_documents
|
153 |
+
def rerun(self, reply, message, history, feedback):
|
154 |
+
updated_system_prompt = self.system_prompt + f"\n\n## Previous answer rejected\nYou just tried to reply, but the quality control rejected your reply\n"
|
155 |
+
updated_system_prompt += f"## Your attempted answer:\n{reply}\n\n"
|
156 |
+
updated_system_prompt += f"## Reason for rejection:\n{feedback}\n\n"
|
157 |
+
messages = [{"role": "system", "content": updated_system_prompt}] + history + [{"role": "user", "content": message}]
|
158 |
+
response = self.openai.chat.completions.create(model="gpt-4o-mini", messages=messages)
|
159 |
+
return response.choices[0].message.content
|
160 |
+
def make_context(self,similars):
|
161 |
+
if len(similars)==0:
|
162 |
+
return ""
|
163 |
+
message = f"To provide some context, here are some projects done by {self.name} that might be related to the question that you need to answer.\n\n"
|
164 |
+
for similar in similars:
|
165 |
+
message += f"Potentially related projects:\n{similar}\n\n"
|
166 |
+
return message
|
167 |
+
def chat(self,message,history):
|
168 |
+
similars=self.find_similars(message)
|
169 |
+
|
170 |
+
message+=self.make_context(similars)
|
171 |
+
messages = [{"role": "system", "content": self.system_prompt}]+history + [{"role": "user", "content": message}]
|
172 |
+
|
173 |
+
done = False
|
174 |
+
while not done:
|
175 |
+
response = self.openai.chat.completions.create(model="gpt-4o-mini", messages=messages, tools=tools)
|
176 |
+
if response.choices[0].finish_reason=="tool_calls":
|
177 |
+
message = response.choices[0].message
|
178 |
+
tool_calls = message.tool_calls
|
179 |
+
results = handle_tool_call(tool_calls)
|
180 |
+
messages.append(message)
|
181 |
+
messages.extend(results)
|
182 |
+
else:
|
183 |
+
done = True
|
184 |
+
reply=response.choices[0].message.content
|
185 |
+
evaluation = self.evaluate(reply, message, history)
|
186 |
+
|
187 |
+
if evaluation.is_acceptable:
|
188 |
+
return reply
|
189 |
+
|
190 |
+
else:
|
191 |
+
return self.rerun(reply, message, history, evaluation.feedback)
|
192 |
+
if __name__ == "__main__":
    # Build the portfolio agent and serve it through a Gradio chat UI.
    portfolio_agent = Me(name="Djallel BRAHMIA", cv_path="documents/CV/CV.pdf")
    chat_ui = gr.ChatInterface(portfolio_agent.chat, type="messages")
    chat_ui.launch()
|