SoufianeDahimi committed on
Commit
bd5e054
·
verified ·
1 Parent(s): 5a3ccef

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +1027 -0
app.py ADDED
@@ -0,0 +1,1027 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Standard library
import json
import os
import random
import subprocess
import threading
import time
import uuid
from datetime import datetime, timedelta

# Third-party
import gradio as gr
import requests
from dotenv import load_dotenv
from ollama import chat
from pydantic import BaseModel
# NOTE: requests' ConnectionError intentionally shadows the builtin of the same
# name; the bare `except ConnectionError` below targets the requests variant.
from requests.exceptions import ConnectionError, RequestException
from supabase import create_client, Client
16
+
17
# Ollama setup for Hugging Face hosting.
# Downloads a standalone Ollama binary into the home directory, starts the
# server in a background daemon thread, and pulls the fine-tuned assessment
# model before the Gradio app below is built.
OLLAMA = os.path.expanduser("~/ollama")

if not os.path.exists(OLLAMA):
    # First run on a fresh host: fetch the Linux binary and make it executable.
    # NOTE(review): the curl exit status is not checked — a failed download
    # leaves a broken file that skips this branch on the next start.
    subprocess.run("curl -L https://ollama.com/download/ollama-linux-amd64 -o ~/ollama", shell=True)
    os.chmod(OLLAMA, 0o755)

def ollama_service_thread():
    # Blocks for the lifetime of the Ollama server process, hence the
    # daemon thread below (it dies with the main process).
    subprocess.run("~/ollama serve", shell=True)

OLLAMA_SERVICE_THREAD = threading.Thread(target=ollama_service_thread, daemon=True)
OLLAMA_SERVICE_THREAD.start()

print("Starting Ollama service...")
time.sleep(10)  # fixed grace period for the server to come up (no readiness probe)

# Pull the trauma assessment model
model_name = "llm_hub/child_trauma_gemma"
print(f"Pulling model: {model_name}")
subprocess.run(f"~/ollama pull {model_name}", shell=True)
print("Model ready!")

# Load environment variables
load_dotenv()
41
+
42
# Pydantic model for structured report generation
class RiskAssessment(BaseModel):
    """Schema the model's structured output must conform to.

    Passed as a JSON schema via the ``format=`` argument of ``ollama.chat``
    and parsed back with ``model_validate_json``.
    """
    # Summary of what the parent reported during the conversation.
    parent_observations: str
    # Model-written narrative analysis of trauma indicators.
    ai_analysis: str
    # 1-10 scale; values >= 7 are rendered as "High Risk" in the report.
    severity_score: int
    # Short labels for the behavioral risk indicators identified.
    risk_indicators: list[str]
    # Location/culture framing text for the report.
    cultural_context: str
49
+
50
+ class EnhancedTraumaAssessmentApp:
51
+ def __init__(self):
52
+ self.report_data = {
53
+ "child_info": {
54
+ "name": "",
55
+ "age": 0,
56
+ "gender": "",
57
+ "location": ""
58
+ },
59
+ "assessment_data": {
60
+ "parent_observations": "",
61
+ "ai_analysis": "",
62
+ "severity_score": 0,
63
+ "risk_indicators": [],
64
+ "cultural_context": ""
65
+ },
66
+ "media_attachments": {
67
+ "drawings": [],
68
+ "audio_recordings": [],
69
+ "photos": []
70
+ },
71
+ "mobile_app_id": str(uuid.uuid4()),
72
+ "session_start": datetime.now().isoformat(),
73
+ "conversation_history": []
74
+ }
75
+ self.is_onboarded = False
76
+ self.submitted_report_id = None
77
+ self.polling_active = False
78
+ self.ollama_conversation = [] # Track conversation for the model
79
+
80
+ # Initialize Supabase client
81
+ self.supabase_url = os.getenv("NEXT_PUBLIC_SUPABASE_URL")
82
+ self.supabase_key = os.getenv("NEXT_PUBLIC_SUPABASE_ANON_KEY")
83
+
84
+ if self.supabase_url and self.supabase_key:
85
+ self.supabase: Client = create_client(self.supabase_url, self.supabase_key)
86
+ else:
87
+ self.supabase = None
88
+ print("⚠️ Supabase credentials not found in .env file")
89
+
90
+ def complete_onboarding(self, child_name, child_age, child_gender, child_location):
91
+ """Complete the onboarding process and store child info"""
92
+ if not all([child_name, child_age, child_gender, child_location]):
93
+ return False, "Please fill in all required information about your child."
94
+
95
+ self.report_data["child_info"] = {
96
+ "name": child_name,
97
+ "age": int(child_age),
98
+ "gender": child_gender,
99
+ "location": child_location
100
+ }
101
+ self.is_onboarded = True
102
+
103
+ # Generate cultural context based on location
104
+ self.report_data["assessment_data"]["cultural_context"] = self.generate_cultural_context(child_location)
105
+
106
+ return True, f"Welcome! I'm ready to help you with {child_name}'s assessment."
107
+
108
+ def generate_cultural_context(self, location):
109
+ """Generate appropriate cultural context based on location"""
110
+ location_lower = location.lower()
111
+ if any(keyword in location_lower for keyword in ['gaza', 'palestine', 'west bank']):
112
+ return "Assessment conducted considering ongoing conflict exposure and displacement trauma"
113
+ elif any(keyword in location_lower for keyword in ['ukraine', 'kyiv', 'kharkiv', 'mariupol']):
114
+ return "Assessment considering war-related trauma and displacement from conflict zones"
115
+ elif any(keyword in location_lower for keyword in ['syria', 'lebanon', 'jordan']):
116
+ return "Assessment considering refugee experience and cultural adaptation challenges"
117
+ else:
118
+ return f"Assessment conducted with consideration for local cultural context in {location}"
119
+
120
+ def add_message(self, history, message):
121
+ """Add user message with multimodal support"""
122
+ if not self.is_onboarded:
123
+ return history, gr.MultimodalTextbox(value=None, interactive=False)
124
+
125
+ # Handle file uploads
126
+ if message.get("files"):
127
+ for file in message["files"]:
128
+ file_type = self.classify_file_type(file)
129
+ history.append({
130
+ "role": "user",
131
+ "content": {"path": file}
132
+ })
133
+
134
+ # Store in report data
135
+ if file_type == "image":
136
+ # Determine if it's a drawing or photo based on content analysis
137
+ attachment_type = "drawings" if "draw" in file.lower() else "photos"
138
+ self.report_data["media_attachments"][attachment_type].append({
139
+ "path": file,
140
+ "timestamp": datetime.now().isoformat()
141
+ })
142
+ print(f"Image file detected: {file}")
143
+
144
+ # Handle text message
145
+ if message.get("text"):
146
+ history.append({
147
+ "role": "user",
148
+ "content": message["text"]
149
+ })
150
+ # Add to conversation history for model
151
+ self.ollama_conversation.append({
152
+ "role": "user",
153
+ "content": message["text"]
154
+ })
155
+ # Add to parent observations
156
+ current_obs = self.report_data["assessment_data"]["parent_observations"]
157
+ self.report_data["assessment_data"]["parent_observations"] = (
158
+ current_obs + " " + message["text"] if current_obs else message["text"]
159
+ )
160
+
161
+ # Store conversation history
162
+ self.report_data["conversation_history"] = history
163
+ return history, gr.MultimodalTextbox(value=None, interactive=False)
164
+
165
+ def classify_file_type(self, file_path):
166
+ """Classify uploaded file type"""
167
+ if file_path.lower().endswith(('.jpg', '.jpeg', '.png', '.gif', '.bmp')):
168
+ return "image"
169
+ else:
170
+ return "other"
171
+
172
    def bot_response(self, history):
        """Generate bot response using Ollama model.

        Generator: streams the assistant reply into the last history entry one
        character at a time (yielding after each append) so Gradio animates a
        typing effect.
        """
        if not history or not self.is_onboarded:
            return

        # Find the most recent user turn; it is either plain text or an
        # uploaded file rendered as {"path": ...} by add_message.
        last_message = ""
        has_image = False
        image_path = None

        for msg in reversed(history):
            if msg["role"] == "user":
                if isinstance(msg["content"], str):
                    last_message = msg["content"]
                    break
                elif isinstance(msg["content"], dict) and "path" in msg["content"]:
                    has_image = True
                    image_path = msg["content"]["path"]
                    break

        # Prepare message for Ollama
        if has_image and image_path:
            # Image turn: send a fixed analysis prompt plus the image.
            try:
                response = chat(
                    model='llm_hub/child_trauma_gemma',
                    messages=[{
                        'role': 'user',
                        'content': f'I am sharing an image related to my child {self.report_data["child_info"]["name"]}\'s situation. Please analyze this image in the context of trauma assessment and respond empathetically.',
                        'images': [image_path],
                    }]
                )
                response_text = response.message.content
            except Exception as e:
                # Fall back to a canned empathetic reply if the model call fails.
                response_text = f"I can see you've shared an image. Thank you for providing this visual information about {self.report_data['child_info']['name']}. Visual expressions can tell us a lot about how children process their experiences. Could you tell me more about when this was created or what you'd like me to know about it?"
                print(f"Ollama image error: {e}")
        else:
            # Text turn: the whole accumulated transcript is replayed so the
            # model keeps context across turns.
            try:
                response = chat(
                    model='llm_hub/child_trauma_gemma',
                    messages=self.ollama_conversation
                )
                response_text = response.message.content
            except Exception as e:
                response_text = f"Thank you for sharing that with me. I understand this is a difficult time for you and {self.report_data['child_info']['name']}. Could you tell me more about what you're observing?"
                print(f"Ollama text error: {e}")

        # Add assistant response (including fallbacks) to the model transcript.
        self.ollama_conversation.append({
            "role": "assistant",
            "content": response_text
        })

        # Start bot response: empty assistant bubble that fills up below.
        history.append({"role": "assistant", "content": ""})

        # Stream the response (~50 chars/sec). NOTE(review): last_message is
        # computed above but never used after a branch is chosen.
        for character in response_text:
            history[-1]["content"] += character
            time.sleep(0.02)
            yield history
234
+
235
+ def generate_comprehensive_report(self, progress_callback=None):
236
+ """Generate comprehensive assessment report using Ollama structured output"""
237
+ if not self.is_onboarded:
238
+ return "Please complete the initial assessment form first."
239
+
240
+ if not self.ollama_conversation:
241
+ return "Please have a conversation first before generating a report."
242
+
243
+ if progress_callback:
244
+ progress_callback("πŸ€– Analyzing conversation with AI...")
245
+
246
+ try:
247
+ # Generate structured assessment using Ollama
248
+ assessment_prompt = f"""Based on our conversation about {self.report_data['child_info']['name']}, a {self.report_data['child_info']['age']}-year-old {self.report_data['child_info']['gender']} from {self.report_data['child_info']['location']}, generate a comprehensive trauma risk assessment report.
249
+
250
+ Include:
251
+ - Parent observations summary from our conversation
252
+ - AI analysis of trauma indicators
253
+ - Severity score (1-10 scale)
254
+ - List of risk indicators identified
255
+ - Cultural context considering the child's location and circumstances
256
+
257
+ Consider the conversation history and any cultural factors relevant to {self.report_data['child_info']['location']}."""
258
+
259
+ if progress_callback:
260
+ progress_callback("🧠 AI is generating structured assessment...")
261
+
262
+ response = chat(
263
+ model='llm_hub/child_trauma_gemma',
264
+ messages=[{'role': 'user', 'content': assessment_prompt}],
265
+ format=RiskAssessment.model_json_schema(),
266
+ options={'temperature': 0}
267
+ )
268
+
269
+ if progress_callback:
270
+ progress_callback("πŸ“Š Processing assessment data...")
271
+
272
+ # Parse structured response
273
+ assessment = RiskAssessment.model_validate_json(response.message.content)
274
+
275
+ # Update report data with AI-generated assessment
276
+ self.report_data["assessment_data"]["parent_observations"] = assessment.parent_observations
277
+ self.report_data["assessment_data"]["ai_analysis"] = assessment.ai_analysis
278
+ self.report_data["assessment_data"]["severity_score"] = assessment.severity_score
279
+ self.report_data["assessment_data"]["risk_indicators"] = assessment.risk_indicators
280
+ self.report_data["assessment_data"]["cultural_context"] = assessment.cultural_context
281
+
282
+ if progress_callback:
283
+ progress_callback("πŸ“‹ Formatting final report...")
284
+
285
+ except Exception as e:
286
+ print(f"Ollama structured output error: {e}")
287
+ if progress_callback:
288
+ progress_callback("⚠️ Using fallback assessment...")
289
+ # Fallback to basic assessment
290
+ self.report_data["assessment_data"]["severity_score"] = 6
291
+ self.report_data["assessment_data"]["risk_indicators"] = ["sleep disturbances", "behavioral changes", "anxiety"]
292
+
293
+ # Generate formatted report
294
+ child_info = self.report_data["child_info"]
295
+ assessment_data = self.report_data["assessment_data"]
296
+ media_attachments = self.report_data["media_attachments"]
297
+ severity = assessment_data["severity_score"]
298
+ risk_indicators = assessment_data["risk_indicators"]
299
+
300
+ return f"""# πŸ” COMPREHENSIVE TRAUMA ASSESSMENT REPORT
301
+
302
+ **Generated:** {datetime.now().strftime("%B %d, %Y at %H:%M")}
303
+ **Assessment ID:** {self.report_data["mobile_app_id"][:8]}
304
+ **Confidentiality Level:** Protected Health Information
305
+ **Platform:** Child Trauma Assessment AI
306
+
307
+ ---
308
+
309
+ ## πŸ‘€ CHILD INFORMATION
310
+
311
+ **Name:** {child_info["name"]}
312
+ **Age:** {child_info["age"]} years old
313
+ **Gender:** {child_info["gender"].title()}
314
+ **Location:** {child_info["location"]}
315
+ **Assessment Date:** {datetime.now().strftime("%B %d, %Y")}
316
+
317
+ ---
318
+
319
+ ## πŸ‘₯ PARENT OBSERVATIONS
320
+
321
+ {assessment_data["parent_observations"]}
322
+
323
+ **Session Details:**
324
+ - **Duration:** {len(self.report_data["conversation_history"])} message exchanges
325
+ - **Media Provided:** {len(media_attachments["drawings"])} drawings, {len(media_attachments["photos"])} photographs
326
+
327
+ ---
328
+
329
+ ## 🧠 AI ANALYSIS
330
+
331
+ {assessment_data["ai_analysis"]}
332
+
333
+ **Behavioral Patterns Identified:**
334
+ {chr(10).join([f"β€’ {indicator}" for indicator in risk_indicators])}
335
+
336
+ ---
337
+
338
+ ## ⚠️ SEVERITY ASSESSMENT
339
+
340
+ **Severity Score:** {severity}/10
341
+ **Risk Level:** {"🟑 Moderate Risk" if severity < 7 else "πŸ”΄ High Risk - Urgent Intervention Recommended"}
342
+ **Clinical Priority:** {"Standard referral appropriate" if severity < 7 else "Expedited professional evaluation needed"}
343
+
344
+ ---
345
+
346
+ ## 🌍 CULTURAL CONTEXT
347
+
348
+ {assessment_data["cultural_context"]}
349
+
350
+ This assessment considers the cultural and environmental factors specific to {child_info["location"]}, including region-specific trauma expressions, family dynamics, and community support systems.
351
+
352
+ ---
353
+
354
+ ## πŸ“‹ CLINICAL RECOMMENDATIONS
355
+
356
+ **Immediate Actions:**
357
+ 1. Schedule comprehensive evaluation with licensed child trauma specialist
358
+ 2. Ensure stable, predictable environment for {child_info["name"]}
359
+ 3. Implement safety planning and crisis contact protocols
360
+
361
+ **Therapeutic Interventions:**
362
+ 1. Begin trauma-focused cognitive behavioral therapy (TF-CBT)
363
+ 2. Consider family therapy to strengthen support systems
364
+ 3. Monitor sleep, appetite, and behavioral patterns daily
365
+
366
+ **Cultural Considerations:**
367
+ 1. Engage culturally competent mental health services
368
+ 2. Incorporate traditional coping mechanisms where appropriate
369
+ 3. Consider community-based support resources
370
+
371
+ **Follow-up:**
372
+ - Initial professional evaluation within 1-2 weeks
373
+ - Regular monitoring and assessment as recommended by treating clinician
374
+
375
+ ---
376
+
377
+ ## βš–οΈ IMPORTANT DISCLAIMERS
378
+
379
+ - **Preliminary Screening Tool:** This AI-generated assessment is for screening purposes only and does NOT constitute a clinical diagnosis
380
+ - **Professional Validation Required:** All findings must be validated by licensed mental health professionals
381
+ - **Emergency Protocol:** For immediate safety concerns, contact emergency services immediately
382
+ - **Clinical Judgment:** AI analysis should supplement, not replace, professional clinical assessment
383
+
384
+ **Report Generated:** {datetime.now().isoformat()}
385
+ **Next Review Recommended:** {(datetime.now()).strftime("%B %d, %Y")} (2 weeks)
386
+ """
387
+
388
+ def push_report_to_care_bridge(self, base_url="https://care-bridge-platform-7vs1.vercel.app"):
389
+ """Push the generated report to the Care Bridge platform."""
390
+ if not self.is_onboarded:
391
+ return False, "Please complete the initial assessment form first."
392
+
393
+ if not self.report_data["conversation_history"]:
394
+ return False, "Please have a conversation first before pushing a report."
395
+
396
+ # Prepare data in the format expected by Care Bridge API
397
+ api_data = {
398
+ "child_info": {
399
+ "age": self.report_data["child_info"]["age"],
400
+ "gender": self.report_data["child_info"]["gender"].lower(),
401
+ "location": self.report_data["child_info"]["location"]
402
+ },
403
+ "assessment_data": {
404
+ "parent_observations": self.report_data["assessment_data"]["parent_observations"],
405
+ "ai_analysis": self.report_data["assessment_data"]["ai_analysis"],
406
+ "severity_score": self.report_data["assessment_data"]["severity_score"],
407
+ "risk_indicators": self.report_data["assessment_data"]["risk_indicators"],
408
+ "cultural_context": self.report_data["assessment_data"]["cultural_context"]
409
+ },
410
+ "media_attachments": self.report_data["media_attachments"],
411
+ "mobile_app_id": self.report_data["mobile_app_id"]
412
+ }
413
+
414
+ try:
415
+ url = f"{base_url}/api/reports"
416
+ headers = {"Content-Type": "application/json"}
417
+
418
+ response = requests.post(url, json=api_data, headers=headers, timeout=10)
419
+
420
+ if response.status_code == 201:
421
+ result = response.json()
422
+ report_id = result.get('id', 'Unknown')
423
+ # Store the report ID for polling
424
+ self.submitted_report_id = report_id
425
+ # Start polling for responses
426
+ self.start_response_polling()
427
+ return True, f"βœ… Report successfully pushed to Care Bridge Platform!\nπŸ“‹ Report ID: {report_id}\nπŸ”„ Now monitoring for specialist response..."
428
+ else:
429
+ return False, f"❌ API Error: {response.status_code} - {response.text}"
430
+
431
+ except ConnectionError:
432
+ return False, "❌ Could not connect to Care Bridge Platform. Please check if the platform is running."
433
+ except requests.exceptions.Timeout:
434
+ return False, "❌ Request timed out. Please try again."
435
+ except RequestException as e:
436
+ return False, f"❌ Network error: {str(e)}"
437
+ except Exception as e:
438
+ return False, f"❌ Unexpected error: {str(e)}"
439
+
440
+ def start_response_polling(self):
441
+ """Start polling for specialist responses in a background thread."""
442
+ if not self.supabase or not self.submitted_report_id:
443
+ print("⚠️ Cannot start polling: Missing Supabase connection or report ID")
444
+ return
445
+
446
+ if self.polling_active:
447
+ print("ℹ️ Polling already active")
448
+ return # Already polling
449
+
450
+ self.polling_active = True
451
+ print(f"πŸ”„ Starting background polling for report ID: {self.submitted_report_id}")
452
+ polling_thread = threading.Thread(target=self._poll_for_response, daemon=True)
453
+ polling_thread.start()
454
+
455
    def _poll_for_response(self):
        """Poll Supabase for specialist responses.

        Runs on the background daemon thread started by
        start_response_polling(); checks every 5 seconds and gives up after
        max_polls attempts or when polling_active is cleared externally.
        """
        max_polls = 120  # Poll for 10 minutes (120 * 5 seconds)
        poll_count = 0
        print("Starting polling for response...")
        while self.polling_active and poll_count < max_polls:
            try:
                # Check for response in Supabase
                print("Polling for response...")
                response = self.supabase.table("responses").select("*").eq("report_id", self.submitted_report_id).execute()

                if response.data and len(response.data) > 0:
                    # Response found! Cache the first row on the instance for
                    # get_specialist_response() and stop polling.
                    specialist_response = response.data[0]
                    self.specialist_response = specialist_response
                    self.get_specialist_response()
                    self.polling_active = False
                    break

                # Wait 5 seconds before next poll
                time.sleep(5)
                poll_count += 1

            except Exception as e:
                # Transient query failures still count against the poll budget.
                print(f"Error polling for response: {e}")
                time.sleep(5)
                poll_count += 1

        # Stop polling after max attempts
        if poll_count >= max_polls:
            self.polling_active = False
486
+
487
+ def get_specialist_response(self):
488
+ """Get the specialist response if available."""
489
+ if hasattr(self, 'specialist_response'):
490
+ response = self.specialist_response
491
+
492
+ urgency_color = {
493
+ 'low': '🟒',
494
+ 'medium': '🟑',
495
+ 'high': '🟠',
496
+ 'critical': 'πŸ”΄'
497
+ }
498
+
499
+ urgency_emoji = urgency_color.get(response['urgency_level'], 'βšͺ')
500
+
501
+ formatted_response = f"""
502
+ # πŸ‘¨β€βš•οΈ SPECIALIST RESPONSE RECEIVED
503
+
504
+ **Response Date:** {response['response_date'][:19].replace('T', ' ')}
505
+ **Specialist ID:** {response['psychologist_id']}
506
+ **Urgency Level:** {urgency_emoji} {response['urgency_level'].upper()}
507
+
508
+ ---
509
+
510
+ ## πŸ“ PSYCHOLOGIST NOTES
511
+
512
+ {response['psychologist_notes']}
513
+
514
+ ---
515
+
516
+ ## πŸ’‘ RECOMMENDATIONS
517
+
518
+ """
519
+
520
+ if isinstance(response['recommendations'], dict):
521
+ for key, value in response['recommendations'].items():
522
+ formatted_response += f"**{key.replace('_', ' ').title()}:** {value}\n\n"
523
+ else:
524
+ formatted_response += str(response['recommendations'])
525
+
526
+ return True, formatted_response
527
+
528
+ return False, "No specialist response available yet. Still monitoring..."
529
+
530
# Initialize enhanced app.
# NOTE(review): a single module-level instance is shared by every Gradio event
# handler, so all concurrent visitors share one session's state.
app = EnhancedTraumaAssessmentApp()
532
+
533
# Enhanced CSS with onboarding styles.
# Injected into gr.Blocks(css=...) below; the class names here are referenced
# via elem_classes= on individual components and in inline gr.HTML snippets.
css = """
/* Main container styling */
.gradio-container {
    max-width: 900px !important;
    margin: 0 auto !important;
    font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
}

/* Onboarding specific styles */
.onboarding-container {
    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
    color: white;
    padding: 40px 30px;
    border-radius: 20px;
    margin: 20px 0;
    text-align: center;
    box-shadow: 0 10px 30px rgba(0,0,0,0.2);
}

.welcome-form {
    background: white;
    color: #333;
    padding: 30px;
    border-radius: 15px;
    margin: 20px 0;
    box-shadow: 0 5px 20px rgba(0,0,0,0.1);
}

.form-section {
    margin: 20px 0;
    text-align: left;
}

.form-section label {
    font-weight: 600;
    color: #2d3436;
    margin-bottom: 8px;
    display: block;
}

/* Header styling */
.header-container {
    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
    color: white;
    padding: 30px 20px;
    border-radius: 15px;
    margin-bottom: 25px;
    text-align: center;
    box-shadow: 0 4px 15px rgba(0,0,0,0.1);
}

/* Status indicators */
.status-success {
    background: linear-gradient(135deg, #84fab0 0%, #8fd3f4 100%);
    border-left: 4px solid #00b894;
    padding: 15px 20px;
    border-radius: 8px;
    margin: 15px 0;
    color: #00b894;
    font-weight: 500;
}

.status-warning {
    background: linear-gradient(135deg, #fff3cd 0%, #ffeaa7 100%);
    border-left: 4px solid #f39c12;
    padding: 15px 20px;
    border-radius: 8px;
    margin: 15px 0;
    color: #e67e22;
}

.status-info {
    background: linear-gradient(135deg, #a8edea 0%, #fed6e3 100%);
    border-left: 4px solid #74b9ff;
    padding: 15px 20px;
    border-radius: 8px;
    margin: 15px 0;
    color: #0984e3;
}

/* Button styling */
.primary-button {
    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
    border: none !important;
    color: white !important;
    padding: 15px 30px !important;
    border-radius: 25px !important;
    font-weight: 600 !important;
    font-size: 16px !important;
    transition: all 0.3s ease !important;
    width: 100% !important;
}

.primary-button:hover {
    transform: translateY(-2px) !important;
    box-shadow: 0 8px 25px rgba(102, 126, 234, 0.4) !important;
}

/* Chat interface styling */
.chat-container {
    background: white;
    border-radius: 15px;
    padding: 20px;
    box-shadow: 0 2px 10px rgba(0,0,0,0.1);
    margin-bottom: 20px;
}

.child-info-display {
    background: linear-gradient(135deg, #ddd6fe 0%, #e0e7ff 100%);
    border: 1px solid #c4b5fd;
    padding: 15px 20px;
    border-radius: 10px;
    margin: 15px 0;
    color: #5b21b6;
}

/* Mobile responsiveness */
@media (max-width: 768px) {
    .gradio-container {
        max-width: 100% !important;
        margin: 0 10px !important;
    }

    .onboarding-container {
        padding: 25px 20px;
        margin: 10px 0;
    }

    .welcome-form {
        padding: 20px;
        margin: 15px 0;
    }
}
"""
668
+
669
+ # Build enhanced Gradio interface with onboarding
670
+ with gr.Blocks(css=css, title="Child Trauma Assessment - Professional Support", theme=gr.themes.Soft()) as demo:
671
+
672
+ # Session state for controlling interface
673
+ onboarding_complete = gr.State(False)
674
+
675
+ # Welcome/Onboarding Interface
676
+ with gr.Column(visible=True) as onboarding_section:
677
+ gr.HTML("""
678
+ <div class="onboarding-container">
679
+ <h1>πŸ€— Welcome to Child Trauma Assessment AI</h1>
680
+ <p>Professional-grade support for families and children in crisis</p>
681
+ <br>
682
+ <h3>Let's start by learning about your child</h3>
683
+ </div>
684
+ """)
685
+
686
+ with gr.Column(elem_classes=["welcome-form"]):
687
+ gr.HTML("<h2 style='text-align: center; color: #667eea; margin-bottom: 25px;'>πŸ“ Child Information Form</h2>")
688
+
689
+ with gr.Row():
690
+ child_name = gr.Textbox(
691
+ label="Child's Name (First name only for privacy)",
692
+ placeholder="e.g., Sarah, Ahmed, Oleksandr",
693
+ elem_classes=["form-section"]
694
+ )
695
+ child_age = gr.Number(
696
+ label="Child's Age",
697
+ minimum=2,
698
+ maximum=18,
699
+ value=8,
700
+ elem_classes=["form-section"]
701
+ )
702
+
703
+ with gr.Row():
704
+ child_gender = gr.Dropdown(
705
+ label="Gender",
706
+ choices=["Female", "Male", "Prefer not to say"],
707
+ value="Female",
708
+ elem_classes=["form-section"]
709
+ )
710
+ child_location = gr.Textbox(
711
+ label="Current Location (City/Region)",
712
+ placeholder="e.g., Gaza, Kyiv, Aleppo, London",
713
+ elem_classes=["form-section"]
714
+ )
715
+
716
+ gr.HTML("""
717
+ <div class="status-info" style="margin: 20px 0;">
718
+ <strong>πŸ”’ Privacy Notice:</strong> This information is used only to personalize the assessment
719
+ and provide culturally appropriate support. No personal data is stored permanently.
720
+ </div>
721
+ """)
722
+
723
+ start_assessment_btn = gr.Button(
724
+ "πŸš€ Begin Assessment",
725
+ elem_classes=["primary-button"],
726
+ variant="primary",
727
+ size="lg"
728
+ )
729
+
730
+ onboarding_status = gr.HTML()
731
+
732
+ # Main Assessment Interface (hidden initially)
733
+ with gr.Column(visible=False) as main_interface:
734
+ # Child info display
735
+ child_info_display = gr.HTML()
736
+
737
+ with gr.Tab("πŸ’¬ Confidential Consultation"):
738
+ gr.HTML("""
739
+ <div class="status-info">
740
+ <strong>πŸ€– REAL AI MODEL:</strong> This platform uses our fine-tuned Gemma 3N model for authentic trauma assessment conversations.
741
+ <br><br>
742
+ <strong>πŸ’‘ Try These Features:</strong>
743
+ <br>
744
+ β€’ Start a conversation: "Hello, I'm worried about my child's recent behavior changes"
745
+ <br>
746
+ β€’ Upload images (child photos, drawings) for AI visual analysis
747
+ <br>
748
+ β€’ Use different languages - the model supports Arabic, Ukrainian, and English
749
+ <br>
750
+ β€’ Generate structured reports with AI-powered assessment insights
751
+ <br><br>
752
+ <strong>πŸ”’ Privacy:</strong> All conversations are processed securely. Audio support coming soon.
753
+ </div>
754
+ """)
755
+
756
+ chatbot = gr.Chatbot(
757
+ label="AI Trauma Assessment Specialist",
758
+ height=500,
759
+ bubble_full_width=False,
760
+ type="messages",
761
+ show_label=False,
762
+ elem_classes=["chat-container"]
763
+ )
764
+
765
+ chat_input = gr.MultimodalTextbox(
766
+ interactive=True,
767
+ file_count="multiple",
768
+ placeholder="Share your concerns here... ΩŠΩ…ΩƒΩ†Ωƒ Ψ§Ω„ΩƒΨͺΨ§Ψ¨Ψ© Ψ¨Ψ§Ω„ΨΉΨ±Ψ¨ΩŠΨ© β€’ ΠœΠΎΠΆΠ΅Ρ‚Π΅ писати ΡƒΠΊΡ€Π°Ρ—Π½ΡΡŒΠΊΠΎΡŽ",
769
+ show_label=False,
770
+ sources=["upload"] # Removed microphone - audio not yet supported
771
+ )
772
+
773
+ with gr.Row():
774
+ clear_btn = gr.Button("πŸ—‘οΈ New Conversation", variant="secondary", size="sm")
775
+ gr.HTML('<div style="flex-grow: 1;"></div>')
776
+
777
+ with gr.Tab("πŸ“‹ Professional Assessment Report"):
778
+ gr.HTML("""
779
+ <div class="status-warning">
780
+ <strong>⚠️ Professional Use Only:</strong> This AI-generated report is a preliminary screening tool.
781
+ It must be reviewed by licensed mental health professionals.
782
+ </div>
783
+ """)
784
+
785
+ generate_report_btn = gr.Button(
786
+ "πŸ“Š Generate Comprehensive Assessment",
787
+ variant="primary",
788
+ size="lg",
789
+ elem_classes=["primary-button"]
790
+ )
791
+
792
+ # Add progress indicator
793
+ progress_status = gr.HTML()
794
+
795
+ report_output = gr.Markdown()
796
+
797
+ with gr.Row():
798
+ save_report_btn = gr.Button("πŸ’Ύ Save Report", variant="secondary")
799
+ push_care_bridge_btn = gr.Button("πŸŒ‰ Push to Care Bridge", variant="primary")
800
+ gr.Button("πŸ“§ Email to Professional", variant="secondary", interactive=False)
801
+
802
+ save_status = gr.HTML()
803
+ care_bridge_status = gr.HTML()
804
+
805
+ with gr.Tab("πŸ‘¨β€βš•οΈ Specialist Response"):
806
+ gr.HTML("""
807
+ <div class="status-info">
808
+ <strong>πŸ”„ Background Monitoring:</strong> Once you submit a report, we automatically monitor for specialist responses in the background.
809
+ Click the button below to check for new responses.
810
+ </div>
811
+ """)
812
+
813
+ check_response_btn = gr.Button(
814
+ "πŸ” Check for Specialist Response",
815
+ variant="secondary",
816
+ size="lg"
817
+ )
818
+
819
+ specialist_response_output = gr.Markdown()
820
+ response_status = gr.HTML()
821
+
822
+ with gr.Tab("πŸ“– Resources & Information"):
823
+ gr.Markdown("""
824
+ ## 🎯 How This Assessment Works
825
+
826
+ Our AI specialist uses evidence-based approaches tailored to your child's specific situation:
827
+
828
+ ### πŸ“ **Personalized Assessment**
829
+ - Responses are customized based on your child's age, gender, and location
830
+ - Cultural context is considered throughout the evaluation
831
+ - All interactions are stored securely for comprehensive reporting
832
+
833
+ ### πŸ” **What We Analyze**
834
+ - Behavioral pattern changes specific to your child's developmental stage
835
+ - Cultural expressions of trauma and stress
836
+ - Family dynamics and support systems
837
+ - Environmental factors affecting recovery
838
+
839
+ ### πŸ“Š **Structured Data Collection**
840
+ All information is organized into a comprehensive clinical format:
841
+ - Child demographics and context
842
+ - Detailed parent observations
843
+ - AI analysis and risk assessment
844
+ - Multimedia evidence (drawings, voice recordings, photos)
845
+ - Cultural considerations and recommendations
846
+
847
+ ## πŸŒ‰ **Care Bridge Platform Integration**
848
+
849
+ This assessment tool integrates with the Care Bridge Platform to:
850
+ - **Share Reports**: Securely transmit assessment data to professional networks
851
+ - **Track Progress**: Maintain longitudinal care records
852
+ - **Coordinate Care**: Enable multi-disciplinary team collaboration
853
+ - **Emergency Response**: Alert crisis intervention teams when needed
854
+ """)
855
+
856
# Event handlers
def handle_onboarding(name, age, gender, location):
    """Validate onboarding inputs and toggle between onboarding and main views.

    Returns a 4-tuple matching the wired outputs: onboarding column
    visibility, main-interface column visibility, the child-info banner
    HTML, and an onboarding status message.
    """
    success, message = app.complete_onboarding(name, age, gender, location)

    if not success:
        # Validation failed: keep onboarding on screen and show the error.
        return (
            gr.Column(visible=True),
            gr.Column(visible=False),
            "",
            f'<div class="status-warning">❌ {message}</div>',
        )

    # Banner summarising whose assessment this is.
    child_display = f"""
    <div class="child-info-display">
        <strong>πŸ‘€ Assessment for:</strong> {name}, {int(age)} years old ({gender}) β€’ πŸ“ {location}
    </div>
    """
    return (
        gr.Column(visible=False),  # hide onboarding
        gr.Column(visible=True),   # reveal the main interface
        child_display,
        f'<div class="status-success">{message}</div>',
    )

# Onboarding completion
start_assessment_btn.click(
    handle_onboarding,
    inputs=[child_name, child_age, child_gender, child_location],
    outputs=[onboarding_section, main_interface, child_info_display, onboarding_status],
)
886
+
887
# Conversation handling
def handle_conversation():
    """Wire the chat round-trip: user submit -> bot reply -> re-enable input."""
    submit_event = chat_input.submit(
        app.add_message,
        [chatbot, chat_input],
        [chatbot, chat_input],
    )
    reply_event = submit_event.then(
        app.bot_response,
        chatbot,
        chatbot,
    )
    # Once the bot has answered, unlock the input box for the next message.
    reply_event.then(
        lambda: gr.MultimodalTextbox(interactive=True),
        None,
        [chat_input],
    )

handle_conversation()
906
+
907
# Clear conversation
def clear_conversation():
    """Reset stored conversation/assessment state and re-enable the chat input."""
    data = app.report_data
    data["conversation_history"] = []
    assessment = data["assessment_data"]
    assessment["parent_observations"] = ""
    assessment["ai_analysis"] = ""
    # Fresh, empty media buckets for the new conversation.
    data["media_attachments"] = {
        "drawings": [],
        "audio_recordings": [],
        "photos": [],
    }
    return [], gr.MultimodalTextbox(value=None, interactive=True)

clear_btn.click(
    clear_conversation,
    outputs=[chatbot, chat_input],
)
919
+
920
# Generate report with progress updates
def generate_report_with_progress():
    """Stream (report_markdown, progress_html) pairs to the UI while the
    comprehensive assessment is generated.

    Yields:
        tuple[str, str]: the report markdown (empty until ready) and a
        status banner; the banner is cleared a few seconds after success.
    """
    def update_progress(message):
        # Wrap each message in the shared status-info banner markup.  Also
        # handed to the report generator as progress_callback, which relies
        # on the formatted string being returned.
        return f'<div class="status-info">{message}</div>'

    try:
        yield "", update_progress("πŸš€ Starting assessment generation...")

        report = app.generate_comprehensive_report(progress_callback=update_progress)

        yield report, update_progress("βœ… Assessment completed!")

        # Keep the success banner visible briefly, then clear it.
        # NOTE(review): this blocks the event worker for 3s — acceptable for
        # a generator-based Gradio handler, but confirm under load.
        time.sleep(3)
        yield report, ""

    except Exception as e:
        # Surface the failure in the progress area; leave the report empty.
        yield "", f'<div class="status-warning">❌ Error: {str(e)}</div>'
946
+
947
# Stream report + progress into their respective components.
generate_report_btn.click(
    generate_report_with_progress,
    outputs=[report_output, progress_status],
)
951
+
952
# Save report
def save_report_with_data(report_content):
    """Persist the rendered report (markdown) and the structured assessment
    data (JSON) to the working directory.

    Args:
        report_content: Markdown from the report generator; empty content or
            the "Please complete" placeholder is rejected.

    Returns:
        A markdown/HTML status string describing success or failure.
    """
    if not report_content or "Please complete" in report_content:
        return "❌ No report available to save."

    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")

    # The child's name is user input: keep only filesystem-safe characters
    # so it cannot produce an invalid filename or escape the directory.
    raw_name = str(app.report_data['child_info']['name'])
    safe_name = "".join(c if c.isalnum() or c in "-_" else "_" for c in raw_name)

    # Save markdown report and structured data side by side.
    report_filename = f"trauma_report_{safe_name}_{timestamp}.md"
    data_filename = f"assessment_data_{safe_name}_{timestamp}.json"

    try:
        with open(report_filename, 'w', encoding='utf-8') as f:
            f.write(report_content)

        # default=str keeps non-JSON types (e.g. datetimes) serialisable.
        with open(data_filename, 'w', encoding='utf-8') as f:
            json.dump(app.report_data, f, indent=2, ensure_ascii=False, default=str)

        return f"βœ… Report saved as: **{report_filename}**<br>πŸ“Š Data saved as: **{data_filename}**"
    except Exception as e:
        return f"❌ Error saving files: {str(e)}"
975
+
976
# Save the currently rendered report when requested.
save_report_btn.click(
    save_report_with_data,
    inputs=[report_output],
    outputs=[save_status],
)
981
+
982
# Push report to Care Bridge
def push_to_care_bridge():
    """Submit the current report to Care Bridge and format the outcome."""
    success, message = app.push_report_to_care_bridge()
    css_class = "status-success" if success else "status-warning"
    return f'<div class="{css_class}">{message}</div>'

push_care_bridge_btn.click(
    push_to_care_bridge,
    outputs=[care_bridge_status],
)
992
+
993
# Check for specialist response
def check_for_response():
    """Poll the app for a specialist reply and describe the current state."""
    has_response, response_content = app.get_specialist_response()

    if has_response:
        return response_content, '<div class="status-success">βœ… Specialist response received!</div>'

    # No reply yet — explain why, from most to least active state.
    if app.polling_active:
        status = '<div class="status-info">πŸ”„ Still monitoring for specialist response...</div>'
    elif app.submitted_report_id:
        status = '<div class="status-warning">⏸️ Monitoring stopped. No response received within time limit.</div>'
    else:
        status = '<div class="status-warning">ℹ️ Submit a report first to check for responses.</div>'
    return "", status

check_response_btn.click(
    check_for_response,
    outputs=[specialist_response_output, response_status],
)
1009
+
1010
# Note: Auto-refresh functionality can be added with newer Gradio versions
# For now, users can manually click the "Check for Specialist Response" button

# Feedback handling
def handle_feedback(x: gr.LikeData):
    """Log thumbs-up/down feedback on a chat message to stdout."""
    label = "πŸ‘ Helpful" if x.liked else "πŸ‘Ž Needs Improvement"
    print(f"User feedback: {label} on message {x.index}")
    # Could store this in report_data for quality improvement

chatbot.like(handle_feedback, None, None, like_user_message=True)
1020
+
1021
# Launch configuration
if __name__ == "__main__":
    # share=True exposes a public Gradio link; show_error surfaces
    # handler tracebacks in the UI instead of a generic message.
    demo.launch(
        share=True,
        inbrowser=True,
        show_error=True,
    )