pranit144 committed
Commit d7dda8b · verified · 1 Parent(s): eabf083

Upload 5 files

Files changed (5):
  1. app.py +1016 -0
  2. haarcascade_frontalface_default.xml +0 -0
  3. index.html +1201 -0
  4. model.h5 +3 -0
  5. requirements.txt +14 -0
app.py ADDED
@@ -0,0 +1,1016 @@
import os
import json
import logging
import tempfile
import re
from typing import Dict, Any, Optional, List, Tuple
from pathlib import Path
from dotenv import load_dotenv
from flask import Flask, request, jsonify, render_template, send_file
from flask_cors import CORS
import google.generativeai as genai
from groq import Groq
import pandas as pd
from datetime import datetime
import io
import cv2
import tensorflow as tf
from tensorflow.keras.models import load_model
from tensorflow.keras.utils import img_to_array
from moviepy.editor import VideoFileClip
import concurrent.futures
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np

# Configure logger first before using it elsewhere
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.StreamHandler(),
        logging.FileHandler('app.log')
    ]
)
logger = logging.getLogger(__name__)

# Suppress TensorFlow warnings
tf.get_logger().setLevel('ERROR')
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0'

# Load environment variables
load_dotenv()

# Configure Flask app
app = Flask(__name__)
app.config['TEMPLATES_AUTO_RELOAD'] = False
app.config['MAX_CONTENT_LENGTH'] = 500 * 1024 * 1024  # 500MB max file size

# Configure API keys with validation
GROQ_API_KEY = os.getenv('GROQ_API_KEY')
GEMINI_API_KEY = os.getenv('GEMINI_API_KEY')

if not GROQ_API_KEY:
    logger.error("GROQ_API_KEY environment variable not set")
    raise ValueError("GROQ_API_KEY environment variable must be set")

if not GEMINI_API_KEY:
    logger.error("GEMINI_API_KEY environment variable not set")
    raise ValueError("GEMINI_API_KEY environment variable must be set")
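Both keys are read from a local .env file via load_dotenv(); a minimal sketch of that file, with placeholder values rather than real keys:

    GROQ_API_KEY=your-groq-key
    GEMINI_API_KEY=your-gemini-key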
# Initialize clients with proper configuration and error handling
try:
    # Initialize Groq client with basic configuration
    from groq._base_client import SyncHttpxClientWrapper
    import httpx

    # Create a simple httpx client
    http_client = SyncHttpxClientWrapper(
        base_url="https://api.groq.com/v1",
        timeout=httpx.Timeout(60.0)
    )

    # Initialize Groq client
    groq_client = Groq(
        api_key=GROQ_API_KEY,
        http_client=http_client
    )

    # Initialize Gemini client
    genai.configure(api_key=GEMINI_API_KEY)
    MODEL_NAME = "gemini-1.5-flash"
    logger.info("API clients initialized successfully")
except Exception as e:
    logger.error(f"Error initializing API clients: {str(e)}")
    raise

# Emotion Detection Setup
MODEL_PATH = os.path.join(os.path.dirname(__file__), 'model.h5')
HAARCASCADE_PATH = os.path.join(os.path.dirname(__file__), 'haarcascade_frontalface_default.xml')

# Load models with optimized settings
try:
    # Configure TensorFlow for optimal CPU performance
    physical_devices = tf.config.list_physical_devices('CPU')
    if physical_devices:
        try:
            # Limit memory growth to prevent OOM errors
            tf.config.experimental.set_memory_growth(physical_devices[0], True)
        except Exception:
            # Not all devices support memory growth
            pass

    tf.config.threading.set_inter_op_parallelism_threads(4)
    tf.config.threading.set_intra_op_parallelism_threads(4)

    # Load emotion model with optimized settings
    model = load_model(MODEL_PATH, compile=False)
    model.compile(
        optimizer='adam',
        loss='categorical_crossentropy',
        metrics=['accuracy'],
        run_eagerly=False
    )

    # Load face cascade
    face_cascade = cv2.CascadeClassifier(HAARCASCADE_PATH)
    if face_cascade.empty():
        raise Exception("Error: Haar Cascade file could not be loaded")

    logger.info("Successfully loaded model and face cascade")
except Exception as e:
    logger.error(f"Error loading model or face cascade: {str(e)}")
    model = None
    face_cascade = None

EMOTIONS = ['Angry', 'Disgust', 'Fear', 'Happy', 'Neutral', 'Sad', 'Surprise']

# Video processing configuration
VIDEO_CHUNK_SIZE = 1024 * 1024  # 1MB chunks for video processing
MAX_VIDEO_DURATION = 120  # Maximum video duration in minutes
FRAME_SAMPLE_RATE = 5  # Process every 5th frame for long videos
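The face pipeline further down resizes each crop to 48×48 grayscale before prediction, which implies model.h5 is a 7-class classifier over (1, 48, 48, 1) inputs. A minimal smoke test under that assumption:

    # Hypothetical smoke test: one blank 48x48 grayscale crop through the emotion model.
    if model is not None:
        dummy = np.zeros((1, 48, 48, 1), dtype="float32")
        probs = model.predict(dummy, verbose=0)[0]
        assert len(probs) == len(EMOTIONS)  # one probability per emotion label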
def extract_json(text: str) -> Optional[str]:
    """Extract JSON from response text."""
    try:
        json_match = re.search(r'\{.*\}', text, re.DOTALL)
        if json_match:
            return json_match.group(0)
        return None
    except Exception as e:
        logger.error(f"Error extracting JSON: {str(e)}")
        return None
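extract_json grabs the span from the first '{' to the last '}', which is enough to strip markdown fences or stray prose around a model reply. An illustrative call:

    # Illustrative input: a reply that wraps its JSON in a code fence.
    reply = 'Here you go:\n```json\n{"overall_recommendation": "Consider"}\n```'
    print(extract_json(reply))  # -> '{"overall_recommendation": "Consider"}'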
def extract_audio_from_video(video_path: str) -> Optional[str]:
    """Extract audio from video file with optimized processing."""
    try:
        temp_audio_path = video_path.replace('.mp4', '.mp3')

        # Load video clip with optimized settings
        video_clip = VideoFileClip(
            video_path,
            audio_buffersize=200000,
            verbose=False,
            audio_fps=44100
        )

        if video_clip.audio is None:
            logger.warning("Video has no audio track")
            return None

        # Extract audio with optimized settings
        video_clip.audio.write_audiofile(
            temp_audio_path,
            buffersize=2000,
            verbose=False,
            logger=None
        )
        video_clip.close()

        logger.info(f"Successfully extracted audio to {temp_audio_path}")
        return temp_audio_path
    except Exception as e:
        logger.error(f"Error extracting audio: {str(e)}")
        return None
    finally:
        # Ensure video clip is closed even if an exception occurs
        if 'video_clip' in locals() and video_clip is not None:
            try:
                video_clip.close()
            except Exception:
                pass
def transcribe_audio(audio_path: str) -> Optional[str]:
    """Transcribe audio using Groq."""
    if not audio_path or not os.path.exists(audio_path):
        logger.error(f"Audio file not found at {audio_path}")
        return None

    try:
        # Transcribe audio
        with open(audio_path, "rb") as file:
            transcription = groq_client.audio.transcriptions.create(
                file=(audio_path, file.read()),
                model="whisper-large-v3-turbo",
                response_format="json",
                language="en",
                temperature=0.0
            )

        logger.info(f"Transcription successful: {transcription.text[:100]}...")
        return transcription.text

    except Exception as e:
        logger.error(f"Transcription error: {str(e)}")
        return None
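The helper can also be exercised on its own; a sketch assuming some local sample.mp3 exists:

    # Hypothetical standalone call; "sample.mp3" is a placeholder path.
    text = transcribe_audio("sample.mp3")
    if text:
        print(text[:200])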
def process_video_chunk(frame_chunk: List[np.ndarray], start_frame: int) -> Dict[str, Any]:
    """Process a chunk of video frames efficiently."""
    results = {
        'emotion_counts': {emotion: 0 for emotion in EMOTIONS},
        'faces_detected': 0,
        'frames_with_faces': 0,
        'frames_processed': 0
    }

    for frame_idx, frame in enumerate(frame_chunk):
        try:
            # Skip empty frames
            if frame is None or frame.size == 0:
                continue

            # Resize frame for faster processing if too large
            height, width = frame.shape[:2]
            if width > 1280:
                scale = 1280 / width
                frame = cv2.resize(frame, None, fx=scale, fy=scale)

            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            faces = face_cascade.detectMultiScale(
                gray,
                scaleFactor=1.1,
                minNeighbors=5,
                minSize=(30, 30),
                flags=cv2.CASCADE_SCALE_IMAGE
            )

            results['frames_processed'] += 1
            if len(faces) > 0:
                results['frames_with_faces'] += 1
                results['faces_detected'] += len(faces)

            for (x, y, w, h) in faces:
                # Add boundary checks
                if y >= gray.shape[0] or x >= gray.shape[1] or y + h > gray.shape[0] or x + w > gray.shape[1]:
                    continue

                roi = gray[y:y + h, x:x + w]
                roi = cv2.resize(roi, (48, 48), interpolation=cv2.INTER_AREA)

                if np.sum(roi) == 0:
                    continue

                roi = roi.astype("float32") / 255.0
                roi = img_to_array(roi)
                roi = np.expand_dims(roi, axis=0)

                with tf.device('/CPU:0'):
                    preds = model.predict(roi, verbose=0)[0]
                    label = EMOTIONS[np.argmax(preds)]
                    results['emotion_counts'][label] += 1

        except Exception as e:
            logger.error(f"Error processing frame {start_frame + frame_idx}: {str(e)}")
            continue

    return results
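process_video_chunk is callable outside the executor as well; a minimal sketch, assuming the models above loaded and some local clip.mp4 exists:

    # Hypothetical standalone use: score the first few frames of a clip.
    cap = cv2.VideoCapture("clip.mp4")  # placeholder path
    frames = [cap.read()[1] for _ in range(10)]
    cap.release()
    stats = process_video_chunk([f for f in frames if f is not None], start_frame=0)
    print(stats['emotion_counts'])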
def analyze_video_emotions(video_path: str) -> Dict[str, Any]:
    """Analyze emotions in a video with optimized processing for large files."""
    if model is None or face_cascade is None:
        logger.error("Model or face detector not properly loaded")
        return {
            'emotion_counts': {},
            'emotion_percentages': {},
            'total_faces': 0,
            'frames_processed': 0,
            'frames_with_faces': 0,
            'error': 'Models not properly loaded'
        }

    cap = None
    try:
        # Open video and get properties
        cap = cv2.VideoCapture(video_path)
        if not cap.isOpened():
            raise Exception("Failed to open video file")

        total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        fps = int(cap.get(cv2.CAP_PROP_FPS)) or 30  # Default to 30 if fps is 0
        duration = total_frames / max(fps, 1) / 60  # Duration in minutes, prevent division by zero

        # Check video duration
        if duration > MAX_VIDEO_DURATION:
            raise Exception(f"Video duration exceeds maximum limit of {MAX_VIDEO_DURATION} minutes")

        # Initialize results
        combined_results = {
            'emotion_counts': {emotion: 0 for emotion in EMOTIONS},
            'total_faces': 0,
            'frames_processed': 0,
            'frames_with_faces': 0,
            'processing_stats': {
                'total_video_frames': total_frames,
                'video_fps': fps,
                'video_duration_minutes': round(duration, 2)
            }
        }

        # Process video in chunks using ThreadPoolExecutor
        frame_buffer = []
        frame_count = 0
        chunk_size = 30  # Process 30 frames per chunk

        with ThreadPoolExecutor(max_workers=min(4, os.cpu_count() or 4)) as executor:
            future_to_chunk = {}

            while True:
                ret, frame = cap.read()
                if not ret:
                    break

                frame_count += 1
                if frame_count % FRAME_SAMPLE_RATE != 0:
                    continue

                frame_buffer.append(frame)

                if len(frame_buffer) >= chunk_size:
                    # Submit chunk for processing
                    future = executor.submit(
                        process_video_chunk,
                        frame_buffer.copy(),
                        frame_count - len(frame_buffer)
                    )
                    future_to_chunk[future] = len(frame_buffer)
                    frame_buffer = []

            # Process remaining frames
            if frame_buffer:
                future = executor.submit(
                    process_video_chunk,
                    frame_buffer,
                    frame_count - len(frame_buffer)
                )
                future_to_chunk[future] = len(frame_buffer)

            # Collect results
            for future in as_completed(future_to_chunk):
                try:
                    chunk_results = future.result()
                    # Combine results
                    for emotion, count in chunk_results['emotion_counts'].items():
                        combined_results['emotion_counts'][emotion] += count
                    combined_results['total_faces'] += chunk_results['faces_detected']
                    combined_results['frames_processed'] += chunk_results['frames_processed']
                    combined_results['frames_with_faces'] += chunk_results['frames_with_faces']
                except Exception as e:
                    logger.error(f"Error processing chunk: {str(e)}")

        # Calculate percentages
        total_emotions = sum(combined_results['emotion_counts'].values())
        combined_results['emotion_percentages'] = {
            emotion: round((count / max(total_emotions, 1) * 100), 2)
            for emotion, count in combined_results['emotion_counts'].items()
        }

        # Add processing statistics
        combined_results['processing_stats'].update({
            'frames_sampled': combined_results['frames_processed'],
            'sampling_rate': f'1/{FRAME_SAMPLE_RATE}',
            'processing_complete': True
        })

        return combined_results

    except Exception as e:
        logger.error(f"Error in emotion analysis: {str(e)}")
        return {
            'error': str(e),
            'emotion_counts': {emotion: 0 for emotion in EMOTIONS},
            'emotion_percentages': {emotion: 0 for emotion in EMOTIONS},
            'total_faces': 0,
            'frames_processed': 0,
            'frames_with_faces': 0,
            'processing_stats': {
                'error_occurred': True,
                'error_message': str(e)
            }
        }
    finally:
        if cap is not None:
            cap.release()
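With FRAME_SAMPLE_RATE = 5 and the 30-frame chunk size above, a 30 fps recording is effectively scored at 6 fps, so each submitted chunk covers roughly five seconds of footage. A quick back-of-envelope check (the 30 fps input is an assumption):

    fps_in = 30
    analyzed_fps = fps_in / FRAME_SAMPLE_RATE   # 6 frames scored per second of video
    seconds_per_chunk = 30 / analyzed_fps       # ~5 s of footage per 30-frame chunk
    print(analyzed_fps, seconds_per_chunk)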
def analyze_interview(conversation_text: str, role_applied: Optional[str] = None, tech_skills: Optional[List[str]] = None) -> Dict[str, Any]:
    """Analyze technical interview transcript."""
    if not conversation_text or len(conversation_text.strip()) < 50:
        logger.warning("Transcript too short for meaningful analysis")
        return create_default_assessment()

    try:
        gemini_model = genai.GenerativeModel(MODEL_NAME)

        skills_context = ""
        if tech_skills and len(tech_skills) > 0:
            skills_context = f"Focus on evaluating these specific technical skills: {', '.join(tech_skills)}."

        role_context = ""
        if role_applied:
            role_context = f"The candidate is being interviewed for the role of {role_applied}."

        prompt = f"""
        Based on the following technical interview transcript, analyze the candidate's responses and provide a structured assessment in *valid JSON format*.

        {role_context}
        {skills_context}

        *JSON Format:*
        {{
            "candidate_assessment": {{
                "technical_knowledge": {{
                    "score": 0,  // Score from 1-10
                    "strengths": [],
                    "areas_for_improvement": []
                }},
                "problem_solving": {{
                    "score": 0,  // Score from 1-10
                    "strengths": [],
                    "areas_for_improvement": []
                }},
                "communication": {{
                    "score": 0,  // Score from 1-10
                    "strengths": [],
                    "areas_for_improvement": []
                }}
            }},
            "question_analysis": [
                {{
                    "question": "",
                    "answer_quality": "",  // Excellent, Good, Average, Poor
                    "feedback": ""
                }}
            ],
            "overall_recommendation": "",  // Hire, Strong Consider, Consider, Do Not Recommend
            "overall_feedback": ""
        }}

        *Interview Transcript:*
        {conversation_text}

        *Output Strictly JSON. Do NOT add explanations or extra text.*
        """

        # Set timeout and retry parameters
        safety_settings = [
            {
                "category": "HARM_CATEGORY_HARASSMENT",
                "threshold": "BLOCK_MEDIUM_AND_ABOVE"
            },
            {
                "category": "HARM_CATEGORY_HATE_SPEECH",
                "threshold": "BLOCK_MEDIUM_AND_ABOVE"
            },
            {
                "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
                "threshold": "BLOCK_MEDIUM_AND_ABOVE"
            },
            {
                "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
                "threshold": "BLOCK_MEDIUM_AND_ABOVE"
            },
        ]

        generation_config = {
            "temperature": 0.2,
            "top_p": 0.95,
            "top_k": 40,
            "max_output_tokens": 8192,
        }

        # Try to generate response with retry mechanism
        max_retries = 3
        for attempt in range(max_retries):
            try:
                response = gemini_model.generate_content(
                    prompt,
                    safety_settings=safety_settings,
                    generation_config=generation_config
                )
                raw_response = response.text
                logger.info(f"Raw Gemini Response: {raw_response[:100]}...")
                break
            except Exception as e:
                logger.warning(f"Attempt {attempt + 1} failed: {str(e)}")
                if attempt == max_retries - 1:  # Last attempt
                    logger.error(f"All {max_retries} attempts failed")
                    return create_default_assessment()

        json_text = extract_json(raw_response)
        if json_text:
            try:
                assessment = json.loads(json_text)
                # Ensure the response has all required fields
                required_fields = {
                    'candidate_assessment': {
                        'technical_knowledge': ['score', 'strengths', 'areas_for_improvement'],
                        'problem_solving': ['score', 'strengths', 'areas_for_improvement'],
                        'communication': ['score', 'strengths', 'areas_for_improvement']
                    },
                    'question_analysis': ['question', 'answer_quality', 'feedback'],
                    'overall_recommendation': None,
                    'overall_feedback': None
                }

                # Validate and set defaults if needed
                if 'candidate_assessment' not in assessment:
                    assessment['candidate_assessment'] = {}

                for category in ['technical_knowledge', 'problem_solving', 'communication']:
                    if category not in assessment['candidate_assessment']:
                        assessment['candidate_assessment'][category] = {
                            'score': 5,
                            'strengths': ['Not enough information to assess.'],
                            'areas_for_improvement': ['Not enough information to assess.']
                        }
                    else:
                        cat_data = assessment['candidate_assessment'][category]
                        for field in required_fields['candidate_assessment'][category]:
                            if field not in cat_data:
                                if field == 'score':
                                    cat_data[field] = 5
                                else:
                                    cat_data[field] = ['Not enough information to assess.']

                if 'question_analysis' not in assessment or not assessment['question_analysis']:
                    assessment['question_analysis'] = [{
                        'question': 'General Interview',
                        'answer_quality': 'Average',
                        'feedback': 'Not enough specific questions to analyze.'
                    }]
                else:
                    for qa in assessment['question_analysis']:
                        for field in required_fields['question_analysis']:
                            if field not in qa:
                                qa[field] = 'Not available'

                if 'overall_recommendation' not in assessment or not assessment['overall_recommendation']:
                    assessment['overall_recommendation'] = 'Consider'

                if 'overall_feedback' not in assessment or not assessment['overall_feedback']:
                    assessment['overall_feedback'] = 'Not enough information to provide detailed feedback.'

                return assessment
            except json.JSONDecodeError as e:
                logger.error(f"Error parsing JSON response: {str(e)}")
                return create_default_assessment()
        else:
            logger.error("No valid JSON found in response")
            return create_default_assessment()

    except Exception as e:
        logger.error(f"Interview analysis error: {str(e)}")
        return create_default_assessment()
def create_default_assessment() -> Dict[str, Any]:
    """Create a default assessment when analysis fails."""
    return {
        "candidate_assessment": {
            "technical_knowledge": {
                "score": 5,
                "strengths": ["Unable to assess strengths from the provided transcript."],
                "areas_for_improvement": ["Unable to assess areas for improvement from the provided transcript."]
            },
            "problem_solving": {
                "score": 5,
                "strengths": ["Unable to assess strengths from the provided transcript."],
                "areas_for_improvement": ["Unable to assess areas for improvement from the provided transcript."]
            },
            "communication": {
                "score": 5,
                "strengths": ["Unable to assess strengths from the provided transcript."],
                "areas_for_improvement": ["Unable to assess areas for improvement from the provided transcript."]
            }
        },
        "question_analysis": [{
            "question": "General Interview",
            "answer_quality": "Average",
            "feedback": "Unable to assess specific questions from the transcript."
        }],
        "overall_recommendation": "Consider",
        "overall_feedback": "Unable to provide a detailed assessment based on the provided transcript."
    }
def process_video_and_audio_parallel(video_path: str, role_applied: Optional[str] = None, tech_skills: Optional[List[str]] = None) -> Tuple[Dict[str, Any], str, Dict[str, Any]]:
    """Process video and audio in parallel with optimized handling."""
    audio_path = None
    emotion_results = None
    transcript = None
    interview_assessment = None

    try:
        with ThreadPoolExecutor(max_workers=min(3, os.cpu_count() or 2)) as executor:
            # Submit emotions analysis task
            emotion_future = executor.submit(analyze_video_emotions, video_path)

            # Submit audio extraction task
            audio_future = executor.submit(extract_audio_from_video, video_path)

            # Wait for audio extraction to complete with timeout
            try:
                audio_path = audio_future.result(timeout=120)  # 2 minutes timeout
            except concurrent.futures.TimeoutError:
                logger.error("Audio extraction timeout exceeded")
                audio_path = None

            # Continue with transcription if audio was extracted
            transcript_future = None
            if audio_path:
                transcript_future = executor.submit(transcribe_audio, audio_path)

            # Wait for emotion analysis with timeout
            try:
                emotion_results = emotion_future.result(timeout=300)  # 5 minutes timeout
            except concurrent.futures.TimeoutError:
                logger.error("Emotion analysis timeout exceeded")
                emotion_results = {
                    'error': 'Processing timeout exceeded',
                    'emotion_counts': {emotion: 0 for emotion in EMOTIONS},
                    'emotion_percentages': {emotion: 0 for emotion in EMOTIONS},
                    'total_faces': 0,
                    'frames_processed': 0,
                    'frames_with_faces': 0
                }

            # Wait for transcription with timeout
            if transcript_future:
                try:
                    transcript = transcript_future.result(timeout=300)  # 5 minutes timeout
                except concurrent.futures.TimeoutError:
                    logger.error("Transcription timeout exceeded")
                    transcript = "Transcription failed due to timeout."
            else:
                transcript = "Audio extraction failed, no transcription available."

        # Analyze interview content if transcript is available
        if transcript and len(transcript) > 50:
            interview_assessment = analyze_interview(transcript, role_applied, tech_skills)
        else:
            interview_assessment = create_default_assessment()

        # Clean up audio file
        if audio_path and os.path.exists(audio_path):
            try:
                os.unlink(audio_path)
            except Exception as e:
                logger.warning(f"Error cleaning up audio file: {str(e)}")

        return emotion_results, transcript, interview_assessment

    except Exception as e:
        logger.error(f"Error in parallel processing: {str(e)}")

        # Create default results if any component failed
        if not emotion_results:
            emotion_results = {
                'error': str(e),
                'emotion_counts': {emotion: 0 for emotion in EMOTIONS},
                'emotion_percentages': {emotion: 0 for emotion in EMOTIONS},
                'total_faces': 0,
                'frames_processed': 0,
                'frames_with_faces': 0
            }

        if not transcript:
            transcript = f"Error processing audio: {str(e)}"

        if not interview_assessment:
            interview_assessment = create_default_assessment()

        # Clean up audio file if it exists
        if audio_path and os.path.exists(audio_path):
            try:
                os.unlink(audio_path)
            except Exception:
                pass

        return emotion_results, transcript, interview_assessment
@app.route('/')
def index():
    """Render the main page."""
    return render_template('index.html')


@app.route('/test', methods=['GET'])
def test_endpoint():
    """Test endpoint to verify server is running."""
    return jsonify({"status": "ok", "message": "Server is running"}), 200
@app.route("/analyze_interview", methods=["POST", "OPTIONS"])
def analyze_interview_route():
    """Main route for comprehensive interview analysis."""
    # Add CORS headers for preflight requests
    if request.method == 'OPTIONS':
        headers = {
            'Access-Control-Allow-Origin': '*',
            'Access-Control-Allow-Methods': 'POST, OPTIONS',
            'Access-Control-Allow-Headers': 'Content-Type',
            'Access-Control-Max-Age': '86400'  # 24 hours
        }
        return ('', 204, headers)

    try:
        logger.info("Received analyze_interview request")

        # Check for required file
        if 'video' not in request.files:
            logger.error("No video file in request")
            return jsonify({"error": "Video file is required"}), 400

        video_file = request.files['video']
        if not video_file:
            logger.error("Empty video file")
            return jsonify({"error": "Empty video file"}), 400

        # Get additional form data
        role_applied = request.form.get('role_applied', '')
        tech_skills = request.form.get('tech_skills', '')
        candidate_name = request.form.get('candidate_name', 'Candidate')
        tech_skills_list = [skill.strip() for skill in tech_skills.split(',')] if tech_skills else []

        # Create temporary video file
        try:
            with tempfile.NamedTemporaryFile(delete=False, suffix='.mp4') as video_temp:
                video_file.save(video_temp.name)
                video_temp_path = video_temp.name
                logger.info(f"Video saved to temporary file: {video_temp_path}")
        except Exception as e:
            logger.error(f"Error saving video file: {str(e)}")
            return jsonify({"error": f"Failed to save video file: {str(e)}"}), 500

        # Process video and audio in parallel
        try:
            emotion_analysis, transcript, interview_assessment = process_video_and_audio_parallel(
                video_temp_path, role_applied, tech_skills_list
            )
        except Exception as e:
            logger.error(f"Error during parallel processing: {str(e)}")
            return jsonify({"error": str(e)}), 500

        # Combine results
        combined_results = {
            "candidate_assessment": interview_assessment["candidate_assessment"],
            "question_analysis": interview_assessment["question_analysis"],
            "overall_recommendation": interview_assessment["overall_recommendation"],
            "overall_feedback": interview_assessment["overall_feedback"],
            "transcription": transcript,
            "candidate_name": candidate_name,
            "role_applied": role_applied,
            "interview_date": datetime.now().strftime('%Y-%m-%d'),
            "emotion_analysis": emotion_analysis
        }

        logger.info("Combined results created successfully")
        logger.debug(f"Response data: {json.dumps(combined_results, indent=2)}")

        # Clean up temporary video file
        try:
            os.unlink(video_temp_path)
            logger.info("Temporary files cleaned up")
        except Exception as e:
            logger.warning(f"Error cleaning up temporary files: {str(e)}")

        # Add CORS headers to response
        response = jsonify(combined_results)
        response.headers.add('Access-Control-Allow-Origin', '*')
        return response

    except Exception as e:
        logger.error(f"Error in analyze_interview_route: {str(e)}")
        return jsonify({"error": str(e)}), 500
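A client sketch for the route above (the host, port, and file path are placeholders; the field names match the form keys the route reads):

    import requests  # illustrative client, not part of app.py

    with open("interview.mp4", "rb") as f:  # placeholder recording
        resp = requests.post(
            "http://localhost:5000/analyze_interview",
            files={"video": f},
            data={
                "candidate_name": "Jane Doe",
                "role_applied": "Backend Engineer",
                "tech_skills": "Python, SQL",
            },
        )
    report = resp.json()
    print(report["overall_recommendation"])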
@app.route('/download_assessment', methods=['POST', 'OPTIONS'])
def download_assessment():
    """Download comprehensive assessment report."""
    # Add CORS headers for preflight requests
    if request.method == 'OPTIONS':
        headers = {
            'Access-Control-Allow-Origin': '*',
            'Access-Control-Allow-Methods': 'POST, OPTIONS',
            'Access-Control-Allow-Headers': 'Content-Type',
            'Access-Control-Max-Age': '86400'  # 24 hours
        }
        return ('', 204, headers)

    try:
        data = request.json
        if not data:
            return jsonify({"error": "No data provided"}), 400

        # Create Excel writer object
        output = io.BytesIO()
        with pd.ExcelWriter(output, engine='xlsxwriter') as writer:
            workbook = writer.book

            # Define formats
            header_format = workbook.add_format({
                'bold': True,
                'bg_color': '#CCCCCC',
                'border': 1
            })

            cell_format = workbook.add_format({
                'border': 1
            })

            # Summary Sheet
            summary_data = {
                'Metric': [
                    'Technical Knowledge',
                    'Problem Solving',
                    'Communication',
                    'Overall Recommendation',
                    'Total Faces Detected'
                ],
                'Score/Rating': [
                    f"{data['candidate_assessment']['technical_knowledge']['score']}/10",
                    f"{data['candidate_assessment']['problem_solving']['score']}/10",
                    f"{data['candidate_assessment']['communication']['score']}/10",
                    data['overall_recommendation'],
                    data['emotion_analysis'].get('total_faces', 0)
                ]
            }

            summary_df = pd.DataFrame(summary_data)
            summary_df.to_excel(writer, sheet_name='Summary', index=False)

            # Format Summary sheet
            summary_sheet = writer.sheets['Summary']
            summary_sheet.set_column('A:A', 25)
            summary_sheet.set_column('B:B', 20)

            # Apply formats to Summary sheet
            for col_num, value in enumerate(summary_df.columns.values):
                summary_sheet.write(0, col_num, value, header_format)

            for row_num in range(len(summary_df)):
                for col_num in range(len(summary_df.columns)):
                    summary_sheet.write(row_num + 1, col_num, summary_df.iloc[row_num, col_num], cell_format)

            # Technical Assessment Sheet
            tech_data = []

            # Add technical knowledge
            tech_data.append(['Technical Knowledge', f"{data['candidate_assessment']['technical_knowledge']['score']}/10", ''])
            tech_data.append(['Strengths', '', ''])
            for strength in data['candidate_assessment']['technical_knowledge']['strengths']:
                tech_data.append(['', '', strength])

            tech_data.append(['Areas for Improvement', '', ''])
            for area in data['candidate_assessment']['technical_knowledge']['areas_for_improvement']:
                tech_data.append(['', '', area])

            # Add problem solving
            tech_data.append(['Problem Solving', f"{data['candidate_assessment']['problem_solving']['score']}/10", ''])
            tech_data.append(['Strengths', '', ''])
            for strength in data['candidate_assessment']['problem_solving']['strengths']:
                tech_data.append(['', '', strength])

            tech_data.append(['Areas for Improvement', '', ''])
            for area in data['candidate_assessment']['problem_solving']['areas_for_improvement']:
                tech_data.append(['', '', area])

            # Add communication
            tech_data.append(['Communication', f"{data['candidate_assessment']['communication']['score']}/10", ''])
            tech_data.append(['Strengths', '', ''])
            for strength in data['candidate_assessment']['communication']['strengths']:
                tech_data.append(['', '', strength])

            tech_data.append(['Areas for Improvement', '', ''])
            for area in data['candidate_assessment']['communication']['areas_for_improvement']:
                tech_data.append(['', '', area])

            # Create Technical Assessment dataframe
            tech_df = pd.DataFrame(tech_data, columns=['Category', 'Score', 'Details'])
            tech_df.to_excel(writer, sheet_name='Technical Assessment', index=False)

            # Format Technical Assessment sheet
            tech_sheet = writer.sheets['Technical Assessment']
            tech_sheet.set_column('A:A', 25)
            tech_sheet.set_column('B:B', 15)
            tech_sheet.set_column('C:C', 60)

            # Apply formats to Technical Assessment sheet
            for col_num, value in enumerate(tech_df.columns.values):
                tech_sheet.write(0, col_num, value, header_format)

            # Question Analysis Sheet
            question_data = []
            for qa in data['question_analysis']:
                question_data.append([
                    qa['question'],
                    qa['answer_quality'],
                    qa['feedback']
                ])

            question_df = pd.DataFrame(question_data, columns=['Question', 'Answer Quality', 'Feedback'])
            question_df.to_excel(writer, sheet_name='Question Analysis', index=False)

            # Format Question Analysis sheet
            qa_sheet = writer.sheets['Question Analysis']
            qa_sheet.set_column('A:A', 40)
            qa_sheet.set_column('B:B', 15)
            qa_sheet.set_column('C:C', 60)

            # Apply formats to Question Analysis sheet
            for col_num, value in enumerate(question_df.columns.values):
                qa_sheet.write(0, col_num, value, header_format)

            # Emotion Analysis Sheet
            if 'emotion_analysis' in data and 'emotion_percentages' in data['emotion_analysis']:
                emotion_data = {
                    'Emotion': list(data['emotion_analysis']['emotion_percentages'].keys()),
                    'Percentage': list(data['emotion_analysis']['emotion_percentages'].values()),
                    'Count': [data['emotion_analysis']['emotion_counts'].get(emotion, 0)
                              for emotion in data['emotion_analysis']['emotion_percentages'].keys()]
                }

                emotion_df = pd.DataFrame(emotion_data)
                emotion_df.to_excel(writer, sheet_name='Emotion Analysis', index=False)

                # Format Emotion Analysis sheet
                emotion_sheet = writer.sheets['Emotion Analysis']
                emotion_sheet.set_column('A:A', 15)
                emotion_sheet.set_column('B:B', 15)
                emotion_sheet.set_column('C:C', 15)

                # Apply formats to Emotion Analysis sheet
                for col_num, value in enumerate(emotion_df.columns.values):
                    emotion_sheet.write(0, col_num, value, header_format)

                # Add a chart
                chart = workbook.add_chart({'type': 'pie'})
                chart.add_series({
                    'name': 'Emotions',
                    'categories': ['Emotion Analysis', 1, 0, len(emotion_df), 0],
                    'values': ['Emotion Analysis', 1, 1, len(emotion_df), 1],
                    'data_labels': {'percentage': True}
                })

                chart.set_title({'name': 'Emotion Distribution'})
                chart.set_style(10)
                emotion_sheet.insert_chart('E2', chart, {'x_scale': 1.5, 'y_scale': 1.5})

            # Transcript Sheet
            if 'transcription' in data:
                transcript_data = {'Transcript': [data['transcription']]}
                transcript_df = pd.DataFrame(transcript_data)
                transcript_df.to_excel(writer, sheet_name='Transcript', index=False)

                # Format Transcript sheet
                transcript_sheet = writer.sheets['Transcript']
                transcript_sheet.set_column('A:A', 100)

                # Apply formats to Transcript sheet
                transcript_sheet.write(0, 0, 'Transcript', header_format)

            # Overall Feedback Sheet
            overall_data = {'Overall Feedback': [data['overall_feedback']]}
            overall_df = pd.DataFrame(overall_data)
            overall_df.to_excel(writer, sheet_name='Overall Feedback', index=False)

            # Format Overall Feedback sheet
            overall_sheet = writer.sheets['Overall Feedback']
            overall_sheet.set_column('A:A', 100)

            # Apply formats to Overall Feedback sheet
            overall_sheet.write(0, 0, 'Overall Feedback', header_format)

        # Prepare the output file for download
        output.seek(0)
        candidate_name = data.get('candidate_name', 'Candidate').replace(' ', '_')
        role_applied = data.get('role_applied', 'Role').replace(' ', '_')
        filename = f"{candidate_name}_{role_applied}_Assessment.xlsx"

        # Create response with appropriate headers
        response = send_file(
            output,
            mimetype='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
            as_attachment=True,
            download_name=filename
        )

        # Add CORS headers
        response.headers.add('Access-Control-Allow-Origin', '*')
        return response

    except Exception as e:
        logger.error(f"Error generating assessment report: {str(e)}")
        return jsonify({"error": f"Failed to generate assessment report: {str(e)}"}), 500


if __name__ == "__main__":
    # Setup Flask app with proper settings for production
    PORT = int(os.environ.get("PORT", 5000))
    app.run(host="0.0.0.0", port=PORT, debug=False, threaded=True)
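Continuing the client sketch from /analyze_interview, the JSON it returns can be posted straight back to /download_assessment to retrieve the Excel report (host and output path are again placeholders):

    # `report` is the dict returned by the /analyze_interview sketch above.
    xlsx = requests.post("http://localhost:5000/download_assessment", json=report)
    with open("assessment.xlsx", "wb") as f:
        f.write(xlsx.content)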
haarcascade_frontalface_default.xml ADDED
The diff for this file is too large to render. See raw diff
 
index.html ADDED
@@ -0,0 +1,1201 @@
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>Technical Interview Analyzer</title>
    <link href="https://cdn.jsdelivr.net/npm/bootstrap@5.3.3/dist/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-QWTKZyjpPEjISv5WaRU9OFeRpok6YctnYmDr5pNlyT2bRjXh0JMhjY6hW+ALEwIH" crossorigin="anonymous">
    <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.15.4/css/all.min.css">
    <style>
        body {
            font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
            max-width: 1200px;
            margin: 0 auto;
            padding: 20px;
            background-color: #f5f5f5;
        }
        .container {
            background-color: white;
            padding: 30px;
            border-radius: 12px;
            box-shadow: 0 2px 8px rgba(0, 0, 0, 0.1);
            margin-bottom: 30px;
        }
        h1, h2, h3 {
            color: #2c3e50;
        }
        h1 {
            text-align: center;
            margin-bottom: 30px;
        }
        .section {
            margin-bottom: 40px;
        }
        .upload-section {
            text-align: center;
            margin: 30px 0;
            padding: 20px;
            background-color: #f8f9fa;
            border-radius: 8px;
        }
        #uploadForm {
            margin: 20px 0;
        }
        .results-container {
            margin-top: 30px;
        }
        .progress-bar {
            background-color: #3498db;
        }
        button {
            background-color: #3498db;
            color: white;
            padding: 12px 24px;
            border: none;
            border-radius: 6px;
            cursor: pointer;
            font-size: 16px;
            transition: background-color 0.3s;
        }
        button:hover {
            background-color: #2980b9;
        }
        .loading {
            text-align: center;
            margin: 20px 0;
            display: none;
        }
        .error {
            color: #e74c3c;
            padding: 10px;
            background-color: #fde8e8;
            border-radius: 4px;
            margin: 10px 0;
        }
        /* Print styles */
        @media print {
            body {
                padding: 0;
                margin: 0;
            }
            .container {
                box-shadow: none;
                padding: 20px;
            }
            .upload-section,
            #uploadForm,
            #loading,
            #downloadExcelBtn,
            .btn {
                display: none !important;
            }
            .assessment-card {
                break-inside: avoid;
            }
        }
        .loading-overlay {
            display: none;
            position: fixed;
            top: 0;
            left: 0;
            width: 100%;
            height: 100%;
            background: rgba(0, 0, 0, 0.5);
            z-index: 1000;
        }

        .loading-content {
            position: absolute;
            top: 50%;
            left: 50%;
            transform: translate(-50%, -50%);
            background: white;
            padding: 30px;
            border-radius: 10px;
            text-align: center;
        }

        .spinner {
            width: 50px;
            height: 50px;
            border: 5px solid #f3f3f3;
            border-top: 5px solid #3498db;
            border-radius: 50%;
            animation: spin 1s linear infinite;
            margin: 0 auto 20px;
        }

        @keyframes spin {
            0% { transform: rotate(0deg); }
            100% { transform: rotate(360deg); }
        }

        .loading-text {
            color: #2c3e50;
            font-size: 18px;
            margin-top: 15px;
        }

        .progress-steps {
            margin-top: 15px;
            text-align: left;
        }

        .step {
            margin: 8px 0;
            color: #666;
        }

        .step.active {
            color: #3498db;
            font-weight: bold;
        }

        .step.completed {
            color: #2ecc71;
        }

        .score-display {
            display: flex;
            align-items: center;
            margin-bottom: 15px;
        }

        .score-number {
            font-size: 2rem;
            font-weight: bold;
            margin-right: 15px;
            color: #2c3e50;
            min-width: 50px;
            text-align: center;
        }

        .progress {
            flex-grow: 1;
            height: 15px;
        }

        .list-group-item {
            border-left: none;
            border-right: none;
        }

        .assessment-card {
            margin-bottom: 25px;
            border-radius: 10px;
            overflow: hidden;
        }

        .assessment-card .card-header {
            font-weight: bold;
            padding: 15px;
        }

        .card-header-technical {
            background-color: #4a90e2;
            color: white;
        }

        .card-header-problem {
            background-color: #50c878;
            color: white;
        }

        .card-header-communication {
            background-color: #6a5acd;
            color: white;
        }

        .card-header-questions {
            background-color: #ff7f50;
            color: white;
        }

        .card-header-recommendation {
            background-color: #2c3e50;
            color: white;
        }

        .card-header-emotions {
            background-color: #9b59b6;
            color: white;
        }

        .badge-excellent {
            background-color: #28a745;
        }

        .badge-good {
            background-color: #4a90e2;
        }

        .badge-average {
            background-color: #ffc107;
            color: #212529;
        }

        .badge-poor {
            background-color: #dc3545;
        }

        .recommendation-display {
            display: flex;
            flex-direction: column;
            align-items: center;
            margin: 20px 0;
        }

        .recommendation-badge {
            padding: 10px 20px;
            font-size: 1.2rem;
            margin-bottom: 15px;
            border-radius: 30px;
        }

        .recommendation-hire {
            background-color: #28a745;
            color: white;
        }

        .recommendation-strong {
            background-color: #4a90e2;
            color: white;
        }

        .recommendation-consider {
            background-color: #ffc107;
            color: #212529;
        }

        .recommendation-reject {
            background-color: #dc3545;
            color: white;
        }

        .transcript-container {
            max-height: 300px;
            overflow-y: auto;
            background-color: #f8f9fa;
            padding: 15px;
            border-radius: 5px;
            margin-top: 15px;
            border: 1px solid #dee2e6;
        }

        #accordionTranscript .accordion-button:not(.collapsed) {
            background-color: #e7f5ff;
            color: #0c63e4;
        }

        .question-card {
            margin-bottom: 15px;
            border-left: 4px solid #4a90e2;
        }

        .question-text {
            font-weight: bold;
        }

        .answer-quality {
            display: inline-block;
            padding: 3px 10px;
            border-radius: 15px;
            font-size: 0.85rem;
            margin: 5px 0;
        }

        .emotion-bar {
            height: 30px;
            margin-bottom: 10px;
            border-radius: 5px;
            position: relative;
        }

        .emotion-label {
            position: absolute;
            left: 10px;
            top: 5px;
            color: white;
            font-weight: bold;
            text-shadow: 1px 1px 1px rgba(0,0,0,0.5);
        }

        .emotion-percentage {
            position: absolute;
            right: 10px;
            top: 5px;
            color: white;
            font-weight: bold;
            text-shadow: 1px 1px 1px rgba(0,0,0,0.5);
        }

        .emotion-angry {
            background-color: #e74c3c;
        }

        .emotion-disgust {
            background-color: #8e44ad;
        }

        .emotion-fear {
            background-color: #34495e;
        }

        .emotion-happy {
            background-color: #27ae60;
        }

        .emotion-neutral {
            background-color: #7f8c8d;
        }

        .emotion-sad {
            background-color: #3498db;
        }

        .emotion-surprise {
            background-color: #f39c12;
        }

        /* Add new styles for video preview */
        .video-preview-container {
            margin-top: 15px;
            max-width: 100%;
            display: none;
        }

        .video-preview {
            width: 100%;
            max-width: 640px;
            margin: 0 auto;
            display: block;
            border-radius: 8px;
            box-shadow: 0 2px 4px rgba(0,0,0,0.1);
        }

        /* Add processing status styles */
        .processing-status {
            margin-top: 10px;
            padding: 10px;
            border-radius: 5px;
            background-color: #f8f9fa;
            display: none;
        }

        .processing-status.success {
            background-color: #d4edda;
            color: #155724;
        }

        .processing-status.error {
            background-color: #f8d7da;
            color: #721c24;
        }

        /* Improve loading overlay */
        .loading-overlay .progress {
            width: 100%;
            margin-top: 15px;
            height: 10px;
        }

        .step-status {
            display: inline-block;
            margin-left: 10px;
            font-size: 14px;
        }

        .step-time {
            float: right;
            color: #666;
        }
    </style>
</head>
<body>
    <div class="loading-overlay" id="loadingOverlay">
        <div class="loading-content">
            <div class="spinner"></div>
            <div class="loading-text">Processing your interview recording...</div>
            <div class="progress-steps">
                <div class="step" id="step1">1. Uploading video file...</div>
                <div class="step" id="step2">2. Extracting audio and analyzing emotions...</div>
                <div class="step" id="step3">3. Transcribing audio...</div>
                <div class="step" id="step4">4. Analyzing technical interview...</div>
            </div>
        </div>
    </div>
    <div class="container section">
        <h1>An AI Based Analyzer for Personal Attributes</h1>
        <p class="text-center text-muted">Upload a recording of a technical interview to get AI-powered analysis and feedback</p>

        <form id="uploadForm" class="mb-4">
            <div class="row mb-3">
                <div class="col-md-6">
                    <label for="candidateName" class="form-label">Candidate Name</label>
                    <input type="text" id="candidateName" name="candidate_name" class="form-control" placeholder="Enter candidate name" required>
                </div>
                <div class="col-md-6">
                    <label for="roleApplied" class="form-label">Role Applied For</label>
                    <input type="text" id="roleApplied" name="role_applied" class="form-control" placeholder="e.g. Senior Full Stack Developer" required>
                </div>
            </div>
            <div class="mb-3">
                <label for="techSkills" class="form-label">Technical Skills to Evaluate (comma-separated)</label>
                <input type="text" id="techSkills" name="tech_skills" class="form-control" placeholder="e.g. JavaScript, React, Node.js, System Design">
            </div>
            <div class="mb-3">
                <label for="videoFile" class="form-label">Upload Interview Recording</label>
                <input type="file" id="videoFile" name="video" accept="video/*" class="form-control" required>
                <div class="form-text">Upload a video recording of the technical interview (.mp4, .mov, .avi)</div>
                <div class="video-preview-container">
                    <video id="videoPreview" class="video-preview" controls>
                        Your browser does not support the video tag.
                    </video>
                </div>
                <div id="processingStatus" class="processing-status"></div>
            </div>
            <div class="d-grid">
                <button type="submit" class="btn btn-primary">
                    <i class="fas fa-analytics"></i> Analyze Interview
                </button>
            </div>
        </form>
    </div>

    <!-- Assessment Results Section -->
    <div class="container section" id="assessmentResultContainer" style="display:none;">
        <h2 class="text-center mb-4">Interview Assessment Results</h2>
        <div id="candidateInfo" class="text-center mb-4"></div>

        <div class="row">
            <!-- Technical Knowledge Section -->
            <div class="col-md-4">
                <div class="card assessment-card">
                    <div class="card-header card-header-technical">
                        <i class="fas fa-code"></i> Technical Knowledge
                    </div>
                    <div class="card-body">
                        <div id="technicalScore" class="score-display">
                            <div class="score-number">-</div>
                            <div class="progress">
                                <div class="progress-bar" role="progressbar" style="width: 0%" aria-valuenow="0" aria-valuemin="0" aria-valuemax="10"></div>
                            </div>
                        </div>
                        <h5>Strengths</h5>
                        <ul id="technicalStrengths" class="list-group list-group-flush mb-3"></ul>
                        <h5>Areas for Improvement</h5>
                        <ul id="technicalImprovements" class="list-group list-group-flush"></ul>
                    </div>
                </div>
            </div>

            <!-- Problem Solving Section -->
            <div class="col-md-4">
                <div class="card assessment-card">
                    <div class="card-header card-header-problem">
                        <i class="fas fa-puzzle-piece"></i> Problem Solving
                    </div>
                    <div class="card-body">
                        <div id="problemSolvingScore" class="score-display">
                            <div class="score-number">-</div>
                            <div class="progress">
                                <div class="progress-bar" role="progressbar" style="width: 0%" aria-valuenow="0" aria-valuemin="0" aria-valuemax="10"></div>
                            </div>
                        </div>
                        <h5>Strengths</h5>
                        <ul id="problemSolvingStrengths" class="list-group list-group-flush mb-3"></ul>
                        <h5>Areas for Improvement</h5>
                        <ul id="problemSolvingImprovements" class="list-group list-group-flush"></ul>
                    </div>
                </div>
            </div>

            <!-- Communication Section -->
            <div class="col-md-4">
                <div class="card assessment-card">
                    <div class="card-header card-header-communication">
                        <i class="fas fa-comments"></i> Communication
                    </div>
                    <div class="card-body">
                        <div id="communicationScore" class="score-display">
                            <div class="score-number">-</div>
                            <div class="progress">
                                <div class="progress-bar" role="progressbar" style="width: 0%" aria-valuenow="0" aria-valuemin="0" aria-valuemax="10"></div>
                            </div>
                        </div>
                        <h5>Strengths</h5>
                        <ul id="communicationStrengths" class="list-group list-group-flush mb-3"></ul>
                        <h5>Areas for Improvement</h5>
                        <ul id="communicationImprovements" class="list-group list-group-flush"></ul>
                    </div>
                </div>
            </div>
        </div>

        <!-- Emotion Analysis Section -->
        <div class="card assessment-card">
            <div class="card-header card-header-emotions">
                <i class="fas fa-smile"></i> Emotion Analysis
            </div>
            <div class="card-body">
                <p>Facial emotions detected throughout the interview:</p>
                <div id="emotionAnalysis" class="mb-4"></div>
                <div class="row">
                    <div class="col-md-6">
                        <p><strong>Total Faces Detected:</strong> <span id="totalFaces">-</span></p>
                        <p><strong>Frames Processed:</strong> <span id="framesProcessed">-</span></p>
                    </div>
                    <div class="col-md-6">
                        <p><strong>Frames with Faces:</strong> <span id="framesWithFaces">-</span></p>
                    </div>
                </div>
            </div>
        </div>

        <!-- Overall Recommendation -->
        <div class="card assessment-card">
            <div class="card-header card-header-recommendation">
                <i class="fas fa-thumbs-up"></i> Overall Recommendation
            </div>
            <div class="card-body text-center">
                <div id="overallRecommendation" class="recommendation-display">
                    <div class="recommendation-badge">-</div>
                </div>
                <div id="overallFeedback" class="mt-3"></div>
            </div>
        </div>

        <!-- Questions Analysis Section -->
        <div class="card assessment-card">
            <div class="card-header card-header-questions">
                <i class="fas fa-question-circle"></i> Question Analysis
            </div>
            <div class="card-body">
                <div id="questionAnalysis"></div>
            </div>
        </div>

        <!-- Interview Transcript -->
        <div class="card">
            <div class="card-header">
                Interview Transcript
            </div>
            <div class="card-body">
                <div class="accordion" id="accordionTranscript">
                    <div class="accordion-item">
                        <h2 class="accordion-header" id="headingTranscript">
                            <button class="accordion-button collapsed" type="button" data-bs-toggle="collapse" data-bs-target="#collapseTranscript" aria-expanded="false" aria-controls="collapseTranscript">
                                Show Full Transcript
                            </button>
                        </h2>
                        <div id="collapseTranscript" class="accordion-collapse collapse" aria-labelledby="headingTranscript" data-bs-parent="#accordionTranscript">
                            <div class="accordion-body">
                                <div id="transcriptText" class="transcript-container"></div>
                            </div>
                        </div>
                    </div>
                </div>
            </div>
        </div>

        <!-- Action Buttons -->
        <div class="d-flex justify-content-center gap-3 mt-4">
            <button id="downloadExcelBtn" class="btn btn-success">
                <i class="fas fa-file-excel"></i> Download Assessment Report
            </button>
            <button id="printAssessmentBtn" class="btn btn-secondary">
                <i class="fas fa-print"></i> Print Assessment
            </button>
            <button id="newAnalysisBtn" class="btn btn-primary">
                <i class="fas fa-plus"></i> New Analysis
            </button>
        </div>
    </div>

    <!-- Error Modal -->
    <div class="modal fade" id="errorModal" tabindex="-1" aria-labelledby="errorModalLabel" aria-hidden="true">
        <div class="modal-dialog">
            <div class="modal-content">
                <div class="modal-header bg-danger text-white">
                    <h5 class="modal-title" id="errorModalLabel">Error</h5>
                    <button type="button" class="btn-close" data-bs-dismiss="modal" aria-label="Close"></button>
                </div>
                <div class="modal-body" id="errorModalBody">
                    An error occurred while processing your request.
                </div>
                <div class="modal-footer">
                    <button type="button" class="btn btn-secondary" data-bs-dismiss="modal">Close</button>
                </div>
            </div>
        </div>
    </div>

    <!-- Bootstrap JS -->
    <script src="https://cdn.jsdelivr.net/npm/bootstrap@5.3.3/dist/js/bootstrap.bundle.min.js" integrity="sha384-YvpcrYf0tY3lHB60NNkmXc5s9fDVZLESaAA55NDzOxhy9GkcIdslK1eN7N6jIeHz" crossorigin="anonymous"></script>

    <!-- SheetJS (for Excel export) -->
    <script src="https://cdnjs.cloudflare.com/ajax/libs/xlsx/0.18.5/xlsx.full.min.js"></script>

    <!-- FileSaver.js (for Excel download) -->
    <script src="https://cdnjs.cloudflare.com/ajax/libs/FileSaver.js/2.0.5/FileSaver.min.js"></script>

    <script>
642
+ document.addEventListener('DOMContentLoaded', function() {
643
+ // Get DOM elements
644
+ const uploadForm = document.getElementById('uploadForm');
645
+ const loadingOverlay = document.getElementById('loadingOverlay');
646
+ const step1 = document.getElementById('step1');
647
+ const step2 = document.getElementById('step2');
648
+ const step3 = document.getElementById('step3');
649
+ const step4 = document.getElementById('step4');
650
+ const assessmentResultContainer = document.getElementById('assessmentResultContainer');
651
+ const downloadExcelBtn = document.getElementById('downloadExcelBtn');
652
+ const printAssessmentBtn = document.getElementById('printAssessmentBtn');
653
+ const newAnalysisBtn = document.getElementById('newAnalysisBtn');
654
+ const errorModal = new bootstrap.Modal(document.getElementById('errorModal'));
655
+ const errorModalBody = document.getElementById('errorModalBody');
656
+
657
+ // Store the current analysis results
658
+ let currentAnalysisResults = null;
659
+
660
+ // Test server connection on page load
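+ // NOTE: this assumes the Flask backend exposes a lightweight GET /test
+ // health-check route that returns JSON; adjust the path if the server differs.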
661
+ async function testServerConnection() {
662
+ try {
663
+ const response = await fetch('/test');
664
+ if (!response.ok) {
665
+ throw new Error(`Server responded with status: ${response.status}`);
666
+ }
667
+ const data = await response.json();
668
+ console.log('Server connection test:', data);
669
+ return true;
670
+ } catch (error) {
671
+ console.error('Server connection test failed:', error);
672
+ showError('Server connection failed. Please check if the server is running.');
673
+ return false;
674
+ }
675
+ }
676
+
677
+ // Test connection on page load
678
+ testServerConnection();
679
+
680
+ // Form submission handler
681
+ uploadForm.addEventListener('submit', async function(e) {
682
+ e.preventDefault();
683
+ console.log('Form submission started');
684
+
685
+ // Test server connection before proceeding
686
+ const isServerConnected = await testServerConnection();
687
+ if (!isServerConnected) {
688
+ return;
689
+ }
690
+
691
+ // Get form data
692
+ const formData = new FormData(uploadForm);
693
+ const candidateName = formData.get('candidate_name');
694
+ const roleApplied = formData.get('role_applied');
695
+ const techSkills = formData.get('tech_skills');
696
+ const videoFile = formData.get('video');
697
+
698
+ // Validate video file
699
+ if (videoFile.size === 0) {
700
+ showError('Please select a valid video file.');
701
+ return;
702
+ }
703
+
704
+ // Check file size (max 100MB)
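+ // This limit is client-side only and easy to bypass; the Flask server
+ // should also enforce a cap (e.g. via its MAX_CONTENT_LENGTH config).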
705
+ const maxSize = 100 * 1024 * 1024; // 100MB in bytes
706
+ if (videoFile.size > maxSize) {
707
+ showError('Video file is too large. Please select a file smaller than 100MB.');
708
+ return;
709
+ }
710
+
711
+ // Check file type
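+ // Browsers infer the MIME type from the file extension and may report an
+ // empty string for some containers, so only reject when a type is present.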
712
+ const allowedTypes = ['video/mp4', 'video/quicktime', 'video/x-msvideo'];
713
+ if (videoFile.type && !allowedTypes.includes(videoFile.type)) {
714
+ showError('Please select a valid video file (MP4, MOV, or AVI).');
715
+ return;
716
+ }
717
+
718
+ console.log('Form data:', {
719
+ candidateName,
720
+ roleApplied,
721
+ techSkills,
722
+ videoFileName: videoFile.name,
723
+ videoFileSize: videoFile.size,
724
+ videoFileType: videoFile.type
725
+ });
726
+
727
+ // Validate form inputs
728
+ if (!candidateName || !roleApplied || !videoFile) {
729
+ showError('Please fill in all required fields.');
730
+ return;
731
+ }
732
+
733
+ try {
734
+ loadingOverlay.style.display = 'block';
735
+ const startTime = Date.now();
736
+
737
+ // Update processing status
738
+ processingStatus.innerHTML = `
739
+ <div class="d-flex align-items-center">
740
+ <i class="fas fa-spinner fa-spin me-2"></i>
741
+ <div>Processing video... Please wait</div>
742
+ </div>
743
+ <div class="progress mt-2">
744
+ <div class="progress-bar progress-bar-striped progress-bar-animated" style="width: 0%"></div>
745
+ </div>
746
+ `;
747
+ processingStatus.style.display = 'block';
748
+ processingStatus.className = 'processing-status';
749
+
750
+ // Send video to server for analysis
751
+ const response = await fetch('/analyze_interview', {
752
+ method: 'POST',
753
+ body: formData
754
+ });
755
+
756
+ console.log('Server response status:', response.status);
757
+
758
+ if (!response.ok) {
759
+ const errorText = await response.text();
760
+ console.error('Server error response:', errorText);
761
+ throw new Error(`Server responded with ${response.status}: ${errorText}`);
762
+ }
763
+
764
+ const results = await response.json();
765
+ console.log('Received results:', results);
766
+
767
+ if (!results || typeof results !== 'object') {
768
+ throw new Error('Invalid response format from server');
769
+ }
770
+
771
+ if (results.error) {
772
+ throw new Error(results.error);
773
+ }
774
+
775
+ // Store current results
776
+ currentAnalysisResults = results;
777
+ console.log('Results stored in currentAnalysisResults');
778
+
779
+ // Hide loading overlay
780
+ loadingOverlay.style.display = 'none';
781
+
782
+ // Make sure the results container is visible
783
+ assessmentResultContainer.style.display = 'block';
784
+ console.log('Results container displayed');
785
+
786
+ // Display results
787
+ await displayAnalysisResults(results);
788
+ console.log('Results displayed');
789
+
790
+ // Update processing status
791
+ const endTime = Date.now();
792
+ const duration = ((endTime - startTime) / 1000).toFixed(2);
793
+ processingStatus.innerHTML = `
794
+ <div class="d-flex align-items-center">
795
+ <i class="fas fa-check-circle text-success me-2"></i>
796
+ <div>Analysis completed in ${duration} seconds</div>
797
+ </div>
798
+ `;
799
+ processingStatus.style.display = 'block';
800
+ processingStatus.className = 'processing-status success';
801
+
802
+ // Scroll to results
803
+ assessmentResultContainer.scrollIntoView({ behavior: 'smooth' });
804
+ console.log('Scrolled to results');
805
+
806
+ } catch (error) {
807
+ console.error('Error in form submission:', error);
808
+ loadingOverlay.style.display = 'none';
809
+ showError('Failed to analyze the interview: ' + error.message);
810
+ }
811
+ });
812
+
813
+ // Download Excel report handler
814
+ downloadExcelBtn.addEventListener('click', async function() {
815
+ if (!currentAnalysisResults) {
816
+ showError('No analysis results available');
817
+ return;
818
+ }
819
+
820
+ try {
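+ // POST the stored results back to the server, which is expected to build
+ // an .xlsx report (pandas/openpyxl/xlsxwriter are pinned in
+ // requirements.txt) and stream it back as a binary blob.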
821
+ const response = await fetch('/download_assessment', {
822
+ method: 'POST',
823
+ headers: {
824
+ 'Content-Type': 'application/json'
825
+ },
826
+ body: JSON.stringify(currentAnalysisResults)
827
+ });
828
+
829
+ if (!response.ok) {
830
+ throw new Error('Failed to generate Excel report');
831
+ }
832
+
833
+ // Create a blob from the response
834
+ const blob = await response.blob();
835
+
836
+ // Create a download link
837
+ const url = window.URL.createObjectURL(blob);
838
+ const a = document.createElement('a');
839
+ a.style.display = 'none';
840
+ a.href = url;
841
+ a.download = `${(currentAnalysisResults.candidate_name || 'candidate').replace(/\s+/g, '_')}_interview_assessment.xlsx`;
842
+
843
+ // Append to the document and trigger click
844
+ document.body.appendChild(a);
845
+ a.click();
846
+
847
+ // Clean up
848
+ window.URL.revokeObjectURL(url);
849
+ document.body.removeChild(a);
850
+ } catch (error) {
851
+ console.error('Error downloading Excel:', error);
852
+ showError('Failed to download Excel report: ' + error.message);
853
+ }
854
+ });
855
+
856
+ // Print assessment handler
857
+ printAssessmentBtn.addEventListener('click', function() {
858
+ window.print();
859
+ });
860
+
861
+ // New analysis handler
862
+ newAnalysisBtn.addEventListener('click', function() {
863
+ // Hide results container
864
+ assessmentResultContainer.style.display = 'none';
865
+
866
+ // Reset form
867
+ uploadForm.reset();
868
+
869
+ // Scroll to top
870
+ window.scrollTo({ top: 0, behavior: 'smooth' });
871
+ });
872
+
873
+ // Function to show error message
874
+ function showError(message) {
875
+ console.error('Error:', message);
876
+ errorModalBody.textContent = message;
877
+ errorModal.show();
878
+ }
879
+
880
+ // Function to update progress steps
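+ // Only the demo simulator below calls this; the real analysis flow
+ // shows a single spinner rather than stepping through these states.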
881
+ function updateProgressStep(step) {
882
+ console.log('Updating to step:', step);
883
+ const steps = [step1, step2, step3, step4];
884
+ const startTime = new Date();
885
+
886
+ // Reset all steps
887
+ steps.forEach((el, index) => {
888
+ el.classList.remove('active', 'completed');
889
+
890
+ // Add timing element if not exists
891
+ if (!el.querySelector('.step-time')) {
892
+ const timeSpan = document.createElement('span');
893
+ timeSpan.className = 'step-time';
894
+ el.appendChild(timeSpan);
895
+ }
896
+
897
+ // Add status indicator if not exists
898
+ if (!el.querySelector('.step-status')) {
899
+ const statusSpan = document.createElement('span');
900
+ statusSpan.className = 'step-status';
901
+ el.appendChild(statusSpan);
902
+ }
903
+ });
904
+
905
+ // Update steps based on progress
906
+ steps.forEach((el, index) => {
907
+ const stepNum = index + 1;
908
+ const statusSpan = el.querySelector('.step-status');
909
+ const timeSpan = el.querySelector('.step-time');
910
+
911
+ if (stepNum < step) {
912
+ el.classList.add('completed');
913
+ statusSpan.innerHTML = '<i class="fas fa-check text-success"></i>';
914
+ timeSpan.textContent = '✓';
915
+ } else if (stepNum === step) {
916
+ el.classList.add('active');
917
+ statusSpan.innerHTML = '<i class="fas fa-spinner fa-spin"></i>';
918
+ timeSpan.textContent = 'Processing...';
919
+ } else {
920
+ statusSpan.innerHTML = '';
921
+ timeSpan.textContent = 'Waiting...';
922
+ }
923
+ });
924
+ }
925
+
926
+ // Function to display analysis results
927
+ async function displayAnalysisResults(results) {
928
+ console.log('Starting to display results');
929
+ try {
930
+ if (!results.candidate_assessment) {
931
+ throw new Error('Missing candidate assessment data');
932
+ }
933
+
934
+ // Update candidate info
935
+ const candidateInfo = document.getElementById('candidateInfo');
936
+ if (!candidateInfo) {
937
+ throw new Error('Cannot find candidateInfo element');
938
+ }
939
+
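+ // Server-supplied strings are injected with innerHTML here and below;
+ // this assumes the backend returns sanitized text (prefer textContent if not).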
940
+ candidateInfo.innerHTML = `
941
+ <h3>${results.candidate_name || 'Candidate'}</h3>
942
+ <p class="text-muted">${results.role_applied || 'Role not specified'}</p>
943
+ <p>Interview Date: ${results.interview_date || new Date().toLocaleDateString()}</p>
944
+ `;
945
+ console.log('Updated candidate info');
946
+
947
+ // Update scores and assessments
948
+ await updateAssessmentSection('technical', results.candidate_assessment.technical_knowledge);
949
+ await updateAssessmentSection('problemSolving', results.candidate_assessment.problem_solving);
950
+ await updateAssessmentSection('communication', results.candidate_assessment.communication);
951
+ console.log('Updated assessment sections');
952
+
953
+ // Update emotion analysis
954
+ await updateEmotionAnalysis(results.emotion_analysis);
955
+ console.log('Updated emotion analysis');
956
+
957
+ // Update recommendation
958
+ await updateRecommendation(results);
959
+ console.log('Updated recommendation');
960
+
961
+ // Update question analysis
962
+ await updateQuestionAnalysis(results.question_analysis);
963
+ console.log('Updated question analysis');
964
+
965
+ // Update transcript
966
+ const transcriptText = document.getElementById('transcriptText');
967
+ if (transcriptText) {
968
+ transcriptText.textContent = results.transcription || 'No transcript available';
969
+ console.log('Updated transcript');
970
+ }
971
+
972
+ } catch (error) {
973
+ console.error('Error in displayAnalysisResults:', error);
974
+ showError('Error displaying results: ' + error.message);
975
+ }
976
+ }
977
+
978
+ // Helper function to update assessment sections
979
+ async function updateAssessmentSection(type, data) {
980
+ if (!data) {
981
+ console.warn(`No data provided for ${type} assessment`);
982
+ return;
983
+ }
984
+
985
+ try {
986
+ // Update score
987
+ const scoreElement = document.querySelector(`#${type}Score .score-number`);
988
+ const progressBar = document.querySelector(`#${type}Score .progress-bar`);
989
+
990
+ if (scoreElement && progressBar) {
991
+ scoreElement.textContent = data.score || 0;
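+ // Scores are assumed to be on a 0-10 scale (aria-valuemax="10"),
+ // so multiplying by 10 maps them onto the 0-100% bar width.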
992
+ const width = ((data.score || 0) * 10) + '%';
993
+ progressBar.style.width = width;
994
+ progressBar.setAttribute('aria-valuenow', data.score || 0);
995
+ }
996
+
997
+ // Update strengths
998
+ const strengthsList = document.getElementById(`${type}Strengths`);
999
+ if (strengthsList) {
1000
+ strengthsList.innerHTML = '';
1001
+ (data.strengths || []).forEach(strength => {
1002
+ const li = document.createElement('li');
1003
+ li.className = 'list-group-item';
1004
+ li.innerHTML = `<i class="fas fa-check-circle text-success me-2"></i>${strength}`;
1005
+ strengthsList.appendChild(li);
1006
+ });
1007
+ }
1008
+
1009
+ // Update improvements
1010
+ const improvementsList = document.getElementById(`${type}Improvements`);
1011
+ if (improvementsList) {
1012
+ improvementsList.innerHTML = '';
1013
+ (data.areas_for_improvement || []).forEach(improvement => {
1014
+ const li = document.createElement('li');
1015
+ li.className = 'list-group-item';
1016
+ li.innerHTML = `<i class="fas fa-arrow-circle-up text-primary me-2"></i>${improvement}`;
1017
+ improvementsList.appendChild(li);
1018
+ });
1019
+ }
1020
+ } catch (error) {
1021
+ console.error(`Error updating ${type} assessment:`, error);
1022
+ }
1023
+ }
1024
+
1025
+ // Helper function to update emotion analysis
1026
+ async function updateEmotionAnalysis(emotionData) {
1027
+ if (!emotionData) {
1028
+ console.warn('No emotion data provided');
1029
+ return;
1030
+ }
1031
+
1032
+ try {
1033
+ const emotionAnalysis = document.getElementById('emotionAnalysis');
1034
+ if (!emotionAnalysis) return;
1035
+
1036
+ emotionAnalysis.innerHTML = '';
1037
+
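+ // emotion_percentages is expected to be an object mapping emotion name
+ // to its share of detected frames (e.g. { happy: 42.0, neutral: 30.5 }),
+ // with a matching .emotion-<name> CSS class for each bar.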
1038
+ if (emotionData.emotion_percentages) {
1039
+ const emotions = emotionData.emotion_percentages;
1040
+ const sortedEmotions = Object.entries(emotions)
1041
+ .sort((a, b) => b[1] - a[1])
1042
+ .filter(([_, value]) => value > 0);
1043
+
1044
+ sortedEmotions.forEach(([emotion, percentage]) => {
1045
+ const emotionBar = document.createElement('div');
1046
+ emotionBar.className = `emotion-bar emotion-${emotion.toLowerCase()}`;
1047
+ emotionBar.style.width = `${percentage}%`;
1048
+ emotionBar.style.minWidth = '150px';
1049
+
1050
+ const emotionLabel = document.createElement('span');
1051
+ emotionLabel.className = 'emotion-label';
1052
+ emotionLabel.textContent = emotion.charAt(0).toUpperCase() + emotion.slice(1);
1053
+
1054
+ const emotionPercentage = document.createElement('span');
1055
+ emotionPercentage.className = 'emotion-percentage';
1056
+ emotionPercentage.textContent = `${percentage.toFixed(1)}%`;
1057
+
1058
+ emotionBar.appendChild(emotionLabel);
1059
+ emotionBar.appendChild(emotionPercentage);
1060
+ emotionAnalysis.appendChild(emotionBar);
1061
+ });
1062
+ }
1063
+
1064
+ // Update metrics
1065
+ document.getElementById('totalFaces').textContent = emotionData.total_faces || 0;
1066
+ document.getElementById('framesProcessed').textContent = emotionData.frames_processed || 0;
1067
+ document.getElementById('framesWithFaces').textContent = emotionData.frames_with_faces || 0;
1068
+ } catch (error) {
1069
+ console.error('Error updating emotion analysis:', error);
1070
+ }
1071
+ }
1072
+
1073
+ // Helper function to update recommendation
1074
+ async function updateRecommendation(results) {
1075
+ try {
1076
+ const recommendationBadge = document.querySelector('#overallRecommendation .recommendation-badge');
1077
+ if (!recommendationBadge) return;
1078
+
1079
+ const recommendation = results.overall_recommendation || 'Consider';
1080
+ recommendationBadge.textContent = recommendation;
1081
+ recommendationBadge.className = 'recommendation-badge';
1082
+
1083
+ const recommendationLower = recommendation.toLowerCase();
1084
+ // Match 'strong' and 'no hire' before plain 'hire' so that "Strong Hire"
+ // and "No Hire" are not both styled as an ordinary hire
+ if (recommendationLower.includes('strong')) {
+ recommendationBadge.classList.add('recommendation-strong');
+ } else if (recommendationLower.includes('no hire')) {
+ recommendationBadge.classList.add('recommendation-reject');
+ } else if (recommendationLower.includes('hire')) {
+ recommendationBadge.classList.add('recommendation-hire');
1088
+ } else if (recommendationLower.includes('consider')) {
1089
+ recommendationBadge.classList.add('recommendation-consider');
1090
+ } else {
1091
+ recommendationBadge.classList.add('recommendation-reject');
1092
+ }
1093
+
1094
+ const feedbackElement = document.getElementById('overallFeedback');
1095
+ if (feedbackElement) {
1096
+ feedbackElement.innerHTML = results.overall_feedback || 'No feedback available';
1097
+ }
1098
+ } catch (error) {
1099
+ console.error('Error updating recommendation:', error);
1100
+ }
1101
+ }
1102
+
1103
+ // Helper function to update question analysis
1104
+ async function updateQuestionAnalysis(questionData) {
1105
+ try {
1106
+ const questionAnalysis = document.getElementById('questionAnalysis');
1107
+ if (!questionAnalysis) return;
1108
+
1109
+ questionAnalysis.innerHTML = '';
1110
+
1111
+ if (Array.isArray(questionData)) {
1112
+ questionData.forEach(qa => {
1113
+ const questionCard = document.createElement('div');
1114
+ questionCard.className = 'card question-card mb-3';
1115
+
1116
+ questionCard.innerHTML = `
1117
+ <div class="card-body">
1118
+ <p class="question-text"><i class="fas fa-question-circle me-2"></i>${qa.question || 'Question not available'}</p>
1119
+ <span class="answer-quality ${getQualityClass(qa.answer_quality)}">${qa.answer_quality || 'Not rated'}</span>
1120
+ <p class="mt-2">${qa.feedback || 'No feedback available'}</p>
1121
+ </div>
1122
+ `;
1123
+
1124
+ questionAnalysis.appendChild(questionCard);
1125
+ });
1126
+ }
1127
+ } catch (error) {
1128
+ console.error('Error updating question analysis:', error);
1129
+ }
1130
+ }
1131
+
1132
+ // Helper function to get the appropriate class for answer quality
1133
+ function getQualityClass(quality) {
1134
+ quality = (quality || '').toLowerCase();
1135
+ if (quality.includes('excellent')) return 'bg-success text-white';
1136
+ if (quality.includes('good')) return 'bg-primary text-white';
1137
+ if (quality.includes('average') || quality.includes('satisfactory')) return 'bg-warning';
1138
+ return 'bg-danger text-white';
1139
+ }
1140
+
1141
+ // Simulated progression for the demo
1142
+ function simulateProgressSteps() {
1143
+ setTimeout(() => updateProgressStep(1), 1000);
1144
+ setTimeout(() => updateProgressStep(2), 3000);
1145
+ setTimeout(() => updateProgressStep(3), 6000);
1146
+ setTimeout(() => updateProgressStep(4), 9000);
1147
+ }
1148
+
1149
+ // For the demo version, show all sections
1150
+ function setupDemoMode() {
1151
+ // Add event listener to show a loading demo
1152
+ document.getElementById('uploadForm').addEventListener('submit', function(e) {
1153
+ e.preventDefault();
1154
+ loadingOverlay.style.display = 'block';
1155
+ simulateProgressSteps();
1156
+
1157
+ // After "processing" display demo results
1158
+ setTimeout(() => {
1159
+ loadingOverlay.style.display = 'none';
1160
+ if (typeof displayDemoResults === 'function') {
+ displayDemoResults();
+ }
1161
+ }, 12000);
1162
+ });
1163
+ }
1164
+
1165
+ // Demo mode attaches a second submit handler on top of the real one,
+ // so enable it only when explicitly requested via a ?demo=1 query
+ // parameter rather than assuming every localhost run is a demo
+ if (new URLSearchParams(window.location.search).has('demo')) {
1167
+ setupDemoMode();
1168
+ }
1169
+
1170
+ // Video preview and processing-status elements; processingStatus is also
+ // used by the submit handler above, which is safe because that handler
+ // only fires after these declarations have run
1171
+ const videoFile = document.getElementById('videoFile');
1172
+ const videoPreview = document.getElementById('videoPreview');
1173
+ const videoPreviewContainer = document.querySelector('.video-preview-container');
1174
+ const processingStatus = document.getElementById('processingStatus');
1175
+
1176
+ // Video file preview handler
1177
+ videoFile.addEventListener('change', function(e) {
1178
+ const file = e.target.files[0];
1179
+ if (file) {
1180
+ const videoUrl = URL.createObjectURL(file);
1181
+ videoPreview.src = videoUrl;
1182
+ videoPreviewContainer.style.display = 'block';
1183
+
1184
+ // Show file details
1185
+ processingStatus.innerHTML = `
1186
+ <div class="d-flex align-items-center">
1187
+ <i class="fas fa-check-circle text-success me-2"></i>
1188
+ <div>
1189
+ <strong>File selected:</strong> ${file.name}<br>
1190
+ <small class="text-muted">Size: ${(file.size / (1024 * 1024)).toFixed(2)} MB</small>
1191
+ </div>
1192
+ </div>
1193
+ `;
1194
+ processingStatus.style.display = 'block';
1195
+ processingStatus.className = 'processing-status success';
1196
+ }
1197
+ });
1198
+ });
1199
+ </script>
1200
+ </body>
1201
+ </html>
model.h5 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e15a75d012fa202b4c02220169059d2463a71228d6360e300221167550717550
3
+ size 16139272
requirements.txt ADDED
@@ -0,0 +1,14 @@
1
+ flask==3.0.2
2
+ flask-cors==4.0.0
3
+ python-dotenv==1.0.1
4
+ groq==0.3.4
5
+ google-generativeai==0.3.2
6
+ pandas==2.2.1
7
+ opencv-python==4.9.0.80
8
+ numpy==1.26.4
9
+ matplotlib==3.8.3
10
+ tensorflow==2.15.0
11
+ moviepy==1.0.3
12
+ Werkzeug==3.0.1
13
+ openpyxl==3.1.2
14
+ xlsxwriter==3.1.9