husseinelsaadi committed
Commit a1b807c · 1 Parent(s): 3e2c190

back version
backend/routes/interview_api.py CHANGED
@@ -187,23 +187,22 @@ def download_report(application_id: int):
         logging.error(f"Error generating report for application {application_id}: {exc}")
         return jsonify({"error": "Failed to generate report"}), 500
 
-# Modified process_answer endpoint - replace the existing one with this:
-
 @interview_api.route("/process_answer", methods=["POST"])
 @login_required
 def process_answer():
     """
-    Process a user's answer and return a conversational follow-up question
-    along with an evaluation. Always responds with JSON.
+    Process a user's answer and return a follow-up question along with an
+    evaluation. Always responds with JSON.
     """
     try:
         data = request.get_json() or {}
         answer = data.get("answer", "").strip()
         question_idx = data.get("questionIndex", 0)
+
+        # ``job_id`` is required to determine how many total questions are
+        # expected for this interview. Without it we fall back to a
+        # three‑question interview.
         job_id = data.get("job_id")
-
-        # NEW: Get conversation history if provided
-        conversation_history = data.get("conversation_history", [])
 
         if not answer:
             return jsonify({"error": "No answer provided."}), 400
@@ -211,53 +210,64 @@ def process_answer():
         # Get the current question for evaluation context
         current_question = data.get("current_question", "Tell me about yourself")
 
-        # Evaluate the answer (now includes acknowledgment)
+        # Evaluate the answer
        evaluation_result = evaluate_answer(current_question, answer)
 
-        # Add current Q&A to conversation history
-        conversation_history.append((current_question, answer))
-
         # Determine the number of questions configured for this job
         total_questions = 3
-        job_role = "Software Developer"  # Default
-
         if job_id is not None:
             try:
                 job = Job.query.get(int(job_id))
-                if job:
-                    if job.num_questions and job.num_questions > 0:
-                        total_questions = job.num_questions
-                    job_role = job.role  # Get the actual job role
+                if job and job.num_questions and job.num_questions > 0:
+                    total_questions = job.num_questions
             except Exception:
+                # If lookup fails, keep default
                 pass
 
-        # Check if interview is complete
+        # Check completion. ``question_idx`` is zero‑based; the last index
+        # corresponds to ``total_questions - 1``. When the current index
+        # reaches or exceeds this value, the interview is complete.
         is_complete = question_idx >= (total_questions - 1)
 
         next_question_text = None
         audio_url = None
 
         if not is_complete:
-            # Generate dynamic follow-up question based on the conversation
-            from backend.services.interview_engine import generate_dynamic_followup
-
-            # Combine acknowledgment with the follow-up question
-            followup = generate_dynamic_followup(
-                previous_question=current_question,
-                candidate_answer=answer,
-                job_role=job_role,
-                conversation_history=conversation_history[:-1],  # Exclude current Q&A
-                question_number=question_idx,
-                total_questions=total_questions
+            # Follow‑up question bank. These are used for indices 1 .. n‑2.
+            # The final question (last index) probes salary expectations and
+            # working preferences. If the recruiter has configured fewer
+            # questions than the number of entries here, only the first
+            # appropriate number will be used.
+            follow_up_questions = [
+                "Can you describe a challenging project you've worked on and how you overcame the difficulties?",
+                "What is your favorite machine learning algorithm and why?",
+                "How do you stay up-to-date with advancements in AI?",
+                "Describe a time you had to learn a new technology quickly. How did you approach it?"
+            ]
+            final_question = (
+                "What are your salary expectations? Are you looking for a full-time or part-time role, "
+                "and do you prefer remote or on-site work?"
             )
-
-            # If the evaluation had a good score, we might want to prepend extra praise
-            if evaluation_result.get("score") == "Excellent":
-                next_question_text = followup
-            else:
-                next_question_text = followup
-
-            # Try to generate audio for the complete response
+
+            # Compute the next index (zero‑based) for the upcoming question
+            next_idx = question_idx + 1
+
+            # Determine which question to ask next. If next_idx is the last
+            # question (i.e. equals total_questions - 1), use the final
+            # question. Otherwise, select a follow‑up question from the
+            # bank based on ``next_idx - 1`` (because index 0 is for the
+            # first follow‑up). If out of range, cycle through the list.
+            if next_idx == (total_questions - 1):
+                next_question_text = final_question
+            else:
+                if follow_up_questions:
+                    idx_in_bank = (next_idx - 1) % len(follow_up_questions)
+                    next_question_text = follow_up_questions[idx_in_bank]
+                else:
+                    # Fallback if no follow‑ups are defined
+                    next_question_text = "Do you have any questions about the role or our company?"
+
+            # Try to generate audio for the next question
             try:
                 audio_dir = "/tmp/audio"
                 os.makedirs(audio_dir, exist_ok=True)
@@ -277,14 +287,14 @@ def process_answer():
             "next_question": next_question_text,
             "audio_url": audio_url,
             "evaluation": evaluation_result,
-            "is_complete": is_complete,
-            "conversation_history": conversation_history  # Return updated history
+            "is_complete": is_complete
         })
 
     except Exception as e:
         logging.error(f"Error in process_answer: {e}")
-        return jsonify({"error": "Error processing answer. Please try again."}), 500@interview_api.route("/audio/<string:filename>", methods=["GET"])
+        return jsonify({"error": "Error processing answer. Please try again."}), 500
 
+@interview_api.route("/audio/<string:filename>", methods=["GET"])
 @login_required
 def get_audio(filename: str):
     """Serve previously generated TTS audio from the /tmp/audio directory."""
backend/services/interview_engine.py CHANGED
@@ -9,7 +9,6 @@ import tempfile
 import shutil
 import torch
 
-# [KEEPING ALL THE INITIALIZATION CODE EXACTLY THE SAME]
 if torch.cuda.is_available():
     print("🔥 CUDA Available")
     print(torch.cuda.get_device_name(0))
@@ -21,9 +20,17 @@ print("🧠 GPU:", torch.cuda.get_device_name(0))
 print("💡 cuDNN version:", torch.backends.cudnn.version())
 print("💥 cuDNN enabled:", torch.backends.cudnn.is_available())
 
+
+
 # Initialize models
 chat_groq_api = os.getenv("GROQ_API_KEY")
 
+# Attempt to initialize the Groq LLM only if an API key is provided. When
+# running in environments where the key is unavailable (such as local
+# development or automated testing), fall back to a simple stub that
+# generates generic responses. This avoids raising an exception at import
+# time and allows the rest of the application to run without external
+# dependencies. See the DummyGroq class defined below.
 if chat_groq_api:
     try:
         groq_llm = ChatGroq(
@@ -39,11 +46,31 @@ else:
 
 if groq_llm is None:
     class DummyGroq:
+        """A fallback language model used when no Groq API key is set.
+
+        The ``invoke`` method of this class returns a simple canned response
+        rather than calling an external API. This ensures that the
+        interview functionality still produces a sensible prompt, albeit
+        without advanced LLM behaviour.
+        """
         def invoke(self, prompt: str):
+            # Provide a very generic question based on the prompt. This
+            # implementation ignores the prompt contents entirely; in a more
+            # sophisticated fallback you could parse ``prompt`` to tailor
+            # responses.
             return "Tell me about yourself and why you're interested in this position."
+
     groq_llm = DummyGroq()
 
 # Initialize Whisper model
+#
+# Loading the Whisper model can take several seconds on first use because the
+# model weights must be downloaded from Hugging Face. This delay can cause
+# the API call to ``/api/transcribe_audio`` to appear stuck while the model
+# downloads. To mitigate this, we allow the model size to be configured via
+# the ``WHISPER_MODEL_NAME`` environment variable and preload the model when
+# this module is imported. Using a smaller model (e.g. "tiny" or "base.en")
+# reduces download size and inference time considerably.
 whisper_model = None
 
 def load_whisper_model():
@@ -52,11 +79,16 @@ def load_whisper_model():
     try:
         device = "cuda" if torch.cuda.is_available() else "cpu"
         compute_type = "float16" if device == "cuda" else "int8"
+        # Allow overriding the model size via environment. Default to a
+        # lightweight model to improve startup times. Available options
+        # include: tiny, base, base.en, small, medium, large. See
+        # https://huggingface.co/ggerganov/whisper.cpp for details.
         model_name = os.getenv("WHISPER_MODEL_NAME", "tiny")
         whisper_model = WhisperModel(model_name, device=device, compute_type=compute_type)
         logging.info(f"Whisper model '{model_name}' loaded on {device} with {compute_type}")
     except Exception as e:
         logging.error(f"Error loading Whisper model: {e}")
+        # Fallback to CPU
         whisper_model = WhisperModel(model_name if 'model_name' in locals() else "tiny", device="cpu", compute_type="int8")
     return whisper_model
 
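Since the comment in this hunk says the model is preloaded when the module is imported, the size override has to happen before the engine module is imported. A minimal sketch, assuming the variable is set early in the process:

```python
import os

# Assumption: must run before the engine module is imported, because the
# module preloads the Whisper model at import time (per the comment above).
os.environ["WHISPER_MODEL_NAME"] = "base.en"

from backend.services.interview_engine import load_whisper_model

model = load_whisper_model()  # downloads weights on first use, then loads the model
```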
@@ -75,11 +107,12 @@ def generate_first_question(profile, job):
     Generate an appropriate opening interview question that is professional and relevant.
     Keep it concise and clear. Respond with ONLY the question text, no additional formatting.
     If the interview is for a technical role, focus on technical skills. Make the question related
-    to the job role and the candidate's background.
+    to the job role and the candidate's background and the previous question.
     """
 
     response = groq_llm.invoke(prompt)
 
+    # Fix: Handle AIMessage object properly
     if hasattr(response, 'content'):
         question = response.content.strip()
     elif isinstance(response, str):
@@ -87,6 +120,7 @@ def generate_first_question(profile, job):
     else:
         question = str(response).strip()
 
+    # Ensure we have a valid question
     if not question or len(question) < 10:
         question = "Tell me about yourself and why you're interested in this position."
 
@@ -97,77 +131,15 @@ def generate_first_question(profile, job):
         logging.error(f"Error generating first question: {e}")
         return "Tell me about yourself and why you're interested in this position."
 
-# NEW FUNCTION: Generate dynamic follow-up questions based on the conversation
-def generate_dynamic_followup(previous_question, candidate_answer, job_role, conversation_history=None, question_number=1, total_questions=3):
-    """Generate a dynamic follow-up question based on the candidate's answer"""
-    try:
-        # Build conversation context
-        context = ""
-        if conversation_history:
-            for q, a in conversation_history:
-                context += f"\nQ: {q}\nA: {a}\n"
-
-        prompt = f"""
-        You are an experienced interviewer conducting an interview for a {job_role} position.
-
-        Previous conversation:
-        {context}
-
-        Current question: {previous_question}
-        Candidate's answer: {candidate_answer}
-
-        This is question {question_number + 1} out of {total_questions} total questions.
-
-        Your task is to:
-        1. First, acknowledge their answer appropriately (e.g., "That's interesting", "Great point", "I see", "Excellent experience with...", etc.)
-        2. If the answer was particularly good, give brief positive feedback
-        3. Then ask a natural follow-up question that:
-           - Builds on what they just said
-           - Digs deeper into their experience or knowledge
-           - Relates to the job requirements
-           - Feels like a natural conversation flow
-
-        Keep your response conversational and professional. The acknowledgment should be brief (1-2 sentences max).
-
-        If this is the last question (question {total_questions}), make it about salary expectations, work preferences (remote/onsite), and availability.
-
-        Respond with ONLY your acknowledgment and question, no additional formatting or metadata.
-        """
-
-        response = groq_llm.invoke(prompt)
-
-        if hasattr(response, 'content'):
-            question = response.content.strip()
-        elif isinstance(response, str):
-            question = response.strip()
-        else:
-            question = str(response).strip()
-
-        if not question or len(question) < 10:
-            # Fallback questions with acknowledgments
-            fallbacks = [
-                "That's a good answer. Can you tell me more about a specific challenge you faced in that situation?",
-                "Interesting perspective. How do you stay updated with the latest developments in your field?",
-                "I appreciate your detailed response. What would you say is your greatest professional achievement?",
-                "Thank you for sharing that. Where do you see yourself professionally in the next 3-5 years?"
-            ]
-            question = fallbacks[question_number % len(fallbacks)]
-
-        logging.info(f"Generated dynamic follow-up: {question}")
-        return question
-
-    except Exception as e:
-        logging.error(f"Error generating dynamic follow-up: {e}")
-        return "Thank you for that answer. Can you tell me more about your experience in this area?"
-
-# [KEEPING ALL OTHER FUNCTIONS EXACTLY THE SAME]
 def edge_tts_to_file_sync(text, output_path, voice="en-US-AriaNeural"):
     """Synchronous wrapper for edge-tts with better error handling"""
     try:
+        # Ensure text is not empty
         if not text or not text.strip():
             logging.error("Empty text provided for TTS")
             return None
 
+        # Ensure the directory exists and is writable
         directory = os.path.dirname(output_path)
         if not directory:
             directory = "/tmp/audio"
@@ -175,6 +147,7 @@ def edge_tts_to_file_sync(text, output_path, voice="en-US-AriaNeural"):
 
         os.makedirs(directory, exist_ok=True)
 
+        # Test write permissions with a temporary file
         test_file = os.path.join(directory, f"test_{os.getpid()}.tmp")
         try:
             with open(test_file, 'w') as f:
@@ -183,6 +156,7 @@ def edge_tts_to_file_sync(text, output_path, voice="en-US-AriaNeural"):
             logging.info(f"Directory {directory} is writable")
         except (PermissionError, OSError) as e:
             logging.error(f"Directory {directory} is not writable: {e}")
+            # Fallback to /tmp
             directory = "/tmp/audio"
             output_path = os.path.join(directory, os.path.basename(output_path))
             os.makedirs(directory, exist_ok=True)
@@ -196,9 +170,11 @@ def edge_tts_to_file_sync(text, output_path, voice="en-US-AriaNeural"):
             logging.error(f"Error in async TTS generation: {e}")
             raise
 
+        # Run async function in sync context
         try:
             loop = asyncio.get_event_loop()
             if loop.is_running():
+                # If loop is already running, create a new one in a thread
                 import threading
                 import concurrent.futures
 
@@ -212,10 +188,11 @@ def edge_tts_to_file_sync(text, output_path, voice="en-US-AriaNeural"):
 
                 with concurrent.futures.ThreadPoolExecutor() as executor:
                     future = executor.submit(run_in_thread)
-                    future.result(timeout=30)
+                    future.result(timeout=30)  # 30 second timeout
             else:
                 loop.run_until_complete(generate_audio())
         except RuntimeError:
+            # No event loop exists
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
            try:
@@ -223,9 +200,10 @@ def edge_tts_to_file_sync(text, output_path, voice="en-US-AriaNeural"):
         finally:
             loop.close()
 
+        # Verify file was created and has content
         if os.path.exists(output_path):
             file_size = os.path.getsize(output_path)
-            if file_size > 1000:
+            if file_size > 1000:  # At least 1KB for a valid audio file
                 logging.info(f"TTS file created successfully: {output_path} ({file_size} bytes)")
                 return output_path
             else:
@@ -257,7 +235,7 @@ def convert_webm_to_wav(webm_path, wav_path):
         logging.error(f"Error converting audio: {e}")
         return None
 
-import subprocess
+import subprocess  # top of the file if not already imported
 
 def whisper_stt(audio_path):
     """Speech-to-text using Faster-Whisper"""
@@ -266,10 +244,11 @@ def whisper_stt(audio_path):
         logging.error(f"Audio file is empty or missing: {audio_path}")
         return ""
 
+    # Convert webm to wav using ffmpeg
     wav_path = audio_path.replace(".webm", ".wav")
     cmd = [
         "ffmpeg",
-        "-y",
+        "-y",  # overwrite
         "-i", audio_path,
         "-ar", "16000",
         "-ac", "1",
@@ -290,15 +269,13 @@ def whisper_stt(audio_path):
         logging.error(f"Error in STT: {e}")
         return ""
 
-# ENHANCED EVALUATION FUNCTION with more conversational feedback
 def evaluate_answer(question, answer, job_role="Software Developer", seniority="Mid-level"):
-    """Evaluate candidate's answer with conversational feedback"""
+    """Evaluate candidate's answer with better error handling"""
     try:
         if not answer or not answer.strip():
             return {
                 "score": "Poor",
-                "feedback": "No answer provided.",
-                "acknowledgment": "I didn't catch your response. Could you please elaborate?"
+                "feedback": "No answer provided."
             }
 
         prompt = f"""
@@ -308,19 +285,18 @@ def evaluate_answer(question, answer, job_role="Software Developer", seniority="Mid-level"):
         Candidate Answer: {answer}
 
         Evaluate based on technical correctness, clarity, and relevance.
-        Provide:
-        1. A brief, conversational acknowledgment (e.g., "Great example!", "Interesting approach", "Good point")
-        2. A score: Poor, Medium, Good, or Excellent
-        3. Brief constructive feedback (1-2 sentences)
+        Provide a brief evaluation in 1-2 sentences.
+
+        Rate the answer as one of: Poor, Medium, Good, Excellent
 
         Respond in this exact format:
-        Acknowledgment: [Your brief acknowledgment]
         Score: [Poor/Medium/Good/Excellent]
         Feedback: [Your brief feedback here]
         """
 
         response = groq_llm.invoke(prompt)
 
+        # Handle AIMessage object properly
        if hasattr(response, 'content'):
            response_text = response.content.strip()
        elif isinstance(response, str):
@@ -328,34 +304,31 @@ def evaluate_answer(question, answer, job_role="Software Developer", seniority="Mid-level"):
         else:
             response_text = str(response).strip()
 
+        # Parse the response
         lines = response_text.split('\n')
-        score = "Medium"
-        feedback = "Good answer, but could be more detailed."
-        acknowledgment = "Thank you for your response."
+        score = "Medium"  # default
+        feedback = "Good answer, but could be more detailed."  # default
 
         for line in lines:
             line = line.strip()
-            if line.startswith('Acknowledgment:'):
-                acknowledgment = line.replace('Acknowledgment:', '').strip()
-            elif line.startswith('Score:'):
+            if line.startswith('Score:'):
                 score = line.replace('Score:', '').strip()
             elif line.startswith('Feedback:'):
                 feedback = line.replace('Feedback:', '').strip()
 
+        # Ensure score is valid
         valid_scores = ["Poor", "Medium", "Good", "Excellent"]
         if score not in valid_scores:
             score = "Medium"
 
         return {
             "score": score,
-            "feedback": feedback,
-            "acknowledgment": acknowledgment
+            "feedback": feedback
         }
 
     except Exception as e:
         logging.error(f"Error evaluating answer: {e}")
         return {
             "score": "Medium",
-            "feedback": "Unable to evaluate answer at this time.",
-            "acknowledgment": "Thank you for your response."
+            "feedback": "Unable to evaluate answer at this time."
        }
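
The `Score:`/`Feedback:` parsing in evaluate_answer can be checked in isolation. A minimal sketch with an invented model response; the loop and the valid-score guard mirror the diff above:

```python
sample = """Score: Good
Feedback: Clear explanation, but the example lacked measurable results."""

score, feedback = "Medium", "Good answer, but could be more detailed."  # defaults
for line in sample.split('\n'):
    line = line.strip()
    if line.startswith('Score:'):
        score = line.replace('Score:', '').strip()
    elif line.startswith('Feedback:'):
        feedback = line.replace('Feedback:', '').strip()

if score not in ["Poor", "Medium", "Good", "Excellent"]:
    score = "Medium"  # any malformed score falls back to the default

print(score)     # Good
print(feedback)  # Clear explanation, but the example lacked measurable results.
```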
backend/templates/interview.html CHANGED
@@ -516,7 +516,6 @@
             answers: [],
             evaluations: []
         };
-        this.conversationHistory = [];
         this.initializeElements();
         this.initializeInterview();
     }
@@ -840,98 +839,72 @@
         this.recordingStatus.style.color = '#666';
     }
 
-    // Replace the existing submitAnswer method in your AIInterviewer class with this:
-
-    async submitAnswer() {
-        const answerText = this.transcriptArea.textContent.trim();
-
-        if (!answerText || answerText === this.transcriptArea.getAttribute('placeholder')) {
-            this.showError('Please provide an answer before submitting.');
-            return;
-        }
-
-        // Show loading state
-        this.confirmButton.disabled = true;
-        this.confirmLoading.style.display = 'inline-block';
-        this.confirmButton.querySelector('span:first-child').textContent = 'Processing...';
-
-        try {
-            const response = await fetch('/api/interview/process_answer', {
-                method: 'POST',
-                headers: {
-                    'Content-Type': 'application/json',
-                },
-                body: JSON.stringify({
-                    answer: answerText,
-                    questionIndex: this.currentQuestionIndex,
-                    job_id: JOB_ID,
-                    current_question: this.currentQuestion,
-                    conversation_history: this.conversationHistory || [] // Include conversation history
-                })
-            });
-
-            const data = await response.json();
-
-            if (data.success) {
-                // Update conversation history from response
-                if (data.conversation_history) {
-                    this.conversationHistory = data.conversation_history;
-                }
-
-                // Store answer and evaluation
-                this.interviewData.answers.push(answerText);
-                this.interviewData.evaluations.push(data.evaluation);
-
-                // Display user's answer
-                this.addUserMessage(answerText);
-
-                // Display evaluation with acknowledgment
-                const evalDiv = document.createElement('div');
-                evalDiv.className = 'ai-message';
-                evalDiv.innerHTML = `
-                    <div class="ai-avatar">AI</div>
-                    <div class="message-bubble" style="background: #e8f5e9;">
-                        <p><strong>${data.evaluation.acknowledgment || 'Thank you for your response.'}</strong></p>
-                        <p style="margin-top: 10px;">Score: <span class="evaluation-score">${data.evaluation.score}</span></p>
-                        <p style="margin-top: 5px; font-size: 0.9rem; color: #666;">${data.evaluation.feedback}</p>
-                    </div>
-                `;
-                this.chatArea.appendChild(evalDiv);
-                this.chatArea.scrollTop = this.chatArea.scrollHeight;
-
-                // Reset input for next question
-                this.resetForNextQuestion();
-
-                if (!data.is_complete) {
-                    // Move to next question after a short delay
-                    setTimeout(() => {
-                        this.currentQuestionIndex++;
-                        this.currentQuestion = data.next_question;
-                        this.displayQuestion(data.next_question, data.audio_url);
-                        this.interviewData.questions.push(data.next_question);
-                    }, 3000); // 3 second delay to read evaluation
-                } else {
-                    // Interview complete
-                    setTimeout(() => {
-                        this.showInterviewSummary();
-                    }, 2000);
-                }
-            } else {
-                this.showError(data.error || 'Error processing answer');
-            }
-        } catch (error) {
-            console.error('Error submitting answer:', error);
-            this.showError('Error submitting answer. Please try again.');
-        } finally {
-            this.confirmButton.disabled = false;
-            this.confirmLoading.style.display = 'none';
-            this.confirmButton.querySelector('span:first-child').textContent = 'Confirm Answer';
-        }
-    }
-
-    // Also add this property to the constructor of AIInterviewer class:
-    // (Add this line in the constructor after this.interviewData)
+    async submitAnswer() {
+        const answer = this.transcriptArea.textContent.trim();
+        if (!answer) return;
+
+        console.log('Submitting answer:', answer);
+
+        // Show loading state
+        this.confirmButton.disabled = true;
+        this.confirmLoading.style.display = 'inline-block';
+        this.confirmButton.querySelector('span').style.display = 'none';
+
+        // Add user message to chat
+        this.addUserMessage(answer);
+
+        try {
+            const response = await fetch('/api/process_answer', {
+                method: 'POST',
+                headers: {
+                    'Content-Type': 'application/json'
+                },
+                body: JSON.stringify({
+                    answer: answer,
+                    questionIndex: this.currentQuestionIndex,
+                    current_question: this.currentQuestion,
+                    job_id: JOB_ID
+                })
+            });
+
+            if (!response.ok) {
+                const errorText = await response.text();
+                console.error('Process answer error:', response.status, errorText);
+                throw new Error(`HTTP error! status: ${response.status}`);
+            }
+
+            const data = await response.json();
+            console.log('Process answer response:', data);
+
+            if (!data.success) {
+                this.showError(data.error || 'Failed to process answer. Please try again.');
+                return;
+            }
+
+            // Record the user's answer and its evaluation
+            this.interviewData.answers.push(answer);
+            this.interviewData.evaluations.push(data.evaluation || {});
+
+            if (data.is_complete) {
+                console.log('Interview completed');
+                this.showInterviewSummary();
+            } else {
+                console.log('Moving to next question');
+                this.currentQuestionIndex++;
+                this.currentQuestion = data.next_question;
+                this.displayQuestion(data.next_question, data.audio_url);
+                this.interviewData.questions.push(data.next_question);
+                this.resetForNextQuestion();
+            }
+        } catch (error) {
+            console.error('Error submitting answer:', error);
+            this.showError('Connection error. Please try again.');
+        } finally {
+            // Reset button state
+            this.confirmLoading.style.display = 'none';
+            this.confirmButton.querySelector('span').style.display = 'inline';
+        }
+    }
 
     addUserMessage(message) {
         const messageDiv = document.createElement('div');