husseinelsaadi committed
Commit 32acb92 · 1 Parent(s): a3c2881
backend/routes/interview_api.py CHANGED
@@ -7,11 +7,13 @@ from flask_login import login_required, current_user
 from backend.models.database import db, Job, Application
 from backend.services.interview_engine import (
     generate_first_question,
+    generate_next_question,
     edge_tts_to_file_sync,
     whisper_stt,
     evaluate_answer
 )
 
+
 # Additional imports for report generation
 from backend.models.database import Application
 from backend.services.report_generator import generate_llm_interview_report, create_pdf_report
@@ -233,23 +235,6 @@ def process_answer():
     audio_url = None
 
     if not is_complete:
-        # Follow-up question bank. These are used for indices 1 .. n-2.
-        # The final question (last index) probes salary expectations and
-        # working preferences. If the recruiter has configured fewer
-        # questions than the number of entries here, only the first
-        # appropriate number will be used.
-        follow_up_questions = [
-            "Can you describe a challenging project you've worked on and how you overcame the difficulties?",
-            "What is your favorite machine learning algorithm and why?",
-            "How do you stay up-to-date with advancements in AI?",
-            "Describe a time you had to learn a new technology quickly. How did you approach it?"
-        ]
-        final_question = (
-            "What are your salary expectations? Are you looking for a full-time or part-time role, "
-            "and do you prefer remote or on-site work?"
-        )
-
-        # Compute the next index (zero-based) for the upcoming question
         next_idx = question_idx + 1
 
         # Determine which question to ask next. If next_idx is the last
@@ -258,14 +243,35 @@ def process_answer():
         # bank based on ``next_idx - 1`` (because index 0 is for the
         # first follow-up). If out of range, cycle through the list.
         if next_idx == (total_questions - 1):
-            next_question_text = final_question
+            next_question_text = (
+                "What are your salary expectations? Are you looking for a full-time or part-time role, "
+                "and do you prefer remote or on-site work?"
+            )
         else:
-            if follow_up_questions:
-                idx_in_bank = (next_idx - 1) % len(follow_up_questions)
-                next_question_text = follow_up_questions[idx_in_bank]
-            else:
-                # Fallback if no follow-ups are defined
-                next_question_text = "Do you have any questions about the role or our company?"
+            # 🔥 Use Qdrant-powered next question
+            try:
+                # You need profile + job for Qdrant context
+                job = Job.query.get(int(job_id)) if job_id else None
+                application = Application.query.filter_by(
+                    user_id=current_user.id,
+                    job_id=job_id
+                ).first()
+
+                profile = {}
+                if application and application.extracted_features:
+                    profile = json.loads(application.extracted_features)
+
+                conversation_history = data.get("conversation_history", [])
+                next_question_text = generate_next_question(
+                    profile,
+                    job,
+                    conversation_history,
+                    answer
+                )
+            except Exception as e:
+                logging.error(f"Error generating next question from Qdrant: {e}")
+                next_question_text = "Could you elaborate more on your last point?"
+

         # Try to generate audio for the next question
         try:
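For reference, the branching that process_answer() now performs when the interview is not complete can be condensed into a small helper. This is an illustrative sketch only, not code from the commit: choose_next_question and its parameter list are hypothetical names, while the fixed final question and the fallback string mirror the ones in the diff above.

    # Illustrative sketch, not part of the commit. The final slot always gets the fixed
    # salary/logistics question; every other slot asks the Qdrant-backed generator and
    # falls back to a generic prompt if generation fails.
    def choose_next_question(next_idx, total_questions, profile, job, history, answer,
                             generate_next_question):
        if next_idx == (total_questions - 1):
            return (
                "What are your salary expectations? Are you looking for a full-time or part-time role, "
                "and do you prefer remote or on-site work?"
            )
        try:
            return generate_next_question(profile, job, history, answer)
        except Exception:
            return "Could you elaborate more on your last point?"

Passing the generator in as a parameter keeps the sketch self-contained; the real route calls the imported generate_next_question directly.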
backend/services/interview_engine.py CHANGED
@@ -129,7 +129,7 @@ def generate_first_question(profile, job):
         logging.warning("[QDRANT DEBUG] No questions retrieved, falling back to defaults")
 
     context_data = random_context_chunks(retrieved_data, k=4) if retrieved_data else ""
-
+
     try:
         prompt = f"""
         You are conducting an interview for a {job.role} position at {job.company}.
@@ -168,6 +168,62 @@ def generate_first_question(profile, job):
         logging.error(f"Error generating first question: {e}")
         return "Tell me about yourself and why you're interested in this position."
 
+def generate_next_question(profile, job, conversation_history, last_answer):
+    """Generate the next interview question based on profile, job, and conversation so far"""
+    all_roles = extract_all_roles_from_qdrant()
+    logging.info(f"[QDRANT DEBUG] Available Roles: {all_roles}")
+
+    retrieved_data = retrieve_interview_data(job.role.lower(), all_roles)
+    logging.info(f"[QDRANT DEBUG] Role requested: {job.role.lower()}")
+    logging.info(f"[QDRANT DEBUG] Questions retrieved: {len(retrieved_data)}")
+    if retrieved_data:
+        logging.info(f"[QDRANT DEBUG] Sample Next Q: {retrieved_data[0]['question']}")
+    else:
+        logging.warning("[QDRANT DEBUG] No questions retrieved, falling back to defaults")
+
+    context_data = random_context_chunks(retrieved_data, k=4) if retrieved_data else ""
+
+    try:
+        prompt = f"""
+        You are continuing an interview for a {job.role} position at {job.company}.
+        Candidate's profile:
+        - Skills: {profile.get('skills', [])}
+        - Experience: {profile.get('experience', [])}
+        - Education: {profile.get('education', [])}
+
+        Conversation so far:
+        {conversation_history}
+
+        Candidate's last answer:
+        {last_answer}
+
+        Use the following context to generate the next question:
+        {context_data}
+
+        Generate an appropriate follow-up interview question that is professional and relevant.
+        Keep it concise and clear. If the interview is for a technical role, focus on technical skills.
+        """
+
+        response = groq_llm.invoke(prompt)
+
+        if hasattr(response, 'content'):
+            question = response.content.strip()
+        elif isinstance(response, str):
+            question = response.strip()
+        else:
+            question = str(response).strip()
+
+        if not question or len(question) < 10:
+            question = "Could you elaborate more on your last point?"
+
+        logging.info(f"Generated next question: {question}")
+        return question
+
+    except Exception as e:
+        logging.error(f"Error generating next question: {e}")
+        return "Could you elaborate more on your last point?"
+
+
 def edge_tts_to_file_sync(text, output_path, voice="en-US-AriaNeural"):
     """Synchronous wrapper for edge-tts with better error handling"""
     try:
@@ -271,6 +327,61 @@ def convert_webm_to_wav(webm_path, wav_path):
     except (subprocess.TimeoutExpired, FileNotFoundError, Exception) as e:
         logging.error(f"Error converting audio: {e}")
         return None
+
+def generate_next_question(profile, job, conversation_history, last_answer):
+    """Generate the next interview question based on profile, job, and conversation so far"""
+    all_roles = extract_all_roles_from_qdrant()
+    logging.info(f"[QDRANT DEBUG] Available Roles: {all_roles}")
+
+    retrieved_data = retrieve_interview_data(job.role.lower(), all_roles)
+    logging.info(f"[QDRANT DEBUG] Role requested: {job.role.lower()}")
+    logging.info(f"[QDRANT DEBUG] Questions retrieved: {len(retrieved_data)}")
+    if retrieved_data:
+        logging.info(f"[QDRANT DEBUG] Sample Next Q: {retrieved_data[0]['question']}")
+    else:
+        logging.warning("[QDRANT DEBUG] No questions retrieved, falling back to defaults")
+
+    context_data = random_context_chunks(retrieved_data, k=4) if retrieved_data else ""
+
+    try:
+        prompt = f"""
+        You are continuing an interview for a {job.role} position at {job.company}.
+        Candidate's profile:
+        - Skills: {profile.get('skills', [])}
+        - Experience: {profile.get('experience', [])}
+        - Education: {profile.get('education', [])}
+
+        Conversation so far:
+        {conversation_history}
+
+        Candidate's last answer:
+        {last_answer}
+
+        Use the following context to generate the next question:
+        {context_data}
+
+        Generate an appropriate follow-up interview question that is professional and relevant.
+        Keep it concise and clear. If the interview is for a technical role, focus on technical skills.
+        """
+
+        response = groq_llm.invoke(prompt)
+
+        if hasattr(response, 'content'):
+            question = response.content.strip()
+        elif isinstance(response, str):
+            question = response.strip()
+        else:
+            question = str(response).strip()
+
+        if not question or len(question) < 10:
+            question = "Could you elaborate more on your last point?"
+
+        logging.info(f"Generated next question: {question}")
+        return question
+
+    except Exception as e:
+        logging.error(f"Error generating next question: {e}")
+        return "Could you elaborate more on your last point?"
 
 import subprocess # top of the file if not already imported
 
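A minimal call of the new generate_next_question() might look like the sketch below. The profile keys and argument order follow the prompt template in the diff; FakeJob is a hypothetical stand-in that only mimics the two Job attributes the function reads (role, company), and a reachable Qdrant collection plus the groq_llm client configured elsewhere in interview_engine.py are assumed.

    # Hypothetical usage sketch, assuming the Qdrant collection and groq_llm client
    # used by interview_engine.py are already configured in the environment.
    from dataclasses import dataclass

    from backend.services.interview_engine import generate_next_question

    @dataclass
    class FakeJob:
        role: str      # lowercased by the function for Qdrant retrieval
        company: str   # interpolated into the prompt

    profile = {
        "skills": ["Python", "PyTorch"],
        "experience": ["2 years as an ML intern"],
        "education": ["BSc Computer Science"],
    }
    history = [
        {"question": "Tell me about yourself.", "answer": "I build ML pipelines..."},
    ]

    question = generate_next_question(
        profile,
        FakeJob(role="Machine Learning Engineer", company="Acme"),
        history,
        "I build ML pipelines...",
    )
    print(question)

Note that the Qdrant lookups run before the function's try block, so retrieval errors propagate to the caller; the route-level try/except added in interview_api.py is what keeps those failures from reaching the client.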