husseinelsaadi committed
Commit 88df9ba · 1 Parent(s): 14500de
Files changed (1)
  1. backend/routes/interview_api.py +122 -1
backend/routes/interview_api.py CHANGED
@@ -363,4 +363,125 @@ from flask import render_template
 @interview_api.route("/interview/complete", methods=["GET"])
 @login_required
 def interview_complete():
-    return render_template("closing.html")
+    """
+    Final interview completion page. After the last question has been
+    answered, redirect here to show the candidate a brief summary of
+    their overall performance. The summary consists of a percentage
+    score and a high‑level label (e.g. "Excellent", "Good"). These
+    values are derived from the candidate's application data and
+    interview evaluations.
+
+    The calculation mirrors the logic used in the PDF report
+    generation: the skills match ratio contributes 40% of the final
+    score while the average of the per‑question evaluation ratings
+    contributes 60%. If no evaluation data is available, a default
+    average of 0.5 is used. The resulting number is expressed as a
+    percentage (e.g. "75%") and mapped to a descriptive label.
+    """
+
+    score = None
+    feedback_summary = None
+
+    try:
+        # Attempt to locate the most recent application with interview data
+        # for the current user. Because the completion route does not
+        # receive a job ID, we fall back to the latest application that
+        # contains an interview_log. If none exists, the summary will
+        # remain empty and the template will render placeholders.
+        application = (
+            Application.query
+            .filter_by(user_id=current_user.id)
+            .filter(Application.interview_log.isnot(None))
+            .order_by(Application.id.desc())
+            .first()
+        )
+
+        if application:
+            # Parse candidate and job skills from stored JSON. If either
+            # field is missing or malformed, fall back to empty lists.
+            try:
+                candidate_features = json.loads(application.extracted_features) if application.extracted_features else {}
+            except Exception:
+                candidate_features = {}
+            candidate_skills = candidate_features.get('skills', []) or []
+
+            job_skills = []
+            try:
+                job_skills = json.loads(application.job.skills) if application.job and application.job.skills else []
+            except Exception:
+                job_skills = []
+
+            # Compute the skills match ratio. Normalise skills to lower
+            # case and strip whitespace for comparison. Avoid division
+            # by zero if the job has no listed skills.
+            candidate_set = {s.strip().lower() for s in candidate_skills}
+            job_set = {s.strip().lower() for s in job_skills}
+            common = candidate_set & job_set
+            ratio = (len(common) / len(job_set)) if job_set else 0.0
+
+            # Extract per‑question evaluations from the interview log. The
+            # interview_log stores a list of dictionaries with keys
+            # "question", "answer" and "evaluation". Each evaluation is
+            # expected to include a "score" field containing text such
+            # as "Poor", "Medium", "Good" or "Excellent". Convert
+            # these descriptors into numeric values in the range [0.2, 1.0]
+            # similar to the logic used in report generation.
+            qa_scores = []
+            try:
+                if application.interview_log:
+                    try:
+                        log_data = json.loads(application.interview_log)
+                    except Exception:
+                        log_data = []
+                    for entry in log_data:
+                        score_text = str(entry.get('evaluation', {}).get('score', '')).lower()
+                        # Map textual scores to numerical values
+                        if ('excellent' in score_text) or ('5' in score_text) or ('10' in score_text):
+                            qa_scores.append(1.0)
+                        elif ('good' in score_text) or ('4' in score_text) or ('8' in score_text) or ('9' in score_text):
+                            qa_scores.append(0.8)
+                        elif ('satisfactory' in score_text) or ('medium' in score_text) or ('3' in score_text) or ('6' in score_text) or ('7' in score_text):
+                            qa_scores.append(0.6)
+                        elif ('needs improvement' in score_text) or ('poor' in score_text) or ('2' in score_text):
+                            qa_scores.append(0.4)
+                        else:
+                            qa_scores.append(0.2)
+            except Exception:
+                qa_scores = []
+
+            # Average the QA scores. If no scores were recorded (e.g. if
+            # the interview_log is empty or malformed), assume a neutral
+            # average of 0.5 to avoid penalising the candidate for missing
+            # data.
+            qa_average = (sum(qa_scores) / len(qa_scores)) if qa_scores else 0.5
+
+            # Weight skills match (40%) and QA average (60%) to derive
+            # the final overall score. Convert to a percentage for
+            # display.
+            overall = (ratio * 0.4) + (qa_average * 0.6)
+            percentage = overall * 100.0
+
+            # Assign a descriptive label based on the overall score.
+            if overall >= 0.8:
+                label = 'Excellent'
+            elif overall >= 0.65:
+                label = 'Good'
+            elif overall >= 0.45:
+                label = 'Satisfactory'
+            else:
+                label = 'Needs Improvement'
+
+            # Format the score as a whole‑number percentage. For example
+            # 0.753 becomes "75%". Note that rounding is applied.
+            score = f"{percentage:.0f}%"
+            feedback_summary = label
+
+    except Exception as calc_err:
+        # If any error occurs during calculation, fall back to None values.
+        logging.error(f"Error computing overall interview score: {calc_err}")
+
+    return render_template(
+        "closing.html",
+        score=score,
+        feedback_summary=feedback_summary
+    )
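
To make the weighting concrete: with 3 of 6 job skills matched (ratio 0.5) and per‑question descriptors Good, Excellent and Satisfactory (0.8, 1.0 and 0.6, averaging 0.8), the overall score is 0.5 * 0.4 + 0.8 * 0.6 = 0.68, displayed as "68%". Below is a minimal standalone sketch of that arithmetic; compute_overall and SCORE_MAP are illustrative names that do not exist in this commit, and the descriptor mapping is simplified to exact lookups rather than the substring checks the route uses.

# Illustrative sketch of the completion-page scoring, outside the route.
# SCORE_MAP and compute_overall are hypothetical names, not in the codebase.
SCORE_MAP = {
    'excellent': 1.0,
    'good': 0.8,
    'satisfactory': 0.6,
    'needs improvement': 0.4,
}

def compute_overall(candidate_skills, job_skills, score_texts):
    # Skills match ratio: fraction of the job's skills the candidate has.
    candidate_set = {s.strip().lower() for s in candidate_skills}
    job_set = {s.strip().lower() for s in job_skills}
    ratio = len(candidate_set & job_set) / len(job_set) if job_set else 0.0
    # Per-question average; unknown descriptors fall back to 0.2 and an
    # empty log to the neutral 0.5, matching the route's defaults.
    qa_scores = [SCORE_MAP.get(t.strip().lower(), 0.2) for t in score_texts]
    qa_average = sum(qa_scores) / len(qa_scores) if qa_scores else 0.5
    # 40% skills match, 60% interview performance.
    return ratio * 0.4 + qa_average * 0.6

overall = compute_overall(
    candidate_skills=['Python', 'SQL', 'Docker'],
    job_skills=['python', 'sql', 'docker', 'aws', 'react', 'kubernetes'],
    score_texts=['Good', 'Excellent', 'Satisfactory'],
)
print(f"{overall * 100:.0f}%")  # ratio 0.5, average 0.8 -> 0.68 -> "68%"

Against the route's thresholds, 0.68 falls in the [0.65, 0.8) band, so the label rendered alongside "68%" would be "Good".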