Commit 44441db (parent: 1a04e25): interview updated

Files changed:
- backend/routes/interview_api.py (+97 -54)
- backend/services/interview_engine.py (+102 -72)
- backend/templates/interview.html (+26 -59)

backend/routes/interview_api.py
CHANGED
@@ -16,40 +16,58 @@ interview_api = Blueprint("interview_api", __name__)
 @interview_api.route("/start_interview", methods=["POST"])
 @login_required
 def start_interview():
+    """
+    Start a new interview. Generates the first question based on the user's
+    resume/profile and the selected job. Always returns a JSON payload
+    containing the question text and, if available, a URL to an audio
+    rendition of the question.
+
+    Previously this endpoint returned a raw audio file when TTS generation
+    succeeded. This prevented the client from displaying the actual question
+    and forced it to fall back to a hard‑coded default. By always returning
+    structured JSON we ensure the UI can show the generated question and
+    optionally play the associated audio.
+    """
+    data = request.get_json() or {}
     job_id = data.get("job_id")
+
+    # Validate the job and the user's application
     job = Job.query.get_or_404(job_id)
     application = Application.query.filter_by(
         user_id=current_user.id,
         job_id=job_id
     ).first()
     if not application or not application.extracted_features:
         return jsonify({"error": "No application/profile data found."}), 400
+
+    # Parse the candidate's profile
     try:
         profile = json.loads(application.extracted_features)
-    except:
+    except Exception:
         return jsonify({"error": "Invalid profile JSON"}), 500

-    question = generate_first_question(profile, job)
-
-    # Use /tmp directory which is writable in Hugging Face Spaces
-    audio_dir = "/tmp/audio"
-    os.makedirs(audio_dir, exist_ok=True)
-
-    return jsonify({"question": question})
+    # Generate the first question using the LLM
+    question = generate_first_question(profile, job)

+    # Attempt to generate a TTS audio file for the question. If successful
+    # we'll return a URL that the client can call to retrieve it; otherwise
+    # audio_url remains None.
+    audio_url = None
+    try:
+        audio_dir = "/tmp/audio"
+        os.makedirs(audio_dir, exist_ok=True)
+        filename = f"q_{uuid.uuid4().hex}.wav"
+        audio_path = os.path.join(audio_dir, filename)
+        audio_out = edge_tts_to_file_sync(question, audio_path)
+        if audio_out and os.path.exists(audio_path):
+            audio_url = url_for("interview_api.get_audio", filename=filename)
+    except Exception:
+        audio_url = None

+    return jsonify({
+        "question": question,
+        "audio_url": audio_url
+    })


 @interview_api.route("/transcribe_audio", methods=["POST"])
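For reference, a minimal client-side sketch of the new /start_interview contract described in the docstring above. The base URL, authenticated session, and output filename are assumptions for illustration; the JSON field names (question, audio_url) come from the endpoint itself:

import requests

BASE = "http://localhost:5000"   # assumed dev server; the blueprint may be mounted under a prefix
session = requests.Session()     # assumed to already hold a logged-in session (login_required)

resp = session.post(f"{BASE}/start_interview", json={"job_id": 1})
data = resp.json()               # the endpoint now always returns JSON

if "error" in data:
    print("Could not start interview:", data["error"])
else:
    print("First question:", data["question"])
    if data.get("audio_url"):
        # audio_url points at the audio-serving route added later in this commit
        audio = session.get(f"{BASE}{data['audio_url']}")
        with open("question.wav", "wb") as f:
            f.write(audio.content)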
@@ -80,38 +98,63 @@ def transcribe_audio():
 @interview_api.route("/process_answer", methods=["POST"])
 @login_required
 def process_answer():
+    """
+    Process a user's answer and return a follow‑up question along with an
+    evaluation. Always responds with JSON containing:
+
+    - success: boolean indicating the operation succeeded
+    - next_question: the text of the next question
+    - audio_url: optional URL to the TTS audio for the next question
+    - evaluation: a dict with a score and feedback
+    - is_complete: boolean indicating if the interview is finished
+
+    Returning JSON even when audio generation succeeds simplifies client
+    handling and prevents errors when parsing the response.
+    """
+    data = request.get_json() or {}
     answer = data.get("answer", "")
     question_idx = data.get("questionIndex", 0)
+
+    # Construct the next question. In a full implementation this would
+    # depend on the user's answer and job description.
+    next_question_text = f"Follow‑up question {question_idx + 2}: Can you elaborate on your experience with relevant technologies?"
+
+    # Stubbed evaluation of the answer. Replace with a call to evaluate_answer()
+    evaluation_result = {
+        "score": "medium",
+        "feedback": "Good answer, but be more specific."
+    }
+
+    # Determine completion (3 questions in total, zero‑based index)
+    is_complete = question_idx >= 2
+
+    # Try to generate audio for the next question
+    audio_url = None
+    try:
+        audio_dir = "/tmp/audio"
+        os.makedirs(audio_dir, exist_ok=True)
+        filename = f"q_{uuid.uuid4().hex}.wav"
+        audio_path = os.path.join(audio_dir, filename)
+        audio_out = edge_tts_to_file_sync(next_question_text, audio_path)
+        if audio_out and os.path.exists(audio_path):
+            audio_url = url_for("interview_api.get_audio", filename=filename)
+    except Exception:
+        audio_url = None
+
+    return jsonify({
+        "success": True,
+        "next_question": next_question_text,
+        "audio_url": audio_url,
+        "evaluation": evaluation_result,
+        "is_complete": is_complete
+    })
+
+@interview_api.route("/audio/<string:filename>", methods=["GET"])
+@login_required
+def get_audio(filename: str):
+    """Serve previously generated TTS audio from the /tmp/audio directory."""
+    safe_name = os.path.basename(filename)
+    audio_path = os.path.join("/tmp/audio", safe_name)
+    if not os.path.exists(audio_path):
+        return jsonify({"error": "Audio file not found."}), 404
+    return send_file(audio_path, mimetype="audio/wav", as_attachment=False)
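Likewise, a short sketch of how a client might drive /process_answer against the JSON contract above (host, session handling, and answer text are placeholders; the field names match the response built in this hunk):

import requests

BASE = "http://localhost:5000"   # assumed dev server
session = requests.Session()     # assumed authenticated session

question_idx = 0
while True:
    resp = session.post(
        f"{BASE}/process_answer",
        json={"answer": "Example answer text", "questionIndex": question_idx},
    )
    data = resp.json()
    if not data.get("success"):
        break
    print("Feedback:", data["evaluation"]["feedback"])
    if data["is_complete"]:
        break
    print("Next question:", data["next_question"])
    question_idx += 1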
backend/services/interview_engine.py
CHANGED
@@ -1,4 +1,3 @@
-# Updated `interview_engine.py`
 import os
 import json
 import asyncio
@@ -7,116 +6,147 @@ from faster_whisper import WhisperModel
 from langchain_groq import ChatGroq
 import logging

+# Initialize models
+chat_groq_api = os.getenv("GROQ_API_KEY")
+if not chat_groq_api:
+    raise ValueError("GROQ_API_KEY is not set in environment variables.")
 groq_llm = ChatGroq(
     temperature=0.7,
-    model_name="llama-3
-    api_key=
+    model_name="llama-3.3-70b-versatile",
+    api_key=chat_groq_api
 )

+# Initialize Whisper model
 whisper_model = None

-# ------------------
-# Load Whisper lazily
-# ------------------
 def load_whisper_model():
     global whisper_model
     if whisper_model is None:
-        device = "cuda" if os.system("nvidia-smi
+        device = "cuda" if os.system("nvidia-smi") == 0 else "cpu"
         compute_type = "float16" if device == "cuda" else "int8"
         whisper_model = WhisperModel("base", device=device, compute_type=compute_type)
     return whisper_model

-# ------------------
-# Generate Question
-# ------------------
 def generate_first_question(profile, job):
-    You are conducting an interview for a {job.role} position at {job.company}.
-    The candidate's profile shows:
-    - Skills: {profile.get('skills', [])}
-    - Experience: {profile.get('experience', [])}
-    - Education: {profile.get('education', [])}
-
-    Generate an appropriate opening interview question that is professional and relevant.
-    Keep it concise and clear.
-    """
+    """Generate the first interview question based on profile and job"""
     try:
+        prompt = f"""
+        You are conducting an interview for a {job.role} position at {job.company}.
+        The candidate's profile shows:
+        - Skills: {profile.get('skills', [])}
+        - Experience: {profile.get('experience', [])}
+        - Education: {profile.get('education', [])}
+
+        Generate an appropriate opening interview question that is professional and relevant.
+        Keep it concise and clear.
+        """
+
+        response = groq_llm.predict(prompt)
         return response.strip()
     except Exception as e:
-        logging.error(f"
+        logging.error(f"Error generating first question: {e}")
         return "Tell me about yourself and why you're interested in this position."

-# ------------------
-# TTS (Edge)
-# ------------------
 def edge_tts_to_file_sync(text, output_path, voice="en-US-AriaNeural"):
+    """Synchronous wrapper for edge-tts"""
     try:
-        directory
+        # Ensure the directory exists and is writable
+        directory = os.path.dirname(output_path)
+        if not directory:
+            directory = "/tmp"  # Fallback to /tmp if no directory specified
+            output_path = os.path.join(directory, os.path.basename(output_path))
+
         os.makedirs(directory, exist_ok=True)
+
+        # Test write permissions
+        test_file = os.path.join(directory, f"test_{os.getpid()}.tmp")
+        try:
+            with open(test_file, 'w') as f:
+                f.write("test")
+            os.remove(test_file)
+        except (PermissionError, OSError) as e:
+            logging.error(f"Directory {directory} is not writable: {e}")
+            # Fallback to /tmp
+            directory = "/tmp"
+            output_path = os.path.join(directory, os.path.basename(output_path))
+            os.makedirs(directory, exist_ok=True)
+
+        async def generate_audio():
             communicate = edge_tts.Communicate(text, voice)
             await communicate.save(output_path)
-
-        import nest_asyncio
-        nest_asyncio.apply()
-        loop = asyncio.get_event_loop()
+
+        # Run async function in sync context
+        try:
+            loop = asyncio.get_event_loop()
+        except RuntimeError:
+            loop = asyncio.new_event_loop()
+            asyncio.set_event_loop(loop)
+
+        loop.run_until_complete(generate_audio())
+
+        # Verify file was created and has content
         if os.path.exists(output_path) and os.path.getsize(output_path) > 0:
             return output_path
+        else:
+            logging.error(f"Audio file was not created or is empty: {output_path}")
+            return None
+
     except Exception as e:
-        logging.error(f"TTS generation
+        logging.error(f"Error in TTS generation: {e}")
+        return None

-# ------------------
-# Transcription
-# ------------------
 def whisper_stt(audio_path):
-        return ""
+    """Speech-to-text using Faster-Whisper"""
     try:
+        if not audio_path or not os.path.exists(audio_path):
+            logging.error(f"Audio file does not exist: {audio_path}")
+            return ""
+
+        # Check if file has content
+        if os.path.getsize(audio_path) == 0:
+            logging.error(f"Audio file is empty: {audio_path}")
+            return ""
+
         model = load_whisper_model()
         segments, _ = model.transcribe(audio_path)
+        transcript = " ".join(segment.text for segment in segments)
+        return transcript.strip()
     except Exception as e:
-        logging.error(f"STT
+        logging.error(f"Error in STT: {e}")
         return ""

-# ------------------
-# Answer Evaluation
-# ------------------
 def evaluate_answer(question, answer, ref_answer, job_role, seniority):
-    You are evaluating a candidate's answer for a {seniority} {job_role} position.
-
-    Question: {question}
-    Candidate Answer: {answer}
-    Reference Answer: {ref_answer}
-
-    Evaluate based on technical correctness, clarity, and relevance.
-    Respond with JSON format:
-    {{
-        "Score": "Poor|Medium|Good|Excellent",
-        "Reasoning": "brief explanation",
-        "Improvements": ["suggestion1", "suggestion2"]
-    }}
-    """
+    """Evaluate candidate's answer"""
     try:
+        prompt = f"""
+        You are evaluating a candidate's answer for a {seniority} {job_role} position.
+
+        Question: {question}
+        Candidate Answer: {answer}
+        Reference Answer: {ref_answer}
+
+        Evaluate based on technical correctness, clarity, and relevance.
+        Respond with JSON format:
+        {{
+            "Score": "Poor|Medium|Good|Excellent",
+            "Reasoning": "brief explanation",
+            "Improvements": ["suggestion1", "suggestion2"]
+        }}
+        """
+
+        response = groq_llm.predict(prompt)
+        # Extract JSON from response
+        start_idx = response.find("{")
+        end_idx = response.rfind("}") + 1
+        if start_idx >= 0 and end_idx > start_idx:
+            json_str = response[start_idx:end_idx]
+            return json.loads(json_str)
+        else:
+            raise ValueError("No valid JSON found in response")
     except Exception as e:
-        logging.error(f"
+        logging.error(f"Error evaluating answer: {e}")
         return {
             "Score": "Medium",
             "Reasoning": "Evaluation failed",
-            "Improvements": ["
-        }
+            "Improvements": ["Please be more specific"]
+        }
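The brace-slicing step in evaluate_answer (find the first "{" and the last "}") exists because chat models often wrap their JSON in prose or code fences. A self-contained sketch of the same technique, handy for testing outside the Flask app (the helper name is illustrative, not part of this module):

import json

def extract_json_block(text: str) -> dict:
    """Best-effort extraction of the outermost {...} object from an LLM reply."""
    start = text.find("{")
    end = text.rfind("}") + 1
    if start < 0 or end <= start:
        raise ValueError("No JSON object found in response")
    return json.loads(text[start:end])

raw = 'Sure! Here is the evaluation:\n{"Score": "Good", "Reasoning": "Clear answer", "Improvements": []}'
print(extract_json_block(raw)["Score"])   # -> Good

As in evaluate_answer, json.loads still raises if the braces do not delimit valid JSON, which the caller's except block turns into the "Evaluation failed" fallback.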
backend/templates/interview.html
CHANGED
@@ -577,25 +577,16 @@
         throw new Error(`HTTP error! status: ${response.status}`);
       }

-        const fallbackText = "Let's begin. Can you introduce yourself?";
-        this.displayQuestion(fallbackText, audioUrl);
-        this.interviewData.questions.push(fallbackText);
-      } else {
-        const data = await response.json();
-        if (data.error) {
-          this.showError(data.error);
-          return;
-        }
-
-        this.displayQuestion(data.question, data.audio_url);
-        this.interviewData.questions.push(data.question);
+      // Always expect a JSON payload describing the question and optional audio URL
+      const data = await response.json();
+      if (data.error) {
+        this.showError(data.error);
+        return;
       }

+      // Display the actual question text and play audio if provided
+      this.displayQuestion(data.question, data.audio_url);
+      this.interviewData.questions.push(data.question);
     } catch (error) {
       console.error('Error starting interview:', error);
       this.showError('Failed to start interview. Please try again.');
@@ -766,51 +757,27 @@
         throw new Error(`HTTP error! status: ${response.status}`);
       }

-        const fallback = `Follow-up question ${this.currentQuestionIndex + 2}: Can you elaborate more?`;
-        this.interviewData.answers.push(answer);
-        this.interviewData.evaluations.push({
-          score: "N/A",
-          feedback: "No feedback available in fallback mode."
-        });
-
-        if (this.currentQuestionIndex >= 2) {
-          this.showInterviewSummary();
-        } else {
-          this.currentQuestionIndex++;
-          this.displayQuestion(fallback, audioUrl);
-          this.interviewData.questions.push(fallback);
-          this.resetForNextQuestion();
-        }
-      } else {
-        const data = await response.json();
-        if (data.success) {
-          this.interviewData.answers.push(answer);
-          this.interviewData.evaluations.push(data.evaluation);
-
-          if (data.isComplete) {
-            this.showInterviewSummary();
-          } else {
-            this.currentQuestionIndex++;
-            this.displayQuestion(data.nextQuestion, data.audioUrl);
-            this.interviewData.questions.push(data.nextQuestion);
-            this.resetForNextQuestion();
-          }
-        } else {
-          this.showError('Failed to process answer. Please try again.');
-        }
-      }
-
-      if (data.error) {
-        this.showError(data.error);
+      // Parse JSON response
+      const data = await response.json();
+      if (!data.success) {
+        this.showError('Failed to process answer. Please try again.');
         return;
       }

+      // Record the user's answer and its evaluation
+      this.interviewData.answers.push(answer);
+      this.interviewData.evaluations.push(data.evaluation || {});
+
+      if (data.is_complete) {
+        // Interview finished, show summary
+        this.showInterviewSummary();
+      } else {
+        // Advance to next question
+        this.currentQuestionIndex++;
+        this.displayQuestion(data.next_question, data.audio_url);
+        this.interviewData.questions.push(data.next_question);
+        this.resetForNextQuestion();
+      }
     } catch (error) {
       console.error('Error submitting answer:', error);
       this.showError('Connection error. Please try again.');
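The client code above now depends on exact snake_case field names (question, audio_url, success, next_question, evaluation, is_complete) rather than the old camelCase ones (isComplete, nextQuestion, audioUrl). A hypothetical pytest sketch that would catch such a mismatch early; the logged_in_client fixture is an assumption, not part of this commit:

def test_process_answer_contract(logged_in_client):
    # logged_in_client: assumed Flask test-client fixture with an authenticated user
    resp = logged_in_client.post(
        "/process_answer",
        json={"answer": "I used Flask and PostgreSQL.", "questionIndex": 0},
    )
    data = resp.get_json()

    # Field names interview.html reads after this commit
    assert data["success"] is True
    assert "next_question" in data
    assert "audio_url" in data                     # may be None when TTS fails
    assert {"score", "feedback"} <= set(data["evaluation"])
    assert data["is_complete"] is False            # zero-based index 0 of three questions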