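"""Flask blueprint for the interview API: first-question generation, speech-to-text
transcription of answers, answer processing, and serving of generated TTS audio."""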
import os
import uuid
import json
from flask import Blueprint, request, jsonify, send_file, url_for, current_app
from flask_login import login_required, current_user
from backend.models.database import db, Job, Application
from backend.services.interview_engine import (
    generate_first_question,
    edge_tts_to_file_sync,
    whisper_stt,
    evaluate_answer,
)

interview_api = Blueprint("interview_api", __name__)
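# How this blueprint is mounted is not shown in this file; a typical registration
# (illustrative app object and URL prefix, not confirmed by this repository) is:
#   app.register_blueprint(interview_api, url_prefix="/api/interview")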
@interview_api.route("/start_interview", methods=["POST"])
@login_required
def start_interview():
"""
Start a new interview. Generates the first question based on the user's
resume/profile and the selected job. Always returns a JSON payload
containing the question text and, if available, a URL to an audio
rendition of the question.
Previously this endpoint returned a raw audio file when TTS generation
succeeded. This prevented the client from displaying the actual question
and forced it to fall back to a hard‑coded default. By always returning
structured JSON we ensure the UI can show the generated question and
optionally play the associated audio.
"""
    data = request.get_json() or {}
    job_id = data.get("job_id")

    # Validate the job and the user's application
    job = Job.query.get_or_404(job_id)
    application = Application.query.filter_by(
        user_id=current_user.id,
        job_id=job_id,
    ).first()
    if not application or not application.extracted_features:
        return jsonify({"error": "No application/profile data found."}), 400

    # Parse the candidate's profile
    try:
        profile = json.loads(application.extracted_features)
    except Exception:
        return jsonify({"error": "Invalid profile JSON"}), 500

    # Generate the first question using the LLM
    question = generate_first_question(profile, job)

    # Attempt to generate a TTS audio file for the question. If successful
    # we'll return a URL that the client can call to retrieve it; otherwise
    # audio_url remains None.
    audio_url = None
    try:
        audio_dir = "/tmp/audio"
        os.makedirs(audio_dir, exist_ok=True)
        filename = f"q_{uuid.uuid4().hex}.wav"
        audio_path = os.path.join(audio_dir, filename)
        audio_out = edge_tts_to_file_sync(question, audio_path)
        if audio_out and os.path.exists(audio_path):
            audio_url = url_for("interview_api.get_audio", filename=filename)
    except Exception:
        audio_url = None

    return jsonify({
        "question": question,
        "audio_url": audio_url,
    })
@interview_api.route("/transcribe_audio", methods=["POST"])
@login_required
def transcribe_audio():
audio_file = request.files.get("audio")
if not audio_file:
return jsonify({"error": "No audio file received."}), 400
# Use /tmp directory which is writable in Hugging Face Spaces
temp_dir = "/tmp/interview_temp"
os.makedirs(temp_dir, exist_ok=True)
filename = f"user_audio_{uuid.uuid4().hex}.wav"
path = os.path.join(temp_dir, filename)
audio_file.save(path)
transcript = whisper_stt(path)
# Clean up
try:
os.remove(path)
except:
pass
return jsonify({"transcript": transcript})
@interview_api.route("/process_answer", methods=["POST"])
@login_required
def process_answer():
"""
Process a user's answer and return a follow‑up question along with an
evaluation. Always responds with JSON containing:
- success: boolean indicating the operation succeeded
- next_question: the text of the next question
- audio_url: optional URL to the TTS audio for the next question
- evaluation: a dict with a score and feedback
- is_complete: boolean indicating if the interview is finished
Returning JSON even when audio generation succeeds simplifies client
handling and prevents errors when parsing the response.
"""
    data = request.get_json() or {}
    answer = data.get("answer", "")
    question_idx = data.get("questionIndex", 0)

    # Construct the next question. In a full implementation this would
    # depend on the user's answer and job description.
    next_question_text = (
        f"Follow-up question {question_idx + 2}: Can you elaborate on your "
        "experience with relevant technologies?"
    )

    # Stubbed evaluation of the answer. Replace with a call to evaluate_answer()
    evaluation_result = {
        "score": "medium",
        "feedback": "Good answer, but be more specific.",
    }

    # Determine completion (3 questions in total, zero-based index)
    is_complete = question_idx >= 2

    # Try to generate audio for the next question
    audio_url = None
    try:
        audio_dir = "/tmp/audio"
        os.makedirs(audio_dir, exist_ok=True)
        filename = f"q_{uuid.uuid4().hex}.wav"
        audio_path = os.path.join(audio_dir, filename)
        audio_out = edge_tts_to_file_sync(next_question_text, audio_path)
        if audio_out and os.path.exists(audio_path):
            audio_url = url_for("interview_api.get_audio", filename=filename)
    except Exception:
        audio_url = None

    return jsonify({
        "success": True,
        "next_question": next_question_text,
        "audio_url": audio_url,
        "evaluation": evaluation_result,
        "is_complete": is_complete,
    })
@interview_api.route("/audio/<string:filename>", methods=["GET"])
@login_required
def get_audio(filename: str):
"""Serve previously generated TTS audio from the /tmp/audio directory."""
safe_name = os.path.basename(filename)
audio_path = os.path.join("/tmp/audio", safe_name)
if not os.path.exists(audio_path):
return jsonify({"error": "Audio file not found."}), 404
return send_file(audio_path, mimetype="audio/wav", as_attachment=False) |