File size: 4,492 Bytes
9f2b0ed
 
 
fd40bd4
9f2b0ed
 
 
 
 
 
 
 
2ae57cb
9f2b0ed
2ae57cb
9f2b0ed
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
fd40bd4
 
 
 
 
 
9f2b0ed
fd40bd4
9f2b0ed
 
fd40bd4
 
 
 
 
 
 
 
 
2ae57cb
9f2b0ed
 
 
 
 
 
 
fd40bd4
 
 
 
 
9f2b0ed
fd40bd4
9f2b0ed
 
 
 
 
 
 
 
 
 
 
 
 
2ae57cb
9f2b0ed
 
 
 
 
 
 
fd40bd4
 
 
 
 
 
 
9f2b0ed
fd40bd4
9f2b0ed
 
fd40bd4
 
 
 
 
 
 
9f2b0ed
 
 
 
 
 
 
 
fd40bd4
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
import os
import uuid
import json
from flask import Blueprint, request, jsonify, url_for, current_app
from flask_login import login_required, current_user
from backend.models.database import db, Job, Application
from backend.services.interview_engine import (
    generate_first_question,
    edge_tts_to_file_sync,
    whisper_stt,
    evaluate_answer
)

interview_api = Blueprint("interview_api", __name__)

@interview_api.route("/start_interview", methods=["POST"])
@login_required
def start_interview():
    """Begin an interview session for a job.

    Expects a JSON body: ``{"job_id": <id>}``.  Looks up the current user's
    application for that job, generates the opening question from the stored
    profile, and attempts (best-effort) to synthesize TTS audio for it.

    Returns JSON with ``question`` and, when audio generation succeeded,
    ``audio_url`` pointing into the app's static folder.  Responds 400 when
    no application/profile exists, 500 when the stored profile is not valid
    JSON, and 404 (via ``get_or_404``) for an unknown job.
    """
    # get_json() returns None for a missing or non-JSON body; fall back to an
    # empty dict so the .get() below cannot raise AttributeError.
    data = request.get_json(silent=True) or {}
    job_id = data.get("job_id")

    job = Job.query.get_or_404(job_id)
    application = Application.query.filter_by(
        user_id=current_user.id,
        job_id=job_id
    ).first()

    if not application or not application.extracted_features:
        return jsonify({"error": "No application/profile data found."}), 400

    try:
        profile = json.loads(application.extracted_features)
    except (TypeError, json.JSONDecodeError):
        # Narrow except: a bare ``except:`` would also swallow
        # KeyboardInterrupt/SystemExit and hide unrelated bugs.
        return jsonify({"error": "Invalid profile JSON"}), 500

    question = generate_first_question(profile, job)

    # Flask serves files from ``current_app.static_folder`` and uses the
    # configured static URL path when generating URLs.  If audio generation
    # fails (for example because network access is unavailable), we simply
    # omit the audio file and allow the frontend to fall back to text-only
    # mode.
    audio_dir = os.path.join(current_app.static_folder, "audio")
    os.makedirs(audio_dir, exist_ok=True)

    audio_filename = f"q_{uuid.uuid4().hex}.wav"
    audio_path = os.path.join(audio_dir, audio_filename)

    # Generate audio synchronously.  The helper returns None on error.
    audio_out = edge_tts_to_file_sync(question, audio_path)

    response = {"question": question}
    if audio_out:
        # url_for builds the URL relative to the configured static folder.
        response["audio_url"] = url_for("static", filename=f"audio/{audio_filename}")
    return jsonify(response)

@interview_api.route("/transcribe_audio", methods=["POST"])
@login_required
def transcribe_audio():
    """Transcribe an uploaded audio answer via Whisper STT.

    Expects multipart form data with an ``audio`` file part.  The upload is
    written to a temp file under the instance path, transcribed, and the temp
    file is removed afterwards.

    Returns JSON ``{"transcript": <text>}`` or a 400 error when no file was
    sent.
    """
    audio_file = request.files.get("audio")
    if not audio_file:
        return jsonify({"error": "No audio file received."}), 400

    # Create a temporary directory inside Flask's instance path.  Using the
    # instance path ensures that the folder is writable in environments like
    # Hugging Face Spaces, where the working directory may be read-only.  The
    # instance path is configured in ``app.py`` to point at
    # ``/tmp/flask_instance``.
    temp_dir = os.path.join(current_app.instance_path, "interview_temp")
    os.makedirs(temp_dir, exist_ok=True)

    filename = f"user_audio_{uuid.uuid4().hex}.wav"
    path = os.path.join(temp_dir, filename)
    audio_file.save(path)

    try:
        transcript = whisper_stt(path)
    finally:
        # Clean up even if transcription raises, so failed requests don't
        # leak temp files.  Only OS errors are expected here; a bare
        # ``except:`` would also hide KeyboardInterrupt/SystemExit.
        try:
            os.remove(path)
        except OSError:
            pass

    return jsonify({"transcript": transcript})

@interview_api.route("/process_answer", methods=["POST"])
@login_required
def process_answer():
    """Evaluate a candidate answer and return the next interview question.

    Expects a JSON body with ``answer`` (string) and ``questionIndex`` (int).
    Generates a follow-up question, attempts best-effort TTS for it, and
    returns a placeholder evaluation.  The interview is marked complete after
    the third question (``questionIndex >= 2``).
    """
    # get_json() returns None for a missing or non-JSON body; fall back to an
    # empty dict so the .get() calls below cannot raise AttributeError.
    data = request.get_json(silent=True) or {}
    answer = data.get("answer", "")
    # Coerce defensively: a non-numeric questionIndex from the client would
    # otherwise crash the arithmetic below.
    try:
        question_idx = int(data.get("questionIndex", 0))
    except (TypeError, ValueError):
        question_idx = 0

    # Generate next question (simplified for now).  In a full implementation
    # this would call a model such as groq_llm to generate a follow-up
    # question based on the candidate's answer.
    next_question = f"Follow‑up question {question_idx + 2}: Can you elaborate on your experience with relevant technologies?"

    # Prepare audio output directory inside the app's static folder.
    audio_dir = os.path.join(current_app.static_folder, "audio")
    os.makedirs(audio_dir, exist_ok=True)

    audio_filename = f"q_{uuid.uuid4().hex}.wav"
    audio_path = os.path.join(audio_dir, audio_filename)

    # Attempt to generate speech for the next question.  If audio generation
    # fails, ``audio_out`` will be None and we simply omit ``audioUrl`` in
    # the JSON response.
    audio_out = edge_tts_to_file_sync(next_question, audio_path)

    response = {
        "success": True,
        "nextQuestion": next_question,
        "evaluation": {
            "score": "medium",
            "feedback": "Good answer, but be more specific."
        },
        "isComplete": question_idx >= 2,
        "summary": []
    }
    if audio_out:
        response["audioUrl"] = url_for("static", filename=f"audio/{audio_filename}")
    return jsonify(response)