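"""Flask blueprint exposing the AI interview API endpoints.

Routes:
    POST /start_interview   -- generate the opening question (audio, with JSON fallback)
    POST /transcribe_audio  -- transcribe an uploaded answer with Whisper
    POST /process_answer    -- return the next question and an evaluation of the answer
"""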
import os
import uuid
import json
from flask import Blueprint, request, jsonify, send_file, url_for, current_app
from flask_login import login_required, current_user
from backend.models.database import db, Job, Application
from backend.services.interview_engine import (
    generate_first_question,
    edge_tts_to_file_sync,
    whisper_stt,
    evaluate_answer
)

interview_api = Blueprint("interview_api", __name__)

@interview_api.route("/start_interview", methods=["POST"])
@login_required
def start_interview():
    data = request.get_json(silent=True) or {}
    job_id = data.get("job_id")
    if not job_id:
        return jsonify({"error": "job_id is required."}), 400
    
    job = Job.query.get_or_404(job_id)
    application = Application.query.filter_by(
        user_id=current_user.id, 
        job_id=job_id
    ).first()
    
    if not application or not application.extracted_features:
        return jsonify({"error": "No application/profile data found."}), 400
    
    try:
        profile = json.loads(application.extracted_features)
    except (TypeError, json.JSONDecodeError):
        return jsonify({"error": "Invalid profile JSON"}), 500
    
    question = generate_first_question(profile, job)
    
    # Use /tmp directory which is writable in Hugging Face Spaces
    audio_dir = "/tmp/audio"
    os.makedirs(audio_dir, exist_ok=True)

    audio_filename = f"q_{uuid.uuid4().hex}.wav"
    audio_path = os.path.join(audio_dir, audio_filename)

    # Generate audio synchronously. The function returns None on error.
    audio_out = edge_tts_to_file_sync(question, audio_path)

    if audio_out and os.path.exists(audio_path):
        return send_file(audio_path, mimetype="audio/wav", as_attachment=False)
    else:
        # Fall back to a JSON response if audio generation fails
        return jsonify({"question": question})


@interview_api.route("/transcribe_audio", methods=["POST"])
@login_required
def transcribe_audio():
    audio_file = request.files.get("audio")
    if not audio_file:
        return jsonify({"error": "No audio file received."}), 400
    
    # Use /tmp directory which is writable in Hugging Face Spaces
    temp_dir = "/tmp/interview_temp"
    os.makedirs(temp_dir, exist_ok=True)

    filename = f"user_audio_{uuid.uuid4().hex}.wav"
    path = os.path.join(temp_dir, filename)
    audio_file.save(path)
    
    transcript = whisper_stt(path)
    
    # Clean up the temporary upload once transcription is done
    try:
        os.remove(path)
    except OSError:
        pass
    
    return jsonify({"transcript": transcript})

@interview_api.route("/process_answer", methods=["POST"])
@login_required
def process_answer():
    data = request.get_json(silent=True) or {}
    answer = data.get("answer", "")
    question_idx = data.get("questionIndex", 0)
    
    # Generate the next question (simplified for now). A full implementation
    # would call a model such as groq_llm to generate a follow-up question
    # based on the candidate's answer; see the commented sketch below.
    next_question = (
        f"Follow-up question {question_idx + 2}: "
        "Can you elaborate on your experience with relevant technologies?"
    )
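    # Sketch of the fuller flow hinted at above (commented out; the
    # evaluate_answer signature and groq_llm helper shown here are
    # assumptions, not the project's confirmed API):
    #
    #   evaluation = evaluate_answer(answer)
    #   next_question = groq_llm.generate_followup(answer, evaluation)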

    # Use /tmp directory for audio files
    audio_dir = "/tmp/audio"
    os.makedirs(audio_dir, exist_ok=True)

    audio_filename = f"q_{uuid.uuid4().hex}.wav"
    audio_path = os.path.join(audio_dir, audio_filename)

    # Attempt to generate speech for the next question. If audio generation
    # fails, ``audio_out`` will be None and we fall back to a JSON response.
    audio_out = edge_tts_to_file_sync(next_question, audio_path)

    if audio_out and os.path.exists(audio_path):
        return send_file(audio_path, mimetype="audio/wav", as_attachment=False)
    else:
        # Fall back to a JSON response
        response = {
            "success": True,
            "nextQuestion": next_question,
            "evaluation": {
                "score": "medium",
                "feedback": "Good answer, but be more specific."
            },
            "isComplete": question_idx >= 2,
            "summary": []
        }
        return jsonify(response)
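
# Registration sketch (the import path and url_prefix below are assumptions;
# adjust to the actual project layout):
#
#   from backend.routes.interview import interview_api
#   app.register_blueprint(interview_api, url_prefix="/api")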