Spaces:
Paused
Paused
Commit
·
8e4e001
1
Parent(s):
81341b4
interview updated
Browse files
backend/routes/interview_api.py
CHANGED
@@ -1,7 +1,7 @@
|
|
1 |
import os
|
2 |
import uuid
|
3 |
import json
|
4 |
-
from flask import Blueprint, request, jsonify, url_for, current_app
|
5 |
from flask_login import login_required, current_user
|
6 |
from backend.models.database import db, Job, Application
|
7 |
from backend.services.interview_engine import (
|
@@ -35,25 +35,20 @@ def start_interview():
|
|
35 |
|
36 |
question = generate_first_question(profile, job)
|
37 |
|
38 |
-
#
|
39 |
-
# files from ``current_app.static_folder`` and uses ``static_url_path``
|
40 |
-
# when generating URLs. If audio generation fails (for example because
|
41 |
-
# network access is unavailable), we simply omit the audio file and allow
|
42 |
-
# the frontend to fall back to text‑only mode.
|
43 |
audio_dir = "/tmp/audio"
|
44 |
os.makedirs(audio_dir, exist_ok=True)
|
45 |
|
46 |
audio_filename = f"q_{uuid.uuid4().hex}.wav"
|
47 |
audio_path = os.path.join(audio_dir, audio_filename)
|
48 |
|
49 |
-
# Generate audio synchronously.
|
50 |
audio_out = edge_tts_to_file_sync(question, audio_path)
|
51 |
|
52 |
-
|
53 |
-
|
54 |
-
if audio_out:
|
55 |
-
return send_file(audio_path, mimetype="audio/wav")
|
56 |
else:
|
|
|
57 |
return jsonify({"question": question})
|
58 |
|
59 |
|
@@ -64,11 +59,8 @@ def transcribe_audio():
|
|
64 |
if not audio_file:
|
65 |
return jsonify({"error": "No audio file received."}), 400
|
66 |
|
67 |
-
#
|
68 |
-
|
69 |
-
# Hugging Face Spaces, where the working directory may be read‑only. The
|
70 |
-
# instance path is configured in ``app.py`` to point at ``/tmp/flask_instance``.
|
71 |
-
temp_dir = os.path.join(current_app.instance_path, "interview_temp")
|
72 |
os.makedirs(temp_dir, exist_ok=True)
|
73 |
|
74 |
filename = f"user_audio_{uuid.uuid4().hex}.wav"
|
@@ -92,33 +84,34 @@ def process_answer():
|
|
92 |
answer = data.get("answer", "")
|
93 |
question_idx = data.get("questionIndex", 0)
|
94 |
|
95 |
-
# Generate next question (simplified for now).
|
96 |
# would call a model such as groq_llm to generate a follow‑up question based
|
97 |
# on the candidate's answer.
|
98 |
next_question = f"Follow‑up question {question_idx + 2}: Can you elaborate on your experience with relevant technologies?"
|
99 |
|
100 |
-
#
|
101 |
audio_dir = "/tmp/audio"
|
102 |
os.makedirs(audio_dir, exist_ok=True)
|
103 |
|
104 |
audio_filename = f"q_{uuid.uuid4().hex}.wav"
|
105 |
audio_path = os.path.join(audio_dir, audio_filename)
|
106 |
|
107 |
-
# Attempt to generate speech for the next question.
|
108 |
-
# fails, ``audio_out`` will be None and we
|
109 |
-
# the JSON response.
|
110 |
audio_out = edge_tts_to_file_sync(next_question, audio_path)
|
111 |
|
112 |
-
|
113 |
-
"
|
114 |
-
|
115 |
-
|
116 |
-
|
117 |
-
"
|
118 |
-
|
119 |
-
|
120 |
-
|
121 |
-
|
122 |
-
|
123 |
-
|
124 |
-
|
|
|
|
|
|
1 |
import os
|
2 |
import uuid
|
3 |
import json
|
4 |
+
from flask import Blueprint, request, jsonify, send_file, url_for, current_app
|
5 |
from flask_login import login_required, current_user
|
6 |
from backend.models.database import db, Job, Application
|
7 |
from backend.services.interview_engine import (
|
|
|
35 |
|
36 |
question = generate_first_question(profile, job)
|
37 |
|
38 |
+
# Use /tmp directory which is writable in Hugging Face Spaces
|
|
|
|
|
|
|
|
|
39 |
audio_dir = "/tmp/audio"
|
40 |
os.makedirs(audio_dir, exist_ok=True)
|
41 |
|
42 |
audio_filename = f"q_{uuid.uuid4().hex}.wav"
|
43 |
audio_path = os.path.join(audio_dir, audio_filename)
|
44 |
|
45 |
+
# Generate audio synchronously. The function returns None on error.
|
46 |
audio_out = edge_tts_to_file_sync(question, audio_path)
|
47 |
|
48 |
+
if audio_out and os.path.exists(audio_path):
|
49 |
+
return send_file(audio_path, mimetype="audio/wav", as_attachment=False)
|
|
|
|
|
50 |
else:
|
51 |
+
# Fallback to JSON response if audio generation fails
|
52 |
return jsonify({"question": question})
|
53 |
|
54 |
|
|
|
59 |
if not audio_file:
|
60 |
return jsonify({"error": "No audio file received."}), 400
|
61 |
|
62 |
+
# Use /tmp directory which is writable in Hugging Face Spaces
|
63 |
+
temp_dir = "/tmp/interview_temp"
|
|
|
|
|
|
|
64 |
os.makedirs(temp_dir, exist_ok=True)
|
65 |
|
66 |
filename = f"user_audio_{uuid.uuid4().hex}.wav"
|
|
|
84 |
answer = data.get("answer", "")
|
85 |
question_idx = data.get("questionIndex", 0)
|
86 |
|
87 |
+
# Generate next question (simplified for now). In a full implementation this
|
88 |
# would call a model such as groq_llm to generate a follow‑up question based
|
89 |
# on the candidate's answer.
|
90 |
next_question = f"Follow‑up question {question_idx + 2}: Can you elaborate on your experience with relevant technologies?"
|
91 |
|
92 |
+
# Use /tmp directory for audio files
|
93 |
audio_dir = "/tmp/audio"
|
94 |
os.makedirs(audio_dir, exist_ok=True)
|
95 |
|
96 |
audio_filename = f"q_{uuid.uuid4().hex}.wav"
|
97 |
audio_path = os.path.join(audio_dir, audio_filename)
|
98 |
|
99 |
+
# Attempt to generate speech for the next question. If audio generation
|
100 |
+
# fails, ``audio_out`` will be None and we return JSON response instead.
|
|
|
101 |
audio_out = edge_tts_to_file_sync(next_question, audio_path)
|
102 |
|
103 |
+
if audio_out and os.path.exists(audio_path):
|
104 |
+
return send_file(audio_path, mimetype="audio/wav", as_attachment=False)
|
105 |
+
else:
|
106 |
+
# Fallback to JSON response
|
107 |
+
response = {
|
108 |
+
"success": True,
|
109 |
+
"nextQuestion": next_question,
|
110 |
+
"evaluation": {
|
111 |
+
"score": "medium",
|
112 |
+
"feedback": "Good answer, but be more specific."
|
113 |
+
},
|
114 |
+
"isComplete": question_idx >= 2,
|
115 |
+
"summary": []
|
116 |
+
}
|
117 |
+
return jsonify(response)
|
backend/services/interview_engine.py
CHANGED
@@ -7,7 +7,7 @@ from langchain_groq import ChatGroq
|
|
7 |
import logging
|
8 |
|
9 |
# Initialize models
|
10 |
-
chat_groq_api = os.getenv("GROQ_API_KEY", "…")  # [removed line truncated in this page capture — original fallback value not visible]
|
11 |
groq_llm = ChatGroq(
|
12 |
temperature=0.7,
|
13 |
model_name="llama-3.3-70b-versatile",
|
@@ -48,20 +48,47 @@ def generate_first_question(profile, job):
|
|
48 |
def edge_tts_to_file_sync(text, output_path, voice="en-US-AriaNeural"):
|
49 |
"""Synchronous wrapper for edge-tts"""
|
50 |
try:
|
51 |
-
#
|
52 |
-
os.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
53 |
|
54 |
async def generate_audio():
|
55 |
communicate = edge_tts.Communicate(text, voice)
|
56 |
await communicate.save(output_path)
|
57 |
|
58 |
# Run async function in sync context
|
59 |
-
|
60 |
-
|
|
|
|
|
|
|
|
|
61 |
loop.run_until_complete(generate_audio())
|
62 |
-
loop.close()
|
63 |
|
64 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
65 |
except Exception as e:
|
66 |
logging.error(f"Error in TTS generation: {e}")
|
67 |
return None
|
@@ -70,6 +97,12 @@ def whisper_stt(audio_path):
|
|
70 |
"""Speech-to-text using Faster-Whisper"""
|
71 |
try:
|
72 |
if not audio_path or not os.path.exists(audio_path):
|
|
|
|
|
|
|
|
|
|
|
|
|
73 |
return ""
|
74 |
|
75 |
model = load_whisper_model()
|
@@ -103,8 +136,11 @@ def evaluate_answer(question, answer, ref_answer, job_role, seniority):
|
|
103 |
# Extract JSON from response
|
104 |
start_idx = response.find("{")
|
105 |
end_idx = response.rfind("}") + 1
|
106 |
-
|
107 |
-
|
|
|
|
|
|
|
108 |
except Exception as e:
|
109 |
logging.error(f"Error evaluating answer: {e}")
|
110 |
return {
|
|
|
7 |
import logging
|
8 |
|
9 |
# Initialize models
|
10 |
+
chat_groq_api = os.getenv("GROQ_API_KEY")  # SECURITY: hardcoded fallback key redacted — a real "gsk_…" Groq API key was committed here; it is publicly leaked and must be revoked, then supplied only via the Space's secrets/env configuration
|
11 |
groq_llm = ChatGroq(
|
12 |
temperature=0.7,
|
13 |
model_name="llama-3.3-70b-versatile",
|
|
|
48 |
def edge_tts_to_file_sync(text, output_path, voice="en-US-AriaNeural"):
|
49 |
"""Synchronous wrapper for edge-tts"""
|
50 |
try:
|
51 |
+
# Ensure the directory exists and is writable
|
52 |
+
directory = os.path.dirname(output_path)
|
53 |
+
if not directory:
|
54 |
+
directory = "/tmp" # Fallback to /tmp if no directory specified
|
55 |
+
output_path = os.path.join(directory, os.path.basename(output_path))
|
56 |
+
|
57 |
+
os.makedirs(directory, exist_ok=True)
|
58 |
+
|
59 |
+
# Test write permissions
|
60 |
+
test_file = os.path.join(directory, f"test_{os.getpid()}.tmp")
|
61 |
+
try:
|
62 |
+
with open(test_file, 'w') as f:
|
63 |
+
f.write("test")
|
64 |
+
os.remove(test_file)
|
65 |
+
except (PermissionError, OSError) as e:
|
66 |
+
logging.error(f"Directory {directory} is not writable: {e}")
|
67 |
+
# Fallback to /tmp
|
68 |
+
directory = "/tmp"
|
69 |
+
output_path = os.path.join(directory, os.path.basename(output_path))
|
70 |
+
os.makedirs(directory, exist_ok=True)
|
71 |
|
72 |
async def generate_audio():
|
73 |
communicate = edge_tts.Communicate(text, voice)
|
74 |
await communicate.save(output_path)
|
75 |
|
76 |
# Run async function in sync context
|
77 |
+
try:
|
78 |
+
loop = asyncio.get_event_loop()
|
79 |
+
except RuntimeError:
|
80 |
+
loop = asyncio.new_event_loop()
|
81 |
+
asyncio.set_event_loop(loop)
|
82 |
+
|
83 |
loop.run_until_complete(generate_audio())
|
|
|
84 |
|
85 |
+
# Verify file was created and has content
|
86 |
+
if os.path.exists(output_path) and os.path.getsize(output_path) > 0:
|
87 |
+
return output_path
|
88 |
+
else:
|
89 |
+
logging.error(f"Audio file was not created or is empty: {output_path}")
|
90 |
+
return None
|
91 |
+
|
92 |
except Exception as e:
|
93 |
logging.error(f"Error in TTS generation: {e}")
|
94 |
return None
|
|
|
97 |
"""Speech-to-text using Faster-Whisper"""
|
98 |
try:
|
99 |
if not audio_path or not os.path.exists(audio_path):
|
100 |
+
logging.error(f"Audio file does not exist: {audio_path}")
|
101 |
+
return ""
|
102 |
+
|
103 |
+
# Check if file has content
|
104 |
+
if os.path.getsize(audio_path) == 0:
|
105 |
+
logging.error(f"Audio file is empty: {audio_path}")
|
106 |
return ""
|
107 |
|
108 |
model = load_whisper_model()
|
|
|
136 |
# Extract JSON from response
|
137 |
start_idx = response.find("{")
|
138 |
end_idx = response.rfind("}") + 1
|
139 |
+
if start_idx >= 0 and end_idx > start_idx:
|
140 |
+
json_str = response[start_idx:end_idx]
|
141 |
+
return json.loads(json_str)
|
142 |
+
else:
|
143 |
+
raise ValueError("No valid JSON found in response")
|
144 |
except Exception as e:
|
145 |
logging.error(f"Error evaluating answer: {e}")
|
146 |
return {
|