import asyncio
import json
import logging
import os

import edge_tts
from faster_whisper import WhisperModel
from langchain_groq import ChatGroq

# Initialize the Groq chat model; the API key is read from the environment
chat_groq_api = os.getenv("GROQ_API_KEY", "your-groq-api-key")
groq_llm = ChatGroq(
    temperature=0.7,
    model_name="llama-3.3-70b-versatile",
    api_key=chat_groq_api,
)

# Initialize Whisper model lazily (loaded on first use)
whisper_model = None


def load_whisper_model():
    """Load the Faster-Whisper model once and cache it globally."""
    global whisper_model
    if whisper_model is None:
        # Probe for an NVIDIA GPU; redirect nvidia-smi's output so it does
        # not spill onto the console (POSIX-style redirection)
        device = "cuda" if os.system("nvidia-smi > /dev/null 2>&1") == 0 else "cpu"
        # float16 is fast on GPU; int8 keeps CPU inference memory-friendly
        compute_type = "float16" if device == "cuda" else "int8"
        whisper_model = WhisperModel("base", device=device, compute_type=compute_type)
    return whisper_model
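
# Note: "base" favors speed over accuracy; faster-whisper also ships larger
# checkpoints ("small", "medium", "large-v3") if transcription quality
# matters more than latency.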


def generate_first_question(profile, job):
    """Generate the first interview question based on profile and job."""
    try:
        prompt = f"""
        You are conducting an interview for a {job.role} position at {job.company}.
        The candidate's profile shows:
        - Skills: {profile.get('skills', [])}
        - Experience: {profile.get('experience', [])}
        - Education: {profile.get('education', [])}
        Generate an appropriate opening interview question that is professional and relevant.
        Keep it concise and clear.
        """
        # invoke() supersedes the predict() helper removed from recent
        # langchain-core releases; the reply text lives in .content
        response = groq_llm.invoke(prompt)
        return response.content.strip()
    except Exception as e:
        logging.error(f"Error generating first question: {e}")
        return "Tell me about yourself and why you're interested in this position."


def edge_tts_to_file_sync(text, output_path, voice="en-US-AriaNeural"):
    """Synchronous wrapper for edge-tts."""
    try:
        # Create the target directory only when the path contains one;
        # os.makedirs("") raises if output_path is a bare filename
        directory = os.path.dirname(output_path)
        if directory:
            os.makedirs(directory, exist_ok=True)

        async def generate_audio():
            communicate = edge_tts.Communicate(text, voice)
            await communicate.save(output_path)

        # asyncio.run() spins up and tears down a fresh event loop; it
        # raises if called from inside an already-running loop
        asyncio.run(generate_audio())
        return output_path
    except Exception as e:
        logging.error(f"Error in TTS generation: {e}")
        return None
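
# Example call (file path and text are illustrative):
#
#   audio_file = edge_tts_to_file_sync("Welcome to the interview.", "audio/welcome.mp3")
#   if audio_file is None:
#       ...  # fall back to text-only output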


def whisper_stt(audio_path):
    """Speech-to-text using Faster-Whisper."""
    try:
        if not audio_path or not os.path.exists(audio_path):
            return ""
        model = load_whisper_model()
        # transcribe() yields segments lazily; join their text into one string
        segments, _ = model.transcribe(audio_path)
        transcript = " ".join(segment.text for segment in segments)
        return transcript.strip()
    except Exception as e:
        logging.error(f"Error in STT: {e}")
        return ""


def evaluate_answer(question, answer, ref_answer, job_role, seniority):
    """Evaluate a candidate's answer against a reference answer."""
    try:
        prompt = f"""
        You are evaluating a candidate's answer for a {seniority} {job_role} position.
        Question: {question}
        Candidate Answer: {answer}
        Reference Answer: {ref_answer}
        Evaluate based on technical correctness, clarity, and relevance.
        Respond with JSON format:
        {{
            "Score": "Poor|Medium|Good|Excellent",
            "Reasoning": "brief explanation",
            "Improvements": ["suggestion1", "suggestion2"]
        }}
        """
        response = groq_llm.invoke(prompt).content
        # Models often wrap JSON in prose, so slice between the outermost
        # braces; bail out if no JSON object is present at all
        start_idx = response.find("{")
        end_idx = response.rfind("}") + 1
        if start_idx == -1 or end_idx == 0:
            raise ValueError("no JSON object in model response")
        json_str = response[start_idx:end_idx]
        return json.loads(json_str)
    except Exception as e:
        logging.error(f"Error evaluating answer: {e}")
        return {
            "Score": "Medium",
            "Reasoning": "Evaluation failed",
            "Improvements": ["Please be more specific"],
        }
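

# A minimal smoke test, assuming a valid GROQ_API_KEY in the environment;
# the sample strings below are illustrative only.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)

    result = evaluate_answer(
        question="What is a Python decorator?",
        answer="A decorator wraps a function to change or extend its behavior.",
        ref_answer=(
            "A decorator is a callable that takes a function and returns a "
            "modified function, usually applied with the @ syntax."
        ),
        job_role="Backend Engineer",
        seniority="Junior",
    )
    print(json.dumps(result, indent=2))

    # Round-trip check: synthesize speech, then transcribe it back
    audio_path = edge_tts_to_file_sync("Tell me about yourself.", "audio/selftest.mp3")
    if audio_path:
        print(whisper_stt(audio_path))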