File size: 4,484 Bytes
3e6e3a1
65fceff
3e6e3a1
65fceff
 
 
 
3e6e3a1
 
912706a
65fceff
 
 
 
 
3e6e3a1
 
 
 
 
 
 
48482d6
 
 
 
 
3e6e3a1
 
 
65fceff
 
 
 
 
912706a
48482d6
 
912706a
 
 
3e6e3a1
48482d6
65fceff
 
 
 
 
 
 
 
 
3e6e3a1
48482d6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
65fceff
 
3e6e3a1
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
import gradio as gr
import numpy as np
from transformers import pipeline
from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity
import PyPDF2

# Load local models for inference (downloaded/cached by Hugging Face on first run;
# these lines execute at import time, so module import is slow and network-dependent).
# Speech-to-text: Whisper base transcribes the candidate's spoken answers.
stt_model = pipeline("automatic-speech-recognition", model="openai/whisper-base")
# NOTE(review): blenderbot-400M-distill is an encoder-decoder (seq2seq) model;
# the "text-generation" task targets causal LMs — confirm this pipeline/task
# pairing actually loads (a "text2text-generation" task may be intended).
conversation_model = pipeline("text-generation", model="facebook/blenderbot-400M-distill")

# Load a pre-trained model for vector embeddings (384-dim MiniLM sentence encoder,
# used for resume/job-description/answer similarity scoring).
embedding_model = SentenceTransformer('all-MiniLM-L6-v2')

# Parse PDF and create resume content
def parse_resume(pdf):
    """Extract text from an uploaded PDF file.

    Args:
        pdf: A file path or binary file-like object accepted by PyPDF2.PdfReader.

    Returns:
        dict: A single-entry mapping {"Resume Content": text}, where text is the
        concatenation of all non-empty page texts joined by newlines.
    """
    reader = PyPDF2.PdfReader(pdf)
    # extract_text() can be expensive; the original called it twice per page
    # (once in the filter, once in the join). Extract each page exactly once.
    page_texts = (page.extract_text() for page in reader.pages)
    text = "\n".join(t for t in page_texts if t)
    sections = {"Resume Content": text}
    return sections

# Process job description text
def process_job_description(job_desc):
    """Encode the raw job-description text into a dense vector for similarity use."""
    job_desc_vector = embedding_model.encode(job_desc)
    return job_desc_vector

# Process resume and generate embeddings
def process_resume(pdf):
    """Parse the resume PDF and embed each extracted section.

    Returns:
        dict: section name -> embedding vector, one entry per section
        produced by parse_resume().
    """
    embeddings = {}
    for section_name, section_text in parse_resume(pdf).items():
        embeddings[section_name] = embedding_model.encode(section_text)
    return embeddings

# Generate a conversation response
def generate_conversation_response(user_input, job_desc_embedding):
    """Produce a hiring-manager-style reply to the user's utterance.

    NOTE(review): job_desc_embedding is accepted but never used — the prompt
    only *mentions* the job description in plain text; consider wiring the
    embedding (or the raw description) into the prompt or dropping the
    parameter. Also, nothing in this file calls this function — verify it is
    not dead code before extending it.

    Returns:
        str: the model's generated text (first of one returned sequence).
    """
    prompt = f"The user said: {user_input}. Respond appropriately as a professional hiring manager. Focus on how the response relates to the job description."
    response = conversation_model(prompt, max_length=100, num_return_sequences=1)
    return response[0]["generated_text"]

# Generate question from user response
def generate_question(user_input, resume_embeddings, job_desc_embedding):
    """Pick the resume section most similar to the answer and ask a follow-up.

    The user's transcribed answer is embedded and compared (cosine similarity)
    against every section embedding; the best-matching section name is folded
    into a templated follow-up question. job_desc_embedding is currently
    unused but kept for interface stability.
    """
    answer_vec = embedding_model.encode(user_input)
    # Linear scan for the highest-scoring section; ties keep the first seen,
    # matching dict iteration order.
    best_section = None
    best_score = float("-inf")
    for section, section_vec in resume_embeddings.items():
        score = cosine_similarity([answer_vec], [section_vec])[0][0]
        if score > best_score:
            best_score = score
            best_section = section
    return f"Based on your experience in {best_section}, can you elaborate more?"

# Gradio interface
class MockInterview:
    """Holds one interview session's state and wires the model helpers together."""

    def __init__(self):
        # Embeddings are populated by upload_inputs(); until then the
        # session is inactive and conduct_interview() refuses to run.
        self.resume_embeddings = None
        self.job_desc_embedding = None
        self.interview_active = False

    def upload_inputs(self, resume, job_desc):
        """Embed the resume PDF and job-description text, then arm the session."""
        self.resume_embeddings = process_resume(resume)
        self.job_desc_embedding = process_job_description(job_desc)
        self.interview_active = True
        return "Resume and job description processed. Interview is ready to begin."

    def conduct_interview(self, audio):
        """Transcribe one spoken answer and return (transcription, follow-up question).

        Returns an instructional message (and an empty question) when the
        session has not been armed via upload_inputs().
        """
        if not self.interview_active:
            return "Please upload your resume and job description first.", ""
        transcription = stt_model(audio)["text"]
        follow_up = generate_question(
            transcription, self.resume_embeddings, self.job_desc_embedding
        )
        return transcription, follow_up

    def end_interview(self):
        """Deactivate the session and return a closing message."""
        self.interview_active = False
        return "Interview ended. Thank you for participating."

# Single shared session object used by all Gradio callbacks below; because it
# is module-level, every browser session shares the same interview state.
mock_interview = MockInterview()

def upload_inputs(resume, job_desc):
    """Gradio callback: forward the uploaded resume and job description to the session."""
    return mock_interview.upload_inputs(resume, job_desc)

def conduct_interview(audio):
    """Gradio callback: transcribe one recorded answer and get the next question."""
    return mock_interview.conduct_interview(audio)

def end_interview():
    """Gradio callback: close the shared interview session."""
    return mock_interview.end_interview()

# Build the Gradio Blocks UI: an upload row, a recording/control row, and an
# output row, wired to the module-level callback functions above.
interface = gr.Blocks()
with interface:
    gr.Markdown("""# Mock Interview AI
Upload your resume and job description, then engage in a realistic interview simulation.""")

    # Row 1: inputs needed before the interview can start.
    with gr.Row():
        resume_input = gr.File(label="Upload Resume (PDF)")
        job_desc_input = gr.Textbox(label="Paste Job Description")
        upload_button = gr.Button("Upload")

    # Row 2: per-turn audio answer plus session controls.
    with gr.Row():
        audio_input = gr.Audio(type="filepath", label="Speak Your Answer")
        submit_button = gr.Button("Submit Response")
        end_button = gr.Button("End Interview")

    # Row 3: model outputs for the current turn.
    with gr.Row():
        transcription_output = gr.Textbox(label="Transcription")
        question_output = gr.Textbox(label="Question")

    # NOTE(review): the upload and end buttons write their status messages into
    # the "Transcription" box — a dedicated status textbox may be intended.
    upload_button.click(upload_inputs, inputs=[resume_input, job_desc_input], outputs=[transcription_output])
    submit_button.click(conduct_interview, inputs=[audio_input], outputs=[transcription_output, question_output])
    end_button.click(end_interview, outputs=[transcription_output])

# Launch the web app only when run as a script, not on import.
if __name__ == "__main__":
    interface.launch()