import gradio as gr
from transformers import AutoModel, AutoTokenizer
from peft import PeftModel
import torch
import torch.nn.functional as F

# Load the base BGE embedding model, attach the LoRA adapter, and load the matching tokenizer
base_model = AutoModel.from_pretrained("BAAI/bge-large-en-v1.5")
model = PeftModel.from_pretrained(base_model, "shashu2325/resume-job-matcher-lora")
tokenizer = AutoTokenizer.from_pretrained("BAAI/bge-large-en-v1.5")

def get_match_score(resume_text, job_text):
    # Tokenize both texts into fixed-length 512-token inputs
    resume_inputs = tokenizer(resume_text, return_tensors="pt", max_length=512, padding="max_length", truncation=True)
    job_inputs = tokenizer(job_text, return_tensors="pt", max_length=512, padding="max_length", truncation=True)

    with torch.no_grad():
        resume_outputs = model(**resume_inputs)
        job_outputs = model(**job_inputs)

        # Mean-pool the token embeddings into one vector per text
        resume_emb = resume_outputs.last_hidden_state.mean(dim=1)
        job_emb = job_outputs.last_hidden_state.mean(dim=1)

        # L2-normalize so the dot product below equals the cosine similarity
        resume_emb = F.normalize(resume_emb, p=2, dim=1)
        job_emb = F.normalize(job_emb, p=2, dim=1)

        # Squash the cosine similarity through a sigmoid to get a 0-1 match score
        similarity = torch.sum(resume_emb * job_emb, dim=1)
        score = torch.sigmoid(similarity).item()

    return f"Match Score: {score*100:.2f}%"

gr.Interface(
    fn=get_match_score,
    inputs=[
        gr.Textbox(label="Resume Text", lines=12, placeholder="Paste resume here..."),
        gr.Textbox(label="Job Description", lines=12, placeholder="Paste job description here...")
    ],
    outputs="text",
    title="Resume-Job Matcher",
    description="Upload resume and job description to get a match score using LoRA fine-tuned BGE model."
).launch()