Update app.py
app.py CHANGED
@@ -7,7 +7,6 @@ from transformers import AutoTokenizer, AutoModelForSequenceClassification, Auto
 from sentence_transformers import SentenceTransformer, util
 from groq import Groq
 import gradio as gr
-from docxtpl import DocxTemplate
 
 # Set your API key for Groq
 os.environ["GROQ_API_KEY"] = "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"
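For reference, the context line above sets the Groq API key by assigning it directly into `os.environ` at import time. A minimal sketch of constructing the client from an environment variable instead, assuming a `GROQ_API_KEY` secret is configured for the Space; the client construction is standard `groq` SDK usage, and the model name in the commented call is only illustrative:

```python
import os

from groq import Groq

# Assumes GROQ_API_KEY is provided by the environment (e.g. a Hugging Face Space secret)
# rather than being written into the source file.
client = Groq(api_key=os.environ.get("GROQ_API_KEY"))

# Illustrative call shape only; the model name is an assumption, not taken from app.py.
# response = client.chat.completions.create(
#     model="llama-3.1-8b-instant",
#     messages=[{"role": "user", "content": "Draft a short note to the candidate."}],
# )
```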
@@ -66,17 +65,18 @@ def extract_experience(text):
     experience_years = [int(year[0]) for year in experience_years]
     return experience_years, job_titles
 
-# ---
-def
-"""
-model_name = "
+# --- Sentiment Analysis --- #
+def analyze_sentiment(text):
+    """Analyzes the sentiment of the text."""
+    model_name = "mrm8488/distiluse-base-multilingual-cased-v2-finetuned-stsb_multi_mt-es"
     tokenizer = AutoTokenizer.from_pretrained(model_name)
-model =
+    model = AutoModelForSequenceClassification.from_pretrained(model_name)
 
-inputs = tokenizer(
-
-
-
+    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True)
+    with torch.no_grad():
+        outputs = model(**inputs)
+    predicted_sentiment = torch.argmax(outputs.logits).item()
+    return ["Negative", "Neutral", "Positive"][predicted_sentiment]
 
 # --- Semantic Similarity Calculation --- #
 def calculate_semantic_similarity(text1, text2):
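The `analyze_sentiment` helper added above follows the usual `transformers` sequence-classification pattern: tokenize, run a forward pass under `torch.no_grad()`, and map the argmax of the logits onto a fixed label list. A self-contained sketch of that pattern; the checkpoint name and the three-label ordering here are illustrative assumptions, not taken from the commit:

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

def classify_text(text: str,
                  model_name: str = "cardiffnlp/twitter-roberta-base-sentiment-latest",
                  labels=("Negative", "Neutral", "Positive")) -> str:
    # Load a checkpoint that exposes a sequence-classification head;
    # the label order must match the checkpoint's config to be meaningful.
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForSequenceClassification.from_pretrained(model_name)

    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True)
    with torch.no_grad():
        logits = model(**inputs).logits

    # argmax over the class dimension selects the most likely label index.
    return labels[logits.argmax(dim=-1).item()]

# Example (assumed) usage, mirroring how analyze_resume calls analyze_sentiment:
# print(classify_text("Led a team of five and delivered the migration ahead of schedule."))
```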
@@ -90,7 +90,7 @@ def calculate_semantic_similarity(text1, text2):
     similarity_percentage = similarity_score * 100
     return similarity_percentage
 
-# --- Communication Generation
+# --- Communication Generation --- #
 def communication_generator(resume_skills, job_description_skills, skills_similarity, qualifications_similarity, experience_similarity, candidate_experience):
     """Generates a detailed communication response based on similarity scores and additional criteria."""
     # Assess candidate fit based on similarity scores
@@ -118,19 +118,6 @@ def communication_generator(resume_skills, job_description_skills, skills_simila
 
     return message
 
-# --- Sentiment Analysis --- #
-def analyze_sentiment(text):
-    """Analyzes the sentiment of the text."""
-    model_name = "mrm8488/distiluse-base-multilingual-cased-v2-finetuned-stsb_multi_mt-es"
-    tokenizer = AutoTokenizer.from_pretrained(model_name)
-    model = AutoModelForSequenceClassification.from_pretrained(model_name)
-
-    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True)
-    with torch.no_grad():
-        outputs = model(**inputs)
-    predicted_sentiment = torch.argmax(outputs.logits).item()
-    return ["Negative", "Neutral", "Positive"][predicted_sentiment]
-
 # --- Updated Resume Analysis Function --- #
 def analyze_resume(resume_file, job_description_file):
     # Load and preprocess resume and job description
@@ -150,10 +137,13 @@ def analyze_resume(resume_file, job_description_file):
     required_experience = sum(job_description_experience)  # Assuming total years required
 
     # Calculate similarity scores
-    skills_similarity = len(set(resume_skills).intersection(set(job_description_skills))) / len(job_description_skills) * 100
-    qualifications_similarity = len(set(resume_qualifications).intersection(set(job_description_qualifications))) / len(job_description_qualifications) * 100
+    skills_similarity = len(set(resume_skills).intersection(set(job_description_skills))) / len(job_description_skills) * 100 if job_description_skills else 0
+    qualifications_similarity = len(set(resume_qualifications).intersection(set(job_description_qualifications))) / len(job_description_qualifications) * 100 if job_description_qualifications else 0
     experience_similarity = 1.0 if total_experience >= required_experience else 0.0
 
+    # Sentiment analysis of the resume
+    sentiment_analysis_result = analyze_sentiment(resume_text)
+
     # Fit assessment logic
     fit_score = 0
     if total_experience >= required_experience:
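The guarded expressions introduced in the hunk above are plain set-overlap ratios measured against the job description. A small worked example with made-up skill lists shows the arithmetic, and why the trailing `if ... else 0` matters when extraction returns an empty requirements list:

```python
resume_skills = ["python", "sql", "docker"]
job_description_skills = ["python", "sql", "aws", "kubernetes"]

# Two shared skills out of four required -> 2 / 4 * 100 = 50.0
skills_similarity = (
    len(set(resume_skills).intersection(set(job_description_skills)))
    / len(job_description_skills) * 100
    if job_description_skills else 0
)
print(skills_similarity)  # 50.0

# With no extracted requirements the guard short-circuits to 0
# instead of raising ZeroDivisionError.
empty_requirements = []
print(
    len(set(resume_skills).intersection(set(empty_requirements)))
    / len(empty_requirements) * 100
    if empty_requirements else 0
)  # 0
```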
@@ -171,38 +161,79 @@ def analyze_resume(resume_file, job_description_file):
     else:
         fit_assessment = "Not a fit"
 
-# Prepare output
-
-f"
-f"
-f"
-f"
-f"
-f"
-f"
-
-
-
+    # Prepare output messages for tab display
+    summary_message = (
+        f"### Summary of Analysis\n"
+        f"- **Skills Similarity**: {skills_similarity:.2f}%\n"
+        f"- **Qualifications Similarity**: {qualifications_similarity:.2f}%\n"
+        f"- **Experience Similarity**: {experience_similarity * 100:.2f}%\n"
+        f"- **Candidate Experience**: {total_experience} years\n"
+        f"- **Fit Assessment**: {fit_assessment}\n"
+        f"- **Sentiment Analysis**: {sentiment_analysis_result}\n"
+    )
+
+    skills_message = (
+        f"### Skills Overview\n"
+        f"- **Resume Skills:**\n" + "\n".join(f" - {skill}" for skill in resume_skills) + "\n"
+        f"- **Job Description Skills:**\n" + "\n".join(f" - {skill}" for skill in job_description_skills) + "\n"
+    )
+
+    qualifications_message = (
+        f"### Qualifications Overview\n"
+        f"- **Resume Qualifications:** " + ", ".join(resume_qualifications) + "\n" +
+        f"- **Job Description Qualifications:** " + ", ".join(job_description_qualifications) + "\n"
     )
 
-
+    experience_message = (
+        f"### Experience Overview\n"
+        f"- **Total Experience:** {total_experience} years\n"
+        f"- **Required Experience:** {required_experience} years\n"
+    )
+
+    # Generate communication based on analysis
+    communication = communication_generator(resume_skills, job_description_skills, skills_similarity, qualifications_similarity, experience_similarity, total_experience)
+
+    return summary_message, skills_message, qualifications_message, experience_message, communication
 
 def run_gradio_interface():
     with gr.Blocks() as demo:
         gr.Markdown("## Resume and Job Description Analyzer")
         resume_file = gr.File(label="Upload Resume")
         job_description_file = gr.File(label="Upload Job Description")
-
+
+        # Define outputs for each tab
+        summary_output = gr.Textbox(label="Summary of Analysis", interactive=False, lines=10)
+        skills_output = gr.Textbox(label="Skills Overview", interactive=False, lines=10)
+        qualifications_output = gr.Textbox(label="Qualifications Overview", interactive=False, lines=10)
+        experience_output = gr.Textbox(label="Experience Overview", interactive=False, lines=10)
+        communication_output = gr.Textbox(label="Communication", interactive=False, lines=10)
+
+        # Create tabs for output sections
+        with gr.Tab("Analysis Summary"):
+            summary_output.render()
+
+        with gr.Tab("Skills Overview"):
+            skills_output.render()
+
+        with gr.Tab("Qualifications Overview"):
+            qualifications_output.render()
+
+        with gr.Tab("Experience Overview"):
+            experience_output.render()
+
+        with gr.Tab("Communication"):
+            communication_output.render()
 
         def analyze(resume, job_desc):
             if resume and job_desc:
-
+                summary, skills, qualifications, experience, communication = analyze_resume(resume, job_desc)
+                return summary, skills, qualifications, experience, communication
             return "Please upload both files."
 
         analyze_button = gr.Button("Analyze")
-        analyze_button.click(analyze, inputs=[resume_file, job_description_file], outputs=
+        analyze_button.click(analyze, inputs=[resume_file, job_description_file], outputs=[summary_output, skills_output, qualifications_output, experience_output, communication_output])
 
     demo.launch()
 
 if __name__ == "__main__":
-run_gradio_interface()
+    run_gradio_interface()
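The interface changes in the hunk above follow the standard Gradio Blocks pattern: declare the file inputs, place one `Textbox` per result inside a `gr.Tab`, and wire a button's `click` event from the two inputs to the five outputs. A stripped-down, runnable sketch of that wiring; the stub function and component names are placeholders, not the app's real analysis logic:

```python
import gradio as gr

def analyze_stub(resume, job_desc):
    # Placeholder for analyze_resume(); the real app returns five formatted strings.
    if resume and job_desc:
        return "summary", "skills", "qualifications", "experience", "communication"
    return "Please upload both files.", "", "", "", ""

with gr.Blocks() as demo:
    resume_file = gr.File(label="Upload Resume")
    job_description_file = gr.File(label="Upload Job Description")

    # One output component per tab; components created inside a Tab render there.
    with gr.Tab("Analysis Summary"):
        summary_out = gr.Textbox(lines=10)
    with gr.Tab("Skills Overview"):
        skills_out = gr.Textbox(lines=10)
    with gr.Tab("Qualifications Overview"):
        qualifications_out = gr.Textbox(lines=10)
    with gr.Tab("Experience Overview"):
        experience_out = gr.Textbox(lines=10)
    with gr.Tab("Communication"):
        communication_out = gr.Textbox(lines=10)

    analyze_button = gr.Button("Analyze")
    analyze_button.click(
        analyze_stub,
        inputs=[resume_file, job_description_file],
        outputs=[summary_out, skills_out, qualifications_out, experience_out, communication_out],
    )

if __name__ == "__main__":
    demo.launch()
```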
|