husseinelsaadi committed
Commit 45f0a42
1 Parent(s): 471f933
Files changed (1)
  1. backend/services/resume_parser.py +275 -30
backend/services/resume_parser.py CHANGED
@@ -1,23 +1,82 @@
  import re
  from pathlib import Path
- from typing import Dict
-
+ from typing import Dict, List, Tuple
+ import spacy
  from pdfminer.high_level import extract_text as pdf_extract_text
  from docx import Document
  from transformers import AutoTokenizer, AutoModelForTokenClassification, pipeline
+ import nltk
+ from nltk.corpus import stopwords
+ from dateutil.parser import parse as date_parse

+ # Download required NLTK data
+ try:
+     nltk.download('stopwords', quiet=True)
+     nltk.download('punkt', quiet=True)
+ except Exception:
+     pass
+
+ # Load spaCy model for better NER
+ try:
+     nlp = spacy.load("en_core_web_sm")
+ except OSError:
+     print("Please install spacy model: python -m spacy download en_core_web_sm")
+     nlp = None
+
- MODEL_NAME = "manishiitg/resume-ner"
-
+ MODEL_NAME = "manishiitg/resume-ner"
  tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
  model = AutoModelForTokenClassification.from_pretrained(MODEL_NAME)
  ner_pipeline = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="simple")

- # Basic keyword lists (you can expand dynamically if needed)
- SKILL_KEYWORDS = ["python", "java", "sql", "docker", "aws", "machine learning", "flask", "django", "react"]
- EDU_KEYWORDS = ["bachelor", "master", "phd", "bsc", "msc", "mba", "computer science", "engineering"]
- JOB_KEYWORDS = ["engineer", "developer", "manager", "analyst", "consultant", "specialist"]
+ # Expanded keyword lists
+ SKILL_KEYWORDS = {
+     # Programming Languages
+     "python", "java", "javascript", "typescript", "c++", "c#", "ruby", "go", "rust", "kotlin", "swift",
+     "php", "r", "matlab", "scala", "perl", "bash", "powershell", "sql", "html", "css",
+
+     # Frameworks & Libraries
+     "react", "angular", "vue", "node.js", "express", "django", "flask", "spring", "spring boot",
+     ".net", "laravel", "rails", "fastapi", "pytorch", "tensorflow", "keras", "scikit-learn",
+
+     # Databases
+     "mysql", "postgresql", "mongodb", "redis", "elasticsearch", "cassandra", "oracle", "sql server",
+
+     # Cloud & DevOps
+     "aws", "azure", "gcp", "docker", "kubernetes", "jenkins", "terraform", "ansible", "ci/cd",
+
+     # Other Technical Skills
+     "machine learning", "deep learning", "data science", "nlp", "computer vision", "ai",
+     "rest api", "graphql", "microservices", "agile", "scrum", "git", "linux", "windows"
+ }
+
+ EDUCATION_PATTERNS = [
+     # Degrees
+     r"\b(bachelor|b\.?s\.?c?\.?|b\.?a\.?|b\.?tech|b\.?e\.?)\b",
+     r"\b(master|m\.?s\.?c?\.?|m\.?a\.?|m\.?tech|m\.?e\.?|mba)\b",
+     r"\b(ph\.?d\.?|doctorate|doctoral)\b",
+     r"\b(diploma|certificate|certification)\b",
+
+     # Fields of Study (bare "it" excluded: it would match the English pronoun)
+     r"\b(computer science|software engineering|information technology|cs)\b",
+     r"\b(electrical engineering|mechanical engineering|civil engineering)\b",
+     r"\b(data science|artificial intelligence|machine learning)\b",
+     r"\b(business administration|finance|accounting|marketing)\b",
+
+     # Institution indicators (single capture group so findall returns the full phrase)
+     r"\b((?:university|college|institute|school)\s+of\s+\w+)",
+     r"\b(\w+\s+(?:university|college|institute))\b"
+ ]
+
+ JOB_TITLE_PATTERNS = [
+     r"\b(software|senior|junior|lead|principal|staff)\s*(engineer|developer|programmer)\b",
+     r"\b(data|business|system|security)\s*(analyst|scientist|engineer)\b",
+     r"\b(project|product|program|engineering)\s*manager\b",
+     r"\b(devops|cloud|ml|ai|backend|frontend|full[\s-]?stack)\s*(engineer|developer)\b",
+     r"\b(consultant|architect|specialist|coordinator|administrator)\b"
+ ]

  def extract_text(file_path: str) -> str:
+     """Extract text from PDF or DOCX files"""
      path = Path(file_path)
      if path.suffix.lower() == ".pdf":
          text = pdf_extract_text(file_path)
@@ -26,34 +85,220 @@ def extract_text(file_path: str) -> str:
          text = "\n".join([p.text for p in doc.paragraphs])
      else:
          raise ValueError("Unsupported file format")
-     return text.replace("\n", " ").replace("\r", " ").strip()
+     return text

- def parse_resume(file_path: str, filename: str = None) -> Dict[str, str]:
-     text = extract_text(file_path)
-     entities = ner_pipeline(text)
-
-     # Model extraction (Name only works well)
-     name_parts = [ent["word"].strip() for ent in entities if ent["entity_group"].upper() in ["NAME", "PERSON"]]
-     full_name = " ".join(dict.fromkeys(name_parts)) or "Not Found"
-
-     # Skills fallback
-     skills_found = [skill for skill in SKILL_KEYWORDS if re.search(rf"\b{skill}\b", text, re.IGNORECASE)]
-
-     # Education fallback
-     education_found = [edu for edu in EDU_KEYWORDS if re.search(rf"\b{edu}\b", text, re.IGNORECASE)]
-
-     # Experience fallback
-     experience_found = []
-     for job in JOB_KEYWORDS:
-         if re.search(rf"\b{job}\b", text, re.IGNORECASE):
-             experience_found.append(job)
-     years_match = re.findall(r"(\d+)\s*(?:years|yrs)", text, re.IGNORECASE)
-     if years_match:
-         experience_found.append(f"{max(map(int, years_match))} years")
-
+ def clean_text(text: str) -> str:
+     """Clean and normalize text"""
+     # Remove runs of spaces and tabs (but not newlines)
+     text = re.sub(r'[ \t]+', ' ', text)
+     # Keep line breaks for section detection
+     text = re.sub(r'\n{3,}', '\n\n', text)
+     return text.strip()
+
+ def extract_sections(text: str) -> Dict[str, str]:
+     """Extract different sections from resume"""
+     sections = {
+         'education': '',
+         'experience': '',
+         'skills': '',
+         'summary': ''
+     }
+
+     # Common section headers
+     section_patterns = {
+         'education': r'(education|academic|qualification|degree)',
+         'experience': r'(experience|employment|work\s*history|professional\s*experience|career)',
+         'skills': r'(skills|technical\s*skills|competencies|expertise)',
+         'summary': r'(summary|objective|profile|about)'
+     }
+
+     lines = text.split('\n')
+     current_section = None
+
+     for i, line in enumerate(lines):
+         line_lower = line.lower().strip()
+
+         # Check if this line is a section header
+         for section, pattern in section_patterns.items():
+             if re.search(pattern, line_lower) and len(line_lower) < 50:
+                 current_section = section
+                 break
+
+         # Add content to current section
+         if current_section and i > 0:
+             sections[current_section] += line + '\n'
+
+     return sections
+
+ def extract_name(text: str, entities: List) -> str:
+     """Extract name using multiple methods"""
+     # Method 1: Use transformer model
+     name_parts = []
+     for ent in entities:
+         if ent["entity_group"].upper() in ["NAME", "PERSON", "PER"]:
+             name_parts.append(ent["word"].strip())
+
+     if name_parts:
+         # Clean and join name parts
+         full_name = " ".join(dict.fromkeys(name_parts))
+         full_name = re.sub(r'\s+', ' ', full_name).strip()
+         if len(full_name) > 3 and len(full_name.split()) <= 4:
+             return full_name
+
+     # Method 2: Use spaCy if available
+     if nlp:
+         doc = nlp(text[:500])  # Check first 500 chars
+         for ent in doc.ents:
+             if ent.label_ == "PERSON":
+                 name = ent.text.strip()
+                 if len(name) > 3 and len(name.split()) <= 4:
+                     return name
+
+     # Method 3: Pattern matching for first few lines
+     first_lines = text.split('\n')[:5]
+     for line in first_lines:
+         line = line.strip()
+         # Look for name pattern (2-4 words, title case)
+         if re.match(r'^[A-Z][a-z]+(\s+[A-Z][a-z]+){1,3}$', line):
+             return line
+
+     return "Not Found"
+
+ def extract_skills(text: str, skill_section: str = "") -> List[str]:
+     """Extract skills using multiple methods"""
+     skills_found = set()
+
+     # Prioritize skills section if available
+     search_text = skill_section + " " + text if skill_section else text
+     search_text = search_text.lower()
+
+     # Method 1: Direct keyword matching (lookarounds instead of \b, so terms
+     # ending in non-word characters such as "c++" and "c#" still match)
+     for skill in SKILL_KEYWORDS:
+         if re.search(rf'(?<!\w){re.escape(skill.lower())}(?!\w)', search_text):
+             skills_found.add(skill)
+
+     # Method 2: Pattern-based extraction
+     # Look for skills in bullet points or comma-separated lists
+     skill_patterns = [
+         r'[•·▪▫◦‣⁃]\s*([A-Za-z\s\+\#\.]+)',  # Bullet points
+         r'(?:skills?|technologies|tools?)[\s:]*([A-Za-z\s,\+\#\.]+)',  # After keywords
+     ]
+
+     for pattern in skill_patterns:
+         matches = re.findall(pattern, search_text, re.IGNORECASE)
+         for match in matches:
+             # Check each word/phrase in the match
+             potential_skills = re.split(r'[,;]', match)
+             for ps in potential_skills:
+                 ps = ps.strip().lower()
+                 if ps in SKILL_KEYWORDS:
+                     skills_found.add(ps)
+
+     return list(skills_found)
+
+ def extract_education(text: str, edu_section: str = "") -> List[str]:
+     """Extract education information"""
+     education_info = []
+
+     search_text = edu_section + " " + text if edu_section else text
+
+     # Extract degrees
+     for pattern in EDUCATION_PATTERNS:
+         matches = re.findall(pattern, search_text, re.IGNORECASE)
+         for match in matches:
+             if isinstance(match, tuple):
+                 match = match[0]
+             education_info.append(match)
+
+     # Extract years (graduation years)
+     year_pattern = r'\b(19[0-9]{2}|20[0-9]{2})\b'
+     years = re.findall(year_pattern, search_text)
+     education_info.extend(years[:3])
+
+     # Extract GPA if mentioned
+     gpa_pattern = r'(?:gpa|cgpa|grade)[\s:]*([0-9]\.[0-9]+)'
+     gpa_matches = re.findall(gpa_pattern, search_text, re.IGNORECASE)
+     if gpa_matches:
+         education_info.append(f"GPA {gpa_matches[0]}")
+
+     return list(dict.fromkeys(education_info))  # Remove duplicates
+
+ def extract_experience(text: str, exp_section: str = "") -> List[str]:
+     """Extract experience information"""
+     experience_info = []
+
+     search_text = exp_section + " " + text if exp_section else text
+
+     # Extract job titles
+     for pattern in JOB_TITLE_PATTERNS:
+         matches = re.findall(pattern, search_text, re.IGNORECASE)
+         for match in matches:
+             if isinstance(match, tuple):
+                 match = ' '.join(match).strip()
+             experience_info.append(match)
+
+     # Extract years of experience
+     exp_patterns = [
+         r'(\d+)\+?\s*(?:years?|yrs?)(?:\s+of)?\s+experience',
+         r'experience\s*:?\s*(\d+)\+?\s*(?:years?|yrs?)',
+         r'(\d+)\+?\s*(?:years?|yrs?)\s+(?:as|in|of)',
+     ]
+
+     for pattern in exp_patterns:
+         matches = re.findall(pattern, search_text, re.IGNORECASE)
+         if matches:
+             years = max(map(int, matches))
+             experience_info.append(f"{years}+ years experience")
+             break
+
+     # Extract company names (common patterns)
+     company_patterns = [
+         r'(?:at|@|company|employer)\s*:?\s*([A-Z][A-Za-z\s&\.\-]+)',
+         r'([A-Z][A-Za-z\s&\.\-]+)\s*(?:inc|llc|ltd|corp|company)',
+     ]
+
+     for pattern in company_patterns:
+         matches = re.findall(pattern, search_text)
+         experience_info.extend(matches[:3])  # Limit to avoid false positives
+
+     return list(dict.fromkeys(experience_info))
+
+ def parse_resume(file_path: str, filename: str = None) -> Dict[str, str]:
+     """Main function to parse resume"""
+     # Extract and clean text
+     raw_text = extract_text(file_path)
+     text = clean_text(raw_text)
+
+     # Extract sections
+     sections = extract_sections(text)
+
+     # Get NER entities
+     entities = ner_pipeline(text[:1024])  # Limit for performance
+
+     # Extract information
+     name = extract_name(text, entities)
+     skills = extract_skills(text, sections.get('skills', ''))
+     education = extract_education(text, sections.get('education', ''))
+     experience = extract_experience(text, sections.get('experience', ''))
+
      return {
-         "name": full_name,
-         "skills": ", ".join(set(skills_found)) or "Not Found",
-         "education": ", ".join(set(education_found)) or "Not Found",
-         "experience": ", ".join(set(experience_found)) or "Not Found"
+         "name": name,
+         "skills": ", ".join(skills[:15]) if skills else "Not Found",  # Limit to 15 skills
+         "education": ", ".join(education[:5]) if education else "Not Found",
+         "experience": ", ".join(experience[:5]) if experience else "Not Found"
      }
+
+ # Optional: Add confidence scores
+ def parse_resume_with_confidence(file_path: str) -> Dict[str, Tuple[str, float]]:
+     """Parse resume with confidence scores for each field"""
+     result = parse_resume(file_path)
+
+     # Simple confidence calculation based on whether data was found
+     confidence_scores = {
+         "name": 0.9 if result["name"] != "Not Found" else 0.1,
+         "skills": min(0.9, len(result["skills"].split(",")) * 0.1) if result["skills"] != "Not Found" else 0.1,
+         "education": 0.8 if result["education"] != "Not Found" else 0.2,
+         "experience": 0.8 if result["experience"] != "Not Found" else 0.2
+     }
+
+     return {
+         key: (value, confidence_scores[key])
+         for key, value in result.items()
+     }
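
For reference, a minimal usage sketch of the API this commit ends up with. It is not part of the commit; the import path and the sample file name are assumptions, and it presumes the transformers model, the spaCy model, and the NLTK data downloaded successfully at import time.

from backend.services.resume_parser import parse_resume, parse_resume_with_confidence

# Hypothetical sample file; any .pdf or .docx path works
result = parse_resume("samples/jane_doe.pdf")
print(result["name"], "|", result["skills"])

# The confidence variant wraps each field as a (value, score) tuple
for field, (value, score) in parse_resume_with_confidence("samples/jane_doe.pdf").items():
    print(f"{field}: {value} (confidence {score:.1f})")

Every field falls back to the string "Not Found", so callers can treat the plain result as a flat Dict[str, str] without None checks.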