import json
import re
import os
from pathlib import Path
from typing import Dict, List, Optional
from pdfminer.high_level import extract_text as pdf_extract_text
from docx import Document
from transformers import AutoTokenizer, AutoModelForTokenClassification, pipeline
import logging

# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
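
# GPU use is opt-in via the L4_GPU environment variable read in
# ResumeParser._load_model below, e.g. (module filename is assumed):
#   L4_GPU=true python resume_parser.py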

class ResumeParser:
    def __init__(self):
        self.ner_pipeline = None
        self.model_loaded = False
        self._load_model()
    
    def _load_model(self):
        """Load the NER model with error handling and fallbacks"""
        try:
            # Try the original model first
            MODEL_NAME = "manishiitg/resume-ner"
            logger.info(f"Attempting to load model: {MODEL_NAME}")
            
            tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
            model = AutoModelForTokenClassification.from_pretrained(MODEL_NAME)
            self.ner_pipeline = pipeline(
                "ner", 
                model=model, 
                tokenizer=tokenizer, 
                aggregation_strategy="simple",
                device=0 if os.environ.get("L4_GPU", "false").lower() == "true" else -1
            )
            self.model_loaded = True
            logger.info("Model loaded successfully")
            
        except Exception as e:
            logger.warning(f"Failed to load primary model: {e}")
            try:
                # Fallback to a more reliable model
                MODEL_NAME = "dbmdz/bert-large-cased-finetuned-conll03-english"
                logger.info(f"Trying fallback model: {MODEL_NAME}")
                
                self.ner_pipeline = pipeline(
                    "ner", 
                    model=MODEL_NAME,
                    aggregation_strategy="simple",
                    device=0 if os.environ.get("L4_GPU", "false").lower() == "true" else -1
                )
                self.model_loaded = True
                logger.info("Fallback model loaded successfully")
                
            except Exception as e2:
                logger.error(f"Failed to load fallback model: {e2}")
                self.model_loaded = False

    def extract_text(self, file_path: str) -> str:
        """Extract text from PDF or DOCX files with error handling"""
        try:
            path = Path(file_path)
            
            if not path.exists():
                raise FileNotFoundError(f"File not found: {file_path}")
            
            if path.suffix.lower() == ".pdf":
                text = pdf_extract_text(file_path)
                # Clean up PDF extraction artifacts: collapse runs of spaces/tabs
                # but keep newlines, which the section regexes below rely on
                text = re.sub(r'[ \t]+', ' ', text)
                text = re.sub(r'\n\s*\n+', '\n\n', text).strip()
                logger.info(f"Extracted {len(text)} characters from PDF")
                return text
                
            elif path.suffix.lower() == ".docx":
                doc = Document(file_path)
                text = "\n".join([p.text for p in doc.paragraphs if p.text.strip()])
                logger.info(f"Extracted {len(text)} characters from DOCX")
                return text
                
            else:
                raise ValueError(f"Unsupported file format: {path.suffix}")
                
        except Exception as e:
            logger.error(f"Error extracting text: {e}")
            raise

    def extract_with_regex(self, text: str) -> Dict[str, List[str]]:
        """Improved regex patterns for extraction"""
        patterns = {
            'email': r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,}\b',
            'phone': r'(?:\+?\d{1,3}[-.\s]?)?\(?\d{3}\)?[-.\s]?\d{3}[-.\s]?\d{4}',
            'skills': r'(?i)(?:skills?|technologies?|tools?|expertise)[:\-\s]*(.*?)(?:\n\n|\n\s*\n|$)',
            'education': r'(?i)(?:education|degree|university|college|bachelor|master|phd)[:\-\s]*(.*?)(?:\n\n|\n\s*\n|$)',
            'experience': r'(?i)(?:experience|work\shistory|employment|job\shistory)[:\-\s]*(.*?)(?:\n\n|\n\s*\n|$)',
            'name': r'^(?!(?i:resume|cv|curriculum vitae)|\d)[A-Z][a-z]+(?:\s+[A-Z][a-z]+)+'
        }
        
        results = {}
        for key, pattern in patterns.items():
            # The name heuristic depends on letter case, so IGNORECASE is
            # applied to every pattern except 'name'
            flags = re.MULTILINE if key == 'name' else re.MULTILINE | re.IGNORECASE
            matches = re.findall(pattern, text, flags)
            if key == 'name' and matches:
                # Take the first likely name match
                results[key] = [matches[0].strip()]
            else:
                # Clean and filter matches
                cleaned = [m.strip() for m in matches if m.strip()]
                if cleaned:
                    results[key] = cleaned
        
        return results

    def extract_name_from_text(self, text: str) -> str:
        """Improved name extraction heuristics"""
        # First try to find name using regex
        name_match = re.search(
            r'^(?!(?i:resume|cv|curriculum vitae)|\d)[A-Z][a-z]+(?:\s+[A-Z][a-z]+)+',
            text,
            re.MULTILINE  # deliberately case-sensitive: the heuristic assumes title-cased names
        )
        
        if name_match:
            return name_match.group(0).strip()
        
        # Fallback to line-based approach
        lines = text.split('\n')
        for line in lines[:10]:  # Check first 10 lines
            line = line.strip()
            if line and 2 <= len(line.split()) <= 4:
                # Check if it looks like a name (not email, phone, etc.)
                if not re.search(r'[@\d+\-\(\)]', line):
                    if line[0].isupper() and not line.lower().startswith(('resume', 'cv', 'curriculum')):
                        return line
        return "Not Found"

    def process_ner_entities(self, entities: List[Dict]) -> Dict[str, List[str]]:
        """Process NER entities with improved logic"""
        results = {
            "name": [],
            "skills": [],
            "education": [],
            "experience": []
        }
        
        logger.info(f"Processing {len(entities)} entities")
        
        for ent in entities:
            label = ent.get("entity_group", "").upper()
            value = ent.get("word", "").strip()
            confidence = ent.get("score", 0)
            
            # Skip low confidence entities and empty values
            if confidence < 0.7 or not value:
                continue
                
            # Normalize labels
            if label in ["PERSON", "PER", "NAME"]:
                results["name"].append(value)
            elif label in ["SKILL", "TECH", "TECHNOLOGY"]:
                results["skills"].append(value)
            elif label in ["EDUCATION", "DEGREE", "EDU"] or (
                # Generic NER models tag schools as ORG; keep only ORGs that look like schools
                label == "ORG"
                and any(k in value.lower() for k in ("university", "college", "institute"))
            ):
                results["education"].append(value)
            elif label in ["EXPERIENCE", "JOB", "ROLE", "POSITION", "WORK"]:
                results["experience"].append(value)
        
        # Deduplicate and clean results
        for key in results:
            results[key] = list(dict.fromkeys(results[key]))  # Preserve order
            
        return results

    def merge_results(self, ner_results: Dict, regex_results: Dict) -> Dict[str, str]:
        """Merge NER and regex results intelligently"""
        merged = {
            "name": "Not Found",
            "email": "Not Found",
            "phone": "Not Found",
            "skills": "Not Found",
            "education": "Not Found",
            "experience": "Not Found"
        }
        
        # Name - prioritize NER, then regex, then text extraction
        if ner_results.get("name"):
            merged["name"] = " ".join(ner_results["name"][:1])  # Take first name only
        elif regex_results.get("name"):
            merged["name"] = regex_results["name"][0]
        
        # Email and phone - only from regex
        if regex_results.get("email"):
            merged["email"] = regex_results["email"][0]
        if regex_results.get("phone"):
            merged["phone"] = regex_results["phone"][0]
        
        # Skills - combine both sources
        all_skills = []
        if ner_results.get("skills"):
            all_skills.extend(ner_results["skills"])
        if regex_results.get("skills"):
            all_skills.extend(regex_results["skills"])
        if all_skills:
            merged["skills"] = ", ".join(list(dict.fromkeys(all_skills))[:10])  # Limit to 10 skills
            
        # Education - combine both sources
        all_edu = []
        if ner_results.get("education"):
            all_edu.extend(ner_results["education"])
        if regex_results.get("education"):
            all_edu.extend(regex_results["education"])
        if all_edu:
            merged["education"] = ", ".join(list(dict.fromkeys(all_edu))[:3]  # Limit to 3 items
            
        # Experience - combine both sources
        all_exp = []
        if ner_results.get("experience"):
            all_exp.extend(ner_results["experience"])
        if regex_results.get("experience"):
            all_exp.extend(regex_results["experience"])
        if all_exp:
            merged["experience"] = ", ".join(list(dict.fromkeys(all_exp))[:3]  # Limit to 3 items
            
        return merged

    def parse_resume(self, file_path: str, filename: Optional[str] = None) -> Dict[str, str]:
        """Parse resume with multiple extraction methods"""
        try:
            # Extract text
            text = self.extract_text(file_path)
            
            if not text or len(text.strip()) < 10:
                raise ValueError("Extracted text is too short or empty")
            
            logger.info(f"Text preview: {text[:200]}...")
            
            # Initialize results
            ner_results = {
                "name": [],
                "skills": [],
                "education": [],
                "experience": []
            }
            
            # Method 1: Try NER model if available
            if self.model_loaded and self.ner_pipeline:
                try:
                    logger.info("Using NER model for extraction")
                    # BERT-style models accept roughly 512 tokens (~2,000 characters);
                    # longer input can make the pipeline raise, so truncate conservatively
                    entities = self.ner_pipeline(text[:2000])
                    ner_results = self.process_ner_entities(entities)
                    logger.info(f"NER results: {json.dumps(ner_results, indent=2)}")
                except Exception as e:
                    logger.warning(f"NER extraction failed: {e}")
            
            # Method 2: Regex extraction
            logger.info("Using regex patterns for extraction")
            regex_results = self.extract_with_regex(text)
            logger.info(f"Regex results: {json.dumps(regex_results, indent=2)}")
            
            # Method 3: Name extraction fallback
            if not ner_results.get("name") and not regex_results.get("name"):
                name = self.extract_name_from_text(text)
                if name != "Not Found":
                    regex_results["name"] = [name]
            
            # Merge all results
            final_results = self.merge_results(ner_results, regex_results)
            
            # If name still not found, try filename
            if final_results["name"] == "Not Found" and filename:
                # Try to extract name from filename (common pattern: "Firstname Lastname - Resume.pdf")
                name_from_file = re.sub(r'[-_].*', '', filename).strip()
                if len(name_from_file.split()) >= 2:
                    final_results["name"] = name_from_file
            
            logger.info("Parsing completed successfully")
            return final_results
            
        except Exception as e:
            logger.error(f"Error parsing resume: {e}")
            return {
                "name": "Error",
                "email": "Error",
                "phone": "Error",
                "skills": "Error",
                "education": "Error",
                "experience": "Error",
                "error": str(e)
            }

# Create global instance
resume_parser = ResumeParser()

def parse_resume(file_path: str, filename: Optional[str] = None) -> Dict[str, str]:
    """Main function to parse resume"""
    return resume_parser.parse_resume(file_path, filename)
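
# Example usage as a library (module filename and paths are hypothetical):
#
#   from resume_parser import parse_resume
#   results = parse_resume("/tmp/jane_doe_resume.pdf", "jane_doe_resume.pdf")
#   print(results["email"])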

if __name__ == "__main__":
    # Test the parser
    test_file = input("Enter path to resume file: ")
    if os.path.exists(test_file):
        results = parse_resume(test_file, os.path.basename(test_file))
        print("\nParsing Results:")
        print(json.dumps(results, indent=2))
    else:
        print("File not found")