husseinelsaadi committed
Commit 6d286f1
1 Parent(s): a511250
Files changed (1)
  1. backend/services/resume_parser.py +30 -24
backend/services/resume_parser.py CHANGED
@@ -1,47 +1,53 @@
 from transformers import AutoTokenizer, AutoModelForTokenClassification, pipeline
-import subprocess, zipfile, re, os
+import zipfile, re, os
 
-# === Load pretrained HF model ===
-MODEL_NAME = "sravya-abburi/ResumeParserBERT" # or "Kiet/autotrain-resume_parser-1159242747"
+# ===============================
+# Load Model & Tokenizer
+# ===============================
+MODEL_NAME = "sravya-abburi/ResumeParserBERT" # Change to Kiet/autotrain-resume_parser-1159242747 if needed
 tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
 model = AutoModelForTokenClassification.from_pretrained(MODEL_NAME)
+ner_pipeline = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="simple")
 
-# Use CPU for stability (device=-1) to avoid GPU memory issues from other parts of the app
-ner_pipeline = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="simple", device=-1)
-
-# === Extract text from PDF/DOCX ===
+# ===============================
+# Extract Text (PDF & DOCX)
+# ===============================
 def extract_text(file_path: str) -> str:
-    """Extract text from PDF or DOCX resumes."""
-    if file_path.lower().endswith(".pdf"):
-        result = subprocess.run(
-            ["pdftotext", "-layout", file_path, "-"],
-            stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=False
-        )
-        return result.stdout.decode("utf-8", errors="ignore")
-    elif file_path.lower().endswith(".docx"):
+    """Extract text from PDF or DOCX without external dependencies."""
+    file_path_lower = file_path.lower()
+
+    # PDF reading using PyMuPDF (built into Spaces environment)
+    if file_path_lower.endswith(".pdf"):
+        import fitz  # PyMuPDF
+        text = ""
+        with fitz.open(file_path) as pdf_doc:
+            for page in pdf_doc:
+                text += page.get_text()
+        return text
+
+    # DOCX reading by extracting XML content
+    elif file_path_lower.endswith(".docx"):
         with zipfile.ZipFile(file_path) as zf:
             with zf.open("word/document.xml") as docx_xml:
                 xml_text = docx_xml.read().decode("utf-8", errors="ignore")
                 xml_text = re.sub(r"<w:p[^>]*>", "\n", xml_text, flags=re.I)
                 return re.sub(r"<[^>]+>", " ", xml_text)
+
     return ""
 
-# === Parse resume with NER ===
+# ===============================
+# Parse Resume
+# ===============================
 def parse_resume(file_path: str, filename: str = None) -> dict:
-    """Parse resume and extract Name, Skills, Education, Experience."""
+    """Parse resume and extract structured information."""
     text = extract_text(file_path)
+
     entities = ner_pipeline(text)
 
     name, skills, education, experience = [], [], [], []
-
     for ent in entities:
-        word = ent["word"].strip()
         label = ent["entity_group"].upper()
-
-        # Skip empty or placeholder tokens
-        if not word or word.startswith("LABEL_"):
-            continue
-
+        word = ent["word"].strip()
         if label == "NAME":
             name.append(word)
         elif label == "SKILL":
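
As a quick way to exercise the updated parser locally, a sketch along these lines could work. The import path simply mirrors the file location backend/services/resume_parser.py (it assumes that package is importable), and the sample resume path is a placeholder, not a file included in this commit.

# Minimal sanity-check sketch, assuming the backend package is on PYTHONPATH.
from backend.services.resume_parser import parse_resume

if __name__ == "__main__":
    # Placeholder path; substitute any local PDF or DOCX resume.
    result = parse_resume("samples/example_resume.pdf")
    # parse_resume returns a dict built from the NER entities
    # (name, skills, education, experience per the code above).
    print(result)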