husseinelsaadi committed
Commit a511250 · 1 Parent(s): 864c2ae
Files changed (1)
  1. backend/services/resume_parser.py +19 -9
backend/services/resume_parser.py CHANGED
@@ -1,14 +1,17 @@
 from transformers import AutoTokenizer, AutoModelForTokenClassification, pipeline
 import subprocess, zipfile, re, os
 
-# === Load pretrained HF model instead of training ===
-MODEL_NAME = "sravya-abburi/ResumeParserBERT"  # or Kiet/autotrain-resume_parser-1159242747
+# === Load pretrained HF model ===
+MODEL_NAME = "sravya-abburi/ResumeParserBERT"  # or "Kiet/autotrain-resume_parser-1159242747"
 tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
 model = AutoModelForTokenClassification.from_pretrained(MODEL_NAME)
-ner_pipeline = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="simple")
+
+# Use CPU for stability (device=-1) to avoid GPU memory issues from other parts of the app
+ner_pipeline = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="simple", device=-1)
 
 # === Extract text from PDF/DOCX ===
 def extract_text(file_path: str) -> str:
+    """Extract text from PDF or DOCX resumes."""
     if file_path.lower().endswith(".pdf"):
         result = subprocess.run(
             ["pdftotext", "-layout", file_path, "-"],
@@ -24,14 +27,21 @@ def extract_text(file_path: str) -> str:
     return ""
 
 # === Parse resume with NER ===
-def parse_resume(file_path: str) -> dict:
+def parse_resume(file_path: str, filename: str = None) -> dict:
+    """Parse resume and extract Name, Skills, Education, Experience."""
     text = extract_text(file_path)
     entities = ner_pipeline(text)
 
     name, skills, education, experience = [], [], [], []
+
     for ent in entities:
+        word = ent["word"].strip()
         label = ent["entity_group"].upper()
-        word = ent["word"]
+
+        # Skip empty or placeholder tokens
+        if not word or word.startswith("LABEL_"):
+            continue
+
         if label == "NAME":
             name.append(word)
         elif label == "SKILL":
@@ -42,8 +52,8 @@ def parse_resume(file_path: str) -> dict:
         experience.append(word)
 
     return {
-        "name": " ".join(set(name)),
-        "skills": ", ".join(set(skills)),
-        "education": ", ".join(set(education)),
-        "experience": ", ".join(set(experience))
+        "name": " ".join(dict.fromkeys(name)),
+        "skills": ", ".join(dict.fromkeys(skills)),
+        "education": ", ".join(dict.fromkeys(education)),
+        "experience": ", ".join(dict.fromkeys(experience))
     }
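
For reference, a minimal usage sketch of the updated parse_resume (not part of the commit; it assumes the backend package is importable, pdftotext is installed, and the sample PDF path is hypothetical):

    # Minimal usage sketch -- hypothetical path and import layout, not from the commit.
    from backend.services.resume_parser import parse_resume

    result = parse_resume("/tmp/sample_resume.pdf")  # filename argument is optional
    print(result["name"])    # space-joined NAME tokens, de-duplicated
    print(result["skills"])  # comma-separated SKILL tokens, de-duplicated

    # dict.fromkeys(...) de-duplicates while preserving first-seen order,
    # unlike set(...), so the joined strings come out in a stable order.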