TKM03 committed on
Commit 3453a71 · verified · 1 Parent(s): 99e48e5

Update app.py

Files changed (1)
  1. app.py +20 -16
app.py CHANGED
@@ -4,14 +4,14 @@ import gradio as gr
 from transformers import pipeline
 from collections import Counter
 
-# Load NER pipeline
+# Load NER pipeline for entity extraction
 ner_pipeline = pipeline("ner", model="dslim/bert-base-NER", tokenizer="dslim/bert-base-NER", aggregation_strategy="simple")
 
-# Load text classification model (replace with a job-role classifier if available)
-text_classifier = pipeline("text-classification", model="khaimait/job-title-classification-bert")
-
+# Load Job Category Classifier
+text_classifier = pipeline("text-classification", model="serbog/distilbert-jobCategory_410k")
 
 def clean_resume_text(text):
+    """Clean text by removing URLs, punctuation, non-ASCII chars."""
     text = re.sub(r'http\S+', ' ', text)
     text = re.sub(r'#\S+', '', text)
     text = re.sub(r'@\S+', ' ', text)
@@ -20,6 +20,7 @@ def clean_resume_text(text):
     return re.sub(r'\s+', ' ', text).strip()
 
 def extract_resume_text(file):
+    """Extract raw text from uploaded PDF."""
     try:
         reader = PyPDF2.PdfReader(file)
         text = ""
@@ -34,6 +35,7 @@ def extract_resume_text(file):
         return None, f"Error reading PDF: {str(e)}"
 
 def classify_resume_ner(entities):
+    """Classify by extracting key orgs and locations from NER output."""
     orgs = [e['word'] for e in entities if e['entity_group'] == 'ORG']
     locs = [e['word'] for e in entities if e['entity_group'] == 'LOC']
     jobs = [e['word'] for e in entities if e['entity_group'] == 'MISC']
@@ -49,6 +51,7 @@ def classify_resume_ner(entities):
     }
 
 def process_resumes(files):
+    """Process multiple resumes with NER and classification."""
     all_results = {}
     for file in files:
         file_name = file.name.split("/")[-1]
@@ -66,13 +69,14 @@ def process_resumes(files):
             "Persons": list({e["word"] for e in entities if e["entity_group"] == "PER"}),
             "Organizations": list({e["word"] for e in entities if e["entity_group"] == "ORG"}),
             "Locations": list({e["word"] for e in entities if e["entity_group"] == "LOC"}),
-            "Other": list({e["word"] for e in entities if e["entity_group"] not in ["PER", "ORG", "LOC"]}),
+            "Other Entities": list({e["word"] for e in entities if e["entity_group"] not in ["PER", "ORG", "LOC"]}),
             "Cleaned_Text": cleaned_text,
             "Classification (NER)": classification
         }
     return all_results
 
 def classify_resumes_with_model(files):
+    """Use job category model to predict the field/role."""
     predictions = {}
     for file in files:
         file_name = file.name.split("/")[-1]
@@ -81,26 +85,26 @@ def classify_resumes_with_model(files):
             predictions[file_name] = {"error": error}
             continue
         cleaned_text = clean_resume_text(resume_text)
-        result = text_classifier(cleaned_text[:512])  # Truncate long resumes
+        result = text_classifier(cleaned_text[:512])  # Truncate if too long
         predictions[file_name] = {
-            "Predicted Label (HuggingFace Classifier)": result[0]['label'],
-            "Confidence": round(result[0]['score'], 4)
+            "Predicted Job Category": result[0]['label'].replace("_", " ").title(),
+            "Confidence Score": round(result[0]['score'], 4)
         }
     return predictions
 
-# Gradio UI
-with gr.Blocks(title="Multi-Resume Entity & Job Classifier") as demo:
-    gr.Markdown("## 📂 Multi-Resume Entity Extractor & Classifier\nUpload multiple PDF resumes below. This tool extracts text, identifies key entities, and classifies job field using a Hugging Face model.")
+# Gradio Interface
+with gr.Blocks(title="Resume Analyzer") as demo:
+    gr.Markdown("## 📂 Multi-Resume Entity Extractor & Job Category Classifier\nUpload multiple PDF resumes. This tool uses NER to extract info and a job classification model to predict job field/category.")
 
     with gr.Row():
-        file_input = gr.File(file_types=[".pdf"], label="Upload Resume PDFs", file_count="multiple")
+        file_input = gr.File(file_types=[".pdf"], label="Upload Resumes (PDF)", file_count="multiple")
 
     with gr.Row():
-        extract_button = gr.Button("🔍 Extract & Analyze Entities")
-        classify_button = gr.Button("🧠 Predict Job Role with Classifier")
+        extract_button = gr.Button("🔍 Extract Entities")
+        classify_button = gr.Button("🧠 Predict Job Category")
 
-    output_entities = gr.JSON(label="Entity Extraction & NER Classification")
-    output_class = gr.JSON(label="Predicted Job Classification (Model)")
+    output_entities = gr.JSON(label="Entity Results & NER Classification")
+    output_class = gr.JSON(label="Predicted Job Category (Model)")
 
     extract_button.click(fn=process_resumes, inputs=[file_input], outputs=[output_entities])
     classify_button.click(fn=classify_resumes_with_model, inputs=[file_input], outputs=[output_class])
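For quick review outside the Space, here is a minimal standalone sketch of the two pipelines this commit wires up. It is not part of the commit; the sample resume sentence is invented for illustration. It relies only on the documented transformers pipeline outputs that app.py already assumes: the aggregated NER pipeline yields dicts with 'entity_group', 'word' and 'score', and the text-classification pipeline returns a list of {'label', 'score'} dicts, which is what result[0]['label'] and result[0]['score'] index into.

# Standalone sanity check (assumed usage; mirrors the calls made in app.py)
from transformers import pipeline

ner_pipeline = pipeline("ner", model="dslim/bert-base-NER", tokenizer="dslim/bert-base-NER", aggregation_strategy="simple")
text_classifier = pipeline("text-classification", model="serbog/distilbert-jobCategory_410k")

# Invented sample standing in for a cleaned resume
sample = "John Smith worked as a data engineer at Google in London, building Python and SQL pipelines."

# Aggregated NER output: one dict per entity with 'entity_group', 'word', 'score', 'start', 'end'
for ent in ner_pipeline(sample):
    print(ent["entity_group"], ent["word"], round(float(ent["score"]), 3))

# Text-classification output: a list like [{'label': ..., 'score': ...}]
result = text_classifier(sample[:512])
print(result[0]["label"].replace("_", " ").title(), round(result[0]["score"], 4))

If the label set of serbog/distilbert-jobCategory_410k is already human-readable, the .replace("_", " ").title() step added in this commit is simply a no-op.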