Update app.py
app.py
CHANGED
@@ -1,34 +1,51 @@
-from sentence_transformers import SentenceTransformer
 import streamlit as st
-import pandas as pd
 from PyPDF2 import PdfReader
-import nltk
-nltk.download('punkt_tab')
-from nltk.corpus import stopwords
-nltk.download('stopwords')
-from nltk.tokenize import word_tokenize
-
-from sentence_transformers import SentenceTransformer
-import streamlit as st
 import pandas as pd
-from …
-import …
-from …
-
+from sklearn.feature_extraction.text import TfidfVectorizer
+from sklearn.metrics.pairwise import cosine_similarity
+from gliner import GLiNER
+import plotly.express as px
+import time
+from sentence_transformers import SentenceTransformer
+with st.sidebar:
+    st.button("DEMO APP", type="primary")
+
 
+expander = st.expander("**Important notes on the AI Resume Analysis based on Keywords App**")
+expander.write('''
+
+
+**Supported File Formats**
+This app accepts files in .pdf format.
+
+**How to Use**
+Paste the job description first. Then, upload the resume of each applicant to retrieve the results.
+
+**Usage Limits**
+For each applicant, you can upload their resume and request results once (1 request per applicant's resume).
+At the bottom of the app, you can also upload each applicant's resume (up to 50 times) to visualize their profile as a treemap chart and the results as a matrix heatmap. If you hover over the interactive graphs, an icon will appear to download them.
+
+**Subscription Management**
+This demo app offers a one-day subscription, expiring after 24 hours. If you are interested in building your own AI Resume Analysis based on Keywords Web App, we invite you to explore our NLP Web App Store on our website. You can select your desired features, place your order, and we will deliver your custom app within five business days. If you wish to delete your account with us, please contact us at [email protected]
+
+**Customization**
+To change the app's background color to white or black, click the three-dot menu on the right-hand side of the app, go to Settings, then "Choose app theme, colors and fonts".
+
+**File Handling and Errors**
+The app may display an error message if your file is corrupt or has other errors.
+
+
+For any errors or inquiries, please contact us at [email protected]
+
+
+
+''')
 
-stop_words = set(stopwords.words('english'))
 
 model = SentenceTransformer("all-mpnet-base-v2")
 st.title("AI Resume Analysis based on Keywords App")
 st.divider()
-
-
-# Process job description for stop words
-job_desc_tokens = word_tokenize(job_desc_raw.lower())
-job_desc_filtered = [word for word in job_desc_tokens if not word in stop_words]
-job_desc_processed = " ".join(job_desc_filtered)
-st.write("Processed Job Description:", job_desc_processed)
+job_desc = st.text_area("Paste the job description and then press Ctrl + Enter", key="job_desc")
 
 if 'applicant_data' not in st.session_state:
     st.session_state['applicant_data'] = {}
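
This first hunk swaps the NLTK stop-word pipeline for direct embedding of the raw text, and the job description now comes from a `st.text_area` widget instead of the removed `job_desc_raw` preprocessing. Two things worth noting: the sklearn imports added here (`TfidfVectorizer`, `cosine_similarity`) are never referenced in this revision, and the `sentence_transformers` import has to survive the cleanup because line 45 still instantiates the model. For reference, a minimal standalone sketch of the encode-and-score flow the file now relies on; the checkpoint name comes from the diff, the sample strings are illustrative, and `SentenceTransformer.similarity()` assumes a sentence-transformers 3.x release:

```python
from sentence_transformers import SentenceTransformer

# Same checkpoint the app loads at startup.
model = SentenceTransformer("all-mpnet-base-v2")

job_desc = "Data analyst with Python and SQL experience"    # illustrative
resume = "Analyst skilled in Python, SQL and dashboarding"  # illustrative

# encode() returns one embedding vector per input string.
job_embedding = model.encode([job_desc])
resume_embedding = model.encode([resume])

# similarity() computes pairwise cosine similarity; [0][0] picks the single pair.
score = model.similarity(job_embedding, resume_embedding)[0][0]
print(f"Keyword similarity: {score:.2f}")
```

Dropping stop-word removal is defensible here: sentence-embedding models are trained on natural running text, so stripping function words tends to hurt rather than help the comparison.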
@@ -48,23 +65,22 @@ for i in range(1, 51): # Looping for 50 applicants
     if st.session_state['applicant_data'][applicant_key]['uploaded_file'] and not st.session_state['applicant_data'][applicant_key]['analysis_done']:
         try:
             pdf_reader = PdfReader(st.session_state['applicant_data'][applicant_key]['uploaded_file'])
-            …
+            text_data = ""
             for page in pdf_reader.pages:
-                …
+                text_data += page.extract_text()
             with st.expander(f"See Applicant's {i} resume"):
-                st.write(
+                st.write(text_data)
 
-            # Process resume for stop words
-            text_data_tokens = word_tokenize(text_data.lower())
-            text_data_filtered = [word for word in text_data_tokens if not word in stop_words]
-            text_data_processed = " ".join(text_data_filtered)
-            st.write("Processed Resume:", text_data_processed)
+            # Encode the job description and resume text separately
+            job_embedding = model.encode([job_desc])
+            resume_embedding = model.encode([text_data])
 
-            # Encode the processed job description and resume text
-            job_embedding = model.encode([job_desc_processed])
-            resume_embedding = model.encode([text_data_processed])
             # Calculate the cosine similarity between the two embeddings
             similarity_score = model.similarity(job_embedding, resume_embedding)[0][0]
+
+            with st.spinner("Wait for it...", show_time=True):
+                time.sleep(2)
+
             with st.popover(f"See Result for Applicant {i}"):
                 st.write(f"Similarity between Applicant's resume and job description based on keywords: {similarity_score:.2f}")
                 st.info(
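
In this hunk the raw page text is accumulated with `text_data += page.extract_text()`. With PyPDF2 that call can come back empty for pages without an extractable text layer (scanned images), and on some releases it can return None, which would make the `+=` raise a TypeError. A defensive variant, offered as a sketch rather than what the commit ships; `read_pdf_text` is a hypothetical helper name:

```python
from PyPDF2 import PdfReader

def read_pdf_text(uploaded_file) -> str:
    """Concatenate the text of every page, tolerating pages where
    extract_text() yields nothing (e.g. image-only scanned pages)."""
    reader = PdfReader(uploaded_file)
    return "\n".join(page.extract_text() or "" for page in reader.pages)
```

Falling back to an empty string keeps the similarity computation alive; actually scoring a scanned resume would require an OCR pass.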
@@ -75,4 +91,58 @@ for i in range(1, 51): # Looping for 50 applicants
     else:
         st.warning(f"Maximum upload attempts has been reached ({max_attempts}).")
         if st.session_state['applicant_data'][applicant_key]['upload_count'] > 0:
-            st.info(f"Files uploaded for Applicant {i}: {st.session_state['applicant_data'][applicant_key]['upload_count']} time(s).")
+            st.info(f"Files uploaded for Applicant {i}: {st.session_state['applicant_data'][applicant_key]['upload_count']} time(s).")
+
+
+
+
+st.divider()
+st.subheader("Visualise", divider="blue")
+
+if 'upload_count' not in st.session_state:
+    st.session_state['upload_count'] = 0
+max_attempts = 1
+
+if st.session_state['upload_count'] < max_attempts:
+    uploaded_files = st.file_uploader("Upload Applicant's resume", type="pdf", key="applicant 1")
+    if uploaded_files:
+        st.session_state['upload_count'] += 1
+        with st.spinner("Wait for it...", show_time=True):
+            time.sleep(2)
+        pdf_reader = PdfReader(uploaded_files)
+        text_data = ""
+        for page in pdf_reader.pages:
+            text_data += page.extract_text()
+
+        data = pd.Series(text_data, name='Text')
+        frames = [pd.Series(job_desc, name='Text'), data]  # wrap the raw string so pd.concat accepts it
+        result = pd.concat(frames)  # concatenated corpus; not used further in this revision
+        gliner_model = GLiNER.from_pretrained("urchade/gliner_base")  # distinct name; do not shadow the SentenceTransformer `model`
+        labels = ["person", "country", "organization", "role", "skills"]
+        entities = gliner_model.predict_entities(text_data, labels)
+        df = pd.DataFrame(entities)
+        st.subheader("Applicant's Profile", divider="orange")
+        fig = px.treemap(df, path=[px.Constant("all"), 'text', 'label'],
+                         values='score', color='label')
+        fig.update_layout(margin=dict(t=50, l=25, r=25, b=25))
+        st.plotly_chart(fig, key="figure 1")
+
+
+        # Embed the resume and the job description with the SentenceTransformer
+        embeddings = model.encode([text_data, job_desc])
+
+
+        similarity_matrix = model.similarity(embeddings, embeddings).numpy()  # 2x2 matrix matching the heatmap axes
+
+
+        st.subheader("Similarity between Applicant's Profile and Job Description", divider="orange")
+
+        fig = px.imshow(similarity_matrix, text_auto=True,
+                        labels=dict(color="Cosine similarity"),
+                        x=['Resume', 'Job Description'],
+                        y=['Resume', 'Job Description'])
+        st.plotly_chart(fig, key="figure 2")
+else:
+    st.warning(f"Maximum upload attempts has been reached ({max_attempts}).")
+    if 'upload_count' in st.session_state and st.session_state['upload_count'] > 0:
+        st.info(f"Files uploaded {st.session_state['upload_count']} time(s).")
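
The third hunk adds the Visualise section: GLiNER zero-shot NER over the resume, a treemap of the extracted entities, and a similarity heatmap, all gated behind a single upload attempt. A minimal sketch of the entity-extraction step in isolation; the sample sentence is illustrative, and `gliner_model` mirrors the non-shadowing name used above:

```python
from gliner import GLiNER
import pandas as pd
import plotly.express as px

gliner_model = GLiNER.from_pretrained("urchade/gliner_base")
labels = ["person", "country", "organization", "role", "skills"]

# predict_entities() returns a list of dicts with keys such as
# "text", "label" and "score" (plus character offsets).
text = "Jane Doe is a Python developer at Acme in Canada."  # illustrative
entities = gliner_model.predict_entities(text, labels)

df = pd.DataFrame(entities)
# Treemap: root -> entity text -> predicted label, sized by confidence.
fig = px.treemap(df, path=[px.Constant("all"), "text", "label"],
                 values="score", color="label")
fig.show()
```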
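
One subtlety in the heatmap: `px.imshow()` needs a 2-D array whose shape matches the x and y tick labels, which is why the revision above builds a full 2x2 pairwise matrix from both texts instead of passing the scalar `similarity_score` the popover uses. A self-contained sketch of that figure, with illustrative texts:

```python
import plotly.express as px
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("all-mpnet-base-v2")

resume_text = "Analyst skilled in Python, SQL and dashboarding"  # illustrative
job_desc = "Data analyst with Python and SQL experience"         # illustrative

embeddings = model.encode([resume_text, job_desc])

# Pairwise cosine similarities; the diagonal is 1.0 by construction.
sim = model.similarity(embeddings, embeddings).numpy()

fig = px.imshow(sim, text_auto=True,
                labels=dict(color="Cosine similarity"),
                x=["Resume", "Job Description"],
                y=["Resume", "Job Description"])
fig.show()
```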