Update app.py
app.py
@@ -2,11 +2,21 @@ from sentence_transformers import SentenceTransformer
 import streamlit as st
 import pandas as pd
 from PyPDF2 import PdfReader
+import nltk
+nltk.download('punkt')
+from nltk.corpus import stopwords
+nltk.download('stopwords')
+from nltk.tokenize import word_tokenize
 
 model = SentenceTransformer("all-mpnet-base-v2")
 st.title("AI Resume Analysis based on Keywords App")
 st.divider()
 job_desc = st.text_area("Paste the job description and then press Ctrl + Enter", key="job_desc")
+text_tokens = []
+for sentence in job_desc:
+    text_tokens.extend(word_tokenize(job_desc))
+job_desc = [word for word in text_tokens if not word in stopwords.words()]
+st.write(job_desc)
 
 if 'applicant_data' not in st.session_state:
     st.session_state['applicant_data'] = {}
@@ -30,8 +40,13 @@ for i in range(1, 51): # Looping for 50 applicants
         for page in pdf_reader.pages:
            text_data += page.extract_text()
        with st.expander(f"See Applicant's {i} resume"):
-
-
+            text_tokens = []
+            for sentence in text_data:
+                text_tokens.extend(word_tokenize(text_data))
+            text_data = [word for word in text_tokens if not word in stopwords.words()]
+            st.write(text_data)
+
+
        # Encode the job description and resume text separately
        job_embedding = model.encode([job_desc])
        resume_embedding = model.encode([text_data])
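A note on the code as committed: the tokenization block has a few bugs. "for sentence in job_desc:" iterates over the characters of the string, so word_tokenize(job_desc) runs once per character and every token ends up repeated len(job_desc) times; stopwords.words() is re-evaluated for every token and, with no language argument, returns the stopword lists for all languages NLTK ships; and after filtering, job_desc and text_data are lists of tokens, so model.encode([job_desc]) receives a list nested inside a list rather than a string. Below is a minimal sketch of the likely intent; the remove_stopwords helper, the English-only stopword list, and the lowercase comparison are assumptions of mine, not part of this commit:

import nltk
nltk.download('punkt')
nltk.download('stopwords')
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize

# Build the stopword set once; calling stopwords.words() per token is slow,
# and 'english' is an assumption (the commit passes no language argument).
STOP_WORDS = set(stopwords.words('english'))

def remove_stopwords(text: str) -> str:
    # Tokenize the full text exactly once (no per-character loop).
    tokens = word_tokenize(text)
    # Drop stopwords; the case-insensitive comparison is an assumption.
    kept = [tok for tok in tokens if tok.lower() not in STOP_WORDS]
    # Rejoin so the result stays a string that model.encode() accepts.
    return " ".join(kept)

# Hypothetical usage at the two spots touched by this commit:
# job_desc_clean = remove_stopwords(job_desc)
# st.write(job_desc_clean)
# job_embedding = model.encode([job_desc_clean])
# resume_clean = remove_stopwords(text_data)
# resume_embedding = model.encode([resume_clean])

Converting the stopword list to a set also turns the per-token membership test from a linear scan into a constant-time lookup, which matters when the loop filters up to 50 resumes per run.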