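"""Streamlit demo: match candidate resumes (PDF) against a job description.

The app extracts text from uploaded PDFs with PyPDF2, runs named entity
recognition with GLiNER, and scores each resume against the job description
using TF-IDF vectors and cosine similarity, with Plotly charts for the results.
"""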
import streamlit as st
import pandas as pd
from PyPDF2 import PdfReader
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from gliner import GLiNER
import plotly.express as px
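# Assumed dependencies (not pinned in this file): streamlit, pandas, PyPDF2,
# scikit-learn, plotly, and gliner, e.g. installed with
#   pip install streamlit pandas PyPDF2 scikit-learn plotly gliner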
# Sidebar: demo badge plus usage notes for the app
with st.sidebar:
    st.button("DEMO APP", type="primary")
    expander = st.expander("**Important notes on the Named Entity Recognition (NER) Web App**")
    expander.write('''
**Supported File Formats**

This app accepts files in .pdf format.

**How to Use**

Upload your file first. Then, click the 'Results' button.

**Usage Limits**

You can request results up to 5 times.

**Subscription Management**

This demo app offers a one-day subscription, which expires after 24 hours. If you are interested in building your own Named Entity Recognition (NER) Web App, we invite you to explore our NLP Web App Store on our website. You can select your desired features, place your order, and we will deliver your custom app within five business days. If you wish to delete your account with us, please contact us at [email protected]

**Authorization**

For security purposes, your authorization access expires hourly. To restore access, click the "Request Authorization" button.

**Customization**

To change the app's background color to white or black, click the three-dot menu on the right-hand side of the app, go to Settings, and then use "Choose app theme, colors and fonts".

**File Handling and Errors**

The app may display an error message if your file is corrupt or has other errors.

For any errors or inquiries, please contact us at [email protected]
''')
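# --- Candidate profile 1: paste the job description and upload the resume PDF ---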
txt = st.text_area("Job description", key="text 1")
job = pd.Series(txt, name="Text")
st.dataframe(job)
# Cap the number of upload attempts stored in the session state
if 'upload_count' not in st.session_state:
    st.session_state['upload_count'] = 0
max_attempts = 20

if st.session_state['upload_count'] < max_attempts:
    uploaded_files = st.file_uploader(
        "Choose a PDF file", accept_multiple_files=True, type="pdf", key="candidate 1"
    )
    if uploaded_files:
        st.session_state['upload_count'] += 1
        for uploaded_file in uploaded_files:
            # Extract the raw text from every page of the uploaded PDF
            pdf_reader = PdfReader(uploaded_file)
            text_data = ""
            for page in pdf_reader.pages:
                text_data += page.extract_text()
            data = pd.Series(text_data, name='Text')
            st.dataframe(data)

            # Stack the job description and the resume text into one Series
            frames = [job, data]
            result = pd.concat(frames)
            st.dataframe(result)

            # Named entity recognition on the resume text with GLiNER
            model = GLiNER.from_pretrained("urchade/gliner_base")
            labels = ["person", "country", "city", "organization", "date", "money", "percent value", "position"]
            entities = model.predict_entities(text_data, labels)
            df = pd.DataFrame(entities)
            st.dataframe(df)

            # Treemap of the extracted entities, sized by prediction score
            fig1 = px.treemap(df, path=[px.Constant("all"), 'text', 'label'],
                              values='score', color='label')
            fig1.update_layout(margin=dict(t=50, l=25, r=25, b=25))
            st.plotly_chart(fig1, key="fig1")

            # TF-IDF vectors for the job description (row 0) and the resume (row 1)
            vectorizer = TfidfVectorizer()
            tfidf_matrix = vectorizer.fit_transform(result)
            tfidf_df = pd.DataFrame(tfidf_matrix.toarray(), columns=vectorizer.get_feature_names_out())
            st.subheader("TF-IDF Values:")
            st.dataframe(tfidf_df)

            # Cosine similarity between the TF-IDF vectors
            cosine_sim_matrix = cosine_similarity(tfidf_matrix)
            cosine_sim_df = pd.DataFrame(cosine_sim_matrix)
            st.subheader("Cosine Similarity Matrix:")
            st.dataframe(cosine_sim_df)
            st.subheader("A score closer to 1 means a closer match")
            fig = px.imshow(cosine_sim_df, text_auto=True,
                            labels=dict(x="Text", y="Text", color="Cosine similarity"),
                            x=['Job Description', 'Candidate Profile 1'],
                            y=['Job Description', 'Candidate Profile 1'])
            st.plotly_chart(fig)

            # Row 0 of the matrix holds the job description's similarity to each resume
            st.subheader("Cosine Similarity Scores (Job Description vs. Resumes):")
            for i, similarity_score in enumerate(cosine_sim_matrix[0][1:]):
                st.write(f"Similarity with Candidate Profile {i + 1}: {similarity_score:.4f}")
else:
    st.warning(f"You have reached the maximum upload attempts ({max_attempts}). Please refresh to upload more files.")

if 'upload_count' in st.session_state and st.session_state['upload_count'] > 0:
    st.info(f"Files uploaded {st.session_state['upload_count']} time(s).")
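# --- Candidate profile 2: same pipeline as above, with a second uploader ---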
st.subheader("Candidate profile 2", divider="green")
txt = st.text_area("Job description", key="text 2")
job = pd.Series(txt, name="Text")
st.dataframe(job)

# Re-use the shared session counter; this section allows fewer attempts
if 'upload_count' not in st.session_state:
    st.session_state['upload_count'] = 0
max_attempts = 2

if st.session_state['upload_count'] < max_attempts:
    uploaded_files = st.file_uploader(
        "Choose a PDF file", accept_multiple_files=True, type="pdf", key="candidate 2"
    )
    if uploaded_files:
        st.session_state['upload_count'] += 1
        for uploaded_file in uploaded_files:
            # Extract the raw text from every page of the uploaded PDF
            pdf_reader = PdfReader(uploaded_file)
            text_data = ""
            for page in pdf_reader.pages:
                text_data += page.extract_text()
            data = pd.Series(text_data, name='Text')
            st.dataframe(data)

            # Stack the job description and the resume text into one Series
            frames = [job, data]
            result = pd.concat(frames)
            st.dataframe(result)

            # Named entity recognition on the resume text with GLiNER
            model = GLiNER.from_pretrained("urchade/gliner_base")
            labels = ["person", "country", "city", "organization", "date", "money", "percent value", "position"]
            entities = model.predict_entities(text_data, labels)
            df = pd.DataFrame(entities)
            st.dataframe(df)

            # Treemap of the extracted entities, sized by prediction score
            fig2 = px.treemap(df, path=[px.Constant("all"), 'text', 'label'],
                              values='score', color='label')
            fig2.update_layout(margin=dict(t=50, l=25, r=25, b=25))
            st.plotly_chart(fig2, key="fig2")

            # TF-IDF vectors for the job description (row 0) and the resume (row 1)
            vectorizer = TfidfVectorizer()
            tfidf_matrix = vectorizer.fit_transform(result)
            tfidf_df = pd.DataFrame(tfidf_matrix.toarray(), columns=vectorizer.get_feature_names_out())
            st.subheader("TF-IDF Values:")
            st.dataframe(tfidf_df)

            # Cosine similarity between the TF-IDF vectors
            cosine_sim_matrix = cosine_similarity(tfidf_matrix)
            cosine_sim_df = pd.DataFrame(cosine_sim_matrix)
            st.subheader("Cosine Similarity Matrix:")
            st.dataframe(cosine_sim_df)
            st.subheader("A score closer to 1 means a closer match")
            fig = px.imshow(cosine_sim_df, text_auto=True,
                            labels=dict(x="Text", y="Text", color="Cosine similarity"),
                            x=['Job Description', 'Candidate Profile 2'],
                            y=['Job Description', 'Candidate Profile 2'])
            st.plotly_chart(fig)

            # Row 0 of the matrix holds the job description's similarity to each resume
            st.subheader("Cosine Similarity Scores (Job Description vs. Resumes):")
            for i, similarity_score in enumerate(cosine_sim_matrix[0][1:]):
                st.write(f"Similarity with Candidate Profile {i + 1}: {similarity_score:.4f}")
else:
    st.warning(f"You have reached the maximum upload attempts ({max_attempts}). Please refresh to upload more files.")

if 'upload_count' in st.session_state and st.session_state['upload_count'] > 0:
    st.info(f"Files uploaded {st.session_state['upload_count']} time(s).")