File size: 7,335 Bytes
a88983c |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 |
import streamlit as st
import time
import pandas as pd
import io
from transformers import pipeline
from streamlit_extras.stylable_container import stylable_container
import json
import plotly.express as px
from PyPDF2 import PdfReader
import docx
import zipfile
from gliner import GLiNER
st.subheader("14-Named Entity Recognition Web App", divider = "red")
st.link_button("by nlpblogs", "https://nlpblogs.com", type = "tertiary")
expander = st.expander("**Important notes on the 14-Named Entity Recognition Web App**")
expander.write('''
**Named Entities:**
This 14-Named Entity Recognition Web App predicts fourteen (14) labels (“person”, “location”, “country”, “city”, “organization”, “time”, “date”, “product”, “event name”, “money”, “affiliation”, “ordinal value”, “percent value”, “position”). Results are presented in an easy-to-read table, visualized in an interactive tree map, pie chart, and bar chart, and are available for download along with a Glossary of tags.
**How to Use:**
Upload your .pdf or .docx file. Then, click the 'Results' button to extract and tag entities in your text data.
**Usage Limits:**
You can request results up to 10 times.
**Customization:**
To change the app's background color to white or black, click the three-dot menu on the right-hand side of your app, go to Settings and then Choose app theme, colors and fonts.
**Technical issues:**
If your connection times out, please refresh the page or reopen the app's URL.
For any errors or inquiries, please contact us at [email protected]
''')
# --- Sidebar: short NER primer plus links to related apps ----------------
with st.sidebar:
    primer_box = st.container(border=True)
    primer_box.write("**Named Entity Recognition (NER)** is the task of extracting and tagging entities in text data. Entities can be persons, organizations, locations, countries, products, events etc.")
    st.subheader("Related NLP Web Apps", divider="red")
    st.link_button("8-Named Entity Recognition Web App", "https://nlpblogs.com/shop/named-entity-recognition-ner/8-named-entity-recognition-web-app/", type="primary")
# --- Per-session rate limit and file uploader ----------------------------
# Each browser session may request results at most `max_attempts` times;
# the counter survives Streamlit reruns via session_state.
st.session_state.setdefault('file_upload_attempts', 0)
max_attempts = 10

upload_file = st.file_uploader(
    "Upload your file. Accepted file formats include: .pdf, .docx",
    type=['pdf', 'docx'],
)

# `text` will hold the extracted document text, `df` the predicted entities.
text = None
df = None
# --- Extract plain text from the uploaded document -----------------------
# Dispatches on the file extension; sets `text` on success, leaves it None
# (and shows an error) on failure. Content is never echoed back to the user.
if upload_file is not None:
    file_extension = upload_file.name.split('.')[-1].lower()
    if file_extension == 'pdf':
        try:
            pdf_reader = PdfReader(upload_file)
            text = ""
            for page in pdf_reader.pages:
                # extract_text() may return None on pages with no extractable
                # text (e.g. scanned images); guard so `+=` cannot raise
                # TypeError and abort the whole document.
                text += page.extract_text() or ""
            st.write("Due to security protocols, the file content is hidden.")
        except Exception as e:
            st.error(f"An error occurred while reading PDF: {e}")
    elif file_extension == 'docx':
        try:
            doc = docx.Document(upload_file)
            text = "\n".join(para.text for para in doc.paragraphs)
            st.write("Due to security protocols, the file content is hidden.")
        except Exception as e:
            st.error(f"An error occurred while reading docx: {e}")
    else:
        # Should be unreachable (the uploader filters by type), kept as a
        # defensive fallback.
        st.warning("Unsupported file type.")
        st.stop()
if st.button("Results"):
if st.session_state['file_upload_attempts'] >= max_attempts:
st.error(f"You have requested results {max_attempts} times. You have reached your daily request limit.")
st.stop()
st.session_state['file_upload_attempts'] += 1
with st.spinner('Wait for it...', show_time=True):
time.sleep(5)
model = GLiNER.from_pretrained("xomad/gliner-model-merge-large-v1.0")
labels = ["person", "location", "country", "city", "organization", "time", "date", "product", "event name", "money", "affiliation", "ordinal value", "percent value", "position"]
entities = model.predict_entities(text, labels)
df = pd.DataFrame(entities)
properties = {"border": "2px solid gray", "color": "blue", "font-size": "16px"}
df_styled = df.style.set_properties(**properties)
st.dataframe(df_styled)
with st.expander("See Glossary of tags"):
st.write('''
'**text**': ['entity extracted from your text data']
'**score**': ['accuracy score; how accurately a tag has been assigned to a given entity']
'**label**': ['label (tag) assigned to a given extracted entity']
'**start**': ['index of the start of the corresponding entity']
'**end**': ['index of the end of the corresponding entity']
''')
# --- Tree map: box size encodes model confidence, colour the label -------
# `not df.empty` guard: with zero predicted entities the frame has no
# 'text'/'label'/'score' columns and px.treemap would raise.
if df is not None and not df.empty:
    fig = px.treemap(df, path=[px.Constant("all"), 'text', 'label'],
                     values='score', color='label')
    fig.update_layout(margin=dict(t=50, l=25, r=25, b=25))
    st.subheader("Tree map", divider="red")
    st.plotly_chart(fig)
def _label_counts(entity_df):
    """Return a two-column frame ('label', 'count') with per-label entity
    occurrence counts.

    Explicitly renames the columns rather than relying on what
    ``value_counts().reset_index()`` produces, because the default column
    names differ between pandas 1.x ('index'/'label') and 2.x
    ('label'/'count').
    """
    counts = entity_df['label'].value_counts().reset_index()
    counts.columns = ['label', 'count']
    return counts


# --- Label-distribution charts and results download ----------------------
# `not df.empty` guard: an empty result frame has no 'label' column and
# df['label'] would raise KeyError.
if df is not None and not df.empty:
    final_df = _label_counts(df)
    col1, col2 = st.columns(2)
    with col1:
        fig1 = px.pie(final_df, values='count', names='label', hover_data=['count'], labels={'count': 'count'}, title='Percentage of predicted labels')
        fig1.update_traces(textposition='inside', textinfo='percent+label')
        st.subheader("Pie Chart", divider="red")
        st.plotly_chart(fig1)
    with col2:
        fig2 = px.bar(final_df, x="count", y="label", color="label", text_auto=True, title='Occurrences of predicted labels')
        st.subheader("Bar Chart", divider="red")
        st.plotly_chart(fig2)

    # Glossary of the result columns, bundled into the download alongside
    # the results themselves.
    dfa = pd.DataFrame(
        data={
            'text': ['entity extracted from your text data'], 'score': ['accuracy score; how accurately a tag has been assigned to a given entity'], 'label': ['label (tag) assigned to a given extracted entity'],
            'start': ['index of the start of the corresponding entity'],
            'end': ['index of the end of the corresponding entity'],
        })
    # Build the zip in memory (results CSV + glossary CSV).
    buf = io.BytesIO()
    with zipfile.ZipFile(buf, "w") as myzip:
        myzip.writestr("Summary of the results.csv", df.to_csv(index=False))
        myzip.writestr("Glossary of tags.csv", dfa.to_csv(index=False))
    with stylable_container(
        key="download_button",
        css_styles="""button { background-color: yellow; border: 1px solid black; padding: 5px; color: black; }""",
    ):
        st.download_button(
            label="Download zip file",
            data=buf.getvalue(),
            file_name="zip file.zip",
            mime="application/zip",
        )

st.divider()
st.write(f"Number of times you requested results: {st.session_state['file_upload_attempts']}/{max_attempts}")
|