Maria Tsilimos committed
Commit a88983c · unverified · 1 Parent(s): cbb80dc

Create app.py

Files changed (1):
app.py +182 -0
app.py ADDED
@@ -0,0 +1,182 @@
import streamlit as st
import time
import pandas as pd
import io
from transformers import pipeline
from streamlit_extras.stylable_container import stylable_container
import json
import plotly.express as px
from PyPDF2 import PdfReader
import docx
import zipfile
from gliner import GLiNER

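# App flow: an uploaded .pdf or .docx is converted to plain text, GLiNER tags it
# with the fourteen labels defined further down, and the results are shown as a
# styled table plus a Plotly tree map, pie chart and bar chart, with a zipped CSV download.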
st.subheader("14-Named Entity Recognition Web App", divider="red")
st.link_button("by nlpblogs", "https://nlpblogs.com", type="tertiary")

expander = st.expander("**Important notes on the 14-Named Entity Recognition Web App**")
expander.write('''

**Named Entities:**
This 14-Named Entity Recognition Web App predicts fourteen (14) labels (“person”, “location”, “country”, “city”, “organization”, “time”, “date”, “product”, “event name”, “money”, “affiliation”, “ordinal value”, “percent value”, “position”). Results are presented in an easy-to-read table, visualized in an interactive tree map, pie chart, and bar chart, and are available for download along with a Glossary of tags.

**How to Use:**
Upload your .pdf or .docx file. Then, click the 'Results' button to extract and tag entities in your text data.

**Usage Limits:**
You can request results up to 10 times.

**Customization:**
To change the app's background color to white or black, click the three-dot menu on the right-hand side of your app, go to Settings and then Choose app theme, colors and fonts.

**Technical issues:**
If your connection times out, please refresh the page or reopen the app's URL.

For any errors or inquiries, please contact us at [email protected]

''')

with st.sidebar:
    container = st.container(border=True)
    container.write("**Named Entity Recognition (NER)** is the task of extracting and tagging entities in text data. Entities can be persons, organizations, locations, countries, products, events etc.")

    st.subheader("Related NLP Web Apps", divider="red")
    st.link_button("8-Named Entity Recognition Web App", "https://nlpblogs.com/shop/named-entity-recognition-ner/8-named-entity-recognition-web-app/", type="primary")

if 'file_upload_attempts' not in st.session_state:
    st.session_state['file_upload_attempts'] = 0

max_attempts = 10
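# st.session_state persists across Streamlit reruns within one browser session, so
# this counter is what enforces the request limit below; it resets when the page is
# reloaded, because nothing is stored server-side between sessions.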

upload_file = st.file_uploader("Upload your file. Accepted file formats include: .pdf, .docx", type=['pdf', 'docx'])
text = None
df = None

if upload_file is not None:
    file_extension = upload_file.name.split('.')[-1].lower()
    if file_extension == 'pdf':
        try:
            pdf_reader = PdfReader(upload_file)
            text = ""
            for page in pdf_reader.pages:
                text += page.extract_text()
            st.write("Due to security protocols, the file content is hidden.")
        except Exception as e:
            st.error(f"An error occurred while reading PDF: {e}")
    elif file_extension == 'docx':
        try:
            doc = docx.Document(upload_file)
            text = "\n".join([para.text for para in doc.paragraphs])
            st.write("Due to security protocols, the file content is hidden.")
        except Exception as e:
            st.error(f"An error occurred while reading docx: {e}")
    else:
        st.warning("Unsupported file type.")
        st.stop()
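# Note: PdfReader only pulls a PDF's embedded text layer and python-docx only reads
# paragraph text, so scanned or image-only documents will yield little or no text
# here (there is no OCR step).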
if st.button("Results"):
    if st.session_state['file_upload_attempts'] >= max_attempts:
        st.error(f"You have requested results {max_attempts} times. You have reached your daily request limit.")
        st.stop()
    if text is None:
        # Nothing has been uploaded (or extraction failed), so there is no text to tag.
        st.warning("Please upload a .pdf or .docx file before requesting results.")
        st.stop()
    st.session_state['file_upload_attempts'] += 1

    with st.spinner('Wait for it...', show_time=True):
        time.sleep(5)
        model = GLiNER.from_pretrained("xomad/gliner-model-merge-large-v1.0")
        labels = ["person", "location", "country", "city", "organization", "time", "date", "product", "event name", "money", "affiliation", "ordinal value", "percent value", "position"]
        entities = model.predict_entities(text, labels)
        df = pd.DataFrame(entities)
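    # `entities` is the list of dicts returned by GLiNER's predict_entities; each dict
    # carries "text", "label", "score", "start" and "end" (character offsets), e.g.
    # {"text": "Paris", "label": "city", "score": 0.97, "start": 10, "end": 15}
    # (illustrative values), which pd.DataFrame turns into the Glossary columns below.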

    properties = {"border": "2px solid gray", "color": "blue", "font-size": "16px"}
    df_styled = df.style.set_properties(**properties)
    st.dataframe(df_styled)

    with st.expander("See Glossary of tags"):
        st.write('''
'**text**': ['entity extracted from your text data']

'**score**': ['accuracy score; how accurately a tag has been assigned to a given entity']

'**label**': ['label (tag) assigned to a given extracted entity']

'**start**': ['index of the start of the corresponding entity']

'**end**': ['index of the end of the corresponding entity']
''')

    if df is not None:
        fig = px.treemap(df, path=[px.Constant("all"), 'text', 'label'],
                         values='score', color='label')
        fig.update_layout(margin=dict(t=50, l=25, r=25, b=25))
        st.subheader("Tree map", divider="red")
        st.plotly_chart(fig)

    if df is not None:
        value_counts1 = df['label'].value_counts()
        df1 = pd.DataFrame(value_counts1)
        final_df = df1.reset_index().rename(columns={"index": "label"})
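        # Assumes pandas >= 2.0, where value_counts() names its output "count" and keeps
        # the original column name ("label") as the index name, so reset_index() already
        # yields the "label" and "count" columns used by the charts below; the rename of
        # "index" is effectively a no-op on that version.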
        col1, col2 = st.columns(2)
        with col1:
            fig1 = px.pie(final_df, values='count', names='label', hover_data=['count'], labels={'count': 'count'}, title='Percentage of predicted labels')
            fig1.update_traces(textposition='inside', textinfo='percent+label')
            st.subheader("Pie Chart", divider="red")
            st.plotly_chart(fig1)
        with col2:
            fig2 = px.bar(final_df, x="count", y="label", color="label", text_auto=True, title='Occurrences of predicted labels')
            st.subheader("Bar Chart", divider="red")
            st.plotly_chart(fig2)

    dfa = pd.DataFrame(
        data={
            'text': ['entity extracted from your text data'],
            'score': ['accuracy score; how accurately a tag has been assigned to a given entity'],
            'label': ['label (tag) assigned to a given extracted entity'],
            'start': ['index of the start of the corresponding entity'],
            'end': ['index of the end of the corresponding entity'],
        })
    buf = io.BytesIO()
    with zipfile.ZipFile(buf, "w") as myzip:
        myzip.writestr("Summary of the results.csv", df.to_csv(index=False))
        myzip.writestr("Glossary of tags.csv", dfa.to_csv(index=False))
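    # The archive is built entirely in memory with io.BytesIO, so no temporary files
    # are written; buf.getvalue() below passes the raw zip bytes to st.download_button.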

    with stylable_container(
        key="download_button",
        css_styles="""button { background-color: yellow; border: 1px solid black; padding: 5px; color: black; }""",
    ):
        st.download_button(
            label="Download zip file",
            data=buf.getvalue(),
            file_name="zip file.zip",
            mime="application/zip",
        )


st.divider()
st.write(f"Number of times you requested results: {st.session_state['file_upload_attempts']}/{max_attempts}")