Maria Tsilimos committed on
Commit
addfe9a
·
unverified ·
1 Parent(s): 066fc17

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +279 -0
app.py ADDED
@@ -0,0 +1,279 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import requests
2
+ import streamlit as st
3
+ from bs4 import BeautifulSoup
4
+ import pandas as pd
5
+ from transformers import pipeline
6
+ import plotly.express as px
7
+ import time
8
+ import io
9
+ import os
10
+ from comet_ml import Experiment
11
+ import zipfile
12
+ import re
13
+ from streamlit_extras.stylable_container import stylable_container
14
+
15
+
16
# Page-wide Streamlit configuration (must run before any other st.* call).
st.set_page_config(page_title="Named Entity Recognition App", layout="wide")


# Comet ML credentials are read from the environment; logging is optional.
COMET_API_KEY = os.environ.get("COMET_API_KEY")
COMET_WORKSPACE = os.environ.get("COMET_WORKSPACE")
COMET_PROJECT_NAME = os.environ.get("COMET_PROJECT_NAME")

# Experiment tracking is enabled only when all three credentials are present.
comet_initialized = all((COMET_API_KEY, COMET_WORKSPACE, COMET_PROJECT_NAME))
28
+
29
+
30
+
31
# Page header with branding link.
st.subheader("4-Spanish Named Entity Recognition Web App", divider="rainbow")
st.link_button("by nlpblogs", "https://nlpblogs.com", type="tertiary")

# Collapsible usage notes shown under the header.
notes = st.expander("**Important notes on the 4-Spanish Named Entity Recognition Web App**")
notes.write('''

**Named Entities:**
This 4-Spanish Named Entity Recognition Web App predicts four (4) labels (“PER: person”, “LOC: location”, “ORG: organization”, “OTH: other”). Results are presented in an easy-to-read table, visualized in an interactive tree map, pie chart, and bar chart, and are available for download along with a Glossary of tags.

**How to Use:**
Paste a URL, and then press Enter. If you type or paste text, just press Ctrl + Enter.

**Usage Limits:**
You can request results up to 10 times.

**Customization:**
To change the app's background color to white or black, click the three-dot menu on the right-hand side of your app, go to Settings and then Choose app theme, colors and fonts.

**Technical issues:**
If your connection times out, please refresh the page or reopen the app's URL.

For any errors or inquiries, please contact us at [email protected]

''')
55
+
56
+
57
+
58
+
59
# Sidebar: short NER explainer plus links to related apps.
with st.sidebar:
    info_box = st.container(border=True)
    info_box.write("**Named Entity Recognition (NER)** is the task of extracting and tagging entities in text data. Entities can be persons, organizations, locations, countries, products, events etc.")
    st.subheader("Related NLP Web Apps", divider="rainbow")
    st.link_button("8-Named Entity Recognition Web App", "https://nlpblogs.com/shop/named-entity-recognition-ner/8-named-entity-recognition-web-app/", type="primary")
64
+
65
+
66
# One-time initialisation of the per-session request counter.
st.session_state.setdefault('source_type_attempts', 0)
max_attempts = 10
69
+
70
def clear_url_input():
    """Callback for the "Clear URL" button: blank out the URL widget's state."""
    st.session_state["url"] = ""
73
+
74
def clear_text_input():
    """Callback for the "Clear Text" button: blank out the text-area's state."""
    st.session_state["my_text_area"] = ""
77
+
78
# Input widgets: a URL field and a free-text area, each with a clear button.
url = st.text_input("Enter URL from the internet, and then press Enter:", key="url")
st.button("Clear URL", on_click=clear_url_input)

text = st.text_area("Type or paste your text below, and then press Ctrl + Enter", key='my_text_area')
st.button("Clear Text", on_click=clear_text_input)


# Decide which input drives this run. URL takes precedence over pasted text
# when both are supplied.
source_type, input_content = None, None
text_to_process = None

if url:
    source_type, input_content = 'url', url
elif text:
    source_type, input_content = 'text', text
95
+
96
if source_type:

    st.subheader("Results", divider="rainbow")

    # Enforce the per-session usage limit before doing any work.
    if st.session_state['source_type_attempts'] >= max_attempts:
        st.error(f"You have requested results {max_attempts} times. You have reached your daily request limit.")
        st.stop()

    st.session_state['source_type_attempts'] += 1

    @st.cache_resource
    def load_ner_model():
        """Load and cache the token-classification pipeline (loaded once per process).

        NOTE(review): the model id ("nbailab-base-ner-scandi") is a Scandinavian
        NER model, while the UI advertises Spanish NER — confirm the intended model.
        """
        return pipeline("token-classification", model="saattrupdan/nbailab-base-ner-scandi", aggregation_strategy="max")

    def _show_input_preview(content):
        """Render up to the first 500 characters of the text being processed."""
        st.divider()
        st.write("**Input text content**")
        st.write(content[:500] + "..." if len(content) > 500 else content)

    model = load_ner_model()
    experiment = None  # Comet experiment handle; closed in the finally block.

    try:
        if source_type == 'url':
            if not url.startswith(("http://", "https://")):
                st.error("Please enter a valid URL starting with 'http://' or 'https://'.")
            else:
                with st.spinner(f"Fetching and parsing content from **{url}**...", show_time=True):
                    response = requests.get(url, timeout=10)
                    response.raise_for_status()  # Raise an HTTPError for bad responses (4xx or 5xx)
                    soup = BeautifulSoup(response.text, 'html.parser')
                    text_to_process = soup.get_text(separator=' ', strip=True)
                    _show_input_preview(text_to_process)

        elif source_type == 'text':
            text_to_process = text
            _show_input_preview(text_to_process)

        if text_to_process and len(text_to_process.strip()) > 0:
            with st.spinner("Analyzing text...", show_time=True):
                entities = model(text_to_process)
                # Explicit columns keep the frame well-formed (and subscriptable)
                # even when the model returns no entities; previously an empty
                # result raised KeyError on df['word'] below.
                df = pd.DataFrame(
                    [
                        {
                            'word': entity['word'],
                            'entity_group': entity['entity_group'],
                            'score': entity['score'],
                            'start': entity['start'],  # start/end included for download
                            'end': entity['end'],
                        }
                        for entity in entities
                    ],
                    columns=['word', 'entity_group', 'score', 'start', 'end'],
                )

                # Strip punctuation from the extracted words; words reduced to the
                # empty string become 'Unknown'.
                pattern = r'[^\w\s]'
                df['word'] = df['word'].replace(pattern, '', regex=True)
                df = df.replace('', 'Unknown')
                st.dataframe(df)

                # Optional experiment tracking.
                if comet_initialized:
                    experiment = Experiment(
                        api_key=COMET_API_KEY,
                        workspace=COMET_WORKSPACE,
                        project_name=COMET_PROJECT_NAME,
                    )
                    experiment.log_parameter("input_source_type", source_type)
                    experiment.log_parameter("input_content_length", len(input_content))
                    experiment.log_table("predicted_entities", df)

                with st.expander("See Glossary of tags"):
                    st.write('''
                    '**word**': ['entity extracted from your text data']

                    '**score**': ['accuracy score; how accurately a tag has been assigned to a given entity']

                    '**entity_group**': ['label (tag) assigned to a given extracted entity']

                    '**start**': ['index of the start of the corresponding entity']

                    '**end**': ['index of the end of the corresponding entity']
                    '**B**'- (Beginning): Indicates the beginning of a given entity.

                    '**I**'- (Inside): Indicates a word that is inside a given entity but not the first one.

                    '**E**'- (End): Indicates the end of a given entity.

                    '**S**'- (Single): Indicates that a given entity is a single entity. It's both the beginning and the end.

                    '**O**' (Outside): Indicates that a word is outside of any named entity.
                    ''')

                # Visualizations are only meaningful when entities were found.
                if not df.empty:

                    st.markdown("---")
                    st.subheader("Treemap", divider="rainbow")
                    fig = px.treemap(df, path=[px.Constant("all"), 'entity_group', 'word'],
                                     values='score', color='entity_group',
                                     )
                    fig.update_layout(margin=dict(t=50, l=25, r=25, b=25))
                    st.plotly_chart(fig, use_container_width=True)
                    if comet_initialized and experiment:
                        experiment.log_figure(figure=fig, figure_name="entity_treemap")

                    value_counts = df['entity_group'].value_counts().reset_index()
                    value_counts.columns = ['entity_group', 'count']

                    col1, col2 = st.columns(2)
                    with col1:
                        st.subheader("Pie Chart", divider="rainbow")
                        fig1 = px.pie(value_counts, values='count', names='entity_group',
                                      hover_data=['count'], labels={'count': 'count'},
                                      title='Percentage of Predicted Labels')
                        fig1.update_traces(textposition='inside', textinfo='percent+label')
                        st.plotly_chart(fig1, use_container_width=True)
                        if comet_initialized and experiment:
                            experiment.log_figure(figure=fig1, figure_name="label_pie_chart")

                    with col2:
                        st.subheader("Bar Chart", divider="rainbow")
                        fig2 = px.bar(value_counts, x="count", y="entity_group", color="entity_group",
                                      text_auto=True, title='Occurrences of Predicted Labels')
                        st.plotly_chart(fig2, use_container_width=True)
                        if comet_initialized and experiment:
                            experiment.log_figure(figure=fig2, figure_name="label_bar_chart")
                else:
                    st.warning("No entities were extracted from the provided text.")

                # Glossary sheet shipped with every download.
                dfa = pd.DataFrame(
                    data={
                        'word': ['entity extracted from your text data'],
                        'score': ['accuracy score; how accurately a tag has been assigned to a given entity'],
                        'entity_group': ['label (tag) assigned to a given extracted entity'],
                        'start': ['index of the start of the corresponding entity'],
                        'end': ['index of the end of the corresponding entity'],
                        'B(Beginning)': ['Indicates the beginning of a given entity.'],
                        'I(Inside)': ['Indicates a word that is inside a given entity but not the first one.'],
                        'E(End)': ['Indicates the end of a given entity.'],
                        'S(Single)': ['Indicates that a given entity is a single entity.'],
                        # Fixed: column name used digit zero ('0(Outside)'); the tag is the letter O.
                        'O(Outside)': ['Indicates that a word is outside of any named entity.']
                    }
                )

                # Package results (when present) plus the glossary as a zip download.
                buf = io.BytesIO()
                with zipfile.ZipFile(buf, "w") as myzip:
                    if not df.empty:
                        myzip.writestr("Summary_of_results.csv", df.to_csv(index=False))
                    myzip.writestr("Glossary_of_tags.csv", dfa.to_csv(index=False))

                with stylable_container(
                    key="download_button",
                    css_styles="""button { background-color: yellow; border: 1px solid black; padding: 5px; color: black; }""",
                ):
                    st.download_button(
                        label="Download zip file",
                        data=buf.getvalue(),
                        file_name="nlpblogs_ner_results.zip",
                        mime="application/zip",)

                st.divider()
        else:
            st.warning("No meaningful text found to process. Please enter a URL or text.")

    except Exception as e:
        # Surface fetch/parse/model failures to the user instead of crashing the app.
        st.error(f"An unexpected error occurred: {e}")
    finally:
        # Close the Comet experiment even when an error occurred mid-run.
        if comet_initialized and experiment:
            experiment.end()

st.write(f"Number of times you requested results: **{st.session_state['source_type_attempts']}/{max_attempts}**")
279
+