Maria Tsilimos committed
Commit cf54cfb · unverified · 1 Parent(s): 1156047

Update app.py

Files changed (1): app.py +96 -83
app.py CHANGED
@@ -6,27 +6,48 @@ from transformers import pipeline
  from streamlit_extras.stylable_container import stylable_container
  import plotly.express as px
  import zipfile
-
  from PyPDF2 import PdfReader
  import docx
-
-
  import os
  from comet_ml import Experiment
  import re
- import numpy as np

- st.subheader("58-Italian Named Entity Recognition Web App", divider = "orange")
- st.link_button("by nlpblogs", "https://nlpblogs.com", type = "tertiary")

- expander = st.expander("**Important notes on the 58-Italian Named Entity Recognition Web App**")
  expander.write('''

  **Named Entities:**
- This 58-Italian Named Entity Recognition Web App predicts fifty-eight (58) labels

  ("**INDIRIZZO**: Identifica un indirizzo fisico.

@@ -142,7 +163,7 @@ expander.write('''
  **DOSAGGIO**: Quantità di un medicinale da assumere.

- **FORM**: Forma del medicinale, ad esempio compresse.")

  Results are presented in an easy-to-read table, visualized in an interactive tree map, pie chart, and bar chart, and are available for download along with a Glossary of tags.

@@ -166,34 +187,15 @@ expander.write('''
  with st.sidebar:
  container = st.container(border=True)
  container.write("**Named Entity Recognition (NER)** is the task of extracting and tagging entities in text data. Entities can be persons, organizations, locations, countries, products, events etc.")
- st.subheader("Related NLP Web Apps", divider = "orange")
- st.link_button("8-Named Entity Recognition Web App", "https://nlpblogs.com/shop/named-entity-recognition-ner/8-named-entity-recognition-web-app/", type = "primary")
-
-
- COMET_API_KEY = os.environ.get("COMET_API_KEY")
- COMET_WORKSPACE = os.environ.get("COMET_WORKSPACE")
- COMET_PROJECT_NAME = os.environ.get("COMET_PROJECT_NAME")
-
- if COMET_API_KEY and COMET_WORKSPACE and COMET_PROJECT_NAME:
- comet_initialized = True
- else:
- comet_initialized = False
- st.warning("Comet ML not initialized. Check environment variables.")
-
-
-
- if 'file_upload_attempts' not in st.session_state:
- st.session_state['file_upload_attempts'] = 0
-
- max_attempts = 10
-

  upload_file = st.file_uploader("Upload your file. Accepted file formats include: .pdf, .docx", type=['pdf', 'docx'])
  text = None
  df = None

  if upload_file is not None:
-
  file_extension = upload_file.name.split('.')[-1].lower()
  if file_extension == 'pdf':
  try:
@@ -201,47 +203,53 @@ if upload_file is not None:
  text = ""
  for page in pdf_reader.pages:
  text += page.extract_text()
- st.write("Due to security protocols, the file content is hidden.")
  except Exception as e:
  st.error(f"An error occurred while reading PDF: {e}")
  elif file_extension == 'docx':
  try:
  doc = docx.Document(upload_file)
  text = "\n".join([para.text for para in doc.paragraphs])
- st.write("Due to security protocols, the file content is hidden.")
  except Exception as e:
  st.error(f"An error occurred while reading docx: {e}")
  else:
  st.warning("Unsupported file type.")
-
- st.stop()
-

  st.divider()

  if st.button("Results"):
  if st.session_state['file_upload_attempts'] >= max_attempts:
  st.error(f"You have requested results {max_attempts} times. You have reached your daily request limit.")
  st.stop()
- st.session_state['file_upload_attempts'] += 1
-

- with st.spinner("Wait for it...", show_time=True):
- time.sleep(5)
- model = pipeline("token-classification", model="DeepMount00/Italian_NER_XXL", aggregation_strategy = "max")
- text1 = model(text)
-
- df1 = pd.DataFrame(text1)
- pattern = r'[^\w\s]'
- df1['word'] = df1['word'].replace(pattern, '', regex=True)

- df2 = df1.replace('', 'Unknown')
- df = df2.dropna()

-
-
-

  if comet_initialized:
  experiment = Experiment(
@@ -249,12 +257,13 @@ if st.button("Results"):
  workspace=COMET_WORKSPACE,
  project_name=COMET_PROJECT_NAME,
  )
- experiment.log_parameter("input_text", text)
  experiment.log_table("predicted_entities", df)

  properties = {"border": "2px solid gray", "color": "blue", "font-size": "16px"}
  df_styled = df.style.set_properties(**properties)
- st.dataframe(df_styled)

  with st.expander("See Glossary of tags"):
  st.write('''
@@ -269,45 +278,48 @@ if st.button("Results"):
  '**end**': ['index of the end of the corresponding entity']
  ''')

- if df is not None:
- fig = px.treemap(df, path=[px.Constant("all"), 'word', 'entity_group'],
  values='score', color='entity_group')
- fig.update_layout(margin = dict(t=50, l=25, r=25, b=25))
- st.subheader("Tree map", divider = "rainbow")
- st.plotly_chart(fig)

  if comet_initialized:
- experiment.log_figure(figure=fig, figure_name="entity_treemap")
-
- if df is not None:
- value_counts1 = df['entity_group'].value_counts()
- df1 = pd.DataFrame(value_counts1)
- final_df = df1.reset_index().rename(columns={"index": "entity_group"})
- col1, col2 = st.columns(2)
- with col1:
- fig1 = px.pie(final_df, values='count', names='entity_group', hover_data=['count'], labels={'count': 'count'}, title='Percentage of predicted labels')
- fig1.update_traces(textposition='inside', textinfo='percent+label')
- st.subheader("Pie Chart", divider = "orange")
- st.plotly_chart(fig1)
- if comet_initialized:
- experiment.log_figure(figure=fig1, figure_name="label_pie_chart")
- with col2:
- fig2 = px.bar(final_df, x="count", y="entity_group", color="entity_group", text_auto=True, title='Occurrences of predicted labels')
- st.subheader("Bar Chart", divider = "orange")
- st.plotly_chart(fig2)
- if comet_initialized:
- experiment.log_figure(figure=fig2, figure_name="label_bar_chart")

  dfa = pd.DataFrame(
  data={
- 'word': ['entity extracted from your text data'], 'score': ['accuracy score; how accurately a tag has been assigned to a given entity'], 'entity_group': ['label (tag) assigned to a given extracted entity'],
  'start': ['index of the start of the corresponding entity'],
  'end': ['index of the end of the corresponding entity'],
- })

  buf = io.BytesIO()
  with zipfile.ZipFile(buf, "w") as myzip:
  myzip.writestr("Summary of the results.csv", df.to_csv(index=False))
  myzip.writestr("Glossary of tags.csv", dfa.to_csv(index=False))
-

  with stylable_container(
  key="download_button",
@@ -316,7 +328,7 @@ if st.button("Results"):
  st.download_button(
  label="Download zip file",
  data=buf.getvalue(),
- file_name="zip file.zip",
  mime="application/zip",
  )
  if comet_initialized:
@@ -325,4 +337,5 @@ if st.button("Results"):
  st.divider()
  if comet_initialized:
  experiment.end()
- st.write(f"Number of times you requested results: {st.session_state['file_upload_attempts']}/{max_attempts}")

@@ -6,27 +6,48 @@ from transformers import pipeline
  from streamlit_extras.stylable_container import stylable_container
  import plotly.express as px
  import zipfile
  from PyPDF2 import PdfReader
  import docx
  import os
  from comet_ml import Experiment
  import re
+ import numpy as np
+
+ st.set_page_config(layout="wide", page_title="Named Entity Recognition App")

+ # --- Configuration ---
+ COMET_API_KEY = os.environ.get("COMET_API_KEY")
+ COMET_WORKSPACE = os.environ.get("COMET_WORKSPACE")
+ COMET_PROJECT_NAME = os.environ.get("COMET_PROJECT_NAME")
+
+ comet_initialized = False
+ if COMET_API_KEY and COMET_WORKSPACE and COMET_PROJECT_NAME:
+ comet_initialized = True
+
+ # --- Initialize session state ---
+ if 'file_upload_attempts' not in st.session_state:
+ st.session_state['file_upload_attempts'] = 0

+ max_attempts = 10

+ # --- Helper function for model loading ---
+ @st.cache_resource
+ def load_ner_model():
+ """Loads the pre-trained NER model and caches it."""
+ return pipeline("token-classification", model="DeepMount00/Italian_NER_XXL", aggregation_strategy="max")
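
The @st.cache_resource decorator added above is the main performance change: Streamlit runs the decorated loader once per server process and hands back the same pipeline object on every rerun, rather than re-instantiating the model each time the user clicks "Results". A minimal sketch of that behaviour (illustrative only, not part of app.py):

import streamlit as st

@st.cache_resource
def load_resource():
    # The body runs only on the first call in a given process;
    # later calls return the cached object without re-executing it.
    return {"model": "placeholder"}  # stands in for the heavy HF pipeline

first = load_resource()
second = load_resource()
assert first is second  # same cached object, no reload
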
+
+ # --- UI Elements ---
+ st.subheader("58-Italian Named Entity Recognition Web App", divider="rainbow")
+ st.link_button("by nlpblogs", "https://nlpblogs.com", type="tertiary")
+
+ expander = st.expander("**Important notes on the 58-Italian-Named Entity Recognition Web App**")
  expander.write('''

  **Named Entities:**
+ This 58-Italian-Named Entity Recognition Web App predicts fifty-eight (58) labels

  ("**INDIRIZZO**: Identifica un indirizzo fisico.

@@ -142,7 +163,7 @@ expander.write('''
  **DOSAGGIO**: Quantità di un medicinale da assumere.

+ **FORM**: Forma del medicinale, ad esempio compresse").

  Results are presented in an easy-to-read table, visualized in an interactive tree map, pie chart, and bar chart, and are available for download along with a Glossary of tags.

@@ -166,34 +187,15 @@ expander.write('''
  with st.sidebar:
  container = st.container(border=True)
  container.write("**Named Entity Recognition (NER)** is the task of extracting and tagging entities in text data. Entities can be persons, organizations, locations, countries, products, events etc.")
+ st.subheader("Related NLP Web Apps", divider="rainbow")
+ st.link_button("8-Named Entity Recognition Web App", "https://nlpblogs.com/shop/named-entity-recognition-ner/8-named-entity-recognition-web-app/", type="primary")

+ # --- File Upload ---
  upload_file = st.file_uploader("Upload your file. Accepted file formats include: .pdf, .docx", type=['pdf', 'docx'])
  text = None
  df = None

  if upload_file is not None:
  file_extension = upload_file.name.split('.')[-1].lower()
  if file_extension == 'pdf':
  try:
@@ -201,47 +203,53 @@ if upload_file is not None:
  text = ""
  for page in pdf_reader.pages:
  text += page.extract_text()
+ st.write("File uploaded successfully. Due to security protocols, the file content is hidden.")
  except Exception as e:
  st.error(f"An error occurred while reading PDF: {e}")
+ text = None
  elif file_extension == 'docx':
  try:
  doc = docx.Document(upload_file)
  text = "\n".join([para.text for para in doc.paragraphs])
+ st.write("File uploaded successfully. Due to security protocols, the file content is hidden.")
  except Exception as e:
  st.error(f"An error occurred while reading docx: {e}")
+ text = None
  else:
  st.warning("Unsupported file type.")
+ text = None

  st.divider()

+ # --- Results Button and Processing Logic ---
  if st.button("Results"):
+ if not comet_initialized:
+ st.warning("Comet ML not initialized. Check environment variables if you wish to log data.")
+
  if st.session_state['file_upload_attempts'] >= max_attempts:
  st.error(f"You have requested results {max_attempts} times. You have reached your daily request limit.")
  st.stop()

+ if text is None:
+ st.warning("Please upload a supported file (.pdf or .docx) before requesting results.")
+ st.stop()

+ st.session_state['file_upload_attempts'] += 1

+ with st.spinner("Analyzing text...", show_time=True):
+ # Load model (cached)
+ model = load_ner_model()
+ text_entities = model(text)
+ df = pd.DataFrame(text_entities)

+ # Clean and filter DataFrame
+ pattern = r'[^\w\s]'
+ df['word'] = df['word'].replace(pattern, '', regex=True)
+ df = df.replace('', 'Unknown').dropna()
+
+ if df.empty:
+ st.warning("No entities were extracted from the uploaded text.")
+ st.stop()

  if comet_initialized:
  experiment = Experiment(
@@ -249,12 +257,13 @@ if st.button("Results"):
  workspace=COMET_WORKSPACE,
  project_name=COMET_PROJECT_NAME,
  )
+ experiment.log_parameter("input_text_length", len(text))
  experiment.log_table("predicted_entities", df)

+ # --- Display Results ---
  properties = {"border": "2px solid gray", "color": "blue", "font-size": "16px"}
  df_styled = df.style.set_properties(**properties)
+ st.dataframe(df_styled, use_container_width=True)

  with st.expander("See Glossary of tags"):
  st.write('''
@@ -269,45 +278,48 @@ if st.button("Results"):
  '**end**': ['index of the end of the corresponding entity']
  ''')
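
The glossary columns are exactly the fields the Hugging Face token-classification pipeline returns for each aggregated prediction; the app simply wraps that list of dicts in a DataFrame. A rough sketch of the shape (the entity span, label choice, and score below are invented, for illustration only):

ner = load_ner_model()
preds = ner("L'ufficio si trova in Via Garibaldi 10.")
# preds is a list of dicts, one per detected entity, e.g.:
# [{'entity_group': 'INDIRIZZO', 'score': 0.98, 'word': 'Via Garibaldi 10', 'start': 22, 'end': 38}]
# pd.DataFrame(preds) then yields the word / score / entity_group / start / end columns described above.
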

+ # --- Visualizations ---
+ st.subheader("Tree map", divider="rainbow")
+ fig_treemap = px.treemap(df, path=[px.Constant("all"), 'word', 'entity_group'],
  values='score', color='entity_group')
+ fig_treemap.update_layout(margin=dict(t=50, l=25, r=25, b=25))
+ st.plotly_chart(fig_treemap)
+ if comet_initialized:
+ experiment.log_figure(figure=fig_treemap, figure_name="entity_treemap")
+
+ value_counts1 = df['entity_group'].value_counts()
+ final_df_counts = value_counts1.reset_index().rename(columns={"index": "entity_group"})
+
+ col1, col2 = st.columns(2)
+ with col1:
+ st.subheader("Pie Chart", divider="rainbow")
+ fig_pie = px.pie(final_df_counts, values='count', names='entity_group', hover_data=['count'], labels={'count': 'count'}, title='Percentage of predicted labels')
+ fig_pie.update_traces(textposition='inside', textinfo='percent+label')
+ st.plotly_chart(fig_pie)
+ if comet_initialized:
+ experiment.log_figure(figure=fig_pie, figure_name="label_pie_chart")
+
+ with col2:
+ st.subheader("Bar Chart", divider="rainbow")
+ fig_bar = px.bar(final_df_counts, x="count", y="entity_group", color="entity_group", text_auto=True, title='Occurrences of predicted labels')
+ st.plotly_chart(fig_bar)
  if comet_initialized:
+ experiment.log_figure(figure=fig_bar, figure_name="label_bar_chart")

+ # --- Downloadable Content ---
  dfa = pd.DataFrame(
  data={
+ 'word': ['entity extracted from your text data'],
+ 'score': ['accuracy score; how accurately a tag has been assigned to a given entity'],
+ 'entity_group': ['label (tag) assigned to a given extracted entity'],
  'start': ['index of the start of the corresponding entity'],
  'end': ['index of the end of the corresponding entity'],
+ })
+
  buf = io.BytesIO()
  with zipfile.ZipFile(buf, "w") as myzip:
  myzip.writestr("Summary of the results.csv", df.to_csv(index=False))
  myzip.writestr("Glossary of tags.csv", dfa.to_csv(index=False))

  with stylable_container(
  key="download_button",
@@ -316,7 +328,7 @@ if st.button("Results"):
  st.download_button(
  label="Download zip file",
  data=buf.getvalue(),
+ file_name="nlpblogs_ner_results.zip",
  mime="application/zip",
  )
  if comet_initialized:
@@ -325,4 +337,5 @@ if st.button("Results"):
  st.divider()
  if comet_initialized:
  experiment.end()
+
+ st.write(f"Number of times you requested results: **{st.session_state['file_upload_attempts']}/{max_attempts}**")