AIEcosystem committed on
Commit
e30c00c
·
verified ·
1 Parent(s): 9f213ee

Update src/streamlit_app.py

Browse files
Files changed (1) hide show
  1. src/streamlit_app.py +388 -38
src/streamlit_app.py CHANGED
@@ -1,40 +1,390 @@
1
- import altair as alt
2
- import numpy as np
3
- import pandas as pd
4
  import streamlit as st
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
 
6
- """
7
- # Welcome to Streamlit!
8
-
9
- Edit `/streamlit_app.py` to customize this app to your heart's desire :heart:.
10
- If you have any questions, checkout our [documentation](https://docs.streamlit.io) and [community
11
- forums](https://discuss.streamlit.io).
12
-
13
- In the meantime, below is an example of what you can do with just a few lines of code:
14
- """
15
-
16
- num_points = st.slider("Number of points in spiral", 1, 10000, 1100)
17
- num_turns = st.slider("Number of turns in spiral", 1, 300, 31)
18
-
19
- indices = np.linspace(0, 1, num_points)
20
- theta = 2 * np.pi * num_turns * indices
21
- radius = indices
22
-
23
- x = radius * np.cos(theta)
24
- y = radius * np.sin(theta)
25
-
26
- df = pd.DataFrame({
27
- "x": x,
28
- "y": y,
29
- "idx": indices,
30
- "rand": np.random.randn(num_points),
31
- })
32
-
33
- st.altair_chart(alt.Chart(df, height=700, width=700)
34
- .mark_point(filled=True)
35
- .encode(
36
- x=alt.X("x", axis=None),
37
- y=alt.Y("y", axis=None),
38
- color=alt.Color("idx", legend=None, scale=alt.Scale()),
39
- size=alt.Size("rand", legend=None, scale=alt.Scale(range=[1, 150])),
40
- ))
 
1
+ import os
2
+ os.environ['HF_HOME'] = '/tmp'
3
+ import time
4
  import streamlit as st
5
+ import pandas as pd
6
+ import io
7
+ import plotly.express as px
8
+ import zipfile
9
+ import json
10
+ from cryptography.fernet import Fernet
11
+ from streamlit_extras.stylable_container import stylable_container
12
+ from typing import Optional
13
+ from gliner import GLiNER
14
+ from comet_ml import Experiment
15
+
16
+
17
# --- Page Configuration ---
# st.set_page_config must be the FIRST Streamlit command executed in the
# script; calling it after st.markdown (as the original did) raises
# StreamlitAPIException.
st.set_page_config(layout="wide", page_title="Named Entity Recognition App")

# --- Global CSS overrides ---
# NOTE(review): ".css-1d36184" is a hashed, version-specific Streamlit class
# name for the sidebar; it may stop matching after a Streamlit upgrade — verify.
st.markdown(
    """
    <style>
    /* Main app background and text color */
    .stApp {
        background-color: white;
        color: black;
    }
    /* Sidebar background color */
    .css-1d36184 {
        background-color: #ADD8E6;
        secondary-background-color: #ADD8E6;
    }
    /* Expander background color */
    .streamlit-expanderContent {
        background-color: white;
    }
    /* Expander header background color */
    .streamlit-expanderHeader {
        background-color: white;
    }
    /* Text Area background and text color */
    .stTextArea textarea {
        background-color: lavender;
        color: black;
    }
    /* Button background and text color */
    .stButton > button {
        background-color: lavender;
        color: black;
    }
    /* Warning box background and text color */
    .stAlert.st-warning {
        background-color: lavender; /* was "#lavender": "#" is only valid for hex values */
        color: black;
    }
    /* Success box background and text color */
    .stAlert.st-success {
        background-color: lavender; /* was "#lavender": "#" is only valid for hex values */
        color: black;
    }
    </style>
    """,
    unsafe_allow_html=True
)

st.subheader("Compliance", divider="gray")
st.link_button("by nlpblogs", "https://nlpblogs.com", type="tertiary")
74
+
75
# --- Usage notes expander ---
# NOTE(review): the original copy described twenty-four marketing/product
# labels (Product, Service, Organization, ...) that do not exist in this app;
# the `labels` list below defines 42 PII/PHI entity types, so the description
# is updated to match the code.
expander = st.expander("**Important notes on the ProductTag**")
expander.write("""
**Named Entities:** This app predicts forty-two (42) PII/PHI labels: "medical_record_number", "date_of_birth", "ssn", "date", "first_name", "email", "last_name", "customer_id", "employee_id", "name", "street_address", "phone_number", "ipv4", "credit_card_number", "license_plate", "address", "user_name", "device_identifier", "bank_routing_number", "date_time", "company_name", "unique_identifier", "biometric_identifier", "account_number", "city", "certificate_license_number", "time", "postcode", "vehicle_identifier", "coordinate", "country", "api_key", "ipv6", "password", "health_plan_beneficiary_number", "national_id", "tax_id", "url", "state", "swift_bic", "cvv", "pin"

Results are presented in easy-to-read tables, visualized in an interactive tree map, pie chart and bar chart, and are available for download along with a Glossary of tags.

**How to Use:** Type or paste your text into the text area below, then press Ctrl + Enter. Click the 'Results' button to extract and tag entities in your text data.

**Usage Limits:** You can request results unlimited times for one (1) week.

**Supported Languages:** English

**Technical issues:** If your connection times out, please refresh the page or reopen the app's URL.

For any errors or inquiries, please contact us at [email protected]
""")
91
+
92
# --- Sidebar: builder promo and embeddable iframe snippet ---
sidebar = st.sidebar
sidebar.subheader("Build your own NER Web App in a minute without writing a single line of code.", divider="gray")
sidebar.link_button("NER File Builder", "https://nlpblogs.com/shop/named-entity-recognition-ner/ner-file-builder/", type="primary")

# Vertical spacing between the promo link and the embed instructions.
sidebar.text("")
sidebar.text("")

sidebar.write("Use the following code to embed the ProductTag web app on your website. Feel free to adjust the width and height values to fit your page.")
# HTML snippet shown to the user for embedding this Space elsewhere.
embed_snippet = '''
<iframe
  src="https://aiecosystem-producttag1.hf.space"
  frameborder="0"
  width="850"
  height="450"
></iframe>
'''
sidebar.code(embed_snippet, language="html")
109
+
110
# --- Comet ML experiment-tracking configuration ---
# Logging is enabled only when all three environment variables are present.
COMET_API_KEY = os.environ.get("COMET_API_KEY")
COMET_WORKSPACE = os.environ.get("COMET_WORKSPACE")
COMET_PROJECT_NAME = os.environ.get("COMET_PROJECT_NAME")

comet_initialized = all([COMET_API_KEY, COMET_WORKSPACE, COMET_PROJECT_NAME])
if not comet_initialized:
    # Best-effort warning: the app still runs, it just skips experiment logging.
    st.warning("Comet ML not initialized. Check environment variables.")
118
+
119
+
120
# --- Label Definitions ---
# PII/PHI entity labels passed to the GLiNER model.
labels = [
    "medical_record_number",
    "date_of_birth",
    "ssn",
    "date",
    "first_name",
    "email",
    "last_name",
    "customer_id",
    "employee_id",
    "name",
    "street_address",
    "phone_number",
    "ipv4",
    "credit_card_number",
    "license_plate",
    "address",
    "user_name",
    "device_identifier",
    "bank_routing_number",
    "date_time",
    "company_name",
    "unique_identifier",
    "biometric_identifier",
    "account_number",
    "city",
    "certificate_license_number",
    "time",
    "postcode",
    "vehicle_identifier",
    "coordinate",
    "country",
    "api_key",
    "ipv6",
    "password",
    "health_plan_beneficiary_number",
    "national_id",
    "tax_id",
    "url",
    "state",
    "swift_bic",
    "cvv",
    "pin"
]

# Map each high-level category to the labels it contains.
# Every entry in `labels` appears in exactly one category; these category
# names surface directly in the charts and downloaded results.
category_mapping = {
    "Personal Identifiers": [
        "date_of_birth",
        "first_name",
        "last_name",
        "name",
        "biometric_identifier",
        "user_name",
        "password"
    ],

    "Contact & Location Information": [
        "email",
        "street_address",
        "phone_number",
        "address",
        "city",
        "postcode",
        "coordinate",
        "country",
        "state",
        "url"
    ],

    "Financial & Business Data": [
        "credit_card_number",
        "bank_routing_number",
        "account_number",
        "swift_bic",
        "cvv",
        "pin",
        "company_name",
        "api_key"
    ],

    "Government & Official IDs": [
        "ssn",
        "license_plate",
        "certificate_license_number",
        "national_id",
        "tax_id",
        "medical_record_number",
        "health_plan_beneficiary_number"
    ],

    # Fixed: key had a trailing space ("Technical & System Data ") that would
    # leak into displayed category names.
    "Technical & System Data": [
        "ipv4",
        "device_identifier",
        "ipv6"
    ],

    # Fixed: the closing quote of this key was missing (syntax error).
    "Unique Identifiers & Registration Numbers": [
        "customer_id",
        "employee_id",
        "unique_identifier",
        "vehicle_identifier"
    ],

    "Date & Time Stamps": [
        "date",
        "date_time",
        "time"
    ]
}
234
+
235
+
236
+
237
# --- Model Loading ---
@st.cache_resource
def load_ner_model():
    """Load the GLiNER PII model once and cache it across Streamlit reruns."""
    try:
        # NOTE(review): threshold/constraint semantics come from the GLiNER
        # API — confirm against the gretel-gliner model card.
        return GLiNER.from_pretrained(
            "gretelai/gretel-gliner-bi-large-v1.0",
            nested_ner=True,
            num_gen_sequences=2,
            gen_constraints=labels,
            threshold=0.70,
        )
    except Exception as e:
        # Surface the failure in the UI and halt the script run.
        st.error(f"Failed to load NER model. Please check your internet connection or model availability: {e}")
        st.stop()

model = load_ner_model()
248
+
249
+
250
# Invert category_mapping into a flat lookup: label -> high-level category.
reverse_category_mapping = {}
for _category, _label_list in category_mapping.items():
    for _label in _label_list:
        reverse_category_mapping[_label] = _category
252
+
253
# --- Text Input and Clear Button ---
text = st.text_area(
    "Type or paste your text below, and then press Ctrl + Enter",
    height=250,
    key='my_text_area',
)

def clear_text():
    """Reset the text area by clearing its session-state key."""
    st.session_state['my_text_area'] = ""

st.button("Clear text", on_click=clear_text)
st.divider()
262
+
263
# --- Results Section ---
# Runs NER on the input text, renders tables/charts, offers a zip download,
# and optionally logs the run to Comet ML.
# NOTE(review): indentation was reconstructed from a whitespace-mangled paste;
# in particular, both `experiment` references are kept under
# `comet_initialized` guards inside the non-empty-df branch so the second use
# cannot NameError.
if st.button("Results"):
    start_time = time.time()
    if not text.strip():
        st.warning("Please enter some text to extract entities.")
    else:
        with st.spinner("Extracting entities...", show_time=True):
            entities = model.predict_entities(text, labels)
            df = pd.DataFrame(entities)

            if not df.empty:
                # Attach the high-level category for each predicted label.
                df['category'] = df['label'].map(reverse_category_mapping)

                if comet_initialized:
                    experiment = Experiment(
                        api_key=COMET_API_KEY,
                        workspace=COMET_WORKSPACE,
                        project_name=COMET_PROJECT_NAME,
                    )
                    experiment.log_parameter("input_text", text)
                    experiment.log_table("predicted_entities", df)

                st.subheader("Extracted Entities", divider="gray")
                st.dataframe(df.style.set_properties(**{"border": "2px solid gray", "color": "blue", "font-size": "16px"}))

                with st.expander("See Glossary of tags"):
                    st.write('''
- **text**: ['entity extracted from your text data']
- **score**: ['accuracy score; how accurately a tag has been assigned to a given entity']
- **label**: ['label (tag) assigned to a given extracted entity']
- **category**: ['the high-level category for the label']
- **start**: ['index of the start of the corresponding entity']
- **end**: ['index of the end of the corresponding entity']
''')

                st.divider()

                # Tree map: all -> category -> label -> text, weighted by score.
                st.subheader("Tree map", divider="gray")
                fig_treemap = px.treemap(df, path=[px.Constant("all"), 'category', 'label', 'text'], values='score', color='category')
                fig_treemap.update_layout(margin=dict(t=50, l=25, r=25, b=25))
                st.plotly_chart(fig_treemap)

                # Pie and bar charts of per-category entity counts.
                grouped_counts = df['category'].value_counts().reset_index()
                grouped_counts.columns = ['category', 'count']

                col1, col2 = st.columns(2)
                with col1:
                    st.subheader("Pie chart", divider="gray")
                    fig_pie = px.pie(grouped_counts, values='count', names='category',
                                     hover_data=['count'], labels={'count': 'count'}, title='Percentage of predicted categories')
                    fig_pie.update_traces(textposition='inside', textinfo='percent+label')
                    st.plotly_chart(fig_pie)

                with col2:
                    st.subheader("Bar chart", divider="gray")
                    fig_bar = px.bar(grouped_counts, x="count", y="category", color="category", text_auto=True,
                                     title='Occurrences of predicted categories')
                    st.plotly_chart(fig_bar)

                # Entities that occur more than once in the input.
                st.subheader("Most Frequent Entities", divider="gray")
                word_counts = df['text'].value_counts().reset_index()
                word_counts.columns = ['Entity', 'Count']
                repeating_entities = word_counts[word_counts['Count'] > 1]
                if not repeating_entities.empty:
                    st.dataframe(repeating_entities, use_container_width=True)
                    fig_repeating_bar = px.bar(repeating_entities, x='Entity', y='Count', color='Entity')
                    fig_repeating_bar.update_layout(xaxis={'categoryorder': 'total descending'})
                    st.plotly_chart(fig_repeating_bar)
                else:
                    st.warning("No entities were found that occur more than once.")

                # Download Section: results + glossary, zipped in memory.
                st.divider()

                dfa = pd.DataFrame(
                    data={
                        'Column Name': ['text', 'label', 'score', 'start', 'end', 'category'],
                        'Description': [
                            'entity extracted from your text data',
                            'label (tag) assigned to a given extracted entity',
                            'accuracy score; how accurately a tag has been assigned to a given entity',
                            'index of the start of the corresponding entity',
                            'index of the end of the corresponding entity',
                            'the broader category the entity belongs to',
                        ]
                    }
                )

                buf = io.BytesIO()
                with zipfile.ZipFile(buf, "w") as myzip:
                    myzip.writestr("Summary of the results.csv", df.to_csv(index=False))
                    myzip.writestr("Glossary of tags.csv", dfa.to_csv(index=False))

                # NOTE(review): the download is named "markettag_results.zip"
                # while the app calls itself ProductTag — confirm intended name.
                with stylable_container(
                    key="download_button",
                    css_styles="""button { background-color: red; border: 1px solid black; padding: 5px; color: white; }""",
                ):
                    st.download_button(
                        label="Download results and glossary (zip)",
                        data=buf.getvalue(),
                        file_name="markettag_results.zip",
                        mime="application/zip",
                    )

                if comet_initialized:
                    experiment.log_figure(figure=fig_treemap, figure_name="entity_treemap_categories")
                    experiment.end()

            else:  # If df is empty
                st.warning("No entities were found in the provided text.")

    # Timing banner is shown for every run, including the empty-input path.
    end_time = time.time()
    elapsed_time = end_time - start_time

    st.text("")
    st.text("")
    st.info(f"Results processed in **{elapsed_time:.2f} seconds**.")