AIEcosystem committed on
Commit 5c871dd · verified · 1 Parent(s): bc1e539

Update src/streamlit_app.py

Files changed (1)
  1. src/streamlit_app.py +314 -38
src/streamlit_app.py CHANGED
@@ -1,40 +1,316 @@
- import altair as alt
- import numpy as np
- import pandas as pd
  import streamlit as st
-
- """
- # Welcome to Streamlit!
-
- Edit `/streamlit_app.py` to customize this app to your heart's desire :heart:.
- If you have any questions, checkout our [documentation](https://docs.streamlit.io) and [community
- forums](https://discuss.streamlit.io).
-
- In the meantime, below is an example of what you can do with just a few lines of code:
- """
-
- num_points = st.slider("Number of points in spiral", 1, 10000, 1100)
- num_turns = st.slider("Number of turns in spiral", 1, 300, 31)
-
- indices = np.linspace(0, 1, num_points)
- theta = 2 * np.pi * num_turns * indices
- radius = indices
-
- x = radius * np.cos(theta)
- y = radius * np.sin(theta)
-
- df = pd.DataFrame({
-     "x": x,
-     "y": y,
-     "idx": indices,
-     "rand": np.random.randn(num_points),
- })
-
- st.altair_chart(alt.Chart(df, height=700, width=700)
-     .mark_point(filled=True)
-     .encode(
-         x=alt.X("x", axis=None),
-         y=alt.Y("y", axis=None),
-         color=alt.Color("idx", legend=None, scale=alt.Scale()),
-         size=alt.Size("rand", legend=None, scale=alt.Scale(range=[1, 150])),
-     ))
+ import os
+ os.environ['HF_HOME'] = '/tmp'
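+ # HF_HOME is set before anything downloads models, so the Hugging Face cache
+ # lands in /tmp, a writable location when the app runs on a Hugging Face Space.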
+ import time
  import streamlit as st
+ import pandas as pd
+ import io
+ import plotly.express as px
+ import zipfile
+ import json
+ from cryptography.fernet import Fernet
+ from streamlit_extras.stylable_container import stylable_container
+ from typing import Optional
+ from gliner import GLiNER
+ from comet_ml import Experiment
+
+ st.markdown(
+     """
+     <style>
+     /* Main app background and text color */
+     .stApp {
+         background-color: #F5FFFA; /* Mint cream, a very light green */
+         color: #000000; /* Black for the text */
+     }
+     /* Sidebar background color */
+     .css-1d36184 {
+         background-color: #B2F2B2; /* A pale green for the sidebar */
+         secondary-background-color: #B2F2B2;
+     }
+
+     /* Expander background color */
+     .streamlit-expanderContent {
+         background-color: #F5FFFA;
+     }
+     /* Expander header background color */
+     .streamlit-expanderHeader {
+         background-color: #F5FFFA;
+     }
+     /* Text Area background and text color */
+     .stTextArea textarea {
+         background-color: #D4F4D4; /* A light, soft green */
+         color: #000000; /* Black for text */
+     }
+     /* Button background and text color */
+     .stButton > button {
+         background-color: #D4F4D4;
+         color: #000000;
+     }
+     /* Warning box background and text color */
+     .stAlert.st-warning {
+         background-color: #C8F0C8; /* A light green for the warning box */
+         color: #000000;
+     }
+     /* Success box background and text color */
+     .stAlert.st-success {
+         background-color: #C8F0C8; /* A light green for the success box */
+         color: #000000;
+     }
+     </style>
+     """,
+     unsafe_allow_html=True
+ )
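+ # Note: selectors like .css-1d36184 are auto-generated by a specific Streamlit build and can
+ # change between releases; "secondary-background-color" is a Streamlit theme option rather than
+ # a CSS property, so browsers will ignore that line.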
+
+ # --- Page Configuration and UI Elements ---
+ st.set_page_config(layout="wide", page_title="Named Entity Recognition App")
+ st.subheader("Business Operations", divider="green")
+ st.link_button("by nlpblogs", "https://nlpblogs.com", type="tertiary")
+ expander = st.expander("**Important notes**")
+ expander.write("""**Named Entities:** This HR.ai predicts thirty-six (36) labels: "Email", "Phone_number", "Street_address", "City", "Country", "Date_of_birth", "Marital_status", "Person", "Full_time", "Part_time", "Contract", "Terminated", "Retired", "Job_title", "Date", "Organization", "Role", "Performance_score", "Leave_of_absence", "Retirement_plan", "Bonus", "Stock_options", "Health_insurance", "Pay_rate", "Annual_salary", "Tax", "Deductions", "Interview_type", "Applicant", "Referral", "Job_board", "Recruiter", "Offer_letter", "Agreement", "Certification", "Skill".
+ Results are presented in easy-to-read tables, visualized in an interactive tree map, pie chart and bar chart, and are available for download along with a Glossary of tags.
+ **How to Use:** Type or paste your text into the text area below, then press Ctrl + Enter. Click the 'Results' button to extract and tag entities in your text data.
+ **Usage Limits:** You can request results an unlimited number of times for one (1) month.
+ **Supported Languages:** English
+ **Technical issues:** If your connection times out, please refresh the page or reopen the app's URL.
+ For any errors or inquiries, please contact us at [email protected]""")
+
+ with st.sidebar:
+     st.write("Use the following code to embed the HR.ai web app on your website. Feel free to adjust the width and height values to fit your page.")
+     code = '''
+     <iframe
+         src="https://aiecosystem-hr-ai.hf.space"
+         frameborder="0"
+         width="850"
+         height="450"
+     ></iframe>
+     '''
+     st.code(code, language="html")
+     st.text("")
+     st.text("")
+     st.divider()
+     st.subheader("🚀 Ready to build your own NER Web App?", divider="green")
+     st.link_button("NER Builder", "https://nlpblogs.com", type="primary")
+
+ # --- Comet ML Setup ---
+ COMET_API_KEY = os.environ.get("COMET_API_KEY")
+ COMET_WORKSPACE = os.environ.get("COMET_WORKSPACE")
+ COMET_PROJECT_NAME = os.environ.get("COMET_PROJECT_NAME")
+ comet_initialized = bool(COMET_API_KEY and COMET_WORKSPACE and COMET_PROJECT_NAME)
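+ # All three values are read from the environment (e.g. Space secrets); if any one is missing,
+ # comet_initialized stays False and the experiment logging further down is skipped.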
+
+ if not comet_initialized:
+     st.warning("Comet ML not initialized. Check environment variables.")
+
+ # --- Label Definitions ---
+
+ labels = [
+     "Person",
+     "Contact",
+     "Company",
+     "Department",
+     "Vendor",
+     "Client",
+     "Office",
+     "Warehouse",
+     "Address",
+     "City",
+     "State",
+     "Country",
+     "Date",
+     "Time",
+     "Time Period",
+     "Revenue",
+     "Cost",
+     "Budget",
+     "Invoice Number",
+     "Product",
+     "Service",
+     "Task",
+     "Project",
+     "Status",
+     "Asset",
+     "Transaction"
+ ]
+
+ # Create a mapping dictionary for labels to categories
+ category_mapping = {
+     "People": ["Person", "Employee", "Contact"],
+     "Organizations": ["Company", "Department", "Vendor", "Client"],
+     "Locations": ["Office", "Warehouse", "Address", "City", "State", "Country"],
+     "Time & Finance": ["Date", "Time", "Time Period", "Revenue", "Cost", "Budget", "Invoice Number"],
+     "Other Entities": ["Product", "Service", "Task", "Project", "Status", "Asset", "Transaction"],
+ }
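+ # These five category names drive the per-category tabs and the grouped charts below.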
+
+
+ # --- Model Loading ---
+ @st.cache_resource
+ def load_ner_model():
+     """Loads the GLiNER model and caches it."""
+     try:
+         return GLiNER.from_pretrained("knowledgator/gliner-multitask-large-v0.5", nested_ner=True, num_gen_sequences=2, gen_constraints=labels)
+     except Exception as e:
+         st.error(f"Failed to load NER model. Please check your internet connection or model availability: {e}")
+         st.stop()
+
+ model = load_ner_model()
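+ # st.cache_resource keeps a single loaded model instance for the whole process, so the
+ # checkpoint is downloaded and initialised only once rather than on every rerun.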
+
+ # Flatten the mapping to a single dictionary
+ reverse_category_mapping = {label: category for category, label_list in category_mapping.items() for label in label_list}
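+ # The result maps each label to its category, e.g. {'Person': 'People', 'Company': 'Organizations', 'Revenue': 'Time & Finance', ...}.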
+
+ # --- Text Input and Clear Button ---
+ text = st.text_area("Type or paste your text below, and then press Ctrl + Enter", height=250, key='my_text_area')
+
+ def clear_text():
+     """Clears the text area."""
+     st.session_state['my_text_area'] = ""
+
+ st.button("Clear text", on_click=clear_text)
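+ # Because the text area is keyed as 'my_text_area', the on_click callback can reset its
+ # contents through st.session_state before Streamlit reruns the script.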
+
+
+ # --- Results Section ---
+ if st.button("Results"):
+     start_time = time.time()
+     if not text.strip():
+         st.warning("Please enter some text to extract entities.")
+     else:
+         with st.spinner("Extracting entities...", show_time=True):
+             entities = model.predict_entities(text, labels)
+             df = pd.DataFrame(entities)
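+             # predict_entities returns one dict per entity ('text', 'label', 'score', 'start', 'end'),
+             # so each DataFrame row is a single extracted span.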
+
+             if not df.empty:
+                 df['category'] = df['label'].map(reverse_category_mapping)
+                 if comet_initialized:
+                     experiment = Experiment(
+                         api_key=COMET_API_KEY,
+                         workspace=COMET_WORKSPACE,
+                         project_name=COMET_PROJECT_NAME,
+                     )
+                     experiment.log_parameter("input_text", text)
+                     experiment.log_table("predicted_entities", df)
+
+                 st.subheader("Grouped Entities by Category", divider="green")
+
+                 # Create tabs for each category
+                 category_names = sorted(list(category_mapping.keys()))
+                 category_tabs = st.tabs(category_names)
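+                 # st.tabs returns the tab containers in the same order as category_names,
+                 # so indexing by i keeps each tab paired with its category filter below.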
+
+                 for i, category_name in enumerate(category_names):
+                     with category_tabs[i]:
+                         df_category_filtered = df[df['category'] == category_name]
+                         if not df_category_filtered.empty:
+                             st.dataframe(df_category_filtered.drop(columns=['category']), use_container_width=True)
+                         else:
+                             st.info(f"No entities found for the '{category_name}' category.")
+
+                 with st.expander("See Glossary of tags"):
+                     st.write('''
+                     - **text**: ['entity extracted from your text data']
+                     - **score**: ['accuracy score; how accurately a tag has been assigned to a given entity']
+                     - **label**: ['label (tag) assigned to a given extracted entity']
+                     - **category**: ['the high-level category for the label']
+                     - **start**: ['index of the start of the corresponding entity']
+                     - **end**: ['index of the end of the corresponding entity']
+                     ''')
+                 st.divider()
+
+                 # Tree map
+                 st.subheader("Tree map", divider="green")
+                 fig_treemap = px.treemap(df, path=[px.Constant("all"), 'category', 'label', 'text'], values='score', color='category')
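+                 # Hierarchy: all -> category -> label -> entity text, with leaf size taken from the model's confidence score.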
+                 fig_treemap.update_layout(margin=dict(t=50, l=25, r=25, b=25), paper_bgcolor='#F5FFFA', plot_bgcolor='#F5FFFA')
+                 st.plotly_chart(fig_treemap)
+
+                 # Pie and Bar charts
+                 grouped_counts = df['category'].value_counts().reset_index()
+                 grouped_counts.columns = ['category', 'count']
+                 col1, col2 = st.columns(2)
+
+                 with col1:
+                     st.subheader("Pie chart", divider="green")
+                     fig_pie = px.pie(grouped_counts, values='count', names='category', hover_data=['count'], labels={'count': 'count'}, title='Percentage of predicted categories')
+                     fig_pie.update_traces(textposition='inside', textinfo='percent+label')
+                     fig_pie.update_layout(
+                         paper_bgcolor='#F5FFFA',
+                         plot_bgcolor='#F5FFFA'
+                     )
+                     st.plotly_chart(fig_pie)
+
+                 with col2:
+                     st.subheader("Bar chart", divider="green")
+                     fig_bar = px.bar(grouped_counts, x="count", y="category", color="category", text_auto=True, title='Occurrences of predicted categories')
+                     fig_bar.update_layout(
+                         paper_bgcolor='#F5FFFA',
+                         plot_bgcolor='#F5FFFA'
+                     )
+                     st.plotly_chart(fig_bar)
+
+                 # Most Frequent Entities
+                 st.subheader("Most Frequent Entities", divider="green")
+                 word_counts = df['text'].value_counts().reset_index()
+                 word_counts.columns = ['Entity', 'Count']
+                 repeating_entities = word_counts[word_counts['Count'] > 1]
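+                 # value_counts() tallies identical entity strings; only entities occurring more than once are kept.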
+                 if not repeating_entities.empty:
+                     st.dataframe(repeating_entities, use_container_width=True)
+                     fig_repeating_bar = px.bar(repeating_entities, x='Entity', y='Count', color='Entity')
+                     fig_repeating_bar.update_layout(xaxis={'categoryorder': 'total descending'},
+                                                     paper_bgcolor='#F5FFFA',
+                                                     plot_bgcolor='#F5FFFA')
+                     st.plotly_chart(fig_repeating_bar)
+                 else:
+                     st.warning("No entities were found that occur more than once.")
+
+                 # Download Section
+                 st.divider()
+
+                 dfa = pd.DataFrame(
+                     data={
+                         'Column Name': ['text', 'label', 'score', 'start', 'end', 'category'],
+                         'Description': [
+                             'entity extracted from your text data',
+                             'label (tag) assigned to a given extracted entity',
+                             'accuracy score; how accurately a tag has been assigned to a given entity',
+                             'index of the start of the corresponding entity',
+                             'index of the end of the corresponding entity',
+                             'the broader category the entity belongs to',
+                         ]
+                     }
+                 )
+                 buf = io.BytesIO()
+                 with zipfile.ZipFile(buf, "w") as myzip:
+                     myzip.writestr("Summary of the results.csv", df.to_csv(index=False))
+                     myzip.writestr("Glossary of tags.csv", dfa.to_csv(index=False))
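+                 # Both CSVs are written into an in-memory zip (io.BytesIO), so nothing is persisted to disk before download.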
+
+                 with stylable_container(
+                     key="download_button",
+                     css_styles="""button { background-color: red; border: 1px solid black; padding: 5px; color: white; }""",
+                 ):
+                     st.download_button(
+                         label="Download results and glossary (zip)",
+                         data=buf.getvalue(),
+                         file_name="nlpblogs_results.zip",
+                         mime="application/zip",
+                     )
+
+                 if comet_initialized:
+                     experiment.log_figure(figure=fig_treemap, figure_name="entity_treemap_categories")
+                     experiment.end()
+             else:  # If df is empty
+                 st.warning("No entities were found in the provided text.")
+
+     end_time = time.time()
+     elapsed_time = end_time - start_time
+     st.text("")
+     st.text("")
+     st.info(f"Results processed in **{elapsed_time:.2f} seconds**.")