devanshsrivastav commited on
Commit
3318bf8
·
0 Parent(s):

conversate

Browse files
.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ .env
.streamlit/config.toml ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ [theme]
2
+ base="light"
README.md ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: apache-2.0
3
+ title: Psychological Safety Dashboard
4
+ sdk: streamlit
5
+ sdk_version: "1.35.0"
6
+ app_file: app.py
7
+ ---
8
+
9
+ # Conversate: Psychological Safety in the Workplace
app.py ADDED
@@ -0,0 +1,366 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""Conversate — a Streamlit dashboard that gauges psychological safety in a
Slack conversation by classifying every message with a Mistral chat model."""

import json
import os

import pandas as pd
import plotly.graph_objects as go
import streamlit as st
from dotenv import load_dotenv
from mistralai.client import MistralClient
from mistralai.models.chat_completion import ChatMessage
from plotly.subplots import make_subplots

# Pull MISTRAL_API_KEY from a local .env file (if present) into the environment.
load_dotenv()

api_key = os.getenv("MISTRAL_API_KEY")
model = "mistral-large-latest"

# NOTE: st.set_page_config must be the first Streamlit call in the script.
st.set_page_config(
    page_title="Psychological Safety Dashboard",
    layout="wide"
)

# Fail fast with an actionable message instead of a confusing API error on the
# first chat call when the key is missing.
if not api_key:
    st.error("MISTRAL_API_KEY is not set. Add it to your environment or a .env file.")
    st.stop()

client = MistralClient(api_key=api_key)

# Set page title
st.title("Conversate: Psychological Safety in the Workplace")

# Expects a Slack export: a JSON array of message objects.
uploaded_file = st.file_uploader("Upload the slack conversation", type=['json'])
# Function to process uploaded file
@st.cache_data(show_spinner=False)
def process_file(uploaded_file):
    """Classify every user message in the uploaded Slack export.

    Each message is sent to the Mistral model, which is prompted to return a
    one-line JSON dictionary of labels (sentiment, emotion, toxicity, etc.).

    Parameters
    ----------
    uploaded_file : file-like
        The uploaded Slack JSON export (a list of message objects).

    Returns
    -------
    pd.DataFrame
        One row per successfully classified message, with columns
        'user', 'message', and one column per label category. Messages whose
        model reply cannot be parsed as JSON are skipped rather than crashing.
    """
    json_data = json.load(uploaded_file)
    # Bot/system entries lack "user_profile"; keep only real user messages.
    messages_with_profile = [item for item in json_data if "user_profile" in item]

    conversation = []
    for item in messages_with_profile:
        d = {"user": item["user_profile"]["real_name"], "message": item["text"]}
        conversation.append(d)

    rows = []
    for con in conversation:
        user = con['user']
        message = con['message']
        messages = [
            ChatMessage(
                role="user",
                content=f""" Given the message {message}, select the best label for the following categories that fits the message, omit any explanations or details and just respond in form of a python dictionary without any additional spaces or new lines. The dictionary should be in a single line without additional spaces. The keys and values of the dictionary must be enclosed with double quotes:\
                sentiment = ['positive', 'negative', 'neutral'],\
                emotions = [ 'admiration', 'amusement', 'anger', 'annoyance', 'approval', 'caring', 'confusion', 'curiosity', 'desire', 'disappointment', 'disapproval', 'disgust', 'embarrassment', 'excitement', 'fear', 'gratitude', 'grief', 'joy', 'love', 'nervousness', 'optimism', 'pride', 'realization', 'relief', 'remorse', 'sadness', 'surprise', 'neutral'],\
                toxicity = ['yes', 'no'],\
                harassment indicators = ['harassment', 'hate speech', 'bullying', 'none'],\
                sexist = ['yes', 'no'],\
                spam = ['yes', 'no'],\
                racism = ['yes', 'no'],\
                profanity = ['yes', 'no']"""
            )
        ]

        # No streaming
        chat_response = client.chat(
            model=model,
            messages=messages,
        )

        raw = chat_response.choices[0].message.content.strip()
        # Models sometimes wrap the dictionary in markdown code fences despite
        # the prompt; strip them so one decorated reply does not crash the app.
        if raw.startswith("```"):
            raw = raw.strip("`").strip()
            if raw.lower().startswith("json"):
                raw = raw[4:].strip()
        try:
            res = json.loads(raw)
        except (json.JSONDecodeError, ValueError):
            # Skip messages the model failed to label as valid JSON instead of
            # aborting the whole analysis.
            continue

        # Use neutral defaults so a missing key never raises KeyError.
        rows.append({
            'user': user,
            'message': message,
            'sentiment': res.get('sentiment', 'neutral'),
            'emotions': res.get('emotions', 'neutral'),
            'toxicity': res.get('toxicity', 'no'),
            'harassment indicators': res.get('harassment indicators', 'none'),
            'sexist': res.get('sexist', 'no'),
            'spam': res.get('spam', 'no'),
            'racism': res.get('racism', 'no'),
            'profanity': res.get('profanity', 'no')
        })

    return pd.DataFrame(rows)
+
79
+ # Load data if file is uploaded
80
+ if uploaded_file is not None:
81
+ with st.spinner(f"Analyzing the conversation... (This may take a while depending on the size of the conversation)"):
82
+ data = process_file(uploaded_file)
83
+
84
+ default_options = list(set(data['user'])) + ['Entire Team']
85
+
86
+ # Define color map for each emotion category
87
+ color_map = {
88
+ 'admiration': ['#1f77b4', '#98df8a', '#2ca02c', '#d62728'],
89
+ 'amusement': ['#ff7f0e', '#98df8a', '#2ca02c', '#d62728'],
90
+ 'anger': ['#ffbb78', '#ff7f0e', '#d62728', '#bcbd22'],
91
+ 'annoyance': ['#ffbb78', '#ff7f0e', '#d62728', '#bcbd22'],
92
+ 'approval': ['#1f77b4', '#98df8a', '#2ca02c', '#d62728'],
93
+ 'caring': ['#98df8a', '#2ca02c', '#FF69B4', '#d62728'],
94
+ 'confusion': ['#ffbb78', '#ff7f0e', '#9467bd', '#d62728'],
95
+ 'curiosity': ['#ffbb78', '#ff7f0e', '#9467bd', '#d62728'],
96
+ 'desire': ['#2ca02c', '#ff7f0e', '#98df8a', '#d62728'],
97
+ 'disappointment': ['#ffbb78', '#ff7f0e', '#d62728', '#bcbd22'],
98
+ 'disapproval': ['#ffbb78', '#ff7f0e', '#d62728', '#bcbd22'],
99
+ 'disgust': ['#ffbb78', '#ff7f0e', '#d62728', '#bcbd22'],
100
+ 'embarrassment': ['#ffbb78', '#ff7f0e', '#9467bd', '#d62728'],
101
+ 'excitement': ['#ff7f0e', '#2ca02c', '#98df8a', '#d62728'],
102
+ 'fear': ['#ffbb78', '#ff7f0e', '#d62728', '#bcbd22'],
103
+ 'gratitude': ['#98df8a', '#2ca02c', '#1f77b4', '#d62728'],
104
+ 'grief': ['#ffbb78', '#d62728', '#bcbd22', '#ff7f0e'],
105
+ 'joy': ['#ff7f0e', '#98df8a', '#2ca02c', '#d62728'],
106
+ 'love': ['#FF69B4', '#98df8a', '#2ca02c', '#d62728'],
107
+ 'nervousness': ['#ffbb78', '#ff7f0e', '#9467bd', '#d62728'],
108
+ 'optimism': ['#98df8a', '#2ca02c', '#1f77b4', '#d62728'],
109
+ 'pride': ['#98df8a', '#ff7f0e', '#1f77b4', '#d62728'],
110
+ 'realization': ['#9467bd', '#ff7f0e', '#ffbb78', '#d62728'],
111
+ 'relief': ['#1f77b4', '#98df8a', '#2ca02c', '#d62728'],
112
+ 'remorse': ['#ffbb78', '#ff7f0e', '#d62728', '#bcbd22'],
113
+ 'sadness': ['#ffbb78', '#ff7f0e', '#d62728', '#bcbd22'],
114
+ 'surprise': ['#ff7f0e', '#ffbb78', '#9467bd', '#d62728'],
115
+ 'neutral': ['#2ca02c', '#98df8a', '#1f77b4', '#d62728']
116
+ }
117
+
118
+ with st.sidebar:
119
+
120
+ # Create dropdown with default options
121
+ selected_option = st.selectbox("Select the team member:", default_options)
122
+
123
+ # Add submit button
124
+ submit = st.button("Submit")
125
+
126
+ if submit:
127
+ with st.spinner('Processing messages by {s}...'.format(s=selected_option)):
128
+
129
+
130
+ if selected_option == 'Entire Team':
131
+ msgs = list(data['message'])
132
+ messages = [
133
+ ChatMessage(role="user", content=" Given the messages by the team, describe their communication style. What is they doing right and wrong and how should it be improved for effective communication. What must be done by this team to ensure pyschological safety in the team. Explain in detail. The messages by this team are {m}".format(m=msgs)
134
+ )
135
+ ]
136
+ else:
137
+ msgs = list(data[data['user'] == selected_option]['message'])
138
+ messages = [
139
+ ChatMessage(role="user", content=" Given the messages by {s}, describe his/her communication style. What is he/she doing right and wrong and how should it be improved for effective communication. What must be done by this person to ensure pyschological safety in the team. Explain in detail. The messages by this person are {m}".format(s=selected_option, m=msgs)
140
+ )
141
+ ]
142
+
143
+ # No streaming
144
+ chat_response = client.chat(
145
+ model=model,
146
+ messages=messages,
147
+ )
148
+
149
+ st.subheader(f"Team Member Selected: {selected_option}")
150
+
151
+ with st.expander("See explanation"):
152
+ st.write(chat_response.choices[0].message.content)
153
+
154
+
155
+ st.write("")
156
+ st.write("")
157
+ st.write("")
158
+
159
+ senti, _, emot = st.columns([4.5,1,4.5])
160
+
161
+ with senti:
162
+
163
+ if selected_option == 'Entire Team':
164
+ sentiment_labels = list(dict(data['sentiment'].value_counts()).keys())
165
+ sentiment_values = list(dict(data['sentiment'].value_counts()).values())
166
+ else:
167
+ # Sentiment counts
168
+ sentiment_labels = list(dict(data[data['user'] == selected_option]['sentiment'].value_counts()).keys())
169
+ sentiment_values = list(dict(data[data['user'] == selected_option]['sentiment'].value_counts()).values())
170
+
171
+ # Define colors for each sentiment category
172
+ colors = ['lightblue', 'lightcoral', 'lightgreen']
173
+
174
+ # Create a pie chart
175
+ fig = go.Figure(data=[go.Pie(labels=sentiment_labels, values=sentiment_values)])
176
+
177
+ # Update pie chart layout
178
+ fig.update_traces(hoverinfo='label+percent', textinfo='value', textfont_size=20,
179
+ marker=dict(colors=colors, line=dict(color='#000000', width=2)))
180
+
181
+ # Set title
182
+ # fig.update_layout(title='Sentiment Distribution')
183
+ st.subheader("Sentiment Distribution")
184
+
185
+ # Display pie chart
186
+ st.plotly_chart(fig, use_container_width=True)
187
+
188
+ with _:
189
+ st.write("")
190
+
191
+ with emot:
192
+
193
+ if selected_option == 'Entire Team':
194
+ emotion_labels = list(dict(data['emotions'].value_counts()).keys())
195
+ emotion_values = list(dict(data['emotions'].value_counts()).values())
196
+ else:
197
+
198
+ emotion_labels = list(dict(data[data['user'] == selected_option]['emotions'].value_counts()).keys())
199
+ emotion_values = list(dict(data[data['user'] == selected_option]['emotions'].value_counts()).values())
200
+
201
+ predicted_probabilities_ED = [count / sum(emotion_values) for count in emotion_values]
202
+
203
+ top_emotions = emotion_labels[:4]
204
+ top_scores = predicted_probabilities_ED[:4]
205
+ # Create the gauge charts for the top 4 emotion categories
206
+ fig = make_subplots(rows=2, cols=2, specs=[[{'type': 'indicator'}, {'type': 'indicator'}],
207
+ [{'type': 'indicator'}, {'type': 'indicator'}]],
208
+ vertical_spacing=0.4)
209
+
210
+ for i, emotion in enumerate(top_emotions):
211
+ # Get the emotion category, color, and normalized score for the current emotion
212
+ category = emotion
213
+ color = color_map[category]
214
+ value = top_scores[i] * 100
215
+
216
+ # Calculate the row and column position for adding the trace to the subplots
217
+ row = i // 2 + 1
218
+ col = i % 2 + 1
219
+
220
+ # Add a gauge chart trace for the current emotion category
221
+ fig.add_trace(go.Indicator(
222
+ domain={'x': [0, 1], 'y': [0, 1]},
223
+ value=value,
224
+ mode="gauge+number",
225
+ title={'text': category.capitalize()},
226
+ gauge={'axis': {'range': [None, 100]},
227
+ 'bar': {'color': color[3]},
228
+ 'bgcolor': 'white',
229
+ 'borderwidth': 2,
230
+ 'bordercolor': color[1],
231
+ 'steps': [{'range': [0, 33], 'color': color[0]},
232
+ {'range': [33, 66], 'color': color[1]},
233
+ {'range': [66, 100], 'color': color[2]}],
234
+ 'threshold': {'line': {'color': "black", 'width': 4},
235
+ 'thickness': 0.5,
236
+ 'value': 50}}), row=row, col=col)
237
+
238
+ # Update the layout of the figure
239
+ fig.update_layout(height=400, margin=dict(t=50, b=5, l=0, r=0))
240
+
241
+
242
+ # Display gauge charts
243
+
244
+ st.subheader("Emotion Detection")
245
+
246
+ st.plotly_chart(fig, use_container_width=True)
247
+
248
+ st.write("")
249
+ st.write("")
250
+ st.write("")
251
+
252
+ tox, rac, sex, spam, prof = st.columns([2,2,2,2,2])
253
+
254
+ with tox:
255
+
256
+ #Toxicity
257
+ toxicity = False
258
+ if selected_option == 'Entire Team':
259
+ toxicity = 'yes' in dict(data['toxicity'].value_counts())
260
+ else:
261
+ toxicity = 'yes' in dict(data[data['user'] == selected_option]['toxicity'].value_counts())
262
+
263
+ if toxicity:
264
+ st.subheader("Toxicity detected in the conversation.")
265
+ st.image(f"imgs/toxic_yes.jpeg", width=200)
266
+ else:
267
+ st.subheader("No toxicity detected in the conversation.")
268
+ st.image(f"imgs/toxic_no.jpeg", width=200)
269
+
270
+ with rac:
271
+ #Racism
272
+ racism = False
273
+ if selected_option == 'Entire Team':
274
+ racism = 'yes' in dict(data['racism'].value_counts())
275
+ else:
276
+ racism = 'yes' in dict(data[data['user'] == selected_option]['racism'].value_counts())
277
+
278
+ if racism:
279
+ st.subheader("Racism detected in the conversation.")
280
+ st.image(f"imgs/racism_yes.jpeg", width=200)
281
+ else:
282
+ st.subheader("No racism detected in the conversation.")
283
+ st.image(f"imgs/racism_no.jpeg", width=200)
284
+
285
+ with sex:
286
+ #Sexism
287
+ sexism = False
288
+ if selected_option == 'Entire Team':
289
+ sexism = 'yes' in dict(data['sexist'].value_counts())
290
+ else:
291
+ sexism = 'yes' in dict(data[data['user'] == selected_option]['sexist'].value_counts())
292
+
293
+ if sexism:
294
+ st.subheader("Sexism detected in the conversation.")
295
+ st.image(f"imgs/sexism_yes.png", width=200)
296
+ else:
297
+ st.subheader("No sexism detected in the conversation.")
298
+ st.image(f"imgs/sexism_no.jpeg", width=200)
299
+
300
+ with spam:
301
+ #spam
302
+ spam = False
303
+ if selected_option == 'Entire Team':
304
+ spam = 'yes' in dict(data['spam'].value_counts())
305
+ else:
306
+ spam = 'yes' in dict(data[data['user'] == selected_option]['spam'].value_counts())
307
+
308
+ if spam:
309
+ st.subheader("Spam detected in the conversation.")
310
+ st.image(f"imgs/spam_yes.jpeg", width=200)
311
+ else:
312
+ st.subheader("No spam detected in the conversation.")
313
+ st.image(f"imgs/spam_no.png", width=200)
314
+
315
+ with prof:
316
+ #profanity
317
+ profanity = False
318
+ if selected_option == 'Entire Team':
319
+ profanity = 'yes' in dict(data['profanity'].value_counts())
320
+ else:
321
+ profanity = 'yes' in dict(data[data['user'] == selected_option]['profanity'].value_counts())
322
+
323
+ if profanity:
324
+ st.subheader("Profanity detected in the conversation.")
325
+ st.image(f"imgs/profanity_yes.jpeg", width=200)
326
+ else:
327
+ st.subheader("No profanity detected in the conversation.")
328
+ st.image(f"imgs/profanity_no.jpeg", width=200)
329
+
330
+ st.write("")
331
+ st.write("")
332
+ st.write("")
333
+
334
+ _, har, __ = st.columns([4,2,4])
335
+
336
+ with _:
337
+ st.write("")
338
+
339
+ with har:
340
+
341
+ # Harassment Indicator
342
+ if selected_option == 'Entire Team':
343
+ harassment = list(dict(data['harassment indicators'].value_counts()).keys())
344
+ else:
345
+ harassment = list(dict(data[data['user'] == selected_option]['harassment indicators'].value_counts()).keys())
346
+
347
+ filtered_values = [value for value in harassment if value != 'none']
348
+
349
+ if len(filtered_values) > 0:
350
+ st.subheader(f"Harassment indicators detected in the conversation: {', '.join(filtered_values)}",)
351
+ st.image(f"imgs/harass_yes.jpeg", width=200)
352
+ else:
353
+ st.subheader("No harassment indicators detected in the conversation.")
354
+ st.image(f"imgs/harass_no.jpeg", width=200)
355
+
356
+ with __:
357
+ st.write("")
358
+
359
+
360
+ hide_st_style = """
361
+ <style>
362
+ #MainMenu {visibility: hidden;}
363
+ footer {visibility: hidden;}
364
+ </style>
365
+ """
366
+ st.markdown(hide_st_style, unsafe_allow_html=True)
imgs/harass_no.jpeg ADDED
imgs/harass_yes.jpeg ADDED
imgs/profanity_no.jpeg ADDED
imgs/profanity_yes.jpeg ADDED
imgs/racism_no.jpeg ADDED
imgs/racism_yes.jpeg ADDED
imgs/sexism_no.jpeg ADDED
imgs/sexism_yes.png ADDED
imgs/spam_no.png ADDED
imgs/spam_yes.jpeg ADDED
imgs/toxic_no.jpeg ADDED
imgs/toxic_yes.jpeg ADDED
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ streamlit==1.35.0
2
+ mistralai==0.1.8
3
+ python-dotenv==1.0.1
4
+ pandas==2.1.1
5
+ plotly==5.18.1