Zasha1 committed · Commit 0f82ede · verified · 1 Parent(s): 5736615

Update app.py

Files changed (1):
  1. app.py +164 -93
app.py CHANGED
@@ -1,4 +1,4 @@
-from streamlit_webrtc import webrtc_streamer, WebRtcMode
+import speech_recognition as sr
 from sentiment_analysis import analyze_sentiment, transcribe_with_chunks
 from product_recommender import ProductRecommender
 from objection_handler import ObjectionHandler
@@ -15,54 +15,43 @@ from io import BytesIO
 import wave
 import threading
 import queue
+from streamlit_webrtc import webrtc_streamer, WebRtcMode, AudioProcessorBase
 
 # Initialize components
-objection_handler = ObjectionHandler("objections.csv") # Use relative path
-product_recommender = ProductRecommender("recommendations.csv") # Use relative path
+objection_handler = ObjectionHandler("objections.csv")
+product_recommender = ProductRecommender("recommendations.csv")
 model = SentenceTransformer('all-MiniLM-L6-v2')
 
 # Queue to hold transcribed text
 transcription_queue = queue.Queue()
 
 def generate_comprehensive_summary(chunks):
-    """
-    Generate a comprehensive summary from conversation chunks
-    """
-    # Extract full text from chunks
     full_text = " ".join([chunk[0] for chunk in chunks])
-
-    # Perform basic analysis
     total_chunks = len(chunks)
     sentiments = [chunk[1] for chunk in chunks]
 
-    # Determine overall conversation context
     context_keywords = {
         'product_inquiry': ['dress', 'product', 'price', 'stock'],
         'pricing': ['cost', 'price', 'budget'],
         'negotiation': ['installment', 'payment', 'manage']
     }
 
-    # Detect conversation themes
     themes = []
     for keyword_type, keywords in context_keywords.items():
         if any(keyword.lower() in full_text.lower() for keyword in keywords):
             themes.append(keyword_type)
 
-    # Basic sentiment analysis
     positive_count = sentiments.count('POSITIVE')
     negative_count = sentiments.count('NEGATIVE')
     neutral_count = sentiments.count('NEUTRAL')
 
-    # Key interaction highlights
     key_interactions = []
     for chunk in chunks:
         if any(keyword.lower() in chunk[0].lower() for keyword in ['price', 'dress', 'stock', 'installment']):
             key_interactions.append(chunk[0])
 
-    # Construct summary
     summary = f"Conversation Summary:\n"
 
-    # Context and themes
     if 'product_inquiry' in themes:
         summary += "• Customer initiated a product inquiry about items.\n"
 
@@ -72,18 +61,15 @@ def generate_comprehensive_summary(chunks):
     if 'negotiation' in themes:
         summary += "• Customer and seller explored flexible payment options.\n"
 
-    # Sentiment insights
     summary += f"\nConversation Sentiment:\n"
     summary += f"• Positive Interactions: {positive_count}\n"
     summary += f"• Negative Interactions: {negative_count}\n"
     summary += f"• Neutral Interactions: {neutral_count}\n"
 
-    # Key highlights
     summary += "\nKey Conversation Points:\n"
-    for interaction in key_interactions[:3]: # Limit to top 3 key points
+    for interaction in key_interactions[:3]:
         summary += f"• {interaction}\n"
 
-    # Conversation outcome
     if positive_count > negative_count:
         summary += "\nOutcome: Constructive and potentially successful interaction."
     elif negative_count > positive_count:
@@ -117,127 +103,207 @@ def calculate_overall_sentiment(sentiment_scores):
 def handle_objection(text):
     query_embedding = model.encode([text])
     distances, indices = objection_handler.index.search(query_embedding, 1)
-    if distances[0][0] < 1.5: # Adjust similarity threshold as needed
+    if distances[0][0] < 1.5:
         responses = objection_handler.handle_objection(text)
         return "\n".join(responses) if responses else "No objection response found."
     return "No objection response found."
 
 def transcribe_audio(audio_bytes, sample_rate=16000):
-    """Transcribe audio using the transcribe_with_chunks function from sentiment_analysis.py."""
     try:
-        # Save audio bytes to a temporary WAV file
         with BytesIO() as wav_buffer:
             with wave.open(wav_buffer, 'wb') as wf:
-                wf.setnchannels(1) # Mono audio
-                wf.setsampwidth(2) # 2 bytes for int16
-                wf.setframerate(sample_rate) # Sample rate
+                wf.setnchannels(1)
+                wf.setsampwidth(2)
+                wf.setframerate(sample_rate)
                 wf.writeframes(audio_bytes)
 
-            # Use the transcribe_with_chunks function from sentiment_analysis.py
-            chunks = transcribe_with_chunks({}) # Pass an empty objections_dict for now
-            if chunks:
-                return chunks[-1][0] # Return the latest transcribed text
+            chunks = transcribe_with_chunks(wav_buffer.getvalue())
+            if chunks:
+                return chunks[-1][0]
     except Exception as e:
         print(f"Error transcribing audio: {e}")
     return None
 
-def audio_processing_thread(audio_frame):
-    """Thread function to process audio frames."""
-    # Convert audio frame to bytes
-    audio_data = audio_frame.to_ndarray()
-    print(f"Audio data shape: {audio_data.shape}") # Debug: Check audio data shape
-    print(f"Audio data sample: {audio_data[:10]}") # Debug: Check first 10 samples
-
-    audio_bytes = (audio_data * 32767).astype(np.int16).tobytes() # Convert to int16 format
-
-    # Transcribe the audio
-    text = transcribe_audio(audio_bytes)
-    if text:
-        transcription_queue.put(text) # Add transcribed text to the queue
+class AudioProcessor(AudioProcessorBase):
+    def __init__(self):
+        self.transcription_queue = transcription_queue
+
+    def recv(self, frame):
+        audio_data = frame.to_ndarray()
+        audio_bytes = (audio_data * 32767).astype(np.int16).tobytes()
+        text = transcribe_audio(audio_bytes)
+        if text:
+            self.transcription_queue.put(text)
+        return frame
 
 def real_time_analysis():
     st.info("Listening... Say 'stop' to end the process.")
 
-    def audio_frame_callback(audio_frame):
-        # Start a new thread to process the audio frame
-        threading.Thread(target=audio_processing_thread, args=(audio_frame,)).start()
-        return audio_frame
-
-    # Start WebRTC audio stream
     webrtc_ctx = webrtc_streamer(
         key="real-time-audio",
        mode=WebRtcMode.SENDONLY,
-        audio_frame_callback=audio_frame_callback,
+        audio_processor_factory=AudioProcessor,
         media_stream_constraints={"audio": True, "video": False},
     )
 
-    # Display transcribed text from the queue
-    while not transcription_queue.empty():
-        text = transcription_queue.get()
-        st.write(f"*Recognized Text:* {text}")
-
-        # Analyze sentiment
-        sentiment, score = analyze_sentiment(text)
-        st.write(f"*Sentiment:* {sentiment} (Score: {score})")
-
-        # Handle objection
-        objection_response = handle_objection(text)
-        st.write(f"*Objection Response:* {objection_response}")
-
-        # Get product recommendation
-        recommendations = []
-        if is_valid_input(text) and is_relevant_sentiment(score):
-            query_embedding = model.encode([text])
-            distances, indices = product_recommender.index.search(query_embedding, 1)
-
-            if distances[0][0] < 1.5: # Similarity threshold
-                recommendations = product_recommender.get_recommendations(text)
-
-        if recommendations:
-            st.write("*Product Recommendations:*")
-            for rec in recommendations:
-                st.write(rec)
+    if webrtc_ctx.state.playing:
+        while not transcription_queue.empty():
+            text = transcription_queue.get()
+            st.write(f"*Recognized Text:* {text}")
+
+            sentiment, score = analyze_sentiment(text)
+            st.write(f"*Sentiment:* {sentiment} (Score: {score})")
+
+            objection_response = handle_objection(text)
+            st.write(f"*Objection Response:* {objection_response}")
+
+            recommendations = []
+            if is_valid_input(text) and is_relevant_sentiment(score):
+                query_embedding = model.encode([text])
+                distances, indices = product_recommender.index.search(query_embedding, 1)
+
+                if distances[0][0] < 1.5:
+                    recommendations = product_recommender.get_recommendations(text)
+
+            if recommendations:
+                st.write("*Product Recommendations:*")
+                for rec in recommendations:
+                    st.write(rec)
 
 def run_app():
     st.set_page_config(page_title="Sales Call Assistant", layout="wide")
     st.title("AI Sales Call Assistant")
 
+    st.markdown("""
+        <style>
+        html, body {
+            font-family: 'Roboto', sans-serif;
+            background-color: #f5f7fa;
+        }
+        .header-container {
+            background: linear-gradient(135deg, #2980b9, #6dd5fa, #ffffff);
+            padding: 20px;
+            border-radius: 15px;
+            margin-bottom: 30px;
+            text-align: center;
+            box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
+        }
+        .section {
+            background: linear-gradient(135deg, #ffffff, #f5f7fa);
+            padding: 25px;
+            border-radius: 15px;
+            margin-bottom: 30px;
+            box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
+        }
+        .header {
+            font-size: 2.5em;
+            font-weight: 800;
+            color: #2980b9;
+            margin: 0;
+            padding: 10px;
+            letter-spacing: 1px;
+        }
+        .subheader {
+            font-size: 1.8em;
+            font-weight: 600;
+            color: #2980b9;
+            margin-top: 20px;
+            margin-bottom: 10px;
+            text-align: left;
+        }
+        .table-container {
+            background: #ffffff;
+            padding: 20px;
+            border-radius: 10px;
+            margin: 20px 0;
+            box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
+        }
+        .stButton > button {
+            background: linear-gradient(135deg, #2980b9, #6dd5fa);
+            color: white;
+            border: none;
+            padding: 10px 20px;
+            border-radius: 5px;
+            transition: all 0.3s ease;
+            box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
+        }
+        .stButton > button:hover {
+            background: linear-gradient(135deg, #2396dc, #6dd5fa);
+            box-shadow: 0 4px 6px rgba(0, 0, 0, 0.2);
+        }
+        .stTabs [data-baseweb="tab-list"] {
+            gap: 24px;
+            background: #f5f7fa;
+            padding: 10px;
+            border-radius: 10px;
+        }
+        .stTabs [data-baseweb="tab"] {
+            background-color: transparent;
+            border-radius: 4px;
+            color: #2980b9;
+            font-weight: 600;
+            padding: 10px 16px;
+        }
+        .stTabs [aria-selected="true"] {
+            background: linear-gradient(120deg, #2980b9, #6dd5fa);
+            color: white;
+        }
+        .success {
+            background: linear-gradient(135deg, #43A047, #2E7D32);
+            color: white;
+            padding: 10px;
+            border-radius: 5px;
+            margin: 10px 0;
+        }
+        .error {
+            background: linear-gradient(135deg, #E53935, #C62828);
+            color: white;
+            padding: 10px;
+            border-radius: 5px;
+            margin: 10px 0;
+        }
+        .warning {
+            background: linear-gradient(135deg, #FB8C00, #F57C00);
+            color: white;
+            padding: 10px;
+            border-radius: 5px;
+            margin: 10px 0;
+        }
+        </style>
+    """, unsafe_allow_html=True)
+
+    st.markdown("""
+        <div class="header-container">
+            <h1 class="header">AI Sales Call Assistant</h1>
+        </div>
+    """, unsafe_allow_html=True)
+
     st.sidebar.title("Navigation")
     app_mode = st.sidebar.radio("Choose a mode:", ["Real-Time Call Analysis", "Dashboard"])
 
     if app_mode == "Real-Time Call Analysis":
+        st.markdown('<div class="section">', unsafe_allow_html=True)
         st.header("Real-Time Sales Call Analysis")
-        real_time_analysis()
+        st.markdown('</div>', unsafe_allow_html=True)
+        if st.button("Start Listening"):
+            real_time_analysis()
 
     elif app_mode == "Dashboard":
+        st.markdown('<div class="section">', unsafe_allow_html=True)
         st.header("Call Summaries and Sentiment Analysis")
         try:
             data = fetch_call_data(config["google_sheet_id"])
             if data.empty:
                 st.warning("No data available in the Google Sheet.")
             else:
-                # Sentiment Visualizations
                 sentiment_counts = data['Sentiment'].value_counts()
-
-                # Pie Chart
+
+                product_mentions = filter_product_mentions(data[['Chunk']].values.tolist(), product_titles)
+                product_mentions_df = pd.DataFrame(list(product_mentions.items()), columns=['Product', 'Count'])
+
                 col1, col2 = st.columns(2)
                 with col1:
                     st.subheader("Sentiment Distribution")
-                    fig_pie = px.pie(
-                        values=sentiment_counts.values,
-                        names=sentiment_counts.index,
-                        title='Call Sentiment Breakdown',
-                        color_discrete_map={
-                            'POSITIVE': 'green',
-                            'NEGATIVE': 'red',
-                            'NEUTRAL': 'blue'
-                        }
-                    )
-                    st.plotly_chart(fig_pie)
-
-                # Bar Chart
-                with col2:
-                    st.subheader("Sentiment Counts")
                     fig_bar = px.bar(
                         x=sentiment_counts.index,
                         y=sentiment_counts.values,
@@ -252,41 +318,46 @@ def run_app():
                     )
                     st.plotly_chart(fig_bar)
 
-                # Existing Call Details Section
+                with col2:
+                    st.subheader("Most Mentioned Products")
+                    fig_products = px.pie(
+                        values=product_mentions_df['Count'],
+                        names=product_mentions_df['Product'],
+                        title='Most Mentioned Products'
+                    )
+                    st.plotly_chart(fig_products)
+
                 st.subheader("All Calls")
                 display_data = data.copy()
                 display_data['Summary Preview'] = display_data['Summary'].str[:100] + '...'
                 st.dataframe(display_data[['Call ID', 'Chunk', 'Sentiment', 'Summary Preview', 'Overall Sentiment']])
 
-                # Dropdown to select Call ID
                 unique_call_ids = data[data['Call ID'] != '']['Call ID'].unique()
                 call_id = st.selectbox("Select a Call ID to view details:", unique_call_ids)
 
-                # Display selected Call ID details
                 call_details = data[data['Call ID'] == call_id]
                 if not call_details.empty:
                     st.subheader("Detailed Call Information")
                     st.write(f"**Call ID:** {call_id}")
                     st.write(f"**Overall Sentiment:** {call_details.iloc[0]['Overall Sentiment']}")
 
-                    # Expand summary section
                     st.subheader("Full Call Summary")
                     st.text_area("Summary:",
                                  value=call_details.iloc[0]['Summary'],
                                  height=200,
                                  disabled=True)
 
-                    # Show all chunks for the selected call
                    st.subheader("Conversation Chunks")
                     for _, row in call_details.iterrows():
                         if pd.notna(row['Chunk']):
                             st.write(f"**Chunk:** {row['Chunk']}")
                             st.write(f"**Sentiment:** {row['Sentiment']}")
-                            st.write("---") # Separator between chunks
+                            st.write("---")
                 else:
                     st.error("No details available for the selected Call ID.")
         except Exception as e:
             st.error(f"Error loading dashboard: {e}")
+        st.markdown('</div>', unsafe_allow_html=True)
 
 if __name__ == "__main__":
     run_app()
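
For reference, a minimal runnable sketch (not part of the commit) of the in-memory WAV round trip that the updated transcribe_audio() builds on; the sine tone stands in for real microphone frames:

# Illustrative only: write int16 PCM into an in-memory WAV, then read
# the finished bytes back out, as transcribe_audio() does.
import wave
import numpy as np
from io import BytesIO

sample_rate = 16000
t = np.linspace(0, 1.0, sample_rate, endpoint=False)
pcm = (0.5 * np.sin(2 * np.pi * 440.0 * t) * 32767).astype(np.int16)

with BytesIO() as wav_buffer:
    with wave.open(wav_buffer, 'wb') as wf:
        wf.setnchannels(1)             # mono
        wf.setsampwidth(2)             # 2 bytes per int16 sample
        wf.setframerate(sample_rate)
        wf.writeframes(pcm.tobytes())
    # Read the bytes before the BytesIO closes; the wave writer has
    # already finalized the RIFF header when its context exits.
    wav_bytes = wav_buffer.getvalue()

print(f"{len(wav_bytes)} bytes of WAV data")  # 44-byte header + 32000 PCM bytes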
 
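And a hedged sketch of the embedding-distance gate behind handle_objection() and the recommender lookup. The faiss IndexFlatL2 and the sample objection strings are assumptions (the diff only shows the .index.search call pattern); model.encode and the 1.5 threshold come from app.py itself:

# Illustrative only: nearest-neighbour search over objection embeddings,
# assuming the handlers wrap a faiss IndexFlatL2. IndexFlatL2 returns
# squared L2 distances, so the 1.5 cutoff is a rough "close enough"
# gate, not a cosine similarity.
import faiss
import numpy as np
from sentence_transformers import SentenceTransformer

model = SentenceTransformer('all-MiniLM-L6-v2')

objections = ["The price is too high", "I need to think about it"]  # made-up examples
vectors = model.encode(objections).astype(np.float32)

index = faiss.IndexFlatL2(vectors.shape[1])  # 384 dims for all-MiniLM-L6-v2
index.add(vectors)

query = model.encode(["That costs way too much"]).astype(np.float32)
distances, indices = index.search(query, 1)

if distances[0][0] < 1.5:  # same threshold app.py uses
    print("Matched objection:", objections[indices[0][0]])
else:
    print("No objection response found.")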