jaisun2004 committed
Commit 1cd1007 · verified · 1 Parent(s): 693ec40

Update app.py

Files changed (1):
  1. app.py +92 -32
app.py CHANGED
@@ -29,39 +29,63 @@ if uploaded_file:
 audio.export(audio_path, format="wav")

 try:
- pass # Placeholder to avoid empty try
- except Exception as e:
- st.error(f"❌ Processing failed: {e}")
- tab1, tab2, tab3, tab4 = st.tabs(["📝 Transcript", "📋 Summary", "💬 Emotions", "📈 Trends"])
-
- with tab1:
- st.subheader("📝 Speaker-Simulated Transcript")
- st.markdown(diarized_transcript, unsafe_allow_html=True)
-
- with tab2:
- st.subheader("📋 Contextual Summary")
- st.write(summary[0]["summary_text"])
-
- with tab3:
- st.subheader("💬 Emotional Insights (Overall)")
- if 'emotion_scores' in locals():
- for emo in emotion_scores[0]:
- st.write(f"{emo['label']}: {round(emo['score']*100, 2)}%")
- else:
- st.write("No emotional data to display.")
-
- with tab4:
- st.subheader("📈 Emotional Trends Over Time")
-
- session_dates = ["2024-04-01", "2024-04-08", "2024-04-15", "2024-04-22"]
- anxiety_scores = [70, 65, 55, 40]
- sadness_scores = [30, 20, 25, 15]
-
- fig = go.Figure()
- fig.add_trace(go.Scatter(x=session_dates, y=anxiety_scores, mode='lines+markers', name='Anxiety'))
- fig.add_trace(go.Scatter(x=session_dates, y=sadness_scores, mode='lines+markers', name='Sadness'))
- fig.update_layout(title='Emotional Trends', xaxis_title='Date', yaxis_title='Score (%)')
- st.plotly_chart(fig)
+ # Transcribe
+ st.info("🔄 Transcribing with Whisper (mixed-language support)...")
+ asr = pipeline("automatic-speech-recognition", model="openai/whisper-large")
+ result = asr(audio_path, return_timestamps=True, generate_kwargs={"language": "<|en|>"})
+ raw_transcript = result.get("text", "")
+
+ if not raw_transcript:
+ st.error("❌ Could not generate a transcript. Please try a different audio.")
+ else:
+ # Simulated Speaker Diarization
+ st.info("🗣️ Simulating speaker separation...")
+ sentences = re.split(r'(?<=[.?!])\s+', raw_transcript)
+ diarized_transcript = ""
+ for idx, sentence in enumerate(sentences):
+ speaker = "Speaker 1" if idx % 2 == 0 else "Speaker 2"
+ diarized_transcript += f"{speaker}: {sentence}\n\n"
+
+ # Summarization
+ st.info("📋 Summarizing conversation...")
+ summarizer = pipeline("summarization", model="philschmid/bart-large-cnn-samsum")
+ summary = summarizer(raw_transcript, max_length=256, min_length=60, do_sample=False)
+
+ # Emotion tagging
+ st.info("🎭 Extracting emotional tones...")
+ emotion_model = pipeline("text-classification", model="j-hartmann/emotion-english-distilroberta-base", return_all_scores=True)
+ emotion_scores = emotion_model(raw_transcript)
+
+ # Layout with Tabs
+
+ with tab1:
+ st.subheader("📝 Speaker-Simulated Transcript")
+ st.markdown(diarized_transcript, unsafe_allow_html=True)
+
+ with tab2:
+ st.subheader("📋 Contextual Summary")
+ st.write(summary[0]["summary_text"])
+
+ with tab3:
+ st.subheader("💬 Emotional Insights (Overall)")
+ if 'emotion_scores' in locals():
+ for emo in emotion_scores[0]:
+ st.write(f"{emo['label']}: {round(emo['score']*100, 2)}%")
+ else:
+ st.write("No emotional data to display.")
+
+ with tab4:
+ st.subheader("📈 Emotional Trends Over Time")
+
+ session_dates = ["2024-04-01", "2024-04-08", "2024-04-15", "2024-04-22"]
+ anxiety_scores = [70, 65, 55, 40]
+ sadness_scores = [30, 20, 25, 15]
+
+ fig = go.Figure()
+ fig.add_trace(go.Scatter(x=session_dates, y=anxiety_scores, mode='lines+markers', name='Anxiety'))
+ fig.add_trace(go.Scatter(x=session_dates, y=sadness_scores, mode='lines+markers', name='Sadness'))
+ fig.update_layout(title='Emotional Trends', xaxis_title='Date', yaxis_title='Score (%)')
+ st.plotly_chart(fig)

 # Export Button
 st.subheader("📥 Export Session Report")
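For quick reference, here is a minimal standalone sketch of the processing chain this hunk adds (Whisper transcription, alternating speaker labels, summarization, emotion scoring), run outside the Streamlit app. Model names and call arguments are taken from the diff; audio_path is a hypothetical local WAV file, and transformers (with torch) is assumed to be installed.

# Standalone sketch of the pipeline calls added above; assumptions noted inline.
import re
from transformers import pipeline

audio_path = "session.wav"  # hypothetical path; the app exports this file earlier via audio.export(...)

# 1) Transcribe with Whisper, as in the diff.
asr = pipeline("automatic-speech-recognition", model="openai/whisper-large")
result = asr(audio_path, return_timestamps=True, generate_kwargs={"language": "<|en|>"})
raw_transcript = result.get("text", "")

# 2) "Diarize" by alternating speaker labels per sentence (same heuristic as the diff).
sentences = re.split(r'(?<=[.?!])\s+', raw_transcript)
diarized_transcript = "".join(
    f"Speaker {'1' if i % 2 == 0 else '2'}: {s}\n\n" for i, s in enumerate(sentences)
)

# 3) Summarize and score emotions on the raw transcript (very long inputs may need
#    truncation or chunking; the diff passes the full text as-is).
summarizer = pipeline("summarization", model="philschmid/bart-large-cnn-samsum")
summary = summarizer(raw_transcript, max_length=256, min_length=60, do_sample=False)

emotion_model = pipeline(
    "text-classification",
    model="j-hartmann/emotion-english-distilroberta-base",
    return_all_scores=True,
)
emotion_scores = emotion_model(raw_transcript)

print(diarized_transcript)
print(summary[0]["summary_text"])
for emo in emotion_scores[0]:
    print(f"{emo['label']}: {round(emo['score'] * 100, 2)}%")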
@@ -116,3 +140,39 @@ if uploaded_file:
 finally:
 if os.path.exists(audio_path):
 os.remove(audio_path)
+
+ tab1, tab2, tab3, tab4 = st.tabs(["📝 Transcript", "📋 Summary", "💬 Emotions", "📈 Trends"])
+
+ with tab1:
+ st.subheader("📝 Speaker-Simulated Transcript")
+ if 'diarized_transcript' in locals():
+ st.markdown(diarized_transcript, unsafe_allow_html=True)
+ else:
+ st.warning("Transcript not available.")
+
+ with tab2:
+ st.subheader("📋 Contextual Summary")
+ if 'summary' in locals():
+ st.write(summary[0]["summary_text"])
+ else:
+ st.warning("Summary not available.")
+
+ with tab3:
+ st.subheader("💬 Emotional Insights (Overall)")
+ if 'emotion_scores' in locals():
+ for emo in emotion_scores[0]:
+ st.write(f"{emo['label']}: {round(emo['score']*100, 2)}%")
+ else:
+ st.warning("No emotional data to display.")
+
+ with tab4:
+ st.subheader("📈 Emotional Trends Over Time")
+ session_dates = ["2024-04-01", "2024-04-08", "2024-04-15", "2024-04-22"]
+ anxiety_scores = [70, 65, 55, 40]
+ sadness_scores = [30, 20, 25, 15]
+
+ fig = go.Figure()
+ fig.add_trace(go.Scatter(x=session_dates, y=anxiety_scores, mode='lines+markers', name='Anxiety'))
+ fig.add_trace(go.Scatter(x=session_dates, y=sadness_scores, mode='lines+markers', name='Sadness'))
+ fig.update_layout(title='Emotional Trends', xaxis_title='Date', yaxis_title='Score (%)')
+ st.plotly_chart(fig)
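The tab blocks appended in this hunk guard every result with an `in locals()` check before rendering. The sketch below shows the same guard-then-render idea factored into a small helper; the `results` dict and `render_or_warn` helper are illustrative assumptions introduced here, not part of this commit.

# Illustrative only: the guard-then-render pattern from the hunk above, factored into a helper.
import streamlit as st

results = {}  # hypothetical: e.g. results["summary"] = summarizer(...) on success

def render_or_warn(tab, key, render, message):
    # Show a tab's content only when its result was produced; warn otherwise.
    with tab:
        if key in results:
            render(results[key])
        else:
            st.warning(message)

tab1, tab2 = st.tabs(["📝 Transcript", "📋 Summary"])
render_or_warn(tab1, "transcript", st.markdown, "Transcript not available.")
render_or_warn(tab2, "summary",
               lambda s: st.write(s[0]["summary_text"]),
               "Summary not available.")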
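Both versions of tab 4 plot hard-coded demo values. As a hedged sketch only, the same Plotly figure could be driven by a stored history of per-session emotion scores; the session_history data and the mapping of the classifier's 'fear' label to the 'Anxiety' trace are assumptions introduced here, not something the commit does.

# Sketch: build the tab-4 trend figure from stored per-session emotion scores
# (values in 0..1 as returned by the emotion classifier), scaled to percent.
# The session_history data and the fear -> "Anxiety" mapping are hypothetical.
import plotly.graph_objects as go

session_history = [
    ("2024-04-01", {"fear": 0.70, "sadness": 0.30}),
    ("2024-04-08", {"fear": 0.65, "sadness": 0.20}),
    ("2024-04-15", {"fear": 0.55, "sadness": 0.25}),
    ("2024-04-22", {"fear": 0.40, "sadness": 0.15}),
]

dates = [date for date, _ in session_history]
fig = go.Figure()
for label, trace_name in [("fear", "Anxiety"), ("sadness", "Sadness")]:
    fig.add_trace(go.Scatter(
        x=dates,
        y=[round(scores[label] * 100, 2) for _, scores in session_history],
        mode="lines+markers",
        name=trace_name,
    ))
fig.update_layout(title="Emotional Trends", xaxis_title="Date", yaxis_title="Score (%)")
fig.show()  # inside the app this would be st.plotly_chart(fig)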