import streamlit as st
import plotly.graph_objects as go
from transformers import pipeline
from pydub import AudioSegment
import os
import re
from docx import Document
from docx.shared import Pt
from docx.enum.text import WD_PARAGRAPH_ALIGNMENT
from datetime import datetime
# Page config
st.set_page_config(page_title="Atma.ai - Session Summarizer + Export", layout="wide")
st.title("π§ Atma.ai β Advanced Mental Health Session Summarizer")
st.markdown("Upload a therapy session audio (Tamil-English mix) to view the transcript, summary, emotional analysis, and export everything to Word!")
# Upload audio
uploaded_file = st.file_uploader("Upload audio file", type=["wav", "mp3", "m4a"])
if uploaded_file:
    st.audio(uploaded_file)

    # Convert audio to required format
    audio_path = "temp_audio.wav"
    audio = AudioSegment.from_file(uploaded_file)
    audio = audio.set_channels(1).set_frame_rate(16000)
    audio.export(audio_path, format="wav")
    try:
        # Transcribe
        st.info("Transcribing with Whisper (mixed-language support)...")
        asr = pipeline("automatic-speech-recognition", model="openai/whisper-large")
        result = asr(audio_path, return_timestamps=True, generate_kwargs={"language": "<|en|>"})
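        # Assumptions worth flagging: openai/whisper-large is a ~1.5B-parameter
        # checkpoint and may be too slow on a CPU-only Space (a smaller checkpoint
        # such as openai/whisper-small is a possible substitute), and forcing
        # language "<|en|>" makes Whisper decode everything as English, so Tamil
        # segments in a code-mixed session may come out translated or garbled.
        # Wrapping the pipeline() calls in a function decorated with
        # @st.cache_resource would also avoid reloading the models on every rerun.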
        raw_transcript = result.get("text", "")

        if not raw_transcript:
            st.error("Could not generate a transcript. Please try a different audio file.")
        else:
            # Simulated Speaker Diarization
            st.info("Simulating speaker separation...")
            sentences = re.split(r'(?<=[.?!])\s+', raw_transcript)
            diarized_transcript = ""
            for idx, sentence in enumerate(sentences):
                speaker = "Speaker 1" if idx % 2 == 0 else "Speaker 2"
                diarized_transcript += f"{speaker}: {sentence}\n\n"
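            # The alternating Speaker 1 / Speaker 2 labels above are a naive
            # simulation that assumes strict two-person turn-taking, one sentence
            # per turn; real speaker attribution would need an actual diarization
            # model (pyannote.audio is one commonly used option) aligned against
            # the Whisper timestamps.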
            # Summarization
            st.info("Summarizing conversation...")
            summarizer = pipeline("summarization", model="philschmid/bart-large-cnn-samsum")
            summary = summarizer(raw_transcript, max_length=256, min_length=60, do_sample=False)
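            # Caveat: BART-based summarizers like this samsum checkpoint accept
            # roughly 1024 input tokens, so a long session transcript is either
            # truncated or rejected depending on the transformers version; chunking
            # the transcript and summarizing the chunks (then summarizing the
            # partial summaries) is a common workaround.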
            # Emotion tagging
            st.info("Extracting emotional tones...")
            emotion_model = pipeline("text-classification", model="j-hartmann/emotion-english-distilroberta-base", return_all_scores=True)
            emotion_scores = emotion_model(raw_transcript)
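            # return_all_scores=True still works but is deprecated in recent
            # transformers releases in favor of top_k=None. The model is
            # English-only, and DistilRoBERTa's 512-token window means long
            # transcripts may need truncation or chunking, in which case the
            # "overall" scores describe only the start of the session.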
            # Layout with Tabs
            tab1, tab2, tab3 = st.tabs(["Transcript", "Summary", "Emotions"])

            with tab1:
                st.subheader("Speaker-Simulated Transcript")
                st.markdown(diarized_transcript, unsafe_allow_html=True)

            with tab2:
                st.subheader("Contextual Summary")
                st.write(summary[0]["summary_text"])

            with tab3:
                st.subheader("Emotional Insights (Overall)")
                for emo in emotion_scores[0]:
                    st.write(f"{emo['label']}: {round(emo['score']*100, 2)}%")

            # Export Button
            st.subheader("Export Session Report")
            def generate_docx(transcript, summary_text, emotions):
                doc = Document()

                # Title
                title = doc.add_heading('Session Summary - Atma.ai', 0)
                title.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER

                # Date
                date_paragraph = doc.add_paragraph(f"Date: {datetime.now().strftime('%Y-%m-%d')}")
                date_paragraph.runs[0].italic = True
                doc.add_paragraph("\n")

                # Transcript
                doc.add_heading('Transcript', level=1)
                transcript_para = doc.add_paragraph(transcript)
                transcript_para.runs[0].font.size = Pt(12)
                doc.add_paragraph("\n")

                # Summary
                doc.add_heading('Summary', level=1)
                summary_para = doc.add_paragraph(summary_text)
                summary_para.runs[0].font.size = Pt(12)
                doc.add_paragraph("\n")

                # Emotional Insights
                doc.add_heading('Emotional Insights', level=1)
                for emo in emotions[0]:
                    emotion_para = doc.add_paragraph(f"{emo['label']}: {round(emo['score']*100, 2)}%")
                    emotion_para.runs[0].font.size = Pt(12)

                # Footer
                doc.add_paragraph("\n\n---\nGenerated by Atma.ai - Confidential", style="Intense Quote")

                output_path = "session_summary.docx"
                doc.save(output_path)
                return output_path
            if st.button("Generate and Download Report (.docx)"):
                output_file = generate_docx(diarized_transcript, summary[0]["summary_text"], emotion_scores)
                with open(output_file, "rb") as f:
                    st.download_button(label="Download Report", data=f, file_name="session_summary.docx", mime="application/vnd.openxmlformats-officedocument.wordprocessingml.document")
    except Exception as err:
        st.error(f"Processing failed: {err}")
    finally:
        if os.path.exists(audio_path):
            os.remove(audio_path)

# --- Post-processing UI Layout ---
if 'diarized_transcript' in locals() and 'summary' in locals() and 'emotion_scores' in locals():
    tab1, tab2, tab3, tab4 = st.tabs(["Transcript", "Summary", "Emotions", "Trends"])

    with tab1:
        st.subheader("Transcript")
        st.markdown(diarized_transcript, unsafe_allow_html=True)

    with tab2:
        st.subheader("Contextual Summary")
        st.write(summary[0]["summary_text"])

    with tab3:
        st.subheader("Emotional Insights (Overall)")
        for emo in emotion_scores[0]:
            st.write(f"{emo['label']}: {round(emo['score']*100, 2)}%")

    with tab4:
        st.subheader("Emotional Trends Over Time")
        # Placeholder values: cross-session trends need stored per-session scores,
        # which this single-session app does not persist yet.
        session_dates = ["2025-03-01", "2025-03-08", "2025-03-15"]
        anxiety_scores = [62, 48, 35]
        sadness_scores = [55, 41, 30]
        fig = go.Figure()
        fig.add_trace(go.Scatter(x=session_dates, y=anxiety_scores, mode='lines+markers', name='Anxiety'))
        fig.add_trace(go.Scatter(x=session_dates, y=sadness_scores, mode='lines+markers', name='Sadness'))
        fig.update_layout(title='Emotional Trends', xaxis_title='Date', yaxis_title='Score (%)')
        st.plotly_chart(fig)
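
# Running locally: `streamlit run app.py` should work once streamlit, plotly,
# transformers, torch, pydub, and python-docx are installed and ffmpeg is
# available for audio decoding; the trend chart above plots placeholder values
# until per-session emotion scores are persisted between runs.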