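"""Dash app for transcribing and diarizing audio/video files.

Media is accepted via upload or URL, converted to WAV with ffmpeg, chunked,
transcribed with OpenAI Whisper, and diarized with a chat model. The resulting
transcript can be downloaded, summarized, or turned into meeting minutes.
"""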
import base64
import io
import os
import threading
import tempfile
import logging
import openai
from dash import Dash, dcc, html, Input, Output, State, callback, callback_context
import dash_bootstrap_components as dbc
from pydub import AudioSegment
import requests
import mimetypes
import urllib.parse
import subprocess
import json
from tqdm import tqdm

# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Try to import moviepy with the simpler import statement
try:
    from moviepy import VideoFileClip, AudioFileClip
    logger.info("MoviePy (VideoFileClip) successfully imported")
except ImportError as e:
    logger.error(f"Error importing MoviePy (VideoFileClip): {str(e)}")
    logger.error("Please ensure moviepy is installed correctly")
    raise

# Supported file formats
AUDIO_FORMATS = ['.wav', '.mp3', '.ogg', '.flac', '.aac', '.m4a', '.wma']
VIDEO_FORMATS = ['.mp4', '.avi', '.mov', '.flv', '.wmv', '.mkv', '.webm']
SUPPORTED_FORMATS = AUDIO_FORMATS + VIDEO_FORMATS

# Initialize the Dash app
app = Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])

# Global variables
generated_file = None
transcription_text = ""

# Set up OpenAI API key
openai.api_key = os.getenv("OPENAI_API_KEY")

app.layout = dbc.Container([
    html.H1("Audio/Video Transcription and Diarization App", className="text-center my-4"),
    dbc.Card([
        dbc.CardBody([
            dcc.Upload(
                id='upload-media',
                children=html.Div([
                    'Drag and Drop or ',
                    html.A('Select Audio/Video File')
                ]),
                style={
                    'width': '100%',
                    'height': '60px',
                    'lineHeight': '60px',
                    'borderWidth': '1px',
                    'borderStyle': 'dashed',
                    'borderRadius': '5px',
                    'textAlign': 'center',
                    'margin': '10px'
                },
                multiple=False
            ),
            html.Div(id='output-media-upload'),
            dbc.Input(id="url-input", type="text", placeholder="Enter audio/video URL", className="mb-3"),
            dbc.Button("Process Media", id="process-url-button", color="primary", className="mb-3"),
            dbc.Spinner(html.Div(id='transcription-status'), color="primary", type="grow"),
            html.H4("Diarized Transcription Preview", className="mt-4"),
            html.Div(id='transcription-preview', style={'whiteSpace': 'pre-wrap'}),
            html.Br(),
            dbc.Button("Download Transcription", id="btn-download", color="primary", className="mt-3 me-2", disabled=True),
            dbc.Button("Summarize Transcript", id="btn-summarize", color="secondary", className="mt-3 me-2", disabled=True),
            dbc.Button("Generate Meeting Minutes", id="btn-minutes", color="info", className="mt-3", disabled=True),
            dcc.Download(id="download-transcription"),
            dbc.Spinner(html.Div(id='summary-status'), color="secondary", type="grow"),
            dbc.Spinner(html.Div(id='minutes-status'), color="info", type="grow"),
        ])
    ])
], fluid=True)

def chunk_audio(audio_segment, chunk_size_ms=60000):
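    """Split an AudioSegment into fixed-size chunks (default 60 seconds).

    Short chunks keep each exported WAV well under the Whisper API's
    per-file upload limit.
    """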
    chunks = []
    for i in range(0, len(audio_segment), chunk_size_ms):
        chunks.append(audio_segment[i:i+chunk_size_ms])
    return chunks

def transcribe_audio_chunks(chunks):
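    """Transcribe each chunk with the Whisper API and join the results.

    Every chunk is exported to a temporary WAV file, sent to the
    `whisper-1` model, and removed afterwards.
    """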
    transcriptions = []
    for i, chunk in enumerate(chunks):
        logger.info(f"Transcribing chunk {i+1}/{len(chunks)}")
        with tempfile.NamedTemporaryFile(suffix='.wav', delete=False) as temp_audio_file:
            chunk.export(temp_audio_file.name, format="wav")
            with open(temp_audio_file.name, 'rb') as audio_file:
                transcript = openai.Audio.transcribe("whisper-1", audio_file)
                transcriptions.append(transcript.get('text', ''))
            os.unlink(temp_audio_file.name)
    return ' '.join(transcriptions)

def download_file(url):
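    """Download a remote media file and return the path to a temporary .mp4.

    Follows redirects, streams the response with a tqdm progress bar, and
    verifies the downloaded size against the Content-Length header.
    """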
    with requests.Session() as session:
        # First, send a GET request to get the final URL after redirects
        response = session.get(url, allow_redirects=True, stream=True)
        final_url = response.url
        logger.info(f"Final URL after redirects: {final_url}")

        # Get the total file size
        total_size = int(response.headers.get('content-length', 0))
        
        # Use a default name with .mp4 extension
        filename = 'downloaded_video.mp4'

        # Save the content to a temporary file with .mp4 extension
        with tempfile.NamedTemporaryFile(delete=False, suffix='.mp4') as temp_file:
            progress_bar = tqdm(total=total_size, unit='iB', unit_scale=True, desc=filename)
            for chunk in response.iter_content(chunk_size=8192):
                size = temp_file.write(chunk)
                progress_bar.update(size)
            progress_bar.close()
            temp_file_path = temp_file.name

        # Check if the downloaded file size matches the expected size
        actual_size = os.path.getsize(temp_file_path)
        if total_size != 0 and actual_size != total_size:
            logger.error(f"Downloaded file size ({actual_size} bytes) does not match expected size ({total_size} bytes)")
            raise Exception(f"Incomplete download. Expected {total_size} bytes, got {actual_size} bytes.")

    logger.info(f"File downloaded and saved as: {temp_file_path}")
    logger.info(f"File size: {actual_size} bytes")
    return temp_file_path

def get_file_info(file_path):
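    """Return ffprobe's JSON metadata for the file, or None if probing fails."""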
    try:
        result = subprocess.run(['ffprobe', '-v', 'quiet', '-print_format', 'json', '-show_format', '-show_streams', file_path], 
                                capture_output=True, text=True, check=True)
        return json.loads(result.stdout)
    except subprocess.CalledProcessError as e:
        logger.error(f"Error getting file info: {str(e)}")
        return None

def process_media(file_path, is_url=False):
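    """Transcribe and diarize a local upload (base64 contents) or a remote URL.

    The media is converted to WAV with ffmpeg, chunked, transcribed with
    Whisper, then diarized by a chat model. The result is stored in the
    module-level `transcription_text` / `generated_file`, and a
    (status_message, success) tuple is returned.
    """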
    global generated_file, transcription_text
    temp_file = None
    wav_path = None
    try:
        if is_url:
            logger.info(f"Processing URL: {file_path}")
            try:
                temp_file = download_file(file_path)
                file_size = os.path.getsize(temp_file)
                logger.info(f"URL content downloaded: {temp_file} (Size: {file_size} bytes)")
                if file_size < 1000000:  # Less than 1MB
                    raise Exception(f"Downloaded file is too small ({file_size} bytes). Possible incomplete download.")
            except Exception as e:
                logger.error(f"Error downloading URL content: {str(e)}")
                return f"Error downloading URL content: {str(e)}", False
            
            # For downloaded files, we know it's an MP4, so we can skip file type determination
            is_video = True
            is_audio = False
        else:
            # For uploaded files, we still need to determine the file type
            logger.info("Processing uploaded file")
            content_type, content_string = file_path.split(',')
            decoded = base64.b64decode(content_string)
            temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.tmp')
            temp_file.write(decoded)
            temp_file.close()
            temp_file = temp_file.name
            logger.info(f"Uploaded file saved: {temp_file}")

            # Get file info for uploaded files
            file_info = get_file_info(temp_file)
            if not file_info:
                return "Unable to process file: Could not determine file type", False

            logger.info(f"File info: {json.dumps(file_info, indent=2)}")

            # Determine if it's audio or video
            is_audio = any(stream['codec_type'] == 'audio' for stream in file_info['streams'])
            is_video = any(stream['codec_type'] == 'video' for stream in file_info['streams'])

        # Convert to WAV using ffmpeg
        wav_path = tempfile.NamedTemporaryFile(delete=False, suffix='.wav').name
        try:
            if is_video:
                # Extract audio from video
                cmd = ['ffmpeg', '-y', '-i', temp_file, '-vn', '-acodec', 'pcm_s16le', '-ar', '44100', '-ac', '2', wav_path, '-v', 'verbose']
            elif is_audio:
                # Convert audio to WAV
                cmd = ['ffmpeg', '-y', '-i', temp_file, '-acodec', 'pcm_s16le', '-ar', '44100', '-ac', '2', wav_path, '-v', 'verbose']
            else:
                return "Unsupported file type: Neither audio nor video detected", False
            
            result = subprocess.run(cmd, check=True, capture_output=True, text=True)
            logger.info(f"FFmpeg command output: {result.stdout}")
            logger.info(f"Audio extracted to WAV: {wav_path}")
        except subprocess.CalledProcessError as e:
            logger.error(f"FFmpeg conversion failed. Error output: {e.stderr}")
            logger.error(f"FFmpeg command: {e.cmd}")
            logger.error(f"Return code: {e.returncode}")
            return f"FFmpeg conversion failed: {e.stderr}", False

        # Chunk the audio file
        audio = AudioSegment.from_wav(wav_path)
        chunks = chunk_audio(audio)
        
        logger.info(f"Audio chunked into {len(chunks)} segments")
        
        # Transcribe chunks
        transcription = transcribe_audio_chunks(chunks)
        
        logger.info(f"Transcription completed. Total length: {len(transcription)} characters")

        # Diarization using OpenAI
        diarization_prompt = f"""
        The following is a transcription of a conversation. Please identify the different speakers and label them as Speaker 1, Speaker 2, etc., unless a speaker identifies themselves by name, in which case use their name. Format the output as a series of speaker labels followed by their dialogue. Here's the transcription:

        {transcription}

        Please analyze the content and speaking styles to differentiate between speakers. If a speaker gives their name, attribute their dialogue to that name and infer who is speaking based on speech patterns. Consider changes in topic, speaking patterns, and any contextual clues that might indicate a change in speaker.
        """

        diarization_response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=[
                {"role": "system", "content": "You are an AI assistant skilled in analyzing conversations and identifying different speakers."},
                {"role": "user", "content": diarization_prompt}
            ]
        )

        formatted_transcript = diarization_response['choices'][0]['message']['content']
        
        transcription_text = formatted_transcript
        generated_file = io.BytesIO(transcription_text.encode())
        logger.info("Transcription and diarization completed successfully")
        return "Transcription and diarization completed successfully!", True
    except Exception as e:
        logger.error(f"Error during processing: {str(e)}")
        return f"An error occurred: {str(e)}", False
    finally:
        if temp_file and os.path.exists(temp_file):
            os.unlink(temp_file)
        if wav_path and os.path.exists(wav_path):
            os.unlink(wav_path)

@app.callback(
    [Output('summary-status', 'children'),
     Output('minutes-status', 'children'),
     Output('download-transcription', 'data')],
    [Input('btn-summarize', 'n_clicks'),
     Input('btn-minutes', 'n_clicks'),
     Input('btn-download', 'n_clicks')],
    State('transcription-preview', 'children'),
    prevent_initial_call=True
)
def handle_document_actions(summarize_clicks, minutes_clicks, download_clicks, transcript):
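    """Handle the summarize, meeting-minutes, and download buttons.

    Returns a (summary_status, minutes_status, download_data) tuple; the
    downloadable payloads are built with dcc.send_string / dcc.send_bytes.
    """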
    ctx = callback_context
    if not ctx.triggered:
        return "", "", None

    # The preview pane is truncated to 1000 characters, so prefer the full
    # transcript kept in the module-level variable when it is available.
    transcript = transcription_text or transcript

    button_id = ctx.triggered[0]['prop_id'].split('.')[0]
    
    if button_id == 'btn-summarize':
        summary_prompt = f"""
        Please provide a detailed summary of the following transcript. Include the main topics discussed and the key points. Format it for readability as flowing paragraphs written in an encyclopedic, Wikipedia-like style:

        {transcript}

        Summary:
        """

        try:
            summary_response = openai.ChatCompletion.create(
                model="gpt-3.5-turbo",
                messages=[
                    {"role": "system", "content": "You are an AI assistant skilled in summarizing conversations."},
                    {"role": "user", "content": summary_prompt}
                ]
            )

            summary = summary_response['choices'][0]['message']['content']
            return "", "", dcc.send_string(summary, "transcript_summary.txt")
        except Exception as e:
            logger.error(f"Error generating summary: {str(e)}")
            return f"An error occurred while generating the summary: {str(e)}", "", None

    elif button_id == 'btn-minutes':
        minutes_prompt = f"""
        Please transform the following transcript into structured meeting minutes. Include the following sections:
        1. Meeting Title
        2. Date and Time (if mentioned)
        3. Attendees (if mentioned)
        4. Agenda Items
        5. Key Decisions
        6. Action Items
        7. Next Steps

        Transcript:
        {transcript}

        Meeting Minutes:
        """

        try:
            minutes_response = openai.ChatCompletion.create(
                model="gpt-3.5-turbo",
                messages=[
                    {"role": "system", "content": "You are an AI assistant skilled in creating structured meeting minutes from transcripts."},
                    {"role": "user", "content": minutes_prompt}
                ]
            )

            minutes = minutes_response['choices'][0]['message']['content']
            return "", "", dcc.send_string(minutes, "meeting_minutes.txt")
        except Exception as e:
            logger.error(f"Error generating meeting minutes: {str(e)}")
            return "", f"An error occurred while generating meeting minutes: {str(e)}", None

    elif button_id == 'btn-download':
        if generated_file is None:
            return "", "", None
        return "", "", dcc.send_bytes(generated_file.getvalue(), "diarized_transcription.txt")

    return "", "", None

@app.callback(
    [Output('output-media-upload', 'children'),
     Output('transcription-status', 'children'),
     Output('transcription-preview', 'children'),
     Output('btn-download', 'disabled'),
     Output('btn-summarize', 'disabled'),
     Output('btn-minutes', 'disabled')],
    [Input('upload-media', 'contents'),
     Input('process-url-button', 'n_clicks')],
    [State('upload-media', 'filename'),
     State('url-input', 'value')]
)
def update_output(contents, n_clicks, filename, url):
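    """Process a newly uploaded file or a submitted URL and refresh the UI.

    Returns the upload status, transcription status, preview text, and the
    disabled states of the download/summarize/minutes buttons.
    """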
    global transcription_text
    ctx = callback_context
    if not ctx.triggered:
        return "No file uploaded or URL processed.", "", "", True, True, True

    # Use the triggering component to decide which input to process, so a
    # stale upload does not shadow a newly submitted URL (and vice versa).
    trigger_id = ctx.triggered[0]['prop_id'].split('.')[0]

    # Clear the preview pane
    transcription_preview = ""

    if trigger_id == 'upload-media' and contents is not None:
        status_message, success = process_media(contents)
    elif trigger_id == 'process-url-button' and url:
        status_message, success = process_media(url, is_url=True)
    else:
        return "No file uploaded or URL processed.", "", "", True, True, True

    if success:
        preview = transcription_text[:1000] + "..." if len(transcription_text) > 1000 else transcription_text
        return "Media processed successfully.", status_message, preview, False, False, False
    else:
        return "Processing failed.", status_message, transcription_preview, True, True, True
        
if __name__ == '__main__':
    print("Starting the Dash application...")
    app.run(debug=True, host='0.0.0.0', port=7860)
    print("Dash application has finished running.")