Update app.py
app.py (changed)
@@ -43,7 +43,6 @@ transcription_text = ""
 # Set up OpenAI API key
 openai.api_key = os.getenv("OPENAI_API_KEY")
 
-# Layout
 app.layout = dbc.Container([
     html.H1("Audio/Video Transcription and Diarization App", className="text-center my-4"),
     dbc.Card([

@@ -73,8 +72,12 @@ app.layout = dbc.Container([
             html.H4("Diarized Transcription Preview", className="mt-4"),
             html.Div(id='transcription-preview', style={'whiteSpace': 'pre-wrap'}),
             html.Br(),
-            dbc.Button("Download Transcription", id="btn-download", color="primary", className="mt-3", disabled=True),
-            dcc.Download(id="download-transcription"),
+            dbc.Button("Download Transcription", id="btn-download", color="primary", className="mt-3 me-2", disabled=True),
+            dbc.Button("Summarize Transcript", id="btn-summarize", color="secondary", className="mt-3 me-2", disabled=True),
+            dbc.Button("Generate Meeting Minutes", id="btn-minutes", color="info", className="mt-3", disabled=True),
+            dcc.Download(id="download-transcription"),
+            html.Div(id='summary-output', className="mt-4"),
+            html.Div(id='minutes-output', className="mt-4")
         ])
     ])
 ], fluid=True)

@@ -243,12 +246,94 @@ def process_media(file_path, is_url=False):
             os.unlink(temp_file)
         if wav_path and os.path.exists(wav_path):
             os.unlink(wav_path)
-
+
+@app.callback(
+    Output('summary-output', 'children'),
+    Input('btn-summarize', 'n_clicks'),
+    State('transcription-preview', 'children'),
+    prevent_initial_call=True
+)
+def summarize_transcript(n_clicks, transcript):
+    if n_clicks is None or not transcript:
+        return ""
+
+    summary_prompt = f"""
+    Please provide a concise summary of the following transcript. Include the main topics discussed and key points. Format it for readability:
+
+    {transcript}
+
+    Summary:
+    """
+
+    try:
+        summary_response = openai.ChatCompletion.create(
+            model="gpt-3.5-turbo",
+            messages=[
+                {"role": "system", "content": "You are an AI assistant skilled in summarizing conversations."},
+                {"role": "user", "content": summary_prompt}
+            ]
+        )
+
+        summary = summary_response['choices'][0]['message']['content']
+        return dbc.Card(dbc.CardBody([
+            html.H5("Transcript Summary", className="card-title"),
+            html.P(summary, className="card-text")
+        ]))
+    except Exception as e:
+        logger.error(f"Error generating summary: {str(e)}")
+        return html.Div(f"An error occurred while generating the summary: {str(e)}", className="text-danger")
+
+@app.callback(
+    Output('minutes-output', 'children'),
+    Input('btn-minutes', 'n_clicks'),
+    State('transcription-preview', 'children'),
+    prevent_initial_call=True
+)
+def generate_meeting_minutes(n_clicks, transcript):
+    if n_clicks is None or not transcript:
+        return ""
+
+    minutes_prompt = f"""
+    Please transform the following transcript into structured meeting minutes. Include the following sections:
+    1. Meeting Title
+    2. Date and Time (if mentioned)
+    3. Attendees (if mentioned)
+    4. Agenda Items
+    5. Key Decisions
+    6. Action Items
+    7. Next Steps
+
+    Transcript:
+    {transcript}
+
+    Meeting Minutes:
+    """
+
+    try:
+        minutes_response = openai.ChatCompletion.create(
+            model="gpt-3.5-turbo",
+            messages=[
+                {"role": "system", "content": "You are an AI assistant skilled in creating structured meeting minutes from transcripts."},
+                {"role": "user", "content": minutes_prompt}
+            ]
+        )
+
+        minutes = minutes_response['choices'][0]['message']['content']
+        return dbc.Card(dbc.CardBody([
+            html.H5("Meeting Minutes", className="card-title"),
+            dcc.Markdown(minutes, className="card-text")
+        ]))
+    except Exception as e:
+        logger.error(f"Error generating meeting minutes: {str(e)}")
+        return html.Div(f"An error occurred while generating meeting minutes: {str(e)}", className="text-danger")
+
 @app.callback(
     [Output('output-media-upload', 'children'),
      Output('transcription-status', 'children'),
      Output('transcription-preview', 'children'),
-     Output('btn-download', 'disabled')],
+     Output('btn-download', 'disabled'),
+     Output('btn-summarize', 'disabled'),
+     Output('btn-minutes', 'disabled')],
     [Input('upload-media', 'contents'),
      Input('process-url-button', 'n_clicks')],
     [State('upload-media', 'filename'),

@@ -257,7 +342,7 @@ def process_media(file_path, is_url=False):
 def update_output(contents, n_clicks, filename, url):
     ctx = callback_context
     if not ctx.triggered:
-        return "No file uploaded or URL processed.", "", "", True
+        return "No file uploaded or URL processed.", "", "", True, True, True
 
     # Clear the preview pane
     transcription_preview = ""

@@ -267,13 +352,13 @@ def update_output(contents, n_clicks, filename, url):
     elif url:
         status_message, success = process_media(url, is_url=True)
     else:
-        return "No file uploaded or URL processed.", "", "", True
+        return "No file uploaded or URL processed.", "", "", True, True, True
 
     if success:
         preview = transcription_text[:1000] + "..." if len(transcription_text) > 1000 else transcription_text
-        return f"Media processed successfully.", status_message, preview, False
+        return f"Media processed successfully.", status_message, preview, False, False, False
     else:
-        return "Processing failed.", status_message, transcription_preview, True
+        return "Processing failed.", status_message, transcription_preview, True, True, True
 
 @app.callback(
     Output("download-transcription", "data"),
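Both new callbacks read the transcript from State('transcription-preview', 'children'), which update_output fills with a preview capped at the first 1,000 characters of transcription_text, and both issue the same ChatCompletion call with only the prompt and system message differing. A minimal sketch of a shared helper they could delegate to is below; the helper name _chat is hypothetical, not part of this commit, and it assumes the legacy openai-python (<1.0) interface that app.py already uses.

# Sketch only: a shared chat helper that summarize_transcript and
# generate_meeting_minutes could both call. The name _chat is hypothetical;
# this assumes the legacy openai-python (<1.0) API used elsewhere in app.py.
import os
import openai

openai.api_key = os.getenv("OPENAI_API_KEY")

def _chat(system_message, user_prompt, model="gpt-3.5-turbo"):
    """Send one system+user exchange and return the assistant's reply text."""
    response = openai.ChatCompletion.create(
        model=model,
        messages=[
            {"role": "system", "content": system_message},
            {"role": "user", "content": user_prompt},
        ],
    )
    return response['choices'][0]['message']['content']

# Example: summary = _chat("You are an AI assistant skilled in summarizing conversations.", summary_prompt)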
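Note that openai.ChatCompletion.create and the dict-style response access belong to the pre-1.0 openai-python SDK; under openai>=1.0 that call is removed and raises an error. If the environment ever pins the newer SDK (an assumption, not something this commit targets), the equivalent call would look roughly like the sketch below.

# Rough equivalent under openai>=1.0, shown only as a hedge in case the
# deployment upgrades the SDK; this commit itself targets the legacy API.
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

def summarize_with_v1_client(prompt):
    response = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "system", "content": "You are an AI assistant skilled in summarizing conversations."},
            {"role": "user", "content": prompt},
        ],
    )
    return response.choices[0].message.content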