reab5555 commited on
Commit
babf5da
·
verified ·
1 Parent(s): 2daa60c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +0 -77
app.py CHANGED
@@ -1,80 +1,3 @@
1
# Third-party UI framework.
import gradio as gr
# Project-local helpers: model loading, transcript processing,
# diarization, and chart generation (defined elsewhere in this repo).
from llm_loader import load_model
from processing import process_input
from transcription_diarization import diarize_audio
from visualization import create_charts
import time
from config import openai_api_key

# Load the model once at import time; `llm` is shared by analyze_video below.
# NOTE(review): this runs on module import and presumably needs network access
# for the OpenAI-backed model — confirm that is acceptable at startup.
llm = load_model(openai_api_key)
11
-
12
def analyze_video(video_path, max_speakers, progress=gr.Progress()):
    """Run the full analysis pipeline on an uploaded video and build UI updates.

    Pipeline: diarize/transcribe the audio, run the transcript through the
    LLM-based personality analysis, render per-speaker charts, then flatten
    everything into the list of Gradio component updates the interface expects.

    Args:
        video_path: Path to the uploaded video file; falsy when nothing was
            uploaded.
        max_speakers: Upper bound on the number of speakers passed to
            diarization.
        progress: Gradio progress tracker. ``gr.Progress()`` as a default is
            the documented Gradio idiom (Gradio injects a fresh tracker per
            call), not a mutable-default bug.

    Returns:
        A flat list of 50 Gradio updates: 48 per-speaker plot/textbox slots
        (7 visible components per detected speaker, hidden padding after),
        followed by the transcript textbox and an execution-info textbox.
    """
    start_time = time.time()

    if not video_path:
        # Same output count as the success path: 1 message slot,
        # 48 hidden speaker slots, 1 status string -> 50 outputs.
        return [gr.Markdown("Please upload a video file.")] + [gr.update(visible=False)] * 48 + ["Analysis not started."]

    progress(0, desc="Starting analysis...")

    # Transcription + speaker diarization is the dominant cost.
    progress(0.2, desc="Starting transcription and diarization...")
    transcription = diarize_audio(video_path, max_speakers)
    progress(0.6, desc="Transcription and diarization complete.")

    # LLM analysis of the diarized transcript.
    progress(0.7, desc="Processing transcription...")
    results = process_input(transcription, llm)
    progress(0.8, desc="Transcription processing complete.")

    # Per-speaker visualization; `charts` and `explanations` are keyed by
    # speaker id (presumably parallel dicts — verify against create_charts).
    progress(0.9, desc="Generating charts...")
    charts, explanations = create_charts(results)
    progress(1.0, desc="Charts generation complete.")

    execution_time = time.time() - start_time

    # Build the 7-component section for each detected speaker.
    output_components = []
    for speaker_id, speaker_charts in charts.items():
        speaker_explanations = explanations[speaker_id]
        output_components.extend([
            gr.Plot(value=speaker_charts.get("attachment", None), visible=True),
            gr.Textbox(value=speaker_explanations.get("attachment", ""), label="Attachment Styles Explanation", visible=True),
            gr.Plot(value=speaker_charts.get("dimensions", None), visible=True),
            gr.Plot(value=speaker_charts.get("bigfive", None), visible=True),
            gr.Textbox(value=speaker_explanations.get("bigfive", ""), label="Big Five Traits Explanation", visible=True),
            gr.Plot(value=speaker_charts.get("personality", None), visible=True),
            gr.Textbox(value=speaker_explanations.get("personality", ""), label="Personality Disorders Explanation", visible=True),
        ])

    # BUG FIX: the original padded in groups of 7 components "while < 48";
    # 48 is not a multiple of 7, so the loop always overshot to 49 slots and
    # the success path returned one more output than the empty-input path.
    # Pad one hidden update at a time (matching the early-return padding
    # style) to land on exactly 48 speaker slots.
    while len(output_components) < 48:
        output_components.append(gr.update(visible=False))

    # Transcript and timing info occupy the final two output slots.
    output_components.append(gr.Textbox(value=transcription, label="Transcript", lines=10, visible=True))
    output_components.append(gr.Textbox(value=f"Completed in {int(execution_time)} seconds.", label="Execution Information", visible=True))

    return output_components
77
-
78
  # Define the Gradio interface
79
  with gr.Blocks() as iface:
80
  gr.Markdown("# AI Personality Detection")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  # Define the Gradio interface
2
  with gr.Blocks() as iface:
3
  gr.Markdown("# AI Personality Detection")