Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -5,104 +5,65 @@ from transcription_diarization import diarize_audio
|
|
5 |
from visualization import create_charts
|
6 |
import time
|
7 |
from config import openai_api_key
|
8 |
-
import plotly.graph_objs as go
|
9 |
-
import json
|
10 |
|
11 |
# Load the model
|
12 |
llm = load_model(openai_api_key)
|
13 |
|
|
|
14 |
def analyze_video(video_path, max_speakers, progress=gr.Progress()):
|
15 |
start_time = time.time()
|
16 |
if not video_path:
|
17 |
-
return
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
|
34 |
-
|
35 |
-
|
36 |
-
|
37 |
-
|
38 |
-
|
39 |
-
|
40 |
-
|
41 |
-
|
42 |
-
|
43 |
-
|
44 |
-
|
45 |
-
"
|
46 |
-
"
|
47 |
-
|
48 |
-
|
49 |
-
|
50 |
-
|
51 |
-
|
52 |
-
|
53 |
-
|
54 |
-
|
55 |
-
|
56 |
-
|
57 |
-
|
58 |
-
|
59 |
-
|
60 |
-
|
61 |
-
|
62 |
-
|
63 |
-
|
64 |
-
|
65 |
-
|
66 |
-
|
67 |
-
|
68 |
-
gr.Plot(label="Personality", visible=True),
|
69 |
-
gr.Textbox(label="Personality Disorders Explanation", visible=True)
|
70 |
-
]
|
71 |
-
tab_components.extend(speaker_components)
|
72 |
-
|
73 |
-
execution_info = gr.Textbox(label="Execution Information", visible=True)
|
74 |
-
return row, transcript, tab_components, execution_info
|
75 |
-
|
76 |
-
def run_analysis(video_path, max_speakers):
|
77 |
-
results = analyze_video(video_path, max_speakers)
|
78 |
-
|
79 |
-
if "error" in results:
|
80 |
-
return [gr.update(value=results["error"], visible=True)] + [gr.update(visible=False)] * 24 + [gr.update(value="Analysis failed", visible=True)]
|
81 |
-
|
82 |
-
transcript = results["transcript"]
|
83 |
-
execution_info = f"Completed in {results['execution_time']} seconds."
|
84 |
-
|
85 |
-
tab_updates = []
|
86 |
-
for i in range(3): # For each potential speaker
|
87 |
-
if i < len(results["charts"]):
|
88 |
-
speaker_id = list(results["charts"].keys())[i]
|
89 |
-
speaker_charts = results["charts"][speaker_id]
|
90 |
-
speaker_explanations = results["explanations"][speaker_id]
|
91 |
-
|
92 |
-
tab_updates.extend([
|
93 |
-
gr.update(value=f"## {speaker_id}", visible=True), # Markdown
|
94 |
-
gr.update(value=speaker_charts.get("attachment", go.Figure()), visible=True), # Attachment plot
|
95 |
-
gr.update(value=speaker_explanations.get("attachment", ""), visible=True), # Attachment explanation
|
96 |
-
gr.update(value=speaker_charts.get("dimensions", go.Figure()), visible=True), # Dimensions plot
|
97 |
-
gr.update(value=speaker_charts.get("bigfive", go.Figure()), visible=True), # Big Five plot
|
98 |
-
gr.update(value=speaker_explanations.get("bigfive", ""), visible=True), # Big Five explanation
|
99 |
-
gr.update(value=speaker_charts.get("personality", go.Figure()), visible=True), # Personality plot
|
100 |
-
gr.update(value=speaker_explanations.get("personality", ""), visible=True), # Personality explanation
|
101 |
-
])
|
102 |
-
else:
|
103 |
-
tab_updates.extend([gr.update(visible=False)] * 8) # Hide unused tab components
|
104 |
-
|
105 |
-
return [gr.update(value=transcript, visible=True)] + tab_updates + [gr.update(value=execution_info, visible=True)]
|
106 |
|
107 |
with gr.Blocks() as iface:
|
108 |
gr.Markdown("# AI Personality Detection")
|
@@ -112,12 +73,33 @@ with gr.Blocks() as iface:
|
|
112 |
max_speakers = gr.Slider(minimum=1, maximum=3, step=1, value=2, label="Maximum Number of Speakers")
|
113 |
analyze_button = gr.Button("Analyze")
|
114 |
|
115 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
116 |
|
117 |
analyze_button.click(
|
118 |
-
fn=
|
119 |
inputs=[video_input, max_speakers],
|
120 |
-
outputs=
|
121 |
show_progress=True
|
122 |
)
|
123 |
|
|
|
5 |
from visualization import create_charts
|
6 |
import time
|
7 |
from config import openai_api_key
|
|
|
|
|
8 |
|
9 |
# Load the model
|
10 |
llm = load_model(openai_api_key)
|
11 |
|
12 |
+
|
13 |
def analyze_video(video_path, max_speakers, progress=gr.Progress()):
    """Run the full personality-analysis pipeline on an uploaded video.

    Pipeline: diarize/transcribe the audio, run the LLM analysis over the
    transcript, render per-speaker charts, and return a flat list of Gradio
    components (transcript box, up to 3 speaker sections of 8 components
    each, execution-info box).

    Args:
        video_path: Path to the uploaded video file; falsy when no upload.
        max_speakers: Maximum number of speakers for diarization.
        progress: Gradio progress tracker, injected automatically by Gradio.

    Returns:
        list: Exactly 50 items — 1 transcript component, 48 speaker-section
        slots (unused slots hidden via ``gr.update(visible=False)``), and
        1 execution-info component.

    NOTE(review): the Blocks UI below registers its own ``output_components``
    list for ``analyze_button.click`` with a different count and ordering
    (execution info first, transcript last) — confirm the two lists line up,
    otherwise Gradio will reject the return value at runtime.
    """
    start_time = time.time()

    if not video_path:
        # Bug fix: this path previously returned 1 + 49 + 1 = 51 items while
        # the success path always returns 50, which makes Gradio fail with an
        # output-count mismatch. Mirror the success layout: 1 message slot,
        # 48 hidden speaker slots, 1 status message (1 + 48 + 1 = 50).
        return ([gr.Markdown("Please upload a video file.")]
                + [gr.update(visible=False)] * 48
                + ["Analysis not started."])

    progress(0, desc="Starting analysis...")
    progress(0.2, desc="Starting transcription and diarization")
    transcription = diarize_audio(video_path, max_speakers)
    print("Transcription:", transcription)  # Debug print
    progress(0.5, desc="Transcription and diarization complete.")

    progress(0.6, desc="Processing transcription")
    results = process_input(transcription, llm)
    progress(0.7, desc="Transcription processing complete.")

    progress(0.9, desc="Generating charts")
    charts, explanations = create_charts(results)
    progress(1.0, desc="Charts generation complete.")

    execution_time = time.time() - start_time

    output_components = []

    # Transcript goes first so it appears near the top of the results.
    output_components.append(
        gr.Textbox(value=transcription, label="Transcript", lines=10, visible=True))

    # One 8-component section per detected speaker.
    for speaker_id, speaker_charts in charts.items():
        speaker_explanations = explanations[speaker_id]
        output_components.extend([
            gr.Markdown(f"## {speaker_id}<hr>", visible=True),
            gr.Plot(value=speaker_charts.get("attachment", None), visible=True),
            gr.Textbox(value=speaker_explanations.get("attachment", ""),
                       label="Attachment Styles Explanation", visible=True),
            gr.Plot(value=speaker_charts.get("dimensions", None), visible=True),
            gr.Plot(value=speaker_charts.get("bigfive", None), visible=True),
            gr.Textbox(value=speaker_explanations.get("bigfive", ""),
                       label="Big Five Traits Explanation", visible=True),
            gr.Plot(value=speaker_charts.get("personality", None), visible=True),
            gr.Textbox(value=speaker_explanations.get("personality", ""),
                       label="Personality Disorders Explanation", visible=True),
        ])

    # Pad hidden slots so the list always covers 3 speaker sections
    # (1 transcript + 3 * 8 = 49 items before the execution-info box).
    while len(output_components) < 49:
        output_components.extend([gr.update(visible=False)] * 8)

    # Add execution info
    output_components.append(
        gr.Textbox(value=f"Completed in {int(execution_time)} seconds.",
                   label="Execution Information", visible=True))

    return output_components
66 |
+
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
67 |
|
68 |
with gr.Blocks() as iface:
|
69 |
gr.Markdown("# AI Personality Detection")
|
|
|
73 |
max_speakers = gr.Slider(minimum=1, maximum=3, step=1, value=2, label="Maximum Number of Speakers")
|
74 |
analyze_button = gr.Button("Analyze")
|
75 |
|
76 |
+
# Create output components
|
77 |
+
output_components = []
|
78 |
+
|
79 |
+
# Add transcript output near the top
|
80 |
+
execution_info_box = gr.Textbox(label="Execution Information", value="N/A", lines=1)
|
81 |
+
output_components.append(execution_info_box)
|
82 |
+
|
83 |
+
for _ in range(3): # Assuming maximum of 3 speakers
|
84 |
+
output_components.extend([
|
85 |
+
gr.Markdown(visible=False),
|
86 |
+
gr.Plot(visible=False),
|
87 |
+
gr.Textbox(label="Attachment Styles Explanation", visible=False),
|
88 |
+
gr.Plot(visible=False),
|
89 |
+
gr.Plot(visible=False),
|
90 |
+
gr.Textbox(label="Big Five Traits Explanation", visible=False),
|
91 |
+
gr.Plot(visible=False),
|
92 |
+
gr.Textbox(label="Personality Disorders Explanation", visible=False),
|
93 |
+
])
|
94 |
+
|
95 |
+
# Add execution info component
|
96 |
+
transcript_output = gr.Textbox(label="Transcript", lines=10, visible=False)
|
97 |
+
output_components.append(transcript_output)
|
98 |
|
99 |
analyze_button.click(
|
100 |
+
fn=analyze_video,
|
101 |
inputs=[video_input, max_speakers],
|
102 |
+
outputs=output_components,
|
103 |
show_progress=True
|
104 |
)
|
105 |
|