Update app.py
app.py
CHANGED
@@ -13,7 +13,7 @@ llm = load_model(openai_api_key)
 def analyze_video(video_path, progress=gr.Progress()):
     start_time = time.time()
     if not video_path:
-        return [None] *
+        return [None] * 32  # Return None for all outputs
 
     progress(0, desc="Starting analysis...")
     progress(0.2, desc="Starting transcription and diarization")
@@ -37,32 +37,36 @@ def analyze_video(video_path, progress=gr.Progress()):
     output_components.append(f"Completed in {int(execution_time)} seconds.")
     output_components.append(gr.Textbox(value=transcription, label="Transcript", lines=10, visible=True))
 
-    for i
-
-
+    for i in range(3):  # Always process 3 speakers, even if some are empty
+        speaker_id = f"Speaker {i+1}"
+        speaker_charts = charts.get(speaker_id, {})
+        speaker_explanations = explanations.get(speaker_id, {})
+        speaker_general_impression = general_impressions.get(speaker_id, "")
 
         output_components.extend([
-            gr.Markdown(f"### {speaker_id}", visible=
-            gr.Textbox(value=speaker_general_impression, label="General Impression", visible=
-            gr.Plot(value=speaker_charts.get("attachment", None), visible=
-            gr.Plot(value=speaker_charts.get("dimensions", None), visible=
-            gr.Textbox(value=speaker_explanations.get("attachment", ""), label="Attachment Styles Explanation", visible=
-            gr.Plot(value=speaker_charts.get("bigfive", None), visible=
-            gr.Textbox(value=speaker_explanations.get("bigfive", ""), label="Big Five Traits Explanation", visible=
-            gr.Plot(value=speaker_charts.get("personality", None), visible=
-            gr.Textbox(value=speaker_explanations.get("personality", ""), label="Personality Disorders Explanation", visible=
+            gr.Markdown(f"### {speaker_id}", visible=bool(speaker_charts)),
+            gr.Textbox(value=speaker_general_impression, label="General Impression", visible=bool(speaker_charts), lines=10),
+            gr.Plot(value=speaker_charts.get("attachment", None), visible=bool(speaker_charts)),
+            gr.Plot(value=speaker_charts.get("dimensions", None), visible=bool(speaker_charts)),
+            gr.Textbox(value=speaker_explanations.get("attachment", ""), label="Attachment Styles Explanation", visible=bool(speaker_charts), lines=2),
+            gr.Plot(value=speaker_charts.get("bigfive", None), visible=bool(speaker_charts)),
+            gr.Textbox(value=speaker_explanations.get("bigfive", ""), label="Big Five Traits Explanation", visible=bool(speaker_charts), lines=2),
+            gr.Plot(value=speaker_charts.get("personality", None), visible=bool(speaker_charts)),
+            gr.Textbox(value=speaker_explanations.get("personality", ""), label="Personality Disorders Explanation", visible=bool(speaker_charts), lines=2)
         ])
 
-    #
-    while len(output_components) <
+    # Ensure we always return 32 components
+    while len(output_components) < 32:
         output_components.append(None)
 
     return output_components
 
 def use_example_1():
     return "examples/Scenes.From.A.Marriage.US.mp4"
+
 def use_example_2():
     return "examples/Billie Eilish.mp4"
+
 def use_example_3():
     return "examples/Elliot Rodger.mp4"
 
@@ -80,7 +84,7 @@ def get_middle_frame(video_path):
     return None
 
 def clear_outputs():
-    return [None] *
+    return [None] * 32
 
 with gr.Blocks() as iface:
     gr.Markdown("# Multiple-Speakers-Personality-Analyzer")
@@ -93,14 +97,13 @@ with gr.Blocks() as iface:
 
     # Create output components
     output_components = []
-    # Add transcript output near the top
     execution_box = gr.Textbox(label="Execution Info", value="N/A", lines=1)
     output_components.append(execution_box)
 
     transcript = gr.Textbox(label="Transcript", lines=10, visible=False)
     output_components.append(transcript)
 
-    for n in range(3): #
+    for n in range(3):  # Always create 3 speaker tabs
         with gr.Tab(label=f'Speaker {n + 1}', visible=False) as tab:
             gr.Markdown(visible=False)
             gr.Textbox(label="General Impression", visible=False)
@@ -113,14 +116,7 @@ with gr.Blocks() as iface:
             gr.Textbox(label="Personality Disorders Explanation", visible=False)
         output_components.extend([tab] + [component for component in tab.children])
 
-    with open('description.txt', 'r') as file:
-        description_txt = file.read()
-    with gr.Tab(label=f'Description', visible=True):
-        gr.Markdown(description_txt)
-        gr.HTML("<div style='height: 20px;'></div>")
-        gr.Image(value="appendix/AI Personality Detection flow - 1.png", label='Flowchart 1', width=1000)
-        gr.Image(value="appendix/AI Personality Detection flow - 2.png", label='Flowchart 2', width=1000)
-
+
     gr.Markdown("### Example Videos")
     with gr.Row():
         with gr.Column(scale=1):
@@ -144,6 +140,15 @@ with gr.Blocks() as iface:
             example_video_3 = gr.Video(example_video_3_path, label="Example 3", visible=False)
             use_example_button_3 = gr.Button("Load Example 3")
 
+    with open('description.txt', 'r') as file:
+        description_txt = file.read()
+    with gr.Tab(label=f'Description', visible=True):
+        gr.Markdown(description_txt)
+        gr.HTML("<div style='height: 20px;'></div>")
+        gr.Image(value="appendix/AI Personality Detection flow - 1.png", label='Flowchart 1', width=1000)
+        gr.Image(value="appendix/AI Personality Detection flow - 2.png", label='Flowchart 2', width=1000)
+
+
     analyze_button.click(
         fn=clear_outputs,
         inputs=[],
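Note on the recurring 32: the Blocks layout appears to register 32 output components in output_components (the execution-info box, the transcript box, and three hidden speaker tabs with their child components), and a Gradio event handler wired to a fixed outputs list must return one value per registered component. That is why analyze_video pads its result list with None and why clear_outputs returns [None] * 32. Below is a minimal sketch of that pattern, not the Space's actual code; the names and the output count are illustrative.

import gradio as gr

N_OUTPUTS = 4  # stand-in for the Space's 32 registered output components

def analyze(text):
    # Returning a new component instance updates the matching pre-created
    # component (value and visibility), as the diff's handler does.
    results = [gr.Textbox(value=f"{len(text.split())} words", visible=True)]
    # Pad with None so the list length matches the registered outputs.
    while len(results) < N_OUTPUTS:
        results.append(None)
    return results

def clear():
    # Reset every registered output, mirroring clear_outputs in the diff.
    return [None] * N_OUTPUTS

with gr.Blocks() as demo:
    inp = gr.Textbox(label="Input")
    run = gr.Button("Analyze")
    # Pre-create every output component, hidden until the handler reveals it.
    outputs = [gr.Textbox(label=f"Result {i + 1}", visible=False) for i in range(N_OUTPUTS)]
    run.click(fn=clear, inputs=[], outputs=outputs)
    run.click(fn=analyze, inputs=inp, outputs=outputs)

if __name__ == "__main__":
    demo.launch()

Keeping the component count fixed and toggling visibility is what lets the app handle a variable number of detected speakers: the number of outputs must be known when the event is wired up, so unused speaker slots are simply left hidden and padded with None.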