Spaces: Runtime error
Update app.py
app.py CHANGED
@@ -5,6 +5,7 @@ from transcription_diarization import diarize_audio
 from visualization import create_charts
 import time
 from config import openai_api_key
+import plotly.io as pio
 
 # Load the model
 llm = load_model(openai_api_key)
@@ -45,12 +46,12 @@ def analyze_video(video_path, progress=gr.Progress()):
     output_components.extend([
         f"## {speaker_id}", # speaker header
         speaker_general_impression, # speaker impression
-        speaker_charts["attachment"], # attachment plot
+        pio.to_json(speaker_charts["attachment"]), # attachment plot
         speaker_explanations["attachment"], # attachment explanation
-        speaker_charts["dimensions"], # dimensions plot
-        speaker_charts["bigfive"], # bigfive plot
+        pio.to_json(speaker_charts["dimensions"]), # dimensions plot
+        pio.to_json(speaker_charts["bigfive"]), # bigfive plot
         speaker_explanations["bigfive"], # bigfive explanation
-        speaker_charts["personality"], # personality plot
+        pio.to_json(speaker_charts["personality"]), # personality plot
         speaker_explanations["personality"], # personality explanation
     ])
 
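
Note: the hunk above switches the four chart outputs from live Plotly Figure objects to JSON strings via plotly.io.to_json. A minimal sketch of that serialization round trip (the bar chart and its data are illustrative, not taken from the Space's create_charts):

import plotly.graph_objects as go
import plotly.io as pio

# Illustrative figure; in the Space the charts come from visualization.create_charts.
fig = go.Figure(data=[go.Bar(x=["secure", "anxious", "avoidant"], y=[0.6, 0.3, 0.1])])

# Serialize the figure to a JSON string, as the updated analyze_video now
# does for the attachment, dimensions, bigfive, and personality charts.
fig_json = pio.to_json(fig)

# The receiving side can rebuild an equivalent Figure from that string.
restored = pio.from_json(fig_json)
assert restored.data[0].x == ("secure", "anxious", "avoidant")

Gradio's gr.Plot accepts Plotly Figure objects directly, so converting to JSON here looks like a workaround for a figure-serialization failure in the event pipeline rather than something the API requires.
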
@@ -69,68 +70,7 @@ def use_example():
     return "examples/Scenes.From.A.Marriage.US.mp4"
 
 with gr.Blocks() as iface:
-
-
-    with gr.Row():
-        with gr.Column(scale=3):
-            gr.Markdown("Upload a video")
-            video_input = gr.Video(label="Upload Video")
-            analyze_button = gr.Button("Analyze")
-        with gr.Column(scale=1):
-            gr.Markdown("Example Video")
-            example_video = gr.Video("examples/Scenes.From.A.Marriage.US.mp4", label="Example Video")
-            use_example_button = gr.Button("Use Example Video")
-
-    # Create placeholder components for output
-    with gr.Column() as output_container:
-        transcript_output = gr.Textbox(label="Transcript", lines=10, visible=False)
-        speaker_outputs = []
-        for i in range(1, 4): # Assuming a maximum of 3 speakers
-            with gr.Column(visible=False) as speaker_column:
-                speaker_header = gr.Markdown(f"## Speaker {i}")
-                speaker_impression = gr.Textbox(label="General Impression", lines=3)
-                speaker_attachment = gr.Plot(label="Attachment Styles")
-                speaker_attachment_exp = gr.Textbox(label="Attachment Styles Explanation")
-                speaker_dimensions = gr.Plot(label="Attachment Dimensions")
-                speaker_bigfive = gr.Plot(label="Big Five Traits")
-                speaker_bigfive_exp = gr.Textbox(label="Big Five Traits Explanation")
-                speaker_personality = gr.Plot(label="Personality Disorders")
-                speaker_personality_exp = gr.Textbox(label="Personality Disorders Explanation")
-                speaker_outputs.extend([
-                    speaker_header, speaker_impression, speaker_attachment, speaker_attachment_exp,
-                    speaker_dimensions, speaker_bigfive, speaker_bigfive_exp, speaker_personality,
-                    speaker_personality_exp
-                ])
-        execution_info = gr.Textbox(label="Execution Information", visible=True)
-
-    all_outputs = [transcript_output] + speaker_outputs + [execution_info]
-
-    analyze_button.click(
-        fn=analyze_video,
-        inputs=[video_input],
-        outputs=all_outputs,
-        show_progress=True
-    ).then(
-        fn=update_output,
-        inputs=all_outputs,
-        outputs=all_outputs
-    )
-
-    use_example_button.click(
-        fn=use_example,
-        inputs=[],
-        outputs=[video_input],
-    ).then(
-        fn=analyze_video,
-        inputs=[video_input],
-        outputs=all_outputs,
-        show_progress=True
-    ).then(
-        fn=update_output,
-        inputs=all_outputs,
-        outputs=all_outputs
-    )
+    # ... (rest of the interface definition remains the same)
 
 if __name__ == "__main__":
-    iface.launch()
-
+    iface.launch()
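
Note: the second hunk leaves with gr.Blocks() as iface: followed only by a comment. A comment is not a statement in Python, so the block is empty and the file fails to parse (IndentationError: expected an indented block), which is consistent with the Runtime error status at the top of this page. If a placeholder was intended, the block needs at least one real statement, e.g. pass; a minimal sketch:

import gradio as gr

with gr.Blocks() as iface:
    # ... (rest of the interface definition remains the same)
    pass  # a comment alone cannot stand in for the block body

if __name__ == "__main__":
    iface.launch()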