Update app.py
app.py
CHANGED
@@ -5,7 +5,6 @@ from transcription_diarization import diarize_audio
 from visualization import create_charts
 import time
 from config import openai_api_key
-import json
 
 # Load the model
 llm = load_model(openai_api_key)
@@ -25,12 +24,9 @@ def analyze_video(video_path, max_speakers, progress=gr.Progress()):
     progress(0.7, desc="Transcription processing complete.")
 
     progress(0.9, desc="Generating charts")
-    charts, explanations = create_charts(
+    charts, explanations = create_charts(results)
     progress(1.0, desc="Charts generation complete.")
 
-    end_time = time.time()
-    execution_time = end_time - start_time
-
     output = {
         "transcript": transcription,
         "speakers": charts,
@@ -41,7 +37,7 @@ def analyze_video(video_path, max_speakers, progress=gr.Progress()):
 
 def update_interface(result, max_speakers):
     if "error" in result:
-        return [result["error"]
+        return [result["error"]] + [gr.update(visible=False)] * 47
 
     outputs = [result["transcript"]]  # Transcript
 
@@ -49,10 +45,9 @@ def update_interface(result, max_speakers):
     outputs.extend([gr.update(visible=True) for _ in range(4)])  # Make section titles visible
 
     for i in range(3):
-
-
-
-        speaker_explanations = result["explanations"][speaker_id]
+        if i < max_speakers and i in result["speakers"]:
+            speaker_data = result["speakers"][i]
+            speaker_explanations = result["explanations"][i]
             outputs.extend([
                 gr.update(value=speaker_data["attachment"], visible=True),
                 gr.update(visible=True),  # Column visibility
@@ -79,13 +74,11 @@ with gr.Blocks() as iface:
     gr.Markdown("Upload a video")
 
     with gr.Row():
-
-
-
-
-
-        with gr.Column(scale=2):
-            transcript_output = gr.Textbox(label="Transcript", lines=10)
+        video_input = gr.Video(label="Upload Video")
+        max_speakers = gr.Slider(minimum=1, maximum=3, step=1, value=2, label="Maximum Number of Speakers")
+
+        analyze_button = gr.Button("Analyze")
+        transcript_output = gr.Textbox(label="Transcript", lines=10)
 
     # Section titles (initially hidden)
     attachment_styles_title = gr.Markdown("## Attachment Styles", visible=False)
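For readers following the fix, here is a minimal sketch of how `update_interface` reads after this commit, assembled only from the hunks above. The error-path padding to 47 hidden updates, the index-based speaker lookup, and the `speaker_data["attachment"]` key come from the diff; the `else` branch that hides unused speaker slots, the two-component-per-speaker count, and the truncation comment are assumptions for illustration, not the full app.

```python
import gradio as gr

# Sketch of update_interface after this commit (assumption: the Blocks app
# wires 48 outputs in total -- the transcript textbox plus 47 components
# that must be hidden when analysis fails, hence the "* 47" padding).
def update_interface(result, max_speakers):
    if "error" in result:
        # Keep the returned list the same length as the interface's output
        # list, otherwise Gradio raises a return-value count mismatch.
        return [result["error"]] + [gr.update(visible=False)] * 47

    outputs = [result["transcript"]]               # transcript textbox
    outputs.extend([gr.update(visible=True)] * 4)  # section titles

    for i in range(3):
        if i < max_speakers and i in result["speakers"]:
            # Speakers are now keyed by the loop index i (the old
            # speaker_id lookup was removed in this commit).
            speaker_data = result["speakers"][i]
            speaker_explanations = result["explanations"][i]
            outputs.extend([
                gr.update(value=speaker_data["attachment"], visible=True),
                gr.update(visible=True),  # column visibility
            ])
            # (further chart/explanation outputs follow in the full app)
        else:
            # Assumption: unused speaker slots are hidden so the output
            # count stays constant regardless of max_speakers.
            outputs.extend([gr.update(visible=False)] * 2)
    return outputs
```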