reab5555 committed
Commit 4c637de · verified · 1 Parent(s): 38e37ac

Update app.py

Files changed (1):
  app.py  +24 -13
app.py CHANGED
@@ -12,7 +12,7 @@ llm = load_model(openai_api_key)
 def analyze_video(video_path, progress=gr.Progress()):
     start_time = time.time()
     if not video_path:
-        return [None] * 29  # Return None for all outputs
+        return [{"value": None, "visible": False}] * 29  # Return None for all outputs
 
     progress(0, desc="Starting analysis...")
     progress(0.2, desc="Starting transcription and diarization")
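For context, the early return now emits one {"value": None, "visible": False} entry per output slot, so the interface can hide every component when no video is supplied instead of just clearing it. The count of 29 follows from the layout implied further down in this diff (one transcript slot, padding in blocks of 9 up to 28, then one execution-info slot); a small sketch of that arithmetic, illustrative only and not part of app.py:

# Output layout implied by the diff (constant names are illustrative):
# 1 transcript slot + 3 speaker blocks of 9 components + 1 execution-info slot.
COMPONENTS_PER_SPEAKER = 9
NUM_SPEAKER_BLOCKS = 3          # padding loop runs while len(output_components) < 28
TOTAL_OUTPUTS = 1 + NUM_SPEAKER_BLOCKS * COMPONENTS_PER_SPEAKER + 1
assert TOTAL_OUTPUTS == 29      # matches the * 29 in the early return above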
@@ -36,32 +36,35 @@ def analyze_video(video_path, progress=gr.Progress()):
     execution_time = end_time - start_time
 
     output_components = [
-        transcription,  # transcript
+        {"value": transcription, "visible": True},  # transcript
     ]
 
     for i, (speaker_id, speaker_charts) in enumerate(charts.items(), start=1):
         speaker_explanations = explanations[speaker_id]
         speaker_general_impression = general_impressions[speaker_id]
         output_components.extend([
-            f"## {speaker_id}",  # speaker header
-            speaker_general_impression,  # speaker impression
-            speaker_charts["attachment"],  # attachment plot
-            speaker_explanations["attachment"],  # attachment explanation
-            speaker_charts["dimensions"],  # dimensions plot
-            speaker_charts["bigfive"],  # bigfive plot
-            speaker_explanations["bigfive"],  # bigfive explanation
-            speaker_charts["personality"],  # personality plot
-            speaker_explanations["personality"],  # personality explanation
+            {"value": f"## {speaker_id}", "visible": True},  # speaker header
+            {"value": speaker_general_impression, "visible": True},  # speaker impression
+            {"value": speaker_charts["attachment"], "visible": True},  # attachment plot
+            {"value": speaker_explanations["attachment"], "visible": True},  # attachment explanation
+            {"value": speaker_charts["dimensions"], "visible": True},  # dimensions plot
+            {"value": speaker_charts["bigfive"], "visible": True},  # bigfive plot
+            {"value": speaker_explanations["bigfive"], "visible": True},  # bigfive explanation
+            {"value": speaker_charts["personality"], "visible": True},  # personality plot
+            {"value": speaker_explanations["personality"], "visible": True},  # personality explanation
         ])
 
     # Pad with None for any missing speakers
     while len(output_components) < 28:
-        output_components.extend([None] * 9)
+        output_components.extend([{"value": None, "visible": False}] * 9)
 
-    output_components.append(f"Completed in {int(execution_time)} seconds.")  # execution info
+    output_components.append({"value": f"Completed in {int(execution_time)} seconds.", "visible": True})  # execution info
 
     return output_components
 
+def update_output(outputs):
+    return [gr.update(value=output["value"], visible=output["visible"]) for output in outputs]
+
 def use_example():
     return "examples/Scenes.From.A.Marriage.US.mp4"
 
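For context, the new update_output helper added in this hunk turns each {"value", "visible"} dict into a gr.update(...) call, so a single pass sets both the displayed value and the visibility of the corresponding output component. A minimal usage sketch, assuming Gradio's standard update mechanism; the example data is illustrative, not taken from app.py:

import gradio as gr

def update_output(outputs):
    # One gr.update per output component: set its value and show or hide it.
    return [gr.update(value=o["value"], visible=o["visible"]) for o in outputs]

# e.g. a visible transcript slot followed by a hidden padding slot
updates = update_output([
    {"value": "transcript text", "visible": True},
    {"value": None, "visible": False},
])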
 
@@ -107,6 +110,10 @@ with gr.Blocks() as iface:
         inputs=[video_input],
         outputs=all_outputs,
         show_progress=True
+    ).then(
+        fn=update_output,
+        inputs=gr.State(lambda: analyze_video(video_input.value)),
+        outputs=all_outputs
     )
 
     use_example_button.click(
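As background, chaining .then(...) onto a .click(...) event is Gradio's way of running a second callback after the first one completes, against the same or different components. A minimal sketch of the pattern with illustrative component names (btn, txt), not the ones used in app.py:

import gradio as gr

with gr.Blocks() as demo:
    btn = gr.Button("Run")
    txt = gr.Textbox(label="Result")
    # The first callback fills the textbox; the chained one runs afterwards.
    btn.click(fn=lambda: "step one", outputs=txt).then(
        fn=lambda s: s + " then step two", inputs=txt, outputs=txt
    )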
@@ -118,6 +125,10 @@ with gr.Blocks() as iface:
         inputs=[video_input],
         outputs=all_outputs,
         show_progress=True
+    ).then(
+        fn=update_output,
+        inputs=gr.State(lambda: analyze_video(video_input.value)),
+        outputs=all_outputs
     )
 
 if __name__ == "__main__":
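For reference, gr.State (used as the .then input in both click handlers above) holds a per-session value that is passed to callbacks without being rendered in the page. A minimal, self-contained sketch of its usual usage; the names (clicks, btn, shown) are illustrative and not taken from app.py:

import gradio as gr

with gr.Blocks() as demo:
    clicks = gr.State(0)               # per-session value, never displayed
    btn = gr.Button("Increment")
    shown = gr.Number(label="Clicks")

    def increment(n):
        # Return the new state value and the value to display.
        return n + 1, n + 1

    btn.click(fn=increment, inputs=clicks, outputs=[clicks, shown])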
 