reab5555 committed (verified)
Commit e77657a · Parent(s): 29e2af6

Update app.py

Files changed (1):
  1. app.py +73 -53
app.py CHANGED
@@ -9,12 +9,13 @@ from config import openai_api_key
 # Load the model
 llm = load_model(openai_api_key)
 
-
 def analyze_video(video_path, progress=gr.Progress()):
     start_time = time.time()
     if not video_path:
-        return [gr.Markdown("Please upload a video file.")] + [gr.update(visible=False)] * 49 + [
-            "Analysis not started."]
+        return {
+            "transcript": gr.Textbox(value="Please upload a video file.", label="Error"),
+            "execution_info": gr.Textbox(value="Analysis not started.", label="Execution Information")
+        }
 
     progress(0, desc="Starting analysis...")
     progress(0.2, desc="Starting transcription and diarization")
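A note on the new error path: it returns a dict keyed by plain strings, whereas when a Blocks handler returns a dict, Gradio expects the keys to be the output component objects themselves. A minimal runnable sketch of that built-in convention (component and handler names here are illustrative, not from this repo):

import gradio as gr

with gr.Blocks() as demo:
    video = gr.Video(label="Video")
    btn = gr.Button("Analyze")
    transcript_box = gr.Textbox(label="Transcript", visible=False)
    info_box = gr.Textbox(label="Execution Information", visible=False)

    def check(video_path):
        # A dict keyed by component objects may update any subset
        # of the outputs declared on the event below.
        if not video_path:
            return {
                transcript_box: gr.update(value="Please upload a video file.", visible=True),
                info_box: gr.update(value="Analysis not started.", visible=True),
            }
        return {
            transcript_box: gr.update(value="(transcription goes here)", visible=True),
            info_box: gr.update(value="Completed.", visible=True),
        }

    btn.click(fn=check, inputs=[video], outputs=[transcript_box, info_box])

demo.launch()

Returning a dict this way lets a handler update a subset of its declared outputs without padding the return value to a fixed length.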
@@ -33,33 +34,54 @@ def analyze_video(video_path, progress=gr.Progress()):
     end_time = time.time()
     execution_time = end_time - start_time
 
-    output_components = []
-
-    # Add transcript
-    output_components.append(gr.Textbox(value=transcription, label="Transcript", lines=10, visible=True))
-
-    for speaker_id, speaker_charts in charts.items():
+    output_components = {
+        "transcript": gr.Textbox(value=transcription, label="Transcript", lines=10),
+    }
+
+    for i, (speaker_id, speaker_charts) in enumerate(charts.items(), start=1):
         speaker_explanations = explanations[speaker_id]
         speaker_general_impression = general_impressions[speaker_id]
-        speaker_section = [
-            gr.Markdown(f"## {speaker_id}", visible=True),
-            gr.Textbox(value=speaker_general_impression, label="General Impression", lines=3, visible=True),
-            gr.Plot(value=speaker_charts["attachment"], visible=True),
-            gr.Textbox(value=speaker_explanations["attachment"], label="Attachment Styles Explanation", visible=True),
-            gr.Plot(value=speaker_charts["dimensions"], visible=True),
-            gr.Plot(value=speaker_charts["bigfive"], visible=True),
-            gr.Textbox(value=speaker_explanations["bigfive"], label="Big Five Traits Explanation", visible=True),
-            gr.Plot(value=speaker_charts["personality"], visible=True),
-            gr.Textbox(value=speaker_explanations["personality"], label="Personality Disorders Explanation", visible=True),
-        ]
-        output_components.extend(speaker_section)
-
-    # Add execution info
-    output_components.append(
-        gr.Textbox(value=f"Completed in {int(execution_time)} seconds.", label="Execution Information", visible=True))
+        output_components.update({
+            f"speaker_{i}_header": gr.Markdown(f"## {speaker_id}"),
+            f"speaker_{i}_impression": gr.Textbox(value=speaker_general_impression, label="General Impression", lines=3),
+            f"speaker_{i}_attachment": gr.Plot(value=speaker_charts["attachment"]),
+            f"speaker_{i}_attachment_exp": gr.Textbox(value=speaker_explanations["attachment"], label="Attachment Styles Explanation"),
+            f"speaker_{i}_dimensions": gr.Plot(value=speaker_charts["dimensions"]),
+            f"speaker_{i}_bigfive": gr.Plot(value=speaker_charts["bigfive"]),
+            f"speaker_{i}_bigfive_exp": gr.Textbox(value=speaker_explanations["bigfive"], label="Big Five Traits Explanation"),
+            f"speaker_{i}_personality": gr.Plot(value=speaker_charts["personality"]),
+            f"speaker_{i}_personality_exp": gr.Textbox(value=speaker_explanations["personality"], label="Personality Disorders Explanation"),
+        })
+
+    output_components["execution_info"] = gr.Textbox(value=f"Completed in {int(execution_time)} seconds.", label="Execution Information")
 
     return output_components
 
+def use_example():
+    return "examples/Scenes.From.A.Marriage.US.mp4"
+
+def update_output(components):
+    updates = []
+    updates.append(gr.update(value=components["transcript"].value, visible=True))
+    for i in range(1, 4):
+        if f"speaker_{i}_header" in components:
+            updates.append(gr.update(visible=True))
+            updates.extend([
+                gr.update(value=components[f"speaker_{i}_header"].value),
+                gr.update(value=components[f"speaker_{i}_impression"].value),
+                gr.update(value=components[f"speaker_{i}_attachment"].value),
+                gr.update(value=components[f"speaker_{i}_attachment_exp"].value),
+                gr.update(value=components[f"speaker_{i}_dimensions"].value),
+                gr.update(value=components[f"speaker_{i}_bigfive"].value),
+                gr.update(value=components[f"speaker_{i}_bigfive_exp"].value),
+                gr.update(value=components[f"speaker_{i}_personality"].value),
+                gr.update(value=components[f"speaker_{i}_personality_exp"].value),
+            ])
+        else:
+            updates.append(gr.update(visible=False))
+    updates.append(gr.update(value=components["execution_info"].value, visible=True))
+    return updates
+
 with gr.Blocks() as iface:
     gr.Markdown("# AI Personality Detection")
 
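The new update_output flattens the per-speaker results into one gr.update per output component, hiding the slots for speakers that were not detected. The constraint to keep in mind is positional: a list returned from a handler is matched against outputs= by order, and its length must equal the number of declared outputs. A self-contained sketch of the same pad-to-a-fixed-maximum pattern, assuming the 3-speaker cap used in the diff (names are illustrative):

import gradio as gr

MAX_SPEAKERS = 3  # fixed cap, mirroring the diff

def render(text):
    # Stand-in for real diarization: one "speaker" per input line.
    results = [s for s in text.splitlines() if s.strip()][:MAX_SPEAKERS]
    updates = []
    for i in range(MAX_SPEAKERS):
        if i < len(results):
            updates.append(gr.update(value=results[i], visible=True))
        else:
            # Hide unused slots; the list length must still match outputs=.
            updates.append(gr.update(visible=False))
    return updates

with gr.Blocks() as demo:
    inp = gr.Textbox(label="One speaker per line")
    btn = gr.Button("Render")
    slots = [gr.Textbox(label=f"Speaker {i + 1}", visible=False) for i in range(MAX_SPEAKERS)]
    btn.click(fn=render, inputs=[inp], outputs=slots)

demo.launch()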
 
@@ -73,39 +95,33 @@ with gr.Blocks() as iface:
     example_video = gr.Video("examples/Scenes.From.A.Marriage.US.mp4", label="Example Video")
     use_example_button = gr.Button("Use Example Video")
 
-    # Create output components
-    output_components = []
-
-    execution_info_box = gr.Textbox(label="Execution Information", value="N/A", lines=1)
-    output_components.append(execution_info_box)
-
-    for _ in range(3): # Assuming maximum of 3 speakers
-        output_components.extend([
-            gr.Markdown(visible=False),
-            gr.Textbox(label="General Impression", visible=False),
-            gr.Plot(visible=False),
-            gr.Textbox(label="Attachment Styles Explanation", visible=False),
-            gr.Plot(visible=False),
-            gr.Plot(visible=False),
-            gr.Textbox(label="Big Five Traits Explanation", visible=False),
-            gr.Plot(visible=False),
-            gr.Textbox(label="Personality Disorders Explanation", visible=False),
-        ])
-
-    # Add transcript output component
-    transcript_output = gr.Textbox(label="Transcript", lines=10, visible=False)
-    output_components.append(transcript_output)
-
-    def use_example():
-        return "examples/Scenes.From.A.Marriage.US.mp4"
-
-    output_container = gr.Column()
+    # Create placeholder components for output
+    with gr.Column() as output_container:
+        transcript_output = gr.Textbox(label="Transcript", lines=10, visible=False)
+        speaker_outputs = []
+        for i in range(1, 4): # Assuming a maximum of 3 speakers
+            with gr.Column(visible=False) as speaker_column:
+                gr.Markdown(f"## Speaker {i}")
+                gr.Textbox(label="General Impression", lines=3)
+                gr.Plot(label="Attachment Styles")
+                gr.Textbox(label="Attachment Styles Explanation")
+                gr.Plot(label="Attachment Dimensions")
+                gr.Plot(label="Big Five Traits")
+                gr.Textbox(label="Big Five Traits Explanation")
+                gr.Plot(label="Personality Disorders")
+                gr.Textbox(label="Personality Disorders Explanation")
+            speaker_outputs.append(speaker_column)
+        execution_info = gr.Textbox(label="Execution Information", visible=False)
 
     analyze_button.click(
         fn=analyze_video,
         inputs=[video_input],
-        outputs=output_container,
+        outputs=[transcript_output] + speaker_outputs + [execution_info],
         show_progress=True
+    ).then(
+        fn=update_output,
+        inputs=[gr.State(analyze_video)],
+        outputs=[transcript_output] + speaker_outputs + [execution_info],
     )
 
     use_example_button.click(
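Wrapping each speaker's widgets in gr.Column(visible=False) lets a single visibility update show or hide the whole section instead of toggling nine components one by one; layout blocks such as Column are valid targets in outputs= for exactly this purpose. A minimal sketch of the pattern, with a slider standing in for the detected speaker count (all names are illustrative):

import gradio as gr

def toggle(n):
    # One update per column, in the same order as outputs=:
    # show the first n sections, hide the rest.
    return [gr.update(visible=i < n) for i in range(3)]

with gr.Blocks() as demo:
    count = gr.Slider(0, 3, step=1, label="Speakers detected")
    cols = []
    for i in range(3):
        with gr.Column(visible=False) as col:
            gr.Markdown(f"## Speaker {i + 1}")
            gr.Textbox(label="General Impression")
        cols.append(col)
    count.change(fn=toggle, inputs=[count], outputs=cols)

demo.launch()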
@@ -115,8 +131,12 @@ with gr.Blocks() as iface:
     ).then(
         fn=analyze_video,
         inputs=[video_input],
-        outputs=output_container,
+        outputs=[transcript_output] + speaker_outputs + [execution_info],
         show_progress=True
+    ).then(
+        fn=update_output,
+        inputs=[gr.State(analyze_video)],
+        outputs=[transcript_output] + speaker_outputs + [execution_info],
     )
 
 if __name__ == "__main__":
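Both event chains pass gr.State(analyze_video) as the input to update_output, which seeds a fresh State from the analyze_video function object itself; the second step never receives what the first step actually returned. The usual hand-off is to declare one gr.State component, write it as an output of the first handler, and read it as an input of the second. A hedged sketch of that pattern with simplified stand-in handlers (names are illustrative, not from this repo):

import gradio as gr

def step_one(x):
    # Return the payload destined for State plus the visible first-pass output.
    payload = {"echo": x}
    return payload, f"analyzed: {x}"

def step_two(payload):
    # Receives whatever step_one stored in the shared State.
    return f"post-processed: {payload['echo']}"

with gr.Blocks() as demo:
    inp = gr.Textbox(label="Input")
    out1 = gr.Textbox(label="First pass")
    out2 = gr.Textbox(label="Second pass")
    state = gr.State()  # carries step_one's result to step_two
    btn = gr.Button("Run")
    btn.click(
        fn=step_one, inputs=[inp], outputs=[state, out1]
    ).then(
        fn=step_two, inputs=[state], outputs=[out2]
    )

demo.launch()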
 