reab5555 committed on
Commit
41754b5
·
verified ·
1 Parent(s): 372b9fd

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +41 -12
app.py CHANGED
@@ -13,6 +13,16 @@ from config import openai_api_key
13
  # Load the model
14
  llm = load_model(openai_api_key)
15
 
 
 
 
 
 
 
 
 
 
 
16
  def analyze_video(video_path, progress=gr.Progress()):
17
  start_time = time.time()
18
  if not video_path:
@@ -36,7 +46,8 @@ def analyze_video(video_path, progress=gr.Progress()):
36
  execution_time = end_time - start_time
37
 
38
  output_components = [] # transcript
39
-
 
40
  output_components.append(f"Completed in {int(execution_time)} seconds.")
41
  output_components.append(gr.Textbox(value=transcription, label="Transcript", lines=10, visible=True))
42
 
@@ -44,6 +55,21 @@ def analyze_video(video_path, progress=gr.Progress()):
44
  print(speaker_id)
45
  speaker_explanations = explanations[speaker_id]
46
  speaker_general_impression = general_impressions[speaker_id]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
47
 
48
  with gr.Tab(visible=True):
49
  with gr.TabItem(label=f'General Impression'):
@@ -73,15 +99,16 @@ def analyze_video(video_path, progress=gr.Progress()):
73
  gr.Textbox(value=speaker_explanations.get("personality", ""),
74
  label="Personality Disorders Explanation", visible=True, lines=2)
75
  ]
76
-
 
77
  output_components.extend(speaker_section1)
78
  output_components.extend(speaker_section2)
79
  output_components.extend(speaker_section3)
80
  output_components.extend(speaker_section4)
81
 
82
  # Pad with None for any missing speakers
83
- while len(output_components) < 28:
84
- output_components.extend([gr.update(visible=False)] * 9)
85
 
86
  return output_components
87
 
@@ -140,19 +167,20 @@ with gr.Blocks() as iface:
140
  for n in range(3): # Assuming maximum of 3 speakers
141
 
142
  with gr.Tab(label=f'Speaker {n + 1}', visible=True):
 
 
 
 
 
 
 
 
 
143
  with gr.TabItem(label=f'General Impression'):
144
  column_components1 = [
145
  gr.Markdown(visible=False),
146
  gr.Textbox(label="General Impression")]
147
 
148
-
149
- with gr.Tab("Interactive Interview"):
150
- chatbot = gr.Chatbot()
151
- msg = gr.Textbox()
152
- clear = gr.Button("Clear")
153
- start_interview = gr.Button("Start Interview")
154
-
155
-
156
  with gr.TabItem(label=f'Attachment Styles'):
157
  column_components2 = [
158
  gr.Plot(visible=False),
@@ -169,6 +197,7 @@ with gr.Blocks() as iface:
169
  gr.Plot(visible=False),
170
  gr.Textbox(label="Personality Disorders Explanation")]
171
 
 
172
  output_components.extend(column_components1)
173
  output_components.extend(column_components2)
174
  output_components.extend(column_components3)
 
13
  # Load the model
14
  llm = load_model(openai_api_key)
15
 
16
+ def process_message(message, history, speaker_id):
17
+ interview = get_interview_instance()
18
+ response = interview.process_message(message)
19
+ return response
20
+
21
+ def start_new_interview(general_impression, speaker_id):
22
+ interview = get_interview_instance()
23
+ interview.set_general_impression(general_impression)
24
+ return interview.start_interview()
25
+
26
  def analyze_video(video_path, progress=gr.Progress()):
27
  start_time = time.time()
28
  if not video_path:
 
46
  execution_time = end_time - start_time
47
 
48
  output_components = [] # transcript
49
+ chatbots = []
50
+
51
  output_components.append(f"Completed in {int(execution_time)} seconds.")
52
  output_components.append(gr.Textbox(value=transcription, label="Transcript", lines=10, visible=True))
53
 
 
55
  print(speaker_id)
56
  speaker_explanations = explanations[speaker_id]
57
  speaker_general_impression = general_impressions[speaker_id]
58
+
59
+ with gr.TabItem(label=f'Interactive Interview'):
60
+ chatbot = gr.Chatbot(visible=True)
61
+ msg = gr.Textbox(visible=True)
62
+ clear = gr.Button("Clear", visible=True)
63
+ start_interview = gr.Button("Start Interview", visible=True)
64
+ chatbots.append(chatbot)
65
+
66
+ msg.submit(process_message, inputs=[msg, chatbot, gr.State(speaker_id)], outputs=[chatbot])
67
+ clear.click(lambda: None, None, chatbot, queue=False)
68
+ start_interview.click(
69
+ start_new_interview,
70
+ inputs=[gr.State(speaker_general_impression), gr.State(speaker_id)],
71
+ outputs=[chatbot]
72
+ )
73
 
74
  with gr.Tab(visible=True):
75
  with gr.TabItem(label=f'General Impression'):
 
99
  gr.Textbox(value=speaker_explanations.get("personality", ""),
100
  label="Personality Disorders Explanation", visible=True, lines=2)
101
  ]
102
+
103
+ output_components.extend([chatbot, msg, clear, start_interview])
104
  output_components.extend(speaker_section1)
105
  output_components.extend(speaker_section2)
106
  output_components.extend(speaker_section3)
107
  output_components.extend(speaker_section4)
108
 
109
  # Pad with None for any missing speakers
110
+ while len(output_components) < 40:
111
+ output_components.extend([gr.update(visible=False)] * 13)
112
 
113
  return output_components
114
 
 
167
  for n in range(3): # Assuming maximum of 3 speakers
168
 
169
  with gr.Tab(label=f'Speaker {n + 1}', visible=True):
170
+
171
+ with gr.TabItem(label=f'Interactive Interview'):
172
+ column_components_interview = [
173
+ gr.Chatbot(visible=False),
174
+ gr.Textbox(visible=False),
175
+ gr.Button("Clear", visible=False),
176
+ gr.Button("Start Interview", visible=False)
177
+ ]
178
+
179
  with gr.TabItem(label=f'General Impression'):
180
  column_components1 = [
181
  gr.Markdown(visible=False),
182
  gr.Textbox(label="General Impression")]
183
 
 
 
 
 
 
 
 
 
184
  with gr.TabItem(label=f'Attachment Styles'):
185
  column_components2 = [
186
  gr.Plot(visible=False),
 
197
  gr.Plot(visible=False),
198
  gr.Textbox(label="Personality Disorders Explanation")]
199
 
200
+ output_components.extend(column_components_interview)
201
  output_components.extend(column_components1)
202
  output_components.extend(column_components2)
203
  output_components.extend(column_components3)