reab5555 committed on
Commit
422e558
·
verified ·
1 Parent(s): a3e8bd9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +30 -12
app.py CHANGED
@@ -150,12 +150,12 @@ def count_words_and_tokens(text):
150
  tokens = len(AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.3").tokenize(text))
151
  return words, tokens
152
 
 
153
  @spaces.GPU(duration=150)
154
  def process_input(input_file):
155
  start_time = time.time()
156
 
157
- progress_info = "Processing file..."
158
- yield progress_info, None, None, None, None, None, None, None, None
159
 
160
  file_extension = os.path.splitext(input_file.name)[1].lower()
161
 
@@ -176,8 +176,7 @@ def process_input(input_file):
176
  temp_video_path = "temp_video" + file_extension
177
  shutil.copy2(input_file.name, temp_video_path)
178
 
179
- progress_info = "Transcribing video..."
180
- yield progress_info, None, None, None, None, None, None, None, temp_video_path
181
 
182
  language = "en" # Default to English for video files
183
  diarization.process_video(temp_video_path, hf_token, language)
@@ -189,21 +188,24 @@ def process_input(input_file):
189
  input_info = f"Video transcribed. Words: {words}, Tokens: {tokens}"
190
  video_path = temp_video_path
191
  else:
192
- return "Unsupported file format. Please upload a TXT, PDF, or video file.", None, None, None, None, None, None, None, None
 
193
 
194
  detected_language = detect_language(content)
195
 
196
- progress_info = "Analyzing content..."
197
- yield progress_info, None, detected_language, input_info, None, None, None, None, video_path
198
 
199
  attachments_chain, bigfive_chain, personalities_chain = lazy_chains.get_chains()
200
 
 
201
  attachments_result = attachments_chain({"query": content})
202
  attachments_answer = attachments_result['result'].split("-----------\n\nAnswer:")[-1].strip()
203
 
 
204
  bigfive_result = bigfive_chain({"query": content})
205
  bigfive_answer = bigfive_result['result'].split("-----------\n\nAnswer:")[-1].strip()
206
 
 
207
  personalities_result = personalities_chain({"query": content})
208
  personalities_answer = personalities_result['result'].split("-----------\n\nAnswer:")[-1].strip()
209
 
@@ -212,9 +214,7 @@ def process_input(input_file):
212
 
213
  execution_info = f"{execution_time:.2f} seconds"
214
 
215
- progress_info = "Analysis complete!"
216
-
217
- yield progress_info, execution_info, detected_language, input_info, attachments_answer, bigfive_answer, personalities_answer, video_path
218
 
219
  def create_interface():
220
  with gr.Blocks() as iface:
@@ -225,7 +225,8 @@ def create_interface():
225
  input_file = gr.File(label="Upload File (TXT, PDF, or Video)")
226
 
227
  with gr.Column():
228
- progress = gr.Textbox(label="Progress")
 
229
  execution_time = gr.Textbox(label="Execution Time", visible=False)
230
  detected_language = gr.Textbox(label="Detected Language", visible=False)
231
  input_info = gr.Textbox(label="Input Information", visible=False)
@@ -234,11 +235,28 @@ def create_interface():
234
  bigfive_output = gr.Textbox(label="Big Five Results", visible=False)
235
  personalities_output = gr.Textbox(label="Personalities Results", visible=False)
236
 
 
 
 
 
 
 
 
 
 
 
 
 
 
237
  input_file.upload(
238
  fn=process_input,
239
  inputs=[input_file],
240
- outputs=[progress, execution_time, detected_language, input_info, attachments_output, bigfive_output, personalities_output, video_output],
241
  show_progress=True
 
 
 
 
242
  )
243
 
244
  return iface
 
150
  tokens = len(AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.3").tokenize(text))
151
  return words, tokens
152
 
153
+
154
  @spaces.GPU(duration=150)
155
  def process_input(input_file):
156
  start_time = time.time()
157
 
158
+ yield 0, "Processing file...", None, None, None, None, None, None, None, None
 
159
 
160
  file_extension = os.path.splitext(input_file.name)[1].lower()
161
 
 
176
  temp_video_path = "temp_video" + file_extension
177
  shutil.copy2(input_file.name, temp_video_path)
178
 
179
+ yield 0.2, "Transcribing video...", None, None, None, None, None, None, None, temp_video_path
 
180
 
181
  language = "en" # Default to English for video files
182
  diarization.process_video(temp_video_path, hf_token, language)
 
188
  input_info = f"Video transcribed. Words: {words}, Tokens: {tokens}"
189
  video_path = temp_video_path
190
  else:
191
+ yield 1, "Unsupported file format. Please upload a TXT, PDF, or video file.", None, None, None, None, None, None, None, None
192
+ return
193
 
194
  detected_language = detect_language(content)
195
 
196
+ yield 0.4, "Analyzing content...", None, detected_language, input_info, None, None, None, None, video_path
 
197
 
198
  attachments_chain, bigfive_chain, personalities_chain = lazy_chains.get_chains()
199
 
200
+ yield 0.6, "Analyzing attachments...", None, detected_language, input_info, None, None, None, None, video_path
201
  attachments_result = attachments_chain({"query": content})
202
  attachments_answer = attachments_result['result'].split("-----------\n\nAnswer:")[-1].strip()
203
 
204
+ yield 0.7, "Analyzing Big Five traits...", None, detected_language, input_info, attachments_answer, None, None, None, video_path
205
  bigfive_result = bigfive_chain({"query": content})
206
  bigfive_answer = bigfive_result['result'].split("-----------\n\nAnswer:")[-1].strip()
207
 
208
+ yield 0.8, "Analyzing personalities...", None, detected_language, input_info, attachments_answer, bigfive_answer, None, None, video_path
209
  personalities_result = personalities_chain({"query": content})
210
  personalities_answer = personalities_result['result'].split("-----------\n\nAnswer:")[-1].strip()
211
 
 
214
 
215
  execution_info = f"{execution_time:.2f} seconds"
216
 
217
+ yield 1, "Analysis complete!", execution_info, detected_language, input_info, attachments_answer, bigfive_answer, personalities_answer, video_path
 
 
218
 
219
  def create_interface():
220
  with gr.Blocks() as iface:
 
225
  input_file = gr.File(label="Upload File (TXT, PDF, or Video)")
226
 
227
  with gr.Column():
228
+ progress = gr.Slider(label="Progress", minimum=0, maximum=1, value=0, interactive=False)
229
+ progress_text = gr.Textbox(label="Status")
230
  execution_time = gr.Textbox(label="Execution Time", visible=False)
231
  detected_language = gr.Textbox(label="Detected Language", visible=False)
232
  input_info = gr.Textbox(label="Input Information", visible=False)
 
235
  bigfive_output = gr.Textbox(label="Big Five Results", visible=False)
236
  personalities_output = gr.Textbox(label="Personalities Results", visible=False)
237
 
238
+ def update_output(progress_value, progress_text, execution_time, detected_lang, input_info, attachments, bigfive, personalities, video):
239
+ return {
240
+ progress: progress_value,
241
+ progress_text: progress_text,
242
+ execution_time: gr.update(value=execution_time, visible=execution_time is not None),
243
+ detected_language: gr.update(value=detected_lang, visible=detected_lang is not None),
244
+ input_info: gr.update(value=input_info, visible=input_info is not None),
245
+ attachments_output: gr.update(value=attachments, visible=attachments is not None),
246
+ bigfive_output: gr.update(value=bigfive, visible=bigfive is not None),
247
+ personalities_output: gr.update(value=personalities, visible=personalities is not None),
248
+ video_output: gr.update(value=video, visible=video is not None)
249
+ }
250
+
251
  input_file.upload(
252
  fn=process_input,
253
  inputs=[input_file],
254
+ outputs=[progress, progress_text, execution_time, detected_language, input_info, attachments_output, bigfive_output, personalities_output, video_output],
255
  show_progress=True
256
+ ).then(
257
+ fn=update_output,
258
+ inputs=[progress, progress_text, execution_time, detected_language, input_info, attachments_output, bigfive_output, personalities_output, video_output],
259
+ outputs=[progress, progress_text, execution_time, detected_language, input_info, attachments_output, bigfive_output, personalities_output, video_output]
260
  )
261
 
262
  return iface