reab5555 committed on
Commit
8a68274
·
verified ·
1 Parent(s): 2b182b7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +76 -94
app.py CHANGED
@@ -5,104 +5,65 @@ from transcription_diarization import diarize_audio
5
  from visualization import create_charts
6
  import time
7
  from config import openai_api_key
8
- import plotly.graph_objs as go
9
- import json
10
 
11
  # Load the model
12
  llm = load_model(openai_api_key)
13
 
 
14
def analyze_video(video_path, max_speakers, progress=gr.Progress()):
    """Run the full analysis pipeline on an uploaded video.

    Pipeline: diarize audio -> run the LLM over the transcript ->
    post-process -> build per-speaker charts.

    Returns a dict with keys ``transcript``, ``charts``, ``explanations``
    and ``execution_time`` on success, or ``{"error": ...}`` on failure
    (including when no video was supplied).
    """
    t0 = time.time()
    if not video_path:
        return {"error": "Please upload a video file."}

    try:
        progress(0, desc="Starting analysis...")
        progress(0.2, desc="Starting transcription and diarization")
        transcription = diarize_audio(video_path, max_speakers)
        print("Transcription:", transcription)  # Debug print
        progress(0.5, desc="Transcription and diarization complete.")

        progress(0.6, desc="Processing transcription")
        raw_llm_output = llm(transcription)  # This should return the JSON string
        print("Raw LLM Output:", raw_llm_output)  # Debug print
        results = process_input(raw_llm_output, llm)
        print("Processed results:", results)  # Debug print
        progress(0.7, desc="Transcription processing complete.")

        progress(0.9, desc="Generating charts")
        charts, explanations = create_charts(results)
        print("Charts generated:", charts.keys())  # Debug print
        print("Explanations generated:", explanations.keys())  # Debug print
        progress(1.0, desc="Charts generation complete.")

        elapsed = time.time() - t0
        return {
            "transcript": transcription,
            "charts": charts,
            "explanations": explanations,
            "execution_time": int(elapsed),
        }
    except Exception as exc:
        # Top-level boundary for the Gradio callback: report, don't crash.
        print(f"Error in analyze_video: {exc}")
        return {"error": f"An error occurred during analysis: {str(exc)}"}
51
-
52
def create_output_components():
    """Pre-build the static Gradio output widgets.

    Creates a transcript textbox, three speaker tabs (8 components each:
    markdown header, three plots with explanations interleaved) and an
    execution-info textbox.

    Returns (row, transcript, tab_components, execution_info) where
    ``tab_components`` is a flat list of 24 components.
    """
    with gr.Row() as row:
        with gr.Column():
            transcript = gr.Textbox(label="Transcript", lines=10, visible=True)

            tabs = gr.Tabs()  # container created as in the original layout
            tab_components = []
            for speaker_no in range(1, 4):  # pre-create tabs for up to 3 speakers
                with gr.Tab(f"Speaker {speaker_no}", visible=True) as tab:
                    tab_components.append(gr.Markdown(f"## Speaker {speaker_no}", visible=True))
                    tab_components.append(gr.Plot(label="Attachment", visible=True))
                    tab_components.append(gr.Textbox(label="Attachment Styles Explanation", visible=True))
                    tab_components.append(gr.Plot(label="Dimensions", visible=True))
                    tab_components.append(gr.Plot(label="Big Five", visible=True))
                    tab_components.append(gr.Textbox(label="Big Five Traits Explanation", visible=True))
                    tab_components.append(gr.Plot(label="Personality", visible=True))
                    tab_components.append(gr.Textbox(label="Personality Disorders Explanation", visible=True))

    # NOTE(review): original indentation was lost in the diff scrape — the
    # execution-info box is created after the row context here; confirm
    # placement against the intended layout.
    execution_info = gr.Textbox(label="Execution Information", visible=True)
    return row, transcript, tab_components, execution_info
75
-
76
def run_analysis(video_path, max_speakers):
    """Invoke analyze_video and map its dict result onto Gradio updates.

    Output order matches create_output_components():
    [transcript] + 3 speakers x 8 updates + [execution info] = 26 updates.
    Unused speaker slots are hidden.
    """
    results = analyze_video(video_path, max_speakers)

    if "error" in results:
        hidden = [gr.update(visible=False)] * 24
        return ([gr.update(value=results["error"], visible=True)]
                + hidden
                + [gr.update(value="Analysis failed", visible=True)])

    speaker_ids = list(results["charts"].keys())
    updates = [gr.update(value=results["transcript"], visible=True)]

    for slot in range(3):  # one pass per potential speaker tab
        if slot >= len(speaker_ids):
            # No speaker for this slot: hide all 8 of its components.
            updates.extend([gr.update(visible=False)] * 8)
            continue

        sid = speaker_ids[slot]
        speaker_charts = results["charts"][sid]
        speaker_texts = results["explanations"][sid]
        updates.extend([
            gr.update(value=f"## {sid}", visible=True),                                   # Markdown header
            gr.update(value=speaker_charts.get("attachment", go.Figure()), visible=True),  # Attachment plot
            gr.update(value=speaker_texts.get("attachment", ""), visible=True),            # Attachment explanation
            gr.update(value=speaker_charts.get("dimensions", go.Figure()), visible=True),  # Dimensions plot
            gr.update(value=speaker_charts.get("bigfive", go.Figure()), visible=True),     # Big Five plot
            gr.update(value=speaker_texts.get("bigfive", ""), visible=True),               # Big Five explanation
            gr.update(value=speaker_charts.get("personality", go.Figure()), visible=True), # Personality plot
            gr.update(value=speaker_texts.get("personality", ""), visible=True),           # Personality explanation
        ])

    updates.append(gr.update(value=f"Completed in {results['execution_time']} seconds.", visible=True))
    return updates
106
 
107
  with gr.Blocks() as iface:
108
  gr.Markdown("# AI Personality Detection")
@@ -112,12 +73,33 @@ with gr.Blocks() as iface:
112
  max_speakers = gr.Slider(minimum=1, maximum=3, step=1, value=2, label="Maximum Number of Speakers")
113
  analyze_button = gr.Button("Analyze")
114
 
115
- output_row, transcript_output, tab_components, execution_info_output = create_output_components()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
116
 
117
  analyze_button.click(
118
- fn=run_analysis,
119
  inputs=[video_input, max_speakers],
120
- outputs=[transcript_output] + tab_components + [execution_info_output],
121
  show_progress=True
122
  )
123
 
 
5
  from visualization import create_charts
6
  import time
7
  from config import openai_api_key
 
 
8
 
9
  # Load the model
10
  llm = load_model(openai_api_key)
11
 
12
+
13
def analyze_video(video_path, max_speakers, progress=gr.Progress()):
    """Run diarization, LLM analysis and chart generation for a video.

    Returns a list of exactly 26 Gradio updates in the same order as the
    UI's output list: [execution info] + 3 speakers x 8 components +
    [transcript]. The committed version returned ~50 items starting with
    the transcript and ending with execution info — the reverse of the
    bound outputs — and padded to a hard-coded 49; this version matches
    the binding exactly and restores the error handling the commit
    dropped.
    """
    slots, per_speaker = 3, 8  # must mirror the pre-created UI components

    def _result(exec_msg, speaker_updates, transcript_update):
        # Pad hidden updates for unused speaker slots so the list length
        # is always 1 + slots*per_speaker + 1 == 26, matching `outputs`.
        missing = slots * per_speaker - len(speaker_updates)
        return ([gr.update(value=exec_msg, visible=True)]
                + speaker_updates
                + [gr.update(visible=False)] * missing
                + [transcript_update])

    if not video_path:
        return _result("Analysis not started.", [],
                       gr.update(value="Please upload a video file.", visible=True))

    start_time = time.time()
    try:
        progress(0, desc="Starting analysis...")
        progress(0.2, desc="Starting transcription and diarization")
        transcription = diarize_audio(video_path, max_speakers)
        print("Transcription:", transcription)  # Debug print
        progress(0.5, desc="Transcription and diarization complete.")

        progress(0.6, desc="Processing transcription")
        results = process_input(transcription, llm)
        progress(0.7, desc="Transcription processing complete.")

        progress(0.9, desc="Generating charts")
        charts, explanations = create_charts(results)
        progress(1.0, desc="Charts generation complete.")
    except Exception as e:
        # Report failure through the UI instead of crashing the callback.
        print(f"Error in analyze_video: {e}")
        return _result("Analysis failed", [],
                       gr.update(value=f"An error occurred during analysis: {e}", visible=True))

    speaker_updates = []
    # Cap at the number of pre-created speaker slots in the UI.
    for speaker_id, speaker_charts in list(charts.items())[:slots]:
        speaker_explanations = explanations[speaker_id]
        speaker_updates.extend([
            gr.update(value=f"## {speaker_id}<hr>", visible=True),                     # Markdown header
            gr.update(value=speaker_charts.get("attachment"), visible=True),           # Attachment plot
            gr.update(value=speaker_explanations.get("attachment", ""), visible=True), # Attachment explanation
            gr.update(value=speaker_charts.get("dimensions"), visible=True),           # Dimensions plot
            gr.update(value=speaker_charts.get("bigfive"), visible=True),              # Big Five plot
            gr.update(value=speaker_explanations.get("bigfive", ""), visible=True),    # Big Five explanation
            gr.update(value=speaker_charts.get("personality"), visible=True),          # Personality plot
            gr.update(value=speaker_explanations.get("personality", ""), visible=True) # Personality explanation
        ])

    elapsed = int(time.time() - start_time)
    return _result(f"Completed in {elapsed} seconds.", speaker_updates,
                   gr.update(value=transcription, visible=True))
66
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
67
 
68
  with gr.Blocks() as iface:
69
  gr.Markdown("# AI Personality Detection")
 
73
  max_speakers = gr.Slider(minimum=1, maximum=3, step=1, value=2, label="Maximum Number of Speakers")
74
  analyze_button = gr.Button("Analyze")
75
 
76
+ # Create output components
77
+ output_components = []
78
+
79
+ # Add transcript output near the top
80
+ execution_info_box = gr.Textbox(label="Execution Information", value="N/A", lines=1)
81
+ output_components.append(execution_info_box)
82
+
83
+ for _ in range(3): # Assuming maximum of 3 speakers
84
+ output_components.extend([
85
+ gr.Markdown(visible=False),
86
+ gr.Plot(visible=False),
87
+ gr.Textbox(label="Attachment Styles Explanation", visible=False),
88
+ gr.Plot(visible=False),
89
+ gr.Plot(visible=False),
90
+ gr.Textbox(label="Big Five Traits Explanation", visible=False),
91
+ gr.Plot(visible=False),
92
+ gr.Textbox(label="Personality Disorders Explanation", visible=False),
93
+ ])
94
+
95
+ # Add execution info component
96
+ transcript_output = gr.Textbox(label="Transcript", lines=10, visible=False)
97
+ output_components.append(transcript_output)
98
 
99
  analyze_button.click(
100
+ fn=analyze_video,
101
  inputs=[video_input, max_speakers],
102
+ outputs=output_components,
103
  show_progress=True
104
  )
105