import gradio as gr
from llm_loader import load_model
from processing import process_input
from transcription_diarization import diarize_audio
from visualization import create_charts
import time
from config import openai_api_key

# Load the model once at startup so every request reuses the same instance
llm = load_model(openai_api_key)


def analyze_video(video_path, progress=gr.Progress()):
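    """Run the full analysis pipeline on one video.

    Gradio requires exactly one return value per output component:
    1 transcript box + 3 speakers x 9 widgets + 1 execution-info box = 29.
    """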
    start_time = time.time()
    if not video_path:
        # One update per output component: transcript, 3 x 9 speaker widgets, execution info.
        return ([gr.update(value="Please upload a video file.", visible=True)]
                + [gr.update(visible=False)] * 27
                + [gr.update(value="Analysis not started.")])

    progress(0, desc="Starting analysis...")
    progress(0.2, desc="Starting transcription and diarization")
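    # diarize_audio is expected to return a speaker-labelled transcript string;
    # it is shown verbatim in the Transcript textbox below.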
    transcription = diarize_audio(video_path)
    print("Transcription:", transcription)  # Debug print
    progress(0.5, desc="Transcription and diarization complete.")

    progress(0.6, desc="Processing transcription")
    results = process_input(transcription, llm)
    progress(0.7, desc="Transcription processing complete.")

    progress(0.9, desc="Generating charts")
    charts, explanations, general_impressions = create_charts(results)
    progress(1.0, desc="Charts generation complete.")

    end_time = time.time()
    execution_time = end_time - start_time

    output_components = []

    # Add transcript near the beginning
    output_components.append(gr.Textbox(value=transcription, label="Transcript", lines=10, visible=True))
    
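    # Build each speaker's section. The widget order here must mirror the hidden
    # placeholder components created in the Blocks layout below.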
    for speaker_id, speaker_charts in charts.items():
        # One-cell Markdown table used as this speaker's section header; kept
        # left-aligned so Markdown does not treat the indent as a code block.
        markdown_content = f"| {speaker_id} |\n|-----------|"
        speaker_general_impression = general_impressions[speaker_id]
        speaker_explanations = explanations[speaker_id]
        speaker_section = [
            gr.Markdown(markdown_content, visible=True),
            gr.Textbox(value=speaker_general_impression, label="General Impression", lines=3, visible=True),
            gr.Plot(value=speaker_charts.get("attachment", None), visible=True),
            gr.Textbox(value=speaker_explanations.get("attachment", ""), label="Attachment Styles Explanation",
                       visible=True),
            gr.Plot(value=speaker_charts.get("dimensions", None), visible=True),
            gr.Plot(value=speaker_charts.get("bigfive", None), visible=True),
            gr.Textbox(value=speaker_explanations.get("bigfive", ""), label="Big Five Traits Explanation",
                       visible=True),
            gr.Plot(value=speaker_charts.get("personality", None), visible=True),
            gr.Textbox(value=speaker_explanations.get("personality", ""), label="Personality Disorders Explanation",
                       visible=True),
        ]
        output_components.extend(speaker_section)

    # Pad with hidden updates so the list always matches the fixed 3-speaker layout.
    while len(output_components) < 28:  # 1 transcript + 3 speakers x 9 widgets
        output_components.extend([gr.update(visible=False)] * 9)

    # Add execution info
    output_components.append(
        gr.Textbox(value=f"Completed in {int(execution_time)} seconds.", label="Execution Information", visible=True))

    return output_components


with gr.Blocks() as iface:
    gr.Markdown("# AI Personality Detection")
    
    with gr.Row():
        with gr.Column(scale=3):
            gr.Markdown("Upload a video")
            video_input = gr.Video(label="Upload Video")
            analyze_button = gr.Button("Analyze")
        with gr.Column(scale=1):
            gr.Markdown("Example Video")
            example_video = gr.Video("examples/Scenes.From.A.Marriage.US.mp4", label="Example Video")
            use_example_button = gr.Button("Use Example Video")

    # Create output components in the same order analyze_video returns them:
    # transcript first, then the per-speaker widgets, then the execution info.
    output_components = []

    transcript_output = gr.Textbox(label="Transcript", lines=10, visible=False)
    output_components.append(transcript_output)

    for _ in range(3):  # Assuming a maximum of 3 speakers
        output_components.extend([
            gr.Markdown(visible=False),
            gr.Textbox(label="General Impression", visible=False),
            gr.Plot(visible=False),
            gr.Textbox(label="Attachment Styles Explanation", visible=False),
            gr.Plot(visible=False),
            gr.Plot(visible=False),
            gr.Textbox(label="Big Five Traits Explanation", visible=False),
            gr.Plot(visible=False),
            gr.Textbox(label="Personality Disorders Explanation", visible=False),
        ])

    # Execution info comes last, matching the final element returned by analyze_video.
    execution_info_box = gr.Textbox(label="Execution Information", value="N/A", lines=1)
    output_components.append(execution_info_box)

    def use_example():
        return "examples/Scenes.From.A.Marriage.US.mp4"

    analyze_button.click(
        fn=analyze_video,
        inputs=[video_input],
        outputs=output_components,
        show_progress=True
    )

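    # Event chaining: .click() first copies the example path into the video
    # input, then .then() runs the analysis on that updated value.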
    use_example_button.click(
        fn=use_example,
        inputs=[],
        outputs=[video_input],
    ).then(
        fn=analyze_video,
        inputs=[video_input],
        outputs=output_components,
        show_progress=True
    )

if __name__ == "__main__":
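    # launch() serves the app locally by default; pass share=True for a
    # temporary public link when running on a remote machine.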
    iface.launch()