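"""Gradio app for AI Personality Detection.

Transcribes and diarizes an uploaded video, runs the transcript through an
LLM analysis pipeline, and renders per-speaker personality charts with
explanations.
"""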
import time

import gradio as gr

from config import openai_api_key
from llm_loader import load_model
from processing import process_input
from transcription_diarization import diarize_audio
from visualization import create_charts

# Load the model
llm = load_model(openai_api_key)


def analyze_video(video_path, language, progress=gr.Progress()):
    start_time = time.time()
    if not video_path:
        # One update per registered output component (26 total: execution
        # info box, 3 speaker sections of 8 components each, transcript).
        return ["Please upload a video file."] + [gr.update(visible=False)] * 25

    progress(0, desc="Starting analysis...")
    progress(0.2, desc="Starting transcription and diarization")
    transcription = diarize_audio(video_path, language)
    print("Transcription:", transcription)  # Debug print
    progress(0.5, desc="Transcription and diarization complete.")

    progress(0.6, desc="Processing transcription")
    results = process_input(transcription, llm)
    progress(0.7, desc="Transcription processing complete.")

    progress(0.9, desc="Generating charts")
    charts, explanations = create_charts(results)
    progress(1.0, desc="Charts generation complete.")

    end_time = time.time()
    execution_time = end_time - start_time

    # Build return values in the order the UI registers its outputs:
    # execution info first, then 3 speaker sections, then the transcript.
    output_components = [
        gr.Textbox(value=f"Completed in {int(execution_time)} seconds.",
                   label="Execution Information", visible=True)
    ]

    # Only the first three speakers fit the slots reserved in the UI.
    for speaker_id, speaker_charts in list(charts.items())[:3]:
        # Keep the table flush left so Gradio renders a Markdown table,
        # not an indented code block.
        markdown_content = f"| {speaker_id} |\n|-----------|"
        speaker_explanations = explanations[speaker_id]
        speaker_section = [
            gr.Markdown(markdown_content, visible=True),
            gr.Plot(value=speaker_charts.get("attachment"), visible=True),
            gr.Textbox(value=speaker_explanations.get("attachment", ""),
                       label="Attachment Styles Explanation", visible=True),
            gr.Plot(value=speaker_charts.get("dimensions"), visible=True),
            gr.Plot(value=speaker_charts.get("bigfive"), visible=True),
            gr.Textbox(value=speaker_explanations.get("bigfive", ""),
                       label="Big Five Traits Explanation", visible=True),
            gr.Plot(value=speaker_charts.get("personality"), visible=True),
            gr.Textbox(value=speaker_explanations.get("personality", ""),
                       label="Personality Disorders Explanation", visible=True),
        ]
        output_components.extend(speaker_section)

    # Hide the slots of any unused speaker sections.
    while len(output_components) < 1 + 3 * 8:
        output_components.append(gr.update(visible=False))

    output_components.append(
        gr.Textbox(value=transcription, label="Transcript", lines=10, visible=True))

    return output_components


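# UI layout. Gradio Blocks needs every output component declared up front,
# so the speaker sections below are created hidden and revealed per run.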
with gr.Blocks() as iface:
    gr.Markdown("# AI Personality Detection")
    gr.Markdown("Upload a video")

    video_input = gr.Video(label="Upload Video")
    language_dropdown = gr.Dropdown(
        choices=["English", "Hebrew", "Arabic", "French", "German", "Italian", "Japanese", "Chinese", "Auto-detect"],
        value="English",
        label="Transcription Language"
    )
    analyze_button = gr.Button("Analyze")

    # Register output components in a fixed order; analyze_video returns
    # one update per component, in this same order.
    output_components = []

    # Execution info box first.
    execution_info_box = gr.Textbox(label="Execution Information", value="N/A", lines=1)
    output_components.append(execution_info_box)

    for _ in range(3):  # Assuming maximum of 3 speakers
        output_components.extend([
            gr.Markdown(visible=False),
            gr.Plot(visible=False),
            gr.Textbox(label="Attachment Styles Explanation", visible=False),
            gr.Plot(visible=False),
            gr.Plot(visible=False),
            gr.Textbox(label="Big Five Traits Explanation", visible=False),
            gr.Plot(visible=False),
            gr.Textbox(label="Personality Disorders Explanation", visible=False),
        ])

    # Transcript last, matching the order of analyze_video's return values.
    transcript_output = gr.Textbox(label="Transcript", lines=10, visible=False)
    output_components.append(transcript_output)

    analyze_button.click(
        fn=analyze_video,
        inputs=[video_input, language_dropdown],
        outputs=output_components,
        show_progress=True
    )

if __name__ == "__main__":
    iface.launch()
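    # Optionally create a temporary public link instead:
    # iface.launch(share=True)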