import gradio as gr
from llm_loader import load_model
from processing import process_input
from transcription_diarization import diarize_audio
from visualization import create_charts
import time
from config import openai_api_key
import plotly.graph_objs as go
import json

# Load the model
llm = load_model(openai_api_key)
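
# The helper modules above are not shown in this file; from the way they are used
# below, their assumed interfaces are roughly:
#   diarize_audio(video_path, max_speakers) -> speaker-labelled transcript (str)
#   llm(transcription)                      -> raw LLM output as a JSON string
#   process_input(raw_llm_output, llm)      -> per-speaker analysis results (dict)
#   create_charts(results)                  -> (charts, explanations), both dicts keyed by speaker id
# These signatures are inferred from the call sites, not from the modules themselves.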

def analyze_video(video_path, max_speakers, progress=gr.Progress()):
    start_time = time.time()
    if not video_path:
        return {"error": "Please upload a video file."}

    try:
        progress(0, desc="Starting analysis...")

        progress(0.2, desc="Starting transcription and diarization")
        transcription = diarize_audio(video_path, max_speakers)
        print("Transcription:", transcription)  # Debug print
        progress(0.5, desc="Transcription and diarization complete.")

        progress(0.6, desc="Processing transcription")
        raw_llm_output = llm(transcription)  # This should return the JSON string
        print("Raw LLM Output:", raw_llm_output)  # Debug print
        results = process_input(raw_llm_output, llm)
        print("Processed results:", results)  # Debug print
        progress(0.7, desc="Transcription processing complete.")

        progress(0.9, desc="Generating charts")
        charts, explanations = create_charts(results)
        print("Charts generated:", charts.keys())  # Debug print
        print("Explanations generated:", explanations.keys())  # Debug print
        progress(1.0, desc="Charts generation complete.")

        end_time = time.time()
        execution_time = end_time - start_time

        return {
            "transcript": transcription,
            "charts": charts,
            "explanations": explanations,
            "execution_time": int(execution_time)
        }
    except Exception as e:
        print(f"Error in analyze_video: {e}")
        return {"error": f"An error occurred during analysis: {str(e)}"}

def create_output_components():
    with gr.Row() as row:
        with gr.Column():
            transcript = gr.Textbox(label="Transcript", lines=10, visible=True)
            tab_components = []
            with gr.Tabs():
                for i in range(3):  # Pre-create 3 tabs (max number of speakers)
                    with gr.Tab(f"Speaker {i+1}", visible=True):
                        speaker_components = [
                            gr.Markdown(f"## Speaker {i+1}", visible=True),
                            gr.Plot(label="Attachment", visible=True),
                            gr.Textbox(label="Attachment Styles Explanation", visible=True),
                            gr.Plot(label="Dimensions", visible=True),
                            gr.Plot(label="Big Five", visible=True),
                            gr.Textbox(label="Big Five Traits Explanation", visible=True),
                            gr.Plot(label="Personality", visible=True),
                            gr.Textbox(label="Personality Disorders Explanation", visible=True),
                        ]
                        tab_components.extend(speaker_components)
            execution_info = gr.Textbox(label="Execution Information", visible=True)
    return row, transcript, tab_components, execution_info
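
# Note: create_output_components builds 1 transcript box + 3 tabs x 8 components
# + 1 execution-info box = 26 output components. run_analysis below returns one
# gr.update() per component, in the same order, so the two must stay in sync.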

def run_analysis(video_path, max_speakers):
    results = analyze_video(video_path, max_speakers)

    if "error" in results:
        return ([gr.update(value=results["error"], visible=True)]
                + [gr.update(visible=False)] * 24
                + [gr.update(value="Analysis failed", visible=True)])

    transcript = results["transcript"]
    execution_info = f"Completed in {results['execution_time']} seconds."

    tab_updates = []
    for i in range(3):  # For each potential speaker
        if i < len(results["charts"]):
            speaker_id = list(results["charts"].keys())[i]
            speaker_charts = results["charts"][speaker_id]
            speaker_explanations = results["explanations"][speaker_id]
            tab_updates.extend([
                gr.update(value=f"## {speaker_id}", visible=True),  # Markdown
                gr.update(value=speaker_charts.get("attachment", go.Figure()), visible=True),  # Attachment plot
                gr.update(value=speaker_explanations.get("attachment", ""), visible=True),  # Attachment explanation
                gr.update(value=speaker_charts.get("dimensions", go.Figure()), visible=True),  # Dimensions plot
                gr.update(value=speaker_charts.get("bigfive", go.Figure()), visible=True),  # Big Five plot
                gr.update(value=speaker_explanations.get("bigfive", ""), visible=True),  # Big Five explanation
                gr.update(value=speaker_charts.get("personality", go.Figure()), visible=True),  # Personality plot
                gr.update(value=speaker_explanations.get("personality", ""), visible=True),  # Personality explanation
            ])
        else:
            tab_updates.extend([gr.update(visible=False)] * 8)  # Hide unused tab components

    return ([gr.update(value=transcript, visible=True)]
            + tab_updates
            + [gr.update(value=execution_info, visible=True)])

with gr.Blocks() as iface:
    gr.Markdown("# AI Personality Detection")
    gr.Markdown("Upload a video to analyze each speaker's personality profile.")
    video_input = gr.Video(label="Upload Video")
    max_speakers = gr.Slider(minimum=1, maximum=3, step=1, value=2, label="Maximum Number of Speakers")
    analyze_button = gr.Button("Analyze")

    output_row, transcript_output, tab_components, execution_info_output = create_output_components()

    analyze_button.click(
        fn=run_analysis,
        inputs=[video_input, max_speakers],
        outputs=[transcript_output] + tab_components + [execution_info_output],
        show_progress=True
    )
| if __name__ == "__main__": | |
| iface.launch() |
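
# Optional tweak (an assumption, not part of the original Space): for long-running
# analyses outside Hugging Face Spaces, enabling Gradio's request queue before
# launching can help avoid request timeouts, e.g.
#     iface.queue().launch()
# and iface.launch(share=True) exposes a temporary public URL for quick sharing.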