import gradio as gr
from processing import process_input
from visualization import update_visibility_and_charts
import os
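# Assumed contracts for the imported helpers, inferred from how they are called
# below (not taken from processing.py / visualization.py themselves):
#   - process_input(file, progress=...) returns a 10-item sequence: six values
#     consumed by update_visibility_and_charts, three token-related items
#     (per the original comments), and the SRT transcription as the last element.
#   - update_visibility_and_charts(*six_values) returns 21 gr.update objects, one
#     per status/plot/explanation component created in create_interface().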
def create_interface():
    example_video_path = "examples/Scenes.From.A.Marriage.US.mp4"

    with gr.Blocks() as iface:
        gr.Markdown("# Personality Analysis Classification")
        gr.Markdown("Upload a Video, TXT, or PDF file or use the example video.")

        with gr.Row():
            with gr.Column(scale=3):
                input_file = gr.File(label="Upload File (TXT, PDF, or Video)")
            with gr.Column(scale=1):
                example_video = gr.Video(value=example_video_path, label="Example Video Preview")
                example_button = gr.Button("Use Example Video")
        with gr.Column():
            status_text = gr.Textbox(label="Status")
            execution_time = gr.Textbox(label="Execution Time", visible=False)
            detected_language = gr.Textbox(label="Detected Language", visible=False)
            # Textbox for the SRT transcription (revealed once processing finishes)
            transcription_text = gr.Textbox(label="Transcription", visible=False, lines=10)
        # Six rows (3 analysis types * 2 speakers), each with two plots and one
        # explanation textbox: 18 output components in total
        charts_and_explanations = []
        for _ in range(6):
            with gr.Row():
                charts_and_explanations.append(gr.Plot(visible=False))
                charts_and_explanations.append(gr.Plot(visible=False))
                charts_and_explanations.append(gr.Textbox(visible=False))
        def process_and_update(input_file, progress=gr.Progress()):
            # 22 outputs are expected: status text, execution time, detected language,
            # the 18 chart/explanation components, and the transcription textbox.
            if input_file is None:
                return [gr.update(value="No file selected")] + [gr.update(visible=False)] * 21

            results = process_input(input_file, progress=progress)
            if len(results) != 10:
                return [gr.update(value="Error: Unexpected number of results")] + [gr.update(visible=False)] * 21

            # The transcription is the last item in the results
            transcription = results[-1]
            # Keep only the first six values; drop the token information and the transcription
            results = results[:6]
            visibility_updates = update_visibility_and_charts(*results)
            # Reveal the transcription textbox with the SRT text
            transcription_update = gr.update(value=transcription, visible=True)
            return visibility_updates + [transcription_update]
        def use_example_video():
            return example_video_path

        input_file.upload(
            fn=process_and_update,
            inputs=[input_file],
            outputs=[status_text, execution_time, detected_language] + charts_and_explanations + [transcription_text],
        )

        example_button.click(
            fn=use_example_video,
            inputs=[],
            outputs=input_file,
        ).then(
            fn=process_and_update,
            inputs=[input_file],
            outputs=[status_text, execution_time, detected_language] + charts_and_explanations + [transcription_text],
        )

    return iface
iface = create_interface()

if __name__ == "__main__":
    iface.launch(share=True)
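# Usage note (assuming this file is the Space's app.py and the example video exists
# under examples/): run `python app.py` locally. share=True additionally asks Gradio
# to open a temporary public link alongside the local server.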