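"""Gradio interface for the personality analysis classification app.

Accepts an uploaded TXT, PDF, or video file (or a bundled example video),
runs it through process_input, and shows per-speaker charts, explanations,
and the transcription.
"""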
import gradio as gr
from processing import process_input
from visualization import update_visibility_and_charts


def create_interface():
    example_video_path = "examples/Scenes.From.A.Marriage.US.mp4"

    with gr.Blocks() as iface:
        gr.Markdown("# Personality Analysis Classification")
        gr.Markdown("Upload a Video, TXT, or PDF file or use the example video.")

        with gr.Row():
            with gr.Column(scale=3):
                input_file = gr.File(label="Upload File (TXT, PDF, or Video)")
            with gr.Column(scale=1):
                example_video = gr.Video(value=example_video_path, label="Example Video Preview")
                example_button = gr.Button("Use Example Video")

        with gr.Column():
            # Progress updates are injected into the handler through a gr.Progress
            # default argument (see process_and_update below).
            status_text = gr.Textbox(label="Status")
            execution_time = gr.Textbox(label="Execution Time", visible=False)
            detected_language = gr.Textbox(label="Detected Language", visible=False)
            # Textbox for the SRT transcription
            transcription_text = gr.Textbox(label="Transcription", visible=False, lines=10)

            charts_and_explanations = []
            for _ in range(6):  # 3 analysis types * 2 speakers
                with gr.Row():
                    charts_and_explanations.append(gr.Plot(visible=False))
                    charts_and_explanations.append(gr.Plot(visible=False))
                    charts_and_explanations.append(gr.Textbox(visible=False))
        def process_and_update(input_file, progress=gr.Progress()):
            # The events below wire 22 outputs: status, execution time, detected
            # language, 18 chart/explanation components, and the transcription textbox.
            if input_file is None:
                return [gr.update(value="No file selected")] + [gr.update(visible=False)] * 21

            results = process_input(input_file, progress=progress)
            if len(results) != 10:
                return [gr.update(value="Error: Unexpected number of results")] + [gr.update(visible=False)] * 21

            # The transcription is the last item; keep the six analysis results,
            # drop the token information, and append the transcription.
            transcription = results[-1]
            results = results[:6] + [transcription]

            visibility_updates = update_visibility_and_charts(*results[:6])
            transcription_update = gr.update(value=transcription, visible=True)
            return visibility_updates + [transcription_update]
        def use_example_video():
            return example_video_path

        input_file.upload(
            fn=process_and_update,
            inputs=[input_file],
            outputs=[status_text, execution_time, detected_language] + charts_and_explanations + [transcription_text]
        )

        example_button.click(
            fn=use_example_video,
            inputs=[],
            outputs=input_file
        ).then(
            fn=process_and_update,
            inputs=[input_file],
            outputs=[status_text, execution_time, detected_language] + charts_and_explanations + [transcription_text]
        )

    return iface
iface = create_interface()

if __name__ == "__main__":
    iface.launch(share=True)