# safe-talk/app.py
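"""Gradio UI for the safe-talk abuse detection app.

One tab trains/evaluates the model and pushes it to the Hub; a second tab
runs the prediction pipeline on a relationship description and an optional
WhatsApp chat export.
"""
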
import gradio as gr
from train_abuse_model import (
    run_training,
    evaluate_saved_model,
    push_model_to_hub,
)
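# run_prediction_pipeline builds the enriched input shown to the user and returns the predicted labels.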
from predict_pipeline import run_prediction_pipeline

with gr.Blocks() as demo:
    gr.Markdown("## 🧠 Abuse Detection App")
    gr.Markdown("⚠️ Keep this tab open while training or evaluating.")
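
    # Train / Evaluate tab: buttons launch training, evaluation, and a Hub push; all write their logs to the shared textbox.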
    with gr.Tab("🧪 Train / Evaluate"):
        with gr.Row():
            start_btn = gr.Button("🚀 Start Training")
            eval_btn = gr.Button("🔍 Evaluate Trained Model")
            push_btn = gr.Button("📤 Push Model to Hub")
        output_box = gr.Textbox(label="Logs", lines=25, interactive=False)

        start_btn.click(fn=run_training, outputs=output_box)
        eval_btn.click(fn=evaluate_saved_model, outputs=output_box)
        push_btn.click(fn=push_model_to_hub, outputs=output_box)
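
    # Abuse Detection tab: a free-text description plus an optional WhatsApp chat ZIP feed the prediction pipeline.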
    with gr.Tab("🔮 Abuse Detection"):
        desc_input = gr.Textbox(label="📝 Relationship Description", lines=5, placeholder="Write a relationship story here...")
        chat_upload = gr.File(label="📁 Optional: WhatsApp Chat ZIP (.zip)", file_types=[".zip"])
        predict_btn = gr.Button("Run Prediction")
        enriched_output = gr.Textbox(label="📎 Enriched Input (Used for Prediction)", lines=8, interactive=False)
        label_output = gr.Textbox(label="🏷️ Predicted Labels", lines=2, interactive=False)

        predict_btn.click(
            fn=run_prediction_pipeline,
            inputs=[desc_input, chat_upload],
            outputs=[enriched_output, label_output],
        )

if __name__ == "__main__":
    demo.launch()