"""Multi-task Gradio demo: sentiment analysis, summarization, and text-to-speech.

One dropdown selects the task; a single Run button dispatches to the
corresponding model/engine and shows a text message plus (for TTS) an audio player.
"""

import tempfile

import gradio as gr
from gtts import gTTS
from transformers import pipeline

# Load both Hugging Face pipelines once at import time so every request reuses them.
sentiment_pipeline = pipeline(
    "sentiment-analysis",
    model="distilbert-base-uncased-finetuned-sst-2-english",
)
summarization_pipeline = pipeline("summarization", model="facebook/bart-large-cnn")


def perform_task(task, text):
    """Run the selected task on *text*.

    Args:
        task: One of "Sentiment Analysis", "Summarization", "Text-to-Speech".
        text: Raw user input from the textbox.

    Returns:
        A ``(message, audio_path)`` tuple. ``audio_path`` is ``None`` unless
        text-to-speech was requested, in which case it is the path of the
        generated MP3 file.
    """
    # Guard clause: reject empty / whitespace-only input for every task.
    if not text.strip():
        return "⚠️ Please enter some text.", None

    if task == "Sentiment Analysis":
        result = sentiment_pipeline(text)[0]
        label = result["label"]
        score = round(result["score"], 3)
        return f"🧠 Sentiment: {label}\n📊 Confidence: {score}", None

    if task == "Summarization":
        result = summarization_pipeline(
            text, max_length=100, min_length=30, do_sample=False
        )
        return f"✂️ Summary:\n{result[0]['summary_text']}", None

    if task == "Text-to-Speech":
        tts = gTTS(text)
        # Fix: a unique temp file instead of the fixed "tts_output.mp3", so
        # concurrent requests don't overwrite each other's audio.
        with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as fp:
            filename = fp.name
        tts.save(filename)
        return "✅ Audio generated below:", filename

    # Fix: the original fell through and implicitly returned None for an
    # unknown task, which broke tuple-unpacking in the caller.
    return f"⚠️ Unknown task: {task}", None


# ---------------------------------------------------------------------------
# UI
# ---------------------------------------------------------------------------
with gr.Blocks(theme=gr.themes.Base()) as demo:
    # NOTE(review): placeholder for custom dark-mode CSS — the original block
    # was empty despite the "dark mode" comment; add real CSS here if desired.
    gr.HTML(""" """)

    gr.Markdown("# 🤖 Multi-Task Chatbot", elem_id="title")

    with gr.Row():
        task_selector = gr.Dropdown(
            ["Sentiment Analysis", "Summarization", "Text-to-Speech"],
            label="Select Task",
            value="Sentiment Analysis",
        )
        textbox = gr.Textbox(lines=6, label="Enter your text here")

    output_text = gr.Textbox(label="Output Message")
    # Hidden until a TTS request actually produces a file.
    output_audio = gr.Audio(label="Generated Speech", type="filepath", visible=False)
    run_button = gr.Button("Run")

    def handle_all(task, text):
        """Dispatch to perform_task and toggle the audio player's visibility."""
        message, audio_path = perform_task(task, text)
        return message, gr.update(value=audio_path, visible=audio_path is not None)

    run_button.click(
        fn=handle_all,
        inputs=[task_selector, textbox],
        outputs=[output_text, output_audio],
    )


if __name__ == "__main__":
    # Guarded launch so importing this module (e.g. for testing) doesn't
    # start the web server as a side effect.
    demo.launch()