import gradio as gr
from transformers import pipeline
from gtts import gTTS
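# Note: gTTS has no local model; it calls Google's online text-to-speech
# endpoint, so the Text-to-Speech task needs network access at request time.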
# Load models
sentiment_pipeline = pipeline("sentiment-analysis", model="distilbert-base-uncased-finetuned-sst-2-english")
summarization_pipeline = pipeline("summarization", model="facebook/bart-large-cnn")
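# Both pipelines download their weights from the Hugging Face Hub on first run;
# pass device=0 to pipeline(...) to run on a GPU if one is available.
# Illustrative output shape (the score value here is made up):
#   sentiment_pipeline("I love this!") -> [{'label': 'POSITIVE', 'score': 0.9998}]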
# Task logic
def perform_task(task, text):
    if not text.strip():
        return "⚠️ Please enter some text.", None
    if task == "Sentiment Analysis":
        result = sentiment_pipeline(text)[0]
        label = result['label']
        score = round(result['score'], 3)
        return f"🧠 Sentiment: {label}\n📊 Confidence: {score}", None
    elif task == "Summarization":
        result = summarization_pipeline(text, max_length=100, min_length=30, do_sample=False)
        return f"✂️ Summary:\n{result[0]['summary_text']}", None
    elif task == "Text-to-Speech":
        tts = gTTS(text)
        filename = "tts_output.mp3"
        tts.save(filename)
        return "✅ Audio generated below:", filename
    # Fallback so the function always returns a (message, audio_path) pair
    return "⚠️ Unknown task selected.", None
# UI with custom dark mode
with gr.Blocks(theme=gr.themes.Base()) as demo:
    # Custom CSS for dark mode
    gr.HTML("""
    <style>
    body, .gradio-container {
        background-color: #111 !important;
        color: #eee !important;
    }
    .gr-input, .gr-button, .gr-box {
        background-color: #222 !important;
        color: #eee !important;
        border: 1px solid #444 !important;
    }
    input::placeholder, textarea::placeholder {
        color: #888 !important;
    }
    </style>
    """)
gr.Markdown("# π€ Multi-Task Chatbot", elem_id="title") | |
with gr.Row(): | |
task_selector = gr.Dropdown( | |
["Sentiment Analysis", "Summarization", "Text-to-Speech"], | |
label="Select Task", | |
value="Sentiment Analysis" | |
) | |
textbox = gr.Textbox(lines=6, label="Enter your text here") | |
output_text = gr.Textbox(label="Output Message") | |
output_audio = gr.Audio(label="Generated Speech", type="filepath", visible=False) | |
run_button = gr.Button("Run") | |
    def handle_all(task, text):
        message, audio_path = perform_task(task, text)
        # Reveal the audio player only when a file path was actually produced
        return message, gr.update(value=audio_path, visible=audio_path is not None)

    run_button.click(fn=handle_all, inputs=[task_selector, textbox], outputs=[output_text, output_audio])

demo.launch()
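# To run this Space locally (a minimal sketch; exact package versions are up to you):
#   pip install gradio transformers torch gtts
#   python app.py   # app.py is the conventional entry point for a Gradio Space
# transformers needs a backend such as PyTorch, hence the torch install above.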