# NOTE: the lines "Spaces: / Sleeping / Sleeping" here were Hugging Face
# Spaces page-status residue captured during export, not program text.
import gradio as gr
import openai
import time
from transformers import pipeline
from gtts import gTTS
import os
# Read the OpenAI API key from the environment instead of hard-coding it.
# SECURITY: the original embedded a live-looking "sk-..." key directly in
# source — that key must be revoked/rotated, and never committed again.
openai.api_key = os.environ.get("OPENAI_API_KEY", "")
# Load the Hugging Face inference pipelines once at startup.
# Each call downloads/caches the model weights on first run, so module
# import can take a while on a cold start.
sentiment_pipeline = pipeline("sentiment-analysis", model="distilbert-base-uncased-finetuned-sst-2-english")
summarization_pipeline = pipeline("summarization", model="facebook/bart-large-cnn")
# Chatbot class
class OpenAIChatbot:
    """Thin wrapper around the OpenAI chat-completions API with streaming."""

    def __init__(self, model="gpt-3.5-turbo"):
        self.model = model

    def set_model(self, model_name):
        """Switch the backing model; return a status string for the UI."""
        self.model = model_name
        return f"Model set to {model_name}"

    def stream_chat(self, message, history, system_prompt=""):
        """Yield successive copies of `history` as the reply streams in.

        `history` is a list of [user, assistant] pairs; the final pair is
        grown in place token by token so the chat window updates live.
        """
        # Ignore empty / whitespace-only messages.
        if not message.strip():
            yield history
            return

        # Rebuild the full conversation in the API's message format.
        conversation = []
        if system_prompt:
            conversation.append({"role": "system", "content": system_prompt})
        for past_user, past_bot in history:
            conversation.append({"role": "user", "content": past_user})
            conversation.append({"role": "assistant", "content": past_bot})
        conversation.append({"role": "user", "content": message})

        # Reserve a slot in the history for the incoming reply.
        history.append([message, ""])
        try:
            stream = openai.chat.completions.create(
                model=self.model,
                messages=conversation,
                stream=True,
                temperature=0.7,
                max_tokens=1000
            )
            partial = ""
            for event in stream:
                piece = event.choices[0].delta
                if piece and piece.content:
                    partial += piece.content
                    history[-1][1] = partial
                    yield history
                    # Small pause so the UI animates token-by-token.
                    time.sleep(0.02)
        except Exception as e:
            # Surface API failures inside the chat window instead of crashing.
            history[-1][1] = f"Error: {str(e)}"
            yield history

chatbot = OpenAIChatbot()
# Multi-task handler
def perform_task(task, text):
    """Run the selected NLP task on `text`.

    Returns a (result_text, audio_path, audio_component_update) triple
    matching the three Gradio outputs wired to this handler; the audio
    player is only made visible for Text-to-Speech.
    """
    if not text.strip():
        return "⚠️ Please enter some text.", None, gr.update(visible=False)
    if task == "Sentiment Analysis":
        result = sentiment_pipeline(text)[0]
        return f"Label: {result['label']} | Confidence: {round(result['score'], 3)}", None, gr.update(visible=False)
    elif task == "Summarization":
        result = summarization_pipeline(text, max_length=100, min_length=30, do_sample=False)
        return result[0]['summary_text'], None, gr.update(visible=False)
    elif task == "Text-to-Speech":
        tts = gTTS(text)
        file_path = "tts_output.mp3"
        tts.save(file_path)
        return "Audio generated successfully.", file_path, gr.update(visible=True, value=file_path)
    # Bug fix: an unrecognized task previously fell off the end and returned
    # a bare None, breaking Gradio's three-output unpacking.
    return f"Unknown task: {task}", None, gr.update(visible=False)
# Interface
# Gradio UI: one tab for one-shot NLP tasks, one tab for the streaming chatbot.
with gr.Blocks() as demo:
    gr.Markdown("# π€ Multi-Task AI Assistant + OpenAI Chatbot")
    with gr.Tab("AI Tasks"):
        # Task picker + text input feed perform_task; the audio player stays
        # hidden unless Text-to-Speech produces a file.
        task = gr.Dropdown(["Sentiment Analysis", "Summarization", "Text-to-Speech"], value="Sentiment Analysis")
        input_text = gr.Textbox(lines=6, label="Input")
        run_btn = gr.Button("Run")
        output = gr.Textbox(label="Result")
        audio = gr.Audio(type="filepath", visible=False)
        # `audio` appears twice in the outputs list: once to receive the file
        # value and once to receive the visibility gr.update().
        run_btn.click(perform_task, [task, input_text], [output, audio, audio])
    with gr.Tab("Chatbot"):
        model_select = gr.Dropdown(["gpt-3.5-turbo", "gpt-4"], value="gpt-3.5-turbo", label="Model")
        system_prompt = gr.Textbox(label="System Prompt", placeholder="You are a helpful assistant...")
        chat_ui = gr.Chatbot(label="Chat", height=400)
        message_input = gr.Textbox(placeholder="Type your message...")
        send_btn = gr.Button("Send")
        clear_btn = gr.Button("Clear")
        # Model switch mutates the shared chatbot instance; no UI output used.
        model_select.change(chatbot.set_model, inputs=[model_select], outputs=[])
        # Wrapper returning the generator so Gradio streams partial histories.
        def handle_chat(msg, hist, sys_prompt):
            return chatbot.stream_chat(msg, hist, sys_prompt)
        send_btn.click(handle_chat, [message_input, chat_ui, system_prompt], [chat_ui])
        message_input.submit(handle_chat, [message_input, chat_ui, system_prompt], [chat_ui])
        # NOTE(review): message_input is never cleared after send — confirm
        # whether that is intentional.
        clear_btn.click(lambda: [], outputs=[chat_ui])
# share=True publishes a temporary public Gradio link.
demo.launch(share=True)