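"""Gradio app: an LLM-based conversational chatbot built on the OpenAI API,
with a configurable model, system prompt, and custom context data."""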
import gradio as gr
from openai import OpenAI
import json
import os
import time
from typing import List, Tuple, Optional
import requests
from datetime import datetime
class ChatbotManager:
def __init__(self):
self.conversation_history = []
self.current_api_key = None
self.client = None
self.current_model = "gpt-3.5-turbo"
self.system_prompt = "You are a helpful AI assistant. Respond in a friendly and informative manner."
self.max_tokens = 150
self.temperature = 0.7
def set_api_key(self, api_key: str) -> str:
"""Set the OpenAI API key"""
if not api_key.strip():
return "❌ Please enter a valid API key"
self.current_api_key = api_key.strip()
# Create the OpenAI client (openai>=1.0 client interface)
self.client = OpenAI(api_key=self.current_api_key)
# Test the API key
try:
self.client.models.list()
return "βœ… API key validated successfully!"
except Exception as e:
return f"❌ Invalid API key: {str(e)}"
def update_settings(self, model: str, system_prompt: str, max_tokens: int, temperature: float) -> str:
"""Update chatbot settings"""
self.current_model = model
self.system_prompt = system_prompt
self.max_tokens = max_tokens
self.temperature = temperature
return f"βœ… Settings updated: Model={model}, Max Tokens={max_tokens}, Temperature={temperature}"
def preprocess_data(self, data_text: str) -> str:
"""Preprocess and integrate custom data into the system prompt"""
if not data_text.strip():
return "No custom data provided"
# Reset system prompt to avoid accumulation
base_prompt = "You are a helpful AI assistant. Respond in a friendly and informative manner."
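# Note: the custom data rides along in the system prompt, so it is resent with
# every request and counts toward the model's context window.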
self.system_prompt = base_prompt + f"\n\nAdditional Context:\n{data_text}"
return f"βœ… Custom data integrated ({len(data_text)} characters)"
def generate_response(self, user_input: str, history: List[Tuple[str, str]]) -> Tuple[str, List[Tuple[str, str]]]:
"""Generate response using the selected LLM model"""
if not self.current_api_key:
return "❌ Please set your API key first!", history
if not user_input.strip():
return "Please enter a message.", history
try:
# Prepare conversation context
messages = [{"role": "system", "content": self.system_prompt}]
# Add conversation history
for user_msg, assistant_msg in history:
messages.append({"role": "user", "content": user_msg})
messages.append({"role": "assistant", "content": assistant_msg})
# Add current user input
messages.append({"role": "user", "content": user_input})
# Generate response using new OpenAI client syntax
response = self.client.chat.completions.create(
model=self.current_model,
messages=messages,
max_tokens=self.max_tokens,
temperature=self.temperature,
n=1,
stop=None,
)
assistant_response = response.choices[0].message.content.strip()
# Update history
history.append((user_input, assistant_response))
return assistant_response, history
except Exception as e:
error_msg = f"❌ Error generating response: {str(e)}"
return error_msg, history
def clear_conversation(self) -> Tuple[str, List[Tuple[str, str]]]:
"""Clear conversation history"""
self.conversation_history = []
return "", []
def export_conversation(self, history: List[Tuple[str, str]]) -> str:
"""Export conversation history to JSON format"""
if not history:
return "No conversation to export"
export_data = {
"timestamp": datetime.now().isoformat(),
"model": self.current_model,
"conversation": [
{"user": user_msg, "assistant": assistant_msg}
for user_msg, assistant_msg in history
]
}
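# Timestamped filename, written to the app's current working directory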
filename = f"conversation_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"
try:
with open(filename, 'w', encoding='utf-8') as f:
json.dump(export_data, f, indent=2, ensure_ascii=False)
return f"βœ… Conversation exported to {filename}"
except Exception as e:
return f"❌ Export failed: {str(e)}"
# Initialize chatbot manager
chatbot = ChatbotManager()
# Define available models
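# Note: which of these a given API key can actually use depends on the account;
# unavailable models will return an error at request time.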
AVAILABLE_MODELS = [
"gpt-3.5-turbo",
"gpt-3.5-turbo-16k",
"gpt-4",
"gpt-4-32k",
"gpt-4-turbo-preview",
"gpt-4o",
"gpt-4o-mini"
]
def create_interface():
"""Create the Gradio interface"""
with gr.Blocks(title="LLM-Based Chatbot", theme=gr.themes.Soft()) as demo:
gr.Markdown("""
# 🤖 LLM-Based Conversational AI Chatbot
This chatbot leverages powerful language models to hold intelligent, context-aware conversations.
Enter your OpenAI API key to get started!
""")
with gr.Tab("πŸ’¬ Chat Interface"):
with gr.Row():
with gr.Column(scale=3):
chatbot_interface = gr.Chatbot(
label="Conversation",
height=400,
show_label=True,
avatar_images=("πŸ‘€", "πŸ€–"),
show_copy_button=True,
bubble_full_width=False,
show_share_button=True
)
with gr.Row():
user_input = gr.Textbox(
placeholder="Type your message here...",
scale=4,
show_label=False,
container=False
)
send_btn = gr.Button("πŸ“€ Send", variant="primary", scale=1)
with gr.Row():
clear_btn = gr.Button("πŸ—‘οΈ Clear Chat", variant="secondary")
export_btn = gr.Button("πŸ“₯ Export Chat", variant="secondary")
regenerate_btn = gr.Button("πŸ”„ Regenerate", variant="secondary")
with gr.Column(scale=1):
gr.Markdown("### πŸ”§ Quick Settings")
api_key_input = gr.Textbox(
label="πŸ”‘ OpenAI API Key",
placeholder="sk-...",
type="password"
)
api_status = gr.Textbox(
label="API Status",
interactive=False,
value="❌ No API key provided"
)
model_dropdown = gr.Dropdown(
choices=AVAILABLE_MODELS,
value="gpt-3.5-turbo",
label="πŸ€– Model"
)
max_tokens_slider = gr.Slider(
minimum=50,
maximum=2000,
value=150,
step=10,
label="πŸ“ Max Tokens"
)
temperature_slider = gr.Slider(
minimum=0.0,
maximum=1.0,
value=0.7,
step=0.1,
label="🌑️ Temperature"
)
# Live settings display
gr.Markdown("### πŸ“Š Current Settings")
current_settings = gr.Textbox(
value="Model: gpt-3.5-turbo\nTokens: 150\nTemp: 0.7",
label="Active Configuration",
interactive=False,
lines=3
)
with gr.Tab("βš™οΈ Advanced Settings"):
gr.Markdown("### 🎯 System Prompt Configuration")
system_prompt_input = gr.Textbox(
label="System Prompt",
value="You are a helpful AI assistant. Respond in a friendly and informative manner.",
lines=5,
placeholder="Enter custom system prompt..."
)
gr.Markdown("### πŸ“š Custom Data Integration")
custom_data_input = gr.Textbox(
label="Custom Training Data",
lines=10,
placeholder="Enter custom data, FAQs, or domain-specific information..."
)
with gr.Row():
update_settings_btn = gr.Button("βœ… Update Settings", variant="primary")
integrate_data_btn = gr.Button("πŸ“Š Integrate Custom Data", variant="secondary")
reset_prompt_btn = gr.Button("πŸ”„ Reset to Default", variant="secondary")
settings_status = gr.Textbox(
label="Settings Status",
interactive=False
)
# Preset system prompts
gr.Markdown("### 🎭 Preset System Prompts")
with gr.Row():
preset_customer_support = gr.Button("πŸ‘₯ Customer Support", variant="secondary")
preset_tutor = gr.Button("πŸŽ“ Educational Tutor", variant="secondary")
preset_creative = gr.Button("✨ Creative Assistant", variant="secondary")
preset_technical = gr.Button("πŸ”§ Technical Writer", variant="secondary")
with gr.Tab("πŸ“‹ Usage Guide"):
gr.Markdown("""
## 🚀 Getting Started
### 1. **Set Up API Key**
- Obtain an OpenAI API key from [OpenAI Platform](https://platform.openai.com/)
- Enter your API key in the "πŸ”‘ OpenAI API Key" field
- Wait for the green checkmark confirmation
### 2. **Configure Settings**
- **Model**: Choose from available GPT models
- **Max Tokens**: Control response length (50-2000)
- **Temperature**: Adjust creativity (0.0 = focused, 1.0 = creative)
### 3. **Advanced Customization**
- **System Prompt**: Define the AI's personality and behavior
- **Custom Data**: Add domain-specific information or FAQs
- **Presets**: Use pre-configured prompts for common use cases
### 4. **Chat Features**
- Type messages and get intelligent responses
- Clear conversation history anytime
- Export chat history as JSON
- Regenerate the last response
- Copy responses using the copy button
## πŸ› οΈ Technical Features
- **Multi-model support**: GPT-3.5, GPT-4, and variants
- **Conversation memory**: Maintains context throughout the session
- **Custom data integration**: Enhance responses with your own data
- **Export functionality**: Save conversations for later analysis
- **Real-time validation**: API key and settings verification
- **Visual indicators**: User (👤) and AI (🤖) avatars
## 💡 Use Cases
- **Customer Support**: Create domain-specific support chatbots
- **Education**: Build tutoring assistants with custom curriculum
- **Business**: Develop FAQ bots with company-specific information
- **Research**: Analyze conversations and response patterns
## 🔧 Troubleshooting
- **API Key Issues**: Ensure your key is valid and has credits
- **Model Errors**: Some models may not be available in your region
- **Long Response Times**: Reduce max tokens or switch to faster models
- **Context Limits**: Clear chat history if responses become inconsistent
""")
# Event handlers
def handle_api_key(api_key):
status = chatbot.set_api_key(api_key)
return status
def handle_chat(user_input, history):
if not user_input.strip():
return history, ""
response, updated_history = chatbot.generate_response(user_input, history)
return updated_history, ""
def handle_settings_update(model, system_prompt, max_tokens, temperature):
status = chatbot.update_settings(model, system_prompt, max_tokens, temperature)
settings_display = f"Model: {model}\nTokens: {max_tokens}\nTemp: {temperature}"
return status, settings_display
def handle_data_integration(custom_data):
status = chatbot.preprocess_data(custom_data)
return status
def handle_clear():
return chatbot.clear_conversation()
def handle_export(history):
return chatbot.export_conversation(history)
def handle_regenerate(history):
if not history:
return history
# Get the last user message and regenerate response
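# If the API call fails, generate_response returns the error text with the
# truncated history, so the last exchange is dropped rather than restored.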
last_user_msg = history[-1][0]
history_without_last = history[:-1]
response, updated_history = chatbot.generate_response(last_user_msg, history_without_last)
return updated_history
def update_settings_display(model, max_tokens, temperature):
return f"Model: {model}\nTokens: {max_tokens}\nTemp: {temperature}"
def reset_prompt():
default_prompt = "You are a helpful AI assistant. Respond in a friendly and informative manner."
return default_prompt, "βœ… System prompt reset to default"
def load_preset_prompt(preset_type):
presets = {
"customer_support": "You are a helpful customer support representative. You are friendly, professional, and knowledgeable. Always try to resolve customer issues and provide clear solutions. If you cannot solve a problem, escalate it politely.",
"tutor": "You are an experienced tutor. Explain concepts clearly, use examples, and encourage students when they struggle. Break down complex problems into smaller, manageable steps. Always check for understanding.",
"creative": "You are a creative writing assistant who helps with stories, poems, and creative content. Provide constructive feedback, suggest improvements, and inspire creativity while maintaining quality standards.",
"technical": "You are a technical writer who creates clear, concise documentation. Use precise language, provide examples when relevant, and structure information logically for developers and technical users."
}
return presets.get(preset_type, ""), f"βœ… Loaded {preset_type.replace('_', ' ').title()} preset"
# Connect events
api_key_input.change(
handle_api_key,
inputs=[api_key_input],
outputs=[api_status]
)
send_btn.click(
handle_chat,
inputs=[user_input, chatbot_interface],
outputs=[chatbot_interface, user_input]
)
user_input.submit(
handle_chat,
inputs=[user_input, chatbot_interface],
outputs=[chatbot_interface, user_input]
)
update_settings_btn.click(
handle_settings_update,
inputs=[model_dropdown, system_prompt_input, max_tokens_slider, temperature_slider],
outputs=[settings_status, current_settings]
)
integrate_data_btn.click(
handle_data_integration,
inputs=[custom_data_input],
outputs=[settings_status]
)
clear_btn.click(
handle_clear,
outputs=[user_input, chatbot_interface]
)
export_btn.click(
handle_export,
inputs=[chatbot_interface],
outputs=[settings_status]
)
regenerate_btn.click(
handle_regenerate,
inputs=[chatbot_interface],
outputs=[chatbot_interface]
)
# Live settings update
for component in [model_dropdown, max_tokens_slider, temperature_slider]:
component.change(
update_settings_display,
inputs=[model_dropdown, max_tokens_slider, temperature_slider],
outputs=[current_settings]
)
# Reset and preset buttons
reset_prompt_btn.click(
reset_prompt,
outputs=[system_prompt_input, settings_status]
)
preset_customer_support.click(
lambda: load_preset_prompt("customer_support"),
outputs=[system_prompt_input, settings_status]
)
preset_tutor.click(
lambda: load_preset_prompt("tutor"),
outputs=[system_prompt_input, settings_status]
)
preset_creative.click(
lambda: load_preset_prompt("creative"),
outputs=[system_prompt_input, settings_status]
)
preset_technical.click(
lambda: load_preset_prompt("technical"),
outputs=[system_prompt_input, settings_status]
)
return demo
# Requirements and setup instructions
def print_setup_instructions():
"""Print setup instructions"""
print("""
🤖 LLM-Based Chatbot Setup Instructions
=====================================
📦 Required Dependencies:
pip install gradio openai requests
🔑 API Key Setup:
1. Visit https://platform.openai.com/
2. Create an account and generate an API key
3. Enter the API key in the interface
🚀 Running the Application:
python app.py
📂 Files Created:
- conversation_YYYYMMDD_HHMMSS.json (exported chats)
🌐 Access: http://localhost:7860
""")
if __name__ == "__main__":
print_setup_instructions()
# Create and launch the interface
demo = create_interface()
# Launch with custom settings
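# share=True requests a temporary public *.gradio.live URL in addition to the
# local server; set it to False for local-only access.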
demo.launch(
share=True,
debug=True,
show_error=True
)