import gradio as gr
import openai
import json
import os
import time
from typing import List, Tuple, Optional
import requests
from datetime import datetime


class ChatbotManager:
    def __init__(self):
        self.conversation_history = []
        self.current_api_key = None
        self.current_model = "gpt-3.5-turbo"
        self.system_prompt = "You are a helpful AI assistant. Respond in a friendly and informative manner."
        self.max_tokens = 150
        self.temperature = 0.7

    def set_api_key(self, api_key: str) -> str:
        """Set the OpenAI API key"""
        if not api_key.strip():
            return "❌ Please enter a valid API key"

        self.current_api_key = api_key.strip()
        openai.api_key = self.current_api_key

        # Test the API key
        try:
            openai.Model.list()
            return "✅ API key validated successfully!"
        except Exception as e:
            return f"❌ Invalid API key: {str(e)}"

    def update_settings(self, model: str, system_prompt: str, max_tokens: int, temperature: float) -> str:
        """Update chatbot settings"""
        self.current_model = model
        self.system_prompt = system_prompt
        self.max_tokens = int(max_tokens)  # sliders may deliver floats; the API expects an integer
        self.temperature = temperature
        return f"✅ Settings updated: Model={model}, Max Tokens={max_tokens}, Temperature={temperature}"

    def preprocess_data(self, data_text: str) -> str:
        """Preprocess and integrate custom data into the system prompt"""
        if not data_text.strip():
            return "No custom data provided"

        # Add custom data to system prompt
        self.system_prompt += f"\n\nAdditional Context:\n{data_text}"
        return f"✅ Custom data integrated ({len(data_text)} characters)"

    def generate_response(self, user_input: str, history: List[Tuple[str, str]]) -> Tuple[str, List[Tuple[str, str]]]:
        """Generate response using the selected LLM model"""
        if not self.current_api_key:
            return "❌ Please set your API key first!", history

        if not user_input.strip():
            return "Please enter a message.", history

        try:
            # Prepare conversation context
            messages = [{"role": "system", "content": self.system_prompt}]

            # Add conversation history
            for user_msg, assistant_msg in history:
                messages.append({"role": "user", "content": user_msg})
                messages.append({"role": "assistant", "content": assistant_msg})

            # Add current user input
            messages.append({"role": "user", "content": user_input})

            # Generate response
            response = openai.ChatCompletion.create(
                model=self.current_model,
                messages=messages,
                max_tokens=self.max_tokens,
                temperature=self.temperature,
                n=1,
                stop=None,
            )

            assistant_response = response.choices[0].message.content.strip()

            # Update history
            history.append((user_input, assistant_response))

            return assistant_response, history

        except Exception as e:
            error_msg = f"❌ Error generating response: {str(e)}"
            return error_msg, history

    def clear_conversation(self) -> Tuple[str, List[Tuple[str, str]]]:
        """Clear conversation history"""
        self.conversation_history = []
        return "", []

    def export_conversation(self, history: List[Tuple[str, str]]) -> str:
        """Export conversation history to JSON format"""
        if not history:
            return "No conversation to export"

        export_data = {
            "timestamp": datetime.now().isoformat(),
            "model": self.current_model,
            "conversation": [
                {"user": user_msg, "assistant": assistant_msg}
                for user_msg, assistant_msg in history
            ]
        }

        filename = f"conversation_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"

        try:
            with open(filename, 'w', encoding='utf-8') as f:
                json.dump(export_data, f, indent=2, ensure_ascii=False)
            return f"✅ Conversation exported to {filename}"
        except Exception as e:
            return f"❌ Export failed: {str(e)}"


# Initialize chatbot manager
chatbot = ChatbotManager()

# Define available models
AVAILABLE_MODELS = [
    "gpt-3.5-turbo",
"gpt-3.5-turbo-16k", "gpt-4", "gpt-4-32k", "gpt-4-turbo-preview", "gpt-4o", "gpt-4o-mini" ] def create_interface(): """Create the Gradio interface""" with gr.Blocks(title="LLM-Based Chatbot", theme=gr.themes.Soft()) as demo: gr.Markdown(""" # 🤖 LLM-Based Conversational AI Chatbot This chatbot leverages powerful Language Models to provide intelligent conversations. Enter your OpenAI API key to get started! """) with gr.Tab("💬 Chat Interface"): with gr.Row(): with gr.Column(scale=3): chatbot_interface = gr.Chatbot( label="Conversation", height=400, show_label=True, avatar_images=("👤", "🤖") ) with gr.Row(): user_input = gr.Textbox( placeholder="Type your message here...", scale=4, show_label=False ) send_btn = gr.Button("Send", variant="primary", scale=1) with gr.Row(): clear_btn = gr.Button("Clear Chat", variant="secondary") export_btn = gr.Button("Export Chat", variant="secondary") with gr.Column(scale=1): gr.Markdown("### 🔧 Quick Settings") api_key_input = gr.Textbox( label="OpenAI API Key", placeholder="sk-...", type="password" ) api_status = gr.Textbox( label="API Status", interactive=False, value="❌ No API key provided" ) model_dropdown = gr.Dropdown( choices=AVAILABLE_MODELS, value="gpt-3.5-turbo", label="Model" ) max_tokens_slider = gr.Slider( minimum=50, maximum=500, value=150, step=10, label="Max Tokens" ) temperature_slider = gr.Slider( minimum=0.0, maximum=1.0, value=0.7, step=0.1, label="Temperature" ) with gr.Tab("⚙️ Advanced Settings"): gr.Markdown("### System Prompt Configuration") system_prompt_input = gr.Textbox( label="System Prompt", value="You are a helpful AI assistant. Respond in a friendly and informative manner.", lines=5, placeholder="Enter custom system prompt..." ) gr.Markdown("### 📊 Custom Data Integration") custom_data_input = gr.Textbox( label="Custom Training Data", lines=10, placeholder="Enter custom data, FAQs, or domain-specific information..." ) with gr.Row(): update_settings_btn = gr.Button("Update Settings", variant="primary") integrate_data_btn = gr.Button("Integrate Custom Data", variant="secondary") settings_status = gr.Textbox( label="Settings Status", interactive=False ) with gr.Tab("📋 Usage Guide"): gr.Markdown(""" ## 🚀 Getting Started ### 1. **Set Up API Key** - Obtain an OpenAI API key from [OpenAI Platform](https://platform.openai.com/) - Enter your API key in the "OpenAI API Key" field - Wait for the green checkmark confirmation ### 2. **Configure Settings** - **Model**: Choose from available GPT models - **Max Tokens**: Control response length (50-500) - **Temperature**: Adjust creativity (0.0 = focused, 1.0 = creative) ### 3. **Advanced Customization** - **System Prompt**: Define the AI's personality and behavior - **Custom Data**: Add domain-specific information or FAQs ### 4. 
            ### 4. **Chat Features**
            - Type messages and get intelligent responses
            - Clear conversation history anytime
            - Export chat history as JSON

            ## 🛠️ Technical Features

            - **Multi-model support**: GPT-3.5, GPT-4, and variants
            - **Conversation memory**: Maintains context throughout the session
            - **Custom data integration**: Enhance responses with your own data
            - **Export functionality**: Save conversations for later analysis
            - **Real-time validation**: API key and settings verification

            ## 💡 Use Cases

            - **Customer Support**: Create domain-specific support chatbots
            - **Education**: Build tutoring assistants with custom curriculum
            - **Business**: Develop FAQ bots with company-specific information
            - **Research**: Analyze conversations and response patterns
            """)

        # Event handlers
        def handle_api_key(api_key):
            status = chatbot.set_api_key(api_key)
            return status

        def handle_chat(user_input, history):
            if not user_input.strip():
                return history, ""
            response, updated_history = chatbot.generate_response(user_input, history)
            return updated_history, ""

        def handle_settings_update(model, system_prompt, max_tokens, temperature):
            status = chatbot.update_settings(model, system_prompt, max_tokens, temperature)
            return status

        def handle_data_integration(custom_data):
            status = chatbot.preprocess_data(custom_data)
            return status

        def handle_clear():
            return chatbot.clear_conversation()

        def handle_export(history):
            return chatbot.export_conversation(history)

        # Connect events
        api_key_input.change(
            handle_api_key,
            inputs=[api_key_input],
            outputs=[api_status]
        )

        send_btn.click(
            handle_chat,
            inputs=[user_input, chatbot_interface],
            outputs=[chatbot_interface, user_input]
        )

        user_input.submit(
            handle_chat,
            inputs=[user_input, chatbot_interface],
            outputs=[chatbot_interface, user_input]
        )

        update_settings_btn.click(
            handle_settings_update,
            inputs=[model_dropdown, system_prompt_input, max_tokens_slider, temperature_slider],
            outputs=[settings_status]
        )

        integrate_data_btn.click(
            handle_data_integration,
            inputs=[custom_data_input],
            outputs=[settings_status]
        )

        clear_btn.click(
            handle_clear,
            outputs=[user_input, chatbot_interface]
        )

        export_btn.click(
            handle_export,
            inputs=[chatbot_interface],
            outputs=[settings_status]
        )

    return demo


# Requirements and setup instructions
def print_setup_instructions():
    """Print setup instructions"""
    print("""
    🤖 LLM-Based Chatbot Setup Instructions
    =======================================

    📦 Required Dependencies:
    pip install gradio "openai<1.0" requests
    (this app calls the legacy openai.ChatCompletion interface, which was removed in openai 1.0)

    🔑 API Key Setup:
    1. Visit https://platform.openai.com/
    2. Create an account and generate an API key
    3. Enter the API key in the interface

    🚀 Running the Application:
    python app.py

    📂 Files Created:
    - conversation_YYYYMMDD_HHMMSS.json (exported chats)
    """)


if __name__ == "__main__":
    print_setup_instructions()

    # Create and launch the interface
    demo = create_interface()

    # Launch with custom settings
    demo.launch(
        share=True
    )
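# ---------------------------------------------------------------------------
# Optional migration sketch (commented out, not used by the app): the code
# above targets the legacy openai<1.0 SDK (openai.api_key, openai.Model.list,
# openai.ChatCompletion). If you would rather run against the openai 1.x
# client, the equivalent calls inside ChatbotManager would look roughly like
# this minimal sketch:
#
#   from openai import OpenAI
#
#   client = OpenAI(api_key=self.current_api_key)
#   client.models.list()  # replaces openai.Model.list() for key validation
#   response = client.chat.completions.create(
#       model=self.current_model,
#       messages=messages,
#       max_tokens=self.max_tokens,
#       temperature=self.temperature,
#   )
#   assistant_response = response.choices[0].message.content.strip()
# ---------------------------------------------------------------------------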
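# ---------------------------------------------------------------------------
# Quick smoke-test sketch: ChatbotManager can also be exercised without the
# Gradio UI, assuming an OPENAI_API_KEY environment variable is set (this is
# an illustration only, not part of the app):
#
#   manager = ChatbotManager()
#   print(manager.set_api_key(os.environ.get("OPENAI_API_KEY", "")))
#   reply, history = manager.generate_response("Hello!", [])
#   print(reply)
#   print(manager.export_conversation(history))
# ---------------------------------------------------------------------------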