|
import gradio as gr |
|
import openai |
|
import json |
|
import os |
|
import time |
|
from typing import List, Tuple, Optional |
|
import requests |
|
from datetime import datetime |
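
# Gradio chat front end for the legacy OpenAI 0.28 ChatCompletion API.
# ChatbotManager stores the API key, model settings, and system prompt;
# create_interface() builds the Blocks layout and wires the event handlers.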
|
|
|
class ChatbotManager: |
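    """Holds the API key, model settings, system prompt, and chat helpers used by the UI."""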
|
def __init__(self): |
|
self.conversation_history = [] |
|
self.current_api_key = None |
|
self.current_model = "gpt-3.5-turbo" |
|
self.system_prompt = "You are a helpful AI assistant. Respond in a friendly and informative manner." |
|
self.max_tokens = 150 |
|
self.temperature = 0.7 |
|
|
|
def set_api_key(self, api_key: str) -> str: |
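        """Store the key and verify it with a lightweight openai.Model.list() call."""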
|
if not api_key.strip(): |
|
return "β Please enter a valid API key" |
|
|
|
self.current_api_key = api_key.strip() |
|
openai.api_key = self.current_api_key |
|
|
|
try: |
|
openai.Model.list() |
|
return "β
API key validated successfully!" |
|
except Exception as e: |
|
return f"β Invalid API key: {str(e)}" |
|
|
|
def update_settings(self, model: str, system_prompt: str, max_tokens: int, temperature: float) -> str: |
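        """Apply the model, system prompt, and sampling settings used for subsequent requests."""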
|
self.current_model = model |
|
self.system_prompt = system_prompt |
|
self.max_tokens = max_tokens |
|
self.temperature = temperature |
|
return f"β
Settings updated: Model={model}, Max Tokens={max_tokens}, Temperature={temperature}" |
|
|
|
def preprocess_data(self, data_text: str) -> str: |
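        """Append user-supplied context to the base system prompt (no model training is performed)."""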
|
if not data_text.strip(): |
|
return "No custom data provided" |
|
|
|
base_prompt = "You are a helpful AI assistant. Respond in a friendly and informative manner." |
|
self.system_prompt = base_prompt + f"\n\nAdditional Context:\n{data_text}" |
|
return f"β
Custom data integrated ({len(data_text)} characters)" |
|
|
|
def generate_response(self, user_input: str, history: List[Tuple[str, str]]) -> Tuple[str, List[Tuple[str, str]]]: |
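        """Rebuild the message list from the chat history and request a completion from the current model."""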
|
if not self.current_api_key: |
|
return "β Please set your API key first!", history |
|
|
|
if not user_input.strip(): |
|
return "Please enter a message.", history |
|
|
|
try: |
|
messages = [{"role": "system", "content": self.system_prompt}] |
|
|
|
for user_msg, assistant_msg in history: |
|
messages.append({"role": "user", "content": user_msg}) |
|
messages.append({"role": "assistant", "content": assistant_msg}) |
|
|
|
messages.append({"role": "user", "content": user_input}) |
|
|
|
response = openai.ChatCompletion.create( |
|
model=self.current_model, |
|
messages=messages, |
|
max_tokens=self.max_tokens, |
|
temperature=self.temperature, |
|
n=1, |
|
stop=None, |
|
) |
|
|
|
assistant_response = response.choices[0].message.content.strip() |
|
history.append((user_input, assistant_response)) |
|
|
|
return assistant_response, history |
|
|
|
except Exception as e: |
|
error_msg = f"β Error generating response: {str(e)}" |
|
return error_msg, history |
|
|
|
def clear_conversation(self) -> Tuple[str, List[Tuple[str, str]]]: |
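        """Reset the stored history and clear the textbox and chat window."""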
|
self.conversation_history = [] |
|
return "", [] |
|
|
|
def export_conversation(self, history: List[Tuple[str, str]]) -> Tuple[str, Optional[str]]: |
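        """Dump the chat history to a timestamped JSON file under /tmp."""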
|
if not history: |
|
return "No conversation to export", None |
|
|
|
export_data = { |
|
"timestamp": datetime.now().isoformat(), |
|
"model": self.current_model, |
|
"conversation": [ |
|
{"user": user_msg, "assistant": assistant_msg} |
|
for user_msg, assistant_msg in history |
|
] |
|
} |
|
|
|
filename = f"conversation_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json" |
|
|
|
try: |
|
os.makedirs("/tmp", exist_ok=True) |
|
filepath = os.path.join("/tmp", filename) |
|
with open(filepath, 'w', encoding='utf-8') as f: |
|
json.dump(export_data, f, indent=2, ensure_ascii=False) |
|
return f"β
Conversation exported as {filename}", filepath |
|
except Exception as e: |
|
return f"β Export failed: {str(e)}", None |
|
|
|
|
|
chatbot = ChatbotManager() |
|
|
|
|
|
AVAILABLE_MODELS = [ |
|
"gpt-3.5-turbo", |
|
"gpt-3.5-turbo-16k", |
|
"gpt-4", |
|
"gpt-4-32k", |
|
"gpt-4-0613", |
|
"gpt-4-32k-0613" |
|
] |
|
|
|
def create_interface(): |
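    """Build the Gradio Blocks layout, define the event handlers, and wire them together."""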
|
with gr.Blocks(title="LLM-Based Chatbot", theme=gr.themes.Ocean()) as demo: |
|
gr.Markdown(""" |
|
# 🤖 LLM-Based Conversational AI Chatbot
|
This chatbot uses OpenAI's language models to hold intelligent, context-aware conversations.
|
Enter your OpenAI API key to get started! |
|
""") |
|
|
|
with gr.Tab("π¬ Chat Interface"): |
|
with gr.Row(): |
|
with gr.Column(scale=3): |
|
chatbot_interface = gr.Chatbot( |
|
label="Conversation", |
|
height=400, |
|
show_label=True, |
|
avatar_images=("user.png", "assistant.png"), |
|
show_copy_button=True, |
|
bubble_full_width=False, |
|
) |
|
|
|
with gr.Row(): |
|
user_input = gr.Textbox( |
|
placeholder="Type your message here...", |
|
scale=4, |
|
show_label=False, |
|
container=False |
|
) |
|
                        send_btn = gr.Button("📤 Send", variant="primary", scale=1)
|
|
|
with gr.Row(): |
|
                        clear_btn = gr.Button("🗑️ Clear Chat", variant="secondary")

                        regenerate_btn = gr.Button("🔄 Regenerate", variant="secondary")
|
|
|
with gr.Column(scale=1): |
|
gr.Markdown("### π§ Quick Settings") |
|
|
|
api_key_input = gr.Textbox( |
|
label="π OpenAI API Key", |
|
placeholder="sk-...", |
|
type="password" |
|
) |
|
api_status = gr.Textbox( |
|
label="API Status", |
|
interactive=False, |
|
value="β No API key provided" |
|
) |
|
|
|
model_dropdown = gr.Dropdown( |
|
choices=AVAILABLE_MODELS, |
|
value="gpt-3.5-turbo", |
|
label="π€ Model" |
|
) |
|
|
|
max_tokens_slider = gr.Slider( |
|
minimum=50, |
|
maximum=4096, |
|
value=150, |
|
step=10, |
|
label="π Max Tokens" |
|
) |
|
|
|
temperature_slider = gr.Slider( |
|
minimum=0.0, |
|
maximum=1.0, |
|
value=0.7, |
|
step=0.1, |
|
label="π‘οΈ Temperature" |
|
) |
|
|
|
gr.Markdown("### π Current Settings") |
|
current_settings = gr.Textbox( |
|
value="Model: gpt-3.5-turbo\nTokens: 150\nTemp: 0.7", |
|
label="Active Configuration", |
|
interactive=False, |
|
lines=3 |
|
) |
|
|
|
with gr.Tab("βοΈ Advanced Settings"): |
|
gr.Markdown("### π― System Prompt Configuration") |
|
system_prompt_input = gr.Textbox( |
|
label="System Prompt", |
|
value="You are a helpful AI assistant. Respond in a friendly and informative manner.", |
|
lines=5, |
|
placeholder="Enter custom system prompt..." |
|
) |
|
|
|
gr.Markdown("### π Custom Data Integration") |
|
custom_data_input = gr.Textbox( |
|
label="Custom Training Data", |
|
lines=10, |
|
placeholder="Enter custom data, FAQs, or domain-specific information..." |
|
) |
|
|
|
with gr.Row(): |
|
                update_settings_btn = gr.Button("✅ Update Settings", variant="primary")

                integrate_data_btn = gr.Button("📊 Integrate Custom Data", variant="secondary")

                reset_prompt_btn = gr.Button("🔄 Reset to Default", variant="secondary")
|
|
|
settings_status = gr.Textbox( |
|
label="Settings Status", |
|
interactive=False |
|
) |
|
|
|
gr.Markdown("### π Preset System Prompts") |
|
with gr.Row(): |
|
                preset_customer_support = gr.Button("👥 Customer Support", variant="secondary")

                preset_tutor = gr.Button("📚 Educational Tutor", variant="secondary")

                preset_creative = gr.Button("✨ Creative Assistant", variant="secondary")

                preset_technical = gr.Button("🔧 Technical Writer", variant="secondary")
|
|
|
with gr.Tab("π Usage Guide"): |
|
gr.Markdown(""" |
|
## 🚀 Getting Started
|
|
|
### 1. **Set Up API Key** |
|
- Obtain an OpenAI API key from [OpenAI Platform](https://platform.openai.com/) |
|
- Enter your API key in the "🔑 OpenAI API Key" field
|
- Wait for the green checkmark confirmation |
|
|
|
### 2. **Configure Settings** |
|
- **Model**: Choose from available GPT models |
|
- **Max Tokens**: Control response length (50-4096) |
|
- **Temperature**: Adjust creativity (0.0 = focused, 1.0 = creative) |
|
|
|
### 3. **Advanced Customization** |
|
- **System Prompt**: Define the AI's personality and behavior |
|
- **Custom Data**: Add domain-specific information or FAQs |
|
- **Presets**: Use pre-configured prompts for common use cases |
|
|
|
### 4. **Chat Features** |
|
- Type messages and get intelligent responses |
|
- Clear conversation history anytime |
|
- Regenerate the last response |
|
- Copy responses using the copy button |
|
|
|
## 🛠️ Technical Features
|
|
|
- **Multi-model support**: GPT-3.5, GPT-4, and variants |
|
- **Conversation memory**: Maintains context throughout the session |
|
- **Custom data integration**: Enhance responses with your own data |
|
- **Real-time validation**: API key and settings verification |
|
- **Visual indicators**: User and AI avatars |
|
|
|
## 💡 Use Cases
|
|
|
- **Customer Support**: Create domain-specific support chatbots |
|
- **Education**: Build tutoring assistants with custom curriculum |
|
- **Business**: Develop FAQ bots with company-specific information |
|
- **Research**: Analyze conversations and response patterns |
|
|
|
## 🔧 Troubleshooting
|
|
|
- **API Key Issues**: Ensure your key is valid and has credits |
|
- **Model Errors**: Some models may not be available in your region |
|
- **Long Response Times**: Reduce max tokens or switch to faster models |
|
- **Context Limits**: Clear chat history if responses become inconsistent |
|
|
|
## 📦 Installation Requirements
|
|
|
This app targets the legacy OpenAI Python SDK (0.28.0); the 1.x SDKs use a different client interface. Install the dependencies:
|
```bash |
|
pip install openai==0.28.0 |
|
pip install gradio |
|
pip install requests |
|
``` |
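
To confirm that your key and environment work outside the UI, you can run a minimal sketch against the same legacy `openai.ChatCompletion` interface this app uses (the model name and prompt below are just placeholders):

```python
import openai

openai.api_key = "sk-..."  # paste your real key here

reply = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Say hello in one short sentence."}],
    max_tokens=20,
)
print(reply.choices[0].message.content)
```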
|
""") |
|
|
|
|
|
def handle_api_key(api_key): |
|
status = chatbot.set_api_key(api_key) |
|
return status |
|
|
|
def handle_chat(user_input, history): |
|
if not user_input.strip(): |
|
return history or [], "" |
|
|
|
            response, updated_history = chatbot.generate_response(user_input, history or [])

            # Surface errors (e.g. a missing API key) in the chat window too;
            # on failure generate_response returns the message without appending it.
            if not updated_history or updated_history[-1][1] != response:
                updated_history = updated_history + [(user_input, response)]

            return updated_history, ""
|
|
|
def handle_settings_update(model, system_prompt, max_tokens, temperature): |
|
status = chatbot.update_settings(model, system_prompt, max_tokens, temperature) |
|
settings_display = f"Model: {model}\nTokens: {max_tokens}\nTemp: {temperature}" |
|
return status, settings_display |
|
|
|
def handle_data_integration(custom_data): |
|
status = chatbot.preprocess_data(custom_data) |
|
return status |
|
|
|
def handle_clear(): |
|
return chatbot.clear_conversation() |
|
|
|
def handle_regenerate(history): |
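            """Resend the most recent user message after dropping its previous answer."""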
|
if not history: |
|
return history or [] |
|
|
|
last_user_msg = history[-1][0] |
|
history_without_last = history[:-1] |
|
response, updated_history = chatbot.generate_response(last_user_msg, history_without_last) |
|
return updated_history |
|
|
|
def update_settings_display(model, max_tokens, temperature): |
|
return f"Model: {model}\nTokens: {max_tokens}\nTemp: {temperature}" |
|
|
|
def reset_prompt(): |
|
default_prompt = "You are a helpful AI assistant. Respond in a friendly and informative manner." |
|
            return default_prompt, "✅ System prompt reset to default"
|
|
|
def load_preset_prompt(preset_type): |
|
presets = { |
|
"customer_support": "You are a helpful customer support representative. You are friendly, professional, and knowledgeable. Always try to resolve customer issues and provide clear solutions. If you cannot solve a problem, escalate it politely.", |
|
"tutor": "You are an experienced tutor. Explain concepts clearly, use examples, and encourage students when they struggle. Break down complex problems into smaller, manageable steps. Always check for understanding.", |
|
"creative": "You are a creative writing assistant who helps with stories, poems, and creative content. Provide constructive feedback, suggest improvements, and inspire creativity while maintaining quality standards.", |
|
"technical": "You are a technical writer who creates clear, concise documentation. Use precise language, provide examples when relevant, and structure information logically for developers and technical users." |
|
} |
|
            return presets.get(preset_type, ""), f"✅ Loaded {preset_type.replace('_', ' ').title()} preset"
|
|
|
|
|
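        # Wire the UI events to the handlers defined above.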
api_key_input.change( |
|
handle_api_key, |
|
inputs=[api_key_input], |
|
outputs=[api_status] |
|
) |
|
|
|
send_btn.click( |
|
handle_chat, |
|
inputs=[user_input, chatbot_interface], |
|
outputs=[chatbot_interface, user_input] |
|
) |
|
|
|
user_input.submit( |
|
handle_chat, |
|
inputs=[user_input, chatbot_interface], |
|
outputs=[chatbot_interface, user_input] |
|
) |
|
|
|
update_settings_btn.click( |
|
handle_settings_update, |
|
inputs=[model_dropdown, system_prompt_input, max_tokens_slider, temperature_slider], |
|
outputs=[settings_status, current_settings] |
|
) |
|
|
|
integrate_data_btn.click( |
|
handle_data_integration, |
|
inputs=[custom_data_input], |
|
outputs=[settings_status] |
|
) |
|
|
|
clear_btn.click( |
|
handle_clear, |
|
outputs=[user_input, chatbot_interface] |
|
) |
|
|
|
regenerate_btn.click( |
|
handle_regenerate, |
|
inputs=[chatbot_interface], |
|
outputs=[chatbot_interface] |
|
) |
|
|
|
for component in [model_dropdown, max_tokens_slider, temperature_slider]: |
|
component.change( |
|
update_settings_display, |
|
inputs=[model_dropdown, max_tokens_slider, temperature_slider], |
|
outputs=[current_settings] |
|
) |
|
|
|
reset_prompt_btn.click( |
|
reset_prompt, |
|
outputs=[system_prompt_input, settings_status] |
|
) |
|
|
|
preset_customer_support.click( |
|
lambda: load_preset_prompt("customer_support"), |
|
outputs=[system_prompt_input, settings_status] |
|
) |
|
|
|
preset_tutor.click( |
|
lambda: load_preset_prompt("tutor"), |
|
outputs=[system_prompt_input, settings_status] |
|
) |
|
|
|
preset_creative.click( |
|
lambda: load_preset_prompt("creative"), |
|
outputs=[system_prompt_input, settings_status] |
|
) |
|
|
|
preset_technical.click( |
|
lambda: load_preset_prompt("technical"), |
|
outputs=[system_prompt_input, settings_status] |
|
) |
|
|
|
return demo |
|
|
|
if __name__ == "__main__": |
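    # Bind to all interfaces on port 7860 so the app is reachable from containers or remote hosts.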
|
demo = create_interface() |
|
demo.launch( |
|
server_name="0.0.0.0", |
|
server_port=7860 |
|
) |