# app.py — Gradio chat UI for HuggingFace Inference Providers (rev e5c4a6a)
import gradio as gr
import requests
import os
import json
from typing import List, Dict, Optional
import time
# Curated selection of advanced AI models for general users.
# Every model is served through the same HuggingFace Inference Providers
# router, so the endpoint URL is shared rather than repeated per entry.
_ROUTER_ENDPOINT = "https://router.huggingface.co/v1/chat/completions"


def _model_entry(provider: str, display_name: str, description: str, category: str) -> dict:
    """Build one registry entry; all entries share the router endpoint."""
    return {
        "provider": provider,
        "display_name": display_name,
        "description": description,
        "category": category,
        "endpoint": _ROUTER_ENDPOINT,
    }


ADVANCED_MODELS = {
    "meta-llama/Llama-3.3-70B-Instruct": _model_entry(
        "Cerebras",
        "Llama 3.3 70B (Ultra Fast)",
        "Meta's latest and most capable model, optimized for speed",
        "General Purpose",
    ),
    "deepseek-ai/DeepSeek-R1": _model_entry(
        "Groq",
        "DeepSeek R1 (Reasoning)",
        "Advanced reasoning model for complex problem solving",
        "Reasoning & Analysis",
    ),
    "meta-llama/Meta-Llama-3.1-405B-Instruct": _model_entry(
        "SambaNova",
        "Llama 3.1 405B (Most Powerful)",
        "Meta's largest and most capable language model",
        "Expert Level",
    ),
    "meta-llama/Meta-Llama-3-70B-Instruct": _model_entry(
        "Together",
        "Llama 3 70B (Balanced)",
        "Excellent balance of capability and speed",
        "General Purpose",
    ),
    "cohere/command-r-plus": _model_entry(
        "Cohere",
        "Command R+ (Enterprise)",
        "Enterprise-grade model for professional use",
        "Business & Professional",
    ),
    "Qwen/Qwen2.5-72B-Instruct": _model_entry(
        "Novita",
        "Qwen 2.5 72B (Multilingual)",
        "Excellent for multiple languages and coding",
        "Multilingual & Code",
    ),
    "mistralai/Mixtral-8x7B-Instruct-v0.1": _model_entry(
        "Nebius",
        "Mixtral 8x7B (Efficient)",
        "Fast and efficient for everyday tasks",
        "Daily Tasks",
    ),
}
class AIChat:
    """Thin client for the HuggingFace Inference Providers chat-completions API.

    Reads the HF token from the environment at construction time and exposes a
    single `send_message` call that always returns a result dict (never raises
    for API/network failures).
    """

    def __init__(self):
        # Fail fast when the token is missing so the UI can show setup help.
        self.hf_token = os.getenv("HF_TOKEN")
        if not self.hf_token:
            raise ValueError("HF_TOKEN environment variable is required")
        self.headers = {
            "Authorization": f"Bearer {self.hf_token}",
            "Content-Type": "application/json",
        }

    def send_message(
        self,
        model_id: str,
        message: str,
        conversation_history: Optional[List[Dict]] = None,
    ) -> Dict:
        """Send a chat message to the selected AI model.

        Args:
            model_id: Key into ADVANCED_MODELS identifying the model.
            message: The new user message.
            conversation_history: Prior turns as OpenAI-style role/content
                dicts, oldest first. Not mutated.

        Returns:
            On success: {"success": True, "response", "model", "provider"}.
            On failure: {"success": False, "error": <description>}.
        """
        if model_id not in ADVANCED_MODELS:
            return {
                "success": False,
                "error": "Selected model is not available",
            }
        model_info = ADVANCED_MODELS[model_id]

        # Build the full message list: prior turns first, then the new one.
        # Copy instead of extending in place so the caller's list is untouched.
        messages: List[Dict] = list(conversation_history or [])
        messages.append({"role": "user", "content": message})

        payload = {
            "model": model_id,
            "messages": messages,
            "max_tokens": 1000,
            "temperature": 0.7,
            "stream": False,
        }
        try:
            response = requests.post(
                model_info["endpoint"],
                headers=self.headers,
                json=payload,
                timeout=60,
            )
            if response.status_code != 200:
                return {
                    "success": False,
                    "error": f"API Error: {response.status_code} - {response.text}",
                }
            result = response.json()
            choices = result.get("choices") or []
            if not choices:
                return {
                    "success": False,
                    "error": "No response generated",
                }
            return {
                "success": True,
                "response": choices[0]["message"]["content"],
                "model": model_info["display_name"],
                "provider": model_info["provider"],
            }
        # RequestException covers connection/timeout errors; ValueError covers
        # malformed JSON bodies; KeyError covers unexpected response shapes.
        except (requests.RequestException, ValueError, KeyError) as e:
            return {
                "success": False,
                "error": f"Connection error: {str(e)}",
            }
def create_chat_interface():
    """Build and return the Gradio Blocks app.

    If HF_TOKEN is not configured, returns a minimal setup-instructions page
    instead of the chat UI.
    """
    try:
        chat_ai = AIChat()
    except ValueError as e:
        # Missing HF_TOKEN: show setup instructions instead of the chat UI.
        with gr.Blocks(title="❌ Setup Required") as demo:
            gr.Markdown(f"""
            # ❌ Setup Required
            **{str(e)}**
            Please set the `HF_TOKEN` environment variable with your HuggingFace token.
            Get your token at: https://huggingface.co/settings/tokens
            """)
        return demo

    # (label, value) pairs for the model dropdown.
    model_choices = [
        (f"🚀 {info['display_name']} - {info['description']}", model_id)
        for model_id, info in ADVANCED_MODELS.items()
    ]

    def chat_with_ai(message, history, selected_model):
        """Handle one chat turn.

        Returns (updated_history, "") — the empty string is wired to the
        input textbox so it is cleared after every send.
        """
        if not message.strip():
            return history, ""
        if not selected_model:
            history.append([message, "❌ Please select an AI model first"])
            return history, ""

        # Convert gradio's [user, assistant] pairs to OpenAI-style dicts.
        conversation_history = []
        for user_msg, ai_msg in history:
            if user_msg and ai_msg:
                conversation_history.append({"role": "user", "content": user_msg})
                conversation_history.append({"role": "assistant", "content": ai_msg})

        result = chat_ai.send_message(selected_model, message, conversation_history)
        # Show either the model's reply or the error inline in the chat.
        if result["success"]:
            reply = result["response"]
        else:
            reply = f"❌ Error: {result['error']}"
        history.append([message, reply])
        return history, ""

    def clear_chat():
        """Clear the chat history and the input box."""
        return [], ""

    def get_model_info(selected_model):
        """Return a Markdown blurb describing the selected model."""
        if not selected_model or selected_model not in ADVANCED_MODELS:
            return "Select a model to see details"
        info = ADVANCED_MODELS[selected_model]
        return f"""
**🤖 {info['display_name']}**
**Provider:** {info['provider']}
**Category:** {info['category']}
**Description:** {info['description']}
Ready to chat! Type your message below.
"""

    # Build the interface layout.
    with gr.Blocks(
        title="🤖 Chat with Advanced AI Models",
        theme=gr.themes.Soft(),
        css="""
        .chat-container {
            max-width: 1000px;
            margin: 0 auto;
        }
        .model-info {
            background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
            color: white;
            padding: 15px;
            border-radius: 10px;
            margin: 10px 0;
        }
        """
    ) as demo:
        gr.Markdown("""
        # 🤖 Chat with Advanced AI Models
        **Experience the latest AI technology!** Choose from powerful models and start chatting instantly.
        ✨ **What you can do:**
        - Ask questions and get intelligent answers
        - Get help with writing, analysis, and creative tasks
        - Solve problems and get explanations
        - Have natural conversations
        """)

        with gr.Row():
            # Left column - model selection.
            with gr.Column(scale=1):
                gr.Markdown("### 🎯 Choose Your AI")
                model_selector = gr.Dropdown(
                    choices=model_choices,
                    label="Select AI Model",
                    info="Each model has different strengths",
                    interactive=True,
                )
                model_info_display = gr.Markdown(
                    "Select a model to see details",
                    elem_classes=["model-info"],
                )
                # Refresh the info panel whenever the selection changes.
                model_selector.change(
                    get_model_info,
                    inputs=model_selector,
                    outputs=model_info_display,
                )

            # Right column - chat interface.
            with gr.Column(scale=2):
                gr.Markdown("### 💬 Chat Interface")
                chatbot = gr.Chatbot(
                    label="Conversation",
                    height=400,
                    show_label=False,
                    container=True,
                    elem_classes=["chat-container"],
                )
                with gr.Row():
                    message_input = gr.Textbox(
                        placeholder="Type your message here...",
                        label="Your Message",
                        scale=4,
                        lines=1,
                    )
                    send_btn = gr.Button("Send 📤", variant="primary", scale=1)
                with gr.Row():
                    clear_btn = gr.Button("Clear Chat 🗑️", variant="secondary")

        # chat_with_ai already returns "" for the input box, so the textbox
        # is cleared by the main handler — no extra .then() step is needed.
        send_btn.click(
            chat_with_ai,
            inputs=[message_input, chatbot, model_selector],
            outputs=[chatbot, message_input],
        )
        message_input.submit(
            chat_with_ai,
            inputs=[message_input, chatbot, model_selector],
            outputs=[chatbot, message_input],
        )
        clear_btn.click(clear_chat, outputs=[chatbot, message_input])

        # Footer.
        gr.Markdown("""
        ---
        ## 🚀 **Featured AI Models:**
        - **🚀 Ultra Fast**: Llama 3.3 70B on Cerebras chips
        - **🧠 Reasoning**: DeepSeek R1 for complex problem solving
        - **💪 Most Powerful**: Llama 3.1 405B for expert tasks
        - **⚖️ Balanced**: Llama 3 70B for everyday use
        - **💼 Enterprise**: Command R+ for professional work
        - **🌍 Multilingual**: Qwen 2.5 72B for global communication
        - **⚡ Efficient**: Mixtral 8x7B for quick responses
        ## 💡 **Tips for Better Conversations:**
        - Be specific about what you want
        - Ask follow-up questions for deeper insights
        - Try different models for different types of tasks
        - Use clear, natural language
        ---
        *Powered by HuggingFace Inference Providers* 🤗
        """)

    return demo
if __name__ == "__main__":
    # Entry point: build the UI and serve it on all interfaces, port 7860.
    # A missing HF_TOKEN (or any other startup failure) is reported here.
    try:
        demo = create_chat_interface()
        demo.launch(server_name="0.0.0.0", server_port=7860, share=False)
    except Exception as e:
        print(f"Error starting chat application: {e}")
        print("Please ensure HF_TOKEN environment variable is set.")