import gradio as gr
import datetime
from typing import Dict, List, Any, Union, Optional
import random
import os
import json
import numpy as np
from pathlib import Path
# Import utilities
from utils.storage import load_data, save_data
from utils.state import generate_id, get_timestamp, record_activity
from utils.ai_models import (
    generate_text, answer_question, analyze_image, transcribe_speech,
    translate_text, analyze_sentiment, summarize_text, generate_code
)
from utils.config import AI_MODELS, DATA_DIR
from utils.logging import get_logger
from utils.error_handling import handle_ai_model_exceptions, AIModelError
# Initialize logger
logger = get_logger(__name__)
# Define AI assistant types and their descriptions
AI_ASSISTANT_TYPES = {
"General Chat": {
"description": "Have natural conversations on any topic",
"icon": "π¬",
"model": "microsoft/DialoGPT-medium",
"task": "text_generation",
"placeholder": "Chat with me about anything...",
"examples": [
"Tell me about the benefits of meditation",
"What are some good productivity habits?",
"Can you recommend some books on personal growth?"
]
},
"Task Assistant": {
"description": "Get help with planning and organizing tasks",
"icon": "π",
"model": "microsoft/DialoGPT-medium",
"task": "text_generation",
"placeholder": "Ask for help with your tasks and planning...",
"examples": [
"Help me break down this project into smaller tasks",
"How can I prioritize my workload better?",
"Create a schedule for my day"
]
},
"Writing Helper": {
"description": "Assistance with writing and content creation",
"icon": "βοΈ",
"model": "microsoft/DialoGPT-medium",
"task": "text_generation",
"placeholder": "What would you like help writing?",
"examples": [
"Help me draft an email to my team about the project delay",
"Give me ideas for a blog post about productivity",
"Improve this paragraph: [your text here]"
]
},
"Code Assistant": {
"description": "Get help with programming and coding",
"icon": "π»",
"model": "microsoft/CodeBERT-base",
"task": "code_generation",
"placeholder": "Describe what code you need help with...",
"examples": [
"Write a Python function to sort a list of dictionaries by a specific key",
"How do I create a responsive navbar with CSS?",
"Debug this code: [your code here]"
]
},
"Research Agent": {
"description": "Help with gathering and organizing information",
"icon": "π",
"model": "distilbert-base-uncased-distilled-squad",
"task": "question_answering",
"placeholder": "What topic would you like to research?",
"examples": [
"Summarize the key points about climate change",
"What are the main theories of motivation?",
"Compare different project management methodologies"
]
},
"Learning Tutor": {
"description": "Educational support and explanations",
"icon": "π",
"model": "microsoft/DialoGPT-medium",
"task": "text_generation",
"placeholder": "What would you like to learn about?",
"examples": [
"Explain quantum computing in simple terms",
"Help me understand the concept of compound interest",
"What are the key events of World War II?"
]
},
"Wellness Coach": {
"description": "Guidance on health, fitness, and wellbeing",
"icon": "π§",
"model": "microsoft/DialoGPT-medium",
"task": "text_generation",
"placeholder": "Ask for wellness and health advice...",
"examples": [
"Suggest a 10-minute desk workout",
"What are some stress management techniques?",
"Give me ideas for healthy meal prep"
]
},
"Creative Assistant": {
"description": "Help with brainstorming and creative ideas",
"icon": "π‘",
"model": "microsoft/DialoGPT-medium",
"task": "text_generation",
"placeholder": "What creative ideas do you need?",
"examples": [
"Help me brainstorm names for my new project",
"Give me ideas for a fantasy story setting",
"Suggest creative ways to repurpose old items"
]
}
}
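
# Illustrative sketch only: a rough context-retrieval helper for the Research Agent's
# question-answering branch (see send_message below, which currently uses a generic stub
# instead). The helper name, the notes.json location, and the note schema
# ({"title": ..., "content": ...}) are assumptions, not part of the existing utilities;
# nothing on this page calls it by default.
def retrieve_context(query: str, max_chars: int = 1000) -> str:
    """Return rough supporting text for a query by keyword-matching locally saved notes."""
    notes_path = os.path.join(DATA_DIR, "notes.json")  # assumed location of saved notes
    if not os.path.exists(notes_path):
        return "No stored notes were found for this topic."
    try:
        with open(notes_path, "r", encoding="utf-8") as f:
            notes = json.load(f)
    except (OSError, json.JSONDecodeError):
        return "No stored notes were found for this topic."
    if not isinstance(notes, list):
        return "No stored notes were found for this topic."
    # Score each note by how many query words it shares, then concatenate the best matches
    query_words = set(query.lower().split())
    scored = []
    for note in notes:
        text = f"{note.get('title', '')} {note.get('content', '')}"
        overlap = len(query_words & set(text.lower().split()))
        if overlap:
            scored.append((overlap, text))
    scored.sort(key=lambda pair: pair[0], reverse=True)
    context = " ".join(text for _, text in scored)
    return context[:max_chars] or "No stored notes were found for this topic."
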

def create_ai_hub_page(state: Dict[str, Any]) -> None:
    """
    Create the AI Assistant Hub page with access to various specialized AI assistants

    Args:
        state: Application state
    """
    # Initialize AI conversation history if not present
    if "ai_conversations" not in state:
        state["ai_conversations"] = {}
        for assistant_type in AI_ASSISTANT_TYPES:
            state["ai_conversations"][assistant_type] = []

    # Create the AI hub page layout
    with gr.Column(elem_id="ai-hub-page"):
        gr.Markdown("# 🤖 AI Assistant Hub")
        gr.Markdown("*Access specialized AI assistants powered by free models to help with various tasks*")

        # Create tabs for different assistant types
        with gr.Tabs() as assistant_tabs:
            assistant_interfaces = {}
            assistant_chat_histories = {}
            assistant_inputs = {}
            assistant_send_btns = {}

            # Create a tab for each assistant type
            for assistant_type, assistant_info in AI_ASSISTANT_TYPES.items():
                with gr.TabItem(f"{assistant_info['icon']} {assistant_type}") as tab:
                    assistant_interfaces[assistant_type] = tab

                    # Assistant description
                    gr.Markdown(f"## {assistant_info['icon']} {assistant_type}")
                    gr.Markdown(f"*{assistant_info['description']}*")
                    gr.Markdown(f"*Using model: {assistant_info['model']}*")

                    # Chat interface
                    with gr.Column():
                        # Chat history display
                        assistant_chat_histories[assistant_type] = gr.Chatbot(
                            label="Conversation",
                            elem_id=f"{assistant_type.lower().replace(' ', '-')}-chat",
                            height=400,
                            show_copy_button=True
                        )

                        # Example queries
                        with gr.Accordion("Example queries", open=False):
                            example_btns = []
                            for example in assistant_info["examples"]:
                                example_btn = gr.Button(example)
                                example_btns.append(example_btn)

                        # Input area
                        with gr.Row():
                            assistant_inputs[assistant_type] = gr.Textbox(
                                placeholder=assistant_info["placeholder"],
                                label="Your message",
                                lines=3,
                                elem_id=f"{assistant_type.lower().replace(' ', '-')}-input"
                            )
                            assistant_send_btns[assistant_type] = gr.Button("Send", variant="primary")

                        # Clear chat button
                        clear_btn = gr.Button("Clear Conversation")

                        # Set up clear button functionality
                        def create_clear_handler(assistant_type):
                            def clear_history():
                                state["ai_conversations"][assistant_type] = []
                                return []
                            return clear_history

                        clear_btn.click(
                            create_clear_handler(assistant_type),
                            inputs=[],
                            outputs=[assistant_chat_histories[assistant_type]]
                        )

                        # Set up example buttons
                        for example_btn in example_btns:
                            example_btn.click(
                                lambda example=example_btn.value: example,
                                inputs=[],
                                outputs=[assistant_inputs[assistant_type]]
                            )

        # Function to handle sending messages to assistants
        def send_message(assistant_type, message):
            if not message.strip():
                return state["ai_conversations"].get(assistant_type, []), ""

            # Get assistant info
            assistant_info = AI_ASSISTANT_TYPES[assistant_type]
            task = assistant_info["task"]

            # Add user message to history
            history = state["ai_conversations"].get(assistant_type, [])
            history.append([message, None])

            # Generate response based on assistant type
            try:
                if task == "text_generation":
                    # For conversation-based assistants, include the last few completed exchanges in the prompt
                    context = "\n".join([f"User: {h[0]}\nAssistant: {h[1]}" for h in history[:-1][-3:] if h[1] is not None])
                    if context:
                        context += "\n"
                    # Create a prompt with assistant type context
                    prompt = f"You are a helpful {assistant_type}.\n\n{context}User: {message}\nAssistant:"
                    response = generate_text(prompt, max_length=300)
                elif task == "code_generation":
                    # For the code assistant
                    response = generate_code(message)
                elif task == "question_answering":
                    # The research agent needs supporting context. A real implementation would
                    # retrieve relevant passages first (see the retrieve_context sketch near the
                    # top of this module); here a generic stub is used instead.
                    context = "The user is asking for information. Provide a helpful response based on your knowledge."
                    response = answer_question(message, context)
                else:
                    # Fall back to plain text generation so `response` is always defined
                    response = generate_text(message, max_length=300)

                # Update the last message with the response
                history[-1][1] = response

                # Record activity
                record_activity({
                    "type": "ai_assistant_used",
                    "assistant": assistant_type,
                    "timestamp": get_timestamp()
                })

                # Save conversation history
                state["ai_conversations"][assistant_type] = history
                save_data(os.path.join(DATA_DIR, "ai_conversations.json"), state["ai_conversations"])

                return history, ""
            except Exception as e:
                logger.error(f"Error generating response: {str(e)}")
                error_message = "Sorry, I encountered an error while generating a response. Please try again."
                history[-1][1] = error_message
                return history, ""

        # Set up send button for each assistant
        for assistant_type, send_btn in assistant_send_btns.items():
            send_btn.click(
                lambda message, assistant_type=assistant_type: send_message(assistant_type, message),
                inputs=[assistant_inputs[assistant_type]],
                outputs=[assistant_chat_histories[assistant_type], assistant_inputs[assistant_type]]
            )

            # Also trigger on Enter key
            assistant_inputs[assistant_type].submit(
                lambda message, assistant_type=assistant_type: send_message(assistant_type, message),
                inputs=[assistant_inputs[assistant_type]],
                outputs=[assistant_chat_histories[assistant_type], assistant_inputs[assistant_type]]
            )

        # Load conversation history for each assistant
        for assistant_type, chatbot in assistant_chat_histories.items():
            if assistant_type in state["ai_conversations"]:
                chatbot.value = state["ai_conversations"][assistant_type]

        # AI Assistant Analytics
        with gr.Accordion("📊 AI Assistant Analytics", open=False):
            gr.Markdown("### Usage Statistics")

            # Basic usage counts derived from the stored conversations
            # (a fuller implementation could track richer usage metrics)
            usage_data = [
                [assistant_type, len(state["ai_conversations"].get(assistant_type, []))]
                for assistant_type in AI_ASSISTANT_TYPES
            ]
            usage_stats = gr.Dataframe(
                headers=["Assistant Type", "Messages"],
                datatype=["str", "number"],
                value=usage_data,
                label="Assistant Usage"
            )

            # Refresh button for analytics
            refresh_stats_btn = gr.Button("Refresh Statistics")

            def update_stats():
                return [
                    [assistant_type, len(state["ai_conversations"].get(assistant_type, []))]
                    for assistant_type in AI_ASSISTANT_TYPES
                ]

            refresh_stats_btn.click(
                update_stats,
                inputs=[],
                outputs=[usage_stats]
            )

    # Record page visit in activity
    record_activity({
        "type": "page_viewed",
        "page": "AI Assistant Hub",
        "timestamp": get_timestamp()
    })
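

# Minimal usage sketch: mounting the page inside a gr.Blocks app. The state dict,
# page title, and launch call here are illustrative assumptions, not the
# application's actual entry point.
if __name__ == "__main__":
    app_state: Dict[str, Any] = {}
    with gr.Blocks(title="AI Assistant Hub") as demo:
        create_ai_hub_page(app_state)
    demo.launch()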