import os
import json
import time
import gradio as gr
from datetime import datetime
from typing import List, Dict, Any, Optional, Union
import threading
import re
import aiohttp
import asyncio
# Import Groq
from groq import Groq
class ChutesClient:
"""Client for interacting with Chutes API"""
def __init__(self, api_key: str):
self.api_key = api_key
self.base_url = "https://llm.chutes.ai/v1"
async def chat_completions_create(self, **kwargs) -> Dict:
"""Make async request to Chutes chat completions endpoint"""
headers = {
"Authorization": f"Bearer {self.api_key}",
"Content-Type": "application/json"
}
# Prepare the body
body = {
"model": kwargs.get("model", "openai/gpt-oss-20b"),
"messages": kwargs.get("messages", []),
"stream": False, # Non-streaming for simplicity
"max_tokens": kwargs.get("max_tokens", 1024),
"temperature": kwargs.get("temperature", 0.7)
}
# Add tool calls if present
if "tools" in kwargs and kwargs["tools"]:
body["tools"] = kwargs["tools"]
body["tool_choice"] = kwargs.get("tool_choice", "auto")
async with aiohttp.ClientSession() as session:
async with session.post(
f"{self.base_url}/chat/completions",
headers=headers,
json=body
) as response:
if response.status != 200:
raise Exception(f"Chutes API error: {await response.text()}")
return await response.json()
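
# Minimal usage sketch (not wired into the app): exercising ChutesClient on its
# own. The CHUTES_API_KEY environment variable is an assumption for this demo;
# the model name and payload are exactly those the client above already sends.
async def _demo_chutes_call() -> None:
    client = ChutesClient(api_key=os.environ["CHUTES_API_KEY"])
    result = await client.chat_completions_create(
        model="openai/gpt-oss-20b",
        messages=[{"role": "user", "content": "Say hello in one sentence."}],
        max_tokens=32,
        temperature=0.2,
    )
    print(result["choices"][0]["message"]["content"])
# Standalone run (assumption, not used by the Gradio app): asyncio.run(_demo_chutes_call())
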
class CreativeAgenticAI:
"""
Creative Agentic AI Chat Tool using Groq and Chutes models with browser search and compound models
"""
def __init__(self, groq_api_key: str, chutes_api_key: str, model: str = "compound-beta"):
"""
Initialize the Creative Agentic AI system.
Args:
groq_api_key: Groq API key
chutes_api_key: Chutes API key
model: Which model to use
"""
self.groq_api_key = groq_api_key
self.chutes_api_key = chutes_api_key
if not self.groq_api_key and model != "openai/gpt-oss-20b":
raise ValueError("No Groq API key provided")
if not self.chutes_api_key and model == "openai/gpt-oss-20b":
raise ValueError("No Chutes API key provided")
self.model = model
self.groq_client = Groq(api_key=self.groq_api_key) if self.groq_api_key else None
self.chutes_client = ChutesClient(api_key=self.chutes_api_key) if self.chutes_api_key else None
self.conversation_history = []
# Available models with their capabilities
self.available_models = {
"compound-beta": {"supports_web_search": True, "supports_browser_search": False, "api": "groq"},
"compound-beta-mini": {"supports_web_search": True, "supports_browser_search": False, "api": "groq"},
"openai/gpt-oss-20b": {"supports_web_search": False, "supports_browser_search": True, "api": "chutes"},
}
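        # These capability flags drive the prompt instructions, the tool wiring,
        # and which API client (Groq or Chutes) handles each request in chat().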
async def chat(self, message: str,
include_domains: List[str] = None,
exclude_domains: List[str] = None,
system_prompt: str = None,
temperature: float = 0.7,
max_tokens: int = 1024,
search_type: str = "auto",
force_search: bool = False) -> Dict:
"""
Send a message to the AI and get a response with flexible search options
Args:
message: User's message
include_domains: List of domains to include for web search
exclude_domains: List of domains to exclude from web search
system_prompt: Custom system prompt
temperature: Model temperature (0.0-2.0)
max_tokens: Maximum tokens in response
search_type: 'web_search', 'browser_search', 'auto', or 'none'
force_search: Force the AI to use search tools
Returns:
AI response with metadata
"""
# Enhanced system prompt for better citation behavior
if not system_prompt:
citation_instruction = """
IMPORTANT: When you search the web and find information, you MUST:
1. Always cite your sources with clickable links in this format: [Source Title](URL)
2. Include multiple diverse sources when possible
3. Show which specific websites you used for each claim
4. At the end of your response, provide a "Sources Used" section with all the links
5. Be transparent about which information comes from which source
"""
domain_context = ""
if include_domains and self._supports_web_search():
domain_context = f"\nYou are restricted to searching ONLY these domains: {', '.join(include_domains)}. Make sure to find and cite sources specifically from these domains."
elif exclude_domains and self._supports_web_search():
domain_context = f"\nAvoid searching these domains: {', '.join(exclude_domains)}. Search everywhere else on the web."
search_instruction = ""
if search_type == "browser_search":
search_instruction = "\nUse browser search tools to find the most current and relevant information from the web."
elif search_type == "web_search":
search_instruction = "\nUse web search capabilities to find relevant information."
elif force_search:
if self._supports_browser_search():
search_instruction = "\nYou MUST use search tools to find current information before responding."
elif self._supports_web_search():
search_instruction = "\nYou MUST use web search to find current information before responding."
system_prompt = f"""You are a creative and intelligent AI assistant with agentic capabilities.
You can search the web, analyze information, and provide comprehensive responses.
Be helpful, creative, and engaging while maintaining accuracy.
{citation_instruction}
{domain_context}
{search_instruction}
Your responses should be well-structured, informative, and properly cited with working links."""
# Build messages
messages = [{"role": "system", "content": system_prompt}]
# Add conversation history (last 10 exchanges)
messages.extend(self.conversation_history[-20:]) # Last 10 user-assistant pairs
# Add current message with context
enhanced_message = message
if include_domains or exclude_domains:
filter_context = []
if include_domains:
filter_context.append(f"ONLY search these domains: {', '.join(include_domains)}")
if exclude_domains:
filter_context.append(f"EXCLUDE these domains: {', '.join(exclude_domains)}")
enhanced_message += f"\n\n[Domain Filtering: {' | '.join(filter_context)}]"
messages.append({"role": "user", "content": enhanced_message})
# Set up API parameters
params = {
"messages": messages,
"model": self.model,
"temperature": temperature,
"max_tokens": max_tokens,
}
# Add domain filtering for compound models
if self._supports_web_search():
if include_domains and include_domains[0].strip():
params["include_domains"] = [domain.strip() for domain in include_domains if domain.strip()]
if exclude_domains and exclude_domains[0].strip():
params["exclude_domains"] = [domain.strip() for domain in exclude_domains if domain.strip()]
# Add tools based on search type and model capabilities
tools = []
tool_choice = None
if search_type == "browser_search" and self._supports_browser_search():
tools = [{"type": "browser_search"}]
tool_choice = "required" if force_search else "auto"
elif search_type == "auto":
if self._supports_browser_search():
tools = [{"type": "browser_search"}]
tool_choice = "required" if force_search else "auto"
elif force_search and self._supports_browser_search():
tools = [{"type": "browser_search"}]
tool_choice = "required"
if tools:
params["tools"] = tools
params["tool_choice"] = tool_choice
try:
# Make the API call based on model
            if self.available_models[self.model]["api"] == "chutes":
                response = await self.chutes_client.chat_completions_create(**params)
                # The Chutes client returns a plain JSON dict
                content = response["choices"][0]["message"]["content"]
            else:
                # The Groq SDK expects max_completion_tokens instead of max_tokens
                # and returns a response object, so content is read via attributes
                params["max_completion_tokens"] = params.pop("max_tokens", None)
                response = self.groq_client.chat.completions.create(**params)
                content = response.choices[0].message.content
# Extract tool usage information and enhance it
tool_info = self._extract_tool_info(response)
# Process content to enhance citations
processed_content = self._enhance_citations(content, tool_info)
# Add to conversation history
self.conversation_history.append({"role": "user", "content": message})
self.conversation_history.append({"role": "assistant", "content": processed_content})
# Create response object
response_data = {
"content": processed_content,
"timestamp": datetime.now().isoformat(),
"model": self.model,
"tool_usage": tool_info,
"search_type_used": search_type,
"parameters": {
"temperature": temperature,
"max_tokens": max_tokens,
"include_domains": include_domains,
"exclude_domains": exclude_domains,
"force_search": force_search
}
}
return response_data
except Exception as e:
error_msg = f"Error: {str(e)}"
self.conversation_history.append({"role": "user", "content": message})
self.conversation_history.append({"role": "assistant", "content": error_msg})
return {
"content": error_msg,
"timestamp": datetime.now().isoformat(),
"model": self.model,
"tool_usage": None,
"error": str(e)
}
def _supports_web_search(self) -> bool:
"""Check if current model supports web search (compound models)"""
return self.available_models.get(self.model, {}).get("supports_web_search", False)
def _supports_browser_search(self) -> bool:
"""Check if current model supports browser search tools"""
return self.available_models.get(self.model, {}).get("supports_browser_search", False)
def _extract_tool_info(self, response) -> Dict:
"""Extract tool usage information in a JSON serializable format"""
        tool_info = {
            "tools_used": [],
            "search_queries": [],
            "sources_found": []
        }
        # Chutes responses are plain dicts without the SDK attributes inspected
        # below, so return the empty structure for them rather than crashing.
        if isinstance(response, dict):
            return tool_info
        # Check for executed_tools attribute (compound models)
if hasattr(response.choices[0].message, 'executed_tools'):
tools = response.choices[0].message.executed_tools
if tools:
for tool in tools:
tool_dict = {
"tool_type": getattr(tool, "type", "unknown"),
"tool_name": getattr(tool, "name", "unknown"),
}
if hasattr(tool, "input"):
tool_input = str(tool.input)
tool_dict["input"] = tool_input
if "search" in tool_dict["tool_name"].lower():
tool_info["search_queries"].append(tool_input)
if hasattr(tool, "output"):
tool_output = str(tool.output)
tool_dict["output"] = tool_output
urls = self._extract_urls(tool_output)
tool_info["sources_found"].extend(urls)
tool_info["tools_used"].append(tool_dict)
# Check for tool_calls attribute (browser search models)
if hasattr(response.choices[0].message, 'tool_calls') and response.choices[0].message.tool_calls:
for tool_call in response.choices[0].message.tool_calls:
tool_dict = {
"tool_type": tool_call.type if hasattr(tool_call, 'type') else "browser_search",
"tool_name": tool_call.function.name if hasattr(tool_call, 'function') else "browser_search",
"tool_id": tool_call.id if hasattr(tool_call, 'id') else None
}
if hasattr(tool_call, 'function') and hasattr(tool_call.function, 'arguments'):
try:
args = json.loads(tool_call.function.arguments) if isinstance(tool_call.function.arguments, str) else tool_call.function.arguments
tool_dict["arguments"] = args
if "query" in args:
tool_info["search_queries"].append(args["query"])
                    except (TypeError, ValueError):
                        tool_dict["arguments"] = str(tool_call.function.arguments)
tool_info["tools_used"].append(tool_dict)
return tool_info
def _extract_urls(self, text: str) -> List[str]:
"""Extract URLs from text"""
url_pattern = r'https?://[^\s<>"]{2,}'
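        # Note: the character class only excludes whitespace, angle brackets and
        # quotes, so trailing punctuation such as ")" or "," from markdown links
        # can end up attached to a captured URL.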
urls = re.findall(url_pattern, text)
return list(set(urls))
def _enhance_citations(self, content: str, tool_info: Dict) -> str:
"""Enhance content with better citation formatting"""
if not tool_info or not tool_info.get("sources_found"):
return content
if "Sources Used:" not in content and "sources:" not in content.lower():
sources_section = "\n\n---\n\n### πŸ“š Sources Used:\n"
for i, url in enumerate(tool_info["sources_found"][:10], 1):
domain = self._extract_domain(url)
sources_section += f"{i}. [{domain}]({url})\n"
content += sources_section
return content
def _extract_domain(self, url: str) -> str:
"""Extract domain name from URL for display"""
try:
if url.startswith(('http://', 'https://')):
domain = url.split('/')[2]
if domain.startswith('www.'):
domain = domain[4:]
return domain
return url
        except Exception:
            return url
def get_model_info(self) -> Dict:
"""Get information about current model capabilities"""
return self.available_models.get(self.model, {})
def clear_history(self):
"""Clear conversation history"""
self.conversation_history = []
def get_history_summary(self) -> str:
"""Get a summary of conversation history"""
if not self.conversation_history:
return "No conversation history"
user_messages = [msg for msg in self.conversation_history if msg["role"] == "user"]
assistant_messages = [msg for msg in self.conversation_history if msg["role"] == "assistant"]
return f"Conversation: {len(user_messages)} user messages, {len(assistant_messages)} assistant responses"
# Global variables
ai_instance = None
api_key_status = "Not Set"
async def validate_api_keys(groq_api_key: str, chutes_api_key: str, model: str) -> str:
"""Validate both Groq and Chutes API keys and initialize AI instance"""
global ai_instance, api_key_status
if model == "openai/gpt-oss-20b" and not chutes_api_key:
api_key_status = "Invalid ❌"
return "❌ Please enter a valid Chutes API key for the selected model"
if model in ["compound-beta", "compound-beta-mini"] and not groq_api_key:
api_key_status = "Invalid ❌"
return "❌ Please enter a valid Groq API key for the selected model"
try:
# Test API keys based on model
if model == "openai/gpt-oss-20b":
chutes_client = ChutesClient(api_key=chutes_api_key)
await chutes_client.chat_completions_create(
messages=[{"role": "user", "content": "Hello"}],
model=model,
max_tokens=10
)
else:
groq_client = Groq(api_key=groq_api_key)
groq_client.chat.completions.create(
messages=[{"role": "user", "content": "Hello"}],
model=model,
max_tokens=10
)
# Create AI instance
ai_instance = CreativeAgenticAI(groq_api_key=groq_api_key, chutes_api_key=chutes_api_key, model=model)
api_key_status = "Valid βœ…"
model_info = ai_instance.get_model_info()
capabilities = []
if model_info.get("supports_web_search"):
capabilities.append("🌐 Web Search with Domain Filtering")
if model_info.get("supports_browser_search"):
capabilities.append("πŸ” Browser Search Tools")
cap_text = " | ".join(capabilities) if capabilities else "πŸ’¬ Chat Only"
return f"βœ… API Keys Valid! NeuroScope AI is ready.\n\n**Model:** {model}\n**Capabilities:** {cap_text}\n**API:** {model_info.get('api', 'unknown')}\n**Status:** Connected and ready for chat!"
except Exception as e:
api_key_status = "Invalid ❌"
ai_instance = None
return f"❌ Error validating API key: {str(e)}\n\nPlease check your API keys and try again."
def update_model(model: str) -> str:
"""Update the model selection"""
global ai_instance
if ai_instance:
ai_instance.model = model
model_info = ai_instance.get_model_info()
capabilities = []
if model_info.get("supports_web_search"):
capabilities.append("🌐 Web Search with Domain Filtering")
if model_info.get("supports_browser_search"):
capabilities.append("πŸ” Browser Search Tools")
cap_text = " | ".join(capabilities) if capabilities else "πŸ’¬ Chat Only"
return f"βœ… Model updated to: **{model}**\n**Capabilities:** {cap_text}\n**API:** {model_info.get('api', 'unknown')}"
else:
return "⚠️ Please set your API keys first"
def get_search_options(model: str) -> gr.update:
"""Get available search options based on model"""
if not ai_instance:
return gr.update(choices=["none"], value="none")
model_info = ai_instance.available_models.get(model, {})
options = ["none"]
if model_info.get("supports_web_search"):
options.extend(["web_search", "auto"])
if model_info.get("supports_browser_search"):
options.extend(["browser_search", "auto"])
options = list(dict.fromkeys(options))
default_value = "auto" if "auto" in options else "none"
return gr.update(choices=options, value=default_value)
async def chat_with_ai(message: str,
include_domains: str,
exclude_domains: str,
system_prompt: str,
temperature: float,
max_tokens: int,
search_type: str,
force_search: bool,
history: List) -> tuple:
"""Main chat function"""
global ai_instance
if not ai_instance:
error_msg = "⚠️ Please set your API keys first!"
history.append([message, error_msg])
return history, ""
if not message.strip():
return history, ""
include_list = [d.strip() for d in include_domains.split(",")] if include_domains.strip() else []
exclude_list = [d.strip() for d in exclude_domains.split(",")] if exclude_domains.strip() else []
try:
response = await ai_instance.chat(
message=message,
include_domains=include_list if include_list else None,
exclude_domains=exclude_list if exclude_list else None,
system_prompt=system_prompt if system_prompt.strip() else None,
temperature=temperature,
max_tokens=int(max_tokens),
search_type=search_type,
force_search=force_search
)
ai_response = response["content"]
if response.get("tool_usage"):
tool_info = response["tool_usage"]
tool_summary = []
if tool_info.get("search_queries"):
tool_summary.append(f"πŸ” Search queries: {len(tool_info['search_queries'])}")
if tool_info.get("sources_found"):
tool_summary.append(f"πŸ“„ Sources found: {len(tool_info['sources_found'])}")
if tool_info.get("tools_used"):
tool_types = [tool.get("tool_type", "unknown") for tool in tool_info["tools_used"]]
unique_types = list(set(tool_types))
tool_summary.append(f"πŸ”§ Tools used: {', '.join(unique_types)}")
if tool_summary:
ai_response += f"\n\n*{' | '.join(tool_summary)}*"
search_info = []
if response.get("search_type_used") and response["search_type_used"] != "none":
search_info.append(f"πŸ” Search type: {response['search_type_used']}")
if force_search:
search_info.append("⚑ Forced search enabled")
if include_list or exclude_list:
filter_info = []
if include_list:
filter_info.append(f"βœ… Included domains: {', '.join(include_list)}")
if exclude_list:
filter_info.append(f"❌ Excluded domains: {', '.join(exclude_list)}")
search_info.extend(filter_info)
if search_info:
ai_response += f"\n\n*🌐 Search settings: {' | '.join(search_info)}*"
history.append([message, ai_response])
return history, ""
except Exception as e:
error_msg = f"❌ Error: {str(e)}"
history.append([message, error_msg])
return history, ""
def clear_chat_history():
"""Clear the chat history"""
global ai_instance
if ai_instance:
ai_instance.clear_history()
return []
def create_gradio_app():
"""Create the main Gradio application"""
css = """
.container {
max-width: 1200px;
margin: 0 auto;
}
.header {
text-align: center;
background: linear-gradient(to right, #00ff94, #00b4db);
color: white;
padding: 20px;
border-radius: 10px;
margin-bottom: 20px;
}
.status-box {
background-color: #f8f9fa;
border: 1px solid #dee2e6;
border-radius: 8px;
padding: 15px;
margin: 10px 0;
}
.example-box {
background-color: #e8f4fd;
border-left: 4px solid #007bff;
padding: 15px;
margin: 10px 0;
border-radius: 0 8px 8px 0;
}
.domain-info {
background-color: #fff3cd;
border: 1px solid #ffeaa7;
border-radius: 8px;
padding: 15px;
margin: 10px 0;
}
.citation-info {
background-color: #d1ecf1;
border: 1px solid #bee5eb;
border-radius: 8px;
padding: 15px;
margin: 10px 0;
}
.search-info {
background-color: #e2e3e5;
border: 1px solid #c6c8ca;
border-radius: 8px;
padding: 15px;
margin: 10px 0;
}
#neuroscope-accordion {
background: linear-gradient(to right, #00ff94, #00b4db);
border-radius: 8px;
}
"""
with gr.Blocks(css=css, title="πŸ€– Creative Agentic AI Chat", theme=gr.themes.Ocean()) as app:
gr.HTML("""
<div class="header">
<h1>πŸ€– NeuroScope-AI Enhanced</h1>
<p>Powered by Groq and Chutes Models with Web Search, Browser Search & Agentic Capabilities</p>
</div>
""")
with gr.Group():
with gr.Accordion("πŸ€– NeuroScope AI Enhanced", open=False, elem_id="neuroscope-accordion"):
gr.Markdown("""
**Enhanced with Multiple Search Capabilities:**
- 🧠 **Intelligence** (Neuro): Advanced AI reasoning across multiple models
- πŸ” **Precision Search** (Scope): Domain filtering + Browser search tools
- πŸ€– **AI Capabilities** (AI): Agentic behavior with tool usage
- ⚑ **Dual Search**: Web search (compound models) + Browser search (other models)
- 🎯 **Model Flexibility**: Choose the right model for your task
""")
with gr.Group():
with gr.Accordion("πŸ” IMPORTANT - Enhanced Search Capabilities!", open=True, elem_id="neuroscope-accordion"):
gr.Markdown("""
<div class="search-info">
<h3>πŸš€ NEW: Multiple Search Types Available!</h3>
<h4>🌐 Web Search Models (Groq API)</h4>
<ul>
<li><strong>compound-beta:</strong> Most powerful with domain filtering</li>
<li><strong>compound-beta-mini:</strong> Faster with domain filtering</li>
<li><strong>Features:</strong> Include/exclude domains, autonomous web search</li>
</ul>
<h4>πŸ” Browser Search Models (Chutes API)</h4>
<ul>
<li><strong>openai/gpt-oss-20b:</strong> Fast browser search capabilities</li>
<li><strong>Features:</strong> Real-time browser search, current information</li>
</ul>
</div>
<div class="citation-info">
<h3>πŸ”— Enhanced Citation System</h3>
<p>All models now include:</p>
<ul>
<li><strong>Automatic Source Citations:</strong> Clickable links to sources</li>
<li><strong>Sources Used Section:</strong> Dedicated section showing all websites</li>
<li><strong>Search Type Indication:</strong> Shows which search method was used</li>
<li><strong>Tool Usage Display:</strong> Transparent about AI's research process</li>
</ul>
</div>
""")
with gr.Row():
with gr.Column(scale=2):
groq_api_key = gr.Textbox(
label="πŸ”‘ Groq API Key",
placeholder="Enter your Groq API key here...",
type="password",
info="Get your API key from: https://console.groq.com/"
)
chutes_api_key = gr.Textbox(
label="πŸ”‘ Chutes API Key",
placeholder="Enter your Chutes API key here...",
type="password",
info="Required for openai/gpt-oss-20b model"
)
with gr.Column(scale=2):
model_selection = gr.Radio(
choices=[
"compound-beta",
"compound-beta-mini",
"openai/gpt-oss-20b"
],
label="🧠 Model Selection",
value="compound-beta",
info="Choose based on your search needs"
)
with gr.Column(scale=1):
connect_btn = gr.Button("πŸ”— Connect", variant="primary", size="lg")
status_display = gr.Markdown("### πŸ“Š Status: Not connected", elem_classes=["status-box"])
connect_btn.click(
fn=validate_api_keys,
inputs=[groq_api_key, chutes_api_key, model_selection],
outputs=[status_display]
)
model_selection.change(
fn=update_model,
inputs=[model_selection],
outputs=[status_display]
)
with gr.Tab("πŸ’¬ Chat"):
chatbot = gr.Chatbot(
label="Creative AI Assistant with Enhanced Search",
height=500,
show_label=True,
bubble_full_width=False,
show_copy_button=True
)
with gr.Row():
msg = gr.Textbox(
label="Your Message",
placeholder="Type your message here...",
lines=3
)
with gr.Column():
send_btn = gr.Button("πŸ“€ Send", variant="primary")
clear_btn = gr.Button("πŸ—‘οΈ Clear", variant="secondary")
with gr.Accordion("πŸ” Search Settings", open=False, elem_id="neuroscope-accordion"):
with gr.Row():
search_type = gr.Radio(
choices=["auto", "web_search", "browser_search", "none"],
label="🎯 Search Type",
value="auto",
info="Choose search method (auto = model decides)"
)
force_search = gr.Checkbox(
label="⚑ Force Search",
value=False,
info="Force AI to search even for general questions"
)
model_selection.change(
fn=get_search_options,
inputs=[model_selection],
outputs=[search_type]
)
with gr.Accordion("🌐 Domain Filtering (Web Search Models Only)", open=False, elem_id="neuroscope-accordion"):
gr.Markdown("""
<div class="domain-info">
<h4>πŸ” Domain Filtering Guide</h4>
<p><strong>Note:</strong> Domain filtering only works with compound models (compound-beta, compound-beta-mini)</p>
<ul>
<li><strong>Include Domains:</strong> Only search these domains (comma-separated)</li>
<li><strong>Exclude Domains:</strong> Never search these domains (comma-separated)</li>
<li><strong>Examples:</strong> arxiv.org, *.edu, github.com, stackoverflow.com</li>
<li><strong>Wildcards:</strong> Use *.edu for all educational domains</li>
</ul>
</div>
""")
with gr.Row():
include_domains = gr.Textbox(
label="βœ… Include Domains (comma-separated)",
placeholder="arxiv.org, *.edu, github.com, stackoverflow.com",
info="Only search these domains (compound models only)"
)
exclude_domains = gr.Textbox(
label="❌ Exclude Domains (comma-separated)",
placeholder="wikipedia.org, reddit.com, twitter.com",
info="Never search these domains (compound models only)"
)
with gr.Accordion("βš™οΈ Advanced Settings", open=False, elem_id="neuroscope-accordion"):
with gr.Row():
temperature = gr.Slider(
minimum=0.0,
maximum=2.0,
value=0.7,
step=0.1,
label="🌑️ Temperature",
info="Higher = more creative, Lower = more focused"
)
max_tokens = gr.Slider(
minimum=100,
maximum=4000,
value=1024,
step=100,
label="πŸ“ Max Tokens",
info="Maximum length of response"
)
system_prompt = gr.Textbox(
label="🎭 Custom System Prompt",
placeholder="Override the default system prompt...",
lines=3,
info="Leave empty to use default creative assistant prompt with enhanced citations"
)
with gr.Accordion("πŸ“Š Model Comparison Guide", open=False, elem_id="neuroscope-accordion"):
gr.Markdown("""
### πŸ” Choose Your Model Based on Task:
**For Academic Research & Domain-Specific Search:**
- `compound-beta` or `compound-beta-mini` with include domains (*.edu, arxiv.org)
- Best for: Research papers, academic sources, filtered searches
- API: Groq
**For Current Events & Real-Time Information:**
- `openai/gpt-oss-20b` with browser search
- Best for: News, current events, real-time data
- API: Chutes
**For General Knowledge & Creative Tasks:**
- Any model with search type = "auto" or "none"
- Best for: Creative writing, general questions, analysis
**For Programming & Technical Documentation:**
- `openai/gpt-oss-20b` with browser search, or compound models with tech domains
- Best for: Code help, documentation, technical guides
""")
with gr.Accordion("πŸ”— Common Domain Examples", open=False, elem_id="neuroscope-accordion"):
gr.Markdown("""
**Academic & Research:**
- `arxiv.org`, `*.edu`, `scholar.google.com`, `researchgate.net`, `pubmed.ncbi.nlm.nih.gov`
**Technology & Programming:**
- `github.com`, `stackoverflow.com`, `docs.python.org`, `developer.mozilla.org`, `medium.com`
**News & Media:**
- `reuters.com`, `bbc.com`, `npr.org`, `apnews.com`, `cnn.com`, `nytimes.com`
**Business & Finance:**
- `bloomberg.com`, `wsj.com`, `nasdaq.com`, `sec.gov`, `investopedia.com`
**Science & Medicine:**
- `nature.com`, `science.org`, `pubmed.ncbi.nlm.nih.gov`, `who.int`, `cdc.gov`
**Government & Official:**
- `*.gov`, `*.org`, `un.org`, `worldbank.org`, `imf.org`
""")
with gr.Accordion("πŸ“– How to Use This Enhanced App", open=False, elem_id="neuroscope-accordion"):
gr.Markdown("""
### πŸš€ Getting Started
1. **Enter your API Keys** - Groq from [console.groq.com](https://console.groq.com/), Chutes for openai/gpt-oss-20b
2. **Select a model** - Choose based on your search needs:
- **Compound models** (Groq): For web search with domain filtering
- **openai/gpt-oss-20b** (Chutes): For browser search with real-time data
3. **Configure search settings** - Choose search type and options
4. **Click Connect** - Validate your keys and connect to the AI
5. **Start chatting!** - Type your message and get intelligent responses with citations
### 🎯 Key Features
- **Dual Search Capabilities**: Web search (compound) + Browser search (Chutes)
- **Smart Citations**: Automatic source linking and citation formatting
- **Domain Filtering**: Control which websites the AI searches (compound models)
- **Real-time Search**: Get current information with browser search tools
- **Model Flexibility**: Choose the right model and API for your task
- **Enhanced Tool Visibility**: See exactly what search tools were used
### πŸ’‘ Tips for Best Results
**For Research Tasks:**
- Use compound models with domain filtering
- Include academic domains (*.edu, arxiv.org) for scholarly sources
- Use "Force Search" for the most current information
**For Current Events:**
- Use openai/gpt-oss-20b (Chutes)
- Set search type to "browser_search"
- Enable "Force Search" for real-time data
**For Creative Tasks:**
- Any model works well
- Set search type to "none" for purely creative responses
- Use higher temperature (0.8-1.0) for more creativity
""")
with gr.Accordion("🎯 Sample Examples to Test Enhanced Search", open=False, elem_id="neuroscope-accordion"):
gr.Markdown("""
<div class="example-box">
<h4>πŸ”¬ Research & Analysis (Test Different Models)</h4>
**Compound Model + Domain Filtering (Groq):**
- Query: "What are the latest breakthroughs in quantum computing?"
- Model: compound-beta
- Include domains: "arxiv.org, *.edu, nature.com"
- Search type: web_search
**Browser Search Model (Chutes):**
- Same query with openai/gpt-oss-20b
- Search type: browser_search
- Force search: enabled
<h4>πŸ“° Current Events (Browser Search Excellence)</h4>
**Real-time News:**
- Query: "What happened in AI industry this week?"
- Model: openai/gpt-oss-20b (Chutes)
- Search type: browser_search
- Force search: enabled
**Compare with Web Search:**
- Same query with compound-beta (Groq)
- Include domains: "reuters.com, bbc.com, techcrunch.com"
<h4>πŸ’» Programming & Tech (Model Comparison)</h4>
**Technical Documentation:**
- Query: "How to implement OAuth 2.0 in Python Flask?"
- Try with both model types:
- compound-beta with "github.com, docs.python.org, stackoverflow.com"
- openai/gpt-oss-20b (Chutes) with browser_search
<h4>🎨 Creative Tasks (No Search Needed)</h4>
- Query: "Write a short story about AI and humans working together"
- Any model with search_type: "none"
- Higher temperature (0.8-1.0)
<h4>πŸ“Š Business Analysis (Filtered vs Real-time)</h4>
**Financial Data (Real-time):**
- Query: "Current cryptocurrency market trends"
- Model: openai/gpt-oss-20b (Chutes)
- Search type: browser_search
- Force search: enabled
**Business Analysis (Filtered):**
- Query: "Cryptocurrency adoption in enterprise"
- Model: compound-beta (Groq)
- Include domains: "bloomberg.com, wsj.com, harvard.edu"
</div>
""")
send_btn.click(
fn=chat_with_ai,
inputs=[msg, include_domains, exclude_domains, system_prompt, temperature, max_tokens, search_type, force_search, chatbot],
outputs=[chatbot, msg]
)
msg.submit(
fn=chat_with_ai,
inputs=[msg, include_domains, exclude_domains, system_prompt, temperature, max_tokens, search_type, force_search, chatbot],
outputs=[chatbot, msg]
)
clear_btn.click(
fn=clear_chat_history,
outputs=[chatbot]
)
with gr.Accordion("πŸš€ About This Enhanced NeuroScope AI", open=True, elem_id="neuroscope-accordion"):
gr.Markdown("""
**Enhanced Creative Agentic AI Chat Tool** with dual search capabilities:
### πŸ†• **New in This Version:**
- πŸ” **Browser Search Integration**: Real-time search with Chutes API
- 🌐 **Dual Search System**: Web search (Groq) + Browser search (Chutes)
- 🎯 **Model Flexibility**: Multiple models across two APIs
- ⚑ **Force Search Option**: Make AI search even for general questions
- πŸ”§ **Enhanced Tool Visibility**: See exactly what search tools were used
- πŸ“Š **Model Comparison Guide**: Choose the right model and API for your task
### πŸ† **Core Features:**
- πŸ”— **Automatic Source Citations**: Every response includes clickable links to sources
- πŸ“š **Sources Used Section**: Dedicated section showing all websites referenced
- 🌐 **Smart Domain Filtering**: Control search scope (compound models)
- πŸ” **Real-time Browser Search**: Current information (Chutes model)
- πŸ’¬ **Conversational Memory**: Maintains context throughout the session
- βš™οΈ **Full Customization**: Adjust all parameters and prompts
- 🎨 **Creative & Analytical**: Optimized for both creative and research tasks
### πŸ› οΈ **Technical Details:**
- **Compound Models (Groq)**: compound-beta, compound-beta-mini (web search + domain filtering)
- **Tool-based Model (Chutes)**: openai/gpt-oss-20b (browser search tools)
- **Automatic Search Type Detection**: AI chooses best search method
- **Enhanced Error Handling**: Robust error management and user feedback
- **Real-time Status Updates**: Live feedback on model capabilities and search settings
""")
return app
# Main execution
if __name__ == "__main__":
app = create_gradio_app()
app.launch(
share=True
)
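
    # Note: share=True requests a temporary public *.gradio.live link in addition
    # to the local server; omit it (the default is False) for local-only use.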