import json
import re
from datetime import datetime
from typing import List, Dict, Optional

import gradio as gr

# Import Groq
from groq import Groq


class CreativeAgenticAI:
    """
    Creative Agentic AI Chat Tool using Groq's models with browser search and compound models
    """

    def __init__(self, api_key: str, model: str = "compound-beta"):
        """
        Initialize the Creative Agentic AI system.

        Args:
            api_key: Groq API key
            model: Which Groq model to use
        """
        self.api_key = api_key
        if not self.api_key:
            raise ValueError("No API key provided")

        self.client = Groq(api_key=self.api_key)
        self.model = model
        self.conversation_history = []

        # Available models with their capabilities
        self.available_models = {
            "compound-beta": {"supports_web_search": True, "supports_browser_search": False},
            "compound-beta-mini": {"supports_web_search": True, "supports_browser_search": False},
            "openai/gpt-oss-20b": {"supports_web_search": False, "supports_browser_search": True},
        }

    def chat(self, message: str, include_domains: Optional[List[str]] = None,
             exclude_domains: Optional[List[str]] = None, system_prompt: Optional[str] = None,
             temperature: float = 0.7, max_tokens: int = 1024,
             search_type: str = "auto", force_search: bool = False) -> Dict:
        """
        Send a message to the AI and get a response with flexible search options.

        Args:
            message: User's message
            include_domains: List of domains to include for web search
            exclude_domains: List of domains to exclude from web search
            system_prompt: Custom system prompt
            temperature: Model temperature (0.0-2.0)
            max_tokens: Maximum tokens in response
            search_type: 'web_search', 'browser_search', 'auto', or 'none'
            force_search: Force the AI to use search tools

        Returns:
            AI response with metadata
        """
        # Enhanced system prompt for better citation behavior
        if not system_prompt:
            citation_instruction = """
IMPORTANT: When you search the web and find information, you MUST:
1. Always cite your sources with clickable links in this format: [Source Title](URL)
2. Include multiple diverse sources when possible
3. Show which specific websites you used for each claim
4. At the end of your response, provide a "Sources Used" section with all the links
5. Be transparent about which information comes from which source
"""

            domain_context = ""
            if include_domains and self._supports_web_search():
                domain_context = f"\nYou are restricted to searching ONLY these domains: {', '.join(include_domains)}. Make sure to find and cite sources specifically from these domains."
            elif exclude_domains and self._supports_web_search():
                domain_context = f"\nAvoid searching these domains: {', '.join(exclude_domains)}. Search everywhere else on the web."

            search_instruction = ""
            if search_type == "browser_search":
                search_instruction = "\nUse browser search tools to find the most current and relevant information from the web."
            elif search_type == "web_search":
                search_instruction = "\nUse web search capabilities to find relevant information."
            elif force_search:
                if self._supports_browser_search():
                    search_instruction = "\nYou MUST use search tools to find current information before responding."
                elif self._supports_web_search():
                    search_instruction = "\nYou MUST use web search to find current information before responding."

            system_prompt = f"""You are a creative and intelligent AI assistant with agentic capabilities.
You can search the web, analyze information, and provide comprehensive responses.
Be helpful, creative, and engaging while maintaining accuracy.
{citation_instruction}
{domain_context}
{search_instruction}
Your responses should be well-structured, informative, and properly cited with working links."""

        # Build messages
        messages = [{"role": "system", "content": system_prompt}]

        # Add conversation history (last 10 exchanges)
        messages.extend(self.conversation_history[-20:])  # Last 10 user-assistant pairs

        # Add current message with context
        enhanced_message = message
        if include_domains or exclude_domains:
            filter_context = []
            if include_domains:
                filter_context.append(f"ONLY search these domains: {', '.join(include_domains)}")
            if exclude_domains:
                filter_context.append(f"EXCLUDE these domains: {', '.join(exclude_domains)}")
            enhanced_message += f"\n\n[Domain Filtering: {' | '.join(filter_context)}]"

        messages.append({"role": "user", "content": enhanced_message})

        # Set up API parameters (compound models take max_tokens;
        # tool-based models take max_completion_tokens)
        params = {
            "messages": messages,
            "model": self.model,
            "temperature": temperature,
        }
        if self._supports_browser_search():
            params["max_completion_tokens"] = max_tokens
        else:
            params["max_tokens"] = max_tokens

        # Add domain filtering for compound models
        if self._supports_web_search():
            if include_domains and include_domains[0].strip():
                params["include_domains"] = [domain.strip() for domain in include_domains if domain.strip()]
            if exclude_domains and exclude_domains[0].strip():
                params["exclude_domains"] = [domain.strip() for domain in exclude_domains if domain.strip()]

        # Add tools based on search type and model capabilities
        tools = []
        tool_choice = None
        if search_type == "browser_search" and self._supports_browser_search():
            tools = [{"type": "browser_search"}]
            tool_choice = "required" if force_search else "auto"
        elif search_type == "auto":
            if self._supports_browser_search():
                tools = [{"type": "browser_search"}]
                tool_choice = "required" if force_search else "auto"
        elif force_search and self._supports_browser_search():
            tools = [{"type": "browser_search"}]
            tool_choice = "required"

        if tools:
            params["tools"] = tools
            params["tool_choice"] = tool_choice

        try:
            # Make the API call
            response = self.client.chat.completions.create(**params)
            content = response.choices[0].message.content or ""

            # Extract tool usage information and enhance it
            tool_info = self._extract_tool_info(response)

            # Process content to enhance citations
            processed_content = self._enhance_citations(content, tool_info)

            # Add to conversation history
            self.conversation_history.append({"role": "user", "content": message})
            self.conversation_history.append({"role": "assistant", "content": processed_content})

            # Create response object
            response_data = {
                "content": processed_content,
                "timestamp": datetime.now().isoformat(),
                "model": self.model,
                "tool_usage": tool_info,
                "search_type_used": search_type,
                "parameters": {
                    "temperature": temperature,
                    "max_tokens": max_tokens,
                    "include_domains": include_domains,
                    "exclude_domains": exclude_domains,
                    "force_search": force_search
                }
            }
            return response_data

        except Exception as e:
            error_msg = f"Error: {str(e)}"
            self.conversation_history.append({"role": "user", "content": message})
            self.conversation_history.append({"role": "assistant", "content": error_msg})
            return {
                "content": error_msg,
                "timestamp": datetime.now().isoformat(),
                "model": self.model,
                "tool_usage": None,
                "error": str(e)
            }

    def _supports_web_search(self) -> bool:
        """Check if current model supports web search (compound models)"""
        return self.available_models.get(self.model, {}).get("supports_web_search", False)

    def _supports_browser_search(self) -> bool:
        """Check if current model supports browser search tools"""
        return self.available_models.get(self.model, {}).get("supports_browser_search", False)

    def _extract_tool_info(self, response) -> Dict:
        """Extract tool usage information in a JSON-serializable format"""
        tool_info = {
            "tools_used": [],
            "search_queries": [],
            "sources_found": []
        }

        # Check for executed_tools attribute (compound models)
        if hasattr(response.choices[0].message, 'executed_tools'):
            tools = response.choices[0].message.executed_tools
            if tools:
                for tool in tools:
                    tool_dict = {
                        "tool_type": getattr(tool, "type", "unknown"),
                        "tool_name": getattr(tool, "name", "unknown"),
                    }

                    # Extract search queries and results
                    if hasattr(tool, "input"):
                        tool_input = str(tool.input)
                        tool_dict["input"] = tool_input
                        # Try to extract search query
                        if "search" in tool_dict["tool_name"].lower():
                            tool_info["search_queries"].append(tool_input)

                    if hasattr(tool, "output"):
                        tool_output = str(tool.output)
                        tool_dict["output"] = tool_output
                        # Try to extract URLs from output
                        urls = self._extract_urls(tool_output)
                        tool_info["sources_found"].extend(urls)

                    tool_info["tools_used"].append(tool_dict)

        # Check for tool_calls attribute (browser search models)
        if hasattr(response.choices[0].message, 'tool_calls') and response.choices[0].message.tool_calls:
            for tool_call in response.choices[0].message.tool_calls:
                tool_dict = {
                    "tool_type": tool_call.type if hasattr(tool_call, 'type') else "browser_search",
                    "tool_name": tool_call.function.name if hasattr(tool_call, 'function') else "browser_search",
                    "tool_id": tool_call.id if hasattr(tool_call, 'id') else None
                }
                if hasattr(tool_call, 'function') and hasattr(tool_call.function, 'arguments'):
                    try:
                        args = json.loads(tool_call.function.arguments) if isinstance(tool_call.function.arguments, str) else tool_call.function.arguments
                        tool_dict["arguments"] = args
                        if "query" in args:
                            tool_info["search_queries"].append(args["query"])
                    except (ValueError, TypeError):
                        tool_dict["arguments"] = str(tool_call.function.arguments)
                tool_info["tools_used"].append(tool_dict)

        return tool_info

    def _extract_urls(self, text: str) -> List[str]:
        """Extract URLs from text"""
        url_pattern = r'https?://[^\s<>"]{2,}'
        urls = re.findall(url_pattern, text)
        return list(set(urls))  # Remove duplicates

    def _enhance_citations(self, content: str, tool_info: Dict) -> str:
        """Enhance content with better citation formatting"""
        if not tool_info or not tool_info.get("sources_found"):
            return content

        # Add sources section if not already present
        if "Sources Used:" not in content and "sources:" not in content.lower():
            sources_section = "\n\n---\n\n### 📚 Sources Used:\n"
            for i, url in enumerate(tool_info["sources_found"][:10], 1):  # Limit to 10 sources
                # Try to extract domain name for better formatting
                domain = self._extract_domain(url)
                sources_section += f"{i}. [{domain}]({url})\n"
            content += sources_section

        return content

    def _extract_domain(self, url: str) -> str:
        """Extract domain name from URL for display"""
        try:
            if url.startswith(('http://', 'https://')):
                domain = url.split('/')[2]
                # Remove www. prefix if present
                if domain.startswith('www.'):
                    domain = domain[4:]
                return domain
            return url
        except Exception:
            return url

    def get_model_info(self) -> Dict:
        """Get information about current model capabilities"""
        return self.available_models.get(self.model, {})

    def clear_history(self):
        """Clear conversation history"""
        self.conversation_history = []

    def get_history_summary(self) -> str:
        """Get a summary of conversation history"""
        if not self.conversation_history:
            return "No conversation history"
        user_messages = [msg for msg in self.conversation_history if msg["role"] == "user"]
        assistant_messages = [msg for msg in self.conversation_history if msg["role"] == "assistant"]
        return f"Conversation: {len(user_messages)} user messages, {len(assistant_messages)} assistant responses"


# Global variables
ai_instance = None
api_key_status = "Not Set"


def validate_api_key(api_key: str, model: str) -> str:
    """Validate Groq API key and initialize AI instance"""
    global ai_instance, api_key_status

    if not api_key or len(api_key.strip()) < 10:
        api_key_status = "Invalid ❌"
        return "❌ Please enter a valid API key (should be longer than 10 characters)"

    try:
        # Test the API key with a minimal request
        client = Groq(api_key=api_key)
        test_params = {
            "messages": [{"role": "user", "content": "Hello"}],
            "model": model,
        }
        # Compound models take max_tokens; tool-based models take max_completion_tokens
        if model in ("compound-beta", "compound-beta-mini"):
            test_params["max_tokens"] = 10
        else:
            test_params["max_completion_tokens"] = 10
        client.chat.completions.create(**test_params)

        # Create AI instance
        ai_instance = CreativeAgenticAI(api_key=api_key, model=model)
        api_key_status = "Valid ✅"

        model_info = ai_instance.get_model_info()
        capabilities = []
        if model_info.get("supports_web_search"):
            capabilities.append("🌐 Web Search with Domain Filtering")
        if model_info.get("supports_browser_search"):
            capabilities.append("🔍 Browser Search Tools")
        cap_text = " | ".join(capabilities) if capabilities else "💬 Chat Only"

        return f"✅ API Key Valid! NeuroScope AI is ready.\n\n**Model:** {model}\n**Capabilities:** {cap_text}\n**Status:** Connected and ready for chat!"

    except Exception as e:
        api_key_status = "Invalid ❌"
        ai_instance = None
        return f"❌ Error validating API key: {str(e)}\n\nPlease check your API key and try again."
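
# --- Illustrative usage sketch (not wired into the Gradio UI) ----------------
# A minimal example of driving CreativeAgenticAI directly from Python. The
# function name, sample query, and domain list below are illustrative only;
# call it with a real Groq API key to try it.
def example_direct_chat(api_key: str) -> None:
    """Run a single domain-filtered chat call outside the Gradio interface."""
    ai = CreativeAgenticAI(api_key=api_key, model="compound-beta")
    result = ai.chat(
        message="Summarize recent work on retrieval-augmented generation.",
        include_domains=["arxiv.org", "*.edu"],  # compound models support domain filtering
        search_type="web_search",
        temperature=0.5,
        max_tokens=512,
    )
    print(result["content"])
    print(ai.get_history_summary())
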
def update_model(model: str) -> str:
    """Update the model selection"""
    global ai_instance
    if ai_instance:
        ai_instance.model = model
        model_info = ai_instance.get_model_info()
        capabilities = []
        if model_info.get("supports_web_search"):
            capabilities.append("🌐 Web Search with Domain Filtering")
        if model_info.get("supports_browser_search"):
            capabilities.append("🔍 Browser Search Tools")
        cap_text = " | ".join(capabilities) if capabilities else "💬 Chat Only"
        return f"✅ Model updated to: **{model}**\n**Capabilities:** {cap_text}"
    else:
        return "⚠️ Please set your API key first"


def get_search_options(model: str) -> gr.update:
    """Get available search options based on model"""
    if not ai_instance:
        return gr.update(choices=["none"], value="none")

    model_info = ai_instance.available_models.get(model, {})
    options = ["none"]
    if model_info.get("supports_web_search"):
        options.extend(["web_search", "auto"])
    if model_info.get("supports_browser_search"):
        options.extend(["browser_search", "auto"])

    # Remove duplicates while preserving order
    options = list(dict.fromkeys(options))
    default_value = "auto" if "auto" in options else "none"
    return gr.update(choices=options, value=default_value)


def chat_with_ai(message: str, include_domains: str, exclude_domains: str,
                 system_prompt: str, temperature: float, max_tokens: int,
                 search_type: str, force_search: bool, history: List) -> tuple:
    """Main chat function"""
    global ai_instance

    if not ai_instance:
        error_msg = "⚠️ Please set your Groq API key first!"
        history.append([message, error_msg])
        return history, ""

    if not message.strip():
        return history, ""

    # Process domain lists
    include_list = [d.strip() for d in include_domains.split(",")] if include_domains.strip() else []
    exclude_list = [d.strip() for d in exclude_domains.split(",")] if exclude_domains.strip() else []

    try:
        # Get AI response
        response = ai_instance.chat(
            message=message,
            include_domains=include_list if include_list else None,
            exclude_domains=exclude_list if exclude_list else None,
            system_prompt=system_prompt if system_prompt.strip() else None,
            temperature=temperature,
            max_tokens=int(max_tokens),
            search_type=search_type,
            force_search=force_search
        )

        # Format response
        ai_response = response["content"]

        # Add enhanced tool usage info
        if response.get("tool_usage"):
            tool_info = response["tool_usage"]
            tool_summary = []
            if tool_info.get("search_queries"):
                tool_summary.append(f"🔍 Search queries: {len(tool_info['search_queries'])}")
            if tool_info.get("sources_found"):
                tool_summary.append(f"📄 Sources found: {len(tool_info['sources_found'])}")
            if tool_info.get("tools_used"):
                tool_types = [tool.get("tool_type", "unknown") for tool in tool_info["tools_used"]]
                unique_types = list(set(tool_types))
                tool_summary.append(f"🔧 Tools used: {', '.join(unique_types)}")
            if tool_summary:
                ai_response += f"\n\n*{' | '.join(tool_summary)}*"

        # Add search type info
        search_info = []
        if response.get("search_type_used") and response["search_type_used"] != "none":
            search_info.append(f"🔍 Search type: {response['search_type_used']}")
        if force_search:
            search_info.append("⚡ Forced search enabled")

        # Add domain filtering info
        if include_list or exclude_list:
            filter_info = []
            if include_list:
                filter_info.append(f"✅ Included domains: {', '.join(include_list)}")
            if exclude_list:
                filter_info.append(f"❌ Excluded domains: {', '.join(exclude_list)}")
            search_info.extend(filter_info)

        if search_info:
            ai_response += f"\n\n*🌐 Search settings: {' | '.join(search_info)}*"

        # Add to history
        history.append([message, ai_response])
        return history, ""

    except Exception as e:
        error_msg = f"❌ Error: {str(e)}"
        history.append([message, error_msg])
        return history, ""


def clear_chat_history():
    """Clear the chat history"""
    global ai_instance
    if ai_instance:
        ai_instance.clear_history()
    return []


def create_gradio_app():
    """Create the main Gradio application"""
    # Custom CSS for better styling
    css = """
    .container {
        max-width: 1200px;
        margin: 0 auto;
    }
    .header {
        text-align: center;
        background: linear-gradient(to right, #00ff94, #00b4db);
        color: white;
        padding: 20px;
        border-radius: 10px;
        margin-bottom: 20px;
    }
    .status-box {
        background-color: #f8f9fa;
        border: 1px solid #dee2e6;
        border-radius: 8px;
        padding: 15px;
        margin: 10px 0;
    }
    .example-box {
        background-color: #e8f4fd;
        border-left: 4px solid #007bff;
        padding: 15px;
        margin: 10px 0;
        border-radius: 0 8px 8px 0;
    }
    .domain-info {
        background-color: #fff3cd;
        border: 1px solid #ffeaa7;
        border-radius: 8px;
        padding: 15px;
        margin: 10px 0;
    }
    .citation-info {
        background-color: #d1ecf1;
        border: 1px solid #bee5eb;
        border-radius: 8px;
        padding: 15px;
        margin: 10px 0;
    }
    .search-info {
        background-color: #e2e3e5;
        border: 1px solid #c6c8ca;
        border-radius: 8px;
        padding: 15px;
        margin: 10px 0;
    }
    #neuroscope-accordion {
        background: linear-gradient(to right, #00ff94, #00b4db);
        border-radius: 8px;
    }
    """

    with gr.Blocks(css=css, title="🤖 Creative Agentic AI Chat", theme=gr.themes.Ocean()) as app:
        # Header
        gr.HTML("""
            <div class="header">
                <h1>🤖 NeuroScope-AI Enhanced</h1>
                <p>Powered by Groq's Models with Web Search, Browser Search & Agentic Capabilities</p>
            </div>
""") # NeuroScope AI Section with gr.Group(): with gr.Accordion("🤖 NeuroScope AI Enhanced", open=False, elem_id="neuroscope-accordion"): gr.Markdown(""" **Enhanced with Multiple Search Capabilities:** - 🧠 **Intelligence** (Neuro): Advanced AI reasoning across multiple models - 🔍 **Precision Search** (Scope): Domain filtering + Browser search tools - 🤖 **AI Capabilities** (AI): Agentic behavior with tool usage - ⚡ **Dual Search**: Web search (compound models) + Browser search (other models) - 🎯 **Model Flexibility**: Choose the right model for your task """) # IMPORTANT Section with Enhanced Search Info with gr.Group(): with gr.Accordion("🔍 IMPORTANT - Enhanced Search Capabilities!", open=True, elem_id="neuroscope-accordion"): gr.Markdown("""
                ### 🚀 NEW: Multiple Search Types Available!

                **🌐 Web Search Models (Compound Models)**
                - `compound-beta` and `compound-beta-mini` use Groq's built-in web search and support include/exclude domain filtering.

                **🔍 Browser Search Models (Tool-based Models)**
                - `openai/gpt-oss-20b` uses browser search tools to pull in current, real-time information.

                **🔗 Enhanced Citation System**

                All models now include:
                - Automatic source citations with clickable links
                - A dedicated "Sources Used" section listing the sites referenced
                - Transparency about which information comes from which source
""") # API Key and Model Selection Section with gr.Row(): with gr.Column(scale=2): api_key = gr.Textbox( label="🔑 Groq API Key", placeholder="Enter your Groq API key here...", type="password", info="Get your API key from: https://console.groq.com/" ) with gr.Column(scale=2): model_selection = gr.Radio( choices=[ "compound-beta", "compound-beta-mini", "openai/gpt-oss-20b" ], label="🧠 Model Selection", value="compound-beta", info="Choose based on your search needs" ) with gr.Column(scale=1): connect_btn = gr.Button("🔗 Connect", variant="primary", size="lg") # Status display status_display = gr.Markdown("### 📊 Status: Not connected", elem_classes=["status-box"]) # Connect button functionality connect_btn.click( fn=validate_api_key, inputs=[api_key, model_selection], outputs=[status_display] ) model_selection.change( fn=update_model, inputs=[model_selection], outputs=[status_display] ) # Main Chat Interface with gr.Tab("💬 Chat"): chatbot = gr.Chatbot( label="Creative AI Assistant with Enhanced Search", height=500, show_label=True, bubble_full_width=False, show_copy_button=True ) with gr.Row(): msg = gr.Textbox( label="Your Message", placeholder="Type your message here...", lines=3 ) with gr.Column(): send_btn = gr.Button("📤 Send", variant="primary") clear_btn = gr.Button("🗑️ Clear", variant="secondary") # Search Settings with gr.Accordion("🔍 Search Settings", open=False, elem_id="neuroscope-accordion"): with gr.Row(): search_type = gr.Radio( choices=["auto", "web_search", "browser_search", "none"], label="🎯 Search Type", value="auto", info="Choose search method (auto = model decides)" ) force_search = gr.Checkbox( label="⚡ Force Search", value=False, info="Force AI to search even for general questions" ) # Update search options when model changes model_selection.change( fn=get_search_options, inputs=[model_selection], outputs=[search_type] ) # Domain Filtering Section (only for web search models) with gr.Accordion("🌐 Domain Filtering (Web Search Models Only)", open=False, elem_id="neuroscope-accordion"): gr.Markdown("""
                ### 🔍 Domain Filtering Guide

                **Note:** Domain filtering only works with compound models (compound-beta, compound-beta-mini).
""") with gr.Row(): include_domains = gr.Textbox( label="✅ Include Domains (comma-separated)", placeholder="arxiv.org, *.edu, github.com, stackoverflow.com", info="Only search these domains (compound models only)" ) exclude_domains = gr.Textbox( label="❌ Exclude Domains (comma-separated)", placeholder="wikipedia.org, reddit.com, twitter.com", info="Never search these domains (compound models only)" ) # Advanced Settings with gr.Accordion("⚙️ Advanced Settings", open=False, elem_id="neuroscope-accordion"): with gr.Row(): temperature = gr.Slider( minimum=0.0, maximum=2.0, value=0.7, step=0.1, label="🌡️ Temperature", info="Higher = more creative, Lower = more focused" ) max_tokens = gr.Slider( minimum=100, maximum=4000, value=1024, step=100, label="📝 Max Tokens", info="Maximum length of response" ) system_prompt = gr.Textbox( label="🎭 Custom System Prompt", placeholder="Override the default system prompt...", lines=3, info="Leave empty to use default creative assistant prompt with enhanced citations" ) # Model Comparison Section with gr.Accordion("📊 Model Comparison Guide", open=False, elem_id="neuroscope-accordion"): gr.Markdown(""" ### 🔍 Choose Your Model Based on Task: **For Academic Research & Domain-Specific Search:** - `compound-beta` or `compound-beta-mini` with include domains (*.edu, arxiv.org) - Best for: Research papers, academic sources, filtered searches **For Current Events & Real-Time Information:** - `openai/gpt-oss-20b` or `llama-3.3-70b-versatile` with browser search - Best for: News, current events, real-time data **For General Knowledge & Creative Tasks:** - Any model with search type = "auto" or "none" - Best for: Creative writing, general questions, analysis **For Programming & Technical Documentation:** - `llama-3.1-70b-versatile` with browser search, or compound models with tech domains - Best for: Code help, documentation, technical guides """) # Domain Examples Section with gr.Accordion("🔗 Common Domain Examples", open=False, elem_id="neuroscope-accordion"): gr.Markdown(""" **Academic & Research:** - `arxiv.org`, `*.edu`, `scholar.google.com`, `researchgate.net`, `pubmed.ncbi.nlm.nih.gov` **Technology & Programming:** - `github.com`, `stackoverflow.com`, `docs.python.org`, `developer.mozilla.org`, `medium.com` **News & Media:** - `reuters.com`, `bbc.com`, `npr.org`, `apnews.com`, `cnn.com`, `nytimes.com` **Business & Finance:** - `bloomberg.com`, `wsj.com`, `nasdaq.com`, `sec.gov`, `investopedia.com` **Science & Medicine:** - `nature.com`, `science.org`, `pubmed.ncbi.nlm.nih.gov`, `who.int`, `cdc.gov` **Government & Official:** - `*.gov`, `*.org`, `un.org`, `worldbank.org`, `imf.org` """) # How to Use Section with gr.Accordion("📖 How to Use This Enhanced App", open=False, elem_id="neuroscope-accordion"): gr.Markdown(""" ### 🚀 Getting Started 1. **Enter your Groq API Key** - Get one from [console.groq.com](https://console.groq.com/) 2. **Select a model** - Choose based on your search needs: - **Compound models**: For web search with domain filtering - **Tool-based models**: For browser search with real-time data 3. **Configure search settings** - Choose search type and options 4. **Click Connect** - Validate your key and connect to the AI 5. 
                5. **Start chatting!** - Type your message and get intelligent responses with citations

                ### 🎯 Key Features

                - **Dual Search Capabilities**: Web search + Browser search depending on model
                - **Smart Citations**: Automatic source linking and citation formatting
                - **Domain Filtering**: Control which websites the AI searches (compound models)
                - **Real-time Search**: Get current information with browser search tools
                - **Model Flexibility**: Choose the right model for your specific task
                - **Enhanced Tool Visibility**: See exactly what search tools were used

                ### 💡 Tips for Best Results

                **For Research Tasks:**
                - Use compound models with domain filtering
                - Include academic domains (*.edu, arxiv.org) for scholarly sources
                - Use "Force Search" for the most current information

                **For Current Events:**
                - Use tool-based models (openai/gpt-oss-20b)
                - Set search type to "browser_search"
                - Enable "Force Search" for real-time data

                **For Creative Tasks:**
                - Any model works well
                - Set search type to "none" for purely creative responses
                - Use higher temperature (0.8-1.0) for more creativity
                """)

            # Sample Examples Section
            with gr.Accordion("🎯 Sample Examples to Test Enhanced Search", open=False, elem_id="neuroscope-accordion"):
                gr.Markdown("""
                ### 🔬 Research & Analysis (Test Different Models)

                **Compound Model + Domain Filtering:**
                - Query: "What are the latest breakthroughs in quantum computing?"
                - Model: compound-beta
                - Include domains: "arxiv.org, *.edu, nature.com"
                - Search type: web_search

                **Browser Search Model:**
                - Same query with openai/gpt-oss-20b
                - Search type: browser_search
                - Force search: enabled

                ### 📰 Current Events (Browser Search Excellence)

                **Real-time News:**
                - Query: "What happened in AI industry this week?"
                - Model: openai/gpt-oss-20b
                - Search type: browser_search
                - Force search: enabled

                **Compare with Web Search:**
                - Same query with compound-beta
                - Include domains: "reuters.com, bbc.com, techcrunch.com"

                ### 💻 Programming & Tech (Model Comparison)

                **Technical Documentation:**
                - Query: "How to implement OAuth 2.0 in Python Flask?"
                - Try with both model types:
                  - compound-beta with "github.com, docs.python.org, stackoverflow.com"
                  - openai/gpt-oss-20b with browser_search

                ### 🎨 Creative Tasks (No Search Needed)

                - Query: "Write a short story about AI and humans working together"
                - Any model with search_type: "none"
                - Higher temperature (0.8-1.0)

                ### 📊 Business Analysis (Filtered vs Real-time)

                **Financial Data (Real-time):**
                - Query: "Current cryptocurrency market trends"
                - Model: openai/gpt-oss-20b
                - Search type: browser_search
                - Force search: enabled

                **Business Analysis (Filtered):**
                - Query: "Cryptocurrency adoption in enterprise"
                - Model: compound-beta
                - Include domains: "bloomberg.com, wsj.com, harvard.edu"
""") # Event handlers send_btn.click( fn=chat_with_ai, inputs=[msg, include_domains, exclude_domains, system_prompt, temperature, max_tokens, search_type, force_search, chatbot], outputs=[chatbot, msg] ) msg.submit( fn=chat_with_ai, inputs=[msg, include_domains, exclude_domains, system_prompt, temperature, max_tokens, search_type, force_search, chatbot], outputs=[chatbot, msg] ) clear_btn.click( fn=clear_chat_history, outputs=[chatbot] ) # Footer with gr.Accordion("🚀 About This Enhanced NeuroScope AI", open=True, elem_id="neuroscope-accordion"): gr.Markdown(""" **Enhanced Creative Agentic AI Chat Tool** with dual search capabilities: ### 🆕 **New in This Version:** - 🔍 **Browser Search Integration**: Real-time search with tool-based models - 🌐 **Dual Search System**: Web search (compound) + Browser search (tool-based) - 🎯 **Model Flexibility**: 6 different models for different tasks - ⚡ **Force Search Option**: Make AI search even for general questions - 🔧 **Enhanced Tool Visibility**: See exactly what search tools were used - 📊 **Model Comparison Guide**: Choose the right model for your task ### 🏆 **Core Features:** - 🔗 **Automatic Source Citations**: Every response includes clickable links to sources - 📚 **Sources Used Section**: Dedicated section showing all websites referenced - 🌐 **Smart Domain Filtering**: Control search scope (compound models) - 🔍 **Real-time Browser Search**: Current information (tool-based models) - 💬 **Conversational Memory**: Maintains context throughout the session - ⚙️ **Full Customization**: Adjust all parameters and prompts - 🎨 **Creative & Analytical**: Optimized for both creative and research tasks ### 🛠️ **Technical Details:** - **Compound Models**: compound-beta, compound-beta-mini (web search + domain filtering) - **Tool-based Models**: openai/gpt-oss-20b, llama models, mixtral (browser search tools) - **Automatic Search Type Detection**: AI chooses best search method - **Enhanced Error Handling**: Robust error management and user feedback - **Real-time Status Updates**: Live feedback on model capabilities and search settings """) return app # Main execution if __name__ == "__main__": app = create_gradio_app() app.launch( share=True, server_name="0.0.0.0", server_port=7860 )