shukdevdattaEX commited on
Commit
d21e7f5
·
verified ·
1 Parent(s): 96d4232

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +0 -1034
app.py CHANGED
@@ -1,1034 +0,0 @@
1
- import os
2
- import json
3
- import time
4
- import gradio as gr
5
- from datetime import datetime
6
- from typing import List, Dict, Any, Optional, Union
7
- import threading
8
- import re
9
- import aiohttp
10
- import asyncio
11
-
12
- # Import Groq
13
- from groq import Groq
14
-
15
class ChutesClient:
    """Async client for the Chutes OpenAI-compatible chat-completions API."""

    def __init__(self, api_key: str):
        # Tolerate None so downstream string formatting never crashes.
        self.api_key = api_key or ""
        self.base_url = "https://llm.chutes.ai/v1"

    async def chat_completions_create(self, **kwargs) -> Dict:
        """Make an async request to the Chutes chat completions endpoint.

        Accepts OpenAI-style keyword arguments (``model``, ``messages``,
        ``stream``, ``max_tokens``, ``temperature``). Always returns a dict
        shaped like a *non-streaming* OpenAI chat-completion response, even
        when the underlying HTTP request streamed — streamed deltas are
        accumulated into a single message for caller compatibility.

        Raises:
            Exception: if the API responds with a non-200 status.
        """
        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json",
        }

        # Prepare the body for the Chutes API.
        body = {
            "model": kwargs.get("model", "openai/gpt-oss-20b"),
            "messages": kwargs.get("messages", []),
            "stream": kwargs.get("stream", False),
            "max_tokens": kwargs.get("max_tokens", 1024),
            "temperature": kwargs.get("temperature", 0.7),
        }

        async with aiohttp.ClientSession() as session:
            # A single POST serves both modes; only response parsing differs.
            # (The original duplicated the whole request block per branch.)
            async with session.post(
                f"{self.base_url}/chat/completions",
                headers=headers,
                json=body,
            ) as response:
                if response.status != 200:
                    raise Exception(f"Chutes API error: {await response.text()}")
                if not body["stream"]:
                    return await response.json()
                content = await self._collect_stream(response)

        # Normalize the streamed text into OpenAI response format.
        return {
            "choices": [{
                "message": {
                    "content": content,
                    "role": "assistant"
                }
            }]
        }

    @staticmethod
    async def _collect_stream(response) -> str:
        """Accumulate assistant text from an SSE ("data: ...") byte stream."""
        content = ""
        async for raw_line in response.content:
            line = raw_line.decode("utf-8").strip()
            if not line.startswith("data: "):
                continue
            data = line[6:]
            if data == "[DONE]":
                break
            if not data.strip():
                continue
            try:
                chunk_json = json.loads(data)
            except json.JSONDecodeError:
                # Skip partial/garbled SSE chunks rather than failing the call.
                continue
            choices = chunk_json.get("choices") or []
            if choices:
                delta = choices[0].get("delta", {})
                if delta.get("content"):
                    content += str(delta["content"])
        return content
85
-
86
class CreativeAgenticAI:
    """
    Creative Agentic AI Chat Tool using Groq and Chutes models with browser
    search and compound models.
    """

    def __init__(self, groq_api_key: str, chutes_api_key: str, model: str = "compound-beta"):
        """
        Initialize the Creative Agentic AI system.

        Args:
            groq_api_key: Groq API key (required unless model is the Chutes one)
            chutes_api_key: Chutes API key (required for openai/gpt-oss-20b)
            model: Which model to use

        Raises:
            ValueError: if the key required by the chosen model is missing.
        """
        self.groq_api_key = str(groq_api_key) if groq_api_key else ""
        self.chutes_api_key = str(chutes_api_key) if chutes_api_key else ""
        if not self.groq_api_key and model != "openai/gpt-oss-20b":
            raise ValueError("No Groq API key provided")
        if not self.chutes_api_key and model == "openai/gpt-oss-20b":
            raise ValueError("No Chutes API key provided")

        self.model = str(model) if model else "compound-beta"
        # Only instantiate the client(s) whose key we actually have.
        self.groq_client = Groq(api_key=self.groq_api_key) if self.groq_api_key else None
        self.chutes_client = ChutesClient(api_key=self.chutes_api_key) if self.chutes_api_key else None
        self.conversation_history = []

        # Available models with their capabilities; drives the
        # _supports_*() feature checks and API routing below.
        self.available_models = {
            "compound-beta": {"supports_web_search": True, "supports_browser_search": False, "api": "groq"},
            "compound-beta-mini": {"supports_web_search": True, "supports_browser_search": False, "api": "groq"},
            "openai/gpt-oss-20b": {"supports_web_search": False, "supports_browser_search": False, "api": "chutes"},
        }

    async def chat(self, message: str,
                   include_domains: List[str] = None,
                   exclude_domains: List[str] = None,
                   system_prompt: str = None,
                   temperature: float = 0.7,
                   max_tokens: int = 1024,
                   search_type: str = "auto",
                   force_search: bool = False) -> Dict:
        """
        Send a message to the AI and get a response with flexible search options.

        Args:
            message: User's message
            include_domains: List of domains to include for web search
            exclude_domains: List of domains to exclude from web search
            system_prompt: Custom system prompt
            temperature: Model temperature (0.0-2.0)
            max_tokens: Maximum tokens in response
            search_type: 'web_search', 'browser_search', 'auto', or 'none'
            force_search: Force the AI to use search tools

        Returns:
            AI response with metadata (content, timestamp, model, tool_usage,
            search_type_used, parameters; or an "error" key on failure).
        """
        # Safe string conversion — Gradio callbacks may hand us None.
        message = str(message) if message else ""
        system_prompt = str(system_prompt) if system_prompt else ""
        search_type = str(search_type) if search_type else "auto"

        # Build a default system prompt when the caller supplied none.
        if not system_prompt:
            if self.model == "openai/gpt-oss-20b":
                # Simple, direct system prompt for the Chutes model.
                system_prompt = """You are a helpful, knowledgeable AI assistant. Provide direct, clear, complete and informative responses to user questions. Be concise but thorough. Do not include internal reasoning or commentary - just give the answer the user is looking for. Please also cite the source urls from where you got the informations."""
            else:
                # Enhanced system prompt for Groq models with search capabilities.
                citation_instruction = """
IMPORTANT: When you search the web and find information, you MUST:
1. Always cite your sources with clickable links in this format: [Source Title](URL)
2. Include multiple diverse sources when possible
3. Show which specific websites you used for each claim
4. At the end of your response, provide a "Sources Used" section with all the links
5. Be transparent about which information comes from which source
"""

                domain_context = ""
                if include_domains and self._supports_web_search():
                    safe_domains = [str(d) for d in include_domains if d]
                    domain_context = f"\nYou are restricted to searching ONLY these domains: {', '.join(safe_domains)}. Make sure to find and cite sources specifically from these domains."
                elif exclude_domains and self._supports_web_search():
                    safe_domains = [str(d) for d in exclude_domains if d]
                    domain_context = f"\nAvoid searching these domains: {', '.join(safe_domains)}. Search everywhere else on the web."

                search_instruction = ""
                if search_type == "browser_search" and self._supports_browser_search():
                    search_instruction = "\nUse browser search tools to find the most current and relevant information from the web."
                elif search_type == "web_search":
                    search_instruction = "\nUse web search capabilities to find relevant information."
                elif force_search:
                    if self._supports_browser_search():
                        search_instruction = "\nYou MUST use search tools to find current information before responding."
                    elif self._supports_web_search():
                        search_instruction = "\nYou MUST use web search to find current information before responding."

                system_prompt = f"""You are a creative and intelligent AI assistant with agentic capabilities.
You can search the web, analyze information, and provide comprehensive responses.
Be helpful, creative, and engaging while maintaining accuracy.

{citation_instruction}
{domain_context}
{search_instruction}

Your responses should be well-structured, informative, and properly cited with working links."""

        # Build the message list: system prompt + last 20 history turns + user.
        messages = [{"role": "system", "content": system_prompt}]
        messages.extend(self.conversation_history[-20:])

        # Inline domain-filter hints into the user message (Groq models only).
        enhanced_message = message
        if (include_domains or exclude_domains) and self._supports_web_search():
            filter_context = []
            if include_domains:
                safe_domains = [str(d) for d in include_domains if d]
                if safe_domains:
                    filter_context.append(f"ONLY search these domains: {', '.join(safe_domains)}")
            if exclude_domains:
                safe_domains = [str(d) for d in exclude_domains if d]
                if safe_domains:
                    filter_context.append(f"EXCLUDE these domains: {', '.join(safe_domains)}")
            if filter_context:
                enhanced_message += f"\n\n[Domain Filtering: {' | '.join(filter_context)}]"

        messages.append({"role": "user", "content": enhanced_message})

        # Set up API parameters shared by both back ends.
        params = {
            "messages": messages,
            "model": self.model,
            "temperature": temperature,
            "max_tokens": max_tokens,
        }

        # Add domain filtering for compound models (Groq only).
        if self._supports_web_search():
            if include_domains:
                safe_domains = [str(d).strip() for d in include_domains if d and str(d).strip()]
                if safe_domains:
                    params["include_domains"] = safe_domains
            if exclude_domains:
                safe_domains = [str(d).strip() for d in exclude_domains if d and str(d).strip()]
                if safe_domains:
                    params["exclude_domains"] = safe_domains

        # Add tools only for Groq models that support browser search.
        tools = []
        tool_choice = None
        if self._supports_browser_search():
            if search_type in ["browser_search", "auto"] or force_search:
                tools = [{"type": "browser_search", "function": {"name": "browser_search"}}]
                tool_choice = "required" if force_search else "auto"

        if tools:
            params["tools"] = tools
            params["tool_choice"] = tool_choice

        try:
            # Route the call to the right API based on the model table.
            if self.available_models[self.model]["api"] == "chutes":
                # Use streaming for better response quality.
                params["stream"] = True
                response = await self.chutes_client.chat_completions_create(**params)
                # Handle Chutes response (plain dict in OpenAI shape).
                content = ""
                if response and "choices" in response and response["choices"]:
                    message_content = response["choices"][0].get("message", {}).get("content")
                    content = str(message_content) if message_content else "No response content"
                else:
                    content = "No response received"
                tool_calls = None
            else:
                # Groq API call; Groq uses max_completion_tokens instead.
                params["max_completion_tokens"] = params.pop("max_tokens", None)
                response = self.groq_client.chat.completions.create(**params)
                content = ""
                tool_calls = None
                # Guard choices before indexing — the original indexed
                # response.choices[0] unconditionally for tool_calls.
                if response and response.choices and response.choices[0].message:
                    message_content = response.choices[0].message.content
                    content = str(message_content) if message_content else "No response content"
                    tool_calls = getattr(response.choices[0].message, "tool_calls", None)
                else:
                    content = "No response received"

            # Extract tool usage information.
            tool_info = self._extract_tool_info(response, tool_calls)

            # Process content to enhance citations.
            processed_content = self._enhance_citations(content, tool_info)

            # Add to conversation history (original message, not enhanced).
            self.conversation_history.append({"role": "user", "content": message})
            self.conversation_history.append({"role": "assistant", "content": processed_content})

            return {
                "content": processed_content,
                "timestamp": datetime.now().isoformat(),
                "model": self.model,
                "tool_usage": tool_info,
                "search_type_used": search_type,
                "parameters": {
                    "temperature": temperature,
                    "max_tokens": max_tokens,
                    "include_domains": include_domains,
                    "exclude_domains": exclude_domains,
                    "force_search": force_search
                }
            }

        except Exception as e:
            # Record the failure in history so the UI transcript stays coherent.
            error_msg = f"Error: {str(e)}"
            self.conversation_history.append({"role": "user", "content": message})
            self.conversation_history.append({"role": "assistant", "content": error_msg})

            return {
                "content": error_msg,
                "timestamp": datetime.now().isoformat(),
                "model": self.model,
                "tool_usage": None,
                "error": str(e)
            }

    def _supports_web_search(self) -> bool:
        """Check if current model supports web search (compound models)."""
        return self.available_models.get(self.model, {}).get("supports_web_search", False)

    def _supports_browser_search(self) -> bool:
        """Check if current model supports browser search tools."""
        return self.available_models.get(self.model, {}).get("supports_browser_search", False)

    def _extract_tool_info(self, response, tool_calls) -> Dict:
        """Extract tool usage information in a JSON-serializable format."""
        tool_info = {
            "tools_used": [],
            "search_queries": [],
            "sources_found": []
        }

        # Groq compound models report server-side tool runs via executed_tools.
        # Guard non-empty choices before indexing (was an unguarded [0]).
        if hasattr(response, 'choices') and getattr(response, 'choices', None) \
                and hasattr(response.choices[0].message, 'executed_tools'):
            tools = response.choices[0].message.executed_tools
            if tools:
                for tool in tools:
                    tool_dict = {
                        "tool_type": str(getattr(tool, "type", "unknown")),
                        "tool_name": str(getattr(tool, "name", "unknown")),
                    }
                    if hasattr(tool, "input"):
                        tool_input = getattr(tool, "input")
                        tool_input_str = str(tool_input) if tool_input is not None else ""
                        tool_dict["input"] = tool_input_str
                        if "search" in tool_dict["tool_name"].lower():
                            tool_info["search_queries"].append(tool_input_str)
                    if hasattr(tool, "output"):
                        tool_output = getattr(tool, "output")
                        tool_output_str = str(tool_output) if tool_output is not None else ""
                        tool_dict["output"] = tool_output_str
                        # Harvest any URLs from tool output for citations.
                        urls = self._extract_urls(tool_output_str)
                        tool_info["sources_found"].extend(urls)
                    tool_info["tools_used"].append(tool_dict)

        # Handle tool_calls for both APIs.
        if tool_calls:
            for tool_call in tool_calls:
                tool_dict = {
                    "tool_type": str(getattr(tool_call, "type", "browser_search")),
                    "tool_name": "browser_search",
                    "tool_id": str(getattr(tool_call, "id", "")) if getattr(tool_call, "id", None) else ""
                }
                if hasattr(tool_call, "function") and tool_call.function:
                    tool_dict["tool_name"] = str(getattr(tool_call.function, "name", "browser_search"))
                    if hasattr(tool_call.function, "arguments"):
                        # Bind args_raw OUTSIDE the try: the original's bare
                        # except referenced it and could NameError.
                        args_raw = tool_call.function.arguments
                        try:
                            if isinstance(args_raw, str):
                                args = json.loads(args_raw)
                            else:
                                args = args_raw or {}
                            tool_dict["arguments"] = args
                            if "query" in args:
                                tool_info["search_queries"].append(str(args["query"]))
                        except (json.JSONDecodeError, TypeError):
                            # Unparseable arguments: keep the raw string form.
                            tool_dict["arguments"] = str(args_raw) if args_raw is not None else ""
                tool_info["tools_used"].append(tool_dict)

        return tool_info

    def _extract_urls(self, text: str) -> List[str]:
        """Extract unique URLs from text (order not guaranteed)."""
        if not text:
            return []
        text_str = str(text)
        url_pattern = r'https?://[^\s<>"]{2,}'
        urls = re.findall(url_pattern, text_str)
        return list(set(urls))

    def _enhance_citations(self, content: str, tool_info: Dict) -> str:
        """Append a "Sources Used" section when sources were found but uncited."""
        if not content:
            return ""
        content_str = str(content)
        if not tool_info or not tool_info.get("sources_found"):
            return content_str

        # Only add the section if the model didn't already cite sources.
        if "Sources Used:" not in content_str and "sources:" not in content_str.lower():
            sources_section = "\n\n---\n\n### Sources Used:\n"
            for i, url in enumerate(tool_info["sources_found"][:10], 1):
                domain = self._extract_domain(str(url))
                sources_section += f"{i}. [{domain}]({url})\n"
            content_str += sources_section

        return content_str

    def _extract_domain(self, url: str) -> str:
        """Extract the bare domain name from a URL for display."""
        if not url:
            return ""
        url_str = str(url)
        try:
            if url_str.startswith(('http://', 'https://')):
                domain = url_str.split('/')[2]
                if domain.startswith('www.'):
                    domain = domain[4:]
                return domain
            return url_str
        except IndexError:
            # Malformed URL like "https://" with no host — was a bare except.
            return url_str

    def get_model_info(self) -> Dict:
        """Get the capability dict for the current model (empty if unknown)."""
        return self.available_models.get(self.model, {})

    def clear_history(self):
        """Clear conversation history."""
        self.conversation_history = []

    def get_history_summary(self) -> str:
        """Get a one-line summary of conversation history."""
        if not self.conversation_history:
            return "No conversation history"

        user_messages = [msg for msg in self.conversation_history if msg["role"] == "user"]
        assistant_messages = [msg for msg in self.conversation_history if msg["role"] == "assistant"]

        return f"Conversation: {len(user_messages)} user messages, {len(assistant_messages)} assistant responses"
433
-
434
# Global variables shared across the Gradio callbacks below.
ai_instance = None  # CreativeAgenticAI instance; None until keys validate
api_key_status = "Not Set"  # human-readable result of the last key check
437
-
438
async def validate_api_keys(groq_api_key: str, chutes_api_key: str, model: str) -> str:
    """Validate the relevant API key(s) and (re)build the global AI instance.

    Fires a minimal test completion against the API the chosen model uses,
    then constructs CreativeAgenticAI on success. Returns a markdown status
    string for the UI and updates the module-level status globals.
    """
    global ai_instance, api_key_status

    # Gradio may deliver None for empty fields — normalize everything first.
    groq_api_key = str(groq_api_key) if groq_api_key else ""
    chutes_api_key = str(chutes_api_key) if chutes_api_key else ""
    model = str(model) if model else "compound-beta"

    uses_chutes = model == "openai/gpt-oss-20b"

    # Guard clauses: reject early when the required key is blank.
    if uses_chutes and not chutes_api_key.strip():
        api_key_status = "Invalid ❌"
        return "❌ Please enter a valid Chutes API key for the selected model"
    if model in ("compound-beta", "compound-beta-mini") and not groq_api_key.strip():
        api_key_status = "Invalid ❌"
        return "❌ Please enter a valid Groq API key for the selected model"

    probe = [{"role": "user", "content": "Hello"}]
    try:
        # A tiny 10-token completion proves the key actually works.
        if uses_chutes:
            await ChutesClient(api_key=chutes_api_key).chat_completions_create(
                messages=probe,
                model=model,
                max_tokens=10,
            )
        else:
            Groq(api_key=groq_api_key).chat.completions.create(
                messages=probe,
                model=model,
                max_tokens=10,
            )

        ai_instance = CreativeAgenticAI(groq_api_key=groq_api_key, chutes_api_key=chutes_api_key, model=model)
        api_key_status = "Valid ✅"

        model_info = ai_instance.get_model_info()
        capabilities = [
            label
            for flag, label in (
                ("supports_web_search", "🌐 Web Search with Domain Filtering"),
                ("supports_browser_search", "🔍 Browser Search Tools"),
            )
            if model_info.get(flag)
        ]
        cap_text = " | ".join(capabilities) if capabilities else "💬 Chat Only"

        return f"✅ API Keys Valid! NeuroScope AI is ready.\n\n**Model:** {model}\n**Capabilities:** {cap_text}\n**API:** {model_info.get('api', 'unknown')}\n**Status:** Connected and ready for chat!"

    except Exception as e:
        api_key_status = "Invalid ❌"
        ai_instance = None
        return f"❌ Error validating API key: {str(e)}\n\nPlease check your API keys and try again."
489
-
490
def update_model(model: str) -> str:
    """Switch the active model on the already-connected AI instance.

    Returns a markdown status line describing the new model's capabilities,
    or a warning when no instance exists yet (keys not validated).
    """
    global ai_instance

    model = str(model) if model else "compound-beta"

    # Guard clause: nothing to update before the keys are set.
    if not ai_instance:
        return "⚠️ Please set your API keys first"

    ai_instance.model = model
    info = ai_instance.get_model_info()
    capability_labels = [
        label
        for flag, label in (
            ("supports_web_search", "🌐 Web Search with Domain Filtering"),
            ("supports_browser_search", "🔍 Browser Search Tools"),
        )
        if info.get(flag)
    ]
    cap_text = " | ".join(capability_labels) if capability_labels else "💬 Chat Only"
    return f"✅ Model updated to: **{model}**\n**Capabilities:** {cap_text}\n**API:** {info.get('api', 'unknown')}"
509
-
510
def get_search_options(model: str) -> gr.update:
    """Build a gr.update limiting the search-type radio to what the model supports."""
    # Without a connected instance we can only offer "none".
    if not ai_instance:
        return gr.update(choices=["none"], value="none")

    model = str(model) if model else "compound-beta"
    info = ai_instance.available_models.get(model, {})

    options = ["none"]
    if info.get("supports_web_search"):
        options += ["web_search", "auto"]
    if info.get("supports_browser_search"):
        options += ["browser_search", "auto"]

    # De-duplicate while preserving first-seen order ("auto" may appear twice).
    options = list(dict.fromkeys(options))
    default_value = "auto" if "auto" in options else "none"
    return gr.update(choices=options, value=default_value)
527
-
528
async def chat_with_ai(message: str,
                       include_domains: str,
                       exclude_domains: str,
                       system_prompt: str,
                       temperature: float,
                       max_tokens: int,
                       search_type: str,
                       force_search: bool,
                       history: List) -> tuple:
    """Main chat callback for the Gradio UI.

    Parses the comma-separated domain fields, forwards the request to the
    global CreativeAgenticAI instance, annotates the reply with tool/search
    metadata (Groq models only), and returns (updated history, "") where the
    empty string clears the message textbox.
    """
    global ai_instance

    # Without a connected instance, surface the problem inline in the chat.
    if not ai_instance:
        error_msg = "⚠️ Please set your API keys first!"
        history.append([str(message) if message else "", error_msg])
        return history, ""

    # Convert all inputs to strings and handle None values from Gradio.
    message = str(message) if message else ""
    include_domains = str(include_domains) if include_domains else ""
    exclude_domains = str(exclude_domains) if exclude_domains else ""
    system_prompt = str(system_prompt) if system_prompt else ""
    search_type = str(search_type) if search_type else "auto"

    # Ignore empty submissions (just clear the textbox).
    if not message.strip():
        return history, ""

    # Comma-separated textbox values -> clean domain lists.
    include_list = [d.strip() for d in include_domains.split(",") if d.strip()] if include_domains.strip() else []
    exclude_list = [d.strip() for d in exclude_domains.split(",") if d.strip()] if exclude_domains.strip() else []

    try:
        response = await ai_instance.chat(
            message=message,
            include_domains=include_list if include_list else None,
            exclude_domains=exclude_list if exclude_list else None,
            system_prompt=system_prompt if system_prompt.strip() else None,
            temperature=temperature,
            max_tokens=int(max_tokens),
            search_type=search_type,
            force_search=force_search
        )

        ai_response = str(response.get("content", "No response received"))

        # Add tool usage info for Groq models (Chutes model has no tools).
        if response.get("tool_usage") and ai_instance.model != "openai/gpt-oss-20b":
            tool_info = response["tool_usage"]
            tool_summary = []

            if tool_info.get("search_queries"):
                tool_summary.append(f"🔍 Search queries: {len(tool_info['search_queries'])}")

            if tool_info.get("sources_found"):
                tool_summary.append(f"📄 Sources found: {len(tool_info['sources_found'])}")

            if tool_info.get("tools_used"):
                tool_types = [str(tool.get("tool_type", "unknown")) for tool in tool_info["tools_used"]]
                unique_types = list(set(tool_types))
                tool_summary.append(f"🔧 Tools used: {', '.join(unique_types)}")

            if tool_summary:
                ai_response += f"\n\n*{' | '.join(tool_summary)}*"

        # Collect a human-readable summary of the search settings used.
        search_info = []
        if response.get("search_type_used") and str(response["search_type_used"]) != "none":
            search_info.append(f"🔍 Search type: {response['search_type_used']}")

        if force_search:
            search_info.append("⚡ Forced search enabled")

        if include_list or exclude_list:
            filter_info = []
            if include_list:
                filter_info.append(f"✅ Included domains: {', '.join(include_list)}")
            if exclude_list:
                filter_info.append(f"❌ Excluded domains: {', '.join(exclude_list)}")
            search_info.extend(filter_info)

        # Only Groq models actually search, so only annotate for them.
        if search_info and ai_instance.model != "openai/gpt-oss-20b":
            ai_response += f"\n\n*🌐 Search settings: {' | '.join(search_info)}*"

        history.append([message, ai_response])

        return history, ""

    except Exception as e:
        # Surface the failure in the transcript rather than crashing the UI.
        error_msg = f"❌ Error: {str(e)}"
        history.append([message, error_msg])
        return history, ""
618
-
619
def clear_chat_history():
    """Reset the AI instance's conversation history and empty the chatbot widget."""
    global ai_instance
    if ai_instance is not None:
        ai_instance.clear_history()
    # Returning [] clears the gr.Chatbot component.
    return []
625
-
626
- def create_gradio_app():
627
- """Create the main Gradio application"""
628
-
629
- css = """
630
- .container {
631
- max-width: 1200px;
632
- margin: 0 auto;
633
- }
634
- .header {
635
- text-align: center;
636
- background: linear-gradient(to right, #00ff94, #00b4db);
637
- color: white;
638
- padding: 20px;
639
- border-radius: 10px;
640
- margin-bottom: 20px;
641
- }
642
- .status-box {
643
- background-color: #f8f9fa;
644
- border: 1px solid #dee2e6;
645
- border-radius: 8px;
646
- padding: 15px;
647
- margin: 10px 0;
648
- }
649
- .example-box {
650
- background-color: #e8f4fd;
651
- border-left: 4px solid #007bff;
652
- padding: 15px;
653
- margin: 10px 0;
654
- border-radius: 0 8px 8px 0;
655
- }
656
- .domain-info {
657
- background-color: #fff3cd;
658
- border: 1px solid #ffeaa7;
659
- border-radius: 8px;
660
- padding: 15px;
661
- margin: 10px 0;
662
- }
663
- .citation-info {
664
- background-color: #d1ecf1;
665
- border: 1px solid #bee5eb;
666
- border-radius: 8px;
667
- padding: 15px;
668
- margin: 10px 0;
669
- }
670
- .search-info {
671
- background-color: #e2e3e5;
672
- border: 1px solid #c6c8ca;
673
- border-radius: 8px;
674
- padding: 15px;
675
- margin: 10px 0;
676
- }
677
- #neuroscope-accordion {
678
- background: linear-gradient(to right, #00ff94, #00b4db);
679
- border-radius: 8px;
680
- }
681
- """
682
-
683
- with gr.Blocks(css=css, title="🤖 Creative Agentic AI Chat", theme=gr.themes.Ocean()) as app:
684
- gr.HTML("""
685
- <div class="header">
686
- <h1>🤖 NeuroScope-AI Enhanced</h1>
687
- <p>Powered by Groq and Chutes Models with Web Search and Agentic Capabilities</p>
688
- </div>
689
- """)
690
-
691
- with gr.Group():
692
- with gr.Accordion("🤖 NeuroScope AI Enhanced", open=False, elem_id="neuroscope-accordion"):
693
- gr.Markdown("""
694
- **Enhanced with Multiple Search Capabilities:**
695
- - 🧠 **Intelligence** (Neuro): Advanced AI reasoning across multiple models
696
- - 🔍 **Precision Search** (Scope): Domain filtering (Groq models)
697
- - 🤖 **AI Capabilities** (AI): Agentic behavior with tool usage
698
- - ⚡ **Dual APIs**: Web search (Groq) + Streaming chat (Chutes)
699
- - 🎯 **Model Flexibility**: Choose the right model for your task
700
- """)
701
-
702
- with gr.Group():
703
- with gr.Accordion("🔍 IMPORTANT - Enhanced Search Capabilities!", open=True, elem_id="neuroscope-accordion"):
704
- gr.Markdown("""
705
- <div class="search-info">
706
- <h3>🚀 NEW: Multiple Search Types Available!</h3>
707
-
708
- <h4>🌐 Web Search Models (Groq API)</h4>
709
- <ul>
710
- <li><strong>compound-beta:</strong> Most powerful with domain filtering</li>
711
- <li><strong>compound-beta-mini:</strong> Faster with domain filtering</li>
712
- <li><strong>Features:</strong> Include/exclude domains, autonomous web search</li>
713
- </ul>
714
-
715
- <h4>💬 Chat Model (Chutes API)</h4>
716
- <ul>
717
- <li><strong>openai/gpt-oss-20b:</strong> Fast conversational capabilities with streaming</li>
718
- <li><strong>Features:</strong> General chat, streaming responses, no web search</li>
719
- </ul>
720
- </div>
721
-
722
- <div class="citation-info">
723
- <h3>🔗 Enhanced Citation System</h3>
724
- <p>Groq models include:</p>
725
- <ul>
726
- <li><strong>Automatic Source Citations:</strong> Clickable links to sources</li>
727
- <li><strong>Sources Used Section:</strong> Dedicated section showing all websites</li>
728
- <li><strong>Search Type Indication:</strong> Shows which search method was used</li>
729
- </ul>
730
- <p><strong>Chutes models:</strong> Direct conversational responses without web search</p>
731
- </div>
732
- """)
733
-
734
- with gr.Row():
735
- with gr.Column(scale=2):
736
- groq_api_key = gr.Textbox(
737
- label="🔑 Groq API Key",
738
- placeholder="Enter your Groq API key here...",
739
- type="password",
740
- info="Get your API key from: https://console.groq.com/"
741
- )
742
- chutes_api_key = gr.Textbox(
743
- label="🔑 Chutes API Key",
744
- placeholder="Enter your Chutes API key here...",
745
- type="password",
746
- info="Required for openai/gpt-oss-20b model"
747
- )
748
- with gr.Column(scale=2):
749
- model_selection = gr.Radio(
750
- choices=[
751
- "compound-beta",
752
- "compound-beta-mini",
753
- "openai/gpt-oss-20b"
754
- ],
755
- label="🧠 Model Selection",
756
- value="compound-beta",
757
- info="Choose based on your needs"
758
- )
759
- with gr.Column(scale=1):
760
- connect_btn = gr.Button("🔗 Connect", variant="primary", size="lg")
761
-
762
- status_display = gr.Markdown("### 📊 Status: Not connected", elem_classes=["status-box"])
763
-
764
- connect_btn.click(
765
- fn=validate_api_keys,
766
- inputs=[groq_api_key, chutes_api_key, model_selection],
767
- outputs=[status_display]
768
- )
769
-
770
- model_selection.change(
771
- fn=update_model,
772
- inputs=[model_selection],
773
- outputs=[status_display]
774
- )
775
-
776
- with gr.Tab("💬 Chat"):
777
- chatbot = gr.Chatbot(
778
- label="Creative AI Assistant with Enhanced Search",
779
- height=500,
780
- show_label=True,
781
- bubble_full_width=False,
782
- show_copy_button=True
783
- )
784
-
785
- with gr.Row():
786
- msg = gr.Textbox(
787
- label="Your Message",
788
- placeholder="Type your message here...",
789
- lines=3
790
- )
791
- with gr.Column():
792
- send_btn = gr.Button("📤 Send", variant="primary")
793
- clear_btn = gr.Button("🗑️ Clear", variant="secondary")
794
-
795
- with gr.Accordion("🔍 Search Settings", open=False, elem_id="neuroscope-accordion"):
796
- with gr.Row():
797
- search_type = gr.Radio(
798
- choices=["auto", "web_search", "none"],
799
- label="🎯 Search Type",
800
- value="auto",
801
- info="Choose search method (auto = model decides)"
802
- )
803
- force_search = gr.Checkbox(
804
- label="⚡ Force Search",
805
- value=False,
806
- info="Force AI to search even for general questions (Groq models only)"
807
- )
808
-
809
- model_selection.change(
810
- fn=get_search_options,
811
- inputs=[model_selection],
812
- outputs=[search_type]
813
- )
814
-
815
- with gr.Accordion("🌐 Domain Filtering (Web Search Models Only)", open=False, elem_id="neuroscope-accordion"):
816
- gr.Markdown("""
817
- <div class="domain-info">
818
- <h4>🔍 Domain Filtering Guide</h4>
819
- <p><strong>Note:</strong> Domain filtering only works with compound models (compound-beta, compound-beta-mini)</p>
820
- <ul>
821
- <li><strong>Include Domains:</strong> Only search these domains (comma-separated)</li>
822
- <li><strong>Exclude Domains:</strong> Never search these domains (comma-separated)</li>
823
- <li><strong>Examples:</strong> arxiv.org, *.edu, github.com, stackoverflow.com</li>
824
- <li><strong>Wildcards:</strong> Use *.edu for all educational domains</li>
825
- </ul>
826
- </div>
827
- """)
828
-
829
- with gr.Row():
830
- include_domains = gr.Textbox(
831
- label="✅ Include Domains (comma-separated)",
832
- placeholder="arxiv.org, *.edu, github.com, stackoverflow.com",
833
- info="Only search these domains (compound models only)"
834
- )
835
- exclude_domains = gr.Textbox(
836
- label="❌ Exclude Domains (comma-separated)",
837
- placeholder="wikipedia.org, reddit.com, twitter.com",
838
- info="Never search these domains (compound models only)"
839
- )
840
-
841
- with gr.Accordion("⚙️ Advanced Settings", open=False, elem_id="neuroscope-accordion"):
842
- with gr.Row():
843
- temperature = gr.Slider(
844
- minimum=0.0,
845
- maximum=2.0,
846
- value=0.7,
847
- step=0.1,
848
- label="🌡️ Temperature",
849
- info="Higher = more creative, Lower = more focused"
850
- )
851
- max_tokens = gr.Slider(
852
- minimum=100,
853
- maximum=4000,
854
- value=1024,
855
- step=100,
856
- label="📝 Max Tokens",
857
- info="Maximum length of response"
858
- )
859
-
860
- system_prompt = gr.Textbox(
861
- label="🎭 Custom System Prompt",
862
- placeholder="Override the default system prompt...",
863
- lines=3,
864
- info="Leave empty to use default creative assistant prompt with enhanced citations"
865
- )
866
-
867
- with gr.Accordion("📊 Model Comparison Guide", open=False, elem_id="neuroscope-accordion"):
868
- gr.Markdown("""
869
- ### 🔍 Choose Your Model Based on Task:
870
-
871
- **For Academic Research & Domain-Specific Search:**
872
- - `compound-beta` or `compound-beta-mini` with include domains (*.edu, arxiv.org)
873
- - Best for: Research papers, academic sources, filtered searches
874
- - API: Groq
875
-
876
- **For General Knowledge & Creative Tasks:**
877
- - `openai/gpt-oss-20b` for fast conversational responses
878
- - Best for: Creative writing, general questions
879
- - API: Chutes
880
-
881
- **For Programming & Technical Documentation:**
882
- - `compound-beta` with tech domains
883
- - Best for: Code help, documentation, technical guides
884
- - API: Groq
885
- """)
886
-
887
- with gr.Accordion("🔗 Common Domain Examples", open=False, elem_id="neuroscope-accordion"):
888
- gr.Markdown("""
889
- **Academic & Research:**
890
- - `arxiv.org`, `*.edu`, `scholar.google.com`, `researchgate.net`, `pubmed.ncbi.nlm.nih.gov`
891
-
892
- **Technology & Programming:**
893
- - `github.com`, `stackoverflow.com`, `docs.python.org`, `developer.mozilla.org`, `medium.com`
894
-
895
- **News & Media:**
896
- - `reuters.com`, `bbc.com`, `npr.org`, `apnews.com`, `cnn.com`, `nytimes.com`
897
-
898
- **Business & Finance:**
899
- - `bloomberg.com`, `wsj.com`, `nasdaq.com`, `sec.gov`, `investopedia.com`
900
-
901
- **Science & Medicine:**
902
- - `nature.com`, `science.org`, `pubmed.ncbi.nlm.nih.gov`, `who.int`, `cdc.gov`
903
-
904
- **Government & Official:**
905
- - `*.gov`, `*.org`, `un.org`, `worldbank.org`, `imf.org`
906
- """)
907
-
908
- with gr.Accordion("📖 How to Use This Enhanced App", open=False, elem_id="neuroscope-accordion"):
909
- gr.Markdown("""
910
- ### 🚀 Getting Started
911
- 1. **Enter your API Keys** - Groq from [console.groq.com](https://console.groq.com/), Chutes for openai/gpt-oss-20b
912
- 2. **Select a model** - Choose based on your needs:
913
- - **Compound models** (Groq): For web search with domain filtering
914
- - **openai/gpt-oss-20b** (Chutes): For general conversational tasks
915
- 3. **Configure search settings** - Choose search type and options (Groq models only)
916
- 4. **Click Connect** - Validate your keys and connect to the AI
917
- 5. **Start chatting!** - Type your message and get intelligent responses with citations
918
-
919
- ### 🎯 Key Features
920
- - **Dual APIs**: Web search (Groq) + Basic chat (Chutes)
921
- - **Smart Citations**: Automatic source linking and citation formatting (Groq models)
922
- - **Domain Filtering**: Control which websites the AI searches (Groq models)
923
- - **Model Flexibility**: Choose the right model and API for your task
924
- - **Enhanced Tool Visibility**: See search tools used (Groq models)
925
-
926
- ### 💡 Tips for Best Results
927
-
928
- **For Research Tasks:**
929
- - Use compound models with domain filtering
930
- - Include academic domains (*.edu, arxiv.org) for scholarly sources
931
- - Use "Force Search" for the most current information
932
-
933
- **For Creative Tasks:**
934
- - Use openai/gpt-oss-20b (Chutes) or any model
935
- - Set search type to "none" for purely creative responses
936
- - Use higher temperature (0.8-1.0) for more creativity
937
- """)
938
-
939
- with gr.Accordion("🎯 Sample Examples to Test Enhanced Search", open=False, elem_id="neuroscope-accordion"):
940
- gr.Markdown("""
941
- <div class="example-box">
942
- <h4>🔬 Research & Analysis</h4>
943
-
944
- **Compound Model + Domain Filtering (Groq):**
945
- - Query: "What are the latest breakthroughs in quantum computing?"
946
- - Model: compound-beta
947
- - Include domains: "arxiv.org, *.edu, nature.com"
948
- - Search type: web_search
949
-
950
- <h4>💬 General Knowledge (Chutes):**
951
- - Query: "Tell me about quantum computing"
952
- - Model: openai/gpt-oss-20b
953
- - Search type: none
954
-
955
- <h4>💻 Programming & Tech</h4>
956
-
957
- **Technical Documentation (Groq):**
958
- - Query: "How to implement OAuth 2.0 in Python Flask?"
959
- - Model: compound-beta
960
- - Include domains: "github.com, docs.python.org, stackoverflow.com"
961
- - Search type: web_search
962
-
963
- **Code Help (Chutes):**
964
- - Same query with openai/gpt-oss-20b
965
- - Search type: none
966
-
967
- <h4>🎨 Creative Tasks</h4>
968
- - Query: "Write a short story about AI and humans working together"
969
- - Any model with search_type: "none"
970
- - Higher temperature (0.8-1.0)
971
-
972
- <h4>📊 Business Analysis</h4>
973
-
974
- **Business Analysis (Filtered, Groq):**
975
- - Query: "Cryptocurrency adoption in enterprise"
976
- - Model: compound-beta
977
- - Include domains: "bloomberg.com, wsj.com, harvard.edu"
978
- - Search type: web_search
979
- </div>
980
- """)
981
-
982
- send_btn.click(
983
- fn=chat_with_ai,
984
- inputs=[msg, include_domains, exclude_domains, system_prompt, temperature, max_tokens, search_type, force_search, chatbot],
985
- outputs=[chatbot, msg]
986
- )
987
-
988
- msg.submit(
989
- fn=chat_with_ai,
990
- inputs=[msg, include_domains, exclude_domains, system_prompt, temperature, max_tokens, search_type, force_search, chatbot],
991
- outputs=[chatbot, msg]
992
- )
993
-
994
- clear_btn.click(
995
- fn=clear_chat_history,
996
- outputs=[chatbot]
997
- )
998
-
999
- with gr.Accordion("🚀 About This Enhanced NeuroScope AI", open=True, elem_id="neuroscope-accordion"):
1000
- gr.Markdown("""
1001
- **Enhanced Creative Agentic AI Chat Tool** with dual API support:
1002
-
1003
- ### 🆕 **New in This Version:**
1004
- - 💬 **Chutes API Integration**: For openai/gpt-oss-20b model
1005
- - 🌐 **Dual API System**: Web search (Groq) + Basic chat (Chutes)
1006
- - 🎯 **Model Flexibility**: Multiple models across two APIs
1007
- - ⚡ **Force Search Option**: Make AI search for Groq models
1008
- - 🔧 **Enhanced Tool Visibility**: See search tools used (Groq models)
1009
- - 📊 **Model Comparison Guide**: Choose the right model and API
1010
-
1011
- ### 🏆 **Core Features:**
1012
- - 🔗 **Automatic Source Citations**: Clickable links to sources (Groq models)
1013
- - 📚 **Sources Used Section**: Dedicated section for websites (Groq models)
1014
- - 🌐 **Smart Domain Filtering**: Control search scope (Groq models)
1015
- - 💬 **Conversational Memory**: Maintains context throughout the session
1016
- - ⚙️ **Full Customization**: Adjust all parameters and prompts
1017
- - 🎨 **Creative & Analytical**: Optimized for both creative and research tasks
1018
-
1019
- ### 🛠️ **Technical Details:**
1020
- - **Compound Models (Groq)**: compound-beta, compound-beta-mini (web search + domain filtering)
1021
- - **Chat Model (Chutes)**: openai/gpt-oss-20b (basic conversational capabilities)
1022
- - **Automatic Search Type Detection**: AI chooses best search method (Groq models)
1023
- - **Enhanced Error Handling**: Robust error management and user feedback
1024
- - **Real-time Status Updates**: Live feedback on model capabilities and search settings
1025
- """)
1026
-
1027
- return app
1028
-
1029
- # Main execution
1030
- if __name__ == "__main__":
1031
- app = create_gradio_app()
1032
- app.launch(
1033
- share=True
1034
- )