shukdevdattaEX committed on
Commit
8b9d003
·
verified ·
1 Parent(s): 3dd01fd

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +114 -283
app.py CHANGED
@@ -45,9 +45,9 @@ class CreativeAgenticAI:
45
  else:
46
  raise ValueError(f"Invalid provider or missing API key for {provider}")
47
 
48
- async def _chutes_chat_async(self, messages: List[Dict], temperature: float = 0.7, max_tokens: int = 1024, stream: bool = False) -> Dict:
49
  """
50
- Async method for Chutes API chat with thinking support
51
  """
52
  headers = {
53
  "Authorization": f"Bearer {self.chutes_api_key}",
@@ -57,7 +57,7 @@ class CreativeAgenticAI:
57
  body = {
58
  "model": self.model,
59
  "messages": messages,
60
- "stream": stream,
61
  "max_tokens": max_tokens,
62
  "temperature": temperature
63
  }
@@ -69,66 +69,13 @@ class CreativeAgenticAI:
69
  json=body
70
  ) as response:
71
  if response.status == 200:
72
- if stream:
73
- thinking_content = ""
74
- final_content = ""
75
- in_thinking = False
76
-
77
- async for line in response.content:
78
- line = line.decode("utf-8").strip()
79
- if line.startswith("data: "):
80
- data = line[6:]
81
- if data == "[DONE]":
82
- break
83
- try:
84
- chunk_data = json.loads(data)
85
- if 'choices' in chunk_data and len(chunk_data['choices']) > 0:
86
- delta = chunk_data['choices'][0].get('delta', {})
87
- content = delta.get('content', '')
88
-
89
- if content:
90
- # Check for thinking tags
91
- if '<thinking>' in content:
92
- in_thinking = True
93
- thinking_content += content.replace('<thinking>', '')
94
- elif '</thinking>' in content:
95
- thinking_content += content.replace('</thinking>', '')
96
- in_thinking = False
97
- elif in_thinking:
98
- thinking_content += content
99
- else:
100
- final_content += content
101
-
102
- except json.JSONDecodeError:
103
- continue
104
-
105
- return {
106
- "thinking": thinking_content.strip(),
107
- "content": final_content.strip()
108
- }
109
- else:
110
- result = await response.json()
111
- full_content = result['choices'][0]['message']['content']
112
-
113
- # Extract thinking and final content from non-streaming response
114
- thinking_content = ""
115
- final_content = full_content
116
-
117
- if '<thinking>' in full_content and '</thinking>' in full_content:
118
- start_idx = full_content.find('<thinking>') + len('<thinking>')
119
- end_idx = full_content.find('</thinking>')
120
- thinking_content = full_content[start_idx:end_idx].strip()
121
- final_content = full_content[end_idx + len('</thinking>'):].strip()
122
-
123
- return {
124
- "thinking": thinking_content,
125
- "content": final_content
126
- }
127
  else:
128
  error_text = await response.text()
129
  raise Exception(f"Chutes API error: {response.status} - {error_text}")
130
 
131
- def _chutes_chat_sync(self, messages: List[Dict], temperature: float = 0.7, max_tokens: int = 1024, stream: bool = True) -> Dict:
132
  """
133
  Synchronous wrapper for Chutes API chat
134
  """
@@ -139,7 +86,7 @@ class CreativeAgenticAI:
139
  asyncio.set_event_loop(loop)
140
 
141
  return loop.run_until_complete(
142
- self._chutes_chat_async(messages, temperature, max_tokens, stream)
143
  )
144
 
145
  def chat(self, message: str,
@@ -189,13 +136,10 @@ IMPORTANT: When you search the web and find information, you MUST:
189
 
190
  Your responses should be well-structured, informative, and properly cited with working links."""
191
  else:
192
- # System prompt for Chutes thinking models
193
- system_prompt = """You are a creative and intelligent AI assistant with advanced reasoning capabilities.
194
- Think through problems step-by-step, showing your reasoning process clearly.
195
  Be helpful, creative, and engaging while maintaining accuracy.
196
- Your responses should be well-structured, informative, and comprehensive.
197
-
198
- When solving complex problems, break them down into steps and explain your thinking process."""
199
 
200
  # Build messages
201
  messages = [{"role": "system", "content": system_prompt}]
@@ -230,7 +174,6 @@ IMPORTANT: When you search the web and find information, you MUST:
230
 
231
  return {
232
  "content": error_msg,
233
- "thinking": "",
234
  "timestamp": datetime.now().isoformat(),
235
  "model": self.model,
236
  "provider": self.provider,
@@ -272,7 +215,6 @@ IMPORTANT: When you search the web and find information, you MUST:
272
  # Create response object
273
  return {
274
  "content": processed_content,
275
- "thinking": "", # Groq doesn't have thinking process
276
  "timestamp": datetime.now().isoformat(),
277
  "model": self.model,
278
  "provider": "groq",
@@ -286,20 +228,16 @@ IMPORTANT: When you search the web and find information, you MUST:
286
  }
287
 
288
  def _handle_chutes_chat(self, messages: List[Dict], temperature: float, max_tokens: int, original_message: str) -> Dict:
289
- """Handle Chutes API chat with thinking support"""
290
- result = self._chutes_chat_sync(messages, temperature, max_tokens, stream=True)
291
-
292
- thinking_content = result.get("thinking", "")
293
- final_content = result.get("content", "")
294
 
295
- # Add to conversation history (only store final content)
296
  self.conversation_history.append({"role": "user", "content": original_message})
297
- self.conversation_history.append({"role": "assistant", "content": final_content})
298
 
299
  # Create response object
300
  return {
301
- "content": final_content,
302
- "thinking": thinking_content,
303
  "timestamp": datetime.now().isoformat(),
304
  "model": self.model,
305
  "provider": "chutes",
@@ -443,15 +381,14 @@ def validate_api_keys(groq_api_key: str, chutes_api_key: str, provider: str, mod
443
  test_response = test_ai._chutes_chat_sync(
444
  [{"role": "user", "content": "Hello"}],
445
  temperature=0.7,
446
- max_tokens=10,
447
- stream=False # Use non-streaming for validation
448
  )
449
 
450
  # Create AI instance
451
  ai_instance = CreativeAgenticAI(chutes_api_key=chutes_api_key, provider="chutes", model=model)
452
  api_key_status["chutes"] = "Valid βœ…"
453
 
454
- return f"βœ… Chutes API Key Valid! Creative AI with Thinking is ready.\n\n**Provider:** Chutes\n**Model:** {model}\n**Status:** Connected with thinking model capabilities!"
455
 
456
  except Exception as e:
457
  api_key_status["chutes"] = "Invalid ❌"
@@ -477,9 +414,8 @@ def chat_with_ai(message: str,
477
  system_prompt: str,
478
  temperature: float,
479
  max_tokens: int,
480
- history: List,
481
- show_thinking: bool = True) -> tuple:
482
- """Main chat function with thinking support"""
483
  global ai_instance, current_provider
484
 
485
  if not ai_instance:
@@ -508,20 +444,8 @@ def chat_with_ai(message: str,
508
  max_tokens=int(max_tokens)
509
  )
510
 
511
- # Format response with thinking (if available and enabled)
512
- ai_response = ""
513
-
514
- # Add thinking section for Chutes models
515
- if current_provider == "chutes" and response.get("thinking") and show_thinking:
516
- thinking_content = response["thinking"].strip()
517
- if thinking_content:
518
- ai_response += f"### πŸ€” **Model's Thinking Process:**\n\n"
519
- ai_response += f"*The model is reasoning through your question...*\n\n"
520
- ai_response += f"```thinking\n{thinking_content}\n```\n\n"
521
- ai_response += "---\n\n### πŸ’‘ **Final Response:**\n\n"
522
-
523
- # Add main content
524
- ai_response += response["content"]
525
 
526
  # Add enhanced tool usage info (Groq only)
527
  if response.get("tool_usage") and current_provider == "groq":
@@ -550,15 +474,8 @@ def chat_with_ai(message: str,
550
 
551
  ai_response += f"\n\n*🌐 Domain filtering applied: {' | '.join(filter_info)}*"
552
 
553
- # Add provider and thinking info
554
- provider_info = f"πŸ€– Powered by: {current_provider.title()} ({response.get('model', 'unknown')})"
555
- if current_provider == "chutes" and response.get("thinking"):
556
- if show_thinking:
557
- provider_info += " | πŸ€” Thinking process shown"
558
- else:
559
- provider_info += " | πŸ€” Thinking process hidden"
560
-
561
- ai_response += f"\n\n*{provider_info}*"
562
 
563
  # Add to history
564
  history.append([message, ai_response])
@@ -629,13 +546,6 @@ def create_gradio_app():
629
  padding: 15px;
630
  margin: 10px 0;
631
  }
632
- .thinking-info {
633
- background-color: #e2e3e5;
634
- border: 1px solid #d6d8db;
635
- border-radius: 8px;
636
- padding: 15px;
637
- margin: 10px 0;
638
- }
639
  #neuroscope-accordion {
640
  background: linear-gradient(to right, #00ff94, #00b4db);
641
  border-radius: 8px;
@@ -647,65 +557,29 @@ def create_gradio_app():
647
  }
648
  """
649
 
650
- with gr.Blocks(css=css, title="πŸ€– Multi-Provider Creative Agentic AI Chat with Thinking", theme=gr.themes.Ocean()) as app:
651
 
652
  # Header
653
  gr.HTML("""
654
  <div class="header">
655
- <h1>πŸ€– NeuroScope-AI Enhanced with Thinking Models</h1>
656
- <p>Multi-Provider AI Chat Tool - Groq's Compound Models & Chutes Thinking Models</p>
657
  </div>
658
  """)
659
 
660
  # Provider Selection
661
  with gr.Group():
662
- with gr.Accordion("πŸ€– Multi-Provider NeuroScope AI with Thinking", open=False, elem_id="neuroscope-accordion"):
663
  gr.Markdown("""
664
- **Enhanced with Multiple AI Providers & Thinking Models:**
665
- - 🧠 Intelligence (Neuro) - Now supports Groq & Chutes Thinking Models
666
- - πŸ” Advanced capabilities (Scope) - Web search with Groq, reasoning traces with Chutes
667
- - πŸ€– AI capabilities (AI) - Multiple model options including thinking models
668
  - ⚑ Precision & Speed (Scope) - Choose the best provider for your needs
669
- - πŸ€” **NEW**: Thinking process visualization for Chutes models
670
- """)
671
-
672
- # Thinking Models Info
673
- with gr.Group():
674
- with gr.Accordion("πŸ€” About Chutes Thinking Models", open=False, elem_id="neuroscope-accordion"):
675
- gr.Markdown("""
676
- <div class="thinking-info">
677
- <h3>🧠 What are Thinking Models?</h3>
678
- <p><strong>Chutes Thinking Models</strong> are advanced AI systems that show their reasoning process before providing the final answer.</p>
679
-
680
- <h4>πŸ” How They Work:</h4>
681
- <ul>
682
- <li><strong>Step-by-Step Reasoning:</strong> Models think through problems systematically</li>
683
- <li><strong>Transparent Process:</strong> You can see exactly how the AI reaches its conclusions</li>
684
- <li><strong>Better Accuracy:</strong> The thinking process often leads to more accurate and well-reasoned responses</li>
685
- <li><strong>Educational Value:</strong> Learn from the AI's problem-solving approach</li>
686
- </ul>
687
-
688
- <h4>🎯 Available Thinking Models:</h4>
689
- <ul>
690
- <li><strong>openai/gpt-oss-20b:</strong> Large-scale reasoning and analysis</li>
691
- <li><strong>meta-llama/llama-3.1-8b-instruct:</strong> Efficient thinking and instruction following</li>
692
- <li><strong>anthropic/claude-3-sonnet:</strong> Advanced reasoning and creative thinking</li>
693
- </ul>
694
-
695
- <h4>πŸ’‘ Best For:</h4>
696
- <ul>
697
- <li>Complex problem-solving tasks</li>
698
- <li>Mathematical and logical reasoning</li>
699
- <li>Step-by-step analysis</li>
700
- <li>Educational explanations</li>
701
- <li>Creative writing with detailed planning</li>
702
- </ul>
703
- </div>
704
  """)
705
 
706
  # Provider and API Key Section
707
  with gr.Row():
708
-
709
  with gr.Column():
710
  provider_selection = gr.Radio(
711
  choices=["groq", "chutes"],
@@ -730,7 +604,8 @@ def create_gradio_app():
730
  info="Get your API key from: https://chutes.ai/",
731
  visible=False
732
  )
733
-
 
734
  model_selection = gr.Radio(
735
  choices=get_available_models("groq"),
736
  label="🧠 Groq Models",
@@ -738,14 +613,6 @@ def create_gradio_app():
738
  info="compound-beta: More powerful | compound-beta-mini: Faster"
739
  )
740
 
741
- # Thinking toggle for Chutes
742
- show_thinking = gr.Checkbox(
743
- label="πŸ€” Show Thinking Process",
744
- value=True,
745
- info="Display the model's reasoning process (Chutes only)",
746
- visible=False
747
- )
748
-
749
  connect_btn = gr.Button("πŸ”— Connect", variant="primary", size="lg")
750
 
751
  # Status display
@@ -765,29 +632,26 @@ def create_gradio_app():
765
  - βœ… **Citation System** - Automatic source linking and references
766
  - ⚑ **Ultra-fast inference** - Groq's hardware acceleration
767
  - 🧠 **Models**: compound-beta, compound-beta-mini
768
- - ❌ **No thinking process** - Direct responses without visible reasoning
769
 
770
- **🎯 Chutes (Thinking Models)**
771
  - βœ… **Multiple Model Access** - Various open-source and commercial models
772
- - βœ… **Thinking Process** - See the model's step-by-step reasoning
773
- - βœ… **High-quality reasoning** - Better accuracy through visible thinking
774
- - βœ… **Educational Value** - Learn from AI's problem-solving approach
775
  - ⚑ **Good performance** - Reliable and fast responses
776
- - 🧠 **Models**: GPT-OSS-20B, Llama 3.1, Claude 3 Sonnet (all with thinking)
777
  - ❌ **No web search** - Relies on training data only
778
 
779
  **πŸ’‘ Use Groq when you need:**
780
  - Real-time information and web search
781
  - Research with source citations
782
  - Domain-specific searches
783
- - Ultra-fast responses
784
 
785
  **πŸ’‘ Use Chutes when you need:**
786
- - Complex problem-solving with visible reasoning
787
- - Educational explanations
788
- - Mathematical and logical analysis
789
- - Creative planning with detailed thinking
790
- - Understanding AI's reasoning process
791
  </div>
792
  """)
793
 
@@ -802,14 +666,15 @@ def create_gradio_app():
802
  gr.update(visible=chutes_visible), # chutes_api_key
803
  gr.update(choices=models, value=models[0] if models else None,
804
  label=f"🧠 {provider.title()} Models"), # model_selection
805
- gr.update(visible=chutes_visible), # show_thinking
806
- gr.update(visible=groq_visible),
 
807
  )
808
 
809
  provider_selection.change(
810
  fn=update_provider_ui,
811
  inputs=[provider_selection],
812
- outputs=[groq_api_key, chutes_api_key, model_selection, show_thinking]
813
  )
814
 
815
  # Connect button functionality
@@ -822,8 +687,8 @@ def create_gradio_app():
822
  # Main Chat Interface
823
  with gr.Tab("πŸ’¬ Chat"):
824
  chatbot = gr.Chatbot(
825
- label="Multi-Provider Creative AI Assistant with Thinking",
826
- height=600,
827
  show_label=True,
828
  bubble_full_width=False,
829
  show_copy_button=True
@@ -914,7 +779,7 @@ def create_gradio_app():
914
  - `nature.com`, `science.org`, `pubmed.ncbi.nlm.nih.gov`, `who.int`
915
  """)
916
 
917
- # Update provider UI function with domain filtering and thinking toggle
918
  def update_provider_ui_complete(provider):
919
  groq_visible = provider == "groq"
920
  chutes_visible = provider == "chutes"
@@ -925,27 +790,25 @@ def create_gradio_app():
925
  gr.update(visible=chutes_visible), # chutes_api_key
926
  gr.update(choices=models, value=models[0] if models else None,
927
  label=f"🧠 {provider.title()} Models"), # model_selection
928
- gr.update(visible=chutes_visible), # show_thinking
929
- gr.update(visible=groq_visible),
930
  )
931
 
932
  provider_selection.change(
933
  fn=update_provider_ui_complete,
934
  inputs=[provider_selection],
935
- outputs=[groq_api_key, chutes_api_key, model_selection, show_thinking]
936
  )
937
 
938
- # IMPORTANT Section with Citation Info and Thinking
939
  with gr.Group():
940
- with gr.Accordion("πŸ“š IMPORTANT - Citations & Thinking Models!", open=False, elem_id="neuroscope-accordion"):
941
  gr.Markdown("""
942
  <div class="citation-info">
943
- <h3>πŸ†• Multi-Provider Enhancement with Thinking Models</h3>
944
  <p>This enhanced version now supports both Groq and Chutes AI providers:</p>
945
  <ul>
946
  <li><strong>πŸš€ Groq Integration:</strong> Agentic AI with web search, citations, and tool usage</li>
947
- <li><strong>🎯 Chutes Integration:</strong> Multiple thinking models with visible reasoning processes</li>
948
- <li><strong>πŸ€” Thinking Process:</strong> See step-by-step reasoning from Chutes models</li>
949
  <li><strong>πŸ”„ Easy Switching:</strong> Switch between providers based on your needs</li>
950
  <li><strong>πŸ“Š Provider Comparison:</strong> Clear information about each provider's strengths</li>
951
  </ul>
@@ -959,14 +822,13 @@ def create_gradio_app():
959
  <li><strong>Search Query Tracking:</strong> Shows what queries were made to find information</li>
960
  </ul>
961
 
962
- <h3>πŸ€” Chutes Thinking Model Features</h3>
963
  <p>When using Chutes, you get access to:</p>
964
  <ul>
965
- <li><strong>Thinking Process Visualization:</strong> See exactly how the AI reasons through problems</li>
966
- <li><strong>Step-by-Step Analysis:</strong> Watch the model break down complex questions</li>
967
- <li><strong>Toggle Thinking Display:</strong> Choose to show or hide the reasoning process</li>
968
- <li><strong>Educational Value:</strong> Learn from AI's problem-solving approaches</li>
969
- <li><strong>Multiple Thinking Models:</strong> Different models with unique reasoning styles</li>
970
  </ul>
971
  </div>
972
 
@@ -987,20 +849,10 @@ def create_gradio_app():
987
  - Useful for **filtering out unreliable or unwanted sources**.
988
  - Allows broad search with **targeted exclusions**.
989
 
990
- ### πŸ€” **Thinking Models Behavior (Chutes Only)**
991
-
992
- **How Thinking Models Work:**
993
- - Models first **reason through the problem** in a thinking section
994
- - You can see the **step-by-step thought process**
995
- - The model then provides its **final polished answer**
996
- - **Toggle thinking display** on/off as needed
997
-
998
- **Best Use Cases for Thinking Models:**
999
- - Complex mathematical problems
1000
- - Logical reasoning tasks
1001
- - Creative writing with planning
1002
- - Educational explanations
1003
- - Problem-solving scenarios
1004
 
1005
  ---
1006
 
@@ -1011,23 +863,21 @@ def create_gradio_app():
1011
  - A **professional business consultant**
1012
  - A **coding mentor**
1013
  - A **creative writer**
1014
- - A **step-by-step tutor** (especially effective with thinking models)
1015
  - A **specific character or persona**
1016
  - Provides full control to **reshape the AI's tone, expertise, and conversational style** with a single prompt.
1017
  """)
1018
 
1019
  # How to Use Section
1020
- with gr.Accordion("πŸ“– How to Use This Enhanced Multi-Provider App with Thinking", open=False, elem_id="neuroscope-accordion"):
1021
  gr.Markdown("""
1022
  ### πŸš€ Getting Started
1023
- 1. **Choose your AI Provider** - Select between Groq (web search + agentic) or Chutes (thinking models)
1024
  2. **Enter your API Key** -
1025
  - Groq: Get one from [console.groq.com](https://console.groq.com/)
1026
  - Chutes: Get one from [chutes.ai](https://chutes.ai/)
1027
  3. **Select a model** - Choose from provider-specific model options
1028
- 4. **Configure thinking display** - For Chutes, decide if you want to see the reasoning process
1029
- 5. **Click Connect** - Validate your key and connect to the AI
1030
- 6. **Start chatting!** - Type your message and get intelligent responses
1031
 
1032
  ### 🎯 Key Features
1033
  **πŸš€ Groq Features:**
@@ -1037,11 +887,10 @@ def create_gradio_app():
1037
  - **Ultra-fast**: Groq's hardware-accelerated inference
1038
 
1039
  **🎯 Chutes Features:**
1040
- - **Thinking Models**: See the model's step-by-step reasoning process
1041
- - **Multiple Models**: Access to GPT-OSS-20B, Llama 3.1, Claude 3 Sonnet
1042
- - **Educational**: Learn from AI's problem-solving approaches
1043
- - **Toggle Thinking**: Show/hide the reasoning process as needed
1044
- - **High Quality**: Excellent reasoning and analysis capabilities
1045
 
1046
  **πŸ”„ Universal Features:**
1047
  - **Memory**: Maintains conversation context throughout the session
@@ -1055,83 +904,73 @@ def create_gradio_app():
1055
  - Check the "Sources Used" section for all references
1056
  - Try different domain combinations to see varied results
1057
 
1058
- **For Chutes (Thinking Models):**
1059
- - Ask complex, multi-step questions to see rich thinking processes
1060
- - Use for educational purposes - the thinking is very instructive
1061
- - Try mathematical problems, logical puzzles, or creative planning tasks
1062
- - Toggle thinking display based on whether you want to see the process
1063
- - Different models have different thinking styles - experiment!
1064
 
1065
  **General:**
1066
  - Adjust temperature: higher for creativity, lower for precision
1067
  - Try different system prompts for different conversation styles
1068
  - Use the provider that best fits your current task
1069
- - For learning: use Chutes with thinking display enabled
1070
- - For research: use Groq with appropriate domain filtering
1071
  """)
1072
 
1073
  # Sample Examples Section
1074
- with gr.Accordion("🎯 Sample Examples to Test Both Providers & Thinking", open=False, elem_id="neuroscope-accordion"):
1075
  gr.Markdown("""
1076
  <div class="example-box">
1077
  <h4>πŸ†š Provider Comparison Examples</h4>
1078
  <p>Try the same prompts with both providers to see the difference:</p>
1079
 
1080
- <h4>πŸ€” Perfect for Thinking Models (Chutes)</h4>
1081
  <ul>
1082
- <li><strong>Math & Logic:</strong> "Solve this step by step: If a train travels 120 miles in 2 hours, and then 180 miles in 3 hours, what's the average speed for the entire journey?"</li>
1083
- <li><strong>Problem Solving:</strong> "I have a budget of $1000 for a home office setup. Help me plan the best allocation across desk, chair, computer, and lighting."</li>
1084
- <li><strong>Creative Planning:</strong> "Plan a short story about a time traveler who accidentally changes history. Walk through the plot structure."</li>
1085
- <li><strong>Analysis:</strong> "Compare and contrast the pros and cons of remote work vs office work, considering productivity, collaboration, and work-life balance."</li>
1086
  </ul>
1087
 
1088
- <h4>πŸ”¬ Research & Real-time Info (Groq)</h4>
1089
  <ul>
1090
- <li><strong>Current Events:</strong> "What are the latest developments in AI research in 2024?"</li>
1091
- <li><strong>Tech Updates:</strong> "What are the newest features in React 19?"</li>
1092
- <li><strong>Market Analysis:</strong> "Current trends in cryptocurrency markets with sources"</li>
1093
- <li><strong>Scientific Updates:</strong> "Recent breakthroughs in quantum computing research"</li>
1094
  </ul>
1095
 
1096
- <h4>πŸ’» Programming & Tech (Compare Both)</h4>
1097
  <ul>
1098
- <li><strong>Groq:</strong> "What are the current best practices for React 18 in 2024?" (with web search)</li>
1099
- <li><strong>Chutes:</strong> "Explain how to build a React component with useState, and walk through your reasoning for the design choices"</li>
 
 
1100
  </ul>
1101
 
1102
- <h4>🎨 Creative Tasks (Great for Thinking Models)</h4>
1103
  <ul>
1104
- <li>"Write a marketing strategy for a new eco-friendly product, showing your planning process"</li>
1105
- <li>"Create a study plan for learning Python in 3 months, explaining your reasoning for each phase"</li>
1106
- <li>"Design a mobile app concept for meditation, walking through your design thinking"</li>
1107
  </ul>
1108
 
1109
- <h4>🧠 Model-Specific Testing (Chutes Thinking)</h4>
1110
  <ul>
1111
- <li><strong>GPT-OSS-20B:</strong> "Explain quantum entanglement in simple terms, showing your thought process for making it accessible"</li>
1112
- <li><strong>Llama 3.1:</strong> "Debug this Python code and explain your debugging approach: [code with intentional errors]"</li>
1113
- <li><strong>Claude 3 Sonnet:</strong> "Analyze this business scenario and provide strategic recommendations, showing your analytical framework"</li>
1114
- </ul>
1115
-
1116
- <h4>πŸ“Š Side-by-Side Comparisons</h4>
1117
- <ul>
1118
- <li><strong>Same Question, Different Providers:</strong> Ask "How do neural networks work?" to both providers</li>
1119
- <li><strong>Groq Result:</strong> Fast response with potential web sources and current information</li>
1120
- <li><strong>Chutes Result:</strong> Detailed thinking process showing how the model breaks down the explanation</li>
1121
  </ul>
1122
  </div>
1123
  """)
1124
 
1125
- # Event handlers - Updated to include thinking toggle
1126
  send_btn.click(
1127
  fn=chat_with_ai,
1128
- inputs=[msg, include_domains, exclude_domains, system_prompt, temperature, max_tokens, chatbot, show_thinking],
1129
  outputs=[chatbot, msg]
1130
  )
1131
 
1132
  msg.submit(
1133
  fn=chat_with_ai,
1134
- inputs=[msg, include_domains, exclude_domains, system_prompt, temperature, max_tokens, chatbot, show_thinking],
1135
  outputs=[chatbot, msg]
1136
  )
1137
 
@@ -1141,16 +980,16 @@ def create_gradio_app():
1141
  )
1142
 
1143
  # Footer
1144
- with gr.Accordion("πŸš€ About This Enhanced Multi-Provider Tool with Thinking", open=True, elem_id="neuroscope-accordion"):
1145
  gr.Markdown("""
1146
- **Enhanced Multi-Provider Creative Agentic AI Chat Tool** with thinking model support:
1147
 
1148
- **πŸ†• New Thinking Model Features:**
1149
- - πŸ€” **Thinking Process Visualization**: See step-by-step reasoning from Chutes models
1150
- - 🧠 **Multiple Thinking Models**: GPT-OSS-20B, Llama 3.1, Claude 3 Sonnet with reasoning
1151
- - πŸ”„ **Toggle Thinking Display**: Choose to show or hide the reasoning process
1152
- - πŸ“š **Educational Value**: Learn from AI's problem-solving approaches
1153
- - 🎯 **Better Accuracy**: Thinking process often leads to more accurate responses
1154
 
1155
  **πŸš€ Groq Features:**
1156
  - πŸ”— **Automatic Source Citations**: Every response includes clickable links to sources
@@ -1162,11 +1001,10 @@ def create_gradio_app():
1162
  - 🧠 Advanced AI reasoning with tool usage
1163
 
1164
  **🎯 Chutes Features:**
1165
- - πŸ€– **Multiple Thinking Models**: Various AI models with visible reasoning
1166
  - πŸ’° **Cost-Effective**: Competitive pricing for AI access
1167
- - 🎨 **Creative Excellence**: Optimized for reasoning and analysis tasks
1168
- - ⚑ **Reliable Performance**: Consistent and thoughtful responses
1169
- - πŸ“– **Learning Tool**: Perfect for understanding AI reasoning
1170
 
1171
  **πŸ”„ Universal Features:**
1172
  - πŸ’¬ Conversational memory and context
@@ -1176,14 +1014,7 @@ def create_gradio_app():
1176
 
1177
  **πŸ’‘ Choose Your Provider:**
1178
  - **Use Groq** when you need real-time information, web search, and citations
1179
- - **Use Chutes** when you want to see the thinking process, need complex reasoning, or want educational explanations
1180
- - **Toggle thinking display** in Chutes to customize your experience
1181
-
1182
- **πŸŽ“ Perfect for:**
1183
- - Students learning problem-solving approaches
1184
- - Developers wanting to understand AI reasoning
1185
- - Researchers needing both current information (Groq) and deep analysis (Chutes)
1186
- - Anyone curious about how AI thinks through problems
1187
  """)
1188
 
1189
  return app
 
45
  else:
46
  raise ValueError(f"Invalid provider or missing API key for {provider}")
47
 
48
+ async def _chutes_chat_async(self, messages: List[Dict], temperature: float = 0.7, max_tokens: int = 1024) -> str:
49
  """
50
+ Async method for Chutes API chat
51
  """
52
  headers = {
53
  "Authorization": f"Bearer {self.chutes_api_key}",
 
57
  body = {
58
  "model": self.model,
59
  "messages": messages,
60
+ "stream": False, # Set to False for simpler handling
61
  "max_tokens": max_tokens,
62
  "temperature": temperature
63
  }
 
69
  json=body
70
  ) as response:
71
  if response.status == 200:
72
+ result = await response.json()
73
+ return result['choices'][0]['message']['content']
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
74
  else:
75
  error_text = await response.text()
76
  raise Exception(f"Chutes API error: {response.status} - {error_text}")
77
 
78
+ def _chutes_chat_sync(self, messages: List[Dict], temperature: float = 0.7, max_tokens: int = 1024) -> str:
79
  """
80
  Synchronous wrapper for Chutes API chat
81
  """
 
86
  asyncio.set_event_loop(loop)
87
 
88
  return loop.run_until_complete(
89
+ self._chutes_chat_async(messages, temperature, max_tokens)
90
  )
91
 
92
  def chat(self, message: str,
 
136
 
137
  Your responses should be well-structured, informative, and properly cited with working links."""
138
  else:
139
+ # Simpler system prompt for Chutes (no web search capabilities)
140
+ system_prompt = """You are a creative and intelligent AI assistant.
 
141
  Be helpful, creative, and engaging while maintaining accuracy.
142
+ Your responses should be well-structured, informative, and comprehensive."""
 
 
143
 
144
  # Build messages
145
  messages = [{"role": "system", "content": system_prompt}]
 
174
 
175
  return {
176
  "content": error_msg,
 
177
  "timestamp": datetime.now().isoformat(),
178
  "model": self.model,
179
  "provider": self.provider,
 
215
  # Create response object
216
  return {
217
  "content": processed_content,
 
218
  "timestamp": datetime.now().isoformat(),
219
  "model": self.model,
220
  "provider": "groq",
 
228
  }
229
 
230
  def _handle_chutes_chat(self, messages: List[Dict], temperature: float, max_tokens: int, original_message: str) -> Dict:
231
+ """Handle Chutes API chat"""
232
+ content = self._chutes_chat_sync(messages, temperature, max_tokens)
 
 
 
233
 
234
+ # Add to conversation history
235
  self.conversation_history.append({"role": "user", "content": original_message})
236
+ self.conversation_history.append({"role": "assistant", "content": content})
237
 
238
  # Create response object
239
  return {
240
+ "content": content,
 
241
  "timestamp": datetime.now().isoformat(),
242
  "model": self.model,
243
  "provider": "chutes",
 
381
  test_response = test_ai._chutes_chat_sync(
382
  [{"role": "user", "content": "Hello"}],
383
  temperature=0.7,
384
+ max_tokens=10
 
385
  )
386
 
387
  # Create AI instance
388
  ai_instance = CreativeAgenticAI(chutes_api_key=chutes_api_key, provider="chutes", model=model)
389
  api_key_status["chutes"] = "Valid βœ…"
390
 
391
+ return f"βœ… Chutes API Key Valid! Creative AI is ready.\n\n**Provider:** Chutes\n**Model:** {model}\n**Status:** Connected (text generation focused)!"
392
 
393
  except Exception as e:
394
  api_key_status["chutes"] = "Invalid ❌"
 
414
  system_prompt: str,
415
  temperature: float,
416
  max_tokens: int,
417
+ history: List) -> tuple:
418
+ """Main chat function"""
 
419
  global ai_instance, current_provider
420
 
421
  if not ai_instance:
 
444
  max_tokens=int(max_tokens)
445
  )
446
 
447
+ # Format response
448
+ ai_response = response["content"]
 
 
 
 
 
 
 
 
 
 
 
 
449
 
450
  # Add enhanced tool usage info (Groq only)
451
  if response.get("tool_usage") and current_provider == "groq":
 
474
 
475
  ai_response += f"\n\n*🌐 Domain filtering applied: {' | '.join(filter_info)}*"
476
 
477
+ # Add provider info
478
+ ai_response += f"\n\n*πŸ€– Powered by: {current_provider.title()} ({response.get('model', 'unknown')})*"
 
 
 
 
 
 
 
479
 
480
  # Add to history
481
  history.append([message, ai_response])
 
546
  padding: 15px;
547
  margin: 10px 0;
548
  }
 
 
 
 
 
 
 
549
  #neuroscope-accordion {
550
  background: linear-gradient(to right, #00ff94, #00b4db);
551
  border-radius: 8px;
 
557
  }
558
  """
559
 
560
+ with gr.Blocks(css=css, title="πŸ€– Multi-Provider Creative Agentic AI Chat", theme=gr.themes.Ocean()) as app:
561
 
562
  # Header
563
  gr.HTML("""
564
  <div class="header">
565
+ <h1>πŸ€– NeuroScope-AI Enhanced</h1>
566
+ <p>Multi-Provider AI Chat Tool - Powered by Groq's Compound Models & Chutes API</p>
567
  </div>
568
  """)
569
 
570
  # Provider Selection
571
  with gr.Group():
572
+ with gr.Accordion("πŸ€– Multi-Provider NeuroScope AI", open=False, elem_id="neuroscope-accordion"):
573
  gr.Markdown("""
574
+ **Enhanced with Multiple AI Providers:**
575
+ - 🧠 Intelligence (Neuro) - Now supports Groq & Chutes
576
+ - πŸ” Advanced capabilities (Scope) - Web search with Groq, powerful text generation with Chutes
577
+ - πŸ€– AI capabilities (AI) - Multiple model options
578
  - ⚑ Precision & Speed (Scope) - Choose the best provider for your needs
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
579
  """)
580
 
581
  # Provider and API Key Section
582
  with gr.Row():
 
583
  with gr.Column():
584
  provider_selection = gr.Radio(
585
  choices=["groq", "chutes"],
 
604
  info="Get your API key from: https://chutes.ai/",
605
  visible=False
606
  )
607
+
608
+ with gr.Column():
609
  model_selection = gr.Radio(
610
  choices=get_available_models("groq"),
611
  label="🧠 Groq Models",
 
613
  info="compound-beta: More powerful | compound-beta-mini: Faster"
614
  )
615
 
 
 
 
 
 
 
 
 
616
  connect_btn = gr.Button("πŸ”— Connect", variant="primary", size="lg")
617
 
618
  # Status display
 
632
  - βœ… **Citation System** - Automatic source linking and references
633
  - ⚑ **Ultra-fast inference** - Groq's hardware acceleration
634
  - 🧠 **Models**: compound-beta, compound-beta-mini
 
635
 
636
+ **🎯 Chutes**
637
  - βœ… **Multiple Model Access** - Various open-source and commercial models
638
+ - βœ… **Cost-effective** - Competitive pricing
639
+ - βœ… **High-quality text generation** - Excellent for creative writing and analysis
 
640
  - ⚑ **Good performance** - Reliable and fast responses
641
+ - 🧠 **Models**: GPT-OSS-20B, Llama 3.1, Claude 3 Sonnet, and more
642
  - ❌ **No web search** - Relies on training data only
643
 
644
  **πŸ’‘ Use Groq when you need:**
645
  - Real-time information and web search
646
  - Research with source citations
647
  - Domain-specific searches
648
+ - Agentic AI capabilities
649
 
650
  **πŸ’‘ Use Chutes when you need:**
651
+ - Pure text generation and analysis
652
+ - Creative writing tasks
653
+ - Cost-effective AI access
654
+ - Variety of model options
 
655
  </div>
656
  """)
657
 
 
666
  gr.update(visible=chutes_visible), # chutes_api_key
667
  gr.update(choices=models, value=models[0] if models else None,
668
  label=f"🧠 {provider.title()} Models"), # model_selection
669
+ gr.update(visible=groq_visible), # domain filtering sections
670
+ gr.update(visible=groq_visible), # include domains
671
+ gr.update(visible=groq_visible) # exclude domains
672
  )
673
 
674
  provider_selection.change(
675
  fn=update_provider_ui,
676
  inputs=[provider_selection],
677
+ outputs=[groq_api_key, chutes_api_key, model_selection] # We'll add domain filtering updates later
678
  )
679
 
680
  # Connect button functionality
 
687
  # Main Chat Interface
688
  with gr.Tab("πŸ’¬ Chat"):
689
  chatbot = gr.Chatbot(
690
+ label="Multi-Provider Creative AI Assistant",
691
+ height=500,
692
  show_label=True,
693
  bubble_full_width=False,
694
  show_copy_button=True
 
779
  - `nature.com`, `science.org`, `pubmed.ncbi.nlm.nih.gov`, `who.int`
780
  """)
781
 
782
+ # Update provider UI function with domain filtering
783
  def update_provider_ui_complete(provider):
784
  groq_visible = provider == "groq"
785
  chutes_visible = provider == "chutes"
 
790
  gr.update(visible=chutes_visible), # chutes_api_key
791
  gr.update(choices=models, value=models[0] if models else None,
792
  label=f"🧠 {provider.title()} Models"), # model_selection
793
+ gr.update(visible=groq_visible), # domain_group
 
794
  )
795
 
796
  provider_selection.change(
797
  fn=update_provider_ui_complete,
798
  inputs=[provider_selection],
799
+ outputs=[groq_api_key, chutes_api_key, model_selection]
800
  )
801
 
802
+ # IMPORTANT Section with Citation Info
803
  with gr.Group():
804
+ with gr.Accordion("πŸ“š IMPORTANT - Citations & Multi-Provider Features!", open=False, elem_id="neuroscope-accordion"):
805
  gr.Markdown("""
806
  <div class="citation-info">
807
+ <h3>πŸ†• Multi-Provider Enhancement</h3>
808
  <p>This enhanced version now supports both Groq and Chutes AI providers:</p>
809
  <ul>
810
  <li><strong>πŸš€ Groq Integration:</strong> Agentic AI with web search, citations, and tool usage</li>
811
+ <li><strong>🎯 Chutes Integration:</strong> Multiple AI models for text generation and analysis</li>
 
812
  <li><strong>πŸ”„ Easy Switching:</strong> Switch between providers based on your needs</li>
813
  <li><strong>πŸ“Š Provider Comparison:</strong> Clear information about each provider's strengths</li>
814
  </ul>
 
822
  <li><strong>Search Query Tracking:</strong> Shows what queries were made to find information</li>
823
  </ul>
824
 
825
+ <h3>🎯 Chutes Model Access</h3>
826
  <p>When using Chutes, you get access to:</p>
827
  <ul>
828
+ <li><strong>Multiple Models:</strong> GPT-OSS-20B, Llama 3.1, Claude 3 Sonnet</li>
829
+ <li><strong>Cost-Effective:</strong> Competitive pricing for high-quality AI</li>
830
+ <li><strong>Specialized Tasks:</strong> Optimized for creative writing and analysis</li>
831
+ <li><strong>Reliable Performance:</strong> Consistent and fast responses</li>
 
832
  </ul>
833
  </div>
834
 
 
849
  - Useful for **filtering out unreliable or unwanted sources**.
850
  - Allows broad search with **targeted exclusions**.
851
 
852
+ **Both Include and Exclude Domains Specified:**
853
+ - **Only the include domains** are used for searching.
854
+ - **Exclude list is ignored** because the include list already restricts search scope.
855
+ - Guarantees AI pulls content **exclusively from whitelisted domains**, regardless of the excluded ones.
 
 
 
 
 
 
 
 
 
 
856
 
857
  ---
858
 
 
863
  - A **professional business consultant**
864
  - A **coding mentor**
865
  - A **creative writer**
 
866
  - A **specific character or persona**
867
  - Provides full control to **reshape the AI's tone, expertise, and conversational style** with a single prompt.
868
  """)
869
 
870
  # How to Use Section
871
+ with gr.Accordion("πŸ“– How to Use This Enhanced Multi-Provider App", open=False, elem_id="neuroscope-accordion"):
872
  gr.Markdown("""
873
  ### πŸš€ Getting Started
874
+ 1. **Choose your AI Provider** - Select between Groq (web search + agentic) or Chutes (text generation)
875
  2. **Enter your API Key** -
876
  - Groq: Get one from [console.groq.com](https://console.groq.com/)
877
  - Chutes: Get one from [chutes.ai](https://chutes.ai/)
878
  3. **Select a model** - Choose from provider-specific model options
879
+ 4. **Click Connect** - Validate your key and connect to the AI
880
+ 5. **Start chatting!** - Type your message and get intelligent responses
 
881
 
882
  ### 🎯 Key Features
883
  **πŸš€ Groq Features:**
 
887
  - **Ultra-fast**: Groq's hardware-accelerated inference
888
 
889
  **🎯 Chutes Features:**
890
+ - **Multiple Models**: Access to various open-source and commercial models
891
+ - **Cost-Effective**: Competitive pricing for AI access
892
+ - **High Quality**: Excellent text generation and analysis
893
+ - **Model Variety**: Choose the best model for your specific task
 
894
 
895
  **πŸ”„ Universal Features:**
896
  - **Memory**: Maintains conversation context throughout the session
 
904
  - Check the "Sources Used" section for all references
905
  - Try different domain combinations to see varied results
906
 
907
+ **For Chutes:**
908
+ - Experiment with different models for different tasks
909
+ - Use higher temperatures for creative tasks
910
+ - Leverage the variety of available models (GPT, Llama, Claude)
911
+ - Perfect for tasks that don't require real-time information
 
912
 
913
  **General:**
914
  - Adjust temperature: higher for creativity, lower for precision
915
  - Try different system prompts for different conversation styles
916
  - Use the provider that best fits your current task
 
 
917
  """)
918
 
919
  # Sample Examples Section
920
+ with gr.Accordion("🎯 Sample Examples to Test Both Providers", open=False, elem_id="neuroscope-accordion"):
921
  gr.Markdown("""
922
  <div class="example-box">
923
  <h4>πŸ†š Provider Comparison Examples</h4>
924
  <p>Try the same prompts with both providers to see the difference:</p>
925
 
926
+ <h4>πŸ”¬ Research & Analysis</h4>
927
  <ul>
928
+ <li><strong>Groq (with web search):</strong> "What are the latest breakthroughs in quantum computing in 2024?"</li>
929
+ <li><strong>Chutes (knowledge-based):</strong> "Explain the fundamental principles of quantum computing"</li>
930
+ <li><strong>Groq with domains:</strong> Same question with "arxiv.org, *.edu" in include domains</li>
 
931
  </ul>
932
 
933
+ <h4>πŸ’» Programming & Tech</h4>
934
  <ul>
935
+ <li><strong>Groq:</strong> "What are the current best practices for React 18 in 2024?"</li>
936
+ <li><strong>Chutes:</strong> "Write a comprehensive React component with hooks and best practices"</li>
937
+ <li><strong>Groq filtered:</strong> Same with "github.com, stackoverflow.com" included</li>
 
938
  </ul>
939
 
940
+ <h4>🎨 Creative Tasks (Great for Chutes)</h4>
941
  <ul>
942
+ <li>"Write a short story about AI and humans working together"</li>
943
+ <li>"Create a marketing plan for a sustainable fashion brand"</li>
944
+ <li>"Generate ideas for a mobile app that helps with mental health"</li>
945
+ <li>"Write a poem about the beauty of code"</li>
946
  </ul>
947
 
948
+ <h4>πŸ“Š Business & Analysis</h4>
949
  <ul>
950
+ <li><strong>Groq:</strong> "What are the current trends in cryptocurrency markets?"</li>
951
+ <li><strong>Chutes:</strong> "Analyze the pros and cons of different investment strategies"</li>
952
+ <li><strong>Groq filtered:</strong> Crypto question with "bloomberg.com, wsj.com" included</li>
953
  </ul>
954
 
955
+ <h4>🧠 Model-Specific Testing (Chutes)</h4>
956
  <ul>
957
+ <li><strong>GPT-OSS-20B:</strong> "Explain complex scientific concepts in simple terms"</li>
958
+ <li><strong>Llama 3.1:</strong> "Help me debug this Python code and explain the solution"</li>
959
+ <li><strong>Claude 3 Sonnet:</strong> "Analyze this business scenario and provide strategic recommendations"</li>
 
 
 
 
 
 
 
960
  </ul>
961
  </div>
962
  """)
963
 
964
+ # Event handlers
965
  send_btn.click(
966
  fn=chat_with_ai,
967
+ inputs=[msg, include_domains, exclude_domains, system_prompt, temperature, max_tokens, chatbot],
968
  outputs=[chatbot, msg]
969
  )
970
 
971
  msg.submit(
972
  fn=chat_with_ai,
973
+ inputs=[msg, include_domains, exclude_domains, system_prompt, temperature, max_tokens, chatbot],
974
  outputs=[chatbot, msg]
975
  )
976
 
 
980
  )
981
 
982
  # Footer
983
+ with gr.Accordion("πŸš€ About This Enhanced Multi-Provider Tool", open=True, elem_id="neuroscope-accordion"):
984
  gr.Markdown("""
985
+ **Enhanced Multi-Provider Creative Agentic AI Chat Tool** with dual API support:
986
 
987
+ **πŸ†• New Multi-Provider Features:**
988
+ - πŸš€ **Groq Integration**: Agentic AI with web search, citations, and tool usage
989
+ - 🎯 **Chutes Integration**: Multiple AI models for diverse text generation tasks
990
+ - πŸ”„ **Provider Switching**: Easy switching between different AI providers
991
+ - πŸ“Š **Provider Comparison**: Clear information about each provider's strengths
992
+ - 🧠 **Multiple Models**: Access to various AI models through both providers
993
 
994
  **πŸš€ Groq Features:**
995
  - πŸ”— **Automatic Source Citations**: Every response includes clickable links to sources
 
1001
  - 🧠 Advanced AI reasoning with tool usage
1002
 
1003
  **🎯 Chutes Features:**
1004
+ - πŸ€– **Multiple AI Models**: GPT-OSS-20B, Llama 3.1, Claude 3 Sonnet
1005
  - πŸ’° **Cost-Effective**: Competitive pricing for AI access
1006
+ - 🎨 **Creative Excellence**: Optimized for writing and analysis tasks
1007
+ - ⚑ **Reliable Performance**: Consistent and fast responses
 
1008
 
1009
  **πŸ”„ Universal Features:**
1010
  - πŸ’¬ Conversational memory and context
 
1014
 
1015
  **πŸ’‘ Choose Your Provider:**
1016
  - **Use Groq** when you need real-time information, web search, and citations
1017
+ - **Use Chutes** when you need pure text generation, creative writing, or cost-effective AI access
 
 
 
 
 
 
 
1018
  """)
1019
 
1020
  return app