milwright committed on
Commit
719f45f
·
1 Parent(s): c894c1a

Update research assistant prompt to be more realistic and accessible

Browse files
Files changed (1) hide show
  1. app.py +428 -138
app.py CHANGED
@@ -145,6 +145,7 @@ GROUNDING_URLS = {grounding_urls}
145
  ACCESS_CODE = os.environ.get("SPACE_ACCESS_CODE", "{access_code}")
146
  ENABLE_DYNAMIC_URLS = {enable_dynamic_urls}
147
  ENABLE_VECTOR_RAG = {enable_vector_rag}
 
148
  RAG_DATA = {rag_data_json}
149
 
150
  # Get API key from environment - customizable variable name with validation
@@ -167,7 +168,7 @@ def validate_api_key():
167
  elif not API_KEY.startswith('sk-or-'):
168
  print(f"⚠️ API KEY FORMAT WARNING:")
169
  print(f" Variable name: {api_key_var}")
170
- print(f" Current value: {API_KEY[:10]}..." if len(API_KEY) > 10 else API_KEY)
171
  print(f" Expected format: sk-or-xxxxxxxxxx")
172
  print(f" Note: OpenRouter keys should start with 'sk-or-'")
173
  return True # Still try to use it
@@ -207,6 +208,12 @@ def fetch_url_content(url):
207
  except Exception as e:
208
  return f"Error fetching {{url}}: {{str(e)}}"
209
 
 
 
 
 
 
 
210
  # Global cache for URL content to avoid re-crawling in generated spaces
211
  _url_content_cache = {{}}
212
 
@@ -327,6 +334,128 @@ def generate_response(message, history):
327
  if dynamic_context_parts:
328
  grounding_context += "\\n".join(dynamic_context_parts)
329
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
330
  # Build enhanced system prompt with grounding context
331
  enhanced_system_prompt = SYSTEM_PROMPT + grounding_context
332
 
@@ -507,6 +636,9 @@ def get_configuration_status():
507
  if ENABLE_DYNAMIC_URLS:
508
  status_parts.append("🔄 **Dynamic URLs:** Enabled")
509
 
 
 
 
510
  if ENABLE_VECTOR_RAG:
511
  status_parts.append("📚 **Document RAG:** Enabled")
512
 
@@ -722,19 +854,19 @@ Generated on {datetime.now().strftime('%Y-%m-%d %H:%M:%S')} with Chat U/I Helper
722
 
723
  return readme_content
724
 
725
- def create_requirements(enable_vector_rag=False, enable_code_execution=False):
726
  """Generate requirements.txt"""
727
  base_requirements = "gradio>=5.35.0\nrequests>=2.32.3\nbeautifulsoup4>=4.12.3"
728
 
729
  if enable_vector_rag:
730
  base_requirements += "\nfaiss-cpu==1.7.4\nnumpy==1.24.3"
731
 
732
- if enable_code_execution:
733
- base_requirements += "\ngradio_client>=0.15.0"
734
 
735
  return base_requirements
736
 
737
- def generate_zip(name, description, system_prompt, model, api_key_var, temperature, max_tokens, examples_text, access_code="", enable_dynamic_urls=False, url1="", url2="", url3="", url4="", enable_vector_rag=False, rag_data=None, enable_code_execution=False):
738
  """Generate deployable zip file"""
739
 
740
  # Process examples
@@ -770,6 +902,7 @@ def generate_zip(name, description, system_prompt, model, api_key_var, temperatu
770
  'access_code': "", # Access code stored in environment variable for security
771
  'enable_dynamic_urls': enable_dynamic_urls,
772
  'enable_vector_rag': enable_vector_rag,
 
773
  'rag_data_json': json.dumps(rag_data) if rag_data else 'None'
774
  }
775
 
@@ -779,7 +912,7 @@ def generate_zip(name, description, system_prompt, model, api_key_var, temperatu
779
  readme_config = config.copy()
780
  readme_config['access_code'] = access_code or ""
781
  readme_content = create_readme(readme_config)
782
- requirements_content = create_requirements(enable_vector_rag, enable_code_execution)
783
 
784
  # Create zip file with clean naming
785
  filename = f"{name.lower().replace(' ', '_').replace('-', '_')}.zip"
@@ -895,7 +1028,7 @@ def update_sandbox_preview(config_data):
895
 
896
  return preview_text, preview_html
897
 
898
- def on_preview_combined(name, description, system_prompt, enable_research_assistant, model, temperature, max_tokens, examples_text, enable_dynamic_urls, enable_vector_rag, enable_code_execution, enable_web_search):
899
  """Generate configuration and return preview updates"""
900
  if not name or not name.strip():
901
  return (
@@ -927,7 +1060,6 @@ def on_preview_combined(name, description, system_prompt, enable_research_assist
927
  'max_tokens': max_tokens,
928
  'enable_dynamic_urls': enable_dynamic_urls,
929
  'enable_vector_rag': enable_vector_rag,
930
- 'enable_code_execution': enable_code_execution,
931
  'enable_web_search': enable_web_search,
932
  'examples_text': examples_text,
933
  'preview_ready': True
@@ -944,6 +1076,7 @@ Your assistant "{name}" is now configured and ready to test in the Sandbox Previ
944
  - **Max Tokens:** {max_tokens}
945
  - **Dynamic URLs:** {'✅ Enabled' if enable_dynamic_urls else '❌ Disabled'}
946
  - **Vector RAG:** {'✅ Enabled' if enable_vector_rag else '❌ Disabled'}
 
947
 
948
  **System Prompt:**
949
  {final_system_prompt[:200]}{'...' if len(final_system_prompt) > 200 else ''}
@@ -964,6 +1097,7 @@ Your assistant "{name}" is now configured and ready to test in the Sandbox Previ
964
  **Features:**
965
  - **Dynamic URL Fetching:** {'✅ Enabled' if enable_dynamic_urls else '❌ Disabled'}
966
  - **Document RAG:** {'✅ Enabled' if enable_vector_rag else '❌ Disabled'}
 
967
 
968
  **System Prompt:**
969
  ```
@@ -1053,8 +1187,26 @@ def preview_chat_response(message, history, config_data, url1="", url2="", url3=
1053
  api_key = os.environ.get("OPENROUTER_API_KEY")
1054
 
1055
  if not api_key:
1056
- response = f"[Preview Mode - No API Key] I'm {config_data.get('name', 'your assistant')} running on {config_data.get('model', 'unknown model')}. To test with real API responses, set your OPENROUTER_API_KEY in the environment. This preview would use your system prompt: {config_data.get('system_prompt', '')[:100]}..."
1057
- history.append([message, response])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1058
  return "", history
1059
 
1060
  try:
@@ -1082,59 +1234,40 @@ def preview_chat_response(message, history, config_data, url1="", url2="", url3=
1082
  # Check for web search request if enabled
1083
  web_search_result = ""
1084
  if config_data.get('enable_web_search'):
1085
- # Simple patterns to detect search requests
1086
- search_patterns = [
1087
- r'search for\s+(.+)',
1088
- r'find\s+(.+)',
1089
- r'look up\s+(.+)',
1090
- r'what is\s+(.+)',
1091
- r'who is\s+(.+)',
1092
- r'how to\s+(.+)',
1093
- r'latest\s+(.+)',
1094
- r'recent\s+(.+)'
1095
- ]
1096
 
1097
- for pattern in search_patterns:
1098
- match = re.search(pattern, message, re.IGNORECASE)
1099
- if match:
1100
- search_query = match.group(1).strip()
1101
- search_result = perform_web_search(search_query, "Web search requested")
1102
- web_search_result = f"\n\n{search_result}\n\n"
1103
- break
1104
-
1105
- # Check for code execution request if enabled
1106
- code_execution_result = ""
1107
- if config_data.get('enable_code_execution'):
1108
- # Simple pattern to detect code execution requests
1109
- code_patterns = [
1110
- r'```python\n(.*?)\n```',
1111
- r'```\n(.*?)\n```',
1112
- r'from\s+\w+\s+import|import\s+\w+',
1113
- r'def\s+\w+\s*\(',
1114
- r'print\s*\(',
1115
- r'for\s+\w+\s+in\s+',
1116
- r'if\s+.*:'
1117
- ]
1118
 
1119
- for pattern in code_patterns:
1120
- if re.search(pattern, message, re.DOTALL | re.IGNORECASE):
1121
- # Extract code from code blocks
1122
- code_match = re.search(r'```(?:python)?\n(.*?)\n```', message, re.DOTALL)
1123
- if code_match:
1124
- code_to_execute = code_match.group(1)
1125
- execution_result = execute_python_code(code_to_execute, "Code execution requested")
1126
- code_execution_result = f"\n\n{execution_result}\n\n"
1127
- break
1128
 
1129
  # Build enhanced system prompt with all contexts
1130
- enhanced_system_prompt = config_data.get('system_prompt', '') + grounding_context + rag_context + dynamic_context + web_search_result + code_execution_result
1131
 
1132
  # Build messages array for the API
1133
  messages = [{"role": "system", "content": enhanced_system_prompt}]
1134
 
1135
- # Add conversation history - handle both formats
1136
  for chat in history:
1137
- if isinstance(chat, list) and len(chat) >= 2:
 
 
 
1138
  # Legacy format: [user_msg, assistant_msg]
1139
  user_msg, assistant_msg = chat[0], chat[1]
1140
  if user_msg:
@@ -1183,8 +1316,8 @@ def preview_chat_response(message, history, config_data, url1="", url2="", url3=
1183
  if not assistant_content or assistant_content.strip() == "":
1184
  assistant_response = f"[Preview Debug] Empty content from API. Messages sent: {len(messages)} messages, last user message: '{message}', model: {request_payload['model']}"
1185
  else:
1186
- # Add preview indicator
1187
- assistant_response = f"[Preview Mode] {assistant_content}"
1188
 
1189
  except (KeyError, IndexError, json.JSONDecodeError) as e:
1190
  assistant_response = f"[Preview Error] Failed to parse API response: {str(e)}. Raw response: {response.text[:500]}"
@@ -1194,8 +1327,9 @@ def preview_chat_response(message, history, config_data, url1="", url2="", url3=
1194
  except Exception as e:
1195
  assistant_response = f"[Preview Error] {str(e)}"
1196
 
1197
- # Return in the legacy tuple format that Gradio ChatInterface expects
1198
- history.append([message, assistant_response])
 
1199
  return "", history
1200
 
1201
  def clear_preview_chat():
@@ -1217,9 +1351,9 @@ def export_preview_conversation(history):
1217
 
1218
  return gr.update(value=temp_file, visible=True)
1219
 
1220
- def on_generate(name, description, system_prompt, enable_research_assistant, model, api_key_var, temperature, max_tokens, examples_text, access_code, enable_dynamic_urls, url1, url2, url3, url4, enable_vector_rag, rag_tool_state, enable_code_execution, enable_web_search):
1221
  if not name or not name.strip():
1222
- return gr.update(value="Error: Please provide a Space Title", visible=True), gr.update(visible=False)
1223
 
1224
 
1225
  try:
@@ -1230,11 +1364,11 @@ def on_generate(name, description, system_prompt, enable_research_assistant, mod
1230
 
1231
  # Use the system prompt directly (research assistant toggle already updates it)
1232
  if not system_prompt or not system_prompt.strip():
1233
- return gr.update(value="Error: Please provide a System Prompt for the assistant", visible=True), gr.update(visible=False)
1234
 
1235
  final_system_prompt = system_prompt.strip()
1236
 
1237
- filename = generate_zip(name, description, final_system_prompt, model, api_key_var, temperature, max_tokens, examples_text, access_code, enable_dynamic_urls, url1, url2, url3, url4, enable_vector_rag, rag_data, enable_code_execution)
1238
 
1239
  success_msg = f"""**Deployment package ready!**
1240
 
@@ -1263,13 +1397,14 @@ def on_generate(name, description, system_prompt, enable_research_assistant, mod
1263
  'max_tokens': max_tokens,
1264
  'enable_dynamic_urls': enable_dynamic_urls,
1265
  'enable_vector_rag': enable_vector_rag,
 
1266
  'filename': filename
1267
  }
1268
 
1269
  return gr.update(value=success_msg, visible=True), gr.update(value=filename, visible=True), config_data
1270
 
1271
  except Exception as e:
1272
- return gr.update(value=f"Error: {str(e)}", visible=True), gr.update(visible=False)
1273
 
1274
  # Global cache for URL content to avoid re-crawling
1275
  url_content_cache = {}
@@ -1446,34 +1581,86 @@ def remove_chat_urls(count):
1446
  return (gr.update(), gr.update(), gr.update(), gr.update(), count)
1447
 
1448
 
1449
- def toggle_code_execution(enable_code):
1450
- """Toggle visibility of code execution space field"""
1451
- return gr.update(visible=enable_code)
1452
 
1453
  def toggle_web_search(enable_search):
1454
  """Toggle visibility of web search space field"""
1455
  return gr.update(visible=enable_search)
1456
 
1457
  def perform_web_search(query, description="Web search"):
1458
- """Perform web search using HuggingFace Space"""
1459
  try:
1460
- from gradio_client import Client
1461
-
1462
- # Try to connect to a web search space (you can change this to any search space)
1463
- client = Client("huggingface-projects/web-search")
1464
-
1465
- # Submit the search query
1466
- result = client.predict(
1467
- query,
1468
- api_name="/predict"
1469
- )
1470
-
1471
- return f"**{description}**\n\nQuery: {query}\n\n**Search Results:**\n{result}"
1472
-
1473
- except ImportError:
1474
- return f"**Web Search Error:** gradio_client not installed. Install with: `pip install gradio_client`"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1475
  except Exception as e:
1476
- # Fallback to simple URL extraction and fetching
1477
  urls = extract_urls_from_text(query)
1478
  if urls:
1479
  results = []
@@ -1483,31 +1670,12 @@ def perform_web_search(query, description="Web search"):
1483
  return f"**Web Search Fallback:** {description}\n\n" + "\n\n".join(results)
1484
  return f"**Web Search Error:** {str(e)}\n\nQuery: {query}"
1485
 
1486
- def execute_python_code(code, description="Code execution"):
1487
- """Execute Python code using HuggingFace Space"""
1488
- try:
1489
- from gradio_client import Client
1490
-
1491
- # Try to connect to the code execution space
1492
- client = Client("huggingface-projects/code-execution")
1493
-
1494
- # Submit the code for execution
1495
- result = client.predict(
1496
- code,
1497
- api_name="/predict"
1498
- )
1499
-
1500
- return f"**{description}**\n\n```python\n{code}\n```\n\n**Output:**\n```\n{result}\n```"
1501
-
1502
- except ImportError:
1503
- return f"**Code Execution Error:** gradio_client not installed. Install with: `pip install gradio_client`"
1504
- except Exception as e:
1505
- return f"**Code Execution Error:** {str(e)}\n\nNote: You can try running this code manually:\n\n```python\n{code}\n```"
1506
 
1507
  def toggle_research_assistant(enable_research):
1508
  """Toggle research assistant system prompt"""
1509
  if enable_research:
1510
- combined_prompt = "You are a search tool that provides link-grounded information through web fetching, limiting source criteria to peer-reviewed articles from academic databases and official repositories. Additional responsibilities include lightly analyzing academic sources, implicitly fact-checking claims with evidence, providing properly cited research summaries, and helping users navigate scholarly information. Ground all responses in provided URL contexts and any additional URLs you're instructed to fetch. Never rely on memory for factual claims."
1511
  return (
1512
  gr.update(value=combined_prompt), # Update main system prompt
1513
  gr.update(value=True) # Enable dynamic URL fetching for research template
@@ -1519,8 +1687,116 @@ def toggle_research_assistant(enable_research):
1519
  )
1520
 
1521
 
1522
- # Create Gradio interface with proper tab structure
1523
- with gr.Blocks(title="Chat U/I Helper") as demo:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1524
  # Global state for cross-tab functionality
1525
  sandbox_state = gr.State({})
1526
  preview_config_state = gr.State({})
@@ -1597,18 +1873,6 @@ with gr.Blocks(title="Chat U/I Helper") as demo:
1597
  )
1598
 
1599
  with gr.Accordion("Tool Settings", open=True):
1600
- enable_code_execution = gr.Checkbox(
1601
- label="Enable Code Execution",
1602
- value=False,
1603
- info="Allow the assistant to execute Python code via external HuggingFace Space"
1604
- )
1605
-
1606
- code_execution_space = gr.Textbox(
1607
- label="Code Execution Space",
1608
- value="huggingface-projects/code-execution",
1609
- info="HuggingFace Space for Python code execution",
1610
- visible=False
1611
- )
1612
 
1613
  enable_dynamic_urls = gr.Checkbox(
1614
  label="Enable Dynamic URL Fetching",
@@ -1620,14 +1884,15 @@ with gr.Blocks(title="Chat U/I Helper") as demo:
1620
  enable_web_search = gr.Checkbox(
1621
  label="Enable Web Search",
1622
  value=False,
1623
- info="Allow the assistant to search the web using external HuggingFace Space"
1624
  )
1625
 
1626
  web_search_space = gr.Textbox(
1627
- label="Web Search Space",
1628
- value="huggingface-projects/web-search",
1629
- info="HuggingFace Space for web search functionality",
1630
- visible=False
 
1631
  )
1632
 
1633
  enable_vector_rag = gr.Checkbox(
@@ -1722,13 +1987,6 @@ with gr.Blocks(title="Chat U/I Helper") as demo:
1722
  outputs=[system_prompt, enable_dynamic_urls]
1723
  )
1724
 
1725
- # Connect the code execution checkbox
1726
- enable_code_execution.change(
1727
- toggle_code_execution,
1728
- inputs=[enable_code_execution],
1729
- outputs=[code_execution_space]
1730
- )
1731
-
1732
  # Connect the web search checkbox
1733
  enable_web_search.change(
1734
  toggle_web_search,
@@ -1768,7 +2026,7 @@ with gr.Blocks(title="Chat U/I Helper") as demo:
1768
  # Connect the generate button
1769
  generate_btn.click(
1770
  on_generate,
1771
- inputs=[name, description, system_prompt, enable_research_assistant, model, api_key_var, temperature, max_tokens, examples_text, access_code, enable_dynamic_urls, url1, url2, url3, url4, enable_vector_rag, rag_tool_state, enable_code_execution, enable_web_search],
1772
  outputs=[status, download_file, sandbox_state]
1773
  )
1774
 
@@ -1786,7 +2044,8 @@ with gr.Blocks(title="Chat U/I Helper") as demo:
1786
  preview_chatbot = gr.Chatbot(
1787
  value=[],
1788
  label="Preview Chat Interface",
1789
- height=400
 
1790
  )
1791
  preview_msg = gr.Textbox(
1792
  label="Test your assistant",
@@ -1886,13 +2145,44 @@ with gr.Blocks(title="Chat U/I Helper") as demo:
1886
  # Connect cross-tab functionality after all components are defined
1887
  preview_btn.click(
1888
  on_preview_combined,
1889
- inputs=[name, description, system_prompt, enable_research_assistant, model, temperature, max_tokens, examples_text, enable_dynamic_urls, enable_vector_rag, enable_code_execution, enable_web_search],
1890
  outputs=[preview_config_state, preview_status_comp, preview_chat_section_comp, config_display_comp]
1891
  )
1892
 
1893
  if __name__ == "__main__":
1894
- # Check if running in local development with dev tunnels
1895
- if os.environ.get('CODESPACES') or 'devtunnels.ms' in os.environ.get('GRADIO_SERVER_NAME', ''):
1896
- demo.launch(share=True, allowed_paths=[], server_name="0.0.0.0")
1897
- else:
1898
- demo.launch(share=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
145
  ACCESS_CODE = os.environ.get("SPACE_ACCESS_CODE", "{access_code}")
146
  ENABLE_DYNAMIC_URLS = {enable_dynamic_urls}
147
  ENABLE_VECTOR_RAG = {enable_vector_rag}
148
+ ENABLE_WEB_SEARCH = {enable_web_search}
149
  RAG_DATA = {rag_data_json}
150
 
151
  # Get API key from environment - customizable variable name with validation
 
168
  elif not API_KEY.startswith('sk-or-'):
169
  print(f"⚠️ API KEY FORMAT WARNING:")
170
  print(f" Variable name: {api_key_var}")
171
+ print(f" Current value: {{{{API_KEY[:10]}}}}..." if len(API_KEY) > 10 else API_KEY)
172
  print(f" Expected format: sk-or-xxxxxxxxxx")
173
  print(f" Note: OpenRouter keys should start with 'sk-or-'")
174
  return True # Still try to use it
 
208
  except Exception as e:
209
  return f"Error fetching {{url}}: {{str(e)}}"
210
 
211
+ def extract_urls_from_text(text):
212
+ """Extract URLs from text using regex"""
213
+ import re
214
+ url_pattern = r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
215
+ return re.findall(url_pattern, text)
216
+
217
  # Global cache for URL content to avoid re-crawling in generated spaces
218
  _url_content_cache = {{}}
219
 
 
334
  if dynamic_context_parts:
335
  grounding_context += "\\n".join(dynamic_context_parts)
336
 
337
+ # If web search is enabled, use it for most queries (excluding code blocks and URLs)
338
+ if ENABLE_WEB_SEARCH:
339
+ should_search = True
340
+
341
+ # Skip search for messages that are primarily code blocks
342
+ import re
343
+ if re.search(r'```[\\s\\S]*```', message):
344
+ should_search = False
345
+
346
+ # Skip search for messages that are primarily URLs
347
+ urls_in_message = extract_urls_from_text(message)
348
+ if urls_in_message and len(' '.join(urls_in_message)) > len(message) * 0.5:
349
+ should_search = False
350
+
351
+ # Skip search for very short messages (likely greetings)
352
+ if len(message.strip()) < 5:
353
+ should_search = False
354
+
355
+ if should_search:
356
+ # Use the entire message as search query, cleaning it up
357
+ search_query = message.strip()
358
+ try:
359
+ # Perform web search using crawl4ai
360
+ import urllib.parse
361
+ import asyncio
362
+
363
+ async def search_with_crawl4ai(search_query):
364
+ try:
365
+ from crawl4ai import WebCrawler
366
+
367
+ # Create search URL for DuckDuckGo
368
+ encoded_query = urllib.parse.quote_plus(search_query)
369
+ search_url = f"https://duckduckgo.com/html/?q={{encoded_query}}"
370
+
371
+ # Initialize crawler
372
+ crawler = WebCrawler(verbose=False)
373
+
374
+ try:
375
+ # Start the crawler
376
+ await crawler.astart()
377
+
378
+ # Crawl the search results
379
+ result = await crawler.arun(url=search_url)
380
+
381
+ if result.success:
382
+ # Extract text content from search results
383
+ content = result.cleaned_html if result.cleaned_html else result.markdown
384
+
385
+ # Clean and truncate the content
386
+ if content:
387
+ # Remove excessive whitespace and limit length
388
+ lines = [line.strip() for line in content.split('\\n') if line.strip()]
389
+ cleaned_content = '\\n'.join(lines)
390
+
391
+ # Truncate to reasonable length for context
392
+ if len(cleaned_content) > 2000:
393
+ cleaned_content = cleaned_content[:2000] + "..."
394
+
395
+ return cleaned_content
396
+ else:
397
+ return "No content extracted from search results"
398
+ else:
399
+ return f"Search failed: {{result.error_message if hasattr(result, 'error_message') else 'Unknown error'}}"
400
+
401
+ finally:
402
+ # Clean up the crawler
403
+ await crawler.aclose()
404
+
405
+ except ImportError:
406
+ # Fallback to simple DuckDuckGo search without crawl4ai
407
+ encoded_query = urllib.parse.quote_plus(search_query)
408
+ search_url = f"https://duckduckgo.com/html/?q={{encoded_query}}"
409
+
410
+ # Use basic fetch as fallback
411
+ response = requests.get(search_url, headers={{'User-Agent': 'Mozilla/5.0'}}, timeout=10)
412
+ if response.status_code == 200:
413
+ from bs4 import BeautifulSoup
414
+ soup = BeautifulSoup(response.content, 'html.parser')
415
+
416
+ # Remove script and style elements
417
+ for script in soup(["script", "style", "nav", "header", "footer"]):
418
+ script.decompose()
419
+
420
+ # Get text content
421
+ text = soup.get_text()
422
+
423
+ # Clean up whitespace
424
+ lines = (line.strip() for line in text.splitlines())
425
+ chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
426
+ text = ' '.join(chunk for chunk in chunks if chunk)
427
+
428
+ # Truncate to ~2000 characters
429
+ if len(text) > 2000:
430
+ text = text[:2000] + "..."
431
+
432
+ return text
433
+ else:
434
+ return f"Failed to fetch search results: {{response.status_code}}"
435
+
436
+ # Run the async search
437
+ if hasattr(asyncio, 'run'):
438
+ search_result = asyncio.run(search_with_crawl4ai(search_query))
439
+ else:
440
+ # Fallback for older Python versions
441
+ loop = asyncio.new_event_loop()
442
+ asyncio.set_event_loop(loop)
443
+ try:
444
+ search_result = loop.run_until_complete(search_with_crawl4ai(search_query))
445
+ finally:
446
+ loop.close()
447
+
448
+ grounding_context += f"\\n\\nWeb search results for '{{search_query}}':\\n{{search_result}}"
449
+ except Exception as e:
450
+ # Fallback to URL extraction if web search fails
451
+ urls = extract_urls_from_text(search_query)
452
+ if urls:
453
+ for url in urls[:2]: # Limit to 2 URLs for fallback
454
+ content = fetch_url_content(url)
455
+ grounding_context += f"\\n\\nFallback content from {{url}}:\\n{{content[:500]}}..."
456
+ else:
457
+ grounding_context += f"\\n\\nWeb search requested: {{search_query}} (external search not available)"
458
+
459
  # Build enhanced system prompt with grounding context
460
  enhanced_system_prompt = SYSTEM_PROMPT + grounding_context
461
 
 
636
  if ENABLE_DYNAMIC_URLS:
637
  status_parts.append("πŸ”„ **Dynamic URLs:** Enabled")
638
 
639
+ if ENABLE_WEB_SEARCH:
640
+ status_parts.append("🔍 **Web Search:** Enabled")
641
+
642
  if ENABLE_VECTOR_RAG:
643
  status_parts.append("📚 **Document RAG:** Enabled")
644
 
 
854
 
855
  return readme_content
856
 
857
+ def create_requirements(enable_vector_rag=False, enable_web_search=False):
858
  """Generate requirements.txt"""
859
  base_requirements = "gradio>=5.35.0\nrequests>=2.32.3\nbeautifulsoup4>=4.12.3"
860
 
861
  if enable_vector_rag:
862
  base_requirements += "\nfaiss-cpu==1.7.4\nnumpy==1.24.3"
863
 
864
+ if enable_web_search:
865
+ base_requirements += "\ncrawl4ai>=0.2.0\naiohttp>=3.8.0"
866
 
867
  return base_requirements
868
 
869
+ def generate_zip(name, description, system_prompt, model, api_key_var, temperature, max_tokens, examples_text, access_code="", enable_dynamic_urls=False, url1="", url2="", url3="", url4="", enable_vector_rag=False, rag_data=None, enable_web_search=False):
870
  """Generate deployable zip file"""
871
 
872
  # Process examples
 
902
  'access_code': "", # Access code stored in environment variable for security
903
  'enable_dynamic_urls': enable_dynamic_urls,
904
  'enable_vector_rag': enable_vector_rag,
905
+ 'enable_web_search': enable_web_search,
906
  'rag_data_json': json.dumps(rag_data) if rag_data else 'None'
907
  }
908
 
 
912
  readme_config = config.copy()
913
  readme_config['access_code'] = access_code or ""
914
  readme_content = create_readme(readme_config)
915
+ requirements_content = create_requirements(enable_vector_rag, enable_web_search)
916
 
917
  # Create zip file with clean naming
918
  filename = f"{name.lower().replace(' ', '_').replace('-', '_')}.zip"
 
1028
 
1029
  return preview_text, preview_html
1030
 
1031
+ def on_preview_combined(name, description, system_prompt, enable_research_assistant, model, temperature, max_tokens, examples_text, enable_dynamic_urls, enable_vector_rag, enable_web_search):
1032
  """Generate configuration and return preview updates"""
1033
  if not name or not name.strip():
1034
  return (
 
1060
  'max_tokens': max_tokens,
1061
  'enable_dynamic_urls': enable_dynamic_urls,
1062
  'enable_vector_rag': enable_vector_rag,
 
1063
  'enable_web_search': enable_web_search,
1064
  'examples_text': examples_text,
1065
  'preview_ready': True
 
1076
  - **Max Tokens:** {max_tokens}
1077
  - **Dynamic URLs:** {'✅ Enabled' if enable_dynamic_urls else '❌ Disabled'}
1078
  - **Vector RAG:** {'✅ Enabled' if enable_vector_rag else '❌ Disabled'}
1079
+ - **Web Search:** {'✅ Enabled' if enable_web_search else '❌ Disabled'}
1080
 
1081
  **System Prompt:**
1082
  {final_system_prompt[:200]}{'...' if len(final_system_prompt) > 200 else ''}
 
1097
  **Features:**
1098
  - **Dynamic URL Fetching:** {'✅ Enabled' if enable_dynamic_urls else '❌ Disabled'}
1099
  - **Document RAG:** {'✅ Enabled' if enable_vector_rag else '❌ Disabled'}
1100
+ - **Web Search:** {'✅ Enabled' if enable_web_search else '❌ Disabled'}
1101
 
1102
  **System Prompt:**
1103
  ```
 
1187
  api_key = os.environ.get("OPENROUTER_API_KEY")
1188
 
1189
  if not api_key:
1190
+ response = f"""🔑 **API Key Required for Preview**
1191
+
1192
+ To test your assistant with real API responses, please:
1193
+
1194
+ 1. Get your OpenRouter API key from: https://openrouter.ai/keys
1195
+ 2. Set it as an environment variable: `export OPENROUTER_API_KEY=your_key_here`
1196
+ 3. Or add it to your `.env` file: `OPENROUTER_API_KEY=your_key_here`
1197
+
1198
+ **Your Configuration:**
1199
+ - **Name:** {config_data.get('name', 'your assistant')}
1200
+ - **Model:** {config_data.get('model', 'unknown model')}
1201
+ - **Temperature:** {config_data.get('temperature', 0.7)}
1202
+ - **Max Tokens:** {config_data.get('max_tokens', 500)}
1203
+
1204
+ **System Prompt Preview:**
1205
+ {config_data.get('system_prompt', '')[:200]}{'...' if len(config_data.get('system_prompt', '')) > 200 else ''}
1206
+
1207
+ Once you set your API key, you'll be able to test real conversations in this preview."""
1208
+ history.append({"role": "user", "content": message})
1209
+ history.append({"role": "assistant", "content": response})
1210
  return "", history
1211
 
1212
  try:
 
1234
  # Check for web search request if enabled
1235
  web_search_result = ""
1236
  if config_data.get('enable_web_search'):
1237
+ # If web search is enabled, use it for most queries (excluding code blocks and URLs)
1238
+ should_search = True
 
 
 
 
 
 
 
 
 
1239
 
1240
+ # Skip search for messages that are primarily code blocks
1241
+ if re.search(r'```[\s\S]*```', message):
1242
+ should_search = False
1243
+
1244
+ # Skip search for messages that are primarily URLs
1245
+ urls_in_message = extract_urls_from_text(message)
1246
+ if urls_in_message and len(' '.join(urls_in_message)) > len(message) * 0.5:
1247
+ should_search = False
 
 
 
 
 
 
 
 
 
 
 
 
 
1248
 
1249
+ # Skip search for very short messages (likely greetings)
1250
+ if len(message.strip()) < 5:
1251
+ should_search = False
1252
+
1253
+ if should_search:
1254
+ # Use the entire message as search query, cleaning it up
1255
+ search_query = message.strip()
1256
+ search_result = perform_web_search(search_query, "Web search requested")
1257
+ web_search_result = f"\n\n{search_result}\n\n"
1258
 
1259
  # Build enhanced system prompt with all contexts
1260
+ enhanced_system_prompt = config_data.get('system_prompt', '') + grounding_context + rag_context + dynamic_context + web_search_result
1261
 
1262
  # Build messages array for the API
1263
  messages = [{"role": "system", "content": enhanced_system_prompt}]
1264
 
1265
+ # Add conversation history - handle both formats for backwards compatibility
1266
  for chat in history:
1267
+ if isinstance(chat, dict):
1268
+ # New format: {"role": "user", "content": "..."}
1269
+ messages.append(chat)
1270
+ elif isinstance(chat, list) and len(chat) >= 2:
1271
  # Legacy format: [user_msg, assistant_msg]
1272
  user_msg, assistant_msg = chat[0], chat[1]
1273
  if user_msg:
 
1316
  if not assistant_content or assistant_content.strip() == "":
1317
  assistant_response = f"[Preview Debug] Empty content from API. Messages sent: {len(messages)} messages, last user message: '{message}', model: {request_payload['model']}"
1318
  else:
1319
+ # Use the content directly - no preview indicator needed
1320
+ assistant_response = assistant_content
1321
 
1322
  except (KeyError, IndexError, json.JSONDecodeError) as e:
1323
  assistant_response = f"[Preview Error] Failed to parse API response: {str(e)}. Raw response: {response.text[:500]}"
 
1327
  except Exception as e:
1328
  assistant_response = f"[Preview Error] {str(e)}"
1329
 
1330
+ # Return in the new messages format for Gradio 5.x
1331
+ history.append({"role": "user", "content": message})
1332
+ history.append({"role": "assistant", "content": assistant_response})
1333
  return "", history
1334
 
1335
  def clear_preview_chat():
 
1351
 
1352
  return gr.update(value=temp_file, visible=True)
1353
 
1354
+ def on_generate(name, description, system_prompt, enable_research_assistant, model, api_key_var, temperature, max_tokens, examples_text, access_code, enable_dynamic_urls, url1, url2, url3, url4, enable_vector_rag, rag_tool_state, enable_web_search):
1355
  if not name or not name.strip():
1356
+ return gr.update(value="Error: Please provide a Space Title", visible=True), gr.update(visible=False), {}
1357
 
1358
 
1359
  try:
 
1364
 
1365
  # Use the system prompt directly (research assistant toggle already updates it)
1366
  if not system_prompt or not system_prompt.strip():
1367
+ return gr.update(value="Error: Please provide a System Prompt for the assistant", visible=True), gr.update(visible=False), {}
1368
 
1369
  final_system_prompt = system_prompt.strip()
1370
 
1371
+ filename = generate_zip(name, description, final_system_prompt, model, api_key_var, temperature, max_tokens, examples_text, access_code, enable_dynamic_urls, url1, url2, url3, url4, enable_vector_rag, rag_data, enable_web_search)
1372
 
1373
  success_msg = f"""**Deployment package ready!**
1374
 
 
1397
  'max_tokens': max_tokens,
1398
  'enable_dynamic_urls': enable_dynamic_urls,
1399
  'enable_vector_rag': enable_vector_rag,
1400
+ 'enable_web_search': enable_web_search,
1401
  'filename': filename
1402
  }
1403
 
1404
  return gr.update(value=success_msg, visible=True), gr.update(value=filename, visible=True), config_data
1405
 
1406
  except Exception as e:
1407
+ return gr.update(value=f"Error: {str(e)}", visible=True), gr.update(visible=False), {}
1408
 
1409
  # Global cache for URL content to avoid re-crawling
1410
  url_content_cache = {}
 
1581
  return (gr.update(), gr.update(), gr.update(), gr.update(), count)
1582
 
1583
 
1584
+ # Code execution toggle removed - functionality no longer supported
 
 
1585
 
1586
def toggle_web_search(enable_search):
    """Show or hide the web-search technology field.

    Args:
        enable_search: Truthy when the "Enable Web Search" checkbox is on.

    Returns:
        A Gradio update object that sets the field's visibility.
    """
    visibility_update = gr.update(visible=enable_search)
    return visibility_update
1589
 
1590
  def perform_web_search(query, description="Web search"):
1591
+ """Perform web search using crawl4ai with DuckDuckGo"""
1592
  try:
1593
+ # Try to use crawl4ai for web search
1594
+ try:
1595
+ from crawl4ai import WebCrawler
1596
+ import asyncio
1597
+
1598
+ async def search_with_crawl4ai(search_query):
1599
+ # Create search URL for DuckDuckGo
1600
+ import urllib.parse
1601
+ encoded_query = urllib.parse.quote_plus(search_query)
1602
+ search_url = f"https://duckduckgo.com/html/?q={encoded_query}"
1603
+
1604
+ # Initialize crawler
1605
+ crawler = WebCrawler(verbose=False)
1606
+
1607
+ try:
1608
+ # Start the crawler
1609
+ await crawler.astart()
1610
+
1611
+ # Crawl the search results
1612
+ result = await crawler.arun(url=search_url)
1613
+
1614
+ if result.success:
1615
+ # Extract text content from search results
1616
+ content = result.cleaned_html if result.cleaned_html else result.markdown
1617
+
1618
+ # Clean and truncate the content
1619
+ if content:
1620
+ # Remove excessive whitespace and limit length
1621
+ lines = [line.strip() for line in content.split('\n') if line.strip()]
1622
+ cleaned_content = '\n'.join(lines)
1623
+
1624
+ # Truncate to reasonable length for context
1625
+ if len(cleaned_content) > 3000:
1626
+ cleaned_content = cleaned_content[:3000] + "..."
1627
+
1628
+ return cleaned_content
1629
+ else:
1630
+ return "No content extracted from search results"
1631
+ else:
1632
+ return f"Search failed: {result.error_message if hasattr(result, 'error_message') else 'Unknown error'}"
1633
+
1634
+ finally:
1635
+ # Clean up the crawler
1636
+ await crawler.aclose()
1637
+
1638
+ # Run the async search
1639
+ if hasattr(asyncio, 'run'):
1640
+ search_result = asyncio.run(search_with_crawl4ai(query))
1641
+ else:
1642
+ # Fallback for older Python versions
1643
+ loop = asyncio.new_event_loop()
1644
+ asyncio.set_event_loop(loop)
1645
+ try:
1646
+ search_result = loop.run_until_complete(search_with_crawl4ai(query))
1647
+ finally:
1648
+ loop.close()
1649
+
1650
+ return f"**{description}**\n\nQuery: {query}\n\n**Search Results:**\n{search_result}"
1651
+
1652
+ except ImportError:
1653
+ # Fallback to simple DuckDuckGo search without crawl4ai
1654
+ import urllib.parse
1655
+ encoded_query = urllib.parse.quote_plus(query)
1656
+ search_url = f"https://duckduckgo.com/html/?q={encoded_query}"
1657
+
1658
+ # Use enhanced_fetch_url_content as fallback
1659
+ content = enhanced_fetch_url_content(search_url)
1660
+ return f"**{description} (Simplified)**\n\nQuery: {query}\n\n**Search Results:**\n{content}"
1661
+
1662
  except Exception as e:
1663
+ # Final fallback to URL extraction if search fails
1664
  urls = extract_urls_from_text(query)
1665
  if urls:
1666
  results = []
 
1670
  return f"**Web Search Fallback:** {description}\n\n" + "\n\n".join(results)
1671
  return f"**Web Search Error:** {str(e)}\n\nQuery: {query}"
1672
 
1673
+ # Code execution functionality removed - no longer supported
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1674
 
1675
  def toggle_research_assistant(enable_research):
1676
  """Toggle research assistant system prompt"""
1677
  if enable_research:
1678
+ combined_prompt = "You are an advanced research assistant specializing in academic literature search and analysis. Your expertise includes finding peer-reviewed sources, critically evaluating research methodology, synthesizing insights across multiple papers, and providing properly formatted citations. When responding, ground all claims in specific sources from provided URL contexts, distinguish between direct evidence and analytical interpretation, and highlight any limitations or conflicting findings. Use clear, accessible language that makes complex research understandable, and suggest related areas of inquiry when relevant. Your goal is to be a knowledgeable research partner who helps users navigate academic information with precision and clarity."
1679
  return (
1680
  gr.update(value=combined_prompt), # Update main system prompt
1681
  gr.update(value=True) # Enable dynamic URL fetching for research template
 
1687
  )
1688
 
1689
 
1690
+ # Create Gradio interface with proper tab structure and fixed configuration
1691
+ with gr.Blocks(
1692
+ title="Chat U/I Helper",
1693
+ css="""
1694
+ /* Custom CSS to fix styling issues */
1695
+ .gradio-container {
1696
+ max-width: 1200px !important;
1697
+ margin: 0 auto;
1698
+ }
1699
+
1700
+ /* Fix tab styling */
1701
+ .tab-nav {
1702
+ border-bottom: 1px solid #e0e0e0;
1703
+ }
1704
+
1705
+ /* Fix button styling */
1706
+ .btn {
1707
+ border-radius: 6px;
1708
+ }
1709
+
1710
+ /* Fix chat interface styling */
1711
+ .chat-interface {
1712
+ border-radius: 8px;
1713
+ border: 1px solid #e0e0e0;
1714
+ }
1715
+
1716
+ /* Hide gradio footer to avoid manifest issues */
1717
+ .gradio-footer {
1718
+ display: none !important;
1719
+ }
1720
+
1721
+ /* Fix accordion styling */
1722
+ .accordion {
1723
+ border: 1px solid #e0e0e0;
1724
+ border-radius: 6px;
1725
+ }
1726
+ """,
1727
+ theme=gr.themes.Default(),
1728
+ head="""
1729
+ <style>
1730
+ /* Additional head styles to prevent manifest issues */
1731
+ .gradio-app {
1732
+ background: #ffffff;
1733
+ }
1734
+ </style>
1735
+ """,
1736
+ js="""
1737
+ function() {
1738
+ // Prevent manifest.json requests and other common errors
1739
+ if (typeof window !== 'undefined') {
1740
+ // Override fetch to handle manifest.json requests
1741
+ const originalFetch = window.fetch;
1742
+ window.fetch = function(url, options) {
1743
+ // Handle both string URLs and URL objects
1744
+ const urlString = typeof url === 'string' ? url : url.toString();
1745
+
1746
+ if (urlString.includes('manifest.json')) {
1747
+ return Promise.resolve(new Response('{}', {
1748
+ status: 200,
1749
+ headers: { 'Content-Type': 'application/json' }
1750
+ }));
1751
+ }
1752
+
1753
+ // Handle favicon requests
1754
+ if (urlString.includes('favicon.ico')) {
1755
+ return Promise.resolve(new Response('', { status: 204 }));
1756
+ }
1757
+
1758
+ return originalFetch.apply(this, arguments);
1759
+ };
1760
+
1761
+ // Prevent postMessage origin errors
1762
+ window.addEventListener('message', function(event) {
1763
+ try {
1764
+ if (event.origin && event.origin !== window.location.origin) {
1765
+ event.stopImmediatePropagation();
1766
+ return false;
1767
+ }
1768
+ } catch (e) {
1769
+ // Silently ignore origin check errors
1770
+ }
1771
+ }, true);
1772
+
1773
+ // Prevent console errors from missing resources
1774
+ window.addEventListener('error', function(e) {
1775
+ if (e.target && e.target.src) {
1776
+ const src = e.target.src;
1777
+ if (src.includes('manifest.json') || src.includes('favicon.ico')) {
1778
+ e.preventDefault();
1779
+ return false;
1780
+ }
1781
+ }
1782
+ }, true);
1783
+
1784
+ // Override console.error to filter out known harmless errors
1785
+ const originalConsoleError = console.error;
1786
+ console.error = function(...args) {
1787
+ const message = args.join(' ');
1788
+ if (message.includes('manifest.json') ||
1789
+ message.includes('favicon.ico') ||
1790
+ message.includes('postMessage') ||
1791
+ message.includes('target origin')) {
1792
+ return; // Suppress these specific errors
1793
+ }
1794
+ originalConsoleError.apply(console, arguments);
1795
+ };
1796
+ }
1797
+ }
1798
+ """
1799
+ ) as demo:
1800
  # Global state for cross-tab functionality
1801
  sandbox_state = gr.State({})
1802
  preview_config_state = gr.State({})
 
1873
  )
1874
 
1875
  with gr.Accordion("Tool Settings", open=True):
 
 
 
 
 
 
 
 
 
 
 
 
1876
 
1877
  enable_dynamic_urls = gr.Checkbox(
1878
  label="Enable Dynamic URL Fetching",
 
1884
  enable_web_search = gr.Checkbox(
1885
  label="Enable Web Search",
1886
  value=False,
1887
+ info="Allow the assistant to search the web using crawl4ai"
1888
  )
1889
 
1890
  web_search_space = gr.Textbox(
1891
+ label="Web Search Technology",
1892
+ value="crawl4ai",
1893
+ info="Uses crawl4ai library for web crawling",
1894
+ visible=False,
1895
+ interactive=False
1896
  )
1897
 
1898
  enable_vector_rag = gr.Checkbox(
 
1987
  outputs=[system_prompt, enable_dynamic_urls]
1988
  )
1989
 
 
 
 
 
 
 
 
1990
  # Connect the web search checkbox
1991
  enable_web_search.change(
1992
  toggle_web_search,
 
2026
  # Connect the generate button
2027
  generate_btn.click(
2028
  on_generate,
2029
+ inputs=[name, description, system_prompt, enable_research_assistant, model, api_key_var, temperature, max_tokens, examples_text, access_code, enable_dynamic_urls, url1, url2, url3, url4, enable_vector_rag, rag_tool_state, enable_web_search],
2030
  outputs=[status, download_file, sandbox_state]
2031
  )
2032
 
 
2044
  preview_chatbot = gr.Chatbot(
2045
  value=[],
2046
  label="Preview Chat Interface",
2047
+ height=400,
2048
+ type="messages" # Use the new messages format
2049
  )
2050
  preview_msg = gr.Textbox(
2051
  label="Test your assistant",
 
2145
  # Connect cross-tab functionality after all components are defined
2146
  preview_btn.click(
2147
  on_preview_combined,
2148
+ inputs=[name, description, system_prompt, enable_research_assistant, model, temperature, max_tokens, examples_text, enable_dynamic_urls, enable_vector_rag, enable_web_search],
2149
  outputs=[preview_config_state, preview_status_comp, preview_chat_section_comp, config_display_comp]
2150
  )
2151
 
2152
if __name__ == "__main__":
    # Launch configuration tuned for local development: bind to localhost,
    # disable sharing/debug, and turn off endpoints that would otherwise
    # generate 404 noise (favicon, FastAPI docs).
    launch_kwargs = {
        "server_name": "127.0.0.1",   # localhost instead of 0.0.0.0
        "server_port": 7860,
        "share": False,               # no public share link by default
        "debug": False,               # quieter console output
        "show_error": True,           # surface errors in the interface
        "quiet": False,               # keep logging for debugging
        "favicon_path": None,         # avoid favicon 404s
        "ssl_verify": False,          # local development only
        "allowed_paths": [],
        "blocked_paths": [],
        "root_path": None,
        "app_kwargs": {
            "docs_url": None,         # disable docs endpoint
            "redoc_url": None,        # disable redoc endpoint
        },
    }

    # Remote dev environments (GitHub Codespaces, VS Code dev tunnels) need a
    # public bind address plus a share link to be reachable from the browser.
    running_in_codespaces = bool(os.environ.get('CODESPACES'))
    behind_dev_tunnel = 'devtunnels.ms' in os.environ.get('GRADIO_SERVER_NAME', '')
    if running_in_codespaces or behind_dev_tunnel:
        launch_kwargs["server_name"] = "0.0.0.0"
        launch_kwargs["share"] = True

    print("πŸš€ Starting Chat UI Helper...")
    print(f"πŸ“ Server: {launch_kwargs['server_name']}:{launch_kwargs['server_port']}")
    print(f"πŸ”— Share: {launch_kwargs['share']}")

    demo.launch(**launch_kwargs)