Remove web search functionality and update template

- Remove enable_web_search parameters and UI components
- Clean up template generation and requirements
- Simplify model selection and dependencies
- Update export functionality with proper encoding
- Fix API key validation in generated spaces

Files changed:
- app.py (+101, -320)
- requirements.txt (+2, -2)
- support_docs.py (+0, -1)
app.py (CHANGED)
````diff
@@ -133,13 +133,15 @@ def enhanced_fetch_url_content(url, enable_search_validation=False):
 
 # Template for generated space app (based on mvp_simple.py)
 SPACE_TEMPLATE = '''import gradio as gr
+import tempfile
 import os
 import requests
 import json
 import re
 from bs4 import BeautifulSoup
 from datetime import datetime
-import
+import urllib.parse
+
 
 # Configuration
 SPACE_NAME = "{name}"
````
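Everything under `SPACE_TEMPLATE = '''...` is a string that is filled in later, presumably via `SPACE_TEMPLATE.format(**config)`: single-brace fields such as `{name}` are substituted at generation time, while doubled braces (`{{...}}`) survive as literal braces in the generated app. A minimal sketch of the mechanism:

```python
# Sketch of the assumed template mechanism: {name} is a format field,
# {{SPACE_NAME}} becomes a literal {SPACE_NAME} after .format().
template = '''SPACE_NAME = "{name}"
print(f"Running {{SPACE_NAME}}")'''

print(template.format(name="Demo Space"))
# SPACE_NAME = "Demo Space"
# print(f"Running {SPACE_NAME}")
```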
````diff
@@ -151,7 +153,6 @@ GROUNDING_URLS = {grounding_urls}
 ACCESS_CODE = os.environ.get("SPACE_ACCESS_CODE", "{access_code}")
 ENABLE_DYNAMIC_URLS = {enable_dynamic_urls}
 ENABLE_VECTOR_RAG = {enable_vector_rag}
-ENABLE_WEB_SEARCH = {enable_web_search}
 RAG_DATA = {rag_data_json}
 
 # Get API key from environment - customizable variable name with validation
````
````diff
@@ -174,7 +175,7 @@ def validate_api_key():
     elif not API_KEY.startswith('sk-or-'):
         print(f"⚠️ API KEY FORMAT WARNING:")
         print(f"   Variable name: {api_key_var}")
-        print(f"   Current value: {{API_KEY}}")
+        print(f"   Current value: {{API_KEY[:10]}}..." if len(API_KEY) > 10 else API_KEY)
         print(f"   Expected format: sk-or-xxxxxxxxxx")
         print(f"   Note: OpenRouter keys should start with 'sk-or-'")
         return True  # Still try to use it
````
````diff
@@ -185,7 +186,11 @@ def validate_api_key():
     return True
 
 # Validate on startup
-API_KEY_VALID = validate_api_key()
+try:
+    API_KEY_VALID = validate_api_key()
+except NameError:
+    # During template generation, API_KEY might not be defined yet
+    API_KEY_VALID = False
 
 def validate_url_domain(url):
     """Basic URL domain validation"""
````
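The new warning prints only a prefix of the key, and the startup validation is wrapped so a missing `API_KEY` name cannot crash the module. The masking idiom as a standalone sketch (the `mask_secret` helper is illustrative, not part of the codebase):

```python
def mask_secret(value: str, visible: int = 10) -> str:
    """Illustrative helper: log only a short prefix of a secret."""
    return f"{value[:visible]}..." if len(value) > visible else value

print(mask_secret("sk-or-abcdefghijklmnop"))  # -> sk-or-abcd...
```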
````diff
@@ -396,130 +401,6 @@ def generate_response(message, history):
         if dynamic_context_parts:
             grounding_context += "\\n".join(dynamic_context_parts)
     
-    # If web search is enabled, use it for most queries (excluding code blocks and URLs)
-    if ENABLE_WEB_SEARCH:
-        should_search = True
-        
-        # Skip search for messages that are primarily code blocks
-        import re
-        if re.search(r'```[\\s\\S]*```', message):
-            should_search = False
-        
-        # Skip search for messages that are primarily URLs
-        urls_in_message = extract_urls_from_text(message)
-        if urls_in_message and len(' '.join(urls_in_message)) > len(message) * 0.5:
-            should_search = False
-        
-        # Skip search for very short messages (likely greetings)
-        if len(message.strip()) < 5:
-            should_search = False
-        
-        if should_search:
-            # Use the entire message as search query, cleaning it up
-            search_query = message.strip()
-            try:
-                # Perform web search using crawl4ai
-                import urllib.parse
-                import asyncio
-                
-                async def search_with_crawl4ai(search_query):
-                    try:
-                        from crawl4ai import WebCrawler
-                        
-                        # Create search URL for DuckDuckGo
-                        encoded_query = urllib.parse.quote_plus(search_query)
-                        search_url = f"https://duckduckgo.com/html/?q={{encoded_query}}"
-                        
-                        # Initialize crawler
-                        crawler = WebCrawler(verbose=False)
-                        
-                        try:
-                            # Start the crawler
-                            await crawler.astart()
-                            
-                            # Crawl the search results
-                            result = await crawler.arun(url=search_url)
-                            
-                            if result.success:
-                                # Extract text content from search results
-                                content = result.cleaned_html if result.cleaned_html else result.markdown
-                                
-                                # Clean and truncate the content
-                                if content:
-                                    # Remove excessive whitespace and limit length
-                                    lines = [line.strip() for line in content.split('\\n') if line.strip()]
-                                    cleaned_content = '\\n'.join(lines)
-                                    
-                                    # Truncate to reasonable length for context
-                                    if len(cleaned_content) > 2000:
-                                        cleaned_content = cleaned_content[:2000] + "..."
-                                    
-                                    return cleaned_content
-                                else:
-                                    return "No content extracted from search results"
-                            else:
-                                return f"Search failed: {{result.error_message if hasattr(result, 'error_message') else 'Unknown error'}}"
-                        
-                        finally:
-                            # Clean up the crawler
-                            await crawler.aclose()
-                    
-                    except ImportError:
-                        # Fallback to simple DuckDuckGo search without crawl4ai
-                        encoded_query = urllib.parse.quote_plus(search_query)
-                        search_url = f"https://duckduckgo.com/html/?q={{encoded_query}}"
-                        
-                        # Use basic fetch as fallback
-                        response = requests.get(search_url, headers={{'User-Agent': 'Mozilla/5.0'}}, timeout=10)
-                        if response.status_code == 200:
-                            from bs4 import BeautifulSoup
-                            soup = BeautifulSoup(response.content, 'html.parser')
-                            
-                            # Remove script and style elements
-                            for script in soup(["script", "style", "nav", "header", "footer"]):
-                                script.decompose()
-                            
-                            # Get text content
-                            text = soup.get_text()
-                            
-                            # Clean up whitespace
-                            lines = (line.strip() for line in text.splitlines())
-                            chunks = (phrase.strip() for line in lines for phrase in line.split("  "))
-                            text = ' '.join(chunk for chunk in chunks if chunk)
-                            
-                            # Truncate to ~2000 characters
-                            if len(text) > 2000:
-                                text = text[:2000] + "..."
-                            
-                            return text
-                        else:
-                            return f"Failed to fetch search results: {{response.status_code}}"
-                
-                # Run the async search
-                if hasattr(asyncio, 'run'):
-                    search_result = asyncio.run(search_with_crawl4ai(search_query))
-                else:
-                    # Fallback for older Python versions
-                    loop = asyncio.new_event_loop()
-                    asyncio.set_event_loop(loop)
-                    try:
-                        search_result = loop.run_until_complete(search_with_crawl4ai(search_query))
-                    finally:
-                        loop.close()
-                
-                grounding_context += f"\\n\\nWeb search results for '{{search_query}}':\\n{{search_result}}"
-            except Exception as e:
-                # Enhanced fallback with better error handling
-                urls = extract_urls_from_text(search_query)
-                if urls:
-                    fallback_results = []
-                    for url in urls[:2]:  # Limit to 2 URLs for fallback
-                        content = fetch_url_content(url)
-                        fallback_results.append(f"Content from {{url}}:\\n{{content[:500]}}...")
-                    grounding_context += f"\\n\\nWeb search fallback for '{{search_query}}':\\n" + "\\n\\n".join(fallback_results)
-                else:
-                    grounding_context += f"\\n\\nWeb search requested for '{{search_query}}' but search functionality is unavailable"
-    
     # Build enhanced system prompt with grounding context
     enhanced_system_prompt = SYSTEM_PROMPT + grounding_context
 
````
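For reference, the gating heuristic the removed block applied before searching boils down to one predicate (a sketch; `extract_urls_from_text` is the app's own helper):

````python
import re

def should_search(message: str) -> bool:
    # Skip messages that are mostly a fenced code block.
    if re.search(r'```[\s\S]*```', message):
        return False
    # Skip messages that are mostly URLs.
    urls = extract_urls_from_text(message)  # helper defined in app.py
    if urls and len(' '.join(urls)) > len(message) * 0.5:
        return False
    # Skip very short messages (likely greetings).
    return len(message.strip()) >= 5
````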
````diff
@@ -690,6 +571,39 @@ def protected_generate_response(message, history):
         return "Please enter the access code to continue."
     return generate_response(message, history)
 
+# Global variable to store chat history for export
+chat_history_store = []
+
+def store_and_generate_response(message, history):
+    \"\"\"Wrapper function that stores history and generates response\"\"\"
+    global chat_history_store
+    
+    # Store the updated history
+    chat_history_store = history.copy() if history else []
+    
+    # Generate response using the protected function
+    response = protected_generate_response(message, history)
+    
+    # Update stored history with the new exchange
+    chat_history_store.append({{"role": "user", "content": message}})
+    chat_history_store.append({{"role": "assistant", "content": response}})
+    
+    return response
+
+def export_current_conversation():
+    \"\"\"Export the current conversation\"\"\"
+    if not chat_history_store:
+        return gr.update(visible=False)
+    
+    markdown_content = export_conversation_to_markdown(chat_history_store)
+    
+    # Save to temporary file
+    with tempfile.NamedTemporaryFile(mode='w', suffix='.md', delete=False, encoding='utf-8') as f:
+        f.write(markdown_content)
+        temp_file = f.name
+    
+    return gr.update(value=temp_file, visible=True)
+
 def export_conversation(history):
     \"\"\"Export conversation to markdown file\"\"\"
     if not history:
````
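The wrapper stores history in messages format, a list of `{"role": ..., "content": ...}` dicts, as the appends above show. A toy stand-in for the exporter it feeds (hypothetical body; the real `export_conversation_to_markdown` is defined elsewhere in the template):

```python
def export_conversation_to_markdown(history):
    # Hypothetical sketch only; the real helper lives elsewhere in app.py.
    lines = ["# Conversation Export", ""]
    for turn in history:  # e.g. {"role": "user", "content": "Hi"}
        lines.append(f"**{turn['role'].title()}:**")
        lines.append(turn["content"])
        lines.append("")
    return "\n".join(lines)
```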
````diff
@@ -698,7 +612,7 @@ def export_conversation(history):
     markdown_content = export_conversation_to_markdown(history)
     
     # Save to temporary file
-    with tempfile.NamedTemporaryFile(mode='w', suffix='.md', delete=False) as f:
+    with tempfile.NamedTemporaryFile(mode='w', suffix='.md', delete=False, encoding='utf-8') as f:
         f.write(markdown_content)
         temp_file = f.name
 
````
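Without an explicit `encoding`, `NamedTemporaryFile` in text mode falls back to the platform's locale encoding, which can reject the emoji and other non-ASCII characters these markdown exports contain (for example `cp1252` on Windows); pinning UTF-8 avoids that:

```python
import tempfile

with tempfile.NamedTemporaryFile(mode='w', suffix='.md',
                                 delete=False, encoding='utf-8') as f:
    f.write("## Export ✅")  # safe under UTF-8; could raise
                             # UnicodeEncodeError under a legacy locale default
```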
````diff
@@ -723,10 +637,7 @@ def get_configuration_status():
 
     if ENABLE_DYNAMIC_URLS:
         status_parts.append("🔄 **Dynamic URLs:** Enabled")
-    
-    if ENABLE_WEB_SEARCH:
-        status_parts.append("🔍 **Web Search:** Enabled")
-    
+    
     if ENABLE_VECTOR_RAG:
         status_parts.append("📚 **Document RAG:** Enabled")
 
````
````diff
@@ -762,7 +673,7 @@ with gr.Blocks(title=SPACE_NAME) as demo:
     # Main chat interface (hidden until access granted)
     with gr.Column(visible=not bool(ACCESS_CODE)) as chat_section:
         chat_interface = gr.ChatInterface(
-            fn=protected_generate_response,
+            fn=store_and_generate_response,  # Use wrapper function to store history
             title="",  # Title already shown above
             description="",  # Description already shown above
             examples=None,
````
````diff
@@ -771,13 +682,12 @@ with gr.Blocks(title=SPACE_NAME) as demo:
 
         # Export functionality
         with gr.Row():
-            export_btn = gr.Button("Export Conversation", variant="secondary", size="sm")
+            export_btn = gr.Button("📥 Export Conversation", variant="secondary", size="sm")
             export_file = gr.File(label="Download Conversation", visible=False)
 
         # Connect export functionality
         export_btn.click(
-            export_conversation,
-            inputs=[chat_interface],
+            export_current_conversation,
             outputs=[export_file]
         )
 
````
````diff
@@ -801,10 +711,12 @@ if __name__ == "__main__":
 # Available models - Updated with valid OpenRouter model IDs
 MODELS = [
     "google/gemini-2.0-flash-001",  # Fast, reliable, general tasks
+    "google/gemma-3-27b-it",  # High-performance open model
     "anthropic/claude-3.5-haiku",  # Complex reasoning and analysis
-    "openai/gpt-4o-mini",
-    "
-    "
+    "openai/gpt-4o-mini-search-preview",  # Balanced performance and cost with search
+    "openai/gpt-4.1-nano",  # Lightweight OpenAI model
+    "nvidia/llama-3.1-nemotron-70b-instruct",  # Large open-source model
+    "mistralai/devstral-small"  # Coding-focused model
 ]
 
 def fetch_url_content(url):
````
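These IDs go straight to OpenRouter's OpenAI-compatible chat completions endpoint. A minimal sketch of such a call (the env var name here is illustrative; the generated spaces read a configurable variable):

```python
import os
import requests

resp = requests.post(
    "https://openrouter.ai/api/v1/chat/completions",
    headers={"Authorization": f"Bearer {os.environ['OPENROUTER_API_KEY']}"},
    json={
        "model": "google/gemini-2.0-flash-001",  # any ID from MODELS
        "messages": [{"role": "user", "content": "Hello!"}],
        "temperature": 0.7,
        "max_tokens": 500,
    },
    timeout=30,
)
print(resp.json()["choices"][0]["message"]["content"])
```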
````diff
@@ -943,19 +855,18 @@ Generated on {datetime.now().strftime('%Y-%m-%d %H:%M:%S')} with Chat U/I Helper
 
     return readme_content
 
-def create_requirements(enable_vector_rag=False, enable_web_search=False):
+def create_requirements(enable_vector_rag=False):
     """Generate requirements.txt"""
-    base_requirements = "gradio>=
+    base_requirements = "gradio>=4.44.1\nrequests>=2.32.3\nbeautifulsoup4>=4.12.3\npython-dotenv>=1.0.0"
+    
 
     if enable_vector_rag:
-        base_requirements += "\
-    
-    if enable_web_search:
-        base_requirements += "\ncrawl4ai>=0.2.0\naiohttp>=3.8.0"
+        base_requirements += "\n\n# Vector RAG dependencies"
+        base_requirements += "\nfaiss-cpu>=1.11.0\nnumpy>=1.25.0,<3.0\nsentence-transformers>=2.2.2\nPyMuPDF>=1.23.0\npython-docx>=0.8.11"
 
     return base_requirements
 
-def generate_zip(name, description, system_prompt, model, api_key_var, temperature, max_tokens, examples_text, access_code="", enable_dynamic_urls=False, url1="", url2="", url3="", url4="", enable_vector_rag=False, rag_data=None, enable_web_search=False):
+def generate_zip(name, description, system_prompt, model, api_key_var, temperature, max_tokens, examples_text, access_code="", enable_dynamic_urls=False, url1="", url2="", url3="", url4="", enable_vector_rag=False, rag_data=None):
     """Generate deployable zip file"""
 
     # Process examples
````
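Evaluating the new helper shows exactly what the generated requirements.txt contains; with RAG enabled:

```python
print(create_requirements(enable_vector_rag=True))
# gradio>=4.44.1
# requests>=2.32.3
# beautifulsoup4>=4.12.3
# python-dotenv>=1.0.0
#
# # Vector RAG dependencies
# faiss-cpu>=1.11.0
# numpy>=1.25.0,<3.0
# sentence-transformers>=2.2.2
# PyMuPDF>=1.23.0
# python-docx>=0.8.11
```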
````diff
@@ -991,7 +902,6 @@ def generate_zip(name, description, system_prompt, model, api_key_var, temperatu
         'access_code': "",  # Access code stored in environment variable for security
         'enable_dynamic_urls': enable_dynamic_urls,
         'enable_vector_rag': enable_vector_rag,
-        'enable_web_search': enable_web_search,
         'rag_data_json': json.dumps(rag_data) if rag_data else 'None'
     }
 
````
````diff
@@ -1001,7 +911,7 @@ def generate_zip(name, description, system_prompt, model, api_key_var, temperatu
     readme_config = config.copy()
     readme_config['access_code'] = access_code or ""
     readme_content = create_readme(readme_config)
-    requirements_content = create_requirements(enable_vector_rag, enable_web_search)
+    requirements_content = create_requirements(enable_vector_rag)
 
     # Create zip file with clean naming
     filename = f"{name.lower().replace(' ', '_').replace('-', '_')}.zip"
````
````diff
@@ -1157,7 +1067,7 @@ def update_sandbox_preview(config_data):
 
     return preview_text, preview_html
 
-def on_preview_combined(name, description, system_prompt, enable_research_assistant, model, temperature, max_tokens, examples_text, enable_dynamic_urls, enable_vector_rag, enable_web_search):
+def on_preview_combined(name, description, system_prompt, enable_research_assistant, model, temperature, max_tokens, examples_text, enable_dynamic_urls, enable_vector_rag):
     """Generate configuration and return preview updates"""
     if not name or not name.strip():
         return (
````
````diff
@@ -1189,45 +1099,24 @@ def on_preview_combined(name, description, system_prompt, enable_research_assist
         'max_tokens': max_tokens,
         'enable_dynamic_urls': enable_dynamic_urls,
         'enable_vector_rag': enable_vector_rag,
-        'enable_web_search': enable_web_search,
         'examples_text': examples_text,
         'preview_ready': True
     }
 
     # Generate preview displays
-    preview_text = f"""
-
-
-
-**Configuration:**
-- **Model:** {model}
-- **Temperature:** {temperature}
-- **Max Tokens:** {max_tokens}
-- **Dynamic URLs:** {'✅ Enabled' if enable_dynamic_urls else '❌ Disabled'}
-- **Vector RAG:** {'✅ Enabled' if enable_vector_rag else '❌ Disabled'}
-- **Web Search:** {'✅ Enabled' if enable_web_search else '❌ Disabled'}
-
-**System Prompt:**
-{final_system_prompt[:200]}{'...' if len(final_system_prompt) > 200 else ''}
-
-✨ **Next Steps:** Switch to the "Sandbox Preview" tab to test your assistant with real conversations before generating the deployment package."""
-
+    preview_text = f"""**System Prompt:**
+> *{final_system_prompt[:600]}{'...' if len(final_system_prompt) > 600 else '...'}*
+
+Tip: Try different configurations of your space before generating the deployment package."""
     config_display = f"""### Current Configuration
 
-**
+> **Configuration**:
 - **Name:** {name}
 - **Description:** {description or 'No description provided'}
-
-**Model Settings:**
 - **Model:** {model}
 - **Temperature:** {temperature}
 - **Max Response Tokens:** {max_tokens}
 
-**Features:**
-- **Dynamic URL Fetching:** {'✅ Enabled' if enable_dynamic_urls else '❌ Disabled'}
-- **Document RAG:** {'✅ Enabled' if enable_vector_rag else '❌ Disabled'}
-- **Web Search:** {'✅ Enabled' if enable_web_search else '❌ Disabled'}
-
 **System Prompt:**
 ```
 {final_system_prompt}
````
````diff
@@ -1238,8 +1127,8 @@ Your assistant "{name}" is now configured and ready to test in the Sandbox Previ
 """
 
     # Show success notification
-    gr.Info(f"✅ Preview generated successfully for '{name}'! Switch to
-
+    gr.Info(f"✅ Preview generated successfully for '{name}'! Switch to Preview tab.")
+
     return (
         config_data,
         gr.update(value=preview_text, visible=True),
````
````diff
@@ -1268,7 +1157,7 @@ def update_preview_display(config_data):
 
 Your assistant "{config_data['name']}" is configured and ready to test.
 
-**Configuration
+**Configuration**
 - **Model:** {config_data['model']}
 - **Temperature:** {config_data['temperature']}
 - **Max Tokens:** {config_data['max_tokens']}
````
````diff
@@ -1276,7 +1165,7 @@ Your assistant "{config_data['name']}" is configured and ready to test.
 - **Vector RAG:** {'✅ Enabled' if config_data['enable_vector_rag'] else '❌ Disabled'}
 
 **System Prompt:**
-{config_data['system_prompt'][:200]}{'...' if len(config_data['system_prompt']) > 200 else ''}
+{config_data['system_prompt'][:600]}{'...' if len(config_data['system_prompt']) > 600 else ''}
 
 Use the chat interface below to test your assistant before generating the deployment package."""
 
````
````diff
@@ -1363,33 +1252,8 @@ Once you set your API key, you'll be able to test real conversations in this pre
         if dynamic_context_parts:
             dynamic_context = "\n".join(dynamic_context_parts)
 
-    # Check for web search request if enabled
-    web_search_result = ""
-    if config_data.get('enable_web_search'):
-        # If web search is enabled, use it for most queries (excluding code blocks and URLs)
-        should_search = True
-        
-        # Skip search for messages that are primarily code blocks
-        if re.search(r'```[\s\S]*```', message):
-            should_search = False
-        
-        # Skip search for messages that are primarily URLs
-        urls_in_message = extract_urls_from_text(message)
-        if urls_in_message and len(' '.join(urls_in_message)) > len(message) * 0.5:
-            should_search = False
-        
-        # Skip search for very short messages (likely greetings)
-        if len(message.strip()) < 5:
-            should_search = False
-        
-        if should_search:
-            # Use the entire message as search query, cleaning it up
-            search_query = message.strip()
-            search_result = perform_web_search(search_query, "Web search requested")
-            web_search_result = f"\n\n{search_result}\n\n"
-    
     # Build enhanced system prompt with all contexts
-    enhanced_system_prompt = config_data.get('system_prompt', '') + grounding_context + rag_context + dynamic_context + web_search_result
+    enhanced_system_prompt = config_data.get('system_prompt', '') + grounding_context + rag_context + dynamic_context
 
     # Build messages array for the API
     messages = [{"role": "system", "content": enhanced_system_prompt}]
````
````diff
@@ -1483,7 +1347,7 @@ def export_preview_conversation(history):
 
     return gr.update(value=temp_file, visible=True)
 
-def on_generate(name, description, system_prompt, enable_research_assistant, model, api_key_var, temperature, max_tokens, examples_text, access_code, enable_dynamic_urls, url1, url2, url3, url4, enable_vector_rag, rag_tool_state, enable_web_search):
+def on_generate(name, description, system_prompt, enable_research_assistant, model, api_key_var, temperature, max_tokens, examples_text, access_code, enable_dynamic_urls, url1, url2, url3, url4, enable_vector_rag, rag_tool_state):
     if not name or not name.strip():
         return gr.update(value="Error: Please provide a Space Title", visible=True), gr.update(visible=False), {}
 
````
````diff
@@ -1500,7 +1364,7 @@ def on_generate(name, description, system_prompt, enable_research_assistant, mod
 
     final_system_prompt = system_prompt.strip()
 
-    filename = generate_zip(name, description, final_system_prompt, model, api_key_var, temperature, max_tokens, examples_text, access_code, enable_dynamic_urls, url1, url2, url3, url4, enable_vector_rag, rag_data, enable_web_search)
+    filename = generate_zip(name, description, final_system_prompt, model, api_key_var, temperature, max_tokens, examples_text, access_code, enable_dynamic_urls, url1, url2, url3, url4, enable_vector_rag, rag_data)
 
     success_msg = f"""**Deployment package ready!**
 
````
````diff
@@ -1529,7 +1393,6 @@ def on_generate(name, description, system_prompt, enable_research_assistant, mod
         'max_tokens': max_tokens,
         'enable_dynamic_urls': enable_dynamic_urls,
         'enable_vector_rag': enable_vector_rag,
-        'enable_web_search': enable_web_search,
         'filename': filename
     }
 
````
````diff
@@ -1715,109 +1578,44 @@ def remove_chat_urls(count):
 
 # Code execution toggle removed - functionality no longer supported
 
-def toggle_web_search(enable_search):
-    """Toggle visibility of web search space field"""
-    return gr.update(visible=enable_search)
-
 def perform_web_search(query, description="Web search"):
-    """
+    """Simplified web search with URL content fetching"""
     try:
-        #
-        try:
-            from crawl4ai import WebCrawler
-            import asyncio
-            
-            async def search_with_crawl4ai(search_query):
-                # Create search URL for DuckDuckGo
-                import urllib.parse
-                encoded_query = urllib.parse.quote_plus(search_query)
-                search_url = f"https://duckduckgo.com/html/?q={encoded_query}"
-                
-                # Initialize crawler
-                crawler = WebCrawler(verbose=False)
-                
-                try:
-                    # Start the crawler
-                    await crawler.astart()
-                    
-                    # Crawl the search results
-                    result = await crawler.arun(url=search_url)
-                    
-                    if result.success:
-                        # Extract text content from search results
-                        content = result.cleaned_html if result.cleaned_html else result.markdown
-                        
-                        # Clean and truncate the content
-                        if content:
-                            # Remove excessive whitespace and limit length
-                            lines = [line.strip() for line in content.split('\n') if line.strip()]
-                            cleaned_content = '\n'.join(lines)
-                            
-                            # Truncate to reasonable length for context
-                            if len(cleaned_content) > 3000:
-                                cleaned_content = cleaned_content[:3000] + "..."
-                            
-                            return cleaned_content
-                        else:
-                            return "No content extracted from search results"
-                    else:
-                        return f"Search failed: {result.error_message if hasattr(result, 'error_message') else 'Unknown error'}"
-                
-                finally:
-                    # Clean up the crawler
-                    await crawler.aclose()
-            
-            # Run the async search
-            if hasattr(asyncio, 'run'):
-                search_result = asyncio.run(search_with_crawl4ai(query))
-            else:
-                # Fallback for older Python versions
-                loop = asyncio.new_event_loop()
-                asyncio.set_event_loop(loop)
-                try:
-                    search_result = loop.run_until_complete(search_with_crawl4ai(query))
-                finally:
-                    loop.close()
-            
-            return f"**{description}**\n\nQuery: {query}\n\n**Search Results:**\n{search_result}"
-        
-        except ImportError:
-            # Fallback to simple DuckDuckGo search without crawl4ai
-            import urllib.parse
-            encoded_query = urllib.parse.quote_plus(query)
-            search_url = f"https://duckduckgo.com/html/?q={encoded_query}"
-            
-            # Use enhanced_fetch_url_content as fallback
-            content = enhanced_fetch_url_content(search_url)
-            return f"**{description} (Simplified)**\n\nQuery: {query}\n\n**Search Results:**\n{content}"
-    
-    except Exception as e:
-        # Final fallback to URL extraction if search fails
+        # Check if query contains URLs for content fetching
         urls = extract_urls_from_text(query)
         if urls:
             results = []
-            for url in urls[:
-
-
-
-
+            for url in urls[:3]:  # Limit to 3 URLs for performance
+                try:
+                    content = enhanced_fetch_url_content(url)
+                    if content and len(content.strip()) > 50:
+                        results.append(f"**Content from {url}:**\n{content[:1000]}...")
+                except Exception as e:
+                    results.append(f"**Error fetching {url}:** {str(e)}")
+            
+            if results:
+                return f"**{description}**\n\nQuery: {query}\n\n" + "\n\n".join(results)
+        
+        # For non-URL queries, return context for model to handle
+        return f"**{description}**\n\nQuery: {query}\n\n**Note:** This query will be processed using the model's knowledge. For real-time information, consider providing specific URLs or mentioning that recent data verification may be needed."
+        
+    except Exception as e:
+        return f"**{description} - Processing Error:** {str(e)}\n\nQuery: {query}\n\n**Note:** Falling back to model's knowledge for this query."
 
 # Code execution functionality removed - no longer supported
 
 def toggle_research_assistant(enable_research):
-    """Toggle research assistant system prompt and
+    """Toggle research assistant system prompt and dynamic URL fetching"""
     if enable_research:
-        combined_prompt = "You are
+        combined_prompt = "You are a research aid specializing in academic literature search and analysis. Your expertise spans discovering peer-reviewed sources, assessing research methodologies, synthesizing findings across studies, and delivering properly formatted citations. When responding, anchor claims in specific sources from provided URL contexts, differentiate between direct evidence and interpretive analysis, and note any limitations or contradictory results. Employ clear, accessible language that demystifies complex research, and propose connected research directions when appropriate. Your purpose is to serve as an informed research tool supporting users through initial concept development, exploratory investigation, information collection, and source compilation."
         return (
             gr.update(value=combined_prompt),  # Update main system prompt
-            gr.update(value=True),
-            gr.update(value=True)  # Enable web search for research template
+            gr.update(value=True)  # Enable dynamic URL fetching for research template
         )
     else:
         return (
             gr.update(value=""),  # Clear main system prompt when disabling
-            gr.update(value=False),
-            gr.update(value=False)  # Disable web search
+            gr.update(value=False)  # Disable dynamic URL setting
         )
 
 
````
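After the rewrite, `perform_web_search` no longer crawls anything: it fetches content for URLs found in the query and otherwise hands the query back with a note for the model. Both paths, with abbreviated illustrative output:

```python
# Query containing a URL: the page is fetched and excerpted.
print(perform_web_search("summarize https://example.com/post"))
# **Web search**
# Query: summarize https://example.com/post
# **Content from https://example.com/post:**
# <first ~1000 characters of extracted text>...

# Plain query: nothing is fetched; a note defers to the model's knowledge.
print(perform_web_search("latest gradio release"))
# **Web search**
# Query: latest gradio release
# **Note:** This query will be processed using the model's knowledge. ...
```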
````diff
@@ -1998,20 +1796,7 @@ with gr.Blocks(
                 info="Enable to use pre-configured research assistant settings"
             )
 
-            
-            enable_web_search = gr.Checkbox(
-                label="Enable Web Search",
-                value=False,
-                info="Allow the assistant to search the web using crawl4ai"
-            )
-            
-            web_search_space = gr.Textbox(
-                label="Web Search Technology",
-                value="crawl4ai",
-                info="Uses crawl4ai library for web crawling",
-                visible=False,
-                interactive=False
-            )
+            
 
             # Document RAG section
             enable_vector_rag = gr.Checkbox(
````
````diff
@@ -2118,15 +1903,11 @@ with gr.Blocks(
     enable_research_assistant.change(
         toggle_research_assistant,
         inputs=[enable_research_assistant],
-        outputs=[system_prompt, enable_dynamic_urls, enable_web_search]
+        outputs=[system_prompt, enable_dynamic_urls]
     )
 
-    #
-    enable_web_search.change(
-        toggle_web_search,
-        inputs=[enable_web_search],
-        outputs=[web_search_space]
-    )
+    # Web search checkbox is now just for enabling/disabling the feature
+    # No additional UI elements needed since we rely on model capabilities
 
 
 
````
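The `.change()` wiring follows Gradio's usual event pattern: a callback plus `inputs`/`outputs` component lists, with the callback returning one `gr.update` per declared output. A self-contained sketch of the same shape:

```python
import gradio as gr

def toggle(enabled):
    # One gr.update per declared output component.
    return gr.update(value="Research mode on" if enabled else "")

with gr.Blocks() as demo:
    box = gr.Checkbox(label="Enable research assistant")
    prompt = gr.Textbox(label="System prompt")
    box.change(toggle, inputs=[box], outputs=[prompt])

demo.launch()
```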
````diff
@@ -2160,7 +1941,7 @@ with gr.Blocks(
     # Connect the generate button
     generate_btn.click(
         on_generate,
-        inputs=[name, description, system_prompt, enable_research_assistant, model, api_key_var, temperature, max_tokens, examples_text, access_code, enable_dynamic_urls, url1, url2, url3, url4, enable_vector_rag, rag_tool_state, enable_web_search],
+        inputs=[name, description, system_prompt, enable_research_assistant, model, api_key_var, temperature, max_tokens, examples_text, access_code, enable_dynamic_urls, url1, url2, url3, url4, enable_vector_rag, rag_tool_state],
         outputs=[status, download_file, sandbox_state]
     )
 
````
````diff
@@ -2226,7 +2007,7 @@ with gr.Blocks(
             with gr.Row():
                 preview_send = gr.Button("Send", variant="primary")
                 preview_clear = gr.Button("Clear")
-
+                preview_export_btn = gr.Button("Export Conversation", variant="secondary")
 
             # Export functionality
             export_file = gr.File(label="Download Conversation", visible=False)
````
````diff
@@ -2254,7 +2035,7 @@ with gr.Blocks(
                 outputs=[preview_msg, preview_chatbot]
             )
 
-
+            preview_export_btn.click(
                 export_preview_conversation,
                 inputs=[preview_chatbot],
                 outputs=[export_file]
````
````diff
@@ -2279,7 +2060,7 @@ with gr.Blocks(
     # Connect cross-tab functionality after all components are defined
     preview_btn.click(
         on_preview_combined,
-        inputs=[name, description, system_prompt, enable_research_assistant, model, temperature, max_tokens, examples_text, enable_dynamic_urls, enable_vector_rag, enable_web_search],
+        inputs=[name, description, system_prompt, enable_research_assistant, model, temperature, max_tokens, examples_text, enable_dynamic_urls, enable_vector_rag],
         outputs=[preview_config_state, preview_status_comp, preview_chat_section_comp, config_display_comp]
     )
 
````
requirements.txt (CHANGED)
````diff
@@ -9,7 +9,7 @@ playwright==1.53.0
 
 # Vector RAG dependencies (optional)
 sentence-transformers>=2.2.2
-faiss-cpu
+faiss-cpu>=1.11.0
 PyMuPDF>=1.23.0
 python-docx>=0.8.11
-numpy
+numpy>=1.25.0,<3.0
````
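The previously unpinned `faiss-cpu` and `numpy` now carry explicit bounds; the `<3.0` ceiling presumably shields compiled dependencies from a NumPy major-version ABI jump. A quick way to check an installed version against such a pin, using the `packaging` library:

```python
from importlib.metadata import version
from packaging.specifiers import SpecifierSet

spec = SpecifierSet(">=1.25.0,<3.0")
installed = version("numpy")
print(installed, "satisfies" if installed in spec else "violates", spec)
```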
support_docs.py (CHANGED)
````diff
@@ -271,7 +271,6 @@ def create_support_docs():
 **Token Usage Notes:**
 - Tokens include both input (your prompt + context) and output
 - Longer contexts (documents, URLs) use more input tokens
-- Monitor usage through OpenRouter dashboard
 - Consider costs when setting high token limits
 """)
 
````