File size: 13,364 Bytes
e5a53e5 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 |
import gradio as gr
import os
import requests
import json
import asyncio
from crawl4ai import AsyncWebCrawler
# Configuration
SPACE_NAME = "My Custom Space"  # Title rendered at the top of the Space
SPACE_DESCRIPTION = ""  # Optional subtitle rendered under the title (may be empty)
# System prompt steering the assistant toward link-grounded, MLA-cited answers.
SYSTEM_PROMPT = """You are a research assistant that provides link-grounded information through Crawl4AI web fetching. Use MLA documentation for parenthetical citations and bibliographic entries. This assistant is designed for students and researchers conducting academic inquiry. Your main responsibilities include: analyzing academic sources, fact-checking claims with evidence, providing properly cited research summaries, and helping users navigate scholarly information. Ground all responses in provided URL contexts and any additional URLs you're instructed to fetch. Never rely on memory for factual claims."""
MODEL = "google/gemma-3-27b-it"  # OpenRouter model identifier
GROUNDING_URLS = []  # Static URLs fetched once and prepended to the system prompt
# Get access code from environment variable for security
ACCESS_CODE = os.environ.get("SPACE_ACCESS_CODE", "")  # Empty string disables the access gate
ENABLE_DYNAMIC_URLS = True  # Also fetch URLs that appear inside user messages
ENABLE_VECTOR_RAG = True  # Attempt FAISS-based retrieval initialization at startup
RAG_DATA = {"index_base64": "SXhGSYABAAABAAAAAAAAAAAAEAAAAAAAAAAQAAAAAAABAAAAAIABAAAAAAAAquL8vORiA70my6u7cPEnPSH5tTxoLAs72bgTPJiVCT18lxu9e9UbvYg0+rvksBs967PLPT+2ET1NLH+72AgnPSO7kD2qeYC8vmOLPEDvBLwIUIm7OnJUPfMGgz3xvSu9TmeRvB7TPD38lTK8gdCyveBptLsJVDO9ac2rPU/HPj27+eo8lbwnPtfTjr3iCOq7xjxvvHG0PL15RkW9nwzevMEce71fGBu9IQuqvSdelz3VUZE9ZIRPPUzBw73e+qS8wjk+PT7soD05Rym+6aqgvJbG5ju6uSs99YoxvW7ipjxIt4s71i2ZPF/Amzv5/qm8ZT4IO3hi8L0jUaE9cbO8vC1oKjyFjCG9QXKKvcFusDz52qo8BH7RveClcr1vruM9OLoGvfAZYj05Sne91q2xPXyjbzyHoJq9dt5DPUlH+bwXUZ69mfHavJ5A3jxALcy7knFZPXSwkbsdXts9hw2svd4VS7zXWcc848SUPFj/Ub3qIbM9Y4rNO6mZUz36DGE8odikPQ1/Cr3fJLk9aD0wPFaCkD1Czow94IpoPCJJd71H2o+9SGM2PMM86TxArsq8ZpORPYqX2L3psZC9/U2bPL+BG71p4aq91ai4u//MZL08k+C7iZLqvPyPIDsppIE8yy27uTUonT3r54S8fFuQvNwfpbzHs/48FaDrvLzoEQqNYpe81b7bvOTiRb2mQIE9w2LUu2a0qbzdMYW8jWivPVNqnr2Au468sSryvVZ5rz1J5j+857vGPeKy/DxuAN08w9bXvHShyTz3zNy80b32vNwHhD2h3hS92mNqPdzPmj3oeyQ9sfw3vCfVeD360q+9/iI3POSd+bxJtse90+CCvRbrvLzkay89bkHcPHzQPj34LAk8FNkQvUVpXL3KCIg8N/eVvQO6Er1Wk9c8/ZkrvRomS710Z/M8qEYQvRuZdLzgZEU940M4vSMHsTyoWj89pW7tvCncIT1TP7Q9Gg5mu89grzyWx1y8j00qPdYmRjyC0jW8fbEyvA+5mz3nix67y7kJvWPHIr2et9A6s1ZGPZGRyT2pN6Q90HmIPeI7Ez0+PxE9EfElvV5a5rwspvU7WexEva576L0tlkG83v0mPdaMIbyhQLa9R2IEPUkazr2DThk9bASDvZn+gjwoUh6+2qxiOy2ZRr2A7c27YFbBPSOeGL3GUB68e+NYPcBg4IllYLO8mnA/vX79mLx4DUs9VfpgPLEHi7xeU1u83oekPBZzcL080jC91g7VvNkxiL3kR3o9khAKvflLp7ySWJw9bO3cvJqxbL1DBGG9hfKqPdlSir3kM6M8IJR4vdPeOz36ISW8dSggvHQIQLqXATK9qoGBvLA20LzCkEi8bhFBvcANALsKMKM8BzRFPCI98bxAOo09WImcvauxyzv7a0E9jBK6PBRlmT29tA69RwYjvQ/ESb3AM4w7WMjZvaEzVT1HbGU9YDDQvEreUD1soz+9bPYhPGDAHL1JkuI82kRnvYKyoL1EBY08BheBPJYxBz2n6ne8E6kUvKAvnT23Rhg8vbrJPJPtuL2UGz+9OJbvPFwrob3XARm9zw1IPSsCi7wSzRk9uhV8Pa47WT0hBIs8NBZVPdhUBL3NLdo8H2hEvT2Sj7ycZ608dB6zPaqYkD2GErg7BVQbPqemq7zRz2C8ZdnsvLwsVz2AChG9hZQIPKJRCLwjOMA96/Q4PTNNOLPOAMm9tOguvAO8K71fWoi8+CsTPTFC+DvlUbA8u17rPR1RoL1xh8e8ph0NPQdGRbwF8eq8qKiMPMA4qz11BlA8WY0RPcGoGrzHyhY9yafEO4Udnj0xgFI9kVEVPPW85z2TnGc8TGCzPIELlrzfUtM96NeDPTUGfr2ZRPO8A2sHPW9jtrzpdqa901KlPZe5Kj247F08QnYIvGs
Jzr2mJBw8an7zPIQyhDuih+Y81CUUvQIfAz31O308JfcNvDywrb0J4ck6g8cnPMJxpLydUpG93vS1OzB8yT0x83w8NIq3u9q76bxWcJg8LvmZPS5WIb2XPeM7D/ehO8+q3DzEf4I8", "chunks": {"677a2f1d": {"text": "Vector Database Test Document This is a test document for evaluating the vector database functionality. Section 1: Introduction to Vector Databases Vector databases store and query high-dimensional vector representations of data. They enable semantic search by finding vectors similar to a query vector in an embedding space. Section 2: Use Cases Common applications include: - Document retrieval and question answering - Similarity search for products or content - Recommendation systems - Semantic search in chatbots Section 3: Technical Implementation Vector databases typically use embedding models to convert text into dense vectors, then use algorithms like cosine similarity or approximate nearest neighbor search to find relevant results. Section 4: Benefits - Semantic understanding beyond keyword matching - Scalable retrieval for large document collections - Integration with modern AI systems and large language models - Support for multi-modal data (text, images, audio) This document should generate multiple chunks when processed by the system.", "metadata": {"file_path": "/private/var/folders/gg/pr9vtbf50cq2z_szcsdnjvym0000gn/T/gradio/ca225f4226ff8fe4b52c49232ba98eb63f89ad9da4e107040507ee0da07ec619/doc.txt", "file_name": "doc.txt", "chunk_index": 0, "start_word": 0, "word_count": 151}, "chunk_id": "677a2f1d"}}, "chunk_ids": ["677a2f1d"], "dimension": 384, "model_name": "sentence-transformers/all-MiniLM-L6-v2"}
# Get API key from environment - customizable variable name
API_KEY = os.environ.get("OPENROUTER_API_KEY")  # None when unset; checked on every request
async def fetch_url_content_async(url, crawler, max_chars=4000):
    """Fetch *url* with the supplied crawler and return extracted text.

    Args:
        url: The URL to fetch.
        crawler: An AsyncWebCrawler-compatible object exposing an ``arun``
            coroutine that returns a result with ``success``, ``markdown``
            and ``cleaned_html`` attributes.
        max_chars: Truncation cap for the extracted content. Defaults to
            4000, matching the previously hard-coded limit.

    Returns:
        The extracted markdown (preferred) or cleaned HTML, truncated with a
        trailing "..." when longer than *max_chars*; on any failure, a
        human-readable "Error fetching ..." string instead of raising.
    """
    try:
        result = await crawler.arun(
            url=url,
            bypass_cache=True,
            word_count_threshold=10,
            # Strip boilerplate page furniture before extraction.
            excluded_tags=['script', 'style', 'nav', 'header', 'footer'],
            remove_overlay_elements=True
        )
        if not result.success:
            return f"Error fetching {url}: Failed to retrieve content"
        # Prefer markdown; fall back to cleaned HTML, then the empty string.
        content = result.markdown or result.cleaned_html or ""
        if len(content) > max_chars:
            content = content[:max_chars] + "..."
        return content
    except Exception as e:
        # Errors are returned as text so callers can embed them in context.
        return f"Error fetching {url}: {str(e)}"
def fetch_url_content(url):
    """Blocking helper: crawl a single URL and return its extracted text.

    Spins up a short-lived AsyncWebCrawler, delegates to
    fetch_url_content_async, and converts any failure into an
    "Error fetching ..." string rather than raising.
    """
    async def _crawl_once():
        # A fresh crawler per call keeps this wrapper self-contained.
        async with AsyncWebCrawler(verbose=False) as crawler:
            return await fetch_url_content_async(url, crawler)

    try:
        return asyncio.run(_crawl_once())
    except Exception as e:
        return f"Error fetching {url}: {str(e)}"
# Global cache for URL content to avoid re-crawling in generated spaces
# Keyed by a sorted tuple of the configured grounding URLs; values are the
# fully assembled context strings.
_url_content_cache = {}
def get_grounding_context():
    """Build a grounding-context string from GROUNDING_URLS, with caching.

    Returns:
        "" when no usable URLs are configured; otherwise a blank-line
        separated series of "Context from URL i (...)" sections, cached in
        _url_content_cache keyed by the sorted tuple of URLs.
    """
    # Normalize once: drop None/blank entries so the cache key and the fetch
    # loop agree on exactly which URLs are in play. (Previously the cache key
    # filtered empty entries but the loop iterated the raw list, which would
    # crash on a None entry and could skip numbers for blank ones.)
    urls = [url.strip() for url in GROUNDING_URLS if url and url.strip()]
    if not urls:
        return ""
    cache_key = tuple(sorted(urls))
    # Serve from cache when this exact URL set was already fetched.
    if cache_key in _url_content_cache:
        return _url_content_cache[cache_key]
    context_parts = []
    for i, url in enumerate(urls, 1):
        content = fetch_url_content(url)
        context_parts.append(f"Context from URL {i} ({url}):\n{content}")
    result = "\n\n" + "\n\n".join(context_parts) + "\n\n"
    # Cache the result
    _url_content_cache[cache_key] = result
    return result
import re

# Compiled once at import time; matches http/https URLs up to the first
# whitespace or URL-terminating punctuation character.
_URL_PATTERN = re.compile(r'https?://[^\s<>"{}|\^`\[\]"]+')


def extract_urls_from_text(text):
    """Return every http(s) URL found in *text*, in order of appearance."""
    return _URL_PATTERN.findall(text)
# Initialize RAG context if enabled
if ENABLE_VECTOR_RAG and RAG_DATA:
    try:
        import faiss
        import numpy as np
        import base64

        class SimpleRAGContext:
            """Holds a deserialized FAISS index plus its chunk metadata."""

            def __init__(self, rag_data):
                # Deserialize FAISS index. Bug fix: faiss.deserialize_index
                # expects a numpy uint8 array, not raw bytes -- passing bytes
                # raises inside faiss, so convert via np.frombuffer first.
                index_bytes = base64.b64decode(rag_data['index_base64'])
                self.index = faiss.deserialize_index(
                    np.frombuffer(index_bytes, dtype=np.uint8)
                )
                # Restore chunks and the id ordering used for index lookups.
                self.chunks = rag_data['chunks']
                self.chunk_ids = rag_data['chunk_ids']

            def get_context(self, query, max_chunks=3):
                """Get relevant context - simplified placeholder version.

                A real implementation would embed *query* and run a
                similarity search against self.index; this stub returns a
                fixed marker string.
                """
                return "\n\n[RAG context would be retrieved here based on similarity search]\n\n"

        rag_context_provider = SimpleRAGContext(RAG_DATA)
    except Exception as e:
        # Any failure (missing faiss, corrupt payload) downgrades to no-RAG mode.
        print(f"Failed to initialize RAG: {e}")
        rag_context_provider = None
else:
    rag_context_provider = None
def generate_response(message, history):
    """Generate an assistant reply for *message* via the OpenRouter chat API.

    Builds a system prompt from SYSTEM_PROMPT plus grounding context, optional
    RAG context, and content fetched from URLs found in the message; replays
    *history* (Gradio dict format or legacy tuple format); then POSTs the
    conversation to OpenRouter.

    Returns:
        The model's reply text, or a human-readable error string on any
        failure (missing key, HTTP error, network exception).
    """
    if not API_KEY:
        return "Please set your OPENROUTER_API_KEY in the Space settings."
    # Get grounding context
    grounding_context = get_grounding_context()
    # Add RAG context if available
    if ENABLE_VECTOR_RAG and rag_context_provider:
        rag_context = rag_context_provider.get_context(message)
        if rag_context:
            grounding_context += rag_context
    # If dynamic URLs are enabled, check message for URLs to fetch
    if ENABLE_DYNAMIC_URLS:
        urls_in_message = extract_urls_from_text(message)
        if urls_in_message:
            # Fetch content from URLs mentioned in the message
            dynamic_context_parts = []
            for url in urls_in_message[:3]:  # Limit to 3 URLs per message
                content = fetch_url_content(url)
                dynamic_context_parts.append(f"\n\nDynamic context from {url}:\n{content}")
            if dynamic_context_parts:
                grounding_context += "\n".join(dynamic_context_parts)
    # Build enhanced system prompt with grounding context
    enhanced_system_prompt = SYSTEM_PROMPT + grounding_context
    # Build messages array for the API
    messages = [{"role": "system", "content": enhanced_system_prompt}]
    # Add conversation history - compatible with Gradio 5.x format
    for chat in history:
        if isinstance(chat, dict):
            # New format: {"role": "user"|"assistant", "content": "..."}
            messages.append(chat)
        else:
            # Legacy format: ("user msg", "bot msg")
            user_msg, bot_msg = chat
            messages.append({"role": "user", "content": user_msg})
            if bot_msg:
                messages.append({"role": "assistant", "content": bot_msg})
    # Add current message
    messages.append({"role": "user", "content": message})
    # Make API request
    try:
        response = requests.post(
            url="https://openrouter.ai/api/v1/chat/completions",
            headers={
                "Authorization": f"Bearer {API_KEY}",
                "Content-Type": "application/json"
            },
            json={
                "model": MODEL,
                "messages": messages,
                "temperature": 0.7,
                "max_tokens": 500
            },
            # Bug fix: without a timeout, a stalled connection hangs the
            # Gradio worker indefinitely.
            timeout=30
        )
        if response.status_code == 200:
            return response.json()['choices'][0]['message']['content']
        else:
            return f"Error: {response.status_code} - {response.text}"
    except Exception as e:
        return f"Error: {str(e)}"
# Access code verification
# NOTE(review): this gr.State is created outside the gr.Blocks context below
# but is wired as an event output inside it — confirm Gradio registers it;
# the module-level flag below is the fallback actually checked at runtime.
access_granted = gr.State(False)
_access_granted_global = False  # Global fallback
def verify_access_code(code):
    """Validate the submitted access code and toggle UI visibility.

    Returns three gr.update objects for: the error markdown, the chat
    section, and the access_granted State. When no ACCESS_CODE is configured
    the gate is always open.
    """
    global _access_granted_global
    # Unlock when no code is required, or when the right code was supplied.
    if not ACCESS_CODE or code == ACCESS_CODE:
        _access_granted_global = True
        return gr.update(visible=False), gr.update(visible=True), gr.update(value=True)
    # Wrong code: show the error, keep the chat hidden.
    _access_granted_global = False
    return gr.update(visible=True, value="❌ Incorrect access code. Please try again."), gr.update(visible=False), gr.update(value=False)
def protected_generate_response(message, history):
    """Gate generate_response behind the access-code check.

    Uses the module-level _access_granted_global flag (set by
    verify_access_code) rather than the gr.State, which is the reliable
    signal at call time.
    """
    gate_enabled = bool(ACCESS_CODE)
    if gate_enabled and not _access_granted_global:
        return "Please enter the access code to continue."
    return generate_response(message, history)
# Create interface with access code protection
with gr.Blocks(title=SPACE_NAME) as demo:
    gr.Markdown(f"# {SPACE_NAME}")
    gr.Markdown(SPACE_DESCRIPTION)
    # Access code section (shown only if ACCESS_CODE is set)
    with gr.Column(visible=bool(ACCESS_CODE)) as access_section:
        gr.Markdown("### 🔐 Access Required")
        gr.Markdown("Please enter the access code provided by your instructor:")
        access_input = gr.Textbox(
            label="Access Code",
            placeholder="Enter access code...",
            type="password"
        )
        access_btn = gr.Button("Submit", variant="primary")
        # Hidden until verify_access_code reports a wrong code.
        access_error = gr.Markdown(visible=False)
    # Main chat interface (hidden until access granted)
    with gr.Column(visible=not bool(ACCESS_CODE)) as chat_section:
        chat_interface = gr.ChatInterface(
            fn=protected_generate_response,
            title="",  # Title already shown above
            description="",  # Description already shown above
            examples=None
        )
    # Connect access verification: both clicking Submit and pressing Enter in
    # the textbox run verify_access_code; its three outputs toggle the error
    # message, the chat section's visibility, and the access_granted State.
    if ACCESS_CODE:
        access_btn.click(
            verify_access_code,
            inputs=[access_input],
            outputs=[access_error, chat_section, access_granted]
        )
        access_input.submit(
            verify_access_code,
            inputs=[access_input],
            outputs=[access_error, chat_section, access_granted]
        )
if __name__ == "__main__":
    demo.launch()
|