File size: 9,676 Bytes
1117ec5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
import gradio as gr
import os
import requests
import json
import asyncio
from crawl4ai import AsyncWebCrawler

# Configuration
# Display name and blurb rendered at the top of the Gradio page.
SPACE_NAME = "My Custom Space"
SPACE_DESCRIPTION = "An AI research assistant tailored for academic inquiry and scholarly dialogue"
# System prompt sent with every API request; grounding context is appended to it.
SYSTEM_PROMPT = """You are a research assistant that provides link-grounded information through Crawl4AI web fetching. Use MLA documentation for parenthetical citations and bibliographic entries. This assistant is designed for students and researchers conducting academic inquiry. Your main responsibilities include: analyzing academic sources, fact-checking claims with evidence, providing properly cited research summaries, and helping users navigate scholarly information. Ground all responses in provided URL contexts and any additional URLs you're instructed to fetch. Never rely on memory for factual claims."""
# OpenRouter model identifier used for chat completions.
MODEL = "google/gemma-3-27b-it"
# Static URLs crawled once and injected as grounding context (none configured here).
GROUNDING_URLS = []
# Get access code from environment variable for security
ACCESS_CODE = os.environ.get("SPACE_ACCESS_CODE", "")
# When True, URLs pasted into a chat message are fetched on the fly (max 3 per message).
ENABLE_DYNAMIC_URLS = True
# Vector RAG is disabled in this build; RAG_DATA would carry a serialized FAISS index.
ENABLE_VECTOR_RAG = False
RAG_DATA = None

# Get API key from environment - customizable variable name
API_KEY = os.environ.get("OPENROUTER_API_KEY")

async def fetch_url_content_async(url, crawler):
    """Fetch *url* with the given Crawl4AI crawler and return extracted text.

    Returns at most ~4000 characters of markdown (falling back to cleaned
    HTML). Never raises: any failure is reported as an
    ``"Error fetching ..."`` string so callers can embed it directly in
    the prompt context.
    """
    try:
        result = await crawler.arun(
            url=url,
            bypass_cache=True,
            word_count_threshold=10,
            excluded_tags=['script', 'style', 'nav', 'header', 'footer'],
            remove_overlay_elements=True
        )
        if not result.success:
            return f"Error fetching {url}: Failed to retrieve content"

        text = result.markdown or result.cleaned_html or ""
        if len(text) <= 4000:
            return text
        # Keep the prompt size bounded: cap each page at ~4000 characters.
        return text[:4000] + "..."
    except Exception as e:
        return f"Error fetching {url}: {str(e)}"

def fetch_url_content(url):
    """Blocking facade over fetch_url_content_async.

    Creates a fresh AsyncWebCrawler per call and drives it with
    asyncio.run. Any failure is returned as an "Error fetching ..."
    string rather than raised.
    """
    async def _crawl():
        async with AsyncWebCrawler(verbose=False) as crawler:
            return await fetch_url_content_async(url, crawler)

    try:
        return asyncio.run(_crawl())
    except Exception as exc:
        return f"Error fetching {url}: {str(exc)}"

# Global cache for URL content to avoid re-crawling in generated spaces.
# Keyed on the sorted tuple of non-empty grounding URLs; lives for the
# lifetime of the process.
_url_content_cache = {}

def get_grounding_context():
    """Fetch and format context from GROUNDING_URLS, with process-level caching.

    Returns a string of "Context from URL N (...)" sections wrapped in blank
    lines, or "" when no usable URLs are configured. Blank and None entries
    are skipped.
    """
    # Normalize once so the cache key and the fetch loop agree on exactly
    # which URLs are in play. (Previously the cache key filtered out
    # None/empty entries but the fetch loop did not, so a None entry in
    # GROUNDING_URLS crashed on url.strip().)
    urls = [u.strip() for u in GROUNDING_URLS if u and u.strip()]
    if not urls:
        return ""

    cache_key = tuple(sorted(urls))
    if cache_key in _url_content_cache:
        return _url_content_cache[cache_key]

    context_parts = [
        f"Context from URL {i} ({url}):\n{fetch_url_content(url)}"
        for i, url in enumerate(urls, 1)
    ]

    result = "\n\n" + "\n\n".join(context_parts) + "\n\n"

    # Cache so repeated chat turns do not re-crawl the same URL set.
    _url_content_cache[cache_key] = result
    return result

import re

# Compiled once at import time: an http(s) scheme followed by everything up
# to the first whitespace or common URL-delimiter character.
_URL_RE = re.compile(r'https?://[^\s<>"{}|\^`\[\]"]+')

def extract_urls_from_text(text):
    """Return all http(s) URLs found in *text*, in order of appearance.

    Trailing sentence punctuation (e.g. "see https://example.com.") is
    stripped, since in chat messages it is virtually never part of the URL
    and previously caused fetches of non-existent addresses. Parentheses
    are deliberately NOT stripped (they appear in real URLs, e.g. Wikipedia).
    """
    return [url.rstrip('.,;:!?') for url in _URL_RE.findall(text)]

# Initialize RAG context if enabled. With ENABLE_VECTOR_RAG = False above,
# this whole branch is skipped and rag_context_provider is set to None.
if ENABLE_VECTOR_RAG and RAG_DATA:
    try:
        import faiss
        import numpy as np
        import base64
        
        class SimpleRAGContext:
            """Minimal retrieval wrapper around a pre-built, serialized FAISS index."""
            def __init__(self, rag_data):
                # Deserialize FAISS index
                index_bytes = base64.b64decode(rag_data['index_base64'])
                self.index = faiss.deserialize_index(index_bytes)
                
                # Restore chunks and mappings
                self.chunks = rag_data['chunks']
                self.chunk_ids = rag_data['chunk_ids']
            
            def get_context(self, query, max_chunks=3):
                """Get relevant context - simplified version"""
                # NOTE(review): this is a stub — it ignores `query` and the
                # index entirely and returns placeholder text. A real
                # implementation would embed the query and search self.index.
                # In production, you'd compute query embedding here
                # For now, return a simple message
                return "\n\n[RAG context would be retrieved here based on similarity search]\n\n"
        
        rag_context_provider = SimpleRAGContext(RAG_DATA)
    except Exception as e:
        # Degrade gracefully: generate_response checks for None before use.
        print(f"Failed to initialize RAG: {e}")
        rag_context_provider = None
else:
    rag_context_provider = None

def generate_response(message, history):
    """Generate a chat reply via the OpenRouter chat-completions API.

    Parameters:
        message: the user's latest message (str).
        history: prior turns, either Gradio 5.x message dicts
            ({"role": ..., "content": ...}) or legacy (user, bot) tuples.

    Returns the assistant's reply text, or a human-readable error string.
    Never raises, so the chat UI always receives something to display.
    """
    if not API_KEY:
        return "Please set your OPENROUTER_API_KEY in the Space settings."

    # Static grounding: pre-configured URLs (cached after the first fetch).
    grounding_context = get_grounding_context()

    # Optional vector-RAG context.
    if ENABLE_VECTOR_RAG and rag_context_provider:
        rag_context = rag_context_provider.get_context(message)
        if rag_context:
            grounding_context += rag_context

    # Dynamic grounding: fetch URLs the user pasted into this message.
    if ENABLE_DYNAMIC_URLS:
        urls_in_message = extract_urls_from_text(message)
        if urls_in_message:
            dynamic_context_parts = []
            for url in urls_in_message[:3]:  # Limit to 3 URLs per message
                content = fetch_url_content(url)
                dynamic_context_parts.append(f"\n\nDynamic context from {url}:\n{content}")
            if dynamic_context_parts:
                grounding_context += "\n".join(dynamic_context_parts)

    # Build enhanced system prompt with grounding context
    enhanced_system_prompt = SYSTEM_PROMPT + grounding_context

    # Build messages array for the API
    messages = [{"role": "system", "content": enhanced_system_prompt}]

    # Add conversation history - compatible with Gradio 5.x format
    for chat in history:
        if isinstance(chat, dict):
            # New format: {"role": "user"/"assistant", "content": "..."}
            messages.append(chat)
        else:
            # Legacy format: ("user msg", "bot msg")
            user_msg, bot_msg = chat
            messages.append({"role": "user", "content": user_msg})
            if bot_msg:
                messages.append({"role": "assistant", "content": bot_msg})

    # Add current message
    messages.append({"role": "user", "content": message})

    # Make API request
    try:
        response = requests.post(
            url="https://openrouter.ai/api/v1/chat/completions",
            headers={
                "Authorization": f"Bearer {API_KEY}",
                "Content-Type": "application/json"
            },
            json={
                "model": MODEL,
                "messages": messages,
                "temperature": 0.7,
                "max_tokens": 500
            },
            # Fix: without a timeout a stalled connection hangs the UI forever.
            timeout=60
        )

        if response.status_code == 200:
            try:
                return response.json()['choices'][0]['message']['content']
            except (ValueError, KeyError, IndexError, TypeError) as e:
                # Fix: a 200 with an unexpected body used to raise KeyError
                # out of this function despite its never-raise contract.
                return f"Error: unexpected API response format ({e})"
        else:
            return f"Error: {response.status_code} - {response.text}"

    except Exception as e:
        return f"Error: {str(e)}"

# Access code verification
# NOTE(review): this gr.State is created outside any gr.Blocks context.
# Event handlers update the per-session copy via their outputs, but reading
# `access_granted.value` elsewhere only ever sees this initial value (False)
# — verify the gating in protected_generate_response behaves as intended.
access_granted = gr.State(False)

def verify_access_code(code):
    """Check *code* against ACCESS_CODE and toggle the UI accordingly.

    Returns a 3-tuple of (error-markdown update, chat-section update,
    granted flag). Access is granted when no access code is configured
    or when the submitted code matches exactly.
    """
    granted = (not ACCESS_CODE) or code == ACCESS_CODE
    if granted:
        return gr.update(visible=False), gr.update(visible=True), True
    return (
        gr.update(visible=True, value="❌ Incorrect access code. Please try again."),
        gr.update(visible=False),
        False,
    )

def protected_generate_response(message, history):
    """Gate generate_response behind the access code, when one is configured.

    NOTE(review): `access_granted.value` reads the gr.State's *initial*
    value (False); per-session updates from verify_access_code do not
    mutate it. With ACCESS_CODE set, this check likely refuses every
    request even after a correct code — the session state should be
    threaded in as an input instead. TODO confirm against Gradio docs.
    """
    # Check if access is granted via the global state
    if ACCESS_CODE and not access_granted.value:
        return "Please enter the access code to continue."
    return generate_response(message, history)

# Create interface with access code protection.
# Layout: an access-code form (visible only when ACCESS_CODE is set) above
# the chat interface (hidden until access is granted).
with gr.Blocks(title=SPACE_NAME) as demo:
    gr.Markdown(f"# {SPACE_NAME}")
    gr.Markdown(SPACE_DESCRIPTION)
    
    # Access code section (shown only if ACCESS_CODE is set)
    with gr.Column(visible=bool(ACCESS_CODE)) as access_section:
        gr.Markdown("### 🔐 Access Required")
        gr.Markdown("Please enter the access code provided by your instructor:")
        
        access_input = gr.Textbox(
            label="Access Code",
            placeholder="Enter access code...",
            type="password"
        )
        access_btn = gr.Button("Submit", variant="primary")
        access_error = gr.Markdown(visible=False)
    
    # Main chat interface (hidden until access granted)
    with gr.Column(visible=not bool(ACCESS_CODE)) as chat_section:
        chat_interface = gr.ChatInterface(
            fn=protected_generate_response,
            title="",  # Title already shown above
            description="",  # Description already shown above
            examples=["Hello! How can you help me?", "Tell me something interesting", "What can you do?"]
        )
    
    # Connect access verification: both the button click and pressing Enter
    # in the textbox run verify_access_code over the same outputs.
    # NOTE(review): `access_section` is not among the outputs, so the access
    # form stays visible even after a correct code — confirm this is intended.
    if ACCESS_CODE:
        access_btn.click(
            verify_access_code,
            inputs=[access_input],
            outputs=[access_error, chat_section, access_granted]
        )
        access_input.submit(
            verify_access_code,
            inputs=[access_input],
            outputs=[access_error, chat_section, access_granted]
        )

# Launch the Gradio app when run as a script (the Space's entry point).
if __name__ == "__main__":
    demo.launch()