File size: 29,504 Bytes
a58aa9c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
import gradio as gr
import os
import requests
import json
import re
from bs4 import BeautifulSoup
from datetime import datetime
import tempfile

# Configuration
SPACE_NAME = "search-aid"  # Title rendered at the top of the Gradio app
SPACE_DESCRIPTION = ""  # Optional markdown subtitle (empty for this Space)
# System prompt prepended (with grounding context) to every API request.
SYSTEM_PROMPT = """You are an advanced research assistant specializing in academic literature search and analysis. Your expertise includes finding peer-reviewed sources, critically evaluating research methodology, synthesizing insights across multiple papers, and providing properly formatted citations. When responding, ground all claims in specific sources from provided URL contexts, distinguish between direct evidence and analytical interpretation, and highlight any limitations or conflicting findings. Use clear, accessible language that makes complex research understandable, and suggest related areas of inquiry when relevant. Your goal is to be a knowledgeable research partner who helps users navigate academic information with precision and clarity."""
MODEL = "google/gemini-2.0-flash-001"  # OpenRouter model identifier
GROUNDING_URLS = []  # Static URLs whose content is always added as context
# Get access code from environment variable for security
ACCESS_CODE = os.environ.get("SPACE_ACCESS_CODE", "")
ENABLE_DYNAMIC_URLS = True  # Fetch URLs found inside user messages
ENABLE_VECTOR_RAG = False  # Vector-store RAG (requires RAG_DATA payload)
ENABLE_WEB_SEARCH = True  # DuckDuckGo search for most user queries
RAG_DATA = None  # Serialized FAISS index + chunks; only used when RAG enabled

# Get API key from environment - customizable variable name with validation
API_KEY = os.environ.get("OPENROUTER_API_KEY")
if API_KEY:
    API_KEY = API_KEY.strip()  # Remove any whitespace
    if not API_KEY:  # Check if empty after stripping
        API_KEY = None  # Normalize whitespace-only values to "not configured"
        API_KEY = None
        
# API Key validation and logging
def validate_api_key():
    """Validate the configured OpenRouter API key and log setup guidance.

    Returns:
        bool: True when a key is present (even if its prefix looks wrong,
        so the request is still attempted), False when no key is set.
    """
    if not API_KEY:
        print("⚠️  API KEY CONFIGURATION ERROR:")
        print("   Variable name: OPENROUTER_API_KEY")
        print("   Status: Not set or empty")
        print("   Action needed: Set 'OPENROUTER_API_KEY' in HuggingFace Space secrets")
        print("   Expected format: sk-or-xxxxxxxxxx")
        return False
    if not API_KEY.startswith('sk-or-'):
        # Mask the key so the secret is never written to logs in full.
        # (The original printed the literal text "{API_KEY[:10]}..." because of
        # doubled braces, and leaked short keys verbatim in the else branch.)
        masked = f"{API_KEY[:10]}..." if len(API_KEY) > 10 else "***"
        print("⚠️  API KEY FORMAT WARNING:")
        print("   Variable name: OPENROUTER_API_KEY")
        print(f"   Current value: {masked}")
        print("   Expected format: sk-or-xxxxxxxxxx")
        print("   Note: OpenRouter keys should start with 'sk-or-'")
        return True  # Still try to use it
    print("✅ API Key configured successfully")
    print("   Variable: OPENROUTER_API_KEY")
    print("   Format: Valid OpenRouter key")
    return True

# Validate on startup
API_KEY_VALID = validate_api_key()

def validate_url_domain(url):
    """Return True when *url* parses to a network location containing a dot.

    This is a cheap sanity check (it rejects e.g. "not-a-url" and
    "http://localhost"), not full URL validation.

    Args:
        url: Candidate URL string.

    Returns:
        bool: True for a plausibly valid URL, False otherwise.
    """
    from urllib.parse import urlparse
    try:
        parsed = urlparse(url)
    except (ValueError, TypeError, AttributeError):
        # Malformed or non-string input is simply treated as invalid;
        # the original bare `except:` also swallowed KeyboardInterrupt.
        return False
    return bool(parsed.netloc and '.' in parsed.netloc)

def fetch_url_content(url):
    """Enhanced URL content fetching with improved compatibility and error handling.

    Fetches *url* with browser-like headers, strips boilerplate elements,
    extracts the main content area, collapses whitespace, and truncates to
    roughly 4000 characters (preferring a sentence boundary).

    Args:
        url: The URL to fetch.

    Returns:
        str: Cleaned page text, or a human-readable error string on failure
        (this function never raises).
    """
    if not validate_url_domain(url):
        return f"Invalid URL format: {url}"
    
    try:
        # Enhanced headers for better compatibility
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'en-US,en;q=0.5',
            'Accept-Encoding': 'gzip, deflate',
            'Connection': 'keep-alive'
        }
        
        response = requests.get(url, timeout=15, headers=headers)
        response.raise_for_status()
        soup = BeautifulSoup(response.content, 'html.parser')
        
        # Enhanced content cleaning: drop scripts, styles, and page chrome
        for element in soup(["script", "style", "nav", "header", "footer", "aside", "form", "button"]):
            element.decompose()
        
        # Extract main content preferentially; fall back to the whole document
        main_content = soup.find('main') or soup.find('article') or soup.find('div', class_=lambda x: bool(x and 'content' in x.lower())) or soup
        text = main_content.get_text()
        
        # Enhanced text cleaning: collapse lines and double-space runs into
        # single-space-joined chunks, dropping fragments of 2 chars or less
        lines = (line.strip() for line in text.splitlines())
        chunks = (phrase.strip() for line in lines for phrase in line.split("  "))
        text = ' '.join(chunk for chunk in chunks if chunk and len(chunk) > 2)
        
        # Smart truncation - try to end at sentence boundaries
        if len(text) > 4000:
            truncated = text[:4000]
            last_period = truncated.rfind('.')
            if last_period > 3000:  # If we can find a reasonable sentence break
                text = truncated[:last_period + 1]
            else:
                text = truncated + "..."
        
        return text if text.strip() else "No readable content found at this URL"
        
    except requests.exceptions.Timeout:
        return f"Timeout error fetching {url} (15s limit exceeded)"
    except requests.exceptions.RequestException as e:
        # Covers connection errors and non-2xx statuses (raise_for_status)
        return f"Error fetching {url}: {str(e)}"
    except Exception as e:
        # Last-resort guard so a parsing failure never crashes the caller
        return f"Error processing content from {url}: {str(e)}"

def extract_urls_from_text(text):
    """Extract plausible http(s) URLs from free text.

    Trailing sentence punctuation is stripped from each match, and obviously
    bogus matches (no dot, or 10 characters or fewer) are discarded.

    Args:
        text: Arbitrary text that may contain URLs.

    Returns:
        list[str]: Validated URLs in order of appearance.
    """
    # `re` is already imported at module level; the old local import was redundant.
    url_pattern = r'https?://[^\s<>"{}|\\^`\[\]"]+'
    validated_urls = []
    for url in re.findall(url_pattern, text):
        # Remove punctuation that commonly trails a URL in prose.
        url = url.rstrip('.,!?;:')
        # Require a dot and a minimal length to filter out junk matches.
        if '.' in url and len(url) > 10:
            validated_urls.append(url)
    return validated_urls

# Global cache for URL content to avoid re-crawling in generated spaces
# Maps a tuple of sorted, non-empty grounding URLs -> combined context string.
_url_content_cache = {}

def get_grounding_context():
    """Build the combined context string for all configured grounding URLs.

    Results are memoized in ``_url_content_cache``, keyed by the sorted
    tuple of non-empty URLs, so each configuration is crawled only once.

    Returns:
        str: Concatenated per-URL context sections wrapped in blank lines,
        or "" when no grounding URLs are configured or all are blank.
    """
    if not GROUNDING_URLS:
        return ""

    # Memoization key: order-independent view of the non-blank URLs.
    cache_key = tuple(sorted(u for u in GROUNDING_URLS if u and u.strip()))
    cached = _url_content_cache.get(cache_key)
    if cached is not None:
        return cached

    sections = [
        f"Context from URL {idx} ({url}):\n{fetch_url_content(url.strip())}"
        for idx, url in enumerate(GROUNDING_URLS, 1)
        if url.strip()
    ]
    result = "\n\n" + "\n\n".join(sections) + "\n\n" if sections else ""

    _url_content_cache[cache_key] = result
    return result

def export_conversation_to_markdown(conversation_history):
    """Render a conversation history as a markdown document.

    Accepts both the modern message format ({"role": ..., "content": ...})
    and the legacy pair format (["user msg", "assistant msg"]).

    Args:
        conversation_history: Sequence of message dicts or (user, assistant)
            pairs; anything else is skipped.

    Returns:
        str: A markdown transcript, or a placeholder string when the
        history is empty.
    """
    if not conversation_history:
        return "No conversation to export."

    timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    parts = [f"""# Conversation Export
Generated on: {timestamp}

---

"""]

    pair_number = 0
    for entry in conversation_history:
        if isinstance(entry, dict):
            role = entry.get('role', 'unknown')
            content = entry.get('content', '')
            if role == 'user':
                pair_number += 1
                parts.append(f"## User Message {pair_number}\n\n{content}\n\n")
            elif role == 'assistant':
                # Assistant responses reuse the number of the preceding user turn.
                parts.append(f"## Assistant Response {pair_number}\n\n{content}\n\n---\n\n")
        elif isinstance(entry, (list, tuple)) and len(entry) >= 2:
            # Legacy pair format: (user message, assistant message)
            pair_number += 1
            user_msg, assistant_msg = entry[0], entry[1]
            if user_msg:
                parts.append(f"## User Message {pair_number}\n\n{user_msg}\n\n")
            if assistant_msg:
                parts.append(f"## Assistant Response {pair_number}\n\n{assistant_msg}\n\n---\n\n")

    return ''.join(parts)

# Initialize RAG context if enabled.
# With ENABLE_VECTOR_RAG = False (the current config) this always falls
# through to the else branch, leaving rag_context_provider = None.
if ENABLE_VECTOR_RAG and RAG_DATA:
    try:
        import faiss
        import numpy as np
        import base64
        
        class SimpleRAGContext:
            """Minimal retrieval wrapper around a serialized FAISS index."""

            def __init__(self, rag_data):
                # Deserialize FAISS index from its base64-encoded byte form
                index_bytes = base64.b64decode(rag_data['index_base64'])
                self.index = faiss.deserialize_index(index_bytes)
                
                # Restore chunks and mappings
                self.chunks = rag_data['chunks']
                self.chunk_ids = rag_data['chunk_ids']
            
            def get_context(self, query, max_chunks=3):
                """Get relevant context - simplified version"""
                # In production, you'd compute query embedding here
                # For now, return a simple message
                return "\n\n[RAG context would be retrieved here based on similarity search]\n\n"
        
        rag_context_provider = SimpleRAGContext(RAG_DATA)
    except Exception as e:
        # RAG is best-effort: a missing faiss install or bad payload just
        # disables retrieval rather than crashing the app at import time.
        print(f"Failed to initialize RAG: {e}")
        rag_context_provider = None
else:
    rag_context_provider = None

def generate_response(message, history):
    """Generate a chat response via the OpenRouter API.

    Builds a system prompt augmented with grounding context (static URLs,
    optional RAG, URLs found in the message, and optional web search),
    appends the conversation history plus the new message, and posts a
    chat-completion request. All failures are converted to user-facing
    markdown error strings; this function never raises.

    Args:
        message: Latest user message.
        history: Prior turns as {"role", "content"} dicts or legacy
            (user, assistant) pairs.

    Returns:
        str: The model reply, or a markdown-formatted error message.
    """
    
    # Enhanced API key validation with helpful messages
    if not API_KEY:
        error_msg = f"πŸ”‘ **API Key Required**\n\n"
        error_msg += f"Please configure your OpenRouter API key:\n"
        error_msg += f"1. Go to Settings (βš™οΈ) in your HuggingFace Space\n"
        error_msg += f"2. Click 'Variables and secrets'\n"
        error_msg += f"3. Add secret: **OPENROUTER_API_KEY**\n"
        error_msg += f"4. Value: Your OpenRouter API key (starts with `sk-or-`)\n\n"
        error_msg += f"Get your API key at: https://openrouter.ai/keys"
        print(f"❌ API request failed: No API key configured for OPENROUTER_API_KEY")
        return error_msg
    
    # Get grounding context (cached per URL-set; "" when no URLs configured)
    grounding_context = get_grounding_context()
    
    # Add RAG context if available
    if ENABLE_VECTOR_RAG and rag_context_provider:
        rag_context = rag_context_provider.get_context(message)
        if rag_context:
            grounding_context += rag_context
    
    # If dynamic URLs are enabled, check message for URLs to fetch
    if ENABLE_DYNAMIC_URLS:
        urls_in_message = extract_urls_from_text(message)
        if urls_in_message:
            # Fetch content from URLs mentioned in the message
            dynamic_context_parts = []
            for url in urls_in_message[:3]:  # Limit to 3 URLs per message
                content = fetch_url_content(url)
                dynamic_context_parts.append(f"\n\nDynamic context from {url}:\n{content}")
            if dynamic_context_parts:
                grounding_context += "\n".join(dynamic_context_parts)
    
    # If web search is enabled, use it for most queries (excluding code blocks and URLs)
    if ENABLE_WEB_SEARCH:
        should_search = True
        
        # Skip search for messages that are primarily code blocks
        import re
        if re.search(r'```[\s\S]*```', message):
            should_search = False
        
        # Skip search for messages that are primarily URLs
        urls_in_message = extract_urls_from_text(message)
        if urls_in_message and len(' '.join(urls_in_message)) > len(message) * 0.5:
            should_search = False
        
        # Skip search for very short messages (likely greetings)
        if len(message.strip()) < 5:
            should_search = False
        
        if should_search:
            # Use the entire message as search query, cleaning it up
            search_query = message.strip()
            try:
                # Perform web search using crawl4ai
                import urllib.parse
                import asyncio
                
                async def search_with_crawl4ai(search_query):
                    # NOTE(review): WebCrawler/astart/arun/aclose match an older
                    # crawl4ai API — confirm against the installed version.
                    try:
                        from crawl4ai import WebCrawler
                        
                        # Create search URL for DuckDuckGo
                        encoded_query = urllib.parse.quote_plus(search_query)
                        search_url = f"https://duckduckgo.com/html/?q={encoded_query}"
                        
                        # Initialize crawler
                        crawler = WebCrawler(verbose=False)
                        
                        try:
                            # Start the crawler
                            await crawler.astart()
                            
                            # Crawl the search results
                            result = await crawler.arun(url=search_url)
                            
                            if result.success:
                                # Extract text content from search results
                                content = result.cleaned_html if result.cleaned_html else result.markdown
                                
                                # Clean and truncate the content
                                if content:
                                    # Remove excessive whitespace and limit length
                                    lines = [line.strip() for line in content.split('\n') if line.strip()]
                                    cleaned_content = '\n'.join(lines)
                                    
                                    # Truncate to reasonable length for context
                                    if len(cleaned_content) > 2000:
                                        cleaned_content = cleaned_content[:2000] + "..."
                                    
                                    return cleaned_content
                                else:
                                    return "No content extracted from search results"
                            else:
                                return f"Search failed: {result.error_message if hasattr(result, 'error_message') else 'Unknown error'}"
                                
                        finally:
                            # Clean up the crawler
                            await crawler.aclose()
                            
                    except ImportError:
                        # Fallback to simple DuckDuckGo search without crawl4ai
                        encoded_query = urllib.parse.quote_plus(search_query)
                        search_url = f"https://duckduckgo.com/html/?q={encoded_query}"
                        
                        # Use basic fetch as fallback (synchronous requests call
                        # inside an async function; blocks the loop briefly)
                        response = requests.get(search_url, headers={'User-Agent': 'Mozilla/5.0'}, timeout=10)
                        if response.status_code == 200:
                            from bs4 import BeautifulSoup
                            soup = BeautifulSoup(response.content, 'html.parser')
                            
                            # Remove script and style elements
                            for script in soup(["script", "style", "nav", "header", "footer"]):
                                script.decompose()
                            
                            # Get text content
                            text = soup.get_text()
                            
                            # Clean up whitespace
                            lines = (line.strip() for line in text.splitlines())
                            chunks = (phrase.strip() for line in lines for phrase in line.split("  "))
                            text = ' '.join(chunk for chunk in chunks if chunk)
                            
                            # Truncate to ~2000 characters
                            if len(text) > 2000:
                                text = text[:2000] + "..."
                            
                            return text
                        else:
                            return f"Failed to fetch search results: {response.status_code}"
                
                # Run the async search
                # NOTE(review): asyncio.run assumes no event loop is already
                # running in this thread — confirm for the Gradio runtime used.
                if hasattr(asyncio, 'run'):
                    search_result = asyncio.run(search_with_crawl4ai(search_query))
                else:
                    # Fallback for older Python versions
                    loop = asyncio.new_event_loop()
                    asyncio.set_event_loop(loop)
                    try:
                        search_result = loop.run_until_complete(search_with_crawl4ai(search_query))
                    finally:
                        loop.close()
                
                grounding_context += f"\n\nWeb search results for '{search_query}':\n{search_result}"
            except Exception as e:
                # Enhanced fallback with better error handling: try fetching
                # any URLs embedded in the query directly instead of searching
                urls = extract_urls_from_text(search_query)
                if urls:
                    fallback_results = []
                    for url in urls[:2]:  # Limit to 2 URLs for fallback
                        content = fetch_url_content(url)
                        fallback_results.append(f"Content from {url}:\n{content[:500]}...")
                    grounding_context += f"\n\nWeb search fallback for '{search_query}':\n" + "\n\n".join(fallback_results)
                else:
                    grounding_context += f"\n\nWeb search requested for '{search_query}' but search functionality is unavailable"
    
    # Build enhanced system prompt with grounding context
    enhanced_system_prompt = SYSTEM_PROMPT + grounding_context
    
    # Build messages array for the API
    messages = [{"role": "system", "content": enhanced_system_prompt}]
    
    # Add conversation history - handle both modern messages format and legacy tuples
    for chat in history:
        if isinstance(chat, dict):
            # Modern format: {"role": "user", "content": "..."} or {"role": "assistant", "content": "..."}
            messages.append(chat)
        elif isinstance(chat, (list, tuple)) and len(chat) >= 2:
            # Legacy format: ["user msg", "assistant msg"] or ("user msg", "assistant msg")
            user_msg, assistant_msg = chat[0], chat[1]
            if user_msg:
                messages.append({"role": "user", "content": user_msg})
            if assistant_msg:
                messages.append({"role": "assistant", "content": assistant_msg})
    
    # Add current message
    messages.append({"role": "user", "content": message})
    
    # Make API request with enhanced error handling
    try:
        print(f"πŸ”„ Making API request to OpenRouter...")
        print(f"   Model: {MODEL}")
        print(f"   Messages: {len(messages)} in conversation")
        
        response = requests.post(
            url="https://openrouter.ai/api/v1/chat/completions",
            headers={
                "Authorization": f"Bearer {API_KEY}",
                "Content-Type": "application/json",
                "HTTP-Referer": "https://huggingface.co",  # Required by some providers
                "X-Title": "HuggingFace Space"  # Helpful for tracking
            },
            json={
                "model": MODEL,
                "messages": messages,
                "temperature": 0.7,
                "max_tokens": 1500
            },
            timeout=30
        )
        
        print(f"πŸ“‘ API Response: {response.status_code}")
        
        if response.status_code == 200:
            try:
                result = response.json()
                
                # Enhanced validation of API response structure
                if 'choices' not in result or not result['choices']:
                    print(f"⚠️  API response missing choices: {result}")
                    return "API Error: No response choices available"
                elif 'message' not in result['choices'][0]:
                    print(f"⚠️  API response missing message: {result}")
                    return "API Error: No message in response"
                elif 'content' not in result['choices'][0]['message']:
                    print(f"⚠️  API response missing content: {result}")
                    return "API Error: No content in message"
                else:
                    content = result['choices'][0]['message']['content']
                    
                    # Check for empty content
                    if not content or content.strip() == "":
                        print(f"⚠️  API returned empty content")
                        return "API Error: Empty response content"
                    
                    print(f"βœ… API request successful")
                    return content
                    
            except (KeyError, IndexError, json.JSONDecodeError) as e:
                print(f"❌ Failed to parse API response: {str(e)}")
                return f"API Error: Failed to parse response - {str(e)}"
        elif response.status_code == 401:
            error_msg = f"πŸ” **Authentication Error**\n\n"
            error_msg += f"Your API key appears to be invalid or expired.\n\n"
            error_msg += f"**Troubleshooting:**\n"
            error_msg += f"1. Check that your **OPENROUTER_API_KEY** secret is set correctly\n"
            error_msg += f"2. Verify your API key at: https://openrouter.ai/keys\n"
            error_msg += f"3. Ensure your key starts with `sk-or-`\n"
            error_msg += f"4. Check that you have credits on your OpenRouter account"
            print(f"❌ API authentication failed: {response.status_code} - {response.text[:200]}")
            return error_msg
        elif response.status_code == 429:
            error_msg = f"⏱️ **Rate Limit Exceeded**\n\n"
            error_msg += f"Too many requests. Please wait a moment and try again.\n\n"
            error_msg += f"**Troubleshooting:**\n"
            error_msg += f"1. Wait 30-60 seconds before trying again\n"
            error_msg += f"2. Check your OpenRouter usage limits\n"
            error_msg += f"3. Consider upgrading your OpenRouter plan"
            print(f"❌ Rate limit exceeded: {response.status_code}")
            return error_msg
        elif response.status_code == 400:
            try:
                error_data = response.json()
                error_message = error_data.get('error', {}).get('message', 'Unknown error')
            except:
                error_message = response.text
            
            error_msg = f"⚠️ **Request Error**\n\n"
            error_msg += f"The API request was invalid:\n"
            error_msg += f"`{error_message}`\n\n"
            if "model" in error_message.lower():
                error_msg += f"**Model Issue:** The model `{MODEL}` may not be available.\n"
                error_msg += f"Try switching to a different model in your Space configuration."
            print(f"❌ Bad request: {response.status_code} - {error_message}")
            return error_msg
        else:
            error_msg = f"🚫 **API Error {response.status_code}**\n\n"
            error_msg += f"An unexpected error occurred. Please try again.\n\n"
            error_msg += f"If this persists, check:\n"
            error_msg += f"1. OpenRouter service status\n"
            error_msg += f"2. Your API key and credits\n"
            error_msg += f"3. The model availability"
            print(f"❌ API error: {response.status_code} - {response.text[:200]}")
            return error_msg
            
    except requests.exceptions.Timeout:
        error_msg = f"⏰ **Request Timeout**\n\n"
        error_msg += f"The API request took too long (30s limit).\n\n"
        error_msg += f"**Troubleshooting:**\n"
        error_msg += f"1. Try again with a shorter message\n"
        error_msg += f"2. Check your internet connection\n"
        error_msg += f"3. Try a different model"
        print(f"❌ Request timeout after 30 seconds")
        return error_msg
    except requests.exceptions.ConnectionError:
        error_msg = f"🌐 **Connection Error**\n\n"
        error_msg += f"Could not connect to OpenRouter API.\n\n"
        error_msg += f"**Troubleshooting:**\n"
        error_msg += f"1. Check your internet connection\n"
        error_msg += f"2. Check OpenRouter service status\n"
        error_msg += f"3. Try again in a few moments"
        print(f"❌ Connection error to OpenRouter API")
        return error_msg
    except Exception as e:
        # Final catch-all so the chat UI always gets a string back
        error_msg = f"❌ **Unexpected Error**\n\n"
        error_msg += f"An unexpected error occurred:\n"
        error_msg += f"`{str(e)}`\n\n"
        error_msg += f"Please try again or contact support if this persists."
        print(f"❌ Unexpected error: {str(e)}")
        return error_msg

# Access code verification state.
# NOTE(review): gr.State tracks access per-session, but the chat callback
# reads the module-level flag instead — so access is effectively shared
# across all concurrent users once any one of them unlocks it. Confirm
# whether per-session gating is intended.
access_granted = gr.State(False)
_access_granted_global = False  # Global fallback

def verify_access_code(code):
    """Check *code* against ACCESS_CODE and toggle the UI accordingly.

    When no access code is configured, access is always granted.

    Args:
        code: The code typed by the user.

    Returns:
        tuple: gr.update objects for (error markdown, chat section,
        access-granted state).
    """
    global _access_granted_global
    granted = (not ACCESS_CODE) or code == ACCESS_CODE
    _access_granted_global = granted
    if granted:
        # Hide the error, reveal the chat section, persist the grant.
        return gr.update(visible=False), gr.update(visible=True), gr.update(value=True)
    return (
        gr.update(visible=True, value="❌ Incorrect access code. Please try again."),
        gr.update(visible=False),
        gr.update(value=False),
    )

def protected_generate_response(message, history):
    """Gate generate_response behind the access code, when one is configured.

    Args:
        message: Latest user message.
        history: Conversation history passed through unchanged.

    Returns:
        str: The model reply, or a prompt to enter the access code.
    """
    locked = bool(ACCESS_CODE) and not _access_granted_global
    if locked:
        return "Please enter the access code to continue."
    return generate_response(message, history)

def export_conversation(history):
    """Write the conversation transcript to a temp .md file for download.

    Args:
        history: Conversation history in either supported format.

    Returns:
        gr.update: Hidden file component when there is nothing to export,
        otherwise visible and pointing at the newly written file.
    """
    if not history:
        return gr.update(visible=False)

    content = export_conversation_to_markdown(history)

    # delete=False so the file survives for Gradio to serve it.
    with tempfile.NamedTemporaryFile(mode='w', suffix='.md', delete=False) as handle:
        handle.write(content)
        file_path = handle.name

    return gr.update(value=file_path, visible=True)

# Configuration status display
def get_configuration_status():
    """Summarize the Space's runtime configuration as markdown lines.

    Returns:
        str: One markdown bullet per configured feature, joined by newlines.
    """
    lines = []

    if API_KEY_VALID:
        lines.append("βœ… **API Key:** Configured and valid")
    else:
        lines.append("❌ **API Key:** Not configured - Set `OPENROUTER_API_KEY` in Space secrets")

    lines.append(f"πŸ€– **Model:** {MODEL}")
    lines.append("🌑️ **Temperature:** 0.7")
    lines.append("πŸ“ **Max Tokens:** 1500")

    if GROUNDING_URLS:
        lines.append(f"πŸ”— **URL Grounding:** {len(GROUNDING_URLS)} URLs configured")
    if ENABLE_DYNAMIC_URLS:
        lines.append("πŸ”„ **Dynamic URLs:** Enabled")
    if ENABLE_WEB_SEARCH:
        lines.append("πŸ” **Web Search:** Enabled")
    if ENABLE_VECTOR_RAG:
        lines.append("πŸ“š **Document RAG:** Enabled")

    lines.append("πŸ” **Access Control:** Enabled" if ACCESS_CODE else "🌐 **Access:** Public")

    return "\n".join(lines)

# Create interface with access code protection.
# Layout: status accordion (always visible), access-code column (only when
# ACCESS_CODE is set), and the chat column (hidden until access is granted).
with gr.Blocks(title=SPACE_NAME) as demo:
    gr.Markdown(f"# {SPACE_NAME}")
    gr.Markdown(SPACE_DESCRIPTION)
    
    # Configuration status (always visible; opened automatically when the
    # API key is missing so the setup instructions are front and center)
    with gr.Accordion("πŸ“Š Configuration Status", open=not API_KEY_VALID):
        gr.Markdown(get_configuration_status())
    
    # Access code section (shown only if ACCESS_CODE is set)
    with gr.Column(visible=bool(ACCESS_CODE)) as access_section:
        gr.Markdown("### πŸ” Access Required")
        gr.Markdown("Please enter the access code provided by your instructor:")
        
        access_input = gr.Textbox(
            label="Access Code",
            placeholder="Enter access code...",
            type="password"
        )
        access_btn = gr.Button("Submit", variant="primary")
        access_error = gr.Markdown(visible=False)
    
    # Main chat interface (hidden until access granted)
    with gr.Column(visible=not bool(ACCESS_CODE)) as chat_section:
        chat_interface = gr.ChatInterface(
            fn=protected_generate_response,
            title="",  # Title already shown above
            description="",  # Description already shown above
            examples=None,
            type="messages"  # Use modern message format for better compatibility
        )
        
        # Export functionality
        with gr.Row():
            export_btn = gr.Button("Export Conversation", variant="secondary", size="sm")
            export_file = gr.File(label="Download Conversation", visible=False)
        
        # Connect export functionality
        # NOTE(review): passing the ChatInterface itself as an input is unusual;
        # Gradio event inputs are normally components such as
        # chat_interface.chatbot — confirm this resolves to the message list.
        export_btn.click(
            export_conversation,
            inputs=[chat_interface],
            outputs=[export_file]
        )
    
    # Connect access verification (button click and Enter in the textbox
    # trigger the same handler with the same outputs)
    if ACCESS_CODE:
        access_btn.click(
            verify_access_code,
            inputs=[access_input],
            outputs=[access_error, chat_section, access_granted]
        )
        access_input.submit(
            verify_access_code,
            inputs=[access_input],
            outputs=[access_error, chat_section, access_granted]
        )

if __name__ == "__main__":
    demo.launch()