Upload 3 files

- app.py +413 -320
- config.json +14 -10

app.py
CHANGED
@@ -10,39 +10,68 @@ import urllib.parse
[Left-hand column of the diff: lines removed from app.py. The export truncated most removed lines; the recoverable fragments are kept verbatim below, grouped under their hunk headers.]
- SPACE_NAME =
- SPACE_DESCRIPTION =
- # Default configuration values
- except:
- MODEL = "'anthropic/claude-3.5-sonnet'"
- THEME = "Ocean" # Gradio theme name
- GROUNDING_URLS = ["https://southstreetseaportmuseum.org/hidden-gems-clipper-ship-cards/"]
- ENABLE_DYNAMIC_URLS = True
@@ -53,21 +82,21 @@ def validate_api_key():
- print(f" Variable name:
- print(f" Action needed: Set ''
- print(f" Variable name:
- print(f" Current value: {API_KEY[:10]}..." if len(API_KEY) > 10 else API_KEY)
- print(f" Variable:
@@ -124,12 +153,13 @@ def fetch_url_content(url):
- text =
@@ -142,7 +172,6 @@ def fetch_url_content(url):
- import re
@@ -162,18 +191,26 @@ _url_content_cache = {}
- cache_key = tuple(sorted([url for url in
- for i, url in enumerate(
@@ -233,30 +270,42 @@ def generate_response(message, history):
- error_msg += f"3. Add secret: **
- print(f"❌ API request failed: No API key configured for
- dynamic_context_parts = []
- grounding_context += "\n".join(dynamic_context_parts)
- # Build enhanced system prompt with grounding context
- enhanced_system_prompt = SYSTEM_PROMPT + grounding_context
@@ -264,15 +313,10 @@ def generate_response(message, history):
- # Modern format: {"role": "user", "content": "..."} or {"role": "assistant", "content": "..."}
- if user_msg:
- messages.append({"role": "user", "content": user_msg})
- if assistant_msg:
- messages.append({"role": "assistant", "content": assistant_msg})
@@ -288,14 +332,14 @@ def generate_response(message, history):
- "HTTP-Referer": "https://huggingface.co",
- "X-Title": "HuggingFace Space"
- "temperature":
- "max_tokens":
@@ -305,40 +349,26 @@ def generate_response(message, history):
- # Enhanced validation of API response structure
- if 'choices' not in result or not result['choices']:
- print(f"⚠️ API response missing choices: {result}")
- return "API Error: No response choices available"
- elif 'message' not in result['choices'][0]:
- print(f"⚠️ API response missing message: {result}")
- return "API Error: No message in response"
- elif 'content' not in result['choices'][0]['message']:
- print(f"⚠️ API response missing content: {result}")
- return "API Error: No content in message"
- else:
- content = result['choices'][0]['message']['content']
- # Check for empty content
- if not content or content.strip() == "":
- print(f"⚠️ API returned empty content")
- return "API Error: Empty response content"
- print(f"✅ API request successful")
- return content
- error_msg = f"
- error_msg += f"1. Check that your **
- error_msg += f"2. Verify your API key at
- error_msg += f"3.
- error_msg += f"4. Check
- print(f"❌
@@ -347,30 +377,26 @@ def generate_response(message, history):
- print(f"❌ Rate limit exceeded
- error_msg += f"`{error_message}`\n\n"
- if "model" in error_message.lower():
- error_msg += f"**Model Issue:** The model `{MODEL}` may not be available.\n"
- error_msg += f"Try switching to a different model in your Space configuration."
- print(f"❌ Bad request: {response.status_code} - {error_message}")
- error_msg += f"An unexpected error occurred
@@ -393,10 +419,10 @@ def generate_response(message, history):
- error_msg =
- error_msg +=
@@ -409,38 +435,40 @@ def verify_access_code(code):
- return gr.update(
- return gr.update(
- return gr.update(
- def protected_generate_response(message, history):
- return generate_response(message, history)
- def store_and_generate_response(message, history):
- response = protected_generate_response(message, history)
- if isinstance(exchange,
@@ -471,144 +499,205 @@ def export_conversation(history):
- # Save to temporary file with logical name
- import tempfile
- import os
- temp_dir = tempfile.gettempdir()
- temp_file = os.path.join(temp_dir, filename)
- with open(temp_file, 'w', encoding='utf-8') as f:
- """Generate a configuration status message for display"""
- status_parts.append(" Set `'API_KEY'` in Space secrets")
- # Model and parameters
- status_parts.append("") # Blank line
- status_parts.append("### 🤖 Model Settings")
- status_parts.append(f"**Model:** {MODEL.split('/')[-1]}")
- status_parts.append(f"**Temperature:** 0.5")
- status_parts.append(f"**Max Tokens:** 450")
@@ -617,29 +706,11 @@ with gr.Blocks(title=SPACE_NAME, theme=theme_class()) as demo:
- 'temperature': 0.5,
- 'max_tokens': 450,
- 'locked': False
- }
- # Editable fields - Order matches the Configuration tab
- # 1. Assistant Identity
- edit_name = gr.Textbox(
- label="Assistant Name",
- value=current_config.get('name', SPACE_NAME),
- placeholder="My AI Assistant"
- )
- edit_description = gr.Textbox(
- label="Assistant Description",
- value=current_config.get('description', SPACE_DESCRIPTION),
- lines=2,
- placeholder="A helpful AI assistant for..."
- )
@@ -661,8 +732,8 @@ with gr.Blocks(title=SPACE_NAME, theme=theme_class()) as demo:
- # 4. Example
@@ -682,18 +753,18 @@ with gr.Blocks(title=SPACE_NAME, theme=theme_class()) as demo:
- value=current_config.get('temperature', 0.
- value=current_config.get('max_tokens',
@@ -720,39 +791,23 @@ with gr.Blocks(title=SPACE_NAME, theme=theme_class()) as demo:
- save_config_btn = gr.Button("
- reset_config_btn = gr.Button("
- # Faculty authentication function
- def verify_faculty_password(password):
- if password == FACULTY_PASSWORD:
- return (
- gr.update(value="✅ Authentication successful!"),
- gr.update(visible=False), # Hide auth row
- gr.update(visible=True), # Show config section
- True # Update auth state
- )
- else:
- return (
- gr.update(value="❌ Invalid password"),
- gr.update(visible=True), # Keep auth row visible
- gr.update(visible=False), # Keep config hidden
- False # Auth failed
- )
- def save_configuration(
- return "
- return "
@@ -761,8 +816,8 @@ with gr.Blocks(title=SPACE_NAME, theme=theme_class()) as demo:
- # If config.json doesn't exist, use
- current_full_config =
@@ -773,10 +828,27 @@ with gr.Blocks(title=SPACE_NAME, theme=theme_class()) as demo:
- 'name': new_name,
- 'description': new_description,
@@ -786,23 +858,58 @@ with gr.Blocks(title=SPACE_NAME, theme=theme_class()) as demo:
- }
- updates = ["
@@ -810,7 +917,7 @@ with gr.Blocks(title=SPACE_NAME, theme=theme_class()) as demo:
- updates = ["
@@ -822,25 +929,25 @@ with gr.Blocks(title=SPACE_NAME, theme=theme_class()) as demo:
- # Get default URLs
- import
- default_urls =
- gr.update(value=DEFAULT_CONFIG.get('name', SPACE_NAME)),
- gr.update(value=DEFAULT_CONFIG.get('description', SPACE_DESCRIPTION)),
- gr.update(value=DEFAULT_CONFIG.get('temperature',
- gr.update(value=DEFAULT_CONFIG.get('max_tokens',
@@ -850,33 +957,19 @@ with gr.Blocks(title=SPACE_NAME, theme=theme_class()) as demo:
- # Connect authentication
- faculty_auth_btn.click(
- verify_faculty_password,
- inputs=[faculty_password_input],
- outputs=[faculty_auth_status, faculty_auth_row, faculty_config_section, faculty_auth_state]
- )
- faculty_password_input.submit(
- verify_faculty_password,
- inputs=[faculty_password_input],
- outputs=[faculty_auth_status, faculty_auth_row, faculty_config_section, faculty_auth_state]
- )
- inputs=[
- outputs=[config_status,
- else:
- gr.Markdown("ℹ️ Faculty configuration is not enabled. Set CONFIG_CODE in Space secrets to enable.")

[Right-hand column of the diff: the new app.py lines of the changed hunks follow, as rendered by the export.]
10 |
|
11 |
|
12 |
# Configuration
|
13 |
+
SPACE_NAME = 'Writing Aid'
|
14 |
+
SPACE_DESCRIPTION = 'A customizable AI assistant'
|
15 |
|
16 |
+
# Default configuration values (used only if config.json is missing)
|
17 |
+
DEFAULT_CONFIG = {
|
18 |
+
'name': SPACE_NAME,
|
19 |
+
'description': SPACE_DESCRIPTION,
|
20 |
+
'system_prompt': 'You are a humanities scholar and pedagogue specializing in interdisciplinary approaches across literature, philosophy, history, religious studies, and cultural analysis. Your expertise lies in close reading, hermeneutical interpretation, contextual analysis, and cross-cultural comparison. Guide students through primary source analysis, encourage deep engagement with texts and artifacts, and foster critical interpretation skills. Emphasize the importance of historical context, cultural sensitivity, and multiple perspectives. Help students develop sophisticated arguments grounded in textual evidence while appreciating the complexity and ambiguity inherent in humanistic inquiry. Draw connections between historical and contemporary issues, encouraging students to see the ongoing relevance of humanistic knowledge. Model intellectual curiosity, empathy, and the art of asking meaningful questions about human experience, meaning, and values.',
|
21 |
+
'temperature': 0.8,
|
22 |
+
'max_tokens': 1000,
|
23 |
+
'model': 'openai/gpt-4.1-nano',
|
24 |
+
'api_key_var': 'API_KEY',
|
25 |
+
'theme': 'Origin',
|
26 |
+
'grounding_urls': ["https://en.wikipedia.org/wiki/Hermeneutics", "https://plato.stanford.edu/entries/hermeneutics/", "https://en.wikipedia.org/wiki/Close_reading", "https://en.wikipedia.org/wiki/Cultural_studies"],
|
27 |
+
'enable_dynamic_urls': True,
|
28 |
+
'examples': ['How do I analyze the symbolism in this medieval manuscript?', "What historical context should I consider when reading Dante's Inferno?", 'Can you help me compare philosophical approaches to justice across different cultures?', 'How do I interpret conflicting historical accounts of the same event?'],
|
29 |
+
'locked': False
|
30 |
+
}
|
31 |
|
32 |
+
# Load configuration from file - this is the single source of truth
|
33 |
+
def load_config():
|
34 |
+
"""Load configuration from config.json with fallback to defaults"""
|
35 |
+
try:
|
36 |
+
with open('config.json', 'r') as f:
|
37 |
+
config = json.load(f)
|
38 |
+
print("✅ Loaded configuration from config.json")
|
39 |
+
return config
|
40 |
+
except FileNotFoundError:
|
41 |
+
print("ℹ️ No config.json found, using default configuration")
|
42 |
+
# Save default config for future use
|
43 |
+
try:
|
44 |
+
with open('config.json', 'w') as f:
|
45 |
+
json.dump(DEFAULT_CONFIG, f, indent=2)
|
46 |
+
print("✅ Created config.json with default values")
|
47 |
+
except:
|
48 |
+
pass
|
49 |
+
return DEFAULT_CONFIG
|
50 |
+
except Exception as e:
|
51 |
+
print(f"⚠️ Error loading config.json: {e}, using defaults")
|
52 |
+
return DEFAULT_CONFIG
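# --- Editor's illustrative sketch (not part of the uploaded change) ---------
# load_config() returns whatever config.json contains; the module-level lookups
# that follow fall back to DEFAULT_CONFIG key-by-key via config.get(...), so a
# hand-edited config.json only needs the keys it overrides. The helper below is
# hypothetical and never called by the app; it just demonstrates that merge.
def _merged_config_example(partial):
    # e.g. partial = {"model": "openai/gpt-4.1-nano"} -> every other key comes from DEFAULT_CONFIG
    return {key: partial.get(key, DEFAULT_CONFIG[key]) for key in DEFAULT_CONFIG}
# ----------------------------------------------------------------------------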
|
53 |
+
|
54 |
+
# Load configuration
|
55 |
+
config = load_config()
|
56 |
+
|
57 |
+
# Initial load of configuration values
|
58 |
+
SPACE_NAME = config.get('name', DEFAULT_CONFIG['name'])
|
59 |
+
SPACE_DESCRIPTION = config.get('description', DEFAULT_CONFIG['description'])
|
60 |
+
SYSTEM_PROMPT = config.get('system_prompt', DEFAULT_CONFIG['system_prompt'])
|
61 |
+
temperature = config.get('temperature', DEFAULT_CONFIG['temperature'])
|
62 |
+
max_tokens = config.get('max_tokens', DEFAULT_CONFIG['max_tokens'])
|
63 |
+
MODEL = config.get('model', DEFAULT_CONFIG['model'])
|
64 |
+
THEME = config.get('theme', DEFAULT_CONFIG['theme'])
|
65 |
+
GROUNDING_URLS = config.get('grounding_urls', DEFAULT_CONFIG['grounding_urls'])
|
66 |
+
ENABLE_DYNAMIC_URLS = config.get('enable_dynamic_urls', DEFAULT_CONFIG['enable_dynamic_urls'])
|
67 |
|
|
|
|
|
|
|
68 |
# Get access code from environment variable for security
|
69 |
# If ACCESS_CODE is not set, no access control is applied
|
70 |
ACCESS_CODE = os.environ.get("ACCESS_CODE")
|
|
|
71 |
|
72 |
# Get API key from environment - customizable variable name with validation
|
73 |
+
API_KEY_VAR = config.get('api_key_var', DEFAULT_CONFIG['api_key_var'])
|
74 |
+
API_KEY = os.environ.get(API_KEY_VAR)
|
75 |
if API_KEY:
|
76 |
API_KEY = API_KEY.strip() # Remove any whitespace
|
77 |
if not API_KEY: # Check if empty after stripping
|
|
|
82 |
"""Validate API key configuration with detailed logging"""
|
83 |
if not API_KEY:
|
84 |
print(f"⚠️ API KEY CONFIGURATION ERROR:")
|
85 |
+
print(f" Variable name: {API_KEY_VAR}")
|
86 |
print(f" Status: Not set or empty")
|
87 |
+
print(f" Action needed: Set '{API_KEY_VAR}' in HuggingFace Space secrets")
|
88 |
print(f" Expected format: sk-or-xxxxxxxxxx")
|
89 |
return False
|
90 |
elif not API_KEY.startswith('sk-or-'):
|
91 |
print(f"⚠️ API KEY FORMAT WARNING:")
|
92 |
+
print(f" Variable name: {API_KEY_VAR}")
|
93 |
+
print(f" Current value: {API_KEY[:10]}..." if len(API_KEY) > 10 else "{API_KEY}")
|
94 |
print(f" Expected format: sk-or-xxxxxxxxxx")
|
95 |
print(f" Note: OpenRouter keys should start with 'sk-or-'")
|
96 |
return True # Still try to use it
|
97 |
else:
|
98 |
print(f"✅ API Key configured successfully")
|
99 |
+
print(f" Variable: {API_KEY_VAR}")
|
100 |
print(f" Format: Valid OpenRouter key")
|
101 |
return True
|
102 |
|
|
|
153 |
|
154 |
# Smart truncation - try to end at sentence boundaries
|
155 |
if len(text) > 4000:
|
156 |
+
truncated_text = text[:4000]
|
157 |
+
# Try to find the last complete sentence
|
158 |
+
last_period = truncated_text.rfind('.')
|
159 |
+
if last_period > 3500: # Only if we have a reasonably long truncation
|
160 |
+
text = truncated_text[:last_period + 1]
|
161 |
else:
|
162 |
+
text = truncated_text + "..."
|
163 |
|
164 |
return text if text.strip() else "No readable content found at this URL"
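# --- Editor's illustrative sketch (not part of the uploaded change) ---------
# The truncation above keeps roughly the first 4000 characters and prefers to
# cut at the last sentence boundary past position 3500. A standalone equivalent,
# handy for unit-testing the heuristic (the helper name is hypothetical):
def _truncate_at_sentence(text, limit=4000, floor=3500):
    if len(text) <= limit:
        return text
    cut = text[:limit]
    last_period = cut.rfind('.')
    return cut[:last_period + 1] if last_period > floor else cut + "..."
# ----------------------------------------------------------------------------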
|
165 |
|
|
|
172 |
|
173 |
def extract_urls_from_text(text):
|
174 |
"""Extract URLs from text using regex with enhanced validation"""
|
|
|
175 |
url_pattern = r'https?://[^\s<>"{}|\\^`\[\]"]+'
|
176 |
urls = re.findall(url_pattern, text)
|
177 |
|
|
|
191 |
|
192 |
def get_grounding_context():
|
193 |
"""Fetch context from grounding URLs with caching"""
|
194 |
+
# Handle both string and list formats for grounding_urls
|
195 |
+
urls = GROUNDING_URLS
|
196 |
+
if isinstance(urls, str):
|
197 |
+
try:
|
198 |
+
urls = json.loads(urls)
|
199 |
+
except:
|
200 |
+
urls = []
|
201 |
+
|
202 |
+
if not urls:
|
203 |
return ""
|
204 |
|
205 |
# Create cache key from URLs
|
206 |
+
cache_key = tuple(sorted([url for url in urls if url and url.strip()]))
|
207 |
|
208 |
# Check cache first
|
209 |
if cache_key in _url_content_cache:
|
210 |
return _url_content_cache[cache_key]
|
211 |
|
212 |
context_parts = []
|
213 |
+
for i, url in enumerate(urls, 1):
|
214 |
if url.strip():
|
215 |
content = fetch_url_content(url.strip())
|
216 |
# Add priority indicators
|
|
|
270 |
error_msg += f"Please configure your OpenRouter API key:\n"
|
271 |
error_msg += f"1. Go to Settings (⚙️) in your HuggingFace Space\n"
|
272 |
error_msg += f"2. Click 'Variables and secrets'\n"
|
273 |
+
error_msg += f"3. Add secret: **{API_KEY_VAR}**\n"
|
274 |
error_msg += f"4. Value: Your OpenRouter API key (starts with `sk-or-`)\n\n"
|
275 |
error_msg += f"Get your API key at: https://openrouter.ai/keys"
|
276 |
+
print(f"❌ API request failed: No API key configured for {API_KEY_VAR}")
|
277 |
return error_msg
|
278 |
|
279 |
# Get grounding context
|
280 |
grounding_context = get_grounding_context()
|
281 |
|
282 |
+
# Process uploaded files if any
|
283 |
+
file_context = ""
|
284 |
+
if files:
|
285 |
+
file_contents = []
|
286 |
+
for file_obj in files:
|
287 |
+
if file_obj is not None:
|
288 |
+
try:
|
289 |
+
file_content = extract_file_content(file_obj.name)
|
290 |
+
file_contents.append(file_content)
|
291 |
+
except Exception as e:
|
292 |
+
file_contents.append(f"Error processing file: {str(e)}")
|
293 |
|
294 |
+
if file_contents:
|
295 |
+
file_context = "\n\n[UPLOADED FILES]\n" + "\n\n".join(file_contents) + "\n"
|
296 |
+
|
297 |
# If dynamic URLs are enabled, check message for URLs to fetch
|
298 |
if ENABLE_DYNAMIC_URLS:
|
299 |
urls_in_message = extract_urls_from_text(message)
|
300 |
if urls_in_message:
|
301 |
+
dynamic_context = ""
|
|
|
302 |
for url in urls_in_message[:3]: # Limit to 3 URLs per message
|
303 |
content = fetch_url_content(url)
|
304 |
+
dynamic_context += f"\n\n[DYNAMIC] Context from {url}:\n{content}"
|
305 |
+
grounding_context += dynamic_context
|
|
|
306 |
|
307 |
+
# Build enhanced system prompt with grounding context and file content
|
308 |
+
enhanced_system_prompt = SYSTEM_PROMPT + grounding_context + file_context
|
309 |
|
310 |
# Build messages array for the API
|
311 |
messages = [{"role": "system", "content": enhanced_system_prompt}]
|
|
|
313 |
# Add conversation history - handle both modern messages format and legacy tuples
|
314 |
for chat in history:
|
315 |
if isinstance(chat, dict):
|
|
|
316 |
messages.append(chat)
|
317 |
elif isinstance(chat, (list, tuple)) and len(chat) >= 2:
|
318 |
+
messages.append({"role": "user", "content": chat[0]})
|
319 |
+
messages.append({"role": "assistant", "content": chat[1]})
|
|
|
|
|
|
|
|
|
320 |
|
321 |
# Add current message
|
322 |
messages.append({"role": "user", "content": message})
|
|
|
332 |
headers={
|
333 |
"Authorization": f"Bearer {API_KEY}",
|
334 |
"Content-Type": "application/json",
|
335 |
+
"HTTP-Referer": "https://huggingface.co",
|
336 |
+
"X-Title": "HuggingFace Space"
|
337 |
},
|
338 |
json={
|
339 |
"model": MODEL,
|
340 |
"messages": messages,
|
341 |
+
"temperature": temperature,
|
342 |
+
"max_tokens": max_tokens
|
343 |
},
|
344 |
timeout=30
|
345 |
)
|
|
|
349 |
if response.status_code == 200:
|
350 |
try:
|
351 |
result = response.json()
|
352 |
+
return result['choices'][0]['message']['content']
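# Editor's note (descriptive comment, not part of the uploaded change): this indexes the
# OpenAI-style payload OpenRouter returns, i.e. {"choices": [{"message": {"content": "..."}}]};
# a KeyError/IndexError here falls through to the parsing-error handler below.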
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
353 |
except (KeyError, IndexError, json.JSONDecodeError) as e:
|
354 |
+
error_msg = f"❌ **Response Parsing Error**\n\n"
|
355 |
+
error_msg += f"Received response from API but couldn't parse it properly.\n"
|
356 |
+
error_msg += f"Error: {str(e)}\n\n"
|
357 |
+
error_msg += f"**Troubleshooting:**\n"
|
358 |
+
error_msg += f"1. Check OpenRouter service status\n"
|
359 |
+
error_msg += f"2. Try again in a few moments\n"
|
360 |
+
error_msg += f"3. Try a different model if available"
|
361 |
+
print(f"❌ Response parsing error: {str(e)}")
|
362 |
+
return error_msg
|
363 |
elif response.status_code == 401:
|
364 |
+
error_msg = f"🔑 **Authentication Error**\n\n"
|
365 |
error_msg += f"Your API key appears to be invalid or expired.\n\n"
|
366 |
error_msg += f"**Troubleshooting:**\n"
|
367 |
+
error_msg += f"1. Check that your **{API_KEY_VAR}** secret is set correctly\n"
|
368 |
+
error_msg += f"2. Verify your OpenRouter API key at https://openrouter.ai/keys\n"
|
369 |
+
error_msg += f"3. Make sure the key starts with `sk-or-`\n"
|
370 |
+
error_msg += f"4. Check if you have sufficient credits"
|
371 |
+
print(f"❌ Authentication failed: Invalid API key")
|
372 |
return error_msg
|
373 |
elif response.status_code == 429:
|
374 |
error_msg = f"⏱️ **Rate Limit Exceeded**\n\n"
|
|
|
377 |
error_msg += f"1. Wait 30-60 seconds before trying again\n"
|
378 |
error_msg += f"2. Check your OpenRouter usage limits\n"
|
379 |
error_msg += f"3. Consider upgrading your OpenRouter plan"
|
380 |
+
print(f"❌ Rate limit exceeded")
|
381 |
return error_msg
|
382 |
elif response.status_code == 400:
|
383 |
+
error_msg = f"📝 **Request Error**\n\n"
|
384 |
+
error_msg += f"There was a problem with the request format.\n"
|
385 |
+
error_msg += f"Response: {response.text[:500]}\n\n"
|
386 |
+
error_msg += f"**Troubleshooting:**\n"
|
387 |
+
error_msg += f"1. Try a shorter message\n"
|
388 |
+
error_msg += f"2. Check for special characters in your message\n"
|
389 |
+
error_msg += f"3. Try a different model"
|
390 |
+
print(f"❌ Bad request: {response.status_code} - {response.text[:200]}")
|
|
|
|
|
|
|
|
|
|
|
391 |
return error_msg
|
392 |
else:
|
393 |
+
error_msg = f"🌐 **API Error {response.status_code}**\n\n"
|
394 |
+
error_msg += f"An unexpected error occurred.\n"
|
395 |
+
error_msg += f"Response: {response.text[:500]}\n\n"
|
396 |
+
error_msg += f"**Troubleshooting:**\n"
|
397 |
+
error_msg += f"1. Try again in a few moments\n"
|
398 |
+
error_msg += f"2. Check OpenRouter service status\n"
|
399 |
+
error_msg += f"3. Contact support if this persists"
|
400 |
print(f"❌ API error: {response.status_code} - {response.text[:200]}")
|
401 |
return error_msg
|
402 |
|
|
|
419 |
print(f"❌ Connection error to OpenRouter API")
|
420 |
return error_msg
|
421 |
except Exception as e:
|
422 |
+
error_msg = "❌ **Unexpected Error**\n\n"
|
423 |
+
error_msg += "An unexpected error occurred:\n"
|
424 |
error_msg += f"`{str(e)}`\n\n"
|
425 |
+
error_msg += "Please try again or contact support if this persists."
|
426 |
print(f"❌ Unexpected error: {str(e)}")
|
427 |
return error_msg
|
428 |
|
|
|
435 |
global _access_granted_global
|
436 |
if ACCESS_CODE is None:
|
437 |
_access_granted_global = True
|
438 |
+
return gr.update(value="No access code required.", style={"color": "green"}), gr.update(visible=True), True
|
439 |
|
440 |
if code == ACCESS_CODE:
|
441 |
_access_granted_global = True
|
442 |
+
return gr.update(value="✅ Access granted!", style={"color": "green"}), gr.update(visible=True), True
|
443 |
else:
|
444 |
_access_granted_global = False
|
445 |
+
return gr.update(value="❌ Invalid access code. Please try again.", style={"color": "red"}), gr.update(visible=False), False
|
446 |
|
447 |
+
def protected_generate_response(message, history, files=None):
|
448 |
"""Protected response function that checks access"""
|
449 |
# Check if access is granted via the global variable
|
450 |
if ACCESS_CODE is not None and not _access_granted_global:
|
451 |
return "Please enter the access code to continue."
|
452 |
+
return generate_response(message, history, files)
|
453 |
|
454 |
# Global variable to store chat history for export
|
455 |
chat_history_store = []
|
456 |
|
457 |
+
def store_and_generate_response(message, history, files=None):
|
458 |
"""Wrapper function that stores history and generates response"""
|
459 |
global chat_history_store
|
460 |
|
461 |
# Generate response using the protected function
|
462 |
+
response = protected_generate_response(message, history, files)
|
463 |
|
464 |
# Convert current history to the format we need for export
|
465 |
# history comes in as [["user1", "bot1"], ["user2", "bot2"], ...]
|
466 |
chat_history_store = []
|
467 |
if history:
|
468 |
for exchange in history:
|
469 |
+
if isinstance(exchange, dict):
|
470 |
+
chat_history_store.append(exchange)
|
471 |
+
elif isinstance(exchange, (list, tuple)) and len(exchange) >= 2:
|
472 |
chat_history_store.append({"role": "user", "content": exchange[0]})
|
473 |
chat_history_store.append({"role": "assistant", "content": exchange[1]})
|
474 |
|
|
|
499 |
|
500 |
markdown_content = export_conversation_to_markdown(history)
|
501 |
|
502 |
+
# Save to temporary file
|
503 |
+
with tempfile.NamedTemporaryFile(mode='w', suffix='.md', delete=False, encoding='utf-8') as f:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
504 |
f.write(markdown_content)
|
505 |
+
temp_file = f.name
|
506 |
|
507 |
return gr.update(value=temp_file, visible=True)
|
508 |
|
509 |
# Configuration status display
|
510 |
def get_configuration_status():
|
511 |
+
"""Generate a clean configuration status message for display"""
|
512 |
status_parts = []
|
513 |
|
514 |
+
# Basic configuration info (without redundant "Configuration:" header)
|
515 |
+
status_parts.append(f"**Name:** {SPACE_NAME}")
|
516 |
+
status_parts.append(f"**Model:** {MODEL}")
|
517 |
+
status_parts.append(f"**Theme:** {THEME}")
|
518 |
+
status_parts.append(f"**Temperature:** {temperature}")
|
519 |
+
status_parts.append(f"**Max Response Tokens:** {max_tokens}")
|
520 |
+
status_parts.append("")
|
521 |
+
|
522 |
+
# Example prompts
|
523 |
+
status_parts.append("")
|
524 |
+
examples_list = config.get('examples', [])
|
525 |
+
if isinstance(examples_list, str):
|
526 |
+
try:
|
527 |
+
import ast
|
528 |
+
examples_list = ast.literal_eval(examples_list)
|
529 |
+
except:
|
530 |
+
examples_list = []
|
531 |
+
|
532 |
+
if examples_list and len(examples_list) > 0:
|
533 |
+
status_parts.append("**Example Prompts:**")
|
534 |
+
for example in examples_list[:5]: # Show first 5 examples
|
535 |
+
status_parts.append(f"• {example}")
|
536 |
+
if len(examples_list) > 5:
|
537 |
+
status_parts.append(f"• ... and {len(examples_list) - 5} more")
|
538 |
else:
|
539 |
+
status_parts.append("**Example Prompts:** No example prompts configured")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
540 |
|
541 |
# URL Context if configured
|
542 |
+
urls = GROUNDING_URLS
|
543 |
+
if isinstance(urls, str):
|
544 |
+
try:
|
545 |
+
import ast
|
546 |
+
urls = ast.literal_eval(urls)
|
547 |
+
except:
|
548 |
+
urls = []
|
549 |
+
|
550 |
+
if urls and len(urls) > 0:
|
551 |
+
status_parts.append("")
|
552 |
+
status_parts.append("**Grounding URLs:**")
|
553 |
+
for i, url in enumerate(urls[:5], 1): # Show first 5 URLs
|
554 |
+
status_parts.append(f"{i}. {url}")
|
555 |
+
if len(urls) > 5:
|
556 |
+
status_parts.append(f"... and {len(urls) - 5} more URLs")
|
557 |
+
|
558 |
+
# System prompt at the end
|
559 |
+
status_parts.append("")
|
560 |
+
status_parts.append(f"**System Prompt:** {SYSTEM_PROMPT}")
|
561 |
+
|
562 |
+
# API Key status (minimal, at the end)
|
563 |
+
status_parts.append("")
|
564 |
+
if not API_KEY_VALID:
|
565 |
+
status_parts.append(f"**Note:** API key ({API_KEY_VAR}) not configured in Space secrets")
|
566 |
|
567 |
return "\n".join(status_parts)
|
568 |
|
569 |
+
# HuggingFace Authentication Utility
|
570 |
+
def verify_hf_token_access():
|
571 |
+
"""Verify HF_TOKEN has write access to the space"""
|
572 |
+
hf_token = os.environ.get("HF_TOKEN")
|
573 |
+
space_id = os.environ.get("SPACE_ID")
|
574 |
+
|
575 |
+
if not hf_token or not space_id:
|
576 |
+
return False, "Missing HF_TOKEN or SPACE_ID environment variables"
|
577 |
+
|
578 |
+
try:
|
579 |
+
from huggingface_hub import HfApi
|
580 |
+
api = HfApi(token=hf_token)
|
581 |
+
# Test access by getting space info
|
582 |
+
api.space_info(space_id)
|
583 |
+
return True, "Authenticated successfully"
|
584 |
+
except Exception as e:
|
585 |
+
return False, f"Authentication failed: {str(e)}"
|
586 |
+
|
587 |
# Create interface with access code protection
|
588 |
# Dynamically set theme based on configuration
|
589 |
theme_class = getattr(gr.themes, THEME, gr.themes.Default)
|
590 |
with gr.Blocks(title=SPACE_NAME, theme=theme_class()) as demo:
|
591 |
+
# Check if HF_TOKEN is configured to determine configuration panel availability
|
592 |
+
HF_TOKEN = os.environ.get("HF_TOKEN", "").strip()
|
593 |
+
SPACE_ID = os.environ.get("SPACE_ID", "").strip()
|
594 |
+
|
595 |
+
# Verify actual HuggingFace API access
|
596 |
+
HF_ACCESS_VALID, HF_ACCESS_MESSAGE = verify_hf_token_access()
|
597 |
+
|
598 |
+
# Always use tabs structure, Configuration tab visible only with HF_TOKEN
|
599 |
+
with gr.Tabs() as main_tabs:
|
600 |
+
with gr.Tab("Chat U/I"):
|
601 |
+
gr.Markdown(f"# {SPACE_NAME}")
|
602 |
+
gr.Markdown(SPACE_DESCRIPTION)
|
603 |
+
|
604 |
+
# Access code section (shown only if ACCESS_CODE is set)
|
605 |
+
with gr.Column(visible=(ACCESS_CODE is not None)) as access_section:
|
606 |
+
gr.Markdown("### 🔐 Access Required")
|
607 |
+
gr.Markdown("Please enter the access code provided by your instructor:")
|
608 |
+
|
609 |
+
access_input = gr.Textbox(
|
610 |
+
label="Access Code",
|
611 |
+
placeholder="Enter access code...",
|
612 |
+
type="password"
|
613 |
+
)
|
614 |
+
access_btn = gr.Button("Submit", variant="primary")
|
615 |
+
access_error = gr.Markdown(visible=False)
|
616 |
+
|
617 |
+
# Main chat interface (hidden until access granted)
|
618 |
+
with gr.Column(visible=(ACCESS_CODE is None)) as chat_section:
|
619 |
+
# Get examples from config
|
620 |
+
examples = config.get('examples', [])
|
621 |
+
if isinstance(examples, str):
|
622 |
+
try:
|
623 |
+
import ast
|
624 |
+
examples = ast.literal_eval(examples)
|
625 |
+
except:
|
626 |
+
examples = []
|
627 |
+
|
628 |
+
# Format examples for ChatInterface with additional_inputs
|
629 |
+
# When additional_inputs are present, examples must be list of lists
|
630 |
+
# where each inner list contains [message, *additional_input_values]
|
631 |
+
formatted_examples = None
|
632 |
+
if examples:
|
633 |
+
# Check if examples are already formatted correctly (list of lists)
|
634 |
+
if examples and isinstance(examples[0], list):
|
635 |
+
# Already formatted, use as-is
|
636 |
+
formatted_examples = examples
|
637 |
+
else:
|
638 |
+
# Format as [message, file_input] where file_input=None for examples
|
639 |
+
formatted_examples = [[example, None] for example in examples]
|
640 |
+
|
641 |
+
chat_interface = gr.ChatInterface(
|
642 |
+
fn=store_and_generate_response, # Use wrapper function to store history
|
643 |
+
title="", # Title already shown above
|
644 |
+
description="", # Description already shown above
|
645 |
+
examples=formatted_examples,
|
646 |
+
type="messages", # Use modern message format for better compatibility
|
647 |
+
additional_inputs=[
|
648 |
+
gr.File(
|
649 |
+
label="📎",
|
650 |
+
file_types=None, # Accept all file types
|
651 |
+
file_count="multiple",
|
652 |
+
visible=True
|
653 |
+
)
|
654 |
+
]
|
655 |
+
)
|
656 |
+
|
657 |
+
# Export functionality
|
658 |
+
with gr.Row():
|
659 |
+
export_btn = gr.Button("📥 Export Conversation", variant="secondary", size="sm")
|
660 |
+
export_file = gr.File(label="Download", visible=False)
|
661 |
+
|
662 |
+
# Connect export functionality
|
663 |
+
export_btn.click(
|
664 |
+
export_current_conversation,
|
665 |
+
outputs=[export_file]
|
666 |
)
|
667 |
+
|
668 |
+
# Configuration status
|
669 |
+
with gr.Accordion("Configuration", open=False):
|
670 |
+
gr.Markdown(get_configuration_status())
|
671 |
+
|
672 |
+
# Connect access verification within tab context
|
673 |
+
if ACCESS_CODE is not None:
|
674 |
+
access_btn.click(
|
675 |
+
verify_access_code,
|
676 |
+
inputs=[access_input],
|
677 |
+
outputs=[access_error, chat_section, access_granted]
|
678 |
+
)
|
679 |
+
access_input.submit(
|
680 |
+
verify_access_code,
|
681 |
+
inputs=[access_input],
|
682 |
+
outputs=[access_error, chat_section, access_granted]
|
683 |
+
)
|
684 |
+
|
685 |
+
# Add Configuration tab (only visible with valid HF_TOKEN)
|
686 |
+
with gr.Tab("Configuration", visible=HF_ACCESS_VALID) as config_tab:
|
687 |
+
gr.Markdown("## Configuration Management")
|
688 |
|
689 |
+
# Show authentication status
|
690 |
+
if HF_ACCESS_VALID:
|
691 |
+
gr.Markdown(f"✅ **Authenticated** - {HF_ACCESS_MESSAGE}")
|
692 |
+
gr.Markdown("Configuration changes will be saved to the HuggingFace repository and the Space will restart automatically.")
|
693 |
+
faculty_auth_state = gr.State(True)
|
694 |
+
else:
|
695 |
+
gr.Markdown(f"❌ **Not Available** - {HF_ACCESS_MESSAGE}")
|
696 |
+
gr.Markdown("Set HF_TOKEN and SPACE_ID in Space secrets to enable configuration management.")
|
697 |
+
faculty_auth_state = gr.State(False)
|
698 |
+
|
699 |
+
# Configuration editor (visible if HF authentication is valid)
|
700 |
+
with gr.Column(visible=HF_ACCESS_VALID) as faculty_config_section:
|
701 |
gr.Markdown("### Edit Assistant Configuration")
|
702 |
gr.Markdown("⚠️ **Warning:** Changes will affect all users immediately.")
|
703 |
|
|
|
706 |
with open('config.json', 'r') as f:
|
707 |
current_config = json.load(f)
|
708 |
except:
|
709 |
+
# Use DEFAULT_CONFIG as fallback
|
710 |
+
current_config = DEFAULT_CONFIG.copy()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
711 |
|
712 |
+
# Editable fields
|
713 |
+
# System Prompt
|
714 |
edit_system_prompt = gr.Textbox(
|
715 |
label="System Prompt",
|
716 |
value=current_config.get('system_prompt', SYSTEM_PROMPT),
|
|
|
732 |
],
|
733 |
value=current_config.get('model', MODEL)
|
734 |
)
|
735 |
+
|
736 |
+
# 4. Example prompts field
|
737 |
examples_value = current_config.get('examples', [])
|
738 |
if isinstance(examples_value, list):
|
739 |
examples_text_value = "\n".join(examples_value)
|
|
|
753 |
label="Temperature",
|
754 |
minimum=0,
|
755 |
maximum=2,
|
756 |
+
value=current_config.get('temperature', 0.7),
|
757 |
step=0.1
|
758 |
)
|
759 |
edit_max_tokens = gr.Slider(
|
760 |
label="Max Tokens",
|
761 |
minimum=50,
|
762 |
maximum=4096,
|
763 |
+
value=current_config.get('max_tokens', 750),
|
764 |
step=50
|
765 |
)
|
766 |
|
767 |
+
# URL Grounding fields
|
768 |
gr.Markdown("### URL Grounding")
|
769 |
grounding_urls_value = current_config.get('grounding_urls', [])
|
770 |
if isinstance(grounding_urls_value, str):
|
|
|
791 |
)
|
792 |
|
793 |
with gr.Row():
|
794 |
+
save_config_btn = gr.Button("Save Configuration", variant="primary")
|
795 |
+
reset_config_btn = gr.Button("Reset to Defaults", variant="secondary")
|
796 |
|
797 |
config_status = gr.Markdown("")
|
798 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
799 |
|
800 |
# Save configuration function
|
801 |
+
def save_configuration(is_authenticated, new_prompt, new_model, new_examples, new_temp, new_tokens, *url_values):
|
802 |
if not is_authenticated:
|
803 |
+
return "Not authenticated"
|
804 |
|
805 |
# Check if configuration is already locked
|
806 |
try:
|
807 |
with open('config.json', 'r') as f:
|
808 |
existing_config = json.load(f)
|
809 |
if existing_config.get('locked', False):
|
810 |
+
return "Configuration is locked and cannot be modified"
|
811 |
except:
|
812 |
pass
|
813 |
|
|
|
816 |
with open('config.json', 'r') as f:
|
817 |
current_full_config = json.load(f)
|
818 |
except:
|
819 |
+
# If config.json doesn't exist, use default configuration
|
820 |
+
current_full_config = DEFAULT_CONFIG.copy()
|
821 |
|
822 |
# Process example prompts
|
823 |
examples_list = [ex.strip() for ex in new_examples.split('\n') if ex.strip()]
|
|
|
828 |
# Filter out empty URLs
|
829 |
grounding_urls = [url.strip() for url in urls if url.strip()]
|
830 |
|
831 |
+
# Create backup before making changes
|
832 |
+
try:
|
833 |
+
# Create backups directory if it doesn't exist
|
834 |
+
os.makedirs('config_backups', exist_ok=True)
|
835 |
+
|
836 |
+
# Create timestamped backup
|
837 |
+
backup_filename = f"config_backups/config_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"
|
838 |
+
with open(backup_filename, 'w') as backup_file:
|
839 |
+
json.dump(current_full_config, backup_file, indent=2)
|
840 |
+
|
841 |
+
# Keep only last 10 backups
|
842 |
+
backups = sorted([f for f in os.listdir('config_backups') if f.endswith('.json')])
|
843 |
+
if len(backups) > 10:
|
844 |
+
for old_backup in backups[:-10]:
|
845 |
+
os.remove(os.path.join('config_backups', old_backup))
|
846 |
+
except Exception as backup_error:
|
847 |
+
print(f"Warning: Could not create backup: {backup_error}")
|
848 |
+
# Continue with save even if backup fails
|
849 |
+
|
850 |
# Update all editable fields while preserving everything else
|
851 |
current_full_config.update({
|
|
|
|
|
852 |
'system_prompt': new_prompt,
|
853 |
'model': new_model,
|
854 |
'examples': examples_list,
|
|
|
858 |
'locked': lock_config_from_args,
|
859 |
'last_modified': datetime.now().isoformat(),
|
860 |
'last_modified_by': 'faculty'
|
861 |
+
})
|
862 |
|
863 |
try:
|
864 |
with open('config.json', 'w') as f:
|
865 |
json.dump(current_full_config, f, indent=2)
|
866 |
|
867 |
+
# Optional: Auto-commit to HuggingFace if token is available
|
868 |
+
hf_token = os.environ.get("HF_TOKEN")
|
869 |
+
space_id = os.environ.get("SPACE_ID")
|
870 |
|
871 |
+
if hf_token and space_id:
|
872 |
+
try:
|
873 |
+
from huggingface_hub import HfApi, CommitOperationAdd, restart_space
|
874 |
+
api = HfApi(token=hf_token)
|
875 |
+
|
876 |
+
# Create commit operation to upload config.json
|
877 |
+
operations = [
|
878 |
+
CommitOperationAdd(
|
879 |
+
path_or_fileobj="config.json",
|
880 |
+
path_in_repo="config.json"
|
881 |
+
)
|
882 |
+
]
|
883 |
+
|
884 |
+
# Create commit with updated configuration
|
885 |
+
api.create_commit(
|
886 |
+
repo_id=space_id,
|
887 |
+
operations=operations,
|
888 |
+
commit_message=f"Update configuration by faculty at {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}",
|
889 |
+
commit_description="Faculty configuration update through web interface",
|
890 |
+
repo_type="space",
|
891 |
+
token=hf_token
|
892 |
+
)
|
893 |
+
|
894 |
+
# Automatic restart
|
895 |
+
try:
|
896 |
+
restart_space(space_id, token=hf_token)
|
897 |
+
return f"✅ Configuration saved and committed at {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n🔄 **Space is restarting automatically!**\n\nThe page will refresh in about 30 seconds. Your changes will be applied."
|
898 |
+
except Exception as restart_error:
|
899 |
+
print(f"Could not auto-restart: {restart_error}")
|
900 |
+
return f"✅ Configuration saved and committed at {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n🔄 **Please restart manually** (auto-restart failed)\n\n1. Go to Settings (⚙️)\n2. Click 'Factory reboot'\n3. Wait ~30 seconds"
|
901 |
+
except Exception as commit_error:
|
902 |
+
print(f"Note: Could not auto-commit to repository: {commit_error}")
|
903 |
+
return f"✅ Configuration saved locally at {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n🔄 **Manual Restart Required**\nFor changes to take effect:\n1. Go to Settings (⚙️)\n2. Click 'Factory reboot'\n3. Wait ~30 seconds for restart"
|
904 |
+
else:
|
905 |
+
return f"✅ Configuration saved at {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n🔄 **Manual Restart Required**\nFor changes to take effect:\n1. Go to Settings (⚙️)\n2. Click 'Factory reboot'\n3. Wait ~30 seconds for restart"
|
906 |
except Exception as e:
|
907 |
return f"❌ Error saving configuration: {str(e)}"
|
908 |
|
909 |
# Reset configuration function
|
910 |
def reset_configuration(is_authenticated):
|
911 |
if not is_authenticated:
|
912 |
+
updates = ["Not authenticated"] + [gr.update() for _ in range(14)] # 1 status + 14 fields (prompt, model, examples, temp, tokens + 10 urls)
|
913 |
return tuple(updates)
|
914 |
|
915 |
# Check if locked
|
|
|
917 |
with open('config.json', 'r') as f:
|
918 |
existing_config = json.load(f)
|
919 |
if existing_config.get('locked', False):
|
920 |
+
updates = ["Configuration is locked"] + [gr.update() for _ in range(14)]
|
921 |
return tuple(updates)
|
922 |
except:
|
923 |
pass
|
|
|
929 |
else:
|
930 |
examples_text = ""
|
931 |
|
932 |
+
# Get default URLs - parse from JSON string if needed
|
933 |
default_urls = DEFAULT_CONFIG.get('grounding_urls', [])
|
934 |
if isinstance(default_urls, str):
|
935 |
try:
|
936 |
+
import json
|
937 |
+
default_urls = json.loads(default_urls)
|
938 |
except:
|
939 |
default_urls = []
|
940 |
+
elif not isinstance(default_urls, list):
|
941 |
+
default_urls = []
|
942 |
|
943 |
# Reset to original default values
|
944 |
updates = [
|
945 |
+
"Reset to default values",
|
|
|
|
|
946 |
gr.update(value=DEFAULT_CONFIG.get('system_prompt', SYSTEM_PROMPT)),
|
947 |
gr.update(value=DEFAULT_CONFIG.get('model', MODEL)),
|
948 |
gr.update(value=examples_text),
|
949 |
+
gr.update(value=DEFAULT_CONFIG.get('temperature', temperature)),
|
950 |
+
gr.update(value=DEFAULT_CONFIG.get('max_tokens', max_tokens))
|
951 |
]
|
952 |
|
953 |
# Add URL updates
|
|
|
957 |
|
958 |
return tuple(updates)
|
959 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
960 |
|
961 |
# Connect configuration buttons
|
962 |
save_config_btn.click(
|
963 |
save_configuration,
|
964 |
+
inputs=[faculty_auth_state, edit_system_prompt, edit_model, edit_examples, edit_temperature, edit_max_tokens] + url_fields + [config_locked],
|
965 |
outputs=[config_status]
|
966 |
)
|
967 |
|
968 |
reset_config_btn.click(
|
969 |
reset_configuration,
|
970 |
inputs=[faculty_auth_state],
|
971 |
+
outputs=[config_status, edit_system_prompt, edit_model, edit_examples, edit_temperature, edit_max_tokens] + url_fields
|
972 |
)
|
|
|
|
|
973 |
|
974 |
if __name__ == "__main__":
|
975 |
demo.launch()
|
config.json
CHANGED
@@ -1,19 +1,23 @@
[Left-hand column of the diff: lines removed from config.json; values truncated by the export are kept as fragments.]
- "name": "
- "system_prompt": "You are a
- "model": "
- "temperature": 0.
- "max_tokens":
- "
- "
- "
- "https://
- "theme": "

[Right-hand column of the diff: the new config.json follows, as rendered by the export.]
|
|
|
1 |
{
|
2 |
+
"name": "Writing Aid",
|
3 |
"description": "A customizable AI assistant",
|
4 |
+
"system_prompt": "You are a humanities scholar and pedagogue specializing in interdisciplinary approaches across literature, philosophy, history, religious studies, and cultural analysis. Your expertise lies in close reading, hermeneutical interpretation, contextual analysis, and cross-cultural comparison. Guide students through primary source analysis, encourage deep engagement with texts and artifacts, and foster critical interpretation skills. Emphasize the importance of historical context, cultural sensitivity, and multiple perspectives. Help students develop sophisticated arguments grounded in textual evidence while appreciating the complexity and ambiguity inherent in humanistic inquiry. Draw connections between historical and contemporary issues, encouraging students to see the ongoing relevance of humanistic knowledge. Model intellectual curiosity, empathy, and the art of asking meaningful questions about human experience, meaning, and values.",
|
5 |
+
"model": "openai/gpt-4.1-nano",
|
6 |
"api_key_var": "API_KEY",
|
7 |
+
"temperature": 0.8,
|
8 |
+
"max_tokens": 1000,
|
9 |
"examples": [
|
10 |
+
"How do I analyze the symbolism in this medieval manuscript?",
|
11 |
+
"What historical context should I consider when reading Dante's Inferno?",
|
12 |
+
"Can you help me compare philosophical approaches to justice across different cultures?",
|
13 |
+
"How do I interpret conflicting historical accounts of the same event?"
|
14 |
],
|
15 |
"grounding_urls": [
|
16 |
+
"https://en.wikipedia.org/wiki/Hermeneutics",
|
17 |
+
"https://plato.stanford.edu/entries/hermeneutics/",
|
18 |
+
"https://en.wikipedia.org/wiki/Close_reading",
|
19 |
+
"https://en.wikipedia.org/wiki/Cultural_studies"
|
20 |
],
|
21 |
"enable_dynamic_urls": true,
|
22 |
+
"theme": "Origin"
|
23 |
}
|
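For reference, this file is read once at startup by load_config() in app.py above; a minimal sketch of that consumption (illustrative only, with the fallback values taken from the diff):

    import json

    with open("config.json") as f:
        config = json.load(f)

    MODEL = config.get("model", "openai/gpt-4.1-nano")   # falls back to the default shown above
    temperature = config.get("temperature", 0.8)
    max_tokens = config.get("max_tokens", 1000)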