Upload 3 files
- app.py +319 -412
- config.json +12 -14
- requirements.txt +4 -0
app.py
CHANGED
@@ -10,68 +10,39 @@ import urllib.parse
|
|
10 |
|
11 |
|
12 |
# Configuration
|
13 |
-
SPACE_NAME = '
|
14 |
-
SPACE_DESCRIPTION = 'A customizable AI assistant'
|
15 |
|
16 |
-
# Default configuration values
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
'system_prompt': 'You are a humanities scholar and pedagogue specializing in interdisciplinary approaches across literature, philosophy, history, religious studies, and cultural analysis. Your expertise lies in close reading, hermeneutical interpretation, contextual analysis, and cross-cultural comparison. Guide students through primary source analysis, encourage deep engagement with texts and artifacts, and foster critical interpretation skills. Emphasize the importance of historical context, cultural sensitivity, and multiple perspectives. Help students develop sophisticated arguments grounded in textual evidence while appreciating the complexity and ambiguity inherent in humanistic inquiry. Draw connections between historical and contemporary issues, encouraging students to see the ongoing relevance of humanistic knowledge. Model intellectual curiosity, empathy, and the art of asking meaningful questions about human experience, meaning, and values.',
|
21 |
-
'temperature': 0.8,
|
22 |
-
'max_tokens': 1000,
|
23 |
-
'model': 'openai/gpt-4.1-nano',
|
24 |
-
'api_key_var': 'API_KEY',
|
25 |
-
'theme': Origin,
|
26 |
-
'grounding_urls': ["https://en.wikipedia.org/wiki/Hermeneutics", "https://plato.stanford.edu/entries/hermeneutics/", "https://en.wikipedia.org/wiki/Close_reading", "https://en.wikipedia.org/wiki/Cultural_studies"],
|
27 |
-
'enable_dynamic_urls': True,
|
28 |
-
'examples': ['How do I analyze the symbolism in this medieval manuscript?', "What historical context should I consider when reading Dante's Inferno?", 'Can you help me compare philosophical approaches to justice across different cultures?', 'How do I interpret conflicting historical accounts of the same event?'],
|
29 |
-
'locked': False
|
30 |
-
}
|
31 |
|
32 |
-
#
|
33 |
-
|
34 |
-
|
35 |
-
|
36 |
-
|
37 |
-
|
38 |
-
|
39 |
-
|
40 |
-
|
41 |
-
|
42 |
-
|
43 |
-
|
44 |
-
|
45 |
-
|
46 |
-
print("✅ Created config.json with default values")
|
47 |
-
except:
|
48 |
-
pass
|
49 |
-
return DEFAULT_CONFIG
|
50 |
-
except Exception as e:
|
51 |
-
print(f"⚠️ Error loading config.json: {e}, using defaults")
|
52 |
-
return DEFAULT_CONFIG
|
53 |
-
|
54 |
-
# Load configuration
|
55 |
-
config = load_config()
|
56 |
-
|
57 |
-
# Initial load of configuration values
|
58 |
-
SPACE_NAME = config.get('name', DEFAULT_CONFIG['name'])
|
59 |
-
SPACE_DESCRIPTION = config.get('description', DEFAULT_CONFIG['description'])
|
60 |
-
SYSTEM_PROMPT = config.get('system_prompt', DEFAULT_CONFIG['system_prompt'])
|
61 |
-
temperature = config.get('temperature', DEFAULT_CONFIG['temperature'])
|
62 |
-
max_tokens = config.get('max_tokens', DEFAULT_CONFIG['max_tokens'])
|
63 |
-
MODEL = config.get('model', DEFAULT_CONFIG['model'])
|
64 |
-
THEME = config.get('theme', DEFAULT_CONFIG['theme'])
|
65 |
-
GROUNDING_URLS = config.get('grounding_urls', DEFAULT_CONFIG['grounding_urls'])
|
66 |
-
ENABLE_DYNAMIC_URLS = config.get('enable_dynamic_urls', DEFAULT_CONFIG['enable_dynamic_urls'])
|
67 |
|
|
|
|
|
|
|
68 |
# Get access code from environment variable for security
|
69 |
# If ACCESS_CODE is not set, no access control is applied
|
70 |
ACCESS_CODE = os.environ.get("ACCESS_CODE")
|
|
|
71 |
|
72 |
# Get API key from environment - customizable variable name with validation
|
73 |
-
|
74 |
-
API_KEY = os.environ.get(API_KEY_VAR)
|
75 |
if API_KEY:
|
76 |
API_KEY = API_KEY.strip() # Remove any whitespace
|
77 |
if not API_KEY: # Check if empty after stripping
|
@@ -82,21 +53,21 @@ def validate_api_key():
|
|
82 |
"""Validate API key configuration with detailed logging"""
|
83 |
if not API_KEY:
|
84 |
print(f"⚠️ API KEY CONFIGURATION ERROR:")
|
85 |
-
print(f" Variable name:
|
86 |
print(f" Status: Not set or empty")
|
87 |
-
print(f" Action needed: Set '
|
88 |
print(f" Expected format: sk-or-xxxxxxxxxx")
|
89 |
return False
|
90 |
elif not API_KEY.startswith('sk-or-'):
|
91 |
print(f"⚠️ API KEY FORMAT WARNING:")
|
92 |
-
print(f" Variable name:
|
93 |
-
print(f" Current value: {API_KEY[:10]}..." if len(API_KEY) > 10 else
|
94 |
print(f" Expected format: sk-or-xxxxxxxxxx")
|
95 |
print(f" Note: OpenRouter keys should start with 'sk-or-'")
|
96 |
return True # Still try to use it
|
97 |
else:
|
98 |
print(f"✅ API Key configured successfully")
|
99 |
-
print(f" Variable:
|
100 |
print(f" Format: Valid OpenRouter key")
|
101 |
return True
|
102 |
|
@@ -153,13 +124,12 @@ def fetch_url_content(url):
|
|
153 |
|
154 |
# Smart truncation - try to end at sentence boundaries
|
155 |
if len(text) > 4000:
|
156 |
-
|
157 |
-
|
158 |
-
last_period
|
159 |
-
|
160 |
-
text = truncated_text[:last_period + 1]
|
161 |
else:
|
162 |
-
text =
|
163 |
|
164 |
return text if text.strip() else "No readable content found at this URL"
|
165 |
|
@@ -172,6 +142,7 @@ def fetch_url_content(url):
|
|
172 |
|
173 |
def extract_urls_from_text(text):
|
174 |
"""Extract URLs from text using regex with enhanced validation"""
|
|
|
175 |
url_pattern = r'https?://[^\s<>"{}|\\^`\[\]"]+'
|
176 |
urls = re.findall(url_pattern, text)
|
177 |
|
@@ -191,26 +162,18 @@ _url_content_cache = {}
|
|
191 |
|
192 |
def get_grounding_context():
|
193 |
"""Fetch context from grounding URLs with caching"""
|
194 |
-
|
195 |
-
urls = GROUNDING_URLS
|
196 |
-
if isinstance(urls, str):
|
197 |
-
try:
|
198 |
-
urls = json.loads(urls)
|
199 |
-
except:
|
200 |
-
urls = []
|
201 |
-
|
202 |
-
if not urls:
|
203 |
return ""
|
204 |
|
205 |
# Create cache key from URLs
|
206 |
-
cache_key = tuple(sorted([url for url in
|
207 |
|
208 |
# Check cache first
|
209 |
if cache_key in _url_content_cache:
|
210 |
return _url_content_cache[cache_key]
|
211 |
|
212 |
context_parts = []
|
213 |
-
for i, url in enumerate(
|
214 |
if url.strip():
|
215 |
content = fetch_url_content(url.strip())
|
216 |
# Add priority indicators
|
@@ -270,42 +233,30 @@ def generate_response(message, history):
|
|
270 |
error_msg += f"Please configure your OpenRouter API key:\n"
|
271 |
error_msg += f"1. Go to Settings (⚙️) in your HuggingFace Space\n"
|
272 |
error_msg += f"2. Click 'Variables and secrets'\n"
|
273 |
-
error_msg += f"3. Add secret: **
|
274 |
error_msg += f"4. Value: Your OpenRouter API key (starts with `sk-or-`)\n\n"
|
275 |
error_msg += f"Get your API key at: https://openrouter.ai/keys"
|
276 |
-
print(f"❌ API request failed: No API key configured for
|
277 |
return error_msg
|
278 |
|
279 |
# Get grounding context
|
280 |
grounding_context = get_grounding_context()
|
281 |
|
282 |
-
# Process uploaded files if any
|
283 |
-
file_context = ""
|
284 |
-
if files:
|
285 |
-
file_contents = []
|
286 |
-
for file_obj in files:
|
287 |
-
if file_obj is not None:
|
288 |
-
try:
|
289 |
-
file_content = extract_file_content(file_obj.name)
|
290 |
-
file_contents.append(file_content)
|
291 |
-
except Exception as e:
|
292 |
-
file_contents.append(f"Error processing file: {str(e)}")
|
293 |
|
294 |
-
if file_contents:
|
295 |
-
file_context = "\n\n[UPLOADED FILES]\n" + "\n\n".join(file_contents) + "\n"
|
296 |
-
|
297 |
# If dynamic URLs are enabled, check message for URLs to fetch
|
298 |
if ENABLE_DYNAMIC_URLS:
|
299 |
urls_in_message = extract_urls_from_text(message)
|
300 |
if urls_in_message:
|
301 |
-
|
|
|
302 |
for url in urls_in_message[:3]: # Limit to 3 URLs per message
|
303 |
content = fetch_url_content(url)
|
304 |
-
|
305 |
-
|
|
|
306 |
|
307 |
-
# Build enhanced system prompt with grounding context
|
308 |
-
enhanced_system_prompt = SYSTEM_PROMPT + grounding_context
|
309 |
|
310 |
# Build messages array for the API
|
311 |
messages = [{"role": "system", "content": enhanced_system_prompt}]
|
@@ -313,10 +264,15 @@ def generate_response(message, history):
|
|
313 |
# Add conversation history - handle both modern messages format and legacy tuples
|
314 |
for chat in history:
|
315 |
if isinstance(chat, dict):
|
|
|
316 |
messages.append(chat)
|
317 |
elif isinstance(chat, (list, tuple)) and len(chat) >= 2:
|
318 |
-
|
319 |
-
|
|
|
|
|
|
|
|
|
320 |
|
321 |
# Add current message
|
322 |
messages.append({"role": "user", "content": message})
|
@@ -332,14 +288,14 @@ def generate_response(message, history):
|
|
332 |
headers={
|
333 |
"Authorization": f"Bearer {API_KEY}",
|
334 |
"Content-Type": "application/json",
|
335 |
-
"HTTP-Referer": "https://huggingface.co",
|
336 |
-
"X-Title": "HuggingFace Space"
|
337 |
},
|
338 |
json={
|
339 |
"model": MODEL,
|
340 |
"messages": messages,
|
341 |
-
"temperature":
|
342 |
-
"max_tokens":
|
343 |
},
|
344 |
timeout=30
|
345 |
)
|
@@ -349,26 +305,40 @@ def generate_response(message, history):
|
|
349 |
if response.status_code == 200:
|
350 |
try:
|
351 |
result = response.json()
|
352 |
-
|
|
|
|
353 |
except (KeyError, IndexError, json.JSONDecodeError) as e:
|
354 |
-
|
355 |
-
|
356 |
-
error_msg += f"Error: {str(e)}\n\n"
|
357 |
-
error_msg += f"**Troubleshooting:**\n"
|
358 |
-
error_msg += f"1. Check OpenRouter service status\n"
|
359 |
-
error_msg += f"2. Try again in a few moments\n"
|
360 |
-
error_msg += f"3. Try a different model if available"
|
361 |
-
print(f"❌ Response parsing error: {str(e)}")
|
362 |
-
return error_msg
|
363 |
elif response.status_code == 401:
|
364 |
-
error_msg = f"
|
365 |
error_msg += f"Your API key appears to be invalid or expired.\n\n"
|
366 |
error_msg += f"**Troubleshooting:**\n"
|
367 |
-
error_msg += f"1. Check that your **
|
368 |
-
error_msg += f"2. Verify your
|
369 |
-
error_msg += f"3.
|
370 |
-
error_msg += f"4. Check
|
371 |
-
print(f"❌
|
372 |
return error_msg
|
373 |
elif response.status_code == 429:
|
374 |
error_msg = f"⏱️ **Rate Limit Exceeded**\n\n"
|
@@ -377,26 +347,30 @@ def generate_response(message, history):
|
|
377 |
error_msg += f"1. Wait 30-60 seconds before trying again\n"
|
378 |
error_msg += f"2. Check your OpenRouter usage limits\n"
|
379 |
error_msg += f"3. Consider upgrading your OpenRouter plan"
|
380 |
-
print(f"❌ Rate limit exceeded")
|
381 |
return error_msg
|
382 |
elif response.status_code == 400:
|
383 |
-
|
384 |
-
|
385 |
-
|
386 |
-
|
387 |
-
|
388 |
-
|
389 |
-
error_msg
|
390 |
-
|
|
|
|
|
|
|
|
|
|
|
391 |
return error_msg
|
392 |
else:
|
393 |
-
error_msg = f"
|
394 |
-
error_msg += f"An unexpected error occurred.\n"
|
395 |
-
error_msg += f"
|
396 |
-
error_msg += f"
|
397 |
-
error_msg += f"
|
398 |
-
error_msg += f"
|
399 |
-
error_msg += f"3. Contact support if this persists"
|
400 |
print(f"❌ API error: {response.status_code} - {response.text[:200]}")
|
401 |
return error_msg
|
402 |
|
@@ -419,10 +393,10 @@ def generate_response(message, history):
|
|
419 |
print(f"❌ Connection error to OpenRouter API")
|
420 |
return error_msg
|
421 |
except Exception as e:
|
422 |
-
error_msg = "❌ **Unexpected Error**\n\n"
|
423 |
-
error_msg += "An unexpected error occurred:\n"
|
424 |
error_msg += f"`{str(e)}`\n\n"
|
425 |
-
error_msg += "Please try again or contact support if this persists."
|
426 |
print(f"❌ Unexpected error: {str(e)}")
|
427 |
return error_msg
|
428 |
|
@@ -435,40 +409,38 @@ def verify_access_code(code):
|
|
435 |
global _access_granted_global
|
436 |
if ACCESS_CODE is None:
|
437 |
_access_granted_global = True
|
438 |
-
return gr.update(
|
439 |
|
440 |
if code == ACCESS_CODE:
|
441 |
_access_granted_global = True
|
442 |
-
return gr.update(
|
443 |
else:
|
444 |
_access_granted_global = False
|
445 |
-
return gr.update(value="❌
|
446 |
|
447 |
-
def protected_generate_response(message, history
|
448 |
"""Protected response function that checks access"""
|
449 |
# Check if access is granted via the global variable
|
450 |
if ACCESS_CODE is not None and not _access_granted_global:
|
451 |
return "Please enter the access code to continue."
|
452 |
-
return generate_response(message, history
|
453 |
|
454 |
# Global variable to store chat history for export
|
455 |
chat_history_store = []
|
456 |
|
457 |
-
def store_and_generate_response(message, history
|
458 |
"""Wrapper function that stores history and generates response"""
|
459 |
global chat_history_store
|
460 |
|
461 |
# Generate response using the protected function
|
462 |
-
response = protected_generate_response(message, history
|
463 |
|
464 |
# Convert current history to the format we need for export
|
465 |
# history comes in as [["user1", "bot1"], ["user2", "bot2"], ...]
|
466 |
chat_history_store = []
|
467 |
if history:
|
468 |
for exchange in history:
|
469 |
-
if isinstance(exchange,
|
470 |
-
chat_history_store.append(exchange)
|
471 |
-
elif isinstance(exchange, (list, tuple)) and len(exchange) >= 2:
|
472 |
chat_history_store.append({"role": "user", "content": exchange[0]})
|
473 |
chat_history_store.append({"role": "assistant", "content": exchange[1]})
|
474 |
|
@@ -499,205 +471,144 @@ def export_conversation(history):
|
|
499 |
|
500 |
markdown_content = export_conversation_to_markdown(history)
|
501 |
|
502 |
-
#
|
503 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
504 |
f.write(markdown_content)
|
505 |
-
temp_file = f.name
|
506 |
|
507 |
return gr.update(value=temp_file, visible=True)
|
508 |
|
509 |
# Configuration status display
|
510 |
def get_configuration_status():
|
511 |
-
"""Generate a
|
512 |
status_parts = []
|
513 |
|
514 |
-
#
|
515 |
-
status_parts.append(
|
516 |
-
|
517 |
-
|
518 |
-
status_parts.append(f"**Temperature:** {temperature}")
|
519 |
-
status_parts.append(f"**Max Response Tokens:** {max_tokens}")
|
520 |
-
status_parts.append("")
|
521 |
-
|
522 |
-
# Example prompts
|
523 |
-
status_parts.append("")
|
524 |
-
examples_list = config.get('examples', [])
|
525 |
-
if isinstance(examples_list, str):
|
526 |
-
try:
|
527 |
-
import ast
|
528 |
-
examples_list = ast.literal_eval(examples_list)
|
529 |
-
except:
|
530 |
-
examples_list = []
|
531 |
-
|
532 |
-
if examples_list and len(examples_list) > 0:
|
533 |
-
status_parts.append("**Example Prompts:**")
|
534 |
-
for example in examples_list[:5]: # Show first 5 examples
|
535 |
-
status_parts.append(f"• {example}")
|
536 |
-
if len(examples_list) > 5:
|
537 |
-
status_parts.append(f"• ... and {len(examples_list) - 5} more")
|
538 |
else:
|
539 |
-
status_parts.append("**
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
540 |
|
541 |
# URL Context if configured
|
542 |
-
|
543 |
-
|
544 |
-
|
545 |
-
|
546 |
-
|
547 |
-
|
548 |
-
|
549 |
-
|
550 |
-
|
551 |
-
|
552 |
-
|
553 |
-
|
554 |
-
|
555 |
-
|
556 |
-
|
557 |
-
|
558 |
-
#
|
559 |
-
status_parts.append("")
|
560 |
-
|
561 |
-
|
562 |
-
|
563 |
-
status_parts.append("")
|
564 |
-
if not API_KEY_VALID:
|
565 |
-
status_parts.append(f"**Note:** API key ({API_KEY_VAR}) not configured in Space secrets")
|
566 |
|
567 |
return "\n".join(status_parts)
|
568 |
|
569 |
-
# HuggingFace Authentication Utility
|
570 |
-
def verify_hf_token_access():
|
571 |
-
"""Verify HF_TOKEN has write access to the space"""
|
572 |
-
hf_token = os.environ.get("HF_TOKEN")
|
573 |
-
space_id = os.environ.get("SPACE_ID")
|
574 |
-
|
575 |
-
if not hf_token or not space_id:
|
576 |
-
return False, "Missing HF_TOKEN or SPACE_ID environment variables"
|
577 |
-
|
578 |
-
try:
|
579 |
-
from huggingface_hub import HfApi
|
580 |
-
api = HfApi(token=hf_token)
|
581 |
-
# Test access by getting space info
|
582 |
-
api.space_info(space_id)
|
583 |
-
return True, "Authenticated successfully"
|
584 |
-
except Exception as e:
|
585 |
-
return False, f"Authentication failed: {str(e)}"
|
586 |
-
|
587 |
# Create interface with access code protection
|
588 |
# Dynamically set theme based on configuration
|
589 |
theme_class = getattr(gr.themes, THEME, gr.themes.Default)
|
590 |
with gr.Blocks(title=SPACE_NAME, theme=theme_class()) as demo:
|
591 |
-
#
|
592 |
-
|
593 |
-
|
594 |
-
|
595 |
-
|
596 |
-
|
597 |
-
|
598 |
-
|
599 |
-
|
600 |
-
|
601 |
-
|
602 |
-
|
603 |
-
|
604 |
-
|
605 |
-
|
606 |
-
|
607 |
-
|
608 |
-
|
609 |
-
|
610 |
-
|
611 |
-
|
612 |
-
|
613 |
-
|
614 |
-
|
615 |
-
|
616 |
-
|
617 |
-
|
618 |
-
|
619 |
-
|
620 |
-
|
621 |
-
|
622 |
-
|
623 |
-
|
624 |
-
|
625 |
-
|
626 |
-
|
627 |
-
|
628 |
-
|
629 |
-
|
630 |
-
|
631 |
-
|
632 |
-
|
633 |
-
|
634 |
-
|
635 |
-
|
636 |
-
|
637 |
-
|
638 |
-
|
639 |
-
|
640 |
-
|
641 |
-
|
642 |
-
|
643 |
-
|
644 |
-
|
645 |
-
|
646 |
-
|
647 |
-
|
648 |
-
|
649 |
-
|
650 |
-
|
651 |
-
file_count="multiple",
|
652 |
-
visible=True
|
653 |
-
)
|
654 |
-
]
|
655 |
-
)
|
656 |
-
|
657 |
-
# Export functionality
|
658 |
-
with gr.Row():
|
659 |
-
export_btn = gr.Button("📥 Export Conversation", variant="secondary", size="sm")
|
660 |
-
export_file = gr.File(label="Download", visible=False)
|
661 |
-
|
662 |
-
# Connect export functionality
|
663 |
-
export_btn.click(
|
664 |
-
export_current_conversation,
|
665 |
-
outputs=[export_file]
|
666 |
-
)
|
667 |
-
|
668 |
-
# Configuration status
|
669 |
-
with gr.Accordion("Configuration", open=False):
|
670 |
-
gr.Markdown(get_configuration_status())
|
671 |
-
|
672 |
-
# Connect access verification within tab context
|
673 |
-
if ACCESS_CODE is not None:
|
674 |
-
access_btn.click(
|
675 |
-
verify_access_code,
|
676 |
-
inputs=[access_input],
|
677 |
-
outputs=[access_error, chat_section, access_granted]
|
678 |
-
)
|
679 |
-
access_input.submit(
|
680 |
-
verify_access_code,
|
681 |
-
inputs=[access_input],
|
682 |
-
outputs=[access_error, chat_section, access_granted]
|
683 |
-
)
|
684 |
-
|
685 |
-
# Add Configuration tab (only visible with valid HF_TOKEN)
|
686 |
-
with gr.Tab("Configuration", visible=HF_ACCESS_VALID) as config_tab:
|
687 |
-
gr.Markdown("## Configuration Management")
|
688 |
|
689 |
-
#
|
690 |
-
|
691 |
-
gr.
|
692 |
-
|
693 |
-
|
694 |
-
|
695 |
-
|
696 |
-
|
697 |
-
|
|
|
|
|
698 |
|
699 |
-
# Configuration editor (
|
700 |
-
with gr.Column(visible=
|
701 |
gr.Markdown("### Edit Assistant Configuration")
|
702 |
gr.Markdown("⚠️ **Warning:** Changes will affect all users immediately.")
|
703 |
|
@@ -706,11 +617,29 @@ with gr.Blocks(title=SPACE_NAME, theme=theme_class()) as demo:
|
|
706 |
with open('config.json', 'r') as f:
|
707 |
current_config = json.load(f)
|
708 |
except:
|
709 |
-
|
710 |
-
|
|
|
|
|
711 |
|
712 |
-
#
|
713 |
-
# System Prompt
|
714 |
edit_system_prompt = gr.Textbox(
|
715 |
label="System Prompt",
|
716 |
value=current_config.get('system_prompt', SYSTEM_PROMPT),
|
@@ -732,8 +661,8 @@ with gr.Blocks(title=SPACE_NAME, theme=theme_class()) as demo:
|
|
732 |
],
|
733 |
value=current_config.get('model', MODEL)
|
734 |
)
|
735 |
-
|
736 |
-
# 4. Example
|
737 |
examples_value = current_config.get('examples', [])
|
738 |
if isinstance(examples_value, list):
|
739 |
examples_text_value = "\n".join(examples_value)
|
@@ -753,18 +682,18 @@ with gr.Blocks(title=SPACE_NAME, theme=theme_class()) as demo:
|
|
753 |
label="Temperature",
|
754 |
minimum=0,
|
755 |
maximum=2,
|
756 |
-
value=current_config.get('temperature', 0.
|
757 |
step=0.1
|
758 |
)
|
759 |
edit_max_tokens = gr.Slider(
|
760 |
label="Max Tokens",
|
761 |
minimum=50,
|
762 |
maximum=4096,
|
763 |
-
value=current_config.get('max_tokens',
|
764 |
step=50
|
765 |
)
|
766 |
|
767 |
-
# URL Grounding
|
768 |
gr.Markdown("### URL Grounding")
|
769 |
grounding_urls_value = current_config.get('grounding_urls', [])
|
770 |
if isinstance(grounding_urls_value, str):
|
@@ -791,23 +720,39 @@ with gr.Blocks(title=SPACE_NAME, theme=theme_class()) as demo:
|
|
791 |
)
|
792 |
|
793 |
with gr.Row():
|
794 |
-
save_config_btn = gr.Button("Save Configuration", variant="primary")
|
795 |
-
reset_config_btn = gr.Button("Reset to Defaults", variant="secondary")
|
796 |
|
797 |
config_status = gr.Markdown("")
|
798 |
|
|
|
|
|
799 |
|
800 |
# Save configuration function
|
801 |
-
def save_configuration(
|
802 |
if not is_authenticated:
|
803 |
-
return "Not authenticated"
|
804 |
|
805 |
# Check if configuration is already locked
|
806 |
try:
|
807 |
with open('config.json', 'r') as f:
|
808 |
existing_config = json.load(f)
|
809 |
if existing_config.get('locked', False):
|
810 |
-
return "Configuration is locked and cannot be modified"
|
811 |
except:
|
812 |
pass
|
813 |
|
@@ -816,8 +761,8 @@ with gr.Blocks(title=SPACE_NAME, theme=theme_class()) as demo:
|
|
816 |
with open('config.json', 'r') as f:
|
817 |
current_full_config = json.load(f)
|
818 |
except:
|
819 |
-
# If config.json doesn't exist, use
|
820 |
-
current_full_config =
|
821 |
|
822 |
# Process example prompts
|
823 |
examples_list = [ex.strip() for ex in new_examples.split('\n') if ex.strip()]
|
@@ -828,27 +773,10 @@ with gr.Blocks(title=SPACE_NAME, theme=theme_class()) as demo:
|
|
828 |
# Filter out empty URLs
|
829 |
grounding_urls = [url.strip() for url in urls if url.strip()]
|
830 |
|
831 |
-
# Create backup before making changes
|
832 |
-
try:
|
833 |
-
# Create backups directory if it doesn't exist
|
834 |
-
os.makedirs('config_backups', exist_ok=True)
|
835 |
-
|
836 |
-
# Create timestamped backup
|
837 |
-
backup_filename = f"config_backups/config_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"
|
838 |
-
with open(backup_filename, 'w') as backup_file:
|
839 |
-
json.dump(current_full_config, backup_file, indent=2)
|
840 |
-
|
841 |
-
# Keep only last 10 backups
|
842 |
-
backups = sorted([f for f in os.listdir('config_backups') if f.endswith('.json')])
|
843 |
-
if len(backups) > 10:
|
844 |
-
for old_backup in backups[:-10]:
|
845 |
-
os.remove(os.path.join('config_backups', old_backup))
|
846 |
-
except Exception as backup_error:
|
847 |
-
print(f"Warning: Could not create backup: {backup_error}")
|
848 |
-
# Continue with save even if backup fails
|
849 |
-
|
850 |
# Update all editable fields while preserving everything else
|
851 |
current_full_config.update({
|
|
|
|
|
852 |
'system_prompt': new_prompt,
|
853 |
'model': new_model,
|
854 |
'examples': examples_list,
|
@@ -864,52 +792,17 @@ with gr.Blocks(title=SPACE_NAME, theme=theme_class()) as demo:
|
|
864 |
with open('config.json', 'w') as f:
|
865 |
json.dump(current_full_config, f, indent=2)
|
866 |
|
867 |
-
#
|
868 |
-
|
869 |
-
space_id = os.environ.get("SPACE_ID")
|
870 |
|
871 |
-
|
872 |
-
try:
|
873 |
-
from huggingface_hub import HfApi, CommitOperationAdd, restart_space
|
874 |
-
api = HfApi(token=hf_token)
|
875 |
-
|
876 |
-
# Create commit operation to upload config.json
|
877 |
-
operations = [
|
878 |
-
CommitOperationAdd(
|
879 |
-
path_or_fileobj="config.json",
|
880 |
-
path_in_repo="config.json"
|
881 |
-
)
|
882 |
-
]
|
883 |
-
|
884 |
-
# Create commit with updated configuration
|
885 |
-
api.create_commit(
|
886 |
-
repo_id=space_id,
|
887 |
-
operations=operations,
|
888 |
-
commit_message=f"Update configuration by faculty at {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}",
|
889 |
-
commit_description="Faculty configuration update through web interface",
|
890 |
-
repo_type="space",
|
891 |
-
token=hf_token
|
892 |
-
)
|
893 |
-
|
894 |
-
# Automatic restart
|
895 |
-
try:
|
896 |
-
restart_space(space_id, token=hf_token)
|
897 |
-
return f"✅ Configuration saved and committed at {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n🔄 **Space is restarting automatically!**\n\nThe page will refresh in about 30 seconds. Your changes will be applied."
|
898 |
-
except Exception as restart_error:
|
899 |
-
print(f"Could not auto-restart: {restart_error}")
|
900 |
-
return f"✅ Configuration saved and committed at {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n🔄 **Please restart manually** (auto-restart failed)\n\n1. Go to Settings (⚙️)\n2. Click 'Factory reboot'\n3. Wait ~30 seconds"
|
901 |
-
except Exception as commit_error:
|
902 |
-
print(f"Note: Could not auto-commit to repository: {commit_error}")
|
903 |
-
return f"✅ Configuration saved locally at {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n🔄 **Manual Restart Required**\nFor changes to take effect:\n1. Go to Settings (⚙️)\n2. Click 'Factory reboot'\n3. Wait ~30 seconds for restart"
|
904 |
-
else:
|
905 |
-
return f"✅ Configuration saved at {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n🔄 **Manual Restart Required**\nFor changes to take effect:\n1. Go to Settings (⚙️)\n2. Click 'Factory reboot'\n3. Wait ~30 seconds for restart"
|
906 |
except Exception as e:
|
907 |
return f"❌ Error saving configuration: {str(e)}"
|
908 |
|
909 |
# Reset configuration function
|
910 |
def reset_configuration(is_authenticated):
|
911 |
if not is_authenticated:
|
912 |
-
updates = ["Not authenticated"] + [gr.update() for _ in range(
|
913 |
return tuple(updates)
|
914 |
|
915 |
# Check if locked
|
@@ -917,7 +810,7 @@ with gr.Blocks(title=SPACE_NAME, theme=theme_class()) as demo:
|
|
917 |
with open('config.json', 'r') as f:
|
918 |
existing_config = json.load(f)
|
919 |
if existing_config.get('locked', False):
|
920 |
-
updates = ["Configuration is locked"] + [gr.update() for _ in range(
|
921 |
return tuple(updates)
|
922 |
except:
|
923 |
pass
|
@@ -929,25 +822,25 @@ with gr.Blocks(title=SPACE_NAME, theme=theme_class()) as demo:
|
|
929 |
else:
|
930 |
examples_text = ""
|
931 |
|
932 |
-
# Get default URLs
|
933 |
default_urls = DEFAULT_CONFIG.get('grounding_urls', [])
|
934 |
if isinstance(default_urls, str):
|
935 |
try:
|
936 |
-
import
|
937 |
-
default_urls =
|
938 |
except:
|
939 |
default_urls = []
|
940 |
-
elif not isinstance(default_urls, list):
|
941 |
-
default_urls = []
|
942 |
|
943 |
# Reset to original default values
|
944 |
updates = [
|
945 |
-
"Reset to default values",
|
|
|
|
|
946 |
gr.update(value=DEFAULT_CONFIG.get('system_prompt', SYSTEM_PROMPT)),
|
947 |
gr.update(value=DEFAULT_CONFIG.get('model', MODEL)),
|
948 |
gr.update(value=examples_text),
|
949 |
-
gr.update(value=DEFAULT_CONFIG.get('temperature',
|
950 |
-
gr.update(value=DEFAULT_CONFIG.get('max_tokens',
|
951 |
]
|
952 |
|
953 |
# Add URL updates
|
@@ -957,19 +850,33 @@ with gr.Blocks(title=SPACE_NAME, theme=theme_class()) as demo:
|
|
957 |
|
958 |
return tuple(updates)
|
959 |
|
|
|
|
|
|
960 |
|
961 |
# Connect configuration buttons
|
962 |
save_config_btn.click(
|
963 |
save_configuration,
|
964 |
-
inputs=[
|
965 |
outputs=[config_status]
|
966 |
)
|
967 |
|
968 |
reset_config_btn.click(
|
969 |
reset_configuration,
|
970 |
inputs=[faculty_auth_state],
|
971 |
-
outputs=[config_status, edit_system_prompt, edit_model, edit_examples, edit_temperature, edit_max_tokens] + url_fields
|
972 |
)
|
|
|
|
|
973 |
|
974 |
if __name__ == "__main__":
|
975 |
demo.launch()
|
|
|
10 |
|
11 |
|
12 |
# Configuration
|
13 |
+
SPACE_NAME = "'AI Assistant'"
|
14 |
+
SPACE_DESCRIPTION = "'A customizable AI assistant'"
|
15 |
|
16 |
+
# Default configuration values
|
17 |
+
DEFAULT_SYSTEM_PROMPT = """You are a sentence-level writing and composition assistant specializing in micro-writing pedagogy. Work with student submissions of single sentences to one paragraph maximum, providing focused feedback on sentence structure, word choice, clarity, and flow. Guide students through incremental, single-paragraph improvements rather than wholesale rewrites and emphasize the building blocks of process-based writing and the rhetorical grammar of subject-verb relationships, citational practice, transitions and signposting, prosody and rhythm. Focus on sentence structure clarity and variety, precise word choice, transitional logic between ideas, concrete specificity over vague generalizations and genre-specific tone and voice. Frequently offer 1-2 specific, actionable suggestions per submission, celebrate what's working before suggesting improvements. Briefly unpack the **why** behind suggestions and ask at least one simple follow-up question to facilitate the next cycle. Maintain high fidelity to existing versions as a rule of thumb, and expect to complete the response in less than 400 tokens."""
|
18 |
+
DEFAULT_TEMPERATURE = 0.5
|
19 |
+
DEFAULT_MAX_TOKENS = 450
|
|
|
|
|
20 |
|
21 |
+
# Try to load configuration from file (if modified by faculty)
|
22 |
+
try:
|
23 |
+
with open('config.json', 'r') as f:
|
24 |
+
saved_config = json.load(f)
|
25 |
+
SYSTEM_PROMPT = saved_config.get('system_prompt', DEFAULT_SYSTEM_PROMPT)
|
26 |
+
temperature = saved_config.get('temperature', DEFAULT_TEMPERATURE)
|
27 |
+
max_tokens = saved_config.get('max_tokens', DEFAULT_MAX_TOKENS)
|
28 |
+
print("✅ Loaded configuration from config.json")
|
29 |
+
except:
|
30 |
+
# Use defaults if no config file or error
|
31 |
+
SYSTEM_PROMPT = DEFAULT_SYSTEM_PROMPT
|
32 |
+
temperature = DEFAULT_TEMPERATURE
|
33 |
+
max_tokens = DEFAULT_MAX_TOKENS
|
34 |
+
print("ℹ️ Using default configuration")
|
|
|
|
|
35 |
|
36 |
+
MODEL = "'anthropic/claude-3.5-sonnet'"
|
37 |
+
THEME = "Ocean" # Gradio theme name
|
38 |
+
GROUNDING_URLS = ["https://owl.purdue.edu/owl/general_writing/index.html", "https://en.wikipedia.org/wiki/Academic_writing", "https://en.wikipedia.org/wiki/Essay"]
|
39 |
# Get access code from environment variable for security
|
40 |
# If ACCESS_CODE is not set, no access control is applied
|
41 |
ACCESS_CODE = os.environ.get("ACCESS_CODE")
|
42 |
+
ENABLE_DYNAMIC_URLS = True
|
43 |
|
44 |
# Get API key from environment - customizable variable name with validation
|
45 |
+
API_KEY = os.environ.get("'API_KEY'")
|
|
|
46 |
if API_KEY:
|
47 |
API_KEY = API_KEY.strip() # Remove any whitespace
|
48 |
if not API_KEY: # Check if empty after stripping
|
|
|
53 |
"""Validate API key configuration with detailed logging"""
|
54 |
if not API_KEY:
|
55 |
print(f"⚠️ API KEY CONFIGURATION ERROR:")
|
56 |
+
print(f" Variable name: 'API_KEY'")
|
57 |
print(f" Status: Not set or empty")
|
58 |
+
print(f" Action needed: Set ''API_KEY'' in HuggingFace Space secrets")
|
59 |
print(f" Expected format: sk-or-xxxxxxxxxx")
|
60 |
return False
|
61 |
elif not API_KEY.startswith('sk-or-'):
|
62 |
print(f"⚠️ API KEY FORMAT WARNING:")
|
63 |
+
print(f" Variable name: 'API_KEY'")
|
64 |
+
print(f" Current value: {API_KEY[:10]}..." if len(API_KEY) > 10 else API_KEY)
|
65 |
print(f" Expected format: sk-or-xxxxxxxxxx")
|
66 |
print(f" Note: OpenRouter keys should start with 'sk-or-'")
|
67 |
return True # Still try to use it
|
68 |
else:
|
69 |
print(f"✅ API Key configured successfully")
|
70 |
+
print(f" Variable: 'API_KEY'")
|
71 |
print(f" Format: Valid OpenRouter key")
|
72 |
return True
|
73 |
|
|
|
124 |
|
125 |
# Smart truncation - try to end at sentence boundaries
|
126 |
if len(text) > 4000:
|
127 |
+
truncated = text[:4000]
|
128 |
+
last_period = truncated.rfind('.')
|
129 |
+
if last_period > 3000: # If we can find a reasonable sentence break
|
130 |
+
text = truncated[:last_period + 1]
|
|
|
131 |
else:
|
132 |
+
text = truncated + "..."
|
133 |
|
134 |
return text if text.strip() else "No readable content found at this URL"
|
135 |
|
|
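A minimal standalone sketch of the sentence-boundary truncation added above (illustrative only, not part of the committed file; the 4000-character cap and 3000-character break threshold mirror the values in fetch_url_content, and the helper name is hypothetical):

```python
# Illustrative sketch - mirrors the truncation logic added to fetch_url_content above.
def truncate_at_sentence(text, limit=4000, min_break=3000):
    """Trim text to `limit` characters, preferring to end at a period."""
    if len(text) <= limit:
        return text
    truncated = text[:limit]
    last_period = truncated.rfind('.')
    # Only cut at the period if it leaves a reasonably long excerpt
    if last_period > min_break:
        return truncated[:last_period + 1]
    return truncated + "..."

print(truncate_at_sentence("A short sentence."))   # returned unchanged: under the limit
print(len(truncate_at_sentence("word " * 2000)))   # capped near 4000 characters
```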
|
142 |
|
143 |
def extract_urls_from_text(text):
|
144 |
"""Extract URLs from text using regex with enhanced validation"""
|
145 |
+
import re
|
146 |
url_pattern = r'https?://[^\s<>"{}|\\^`\[\]"]+'
|
147 |
urls = re.findall(url_pattern, text)
|
148 |
|
|
|
162 |
|
163 |
def get_grounding_context():
|
164 |
"""Fetch context from grounding URLs with caching"""
|
165 |
+
if not GROUNDING_URLS:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
166 |
return ""
|
167 |
|
168 |
# Create cache key from URLs
|
169 |
+
cache_key = tuple(sorted([url for url in GROUNDING_URLS if url and url.strip()]))
|
170 |
|
171 |
# Check cache first
|
172 |
if cache_key in _url_content_cache:
|
173 |
return _url_content_cache[cache_key]
|
174 |
|
175 |
context_parts = []
|
176 |
+
for i, url in enumerate(GROUNDING_URLS, 1):
|
177 |
if url.strip():
|
178 |
content = fetch_url_content(url.strip())
|
179 |
# Add priority indicators
|
|
|
233 |
error_msg += f"Please configure your OpenRouter API key:\n"
|
234 |
error_msg += f"1. Go to Settings (⚙️) in your HuggingFace Space\n"
|
235 |
error_msg += f"2. Click 'Variables and secrets'\n"
|
236 |
+
error_msg += f"3. Add secret: **'API_KEY'**\n"
|
237 |
error_msg += f"4. Value: Your OpenRouter API key (starts with `sk-or-`)\n\n"
|
238 |
error_msg += f"Get your API key at: https://openrouter.ai/keys"
|
239 |
+
print(f"❌ API request failed: No API key configured for 'API_KEY'")
|
240 |
return error_msg
|
241 |
|
242 |
# Get grounding context
|
243 |
grounding_context = get_grounding_context()
|
244 |
|
|
|
|
|
245 |
|
|
|
|
|
|
|
246 |
# If dynamic URLs are enabled, check message for URLs to fetch
|
247 |
if ENABLE_DYNAMIC_URLS:
|
248 |
urls_in_message = extract_urls_from_text(message)
|
249 |
if urls_in_message:
|
250 |
+
# Fetch content from URLs mentioned in the message
|
251 |
+
dynamic_context_parts = []
|
252 |
for url in urls_in_message[:3]: # Limit to 3 URLs per message
|
253 |
content = fetch_url_content(url)
|
254 |
+
dynamic_context_parts.append(f"\n\nDynamic context from {url}:\n{content}")
|
255 |
+
if dynamic_context_parts:
|
256 |
+
grounding_context += "\n".join(dynamic_context_parts)
|
257 |
|
258 |
+
# Build enhanced system prompt with grounding context
|
259 |
+
enhanced_system_prompt = SYSTEM_PROMPT + grounding_context
|
260 |
|
261 |
# Build messages array for the API
|
262 |
messages = [{"role": "system", "content": enhanced_system_prompt}]
|
|
|
264 |
# Add conversation history - handle both modern messages format and legacy tuples
|
265 |
for chat in history:
|
266 |
if isinstance(chat, dict):
|
267 |
+
# Modern format: {"role": "user", "content": "..."} or {"role": "assistant", "content": "..."}
|
268 |
messages.append(chat)
|
269 |
elif isinstance(chat, (list, tuple)) and len(chat) >= 2:
|
270 |
+
# Legacy format: ["user msg", "assistant msg"] or ("user msg", "assistant msg")
|
271 |
+
user_msg, assistant_msg = chat[0], chat[1]
|
272 |
+
if user_msg:
|
273 |
+
messages.append({"role": "user", "content": user_msg})
|
274 |
+
if assistant_msg:
|
275 |
+
messages.append({"role": "assistant", "content": assistant_msg})
|
276 |
|
277 |
# Add current message
|
278 |
messages.append({"role": "user", "content": message})
|
|
|
288 |
headers={
|
289 |
"Authorization": f"Bearer {API_KEY}",
|
290 |
"Content-Type": "application/json",
|
291 |
+
"HTTP-Referer": "https://huggingface.co", # Required by some providers
|
292 |
+
"X-Title": "HuggingFace Space" # Helpful for tracking
|
293 |
},
|
294 |
json={
|
295 |
"model": MODEL,
|
296 |
"messages": messages,
|
297 |
+
"temperature": 0.5,
|
298 |
+
"max_tokens": 450
|
299 |
},
|
300 |
timeout=30
|
301 |
)
|
|
|
305 |
if response.status_code == 200:
|
306 |
try:
|
307 |
result = response.json()
|
308 |
+
|
309 |
+
# Enhanced validation of API response structure
|
310 |
+
if 'choices' not in result or not result['choices']:
|
311 |
+
print(f"⚠️ API response missing choices: {result}")
|
312 |
+
return "API Error: No response choices available"
|
313 |
+
elif 'message' not in result['choices'][0]:
|
314 |
+
print(f"⚠️ API response missing message: {result}")
|
315 |
+
return "API Error: No message in response"
|
316 |
+
elif 'content' not in result['choices'][0]['message']:
|
317 |
+
print(f"⚠️ API response missing content: {result}")
|
318 |
+
return "API Error: No content in message"
|
319 |
+
else:
|
320 |
+
content = result['choices'][0]['message']['content']
|
321 |
+
|
322 |
+
# Check for empty content
|
323 |
+
if not content or content.strip() == "":
|
324 |
+
print(f"⚠️ API returned empty content")
|
325 |
+
return "API Error: Empty response content"
|
326 |
+
|
327 |
+
print(f"✅ API request successful")
|
328 |
+
return content
|
329 |
+
|
330 |
except (KeyError, IndexError, json.JSONDecodeError) as e:
|
331 |
+
print(f"❌ Failed to parse API response: {str(e)}")
|
332 |
+
return f"API Error: Failed to parse response - {str(e)}"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
333 |
elif response.status_code == 401:
|
334 |
+
error_msg = f"🔐 **Authentication Error**\n\n"
|
335 |
error_msg += f"Your API key appears to be invalid or expired.\n\n"
|
336 |
error_msg += f"**Troubleshooting:**\n"
|
337 |
+
error_msg += f"1. Check that your **'API_KEY'** secret is set correctly\n"
|
338 |
+
error_msg += f"2. Verify your API key at: https://openrouter.ai/keys\n"
|
339 |
+
error_msg += f"3. Ensure your key starts with `sk-or-`\n"
|
340 |
+
error_msg += f"4. Check that you have credits on your OpenRouter account"
|
341 |
+
print(f"❌ API authentication failed: {response.status_code} - {response.text[:200]}")
|
342 |
return error_msg
|
343 |
elif response.status_code == 429:
|
344 |
error_msg = f"⏱️ **Rate Limit Exceeded**\n\n"
|
|
|
347 |
error_msg += f"1. Wait 30-60 seconds before trying again\n"
|
348 |
error_msg += f"2. Check your OpenRouter usage limits\n"
|
349 |
error_msg += f"3. Consider upgrading your OpenRouter plan"
|
350 |
+
print(f"❌ Rate limit exceeded: {response.status_code}")
|
351 |
return error_msg
|
352 |
elif response.status_code == 400:
|
353 |
+
try:
|
354 |
+
error_data = response.json()
|
355 |
+
error_message = error_data.get('error', {}).get('message', 'Unknown error')
|
356 |
+
except:
|
357 |
+
error_message = response.text
|
358 |
+
|
359 |
+
error_msg = f"⚠️ **Request Error**\n\n"
|
360 |
+
error_msg += f"The API request was invalid:\n"
|
361 |
+
error_msg += f"`{error_message}`\n\n"
|
362 |
+
if "model" in error_message.lower():
|
363 |
+
error_msg += f"**Model Issue:** The model `{MODEL}` may not be available.\n"
|
364 |
+
error_msg += f"Try switching to a different model in your Space configuration."
|
365 |
+
print(f"❌ Bad request: {response.status_code} - {error_message}")
|
366 |
return error_msg
|
367 |
else:
|
368 |
+
error_msg = f"🚫 **API Error {response.status_code}**\n\n"
|
369 |
+
error_msg += f"An unexpected error occurred. Please try again.\n\n"
|
370 |
+
error_msg += f"If this persists, check:\n"
|
371 |
+
error_msg += f"1. OpenRouter service status\n"
|
372 |
+
error_msg += f"2. Your API key and credits\n"
|
373 |
+
error_msg += f"3. The model availability"
|
|
|
374 |
print(f"❌ API error: {response.status_code} - {response.text[:200]}")
|
375 |
return error_msg
|
376 |
|
|
|
393 |
print(f"❌ Connection error to OpenRouter API")
|
394 |
return error_msg
|
395 |
except Exception as e:
|
396 |
+
error_msg = f"❌ **Unexpected Error**\n\n"
|
397 |
+
error_msg += f"An unexpected error occurred:\n"
|
398 |
error_msg += f"`{str(e)}`\n\n"
|
399 |
+
error_msg += f"Please try again or contact support if this persists."
|
400 |
print(f"❌ Unexpected error: {str(e)}")
|
401 |
return error_msg
|
402 |
|
|
|
409 |
global _access_granted_global
|
410 |
if ACCESS_CODE is None:
|
411 |
_access_granted_global = True
|
412 |
+
return gr.update(visible=False), gr.update(visible=True), gr.update(value=True)
|
413 |
|
414 |
if code == ACCESS_CODE:
|
415 |
_access_granted_global = True
|
416 |
+
return gr.update(visible=False), gr.update(visible=True), gr.update(value=True)
|
417 |
else:
|
418 |
_access_granted_global = False
|
419 |
+
return gr.update(visible=True, value="❌ Incorrect access code. Please try again."), gr.update(visible=False), gr.update(value=False)
|
420 |
|
421 |
+
def protected_generate_response(message, history):
|
422 |
"""Protected response function that checks access"""
|
423 |
# Check if access is granted via the global variable
|
424 |
if ACCESS_CODE is not None and not _access_granted_global:
|
425 |
return "Please enter the access code to continue."
|
426 |
+
return generate_response(message, history)
|
427 |
|
428 |
# Global variable to store chat history for export
|
429 |
chat_history_store = []
|
430 |
|
431 |
+
def store_and_generate_response(message, history):
|
432 |
"""Wrapper function that stores history and generates response"""
|
433 |
global chat_history_store
|
434 |
|
435 |
# Generate response using the protected function
|
436 |
+
response = protected_generate_response(message, history)
|
437 |
|
438 |
# Convert current history to the format we need for export
|
439 |
# history comes in as [["user1", "bot1"], ["user2", "bot2"], ...]
|
440 |
chat_history_store = []
|
441 |
if history:
|
442 |
for exchange in history:
|
443 |
+
if isinstance(exchange, (list, tuple)) and len(exchange) >= 2:
|
|
|
|
|
444 |
chat_history_store.append({"role": "user", "content": exchange[0]})
|
445 |
chat_history_store.append({"role": "assistant", "content": exchange[1]})
|
446 |
|
|
|
471 |
|
472 |
markdown_content = export_conversation_to_markdown(history)
|
473 |
|
474 |
+
# Create logical filename: conversation_YYYYMMDD_HHMMSS.md
|
475 |
+
filename = create_safe_filename("conversation", suffix=".md", include_timestamp=True)
|
476 |
+
|
477 |
+
# Save to temporary file with logical name
|
478 |
+
import tempfile
|
479 |
+
import os
|
480 |
+
temp_dir = tempfile.gettempdir()
|
481 |
+
temp_file = os.path.join(temp_dir, filename)
|
482 |
+
|
483 |
+
with open(temp_file, 'w', encoding='utf-8') as f:
|
484 |
f.write(markdown_content)
|
|
|
485 |
|
486 |
return gr.update(value=temp_file, visible=True)
|
487 |
|
488 |
# Configuration status display
|
489 |
def get_configuration_status():
|
490 |
+
"""Generate a configuration status message for display"""
|
491 |
status_parts = []
|
492 |
|
493 |
+
# API Key status
|
494 |
+
status_parts.append("### 🔑 API Configuration")
|
495 |
+
if API_KEY_VALID:
|
496 |
+
status_parts.append("✅ **API Key:** Ready")
|
|
|
|
|
497 |
else:
|
498 |
+
status_parts.append("❌ **API Key:** Not configured")
|
499 |
+
status_parts.append(" Set `'API_KEY'` in Space secrets")
|
500 |
+
|
501 |
+
# Model and parameters
|
502 |
+
status_parts.append("") # Blank line
|
503 |
+
status_parts.append("### 🤖 Model Settings")
|
504 |
+
status_parts.append(f"**Model:** {MODEL.split('/')[-1]}")
|
505 |
+
status_parts.append(f"**Temperature:** 0.5")
|
506 |
+
status_parts.append(f"**Max Tokens:** 450")
|
507 |
|
508 |
# URL Context if configured
|
509 |
+
if GROUNDING_URLS:
|
510 |
+
status_parts.append("") # Blank line
|
511 |
+
status_parts.append("### 🔗 Context Sources")
|
512 |
+
status_parts.append(f"**URLs Configured:** {len(GROUNDING_URLS)}")
|
513 |
+
for i, url in enumerate(GROUNDING_URLS[:2], 1):
|
514 |
+
status_parts.append(f" {i}. {url[:50]}{'...' if len(url) > 50 else ''}")
|
515 |
+
if len(GROUNDING_URLS) > 2:
|
516 |
+
status_parts.append(f" ... and {len(GROUNDING_URLS) - 2} more")
|
517 |
+
|
518 |
+
# Access control
|
519 |
+
if ACCESS_CODE is not None:
|
520 |
+
status_parts.append("") # Blank line
|
521 |
+
status_parts.append("### 🔐 Access Control")
|
522 |
+
status_parts.append("**Status:** Password protected")
|
523 |
+
|
524 |
+
# System prompt
|
525 |
+
status_parts.append("") # Blank line
|
526 |
+
status_parts.append("### 📝 System Prompt")
|
527 |
+
# Show first 200 chars of system prompt
|
528 |
+
prompt_preview = SYSTEM_PROMPT[:200] + "..." if len(SYSTEM_PROMPT) > 200 else SYSTEM_PROMPT
|
529 |
+
status_parts.append(f"```\n{prompt_preview}\n```")
|
|
|
|
|
|
|
530 |
|
531 |
return "\n".join(status_parts)
|
532 |
|
|
|
|
|
533 |
# Create interface with access code protection
|
534 |
# Dynamically set theme based on configuration
|
535 |
theme_class = getattr(gr.themes, THEME, gr.themes.Default)
|
536 |
with gr.Blocks(title=SPACE_NAME, theme=theme_class()) as demo:
|
537 |
+
gr.Markdown(f"# {SPACE_NAME}")
|
538 |
+
gr.Markdown(SPACE_DESCRIPTION)
|
539 |
+
|
540 |
+
# Access code section (shown only if ACCESS_CODE is set)
|
541 |
+
with gr.Column(visible=(ACCESS_CODE is not None)) as access_section:
|
542 |
+
gr.Markdown("### 🔐 Access Required")
|
543 |
+
gr.Markdown("Please enter the access code provided by your instructor:")
|
544 |
+
|
545 |
+
access_input = gr.Textbox(
|
546 |
+
label="Access Code",
|
547 |
+
placeholder="Enter access code...",
|
548 |
+
type="password"
|
549 |
+
)
|
550 |
+
access_btn = gr.Button("Submit", variant="primary")
|
551 |
+
access_error = gr.Markdown(visible=False)
|
552 |
+
|
553 |
+
# Main chat interface (hidden until access granted)
|
554 |
+
with gr.Column(visible=(ACCESS_CODE is None)) as chat_section:
|
555 |
+
chat_interface = gr.ChatInterface(
|
556 |
+
fn=store_and_generate_response, # Use wrapper function to store history
|
557 |
+
title="", # Title already shown above
|
558 |
+
description="", # Description already shown above
|
559 |
+
examples=['Can you help me improve my thesis statement?', 'How should I structure my argumentative essay?', 'What makes a good academic paragraph?'],
|
560 |
+
type="messages" # Use modern message format for better compatibility
|
561 |
+
)
|
562 |
+
|
563 |
+
# Export functionality
|
564 |
+
with gr.Row():
|
565 |
+
export_btn = gr.Button("📥 Export Conversation", variant="secondary", size="sm")
|
566 |
+
export_file = gr.File(label="Download", visible=False)
|
567 |
+
|
568 |
+
# Connect export functionality
|
569 |
+
export_btn.click(
|
570 |
+
export_current_conversation,
|
571 |
+
outputs=[export_file]
|
572 |
+
)
|
573 |
+
|
574 |
+
|
575 |
+
# Connect access verification
|
576 |
+
if ACCESS_CODE is not None:
|
577 |
+
access_btn.click(
|
578 |
+
verify_access_code,
|
579 |
+
inputs=[access_input],
|
580 |
+
outputs=[access_error, chat_section, access_granted]
|
581 |
+
)
|
582 |
+
access_input.submit(
|
583 |
+
verify_access_code,
|
584 |
+
inputs=[access_input],
|
585 |
+
outputs=[access_error, chat_section, access_granted]
|
586 |
+
)
|
587 |
+
|
588 |
+
# Faculty Configuration Section - appears at the bottom with password protection
|
589 |
+
with gr.Accordion("🔧 Faculty Configuration", open=False, visible=True) as faculty_section:
|
590 |
+
gr.Markdown("**Faculty Only:** Edit assistant configuration. Requires CONFIG_CODE secret.")
|
591 |
+
|
592 |
+
# Check if faculty password is configured
|
593 |
+
FACULTY_PASSWORD = os.environ.get("CONFIG_CODE", "").strip()
|
594 |
+
|
595 |
+
if FACULTY_PASSWORD:
|
596 |
+
faculty_auth_state = gr.State(False)
|
|
|
|
|
|
|
597 |
|
598 |
+
# Authentication row
|
599 |
+
with gr.Column() as faculty_auth_row:
|
600 |
+
with gr.Row():
|
601 |
+
faculty_password_input = gr.Textbox(
|
602 |
+
label="Faculty Password",
|
603 |
+
type="password",
|
604 |
+
placeholder="Enter faculty configuration password",
|
605 |
+
scale=3
|
606 |
+
)
|
607 |
+
faculty_auth_btn = gr.Button("Unlock Configuration", variant="primary", scale=1)
|
608 |
+
faculty_auth_status = gr.Markdown("")
|
609 |
|
610 |
+
# Configuration editor (hidden until authenticated)
|
611 |
+
with gr.Column(visible=False) as faculty_config_section:
|
612 |
gr.Markdown("### Edit Assistant Configuration")
|
613 |
gr.Markdown("⚠️ **Warning:** Changes will affect all users immediately.")
|
614 |
|
|
|
617 |
with open('config.json', 'r') as f:
|
618 |
current_config = json.load(f)
|
619 |
except:
|
620 |
+
current_config = {
|
621 |
+
'system_prompt': SYSTEM_PROMPT,
|
622 |
+
'temperature': 0.5,
|
623 |
+
'max_tokens': 450,
|
624 |
+
'locked': False
|
625 |
+
}
|
626 |
+
|
627 |
+
# Editable fields - Order matches the Configuration tab
|
628 |
+
# 1. Assistant Identity
|
629 |
+
edit_name = gr.Textbox(
|
630 |
+
label="Assistant Name",
|
631 |
+
value=current_config.get('name', SPACE_NAME),
|
632 |
+
placeholder="My AI Assistant"
|
633 |
+
)
|
634 |
+
|
635 |
+
edit_description = gr.Textbox(
|
636 |
+
label="Assistant Description",
|
637 |
+
value=current_config.get('description', SPACE_DESCRIPTION),
|
638 |
+
lines=2,
|
639 |
+
placeholder="A helpful AI assistant for..."
|
640 |
+
)
|
641 |
|
642 |
+
# 2. System Prompt
|
|
|
643 |
edit_system_prompt = gr.Textbox(
|
644 |
label="System Prompt",
|
645 |
value=current_config.get('system_prompt', SYSTEM_PROMPT),
|
|
|
661 |
],
|
662 |
value=current_config.get('model', MODEL)
|
663 |
)
|
664 |
+
|
665 |
+
# 4. Example Prompts
|
666 |
examples_value = current_config.get('examples', [])
|
667 |
if isinstance(examples_value, list):
|
668 |
examples_text_value = "\n".join(examples_value)
|
|
|
682 |
label="Temperature",
|
683 |
minimum=0,
|
684 |
maximum=2,
|
685 |
+
value=current_config.get('temperature', 0.5),
|
686 |
step=0.1
|
687 |
)
|
688 |
edit_max_tokens = gr.Slider(
|
689 |
label="Max Tokens",
|
690 |
minimum=50,
|
691 |
maximum=4096,
|
692 |
+
value=current_config.get('max_tokens', 450),
|
693 |
step=50
|
694 |
)
|
695 |
|
696 |
+
# 6. URL Grounding
|
697 |
gr.Markdown("### URL Grounding")
|
698 |
grounding_urls_value = current_config.get('grounding_urls', [])
|
699 |
if isinstance(grounding_urls_value, str):
|
|
|
720 |
)
|
721 |
|
722 |
with gr.Row():
|
723 |
+
save_config_btn = gr.Button("💾 Save Configuration", variant="primary")
|
724 |
+
reset_config_btn = gr.Button("↩️ Reset to Defaults", variant="secondary")
|
725 |
|
726 |
config_status = gr.Markdown("")
|
727 |
|
728 |
+
# Faculty authentication function
|
729 |
+
def verify_faculty_password(password):
|
730 |
+
if password == FACULTY_PASSWORD:
|
731 |
+
return (
|
732 |
+
gr.update(value="✅ Authentication successful!"),
|
733 |
+
gr.update(visible=False), # Hide auth row
|
734 |
+
gr.update(visible=True), # Show config section
|
735 |
+
True # Update auth state
|
736 |
+
)
|
737 |
+
else:
|
738 |
+
return (
|
739 |
+
gr.update(value="❌ Invalid password"),
|
740 |
+
gr.update(visible=True), # Keep auth row visible
|
741 |
+
gr.update(visible=False), # Keep config hidden
|
742 |
+
False # Auth failed
|
743 |
+
)
|
744 |
|
745 |
# Save configuration function
|
746 |
+
def save_configuration(new_name, new_description, new_prompt, new_model, new_examples, new_temp, new_tokens, *url_values, lock_config, is_authenticated):
|
747 |
if not is_authenticated:
|
748 |
+
return "❌ Not authenticated"
|
749 |
|
750 |
# Check if configuration is already locked
|
751 |
try:
|
752 |
with open('config.json', 'r') as f:
|
753 |
existing_config = json.load(f)
|
754 |
if existing_config.get('locked', False):
|
755 |
+
return "🔒 Configuration is locked and cannot be modified"
|
756 |
except:
|
757 |
pass
|
758 |
|
|
|
761 |
with open('config.json', 'r') as f:
|
762 |
current_full_config = json.load(f)
|
763 |
except:
|
764 |
+
# If config.json doesn't exist, use global config
|
765 |
+
current_full_config = config.copy()
|
766 |
|
767 |
# Process example prompts
|
768 |
examples_list = [ex.strip() for ex in new_examples.split('\n') if ex.strip()]
|
|
|
773 |
# Filter out empty URLs
|
774 |
grounding_urls = [url.strip() for url in urls if url.strip()]
|
775 |
|
|
|
|
|
|
776 |
# Update all editable fields while preserving everything else
|
777 |
current_full_config.update({
|
778 |
+
'name': new_name,
|
779 |
+
'description': new_description,
|
780 |
'system_prompt': new_prompt,
|
781 |
'model': new_model,
|
782 |
'examples': examples_list,
|
|
|
792 |
with open('config.json', 'w') as f:
|
793 |
json.dump(current_full_config, f, indent=2)
|
794 |
|
795 |
+
# Reload all configuration values
|
796 |
+
reload_config_values()
|
|
|
797 |
|
798 |
+
return f"✅ Configuration saved successfully at {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
|
|
|
|
|
|
|
799 |
except Exception as e:
|
800 |
return f"❌ Error saving configuration: {str(e)}"
|
801 |
|
802 |
# Reset configuration function
|
803 |
def reset_configuration(is_authenticated):
|
804 |
if not is_authenticated:
|
805 |
+
updates = ["❌ Not authenticated"] + [gr.update() for _ in range(16)] # 1 status + 16 fields
|
806 |
return tuple(updates)
|
807 |
|
808 |
# Check if locked
|
|
|
810 |
with open('config.json', 'r') as f:
|
811 |
existing_config = json.load(f)
|
812 |
if existing_config.get('locked', False):
|
813 |
+
updates = ["🔒 Configuration is locked"] + [gr.update() for _ in range(16)]
|
814 |
return tuple(updates)
|
815 |
except:
|
816 |
pass
|
|
|
822 |
else:
|
823 |
examples_text = ""
|
824 |
|
825 |
+
# Get default URLs
|
826 |
default_urls = DEFAULT_CONFIG.get('grounding_urls', [])
|
827 |
if isinstance(default_urls, str):
|
828 |
try:
|
829 |
+
import ast
|
830 |
+
default_urls = ast.literal_eval(default_urls)
|
831 |
except:
|
832 |
default_urls = []
|
|
|
|
|
833 |
|
834 |
# Reset to original default values
|
835 |
updates = [
|
836 |
+
"✅ Reset to default values",
|
837 |
+
gr.update(value=DEFAULT_CONFIG.get('name', SPACE_NAME)),
|
838 |
+
gr.update(value=DEFAULT_CONFIG.get('description', SPACE_DESCRIPTION)),
|
839 |
gr.update(value=DEFAULT_CONFIG.get('system_prompt', SYSTEM_PROMPT)),
|
840 |
gr.update(value=DEFAULT_CONFIG.get('model', MODEL)),
|
841 |
gr.update(value=examples_text),
|
842 |
+
gr.update(value=DEFAULT_CONFIG.get('temperature', 0.5)),
|
843 |
+
gr.update(value=DEFAULT_CONFIG.get('max_tokens', 450))
|
844 |
]
|
845 |
|
846 |
# Add URL updates
|
|
|
850 |
|
851 |
return tuple(updates)
|
852 |
|
853 |
+
# Connect authentication
|
854 |
+
faculty_auth_btn.click(
|
855 |
+
verify_faculty_password,
|
856 |
+
inputs=[faculty_password_input],
|
857 |
+
outputs=[faculty_auth_status, faculty_auth_row, faculty_config_section, faculty_auth_state]
|
858 |
+
)
|
859 |
+
|
860 |
+
faculty_password_input.submit(
|
861 |
+
verify_faculty_password,
|
862 |
+
inputs=[faculty_password_input],
|
863 |
+
outputs=[faculty_auth_status, faculty_auth_row, faculty_config_section, faculty_auth_state]
|
864 |
+
)
|
865 |
|
866 |
# Connect configuration buttons
|
867 |
save_config_btn.click(
|
868 |
save_configuration,
|
869 |
+
inputs=[edit_name, edit_description, edit_system_prompt, edit_model, edit_examples, edit_temperature, edit_max_tokens] + url_fields + [config_locked, faculty_auth_state],
|
870 |
outputs=[config_status]
|
871 |
)
|
872 |
|
873 |
reset_config_btn.click(
|
874 |
reset_configuration,
|
875 |
inputs=[faculty_auth_state],
|
876 |
+
outputs=[config_status, edit_name, edit_description, edit_system_prompt, edit_model, edit_examples, edit_temperature, edit_max_tokens] + url_fields
|
877 |
)
|
878 |
+
else:
|
879 |
+
gr.Markdown("ℹ️ Faculty configuration is not enabled. Set CONFIG_CODE in Space secrets to enable.")
|
880 |
|
881 |
if __name__ == "__main__":
|
882 |
demo.launch()
|
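For reference, a minimal standalone sketch of the OpenRouter chat-completions call that app.py builds above, assuming an API_KEY environment variable holding an sk-or- key (illustrative; not part of the uploaded files):

```python
# Minimal sketch of the OpenRouter call used in app.py above; assumes API_KEY is set.
import os
import requests

response = requests.post(
    "https://openrouter.ai/api/v1/chat/completions",
    headers={
        "Authorization": f"Bearer {os.environ['API_KEY']}",
        "Content-Type": "application/json",
    },
    json={
        "model": "anthropic/claude-3.5-sonnet",
        "messages": [{"role": "user", "content": "Give feedback on this sentence: ..."}],
        "temperature": 0.5,
        "max_tokens": 450,
    },
    timeout=30,
)
response.raise_for_status()
# Same response shape that app.py validates: choices[0].message.content
print(response.json()["choices"][0]["message"]["content"])
```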
config.json
CHANGED
@@ -1,23 +1,21 @@
|
|
1 |
{
|
2 |
-
"name": "
|
3 |
"description": "A customizable AI assistant",
|
4 |
-
"system_prompt": "You are a
|
5 |
-
"model": "
|
6 |
"api_key_var": "API_KEY",
|
7 |
-
"temperature": 0.
|
8 |
-
"max_tokens":
|
9 |
"examples": [
|
10 |
-
"
|
11 |
-
"
|
12 |
-
"
|
13 |
-
"How do I interpret conflicting historical accounts of the same event?"
|
14 |
],
|
15 |
"grounding_urls": [
|
16 |
-
"https://
|
17 |
-
"https://
|
18 |
-
"https://en.wikipedia.org/wiki/
|
19 |
-
"https://en.wikipedia.org/wiki/Cultural_studies"
|
20 |
],
|
21 |
"enable_dynamic_urls": true,
|
22 |
-
"theme": "
|
23 |
}
|
|
|
1 |
{
|
2 |
+
"name": "AI Assistant",
|
3 |
"description": "A customizable AI assistant",
|
4 |
+
"system_prompt": "You are a sentence-level writing and composition assistant specializing in micro-writing pedagogy. Work with student submissions of single sentences to one paragraph maximum, providing focused feedback on sentence structure, word choice, clarity, and flow. Guide students through incremental, single-paragraph improvements rather than wholesale rewrites and emphasize the building blocks of process-based writing and the rhetorical grammar of subject-verb relationships, citational practice, transitions and signposting, prosody and rhythm. Focus on sentence structure clarity and variety, precise word choice, transitional logic between ideas, concrete specificity over vague generalizations and genre-specific tone and voice. Frequently offer 1-2 specific, actionable suggestions per submission, celebrate what's working before suggesting improvements. Briefly unpack the **why** behind suggestions and ask at least one simple follow-up question to facilitate the next cycle. Maintain high fidelity to existing versions as a rule of thumb, and expect to complete the response in less than 400 tokens.",
|
5 |
+
"model": "anthropic/claude-3.5-sonnet",
|
6 |
"api_key_var": "API_KEY",
|
7 |
+
"temperature": 0.5,
|
8 |
+
"max_tokens": 450,
|
9 |
"examples": [
|
10 |
+
"Can you help me improve my thesis statement?",
|
11 |
+
"How should I structure my argumentative essay?",
|
12 |
+
"What makes a good academic paragraph?"
|
|
|
13 |
],
|
14 |
"grounding_urls": [
|
15 |
+
"https://owl.purdue.edu/owl/general_writing/index.html",
|
16 |
+
"https://en.wikipedia.org/wiki/Academic_writing",
|
17 |
+
"https://en.wikipedia.org/wiki/Essay"
|
|
|
18 |
],
|
19 |
"enable_dynamic_urls": true,
|
20 |
+
"theme": "Ocean"
|
21 |
}
|
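For reference, a minimal sketch of loading and sanity-checking this config.json, mirroring the fallback pattern used in app.py (the helper name load_space_config is illustrative and not part of the upload):

```python
# Illustrative sketch: read config.json and fall back to safe defaults
# when the file is missing, malformed, or lacks a key app.py relies on.
import json

def load_space_config(path="config.json"):
    defaults = {
        "model": "anthropic/claude-3.5-sonnet",
        "temperature": 0.5,
        "max_tokens": 450,
    }
    try:
        with open(path, "r", encoding="utf-8") as f:
            cfg = json.load(f)
    except (FileNotFoundError, json.JSONDecodeError):
        return defaults
    # Keep any extra keys, but guarantee the required ones exist
    return {**defaults, **cfg}

cfg = load_space_config()
print(cfg["model"], cfg["temperature"], cfg["max_tokens"])
```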
requirements.txt
ADDED
@@ -0,0 +1,4 @@
1 |
+
gradio>=5.38.0
|
2 |
+
requests>=2.32.3
|
3 |
+
beautifulsoup4>=4.12.3
|
4 |
+
python-dotenv>=1.0.0
|
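These entries are floor versions rather than exact pins. A small illustrative check (not part of the upload) that the installed packages satisfy them:

```python
# Rough check that installed packages meet the minimums in requirements.txt.
from importlib.metadata import version, PackageNotFoundError

MINIMUMS = {
    "gradio": "5.38.0",
    "requests": "2.32.3",
    "beautifulsoup4": "4.12.3",
    "python-dotenv": "1.0.0",
}

for package, floor in MINIMUMS.items():
    try:
        installed = version(package)
    except PackageNotFoundError:
        print(f"{package}: not installed (needs >= {floor})")
        continue
    # Numeric tuple comparison is approximate; packaging.version is stricter.
    ok = tuple(int(p) for p in installed.split(".")[:3]) >= tuple(int(p) for p in floor.split(".")[:3])
    status = "ok" if ok else f"needs >= {floor}"
    print(f"{package}: {installed} ({status})")
```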