Upload 2 files
Browse files
- app.py +55 -800
- config.json +3 -3
app.py
CHANGED
@@ -11,23 +11,23 @@ import urllib.parse
|
|
11 |
|
12 |
# Configuration
|
13 |
SPACE_NAME = 'AI Assistant'
|
14 |
-
SPACE_DESCRIPTION = 'A
|
15 |
|
16 |
# Default configuration values (used only if config.json is missing)
|
17 |
-
DEFAULT_CONFIG = {
|
18 |
'name': SPACE_NAME,
|
19 |
'description': SPACE_DESCRIPTION,
|
20 |
-
'system_prompt': "You are a
|
21 |
'temperature': 0.7,
|
22 |
'max_tokens': 750,
|
23 |
'model': 'google/gemini-2.0-flash-001',
|
24 |
'api_key_var': 'API_KEY',
|
25 |
-
'theme': '
|
26 |
'grounding_urls': '[]',
|
27 |
'enable_dynamic_urls': True,
|
28 |
'examples': ['Can you help me understand why the sky is blue?'],
|
29 |
'locked': False
|
30 |
-
}
|
31 |
|
32 |
# Load configuration from file - this is the single source of truth
|
33 |
def load_config():
|
@@ -48,7 +48,7 @@ def load_config():
|
|
48 |
pass
|
49 |
return DEFAULT_CONFIG
|
50 |
except Exception as e:
|
51 |
-
print(f"⚠️ Error loading config.json: {
|
52 |
return DEFAULT_CONFIG
|
53 |
|
54 |
# Load configuration
|
@@ -76,150 +76,23 @@ if API_KEY:
|
|
76 |
API_KEY = API_KEY.strip() # Remove any whitespace
|
77 |
if not API_KEY: # Check if empty after stripping
|
78 |
API_KEY = None
|
79 |
-
|
80 |
-
# API Key validation and logging
|
81 |
-
def validate_api_key():
|
82 |
-
"""Validate API key configuration with detailed logging"""
|
83 |
-
if not API_KEY:
|
84 |
-
print(f"⚠️ API KEY CONFIGURATION ERROR:")
|
85 |
-
print(f" Variable name: {API_KEY_VAR}")
|
86 |
-
print(f" Status: Not set or empty")
|
87 |
-
print(f" Action needed: Set '{API_KEY_VAR}' in HuggingFace Space secrets")
|
88 |
-
print(f" Expected format: sk-or-xxxxxxxxxx")
|
89 |
-
return False
|
90 |
-
elif not API_KEY.startswith('sk-or-'):
|
91 |
-
print(f"⚠️ API KEY FORMAT WARNING:")
|
92 |
-
print(f" Variable name: {API_KEY_VAR}")
|
93 |
-
print(f" Current value: {API_KEY[:10]}..." if len(API_KEY) > 10 else "{API_KEY}")
|
94 |
-
print(f" Expected format: sk-or-xxxxxxxxxx")
|
95 |
-
print(f" Note: OpenRouter keys should start with 'sk-or-'")
|
96 |
-
return True # Still try to use it
|
97 |
-
else:
|
98 |
-
print(f"✅ API Key configured successfully")
|
99 |
-
print(f" Variable: {API_KEY_VAR}")
|
100 |
-
print(f" Format: Valid OpenRouter key")
|
101 |
-
return True
|
102 |
-
|
103 |
-
# Validate on startup
|
104 |
-
try:
|
105 |
-
API_KEY_VALID = validate_api_key()
|
106 |
-
except NameError:
|
107 |
-
# During template generation, API_KEY might not be defined yet
|
108 |
-
API_KEY_VALID = False
|
109 |
-
|
110 |
-
def validate_url_domain(url):
|
111 |
-
"""Basic URL domain validation"""
|
112 |
-
try:
|
113 |
-
from urllib.parse import urlparse
|
114 |
-
parsed = urlparse(url)
|
115 |
-
# Check for valid domain structure
|
116 |
-
if parsed.netloc and '.' in parsed.netloc:
|
117 |
-
return True
|
118 |
-
except:
|
119 |
-
pass
|
120 |
-
return False
|
121 |
-
|
122 |
-
def fetch_url_content(url):
|
123 |
-
"""Enhanced URL content fetching with improved compatibility and error handling"""
|
124 |
-
if not validate_url_domain(url):
|
125 |
-
return f"Invalid URL format: {url}"
|
126 |
-
|
127 |
-
try:
|
128 |
-
# Enhanced headers for better compatibility
|
129 |
-
headers = {
|
130 |
-
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
|
131 |
-
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
|
132 |
-
'Accept-Language': 'en-US,en;q=0.5',
|
133 |
-
'Accept-Encoding': 'gzip, deflate',
|
134 |
-
'Connection': 'keep-alive'
|
135 |
-
}
|
136 |
-
|
137 |
-
response = requests.get(url, timeout=15, headers=headers)
|
138 |
-
response.raise_for_status()
|
139 |
-
soup = BeautifulSoup(response.content, 'html.parser')
|
140 |
-
|
141 |
-
# Enhanced content cleaning
|
142 |
-
for element in soup(["script", "style", "nav", "header", "footer", "aside", "form", "button"]):
|
143 |
-
element.decompose()
|
144 |
-
|
145 |
-
# Extract main content preferentially
|
146 |
-
main_content = soup.find('main') or soup.find('article') or soup.find('div', class_=lambda x: bool(x and 'content' in x.lower())) or soup
|
147 |
-
text = main_content.get_text()
|
148 |
-
|
149 |
-
# Enhanced text cleaning
|
150 |
-
lines = (line.strip() for line in text.splitlines())
|
151 |
-
chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
|
152 |
-
text = ' '.join(chunk for chunk in chunks if chunk and len(chunk) > 2)
|
153 |
-
|
154 |
-
# Smart truncation - try to end at sentence boundaries
|
155 |
-
if len(text) > 4000:
|
156 |
-
truncated = text[:4000]
|
157 |
-
last_period = truncated.rfind('.')
|
158 |
-
if last_period > 3000: # If we can find a reasonable sentence break
|
159 |
-
text = truncated[:last_period + 1]
|
160 |
-
else:
|
161 |
-
text = truncated + "..."
|
162 |
-
|
163 |
-
return text if text.strip() else "No readable content found at this URL"
|
164 |
-
|
165 |
-
except requests.exceptions.Timeout:
|
166 |
-
return f"Timeout error fetching {url} (15s limit exceeded)"
|
167 |
-
except requests.exceptions.RequestException as e:
|
168 |
-
return f"Error fetching {url}: {str(e)}"
|
169 |
-
except Exception as e:
|
170 |
-
return f"Error processing content from {url}: {str(e)}"
|
171 |
-
|
172 |
-
def extract_urls_from_text(text):
|
173 |
-
"""Extract URLs from text using regex with enhanced validation"""
|
174 |
-
import re
|
175 |
-
url_pattern = r'https?://[^\s<>"{}|\\^`\[\]"]+'
|
176 |
-
urls = re.findall(url_pattern, text)
|
177 |
-
|
178 |
-
# Basic URL validation and cleanup
|
179 |
-
validated_urls = []
|
180 |
-
for url in urls:
|
181 |
-
# Remove trailing punctuation that might be captured
|
182 |
-
url = url.rstrip('.,!?;:')
|
183 |
-
# Basic domain validation
|
184 |
-
if '.' in url and len(url) > 10:
|
185 |
-
validated_urls.append(url)
|
186 |
-
|
187 |
-
return validated_urls
|
188 |
-
|
189 |
-
# Global cache for URL content to avoid re-crawling in generated spaces
|
190 |
-
_url_content_cache = {}
|
191 |
|
192 |
def get_grounding_context():
|
193 |
"""Fetch context from grounding URLs with caching"""
|
194 |
-
|
195 |
-
|
196 |
-
|
197 |
-
|
198 |
-
|
199 |
-
|
200 |
-
|
201 |
-
if cache_key in _url_content_cache:
|
202 |
-
return _url_content_cache[cache_key]
|
203 |
-
|
204 |
-
context_parts = []
|
205 |
-
for i, url in enumerate(GROUNDING_URLS, 1):
|
206 |
-
if url.strip():
|
207 |
-
content = fetch_url_content(url.strip())
|
208 |
-
# Add priority indicators
|
209 |
-
priority_label = "PRIMARY" if i <= 2 else "SECONDARY"
|
210 |
-
context_parts.append(f"[{priority_label}] Context from URL {i} ({url}):\n{content}")
|
211 |
|
212 |
-
if
|
213 |
-
|
214 |
-
" + "\n\
|
215 |
-
".join(context_parts) + "\n\
|
216 |
-
"
|
217 |
-
else:
|
218 |
-
result = ""
|
219 |
|
220 |
-
#
|
221 |
-
|
222 |
-
return
|
223 |
|
224 |
def export_conversation_to_markdown(conversation_history):
|
225 |
"""Export conversation history to markdown format"""
|
@@ -227,7 +100,7 @@ def export_conversation_to_markdown(conversation_history):
|
|
227 |
return "No conversation to export."
|
228 |
|
229 |
markdown_content = f"""# Conversation Export
|
230 |
-
Generated on: {datetime.now().strftime('
|
231 |
|
232 |
---
|
233 |
|
@@ -241,713 +114,95 @@ Generated on: {datetime.now().strftime('%%Y-%%m-%%d %%H:%%M:%%S')}
|
|
241 |
|
242 |
if role == 'user':
|
243 |
message_pair_count += 1
|
244 |
-
markdown_content += f"## User Message {
|
245 |
-
"
|
246 |
elif role == 'assistant':
|
247 |
-
markdown_content += f"## Assistant Response {
|
248 |
-
"
|
249 |
-
elif isinstance(message, (list, tuple)) and len(message) >= 2:
|
250 |
-
# Handle legacy tuple format: ["user msg", "assistant msg"]
|
251 |
-
message_pair_count += 1
|
252 |
-
user_msg, assistant_msg = message[0], message[1]
|
253 |
-
if user_msg:
|
254 |
-
markdown_content += f"## User Message {{message_pair_count}}\n\n{{user_msg}}\n\
|
255 |
-
"
|
256 |
-
if assistant_msg:
|
257 |
-
markdown_content += f"## Assistant Response {{message_pair_count}}\n\n{{assistant_msg}}\n\n---\n\
|
258 |
-
"
|
259 |
|
260 |
return markdown_content
|
261 |
|
262 |
-
|
263 |
def generate_response(message, history):
|
264 |
"""Generate response using OpenRouter API"""
|
265 |
|
266 |
# Enhanced API key validation with helpful messages
|
267 |
if not API_KEY:
|
268 |
-
error_msg = f"🔑 **API Key Required**\n\
|
269 |
-
"
|
270 |
-
error_msg += f"
|
271 |
-
"
|
272 |
-
error_msg += f"
|
273 |
-
"
|
274 |
-
error_msg += f"2. Click 'Variables and secrets'\
|
275 |
-
"
|
276 |
-
error_msg += f"3. Add secret: **{API_KEY_VAR}**\
|
277 |
-
"
|
278 |
-
error_msg += f"4. Value: Your OpenRouter API key (starts with `sk-or-`)\n\
|
279 |
-
"
|
280 |
error_msg += f"Get your API key at: https://openrouter.ai/keys"
|
281 |
-
print(f"❌ API request failed: No API key configured for {API_KEY_VAR}")
|
282 |
return error_msg
|
283 |
|
284 |
# Get grounding context
|
285 |
grounding_context = get_grounding_context()
|
286 |
|
287 |
-
|
288 |
-
# If dynamic URLs are enabled, check message for URLs to fetch
|
289 |
-
if ENABLE_DYNAMIC_URLS:
|
290 |
-
urls_in_message = extract_urls_from_text(message)
|
291 |
-
if urls_in_message:
|
292 |
-
# Fetch content from URLs mentioned in the message
|
293 |
-
dynamic_context_parts = []
|
294 |
-
for url in urls_in_message[:3]: # Limit to 3 URLs per message
|
295 |
-
content = fetch_url_content(url)
|
296 |
-
dynamic_context_parts.append(f"\n\nDynamic context from {url}:\n{content}")
|
297 |
-
if dynamic_context_parts:
|
298 |
-
grounding_context += "\
|
299 |
-
".join(dynamic_context_parts)
|
300 |
-
|
301 |
# Build enhanced system prompt with grounding context
|
302 |
enhanced_system_prompt = SYSTEM_PROMPT + grounding_context
|
303 |
|
304 |
# Build messages array for the API
|
305 |
messages = [{"role": "system", "content": enhanced_system_prompt}]
|
306 |
|
307 |
-
# Add conversation history
|
308 |
for chat in history:
|
309 |
if isinstance(chat, dict):
|
310 |
-
# Modern format: {"role": "user", "content": "..."} or {"role": "assistant", "content": "..."}
|
311 |
messages.append(chat)
|
312 |
elif isinstance(chat, (list, tuple)) and len(chat) >= 2:
|
313 |
-
|
314 |
-
|
315 |
-
if user_msg:
|
316 |
-
messages.append({"role": "user", "content": user_msg})
|
317 |
-
if assistant_msg:
|
318 |
-
messages.append({"role": "assistant", "content": assistant_msg})
|
319 |
|
320 |
# Add current message
|
321 |
messages.append({"role": "user", "content": message})
|
322 |
|
323 |
-
# Make API request
|
324 |
try:
|
325 |
-
print(f"🔄 Making API request to OpenRouter...")
|
326 |
-
print(f" Model: {MODEL}")
|
327 |
-
print(f" Messages: {len(messages)} in conversation")
|
328 |
-
|
329 |
response = requests.post(
|
330 |
url="https://openrouter.ai/api/v1/chat/completions",
|
331 |
headers={
|
332 |
"Authorization": f"Bearer {API_KEY}",
|
333 |
"Content-Type": "application/json",
|
334 |
-
"HTTP-Referer": "https://huggingface.co",
|
335 |
-
"X-Title": "HuggingFace Space"
|
336 |
},
|
337 |
json={
|
338 |
"model": MODEL,
|
339 |
"messages": messages,
|
340 |
-
"temperature":
|
341 |
-
"max_tokens":
|
342 |
},
|
343 |
timeout=30
|
344 |
)
|
345 |
|
346 |
-
print(f"📡 API Response: {response.status_code}")
|
347 |
-
|
348 |
if response.status_code == 200:
|
349 |
-
|
350 |
-
|
351 |
-
|
352 |
-
# Enhanced validation of API response structure
|
353 |
-
if 'choices' not in result or not result['choices']:
|
354 |
-
print(f"⚠️ API response missing choices: {result}")
|
355 |
-
return "API Error: No response choices available"
|
356 |
-
elif 'message' not in result['choices'][0]:
|
357 |
-
print(f"⚠️ API response missing message: {result}")
|
358 |
-
return "API Error: No message in response"
|
359 |
-
elif 'content' not in result['choices'][0]['message']:
|
360 |
-
print(f"⚠️ API response missing content: {result}")
|
361 |
-
return "API Error: No content in message"
|
362 |
-
else:
|
363 |
-
content = result['choices'][0]['message']['content']
|
364 |
-
|
365 |
-
# Check for empty content
|
366 |
-
if not content or content.strip() == "":
|
367 |
-
print(f"⚠️ API returned empty content")
|
368 |
-
return "API Error: Empty response content"
|
369 |
-
|
370 |
-
print(f"✅ API request successful")
|
371 |
-
return content
|
372 |
-
|
373 |
-
except (KeyError, IndexError, json.JSONDecodeError) as e:
|
374 |
-
print(f"❌ Failed to parse API response: {str(e)}")
|
375 |
-
return f"API Error: Failed to parse response - {str(e)}"
|
376 |
-
elif response.status_code == 401:
|
377 |
-
error_msg = f"🔐 **Authentication Error**\n\
|
378 |
-
"
|
379 |
-
error_msg += f"Your API key appears to be invalid or expired.\n\
|
380 |
-
"
|
381 |
-
error_msg += f"**Troubleshooting:**\
|
382 |
-
"
|
383 |
-
error_msg += f"1. Check that your **{API_KEY_VAR}** secret is set correctly\
|
384 |
-
"
|
385 |
-
error_msg += f"2. Verify your API key at: https://openrouter.ai/keys\
|
386 |
-
"
|
387 |
-
error_msg += f"3. Ensure your key starts with `sk-or-`\
|
388 |
-
"
|
389 |
-
error_msg += f"4. Check that you have credits on your OpenRouter account"
|
390 |
-
print(f"❌ API authentication failed: {response.status_code} - {response.text[:200]}")
|
391 |
-
return error_msg
|
392 |
-
elif response.status_code == 429:
|
393 |
-
error_msg = f"⏱️ **Rate Limit Exceeded**\n\
|
394 |
-
"
|
395 |
-
error_msg += f"Too many requests. Please wait a moment and try again.\n\
|
396 |
-
"
|
397 |
-
error_msg += f"**Troubleshooting:**\
|
398 |
-
"
|
399 |
-
error_msg += f"1. Wait 30-60 seconds before trying again\n"
|
400 |
-
error_msg += f"2. Check your OpenRouter usage limits\n"
|
401 |
-
error_msg += f"3. Consider upgrading your OpenRouter plan"
|
402 |
-
print(f"❌ Rate limit exceeded: {response.status_code}")
|
403 |
-
return error_msg
|
404 |
-
elif response.status_code == 400:
|
405 |
-
try:
|
406 |
-
error_data = response.json()
|
407 |
-
error_message = error_data.get('error', {}).get('message', 'Unknown error')
|
408 |
-
except:
|
409 |
-
error_message = response.text
|
410 |
-
|
411 |
-
error_msg = f"⚠️ **Request Error**\n\
|
412 |
-
"
|
413 |
-
error_msg += f"The API request was invalid:\n"
|
414 |
-
error_msg += f"`{error_message}`\n\
|
415 |
-
"
|
416 |
-
if "model" in error_message.lower():
|
417 |
-
error_msg += f"**Model Issue:** The model `{MODEL}` may not be available.\n"
|
418 |
-
error_msg += f"Try switching to a different model in your Space configuration."
|
419 |
-
print(f"❌ Bad request: {response.status_code} - {error_message}")
|
420 |
-
return error_msg
|
421 |
else:
|
422 |
-
|
423 |
-
"
|
424 |
-
error_msg += f"An unexpected error occurred. Please try again.\n\
|
425 |
-
"
|
426 |
-
error_msg += f"If this persists, check:\n"
|
427 |
-
error_msg += f"1. OpenRouter service status\n"
|
428 |
-
error_msg += f"2. Your API key and credits\n"
|
429 |
-
error_msg += f"3. The model availability"
|
430 |
-
print(f"❌ API error: {response.status_code} - {response.text[:200]}")
|
431 |
-
return error_msg
|
432 |
|
433 |
-
except requests.exceptions.Timeout:
|
434 |
-
error_msg = f"⏰ **Request Timeout**\n\
|
435 |
-
"
|
436 |
-
error_msg += f"The API request took too long (30s limit).\n\
|
437 |
-
"
|
438 |
-
error_msg += f"**Troubleshooting:**\n"
|
439 |
-
error_msg += f"1. Try again with a shorter message\n"
|
440 |
-
error_msg += f"2. Check your internet connection\n"
|
441 |
-
error_msg += f"3. Try a different model"
|
442 |
-
print(f"❌ Request timeout after 30 seconds")
|
443 |
-
return error_msg
|
444 |
-
except requests.exceptions.ConnectionError:
|
445 |
-
error_msg = f"🌐 **Connection Error**\n\
|
446 |
-
"
|
447 |
-
error_msg += f"Could not connect to OpenRouter API.\n\
|
448 |
-
"
|
449 |
-
error_msg += f"**Troubleshooting:**\n"
|
450 |
-
error_msg += f"1. Check your internet connection\n"
|
451 |
-
error_msg += f"2. Check OpenRouter service status\n"
|
452 |
-
error_msg += f"3. Try again in a few moments"
|
453 |
-
print(f"❌ Connection error to OpenRouter API")
|
454 |
-
return error_msg
|
455 |
except Exception as e:
|
456 |
-
|
457 |
-
"
|
458 |
-
error_msg += "An unexpected error occurred:\n"
|
459 |
-
error_msg += f"`{str(e)}`\n\
|
460 |
-
"
|
461 |
-
error_msg += "Please try again or contact support if this persists."
|
462 |
-
print(f"❌ Unexpected error: {str(e)}")
|
463 |
-
return error_msg
|
464 |
-
|
465 |
-
# Access code verification
|
466 |
-
access_granted = gr.State(False)
|
467 |
-
_access_granted_global = False # Global fallback
|
468 |
-
|
469 |
-
def verify_access_code(code):
|
470 |
-
"""Verify the access code"""
|
471 |
-
global _access_granted_global
|
472 |
-
if ACCESS_CODE is None:
|
473 |
-
_access_granted_global = True
|
474 |
-
return gr.update(visible=False), gr.update(visible=True), gr.update(value=True)
|
475 |
-
|
476 |
-
if code == ACCESS_CODE:
|
477 |
-
_access_granted_global = True
|
478 |
-
return gr.update(visible=False), gr.update(visible=True), gr.update(value=True)
|
479 |
-
else:
|
480 |
-
_access_granted_global = False
|
481 |
-
return gr.update(visible=True, value="❌ Incorrect access code. Please try again."), gr.update(visible=False), gr.update(value=False)
|
482 |
-
|
483 |
-
def protected_generate_response(message, history):
|
484 |
-
"""Protected response function that checks access"""
|
485 |
-
# Check if access is granted via the global variable
|
486 |
-
if ACCESS_CODE is not None and not _access_granted_global:
|
487 |
-
return "Please enter the access code to continue."
|
488 |
-
return generate_response(message, history)
|
489 |
-
|
490 |
-
# Global variable to store chat history for export
|
491 |
-
chat_history_store = []
|
492 |
-
|
493 |
-
def store_and_generate_response(message, history):
|
494 |
-
"""Wrapper function that stores history and generates response"""
|
495 |
-
global chat_history_store
|
496 |
-
|
497 |
-
# Generate response using the protected function
|
498 |
-
response = protected_generate_response(message, history)
|
499 |
-
|
500 |
-
# Convert current history to the format we need for export
|
501 |
-
# history comes in as [["user1", "bot1"], ["user2", "bot2"], ...]
|
502 |
-
chat_history_store = []
|
503 |
-
if history:
|
504 |
-
for exchange in history:
|
505 |
-
if isinstance(exchange, (list, tuple)) and len(exchange) >= 2:
|
506 |
-
chat_history_store.append({"role": "user", "content": exchange[0]})
|
507 |
-
chat_history_store.append({"role": "assistant", "content": exchange[1]})
|
508 |
-
|
509 |
-
# Add the current exchange
|
510 |
-
chat_history_store.append({"role": "user", "content": message})
|
511 |
-
chat_history_store.append({"role": "assistant", "content": response})
|
512 |
-
|
513 |
-
return response
|
514 |
|
515 |
-
|
516 |
-
"""Export the current conversation"""
|
517 |
-
if not chat_history_store:
|
518 |
-
return gr.update(visible=False)
|
519 |
-
|
520 |
-
markdown_content = export_conversation_to_markdown(chat_history_store)
|
521 |
-
|
522 |
-
# Save to temporary file
|
523 |
-
with tempfile.NamedTemporaryFile(mode='w', suffix='.md', delete=False, encoding='utf-8') as f:
|
524 |
-
f.write(markdown_content)
|
525 |
-
temp_file = f.name
|
526 |
-
|
527 |
-
return gr.update(value=temp_file, visible=True)
|
528 |
-
|
529 |
-
def export_conversation(history):
|
530 |
-
"""Export conversation to markdown file"""
|
531 |
-
if not history:
|
532 |
-
return gr.update(visible=False)
|
533 |
-
|
534 |
-
markdown_content = export_conversation_to_markdown(history)
|
535 |
-
|
536 |
-
# Save to temporary file
|
537 |
-
with tempfile.NamedTemporaryFile(mode='w', suffix='.md', delete=False, encoding='utf-8') as f:
|
538 |
-
f.write(markdown_content)
|
539 |
-
temp_file = f.name
|
540 |
-
|
541 |
-
return gr.update(value=temp_file, visible=True)
|
542 |
-
|
543 |
-
# Configuration status display
|
544 |
-
def get_configuration_status():
|
545 |
-
"""Generate a clean configuration status message for display"""
|
546 |
-
status_parts = []
|
547 |
-
|
548 |
-
# Basic configuration info (without redundant "Configuration:" header)
|
549 |
-
status_parts.append(f"**Name:** {SPACE_NAME}")
|
550 |
-
status_parts.append(f"**Model:** {MODEL}")
|
551 |
-
status_parts.append(f"**Theme:** {THEME}")
|
552 |
-
status_parts.append(f"**Temperature:** 0.7")
|
553 |
-
status_parts.append(f"**Max Response Tokens:** 750")
|
554 |
-
status_parts.append("")
|
555 |
-
|
556 |
-
# Example prompts
|
557 |
-
status_parts.append("")
|
558 |
-
examples_list = config.get('examples', [])
|
559 |
-
if isinstance(examples_list, str):
|
560 |
-
try:
|
561 |
-
import ast
|
562 |
-
examples_list = ast.literal_eval(examples_list)
|
563 |
-
except:
|
564 |
-
examples_list = []
|
565 |
-
|
566 |
-
if examples_list and len(examples_list) > 0:
|
567 |
-
status_parts.append("**Example Prompts:**")
|
568 |
-
for example in examples_list[:5]: # Show up to 5 examples
|
569 |
-
status_parts.append(f"• {example}")
|
570 |
-
if len(examples_list) > 5:
|
571 |
-
status_parts.append(f"• ... and {len(examples_list) - 5} more")
|
572 |
-
else:
|
573 |
-
status_parts.append("**Example Prompts:** No example prompts configured")
|
574 |
-
|
575 |
-
# URL Context if configured
|
576 |
-
if GROUNDING_URLS and len(GROUNDING_URLS) > 0:
|
577 |
-
status_parts.append("")
|
578 |
-
status_parts.append("**Grounding URLs:**")
|
579 |
-
for i, url in enumerate(GROUNDING_URLS[:5], 1): # Show first 5 URLs
|
580 |
-
status_parts.append(f"{i}. {url}")
|
581 |
-
if len(GROUNDING_URLS) > 5:
|
582 |
-
status_parts.append(f"... and {len(GROUNDING_URLS) - 5} more URLs")
|
583 |
-
|
584 |
-
# System prompt at the end
|
585 |
-
status_parts.append("")
|
586 |
-
status_parts.append(f"**System Prompt:** {SYSTEM_PROMPT}")
|
587 |
-
|
588 |
-
# API Key status (minimal, at the end)
|
589 |
-
status_parts.append("")
|
590 |
-
if not API_KEY_VALID:
|
591 |
-
status_parts.append(f"**Note:** API key ({API_KEY_VAR}) not configured in Space secrets")
|
592 |
-
|
593 |
-
return "\n".join(status_parts)
|
594 |
-
|
595 |
-
# Create interface with access code protection
|
596 |
-
# Dynamically set theme based on configuration
|
597 |
theme_class = getattr(gr.themes, THEME, gr.themes.Default)
|
598 |
with gr.Blocks(title=SPACE_NAME, theme=theme_class()) as demo:
|
599 |
gr.Markdown(f"# {SPACE_NAME}")
|
600 |
gr.Markdown(SPACE_DESCRIPTION)
|
601 |
|
602 |
-
#
|
603 |
-
|
604 |
-
|
605 |
-
|
606 |
-
|
607 |
-
|
608 |
-
|
609 |
-
|
610 |
-
|
611 |
-
|
612 |
-
|
613 |
-
|
614 |
-
|
615 |
-
|
616 |
-
|
617 |
-
# Get examples from config
|
618 |
-
examples = config.get('examples', [])
|
619 |
-
if isinstance(examples, str):
|
620 |
-
try:
|
621 |
-
import ast
|
622 |
-
examples = ast.literal_eval(examples)
|
623 |
-
except:
|
624 |
-
examples = []
|
625 |
-
|
626 |
-
chat_interface = gr.ChatInterface(
|
627 |
-
fn=store_and_generate_response, # Use wrapper function to store history
|
628 |
-
title="", # Title already shown above
|
629 |
-
description="", # Description already shown above
|
630 |
-
examples=examples if examples else None,
|
631 |
-
type="messages" # Use modern message format for better compatibility
|
632 |
-
)
|
633 |
-
|
634 |
-
# Export functionality
|
635 |
-
with gr.Row():
|
636 |
-
export_btn = gr.Button("📥 Export Conversation", variant="secondary", size="sm")
|
637 |
-
export_file = gr.File(label="Download", visible=False)
|
638 |
-
|
639 |
-
# Connect export functionality
|
640 |
-
export_btn.click(
|
641 |
-
export_current_conversation,
|
642 |
-
outputs=[export_file]
|
643 |
-
)
|
644 |
-
|
645 |
-
# Configuration status
|
646 |
-
with gr.Accordion("Configuration", open=False):
|
647 |
-
gr.Markdown(get_configuration_status())
|
648 |
-
|
649 |
-
# Connect access verification
|
650 |
-
if ACCESS_CODE is not None:
|
651 |
-
access_btn.click(
|
652 |
-
verify_access_code,
|
653 |
-
inputs=[access_input],
|
654 |
-
outputs=[access_error, chat_section, access_granted]
|
655 |
-
)
|
656 |
-
access_input.submit(
|
657 |
-
verify_access_code,
|
658 |
-
inputs=[access_input],
|
659 |
-
outputs=[access_error, chat_section, access_granted]
|
660 |
-
)
|
661 |
-
|
662 |
-
# Faculty Configuration Section - appears at the bottom with password protection
|
663 |
-
with gr.Accordion("🔧 Faculty Configuration", open=False, visible=True) as faculty_section:
|
664 |
-
gr.Markdown("**Faculty Only:** Edit assistant configuration. Requires CONFIG_CODE secret.")
|
665 |
-
|
666 |
-
# Check if faculty password is configured
|
667 |
-
FACULTY_PASSWORD = os.environ.get("CONFIG_CODE", "").strip()
|
668 |
-
|
669 |
-
if FACULTY_PASSWORD:
|
670 |
-
faculty_auth_state = gr.State(False)
|
671 |
-
|
672 |
-
# Authentication row
|
673 |
-
with gr.Column() as faculty_auth_row:
|
674 |
-
with gr.Row():
|
675 |
-
faculty_password_input = gr.Textbox(
|
676 |
-
label="Faculty Password",
|
677 |
-
type="password",
|
678 |
-
placeholder="Enter faculty configuration password",
|
679 |
-
scale=3
|
680 |
-
)
|
681 |
-
faculty_auth_btn = gr.Button("Unlock Configuration", variant="primary", scale=1)
|
682 |
-
faculty_auth_status = gr.Markdown("")
|
683 |
-
|
684 |
-
# Configuration editor (hidden until authenticated)
|
685 |
-
with gr.Column(visible=False) as faculty_config_section:
|
686 |
-
gr.Markdown("### Edit Assistant Configuration")
|
687 |
-
gr.Markdown("⚠️ **Warning:** Changes will affect all users immediately.")
|
688 |
-
|
689 |
-
# Load current configuration
|
690 |
-
try:
|
691 |
-
with open('config.json', 'r') as f:
|
692 |
-
current_config = json.load(f)
|
693 |
-
except:
|
694 |
-
# Use DEFAULT_CONFIG as fallback
|
695 |
-
current_config = DEFAULT_CONFIG.copy()
|
696 |
-
|
697 |
-
# Editable fields
|
698 |
-
# System Prompt
|
699 |
-
edit_system_prompt = gr.Textbox(
|
700 |
-
label="System Prompt",
|
701 |
-
value=current_config.get('system_prompt', SYSTEM_PROMPT),
|
702 |
-
lines=5
|
703 |
-
)
|
704 |
-
|
705 |
-
# 3. Model Selection
|
706 |
-
edit_model = gr.Dropdown(
|
707 |
-
label="Model",
|
708 |
-
choices=[
|
709 |
-
"google/gemini-2.0-flash-001",
|
710 |
-
"google/gemma-3-27b-it",
|
711 |
-
"anthropic/claude-3.5-sonnet",
|
712 |
-
"anthropic/claude-3.5-haiku",
|
713 |
-
"openai/gpt-4o-mini-search-preview",
|
714 |
-
"openai/gpt-4.1-nano",
|
715 |
-
"nvidia/llama-3.1-nemotron-70b-instruct",
|
716 |
-
"mistralai/devstral-small"
|
717 |
-
],
|
718 |
-
value=current_config.get('model', MODEL)
|
719 |
-
)
|
720 |
-
|
721 |
-
# 4. Example prompts field
|
722 |
-
examples_value = current_config.get('examples', [])
|
723 |
-
if isinstance(examples_value, list):
|
724 |
-
examples_text_value = "\n".join(examples_value)
|
725 |
-
else:
|
726 |
-
examples_text_value = ""
|
727 |
-
|
728 |
-
edit_examples = gr.Textbox(
|
729 |
-
label="Example Prompts (one per line)",
|
730 |
-
value=examples_text_value,
|
731 |
-
lines=3,
|
732 |
-
placeholder="What can you help me with?\nExplain this concept\nHelp me understand..."
|
733 |
-
)
|
734 |
-
|
735 |
-
# 5. Model Parameters
|
736 |
-
with gr.Row():
|
737 |
-
edit_temperature = gr.Slider(
|
738 |
-
label="Temperature",
|
739 |
-
minimum=0,
|
740 |
-
maximum=2,
|
741 |
-
value=current_config.get('temperature', 0.7),
|
742 |
-
step=0.1
|
743 |
-
)
|
744 |
-
edit_max_tokens = gr.Slider(
|
745 |
-
label="Max Tokens",
|
746 |
-
minimum=50,
|
747 |
-
maximum=4096,
|
748 |
-
value=current_config.get('max_tokens', 750),
|
749 |
-
step=50
|
750 |
-
)
|
751 |
-
|
752 |
-
# URL Grounding fields
|
753 |
-
gr.Markdown("### URL Grounding")
|
754 |
-
grounding_urls_value = current_config.get('grounding_urls', [])
|
755 |
-
if isinstance(grounding_urls_value, str):
|
756 |
-
try:
|
757 |
-
import ast
|
758 |
-
grounding_urls_value = ast.literal_eval(grounding_urls_value)
|
759 |
-
except:
|
760 |
-
grounding_urls_value = []
|
761 |
-
|
762 |
-
# Create 10 URL input fields
|
763 |
-
url_fields = []
|
764 |
-
for i in range(10):
|
765 |
-
url_value = grounding_urls_value[i] if i < len(grounding_urls_value) else ""
|
766 |
-
url_field = gr.Textbox(
|
767 |
-
label=f"URL {i+1}" + (" (Primary)" if i < 2 else " (Secondary)"),
|
768 |
-
value=url_value,
|
769 |
-
placeholder="https://..."
|
770 |
-
)
|
771 |
-
url_fields.append(url_field)
|
772 |
-
|
773 |
-
config_locked = gr.Checkbox(
|
774 |
-
label="Lock Configuration (Prevent further edits)",
|
775 |
-
value=current_config.get('locked', False)
|
776 |
-
)
|
777 |
-
|
778 |
-
with gr.Row():
|
779 |
-
save_config_btn = gr.Button("Save Configuration", variant="primary")
|
780 |
-
reset_config_btn = gr.Button("Reset to Defaults", variant="secondary")
|
781 |
-
|
782 |
-
config_status = gr.Markdown("")
|
783 |
-
|
784 |
-
# Faculty authentication function
|
785 |
-
def verify_faculty_password(password):
|
786 |
-
if password == FACULTY_PASSWORD:
|
787 |
-
return (
|
788 |
-
gr.update(value="Authentication successful!"),
|
789 |
-
gr.update(visible=False), # Hide auth row
|
790 |
-
gr.update(visible=True), # Show config section
|
791 |
-
True # Update auth state
|
792 |
-
)
|
793 |
-
else:
|
794 |
-
return (
|
795 |
-
gr.update(value="Invalid password"),
|
796 |
-
gr.update(visible=True), # Keep auth row visible
|
797 |
-
gr.update(visible=False), # Keep config hidden
|
798 |
-
False # Auth failed
|
799 |
-
)
|
800 |
-
|
801 |
-
# Save configuration function
|
802 |
-
def save_configuration(new_prompt, new_model, new_examples, new_temp, new_tokens, *url_values, lock_config, is_authenticated):
|
803 |
-
if not is_authenticated:
|
804 |
-
return "Not authenticated"
|
805 |
-
|
806 |
-
# Check if configuration is already locked
|
807 |
-
try:
|
808 |
-
with open('config.json', 'r') as f:
|
809 |
-
existing_config = json.load(f)
|
810 |
-
if existing_config.get('locked', False):
|
811 |
-
return "Configuration is locked and cannot be modified"
|
812 |
-
except:
|
813 |
-
pass
|
814 |
-
|
815 |
-
# Load current config to preserve all values
|
816 |
-
try:
|
817 |
-
with open('config.json', 'r') as f:
|
818 |
-
current_full_config = json.load(f)
|
819 |
-
except:
|
820 |
-
# If config.json doesn't exist, use default configuration
|
821 |
-
current_full_config = DEFAULT_CONFIG.copy()
|
822 |
-
|
823 |
-
# Process example prompts
|
824 |
-
examples_list = [ex.strip() for ex in new_examples.split('\n') if ex.strip()]
|
825 |
-
|
826 |
-
# Process URL values - lock_config is the last parameter
|
827 |
-
urls = list(url_values[:-1]) # All but last are URLs
|
828 |
-
lock_config_from_args = url_values[-1] # Last is lock_config
|
829 |
-
# Filter out empty URLs
|
830 |
-
grounding_urls = [url.strip() for url in urls if url.strip()]
|
831 |
-
|
832 |
-
# Update all editable fields while preserving everything else
|
833 |
-
current_full_config.update({
|
834 |
-
'system_prompt': new_prompt,
|
835 |
-
'model': new_model,
|
836 |
-
'examples': examples_list,
|
837 |
-
'temperature': new_temp,
|
838 |
-
'max_tokens': int(new_tokens),
|
839 |
-
'grounding_urls': grounding_urls,
|
840 |
-
'locked': lock_config_from_args,
|
841 |
-
'last_modified': datetime.now().isoformat(),
|
842 |
-
'last_modified_by': 'faculty'
|
843 |
-
})
|
844 |
-
|
845 |
-
try:
|
846 |
-
with open('config.json', 'w') as f:
|
847 |
-
json.dump(current_full_config, f, indent=2)
|
848 |
-
|
849 |
-
# Optional: Auto-commit to HuggingFace if token is available
|
850 |
-
hf_token = os.environ.get("HF_TOKEN")
|
851 |
-
space_id = os.environ.get("SPACE_ID")
|
852 |
-
|
853 |
-
if hf_token and space_id:
|
854 |
-
try:
|
855 |
-
from huggingface_hub import HfApi
|
856 |
-
api = HfApi()
|
857 |
-
api.upload_file(
|
858 |
-
path_or_fileobj="config.json",
|
859 |
-
path_in_repo="config.json",
|
860 |
-
repo_id=space_id,
|
861 |
-
repo_type="space",
|
862 |
-
commit_message=f"Update configuration by faculty at {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
|
863 |
-
)
|
864 |
-
return f"✅ Configuration saved and committed to repository at {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n🔄 **Space will restart automatically** to apply changes."
|
865 |
-
except Exception as commit_error:
|
866 |
-
print(f"Note: Could not auto-commit to repository: {commit_error}")
|
867 |
-
return f"✅ Configuration saved locally at {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n🔄 **Manual Restart Required**\nFor changes to take effect:\n1. Go to Settings (⚙️)\n2. Click 'Factory reboot'\n3. Wait ~30 seconds for restart"
|
868 |
-
else:
|
869 |
-
return f"✅ Configuration saved at {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n🔄 **Manual Restart Required**\nFor changes to take effect:\n1. Go to Settings (⚙️)\n2. Click 'Factory reboot'\n3. Wait ~30 seconds for restart"
|
870 |
-
except Exception as e:
|
871 |
-
return f"❌ Error saving configuration: {str(e)}"
|
872 |
-
|
873 |
-
# Reset configuration function
|
874 |
-
def reset_configuration(is_authenticated):
|
875 |
-
if not is_authenticated:
|
876 |
-
updates = ["Not authenticated"] + [gr.update() for _ in range(14)] # 1 status + 14 fields (prompt, model, examples, temp, tokens + 10 urls)
|
877 |
-
return tuple(updates)
|
878 |
-
|
879 |
-
# Check if locked
|
880 |
-
try:
|
881 |
-
with open('config.json', 'r') as f:
|
882 |
-
existing_config = json.load(f)
|
883 |
-
if existing_config.get('locked', False):
|
884 |
-
updates = ["Configuration is locked"] + [gr.update() for _ in range(14)]
|
885 |
-
return tuple(updates)
|
886 |
-
except:
|
887 |
-
pass
|
888 |
-
|
889 |
-
# Get default examples as text
|
890 |
-
default_examples = DEFAULT_CONFIG.get('examples', [])
|
891 |
-
if isinstance(default_examples, list):
|
892 |
-
examples_text = "\n".join(default_examples)
|
893 |
-
else:
|
894 |
-
examples_text = ""
|
895 |
-
|
896 |
-
# Get default URLs - parse from JSON string if needed
|
897 |
-
default_urls = DEFAULT_CONFIG.get('grounding_urls', [])
|
898 |
-
if isinstance(default_urls, str):
|
899 |
-
try:
|
900 |
-
import json
|
901 |
-
default_urls = json.loads(default_urls)
|
902 |
-
except:
|
903 |
-
default_urls = []
|
904 |
-
elif not isinstance(default_urls, list):
|
905 |
-
default_urls = []
|
906 |
-
|
907 |
-
# Reset to original default values
|
908 |
-
updates = [
|
909 |
-
"Reset to default values",
|
910 |
-
gr.update(value=DEFAULT_CONFIG.get('system_prompt', SYSTEM_PROMPT)),
|
911 |
-
gr.update(value=DEFAULT_CONFIG.get('model', MODEL)),
|
912 |
-
gr.update(value=examples_text),
|
913 |
-
gr.update(value=DEFAULT_CONFIG.get('temperature', 0.7)),
|
914 |
-
gr.update(value=DEFAULT_CONFIG.get('max_tokens', 750))
|
915 |
-
]
|
916 |
-
|
917 |
-
# Add URL updates
|
918 |
-
for i in range(10):
|
919 |
-
url_value = default_urls[i] if i < len(default_urls) else ""
|
920 |
-
updates.append(gr.update(value=url_value))
|
921 |
-
|
922 |
-
return tuple(updates)
|
923 |
-
|
924 |
-
# Connect authentication
|
925 |
-
faculty_auth_btn.click(
|
926 |
-
verify_faculty_password,
|
927 |
-
inputs=[faculty_password_input],
|
928 |
-
outputs=[faculty_auth_status, faculty_auth_row, faculty_config_section, faculty_auth_state]
|
929 |
-
)
|
930 |
-
|
931 |
-
faculty_password_input.submit(
|
932 |
-
verify_faculty_password,
|
933 |
-
inputs=[faculty_password_input],
|
934 |
-
outputs=[faculty_auth_status, faculty_auth_row, faculty_config_section, faculty_auth_state]
|
935 |
-
)
|
936 |
-
|
937 |
-
# Connect configuration buttons
|
938 |
-
save_config_btn.click(
|
939 |
-
save_configuration,
|
940 |
-
inputs=[edit_system_prompt, edit_model, edit_examples, edit_temperature, edit_max_tokens] + url_fields + [config_locked, faculty_auth_state],
|
941 |
-
outputs=[config_status]
|
942 |
-
)
|
943 |
-
|
944 |
-
reset_config_btn.click(
|
945 |
-
reset_configuration,
|
946 |
-
inputs=[faculty_auth_state],
|
947 |
-
outputs=[config_status, edit_system_prompt, edit_model, edit_examples, edit_temperature, edit_max_tokens] + url_fields
|
948 |
-
)
|
949 |
-
else:
|
950 |
-
gr.Markdown("Faculty configuration is not enabled. Set CONFIG_CODE in Space secrets to enable.")
|
951 |
|
952 |
if __name__ == "__main__":
|
953 |
demo.launch()
|
|
|
11 |
|
12 |
# Configuration
|
13 |
SPACE_NAME = 'AI Assistant'
|
14 |
+
SPACE_DESCRIPTION = 'A research conversation partner loosely based on Phaedrus from the eponymous Socratic dialogue'
|
15 |
|
16 |
# Default configuration values (used only if config.json is missing)
|
17 |
+
DEFAULT_CONFIG = {
|
18 |
'name': SPACE_NAME,
|
19 |
'description': SPACE_DESCRIPTION,
|
20 |
+
'system_prompt': "You are a Socratic conversation partner for general education courses across all disciplines, embodying constructivist learning principles. Model your approach after Socrates' interlocutor Phaedrus from the eponymous Socratic dialogue, guiding students through source discovery, evaluation, and synthesis using methods of Socratic dialogue. In tone, use punchy responses with ironic or self-referential levity. Ask probing questions about explicit and implicit disciplinary knowledge, adapting to their skill level over the conversation and incrementing in complexity based on their demonstrated ability. Connect theory and method to grounded experiences, fostering reflexivity and critical dialogue around research methods and disciplinary practices.",
|
21 |
'temperature': 0.7,
|
22 |
'max_tokens': 750,
|
23 |
'model': 'google/gemini-2.0-flash-001',
|
24 |
'api_key_var': 'API_KEY',
|
25 |
+
'theme': 'Glass',
|
26 |
'grounding_urls': '[]',
|
27 |
'enable_dynamic_urls': True,
|
28 |
'examples': ['Can you help me understand why the sky is blue?'],
|
29 |
'locked': False
|
30 |
+
}
|
31 |
|
32 |
# Load configuration from file - this is the single source of truth
|
33 |
def load_config():
|
|
|
48 |
pass
|
49 |
return DEFAULT_CONFIG
|
50 |
except Exception as e:
|
51 |
+
print(f"⚠️ Error loading config.json: {e}, using defaults")
|
52 |
return DEFAULT_CONFIG
|
53 |
|
54 |
# Load configuration
|
|
|
76 |
API_KEY = API_KEY.strip() # Remove any whitespace
|
77 |
if not API_KEY: # Check if empty after stripping
|
78 |
API_KEY = None
|
|
|
|
|
|
|
|
|
|
|
|
|
79 |
|
80 |
def get_grounding_context():
|
81 |
"""Fetch context from grounding URLs with caching"""
|
82 |
+
# Handle both string and list formats for grounding_urls
|
83 |
+
urls = GROUNDING_URLS
|
84 |
+
if isinstance(urls, str):
|
85 |
+
try:
|
86 |
+
urls = json.loads(urls)
|
87 |
+
except:
|
88 |
+
urls = []
|
|
|
|
|
|
|
|
|
|
89 |
|
90 |
+
if not urls:
|
91 |
+
return ""
|
|
|
|
|
|
|
|
|
|
|
92 |
|
93 |
+
# For this simplified template, return empty context
|
94 |
+
# Full URL fetching can be implemented as needed
|
95 |
+
return ""
|
96 |
|
97 |
def export_conversation_to_markdown(conversation_history):
|
98 |
"""Export conversation history to markdown format"""
|
|
|
100 |
return "No conversation to export."
|
101 |
|
102 |
markdown_content = f"""# Conversation Export
|
103 |
+
Generated on: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
|
104 |
|
105 |
---
|
106 |
|
|
|
114 |
|
115 |
if role == 'user':
|
116 |
message_pair_count += 1
|
117 |
+
markdown_content += f"## User Message {message_pair_count}\n\n{content}\n\n"
|
|
|
118 |
elif role == 'assistant':
|
119 |
+
markdown_content += f"## Assistant Response {message_pair_count}\n\n{content}\n\n---\n\n"
|
|
|
|
|
|
|
|
120 |
|
121 |
return markdown_content
|
122 |
|
|
|
123 |
def generate_response(message, history):
|
124 |
"""Generate response using OpenRouter API"""
|
125 |
|
126 |
# Enhanced API key validation with helpful messages
|
127 |
if not API_KEY:
|
128 |
+
error_msg = f"🔑 **API Key Required**\n\n"
|
129 |
+
error_msg += f"Please configure your OpenRouter API key:"
|
130 |
+
error_msg += f"1. Go to Settings (⚙️) in your HuggingFace Space\n"
|
131 |
+
error_msg += f"2. Click 'Variables and secrets'\n"
|
132 |
+
error_msg += f"3. Add secret: **{API_KEY_VAR}**\n"
|
133 |
+
error_msg += f"4. Value: Your OpenRouter API key (starts with `sk-or-`)\n\n"
|
|
|
|
|
|
|
|
|
|
|
|
|
134 |
error_msg += f"Get your API key at: https://openrouter.ai/keys"
|
|
|
135 |
return error_msg
|
136 |
|
137 |
# Get grounding context
|
138 |
grounding_context = get_grounding_context()
|
139 |
|
|
|
|
|
|
|
|
|
|
140 |
# Build enhanced system prompt with grounding context
|
141 |
enhanced_system_prompt = SYSTEM_PROMPT + grounding_context
|
142 |
|
143 |
# Build messages array for the API
|
144 |
messages = [{"role": "system", "content": enhanced_system_prompt}]
|
145 |
|
146 |
+
# Add conversation history
|
147 |
for chat in history:
|
148 |
if isinstance(chat, dict):
|
|
|
149 |
messages.append(chat)
|
150 |
elif isinstance(chat, (list, tuple)) and len(chat) >= 2:
|
151 |
+
messages.append({"role": "user", "content": chat[0]})
|
152 |
+
messages.append({"role": "assistant", "content": chat[1]})
|
|
|
|
|
|
|
|
|
153 |
|
154 |
# Add current message
|
155 |
messages.append({"role": "user", "content": message})
|
156 |
|
157 |
+
# Make API request
|
158 |
try:
|
|
|
|
|
|
|
|
|
159 |
response = requests.post(
|
160 |
url="https://openrouter.ai/api/v1/chat/completions",
|
161 |
headers={
|
162 |
"Authorization": f"Bearer {API_KEY}",
|
163 |
"Content-Type": "application/json",
|
164 |
+
"HTTP-Referer": "https://huggingface.co",
|
165 |
+
"X-Title": "HuggingFace Space"
|
166 |
},
|
167 |
json={
|
168 |
"model": MODEL,
|
169 |
"messages": messages,
|
170 |
+
"temperature": temperature,
|
171 |
+
"max_tokens": max_tokens
|
172 |
},
|
173 |
timeout=30
|
174 |
)
|
175 |
|
|
|
|
|
176 |
if response.status_code == 200:
|
177 |
+
result = response.json()
|
178 |
+
return result['choices'][0]['message']['content']
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
179 |
else:
|
180 |
+
return f"❌ API Error: {response.status_code} - {response.text}"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
181 |
|
|
|
|
|
|
|
|
|
|
|
|
182 |
except Exception as e:
|
183 |
+
return f"❌ Error: {str(e)}"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
184 |
|
185 |
+
# Create interface
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
186 |
theme_class = getattr(gr.themes, THEME, gr.themes.Default)
|
187 |
with gr.Blocks(title=SPACE_NAME, theme=theme_class()) as demo:
|
188 |
gr.Markdown(f"# {SPACE_NAME}")
|
189 |
gr.Markdown(SPACE_DESCRIPTION)
|
190 |
|
191 |
+
# Get examples from config
|
192 |
+
examples = config.get('examples', [])
|
193 |
+
if isinstance(examples, str):
|
194 |
+
try:
|
195 |
+
examples = json.loads(examples)
|
196 |
+
except:
|
197 |
+
examples = []
|
198 |
+
|
199 |
+
chat_interface = gr.ChatInterface(
|
200 |
+
fn=generate_response,
|
201 |
+
title="",
|
202 |
+
description="",
|
203 |
+
examples=examples if examples else None,
|
204 |
+
type="messages"
|
205 |
+
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
206 |
|
207 |
if __name__ == "__main__":
|
208 |
demo.launch()
|
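Note on the simplified get_grounding_context(): the new implementation parses GROUNDING_URLS but then deliberately returns an empty string ("Full URL fetching can be implemented as needed"). If grounding is re-enabled later, a minimal sketch in the spirit of the removed fetch_url_content() helper could look roughly like the code below. This is an illustrative sketch only, not part of the commit; it assumes requests and beautifulsoup4 are still available in the Space and that urls is the list already parsed inside get_grounding_context().

import requests
from bs4 import BeautifulSoup

_url_content_cache = {}  # in-process cache, mirroring the removed version

def fetch_url_content(url, timeout=15, max_chars=4000):
    """Best-effort fetch of a grounding URL, returning cleaned, truncated text."""
    if url in _url_content_cache:
        return _url_content_cache[url]
    try:
        response = requests.get(url, timeout=timeout)
        response.raise_for_status()
        soup = BeautifulSoup(response.content, "html.parser")
        # Drop non-content elements before extracting text, as the removed code did
        for element in soup(["script", "style", "nav", "header", "footer"]):
            element.decompose()
        text = " ".join(soup.get_text().split())[:max_chars]
        result = text if text else f"No readable content found at {url}"
    except requests.RequestException as e:
        # Return the error inline so a bad URL degrades the context instead of raising
        result = f"Error fetching {url}: {e}"
    _url_content_cache[url] = result
    return result

def build_grounding_context(urls):
    """Assemble the block that get_grounding_context() would append to the system prompt."""
    parts = [f"Context from URL {i} ({u}):\n{fetch_url_content(u)}"
             for i, u in enumerate(urls, 1) if u and u.strip()]
    return "\n\n" + "\n\n".join(parts) if parts else ""

The priority labels, dynamic-URL extraction and sentence-boundary truncation from the removed version are left out here on purpose; they can be layered back in as needed.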
config.json
CHANGED
@@ -1,7 +1,7 @@
|
|
1 |
{
|
2 |
"name": "AI Assistant",
|
3 |
-
"description": "A
|
4 |
-
"system_prompt": "You are a
|
5 |
"model": "google/gemini-2.0-flash-001",
|
6 |
"api_key_var": "API_KEY",
|
7 |
"temperature": 0.7,
|
@@ -11,5 +11,5 @@
|
|
11 |
],
|
12 |
"grounding_urls": [],
|
13 |
"enable_dynamic_urls": true,
|
14 |
-
"theme": "
|
15 |
}
|
|
|
1 |
{
|
2 |
"name": "AI Assistant",
|
3 |
+
"description": "A research conversation partner loosely based on Phaedrus from the eponymous Socratic dialogue",
|
4 |
+
"system_prompt": "You are a Socratic conversation partner for general education courses across all disciplines, embodying constructivist learning principles. Model your approach after Socrates' interlocutor Phaedrus from the eponymous Socratic dialogue, guiding students through source discovery, evaluation, and synthesis using methods of Socratic dialogue. In tone, use punchy responses with ironic or self-referential levity. Ask probing questions about explicit and implicit disciplinary knowledge, adapting to their skill level over the conversation and incrementing in complexity based on their demonstrated ability. Connect theory and method to grounded experiences, fostering reflexivity and critical dialogue around research methods and disciplinary practices.",
|
5 |
"model": "google/gemini-2.0-flash-001",
|
6 |
"api_key_var": "API_KEY",
|
7 |
"temperature": 0.7,
|
|
|
11 |
],
|
12 |
"grounding_urls": [],
|
13 |
"enable_dynamic_urls": true,
|
14 |
+
"theme": "Glass"
|
15 |
}
|
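For context, app.py treats config.json as the single source of truth and only falls back to DEFAULT_CONFIG when the file is missing or unreadable; the diff shows the fallback branch around lines 48-52 but not the full body of load_config(). A rough sketch consistent with what is visible follows — the exact validation inside the real load_config() may differ, and DEFAULT_CONFIG is abbreviated here.

import json

# Abbreviated defaults; the full DEFAULT_CONFIG in app.py carries every field shown above.
DEFAULT_CONFIG = {
    "name": "AI Assistant",
    "model": "google/gemini-2.0-flash-001",
    "temperature": 0.7,
    "max_tokens": 750,
    "theme": "Glass",
}

def load_config():
    """Load config.json, falling back to DEFAULT_CONFIG on any problem (sketch)."""
    try:
        with open("config.json", "r") as f:
            config = json.load(f)
        if not isinstance(config, dict):
            # Treat a malformed file the same as a missing one (assumed behaviour).
            return DEFAULT_CONFIG
        return config
    except FileNotFoundError:
        return DEFAULT_CONFIG
    except Exception as e:
        print(f"⚠️ Error loading config.json: {e}, using defaults")
        return DEFAULT_CONFIG

Presumably the temperature and max_tokens read here are what generate_response() forwards in its OpenRouter payload, and "theme": "Glass" is resolved at startup via getattr(gr.themes, THEME, gr.themes.Default).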