|
import gradio as gr |
|
import tempfile |
|
import os |
|
import requests |
|
import json |
|
import re |
|
from bs4 import BeautifulSoup |
|
from datetime import datetime |
|
import urllib.parse |
|
|
|
|
|
|
|
SPACE_NAME = 'Writing Aid'

SPACE_DESCRIPTION = 'A customizable AI assistant'

# Fallback configuration used when config.json is absent or unreadable.
# 'theme' must be a *string* naming an attribute of gr.themes (it is resolved
# later via getattr); the bare identifier `Origin` was a NameError at import.
DEFAULT_CONFIG = {
    'name': SPACE_NAME,
    'description': SPACE_DESCRIPTION,
    'system_prompt': 'You are a humanities scholar and pedagogue specializing in interdisciplinary approaches across literature, philosophy, history, religious studies, and cultural analysis. Your expertise lies in close reading, hermeneutical interpretation, contextual analysis, and cross-cultural comparison. Guide students through primary source analysis, encourage deep engagement with texts and artifacts, and foster critical interpretation skills. Emphasize the importance of historical context, cultural sensitivity, and multiple perspectives. Help students develop sophisticated arguments grounded in textual evidence while appreciating the complexity and ambiguity inherent in humanistic inquiry. Draw connections between historical and contemporary issues, encouraging students to see the ongoing relevance of humanistic knowledge. Model intellectual curiosity, empathy, and the art of asking meaningful questions about human experience, meaning, and values.',
    'temperature': 0.8,
    'max_tokens': 1000,
    'model': 'openai/gpt-4.1-nano',
    'api_key_var': 'API_KEY',
    'theme': 'Origin',
    'grounding_urls': ["https://en.wikipedia.org/wiki/Hermeneutics", "https://plato.stanford.edu/entries/hermeneutics/", "https://en.wikipedia.org/wiki/Close_reading", "https://en.wikipedia.org/wiki/Cultural_studies"],
    'enable_dynamic_urls': True,
    'examples': ['How do I analyze the symbolism in this medieval manuscript?', "What historical context should I consider when reading Dante's Inferno?", 'Can you help me compare philosophical approaches to justice across different cultures?', 'How do I interpret conflicting historical accounts of the same event?'],
    'locked': False
}
|
|
|
|
|
def load_config():
    """Load configuration from config.json with fallback to defaults.

    If config.json is missing, a new one seeded from DEFAULT_CONFIG is
    written (best-effort) so the faculty Configuration tab has a file
    to edit.

    Returns:
        dict: the parsed configuration, or DEFAULT_CONFIG on any failure.
    """
    try:
        with open('config.json', 'r') as f:
            config = json.load(f)
        print("✅ Loaded configuration from config.json")
        return config
    except FileNotFoundError:
        print("βΉοΈ No config.json found, using default configuration")
        # Seed a config.json so later edits have something to start from.
        try:
            with open('config.json', 'w') as f:
                json.dump(DEFAULT_CONFIG, f, indent=2)
            print("✅ Created config.json with default values")
        except OSError:
            # Read-only filesystem (common on Spaces) — defaults still work.
            pass
        return DEFAULT_CONFIG
    except Exception as e:
        print(f"β οΈ Error loading config.json: {e}, using defaults")
        return DEFAULT_CONFIG
|
|
|
|
|
# Load the effective configuration once at import time.
config = load_config()

# Resolve each setting with a per-key fallback to DEFAULT_CONFIG so that a
# partially-filled config.json still yields a complete configuration.
SPACE_NAME = config.get('name', DEFAULT_CONFIG['name'])
SPACE_DESCRIPTION = config.get('description', DEFAULT_CONFIG['description'])
SYSTEM_PROMPT = config.get('system_prompt', DEFAULT_CONFIG['system_prompt'])
temperature = config.get('temperature', DEFAULT_CONFIG['temperature'])
max_tokens = config.get('max_tokens', DEFAULT_CONFIG['max_tokens'])
MODEL = config.get('model', DEFAULT_CONFIG['model'])
THEME = config.get('theme', DEFAULT_CONFIG['theme'])
GROUNDING_URLS = config.get('grounding_urls', DEFAULT_CONFIG['grounding_urls'])
ENABLE_DYNAMIC_URLS = config.get('enable_dynamic_urls', DEFAULT_CONFIG['enable_dynamic_urls'])

# Optional student access code; when unset the chat is open to everyone.
ACCESS_CODE = os.environ.get("ACCESS_CODE")

# The OpenRouter API key is read from the Space secret named by 'api_key_var'.
API_KEY_VAR = config.get('api_key_var', DEFAULT_CONFIG['api_key_var'])
API_KEY = os.environ.get(API_KEY_VAR)
if API_KEY:
    API_KEY = API_KEY.strip()
if not API_KEY:
    # Normalize empty / whitespace-only values to None so later checks can
    # use a simple truthiness test.
    API_KEY = None
|
|
|
|
|
def validate_api_key():
    """Validate API key configuration with detailed logging.

    Returns:
        bool: False when no key is configured; True otherwise (a key with an
        unexpected prefix is accepted but logged with a format warning).
    """
    if not API_KEY:
        print(f"β οΈ API KEY CONFIGURATION ERROR:")
        print(f"   Variable name: {API_KEY_VAR}")
        print(f"   Status: Not set or empty")
        print(f"   Action needed: Set '{API_KEY_VAR}' in HuggingFace Space secrets")
        print(f"   Expected format: sk-or-xxxxxxxxxx")
        return False
    elif not API_KEY.startswith('sk-or-'):
        print(f"β οΈ API KEY FORMAT WARNING:")
        print(f"   Variable name: {API_KEY_VAR}")
        # Bug fix: the original else-branch passed the literal text
        # "{API_KEY}" (missing f-prefix) to print. Mask long keys and show
        # short ones as-is instead.
        masked_key = f"{API_KEY[:10]}..." if len(API_KEY) > 10 else API_KEY
        print(f"   Current value: {masked_key}")
        print(f"   Expected format: sk-or-xxxxxxxxxx")
        print(f"   Note: OpenRouter keys should start with 'sk-or-'")
        return True
    else:
        print(f"✅ API Key configured successfully")
        print(f"   Variable: {API_KEY_VAR}")
        print(f"   Format: Valid OpenRouter key")
        return True
|
|
|
|
|
# Validate once at startup. The NameError guard is defensive: if the
# validation helper is somehow missing, treat the key as unconfigured
# instead of crashing the Space at import time.
try:
    API_KEY_VALID = validate_api_key()
except NameError:
    API_KEY_VALID = False
|
|
|
def validate_url_domain(url):
    """Basic URL domain validation.

    Accepts only URLs whose parsed network location contains a dot
    (e.g. "example.com"); bare hosts like "localhost" are rejected.

    Args:
        url: candidate URL string.

    Returns:
        bool: True when the URL has a plausible domain, False otherwise.
    """
    try:
        from urllib.parse import urlparse
        parsed = urlparse(url)
        # Require a host component containing at least one dot.
        return bool(parsed.netloc and '.' in parsed.netloc)
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed; malformed input is simply invalid.
        return False
|
|
|
def fetch_url_content(url):
    """Enhanced URL content fetching with improved compatibility and error handling"""
    # Reject obviously malformed URLs before spending a network round-trip.
    if not validate_url_domain(url):
        return f"Invalid URL format: {url}"

    try:
        # Browser-like headers: some sites refuse the default python-requests
        # User-Agent.
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'en-US,en;q=0.5',
            'Accept-Encoding': 'gzip, deflate',
            'Connection': 'keep-alive'
        }

        response = requests.get(url, timeout=15, headers=headers)
        response.raise_for_status()
        soup = BeautifulSoup(response.content, 'html.parser')

        # Strip boilerplate elements that only add noise to the extracted text.
        for element in soup(["script", "style", "nav", "header", "footer", "aside", "form", "button"]):
            element.decompose()

        # Prefer semantic containers; fall back to the whole document.
        main_content = soup.find('main') or soup.find('article') or soup.find('div', class_=lambda x: bool(x and 'content' in x.lower())) or soup
        text = main_content.get_text()

        # Collapse whitespace and drop fragments of 2 characters or fewer.
        # NOTE(review): splitting on a single space makes the len(chunk) > 2
        # filter drop every short word ("of", "is", ...) — confirm the common
        # double-space split ("  ") from the BeautifulSoup recipe wasn't intended.
        lines = (line.strip() for line in text.splitlines())
        chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
        text = ' '.join(chunk for chunk in chunks if chunk and len(chunk) > 2)

        # Cap at ~4000 chars, preferring to cut at a sentence boundary when
        # one falls late enough in the window.
        if len(text) > 4000:
            truncated_text = text[:4000]
            last_period = truncated_text.rfind('.')
            if last_period > 3500:
                text = truncated_text[:last_period + 1]
            else:
                text = truncated_text + "..."

        return text if text.strip() else "No readable content found at this URL"

    except requests.exceptions.Timeout:
        return f"Timeout error fetching {url} (15s limit exceeded)"
    except requests.exceptions.RequestException as e:
        return f"Error fetching {url}: {str(e)}"
    except Exception as e:
        # Parsing/decoding failures are reported as content, never raised.
        return f"Error processing content from {url}: {str(e)}"
|
|
|
def extract_urls_from_text(text):
    """Extract http(s) URLs from free text.

    Trailing sentence punctuation is stripped from each match, and
    implausibly short matches are discarded.

    Args:
        text: arbitrary user text.

    Returns:
        list[str]: validated URLs in order of appearance.
    """
    url_pattern = r'https?://[^\s<>"{}|\\^`\[\]"]+'
    # Strip trailing punctuation, then keep only plausible URLs.
    cleaned = (match.rstrip('.,!?;:') for match in re.findall(url_pattern, text))
    return [candidate for candidate in cleaned
            if '.' in candidate and len(candidate) > 10]
|
|
|
|
|
# Cache of fetched grounding content, keyed by the sorted tuple of URLs, so
# repeated chat turns do not re-download the same pages.
_url_content_cache = {}


def get_grounding_context():
    """Fetch context from grounding URLs with caching"""
    urls = GROUNDING_URLS
    # The config may store the URL list as a JSON-encoded string.
    if isinstance(urls, str):
        try:
            urls = json.loads(urls)
        except:
            urls = []

    if not urls:
        return ""

    # Order-insensitive cache key over the non-empty URLs.
    cache_key = tuple(sorted([url for url in urls if url and url.strip()]))

    if cache_key in _url_content_cache:
        return _url_content_cache[cache_key]

    context_parts = []
    for i, url in enumerate(urls, 1):
        if url.strip():
            content = fetch_url_content(url.strip())
            # The first two URLs are labelled as primary sources for the model.
            priority_label = "PRIMARY" if i <= 2 else "SECONDARY"
            context_parts.append(f"[{priority_label}] Context from URL {i} ({url}):\n{content}")

    if context_parts:
        result = "\n\n" + "\n\n".join(context_parts) + "\n\n"
    else:
        result = ""

    # Cache even the empty result so failed lookups are not retried per turn.
    _url_content_cache[cache_key] = result
    return result
|
|
|
def export_conversation_to_markdown(conversation_history):
    """Render a conversation as a markdown transcript.

    Accepts either openai-style {'role', 'content'} dicts or legacy
    (user, assistant) pairs.

    Args:
        conversation_history: list of messages in either format.

    Returns:
        str: markdown document, or a short notice when there is nothing
        to export.
    """
    if not conversation_history:
        return "No conversation to export."

    header = f"""# Conversation Export
Generated on: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}

---

"""

    sections = []
    pair_no = 0
    for entry in conversation_history:
        if isinstance(entry, dict):
            role = entry.get('role', 'unknown')
            body = entry.get('content', '')
            if role == 'user':
                pair_no += 1
                sections.append(f"## User Message {pair_no}\n\n{body}\n\n")
            elif role == 'assistant':
                sections.append(f"## Assistant Response {pair_no}\n\n{body}\n\n---\n\n")
        elif isinstance(entry, (list, tuple)) and len(entry) >= 2:
            # Legacy tuple format: one entry holds both sides of an exchange.
            pair_no += 1
            user_part, assistant_part = entry[0], entry[1]
            if user_part:
                sections.append(f"## User Message {pair_no}\n\n{user_part}\n\n")
            if assistant_part:
                sections.append(f"## Assistant Response {pair_no}\n\n{assistant_part}\n\n---\n\n")

    return header + "".join(sections)
|
|
|
|
|
def generate_response(message, history, files=None):
    """Generate a chat response via the OpenRouter API.

    Bug fix: the signature previously lacked the ``files`` parameter even
    though the body reads ``files`` and the caller
    (protected_generate_response) passes three arguments.

    Args:
        message: the latest user message.
        history: prior conversation, as openai-style dicts or (user,
            assistant) pairs.
        files: optional list of uploaded file objects whose extracted text
            is appended to the system prompt.

    Returns:
        str: the assistant reply, or a user-facing error message.
    """
    # Without an API key we can only explain how to configure one.
    if not API_KEY:
        error_msg = f"π **API Key Required**\n\n"
        error_msg += f"Please configure your OpenRouter API key:\n"
        error_msg += f"1. Go to Settings (βοΈ) in your HuggingFace Space\n"
        error_msg += f"2. Click 'Variables and secrets'\n"
        error_msg += f"3. Add secret: **{API_KEY_VAR}**\n"
        error_msg += f"4. Value: Your OpenRouter API key (starts with `sk-or-`)\n\n"
        error_msg += f"Get your API key at: https://openrouter.ai/keys"
        print(f"β API request failed: No API key configured for {API_KEY_VAR}")
        return error_msg

    # Static grounding context from the configured URLs (cached).
    grounding_context = get_grounding_context()

    # Append extracted text from any uploaded files.
    file_context = ""
    if files:
        file_contents = []
        for file_obj in files:
            if file_obj is not None:
                try:
                    # NOTE(review): extract_file_content is not defined in this
                    # module — presumably provided elsewhere; verify before use.
                    file_content = extract_file_content(file_obj.name)
                    file_contents.append(file_content)
                except Exception as e:
                    file_contents.append(f"Error processing file: {str(e)}")

        if file_contents:
            file_context = "\n\n[UPLOADED FILES]\n" + "\n\n".join(file_contents) + "\n"

    # Optionally fetch pages for URLs the user pasted into the message.
    if ENABLE_DYNAMIC_URLS:
        urls_in_message = extract_urls_from_text(message)
        if urls_in_message:
            dynamic_context = ""
            for url in urls_in_message[:3]:  # cap at 3 URLs to bound latency
                content = fetch_url_content(url)
                dynamic_context += f"\n\n[DYNAMIC] Context from {url}:\n{content}"
            grounding_context += dynamic_context

    # All context rides along inside the system prompt.
    enhanced_system_prompt = SYSTEM_PROMPT + grounding_context + file_context

    messages = [{"role": "system", "content": enhanced_system_prompt}]

    # Normalize history entries (dicts or pairs) into openai-style messages.
    for chat in history:
        if isinstance(chat, dict):
            messages.append(chat)
        elif isinstance(chat, (list, tuple)) and len(chat) >= 2:
            messages.append({"role": "user", "content": chat[0]})
            messages.append({"role": "assistant", "content": chat[1]})

    messages.append({"role": "user", "content": message})

    try:
        print(f"π Making API request to OpenRouter...")
        print(f"   Model: {MODEL}")
        print(f"   Messages: {len(messages)} in conversation")

        response = requests.post(
            url="https://openrouter.ai/api/v1/chat/completions",
            headers={
                "Authorization": f"Bearer {API_KEY}",
                "Content-Type": "application/json",
                # Attribution headers recommended by OpenRouter.
                "HTTP-Referer": "https://huggingface.co",
                "X-Title": "HuggingFace Space"
            },
            json={
                "model": MODEL,
                "messages": messages,
                "temperature": temperature,
                "max_tokens": max_tokens
            },
            timeout=30
        )

        print(f"π‘ API Response: {response.status_code}")

        if response.status_code == 200:
            try:
                result = response.json()
                return result['choices'][0]['message']['content']
            except (KeyError, IndexError, json.JSONDecodeError) as e:
                error_msg = f"β **Response Parsing Error**\n\n"
                error_msg += f"Received response from API but couldn't parse it properly.\n"
                error_msg += f"Error: {str(e)}\n\n"
                error_msg += f"**Troubleshooting:**\n"
                error_msg += f"1. Check OpenRouter service status\n"
                error_msg += f"2. Try again in a few moments\n"
                error_msg += f"3. Try a different model if available"
                print(f"β Response parsing error: {str(e)}")
                return error_msg
        elif response.status_code == 401:
            error_msg = f"π **Authentication Error**\n\n"
            error_msg += f"Your API key appears to be invalid or expired.\n\n"
            error_msg += f"**Troubleshooting:**\n"
            error_msg += f"1. Check that your **{API_KEY_VAR}** secret is set correctly\n"
            error_msg += f"2. Verify your OpenRouter API key at https://openrouter.ai/keys\n"
            error_msg += f"3. Make sure the key starts with `sk-or-`\n"
            error_msg += f"4. Check if you have sufficient credits"
            print(f"β Authentication failed: Invalid API key")
            return error_msg
        elif response.status_code == 429:
            error_msg = f"β±οΈ **Rate Limit Exceeded**\n\n"
            error_msg += f"Too many requests. Please wait a moment and try again.\n\n"
            error_msg += f"**Troubleshooting:**\n"
            error_msg += f"1. Wait 30-60 seconds before trying again\n"
            error_msg += f"2. Check your OpenRouter usage limits\n"
            error_msg += f"3. Consider upgrading your OpenRouter plan"
            print(f"β Rate limit exceeded")
            return error_msg
        elif response.status_code == 400:
            error_msg = f"π **Request Error**\n\n"
            error_msg += f"There was a problem with the request format.\n"
            error_msg += f"Response: {response.text[:500]}\n\n"
            error_msg += f"**Troubleshooting:**\n"
            error_msg += f"1. Try a shorter message\n"
            error_msg += f"2. Check for special characters in your message\n"
            error_msg += f"3. Try a different model"
            print(f"β Bad request: {response.status_code} - {response.text[:200]}")
            return error_msg
        else:
            error_msg = f"π **API Error {response.status_code}**\n\n"
            error_msg += f"An unexpected error occurred.\n"
            error_msg += f"Response: {response.text[:500]}\n\n"
            error_msg += f"**Troubleshooting:**\n"
            error_msg += f"1. Try again in a few moments\n"
            error_msg += f"2. Check OpenRouter service status\n"
            error_msg += f"3. Contact support if this persists"
            print(f"β API error: {response.status_code} - {response.text[:200]}")
            return error_msg

    except requests.exceptions.Timeout:
        error_msg = f"β° **Request Timeout**\n\n"
        error_msg += f"The API request took too long (30s limit).\n\n"
        error_msg += f"**Troubleshooting:**\n"
        error_msg += f"1. Try again with a shorter message\n"
        error_msg += f"2. Check your internet connection\n"
        error_msg += f"3. Try a different model"
        print(f"β Request timeout after 30 seconds")
        return error_msg
    except requests.exceptions.ConnectionError:
        error_msg = f"π **Connection Error**\n\n"
        error_msg += f"Could not connect to OpenRouter API.\n\n"
        error_msg += f"**Troubleshooting:**\n"
        error_msg += f"1. Check your internet connection\n"
        error_msg += f"2. Check OpenRouter service status\n"
        error_msg += f"3. Try again in a few moments"
        print(f"β Connection error to OpenRouter API")
        return error_msg
    except Exception as e:
        error_msg = "β **Unexpected Error**\n\n"
        error_msg += "An unexpected error occurred:\n"
        error_msg += f"`{str(e)}`\n\n"
        error_msg += "Please try again or contact support if this persists."
        print(f"β Unexpected error: {str(e)}")
        return error_msg
|
|
|
|
|
# Per-session Gradio state mirroring whether the access code was accepted.
access_granted = gr.State(False)
# Module-level mirror used by plain functions that cannot read gr.State.
_access_granted_global = False
|
|
|
def verify_access_code(code):
    """Verify the student access code and reveal the chat on success.

    Bug fixes: ``gr.update(..., style=...)`` is not a supported update
    property in current Gradio, and the status Markdown is created with
    visible=False but was never made visible, so feedback never displayed.

    Args:
        code: the code entered by the user.

    Returns:
        tuple: (status-markdown update, chat-section visibility update,
        new value for the access_granted state).
    """
    global _access_granted_global
    # No code configured: the space is open to everyone.
    if ACCESS_CODE is None:
        _access_granted_global = True
        return gr.update(value="No access code required.", visible=True), gr.update(visible=True), True

    if code == ACCESS_CODE:
        _access_granted_global = True
        return gr.update(value="✅ Access granted!", visible=True), gr.update(visible=True), True
    else:
        _access_granted_global = False
        return gr.update(value="β Invalid access code. Please try again.", visible=True), gr.update(visible=False), False
|
|
|
def protected_generate_response(message, history, files=None): |
|
"""Protected response function that checks access""" |
|
|
|
if ACCESS_CODE is not None and not _access_granted_global: |
|
return "Please enter the access code to continue." |
|
return generate_response(message, history, files) |
|
|
|
|
|
# Module-level transcript of the latest conversation, used by the export
# button (ChatInterface does not expose its history to plain callbacks).
chat_history_store = []


def store_and_generate_response(message, history, files=None):
    """Generate a reply and refresh the module-level transcript.

    Args:
        message: the latest user message.
        history: prior conversation from the ChatInterface.
        files: optional uploaded files forwarded to the generator.

    Returns:
        str: the assistant reply.
    """
    global chat_history_store

    response = protected_generate_response(message, history, files)

    # Rebuild the stored transcript from scratch on every turn, normalizing
    # both dict-style and (user, assistant) pair entries.
    rebuilt = []
    for exchange in (history or []):
        if isinstance(exchange, dict):
            rebuilt.append(exchange)
        elif isinstance(exchange, (list, tuple)) and len(exchange) >= 2:
            rebuilt.append({"role": "user", "content": exchange[0]})
            rebuilt.append({"role": "assistant", "content": exchange[1]})

    # Append the exchange that just happened.
    rebuilt.append({"role": "user", "content": message})
    rebuilt.append({"role": "assistant", "content": response})
    chat_history_store = rebuilt

    return response
|
|
|
def export_current_conversation():
    """Export the stored conversation to a downloadable markdown file.

    Returns:
        gr.update: a File update pointing at the temp file, or a hidden
        update when nothing has been stored yet.
    """
    # Nothing stored yet — keep the download widget hidden.
    if not chat_history_store:
        return gr.update(visible=False)

    markdown_content = export_conversation_to_markdown(chat_history_store)

    # Persist to a temp .md file (delete=False) so Gradio can serve it.
    with tempfile.NamedTemporaryFile(mode='w', suffix='.md', delete=False, encoding='utf-8') as handle:
        handle.write(markdown_content)
        exported_path = handle.name

    return gr.update(value=exported_path, visible=True)
|
|
|
def export_conversation(history):
    """Export a given conversation history to a downloadable markdown file.

    Args:
        history: conversation messages to export.

    Returns:
        gr.update: a File update pointing at the temp file, or a hidden
        update when the history is empty.
    """
    # No conversation — keep the download widget hidden.
    if not history:
        return gr.update(visible=False)

    markdown_content = export_conversation_to_markdown(history)

    # Persist to a temp .md file (delete=False) so Gradio can serve it.
    with tempfile.NamedTemporaryFile(mode='w', suffix='.md', delete=False, encoding='utf-8') as handle:
        handle.write(markdown_content)
        output_path = handle.name

    return gr.update(value=output_path, visible=True)
|
|
|
|
|
def get_configuration_status():
    """Build the markdown status summary shown in the Configuration accordion.

    Returns:
        str: newline-joined markdown lines describing the active settings.
    """
    lines = [
        f"**Name:** {SPACE_NAME}",
        f"**Model:** {MODEL}",
        f"**Theme:** {THEME}",
        f"**Temperature:** {temperature}",
        f"**Max Response Tokens:** {max_tokens}",
        "",
        "",
    ]

    # Example prompts may be stored as a Python-literal string in the config.
    prompts = config.get('examples', [])
    if isinstance(prompts, str):
        try:
            import ast
            prompts = ast.literal_eval(prompts)
        except:
            prompts = []

    if prompts and len(prompts) > 0:
        lines.append("**Example Prompts:**")
        for prompt in prompts[:5]:
            lines.append(f"β’ {prompt}")
        if len(prompts) > 5:
            lines.append(f"β’ ... and {len(prompts) - 5} more")
    else:
        lines.append("**Example Prompts:** No example prompts configured")

    # Grounding URLs may likewise be a literal-encoded string.
    url_list = GROUNDING_URLS
    if isinstance(url_list, str):
        try:
            import ast
            url_list = ast.literal_eval(url_list)
        except:
            url_list = []

    if url_list and len(url_list) > 0:
        lines.append("")
        lines.append("**Grounding URLs:**")
        for idx, url in enumerate(url_list[:5], 1):
            lines.append(f"{idx}. {url}")
        if len(url_list) > 5:
            lines.append(f"... and {len(url_list) - 5} more URLs")

    lines.append("")
    lines.append(f"**System Prompt:** {SYSTEM_PROMPT}")

    lines.append("")
    if not API_KEY_VALID:
        lines.append(f"**Note:** API key ({API_KEY_VAR}) not configured in Space secrets")

    return "\n".join(lines)
|
|
|
|
|
def verify_hf_token_access():
    """Check that HF_TOKEN can read the Space identified by SPACE_ID.

    Returns:
        tuple[bool, str]: (success flag, human-readable status message).
    """
    token = os.environ.get("HF_TOKEN")
    repo = os.environ.get("SPACE_ID")

    # Both secrets are required before authentication can even be attempted.
    if not token or not repo:
        return False, "Missing HF_TOKEN or SPACE_ID environment variables"

    try:
        from huggingface_hub import HfApi
        # Fetching the space metadata doubles as an access check.
        HfApi(token=token).space_info(repo)
        return True, "Authenticated successfully"
    except Exception as e:
        # Boundary function: report any failure rather than raising.
        return False, f"Authentication failed: {str(e)}"
|
|
|
|
|
|
|
# Resolve the configured theme name to a gr.themes class (Default fallback).
theme_class = getattr(gr.themes, THEME, gr.themes.Default)
with gr.Blocks(title=SPACE_NAME, theme=theme_class()) as demo:

    HF_TOKEN = os.environ.get("HF_TOKEN", "").strip()
    SPACE_ID = os.environ.get("SPACE_ID", "").strip()

    # The faculty Configuration tab is only usable when the token checks out.
    HF_ACCESS_VALID, HF_ACCESS_MESSAGE = verify_hf_token_access()

    with gr.Tabs() as main_tabs:
        with gr.Tab("Chat U/I"):
            gr.Markdown(f"# {SPACE_NAME}")
            gr.Markdown(SPACE_DESCRIPTION)

            # Access-code gate, shown only when an ACCESS_CODE secret is set.
            with gr.Column(visible=(ACCESS_CODE is not None)) as access_section:
                gr.Markdown("### π Access Required")
                gr.Markdown("Please enter the access code provided by your instructor:")

                access_input = gr.Textbox(
                    label="Access Code",
                    placeholder="Enter access code...",
                    type="password"
                )
                access_btn = gr.Button("Submit", variant="primary")
                access_error = gr.Markdown(visible=False)

            # Chat area; hidden until the access code is verified.
            with gr.Column(visible=(ACCESS_CODE is None)) as chat_section:

                # Examples may be stored as a Python-literal string.
                examples = config.get('examples', [])
                if isinstance(examples, str):
                    try:
                        import ast
                        examples = ast.literal_eval(examples)
                    except:
                        examples = []

                # ChatInterface with additional inputs expects examples as
                # [message, files] pairs; wrap plain strings accordingly.
                formatted_examples = None
                if examples:
                    if examples and isinstance(examples[0], list):
                        formatted_examples = examples
                    else:
                        formatted_examples = [[example, None] for example in examples]

                chat_interface = gr.ChatInterface(
                    fn=store_and_generate_response,
                    title="",
                    description="",
                    examples=formatted_examples,
                    type="messages",
                    additional_inputs=[
                        gr.File(
                            label="π",
                            file_types=None,
                            file_count="multiple",
                            visible=True
                        )
                    ]
                )

                with gr.Row():
                    export_btn = gr.Button("π₯ Export Conversation", variant="secondary", size="sm")
                    export_file = gr.File(label="Download", visible=False)

                export_btn.click(
                    export_current_conversation,
                    outputs=[export_file]
                )

                # Read-only summary of the active configuration.
                with gr.Accordion("Configuration", open=False):
                    gr.Markdown(get_configuration_status())

            # Wire up the access-code check for both the button and Enter key.
            if ACCESS_CODE is not None:
                access_btn.click(
                    verify_access_code,
                    inputs=[access_input],
                    outputs=[access_error, chat_section, access_granted]
                )
                access_input.submit(
                    verify_access_code,
                    inputs=[access_input],
                    outputs=[access_error, chat_section, access_granted]
                )
|
|
|
|
|
with gr.Tab("Configuration", visible=HF_ACCESS_VALID) as config_tab: |
|
gr.Markdown("## Configuration Management") |
|
|
|
|
|
if HF_ACCESS_VALID: |
|
gr.Markdown(f"β
**Authenticated** - {HF_ACCESS_MESSAGE}") |
|
gr.Markdown("Configuration changes will be saved to the HuggingFace repository and the Space will restart automatically.") |
|
faculty_auth_state = gr.State(True) |
|
else: |
|
gr.Markdown(f"β **Not Available** - {HF_ACCESS_MESSAGE}") |
|
gr.Markdown("Set HF_TOKEN and SPACE_ID in Space secrets to enable configuration management.") |
|
faculty_auth_state = gr.State(False) |
|
|
|
|
|
with gr.Column(visible=HF_ACCESS_VALID) as faculty_config_section: |
|
gr.Markdown("### Edit Assistant Configuration") |
|
gr.Markdown("β οΈ **Warning:** Changes will affect all users immediately.") |
|
|
|
|
|
try: |
|
with open('config.json', 'r') as f: |
|
current_config = json.load(f) |
|
except: |
|
|
|
current_config = DEFAULT_CONFIG.copy() |
|
|
|
|
|
|
|
edit_system_prompt = gr.Textbox( |
|
label="System Prompt", |
|
value=current_config.get('system_prompt', SYSTEM_PROMPT), |
|
lines=5 |
|
) |
|
|
|
|
|
edit_model = gr.Dropdown( |
|
label="Model", |
|
choices=[ |
|
"google/gemini-2.0-flash-001", |
|
"google/gemma-3-27b-it", |
|
"anthropic/claude-3.5-sonnet", |
|
"anthropic/claude-3.5-haiku", |
|
"openai/gpt-4o-mini-search-preview", |
|
"openai/gpt-4.1-nano", |
|
"nvidia/llama-3.1-nemotron-70b-instruct", |
|
"mistralai/devstral-small" |
|
], |
|
value=current_config.get('model', MODEL) |
|
) |
|
|
|
|
|
examples_value = current_config.get('examples', []) |
|
if isinstance(examples_value, list): |
|
examples_text_value = "\n".join(examples_value) |
|
else: |
|
examples_text_value = "" |
|
|
|
edit_examples = gr.Textbox( |
|
label="Example Prompts (one per line)", |
|
value=examples_text_value, |
|
lines=3, |
|
placeholder="What can you help me with?\nExplain this concept\nHelp me understand..." |
|
) |
|
|
|
|
|
with gr.Row(): |
|
edit_temperature = gr.Slider( |
|
label="Temperature", |
|
minimum=0, |
|
maximum=2, |
|
value=current_config.get('temperature', 0.7), |
|
step=0.1 |
|
) |
|
edit_max_tokens = gr.Slider( |
|
label="Max Tokens", |
|
minimum=50, |
|
maximum=4096, |
|
value=current_config.get('max_tokens', 750), |
|
step=50 |
|
) |
|
|
|
|
|
gr.Markdown("### URL Grounding") |
|
grounding_urls_value = current_config.get('grounding_urls', []) |
|
if isinstance(grounding_urls_value, str): |
|
try: |
|
import ast |
|
grounding_urls_value = ast.literal_eval(grounding_urls_value) |
|
except: |
|
grounding_urls_value = [] |
|
|
|
|
|
url_fields = [] |
|
for i in range(10): |
|
url_value = grounding_urls_value[i] if i < len(grounding_urls_value) else "" |
|
url_field = gr.Textbox( |
|
label=f"URL {i+1}" + (" (Primary)" if i < 2 else " (Secondary)"), |
|
value=url_value, |
|
placeholder="https://..." |
|
) |
|
url_fields.append(url_field) |
|
|
|
config_locked = gr.Checkbox( |
|
label="Lock Configuration (Prevent further edits)", |
|
value=current_config.get('locked', False) |
|
) |
|
|
|
with gr.Row(): |
|
save_config_btn = gr.Button("Save Configuration", variant="primary") |
|
reset_config_btn = gr.Button("Reset to Defaults", variant="secondary") |
|
|
|
config_status = gr.Markdown("") |
|
|
|
|
|
|
|
def save_configuration(is_authenticated, new_prompt, new_model, new_examples, new_temp, new_tokens, *url_values): |
|
if not is_authenticated: |
|
return "Not authenticated" |
|
|
|
|
|
try: |
|
with open('config.json', 'r') as f: |
|
existing_config = json.load(f) |
|
if existing_config.get('locked', False): |
|
return "Configuration is locked and cannot be modified" |
|
except: |
|
pass |
|
|
|
|
|
try: |
|
with open('config.json', 'r') as f: |
|
current_full_config = json.load(f) |
|
except: |
|
|
|
current_full_config = DEFAULT_CONFIG.copy() |
|
|
|
|
|
examples_list = [ex.strip() for ex in new_examples.split('\n') if ex.strip()] |
|
|
|
|
|
urls = list(url_values[:-1]) |
|
lock_config_from_args = url_values[-1] |
|
|
|
grounding_urls = [url.strip() for url in urls if url.strip()] |
|
|
|
|
|
try: |
|
|
|
os.makedirs('config_backups', exist_ok=True) |
|
|
|
|
|
backup_filename = f"config_backups/config_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json" |
|
with open(backup_filename, 'w') as backup_file: |
|
json.dump(current_full_config, backup_file, indent=2) |
|
|
|
|
|
backups = sorted([f for f in os.listdir('config_backups') if f.endswith('.json')]) |
|
if len(backups) > 10: |
|
for old_backup in backups[:-10]: |
|
os.remove(os.path.join('config_backups', old_backup)) |
|
except Exception as backup_error: |
|
print(f"Warning: Could not create backup: {backup_error}") |
|
|
|
|
|
|
|
current_full_config.update({ |
|
'system_prompt': new_prompt, |
|
'model': new_model, |
|
'examples': examples_list, |
|
'temperature': new_temp, |
|
'max_tokens': int(new_tokens), |
|
'grounding_urls': grounding_urls, |
|
'locked': lock_config_from_args, |
|
'last_modified': datetime.now().isoformat(), |
|
'last_modified_by': 'faculty' |
|
}) |
|
|
|
try: |
|
with open('config.json', 'w') as f: |
|
json.dump(current_full_config, f, indent=2) |
|
|
|
|
|
hf_token = os.environ.get("HF_TOKEN") |
|
space_id = os.environ.get("SPACE_ID") |
|
|
|
if hf_token and space_id: |
|
try: |
|
from huggingface_hub import HfApi, CommitOperationAdd, restart_space |
|
api = HfApi(token=hf_token) |
|
|
|
|
|
operations = [ |
|
CommitOperationAdd( |
|
path_or_fileobj="config.json", |
|
path_in_repo="config.json" |
|
) |
|
] |
|
|
|
|
|
api.create_commit( |
|
repo_id=space_id, |
|
operations=operations, |
|
commit_message=f"Update configuration by faculty at {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}", |
|
commit_description="Faculty configuration update through web interface", |
|
repo_type="space", |
|
token=hf_token |
|
) |
|
|
|
|
|
try: |
|
restart_space(space_id, token=hf_token) |
|
return f"β
Configuration saved and committed at {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\nπ **Space is restarting automatically!**\n\nThe page will refresh in about 30 seconds. Your changes will be applied." |
|
except Exception as restart_error: |
|
print(f"Could not auto-restart: {restart_error}") |
|
return f"β
Configuration saved and committed at {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\nπ **Please restart manually** (auto-restart failed)\n\n1. Go to Settings (βοΈ)\n2. Click 'Factory reboot'\n3. Wait ~30 seconds" |
|
except Exception as commit_error: |
|
print(f"Note: Could not auto-commit to repository: {commit_error}") |
|
return f"β
Configuration saved locally at {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\nπ **Manual Restart Required**\nFor changes to take effect:\n1. Go to Settings (βοΈ)\n2. Click 'Factory reboot'\n3. Wait ~30 seconds for restart" |
|
else: |
|
return f"β
Configuration saved at {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\nπ **Manual Restart Required**\nFor changes to take effect:\n1. Go to Settings (βοΈ)\n2. Click 'Factory reboot'\n3. Wait ~30 seconds for restart" |
|
except Exception as e: |
|
return f"β Error saving configuration: {str(e)}" |
|
|
|
|
|
def reset_configuration(is_authenticated): |
|
if not is_authenticated: |
|
updates = ["Not authenticated"] + [gr.update() for _ in range(14)] |
|
return tuple(updates) |
|
|
|
|
|
try: |
|
with open('config.json', 'r') as f: |
|
existing_config = json.load(f) |
|
if existing_config.get('locked', False): |
|
updates = ["Configuration is locked"] + [gr.update() for _ in range(14)] |
|
return tuple(updates) |
|
except: |
|
pass |
|
|
|
|
|
default_examples = DEFAULT_CONFIG.get('examples', []) |
|
if isinstance(default_examples, list): |
|
examples_text = "\n".join(default_examples) |
|
else: |
|
examples_text = "" |
|
|
|
|
|
default_urls = DEFAULT_CONFIG.get('grounding_urls', []) |
|
if isinstance(default_urls, str): |
|
try: |
|
import json |
|
default_urls = json.loads(default_urls) |
|
except: |
|
default_urls = [] |
|
elif not isinstance(default_urls, list): |
|
default_urls = [] |
|
|
|
|
|
updates = [ |
|
"Reset to default values", |
|
gr.update(value=DEFAULT_CONFIG.get('system_prompt', SYSTEM_PROMPT)), |
|
gr.update(value=DEFAULT_CONFIG.get('model', MODEL)), |
|
gr.update(value=examples_text), |
|
gr.update(value=DEFAULT_CONFIG.get('temperature', temperature)), |
|
gr.update(value=DEFAULT_CONFIG.get('max_tokens', max_tokens)) |
|
] |
|
|
|
|
|
for i in range(10): |
|
url_value = default_urls[i] if i < len(default_urls) else "" |
|
updates.append(gr.update(value=url_value)) |
|
|
|
return tuple(updates) |
|
|
|
|
|
|
|
            # Wire the faculty buttons. Save inputs: auth flag, the five
            # editable fields, the ten URL boxes, then the lock checkbox
            # (consumed as the last member of *url_values).
            save_config_btn.click(
                save_configuration,
                inputs=[faculty_auth_state, edit_system_prompt, edit_model, edit_examples, edit_temperature, edit_max_tokens] + url_fields + [config_locked],
                outputs=[config_status]
            )

            # Reset outputs: status message plus every editable component.
            reset_config_btn.click(
                reset_configuration,
                inputs=[faculty_auth_state],
                outputs=[config_status, edit_system_prompt, edit_model, edit_examples, edit_temperature, edit_max_tokens] + url_fields
            )

if __name__ == "__main__":
    demo.launch()
|
|