milwright committed on
Commit
1117ec5
·
verified ·
1 Parent(s): a20cbae

Upload 4 files

Browse files
Files changed (4) hide show
  1. README.md +76 -0
  2. app.py +256 -0
  3. config.json +15 -0
  4. requirements.txt +4 -0
README.md ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: My Custom Space
3
+ emoji: 🤖
4
+ colorFrom: blue
5
+ colorTo: red
6
+ sdk: gradio
7
+ sdk_version: 5.35.0
8
+ app_file: app.py
9
+ pinned: false
10
+ ---
11
+
12
+ # My Custom Space
13
+
14
+ An AI research assistant tailored for academic inquiry and scholarly dialogue
15
+
16
+ ## Quick Deploy to HuggingFace Spaces
17
+
18
+ ### Step 1: Create the Space
19
+ 1. Go to https://huggingface.co/spaces
20
+ 2. Click "Create new Space"
21
+ 3. Choose a name for your Space
22
+ 4. Select **Gradio** as the SDK
23
+ 5. Set visibility (Public/Private)
24
+ 6. Click "Create Space"
25
+
26
+ ### Step 2: Upload Files
27
+ 1. In your new Space, click "Files" tab
28
+ 2. Upload these files from the zip:
29
+ - `app.py`
30
+ - `requirements.txt`
31
+ 3. Wait for "Building" to complete
32
+
33
+ ### Step 3: Add API Key
34
+ 1. Go to Settings (gear icon)
35
+ 2. Click "Variables and secrets"
36
+ 3. Click "New secret"
37
+ 4. Name: `OPENROUTER_API_KEY`
38
+ 5. Value: Your OpenRouter API key
39
+ 6. Click "Add"
40
+
41
+ ### Step 4: Configure Access Control
42
+ Your Space is configured with access code protection. Students will need to enter the access code to use the chatbot.
43
+
44
+ 1. Go to Settings (gear icon)
45
+ 2. Click "Variables and secrets"
46
+ 3. Click "New secret"
47
+ 4. Name: `SPACE_ACCESS_CODE`
48
+ 5. Value: `TLC`
49
+ 6. Click "Add"
50
+
51
+ **Important**: The access code is now stored securely as an environment variable and is not visible in your app code.
52
+
53
+ To disable access protection:
54
+ 1. Go to Settings → Variables and secrets
55
+ 2. Delete the `SPACE_ACCESS_CODE` secret
56
+ 3. The Space will rebuild automatically with no access protection
57
+
58
+
59
+
60
+ ### Step 5: Get Your API Key
61
+ 1. Go to https://openrouter.ai/keys
62
+ 2. Sign up/login if needed
63
+ 3. Click "Create Key"
64
+ 4. Copy the key (starts with `sk-or-`)
65
+
66
+ ### Step 6: Test Your Space
67
+ - Go back to "App" tab
68
+ - Your Space should be running!
69
+ - Try the example prompts or ask a question
70
+
71
+ ## Configuration
72
+
73
+ - **Model**: google/gemma-3-27b-it
74
+ - **Temperature**: 0.7
75
+ - **Max Tokens**: 500
76
+ - **API Key Variable**: OPENROUTER_API_KEY
app.py ADDED
@@ -0,0 +1,256 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import os
3
+ import requests
4
+ import json
5
+ import asyncio
6
+ from crawl4ai import AsyncWebCrawler
7
+
8
# Configuration
# Space identity shown in the UI header.
SPACE_NAME = "My Custom Space"
SPACE_DESCRIPTION = "An AI research assistant tailored for academic inquiry and scholarly dialogue"
# System prompt sent as the first message of every API request.
SYSTEM_PROMPT = """You are a research assistant that provides link-grounded information through Crawl4AI web fetching. Use MLA documentation for parenthetical citations and bibliographic entries. This assistant is designed for students and researchers conducting academic inquiry. Your main responsibilities include: analyzing academic sources, fact-checking claims with evidence, providing properly cited research summaries, and helping users navigate scholarly information. Ground all responses in provided URL contexts and any additional URLs you're instructed to fetch. Never rely on memory for factual claims."""
# OpenRouter model identifier used for chat completions.
MODEL = "google/gemma-3-27b-it"
# Static URLs whose crawled content is prepended to the system prompt (empty here).
GROUNDING_URLS = []
# Get access code from environment variable for security
# Empty string disables access protection entirely.
ACCESS_CODE = os.environ.get("SPACE_ACCESS_CODE", "")
# When True, URLs found in a user's message are fetched and added as context.
ENABLE_DYNAMIC_URLS = True
# Vector-RAG is disabled; RAG_DATA stays None so no provider is built below.
ENABLE_VECTOR_RAG = False
RAG_DATA = None

# Get API key from environment - customizable variable name
# None when unset; generate_response() checks this and returns a setup hint.
API_KEY = os.environ.get("OPENROUTER_API_KEY")
22
+
23
async def fetch_url_content_async(url, crawler):
    """Crawl *url* with the supplied Crawl4AI crawler and return its text.

    Returns the page markdown (falling back to cleaned HTML), truncated to
    roughly 4000 characters. Any failure — an unsuccessful crawl or a raised
    exception — is reported as an "Error fetching ..." string instead of
    propagating.
    """
    try:
        crawl_result = await crawler.arun(
            url=url,
            bypass_cache=True,
            word_count_threshold=10,
            excluded_tags=['script', 'style', 'nav', 'header', 'footer'],
            remove_overlay_elements=True
        )
        if not crawl_result.success:
            return f"Error fetching {url}: Failed to retrieve content"
        text = crawl_result.markdown or crawl_result.cleaned_html or ""
        # Cap the context size so a single page cannot dominate the prompt.
        if len(text) > 4000:
            return text[:4000] + "..."
        return text
    except Exception as e:
        return f"Error fetching {url}: {str(e)}"
44
+
45
def fetch_url_content(url):
    """Fetch *url* synchronously by driving the async crawler to completion.

    Spins up a short-lived AsyncWebCrawler per call; any failure (including
    event-loop errors) is returned as an "Error fetching ..." string rather
    than raised.
    """
    async def _crawl_once():
        # The context manager owns crawler startup and cleanup.
        async with AsyncWebCrawler(verbose=False) as crawler:
            return await fetch_url_content_async(url, crawler)

    try:
        return asyncio.run(_crawl_once())
    except Exception as exc:
        return f"Error fetching {url}: {str(exc)}"
55
+
56
# Module-level memo so each grounding-URL set is crawled at most once per process.
_url_content_cache = {}

def get_grounding_context():
    """Build (and cache) the context block fetched from GROUNDING_URLS.

    Returns an empty string when no URLs are configured; otherwise a
    blank-line-delimited block containing one "Context from URL i (...)"
    section per non-empty URL. Results are cached keyed on the sorted set
    of configured URLs.
    """
    if not GROUNDING_URLS:
        return ""

    # Normalize the URL list into a stable, hashable cache key.
    cache_key = tuple(sorted(u for u in GROUNDING_URLS if u and u.strip()))
    cached = _url_content_cache.get(cache_key)
    if cached is not None:
        return cached

    sections = [
        f"Context from URL {idx} ({raw_url}):\n{fetch_url_content(raw_url.strip())}"
        for idx, raw_url in enumerate(GROUNDING_URLS, 1)
        if raw_url.strip()
    ]
    result = "\n\n" + "\n\n".join(sections) + "\n\n" if sections else ""

    _url_content_cache[cache_key] = result
    return result
85
+
86
+ import re
87
+
88
def extract_urls_from_text(text):
    """Extract HTTP(S) URLs from free-form text.

    Matches http:// and https:// URLs up to the first whitespace, quote, or
    bracket character, then strips trailing sentence punctuation (.,;:!?)
    that commonly clings to URLs embedded in prose, so the returned URLs
    are directly fetchable.

    Args:
        text: Arbitrary user text that may contain URLs.

    Returns:
        A list of cleaned URL strings, in order of appearance.
    """
    # Original character class was garbled (stray escape, duplicated quote)
    # and kept trailing punctuation attached to matched URLs.
    url_pattern = r'https?://[^\s<>"{}|\\^`\[\]]+'
    return [match.rstrip('.,;:!?') for match in re.findall(url_pattern, text)]
92
+
93
# Initialize RAG context if enabled
# With ENABLE_VECTOR_RAG = False and RAG_DATA = None above, this whole branch
# is skipped and rag_context_provider ends up None.
if ENABLE_VECTOR_RAG and RAG_DATA:
    try:
        import faiss
        import numpy as np
        import base64

        class SimpleRAGContext:
            """Minimal retrieval provider restored from pre-serialized RAG data."""
            def __init__(self, rag_data):
                # Deserialize FAISS index
                # NOTE(review): faiss.deserialize_index expects a numpy uint8
                # array; passing raw bytes may raise — confirm against the
                # faiss Python API (np.frombuffer(..., dtype=np.uint8) is the
                # usual bridge) before relying on this path.
                index_bytes = base64.b64decode(rag_data['index_base64'])
                self.index = faiss.deserialize_index(index_bytes)

                # Restore chunks and mappings
                self.chunks = rag_data['chunks']
                self.chunk_ids = rag_data['chunk_ids']

            def get_context(self, query, max_chunks=3):
                """Get relevant context - simplified version"""
                # In production, you'd compute query embedding here
                # For now, return a simple message
                # (placeholder only — no similarity search is actually run)
                return "\n\n[RAG context would be retrieved here based on similarity search]\n\n"

        rag_context_provider = SimpleRAGContext(RAG_DATA)
    except Exception as e:
        # Any failure (missing faiss, bad data) degrades to no-RAG operation.
        print(f"Failed to initialize RAG: {e}")
        rag_context_provider = None
else:
    rag_context_provider = None
122
+
123
def generate_response(message, history):
    """Generate a reply via the OpenRouter chat-completions API.

    Args:
        message: The user's current message text.
        history: Prior turns, either Gradio 5.x dicts
            ({"role": ..., "content": ...}) or legacy (user, assistant) tuples.

    Returns:
        The assistant's reply text, or a human-readable "Error: ..." string
        on any API or network failure.
    """
    if not API_KEY:
        return "Please set your OPENROUTER_API_KEY in the Space settings."

    # Static grounding context from configured URLs (cached across calls).
    grounding_context = get_grounding_context()

    # Optional vector-RAG context keyed on the current message.
    if ENABLE_VECTOR_RAG and rag_context_provider:
        rag_context = rag_context_provider.get_context(message)
        if rag_context:
            grounding_context += rag_context

    # If dynamic URLs are enabled, fetch content for URLs the user pasted.
    if ENABLE_DYNAMIC_URLS:
        urls_in_message = extract_urls_from_text(message)
        if urls_in_message:
            dynamic_context_parts = []
            for url in urls_in_message[:3]:  # Limit to 3 URLs per message
                content = fetch_url_content(url)
                dynamic_context_parts.append(f"\n\nDynamic context from {url}:\n{content}")
            if dynamic_context_parts:
                grounding_context += "\n".join(dynamic_context_parts)

    # Build enhanced system prompt with grounding context
    enhanced_system_prompt = SYSTEM_PROMPT + grounding_context

    # Build messages array for the API
    messages = [{"role": "system", "content": enhanced_system_prompt}]

    # Add conversation history - compatible with Gradio 5.x format
    for chat in history:
        if isinstance(chat, dict):
            # New format: {"role": "user"/"assistant", "content": "..."}
            messages.append(chat)
        else:
            # Legacy format: ("user msg", "bot msg")
            user_msg, bot_msg = chat
            messages.append({"role": "user", "content": user_msg})
            if bot_msg:
                messages.append({"role": "assistant", "content": bot_msg})

    # Add current message
    messages.append({"role": "user", "content": message})

    # Make API request
    try:
        response = requests.post(
            url="https://openrouter.ai/api/v1/chat/completions",
            headers={
                "Authorization": f"Bearer {API_KEY}",
                "Content-Type": "application/json"
            },
            json={
                "model": MODEL,
                "messages": messages,
                "temperature": 0.7,
                "max_tokens": 500
            },
            # Fail fast instead of hanging the UI worker on a stalled request;
            # the original call had no timeout at all.
            timeout=60
        )

        if response.status_code == 200:
            return response.json()['choices'][0]['message']['content']
        else:
            return f"Error: {response.status_code} - {response.text}"

    except Exception as e:
        return f"Error: {str(e)}"
194
+
195
# Access code verification
# Session-scoped flag: False until a correct code is submitted.
access_granted = gr.State(False)

def verify_access_code(code):
    """Check a submitted access code against the configured ACCESS_CODE.

    Returns a 3-tuple matching the Blocks outputs wired below:
    (error-markdown update, chat-section visibility update, granted flag).
    When no ACCESS_CODE is configured, protection is disabled and access
    is always granted.
    """
    if not ACCESS_CODE:
        return gr.update(visible=False), gr.update(visible=True), True

    import hmac  # local import keeps the module's top-level imports unchanged

    # Constant-time comparison avoids leaking the secret via timing
    # differences (plain == short-circuits on the first mismatched char).
    if hmac.compare_digest(str(code), str(ACCESS_CODE)):
        return gr.update(visible=False), gr.update(visible=True), True
    else:
        return gr.update(visible=True, value="❌ Incorrect access code. Please try again."), gr.update(visible=False), False
207
+
208
def protected_generate_response(message, history):
    """Protected response function that checks access"""
    # Check if access is granted via the global state
    # NOTE(review): reading `.value` on a gr.State component returns its
    # initial value, not the per-session value written through event
    # outputs — confirm against the installed Gradio version; as written
    # this gate may remain locked even after a correct code. The chat
    # section's visibility toggle is the effective access control.
    if ACCESS_CODE and not access_granted.value:
        return "Please enter the access code to continue."
    return generate_response(message, history)
214
+
215
# Create interface with access code protection
with gr.Blocks(title=SPACE_NAME) as demo:
    gr.Markdown(f"# {SPACE_NAME}")
    gr.Markdown(SPACE_DESCRIPTION)

    # Access code section (shown only if ACCESS_CODE is set)
    with gr.Column(visible=bool(ACCESS_CODE)) as access_section:
        gr.Markdown("### 🔐 Access Required")
        gr.Markdown("Please enter the access code provided by your instructor:")

        access_input = gr.Textbox(
            label="Access Code",
            placeholder="Enter access code...",
            type="password"
        )
        access_btn = gr.Button("Submit", variant="primary")
        # Hidden until a wrong code is submitted; then shows the error text.
        access_error = gr.Markdown(visible=False)

    # Main chat interface (hidden until access granted)
    with gr.Column(visible=not bool(ACCESS_CODE)) as chat_section:
        chat_interface = gr.ChatInterface(
            fn=protected_generate_response,
            title="",  # Title already shown above
            description="",  # Description already shown above
            examples=["Hello! How can you help me?", "Tell me something interesting", "What can you do?"]
        )

    # Connect access verification
    # Both the button click and pressing Enter in the textbox run the same
    # verification and drive the same three outputs (error text, chat
    # visibility, access flag).
    if ACCESS_CODE:
        access_btn.click(
            verify_access_code,
            inputs=[access_input],
            outputs=[access_error, chat_section, access_granted]
        )
        access_input.submit(
            verify_access_code,
            inputs=[access_input],
            outputs=[access_error, chat_section, access_granted]
        )

if __name__ == "__main__":
    demo.launch()
config.json ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "My Custom Space",
3
+ "description": "An AI research assistant tailored for academic inquiry and scholarly dialogue",
4
+ "system_prompt": "You are a research assistant that provides link-grounded information through Crawl4AI web fetching. Use MLA documentation for parenthetical citations and bibliographic entries. This assistant is designed for students and researchers conducting academic inquiry. Your main responsibilities include: analyzing academic sources, fact-checking claims with evidence, providing properly cited research summaries, and helping users navigate scholarly information. Ground all responses in provided URL contexts and any additional URLs you're instructed to fetch. Never rely on memory for factual claims.",
5
+ "model": "google/gemma-3-27b-it",
6
+ "api_key_var": "OPENROUTER_API_KEY",
7
+ "temperature": 0.7,
8
+ "max_tokens": 500,
9
+ "examples": "[\"Hello! How can you help me?\", \"Tell me something interesting\", \"What can you do?\"]",
10
+ "grounding_urls": "[]",
11
+ "access_code": "",
12
+ "enable_dynamic_urls": true,
13
+ "enable_vector_rag": false,
14
+ "rag_data_json": "None"
15
+ }
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ gradio>=5.35.0
2
+ requests>=2.32.3
3
+ crawl4ai>=0.4.0
4
+ aiofiles>=24.0