Delete app.py
app.py
DELETED
@@ -1,261 +0,0 @@
import gradio as gr
import os
import requests
import json
import asyncio
from crawl4ai import AsyncWebCrawler

# Configuration
SPACE_NAME = "My Custom Space"
SPACE_DESCRIPTION = ""
SYSTEM_PROMPT = """You are a research assistant that provides link-grounded information through Crawl4AI web fetching. Use MLA documentation for parenthetical citations and bibliographic entries. This assistant is designed for students and researchers conducting academic inquiry. Your main responsibilities include: analyzing academic sources, fact-checking claims with evidence, providing properly cited research summaries, and helping users navigate scholarly information. Ground all responses in provided URL contexts and any additional URLs you're instructed to fetch. Never rely on memory for factual claims."""
MODEL = "google/gemma-3-27b-it"
GROUNDING_URLS = []
# Get access code from environment variable for security
ACCESS_CODE = os.environ.get("SPACE_ACCESS_CODE", "")
ENABLE_DYNAMIC_URLS = True
ENABLE_VECTOR_RAG = True
RAG_DATA = {"index_base64": "SXhGSYABAAABAAAAAAAAAAAAEAAAAAAAAAAQAAAAAAABAAAAAIABAAAAAAAAquL8vORiA70my6u7cPEnPSH5tTxoLAs72bgTPJiVCT18lxu9e9UbvYg0+rvksBs967PLPT+2ET1NLH+72AgnPSO7kD2qeYC8vmOLPEDvBLwIUIm7OnJUPfMGgz3xvSu9TmeRvB7TPD38lTK8gdCyveBptLsJVDO9ac2rPU/HPj27+eo8lbwnPtfTjr3iCOq7xjxvvHG0PL15RkW9nwzevMEce71fGBu9IQuqvSdelz3VUZE9ZIRPPUzBw73e+qS8wjk+PT7soD05Rym+6aqgvJbG5ju6uSs99YoxvW7ipjxIt4s71i2ZPF/Amzv5/qm8ZT4IO3hi8L0jUaE9cbO8vC1oKjyFjCG9QXKKvcFusDz52qo8BH7RveClcr1vruM9OLoGvfAZYj05Sne91q2xPXyjbzyHoJq9dt5DPUlH+bwXUZ69mfHavJ5A3jxALcy7knFZPXSwkbsdXts9hw2svd4VS7zXWcc848SUPFj/Ub3qIbM9Y4rNO6mZUz36DGE8odikPQ1/Cr3fJLk9aD0wPFaCkD1Czow94IpoPCJJd71H2o+9SGM2PMM86TxArsq8ZpORPYqX2L3psZC9/U2bPL+BG71p4aq91ai4u//MZL08k+C7iZLqvPyPIDsppIE8yy27uTUonT3r54S8fFuQvNwfpbzHs/48FaDrvLzoEQqNYpe81b7bvOTiRb2mQIE9w2LUu2a0qbzdMYW8jWivPVNqnr2Au468sSryvVZ5rz1J5j+857vGPeKy/DxuAN08w9bXvHShyTz3zNy80b32vNwHhD2h3hS92mNqPdzPmj3oeyQ9sfw3vCfVeD360q+9/iI3POSd+bxJtse90+CCvRbrvLzkay89bkHcPHzQPj34LAk8FNkQvUVpXL3KCIg8N/eVvQO6Er1Wk9c8/ZkrvRomS710Z/M8qEYQvRuZdLzgZEU940M4vSMHsTyoWj89pW7tvCncIT1TP7Q9Gg5mu89grzyWx1y8j00qPdYmRjyC0jW8fbEyvA+5mz3nix67y7kJvWPHIr2et9A6s1ZGPZGRyT2pN6Q90HmIPeI7Ez0+PxE9EfElvV5a5rwspvU7WexEva576L0tlkG83v0mPdaMIbyhQLa9R2IEPUkazr2DThk9bASDvZn+gjwoUh6+2qxiOy2ZRr2A7c27YFbBPSOeGL3GUB68e+NYPcBg4IllYLO8mnA/vX79mLx4DUs9VfpgPLEHi7xeU1u83oekPBZzcL080jC91g7VvNkxiL3kR3o9khAKvflLp7ySWJw9bO3cvJqxbL1DBGG9hfKqPdlSir3kM6M8IJR4vdPeOz36ISW8dSggvHQIQLqXATK9qoGBvLA20LzCkEi8bhFBvcANALsKMKM8BzRFPCI98bxAOo09WImcvauxyzv7a0E9jBK6PBRlmT29tA69RwYjvQ/ESb3AM4w7WMjZvaEzVT1HbGU9YDDQvEreUD1soz+9bPYhPGDAHL1JkuI82kRnvYKyoL1EBY08BheBPJYxBz2n6ne8E6kUvKAvnT23Rhg8vbrJPJPtuL2UGz+9OJbvPFwrob3XARm9zw1IPSsCi7wSzRk9uhV8Pa47WT0hBIs8NBZVPdhUBL3NLdo8H2hEvT2Sj7ycZ608dB6zPaqYkD2GErg7BVQbPqemq7zRz2C8ZdnsvLwsVz2AChG9hZQIPKJRCLwjOMA96/Q4PTNNOLPOAMm9tOguvAO8K71fWoi8+CsTPTFC+DvlUbA8u17rPR1RoL1xh8e8ph0NPQdGRbwF8eq8qKiMPMA4qz11BlA8WY0RPcGoGrzHyhY9yafEO4Udnj0xgFI9kVEVPPW85z2TnGc8TGCzPIELlrzfUtM96NeDPTUGfr2ZRPO8A2sHPW9jtrzpdqa901KlPZe5Kj247F08QnYIvGsJzr2mJBw8an7zPIQyhDuih+Y81CUUvQIfAz31O308JfcNvDywrb0J4ck6g8cnPMJxpLydUpG93vS1OzB8yT0x83w8NIq3u9q76bxWcJg8LvmZPS5WIb2XPeM7D/ehO8+q3DzEf4I8", "chunks": {"e4b82490": {"text": "Vector Database Test Document This is a test document for evaluating the vector database functionality. Section 1: Introduction to Vector Databases Vector databases store and query high-dimensional vector representations of data. They enable semantic search by finding vectors similar to a query vector in an embedding space. Section 2: Use Cases Common applications include: - Document retrieval and question answering - Similarity search for products or content - Recommendation systems - Semantic search in chatbots Section 3: Technical Implementation Vector databases typically use embedding models to convert text into dense vectors, then use algorithms like cosine similarity or approximate nearest neighbor search to find relevant results. 
Section 4: Benefits - Semantic understanding beyond keyword matching - Scalable retrieval for large document collections - Integration with modern AI systems and large language models - Support for multi-modal data (text, images, audio) This document should generate multiple chunks when processed by the system.", "metadata": {"file_path": "/private/var/folders/gg/pr9vtbf50cq2z_szcsdnjvym0000gn/T/gradio/ca225f4226ff8fe4b52c49232ba98eb63f89ad9da4e107040507ee0da07ec619/test_document copy.txt", "file_name": "test_document copy.txt", "chunk_index": 0, "start_word": 0, "word_count": 151}, "chunk_id": "e4b82490"}}, "chunk_ids": ["e4b82490"], "dimension": 384, "model_name": "sentence-transformers/all-MiniLM-L6-v2"}

# Get API key from environment - customizable variable name
API_KEY = os.environ.get("OPENROUTER_API_KEY")

async def fetch_url_content_async(url, crawler):
    """Fetch and extract text content from a URL using Crawl4AI"""
    try:
        result = await crawler.arun(
            url=url,
            bypass_cache=True,
            word_count_threshold=10,
            excluded_tags=['script', 'style', 'nav', 'header', 'footer'],
            remove_overlay_elements=True
        )

        if result.success:
            content = result.markdown or result.cleaned_html or ""
            # Truncate to ~4000 characters
            if len(content) > 4000:
                content = content[:4000] + "..."
            return content
        else:
            return f"Error fetching {url}: Failed to retrieve content"
    except Exception as e:
        return f"Error fetching {url}: {str(e)}"

def fetch_url_content(url):
    """Synchronous wrapper for URL fetching"""
    async def fetch():
        async with AsyncWebCrawler(verbose=False) as crawler:
            return await fetch_url_content_async(url, crawler)

    try:
        return asyncio.run(fetch())
    except Exception as e:
        return f"Error fetching {url}: {str(e)}"

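# Note: asyncio.run() raises RuntimeError if the calling thread already has a running
# event loop. Gradio normally runs sync handlers in a worker thread, so the wrapper
# above usually works; a more defensive variant (a minimal sketch, not from the
# original app.py) could fall back to a dedicated thread when a loop is active:
def fetch_url_content_safe(url):
    """Like fetch_url_content, but safe to call from inside a running event loop."""
    import concurrent.futures
    try:
        asyncio.get_running_loop()
    except RuntimeError:
        # No loop running in this thread: asyncio.run() is fine.
        return fetch_url_content(url)
    # A loop is already running here: do the blocking call in a separate thread.
    with concurrent.futures.ThreadPoolExecutor(max_workers=1) as pool:
        return pool.submit(fetch_url_content, url).result()
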
# Global cache for URL content to avoid re-crawling in generated spaces
_url_content_cache = {}

def get_grounding_context():
    """Fetch context from grounding URLs with caching"""
    if not GROUNDING_URLS:
        return ""

    # Create cache key from URLs
    cache_key = tuple(sorted([url for url in GROUNDING_URLS if url and url.strip()]))

    # Check cache first
    if cache_key in _url_content_cache:
        return _url_content_cache[cache_key]

    context_parts = []
    for i, url in enumerate(GROUNDING_URLS, 1):
        if url.strip():
            content = fetch_url_content(url.strip())
            context_parts.append(f"Context from URL {i} ({url}):\n{content}")

    if context_parts:
        result = "\n\n" + "\n\n".join(context_parts) + "\n\n"
    else:
        result = ""

    # Cache the result
    _url_content_cache[cache_key] = result
    return result

import re

def extract_urls_from_text(text):
    """Extract URLs from text using regex"""
    url_pattern = r'https?://[^\s<>"{}|\^`\[\]"]+'
    return re.findall(url_pattern, text)

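# Illustrative behaviour of the pattern above (example input, not from the original file):
#   extract_urls_from_text("See https://example.com/paper and http://doi.org/10.1000/xyz")
#   -> ['https://example.com/paper', 'http://doi.org/10.1000/xyz']
# Trailing punctuation such as a sentence-ending period is not stripped by the pattern,
# so callers may want to rstrip('.,;:') each match before fetching it.
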
# Initialize RAG context if enabled
if ENABLE_VECTOR_RAG and RAG_DATA:
    try:
        import faiss
        import numpy as np
        import base64

        class SimpleRAGContext:
            def __init__(self, rag_data):
                # Deserialize FAISS index
                index_bytes = base64.b64decode(rag_data['index_base64'])
                self.index = faiss.deserialize_index(index_bytes)

                # Restore chunks and mappings
                self.chunks = rag_data['chunks']
                self.chunk_ids = rag_data['chunk_ids']

            def get_context(self, query, max_chunks=3):
                """Get relevant context - simplified version"""
                # In production, you'd compute query embedding here
                # For now, return a simple message
                return "\n\n[RAG context would be retrieved here based on similarity search]\n\n"

        rag_context_provider = SimpleRAGContext(RAG_DATA)
    except Exception as e:
        print(f"Failed to initialize RAG: {e}")
        rag_context_provider = None
else:
    rag_context_provider = None

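# A minimal sketch (illustrative, not from the original app.py) of what get_context()
# could do instead of returning a placeholder, assuming the sentence-transformers
# package is installed and using the embedding model recorded in RAG_DATA
# ("sentence-transformers/all-MiniLM-L6-v2", 384 dimensions). Note that
# faiss.deserialize_index() expects a numpy uint8 array (as produced by
# faiss.serialize_index), hence the np.frombuffer() wrapper around the decoded bytes.
def _rag_similarity_search_sketch(rag_data, query, max_chunks=3):
    import base64
    import faiss
    import numpy as np
    from sentence_transformers import SentenceTransformer

    index = faiss.deserialize_index(
        np.frombuffer(base64.b64decode(rag_data["index_base64"]), dtype=np.uint8)
    )
    model = SentenceTransformer(rag_data["model_name"])
    query_vec = model.encode([query]).astype(np.float32)   # shape (1, dimension)
    _, neighbor_ids = index.search(query_vec, max_chunks)  # nearest chunk positions
    parts = []
    for idx in neighbor_ids[0]:
        if 0 <= idx < len(rag_data["chunk_ids"]):           # faiss pads missing hits with -1
            chunk = rag_data["chunks"][rag_data["chunk_ids"][idx]]
            parts.append(chunk["text"])
    return "\n\n".join(parts)
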
def generate_response(message, history):
    """Generate response using OpenRouter API"""

    if not API_KEY:
        return "Please set your OPENROUTER_API_KEY in the Space settings."

    # Get grounding context
    grounding_context = get_grounding_context()

    # Add RAG context if available
    if ENABLE_VECTOR_RAG and rag_context_provider:
        rag_context = rag_context_provider.get_context(message)
        if rag_context:
            grounding_context += rag_context

    # If dynamic URLs are enabled, check message for URLs to fetch
    if ENABLE_DYNAMIC_URLS:
        urls_in_message = extract_urls_from_text(message)
        if urls_in_message:
            # Fetch content from URLs mentioned in the message
            dynamic_context_parts = []
            for url in urls_in_message[:3]:  # Limit to 3 URLs per message
                content = fetch_url_content(url)
                dynamic_context_parts.append(f"\n\nDynamic context from {url}:\n{content}")
            if dynamic_context_parts:
                grounding_context += "\n".join(dynamic_context_parts)

    # Build enhanced system prompt with grounding context
    enhanced_system_prompt = SYSTEM_PROMPT + grounding_context

    # Build messages array for the API
    messages = [{"role": "system", "content": enhanced_system_prompt}]

    # Add conversation history - compatible with Gradio 5.x format
    for chat in history:
        if isinstance(chat, dict):
            # New format: {"role": "user", "content": "..."} or {"role": "assistant", "content": "..."}
            messages.append(chat)
        else:
            # Legacy format: ("user msg", "bot msg")
            user_msg, bot_msg = chat
            messages.append({"role": "user", "content": user_msg})
            if bot_msg:
                messages.append({"role": "assistant", "content": bot_msg})

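    # Example history values the loop above accepts (illustrative):
    #   Gradio 5.x "messages" format: [{"role": "user", "content": "hi"},
    #                                  {"role": "assistant", "content": "hello"}]
    #   Legacy tuple format:          [("hi", "hello")]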
    # Add current message
    messages.append({"role": "user", "content": message})

    # Make API request
    try:
        response = requests.post(
            url="https://openrouter.ai/api/v1/chat/completions",
            headers={
                "Authorization": f"Bearer {API_KEY}",
                "Content-Type": "application/json"
            },
            json={
                "model": MODEL,
                "messages": messages,
                "temperature": 0.7,
                "max_tokens": 500
            }
        )

        if response.status_code == 200:
            return response.json()['choices'][0]['message']['content']
        else:
            return f"Error: {response.status_code} - {response.text}"

    except Exception as e:
        return f"Error: {str(e)}"

# Access code verification
access_granted = gr.State(False)
_access_granted_global = False  # Global fallback

def verify_access_code(code):
    """Verify the access code"""
    global _access_granted_global
    if not ACCESS_CODE:
        _access_granted_global = True
        return gr.update(visible=False), gr.update(visible=True), gr.update(value=True)

    if code == ACCESS_CODE:
        _access_granted_global = True
        return gr.update(visible=False), gr.update(visible=True), gr.update(value=True)
    else:
        _access_granted_global = False
        return gr.update(visible=True, value="❌ Incorrect access code. Please try again."), gr.update(visible=False), gr.update(value=False)

def protected_generate_response(message, history):
    """Protected response function that checks access"""
    # Check if access is granted via the global variable
    if ACCESS_CODE and not _access_granted_global:
        return "Please enter the access code to continue."
    return generate_response(message, history)

# Create interface with access code protection
with gr.Blocks(title=SPACE_NAME) as demo:
    gr.Markdown(f"# {SPACE_NAME}")
    gr.Markdown(SPACE_DESCRIPTION)

    # Access code section (shown only if ACCESS_CODE is set)
    with gr.Column(visible=bool(ACCESS_CODE)) as access_section:
        gr.Markdown("### 🔐 Access Required")
        gr.Markdown("Please enter the access code provided by your instructor:")

        access_input = gr.Textbox(
            label="Access Code",
            placeholder="Enter access code...",
            type="password"
        )
        access_btn = gr.Button("Submit", variant="primary")
        access_error = gr.Markdown(visible=False)

    # Main chat interface (hidden until access granted)
    with gr.Column(visible=not bool(ACCESS_CODE)) as chat_section:
        chat_interface = gr.ChatInterface(
            fn=protected_generate_response,
            title="",  # Title already shown above
            description="",  # Description already shown above
            examples=None
        )

    # Connect access verification
    if ACCESS_CODE:
        access_btn.click(
            verify_access_code,
            inputs=[access_input],
            outputs=[access_error, chat_section, access_granted]
        )
        access_input.submit(
            verify_access_code,
            inputs=[access_input],
            outputs=[access_error, chat_section, access_granted]
        )

if __name__ == "__main__":
    demo.launch()
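
# Runtime dependencies implied by the imports above (exact pins are not recorded in this
# commit): gradio, requests, crawl4ai, plus faiss (faiss-cpu or faiss-gpu) and numpy when
# ENABLE_VECTOR_RAG is used; the retrieval sketch additionally assumes sentence-transformers.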