Upload 4 files

- README.md +1 -1
- app.py +173 -56
- config.json +12 -14
README.md
CHANGED
@@ -42,7 +42,7 @@ Python support for cultural analytics students
 Your Space should now be running! Try the example prompts or ask your own questions.
 
 ## Configuration
-- **Model**:
+- **Model**: deepseek/deepseek-r1-distill-qwen-32b
 - **API Key Variable**: API_KEY
 - **HF Token Variable**: HF_TOKEN (for auto-updates)
 - **Access Control**: Enabled (ACCESS_CODE)
app.py
CHANGED
@@ -19,16 +19,17 @@ SPACE_DESCRIPTION = 'Python support for cultural analytics students'
 DEFAULT_CONFIG = {
     'name': SPACE_NAME,
     'description': SPACE_DESCRIPTION,
-    'system_prompt': "You're a Python guide for CCNY's CSC 10800 where September covers foundations (command line, Jupyter, script anatomy), October builds programming basics (data types through functions) with Activities 1-2, and November-December advances to pandas, network analysis, and data collection with Activities 3-5, culminating in a Social Coding Portfolio. Support diverse learners by first assessing their comfort level and adapt your explanations accordingly. Always provide multiple entry points to concepts: start with the simplest working example that accomplishes the goal, then show incremental improvements
-    'temperature': 0.
-    'max_tokens':
-    'model': '
+    'system_prompt': "You're a Python guide for CCNY's CSC 10800 where September covers foundations (command line, Jupyter, script anatomy), October builds programming basics (data types through functions) with Activities 1-2, and November-December advances to pandas, network analysis, and data collection with Activities 3-5, culminating in a Social Coding Portfolio. Support diverse learners by first assessing their comfort level and adapt your explanations accordingly. Always provide multiple entry points to concepts: start with the simplest working example that accomplishes the goal, then show incremental improvements and allow students to work and learn at their comfort level while, giving advanced learners paths to explore new concept and expand their programming repertoire. Expect to complete all responses in under 1000 tokens.",
+    'temperature': 0.5,
+    'max_tokens': 1000,
+    'model': 'deepseek/deepseek-r1-distill-qwen-32b',
     'api_key_var': 'API_KEY',
     'theme': 'Default',
-    'grounding_urls': ["https://zmuhls.github.io/ccny-data-science/syllabus/", "https://zmuhls.github.io/ccny-data-science/schedule/"
+    'grounding_urls': ["https://zmuhls.github.io/ccny-data-science/syllabus/", "https://zmuhls.github.io/ccny-data-science/schedule/"],
     'enable_dynamic_urls': True,
     'enable_file_upload': True,
-    'examples': ['How do I set up a development environment
+    'examples': ['How do I set up a interactive development environment?', 'Where can I find the course schedule?', 'When is the social coding portfolio due?', 'How do I push a commit to GitHub?', "I'm confused on how to use Jupyter notebooks"],
+    'language': 'English',
     'locked': False
 }
 
@@ -136,6 +137,7 @@ THEME = config.get('theme', DEFAULT_CONFIG['theme'])
 GROUNDING_URLS = config.get('grounding_urls', DEFAULT_CONFIG['grounding_urls'])
 ENABLE_DYNAMIC_URLS = config.get('enable_dynamic_urls', DEFAULT_CONFIG['enable_dynamic_urls'])
 ENABLE_FILE_UPLOAD = config.get('enable_file_upload', DEFAULT_CONFIG.get('enable_file_upload', True))
+LANGUAGE = config.get('language', DEFAULT_CONFIG.get('language', 'English'))
 
 # Environment variables
 ACCESS_CODE = os.environ.get("ACCESS_CODE")
@@ -174,7 +176,7 @@ def validate_url_domain(url: str) -> bool:
     return False
 
 
-def fetch_url_content(url: str) -> str:
+def fetch_url_content(url: str, max_length: int = 3000) -> str:
     """Fetch and convert URL content to text"""
     try:
         if not validate_url_domain(url):
@@ -203,16 +205,16 @@ def fetch_url_content(url: str) -> str:
             text = ' '.join(text.split())
 
             # Limit content length
-            if len(text) >
-                text = text[:
+            if len(text) > max_length:
+                text = text[:max_length] + "... [truncated]"
 
-            return f"📄 Content from {url}
+            return f"📄 **Content from:** {url}\n\n{text}\n"
 
         elif any(ct in content_type for ct in ['text/plain', 'application/json']):
             text = response.text
-            if len(text) >
-                text = text[:
-            return f"📄 Content from {url}
+            if len(text) > max_length:
+                text = text[:max_length] + "... [truncated]"
+            return f"📄 **Content from:** {url}\n\n{text}\n"
 
         else:
             return f"⚠️ Unsupported content type at {url}: {content_type}"
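Note: a minimal sketch (not part of the commit) of how the new `max_length` parameter behaves; `truncate_text` below is a hypothetical stand-in for the truncation step inside `fetch_url_content`.

```python
# Hypothetical helper mirroring the truncation logic added to fetch_url_content.
def truncate_text(text: str, max_length: int = 3000) -> str:
    if len(text) > max_length:
        text = text[:max_length] + "... [truncated]"
    return text

print(truncate_text("a" * 5000)[-15:])   # ends with "... [truncated]"
print(truncate_text("short text"))       # short input is returned unchanged
```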
@@ -292,19 +294,37 @@ def get_grounding_context() -> str:
     if not urls:
         return ""
 
-    context_parts = [
+    context_parts = []
 
+    # Process primary sources (first 2 URLs with 8000 char limit)
+    primary_urls = urls[:2]
+    if primary_urls:
+        context_parts.append("📚 **PRIMARY SOURCES:**\n")
+        for i, url in enumerate(primary_urls, 1):
+            if url in _url_content_cache:
+                content = _url_content_cache[url]
+            else:
+                content = fetch_url_content(url, max_length=8000)
+                _url_content_cache[url] = content
+
+            if not content.startswith("❌") and not content.startswith("⏱️"):
+                context_parts.append(f"\n**Primary Source {i} - {url}:**\n{content}")
 
+    # Process secondary sources (URLs 3+ with 2500 char limit)
+    secondary_urls = urls[2:]
+    if secondary_urls:
+        context_parts.append("\n\n📚 **SECONDARY SOURCES:**\n")
+        for i, url in enumerate(secondary_urls, 1):
+            if url in _url_content_cache:
+                content = _url_content_cache[url]
+            else:
+                content = fetch_url_content(url, max_length=2500)
+                _url_content_cache[url] = content
+
+            if not content.startswith("❌") and not content.startswith("⏱️"):
+                context_parts.append(f"\n**Secondary Source {i} - {url}:**\n{content}")
+
+    if len(context_parts) > 0:
         return "\n".join(context_parts)
     return ""
 
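The primary/secondary split can be read off the slicing alone. A small illustrative sketch follows (assumptions: the third URL and the stub fetcher are hypothetical so the example runs offline; the real code calls the `fetch_url_content` shown above and caches into `_url_content_cache`):

```python
# Illustrative only: which character limit each grounding URL receives after this change.
urls = [
    "https://zmuhls.github.io/ccny-data-science/syllabus/",   # primary
    "https://zmuhls.github.io/ccny-data-science/schedule/",   # primary
    "https://example.com/extra-reading",                      # secondary (hypothetical URL)
]

def fetch_url_content(url: str, max_length: int = 3000) -> str:
    # Stub standing in for the real network fetcher.
    return f"content from {url} (limit {max_length})"

_url_content_cache = {}
for url in urls[:2]:
    _url_content_cache[url] = fetch_url_content(url, max_length=8000)  # primary sources
for url in urls[2:]:
    _url_content_cache[url] = fetch_url_content(url, max_length=2500)  # secondary sources

for content in _url_content_cache.values():
    print(content)
```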
@@ -395,8 +415,20 @@ Get your API key at: https://openrouter.ai/keys"""
             dynamic_context += f"\n{content}"
         grounding_context += dynamic_context
 
-    # Build messages
+    # Build messages with grounding context and file context in system prompt
+    system_content = SYSTEM_PROMPT
+
+    # Add language instruction if not English
+    if LANGUAGE != 'English':
+        system_content += f"\n\nIMPORTANT: You must respond EXCLUSIVELY in {LANGUAGE}. All your responses should be written entirely in {LANGUAGE}, even when user input is in a different language, particularly English."
+
+    if grounding_context:
+        system_content += "\n\nIMPORTANT: When providing information from the reference sources below, please cite the specific URL(s) where the information can be found."
+        system_content = f"{system_content}\n\n{grounding_context}"
+    if file_context:
+        system_content = f"{system_content}\n\n{file_context}"
+
+    messages = [{"role": "system", "content": system_content}]
 
     # Add conversation history
     for msg in history:
@@ -406,16 +438,10 @@ Get your API key at: https://openrouter.ai/keys"""
             "content": msg['content']
         })
 
-    # Add current message
-    full_message = message
-    if grounding_context:
-        full_message = f"{grounding_context}\n\n{message}"
-    if file_context:
-        full_message = f"{file_context}\n\n{full_message}"
-
+    # Add current message
     messages.append({
         "role": "user",
-        "content":
+        "content": message
     })
 
     # Make API request
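For reference, a sketch of the payload shape this change produces: grounding and file context move into the system turn and the user turn carries only the raw message. The strings below are abbreviated stand-ins, not the actual prompt text.

```python
# Illustrative message list after the change (values abbreviated).
system_content = (
    "You're a Python guide for CCNY's CSC 10800 ..."         # SYSTEM_PROMPT
    "\n\nIMPORTANT: ... cite the specific URL(s) ..."        # added only when grounding_context exists
    "\n\nPRIMARY SOURCES: ..."                                # grounding_context (and file_context, if any)
)
messages = [
    {"role": "system", "content": system_content},
    # prior turns from `history` are appended here as {"role": ..., "content": ...}
    {"role": "user", "content": "How do I push a commit to GitHub?"},
]
print(messages[0]["role"], "->", messages[-1]["content"])
```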
@@ -683,24 +709,45 @@ def create_interface():
         with gr.Tab("⚙️ Configuration"):
             gr.Markdown("## Configuration Management")
 
-                gr.Markdown(f"✅ {HF_ACCESS_MESSAGE}")
-                gr.Markdown("Configuration changes will be saved to the HuggingFace repository.")
-            else:
-                gr.Markdown(f"ℹ️ {HF_ACCESS_MESSAGE}")
-                gr.Markdown("Set HF_TOKEN in Space secrets to enable auto-save.")
-            with gr.Column():
+            # State for config tab authentication
+            config_authenticated = gr.State(False)
+
+            # Authentication panel
+            with gr.Column(visible=True) as config_auth_panel:
+                gr.Markdown("### 🔒 Authentication Required")
+                gr.Markdown("Enter your HF_TOKEN to access configuration settings:")
+
+                with gr.Row():
+                    config_password = gr.Textbox(
+                        label="HF Token",
+                        placeholder="Enter your HF_TOKEN...",
+                        type="password",
+                        scale=3
+                    )
+                    config_auth_btn = gr.Button("Authenticate", variant="primary", scale=1)
+
+                config_auth_status = gr.Markdown()
+
+            # Configuration panel (hidden until authenticated)
+            with gr.Column(visible=False) as config_panel:
+                # Show authentication status
+                if HF_ACCESS_VALID:
+                    gr.Markdown(f"✅ {HF_ACCESS_MESSAGE}")
+                    gr.Markdown("Configuration changes will be saved to the HuggingFace repository.")
+                else:
+                    gr.Markdown(f"ℹ️ {HF_ACCESS_MESSAGE}")
+                    gr.Markdown("Set HF_TOKEN in Space secrets to enable auto-save.")
+
+                # Configuration editor
+                gr.Markdown("### ⚙️ Configuration Editor")
+
+                # Show lock status if locked
+                if config.get('locked', False):
+                    gr.Markdown("⚠️ **Note:** Configuration is locked.")
+
+                # Basic settings
+                with gr.Column():
+                    edit_name = gr.Textbox(
                         label="Space Name",
                         value=config.get('name', ''),
                         max_lines=1
@@ -708,19 +755,49 @@ def create_interface():
                     edit_model = gr.Dropdown(
                         label="Model",
                         choices=[
+                            # Google models
                             "google/gemini-2.0-flash-001",
                             "google/gemma-3-27b-it",
+                            # Anthropic models
                             "anthropic/claude-3.5-sonnet",
                             "anthropic/claude-3.5-haiku",
+                            # OpenAI models
                             "openai/gpt-4o-mini-search-preview",
                             "openai/gpt-4.1-nano",
+                            # MistralAI models
+                            "mistralai/mistral-medium-3",
+                            # DeepSeek models
+                            "deepseek/deepseek-r1-distill-qwen-32b",
+                            # NVIDIA models
                             "nvidia/llama-3.1-nemotron-70b-instruct",
+                            # Qwen models
                             "qwen/qwen3-30b-a3b-instruct-2507"
                         ],
                         value=config.get('model', ''),
                         allow_custom_value=True
                     )
 
+                    edit_language = gr.Dropdown(
+                        label="Language",
+                        choices=[
+                            "Arabic",
+                            "Bengali",
+                            "English",
+                            "French",
+                            "German",
+                            "Hindi",
+                            "Italian",
+                            "Japanese",
+                            "Korean",
+                            "Mandarin",
+                            "Portuguese",
+                            "Russian",
+                            "Spanish",
+                            "Turkish"
+                        ],
+                        value=config.get('language', 'English')
+                    )
+
                     edit_description = gr.Textbox(
                         label="Description",
                         value=config.get('description', ''),
@@ -762,7 +839,7 @@ def create_interface():
                         placeholder="https://example.com/docs\nhttps://example.com/api",
                         value='\n'.join(config.get('grounding_urls', [])),
                         lines=5,
-                        info="
+                        info="First 2 URLs: Primary sources (8000 chars). URLs 3+: Secondary sources (2500 chars)."
                     )
 
                     with gr.Row():
@@ -784,7 +861,7 @@ def create_interface():
 
                 config_status = gr.Markdown()
 
-                def save_configuration(name, description, system_prompt, model, temp, tokens, examples, grounding_urls, enable_dynamic_urls, enable_file_upload):
+                def save_configuration(name, description, system_prompt, model, language, temp, tokens, examples, grounding_urls, enable_dynamic_urls, enable_file_upload):
                     """Save updated configuration"""
                     try:
                         updated_config = config.copy()
@@ -793,6 +870,7 @@ def create_interface():
                             'description': description,
                             'system_prompt': system_prompt,
                             'model': model,
+                            'language': language,
                             'temperature': temp,
                             'max_tokens': int(tokens),
                             'examples': [ex.strip() for ex in examples.split('\n') if ex.strip()],
@@ -837,7 +915,7 @@ def create_interface():
 
                 save_btn.click(
                     save_configuration,
-                    inputs=[edit_name, edit_description, edit_system_prompt, edit_model,
+                    inputs=[edit_name, edit_description, edit_system_prompt, edit_model, edit_language,
                             edit_temperature, edit_max_tokens, edit_examples, edit_grounding_urls,
                             edit_enable_dynamic_urls, edit_enable_file_upload],
                     outputs=[config_status]
@@ -852,6 +930,7 @@ def create_interface():
                                 DEFAULT_CONFIG['description'],
                                 DEFAULT_CONFIG['system_prompt'],
                                 DEFAULT_CONFIG['model'],
+                                DEFAULT_CONFIG.get('language', 'English'),
                                 DEFAULT_CONFIG['temperature'],
                                 DEFAULT_CONFIG['max_tokens'],
                                 '\n'.join(DEFAULT_CONFIG['examples']),
@@ -861,16 +940,54 @@ def create_interface():
                                 "✅ Reset to default configuration"
                             )
                         else:
-                            return (*[gr.update() for _ in range(
+                            return (*[gr.update() for _ in range(11)], "❌ Failed to reset")
                     except Exception as e:
-                        return (*[gr.update() for _ in range(
+                        return (*[gr.update() for _ in range(11)], f"❌ Error: {str(e)}")
 
                 reset_btn.click(
                     reset_configuration,
-                    outputs=[edit_name, edit_description, edit_system_prompt, edit_model,
+                    outputs=[edit_name, edit_description, edit_system_prompt, edit_model, edit_language,
                             edit_temperature, edit_max_tokens, edit_examples, edit_grounding_urls,
                             edit_enable_dynamic_urls, edit_enable_file_upload, config_status]
                 )
+
+                # Configuration tab authentication handler
+                def handle_config_auth(password):
+                    """Handle configuration tab authentication"""
+                    if not HF_TOKEN:
+                        return (
+                            gr.update(visible=True),   # Keep auth panel visible
+                            gr.update(visible=False),  # Keep config panel hidden
+                            gr.update(value="❌ No HF_TOKEN is set in Space secrets. Configuration cannot be enabled."),
+                            False
+                        )
+
+                    if password == HF_TOKEN:
+                        return (
+                            gr.update(visible=False),  # Hide auth panel
+                            gr.update(visible=True),   # Show config panel
+                            gr.update(value="✅ Authentication successful!"),
+                            True
+                        )
+                    else:
+                        return (
+                            gr.update(visible=True),   # Keep auth panel visible
+                            gr.update(visible=False),  # Keep config panel hidden
+                            gr.update(value="❌ Invalid HF_TOKEN. Please try again."),
+                            False
+                        )
+
+                config_auth_btn.click(
+                    handle_config_auth,
+                    inputs=[config_password],
+                    outputs=[config_auth_panel, config_panel, config_auth_status, config_authenticated]
+                )
+
+                config_password.submit(
+                    handle_config_auth,
+                    inputs=[config_password],
+                    outputs=[config_auth_panel, config_panel, config_auth_status, config_authenticated]
+                )
 
         # Access control handler
         if ACCESS_CODE:
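The authentication wiring above relies on the handler's return tuple mapping positionally onto the `outputs` list. A minimal standalone sketch of the same show/hide pattern follows (component names and the password literal are illustrative, not the Space's; it assumes a recent Gradio 4.x API):

```python
# Minimal sketch of the panel-toggle pattern used in the Configuration tab.
import gradio as gr

def unlock(password):
    ok = (password == "secret")  # stand-in for the HF_TOKEN comparison
    return (
        gr.update(visible=not ok),                    # auth panel
        gr.update(visible=ok),                        # settings panel
        "✅ Unlocked" if ok else "❌ Wrong password",  # status markdown
    )

with gr.Blocks() as demo:
    with gr.Column(visible=True) as auth_panel:
        pw = gr.Textbox(type="password", label="Password")
        btn = gr.Button("Unlock")
        status = gr.Markdown()
    with gr.Column(visible=False) as settings_panel:
        gr.Markdown("Settings go here")
    btn.click(unlock, inputs=[pw], outputs=[auth_panel, settings_panel, status])

if __name__ == "__main__":
    demo.launch()
```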
config.json
CHANGED
@@ -2,26 +2,24 @@
   "name": "Course Assistant Example",
   "tagline": "Python support for cultural analytics students",
   "description": "Python support for cultural analytics students",
-  "system_prompt": "You're a Python guide for CCNY's CSC 10800 where September covers foundations (command line, Jupyter, script anatomy), October builds programming basics (data types through functions) with Activities 1-2, and November-December advances to pandas, network analysis, and data collection with Activities 3-5, culminating in a Social Coding Portfolio. Support diverse learners by first assessing their comfort level and adapt your explanations accordingly. Always provide multiple entry points to concepts: start with the simplest working example that accomplishes the goal, then show incremental improvements
-  "model": "
+  "system_prompt": "You're a Python guide for CCNY's CSC 10800 where September covers foundations (command line, Jupyter, script anatomy), October builds programming basics (data types through functions) with Activities 1-2, and November-December advances to pandas, network analysis, and data collection with Activities 3-5, culminating in a Social Coding Portfolio. Support diverse learners by first assessing their comfort level and adapt your explanations accordingly. Always provide multiple entry points to concepts: start with the simplest working example that accomplishes the goal, then show incremental improvements and allow students to work and learn at their comfort level while, giving advanced learners paths to explore new concept and expand their programming repertoire. Expect to complete all responses in under 1000 tokens.",
+  "model": "deepseek/deepseek-r1-distill-qwen-32b",
+  "language": "English",
   "api_key_var": "API_KEY",
-  "temperature": 0.
-  "max_tokens":
+  "temperature": 0.5,
+  "max_tokens": 1000,
   "examples": [
-    "How do I set up a development environment
-    "Where can I find the course schedule
-    "How do I
-    "I'm confused on how to use
+    "How do I set up a interactive development environment?",
+    "Where can I find the course schedule?",
+    "When is the social coding portfolio due?",
+    "How do I push a commit to GitHub?",
+    "I'm confused on how to use Jupyter notebooks"
   ],
   "grounding_urls": [
     "https://zmuhls.github.io/ccny-data-science/syllabus/",
-    "https://zmuhls.github.io/ccny-data-science/schedule/",
-    "https://melaniewalsh.github.io/Intro-Cultural-Analytics/02-Python/00-Python.html",
-    "https://melaniewalsh.github.io/Intro-Cultural-Analytics/02-Python/03-Anatomy-Python-Script.html"
+    "https://zmuhls.github.io/ccny-data-science/schedule/"
   ],
   "enable_dynamic_urls": true,
   "enable_file_upload": true,
-  "theme": "Default",
-  "locked": false
+  "theme": "Default"
 }
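A quick way to sanity-check the updated file (a sketch, not part of the commit; it only assumes the keys visible in the diff above and the same `.get()` fallbacks app.py uses):

```python
# Load config.json and read the new keys with defensive fallbacks.
import json

with open("config.json") as f:
    config = json.load(f)

print(config.get("model", "deepseek/deepseek-r1-distill-qwen-32b"))
print(config.get("language", "English"))
print(config.get("temperature", 0.5), config.get("max_tokens", 1000))
assert len(config.get("grounding_urls", [])) >= 1
```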