milwright committed
Commit 6bf95cb · verified · 1 parent: 15b5dc3

Upload 3 files

Files changed (2)
  1. app.py +18 -146
  2. config.json +7 -12
app.py CHANGED
@@ -10,11 +10,11 @@ import urllib.parse
 
 
 # Configuration
-SPACE_NAME = "AI Assistant"
-SPACE_DESCRIPTION = "A customizable AI assistant"
+SPACE_NAME = "CUNY Virgil"
+SPACE_DESCRIPTION = "Adaptable AI assistant for research methods support"
 
 # Default configuration values
-DEFAULT_SYSTEM_PROMPT = """You are an AI assistant specialized in mathematics and statistics who guides users through problem-solving rather than providing direct answers. You help users discover solutions by asking strategic questions ('What do we know so far?' 'What method might apply here?' 'Can you identify a pattern?'), prompting them to explain their reasoning, and offering hints that build on their current understanding. Format all mathematical expressions in LaTeX (inline: $x^2 + y^2 = r^2$, display: $$\int_a^b f(x)dx$$). When users are stuck, provide scaffolded support: suggest examining simpler cases, identifying relevant formulas or theorems, or breaking the problem into smaller parts. Use multiple representations to illuminate different aspects of the problem, validate partial progress to build confidence, and help users recognize and correct their own errors through targeted questions rather than corrections. Your goal is to develop problem-solving skills and mathematical reasoning, not just arrive at answers."""
+DEFAULT_SYSTEM_PROMPT = """You are a research aid specializing in academic literature search and analysis. Your expertise spans discovering peer-reviewed sources, assessing research methodologies, synthesizing findings across studies, and delivering properly formatted citations. When responding, anchor claims in specific sources from provided URL contexts, differentiate between direct evidence and interpretive analysis, and note any limitations or contradictory results. Employ clear, accessible language that demystifies complex research, and propose connected research directions when appropriate. Your purpose is to serve as an informed research tool supporting users through initial concept development, exploratory investigation, information collection, and source compilation."""
 DEFAULT_TEMPERATURE = 0.7
 DEFAULT_MAX_TOKENS = 750
 
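The space title and description are now fixed constants instead of values read from config.json. Later hunk headers show them feeding `gr.Blocks(title=SPACE_NAME, theme=theme_class()) as demo`; the sketch below only illustrates that pattern, and the `gr.Markdown` header line is an assumption, not code from this commit.

```python
# Minimal sketch (not from the commit): hard-coded space constants surfacing in the UI.
import gradio as gr

SPACE_NAME = "CUNY Virgil"
SPACE_DESCRIPTION = "Adaptable AI assistant for research methods support"

with gr.Blocks(title=SPACE_NAME) as demo:
    # Hypothetical header; app.py's actual layout is not shown in this diff.
    gr.Markdown(f"# {SPACE_NAME}\n\n{SPACE_DESCRIPTION}")

if __name__ == "__main__":
    demo.launch()
```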
@@ -22,53 +22,20 @@ DEFAULT_MAX_TOKENS = 750
 try:
     with open('config.json', 'r') as f:
         saved_config = json.load(f)
-        SPACE_NAME = saved_config.get('name', "AI Assistant")
-        SPACE_DESCRIPTION = saved_config.get('description', "A customizable AI assistant")
-        MODEL = saved_config.get('model', "google/gemini-2.0-flash-001")
-        THEME = saved_config.get('theme', "Default")
         SYSTEM_PROMPT = saved_config.get('system_prompt', DEFAULT_SYSTEM_PROMPT)
         temperature = saved_config.get('temperature', DEFAULT_TEMPERATURE)
         max_tokens = saved_config.get('max_tokens', DEFAULT_MAX_TOKENS)
-
-        # Load grounding URLs
-        saved_urls = saved_config.get('grounding_urls', [])
-        if isinstance(saved_urls, str):
-            try:
-                import ast
-                GROUNDING_URLS = ast.literal_eval(saved_urls)
-            except:
-                GROUNDING_URLS = []
-        else:
-            GROUNDING_URLS = saved_urls
-
-        # Load examples
-        saved_examples = saved_config.get('examples', ['Hello! How can you help me?', 'Tell me something interesting', 'What can you do?'])
-        if isinstance(saved_examples, str):
-            try:
-                import ast
-                EXAMPLES = ast.literal_eval(saved_examples)
-            except:
-                EXAMPLES = ['Hello! How can you help me?', 'Tell me something interesting', 'What can you do?']
-        elif isinstance(saved_examples, list):
-            EXAMPLES = saved_examples
-        else:
-            EXAMPLES = ['Hello! How can you help me?', 'Tell me something interesting', 'What can you do?']
-
     print("✅ Loaded configuration from config.json")
 except:
     # Use defaults if no config file or error
-    SPACE_NAME = "AI Assistant"
-    SPACE_DESCRIPTION = "A customizable AI assistant"
-    MODEL = "google/gemini-2.0-flash-001"
-    THEME = "Default"
     SYSTEM_PROMPT = DEFAULT_SYSTEM_PROMPT
     temperature = DEFAULT_TEMPERATURE
     max_tokens = DEFAULT_MAX_TOKENS
-    GROUNDING_URLS = []
-    EXAMPLES = ['Hello! How can you help me?', 'Tell me something interesting', 'What can you do?']
     print("ℹ️ Using default configuration")
 
-# MODEL, THEME, and GROUNDING_URLS are now set by config loading above
+MODEL = "nvidia/llama-3.1-nemotron-70b-instruct"
+THEME = "Glass" # Gradio theme name
+GROUNDING_URLS = []
 # Get access code from environment variable for security
 # If ACCESS_CODE is not set, no access control is applied
 ACCESS_CODE = os.environ.get("ACCESS_CODE")
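After this hunk only system_prompt, temperature, and max_tokens still come from config.json, and any failure (missing file, bad JSON) silently falls back to the defaults via the bare except, while MODEL, THEME, and GROUNDING_URLS become hard-coded constants. A minimal sketch of the same fallback behaviour, written here with named exception types for clarity (an assumption, not what the committed code does):

```python
# Sketch only: equivalent config fallback with explicit exceptions instead of a bare except.
import json

DEFAULT_SYSTEM_PROMPT = "You are a research aid..."  # abbreviated stand-in
DEFAULT_TEMPERATURE = 0.7
DEFAULT_MAX_TOKENS = 750

try:
    with open('config.json', 'r') as f:
        saved_config = json.load(f)
except (FileNotFoundError, json.JSONDecodeError):
    saved_config = {}

SYSTEM_PROMPT = saved_config.get('system_prompt', DEFAULT_SYSTEM_PROMPT)
temperature = saved_config.get('temperature', DEFAULT_TEMPERATURE)
max_tokens = saved_config.get('max_tokens', DEFAULT_MAX_TOKENS)
```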
@@ -442,14 +409,14 @@ def verify_access_code(code):
     global _access_granted_global
     if ACCESS_CODE is None:
         _access_granted_global = True
-        return gr.update(visible=False), gr.update(visible=True), gr.update(value=True), gr.update(visible=False)
+        return gr.update(visible=False), gr.update(visible=True), gr.update(value=True)
 
     if code == ACCESS_CODE:
         _access_granted_global = True
-        return gr.update(visible=False), gr.update(visible=True), gr.update(value=True), gr.update(visible=False)
+        return gr.update(visible=False), gr.update(visible=True), gr.update(value=True)
     else:
         _access_granted_global = False
-        return gr.update(visible=True, value="❌ Incorrect access code. Please try again."), gr.update(visible=False), gr.update(value=False), gr.update(visible=True)
+        return gr.update(visible=True, value="❌ Incorrect access code. Please try again."), gr.update(visible=False), gr.update(value=False)
 
 def protected_generate_response(message, history):
     """Protected response function that checks access"""
@@ -582,7 +549,7 @@ with gr.Blocks(title=SPACE_NAME, theme=theme_class()) as demo:
         fn=store_and_generate_response, # Use wrapper function to store history
         title="", # Title already shown above
         description="", # Description already shown above
-        examples=EXAMPLES if EXAMPLES else None,
+        examples=['Help me narrow a research question'],
         type="messages" # Use modern message format for better compatibility
     )
 
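The example prompts were previously collected into EXAMPLES at config-load time; they are now passed inline to gr.ChatInterface, which accepts a plain list of strings. A stripped-down sketch of that usage (the respond function is a placeholder, not the app's store_and_generate_response):

```python
# Sketch only: gr.ChatInterface with a hard-coded examples list, as in the new code path.
import gradio as gr

def respond(message, history):
    # Placeholder for store_and_generate_response in app.py.
    return f"You said: {message}"

demo = gr.ChatInterface(
    fn=respond,
    examples=['Help me narrow a research question'],
    type="messages",  # modern message format, as in the diff
)

if __name__ == "__main__":
    demo.launch()
```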
@@ -603,12 +570,12 @@ with gr.Blocks(title=SPACE_NAME, theme=theme_class()) as demo:
     access_btn.click(
         verify_access_code,
         inputs=[access_input],
-        outputs=[access_error, chat_section, access_granted, access_section]
+        outputs=[access_error, chat_section, access_granted]
     )
     access_input.submit(
         verify_access_code,
         inputs=[access_input],
-        outputs=[access_error, chat_section, access_granted, access_section]
+        outputs=[access_error, chat_section, access_granted]
     )
 
     # Faculty Configuration Section - appears at the bottom with password protection
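verify_access_code now returns three gr.update values, so both the click and submit wirings list exactly three output components and access_section is no longer toggled. A self-contained sketch of that three-output contract under simplified layout assumptions (component names mirror the diff, but the real app's layout is not shown here):

```python
# Sketch only: a handler whose three return values map, in order, onto the three outputs.
import gradio as gr

ACCESS_CODE = "letmein"  # stand-in; the app reads this from the environment

def verify_access_code(code):
    if code == ACCESS_CODE:
        return gr.update(visible=False), gr.update(visible=True), gr.update(value=True)
    return (
        gr.update(visible=True, value="❌ Incorrect access code. Please try again."),
        gr.update(visible=False),
        gr.update(value=False),
    )

with gr.Blocks() as demo:
    access_input = gr.Textbox(label="Access code")
    access_btn = gr.Button("Enter")
    access_error = gr.Markdown(visible=False)
    access_granted = gr.Checkbox(value=False, visible=False)
    with gr.Column(visible=False) as chat_section:
        gr.Markdown("Chat goes here")

    access_btn.click(verify_access_code, inputs=[access_input],
                     outputs=[access_error, chat_section, access_granted])

if __name__ == "__main__":
    demo.launch()
```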
@@ -791,58 +758,14 @@ with gr.Blocks(title=SPACE_NAME, theme=theme_class()) as demo:
     # Save configuration function
     def save_configuration(new_name, new_description, new_model, new_theme, new_prompt, new_examples, new_temp, new_tokens, url1, url2, url3, url4, url5, url6, url7, url8, url9, url10, lock_config, is_authenticated):
         if not is_authenticated:
-            # Return not authenticated message and keep current values
-            return (
-                "❌ Not authenticated",
-                gr.update(), # Keep current name
-                gr.update(), # Keep current description
-                gr.update(), # Keep current model
-                gr.update(), # Keep current theme
-                gr.update(), # Keep current prompt
-                gr.update(), # Keep current examples
-                gr.update(), # Keep current temp
-                gr.update(), # Keep current tokens
-                gr.update(), # Keep current url1
-                gr.update(), # Keep current url2
-                gr.update(), # Keep current url3
-                gr.update(), # Keep current url4
-                gr.update(), # Keep current url5
-                gr.update(), # Keep current url6
-                gr.update(), # Keep current url7
-                gr.update(), # Keep current url8
-                gr.update(), # Keep current url9
-                gr.update(), # Keep current url10
-                gr.update() # Keep current lock
-            )
+            return "❌ Not authenticated"
 
         # Check if configuration is already locked
         try:
             with open('config.json', 'r') as f:
                 existing_config = json.load(f)
             if existing_config.get('locked', False):
-                # Return locked message and keep current values
-                return (
-                    "🔒 Configuration is locked and cannot be modified",
-                    gr.update(), # Keep current name
-                    gr.update(), # Keep current description
-                    gr.update(), # Keep current model
-                    gr.update(), # Keep current theme
-                    gr.update(), # Keep current prompt
-                    gr.update(), # Keep current examples
-                    gr.update(), # Keep current temp
-                    gr.update(), # Keep current tokens
-                    gr.update(), # Keep current url1
-                    gr.update(), # Keep current url2
-                    gr.update(), # Keep current url3
-                    gr.update(), # Keep current url4
-                    gr.update(), # Keep current url5
-                    gr.update(), # Keep current url6
-                    gr.update(), # Keep current url7
-                    gr.update(), # Keep current url8
-                    gr.update(), # Keep current url9
-                    gr.update(), # Keep current url10
-                    gr.update() # Keep current lock
-                )
+                return "🔒 Configuration is locked and cannot be modified"
         except:
             pass
 
@@ -873,10 +796,9 @@ with gr.Blocks(title=SPACE_NAME, theme=theme_class()) as demo:
         try:
             with open('config.json', 'w') as f:
                 json.dump(new_config, f, indent=2)
-            print(f"✅ Faculty config saved to config.json: {new_config}")
 
             # Update global variables
-            global SPACE_NAME, SPACE_DESCRIPTION, MODEL, THEME, SYSTEM_PROMPT, temperature, max_tokens, GROUNDING_URLS, EXAMPLES
+            global SPACE_NAME, SPACE_DESCRIPTION, MODEL, THEME, SYSTEM_PROMPT, temperature, max_tokens, GROUNDING_URLS
             SPACE_NAME = new_name
             SPACE_DESCRIPTION = new_description
             MODEL = new_model
@@ -885,59 +807,10 @@ with gr.Blocks(title=SPACE_NAME, theme=theme_class()) as demo:
             temperature = new_temp
             max_tokens = int(new_tokens)
             GROUNDING_URLS = grounding_urls
-            EXAMPLES = examples_list
 
-            # Return success message AND updated form values
-            success_msg = f"✅ Configuration saved successfully at {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n🔄 **Please refresh the page to see all changes take effect.**"
-
-            # Return tuple with status message and all updated field values
-            return (
-                success_msg,
-                gr.update(value=new_name),
-                gr.update(value=new_description),
-                gr.update(value=new_model),
-                gr.update(value=new_theme),
-                gr.update(value=new_prompt),
-                gr.update(value=new_examples),
-                gr.update(value=new_temp),
-                gr.update(value=int(new_tokens)),
-                gr.update(value=url1),
-                gr.update(value=url2),
-                gr.update(value=url3),
-                gr.update(value=url4),
-                gr.update(value=url5),
-                gr.update(value=url6),
-                gr.update(value=url7),
-                gr.update(value=url8),
-                gr.update(value=url9),
-                gr.update(value=url10),
-                gr.update(value=lock_config)
-            )
+            return f"✅ Configuration saved successfully at {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n⚠️ **Note:** Example prompts will update on next page refresh."
         except Exception as e:
-            error_msg = f"❌ Error saving configuration: {str(e)}"
-            # Return error message and keep current values
-            return (
-                error_msg,
-                gr.update(), # Keep current name
-                gr.update(), # Keep current description
-                gr.update(), # Keep current model
-                gr.update(), # Keep current theme
-                gr.update(), # Keep current prompt
-                gr.update(), # Keep current examples
-                gr.update(), # Keep current temp
-                gr.update(), # Keep current tokens
-                gr.update(), # Keep current url1
-                gr.update(), # Keep current url2
-                gr.update(), # Keep current url3
-                gr.update(), # Keep current url4
-                gr.update(), # Keep current url5
-                gr.update(), # Keep current url6
-                gr.update(), # Keep current url7
-                gr.update(), # Keep current url8
-                gr.update(), # Keep current url9
-                gr.update(), # Keep current url10
-                gr.update() # Keep current lock
-            )
+            return f"❌ Error saving configuration: {str(e)}"
 
     # Reset configuration function
     def reset_configuration(is_authenticated):
@@ -997,8 +870,7 @@ with gr.Blocks(title=SPACE_NAME, theme=theme_class()) as demo:
         inputs=[edit_name, edit_description, edit_model, edit_theme, edit_system_prompt, edit_examples, edit_temperature, edit_max_tokens,
                 edit_url1, edit_url2, edit_url3, edit_url4, edit_url5, edit_url6, edit_url7, edit_url8, edit_url9, edit_url10,
                 config_locked, faculty_auth_state],
-        outputs=[config_status, edit_name, edit_description, edit_model, edit_theme, edit_system_prompt, edit_examples, edit_temperature, edit_max_tokens,
-                 edit_url1, edit_url2, edit_url3, edit_url4, edit_url5, edit_url6, edit_url7, edit_url8, edit_url9, edit_url10, config_locked]
+        outputs=[config_status]
     )
 
     reset_config_btn.click(
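save_configuration now returns a single status string on every path (not authenticated, locked, success, error), so the save button's wiring shrinks to outputs=[config_status] and the form fields are left untouched; the success message asks for a page refresh instead of pushing values back into the form. A minimal sketch of that single-output pattern (names follow the diff; the handler body is reduced to the status message):

```python
# Sketch only: one returned string updates one output component.
from datetime import datetime
import gradio as gr

def save_configuration(is_authenticated):
    if not is_authenticated:
        return "❌ Not authenticated"
    return f"✅ Configuration saved successfully at {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"

with gr.Blocks() as demo:
    faculty_auth_state = gr.State(True)  # stand-in for the app's auth state
    save_config_btn = gr.Button("Save configuration")
    config_status = gr.Markdown()

    save_config_btn.click(save_configuration,
                          inputs=[faculty_auth_state],
                          outputs=[config_status])

if __name__ == "__main__":
    demo.launch()
```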
config.json CHANGED
@@ -1,18 +1,13 @@
 {
-  "name": "AI Assistant",
-  "description": "A customizable AI assistant",
-  "system_prompt": "You are an AI assistant specialized in mathematics and statistics who guides users through problem-solving rather than providing direct answers. You help users discover solutions by asking strategic questions ('What do we know so far?' 'What method might apply here?' 'Can you identify a pattern?'), prompting them to explain their reasoning, and offering hints that build on their current understanding. Format all mathematical expressions in LaTeX (inline: $x^2 + y^2 = r^2$, display: $$\\int_a^b f(x)dx$$). When users are stuck, provide scaffolded support: suggest examining simpler cases, identifying relevant formulas or theorems, or breaking the problem into smaller parts. Use multiple representations to illuminate different aspects of the problem, validate partial progress to build confidence, and help users recognize and correct their own errors through targeted questions rather than corrections. Your goal is to develop problem-solving skills and mathematical reasoning, not just arrive at answers.",
-  "model": "google/gemini-2.0-flash-001",
+  "name": "CUNY Virgil",
+  "description": "Adaptable AI assistant for research methods support",
+  "system_prompt": "You are a research aid specializing in academic literature search and analysis. Your expertise spans discovering peer-reviewed sources, assessing research methodologies, synthesizing findings across studies, and delivering properly formatted citations. When responding, anchor claims in specific sources from provided URL contexts, differentiate between direct evidence and interpretive analysis, and note any limitations or contradictory results. Employ clear, accessible language that demystifies complex research, and propose connected research directions when appropriate. Your purpose is to serve as an informed research tool supporting users through initial concept development, exploratory investigation, information collection, and source compilation.",
+  "model": "nvidia/llama-3.1-nemotron-70b-instruct",
   "api_key_var": "API_KEY",
   "temperature": 0.7,
   "max_tokens": 750,
-  "examples": [
-    "Hello! How can you help me?",
-    "Tell me something interesting",
-    "What can you do?"
-  ],
-  "grounding_urls": [],
+  "examples": "['Help me narrow a research question']",
+  "grounding_urls": "[]",
   "enable_dynamic_urls": true,
-  "theme": "Default",
-  "locked": false
+  "theme": "Glass"
 }
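The rewritten config.json stores "examples" and "grounding_urls" as strings containing Python-style list literals rather than JSON arrays (the old file used a real array), and the same commit removes the ast.literal_eval parsing of those keys from app.py, which now ignores them. If another consumer needed these values as lists, it would have to decode the strings itself; a minimal sketch of such decoding, purely for illustration:

```python
# Sketch only: decoding the string-encoded list values in the new config.json.
# app.py no longer reads these keys after this commit; this is not app behaviour.
import ast
import json

with open('config.json', 'r') as f:
    cfg = json.load(f)

def as_list(value):
    # Accept either a real JSON array or a string like "['item']".
    if isinstance(value, str):
        try:
            return ast.literal_eval(value)
        except (ValueError, SyntaxError):
            return []
    return value if isinstance(value, list) else []

print(as_list(cfg.get('examples', [])))
print(as_list(cfg.get('grounding_urls', [])))
```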