arahman347 committed
Commit 70971e2 · verified · 1 Parent(s): 92255da

updated color scheme

Files changed (1):
  1. app.py +18 -42
app.py CHANGED
@@ -1,77 +1,57 @@
+
+Alexandra Rahman
+10:41 AM
 import gradio as gr
 from huggingface_hub import InferenceClient  # imports Hugging Face models
-
-
 # NEW LIBRARIES
-
 from sentence_transformers import SentenceTransformer
 import torch
 import numpy as np
-
 ## START NEW CODE
-
-
 # Load and process the knowledge base text file
 with open("knowledge.txt", "r", encoding="utf-8") as f:
     knowledge_text = f.read()
-
 # Split the text into chunks (for example, by paragraphs)
 chunks = [chunk.strip() for chunk in knowledge_text.split("\n\n") if chunk.strip()]
-
 # Load an embedding model (this one is light and fast)
 embedder = SentenceTransformer('all-MiniLM-L6-v2')
-
 # Precompute embeddings for all chunks (as a tensor for fast similarity search)
 chunk_embeddings = embedder.encode(chunks, convert_to_tensor=True)
-
 def get_relevant_context(query, top_k=3):
     """
     Compute the embedding for the query, compare it against all chunk embeddings,
     and return the top_k most similar chunks concatenated into a context string.
     """
-
     # Compute and normalize the query embedding
     query_embedding = embedder.encode(query, convert_to_tensor=True)
     query_embedding = query_embedding / query_embedding.norm()
-
     # Normalize chunk embeddings along the embedding dimension
     norm_chunk_embeddings = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)
-
     # Compute cosine similarity between the query and each chunk
     similarities = torch.matmul(norm_chunk_embeddings, query_embedding)
-
     # Get the indices of the top_k most similar chunks
     top_k_indices = torch.topk(similarities, k=top_k).indices.cpu().numpy()
-
     # Concatenate the top chunks into a single context string
     context = "\n\n".join([chunks[i] for i in top_k_indices])
     return context
-
 ## END OF NEW CODE
-
 client = InferenceClient("google/gemma-2-2b-it")
-
 def respond(message, history):
     messages = [{"role": "system", "content": "You are PrisMate, an encouraging AI mentor and girlboss energy assistant for high school students and aspiring women/minorities in tech. Your mission is to share hidden tech history, resources, and communities that combat cultural erasure while building inclusive pathways into technology careers. You know the contributions of underrepresented pioneers, specific organizations and scholarships, mentorship programs, and practical career guidance. Be genuinely personable and helpful—keep responses short, concise, and clear while being warm, encouraging, and culturally aware with that empowering feminine energy. Only discuss topics relevant to tech careers, education, and supporting underrepresented groups in technology. If asked about unrelated topics (like food, entertainment, etc.), politely redirect by saying something like I'm here to support you on your tech journey! Let's talk about how I can help you succeed in technology. Provide actionable advice with concrete next steps, highlight overlooked historical figures, connect students to relevant communities, and help them see their backgrounds as strengths. Explain concepts at high school level and always end with something they can do right away."}]
-
     # NEW CODE
     # Retrieve context relevant to the current user message
     context = get_relevant_context(message, top_k=3)
-
     # add all previous messages to the messages list
     if history:
         for user_msg, assistant_msg in history:
             messages.append({"role": "user", "content": user_msg})
             messages.append({"role": "assistant", "content": assistant_msg})
-
     # add the current user's message to the messages list
     messages.append({"role": "user", "content": message})
-
     # make the chat completion API call,
     # sending the messages and other parameters to the model;
     # streaming makes one word/token appear at a time
     response = ""
-
     # iterate over the streamed completion chunks
     for message in client.chat_completion(
         messages,
@@ -82,36 +62,32 @@ def respond(message, history):
         token = message.choices[0].delta.content  # capture the most recent token
         response += token  # add it to the response
         yield response  # yield the partial response
-
-custom_theme = gr.themes.Default(
-    primary_hue="purple"  # Let's start with a blue primary color
-).set(
-    background_fill_primary="#352950",
-    button_primary_background_fill="pink",  # A specific shade of red-orange
-    button_primary_background_fill_hover="hotpink",  # Darker red-orange on hover
-    # Change the chatbot bubble background
-    #bubble_background_fill_user="#E0F7FA",  # Light blue for user messages
-    #bubble_background_fill_assistant="#F3E5F5",  # Light purple for assistant messages
-    # Change the border radius of inputs
-    input_radius="0px",  # Square input fields
-    loader_color="purple",
-    link_text_color_hover="darkgreen"
+custom_theme = gr.themes.Soft(
+    primary_hue="purple",
+    secondary_hue="pink",
+    neutral_hue="pink",
 )
-
-with gr.Blocks(theme=custom_theme) as chatbot:
+css = """
+body {
+    background-color: #C13B6F !important;  /* A more vibrant, dark pink */
+}
+.gradio-block {
+    background-color: #D0CFCF !important;  /* Timberwolf for the chatbot background */
+}
+"""
+with gr.Blocks(theme=custom_theme) as chatbot:  # removed the css argument from here
+    gr.HTML(f"<style>{css}</style>")  # inject CSS for the background color
     gr.Image(
         value="banner.jpg",
         show_label=False,
         show_share_button=False,
         show_download_button=False
-  )
-
+    )
     # Define the chatbot interface outside the respond function
     gr.ChatInterface(
        respond,
        examples=["Teach me about minorities in tech", "Help me find statistics about Women in tech", "What are some communities/groups I can join for tech inclusiveness!"],
        #description="This is a minority inclusivity bot"
    )
-
# Launch the chatbot interface
chatbot.launch()
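
Review note: the three lines added at the very top of the new file (a blank line, "Alexandra Rahman", and "10:41 AM") look like chat-transcript residue from a copy/paste, not code. They are not valid Python, so app.py will raise a SyntaxError as soon as the Space starts; they should be deleted before this change ships.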
 
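Review note: the hand-rolled normalize-and-matmul in get_relevant_context is a correct cosine similarity, but sentence-transformers ships a utility that does the same thing. The sketch below is an equivalent rewrite, not what the commit contains; it also clamps top_k so torch.topk cannot raise when there are fewer chunks than requested.

    import torch
    from sentence_transformers import SentenceTransformer, util

    embedder = SentenceTransformer("all-MiniLM-L6-v2")

    def get_relevant_context(query, chunks, chunk_embeddings, top_k=3):
        # Encode the query; util.cos_sim handles normalization internally,
        # so no manual division by the norm is needed.
        query_embedding = embedder.encode(query, convert_to_tensor=True)
        similarities = util.cos_sim(query_embedding, chunk_embeddings)[0]  # shape: (num_chunks,)
        # Clamp k so torch.topk never asks for more chunks than exist.
        k = min(top_k, len(chunks))
        top_k_indices = torch.topk(similarities, k=k).indices.tolist()
        return "\n\n".join(chunks[i] for i in top_k_indices)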
 
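Review note: the history loop unpacks each turn as (user_msg, assistant_msg), which assumes Gradio's classic pair-style history. Newer Gradio versions pass OpenAI-style dicts instead when ChatInterface is built with type="messages". A loop that tolerates both formats (an assumption about the Gradio version, not something this commit needs today) could look like:

    for turn in history or []:
        if isinstance(turn, dict):
            # type="messages": turn is already {"role": ..., "content": ...}
            messages.append(turn)
        else:
            # classic format: turn is a (user_msg, assistant_msg) pair
            user_msg, assistant_msg = turn
            messages.append({"role": "user", "content": user_msg})
            messages.append({"role": "assistant", "content": assistant_msg})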
 
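Review note: a functional gap rather than a style issue: respond calls get_relevant_context(message, top_k=3) but never puts the result into messages, so the retrieved knowledge never reaches the model. A minimal fix (hypothetical; not part of this commit) is to fold the context into the final user turn instead of appending the bare message:

    # Replace the plain user-message append with a grounded one so the
    # model actually sees the chunks retrieved from knowledge.txt.
    context = get_relevant_context(message, top_k=3)
    messages.append({
        "role": "user",
        "content": f"Use this context if it is relevant:\n{context}\n\nQuestion: {message}",
    })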
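
Review note: the keyword arguments to client.chat_completion fall between the two hunks, so they are not visible here. For reference, a typical streaming call with huggingface_hub's InferenceClient looks like the sketch below; the parameter values are assumptions, not the commit's actual settings. Renaming the loop variable also avoids shadowing the message argument of respond, which the current code does.

    response = ""
    for chunk in client.chat_completion(
        messages,
        max_tokens=512,    # assumed value; the real one is outside the shown hunks
        stream=True,       # required for token-by-token streaming
        temperature=0.7,   # assumed value
    ):
        token = chunk.choices[0].delta.content
        if token:          # the final streamed chunk can carry an empty delta
            response += token
            yield response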
 
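Review note: on the color-scheme change itself, injecting a style tag through gr.HTML works, but gr.Blocks accepts a css argument directly, which is the more conventional route. Also, .gradio-block does not appear to be a selector current Gradio versions emit; the top-level wrapper is .gradio-container (worth verifying against the Gradio version pinned in this Space). A sketch under those assumptions:

    import gradio as gr

    custom_theme = gr.themes.Soft(
        primary_hue="purple",
        secondary_hue="pink",
        neutral_hue="pink",
    )

    # Target Gradio's own wrapper class instead of body/.gradio-block;
    # the exact selector depends on the installed Gradio version.
    css = """
    .gradio-container {
        background-color: #C13B6F !important;  /* vibrant dark pink */
    }
    """

    with gr.Blocks(theme=custom_theme, css=css) as chatbot:
        ...  # banner image and ChatInterface as in the diff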