oceddyyy committed
Commit d7940ce · verified · 1 Parent(s): a9e9b1a

Update app.py

Files changed (1)
  1. app.py +0 -11
app.py CHANGED
@@ -7,7 +7,6 @@ import numpy as np
 import os
 from huggingface_hub import upload_file, hf_hub_download
 
-# === Custom PUP-themed CSS ===
 PUP_Themed_css = """
 html, body, .gradio-container, .gr-app {
     height: 100% !important;
@@ -19,7 +18,6 @@ html, body, .gradio-container, .gr-app {
 }
 """
 
-# === Load Models and Data ===
 embedding_model = SentenceTransformer('paraphrase-mpnet-base-v2')
 llm = pipeline("text2text-generation", model="google/flan-t5-small")
 
@@ -39,7 +37,6 @@ feedback_embeddings = None
 feedback_path = "outputs/feedback.json"
 os.makedirs("outputs", exist_ok=True)
 
-# === Load feedback from Hugging Face if available ===
 try:
     hf_token = os.getenv("PUP_AI_Chatbot_Token")
     downloaded_path = hf_hub_download(
@@ -55,7 +52,6 @@ try:
     if feedback_questions:
         feedback_embeddings = embedding_model.encode(feedback_questions, convert_to_tensor=True)
 
-    # Save to local copy for later editing during runtime
     with open(feedback_path, "w") as f_local:
         json.dump(feedback_data, f_local, indent=4)
 
@@ -63,7 +59,6 @@ except Exception as e:
     print(f"[Startup] No feedback loaded from HF: {e}")
     feedback_data = []
 
-# === Hugging Face Upload ===
 def upload_feedback_to_hf():
     hf_token = os.getenv("PUP_AI_Chatbot_Token")
     if not hf_token:
@@ -81,11 +76,9 @@ def upload_feedback_to_hf():
     except Exception as e:
         print(f"Error uploading feedback to HF: {e}")
 
-# === Chatbot Response Function ===
 def chatbot_response(query, chat_history):
     query_embedding = embedding_model.encode([query], convert_to_tensor=True)
 
-    # === Feedback Matching ===
     if feedback_embeddings is not None:
         feedback_scores = cosine_similarity(query_embedding.cpu().numpy(), feedback_embeddings.cpu().numpy())[0]
         best_idx = int(np.argmax(feedback_scores))
@@ -103,7 +96,6 @@ def chatbot_response(query, chat_history):
         chat_history.append((query, response))
         return "", chat_history, gr.update(visible=True)
 
-    # === Main Handbook Matching ===
     similarity_scores = cosine_similarity(query_embedding.cpu().numpy(), question_embeddings.cpu().numpy())[0]
    best_idx = int(np.argmax(similarity_scores))
     best_score = similarity_scores[best_idx]
@@ -137,7 +129,6 @@ def chatbot_response(query, chat_history):
     chat_history.append((query, final_response))
     return "", chat_history, gr.update(visible=True)
 
-# === Feedback Save & Upvote/Downvote Tracking ===
 def record_feedback(feedback, chat_history):
     global feedback_embeddings
     if chat_history:
@@ -175,7 +166,6 @@ def record_feedback(feedback, chat_history):
 
     return gr.update(visible=False)
 
-# === Gradio UI ===
 with gr.Blocks(css=PUP_Themed_css, title="University Handbook AI Chatbot") as demo:
     gr.Markdown(
         "<div style='"
@@ -213,6 +203,5 @@ with gr.Blocks(css=PUP_Themed_css, title="University Handbook AI Chatbot") as demo:
     thumbs_up.click(lambda state: record_feedback("positive", state), inputs=[state], outputs=[feedback_row])
     thumbs_down.click(lambda state: record_feedback("negative", state), inputs=[state], outputs=[feedback_row])
 
-# === Launch App ===
 if __name__ == "__main__":
     demo.launch()
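
For readers skimming the diff: chatbot_response embeds the query with the same paraphrase-mpnet-base-v2 model used at startup, scores it against precomputed question embeddings with cosine similarity, and answers from the best match. Below is a minimal, self-contained sketch of that pattern, assuming placeholder handbook data; the Q/A pairs, the answer() helper, and the 0.6 threshold are illustrative and do not come from this commit.

# Sketch of the similarity matching used in chatbot_response.
# The handbook entries and the 0.6 threshold are assumptions for illustration.
import numpy as np
from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity

embedding_model = SentenceTransformer("paraphrase-mpnet-base-v2")

# Stand-in for the handbook dataset loaded elsewhere in app.py.
handbook_qa = [
    ("What is the grading system?", "Grades range from 1.00 (excellent) to 5.00 (failed)."),
    ("How do I request a transcript?", "File a request at the Registrar's Office."),
]
questions = [q for q, _ in handbook_qa]
question_embeddings = embedding_model.encode(questions, convert_to_tensor=True)

def answer(query: str, threshold: float = 0.6) -> str:
    """Return the stored answer whose question best matches the query."""
    query_embedding = embedding_model.encode([query], convert_to_tensor=True)
    scores = cosine_similarity(
        query_embedding.cpu().numpy(), question_embeddings.cpu().numpy()
    )[0]
    best_idx = int(np.argmax(scores))
    if scores[best_idx] < threshold:
        return "Sorry, I couldn't find that in the handbook."
    return handbook_qa[best_idx][1]

print(answer("How is the grading done?"))

The threshold is the usual knob here: too low and loosely related handbook entries get returned, too high and reasonable paraphrases fall through to the fallback reply.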
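
The startup try/except and upload_feedback_to_hf together keep feedback.json in sync with a Hugging Face repo so thumbs-up/down data survives Space restarts. The sketch below shows that round trip with hf_hub_download and upload_file; the repo_id, the feedback.json filename in the repo, and repo_type="dataset" are placeholders and assumptions, since those arguments are not visible in the hunks above. The token is read from the PUP_AI_Chatbot_Token environment variable as in the app.

# Sketch of the feedback round-trip against a Hugging Face repo.
# REPO_ID, the remote filename, and repo_type are assumptions, not from the commit.
import json
import os
from huggingface_hub import hf_hub_download, upload_file

REPO_ID = "your-username/pup-chatbot-feedback"  # placeholder
FEEDBACK_PATH = "outputs/feedback.json"
os.makedirs("outputs", exist_ok=True)

token = os.getenv("PUP_AI_Chatbot_Token")

# Pull previously saved feedback, if the file exists in the repo.
try:
    downloaded = hf_hub_download(
        repo_id=REPO_ID,
        filename="feedback.json",
        repo_type="dataset",
        token=token,
    )
    with open(downloaded) as f:
        feedback_data = json.load(f)
except Exception as e:
    print(f"[Startup] No feedback loaded from HF: {e}")
    feedback_data = []

# ... append new feedback entries at runtime, then persist a local copy ...
with open(FEEDBACK_PATH, "w") as f:
    json.dump(feedback_data, f, indent=4)

# Push the local copy back so it is not lost when the Space restarts.
if token:
    upload_file(
        path_or_fileobj=FEEDBACK_PATH,
        path_in_repo="feedback.json",
        repo_id=REPO_ID,
        repo_type="dataset",
        token=token,
    )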
 