Update app.py
app.py CHANGED

@@ -5,6 +5,7 @@ from transformers import pipeline
 from sklearn.metrics.pairwise import cosine_similarity
 import numpy as np
 import os
+from huggingface_hub import upload_file

 # === Custom PUP-themed CSS ===
 PUP_Themed_css = """
@@ -49,6 +50,24 @@ if os.path.exists(feedback_path) and os.path.getsize(feedback_path) > 0:
     except json.JSONDecodeError:
         feedback_data = []

+# === Hugging Face Upload ===
+def upload_feedback_to_hf():
+    hf_token = os.getenv("PUP_AI_Chatbot_Token")  # Access the token from environment variables
+    if not hf_token:
+        raise ValueError("Hugging Face token not found in environment variables!")
+
+    try:
+        upload_file(
+            path_or_fileobj=feedback_path,
+            path_in_repo="feedback.json",
+            repo_id="oceddyyy/University_Inquiries_Feedback",  # Replace with your actual HF dataset repo ID
+            repo_type="dataset",
+            token=hf_token
+        )
+        print("Feedback uploaded to Hugging Face successfully.")
+    except Exception as e:
+        print(f"Error uploading feedback to HF: {e}")
+
 # === Chatbot Response Function ===
 def chatbot_response(query, chat_history):
     query_embedding = embedding_model.encode([query], convert_to_tensor=True)
@@ -84,7 +103,6 @@ def chatbot_response(query, chat_history):
         return "", chat_history, gr.update(visible=True)

     prompt = (
-        f"The following is an official university handbook statement:\n"
         f"\"{matched_a}\"\n\n"
         f"Please explain this to a student in a short, natural, and easy-to-understand way. "
         f"Use simple words, and do not add new information."
@@ -140,6 +158,8 @@ def record_feedback(feedback, chat_history):
     if feedback_questions:
         feedback_embeddings = embedding_model.encode(feedback_questions, convert_to_tensor=True)

+    upload_feedback_to_hf()  # Upload feedback to Hugging Face after saving
+
     return gr.update(visible=False)

 # === Gradio UI ===
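The change relies on huggingface_hub's upload_file and on a write token exposed to the process as the PUP_AI_Chatbot_Token environment variable (on a Space this would normally be configured as a repository secret). A minimal sketch for exercising the same upload path outside the Gradio app, assuming that variable is set and that feedback.json (the file app.py saves feedback to) exists locally:

# Standalone sketch of the upload step added in this commit.
# Assumes PUP_AI_Chatbot_Token is set in the environment and feedback.json exists locally.
import os
from huggingface_hub import upload_file

feedback_path = "feedback.json"

token = os.getenv("PUP_AI_Chatbot_Token")
if not token:
    raise SystemExit("PUP_AI_Chatbot_Token is not set; export it or add it as a Space secret first.")

upload_file(
    path_or_fileobj=feedback_path,
    path_in_repo="feedback.json",
    repo_id="oceddyyy/University_Inquiries_Feedback",
    repo_type="dataset",
    token=token,
)
print("feedback.json pushed to the dataset repo.")

Because record_feedback() now calls upload_feedback_to_hf() after saving, every piece of feedback re-uploads the whole feedback.json file; the try/except in the new function keeps an upload failure from interrupting the chat flow.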