CCockrum committed (verified)
Commit 6876886 · Parent(s): d96b73b

Update app.py

Files changed (1):
  1. app.py +62 -32
app.py CHANGED
@@ -11,12 +11,11 @@ from langdetect import detect  # Ensure this package is installed
 
 # ✅ Environment Variables
 HF_TOKEN = os.getenv("HF_TOKEN")
-NASA_API_KEY = os.getenv("NASA_API_KEY")
-
-if not HF_TOKEN:
+if HF_TOKEN is None:
     raise ValueError("HF_TOKEN is not set. Please add it to your environment variables.")
 
-if not NASA_API_KEY:
+NASA_API_KEY = os.getenv("NASA_API_KEY")
+if NASA_API_KEY is None:
     raise ValueError("NASA_API_KEY is not set. Please add it to your environment variables.")
 
 # ✅ Set Up Streamlit
@@ -33,8 +32,8 @@ if "follow_up" not in st.session_state:
 # ✅ Model Configuration
 model_id = "mistralai/Mistral-7B-Instruct-v0.3"
 
-# ✅ Initialize Hugging Face Model with Limited Tokens
-def get_llm_hf_inference(model_id=model_id, max_new_tokens=300, temperature=0.7):  # ⬅️ Lowered token limit
+# ✅ Initialize Hugging Face Model
+def get_llm_hf_inference(model_id=model_id, max_new_tokens=1024, temperature=0.7):
     return HuggingFaceEndpoint(
         repo_id=model_id,
         max_new_tokens=max_new_tokens,
@@ -53,8 +52,10 @@ def get_nasa_apod():
     return "", "NASA Data Unavailable", "I couldn't fetch data from NASA right now."
 
 # ✅ Sentiment Analysis
-sentiment_analyzer = pipeline("sentiment-analysis",
-                              model="distilbert/distilbert-base-uncased-finetuned-sst-2-english")
+sentiment_analyzer = pipeline(
+    "sentiment-analysis",
+    model="distilbert/distilbert-base-uncased-finetuned-sst-2-english"
+)
 
 def analyze_sentiment(user_text):
     result = sentiment_analyzer(user_text)[0]
@@ -66,18 +67,12 @@ def predict_action(user_text):
         return "nasa_info"
     return "general_query"
 
-# ✅ Ensure Every Response Has a Follow-Up Question
+# ✅ Follow-Up Question Generation
 def generate_follow_up(user_text):
-    """Generates a follow-up question suggesting a related topic or next steps."""
-    prompt_text = (
-        f"Given the user's question: '{user_text}', generate a SHORT follow-up question "
-        "suggesting a related topic or asking if they need further help. "
-        "Example: 'Would you like to explore quantum superposition or ask about another physics concept?'"
-    )
-    hf = get_llm_hf_inference(max_new_tokens=40, temperature=0.8)
+    prompt_text = f"Based on: '{user_text}', generate a concise, friendly follow-up."
+    hf = get_llm_hf_inference(max_new_tokens=80, temperature=0.9)
     output = hf.invoke(input=prompt_text).strip()
-
-    return output if output else "Would you like to explore another related topic or ask about something else?"
+    return output if output else "Would you like to explore this topic further?"
 
 # ✅ Ensure English Responses
 def ensure_english(text):
@@ -89,9 +84,24 @@ def ensure_english(text):
         return "⚠️ Language detection failed. Please ask your question again."
     return text
 
-# ✅ Main Response Function (Shortened Answers + Separate Follow-Up)
-def get_response(system_message, chat_history, user_text, max_new_tokens=300):
-    action = predict_action(user_text)
+# ✅ Ensure Every Response Has a Follow-Up Question
+def generate_follow_up(user_text):
+    """Generates a follow-up question to guide the user toward related topics or next steps."""
+    prompt_text = (
+        f"Given the user's question: '{user_text}', generate a SHORT follow-up question "
+        "suggesting either a related topic or asking if they need further help. "
+        "Example: 'Would you like to explore quantum superposition or ask about another physics concept?' "
+        "Keep it concise and engaging."
+    )
+    hf = get_llm_hf_inference(max_new_tokens=40, temperature=0.8)
+    output = hf.invoke(input=prompt_text).strip()
+
+    # Fallback in case of an empty response
+    return output if output else "Would you like to explore another related topic or ask about something else?"
+
+# ✅ Main Response Function
+def get_response(system_message, chat_history, user_text, max_new_tokens=512):
+    action = predict_action(user_text)  # 🔥 Fix: Define 'action'
 
     # ✅ Handle NASA-Specific Queries
     if action == "nasa_info":
@@ -104,17 +114,17 @@ def get_response(system_message, chat_history, user_text, max_new_tokens=300):
         return response, follow_up, chat_history, nasa_url
 
     # ✅ Set Up LLM Request
-    hf = get_llm_hf_inference(max_new_tokens=max_new_tokens, temperature=0.7)
+    hf = get_llm_hf_inference(max_new_tokens=max_new_tokens, temperature=0.9)
 
     # ✅ Format Chat History
     filtered_history = "\n".join(f"{msg['role']}: {msg['content']}" for msg in chat_history)
 
-    # ✅ Prompt Engineering with Shortened Response Limit
+    # ✅ Prompt Engineering
     prompt = PromptTemplate.from_template(
         "[INST] {system_message}\n\nCurrent Conversation:\n{chat_history}\n\n"
         "User: {user_text}.\n [/INST]\n"
-        "AI: Provide a **concise response** (5 sentences max). "
-        "Make it conversational and engaging. Avoid excessive details."
+        "AI: Provide a detailed explanation with depth. "
+        "Use a conversational style, starting with 'Certainly!', 'Of course!', or 'Great question!'."
         "🚨 Answer **only in English**."
         "\nHAL:"
     )
@@ -142,24 +152,41 @@ def get_response(system_message, chat_history, user_text, max_new_tokens=300):
 # ✅ Streamlit UI
 st.title("🚀 HAL - NASA AI Assistant")
 
-# ✅ Justify All Chatbot Responses
+# ✅ Justify all chatbot responses
 st.markdown("""
 <style>
-.user-msg, .assistant-msg {
+.user-msg {
+    background-color: #696969;
+    color: white;
+    padding: 10px;
+    border-radius: 10px;
+    margin-bottom: 5px;
+    width: fit-content;
+    max-width: 80%;
+    text-align: justify; /* ✅ Justify text */
+}
+.assistant-msg {
+    background-color: #333333;
+    color: white;
     padding: 10px;
     border-radius: 10px;
     margin-bottom: 5px;
     width: fit-content;
     max-width: 80%;
-    text-align: justify;
+    text-align: justify; /* ✅ Justify text */
+}
+.container {
+    display: flex;
+    flex-direction: column;
+    align-items: flex-start;
+}
+@media (max-width: 600px) {
+    .user-msg, .assistant-msg { font-size: 16px; max-width: 100%; }
 }
-.user-msg { background-color: #696969; color: white; }
-.assistant-msg { background-color: #333333; color: white; }
-.container { display: flex; flex-direction: column; align-items: flex-start; }
-@media (max-width: 600px) { .user-msg, .assistant-msg { font-size: 16px; max-width: 100%; } }
 </style>
 """, unsafe_allow_html=True)
 
+
 # ✅ Reset Chat Button
 if st.sidebar.button("Reset Chat"):
     st.session_state.chat_history = [{"role": "assistant", "content": "Hello! How can I assist you today?"}]
@@ -170,12 +197,14 @@ if st.sidebar.button("Reset Chat"):
 user_input = st.chat_input("Type your message here...")
 
 if user_input:
+    # ✅ Ensure get_response() returns a response
     response, follow_up, st.session_state.chat_history, image_url = get_response(
         system_message="You are a helpful AI assistant.",
         user_text=user_input,
         chat_history=st.session_state.chat_history
     )
 
+    # ✅ Ensure response is not empty before calling st.markdown()
     if response:
         st.markdown(f"<div class='assistant-msg'><strong>HAL:</strong> {response}</div>", unsafe_allow_html=True)
 
@@ -185,6 +214,7 @@ if user_input:
     st.session_state.follow_up = follow_up
     st.session_state.response_ready = True
 
+# ✅ Check before displaying follow-up message
if st.session_state.response_ready and st.session_state.follow_up:
     st.markdown(f"<div class='assistant-msg'><strong>HAL:</strong> {st.session_state.follow_up}</div>", unsafe_allow_html=True)
     st.session_state.response_ready = False
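Note on the startup change above: the commit moves from a combined check to per-variable, fail-fast guards. A minimal standalone sketch of that pattern (the `require_env` helper is illustrative only, not part of app.py):

```python
import os

def require_env(name: str) -> str:
    # Hypothetical helper mirroring the commit's per-variable, fail-fast checks.
    value = os.getenv(name)
    if value is None:
        raise ValueError(f"{name} is not set. Please add it to your environment variables.")
    return value

# The two variables app.py now validates before launching the Streamlit UI.
HF_TOKEN = require_env("HF_TOKEN")
NASA_API_KEY = require_env("NASA_API_KEY")
```

Extracting a helper like this would keep the error messages consistent if more keys are added later.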