CCockrum committed
Commit 0b6b797 · verified · 1 Parent(s): e3130ac

Update app.py

Files changed (1): app.py (+55 -68)
app.py CHANGED
@@ -8,6 +8,50 @@ from langchain_core.prompts import PromptTemplate
 from langchain_core.output_parsers import StrOutputParser
 from transformers import pipeline
 
+# Appearance Settings: Allow user to adjust UI appearance via sidebar.
+user_bg_color = st.sidebar.color_picker("User Message Background", "#0078D7")
+assistant_bg_color = st.sidebar.color_picker("Assistant Message Background", "#333333")
+text_color = st.sidebar.color_picker("Message Text Color", "#FFFFFF")
+font_choice = st.sidebar.selectbox("Font Family", ["sans serif", "serif", "monospace"])
+
+# Inject custom CSS for appearance
+custom_css = f"""
+<style>
+.user-msg {{
+    background-color: {user_bg_color};
+    color: {text_color};
+    padding: 10px;
+    border-radius: 10px;
+    margin-bottom: 5px;
+    width: fit-content;
+    max-width: 80%;
+    font-family: {font_choice};
+}}
+.assistant-msg {{
+    background-color: {assistant_bg_color};
+    color: {text_color};
+    padding: 10px;
+    border-radius: 10px;
+    margin-bottom: 5px;
+    width: fit-content;
+    max-width: 80%;
+    font-family: {font_choice};
+}}
+.container {{
+    display: flex;
+    flex-direction: column;
+    align-items: flex-start;
+}}
+@media (max-width: 600px) {{
+    .user-msg, .assistant-msg {{
+        font-size: 16px;
+        max-width: 100%;
+    }}
+}}
+</style>
+"""
+st.markdown(custom_css, unsafe_allow_html=True)
+
 # Use environment variables for keys
 HF_TOKEN = os.getenv("HF_TOKEN")
 if HF_TOKEN is None:
@@ -64,14 +108,10 @@ def predict_action(user_text):
     return "general_query"
 
 def generate_follow_up(user_text):
-    """
-    Generates two variant follow-up questions and randomly selects one.
-    It also cleans up any unwanted quotation marks or extra meta commentary.
-    """
     prompt_text = (
         f"Based on the user's question: '{user_text}', generate two concise, friendly follow-up questions "
-        "that invite further discussion. For example, one might be 'Would you like to know more about the six types of quarks?' "
-        "and another might be 'Would you like to explore another aspect of quantum physics?' Do not include extra commentary."
+        "that invite further discussion (e.g., one might ask, 'Would you like to know more about the six types of quarks?' "
+        "and another might ask, 'Would you like to explore something else?'). Do not include extra meta commentary."
     )
     hf = get_llm_hf_inference(max_new_tokens=80, temperature=0.9)
     output = hf.invoke(input=prompt_text).strip()
@@ -82,15 +122,8 @@ def generate_follow_up(user_text):
     return random.choice(cleaned)
 
 def get_response(system_message, chat_history, user_text, max_new_tokens=256):
-    """
-    Generates HAL's answer with depth and a follow-up question.
-    The prompt instructs the model to provide a detailed explanation and then generate a follow-up.
-    If the answer comes back empty, a fallback answer is used.
-    """
     sentiment = analyze_sentiment(user_text)
     action = predict_action(user_text)
-
-    # Extract style instruction if present
     style_instruction = ""
     lower_text = user_text.lower()
     if "in the voice of" in lower_text or "speaking as" in lower_text:
@@ -98,7 +131,6 @@ def get_response(system_message, chat_history, user_text, max_new_tokens=256):
         if match:
             style_instruction = match.group(2).strip().capitalize()
             style_instruction = f" Please respond in the voice of {style_instruction}."
-
     if action == "nasa_info":
         nasa_url, nasa_title, nasa_explanation = get_nasa_apod()
         response = f"**{nasa_title}**\n\n{nasa_explanation}"
@@ -107,51 +139,37 @@ def get_response(system_message, chat_history, user_text, max_new_tokens=256):
         follow_up = generate_follow_up(user_text)
         chat_history.append({'role': 'assistant', 'content': follow_up})
         return response, follow_up, chat_history, nasa_url
-
     hf = get_llm_hf_inference(max_new_tokens=max_new_tokens, temperature=0.9)
     filtered_history = ""
     for message in chat_history:
         if message["role"] == "assistant" and message["content"].strip() == "Hello! How can I assist you today?":
             continue
         filtered_history += f"{message['role']}: {message['content']}\n"
-
     style_clause = style_instruction if style_instruction else ""
-
-    # Instruct the model to generate a detailed, in-depth answer.
     prompt = PromptTemplate.from_template(
         (
             "[INST] {system_message}\n\nCurrent Conversation:\n{chat_history}\n\n"
            "User: {user_text}.\n [/INST]\n"
-            "AI: Please provide a detailed explanation in depth. "
-            "Ensure your response covers the topic thoroughly and is written in a friendly, conversational style, "
+            "AI: Please answer the user's question in depth and in a friendly, conversational tone, "
             "starting with a phrase like 'Certainly!', 'Of course!', or 'Great question!'." + style_clause +
             "\nHAL:"
         )
     )
-
     chat = prompt | hf.bind(skip_prompt=True) | StrOutputParser(output_key='content')
     response = chat.invoke(input=dict(system_message=system_message, user_text=user_text, chat_history=filtered_history))
-    # Remove any extra markers if present.
     response = response.split("HAL:")[-1].strip()
-
-    # Fallback in case the generated answer is empty
     if not response:
         response = "Certainly, here is an in-depth explanation: [Fallback explanation]."
-
     chat_history.append({'role': 'user', 'content': user_text})
     chat_history.append({'role': 'assistant', 'content': response})
-
     if sentiment == "NEGATIVE" and not user_text.strip().endswith("?"):
         response = "I'm sorry you're feeling this way. I'm here to help. What can I do to assist you further?"
         chat_history[-1]['content'] = response
-
     follow_up = generate_follow_up(user_text)
     chat_history.append({'role': 'assistant', 'content': follow_up})
-
     return response, follow_up, chat_history, None
 
-# --- Chat UI ---
-st.title("🚀 HAL - A NASA AI Assistant")
+st.title("🚀 HAL - Your NASA AI Assistant")
 st.markdown("🌌 *Ask me about space, NASA, and beyond!*")
 
 if st.sidebar.button("Reset Chat"):
@@ -160,36 +178,13 @@ if st.sidebar.button("Reset Chat"):
     st.session_state.follow_up = ""
     st.experimental_rerun()
 
-st.markdown("""
-    <style>
-    .user-msg {
-        background-color: #696969;
-        color: white;
-        padding: 10px;
-        border-radius: 10px;
-        margin-bottom: 5px;
-        width: fit-content;
-        max-width: 80%;
-    }
-    .assistant-msg {
-        background-color: #333333;
-        color: white;
-        padding: 10px;
-        border-radius: 10px;
-        margin-bottom: 5px;
-        width: fit-content;
-        max-width: 80%;
-    }
-    .container {
-        display: flex;
-        flex-direction: column;
-        align-items: flex-start;
-    }
-    @media (max-width: 600px) {
-        .user-msg, .assistant-msg { font-size: 16px; max-width: 100%; }
-    }
-    </style>
-""", unsafe_allow_html=True)
+st.markdown("<div class='container'>", unsafe_allow_html=True)
+for message in st.session_state.chat_history:
+    if message["role"] == "user":
+        st.markdown(f"<div class='user-msg'><strong>You:</strong> {message['content']}</div>", unsafe_allow_html=True)
+    else:
+        st.markdown(f"<div class='assistant-msg'><strong>HAL:</strong> {message['content']}</div>", unsafe_allow_html=True)
+st.markdown("</div>", unsafe_allow_html=True)
 
 user_input = st.chat_input("Type your message here...")
 
@@ -203,11 +198,3 @@ if user_input:
         st.image(image_url, caption="NASA Image of the Day")
     st.session_state.follow_up = follow_up
     st.session_state.response_ready = True
-
-st.markdown("<div class='container'>", unsafe_allow_html=True)
-for message in st.session_state.chat_history:
-    if message["role"] == "user":
-        st.markdown(f"<div class='user-msg'><strong>You:</strong> {message['content']}</div>", unsafe_allow_html=True)
-    else:
-        st.markdown(f"<div class='assistant-msg'><strong>HAL:</strong> {message['content']}</div>", unsafe_allow_html=True)
-st.markdown("</div>", unsafe_allow_html=True)