CCockrum committed (verified)
Commit 1fd6803 · 1 parent: ffbccbb

Update app.py

Files changed (1):
  1. app.py (+39, −54)
app.py CHANGED
@@ -1,5 +1,6 @@
 import os
 import re
+import random
 import requests
 import streamlit as st
 from langchain_huggingface import HuggingFaceEndpoint
@@ -21,19 +22,17 @@ st.set_page_config(page_title="HAL - NASA ChatBot", page_icon="🚀")
 
 # --- Initialize Session State Variables ---
 if "chat_history" not in st.session_state:
-    # The initial greeting is stored in chat_history.
     st.session_state.chat_history = [{"role": "assistant", "content": "Hello! How can I assist you today?"}]
 
 if "response_ready" not in st.session_state:
-    st.session_state.response_ready = False  # Tracks whether HAL has responded
+    st.session_state.response_ready = False
 
 if "follow_up" not in st.session_state:
-    st.session_state.follow_up = ""  # Stores the follow-up question
+    st.session_state.follow_up = ""
 
 # --- Set Up Model & API Functions ---
 model_id = "mistralai/Mistral-7B-Instruct-v0.3"
 
-# Initialize sentiment analysis pipeline with explicit model specification
 sentiment_analyzer = pipeline(
     "sentiment-analysis",
     model="distilbert/distilbert-base-uncased-finetuned-sst-2-english",
@@ -41,7 +40,6 @@ sentiment_analyzer = pipeline(
 )
 
 def get_llm_hf_inference(model_id=model_id, max_new_tokens=128, temperature=0.7):
-    # Specify task="text-generation" so that the endpoint uses the correct function.
     return HuggingFaceEndpoint(
         repo_id=model_id,
         max_new_tokens=max_new_tokens,
@@ -70,47 +68,43 @@ def predict_action(user_text):
 
 def generate_follow_up(user_text):
     """
-    Generates a concise and conversational follow-up question related to the user's input.
-    The prompt instructs the model to avoid meta commentary.
+    Generates varied follow-up questions for the given user input.
+    The prompt instructs the LLM to produce two variants, and one is selected randomly.
     """
     prompt_text = (
-        f"Generate a concise, friendly follow-up question based on the user's question: '{user_text}'. "
-        "Do not include meta instructions or commentary such as 'Never return just a statement.' "
-        "For example, if the user asked about quarks, you might ask: "
-        "'Would you like to know more about the six types of quarks, or is there another aspect of quantum physics you're curious about?'"
+        f"Based on the user's question: '{user_text}', generate two concise, friendly follow-up questions "
+        "that are relevant to the topic. One should ask something like, "
+        "'Would you like to know more about the six types of quarks?' and the other should ask, "
+        "'Would you like to explore something else?' Do not include any extra commentary or meta instructions."
     )
-    hf = get_llm_hf_inference(max_new_tokens=64, temperature=0.8)
-    follow_up = hf.invoke(input=prompt_text).strip()
-    # Remove extraneous quotes if present.
-    follow_up = follow_up.strip('\'"')
-    # Optionally, remove any unwanted phrases (you can add more replacements if needed).
-    follow_up = re.sub(r"Never return just a statement\.?", "", follow_up, flags=re.IGNORECASE).strip()
-    # Ensure that something non-empty is returned.
-    if not follow_up:
-        follow_up = "Would you like to explore this topic further?"
-    return follow_up
+    hf = get_llm_hf_inference(max_new_tokens=80, temperature=0.9)
+    output = hf.invoke(input=prompt_text).strip()
+    # Split the output into separate lines if the model returns multiple variants.
+    variants = re.split(r"\n|[;]+", output)
+    # Clean up any extraneous quotes or unwanted text.
+    cleaned = [v.strip(' "\'') for v in variants if v.strip()]
+    # If no valid variants are found, provide a default fallback.
+    if not cleaned:
+        cleaned = ["Would you like to explore this topic further?"]
+    return random.choice(cleaned)
 
 def get_response(system_message, chat_history, user_text, max_new_tokens=256):
     """
-    Generates HAL's response in a friendly, conversational manner.
-    Uses sentiment analysis to adjust tone when appropriate and always generates a follow-up question.
-    If the user's input includes style instructions (e.g., 'in the voice of an astrophysicist'),
-    the prompt instructs HAL to adapt accordingly.
+    Generates HAL's response with a friendly, conversational tone.
+    Incorporates sentiment analysis and always generates a follow-up question with variation.
    """
     sentiment = analyze_sentiment(user_text)
     action = predict_action(user_text)
 
-    # Check for style instructions in the user message.
+    # Check for style instructions in the user's text (e.g., "in the voice of an astrophysicist")
     style_instruction = ""
     lower_text = user_text.lower()
     if "in the voice of" in lower_text or "speaking as" in lower_text:
-        # Extract the style instruction (a simple heuristic: take the part after "in the voice of")
         match = re.search(r"(in the voice of|speaking as)(.*)", lower_text)
         if match:
             style_instruction = match.group(2).strip().capitalize()
             style_instruction = f" Please respond in the voice of {style_instruction}."
-
-    # Handle NASA-related queries separately.
+
     if action == "nasa_info":
         nasa_url, nasa_title, nasa_explanation = get_nasa_apod()
         response = f"**{nasa_title}**\n\n{nasa_explanation}"
@@ -119,61 +113,54 @@ def get_response(system_message, chat_history, user_text, max_new_tokens=256):
         follow_up = generate_follow_up(user_text)
         chat_history.append({'role': 'assistant', 'content': follow_up})
         return response, follow_up, chat_history, nasa_url
-
+
     hf = get_llm_hf_inference(max_new_tokens=max_new_tokens, temperature=0.9)
-
-    # Build a filtered conversation history excluding the initial greeting.
+
     filtered_history = ""
     for message in chat_history:
         if message["role"] == "assistant" and message["content"].strip() == "Hello! How can I assist you today?":
             continue
         filtered_history += f"{message['role']}: {message['content']}\n"
-
-    # Add style instruction to the prompt if applicable.
-    style_clause = ""
-    if style_instruction:
-        style_clause = style_instruction
-
+
+    style_clause = style_instruction if style_instruction else ""
+
     prompt = PromptTemplate.from_template(
         (
             "[INST] {system_message}\n\nCurrent Conversation:\n{chat_history}\n\n"
             "User: {user_text}.\n [/INST]\n"
-            "AI: Please answer the user's question without repeating any previous greetings."
-            " Keep your response friendly and conversational, starting with a phrase like 'Certainly!', 'Of course!', or 'Great question!'." +
-            style_clause +
+            "AI: Please answer the user's question without repeating previous greetings. "
+            "Keep your response friendly and conversational, starting with a phrase like "
+            "'Certainly!', 'Of course!', or 'Great question!'." + style_clause +
             "\nHAL:"
         )
     )
-
+
     chat = prompt | hf.bind(skip_prompt=True) | StrOutputParser(output_key='content')
     response = chat.invoke(input=dict(system_message=system_message, user_text=user_text, chat_history=filtered_history))
     response = response.split("HAL:")[-1].strip()
-
+
    chat_history.append({'role': 'user', 'content': user_text})
    chat_history.append({'role': 'assistant', 'content': response})
-
-    # Only override with an empathetic response for negative sentiment if the input is not a direct question.
+
    if sentiment == "NEGATIVE" and not user_text.strip().endswith("?"):
        response = "I'm sorry you're feeling this way. I'm here to help. What can I do to assist you further?"
        chat_history[-1]['content'] = response
-
+
    follow_up = generate_follow_up(user_text)
    chat_history.append({'role': 'assistant', 'content': follow_up})
-
+
    return response, follow_up, chat_history, None
 
 # --- Chat UI ---
 st.title("🚀 HAL - Your NASA AI Assistant")
 st.markdown("🌌 *Ask me about space, NASA, and beyond!*")
 
-# Sidebar: Reset Chat
 if st.sidebar.button("Reset Chat"):
    st.session_state.chat_history = [{"role": "assistant", "content": "Hello! How can I assist you today?"}]
    st.session_state.response_ready = False
    st.session_state.follow_up = ""
    st.experimental_rerun()
 
-# Custom Chat Styling
 st.markdown("""
 <style>
     .user-msg {
@@ -205,8 +192,7 @@ st.markdown("""
 </style>
 """, unsafe_allow_html=True)
 
-# --- Single Input Box for Both Initial and Follow-Up Messages ---
-user_input = st.chat_input("Type your message here...")  # Only ONE chat_input()
+user_input = st.chat_input("Type your message here...")
 
 if user_input:
    response, follow_up, st.session_state.chat_history, image_url = get_response(
@@ -214,14 +200,13 @@ if user_input:
        user_text=user_input,
        chat_history=st.session_state.chat_history
    )
-
+
    if image_url:
        st.image(image_url, caption="NASA Image of the Day")
-
+
    st.session_state.follow_up = follow_up
    st.session_state.response_ready = True
 
-    # Render the entire chat history.
    st.markdown("<div class='container'>", unsafe_allow_html=True)
    for message in st.session_state.chat_history:
        if message["role"] == "user":
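The headline change is in generate_follow_up(): the model is now asked for two candidate questions, and the code splits the raw output and picks one at random. That post-processing can be exercised offline; a minimal sketch, with a hypothetical stubbed model output in place of the endpoint call:

```python
import random
import re

def pick_follow_up(output: str) -> str:
    # Mirrors the commit's post-processing: split the raw model output
    # on newlines or runs of semicolons into candidate questions.
    variants = re.split(r"\n|[;]+", output.strip())
    # Strip surrounding quotes/whitespace and drop empty pieces.
    cleaned = [v.strip(' "\'') for v in variants if v.strip()]
    # Fall back to a default question if nothing usable came back.
    if not cleaned:
        cleaned = ["Would you like to explore this topic further?"]
    return random.choice(cleaned)

# Hypothetical model output with the two variants on separate lines.
print(pick_follow_up(
    "Would you like to know more about the six types of quarks?\n"
    "'Would you like to explore something else?'"
))
```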
 
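The style-instruction heuristic in get_response() is untouched by this commit but easy to check in isolation. A sketch (extract_style is a hypothetical helper name); note that because the regex runs on lowercased text, .capitalize() restores a capital letter only at the start of the persona phrase:

```python
import re

def extract_style(user_text: str) -> str:
    # Mirrors the heuristic in get_response(): everything after the
    # trigger phrase is treated as the requested persona.
    lower_text = user_text.lower()
    if "in the voice of" in lower_text or "speaking as" in lower_text:
        match = re.search(r"(in the voice of|speaking as)(.*)", lower_text)
        if match:
            persona = match.group(2).strip().capitalize()
            return f" Please respond in the voice of {persona}."
    return ""

print(extract_style("Explain black holes in the voice of an astrophysicist"))
# -> " Please respond in the voice of An astrophysicist."
```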
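The prompt | hf.bind(skip_prompt=True) | StrOutputParser(...) line composes a LangChain LCEL chain. Its shape can be reproduced offline by standing in a RunnableLambda for the HuggingFaceEndpoint; the canned reply below is hypothetical:

```python
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import RunnableLambda

# Offline stand-in for the HuggingFaceEndpoint; returns a canned reply.
fake_llm = RunnableLambda(lambda _prompt: "HAL: Certainly! Quarks come in six flavors.")

prompt = PromptTemplate.from_template(
    "[INST] {system_message}\n\nCurrent Conversation:\n{chat_history}\n\n"
    "User: {user_text}.\n [/INST]\nHAL:"
)

chain = prompt | fake_llm | StrOutputParser()
response = chain.invoke({
    "system_message": "You are HAL, a helpful NASA assistant.",
    "chat_history": "",
    "user_text": "What are quarks?",
})
# As in the app, keep only the text after the final "HAL:" marker.
print(response.split("HAL:")[-1].strip())
```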
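A portability note on the reset button: st.experimental_rerun() is deprecated in newer Streamlit releases in favor of st.rerun(). A guarded wrapper (a sketch, not part of this commit) works on either version:

```python
import streamlit as st

def rerun() -> None:
    # st.rerun() replaced st.experimental_rerun() in newer Streamlit
    # releases; fall back for older installations.
    if hasattr(st, "rerun"):
        st.rerun()
    else:
        st.experimental_rerun()
```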
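The empathetic override replaces the model's answer only when the sentiment is NEGATIVE and the input is not phrased as a direct question. The guard is simple enough to pin down with asserts (should_override is a hypothetical helper mirroring the condition in get_response()):

```python
def should_override(sentiment: str, user_text: str) -> bool:
    # Mirror of the guard in get_response(): override only for negative
    # sentiment that is not phrased as a direct question.
    return sentiment == "NEGATIVE" and not user_text.strip().endswith("?")

assert should_override("NEGATIVE", "This is so frustrating.")
assert not should_override("NEGATIVE", "Why is this failing?")
assert not should_override("POSITIVE", "I love this app!")
```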
 
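Finally, the filtered_history loop drops the canned greeting so it never leaks back into the prompt. A sketch of the same filter on a toy history (build_history is a hypothetical name):

```python
GREETING = "Hello! How can I assist you today?"

def build_history(chat_history):
    # Mirror of the loop in get_response(): skip the canned greeting,
    # flatten everything else into "role: content" lines.
    filtered = ""
    for message in chat_history:
        if message["role"] == "assistant" and message["content"].strip() == GREETING:
            continue
        filtered += f"{message['role']}: {message['content']}\n"
    return filtered

history = [
    {"role": "assistant", "content": GREETING},
    {"role": "user", "content": "Tell me about the ISS."},
]
print(build_history(history))  # only the user turn survives
```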