CCockrum committed on
Commit
f6bb49b
·
verified ·
1 Parent(s): ad1c148

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -12
app.py CHANGED
@@ -20,14 +20,14 @@ st.set_page_config(page_title="HAL - NASA ChatBot", page_icon="🚀")
20
 
21
# --- Initialize Session State Variables ---
# Seed every session-state key with its default the first time the app runs.
_session_defaults = {
    # The initial greeting is stored in chat_history
    "chat_history": [{"role": "assistant", "content": "Hello! How can I assist you today?"}],
    "response_ready": False,  # Tracks whether HAL has responded
    "follow_up": "",          # Stores follow-up question
}
for _key, _default in _session_defaults.items():
    if _key not in st.session_state:
        st.session_state[_key] = _default

# --- Set Up Model & API Functions ---
model_id = "mistralai/Mistral-7B-Instruct-v0.3"
@@ -70,23 +70,27 @@ def predict_action(user_text):
70
def generate_follow_up(user_text):
    """Return a short, friendly follow-up question related to *user_text*.

    Builds a one-shot instruction prompt around the user's question and asks
    the Hugging Face inference endpoint for a brief conversational follow-up;
    the reply is stripped of surrounding whitespace before being returned.
    """
    # Prompt wording is deliberately explicit about brevity so the model
    # does not ramble (runtime text kept verbatim).
    follow_up_prompt = (
        f"Given the user's question: '{user_text}', generate a SHORT and SIMPLE follow-up question. "
        "Make it conversational and friendly. Example: 'Would you like to learn more about the six types of quarks?' "
        "Do NOT provide long explanations—just ask a friendly follow-up question."
    )
    llm = get_llm_hf_inference(max_new_tokens=32, temperature=0.7)
    return llm.invoke(input=follow_up_prompt).strip()
81
 
82
  def get_response(system_message, chat_history, user_text, max_new_tokens=256):
83
  """
84
  Generates HAL's response in a friendly, conversational manner.
85
- This version filters out the initial greeting from chat_history to prevent repetition.
 
86
  """
87
  sentiment = analyze_sentiment(user_text)
88
  action = predict_action(user_text)
89
 
 
90
  if action == "nasa_info":
91
  nasa_url, nasa_title, nasa_explanation = get_nasa_apod()
92
  response = f"**{nasa_title}**\n\n{nasa_explanation}"
@@ -109,8 +113,8 @@ def get_response(system_message, chat_history, user_text, max_new_tokens=256):
109
  (
110
  "[INST] {system_message}\n\nCurrent Conversation:\n{chat_history}\n\n"
111
  "User: {user_text}.\n [/INST]\n"
112
- "AI: Please answer the user's question without repeating any previous greetings. "
113
- "Keep your response friendly and conversational, starting with a phrase like "
114
  "'Certainly!', 'Of course!', or 'Great question!'.\nHAL:"
115
  )
116
  )
@@ -119,11 +123,15 @@ def get_response(system_message, chat_history, user_text, max_new_tokens=256):
119
  response = chat.invoke(input=dict(system_message=system_message, user_text=user_text, chat_history=filtered_history))
120
  response = response.split("HAL:")[-1].strip()
121
 
 
122
  chat_history.append({'role': 'user', 'content': user_text})
123
  chat_history.append({'role': 'assistant', 'content': response})
124
 
 
125
  if sentiment == "NEGATIVE":
126
- response = "I'm here to help. Let me know what I can do for you. 😊"
 
 
127
 
128
  follow_up = generate_follow_up(user_text)
129
  chat_history.append({'role': 'assistant', 'content': follow_up})
@@ -189,7 +197,7 @@ if user_input:
189
  st.session_state.follow_up = follow_up
190
  st.session_state.response_ready = True
191
 
192
- # Now render the entire chat history after processing new input.
193
  st.markdown("<div class='container'>", unsafe_allow_html=True)
194
  for message in st.session_state.chat_history:
195
  if message["role"] == "user":
@@ -197,4 +205,3 @@ for message in st.session_state.chat_history:
197
  else:
198
  st.markdown(f"<div class='assistant-msg'><strong>HAL:</strong> {message['content']}</div>", unsafe_allow_html=True)
199
  st.markdown("</div>", unsafe_allow_html=True)
200
-
 
20
 
21
# --- Initialize Session State Variables ---
# Lazily create each piece of session state on the app's first run.
_state_defaults = (
    # The initial greeting is stored in chat_history.
    ("chat_history", [{"role": "assistant", "content": "Hello! How can I assist you today?"}]),
    ("response_ready", False),  # Tracks whether HAL has responded
    ("follow_up", ""),          # Stores the follow-up question
)
for _name, _value in _state_defaults:
    if _name not in st.session_state:
        st.session_state[_name] = _value

# --- Set Up Model & API Functions ---
model_id = "mistralai/Mistral-7B-Instruct-v0.3"
 
70
def generate_follow_up(user_text):
    """Produce a concise, conversational follow-up question for *user_text*.

    The prompt steers the model toward a varied, on-topic follow-up; the raw
    model reply is trimmed of surrounding whitespace and returned as-is.
    """
    # Assemble the instruction prompt piecewise (runtime text kept verbatim).
    prompt_parts = [
        f"Given the user's question: '{user_text}', generate a friendly, varied follow-up question. ",
        "For example, if the question is about quarks, you might ask, ",
        "'Would you like to know more about the six types of quarks, or is there another topic you'd like to explore?' ",
        "Make sure the follow-up is concise and conversational.",
    ]
    inference = get_llm_hf_inference(max_new_tokens=64, temperature=0.7)
    return inference.invoke(input="".join(prompt_parts)).strip()
83
 
84
  def get_response(system_message, chat_history, user_text, max_new_tokens=256):
85
  """
86
  Generates HAL's response in a friendly, conversational manner.
87
+ It uses sentiment analysis to adjust the tone if the user's input is negative.
88
+ The function also generates a follow-up question.
89
  """
90
  sentiment = analyze_sentiment(user_text)
91
  action = predict_action(user_text)
92
 
93
+ # Handle NASA-related queries separately
94
  if action == "nasa_info":
95
  nasa_url, nasa_title, nasa_explanation = get_nasa_apod()
96
  response = f"**{nasa_title}**\n\n{nasa_explanation}"
 
113
  (
114
  "[INST] {system_message}\n\nCurrent Conversation:\n{chat_history}\n\n"
115
  "User: {user_text}.\n [/INST]\n"
116
+ "AI: Please answer the user's question without repeating previous greetings. "
117
+ "Keep your response friendly and conversational, beginning with a phrase like "
118
  "'Certainly!', 'Of course!', or 'Great question!'.\nHAL:"
119
  )
120
  )
 
123
  response = chat.invoke(input=dict(system_message=system_message, user_text=user_text, chat_history=filtered_history))
124
  response = response.split("HAL:")[-1].strip()
125
 
126
+ # Append user and assistant messages to the chat history
127
  chat_history.append({'role': 'user', 'content': user_text})
128
  chat_history.append({'role': 'assistant', 'content': response})
129
 
130
+ # Adjust the response if the sentiment is negative
131
  if sentiment == "NEGATIVE":
132
+ response = "I'm sorry you're feeling this way. I'm here to help. What can I do to assist you further?"
133
+ # Update the last assistant message in chat_history with the empathetic response
134
+ chat_history[-1]['content'] = response
135
 
136
  follow_up = generate_follow_up(user_text)
137
  chat_history.append({'role': 'assistant', 'content': follow_up})
 
197
  st.session_state.follow_up = follow_up
198
  st.session_state.response_ready = True
199
 
200
+ # Render the entire chat history after processing new input.
201
  st.markdown("<div class='container'>", unsafe_allow_html=True)
202
  for message in st.session_state.chat_history:
203
  if message["role"] == "user":
 
205
  else:
206
  st.markdown(f"<div class='assistant-msg'><strong>HAL:</strong> {message['content']}</div>", unsafe_allow_html=True)
207
  st.markdown("</div>", unsafe_allow_html=True)