CCockrum committed on
Commit
ad1c148
·
verified ·
1 Parent(s): fc42bd4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +21 -20
app.py CHANGED
@@ -20,7 +20,7 @@ st.set_page_config(page_title="HAL - NASA ChatBot", page_icon="🚀")
20
 
21
  # --- Initialize Session State Variables ---
22
  if "chat_history" not in st.session_state:
23
- # Initial greeting stored in chat history
24
  st.session_state.chat_history = [{"role": "assistant", "content": "Hello! How can I assist you today?"}]
25
 
26
  if "response_ready" not in st.session_state:
@@ -40,7 +40,7 @@ sentiment_analyzer = pipeline(
40
  )
41
 
42
  def get_llm_hf_inference(model_id=model_id, max_new_tokens=128, temperature=0.7):
43
- # Specify task="text-generation" so that the endpoint uses the right model function.
44
  return HuggingFaceEndpoint(
45
  repo_id=model_id,
46
  max_new_tokens=max_new_tokens,
@@ -82,7 +82,7 @@ def generate_follow_up(user_text):
82
  def get_response(system_message, chat_history, user_text, max_new_tokens=256):
83
  """
84
  Generates HAL's response in a friendly, conversational manner.
85
- The prompt instructs the model to ignore previous greetings and focus on the new user question.
86
  """
87
  sentiment = analyze_sentiment(user_text)
88
  action = predict_action(user_text)
@@ -92,14 +92,19 @@ def get_response(system_message, chat_history, user_text, max_new_tokens=256):
92
  response = f"**{nasa_title}**\n\n{nasa_explanation}"
93
  chat_history.append({'role': 'user', 'content': user_text})
94
  chat_history.append({'role': 'assistant', 'content': response})
95
-
96
  follow_up = generate_follow_up(user_text)
97
  chat_history.append({'role': 'assistant', 'content': follow_up})
98
  return response, follow_up, chat_history, nasa_url
99
 
100
  hf = get_llm_hf_inference(max_new_tokens=max_new_tokens, temperature=0.9)
101
 
102
- # Updated prompt: Instruct the model not to repeat previous greetings.
 
 
 
 
 
 
103
  prompt = PromptTemplate.from_template(
104
  (
105
  "[INST] {system_message}\n\nCurrent Conversation:\n{chat_history}\n\n"
@@ -109,8 +114,9 @@ def get_response(system_message, chat_history, user_text, max_new_tokens=256):
109
  "'Certainly!', 'Of course!', or 'Great question!'.\nHAL:"
110
  )
111
  )
 
112
  chat = prompt | hf.bind(skip_prompt=True) | StrOutputParser(output_key='content')
113
- response = chat.invoke(input=dict(system_message=system_message, user_text=user_text, chat_history=chat_history))
114
  response = response.split("HAL:")[-1].strip()
115
 
116
  chat_history.append({'role': 'user', 'content': user_text})
@@ -167,15 +173,6 @@ st.markdown("""
167
  </style>
168
  """, unsafe_allow_html=True)
169
 
170
- # Chat History Display
171
- st.markdown("<div class='container'>", unsafe_allow_html=True)
172
- for message in st.session_state.chat_history:
173
- if message["role"] == "user":
174
- st.markdown(f"<div class='user-msg'><strong>You:</strong> {message['content']}</div>", unsafe_allow_html=True)
175
- else:
176
- st.markdown(f"<div class='assistant-msg'><strong>HAL:</strong> {message['content']}</div>", unsafe_allow_html=True)
177
- st.markdown("</div>", unsafe_allow_html=True)
178
-
179
  # --- Single Input Box for Both Initial and Follow-Up Messages ---
180
  user_input = st.chat_input("Type your message here...") # Only ONE chat_input()
181
 
@@ -186,14 +183,18 @@ if user_input:
186
  chat_history=st.session_state.chat_history
187
  )
188
 
189
- st.markdown(f"<div class='assistant-msg'><strong>HAL:</strong> {response}</div>", unsafe_allow_html=True)
190
-
191
  if image_url:
192
  st.image(image_url, caption="NASA Image of the Day")
193
 
194
  st.session_state.follow_up = follow_up
195
  st.session_state.response_ready = True
196
 
197
- if st.session_state.response_ready and st.session_state.follow_up:
198
- st.markdown(f"<div class='assistant-msg'><strong>HAL:</strong> {st.session_state.follow_up}</div>", unsafe_allow_html=True)
199
- st.session_state.response_ready = False
 
 
 
 
 
 
 
20
 
21
  # --- Initialize Session State Variables ---
22
  if "chat_history" not in st.session_state:
23
+ # The initial greeting is stored in chat_history
24
  st.session_state.chat_history = [{"role": "assistant", "content": "Hello! How can I assist you today?"}]
25
 
26
  if "response_ready" not in st.session_state:
 
40
  )
41
 
42
  def get_llm_hf_inference(model_id=model_id, max_new_tokens=128, temperature=0.7):
43
+ # Specify task="text-generation" so that the endpoint uses the correct function.
44
  return HuggingFaceEndpoint(
45
  repo_id=model_id,
46
  max_new_tokens=max_new_tokens,
 
82
  def get_response(system_message, chat_history, user_text, max_new_tokens=256):
83
  """
84
  Generates HAL's response in a friendly, conversational manner.
85
+ This version filters out the initial greeting from chat_history to prevent repetition.
86
  """
87
  sentiment = analyze_sentiment(user_text)
88
  action = predict_action(user_text)
 
92
  response = f"**{nasa_title}**\n\n{nasa_explanation}"
93
  chat_history.append({'role': 'user', 'content': user_text})
94
  chat_history.append({'role': 'assistant', 'content': response})
 
95
  follow_up = generate_follow_up(user_text)
96
  chat_history.append({'role': 'assistant', 'content': follow_up})
97
  return response, follow_up, chat_history, nasa_url
98
 
99
  hf = get_llm_hf_inference(max_new_tokens=max_new_tokens, temperature=0.9)
100
 
101
+ # Build a filtered conversation history excluding the initial greeting
102
+ filtered_history = ""
103
+ for message in chat_history:
104
+ if message["role"] == "assistant" and message["content"].strip() == "Hello! How can I assist you today?":
105
+ continue
106
+ filtered_history += f"{message['role']}: {message['content']}\n"
107
+
108
  prompt = PromptTemplate.from_template(
109
  (
110
  "[INST] {system_message}\n\nCurrent Conversation:\n{chat_history}\n\n"
 
114
  "'Certainly!', 'Of course!', or 'Great question!'.\nHAL:"
115
  )
116
  )
117
+
118
  chat = prompt | hf.bind(skip_prompt=True) | StrOutputParser(output_key='content')
119
+ response = chat.invoke(input=dict(system_message=system_message, user_text=user_text, chat_history=filtered_history))
120
  response = response.split("HAL:")[-1].strip()
121
 
122
  chat_history.append({'role': 'user', 'content': user_text})
 
173
  </style>
174
  """, unsafe_allow_html=True)
175
 
 
 
 
 
 
 
 
 
 
176
  # --- Single Input Box for Both Initial and Follow-Up Messages ---
177
  user_input = st.chat_input("Type your message here...") # Only ONE chat_input()
178
 
 
183
  chat_history=st.session_state.chat_history
184
  )
185
 
 
 
186
  if image_url:
187
  st.image(image_url, caption="NASA Image of the Day")
188
 
189
  st.session_state.follow_up = follow_up
190
  st.session_state.response_ready = True
191
 
192
+ # Now render the entire chat history after processing new input.
193
+ st.markdown("<div class='container'>", unsafe_allow_html=True)
194
+ for message in st.session_state.chat_history:
195
+ if message["role"] == "user":
196
+ st.markdown(f"<div class='user-msg'><strong>You:</strong> {message['content']}</div>", unsafe_allow_html=True)
197
+ else:
198
+ st.markdown(f"<div class='assistant-msg'><strong>HAL:</strong> {message['content']}</div>", unsafe_allow_html=True)
199
+ st.markdown("</div>", unsafe_allow_html=True)
200
+