CCockrum committed on
Commit
74bfc30
Β·
verified Β·
1 Parent(s): 63c57e9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +45 -16
app.py CHANGED
@@ -104,7 +104,20 @@ def generate_follow_up(user_text):
104
  return cleaned_output
105
 
106
  # βœ… Main Response Function
107
- def get_response(system_message, chat_history, user_text, max_new_tokens=800):
 
 
 
 
 
 
 
 
 
 
 
 
 
108
  action = predict_action(user_text)
109
 
110
  # βœ… Handle NASA-Specific Queries
@@ -112,18 +125,24 @@ def get_response(system_message, chat_history, user_text, max_new_tokens=800):
112
  nasa_url, nasa_title, nasa_explanation = get_nasa_apod()
113
  response = f"**{nasa_title}**\n\n{nasa_explanation}"
114
  follow_up = generate_follow_up(user_text)
115
- chat_history.extend([
116
- {'role': 'user', 'content': user_text},
117
- {'role': 'assistant', 'content': response},
118
- {'role': 'assistant', 'content': follow_up}
119
- ])
120
- return response, follow_up, chat_history, nasa_url
121
 
122
- # βœ… Invoke Hugging Face Model
123
- hf = get_llm_hf_inference(max_new_tokens=max_new_tokens, temperature=0.7)
 
 
 
 
 
124
 
125
- filtered_history = "\n".join(f"{msg['role']}: {msg['content']}" for msg in chat_history)
126
 
 
 
 
 
 
 
 
127
  prompt = PromptTemplate.from_template(
128
  "[INST] You are a helpful AI assistant.\n\nCurrent Conversation:\n{chat_history}\n\n"
129
  "User: {user_text}.\n [/INST]\n"
@@ -133,23 +152,33 @@ def get_response(system_message, chat_history, user_text, max_new_tokens=800):
133
  "\nHAL:"
134
  )
135
 
 
136
  chat = prompt | hf.bind(skip_prompt=True) | StrOutputParser(output_key='content')
137
- response = chat.invoke(input=dict(system_message=system_message, user_text=user_text, chat_history=filtered_history))
 
 
138
  response = response.split("HAL:")[-1].strip() if "HAL:" in response else response.strip()
139
 
 
140
  response = ensure_english(response)
141
 
 
142
  if not response:
143
  response = "I'm sorry, but I couldn't generate a response. Can you rephrase your question?"
144
 
 
145
  follow_up = generate_follow_up(user_text)
146
 
147
- # βœ… Preserve conversation history
148
- st.session_state.chat_history.append({'role': 'user', 'content': user_text})
149
- st.session_state.chat_history.append({'role': 'assistant', 'content': response})
150
- st.session_state.chat_history.append({'role': 'assistant', 'content': follow_up})
 
 
 
 
 
151
 
152
- return response, follow_up, chat_history, None
153
 
154
  # βœ… Streamlit UI
155
  st.title("πŸš€ HAL - NASA AI Assistant")
 
104
  return cleaned_output
105
 
106
  # βœ… Main Response Function
107
# βœ… Main Response Function
def get_response(system_message, user_text, max_new_tokens=800):
    """
    Generate a chatbot response, update the shared conversation history,
    and produce a follow-up question.

    Parameters
    ----------
    system_message : str
        System prompt passed through to the model template.
    user_text : str
        The user's latest message.
    max_new_tokens : int, optional
        Generation budget for the Hugging Face model (default 800).

    Returns
    -------
    tuple
        (response, follow_up, chat_history, media_url) — media_url is the
        NASA APOD image URL for NASA queries, otherwise None.
    """
    # Ensure the chat history exists in Streamlit session state before use.
    if "chat_history" not in st.session_state:
        st.session_state.chat_history = [
            {"role": "assistant", "content": "Hello! How can I assist you today?"}
        ]

    # Alias the session-state list; appends below mutate session state directly.
    chat_history = st.session_state.chat_history

    # Detect intent (NASA query vs. general AI chat).
    action = predict_action(user_text)

    # βœ… Handle NASA-specific queries (Astronomy Picture of the Day).
    # NOTE(review): this condition line was elided in the diff view; the
    # branch body clearly calls get_nasa_apod() — confirm the exact
    # action label against the full file.
    if action == "nasa_apod":
        nasa_url, nasa_title, nasa_explanation = get_nasa_apod()
        response = f"**{nasa_title}**\n\n{nasa_explanation}"
        follow_up = generate_follow_up(user_text)

        # Record the exchange in one call instead of three appends.
        chat_history.extend([
            {'role': 'user', 'content': user_text},
            {'role': 'assistant', 'content': response},
            {'role': 'assistant', 'content': follow_up},
        ])
        return response, follow_up, chat_history, nasa_url

    # Format the conversation history for the model prompt.
    formatted_chat_history = "\n".join(
        f"{msg['role']}: {msg['content']}" for msg in chat_history
    )

    # Invoke the Hugging Face model endpoint.
    hf = get_llm_hf_inference(max_new_tokens=max_new_tokens, temperature=0.9)

    # Define the chat prompt template.
    prompt = PromptTemplate.from_template(
        "[INST] You are a helpful AI assistant.\n\nCurrent Conversation:\n{chat_history}\n\n"
        "User: {user_text}.\n [/INST]\n"
        # NOTE(review): the middle of this template was elided in the diff
        # view; invoke() passes system_message, so the hidden portion
        # presumably interpolates {system_message} — confirm against the
        # full file before relying on this reconstruction.
        "{system_message}"
        "\nHAL:"
    )

    # Generate the AI response.
    chat = prompt | hf.bind(skip_prompt=True) | StrOutputParser(output_key='content')
    response = chat.invoke(input=dict(
        system_message=system_message,
        user_text=user_text,
        chat_history=formatted_chat_history,
    ))

    # Keep only the text after the final "HAL:" marker, if present.
    response = response.split("HAL:")[-1].strip() if "HAL:" in response else response.strip()

    # Ensure the response is in English.
    response = ensure_english(response)

    # Fallback when the model returns nothing usable.
    if not response:
        response = "I'm sorry, but I couldn't generate a response. Can you rephrase your question?"

    # Generate a follow-up question.
    follow_up = generate_follow_up(user_text)

    # Append the exchange to the shared history (mutates session state,
    # since chat_history aliases st.session_state.chat_history — the
    # original's re-assignment back into session state was a no-op and
    # has been dropped).
    chat_history.extend([
        {'role': 'user', 'content': user_text},
        {'role': 'assistant', 'content': response},
        {'role': 'assistant', 'content': follow_up},
    ])

    return response, follow_up, chat_history, None  # Always return 4 values
181
 
 
182
 
183
  # βœ… Streamlit UI
184
  st.title("πŸš€ HAL - NASA AI Assistant")