CCockrum committed on
Commit
b0211dd
·
verified ·
1 Parent(s): 996e935

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +29 -12
app.py CHANGED
@@ -8,7 +8,7 @@ from transformers import pipeline
8
  from config import NASA_API_KEY # Ensure this file exists with your NASA API Key
9
 
10
  # Set up Streamlit UI
11
- st.set_page_config(page_title="HAL - A NASA ChatBot", page_icon="🚀")
12
 
13
  # --- Ensure Session State Variables are Initialized ---
14
  if "chat_history" not in st.session_state:
@@ -26,7 +26,7 @@ model_id = "mistralai/Mistral-7B-Instruct-v0.3"
26
  # Initialize sentiment analysis pipeline
27
  sentiment_analyzer = pipeline("sentiment-analysis")
28
 
29
- def get_llm_hf_inference(model_id=model_id, max_new_tokens=128, temperature=0.1):
30
  return HuggingFaceEndpoint(
31
  repo_id=model_id,
32
  max_new_tokens=max_new_tokens,
@@ -53,14 +53,24 @@ def predict_action(user_text):
53
  return "general_query"
54
 
55
  def generate_follow_up(user_text):
 
 
 
56
  prompt_text = (
57
- f"Based on the user's message: '{user_text}', suggest a natural follow-up question "
58
- "to keep the conversation engaging."
 
 
 
59
  )
60
- hf = get_llm_hf_inference(max_new_tokens=64, temperature=0.7)
 
61
  return hf.invoke(input=prompt_text).strip()
62
 
63
  def get_response(system_message, chat_history, user_text, max_new_tokens=256):
 
 
 
64
  sentiment = analyze_sentiment(user_text)
65
  action = predict_action(user_text)
66
 
@@ -74,20 +84,30 @@ def get_response(system_message, chat_history, user_text, max_new_tokens=256):
74
  chat_history.append({'role': 'assistant', 'content': follow_up})
75
  return response, follow_up, chat_history, nasa_url
76
 
77
- hf = get_llm_hf_inference(max_new_tokens=max_new_tokens, temperature=0.1)
78
 
79
  prompt = PromptTemplate.from_template(
80
- "[INST] {system_message}\n\nCurrent Conversation:\n{chat_history}\n\nUser: {user_text}.\n [/INST]\nAI:"
 
 
 
 
 
 
 
 
81
  )
 
82
  chat = prompt | hf.bind(skip_prompt=True) | StrOutputParser(output_key='content')
83
  response = chat.invoke(input=dict(system_message=system_message, user_text=user_text, chat_history=chat_history))
84
- response = response.split("AI:")[-1]
85
 
86
  chat_history.append({'role': 'user', 'content': user_text})
87
  chat_history.append({'role': 'assistant', 'content': response})
88
 
 
89
  if sentiment == "NEGATIVE":
90
- response += "\n😞 I'm sorry to hear that. How can I assist you further?"
91
 
92
  follow_up = generate_follow_up(user_text)
93
  chat_history.append({'role': 'assistant', 'content': follow_up})
@@ -156,14 +176,11 @@ if user_input:
156
  chat_history=st.session_state.chat_history
157
  )
158
 
159
- # Display HAL's response
160
  st.markdown(f"<div class='assistant-msg'><strong>HAL:</strong> {response}</div>", unsafe_allow_html=True)
161
 
162
- # Display NASA image if available
163
  if image_url:
164
  st.image(image_url, caption="NASA Image of the Day")
165
 
166
- # Store follow-up question in session state
167
  st.session_state.follow_up = follow_up
168
  st.session_state.response_ready = True # Enables follow-up response cycle
169
 
 
8
  from config import NASA_API_KEY # Ensure this file exists with your NASA API Key
9
 
10
  # Set up Streamlit UI
11
+ st.set_page_config(page_title="HAL - NASA ChatBot", page_icon="🚀")
12
 
13
  # --- Ensure Session State Variables are Initialized ---
14
  if "chat_history" not in st.session_state:
 
26
  # Initialize sentiment analysis pipeline
27
  sentiment_analyzer = pipeline("sentiment-analysis")
28
 
29
+ def get_llm_hf_inference(model_id=model_id, max_new_tokens=128, temperature=0.7):
30
  return HuggingFaceEndpoint(
31
  repo_id=model_id,
32
  max_new_tokens=max_new_tokens,
 
53
  return "general_query"
54
 
55
  def generate_follow_up(user_text):
56
+ """
57
+ Generates a concise and conversational follow-up question related to the user's input.
58
+ """
59
  prompt_text = (
60
+ f"Given the user's question: '{user_text}', generate a single friendly follow-up question. "
61
+ "Make it short, conversational, and natural—like a human would ask. "
62
+ "Example: If the user asks 'What is a quark?', respond with something like "
63
+ "'Would you like to learn about the six types of quarks?' "
64
+ "Do NOT include phrases like 'A natural follow-up question could be'."
65
  )
66
+
67
+ hf = get_llm_hf_inference(max_new_tokens=32, temperature=0.7)
68
  return hf.invoke(input=prompt_text).strip()
69
 
70
  def get_response(system_message, chat_history, user_text, max_new_tokens=256):
71
+ """
72
+ Generates HAL's response, making it more conversational and engaging.
73
+ """
74
  sentiment = analyze_sentiment(user_text)
75
  action = predict_action(user_text)
76
 
 
84
  chat_history.append({'role': 'assistant', 'content': follow_up})
85
  return response, follow_up, chat_history, nasa_url
86
 
87
+ hf = get_llm_hf_inference(max_new_tokens=max_new_tokens, temperature=0.9)
88
 
89
  prompt = PromptTemplate.from_template(
90
+ (
91
+ "[INST] {system_message}"
92
+ "\nCurrent Conversation:\n{chat_history}\n\n"
93
+ "\nUser: {user_text}.\n [/INST]"
94
+ "\nAI: Keep responses conversational and engaging. Start with a friendly phrase like "
95
+ "'Certainly!', 'Of course!', or 'Great question!' before answering."
96
+ " Keep responses concise but engaging."
97
+ "\nHAL:"
98
+ )
99
  )
100
+
101
  chat = prompt | hf.bind(skip_prompt=True) | StrOutputParser(output_key='content')
102
  response = chat.invoke(input=dict(system_message=system_message, user_text=user_text, chat_history=chat_history))
103
+ response = response.split("HAL:")[-1].strip()
104
 
105
  chat_history.append({'role': 'user', 'content': user_text})
106
  chat_history.append({'role': 'assistant', 'content': response})
107
 
108
+ # ✅ Removes unnecessary sentiment apology
109
  if sentiment == "NEGATIVE":
110
+ response += "" # Keeps response normal instead of adding "I'm sorry to hear that"
111
 
112
  follow_up = generate_follow_up(user_text)
113
  chat_history.append({'role': 'assistant', 'content': follow_up})
 
176
  chat_history=st.session_state.chat_history
177
  )
178
 
 
179
  st.markdown(f"<div class='assistant-msg'><strong>HAL:</strong> {response}</div>", unsafe_allow_html=True)
180
 
 
181
  if image_url:
182
  st.image(image_url, caption="NASA Image of the Day")
183
 
 
184
  st.session_state.follow_up = follow_up
185
  st.session_state.response_ready = True # Enables follow-up response cycle
186