CCockrum committed (verified)
Commit 233eda7 · 1 Parent(s): e7b8136

Update app.py

Files changed (1):
1. app.py +44 -54
app.py CHANGED
@@ -7,13 +7,22 @@ from langchain_core.output_parsers import StrOutputParser
 from transformers import pipeline
 from config import NASA_API_KEY # Ensure this file exists with your NASA API Key
 
-# Model settings
+# Set up Streamlit UI
+st.set_page_config(page_title="HAL - NASA ChatBot", page_icon="🚀")
+
+# --- Ensure Session State Variables are Initialized ---
+if "chat_history" not in st.session_state:
+    st.session_state.chat_history = [{"role": "assistant", "content": "Hello! How can I assist you today?"}]
+
+if "response_ready" not in st.session_state:
+    st.session_state.response_ready = False # Tracks whether HAL has responded
+
+# --- Set Up Model & API Functions ---
 model_id = "mistralai/Mistral-7B-Instruct-v0.3"
 
 # Initialize sentiment analysis pipeline
 sentiment_analyzer = pipeline("sentiment-analysis")
 
-# Function to initialize Hugging Face model
 def get_llm_hf_inference(model_id=model_id, max_new_tokens=128, temperature=0.1):
     return HuggingFaceEndpoint(
         repo_id=model_id,
@@ -22,7 +31,6 @@ def get_llm_hf_inference(model_id=model_id, max_new_tokens=128, temperature=0.1)
         token=os.getenv("HF_TOKEN") # Hugging Face API Token
     )
 
-# Function to get NASA Astronomy Picture of the Day
 def get_nasa_apod():
     url = f"https://api.nasa.gov/planetary/apod?api_key={NASA_API_KEY}"
     response = requests.get(url)
@@ -32,18 +40,15 @@ def get_nasa_apod():
     else:
         return "", "NASA Data Unavailable", "I couldn't fetch data from NASA right now. Please try again later."
 
-# Function to analyze sentiment of user input
 def analyze_sentiment(user_text):
     result = sentiment_analyzer(user_text)[0]
     return result['label']
 
-# Function to predict user intent
 def predict_action(user_text):
     if "NASA" in user_text or "space" in user_text:
         return "nasa_info"
     return "general_query"
 
-# Function to generate a follow-up question
 def generate_follow_up(user_text):
     prompt_text = (
         f"Based on the user's message: '{user_text}', suggest a natural follow-up question "
@@ -52,7 +57,6 @@ def generate_follow_up(user_text):
     hf = get_llm_hf_inference(max_new_tokens=64, temperature=0.7)
     return hf.invoke(input=prompt_text).strip()
 
-# Function to process user input and generate a response
 def get_response(system_message, chat_history, user_text, max_new_tokens=256):
     sentiment = analyze_sentiment(user_text)
     action = predict_action(user_text)
@@ -87,26 +91,21 @@ def get_response(system_message, chat_history, user_text, max_new_tokens=256):
 
     return response, follow_up, chat_history, None
 
-# --- Streamlit UI Setup ---
-st.set_page_config(page_title="NASA ChatBot", page_icon="🚀")
-
+# --- Chat UI ---
 st.title("🚀 HAL - Your NASA AI Assistant")
 st.markdown("🌌 *Ask me about space, NASA, and beyond!*")
 
-# Ensure chat history is initialized
-if "chat_history" not in st.session_state:
-    st.session_state.chat_history = [{"role": "assistant", "content": "Hello! How can I assist you today?"}]
-
-# Sidebar for chat reset
+# Sidebar: Reset Chat
 if st.sidebar.button("Reset Chat"):
     st.session_state.chat_history = [{"role": "assistant", "content": "Hello! How can I assist you today?"}]
+    st.session_state.response_ready = False
     st.experimental_rerun()
 
-# Chat Display Styling
+# Custom Chat Styling
 st.markdown("""
     <style>
    .user-msg {
-        background-color: #0078D7; /* Dark Blue */
+        background-color: #0078D7;
        color: white;
        padding: 10px;
        border-radius: 10px;
@@ -115,7 +114,7 @@ st.markdown("""
        max-width: 80%;
    }
    .assistant-msg {
-        background-color: #333333; /* Dark Gray */
+        background-color: #333333;
        color: white;
        padding: 10px;
        border-radius: 10px;
@@ -134,56 +133,47 @@ st.markdown("""
    </style>
 """, unsafe_allow_html=True)
 
-# Chat Display
+# Chat History Display
 st.markdown("<div class='container'>", unsafe_allow_html=True)
-
 for message in st.session_state.chat_history:
     if message["role"] == "user":
         st.markdown(f"<div class='user-msg'><strong>You:</strong> {message['content']}</div>", unsafe_allow_html=True)
     else:
         st.markdown(f"<div class='assistant-msg'><strong>HAL:</strong> {message['content']}</div>", unsafe_allow_html=True)
-
 st.markdown("</div>", unsafe_allow_html=True)
 
-# User Input Section
+# --- Input & Button Handling ---
 user_input = st.text_area("Type your message:", height=100)
 
-if st.button("Send"):
-    if user_input:
-        response, follow_up, st.session_state.chat_history, image_url = get_response(
-            system_message="You are a helpful AI assistant.",
-            user_text=user_input,
-            chat_history=st.session_state.chat_history
-        )
+send_button_placeholder = st.empty()
 
-        # Display response
-        st.markdown(f"<div class='assistant-msg'><strong>HAL:</strong> {response}</div>", unsafe_allow_html=True)
+if not st.session_state.response_ready:
+    if send_button_placeholder.button("Send"):
+        if user_input:
+            response, follow_up, st.session_state.chat_history, image_url = get_response(
+                system_message="You are a helpful AI assistant.",
+                user_text=user_input,
+                chat_history=st.session_state.chat_history
+            )
 
-        # Display NASA image if available
-        if image_url:
-            st.image(image_url, caption="NASA Image of the Day")
+            st.markdown(f"<div class='assistant-msg'><strong>HAL:</strong> {response}</div>", unsafe_allow_html=True)
 
-        # Display a conversational follow-up message
-        st.markdown(f"<div class='assistant-msg'><strong>HAL:</strong> {follow_up}</div>", unsafe_allow_html=True)
+            if image_url:
+                st.image(image_url, caption="NASA Image of the Day")
 
-        # Provide a text box for the user's next response
-        next_input = st.text_input("HAL is waiting for your response...")
+            st.session_state.response_ready = True # Hide Send button after response
 
-        # If the user enters a follow-up message
-        if next_input:
-            response, _, st.session_state.chat_history, _ = get_response(
-                system_message="You are a helpful AI assistant.",
-                user_text=next_input,
-                chat_history=st.session_state.chat_history
-            )
-            st.markdown(f"<div class='assistant-msg'><strong>HAL:</strong> {response}</div>", unsafe_allow_html=True)
+# Conversational Follow-up
+if st.session_state.response_ready:
+    st.markdown(f"<div class='assistant-msg'><strong>HAL:</strong> {follow_up}</div>", unsafe_allow_html=True)
 
+    next_input = st.text_input("HAL is waiting for your response...")
 
-if st.button("Continue"):
-    if selected_option:
-        response, _, st.session_state.chat_history, _ = get_response(
-            system_message="You are a helpful AI assistant.",
-            user_text=selected_option,
-            chat_history=st.session_state.chat_history
-        )
-        st.markdown(f"<div class='assistant-msg'><strong>HAL:</strong> {response}</div>", unsafe_allow_html=True)
+    if next_input:
+        response, _, st.session_state.chat_history, _ = get_response(
+            system_message="You are a helpful AI assistant.",
+            user_text=next_input,
+            chat_history=st.session_state.chat_history
+        )
+        st.markdown(f"<div class='assistant-msg'><strong>HAL:</strong> {response}</div>", unsafe_allow_html=True)
+        st.session_state.response_ready = False # Allow new input
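
A note on the new Send/follow-up flow above: follow_up and image_url are plain local variables assigned inside the Send branch, so they do not survive the rerun Streamlit performs on the next widget interaction. On a later run where st.session_state.response_ready is already True, the follow-up block references follow_up without it having been defined in that run. A minimal, standalone sketch of one way to carry such values across reruns by parking them in st.session_state (not the committed code; the follow_up_text key and the hard-coded reply are illustrative):

# Sketch only: persisting per-response values across Streamlit reruns.
import streamlit as st

if "response_ready" not in st.session_state:
    st.session_state.response_ready = False

user_input = st.text_area("Type your message:", height=100)

if not st.session_state.response_ready:
    if st.button("Send") and user_input:
        # In app.py this value would come from get_response(); hard-coded here.
        st.session_state.follow_up_text = f"You said: {user_input}. Want to hear more?"
        st.session_state.response_ready = True

if st.session_state.response_ready:
    # Reading from session_state avoids relying on locals from a previous run.
    st.write(st.session_state.follow_up_text)
    if st.text_input("Your reply..."):
        st.session_state.response_ready = False

Separately, st.experimental_rerun() in the Reset Chat handler has been superseded by st.rerun() in recent Streamlit releases, so that call may need updating depending on the Streamlit version the Space pins.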
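The success branch of get_nasa_apod() is elided from the hunk above; for reference, the APOD endpoint returns JSON whose url, title, and explanation fields map naturally onto the three-element tuple that the failure branch returns. A standalone sketch under that assumption (fetch_apod_sketch is an illustrative name, not the committed function):

# Sketch only: an APOD fetch returning the same (image_url, title, message)
# shape as the failure branch shown in the diff. Not the committed implementation.
import requests

def fetch_apod_sketch(api_key: str, timeout: float = 10.0):
    url = f"https://api.nasa.gov/planetary/apod?api_key={api_key}"
    try:
        response = requests.get(url, timeout=timeout)
        response.raise_for_status()
    except requests.RequestException:
        return "", "NASA Data Unavailable", "I couldn't fetch data from NASA right now. Please try again later."
    data = response.json()
    # APOD sometimes serves a video; only pass a URL along for image media.
    image_url = data.get("url", "") if data.get("media_type") == "image" else ""
    return image_url, data.get("title", ""), data.get("explanation", "")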