CCockrum committed on
Commit
3936db5
·
verified Β·
1 Parent(s): 6036241

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +14 -162
app.py CHANGED
@@ -11,29 +11,32 @@ from langdetect import detect # Ensure this package is installed
11
 
12
  # βœ… Environment Variables
13
  HF_TOKEN = os.getenv("HF_TOKEN")
14
- if HF_TOKEN is None:
 
 
15
  raise ValueError("HF_TOKEN is not set. Please add it to your environment variables.")
16
 
17
- NASA_API_KEY = os.getenv("NASA_API_KEY")
18
- if NASA_API_KEY is None:
19
  raise ValueError("NASA_API_KEY is not set. Please add it to your environment variables.")
20
 
21
  # βœ… Set Up Streamlit
22
  st.set_page_config(page_title="HAL - NASA ChatBot", page_icon="πŸš€")
23
 
24
- # βœ… Ensure Session State Variables
25
  if "chat_history" not in st.session_state:
26
- st.session_state.chat_history = [{"role": "assistant", "content": "Hello! How can I assist you today?"}]
 
27
  if "response_ready" not in st.session_state:
28
  st.session_state.response_ready = False
 
29
  if "follow_up" not in st.session_state:
30
  st.session_state.follow_up = ""
31
 
32
  # βœ… Model Configuration
33
  model_id = "mistralai/Mistral-7B-Instruct-v0.3"
34
 
35
- # βœ… Initialize Hugging Face Model
36
- def get_llm_hf_inference(model_id=model_id, max_new_tokens=1024, temperature=0.7):
37
  return HuggingFaceEndpoint(
38
  repo_id=model_id,
39
  max_new_tokens=max_new_tokens,
@@ -52,10 +55,8 @@ def get_nasa_apod():
52
  return "", "NASA Data Unavailable", "I couldn't fetch data from NASA right now."
53
 
54
# Sentiment analysis: SST-2 fine-tuned DistilBERT via the transformers pipeline API.
sentiment_analyzer = pipeline("sentiment-analysis",
                              model="distilbert/distilbert-base-uncased-finetuned-sst-2-english")
59
 
60
  def analyze_sentiment(user_text):
61
  result = sentiment_analyzer(user_text)[0]
@@ -67,154 +68,5 @@ def predict_action(user_text):
67
  return "nasa_info"
68
  return "general_query"
69
 
70
# Follow-up question generation.
# NOTE(review): this definition is shadowed by a second `generate_follow_up`
# defined later in this file; only the later definition takes effect at runtime.
def generate_follow_up(user_text):
    """Return a short, LLM-generated follow-up prompt based on *user_text*."""
    llm = get_llm_hf_inference(max_new_tokens=80, temperature=0.9)
    reply = llm.invoke(input=f"Based on: '{user_text}', generate a concise, friendly follow-up.").strip()
    if reply:
        return reply
    # Canned fallback when the model returns an empty string.
    return "Would you like to explore this topic further?"
76
-
77
# Ensure English responses.
def ensure_english(text):
    """Return *text* unchanged when it is English, else a user-facing warning.

    Relies on langdetect's `detect`; detection failures (e.g. empty or
    ambiguous input raise from langdetect) are reported to the user rather
    than crashing the app.
    """
    try:
        detected_lang = detect(text)
    # Fix: the original bare `except:` also trapped SystemExit and
    # KeyboardInterrupt; Exception is the widest clause that is safe here.
    except Exception:
        return "⚠️ Language detection failed. Please ask your question again."
    if detected_lang != "en":
        return "⚠️ Sorry, I only respond in English. Can you rephrase your question?"
    return text
86
-
87
# Every response gets a trailing follow-up question; this definition wins
# over the earlier one of the same name.
def generate_follow_up(user_text):
    """Generates a follow-up question to guide the user toward related topics or next steps."""
    follow_up_prompt = (
        f"Given the user's question: '{user_text}', generate a SHORT follow-up question "
        "suggesting either a related topic or asking if they need further help. "
        "Example: 'Would you like to explore quantum superposition or ask about another physics concept?' "
        "Keep it concise and engaging."
    )
    llm = get_llm_hf_inference(max_new_tokens=40, temperature=0.8)
    suggestion = llm.invoke(input=follow_up_prompt).strip()

    # Canned fallback in case of an empty model response.
    return suggestion or "Would you like to explore another related topic or ask about something else?"
101
-
102
# Main response function.
def get_response(system_message, chat_history, user_text, max_new_tokens=1024):
    """Produce HAL's reply plus a follow-up for *user_text*, appending both
    (and the user turn) to *chat_history* in place.

    Returns (response, follow_up, chat_history, image_url); image_url is the
    NASA APOD link for NASA-intent queries and None otherwise.
    """
    intent = predict_action(user_text)

    # NASA-specific queries short-circuit to the Astronomy Picture of the Day.
    if intent == "nasa_info":
        apod_url, apod_title, apod_explanation = get_nasa_apod()
        reply = f"**{apod_title}**\n\n{apod_explanation}"
        chat_history.append({'role': 'user', 'content': user_text})
        chat_history.append({'role': 'assistant', 'content': reply})
        follow_up = generate_follow_up(user_text)
        chat_history.append({'role': 'assistant', 'content': follow_up})
        return reply, follow_up, chat_history, apod_url

    # General queries go through the LLM.
    llm = get_llm_hf_inference(max_new_tokens=max_new_tokens, temperature=0.9)

    # Flatten the running conversation into a role-prefixed transcript.
    transcript = "\n".join(f"{msg['role']}: {msg['content']}" for msg in chat_history)

    prompt = PromptTemplate.from_template(
        "[INST] {system_message}\n\nCurrent Conversation:\n{chat_history}\n\n"
        "User: {user_text}.\n [/INST]\n"
        "AI: Provide a detailed explanation with depth. "
        "Use a conversational style, starting with 'Certainly!', 'Of course!', or 'Great question!'."
        "🚨 Answer **only in English**."
        "\nHAL:"
    )

    chain = prompt | llm.bind(skip_prompt=True) | StrOutputParser(output_key='content')
    reply = chain.invoke(
        input=dict(system_message=system_message, user_text=user_text, chat_history=transcript)
    )

    # Strip any echoed "HAL:" prefix the model may emit.
    if "HAL:" in reply:
        reply = reply.split("HAL:")[-1].strip()
    else:
        reply = reply.strip()

    # Replace non-English output with a user-facing warning.
    reply = ensure_english(reply)

    # Fallback when the model produced nothing usable.
    if not reply:
        reply = "I'm sorry, but I couldn't generate a response. Can you rephrase your question?"

    chat_history.append({'role': 'user', 'content': user_text})
    chat_history.append({'role': 'assistant', 'content': reply})

    follow_up = generate_follow_up(user_text)
    chat_history.append({'role': 'assistant', 'content': follow_up})

    return reply, follow_up, chat_history, None
151
-
152
# Streamlit UI.
st.title("πŸš€ HAL - NASA AI Assistant")

# Inject CSS so both chat bubbles render as justified, width-capped blocks.
_CHAT_CSS = """
<style>
.user-msg {
    background-color: #696969;
    color: white;
    padding: 10px;
    border-radius: 10px;
    margin-bottom: 5px;
    width: fit-content;
    max-width: 80%;
    text-align: justify; /* βœ… Justify text */
}
.assistant-msg {
    background-color: #333333;
    color: white;
    padding: 10px;
    border-radius: 10px;
    margin-bottom: 5px;
    width: fit-content;
    max-width: 80%;
    text-align: justify; /* βœ… Justify text */
}
.container {
    display: flex;
    flex-direction: column;
    align-items: flex-start;
}
@media (max-width: 600px) {
    .user-msg, .assistant-msg { font-size: 16px; max-width: 100%; }
}
</style>
"""
st.markdown(_CHAT_CSS, unsafe_allow_html=True)
188
-
189
-
190
# Sidebar control: wipe the conversation back to the initial greeting.
if st.sidebar.button("Reset Chat"):
    st.session_state.chat_history = [{"role": "assistant", "content": "Hello! How can I assist you today?"}]
    st.session_state.response_ready = False
    st.session_state.follow_up = ""

# Chat input box.
user_input = st.chat_input("Type your message here...")

if user_input:
    # get_response() mutates and returns the chat history alongside the reply.
    reply, follow_up, st.session_state.chat_history, image_url = get_response(
        system_message="You are a helpful AI assistant.",
        user_text=user_input,
        chat_history=st.session_state.chat_history,
    )

    # Render only when the model actually produced something.
    if reply:
        st.markdown(f"<div class='assistant-msg'><strong>HAL:</strong> {reply}</div>", unsafe_allow_html=True)

    if image_url:
        st.image(image_url, caption="NASA Image of the Day")

    st.session_state.follow_up = follow_up
    st.session_state.response_ready = True

# Show the stored follow-up once, then clear the ready flag so it is not repeated.
if st.session_state.response_ready and st.session_state.follow_up:
    st.markdown(f"<div class='assistant-msg'><strong>HAL:</strong> {st.session_state.follow_up}</div>", unsafe_allow_html=True)
    st.session_state.response_ready = False
 
11
 
12
# Environment variables -- read both first, then fail fast if either is missing.
HF_TOKEN = os.getenv("HF_TOKEN")
NASA_API_KEY = os.getenv("NASA_API_KEY")

if not HF_TOKEN:
    raise ValueError("HF_TOKEN is not set. Please add it to your environment variables.")

if not NASA_API_KEY:
    raise ValueError("NASA_API_KEY is not set. Please add it to your environment variables.")

# Streamlit page configuration.
st.set_page_config(page_title="HAL - NASA ChatBot", page_icon="πŸš€")

# Seed session-state slots on first run (history starts empty and is preserved
# across reruns).
if "chat_history" not in st.session_state:
    st.session_state["chat_history"] = []

if "response_ready" not in st.session_state:
    st.session_state["response_ready"] = False

if "follow_up" not in st.session_state:
    st.session_state["follow_up"] = ""

# Model configuration.
model_id = "mistralai/Mistral-7B-Instruct-v0.3"
37
 
38
+ # βœ… Initialize Hugging Face Model with Limited Tokens
39
+ def get_llm_hf_inference(model_id=model_id, max_new_tokens=300, temperature=0.7):
40
  return HuggingFaceEndpoint(
41
  repo_id=model_id,
42
  max_new_tokens=max_new_tokens,
 
55
  return "", "NASA Data Unavailable", "I couldn't fetch data from NASA right now."
56
 
57
# Sentiment analysis: SST-2 fine-tuned DistilBERT via the transformers pipeline API.
sentiment_analyzer = pipeline(
    "sentiment-analysis",
    model="distilbert/distilbert-base-uncased-finetuned-sst-2-english",
)
 
 
60
 
61
  def analyze_sentiment(user_text):
62
  result = sentiment_analyzer(user_text)[0]
 
68
  return "nasa_info"
69
  return "general_query"
70
 
71
+ # βœ… Generate Follow-Up Question
72
+ def generate_foll