CCockrum committed
Commit fe8e64b · verified · 1 Parent(s): 47a03de

Update app.py

Files changed (1)
  1. app.py +63 -51
app.py CHANGED
@@ -1,6 +1,7 @@
 import os
 import re
 import requests
+import torch
 import streamlit as st
 from langchain_huggingface import HuggingFaceEndpoint
 from langchain_core.prompts import PromptTemplate
@@ -8,6 +9,10 @@ from langchain_core.output_parsers import StrOutputParser
 from transformers import pipeline
 from langdetect import detect  # Ensure this package is installed
 
+# ✅ Check for GPU or Default to CPU
+device = "cuda" if torch.cuda.is_available() else "cpu"
+print(f"✅ Using device: {device}")  # Debugging info
+
 # ✅ Environment Variables
 HF_TOKEN = os.getenv("HF_TOKEN")
 if HF_TOKEN is None:
@@ -20,7 +25,7 @@ if NASA_API_KEY is None:
 # ✅ Set Up Streamlit
 st.set_page_config(page_title="HAL - NASA ChatBot", page_icon="🚀")
 
-# ✅ Initialize Session State Variables
+# ✅ Initialize Session State Variables (Ensuring Chat History Persists)
 if "chat_history" not in st.session_state:
     st.session_state.chat_history = [{"role": "assistant", "content": "Hello! How can I assist you today?"}]
 if "response_ready" not in st.session_state:
@@ -28,17 +33,15 @@ if "response_ready" not in st.session_state:
 if "follow_up" not in st.session_state:
     st.session_state.follow_up = ""
 
-# ✅ Model Configuration
-model_id = "mistralai/Mistral-7B-Instruct-v0.3"
-
-# ✅ Initialize Hugging Face Model
-def get_llm_hf_inference(model_id=model_id, max_new_tokens=512, temperature=0.7):
+# ✅ Initialize Hugging Face Model (Explicitly Set to CPU/GPU)
+def get_llm_hf_inference(model_id="mistralai/Mistral-7B-Instruct-v0.3", max_new_tokens=512, temperature=0.7):
     return HuggingFaceEndpoint(
         repo_id=model_id,
         max_new_tokens=max_new_tokens,
         temperature=temperature,
         token=HF_TOKEN,
-        task="text-generation"
+        task="text-generation",
+        device=-1 if device == "cpu" else 0  # ✅ Force CPU (-1) or GPU (0)
     )
 
 # ✅ NASA API Function
@@ -50,10 +53,11 @@ def get_nasa_apod():
         return data.get("url", ""), data.get("title", ""), data.get("explanation", "")
     return "", "NASA Data Unavailable", "I couldn't fetch data from NASA right now."
 
-# ✅ Sentiment Analysis
+# ✅ Sentiment Analysis (Now Uses Explicit Device)
 sentiment_analyzer = pipeline(
     "sentiment-analysis",
-    model="distilbert/distilbert-base-uncased-finetuned-sst-2-english"
+    model="distilbert/distilbert-base-uncased-finetuned-sst-2-english",
+    device=-1 if device == "cpu" else 0  # ✅ Force CPU (-1) or GPU (0)
 )
 
 def analyze_sentiment(user_text):
@@ -66,35 +70,30 @@ def predict_action(user_text):
         return "nasa_info"
     return "general_query"
 
+# ✅ Ensure English Responses
+def ensure_english(text):
+    try:
+        detected_lang = detect(text)
+        if detected_lang != "en":
+            return "⚠️ Sorry, I only respond in English. Can you rephrase your question?"
+    except:
+        return "⚠️ Language detection failed. Please ask your question again."
+    return text
+
 # ✅ Follow-Up Question Generation
 def generate_follow_up(user_text):
-    """Generates a clean follow-up question to guide the user toward related topics or next steps."""
     prompt_text = (
         f"Given the user's question: '{user_text}', generate a SHORT follow-up question "
-        "suggesting either a related topic or asking if they need further help. "
-        "Example: 'Would you like to explore quantum superposition or ask about another physics concept?' "
-        "Keep it concise and engaging."
+        "suggesting a related topic or asking if they need more details."
     )
-
+
     hf = get_llm_hf_inference(max_new_tokens=40, temperature=0.8)
     output = hf.invoke(input=prompt_text).strip()
-
-    # ✅ Remove unnecessary characters (like backticks and misplaced formatting)
+
     cleaned_output = re.sub(r"```|''|\"", "", output).strip()
 
-    # ✅ Fallback in case the response is empty or invalid
     return cleaned_output if cleaned_output else "Would you like to explore another related topic or ask about something else?"
 
-# ✅ Ensure English Responses
-def ensure_english(text):
-    try:
-        detected_lang = detect(text)
-        if detected_lang != "en":
-            return "⚠️ Sorry, I only respond in English. Can you rephrase your question?"
-    except:
-        return "⚠️ Language detection failed. Please ask your question again."
-    return text
-
 # ✅ Main Response Function
 def get_response(system_message, chat_history, user_text, max_new_tokens=512):
     action = predict_action(user_text)
@@ -104,49 +103,66 @@ def get_response(system_message, chat_history, user_text, max_new_tokens=512):
         nasa_url, nasa_title, nasa_explanation = get_nasa_apod()
         response = f"**{nasa_title}**\n\n{nasa_explanation}"
         follow_up = generate_follow_up(user_text)
-        chat_history.append({'role': 'user', 'content': user_text})
-        chat_history.append({'role': 'assistant', 'content': response})
-        chat_history.append({'role': 'assistant', 'content': follow_up})
+        chat_history.extend([
+            {'role': 'user', 'content': user_text},
+            {'role': 'assistant', 'content': response},
+            {'role': 'assistant', 'content': follow_up}
+        ])
         return response, follow_up, chat_history, nasa_url
 
-    # ✅ Set Up LLM Request
+    # ✅ Invoke Hugging Face Model
    hf = get_llm_hf_inference(max_new_tokens=max_new_tokens, temperature=0.9)
 
-    # ✅ Format Chat History
     filtered_history = "\n".join(f"{msg['role']}: {msg['content']}" for msg in chat_history)
 
-    # ✅ Prompt Engineering
     prompt = PromptTemplate.from_template(
         "[INST] {system_message}\n\nCurrent Conversation:\n{chat_history}\n\n"
         "User: {user_text}.\n [/INST]\n"
-        "AI: Provide a detailed explanation with depth. "
-        "Use a conversational style, starting with 'Certainly!', 'Of course!', or 'Great question!'."
+        "AI: Provide a detailed explanation. Use a conversational tone. "
         "🚨 Answer **only in English**."
         "\nHAL:"
     )
 
-    # ✅ Invoke LLM Model
     chat = prompt | hf.bind(skip_prompt=True) | StrOutputParser(output_key='content')
     response = chat.invoke(input=dict(system_message=system_message, user_text=user_text, chat_history=filtered_history))
     response = response.split("HAL:")[-1].strip() if "HAL:" in response else response.strip()
 
-    # ✅ Ensure English
     response = ensure_english(response)
 
-    # ✅ Fallback Response
     if not response:
         response = "I'm sorry, but I couldn't generate a response. Can you rephrase your question?"
 
     follow_up = generate_follow_up(user_text)
-    chat_history.append({'role': 'user', 'content': user_text})
-    chat_history.append({'role': 'assistant', 'content': response})
-    chat_history.append({'role': 'assistant', 'content': follow_up})
+
+    chat_history.extend([
+        {'role': 'user', 'content': user_text},
+        {'role': 'assistant', 'content': response},
+        {'role': 'assistant', 'content': follow_up}
+    ])
 
     return response, follow_up, chat_history, None
 
 # ✅ Streamlit UI
 st.title("🚀 HAL - NASA AI Assistant")
 
+# ✅ Justify all chatbot responses
+st.markdown("""
+    <style>
+    .user-msg, .assistant-msg {
+        padding: 10px;
+        border-radius: 10px;
+        margin-bottom: 5px;
+        width: fit-content;
+        max-width: 80%;
+        text-align: justify;
+    }
+    .user-msg { background-color: #696969; color: white; }
+    .assistant-msg { background-color: #333333; color: white; }
+    .container { display: flex; flex-direction: column; align-items: flex-start; }
+    @media (max-width: 600px) { .user-msg, .assistant-msg { font-size: 16px; max-width: 100%; } }
+    </style>
+""", unsafe_allow_html=True)
+
 # ✅ Reset Chat Button
 if st.sidebar.button("Reset Chat"):
     st.session_state.chat_history = [{"role": "assistant", "content": "Hello! How can I assist you today?"}]
@@ -157,27 +173,23 @@ if st.sidebar.button("Reset Chat"):
 user_input = st.chat_input("Type your message here...")
 
 if user_input:
-    # ✅ Ensure `get_response()` is executed BEFORE using `response`
     response, follow_up, st.session_state.chat_history, image_url = get_response(
         system_message="You are a helpful AI assistant.",
         user_text=user_input,
         chat_history=st.session_state.chat_history
     )
 
-    # ✅ Ensure `response` is not None before using it
-    if not response:
-        response = "I'm sorry, but I couldn't generate a response."
-
-    # ✅ Display chatbot response
-    st.markdown(f"<div class='assistant-msg'><strong>HAL:</strong> {response}</div>", unsafe_allow_html=True)
+    if response:
+        st.markdown(f"<div class='assistant-msg'><strong>HAL:</strong> {response}</div>", unsafe_allow_html=True)
 
-    # ✅ Handle follow-up question
     if follow_up:
-        st.session_state.chat_history.append({'role': 'assistant', 'content': follow_up})
         st.markdown(f"<div class='assistant-msg'><strong>HAL:</strong> {follow_up}</div>", unsafe_allow_html=True)
 
-    # ✅ Display NASA image if available
     if image_url:
         st.image(image_url, caption="NASA Image of the Day")
 
     st.session_state.response_ready = True
+
+if st.session_state.response_ready and st.session_state.follow_up:
+    st.markdown(f"<div class='assistant-msg'><strong>HAL:</strong> {st.session_state.follow_up}</div>", unsafe_allow_html=True)
+    st.session_state.response_ready = False
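A few notes on the patterns this commit introduces. First, the device flag it adds in two places: HuggingFaceEndpoint talks to a hosted inference API, so generation runs remotely and a device argument there most likely has no effect on local hardware (depending on the langchain_huggingface version it may be rejected or folded into model_kwargs). Device selection does matter for the local transformers pipeline used for sentiment analysis. A minimal sketch of that piece in isolation, using the same model id as the commit:

    import torch
    from transformers import pipeline

    # pipeline() accepts device=-1 for CPU or a GPU index (0, 1, ...).
    device = 0 if torch.cuda.is_available() else -1

    sentiment_analyzer = pipeline(
        "sentiment-analysis",
        model="distilbert/distilbert-base-uncased-finetuned-sst-2-english",
        device=device,
    )

    print(sentiment_analyzer("The launch was a complete success!"))
    # e.g. [{'label': 'POSITIVE', 'score': 0.99...}]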
 
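The new ensure_english() catches with a bare except:, which also swallows unrelated errors. langdetect raises a dedicated exception for text it cannot classify (empty or symbol-only input), so a tighter variant is possible; this sketch assumes LangDetectException is importable from langdetect.lang_detect_exception:

    from langdetect import detect
    from langdetect.lang_detect_exception import LangDetectException

    def ensure_english(text):
        # Reject non-English output; fall back gracefully if detection fails.
        try:
            if detect(text) != "en":
                return "⚠️ Sorry, I only respond in English. Can you rephrase your question?"
        except LangDetectException:
            return "⚠️ Language detection failed. Please ask your question again."
        return text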
 
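get_nasa_apod() appears only as context lines in the diff. For orientation, here is a sketch consistent with the visible return statements, assuming the public APOD endpoint at api.nasa.gov; the timeout value is an added assumption, not part of the app's actual code:

    import os
    import requests

    NASA_API_KEY = os.getenv("NASA_API_KEY")

    def get_nasa_apod():
        # Astronomy Picture of the Day -> (image_url, title, explanation)
        response = requests.get(
            "https://api.nasa.gov/planetary/apod",
            params={"api_key": NASA_API_KEY},
            timeout=10,
        )
        if response.status_code == 200:
            data = response.json()
            return data.get("url", ""), data.get("title", ""), data.get("explanation", "")
        return "", "NASA Data Unavailable", "I couldn't fetch data from NASA right now."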
 
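Replacing three append() calls with one extend() is behavior-preserving: extend() mutates the list in place, so the chat_history returned by get_response() is the same object the caller passed in (st.session_state.chat_history), and reassigning it on return is harmless. A tiny illustration:

    history = [{"role": "assistant", "content": "Hello!"}]
    alias = history  # a second reference to the same list

    history.extend([
        {"role": "user", "content": "hi"},
        {"role": "assistant", "content": "hi there"},
    ])

    print(len(alias))        # 3 -- in-place mutation is visible through every reference
    print(alias is history)  # True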
 
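One loose end in the new UI tail: st.session_state.follow_up is initialized to an empty string and, as far as this diff shows, never reassigned, so the closing "if st.session_state.response_ready and st.session_state.follow_up:" block can never fire. If the intent is to re-render the follow-up after Streamlit's rerun, the input handler would need to persist it; a hypothetical one-line addition, not part of this commit:

    if user_input:
        response, follow_up, st.session_state.chat_history, image_url = get_response(
            system_message="You are a helpful AI assistant.",
            user_text=user_input,
            chat_history=st.session_state.chat_history,
        )
        st.session_state.follow_up = follow_up  # persist so the rerun block can show it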
 