CCockrum committed (verified)
Commit ba9c3bd · 1 Parent(s): 6ef9d8a

Update app.py

Files changed (1): app.py (+42 -65)

app.py CHANGED
@@ -3,23 +3,18 @@ import re
 import random
 import requests
 import streamlit as st
+import spacy  # for additional NLP processing
 from langchain_huggingface import HuggingFaceEndpoint
 from langchain_core.prompts import PromptTemplate
 from langchain_core.output_parsers import StrOutputParser
 from transformers import pipeline

-# Use environment variables for keys
-HF_TOKEN = os.getenv("HF_TOKEN")
-if HF_TOKEN is None:
-    raise ValueError("HF_TOKEN environment variable not set. Please set it in your Hugging Face Space settings.")
-
-NASA_API_KEY = os.getenv("NASA_API_KEY")
-if NASA_API_KEY is None:
-    raise ValueError("NASA_API_KEY environment variable not set. Please set it in your Hugging Face Space settings.")
-
-# Set up Streamlit UI
+# Must be the first Streamlit command!
 st.set_page_config(page_title="HAL - NASA ChatBot", page_icon="🚀")

+# --- Appearance Section (optional) ---
+# (You can adjust CSS or appearance settings here if needed)
+
 # --- Initialize Session State Variables ---
 if "chat_history" not in st.session_state:
     st.session_state.chat_history = [{"role": "assistant", "content": "Hello! How can I assist you today?"}]

@@ -28,6 +23,18 @@ if "response_ready" not in st.session_state:
 if "follow_up" not in st.session_state:
     st.session_state.follow_up = ""

+# --- Load spaCy Model for Additional NLP ---
+nlp_spacy = spacy.load("en_core_web_sm")
+
+def extract_context(text):
+    """
+    Extract key entities from the text using spaCy to provide extra context.
+    Returns a comma-separated string of entities (if any).
+    """
+    doc = nlp_spacy(text)
+    entities = [ent.text for ent in doc.ents]
+    return ", ".join(entities) if entities else ""
+
 # --- Set Up Model & API Functions ---
 model_id = "mistralai/Mistral-7B-Instruct-v0.3"
 sentiment_analyzer = pipeline(

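Note: the new extract_context helper assumes the en_core_web_sm model is available in the Space (e.g. via a requirements entry or python -m spacy download en_core_web_sm). A minimal sanity-check sketch of its behavior; the example input is hypothetical and the exact entities depend on the spaCy model version:

import spacy

nlp_spacy = spacy.load("en_core_web_sm")  # raises OSError if the model isn't installed

def extract_context(text):
    # Same logic as the committed helper: collect named entities as extra context.
    doc = nlp_spacy(text)
    entities = [ent.text for ent in doc.ents]
    return ", ".join(entities) if entities else ""

print(extract_context("Tell me about NASA's Artemis missions to the Moon."))
# Typically something like "NASA, Artemis, Moon" (model-dependent)
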
@@ -41,12 +48,12 @@ def get_llm_hf_inference(model_id=model_id, max_new_tokens=128, temperature=0.7):
         repo_id=model_id,
         max_new_tokens=max_new_tokens,
         temperature=temperature,
-        token=HF_TOKEN,
+        token=os.getenv("HF_TOKEN"),
         task="text-generation"
     )

 def get_nasa_apod():
-    url = f"https://api.nasa.gov/planetary/apod?api_key={NASA_API_KEY}"
+    url = f"https://api.nasa.gov/planetary/apod?api_key={os.getenv('NASA_API_KEY')}"
     response = requests.get(url)
     if response.status_code == 200:
         data = response.json()

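Note: with the keys now read inline, a missing HF_TOKEN or NASA_API_KEY no longer fails at startup but only when the endpoint or APOD call is made. If fail-fast behavior is still wanted, a small helper along these lines (a hypothetical sketch, not part of the commit) would restore it:

import os

def require_env(name: str) -> str:
    # Fail fast with an actionable message instead of passing None to an API later.
    value = os.getenv(name)
    if value is None:
        raise ValueError(
            f"{name} environment variable not set. "
            "Please set it in your Hugging Face Space settings."
        )
    return value

HF_TOKEN = require_env("HF_TOKEN")
NASA_API_KEY = require_env("NASA_API_KEY")
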
@@ -59,19 +66,19 @@ def analyze_sentiment(user_text):
     return result['label']

 def predict_action(user_text):
-    if "NASA" in user_text or "space" in user_text:
+    if "nasa" in user_text.lower() or "space" in user_text.lower():
         return "nasa_info"
     return "general_query"

 def generate_follow_up(user_text):
     """
     Generates two variant follow-up questions and randomly selects one.
-    It also cleans up any unwanted quotation marks or extra meta commentary.
+    Cleans up extraneous quotation marks.
     """
     prompt_text = (
         f"Based on the user's question: '{user_text}', generate two concise, friendly follow-up questions "
         "that invite further discussion. For example, one might be 'Would you like to know more about the six types of quarks?' "
-        "and another might be 'Would you like to explore another aspect of quantum physics?' Do not include extra commentary."
+        "and another 'Would you like to explore another aspect of quantum physics?'. Do not include extra commentary."
     )
     hf = get_llm_hf_inference(max_new_tokens=80, temperature=0.9)
     output = hf.invoke(input=prompt_text).strip()

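Note: the lowercase comparison fixes case sensitivity, but plain substring matching still fires on words like "aerospace" or "workspace". Since the file already imports re, a word-boundary variant (a sketch, not what the commit does) would avoid that:

import re

def predict_action(user_text):
    # \b anchors avoid substring false positives such as "aerospace" -> "space".
    if re.search(r"\b(nasa|space)\b", user_text, re.IGNORECASE):
        return "nasa_info"
    return "general_query"
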
@@ -83,14 +90,20 @@ def generate_follow_up(user_text):

 def get_response(system_message, chat_history, user_text, max_new_tokens=256):
     """
-    Generates HAL's answer with depth and a follow-up question.
-    The prompt instructs the model to provide a detailed explanation and then generate a follow-up.
-    If the answer comes back empty, a fallback answer is used.
+    Generates HAL's response with a detailed explanation and a follow-up question.
+    Uses sentiment analysis and extracts additional context from the user's text via spaCy.
     """
     sentiment = analyze_sentiment(user_text)
     action = predict_action(user_text)

-    # Extract style instruction if present
+    # Extract additional context using spaCy
+    context_info = extract_context(user_text)
+    if context_info:
+        context_clause = f" The key topics here are: {context_info}."
+    else:
+        context_clause = ""
+
+    # Extract style instruction if present.
     style_instruction = ""
     lower_text = user_text.lower()
     if "in the voice of" in lower_text or "speaking as" in lower_text:

@@ -117,27 +130,21 @@ def get_response(system_message, chat_history, user_text, max_new_tokens=256):

     style_clause = style_instruction if style_instruction else ""

-    # Instruct the model to generate a detailed, in-depth answer.
     prompt = PromptTemplate.from_template(
         (
             "[INST] {system_message}\n\nCurrent Conversation:\n{chat_history}\n\n"
             "User: {user_text}.\n [/INST]\n"
-            "AI: Please provide a detailed explanation in depth. "
-            "Ensure your response covers the topic thoroughly and is written in a friendly, conversational style, "
-            "starting with a phrase like 'Certainly!', 'Of course!', or 'Great question!'." + style_clause +
+            "AI: Please provide a detailed, in-depth answer in a friendly, conversational tone that covers the topic thoroughly."
+            + style_clause + context_clause +
             "\nHAL:"
         )
     )

     chat = prompt | hf.bind(skip_prompt=True) | StrOutputParser(output_key='content')
     response = chat.invoke(input=dict(system_message=system_message, user_text=user_text, chat_history=filtered_history))
-    # Remove any extra markers if present.
     response = response.split("HAL:")[-1].strip()
-
-    # Fallback in case the generated answer is empty
     if not response:
         response = "Certainly, here is an in-depth explanation: [Fallback explanation]."
-
     chat_history.append({'role': 'user', 'content': user_text})
     chat_history.append({'role': 'assistant', 'content': response})

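Note: style_clause and context_clause are concatenated into the template string before PromptTemplate.from_template parses it, so any literal { or } in the extracted entities would be read as extra template variables. A quick way to sanity-check the assembled template (the clause values below are hypothetical examples):

from langchain_core.prompts import PromptTemplate

style_clause = " Respond in the style of Carl Sagan."      # hypothetical example
context_clause = " The key topics here are: NASA, Mars."   # hypothetical example

template = (
    "[INST] {system_message}\n\nCurrent Conversation:\n{chat_history}\n\n"
    "User: {user_text}.\n [/INST]\n"
    "AI: Please provide a detailed, in-depth answer in a friendly, conversational tone that covers the topic thoroughly."
    + style_clause + context_clause +
    "\nHAL:"
)
prompt = PromptTemplate.from_template(template)
print(sorted(prompt.input_variables))
# Expect exactly ['chat_history', 'system_message', 'user_text'];
# anything else means user-derived text leaked braces into the template.
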
@@ -160,36 +167,14 @@ if st.sidebar.button("Reset Chat"):
     st.session_state.follow_up = ""
     st.experimental_rerun()

-st.markdown("""
-    <style>
-    .user-msg {
-        background-color: #696969;
-        color: white;
-        padding: 10px;
-        border-radius: 10px;
-        margin-bottom: 5px;
-        width: fit-content;
-        max-width: 80%;
-    }
-    .assistant-msg {
-        background-color: #333333;
-        color: white;
-        padding: 10px;
-        border-radius: 10px;
-        margin-bottom: 5px;
-        width: fit-content;
-        max-width: 80%;
-    }
-    .container {
-        display: flex;
-        flex-direction: column;
-        align-items: flex-start;
-    }
-    @media (max-width: 600px) {
-        .user-msg, .assistant-msg { font-size: 16px; max-width: 100%; }
-    }
-    </style>
-""", unsafe_allow_html=True)
+# Render the chat history.
+st.markdown("<div class='container'>", unsafe_allow_html=True)
+for message in st.session_state.chat_history:
+    if message["role"] == "user":
+        st.markdown(f"<div class='user-msg'><strong>You:</strong> {message['content']}</div>", unsafe_allow_html=True)
+    else:
+        st.markdown(f"<div class='assistant-msg'><strong>HAL:</strong> {message['content']}</div>", unsafe_allow_html=True)
+st.markdown("</div>", unsafe_allow_html=True)

 user_input = st.chat_input("Type your message here...")

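Note: with the CSS block removed, the rendered divs now reference .container, .user-msg and .assistant-msg classes that are no longer styled. Streamlit's built-in chat primitives could render the same transcript without raw HTML (a sketch, not what the commit does); note also that st.experimental_rerun() has been superseded by st.rerun() in recent Streamlit releases.

import streamlit as st

for message in st.session_state.chat_history:
    # st.chat_message renders native user/assistant bubbles, no CSS needed.
    with st.chat_message("user" if message["role"] == "user" else "assistant"):
        st.markdown(message["content"])
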
@@ -203,11 +188,3 @@ if user_input:
         st.image(image_url, caption="NASA Image of the Day")
     st.session_state.follow_up = follow_up
     st.session_state.response_ready = True
-
-st.markdown("<div class='container'>", unsafe_allow_html=True)
-for message in st.session_state.chat_history:
-    if message["role"] == "user":
-        st.markdown(f"<div class='user-msg'><strong>You:</strong> {message['content']}</div>", unsafe_allow_html=True)
-    else:
-        st.markdown(f"<div class='assistant-msg'><strong>HAL:</strong> {message['content']}</div>", unsafe_allow_html=True)
-st.markdown("</div>", unsafe_allow_html=True)

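Note: the st.image call in the context above is fed by get_nasa_apod(), which performs a bare requests.get with no timeout. A hedged variant with a timeout and error handling (a sketch under assumed return shape, not part of the commit; url and explanation are documented APOD response fields, and DEMO_KEY is NASA's public rate-limited fallback key):

import os
import requests

def get_nasa_apod():
    params = {"api_key": os.getenv("NASA_API_KEY", "DEMO_KEY")}
    try:
        response = requests.get(
            "https://api.nasa.gov/planetary/apod", params=params, timeout=10
        )
        response.raise_for_status()
    except requests.RequestException:
        # Network failure or non-2xx status: degrade gracefully.
        return None, "Could not reach the NASA APOD service."
    data = response.json()
    return data.get("url"), data.get("explanation", "")
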