CCockrum committed on
Commit
cca620f
·
verified ·
1 Parent(s): 8ba2440

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +65 -74
app.py CHANGED
@@ -1,27 +1,24 @@
1
  import os
2
  import re
3
  import random
4
- import subprocess
5
  import requests
6
  import streamlit as st
7
- import spacy # for additional NLP processing
8
  from langchain_huggingface import HuggingFaceEndpoint
9
  from langchain_core.prompts import PromptTemplate
10
  from langchain_core.output_parsers import StrOutputParser
11
  from transformers import pipeline
12
 
13
- # Must be the first Streamlit command!
14
- st.set_page_config(page_title="HAL - NASA ChatBot", page_icon="🚀")
 
 
15
 
16
- # --- Helper to load spaCy model with fallback ---
17
- def load_spacy_model():
18
- try:
19
- return spacy.load("en_core_web_sm")
20
- except OSError:
21
- subprocess.run(["python", "-m", "spacy", "download", "en_core_web_sm"], check=True)
22
- return spacy.load("en_core_web_sm")
23
 
24
- nlp_spacy = load_spacy_model()
 
25
 
26
  # --- Initialize Session State Variables ---
27
  if "chat_history" not in st.session_state:
@@ -31,38 +28,6 @@ if "response_ready" not in st.session_state:
31
  if "follow_up" not in st.session_state:
32
  st.session_state.follow_up = ""
33
 
34
- # --- Appearance CSS ---
35
- st.markdown("""
36
- <style>
37
- .user-msg {
38
- background-color: #696969;
39
- color: white;
40
- padding: 10px;
41
- border-radius: 10px;
42
- margin-bottom: 5px;
43
- width: fit-content;
44
- max-width: 80%;
45
- }
46
- .assistant-msg {
47
- background-color: #333333;
48
- color: white;
49
- padding: 10px;
50
- border-radius: 10px;
51
- margin-bottom: 5px;
52
- width: fit-content;
53
- max-width: 80%;
54
- }
55
- .container {
56
- display: flex;
57
- flex-direction: column;
58
- align-items: flex-start;
59
- }
60
- @media (max-width: 600px) {
61
- .user-msg, .assistant-msg { font-size: 16px; max-width: 100%; }
62
- }
63
- </style>
64
- """, unsafe_allow_html=True)
65
-
66
  # --- Set Up Model & API Functions ---
67
  model_id = "mistralai/Mistral-7B-Instruct-v0.3"
68
  sentiment_analyzer = pipeline(
@@ -76,12 +41,12 @@ def get_llm_hf_inference(model_id=model_id, max_new_tokens=128, temperature=0.7)
76
  repo_id=model_id,
77
  max_new_tokens=max_new_tokens,
78
  temperature=temperature,
79
- token=os.getenv("HF_TOKEN"),
80
  task="text-generation"
81
  )
82
 
83
  def get_nasa_apod():
84
- url = f"https://api.nasa.gov/planetary/apod?api_key={os.getenv('NASA_API_KEY')}"
85
  response = requests.get(url)
86
  if response.status_code == 200:
87
  data = response.json()
@@ -94,26 +59,19 @@ def analyze_sentiment(user_text):
94
  return result['label']
95
 
96
  def predict_action(user_text):
97
- if "nasa" in user_text.lower() or "space" in user_text.lower():
98
  return "nasa_info"
99
  return "general_query"
100
 
101
- def extract_context(text):
102
- """
103
- Uses spaCy to extract named entities for additional context.
104
- """
105
- doc = nlp_spacy(text)
106
- entities = [ent.text for ent in doc.ents]
107
- return ", ".join(entities) if entities else ""
108
-
109
  def generate_follow_up(user_text):
110
  """
111
  Generates two variant follow-up questions and randomly selects one.
 
112
  """
113
  prompt_text = (
114
  f"Based on the user's question: '{user_text}', generate two concise, friendly follow-up questions "
115
  "that invite further discussion. For example, one might be 'Would you like to know more about the six types of quarks?' "
116
- "and another 'Would you like to explore another aspect of quantum physics?'. Do not include extra commentary."
117
  )
118
  hf = get_llm_hf_inference(max_new_tokens=80, temperature=0.9)
119
  output = hf.invoke(input=prompt_text).strip()
@@ -125,15 +83,14 @@ def generate_follow_up(user_text):
125
 
126
  def get_response(system_message, chat_history, user_text, max_new_tokens=1024):
127
  """
128
- Generates HAL's detailed, in-depth answer and a follow-up question.
129
- Incorporates sentiment analysis, additional NLP context, and style instructions.
 
130
  """
131
  sentiment = analyze_sentiment(user_text)
132
  action = predict_action(user_text)
133
 
134
- context_info = extract_context(user_text)
135
- context_clause = f" The key topics here are: {context_info}." if context_info else ""
136
-
137
  style_instruction = ""
138
  lower_text = user_text.lower()
139
  if "in the voice of" in lower_text or "speaking as" in lower_text:
@@ -142,8 +99,6 @@ def get_response(system_message, chat_history, user_text, max_new_tokens=1024):
142
  style_instruction = match.group(2).strip().capitalize()
143
  style_instruction = f" Please respond in the voice of {style_instruction}."
144
 
145
- language_clause = " Answer exclusively in English."
146
-
147
  if action == "nasa_info":
148
  nasa_url, nasa_title, nasa_explanation = get_nasa_apod()
149
  response = f"**{nasa_title}**\n\n{nasa_explanation}"
@@ -162,19 +117,24 @@ def get_response(system_message, chat_history, user_text, max_new_tokens=1024):
162
 
163
  style_clause = style_instruction if style_instruction else ""
164
 
 
165
  prompt = PromptTemplate.from_template(
166
  (
167
  "[INST] {system_message}\n\nCurrent Conversation:\n{chat_history}\n\n"
168
  "User: {user_text}.\n [/INST]\n"
169
- "AI: Please provide a detailed, in-depth answer in a friendly, conversational tone that thoroughly covers the topic."
170
- + style_clause + context_clause + language_clause +
 
171
  "\nHAL:"
172
  )
173
  )
174
 
175
  chat = prompt | hf.bind(skip_prompt=True) | StrOutputParser(output_key='content')
176
- raw_output = chat.invoke(input=dict(system_message=system_message, user_text=user_text, chat_history=filtered_history))
177
- response = raw_output.split("HAL:")[-1].strip()
 
 
 
178
  if not response:
179
  response = "Certainly, here is an in-depth explanation: [Fallback explanation]."
180
 
@@ -200,13 +160,36 @@ if st.sidebar.button("Reset Chat"):
200
  st.session_state.follow_up = ""
201
  st.experimental_rerun()
202
 
203
- st.markdown("<div class='container'>", unsafe_allow_html=True)
204
- for message in st.session_state.chat_history:
205
- if message["role"] == "user":
206
- st.markdown(f"<div class='user-msg'><strong>You:</strong> {message['content']}</div>", unsafe_allow_html=True)
207
- else:
208
- st.markdown(f"<div class='assistant-msg'><strong>HAL:</strong> {message['content']}</div>", unsafe_allow_html=True)
209
- st.markdown("</div>", unsafe_allow_html=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
210
 
211
  user_input = st.chat_input("Type your message here...")
212
 
@@ -220,3 +203,11 @@ if user_input:
220
  st.image(image_url, caption="NASA Image of the Day")
221
  st.session_state.follow_up = follow_up
222
  st.session_state.response_ready = True
 
 
 
 
 
 
 
 
 
1
  import os
2
  import re
3
  import random
 
4
  import requests
5
  import streamlit as st
 
6
  from langchain_huggingface import HuggingFaceEndpoint
7
  from langchain_core.prompts import PromptTemplate
8
  from langchain_core.output_parsers import StrOutputParser
9
  from transformers import pipeline
10
 
11
+ # Use environment variables for keys
12
+ HF_TOKEN = os.getenv("HF_TOKEN")
13
+ if HF_TOKEN is None:
14
+ raise ValueError("HF_TOKEN environment variable not set. Please set it in your Hugging Face Space settings.")
15
 
16
+ NASA_API_KEY = os.getenv("NASA_API_KEY")
17
+ if NASA_API_KEY is None:
18
+ raise ValueError("NASA_API_KEY environment variable not set. Please set it in your Hugging Face Space settings.")
 
 
 
 
19
 
20
+ # Set up Streamlit UI
21
+ st.set_page_config(page_title="HAL - NASA ChatBot", page_icon="🚀")
22
 
23
  # --- Initialize Session State Variables ---
24
  if "chat_history" not in st.session_state:
 
28
  if "follow_up" not in st.session_state:
29
  st.session_state.follow_up = ""
30
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
31
  # --- Set Up Model & API Functions ---
32
  model_id = "mistralai/Mistral-7B-Instruct-v0.3"
33
  sentiment_analyzer = pipeline(
 
41
  repo_id=model_id,
42
  max_new_tokens=max_new_tokens,
43
  temperature=temperature,
44
+ token=HF_TOKEN,
45
  task="text-generation"
46
  )
47
 
48
  def get_nasa_apod():
49
+ url = f"https://api.nasa.gov/planetary/apod?api_key={NASA_API_KEY}"
50
  response = requests.get(url)
51
  if response.status_code == 200:
52
  data = response.json()
 
59
  return result['label']
60
 
61
  def predict_action(user_text):
62
+ if "NASA" in user_text or "space" in user_text:
63
  return "nasa_info"
64
  return "general_query"
65
 
 
 
 
 
 
 
 
 
66
  def generate_follow_up(user_text):
67
  """
68
  Generates two variant follow-up questions and randomly selects one.
69
+ It also cleans up any unwanted quotation marks or extra meta commentary.
70
  """
71
  prompt_text = (
72
  f"Based on the user's question: '{user_text}', generate two concise, friendly follow-up questions "
73
  "that invite further discussion. For example, one might be 'Would you like to know more about the six types of quarks?' "
74
+ "and another might be 'Would you like to explore another aspect of quantum physics?' Do not include extra commentary."
75
  )
76
  hf = get_llm_hf_inference(max_new_tokens=80, temperature=0.9)
77
  output = hf.invoke(input=prompt_text).strip()
 
83
 
84
  def get_response(system_message, chat_history, user_text, max_new_tokens=1024):
85
  """
86
+ Generates HAL's answer with depth and a follow-up question.
87
+ The prompt instructs the model to provide a detailed explanation and then generate a follow-up.
88
+ If the answer comes back empty, a fallback answer is used.
89
  """
90
  sentiment = analyze_sentiment(user_text)
91
  action = predict_action(user_text)
92
 
93
+ # Extract style instruction if present
 
 
94
  style_instruction = ""
95
  lower_text = user_text.lower()
96
  if "in the voice of" in lower_text or "speaking as" in lower_text:
 
99
  style_instruction = match.group(2).strip().capitalize()
100
  style_instruction = f" Please respond in the voice of {style_instruction}."
101
 
 
 
102
  if action == "nasa_info":
103
  nasa_url, nasa_title, nasa_explanation = get_nasa_apod()
104
  response = f"**{nasa_title}**\n\n{nasa_explanation}"
 
117
 
118
  style_clause = style_instruction if style_instruction else ""
119
 
120
+ # Instruct the model to generate a detailed, in-depth answer.
121
  prompt = PromptTemplate.from_template(
122
  (
123
  "[INST] {system_message}\n\nCurrent Conversation:\n{chat_history}\n\n"
124
  "User: {user_text}.\n [/INST]\n"
125
+ "AI: Please provide a detailed explanation in depth. "
126
+ "Ensure your response covers the topic thoroughly and is written in a friendly, conversational style, "
127
+ "starting with a phrase like 'Certainly!', 'Of course!', or 'Great question!'." + style_clause +
128
  "\nHAL:"
129
  )
130
  )
131
 
132
  chat = prompt | hf.bind(skip_prompt=True) | StrOutputParser(output_key='content')
133
+ response = chat.invoke(input=dict(system_message=system_message, user_text=user_text, chat_history=filtered_history))
134
+ # Remove any extra markers if present.
135
+ response = response.split("HAL:")[-1].strip()
136
+
137
+ # Fallback in case the generated answer is empty
138
  if not response:
139
  response = "Certainly, here is an in-depth explanation: [Fallback explanation]."
140
 
 
160
  st.session_state.follow_up = ""
161
  st.experimental_rerun()
162
 
163
+ st.markdown("""
164
+ <style>
165
+ .user-msg {
166
+ background-color: #696969;
167
+ color: white;
168
+ padding: 10px;
169
+ border-radius: 10px;
170
+ margin-bottom: 5px;
171
+ width: fit-content;
172
+ max-width: 80%;
173
+ }
174
+ .assistant-msg {
175
+ background-color: #333333;
176
+ color: white;
177
+ padding: 10px;
178
+ border-radius: 10px;
179
+ margin-bottom: 5px;
180
+ width: fit-content;
181
+ max-width: 80%;
182
+ }
183
+ .container {
184
+ display: flex;
185
+ flex-direction: column;
186
+ align-items: flex-start;
187
+ }
188
+ @media (max-width: 600px) {
189
+ .user-msg, .assistant-msg { font-size: 16px; max-width: 100%; }
190
+ }
191
+ </style>
192
+ """, unsafe_allow_html=True)
193
 
194
  user_input = st.chat_input("Type your message here...")
195
 
 
203
  st.image(image_url, caption="NASA Image of the Day")
204
  st.session_state.follow_up = follow_up
205
  st.session_state.response_ready = True
206
+
207
+ st.markdown("<div class='container'>", unsafe_allow_html=True)
208
+ for message in st.session_state.chat_history:
209
+ if message["role"] == "user":
210
+ st.markdown(f"<div class='user-msg'><strong>You:</strong> {message['content']}</div>", unsafe_allow_html=True)
211
+ else:
212
+ st.markdown(f"<div class='assistant-msg'><strong>HAL:</strong> {message['content']}</div>", unsafe_allow_html=True)
213
+ st.markdown("</div>", unsafe_allow_html=True)