CCockrum committed
Commit 3cc060e · verified
1 Parent(s): 1bdef79

Update app.py

Files changed (1)
  1. app.py +75 -47
app.py CHANGED
@@ -4,8 +4,8 @@ import streamlit as st
  from langchain_huggingface import HuggingFaceEndpoint
  from langchain_core.prompts import PromptTemplate
  from langchain_core.output_parsers import StrOutputParser
- from transformers import pipeline # for Sentiment Analysis
- from config import NASA_API_KEY # Import the NASA API key from the configuration file
+ from transformers import pipeline
+ from config import NASA_API_KEY # Import the NASA API key

  model_id = "mistralai/Mistral-7B-Instruct-v0.3"

@@ -23,41 +23,38 @@ def get_llm_hf_inference(model_id=model_id, max_new_tokens=128, temperature=0.1)

  def get_nasa_apod():
      """
-     Fetch the Astronomy Picture of the Day (APOD) from the NASA API.
+     Fetch NASA Astronomy Picture of the Day (APOD).
      """
      url = f"https://api.nasa.gov/planetary/apod?api_key={NASA_API_KEY}"
      response = requests.get(url)
      if response.status_code == 200:
          data = response.json()
-         return f"Title: {data['title']}\nExplanation: {data['explanation']}\nURL: {data['url']}"
+         return data.get("url", ""), data.get("title", ""), data.get("explanation", "")
      else:
-         return "I couldn't fetch data from NASA right now. Please try again later."
+         return "", "NASA Data Unavailable", "I couldn't fetch data from NASA right now. Please try again later."

  def analyze_sentiment(user_text):
      """
-     Analyzes the sentiment of the user's input to adjust responses.
+     Analyze sentiment of user input.
      """
      result = sentiment_analyzer(user_text)[0]
-     sentiment = result['label']
-     return sentiment
+     return result['label']

  def predict_action(user_text):
      """
-     Predicts actions based on user input (e.g., fetch space info or general knowledge).
+     Predicts user's intent based on input.
      """
      if "NASA" in user_text or "space" in user_text:
          return "nasa_info"
-     if "weather" in user_text:
-         return "weather_info"
      return "general_query"

  def generate_follow_up(user_text):
      """
-     Generates a relevant follow-up question based on the user's input.
+     Generates a follow-up question to continue the conversation.
      """
      prompt_text = (
-         f"Given the user's message: '{user_text}', ask one natural follow-up question "
-         "that suggests a related topic or offers user the opportunity to go in a new direction."
+         f"Based on the user's message: '{user_text}', suggest a natural follow-up question "
+         "to keep the conversation engaging."
      )

      hf = get_llm_hf_inference(max_new_tokens=64, temperature=0.7)
@@ -65,30 +62,26 @@ def generate_follow_up(user_text):

      return chat.strip()

- def get_response(system_message, chat_history, user_text,
-                  eos_token_id=['User'], max_new_tokens=256, get_llm_hf_kws={}):
+ def get_response(system_message, chat_history, user_text, max_new_tokens=256):
      sentiment = analyze_sentiment(user_text)
      action = predict_action(user_text)

      if action == "nasa_info":
-         nasa_response = get_nasa_apod()
+         nasa_url, nasa_title, nasa_explanation = get_nasa_apod()
+         response = f"**{nasa_title}**\n\n{nasa_explanation}"
          chat_history.append({'role': 'user', 'content': user_text})
-         chat_history.append({'role': 'assistant', 'content': nasa_response})
+         chat_history.append({'role': 'assistant', 'content': response})

          follow_up = generate_follow_up(user_text)
          chat_history.append({'role': 'assistant', 'content': follow_up})
-         return f"{nasa_response}\n\n{follow_up}", chat_history
+         return response, follow_up, chat_history, nasa_url

      hf = get_llm_hf_inference(max_new_tokens=max_new_tokens, temperature=0.1)

      prompt = PromptTemplate.from_template(
-         (
-             "[INST] {system_message}"
-             "\nCurrent Conversation:\n{chat_history}\n\n"
-             "\nUser: {user_text}.\n [/INST]"
-             "\nAI:"
-         )
+         "[INST] {system_message}\n\nCurrent Conversation:\n{chat_history}\n\nUser: {user_text}.\n [/INST]\nAI:"
      )
+
      chat = prompt | hf.bind(skip_prompt=True) | StrOutputParser(output_key='content')
      response = chat.invoke(input=dict(system_message=system_message, user_text=user_text, chat_history=chat_history))
      response = response.split("AI:")[-1]
@@ -96,37 +89,72 @@ def get_response(system_message, chat_history, user_text,
      chat_history.append({'role': 'user', 'content': user_text})
      chat_history.append({'role': 'assistant', 'content': response})

-     # Modify response based on sentiment analysis (e.g., offer help for negative sentiments)
      if sentiment == "NEGATIVE":
-         response += "\nI'm sorry to hear that. How can I assist you further?"
+         response += "\n😞 I'm sorry to hear that. How can I assist you further?"

      follow_up = generate_follow_up(user_text)
      chat_history.append({'role': 'assistant', 'content': follow_up})

-     return f"{response}\n\n{follow_up}", chat_history
+     return response, follow_up, chat_history, None
+
+ # Streamlit UI Setup
+ st.set_page_config(page_title="NASA ChatBot", page_icon="🚀")

- # Streamlit setup
- st.set_page_config(page_title="HuggingFace ChatBot", page_icon="🤗")
- st.title("NASA Personal Assistant")
- st.markdown(f"*This chatbot uses {model_id} and NASA's APIs to provide information and responses.*")
+ st.title("🚀 NASA AI ChatBot")
+ st.markdown("🌌 *Powered by Hugging Face & NASA APIs!*")

- # Initialize session state
+ # Custom CSS for chat styling
+ st.markdown("""
+ <style>
+ .user-msg { background-color: #ADD8E6; padding: 10px; border-radius: 10px; margin-bottom: 5px; }
+ .assistant-msg { background-color: #F0F0F0; padding: 10px; border-radius: 10px; margin-bottom: 5px; }
+ @media (max-width: 600px) { .user-msg, .assistant-msg { font-size: 16px; } }
+ </style>
+ """, unsafe_allow_html=True)
+
+ # Initialize chat history
  if "chat_history" not in st.session_state:
      st.session_state.chat_history = [{"role": "assistant", "content": "Hello! How can I assist you today?"}]
-
- # Sidebar for settings
+
+ # Sidebar for chat reset
  if st.sidebar.button("Reset Chat"):
      st.session_state.chat_history = [{"role": "assistant", "content": "Hello! How can I assist you today?"}]
+     st.experimental_rerun()

- # Main chat interface
- user_input = st.chat_input(placeholder="Type your message here...")
- if user_input:
-     response, st.session_state.chat_history = get_response(
-         system_message="You are a helpful AI assistant.",
-         user_text=user_input,
-         chat_history=st.session_state.chat_history,
-         max_new_tokens=128
-     )
-     # Display messages
-     for message in st.session_state.chat_history:
-         st.chat_message(message["role"]).write(message["content"])
+ # Chat display
+ for message in st.session_state.chat_history:
+     if message["role"] == "user":
+         st.markdown(f"<div class='user-msg'><strong>You:</strong> {message['content']}</div>", unsafe_allow_html=True)
+     else:
+         st.markdown(f"<div class='assistant-msg'><strong>Bot:</strong> {message['content']}</div>", unsafe_allow_html=True)
+
+ # User input
+ user_input = st.text_area("Type your message:", height=100)
+
+ if st.button("Send"):
+     if user_input:
+         response, follow_up, st.session_state.chat_history, image_url = get_response(
+             system_message="You are a helpful AI assistant.",
+             user_text=user_input,
+             chat_history=st.session_state.chat_history
+         )
+
+         # Display response
+         st.markdown(f"<div class='assistant-msg'><strong>Bot:</strong> {response}</div>", unsafe_allow_html=True)
+
+         # Display NASA image if available
+         if image_url:
+             st.image(image_url, caption="NASA Image of the Day")
+
+         # Follow-up options
+         follow_up_options = [follow_up, "Explain differently", "Give me an example"]
+         selected_option = st.radio("What would you like to do next?", follow_up_options)
+
+         if st.button("Continue"):
+             if selected_option:
+                 response, _, st.session_state.chat_history, _ = get_response(
+                     system_message="You are a helpful AI assistant.",
+                     user_text=selected_option,
+                     chat_history=st.session_state.chat_history
+                 )
+                 st.markdown(f"<div class='assistant-msg'><strong>Bot:</strong> {response}</div>", unsafe_allow_html=True)