CCockrum committed on
Commit
fc5f1c7
·
verified ·
1 Parent(s): d6f5773

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +85 -5
app.py CHANGED
@@ -1,13 +1,17 @@
1
  import os
2
- from langchain_huggingface import HuggingFaceEndpoint
3
  import streamlit as st
 
4
  from langchain_core.prompts import PromptTemplate
5
  from langchain_core.output_parsers import StrOutputParser
6
- import requests
7
  from config import NASA_API_KEY # Import the NASA API key from the configuration file
8
 
9
  model_id = "mistralai/Mistral-7B-Instruct-v0.3"
10
 
 
 
 
11
  def get_llm_hf_inference(model_id=model_id, max_new_tokens=128, temperature=0.1):
12
  llm = HuggingFaceEndpoint(
13
  repo_id=model_id,
@@ -29,13 +33,51 @@ def get_nasa_apod():
29
  else:
30
  return "I couldn't fetch data from NASA right now. Please try again later."
31
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
32
  def get_response(system_message, chat_history, user_text,
33
  eos_token_id=['User'], max_new_tokens=256, get_llm_hf_kws={}):
34
- if "NASA" in user_text or "space" in user_text:
 
 
 
35
  nasa_response = get_nasa_apod()
36
  chat_history.append({'role': 'user', 'content': user_text})
37
  chat_history.append({'role': 'assistant', 'content': nasa_response})
38
- return nasa_response, chat_history
 
 
 
39
 
40
  hf = get_llm_hf_inference(max_new_tokens=max_new_tokens, temperature=0.1)
41
 
@@ -53,7 +95,15 @@ def get_response(system_message, chat_history, user_text,
53
 
54
  chat_history.append({'role': 'user', 'content': user_text})
55
  chat_history.append({'role': 'assistant', 'content': response})
56
- return response, chat_history
 
 
 
 
 
 
 
 
57
 
58
  # Streamlit setup
59
  st.set_page_config(page_title="HuggingFace ChatBot", page_icon="🤗")
@@ -81,3 +131,33 @@ if user_input:
81
  for message in st.session_state.chat_history:
82
  st.chat_message(message["role"]).write(message["content"])
83
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import os
2
+ import requests
3
  import streamlit as st
4
+ from langchain_huggingface import HuggingFaceEndpoint
5
  from langchain_core.prompts import PromptTemplate
6
  from langchain_core.output_parsers import StrOutputParser
7
+ from transformers import pipeline # for Sentiment Analysis
8
  from config import NASA_API_KEY # Import the NASA API key from the configuration file
9
 
10
# HF Hub repo id of the instruct model served via HuggingFaceEndpoint.
model_id = "mistralai/Mistral-7B-Instruct-v0.3"

# Initialize sentiment analysis pipeline
# NOTE(review): pipeline("sentiment-analysis") pins no model name, so it
# downloads/loads the library's default sentiment model at import time;
# confirm this (and the startup cost) is intended.
sentiment_analyzer = pipeline("sentiment-analysis")
15
  def get_llm_hf_inference(model_id=model_id, max_new_tokens=128, temperature=0.1):
16
  llm = HuggingFaceEndpoint(
17
  repo_id=model_id,
 
33
  else:
34
  return "I couldn't fetch data from NASA right now. Please try again later."
35
 
36
def analyze_sentiment(user_text):
    """
    Classify the sentiment of *user_text* with the module-level HF
    pipeline and return the predicted label string (e.g. "NEGATIVE").
    """
    predictions = sentiment_analyzer(user_text)
    top_prediction = predictions[0]
    return top_prediction['label']
43
+
44
def predict_action(user_text):
    """
    Predict which action to take based on the user's message.

    Returns one of:
        "nasa_info"     -- message mentions NASA or space
        "weather_info"  -- message mentions weather
        "general_query" -- anything else

    Matching is case-insensitive: the original compared raw substrings,
    so only the exact forms "NASA" and lowercase "space"/"weather"
    matched — "nasa", "Space", or "Weather" silently fell through to
    the general branch.
    """
    text = user_text.lower()
    if "nasa" in text or "space" in text:
        return "nasa_info"
    if "weather" in text:
        return "weather_info"
    return "general_query"
53
+
54
def generate_follow_up(user_text):
    """
    Ask the LLM for one natural follow-up question related to the
    user's last message and return it stripped of surrounding
    whitespace.
    """
    follow_up_prompt = (
        f"Given the user's message: '{user_text}', ask one natural follow-up question "
        "that suggests a related topic or offers user the opportunity to go in a new direction."
    )
    llm = get_llm_hf_inference(max_new_tokens=64, temperature=0.7)
    raw_reply = llm.invoke(input=follow_up_prompt)
    return raw_reply.strip()
67
+
68
  def get_response(system_message, chat_history, user_text,
69
  eos_token_id=['User'], max_new_tokens=256, get_llm_hf_kws={}):
70
+ sentiment = analyze_sentiment(user_text)
71
+ action = predict_action(user_text)
72
+
73
+ if action == "nasa_info":
74
  nasa_response = get_nasa_apod()
75
  chat_history.append({'role': 'user', 'content': user_text})
76
  chat_history.append({'role': 'assistant', 'content': nasa_response})
77
+
78
+ follow_up = generate_follow_up(user_text)
79
+ chat_history.append({'role': 'assistant', 'content': follow_up})
80
+ return f"{nasa_response}\n\n{follow_up}", chat_history
81
 
82
  hf = get_llm_hf_inference(max_new_tokens=max_new_tokens, temperature=0.1)
83
 
 
95
 
96
  chat_history.append({'role': 'user', 'content': user_text})
97
  chat_history.append({'role': 'assistant', 'content': response})
98
+
99
+ # Modify response based on sentiment analysis (e.g., offer help for negative sentiments)
100
+ if sentiment == "NEGATIVE":
101
+ response += "\nI'm sorry to hear that. How can I assist you further?"
102
+
103
+ follow_up = generate_follow_up(user_text)
104
+ chat_history.append({'role': 'assistant', 'content': follow_up})
105
+
106
+ return f"{response}\n\n{follow_up}", chat_history
107
 
108
  # Streamlit setup
109
  st.set_page_config(page_title="HuggingFace ChatBot", page_icon="🤗")
 
131
  for message in st.session_state.chat_history:
132
  st.chat_message(message["role"]).write(message["content"])
133
 
134
+
135
+
136
+
137
if st.button("Send"):
    if user_input:
        # get_response returns exactly two values: the assistant reply
        # (follow-up question already appended inside get_response) and
        # the updated chat history.  The previous code unpacked FOUR
        # names (response, follow_up, history, image_url), which raised
        # ValueError on every click; image_url is never produced by
        # get_response, so the image display was dead code and is gone.
        response, st.session_state.chat_history = get_response(
            system_message="You are a helpful AI assistant.",
            user_text=user_input,
            chat_history=st.session_state.chat_history
        )

        # Display response
        st.markdown(f"<div class='assistant-msg'><strong>HAL:</strong> {response}</div>", unsafe_allow_html=True)

        # Follow-up question suggestions (the model-generated follow-up
        # is part of `response` itself, so only the canned options are
        # offered here).
        follow_up_options = ["Explain differently", "Give me an example"]
        selected_option = st.radio("What would you like to do next?", follow_up_options)

        # NOTE(review): an st.button nested inside another st.button's
        # branch never fires in Streamlit -- the outer button's True
        # state lasts only one script rerun, so by the time "Continue"
        # is clicked this branch is no longer executing.  Confirm and
        # consider driving this flow via st.session_state instead.
        if st.button("Continue"):
            if selected_option:
                response, st.session_state.chat_history = get_response(
                    system_message="You are a helpful AI assistant.",
                    user_text=selected_option,
                    chat_history=st.session_state.chat_history
                )
                st.markdown(f"<div class='assistant-msg'><strong>HAL:</strong> {response}</div>", unsafe_allow_html=True)