CCockrum committed
Commit fdf3b40 · verified · 1 Parent(s): 1fd6803

Update app.py

Files changed (1):
1. app.py (+17 -20)
app.py CHANGED
@@ -23,16 +23,13 @@ st.set_page_config(page_title="HAL - NASA ChatBot", page_icon="🚀")
 # --- Initialize Session State Variables ---
 if "chat_history" not in st.session_state:
     st.session_state.chat_history = [{"role": "assistant", "content": "Hello! How can I assist you today?"}]
-
 if "response_ready" not in st.session_state:
     st.session_state.response_ready = False
-
 if "follow_up" not in st.session_state:
     st.session_state.follow_up = ""

 # --- Set Up Model & API Functions ---
 model_id = "mistralai/Mistral-7B-Instruct-v0.3"
-
 sentiment_analyzer = pipeline(
     "sentiment-analysis",
     model="distilbert/distilbert-base-uncased-finetuned-sst-2-english",
@@ -68,35 +65,32 @@ def predict_action(user_text):

 def generate_follow_up(user_text):
     """
-    Generates varied follow-up questions for the given user input.
-    The prompt instructs the LLM to produce two variants, and one is selected randomly.
+    Generates two variant follow-up questions and randomly selects one.
+    It also cleans up any unwanted quotation marks or extra meta commentary.
     """
     prompt_text = (
         f"Based on the user's question: '{user_text}', generate two concise, friendly follow-up questions "
-        "that are relevant to the topic. One should ask something like, "
-        "'Would you like to know more about the six types of quarks?' and the other should ask, "
-        "'Would you like to explore something else?' Do not include any extra commentary or meta instructions."
+        "that invite further discussion. For example, one might be 'Would you like to know more about the six types of quarks?' "
+        "and another might be 'Would you like to explore another aspect of quantum physics?' Do not include extra commentary."
     )
     hf = get_llm_hf_inference(max_new_tokens=80, temperature=0.9)
     output = hf.invoke(input=prompt_text).strip()
-    # Split the output into separate lines if the model returns multiple variants.
     variants = re.split(r"\n|[;]+", output)
-    # Clean up any extraneous quotes or unwanted text.
     cleaned = [v.strip(' "\'') for v in variants if v.strip()]
-    # If no valid variants are found, provide a default fallback.
     if not cleaned:
         cleaned = ["Would you like to explore this topic further?"]
     return random.choice(cleaned)

 def get_response(system_message, chat_history, user_text, max_new_tokens=256):
     """
-    Generates HAL's response with a friendly, conversational tone.
-    Incorporates sentiment analysis and always generates a follow-up question with variation.
+    Generates HAL's answer with depth and a follow-up question.
+    The prompt instructs the model to provide a detailed explanation and then generate a follow-up.
+    If the answer comes back empty, a fallback answer is used.
     """
     sentiment = analyze_sentiment(user_text)
     action = predict_action(user_text)

-    # Check for style instructions in the user's text (e.g., "in the voice of an astrophysicist")
+    # Extract style instruction if present
     style_instruction = ""
     lower_text = user_text.lower()
     if "in the voice of" in lower_text or "speaking as" in lower_text:
@@ -115,7 +109,6 @@ def get_response(system_message, chat_history, user_text, max_new_tokens=256):
         return response, follow_up, chat_history, nasa_url

     hf = get_llm_hf_inference(max_new_tokens=max_new_tokens, temperature=0.9)
-
     filtered_history = ""
     for message in chat_history:
         if message["role"] == "assistant" and message["content"].strip() == "Hello! How can I assist you today?":
@@ -124,21 +117,27 @@ def get_response(system_message, chat_history, user_text, max_new_tokens=256):

     style_clause = style_instruction if style_instruction else ""

+    # Instruct the model to generate a detailed, in-depth answer.
     prompt = PromptTemplate.from_template(
         (
             "[INST] {system_message}\n\nCurrent Conversation:\n{chat_history}\n\n"
             "User: {user_text}.\n [/INST]\n"
-            "AI: Please answer the user's question without repeating previous greetings. "
-            "Keep your response friendly and conversational, starting with a phrase like "
-            "'Certainly!', 'Of course!', or 'Great question!'." + style_clause +
+            "AI: Please provide a detailed explanation in depth. "
+            "Ensure your response covers the topic thoroughly and is written in a friendly, conversational style, "
+            "starting with a phrase like 'Certainly!', 'Of course!', or 'Great question!'." + style_clause +
             "\nHAL:"
         )
     )

     chat = prompt | hf.bind(skip_prompt=True) | StrOutputParser(output_key='content')
     response = chat.invoke(input=dict(system_message=system_message, user_text=user_text, chat_history=filtered_history))
+    # Remove any extra markers if present.
     response = response.split("HAL:")[-1].strip()

+    # Fallback in case the generated answer is empty
+    if not response:
+        response = "Certainly, here is an in-depth explanation: [Fallback explanation]."
+
     chat_history.append({'role': 'user', 'content': user_text})
     chat_history.append({'role': 'assistant', 'content': response})

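Note: the marker-stripping and empty-response fallback added above are easy to check in isolation. A minimal sketch (the `raw` string is fabricated for illustration):

    # Keep only the text after the last "HAL:" marker; fall back if empty.
    raw = "[INST] ... [/INST]\nHAL: Certainly! Quarks come in six flavors: up, down, charm, strange, top, and bottom."
    response = raw.split("HAL:")[-1].strip()
    if not response:
        response = "Certainly, here is an in-depth explanation: [Fallback explanation]."
    print(response)
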
@@ -200,10 +199,8 @@ if user_input:
         user_text=user_input,
         chat_history=st.session_state.chat_history
     )
-
     if image_url:
         st.image(image_url, caption="NASA Image of the Day")
-
     st.session_state.follow_up = follow_up
     st.session_state.response_ready = True

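Note: this hunk hands off to the `response_ready`/`follow_up` session-state flags initialized at the top of the file. A minimal sketch of that rerun pattern, assuming `streamlit` is installed (the final `st.write` is illustrative, not the app's actual UI):

    import streamlit as st

    if "response_ready" not in st.session_state:
        st.session_state.response_ready = False
    if "follow_up" not in st.session_state:
        st.session_state.follow_up = ""

    # After generating a response, stash the follow-up and set the flag
    # so the next Streamlit rerun can render it.
    st.session_state.follow_up = "Would you like to explore this topic further?"
    st.session_state.response_ready = True

    if st.session_state.response_ready:
        st.write(st.session_state.follow_up)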