CCockrum committed
Commit f7f1088 · verified · Parent: fb983b2

Update app.py

Files changed (1): app.py (+6, -18)
app.py CHANGED
@@ -18,7 +18,6 @@ def load_spacy_model():
     try:
         return spacy.load("en_core_web_sm")
     except OSError:
-        st.warning("Downloading spaCy model en_core_web_sm... This may take a moment.")
         subprocess.run(["python", "-m", "spacy", "download", "en_core_web_sm"], check=True)
         return spacy.load("en_core_web_sm")
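(For context: with the st.warning call gone, the surviving helper reduces to the usual download-on-miss pattern. This sketch is assembled from the hunk's own lines; the imports are assumptions, since the diff shows only the function body.)

    import spacy
    import subprocess

    def load_spacy_model():
        try:
            return spacy.load("en_core_web_sm")
        except OSError:
            # First run on a fresh container: fetch the model once, then load it.
            subprocess.run(["python", "-m", "spacy", "download", "en_core_web_sm"], check=True)
            return spacy.load("en_core_web_sm")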
@@ -110,9 +109,9 @@ def generate_follow_up(user_text):
     Generates two variant follow-up questions and randomly selects one.
     """
     prompt_text = (
-        f"Based on the user's question: '{user_text}', generate two concise, friendly follow-up questions that invite further discussion. "
-        "For example, one might be 'Would you like to know more about the six types of quarks?' and another 'Would you like to explore another aspect of quantum physics?'. "
-        "Answer exclusively in English, and do not include extra commentary."
+        f"Based on the user's question: '{user_text}', generate two concise, friendly follow-up questions "
+        "that invite further discussion. For example, one might be 'Would you like to know more about the six types of quarks?' "
+        "and another 'Would you like to explore another aspect of quantum physics?'. Do not include extra commentary."
     )
     hf = get_llm_hf_inference(max_new_tokens=80, temperature=0.9)
     output = hf.invoke(input=prompt_text).strip()
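(The docstring promises two variants with a random pick, but the selection step falls outside this hunk. A hypothetical sketch of it, assuming the model returns one question per line:)

    import random

    def pick_one(output: str) -> str:
        # Split the model output into non-empty lines and strip list markers.
        candidates = [line.strip("-•1234567890. ").strip() for line in output.splitlines() if line.strip()]
        # Randomly select one candidate; fall back to the raw output if parsing failed.
        return random.choice(candidates) if candidates else output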
@@ -124,17 +123,15 @@ def generate_follow_up(user_text):
 
 def get_response(system_message, chat_history, user_text, max_new_tokens=1024):
     """
-    Generates HAL's detailed, in-depth response and a follow-up question.
-    It incorporates sentiment analysis, additional NLP context, and style instructions.
+    Generates HAL's detailed, in-depth answer and a follow-up question.
+    Incorporates sentiment analysis, additional NLP context, and style instructions.
     """
     sentiment = analyze_sentiment(user_text)
     action = predict_action(user_text)
 
-    # Extract extra context (e.g., named entities)
     context_info = extract_context(user_text)
     context_clause = f" The key topics here are: {context_info}." if context_info else ""
 
-    # Extract style instruction if provided.
     style_instruction = ""
     lower_text = user_text.lower()
     if "in the voice of" in lower_text or "speaking as" in lower_text:
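(extract_context itself is outside the diff; the removed comment's hint about named entities suggests a spaCy NER helper roughly along these lines. Purely a hypothetical reconstruction:)

    def extract_context(user_text: str) -> str:
        # Run the spaCy pipeline and collect unique entity mentions as key topics.
        doc = load_spacy_model()(user_text)
        return ", ".join(sorted({ent.text for ent in doc.ents}))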
@@ -143,7 +140,6 @@ def get_response(system_message, chat_history, user_text, max_new_tokens=1024):
         style_instruction = match.group(2).strip().capitalize()
         style_instruction = f" Please respond in the voice of {style_instruction}."
 
-    # Force output in English.
     language_clause = " Answer exclusively in English."
 
     if action == "nasa_info":
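(The match object consumed by match.group(2) is assigned between the two hunks and never shown. A regex consistent with the trigger phrases and the group(2) access might look like this; the exact pattern is an assumption:)

    import re

    match = re.search(r"(in the voice of|speaking as)\s+([^.,!?\n]+)", lower_text)
    if match:
        # group(1) is the trigger phrase, group(2) the requested persona.
        style_instruction = f" Please respond in the voice of {match.group(2).strip().capitalize()}."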
@@ -174,15 +170,9 @@ def get_response(system_message, chat_history, user_text, max_new_tokens=1024):
         )
     )
 
-    st.write("DEBUG: Prompt sent to model:")
-    st.write(prompt.format(system_message=system_message, chat_history=filtered_history, user_text=user_text))
-
     chat = prompt | hf.bind(skip_prompt=True) | StrOutputParser(output_key='content')
     raw_output = chat.invoke(input=dict(system_message=system_message, user_text=user_text, chat_history=filtered_history))
-    st.write("DEBUG: Raw model output:")
-    st.write(raw_output)
-
-    response = raw_output  # Using the full raw output without splitting
+    response = raw_output.split("HAL:")[-1].strip()
     if not response:
         response = "Certainly, here is an in-depth explanation: [Fallback explanation]."
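(The replacement line works because str.split returns the whole string in a one-element list when the separator is absent, so [-1] is safe whether or not the model echoes a speaker tag:)

    >>> "HAL: Quarks come in six flavors.".split("HAL:")[-1].strip()
    'Quarks come in six flavors.'
    >>> "Quarks come in six flavors.".split("HAL:")[-1].strip()
    'Quarks come in six flavors.'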
@@ -196,8 +186,6 @@ def get_response(system_message, chat_history, user_text, max_new_tokens=1024):
     follow_up = generate_follow_up(user_text)
     chat_history.append({'role': 'assistant', 'content': follow_up})
 
-    st.write("DEBUG: Generated follow-up question:", follow_up)
-
     return response, follow_up, chat_history, None
 
 # --- Chat UI ---
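(A hypothetical call site in the Chat UI section, consuming the four-tuple returned above; the session-state key name is an assumption:)

    response, follow_up, chat_history, _ = get_response(
        system_message=system_message,
        chat_history=st.session_state.chat_history,
        user_text=user_text,
    )
    st.session_state.chat_history = chat_history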
 