CCockrum committed
Commit 8e81487 · verified · 1 Parent(s): 646ae18

Update app.py

Files changed (1):
  1. app.py +11 -26
app.py CHANGED
@@ -26,7 +26,7 @@ model_id = "mistralai/Mistral-7B-Instruct-v0.3"
 # Initialize sentiment analysis pipeline
 sentiment_analyzer = pipeline("sentiment-analysis")
 
-def get_llm_hf_inference(model_id=model_id, max_new_tokens=128, temperature=0.7):
+def get_llm_hf_inference(model_id=model_id, max_new_tokens=128, temperature=0.1):
     return HuggingFaceEndpoint(
         repo_id=model_id,
         max_new_tokens=max_new_tokens,
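This hunk only lowers the default sampling temperature from 0.7 to 0.1, which makes the helper's completions close to deterministic. The hunk cuts off inside the `HuggingFaceEndpoint` constructor, so as a minimal sketch of how the helper presumably reads after the change (the import path and the `temperature` pass-through are assumptions, not part of this commit):

```python
# Sketch only: the diff truncates the constructor, so the import and the
# temperature pass-through below are assumptions.
from langchain_huggingface import HuggingFaceEndpoint

def get_llm_hf_inference(model_id="mistralai/Mistral-7B-Instruct-v0.3",
                         max_new_tokens=128, temperature=0.1):
    return HuggingFaceEndpoint(
        repo_id=model_id,
        max_new_tokens=max_new_tokens,
        temperature=temperature,  # 0.1 -> near-deterministic completions
    )
```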
@@ -53,23 +53,14 @@ def predict_action(user_text):
     return "general_query"
 
 def generate_follow_up(user_text):
-    """
-    Generates a concise and conversational follow-up question related to the user's input.
-    """
     prompt_text = (
-        f"Given the user's question: '{user_text}', generate a SHORT and SIMPLE follow-up question. "
-        "Make it conversational and friendly. Example: "
-        "'Would you like to learn more about the six types of quarks?' "
-        "Do NOT provide long explanations—just ask a friendly follow-up question."
+        f"Based on the user's message: '{user_text}', suggest a natural follow-up question "
+        "to keep the conversation engaging."
     )
-
-    hf = get_llm_hf_inference(max_new_tokens=32, temperature=0.7)
+    hf = get_llm_hf_inference(max_new_tokens=64, temperature=0.7)
     return hf.invoke(input=prompt_text).strip()
 
 def get_response(system_message, chat_history, user_text, max_new_tokens=256):
-    """
-    Generates HAL's response, making it more conversational and engaging.
-    """
     sentiment = analyze_sentiment(user_text)
     action = predict_action(user_text)
 
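Net effect of this hunk: both docstrings are dropped, the follow-up prompt becomes one generic instruction, and the token budget doubles from 32 to 64 while the temperature stays at 0.7, so follow-ups remain varied. Reassembled from the hunk for readability, `generate_follow_up` now reads roughly as:

```python
# Reassembled from the hunk above for readability; not new behavior.
def generate_follow_up(user_text):
    prompt_text = (
        f"Based on the user's message: '{user_text}', suggest a natural follow-up question "
        "to keep the conversation engaging."
    )
    hf = get_llm_hf_inference(max_new_tokens=64, temperature=0.7)
    return hf.invoke(input=prompt_text).strip()
```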
 
@@ -83,29 +74,20 @@ def get_response(system_message, chat_history, user_text, max_new_tokens=256):
         chat_history.append({'role': 'assistant', 'content': follow_up})
         return response, follow_up, chat_history, nasa_url
 
-    hf = get_llm_hf_inference(max_new_tokens=max_new_tokens, temperature=0.9)
+    hf = get_llm_hf_inference(max_new_tokens=max_new_tokens, temperature=0.1)
 
     prompt = PromptTemplate.from_template(
-        (
-            "[INST] {system_message}"
-            "\nCurrent Conversation:\n{chat_history}\n\n"
-            "\nUser: {user_text}.\n [/INST]"
-            "\nAI: Keep responses conversational and engaging. Start with a friendly phrase like "
-            "'Certainly!', 'Of course!', or 'Great question!' before answering."
-            " Keep responses concise but engaging."
-            "\nHAL:"
-        )
+        "[INST] {system_message}\n\nCurrent Conversation:\n{chat_history}\n\nUser: {user_text}.\n [/INST]\nAI:"
     )
-
     chat = prompt | hf.bind(skip_prompt=True) | StrOutputParser(output_key='content')
     response = chat.invoke(input=dict(system_message=system_message, user_text=user_text, chat_history=chat_history))
-    response = response.split("HAL:")[-1].strip()
+    response = response.split("AI:")[-1]
 
     chat_history.append({'role': 'user', 'content': user_text})
     chat_history.append({'role': 'assistant', 'content': response})
 
     if sentiment == "NEGATIVE":
-        response = "I'm here to help. Let me know what I can do for you. 😊"
+        response += "\n😞 I'm sorry to hear that. How can I assist you further?"
 
     follow_up = generate_follow_up(user_text)
     chat_history.append({'role': 'assistant', 'content': follow_up})
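Two behavioral points here beyond the prompt simplification: the main answer is now sampled at temperature 0.1 instead of 0.9, and the post-processing switches from splitting on "HAL:" to "AI:" while dropping `.strip()`. A small illustration of the second point on a hypothetical raw completion:

```python
# Hypothetical raw model output, used only to illustrate the new split;
# without .strip() (removed in this commit) leading whitespace survives.
raw = "[INST] ... [/INST]\nAI: Certainly! Quarks come in six flavors."
response = raw.split("AI:")[-1]
print(repr(response))  # ' Certainly! Quarks come in six flavors.' (leading space kept)
```

The negative-sentiment branch also changes from replacing the whole answer with a canned line to appending an empathetic sentence, so the substantive answer is no longer discarded.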
@@ -174,11 +156,14 @@ if user_input:
         chat_history=st.session_state.chat_history
     )
 
+    # Display HAL's response
     st.markdown(f"<div class='assistant-msg'><strong>HAL:</strong> {response}</div>", unsafe_allow_html=True)
 
+    # Display NASA image if available
    if image_url:
         st.image(image_url, caption="NASA Image of the Day")
 
+    # Store follow-up question in session state
     st.session_state.follow_up = follow_up
     st.session_state.response_ready = True  # Enables follow-up response cycle
 
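This hunk only adds comments. For context, the `follow_up` / `response_ready` pair suggests a two-phase render in which the flag gates showing the stored follow-up on the next Streamlit pass; a minimal sketch of that cycle, assuming wiring that sits outside this diff:

```python
# Assumed wiring: the rest of the Streamlit script is not part of this diff.
import streamlit as st

if st.session_state.get("response_ready"):
    st.markdown(
        f"<div class='assistant-msg'><strong>HAL:</strong> {st.session_state.follow_up}</div>",
        unsafe_allow_html=True,
    )
    st.session_state.response_ready = False  # consume the flag after one display
```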
 
 