CCockrum committed on
Commit
304d58b
·
verified ·
1 Parent(s): 1755fdf

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +36 -2
app.py CHANGED
@@ -34,7 +34,7 @@ if "follow_up" not in st.session_state:
34
  st.session_state.follow_up = ""
35
 
36
  # ✅ Initialize Hugging Face Model (CPU/GPU Compatible)
37
- def get_llm_hf_inference(model_id="meta-llama/Llama-2-7b-chat-hf", max_new_tokens=800, temperature=0.6):
38
  return HuggingFaceEndpoint(
39
  repo_id=model_id,
40
  max_new_tokens=max_new_tokens,
@@ -130,7 +130,7 @@ def get_response(system_message, user_text, max_new_tokens=800):
130
  formatted_chat_history = "\n".join(f"{msg['role']}: {msg['content']}" for msg in chat_history)
131
 
132
  # ✅ Invoke Hugging Face Model
133
- hf = get_llm_hf_inference(max_new_tokens=max_new_tokens, temperature=0.7)
134
 
135
  prompt = PromptTemplate.from_template(
136
  "[INST] You are a helpful AI assistant.\n\nCurrent Conversation:\n{chat_history}\n\n"
@@ -162,6 +162,40 @@ def get_response(system_message, user_text, max_new_tokens=800):
162
  # ✅ Streamlit UI
163
  st.title("🚀 HAL - NASA AI Assistant")
164
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
165
  # ✅ Display Chat History
166
  for message in st.session_state.chat_history:
167
  st.markdown(f"**{message['role'].capitalize()}**: {message['content']}")
 
34
  st.session_state.follow_up = ""
35
 
36
  # ✅ Initialize Hugging Face Model (CPU/GPU Compatible)
37
+ def get_llm_hf_inference(model_id="meta-llama/Llama-2-7b-chat-hf", max_new_tokens=800, temperature=0.3):
38
  return HuggingFaceEndpoint(
39
  repo_id=model_id,
40
  max_new_tokens=max_new_tokens,
 
130
  formatted_chat_history = "\n".join(f"{msg['role']}: {msg['content']}" for msg in chat_history)
131
 
132
  # ✅ Invoke Hugging Face Model
133
+ hf = get_llm_hf_inference(max_new_tokens=max_new_tokens, temperature=0.3)
134
 
135
  prompt = PromptTemplate.from_template(
136
  "[INST] You are a helpful AI assistant.\n\nCurrent Conversation:\n{chat_history}\n\n"
 
162
  # ✅ Streamlit UI
163
  st.title("🚀 HAL - NASA AI Assistant")
164
 
165
+ # ✅ Justify all chatbot responses
166
+ st.markdown("""
167
+ <style>
168
+ .user-msg {
169
+ background-color: #696969;
170
+ color: white;
171
+ padding: 10px;
172
+ border-radius: 10px;
173
+ margin-bottom: 5px;
174
+ width: fit-content;
175
+ max-width: 80%;
176
+ text-align: justify; /* ✅ Justify text */
177
+ }
178
+ .assistant-msg {
179
+ background-color: #333333;
180
+ color: white;
181
+ padding: 10px;
182
+ border-radius: 10px;
183
+ margin-bottom: 5px;
184
+ width: fit-content;
185
+ max-width: 80%;
186
+ text-align: justify; /* ✅ Justify text */
187
+ }
188
+ .container {
189
+ display: flex;
190
+ flex-direction: column;
191
+ align-items: flex-start;
192
+ }
193
+ @media (max-width: 600px) {
194
+ .user-msg, .assistant-msg { font-size: 16px; max-width: 100%; }
195
+ }
196
+ </style>
197
+ """, unsafe_allow_html=True)
198
+
199
  # ✅ Display Chat History
200
  for message in st.session_state.chat_history:
201
  st.markdown(f"**{message['role'].capitalize()}**: {message['content']}")