CCockrum committed · verified
Commit 1084147 · 1 Parent(s): 2994700

Update app.py

Files changed (1):
  1. app.py +8 -16
app.py CHANGED
@@ -28,8 +28,6 @@ st.set_page_config(page_title="HAL - NASA ChatBot", page_icon="🚀")
 # ✅ Initialize Session State Variables (Ensuring Chat History Persists)
 if "chat_history" not in st.session_state:
     st.session_state.chat_history = [{"role": "assistant", "content": "Hello! How can I assist you today?"}]
-if "response_ready" not in st.session_state:
-    st.session_state.response_ready = False

 # ✅ Initialize Hugging Face Model (Explicitly Set to CPU/GPU)
 def get_llm_hf_inference(model_id="meta-llama/Llama-2-7b-chat-hf", max_new_tokens=800, temperature=0.3):
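
The deleted `response_ready` flag was dead state: Streamlit re-runs the whole script on every interaction, and `st.session_state` is what persists across those reruns, so the guarded `chat_history` assignment alone is enough. A minimal standalone sketch of the pattern (hypothetical app, not this repo's code):

    import streamlit as st

    # st.session_state survives Streamlit's top-to-bottom reruns,
    # so this guarded assignment fires only on the first load.
    if "chat_history" not in st.session_state:
        st.session_state.chat_history = [
            {"role": "assistant", "content": "Hello! How can I assist you today?"}
        ]

    st.write(f"History holds {len(st.session_state.chat_history)} message(s)")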
@@ -58,7 +56,7 @@ def get_response(system_message, chat_history, user_text, max_new_tokens=800):
     filtered_history = "\n".join(
         f"{msg['role'].capitalize()}: {msg['content']}"
         for msg in chat_history[-5:]  # ✅ Only keep the last 5 exchanges to prevent overflow
-)
+    )

     prompt = PromptTemplate.from_template(
         "[INST] You are a highly knowledgeable AI assistant. Answer concisely, avoid repetition, and structure responses well."
@@ -66,7 +64,6 @@ def get_response(system_message, chat_history, user_text, max_new_tokens=800):
         "\nLATEST USER INPUT:\nUser: {user_text}\n"
         "\n[END CONTEXT]\n"
         "Assistant:"
-
     )

     # ✅ Invoke Hugging Face Model
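
The two hunks above appear to be cosmetic: the closing parenthesis of `filtered_history` is re-indented and a stray blank line inside the template is dropped. For orientation, a rough sketch of how the trimmed history feeds `PromptTemplate` (assumes `langchain-core`; the template text is illustrative, not the app's exact prompt):

    from langchain_core.prompts import PromptTemplate

    chat_history = [
        {"role": "user", "content": "What is Artemis?"},
        {"role": "assistant", "content": "NASA's lunar exploration program."},
    ]

    # Same trimming idea as in get_response: keep only recent exchanges.
    filtered_history = "\n".join(
        f"{msg['role'].capitalize()}: {msg['content']}"
        for msg in chat_history[-5:]
    )

    prompt = PromptTemplate.from_template(
        "[INST] {system_message}\nCONTEXT:\n{chat_history}\n"
        "User: {user_text}\n[/INST] Assistant:"
    )
    print(prompt.format(
        system_message="You are a helpful AI assistant.",
        chat_history=filtered_history,
        user_text="When does it launch?",
    ))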
@@ -74,21 +71,20 @@ def get_response(system_message, chat_history, user_text, max_new_tokens=800):
     chat = prompt | hf.bind(skip_prompt=True) | StrOutputParser(output_key='content')

     response = chat.invoke(input=dict(system_message=system_message, user_text=user_text, chat_history=filtered_history))
+
+    # Clean up the response - remove any "HAL:" prefix if present
     response = response.split("HAL:")[-1].strip() if "HAL:" in response else response.strip()
-
     response = ensure_english(response)

     if not response:
         response = "I'm sorry, but I couldn't generate a response. Can you rephrase your question?"

-    # ✅ Preserve conversation history
+    # ✅ Update conversation history
     chat_history.append({'role': 'user', 'content': user_text})
     chat_history.append({'role': 'assistant', 'content': response})

     # ✅ Keep only last 10 exchanges to prevent unnecessary repetition
-    st.session_state.chat_history = chat_history[-10:]
-
-    return response, st.session_state.chat_history
+    return response, chat_history[-10:]

 # ✅ Streamlit UI
 st.title("🚀 HAL - NASA AI Assistant")
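
The substantive change in this hunk is the return contract: `get_response` previously wrote `st.session_state.chat_history` itself and returned the session copy, while now it returns the trimmed history and leaves the assignment to the caller, keeping the helper free of Streamlit side effects. A reduced sketch of that contract (plain Python; the model call is stubbed out):

    def get_response(system_message, chat_history, user_text):
        # Stand-in for the real prompt | model | parser chain.
        response = f"(stubbed reply to: {user_text})"

        chat_history.append({"role": "user", "content": user_text})
        chat_history.append({"role": "assistant", "content": response})

        # Hand back the trimmed history; the caller owns where it is stored.
        return response, chat_history[-10:]

    history = []
    reply, history = get_response("You are helpful.", history, "Hello")
    print(reply, len(history))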
@@ -115,22 +111,18 @@ st.markdown("""
 user_input = st.chat_input("Type your message here...")

 if user_input:
+    # Get response and update chat history
     response, st.session_state.chat_history = get_response(
         system_message="You are a helpful AI assistant.",
         user_text=user_input,
         chat_history=st.session_state.chat_history
     )

-    if response:
-        st.markdown(f"<div class='assistant-msg'><strong>HAL:</strong> {response}</div>", unsafe_allow_html=True)
-
-# ✅ Display chat history
+# ✅ Display chat history (ONLY display from history, not separately)
 st.markdown("<div class='container'>", unsafe_allow_html=True)
 for message in st.session_state.chat_history:
     if message["role"] == "user":
         st.markdown(f"<div class='user-msg'><strong>You:</strong> {message['content']}</div>", unsafe_allow_html=True)
     else:
         st.markdown(f"<div class='assistant-msg'><strong>HAL:</strong> {message['content']}</div>", unsafe_allow_html=True)
-st.markdown("</div>", unsafe_allow_html=True)
-
-
+st.markdown("</div>", unsafe_allow_html=True)
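
The last hunk removes a double render: the old code printed `response` directly and then printed it again from history, whereas the new code renders exclusively from `st.session_state.chat_history`. An isolated sketch of the render loop (CSS class names as in the app; `unsafe_allow_html=True` because messages are styled with raw divs):

    import streamlit as st

    if "chat_history" not in st.session_state:
        st.session_state.chat_history = []

    # Render only from history; echoing the latest `response` separately
    # would display the newest assistant message twice.
    for message in st.session_state.chat_history:
        is_user = message["role"] == "user"
        css, who = ("user-msg", "You") if is_user else ("assistant-msg", "HAL")
        st.markdown(
            f"<div class='{css}'><strong>{who}:</strong> {message['content']}</div>",
            unsafe_allow_html=True,
        )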
 