CCockrum committed on
Commit ffc1c71 · verified
1 Parent(s): 75d0129

Update app.py

Files changed (1)
  1. app.py +0 -134
app.py CHANGED
@@ -25,63 +25,6 @@ if NASA_API_KEY is None:
 # ✅ Set Up Streamlit
 st.set_page_config(page_title="HAL - NASA ChatBot", page_icon="🚀")

-# ✅ Initialize Session State Variables (Ensuring Chat History Persists)
-if "chat_history" not in st.session_state:
-    st.session_state.chat_history = [{"role": "assistant", "content": "Hello! How can I assist you today?"}]
-if "response_ready" not in st.session_state:
-    st.session_state.response_ready = False
-
-# ✅ Initialize Hugging Face Model (Explicitly Set to CPU/GPU)
-def get_llm_hf_inference(model_id="meta-llama/Llama-2-7b-chat-hf", max_new_tokens=800, temperature=0.3):
-    return HuggingFaceEndpoint(
-        repo_id=model_id,
-        max_new_tokens=max_new_tokens,
-        temperature=temperature,  # 🔥 Lowered temperature for more factual and structured responses
-        token=HF_TOKEN,
-        task="text-generation",
-        device=-1 if device == "cpu" else 0  # ✅ Force CPU (-1) or GPU (0)
-    )
-
-# ✅ Ensure English Responses
-def ensure_english(text):
-    try:
-        detected_lang = detect(text)
-        if detected_lang != "en":
-            return "⚠️ Sorry, I only respond in English. Can you rephrase your question?"
-    except:
-        return "⚠️ Language detection failed. Please ask your question again."
-    return text
-
-# ✅ Main Response Function (Fixing Repetition & Context)
-def get_response(system_message, chat_history, user_text, max_new_tokens=800):
-    # ✅ Ensure conversation history is included correctly
-import os
-import re
-import requests
-import torch
-import streamlit as st
-from langchain_huggingface import HuggingFaceEndpoint
-from langchain_core.prompts import PromptTemplate
-from langchain_core.output_parsers import StrOutputParser
-from transformers import pipeline
-from langdetect import detect  # Ensure this package is installed
-
-# ✅ Check for GPU or Default to CPU
-device = "cuda" if torch.cuda.is_available() else "cpu"
-print(f"✅ Using device: {device}")  # Debugging info
-
-# ✅ Environment Variables
-HF_TOKEN = os.getenv("HF_TOKEN")
-if HF_TOKEN is None:
-    raise ValueError("HF_TOKEN is not set. Please add it to your environment variables.")
-
-NASA_API_KEY = os.getenv("NASA_API_KEY")
-if NASA_API_KEY is None:
-    raise ValueError("NASA_API_KEY is not set. Please add it to your environment variables.")
-
-# ✅ Set Up Streamlit
-st.set_page_config(page_title="HAL - NASA ChatBot", page_icon="🚀")
-
 # ✅ Initialize Session State Variables (Ensuring Chat History Persists)
 if "chat_history" not in st.session_state:
     st.session_state.chat_history = [{"role": "assistant", "content": "Hello! How can I assist you today?"}]
@@ -196,80 +139,3 @@ for message in st.session_state.chat_history:
 st.markdown("</div>", unsafe_allow_html=True)


-    prompt = PromptTemplate.from_template(
-        "[INST] You are a knowledgeable and formal AI assistant. Please provide detailed, structured answers "
-        "without unnecessary enthusiasm or emojis.\n\n"
-        "Ensure responses are structured and non-repetitive."
-        "\nPrevious Conversation:\n{chat_history}\n\n"
-        "Current Conversation:\n{chat_history}\n\n"
-        "User: {user_text}.\n [/INST]\n"
-        "AI: Provide a structured and informative response while maintaining a neutral and professional tone."
-        "Ensure your response is engaging yet clear."
-        "\nHAL:"
-    )
-
-    # ✅ Invoke Hugging Face Model
-    hf = get_llm_hf_inference(max_new_tokens=max_new_tokens, temperature=0.3)  # 🔥 Lowered temperature
-    chat = prompt | hf.bind(skip_prompt=True) | StrOutputParser(output_key='content')
-
-    response = chat.invoke(input=dict(system_message=system_message, user_text=user_text, chat_history=filtered_history))
-    response = response.split("HAL:")[-1].strip() if "HAL:" in response else response.strip()
-
-    response = ensure_english(response)
-
-    if not response:
-        response = "I'm sorry, but I couldn't generate a response. Can you rephrase your question?"
-
-    # ✅ Preserve conversation history
-    st.session_state.chat_history.append({'role': 'user', 'content': user_text})
-    st.session_state.chat_history.append({'role': 'assistant', 'content': response})
-
-    return response, st.session_state.chat_history
-
-# ✅ Streamlit UI
-st.title("🚀 HAL - NASA AI Assistant")
-
-# ✅ Justify all chatbot responses
-st.markdown("""
-    <style>
-    .user-msg, .assistant-msg {
-        padding: 11px;
-        border-radius: 10px;
-        margin-bottom: 5px;
-        width: fit-content;
-        max-width: 80%;
-        text-align: justify;
-    }
-    .user-msg { background-color: #696969; color: white; }
-    .assistant-msg { background-color: #333333; color: white; }
-    .container { display: flex; flex-direction: column; align-items: flex-start; }
-    @media (max-width: 600px) { .user-msg, .assistant-msg { font-size: 16px; max-width: 100%; } }
-    </style>
-""", unsafe_allow_html=True)
-
-# ✅ Reset Chat Button
-if st.sidebar.button("Reset Chat"):
-    st.session_state.chat_history = [{"role": "assistant", "content": "Hello! How can I assist you today?"}]
-    st.session_state.response_ready = False
-
-# ✅ Chat UI
-user_input = st.chat_input("Type your message here...")
-
-if user_input:
-    response, st.session_state.chat_history = get_response(
-        system_message="You are a helpful AI assistant.",
-        user_text=user_input,
-        chat_history=st.session_state.chat_history
-    )
-
-    if response:
-        st.markdown(f"<div class='assistant-msg'><strong>HAL:</strong> {response}</div>", unsafe_allow_html=True)
-
-# ✅ Display chat history
-st.markdown("<div class='container'>", unsafe_allow_html=True)
-for message in st.session_state.chat_history:
-    if message["role"] == "user":
-        st.markdown(f"<div class='user-msg'><strong>You:</strong> {message['content']}</div>", unsafe_allow_html=True)
-    else:
-        st.markdown(f"<div class='assistant-msg'><strong>HAL:</strong> {message['content']}</div>", unsafe_allow_html=True)
-st.markdown("</div>", unsafe_allow_html=True)
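The deleted block above duplicated the model-wiring pattern that presumably remains earlier in app.py: a PromptTemplate piped into a HuggingFaceEndpoint and then a string output parser. For reference, here is a minimal standalone sketch of that chain using the model id and generation settings visible in the diff; the huggingfacehub_api_token parameter name and the example invocation are illustrative assumptions, not code taken from this repository.

import os

from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_huggingface import HuggingFaceEndpoint

# Same environment variable the app validates at startup.
HF_TOKEN = os.getenv("HF_TOKEN")

# Llama-2 chat-style prompt, reduced to the fields the removed template used.
prompt = PromptTemplate.from_template(
    "[INST] You are a knowledgeable and formal AI assistant.\n"
    "Previous Conversation:\n{chat_history}\n\n"
    "User: {user_text} [/INST]\nHAL:"
)

# Endpoint settings mirror the removed get_llm_hf_inference() defaults.
llm = HuggingFaceEndpoint(
    repo_id="meta-llama/Llama-2-7b-chat-hf",
    task="text-generation",
    max_new_tokens=800,
    temperature=0.3,
    huggingfacehub_api_token=HF_TOKEN,  # assumed parameter name; the diff passed token=
)

# LCEL chain: fill the template, call the hosted model, return plain text.
chat = prompt | llm | StrOutputParser()

if __name__ == "__main__":
    reply = chat.invoke({"chat_history": "", "user_text": "What does the NASA APOD API return?"})
    # Keep only the assistant turn, as the removed code did with split("HAL:").
    print(reply.split("HAL:")[-1].strip())

Note that meta-llama/Llama-2-7b-chat-hf is a gated repository, so the HF_TOKEN used here must belong to an account with access to that model.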
 