Update app.py
app.py CHANGED
@@ -55,7 +55,146 @@ def ensure_english(text):
 # ✅ Main Response Function (Fixing Repetition & Context)
 def get_response(system_message, chat_history, user_text, max_new_tokens=800):
     # ✅ Ensure conversation history is included correctly
-
+import os
+import re
+import requests
+import torch
+import streamlit as st
+from langchain_huggingface import HuggingFaceEndpoint
+from langchain_core.prompts import PromptTemplate
+from langchain_core.output_parsers import StrOutputParser
+from transformers import pipeline
+from langdetect import detect  # Ensure this package is installed
+
+# ✅ Check for GPU or Default to CPU
+device = "cuda" if torch.cuda.is_available() else "cpu"
+print(f"✅ Using device: {device}")  # Debugging info
+
+# ✅ Environment Variables
+HF_TOKEN = os.getenv("HF_TOKEN")
+if HF_TOKEN is None:
+    raise ValueError("HF_TOKEN is not set. Please add it to your environment variables.")
+
+NASA_API_KEY = os.getenv("NASA_API_KEY")
+if NASA_API_KEY is None:
+    raise ValueError("NASA_API_KEY is not set. Please add it to your environment variables.")
+
+# ✅ Set Up Streamlit
+st.set_page_config(page_title="HAL - NASA ChatBot", page_icon="🚀")
+
+# ✅ Initialize Session State Variables (Ensuring Chat History Persists)
+if "chat_history" not in st.session_state:
+    st.session_state.chat_history = [{"role": "assistant", "content": "Hello! How can I assist you today?"}]
+if "response_ready" not in st.session_state:
+    st.session_state.response_ready = False
+
+# ✅ Initialize Hugging Face Model (Explicitly Set to CPU/GPU)
+def get_llm_hf_inference(model_id="meta-llama/Llama-2-7b-chat-hf", max_new_tokens=800, temperature=0.3):
+    return HuggingFaceEndpoint(
+        repo_id=model_id,
+        max_new_tokens=max_new_tokens,
+        temperature=temperature,  # 🔥 Lowered temperature for more factual and structured responses
+        token=HF_TOKEN,
+        task="text-generation",
+        device=-1 if device == "cpu" else 0  # ✅ Force CPU (-1) or GPU (0)
+    )
+
+# ✅ Ensure English Responses
+def ensure_english(text):
+    try:
+        detected_lang = detect(text)
+        if detected_lang != "en":
+            return "⚠️ Sorry, I only respond in English. Can you rephrase your question?"
+    except Exception:
+        return "⚠️ Language detection failed. Please ask your question again."
+    return text
+
+# ✅ Main Response Function (Fixing Repetition & Context)
+def get_response(system_message, chat_history, user_text, max_new_tokens=800):
+    # ✅ Ensure conversation history is included correctly
+    filtered_history = "\n".join(
+        f"{msg['role'].capitalize()}: {msg['content']}"
+        for msg in chat_history
+    )
+
+    prompt = PromptTemplate.from_template(
+        "[INST] You are a knowledgeable and formal AI assistant. Please provide detailed, structured answers "
+        "without repetition, unnecessary enthusiasm, or emojis.\n\n"
+        "Ensure responses are structured and non-repetitive.\n"
+        "Previous Conversation:\n{chat_history}\n\n"
+        "User: {user_text}\n[/INST]\n"
+        "AI: Provide a structured and informative response while maintaining a neutral and professional tone. "
+        "Ensure your response is engaging yet clear.\n"
+        "HAL:"
+    )
+
+    # ✅ Invoke Hugging Face Model
+    hf = get_llm_hf_inference(max_new_tokens=max_new_tokens, temperature=0.3)  # 🔥 Lowered temperature
+    chat = prompt | hf.bind(skip_prompt=True) | StrOutputParser(output_key='content')
+
+    response = chat.invoke(input=dict(system_message=system_message, user_text=user_text, chat_history=filtered_history))
+    response = response.split("HAL:")[-1].strip() if "HAL:" in response else response.strip()
+
+    response = ensure_english(response)
+
+    if not response:
+        response = "I'm sorry, but I couldn't generate a response. Can you rephrase your question?"
+
+    # ✅ Preserve conversation history
+    st.session_state.chat_history.append({'role': 'user', 'content': user_text})
+    st.session_state.chat_history.append({'role': 'assistant', 'content': response})
+
+    return response, st.session_state.chat_history
+
+# ✅ Streamlit UI
+st.title("🚀 HAL - NASA AI Assistant")
+
+# ✅ Justify all chatbot responses
+st.markdown("""
+    <style>
+        .user-msg, .assistant-msg {
+            padding: 11px;
+            border-radius: 10px;
+            margin-bottom: 5px;
+            width: fit-content;
+            max-width: 80%;
+            text-align: justify;
+        }
+        .user-msg { background-color: #696969; color: white; }
+        .assistant-msg { background-color: #333333; color: white; }
+        .container { display: flex; flex-direction: column; align-items: flex-start; }
+        @media (max-width: 600px) { .user-msg, .assistant-msg { font-size: 16px; max-width: 100%; } }
+    </style>
+""", unsafe_allow_html=True)
+
+# ✅ Reset Chat Button
+if st.sidebar.button("Reset Chat"):
+    st.session_state.chat_history = [{"role": "assistant", "content": "Hello! How can I assist you today?"}]
+    st.session_state.response_ready = False
+
+# ✅ Chat UI
+user_input = st.chat_input("Type your message here...")
+
+if user_input:
+    response, st.session_state.chat_history = get_response(
+        system_message="You are a helpful AI assistant.",
+        user_text=user_input,
+        chat_history=st.session_state.chat_history
+    )
+
+    if response:
+        st.markdown(f"<div class='assistant-msg'><strong>HAL:</strong> {response}</div>", unsafe_allow_html=True)
+
+# ✅ Display chat history
+st.markdown("<div class='container'>", unsafe_allow_html=True)
+for message in st.session_state.chat_history:
+    if message["role"] == "user":
+        st.markdown(f"<div class='user-msg'><strong>You:</strong> {message['content']}</div>", unsafe_allow_html=True)
+    else:
+        st.markdown(f"<div class='assistant-msg'><strong>HAL:</strong> {message['content']}</div>", unsafe_allow_html=True)
+st.markdown("</div>", unsafe_allow_html=True)
+
 
 prompt = PromptTemplate.from_template(
     "[INST] You are a knowledgeable and formal AI assistant. Please provide detailed, structured answers "
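
The chat-history guard added at the top of this diff matters because Streamlit reruns the entire script on every user interaction; the history survives only because it lives in st.session_state, and the `if ... not in st.session_state` check seeds it exactly once per session. A minimal sketch of that persistence pattern, reusing the commit's key name but with shortened placeholder messages:

import streamlit as st

# Created on the first run of the session only; later reruns skip this branch.
if "chat_history" not in st.session_state:
    st.session_state.chat_history = [{"role": "assistant", "content": "Hello!"}]

# Appends accumulate across reruns within the same browser session.
st.session_state.chat_history.append({"role": "user", "content": "hi"})
st.write(f"{len(st.session_state.chat_history)} messages so far")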
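
ensure_english leans on langdetect, which is probabilistic and can return different guesses across runs on short inputs; seeding DetectorFactory makes it deterministic. A quick sanity check of the guard's behavior (the sample strings are arbitrary):

from langdetect import DetectorFactory, detect

DetectorFactory.seed = 0  # langdetect is non-deterministic by default; seed for stable output

print(detect("Hello, how are you today?"))     # 'en' -> ensure_english passes the text through
print(detect("Bonjour, comment allez-vous ?"))  # 'fr' -> ensure_english returns the warning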
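
The core of get_response is a LangChain Expression Language (LCEL) pipeline: a PromptTemplate piped into an LLM and then into StrOutputParser. Below is a minimal sketch of that composition that runs without a Hugging Face token by substituting a RunnableLambda stub for the HuggingFaceEndpoint; the stub and its canned reply are illustrative assumptions, not part of the commit.

from langchain_core.prompts import PromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnableLambda

# Same placeholder names as the committed template, shortened for the sketch.
prompt = PromptTemplate.from_template(
    "[INST] Previous Conversation:\n{chat_history}\n\nUser: {user_text}\n[/INST]\nHAL:"
)

# Hypothetical stand-in for get_llm_hf_inference(); a real run would pipe
# the HuggingFaceEndpoint here instead.
fake_llm = RunnableLambda(lambda prompt_value: "HAL: Affirmative, Dave.")

chain = prompt | fake_llm | StrOutputParser()

out = chain.invoke({"chat_history": "Assistant: Hello!", "user_text": "Status report"})
# Mirrors the commit's post-processing: keep only the text after "HAL:".
print(out.split("HAL:")[-1].strip())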