# NASA-AI-Chatbot / app.py
import os
import re
import requests
import streamlit as st
from langchain_huggingface import HuggingFaceEndpoint
from langchain_core.prompts import PromptTemplate
from langchain_core.output_parsers import StrOutputParser
# ✅ Environment variables
HF_TOKEN = os.getenv("HF_TOKEN")
NASA_API_KEY = os.getenv("NASA_API_KEY")

if not HF_TOKEN:
    raise ValueError("HF_TOKEN is not set. Please add it to your environment variables.")
if not NASA_API_KEY:
    raise ValueError("NASA_API_KEY is not set. Please add it to your environment variables.")
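
# Illustrative sketch: NASA_API_KEY is validated above but not used elsewhere in
# this file. The helper below (hypothetical, not part of the original app) shows
# one way the key could back a NASA data feature via the public Astronomy
# Picture of the Day (APOD) endpoint.
def fetch_apod():
    """Fetch today's Astronomy Picture of the Day from the NASA API."""
    resp = requests.get(
        "https://api.nasa.gov/planetary/apod",
        params={"api_key": NASA_API_KEY},
        timeout=10,
    )
    resp.raise_for_status()
    # The APOD response includes 'title', 'explanation', and 'url' fields;
    # e.g., st.image(fetch_apod()["url"]) could surface the image in the UI.
    return resp.json()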
# ✅ Streamlit page setup
st.set_page_config(page_title="HAL - NASA ChatBot", page_icon="🚀")
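
# Minimal stylesheet for the .user-msg / .assistant-msg classes used when
# rendering messages below; the original file references these classes without
# defining them, so the colors here are illustrative assumptions.
st.markdown(
    """
    <style>
    .user-msg { background: #1e3a5f; color: #fff; padding: 8px 12px; border-radius: 8px; margin: 4px 0; }
    .assistant-msg { background: #2d2d2d; color: #fff; padding: 8px 12px; border-radius: 8px; margin: 4px 0; }
    </style>
    """,
    unsafe_allow_html=True,
)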
# ✅ Ensure session state for chat history
if "chat_history" not in st.session_state:
    st.session_state.chat_history = [
        {"role": "assistant", "content": "Hello! How can I assist you today?"}
    ]
# ✅ Define AI model
def get_llm_hf_inference(model_id="mistralai/Mistral-7B-Instruct-v0.3", max_new_tokens=512, temperature=0.7):
    """Return a HuggingFaceEndpoint LLM configured for text generation."""
    return HuggingFaceEndpoint(
        repo_id=model_id,
        max_new_tokens=max_new_tokens,
        temperature=temperature,
        token=HF_TOKEN,
        task="text-generation",
    )
# ✅ Generate follow-up question (preserving format)
def generate_follow_up(user_text):
    """Ask the model for a short, strictly formatted follow-up question."""
    prompt_text = (
        f"Given the user's question: '{user_text}', generate a SHORT follow-up question in the format: "
        "'Would you like to learn more about [related topic] or explore something else?'. "
        "Ensure it is concise and strictly follows this format."
    )
    hf = get_llm_hf_inference(max_new_tokens=30, temperature=0.6)
    output = hf.invoke(input=prompt_text).strip()
    # Strip stray code fences and quote marks from the model output.
    cleaned_output = re.sub(r"```|''|\"", "", output).strip()
    # Fall back to a generic follow-up if the model ignored the format.
    if "Would you like to learn more about" in cleaned_output:
        return cleaned_output
    return "Would you like to explore another related topic or ask about something else?"
# ✅ Get AI response and maintain chat history
def get_response(user_text):
    """Generate a response plus a follow-up question and update chat history."""
    hf = get_llm_hf_inference(max_new_tokens=512, temperature=0.9)

    # Format chat history for context.
    filtered_history = "\n".join(
        f"{msg['role']}: {msg['content']}" for msg in st.session_state.chat_history
    )

    # Create prompt.
    prompt = PromptTemplate.from_template(
        "[INST] You are a helpful AI assistant.\n\nCurrent Conversation:\n{chat_history}\n\n"
        "User: {user_text}\n[/INST]\n"
        "AI: Provide a detailed but concise explanation with depth. "
        "Ensure a friendly, engaging tone."
        "\nHAL:"
    )

    chat = prompt | hf | StrOutputParser()
    response = chat.invoke(input=dict(user_text=user_text, chat_history=filtered_history))
    # Keep only the text after the final "HAL:" marker, if the model echoed it.
    response = response.split("HAL:")[-1].strip() if "HAL:" in response else response.strip()

    # Generate follow-up question.
    follow_up = generate_follow_up(user_text)

    # ✅ Preserve conversation history.
    st.session_state.chat_history.append({'role': 'user', 'content': user_text})
    st.session_state.chat_history.append({'role': 'assistant', 'content': response})
    st.session_state.chat_history.append({'role': 'assistant', 'content': follow_up})

    return response, follow_up
# ✅ Chat UI
st.title("🚀 HAL - NASA AI Assistant")

# ✅ Display conversation history BEFORE user input
for message in st.session_state.chat_history:
    if message["role"] == "user":
        st.markdown(f"<div class='user-msg'><strong>You:</strong> {message['content']}</div>", unsafe_allow_html=True)
    else:
        st.markdown(f"<div class='assistant-msg'><strong>HAL:</strong> {message['content']}</div>", unsafe_allow_html=True)

# ✅ User input
user_input = st.chat_input("Type your message here...")
if user_input:
    response, follow_up = get_response(user_input)

    # ✅ Echo the user's message (the history loop above ran before this input
    # arrived), then display the AI response and follow-up question.
    st.markdown(f"<div class='user-msg'><strong>You:</strong> {user_input}</div>", unsafe_allow_html=True)
    st.markdown(f"<div class='assistant-msg'><strong>HAL:</strong> {response}</div>", unsafe_allow_html=True)
    st.markdown(f"<div class='assistant-msg'><strong>HAL:</strong> {follow_up}</div>", unsafe_allow_html=True)