import os
import re
import requests
import streamlit as st
from langchain_huggingface import HuggingFaceEndpoint
from langchain_core.prompts import PromptTemplate
from langchain_core.output_parsers import StrOutputParser

# βœ… Environment Variables
HF_TOKEN = os.getenv("HF_TOKEN")
NASA_API_KEY = os.getenv("NASA_API_KEY")

if not HF_TOKEN:
    raise ValueError("HF_TOKEN is not set. Please add it to your environment variables.")

if not NASA_API_KEY:
    raise ValueError("NASA_API_KEY is not set. Please add it to your environment variables.")

# βœ… Streamlit Page Setup
st.set_page_config(page_title="HAL - NASA ChatBot", page_icon="πŸš€")
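
# βœ… Minimal chat-bubble styling (a sketch: the markdown below references the
# 'user-msg' and 'assistant-msg' CSS classes but never defines them, so these
# rules are an assumption about the intended look).
st.markdown(
    """
    <style>
    .user-msg { background-color: #1f77b4; color: white; padding: 8px 12px; border-radius: 8px; margin: 4px 0; }
    .assistant-msg { background-color: #2c2f33; color: white; padding: 8px 12px; border-radius: 8px; margin: 4px 0; }
    </style>
    """,
    unsafe_allow_html=True,
)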

# βœ… Ensure Session State for Chat History
if "chat_history" not in st.session_state:
    st.session_state.chat_history = [{"role": "assistant", "content": "Hello! How can I assist you today?"}]

# βœ… Define AI Model
def get_llm_hf_inference(model_id="mistralai/Mistral-7B-Instruct-v0.3", max_new_tokens=512, temperature=0.7):
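    """Return a HuggingFaceEndpoint client for the given model (text-generation task)."""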
    return HuggingFaceEndpoint(
        repo_id=model_id,
        max_new_tokens=max_new_tokens,
        temperature=temperature,
        token=HF_TOKEN,
        task="text-generation"
    )

# βœ… Generate Follow-Up Question (Preserving Format)
def generate_follow_up(user_text):
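    """Ask the LLM for a short follow-up question in a fixed format."""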
    prompt_text = (
        f"Given the user's question: '{user_text}', generate a SHORT follow-up question in the format: "
        "'Would you like to learn more about [related topic] or explore something else?'."
        "Ensure it is concise and strictly follows this format."
    )
    
    hf = get_llm_hf_inference(max_new_tokens=30, temperature=0.6)
    output = hf.invoke(input=prompt_text).strip()

    cleaned_output = re.sub(r"```|''|\"", "", output).strip()

    return cleaned_output if "Would you like to learn more about" in cleaned_output else "Would you like to explore another related topic or ask about something else?"

# βœ… Get AI Response and Maintain Chat History
def get_response(user_text):
    """Generates a response and updates chat history."""
    
    hf = get_llm_hf_inference(max_new_tokens=512, temperature=0.9)

    # Format chat history for context
    filtered_history = "\n".join(f"{msg['role']}: {msg['content']}" for msg in st.session_state.chat_history)

    # Create prompt
    prompt = PromptTemplate.from_template(
        "[INST] You are a helpful AI assistant.\n\nCurrent Conversation:\n{chat_history}\n\n"
        "User: {user_text}.\n [/INST]\n"
        "AI: Provide a detailed but concise explanation with depth. "
        "Ensure a friendly, engaging tone."
        "\nHAL:"
    )

    # Note: `bind(skip_prompt=True)` and `StrOutputParser(output_key='content')`
    # are not valid options here, so the chain uses the plain defaults.
    chat = prompt | hf | StrOutputParser()
    response = chat.invoke(input=dict(user_text=user_text, chat_history=filtered_history))
    response = response.split("HAL:")[-1].strip() if "HAL:" in response else response.strip()

    # Generate follow-up question
    follow_up = generate_follow_up(user_text)

    # βœ… Preserve conversation history
    st.session_state.chat_history.append({'role': 'user', 'content': user_text})
    st.session_state.chat_history.append({'role': 'assistant', 'content': response})
    st.session_state.chat_history.append({'role': 'assistant', 'content': follow_up})

    return response, follow_up

# βœ… Chat UI
st.title("πŸš€ HAL - NASA AI Assistant")

# βœ… Display Conversation History BEFORE User Input
for message in st.session_state.chat_history:
    if message["role"] == "user":
        st.markdown(f"<div class='user-msg'><strong>You:</strong> {message['content']}</div>", unsafe_allow_html=True)
    else:
        st.markdown(f"<div class='assistant-msg'><strong>HAL:</strong> {message['content']}</div>", unsafe_allow_html=True)

# βœ… User Input
user_input = st.chat_input("Type your message here...")

if user_input:
    # Echo the user's message for this turn (the history loop above ran before input)
    st.markdown(f"<div class='user-msg'><strong>You:</strong> {user_input}</div>", unsafe_allow_html=True)

    response, follow_up = get_response(user_input)

    # βœ… Display AI response
    st.markdown(f"<div class='assistant-msg'><strong>HAL:</strong> {response}</div>", unsafe_allow_html=True)

    # βœ… Display Follow-Up Question
    st.markdown(f"<div class='assistant-msg'><strong>HAL:</strong> {follow_up}</div>", unsafe_allow_html=True)