Update app.py
app.py CHANGED
@@ -6,12 +6,11 @@ from langchain_core.prompts import PromptTemplate
 from langchain_core.output_parsers import StrOutputParser
 from transformers import pipeline
 
-# Use
+# Use environment variables for keys
 HF_TOKEN = os.getenv("HF_TOKEN")
 if HF_TOKEN is None:
     raise ValueError("HF_TOKEN environment variable not set. Please set it in your Hugging Face Space settings.")
 
-# Use environment variable for the NASA API key too
 NASA_API_KEY = os.getenv("NASA_API_KEY")
 if NASA_API_KEY is None:
     raise ValueError("NASA_API_KEY environment variable not set. Please set it in your Hugging Face Space settings.")
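Both keys follow the same read-then-fail-fast guard. A minimal standalone sketch of that pattern; the require_env helper is hypothetical, not part of app.py:

import os

def require_env(name: str) -> str:
    # Hypothetical helper mirroring the app's guard pattern: read the
    # variable and fail fast with a setup hint if it is missing.
    value = os.getenv(name)
    if value is None:
        raise ValueError(f"{name} environment variable not set. Please set it in your Hugging Face Space settings.")
    return value

HF_TOKEN = require_env("HF_TOKEN")
NASA_API_KEY = require_env("NASA_API_KEY")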
@@ -19,8 +18,9 @@ if NASA_API_KEY is None:
 # Set up Streamlit UI
 st.set_page_config(page_title="HAL - NASA ChatBot", page_icon="🚀")
 
-# ---
+# --- Initialize Session State Variables ---
 if "chat_history" not in st.session_state:
+    # Initial greeting stored in chat history
    st.session_state.chat_history = [{"role": "assistant", "content": "Hello! How can I assist you today?"}]
 
 if "response_ready" not in st.session_state:
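The same "not in st.session_state" guard also covers response_ready and follow_up, as the next hunk shows. A condensed sketch of all three initializations; the False default for response_ready is an assumption, since the diff shows the key being checked but not the value it is set to:

import streamlit as st

# Sketch: seed each session-state key exactly once, on first run.
defaults = {
    "chat_history": [{"role": "assistant", "content": "Hello! How can I assist you today?"}],
    "response_ready": False,  # assumed initial value
    "follow_up": "",          # stores follow-up question
}
for key, value in defaults.items():
    if key not in st.session_state:
        st.session_state[key] = value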
@@ -30,7 +30,6 @@ if "follow_up" not in st.session_state:
     st.session_state.follow_up = ""  # Stores follow-up question
 
 # --- Set Up Model & API Functions ---
-# Specify your intended model here. If you need to change it, update the model_id.
 model_id = "mistralai/Mistral-7B-Instruct-v0.3"
 
 # Initialize sentiment analysis pipeline with explicit model specification
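The diff truncates the sentiment_analyzer = pipeline(...) call itself; only its closing parenthesis appears in the next hunk. A plausible sketch of the "explicit model specification" the comment describes, where the model id is an assumed example rather than something shown in the diff:

from transformers import pipeline

# Sketch: pin the sentiment model explicitly instead of relying on the
# pipeline's implicit default. The model id below is an assumed example.
sentiment_analyzer = pipeline(
    "sentiment-analysis",
    model="distilbert-base-uncased-finetuned-sst-2-english",
)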
@@ -41,7 +40,7 @@ sentiment_analyzer = pipeline(
 )
 
 def get_llm_hf_inference(model_id=model_id, max_new_tokens=128, temperature=0.7):
-    #
+    # Specify task="text-generation" so that the endpoint uses the right model function.
     return HuggingFaceEndpoint(
         repo_id=model_id,
         max_new_tokens=max_new_tokens,
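This hunk cuts off mid-constructor, so the trailing arguments are not visible. Given the comment added in this commit, the completed call presumably passes task="text-generation"; a sketch under that assumption, with the import path, temperature, and token arguments inferred rather than taken from the diff:

from langchain_huggingface import HuggingFaceEndpoint  # assumed import path

def get_llm_hf_inference(model_id=model_id, max_new_tokens=128, temperature=0.7):
    # Specify task="text-generation" so that the endpoint uses the right model function.
    return HuggingFaceEndpoint(
        repo_id=model_id,
        max_new_tokens=max_new_tokens,
        temperature=temperature,            # inferred: mirrors the parameter above
        task="text-generation",             # per the comment added in this commit
        huggingfacehub_api_token=HF_TOKEN,  # assumed: reuses HF_TOKEN read earlier
    )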
@@ -82,7 +81,8 @@ def generate_follow_up(user_text):
 
 def get_response(system_message, chat_history, user_text, max_new_tokens=256):
     """
-    Generates HAL's response
+    Generates HAL's response in a friendly, conversational manner.
+    The prompt instructs the model to ignore previous greetings and focus on the new user question.
     """
     sentiment = analyze_sentiment(user_text)
     action = predict_action(user_text)
@@ -99,13 +99,14 @@ def get_response(system_message, chat_history, user_text, max_new_tokens=256):
 
     hf = get_llm_hf_inference(max_new_tokens=max_new_tokens, temperature=0.9)
 
+    # Updated prompt: Instruct the model not to repeat previous greetings.
     prompt = PromptTemplate.from_template(
         (
             "[INST] {system_message}\n\nCurrent Conversation:\n{chat_history}\n\n"
             "User: {user_text}.\n [/INST]\n"
-            "AI: 
-            "
-            "
+            "AI: Please answer the user's question without repeating any previous greetings. "
+            "Keep your response friendly and conversational, starting with a phrase like "
+            "'Certainly!', 'Of course!', or 'Great question!'.\nHAL:"
         )
     )
     chat = prompt | hf.bind(skip_prompt=True) | StrOutputParser(output_key='content')
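For reference, the assembled prompt -> endpoint -> parser chain takes the three template variables as a dict. A minimal usage sketch with placeholder values:

# Sketch: invoke the chain built above; all input values are placeholders.
response = chat.invoke({
    "system_message": "You are HAL, a helpful NASA chatbot.",
    "chat_history": "",  # formatted prior turns would go here
    "user_text": "What is the Artemis program?",
})
print(response)  # StrOutputParser returns the model's text as a plain string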