Update app.py
app.py CHANGED
@@ -5,7 +5,16 @@ from langchain_huggingface import HuggingFaceEndpoint
 from langchain_core.prompts import PromptTemplate
 from langchain_core.output_parsers import StrOutputParser
 from transformers import pipeline
-
+
+# Use the HF_TOKEN environment variable (make sure it's set in your Hugging Face Space)
+HF_TOKEN = os.getenv("HF_TOKEN")
+if HF_TOKEN is None:
+    raise ValueError("HF_TOKEN environment variable not set. Please set it in your Hugging Face Space settings.")
+
+# Use environment variable for the NASA API key too
+NASA_API_KEY = os.getenv("NASA_API_KEY")
+if NASA_API_KEY is None:
+    raise ValueError("NASA_API_KEY environment variable not set. Please set it in your Hugging Face Space settings.")
 
 # Set up Streamlit UI
 st.set_page_config(page_title="HAL - NASA ChatBot", page_icon="🚀")
@@ -21,6 +30,7 @@ if "follow_up" not in st.session_state:
     st.session_state.follow_up = ""  # Stores follow-up question
 
 # --- Set Up Model & API Functions ---
+# Specify your intended model here. If you need to change it, update the model_id.
 model_id = "mistralai/Mistral-7B-Instruct-v0.3"
 
 # Initialize sentiment analysis pipeline with explicit model specification
@@ -31,12 +41,12 @@ sentiment_analyzer = pipeline(
 )
 
 def get_llm_hf_inference(model_id=model_id, max_new_tokens=128, temperature=0.7):
-    # Explicitly specify task="text-generation" so that the endpoint knows which task to run
+    # Explicitly specify task="text-generation" so that the endpoint knows which task to run.
     return HuggingFaceEndpoint(
         repo_id=model_id,
         max_new_tokens=max_new_tokens,
         temperature=temperature,
-        token=
+        token=HF_TOKEN,
         task="text-generation"
     )
 
@@ -91,7 +101,8 @@ def get_response(system_message, chat_history, user_text, max_new_tokens=256):
 
     prompt = PromptTemplate.from_template(
         (
-            "[INST] {system_message}\n\nCurrent Conversation:\n{chat_history}\n\
+            "[INST] {system_message}\n\nCurrent Conversation:\n{chat_history}\n\n"
+            "User: {user_text}.\n [/INST]\n"
             "AI: Keep responses conversational and engaging. Start with a friendly phrase like "
             "'Certainly!', 'Of course!', or 'Great question!' before answering. "
             "Keep responses concise but engaging.\nHAL:"
@@ -185,3 +196,4 @@ if user_input:
     if st.session_state.response_ready and st.session_state.follow_up:
         st.markdown(f"<div class='assistant-msg'><strong>HAL:</strong> {st.session_state.follow_up}</div>", unsafe_allow_html=True)
         st.session_state.response_ready = False
+
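A note on the env-var handling added in the first hunk: both checks follow the same fail-fast pattern, raising at startup rather than letting a missing key surface later. A minimal standalone sketch of that pattern (the require_env helper is hypothetical, not part of app.py):

import os

def require_env(name: str) -> str:
    # Fail fast with a clear message instead of a confusing downstream auth error.
    value = os.getenv(name)
    if value is None:
        raise ValueError(f"{name} environment variable not set. Please set it in your Hugging Face Space settings.")
    return value

HF_TOKEN = require_env("HF_TOKEN")          # mirrors the first check in this commit
NASA_API_KEY = require_env("NASA_API_KEY")  # mirrors the second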
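For the endpoint change: the commit passes token=HF_TOKEN through to HuggingFaceEndpoint along with an explicit task. A small smoke-test sketch of the same construction, assuming HF_TOKEN is set; it uses the huggingfacehub_api_token keyword that langchain_huggingface documents for credentials (the exact keyword accepted may be version-dependent):

import os
from langchain_huggingface import HuggingFaceEndpoint

# Build the endpoint the same way get_llm_hf_inference does, with the
# task and token supplied explicitly.
llm = HuggingFaceEndpoint(
    repo_id="mistralai/Mistral-7B-Instruct-v0.3",
    task="text-generation",
    max_new_tokens=64,
    temperature=0.7,
    huggingfacehub_api_token=os.getenv("HF_TOKEN"),
)
print(llm.invoke("[INST] Say hello in one sentence. [/INST]"))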
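The prompt hunk reworks the [INST] template, splitting the long instruction line into two adjacent string literals so the {user_text} turn is included cleanly. A quick way to sanity-check that all three template variables resolve (the sample values below are made up for illustration):

from langchain_core.prompts import PromptTemplate

prompt = PromptTemplate.from_template(
    "[INST] {system_message}\n\nCurrent Conversation:\n{chat_history}\n\n"
    "User: {user_text}.\n [/INST]\n"
    "AI: Keep responses conversational and engaging. Start with a friendly phrase like "
    "'Certainly!', 'Of course!', or 'Great question!' before answering. "
    "Keep responses concise but engaging.\nHAL:"
)

# Render once with dummy values to confirm the template formats without errors.
print(prompt.format(
    system_message="You are HAL, a NASA-themed assistant.",
    chat_history="User: Hi\nHAL: Certainly! Hello there!",
    user_text="What did APOD show today?",
))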
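Given the imports at the top of app.py (PromptTemplate, StrOutputParser), the pieces compose in the usual LCEL way. A hypothetical end-to-end wiring, reusing the prompt and llm objects from the sketches above:

from langchain_core.output_parsers import StrOutputParser

# prompt -> endpoint -> plain-string output.
chain = prompt | llm | StrOutputParser()
answer = chain.invoke({
    "system_message": "You are HAL, a NASA-themed assistant.",
    "chat_history": "",
    "user_text": "Tell me about the Artemis program.",
})
print(answer)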