Update app.py
app.py CHANGED

@@ -3,7 +3,7 @@ import time
 import requests
 from streamlit.components.v1 import html
 import os
-
+
 
 # Import transformers and cache the help agent for performance
 @st.cache_resource
@@ -233,41 +233,22 @@ def ask_llama(conversation_history, category, is_final_guess=False):
     return "Could not generate question"
 
 # New function for the help AI assistant using the Hugging Face InferenceClient
+
 def ask_help_agent(query):
     try:
-
-
-        # System message setup
-        system_message = "You are a friendly Chatbot."
-
-        # Build conversation history in Ollama format
-        messages = [{"role": "system", "content": system_message}]
 
-
-
-
-
-            if msg.get("response"):
-                messages.append({"role": "assistant", "content": msg["response"]})
-
-        # Add current query
-        messages.append({"role": "user", "content": query})
-
-        # Get response from Ollama
-        response = ollama.chat(
-            model="llama3",  # Can also use "mistral", "phi3", etc.
-            messages=messages,
-            options={
-                "temperature": 0.7,
-                "top_p": 0.95,
-                "num_predict": 150  # Similar to max_tokens
-            }
-        )
-
-        return response['message']['content']
+        API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.1"
+        headers = {"Authorization": "Bearer HF_TOKEN"}  # Get from huggingface.co/settings/tokens
 
+        prompt = f"""<s>[INST] <<SYS>>
+        You are a helpful assistant. Answer concisely.
+        <</SYS>>
+        {query} [/INST]"""
 
+        response = requests.post(API_URL, headers=headers, json={"inputs": prompt})
+        return response.json()[0]['generated_text'].split("[/INST]")[-1].strip()
     except Exception as e:
-        return f"
+        return f"Assistant unavailable: {str(e)}"
+
 # Main game logic with enhanced UI
 def main():
     inject_custom_css()
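Note on the new code: as committed, the Authorization header sends the literal string "Bearer HF_TOKEN", which the Inference API will reject with a 401 until a real token replaces the placeholder, and the helper indexes into the JSON response without checking the HTTP status. A minimal sketch of the same helper with the token read from the environment (assuming it is exported as HF_TOKEN; os and requests are already imported at the top of app.py) and an explicit status check:

import os
import requests

API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.1"

def ask_help_agent(query):
    try:
        # Assumption: the token is exported as the HF_TOKEN environment variable
        headers = {"Authorization": f"Bearer {os.environ['HF_TOKEN']}"}
        prompt = f"""<s>[INST] <<SYS>>
You are a helpful assistant. Answer concisely.
<</SYS>>
{query} [/INST]"""
        response = requests.post(API_URL, headers=headers, json={"inputs": prompt}, timeout=30)
        response.raise_for_status()  # surface 401 (bad token) / 503 (model still loading) as exceptions
        return response.json()[0]["generated_text"].split("[/INST]")[-1].strip()
    except Exception as e:
        return f"Assistant unavailable: {str(e)}"

The split on "[/INST]" is there because the text-generation endpoint returns the full text by default, so generated_text starts with an echo of the prompt; raise_for_status() routes transport and auth failures into the existing except branch, which already formats them for the user.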
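Side note: the <<SYS>> ... <</SYS>> wrapper used in the new prompt is the Llama-2 chat convention; Mistral-7B-Instruct-v0.1's documented template is a bare [INST] ... [/INST] pair, with any system text simply folded into the instruction. The model generally tolerates the extra tags, but the canonical form would look like this (a sketch, assuming the same system text as the commit):

# Canonical Mistral-Instruct formatting: system text prepended to the user query
prompt = f"<s>[INST] You are a helpful assistant. Answer concisely.\n{query} [/INST]"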