Update app.py
Browse files
app.py
CHANGED
|
@@ -3,6 +3,7 @@ import time
|
|
| 3 |
import requests
|
| 4 |
from streamlit.components.v1 import html
|
| 5 |
import os
|
|
|
|
| 6 |
|
| 7 |
# Import transformers and cache the help agent for performance
|
| 8 |
@st.cache_resource
|
|
@@ -234,41 +235,39 @@ def ask_llama(conversation_history, category, is_final_guess=False):
|
|
| 234 |
# New function for the help AI assistant using the Hugging Face InferenceClient
|
| 235 |
def ask_help_agent(query):
|
| 236 |
try:
|
| 237 |
-
|
| 238 |
-
|
| 239 |
-
|
| 240 |
system_message = "You are a friendly Chatbot."
|
| 241 |
-
|
| 242 |
-
# Build history
|
| 243 |
-
|
|
|
|
| 244 |
if "help_conversation" in st.session_state:
|
| 245 |
for msg in st.session_state.help_conversation:
|
| 246 |
-
|
| 247 |
-
|
| 248 |
-
|
| 249 |
-
|
| 250 |
-
|
| 251 |
-
|
| 252 |
-
messages.append({"role": "user", "content": user_msg})
|
| 253 |
-
if bot_msg:
|
| 254 |
-
messages.append({"role": "assistant", "content": bot_msg})
|
| 255 |
messages.append({"role": "user", "content": query})
|
| 256 |
-
|
| 257 |
-
|
| 258 |
-
|
| 259 |
-
|
| 260 |
-
messages,
|
| 261 |
-
|
| 262 |
-
|
| 263 |
-
|
| 264 |
-
|
| 265 |
-
|
| 266 |
-
|
| 267 |
-
|
| 268 |
-
return
|
|
|
|
| 269 |
except Exception as e:
|
| 270 |
return f"Error in help agent: {str(e)}"
|
| 271 |
-
|
| 272 |
# Main game logic with enhanced UI
|
| 273 |
def main():
|
| 274 |
inject_custom_css()
|
|
|
|
| 3 |
import requests
|
| 4 |
from streamlit.components.v1 import html
|
| 5 |
import os
|
| 6 |
+
import ollama
|
| 7 |
|
| 8 |
# Import transformers and cache the help agent for performance
|
| 9 |
@st.cache_resource
|
|
|
|
| 235 |
# New function for the help AI assistant using a locally running Ollama model
def ask_help_agent(query, model="llama3"):
    """Answer a user's help question via a local Ollama chat model.

    Rebuilds the chat transcript in Ollama message format from the prior
    help conversation stored in Streamlit session state, appends the new
    query, and returns the assistant's reply text.

    Args:
        query: The user's current help question.
        model: Ollama model name to query (e.g. "llama3", "mistral",
            "phi3"). Defaults to "llama3" to preserve prior behavior.

    Returns:
        The model's reply string, or an "Error in help agent: ..." message
        if anything goes wrong (the caller renders this as chat text).
    """
    try:
        import ollama  # local import so the app still loads when Ollama is absent

        # System message setup
        system_message = "You are a friendly Chatbot."

        # Build conversation history in Ollama format
        messages = [{"role": "system", "content": system_message}]
        if "help_conversation" in st.session_state:
            for msg in st.session_state.help_conversation:
                # Each stored turn may carry a user query and/or a bot response
                if msg.get("query"):
                    messages.append({"role": "user", "content": msg["query"]})
                if msg.get("response"):
                    messages.append({"role": "assistant", "content": msg["response"]})

        # Add current query
        messages.append({"role": "user", "content": query})

        # Get response from Ollama
        response = ollama.chat(
            model=model,
            messages=messages,
            options={
                "temperature": 0.7,
                "top_p": 0.95,
                "num_predict": 150,  # similar to max_tokens
            },
        )
        return response['message']['content']
    except Exception as e:
        # Deliberate best-effort boundary: surface the failure as chat text
        # instead of crashing the Streamlit UI.
        return f"Error in help agent: {str(e)}"
|
|
|
|
| 271 |
# Main game logic with enhanced UI
|
| 272 |
def main():
|
| 273 |
inject_custom_css()
|