iisadia committed on
Commit
e2cdfab
·
verified ·
1 Parent(s): 0c034cb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +28 -29
app.py CHANGED
@@ -3,6 +3,7 @@ import time
3
  import requests
4
  from streamlit.components.v1 import html
5
  import os
 
6
 
7
  # Import transformers and cache the help agent for performance
8
  @st.cache_resource
@@ -234,41 +235,39 @@ def ask_llama(conversation_history, category, is_final_guess=False):
234
  # New function for the help AI assistant using the Hugging Face InferenceClient
235
  def ask_help_agent(query):
236
  try:
237
- from huggingface_hub import InferenceClient
238
- # Initialize the client with the provided model
239
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta", token=os.environ.get("HF_HUB_TOKEN"))
240
  system_message = "You are a friendly Chatbot."
241
-
242
- # Build history from session state (if any)
243
- history = []
 
244
  if "help_conversation" in st.session_state:
245
  for msg in st.session_state.help_conversation:
246
- # Each history entry is a tuple: (user query, assistant response)
247
- history.append((msg.get("query", ""), msg.get("response", "")))
248
-
249
- messages = [{"role": "system", "content": system_message}]
250
- for user_msg, bot_msg in history:
251
- if user_msg:
252
- messages.append({"role": "user", "content": user_msg})
253
- if bot_msg:
254
- messages.append({"role": "assistant", "content": bot_msg})
255
  messages.append({"role": "user", "content": query})
256
-
257
- response_text = ""
258
- # Using streaming to collect the entire response from the model
259
- for message in client.chat_completion(
260
- messages,
261
- max_tokens=150,
262
- stream=True,
263
- temperature=0.7,
264
- top_p=0.95,
265
- ):
266
- token = message.choices[0].delta.content
267
- response_text += token
268
- return response_text
 
269
  except Exception as e:
270
  return f"Error in help agent: {str(e)}"
271
-
272
  # Main game logic with enhanced UI
273
  def main():
274
  inject_custom_css()
 
3
  import requests
4
  from streamlit.components.v1 import html
5
  import os
6
+ import ollama
7
 
8
  # Import transformers and cache the help agent for performance
9
  @st.cache_resource
 
235
  # New function for the help AI assistant using the Hugging Face InferenceClient
236
  def ask_help_agent(query):
237
  try:
238
+ import ollama
239
+
240
+ # System message setup
241
  system_message = "You are a friendly Chatbot."
242
+
243
+ # Build conversation history in Ollama format
244
+ messages = [{"role": "system", "content": system_message}]
245
+
246
  if "help_conversation" in st.session_state:
247
  for msg in st.session_state.help_conversation:
248
+ if msg.get("query"):
249
+ messages.append({"role": "user", "content": msg["query"]})
250
+ if msg.get("response"):
251
+ messages.append({"role": "assistant", "content": msg["response"]})
252
+
253
+ # Add current query
 
 
 
254
  messages.append({"role": "user", "content": query})
255
+
256
+ # Get response from Ollama
257
+ response = ollama.chat(
258
+ model="llama3", # Can also use "mistral", "phi3", etc.
259
+ messages=messages,
260
+ options={
261
+ "temperature": 0.7,
262
+ "top_p": 0.95,
263
+ "num_predict": 150 # Similar to max_tokens
264
+ }
265
+ )
266
+
267
+ return response['message']['content']
268
+
269
  except Exception as e:
270
  return f"Error in help agent: {str(e)}"
 
271
  # Main game logic with enhanced UI
272
  def main():
273
  inject_custom_css()