iisadia committed (verified)
Commit 65ff7ae · Parent(s): 2dc8811

Update app.py

Files changed (1): app.py (+37 -36)
app.py CHANGED
@@ -7,7 +7,7 @@ from streamlit.components.v1 import html
 @st.cache_resource
 def get_help_agent():
     from transformers import pipeline
-    # Using BlenderBot 400M Distill as the public conversational model
+    # Using BlenderBot 400M Distill as the public conversational model (used elsewhere)
     return pipeline("conversational", model="facebook/blenderbot-400M-distill")
 
 # Custom CSS for professional look (fixed text color)
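For reference, the cached pipeline kept here is the one the removed ask_help_agent below consumed through the Conversation wrapper; a minimal usage sketch, assuming an older transformers release (the "conversational" task and the Conversation class are not available in recent versions):

from transformers import Conversation

agent = get_help_agent()              # cached ConversationalPipeline (BlenderBot 400M Distill)
conv = Conversation("How do I narrow down a place quickly?")
conv = agent(conv)                    # runs the model and appends its reply to the conversation
print(conv.generated_responses[-1])   # latest BlenderBot response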
@@ -119,16 +119,13 @@ def ask_llama(conversation_history, category, is_final_guess=False):
     }
 
     system_prompt = f"""You're playing 20 questions to guess a {category}. Follow these rules:
-    1. Ask strategic, non-repeating yes/no questions that narrow down possibilities.
-    2. Consider all previous answers carefully before asking the next question.
-    3. If you're very confident (80%+ sure), respond with "Final Guess: [your guess]".
-    4. For places: ask about continent, climate, famous landmarks, country, city or population.
-    5. For people: ask about fictional or real, profession, gender, alive/dead, nationality, or fame.
-    6. For objects: ask about size, color, usage, material, or where it's found.
-    7. Never repeat questions and always make progress toward guessing.
-    8. If the guess is generic (e.g. "bed") and the player confirms it with "yes", end the game.
-    9. If the guess is generic but the answer is "no", ask additional questions to refine the guess.
-    10. After a rejected final guess, continue asking questions instead of making new guesses immediately."""
+    1. Ask strategic, non-repeating yes/no questions that narrow down possibilities
+    2. Consider all previous answers carefully before asking next question
+    3. If you're very confident (80%+ sure), respond with "Final Guess: [your guess]"
+    4. For places: ask about continent, climate, famous landmarks, country, city or population
+    5. For people: ask about fictional or real, profession, gender, alive/dead, nationality, or fame
+    6. For objects: ask about size, color, usage, material, or where it's found
+    7. Never repeat questions and always make progress toward guessing"""
 
     if is_final_guess:
         prompt = f"""Based on these answers about a {category}, provide ONLY your final guess with no extra text:
@@ -157,15 +154,20 @@ def ask_llama(conversation_history, category, is_final_guess=False):
         st.error(f"Error calling Llama API: {str(e)}")
         return "Could not generate question"
 
-# New function for the help AI assistant using a Hugging Face chatbot model
+# New function for the help AI assistant using a free Hugging Face conversational model
+# (replacing the original BlenderBot approach with DialoGPT for simplicity)
 def ask_help_agent(query):
-    from transformers import Conversation
-    # Get the cached help agent (BlenderBot)
-    help_agent = get_help_agent()
-    conversation = Conversation(query)
-    result = help_agent(conversation)
-    # The generated response is stored in generated_responses list
-    return result.generated_responses[-1]
+    try:
+        from transformers import pipeline
+        # Using DialoGPT-medium as a free conversational text-generation model
+        help_chat = pipeline("text-generation", model="microsoft/DialoGPT-medium")
+        # The conversation string can incorporate previous messages if you want a dialogue
+        # Here we simply generate a response to the current query.
+        response = help_chat(query, max_length=100, do_sample=True, top_p=0.95)
+        # Trim the response to show only the generated text
+        return response[0]['generated_text']
+    except Exception as e:
+        return f"Error in help agent: {str(e)}"
 
 # Main game logic
 def main():
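The new DialoGPT helper reloads the model on every call and, as its own comment notes, only answers the current query. A hedged sketch of how previous help turns could be folded in while reusing the existing @st.cache_resource pattern (the names get_dialo_pipeline and ask_help_agent_cached are illustrative, not part of this commit):

import streamlit as st

@st.cache_resource
def get_dialo_pipeline():
    from transformers import pipeline
    # Load DialoGPT once per session instead of on every help query
    return pipeline("text-generation", model="microsoft/DialoGPT-medium")

def ask_help_agent_cached(query, history=None):
    # history: list of (user_msg, bot_msg) pairs from earlier help turns (illustrative)
    chat = get_dialo_pipeline()
    eos = chat.tokenizer.eos_token  # DialoGPT separates dialogue turns with its EOS token
    prompt = ""
    for user_msg, bot_msg in (history or []):
        prompt += user_msg + eos + bot_msg + eos
    prompt += query + eos
    out = chat(prompt, max_length=200, do_sample=True, top_p=0.95,
               pad_token_id=chat.tokenizer.eos_token_id)
    # generated_text includes the prompt; keep only the newly generated turn
    return out[0]["generated_text"][len(prompt):].strip()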
@@ -217,18 +219,17 @@ def main():
                {"role": "assistant", "content": first_question}
            ]
            st.session_state.game_state = "gameplay"
-           st.session_state.current_q = 0  # start at question 0
-           st.rerun()
+           st.experimental_rerun()
 
    # Gameplay screen
    elif st.session_state.game_state == "gameplay":
        current_question = st.session_state.questions[st.session_state.current_q]
 
-       # If the AI made a final guess, switch to confirmation
+       # Check if AI made a guess
        if "Final Guess:" in current_question:
            st.session_state.final_guess = current_question.split("Final Guess:")[1].strip()
            st.session_state.game_state = "confirm_guess"
-           st.rerun()
+           st.experimental_rerun()
 
        st.markdown(f'<div class="question-box">Question {st.session_state.current_q + 1}/20:<br><br>'
                    f'<strong>{current_question}</strong></div>',
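The rerun calls throughout this commit change from st.rerun() to st.experimental_rerun(); the two names belong to different Streamlit generations (experimental_rerun is the older spelling, rerun the current one), so a small version-agnostic shim is one way to keep the script portable. The helper name force_rerun is illustrative, not part of the app:

import streamlit as st

def force_rerun():
    # Newer Streamlit exposes st.rerun(); older releases only have st.experimental_rerun()
    if hasattr(st, "rerun"):
        st.rerun()
    else:
        st.experimental_rerun()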
@@ -246,12 +247,13 @@ def main():
                {"role": "user", "content": answer_input}
            )
 
-           # Generate next response from Llama
+           # Generate next response
            next_response = ask_llama(
                st.session_state.conversation_history,
                st.session_state.category
            )
 
+           # Check if AI made a guess
            if "Final Guess:" in next_response:
                st.session_state.final_guess = next_response.split("Final Guess:")[1].strip()
                st.session_state.game_state = "confirm_guess"
@@ -263,12 +265,12 @@ def main():
                st.session_state.current_q += 1
 
                # Stop after 20 questions max
-               if st.session_state.current_q >= 19:
+               if st.session_state.current_q >= 20:
                    st.session_state.game_state = "result"
 
-               st.rerun()
+               st.experimental_rerun()
 
-       # Side Help Option: independent chat with an AI help assistant (Hugging Face model)
+       # Side Help Option: independent chat with an AI help assistant (free Hugging Face model)
        with st.expander("Need Help? Chat with AI Assistant"):
            help_query = st.text_input("Enter your help query:", key="help_query")
            if st.button("Send", key="send_help"):
@@ -282,7 +284,7 @@ def main():
                st.markdown(f"**You:** {msg['query']}")
                st.markdown(f"**Help Assistant:** {msg['response']}")
 
-   # Confirm guess screen
+   # Guess confirmation screen using text input response
    elif st.session_state.game_state == "confirm_guess":
        st.markdown(f'<div class="question-box">🤖 My Final Guess:<br><br>'
                    f'<strong>Is it {st.session_state.final_guess}?</strong></div>',
@@ -296,9 +298,10 @@ def main():
        else:
            if confirm_input == "yes":
                st.session_state.game_state = "result"
-               st.rerun()
+               st.experimental_rerun()
+               st.stop()  # Immediately halt further execution
            else:
-               # Add the negative response to history and continue gameplay
+               # Add negative response to history and continue gameplay
                st.session_state.conversation_history.append(
                    {"role": "user", "content": "no"}
                )
@@ -312,9 +315,7 @@ def main():
                    {"role": "assistant", "content": next_response}
                )
                st.session_state.current_q += 1
-               if st.session_state.current_q >= 19:
-                   st.session_state.game_state = "result"
-               st.rerun()
+               st.experimental_rerun()
 
    # Result screen
    elif st.session_state.game_state == "result":
  elif st.session_state.game_state == "result":
@@ -337,12 +338,12 @@ def main():
337
  time.sleep(1)
338
  st.markdown(f'<div class="final-reveal" style="font-size:3.5rem;color:#6C63FF;">{st.session_state.final_guess}</div>',
339
  unsafe_allow_html=True)
340
- st.markdown(f"<p style='text-align:center'>Guessed in {st.session_state.current_q + 1} questions</p>",
341
  unsafe_allow_html=True)
342
 
343
  if st.button("Play Again", key="play_again"):
344
  st.session_state.clear()
345
- st.rerun()
346
 
347
  if __name__ == "__main__":
348
- main()
 