import streamlit as st
import time
import requests
from streamlit.components.v1 import html

# Import transformers and cache the help agent for performance
@st.cache_resource
def get_help_agent():
    from transformers import pipeline
    # Using BlenderBot 400M Distill as the public conversational model
    return pipeline("conversational", model="facebook/blenderbot-400M-distill")

# Custom CSS for professional look (fixed text color)
def inject_custom_css():
    st.markdown(""" """, unsafe_allow_html=True)

# Confetti animation
def show_confetti():
    html(""" """)

# Enhanced AI question generation for guessing game using Llama model
def ask_llama(conversation_history, category, is_final_guess=False):
    api_url = "https://api.groq.com/openai/v1/chat/completions"
    headers = {
        "Authorization": "Bearer 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",
        "Content-Type": "application/json"
    }

    system_prompt = f"""You're playing 20 questions to guess a {category}. Follow these rules:
1. Ask strategic, non-repeating yes/no questions that narrow down possibilities
2. Consider all previous answers carefully before asking next question
3. If you're very confident (80%+ sure), respond with "Final Guess: [your guess]"
4. For places: ask about continent, climate, famous landmarks, country, city or population
5. For people: ask about fictional or real, profession, gender, alive/dead, nationality, or fame
6. For objects: ask about size, color, usage, material, or where it's found
7. Never repeat questions and always make progress toward guessing"""

    if is_final_guess:
        prompt = f"""Based on these answers about a {category}, provide ONLY your final guess with no extra text:
{conversation_history}"""
    else:
        prompt = "Ask your next strategic yes/no question that will best narrow down the possibilities."

    messages = [
        {"role": "system", "content": system_prompt},
        *conversation_history,
        {"role": "user", "content": prompt}
    ]

    data = {
        "model": "llama-3.3-70b-versatile",
        "messages": messages,
        "temperature": 0.7 if is_final_guess else 0.8,
        "max_tokens": 100
    }

    try:
        response = requests.post(api_url, headers=headers, json=data)
        response.raise_for_status()
        return response.json()["choices"][0]["message"]["content"]
    except Exception as e:
        st.error(f"Error calling Llama API: {str(e)}")
        return "Could not generate question"

# New function for the help AI assistant using a Hugging Face chatbot model
def ask_help_agent(query):
    # Use a try/except block to import Conversation from the correct module,
    # accommodating different versions of transformers
    try:
        from transformers import Conversation
    except ImportError:
        from transformers.pipelines.conversational import Conversation
    # Get the cached help agent (BlenderBot)
    help_agent = get_help_agent()
    conversation = Conversation(query)
    result = help_agent(conversation)
    # The generated response is stored in generated_responses list
    return result.generated_responses[-1]
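
# --- Optional fallback sketch (not part of the original app) ---
# The "conversational" pipeline and the Conversation class used above are no longer
# available in newer transformers releases, so ask_help_agent() assumes an older
# version of the library is installed. A minimal single-turn alternative using the
# same BlenderBot checkpoint could look like the sketch below; the *_fallback names
# are illustrative only.
@st.cache_resource
def get_help_agent_fallback():
    from transformers import pipeline
    # Seq2seq generation with the same checkpoint; no Conversation object required
    return pipeline("text2text-generation", model="facebook/blenderbot-400M-distill")

def ask_help_agent_fallback(query):
    # Single-turn reply: pass the raw user text and return the generated string
    result = get_help_agent_fallback()(query, max_length=100)
    return result[0]["generated_text"]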

# Main game logic
def main():
    inject_custom_css()
    st.markdown('<div class="title">KASOTI</div>', unsafe_allow_html=True)
    st.markdown('<div class="subtitle">The Smart Guessing Game</div>', unsafe_allow_html=True)

    if 'game_state' not in st.session_state:
        st.session_state.game_state = "start"
        st.session_state.questions = []
        st.session_state.current_q = 0
        st.session_state.answers = []
        st.session_state.conversation_history = []
        st.session_state.category = None
        st.session_state.final_guess = None
        st.session_state.help_conversation = []  # separate history for help agent
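
    # Game flow, for reference: "start" -> "gameplay" -> "confirm_guess" -> "result";
    # a "no" to the final guess sends the game back to "gameplay".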

    # Start screen
    if st.session_state.game_state == "start":
        st.markdown("""
        <div class="welcome-box">
            <h2>Welcome to KASOTI 🎯</h2>
            <p>Think of something and I'll try to guess it in 20 questions or less!</p>
            <p>Choose a category: <b>person</b>, <b>place</b>, or <b>object</b></p>
            <p>Type your category below to begin:</p>
        </div>
        """, unsafe_allow_html=True)

        with st.form("start_form"):
            category_input = st.text_input("Enter category (person/place/object):").strip().lower()
            if st.form_submit_button("Start Game"):
                if not category_input:
                    st.error("Please enter a category!")
                elif category_input not in ["person", "place", "object"]:
                    st.error("Please enter either 'person', 'place', or 'object'!")
                else:
                    st.session_state.category = category_input
                    first_question = ask_llama([
                        {"role": "user", "content": "Ask your first strategic yes/no question."}
                    ], category_input)
                    st.session_state.questions = [first_question]
                    st.session_state.conversation_history = [
                        {"role": "assistant", "content": first_question}
                    ]
                    st.session_state.game_state = "gameplay"
                    st.rerun()

    # Gameplay screen
    elif st.session_state.game_state == "gameplay":
        current_question = st.session_state.questions[st.session_state.current_q]

        # Check if AI made a guess
        if "Final Guess:" in current_question:
            st.session_state.final_guess = current_question.split("Final Guess:")[1].strip()
            st.session_state.game_state = "confirm_guess"
            st.rerun()

        st.markdown(f'<div class="question-box">Question {st.session_state.current_q + 1}/20:</div>'
                    f'<div class="question-text">{current_question}</div>', unsafe_allow_html=True)

        with st.form("answer_form"):
            answer_input = st.text_input("Your answer (yes/no/both):",
                                         key=f"answer_{st.session_state.current_q}").strip().lower()
            if st.form_submit_button("Submit"):
                if answer_input not in ["yes", "no", "both"]:
                    st.error("Please answer with 'yes', 'no', or 'both'!")
                else:
                    st.session_state.answers.append(answer_input)
                    st.session_state.conversation_history.append(
                        {"role": "user", "content": answer_input}
                    )

                    # Generate next response
                    next_response = ask_llama(
                        st.session_state.conversation_history,
                        st.session_state.category
                    )

                    # Check if AI made a guess
                    if "Final Guess:" in next_response:
                        st.session_state.final_guess = next_response.split("Final Guess:")[1].strip()
                        st.session_state.game_state = "confirm_guess"
                    else:
                        st.session_state.questions.append(next_response)
                        st.session_state.conversation_history.append(
                            {"role": "assistant", "content": next_response}
                        )
                        st.session_state.current_q += 1

                    # Stop after 20 questions max
                    if st.session_state.current_q >= 20:
                        st.session_state.game_state = "result"

                    st.rerun()

        # Side Help Option: independent chat with an AI help assistant (Hugging Face model)
        with st.expander("Need Help? Chat with AI Assistant"):
            help_query = st.text_input("Enter your help query:", key="help_query")
            if st.button("Send", key="send_help"):
                if help_query:
                    help_response = ask_help_agent(help_query)
                    st.session_state.help_conversation.append({"query": help_query, "response": help_response})
                else:
                    st.error("Please enter a query!")
            if st.session_state.help_conversation:
                for msg in st.session_state.help_conversation:
                    st.markdown(f"**You:** {msg['query']}")
                    st.markdown(f"**Help Assistant:** {msg['response']}")
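
    # For reference, conversation_history accumulates alternating messages that are
    # replayed to ask_llama() on every turn, e.g. (illustrative content):
    #   [{"role": "assistant", "content": "Is it a living thing?"},
    #    {"role": "user", "content": "yes"},
    #    {"role": "assistant", "content": "Is it man-made?"}, ...]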

    # Guess confirmation screen using text input response
    elif st.session_state.game_state == "confirm_guess":
        st.markdown(f'<div class="guess-box">🤖 My Final Guess:</div>'
                    f'<div class="guess-text">Is it {st.session_state.final_guess}?</div>', unsafe_allow_html=True)

        with st.form("confirm_form"):
            confirm_input = st.text_input("Type your answer (yes/no/both):", key="confirm_input").strip().lower()
            if st.form_submit_button("Submit"):
                if confirm_input not in ["yes", "no", "both"]:
                    st.error("Please answer with 'yes', 'no', or 'both'!")
                else:
                    if confirm_input == "yes":
                        st.session_state.game_state = "result"
                        st.rerun()
                        st.stop()  # Immediately halt further execution
                    else:
                        # Add negative response to history and continue gameplay
                        st.session_state.conversation_history.append(
                            {"role": "user", "content": "no"}
                        )
                        st.session_state.game_state = "gameplay"
                        next_response = ask_llama(
                            st.session_state.conversation_history,
                            st.session_state.category
                        )
                        st.session_state.questions.append(next_response)
                        st.session_state.conversation_history.append(
                            {"role": "assistant", "content": next_response}
                        )
                        st.session_state.current_q += 1
                        st.rerun()
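
    # The result screen below rebuilds the Q&A transcript as plain text before asking
    # for a final guess, e.g. (illustrative):
    #   Q1: Is it a living thing?
    #   A: yes
    #   Q2: Is it larger than a car?
    #   A: no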

    # Result screen
    elif st.session_state.game_state == "result":
        if not st.session_state.final_guess:
            # Generate final guess if not already made
            qa_history = "\n".join(
                [f"Q{i+1}: {q}\nA: {a}" for i, (q, a) in enumerate(zip(st.session_state.questions, st.session_state.answers))]
            )
            final_guess = ask_llama(
                [{"role": "user", "content": qa_history}],
                st.session_state.category,
                is_final_guess=True
            )
            st.session_state.final_guess = final_guess.split("Final Guess:")[-1].strip()

        show_confetti()
        st.markdown(f'<div class="result-title">🎉 It\'s...</div>', unsafe_allow_html=True)
        time.sleep(1)
        st.markdown(f'<div class="result-text">{st.session_state.final_guess}</div>', unsafe_allow_html=True)
        st.markdown(f"<div class='result-stats'>Guessed in {len(st.session_state.questions)} questions</div>", unsafe_allow_html=True)

        if st.button("Play Again", key="play_again"):
            st.session_state.clear()
            st.rerun()

if __name__ == "__main__":
    main()