import os
import time

import requests
import streamlit as st
from streamlit.components.v1 import html

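
# Configuration note: the Groq API key used in ask_llama() is read from the
# GROQ_API_KEY environment variable (an assumed name; adapt it to however you
# manage secrets) instead of being hard-coded. For example:
#   export GROQ_API_KEY="your-key-here"
#   streamlit run kasoti.py   # assumed filename for this script


# Small local chatbot that powers the in-game "help" expander. @st.cache_resource
# loads the BlenderBot model once per process and reuses it across Streamlit reruns.
# NOTE: the "conversational" pipeline only exists in older transformers releases;
# it was deprecated and later removed from the library.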
@st.cache_resource
def get_help_agent():
    from transformers import pipeline

    return pipeline("conversational", model="facebook/blenderbot-400M-distill")

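
# Inject the app's global styling (Poppins font, question box, buttons,
# final-reveal animation, confetti canvas, confidence meter) as one <style> block.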
def inject_custom_css():
    st.markdown("""
    <style>
    @import url('https://fonts.googleapis.com/css2?family=Poppins:wght@400;600;700&display=swap');

    * {
        font-family: 'Poppins', sans-serif;
    }

    .title {
        font-size: 3rem !important;
        font-weight: 700 !important;
        color: #6C63FF !important;
        text-align: center;
        margin-bottom: 0.5rem;
    }

    .subtitle {
        font-size: 1.2rem !important;
        text-align: center;
        color: #666 !important;
        margin-bottom: 2rem;
    }

    .question-box {
        background: #F8F9FA;
        border-radius: 15px;
        padding: 2rem;
        margin: 1.5rem 0;
        box-shadow: 0 4px 6px rgba(0,0,0,0.1);
        color: black;
    }

    .answer-btn {
        border-radius: 12px !important;
        padding: 0.5rem 1.5rem !important;
        font-weight: 600 !important;
        margin: 0.5rem !important;
    }

    .yes-btn {
        background: #6C63FF !important;
        color: white !important;
    }

    .no-btn {
        background: #FF6B6B !important;
        color: white !important;
    }

    .final-reveal {
        animation: fadeIn 2s;
        font-size: 2.5rem;
        color: #6C63FF;
        text-align: center;
        margin: 2rem 0;
    }

    @keyframes fadeIn {
        from { opacity: 0; }
        to { opacity: 1; }
    }

    .confetti {
        position: fixed;
        top: 0;
        left: 0;
        width: 100%;
        height: 100%;
        pointer-events: none;
        z-index: 1000;
    }

    .confidence-meter {
        height: 10px;
        background: linear-gradient(90deg, #FF6B6B 0%, #6C63FF 100%);
        border-radius: 5px;
        margin: 10px 0;
    }
    </style>
    """, unsafe_allow_html=True)

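
# Fire a canvas-confetti burst over the page when the final guess is revealed;
# the temporary canvas removes itself after five seconds.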
def show_confetti():
    html("""
    <canvas id="confetti-canvas" class="confetti"></canvas>
    <script src="https://cdn.jsdelivr.net/npm/canvas-confetti/dist/confetti.browser.min.js"></script>
    <script>
    const canvas = document.getElementById('confetti-canvas');
    // Use a distinct name here: re-declaring `confetti` would shadow the
    // library global and throw before it could be read.
    const confettiInstance = confetti.create(canvas, { resize: true });
    confettiInstance({
        particleCount: 150,
        spread: 70,
        origin: { y: 0.6 }
    });
    setTimeout(() => { canvas.remove(); }, 5000);
    </script>
    """)

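
# Ask the Groq-hosted Llama model either for the next strategic yes/no question
# or, when is_final_guess=True, for its single best guess given the Q/A history.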
def ask_llama(conversation_history, category, is_final_guess=False):
    api_url = "https://api.groq.com/openai/v1/chat/completions"
    headers = {
        # Read the key from the environment (GROQ_API_KEY is an assumed variable
        # name) rather than embedding a secret in the source.
        "Authorization": f"Bearer {os.environ.get('GROQ_API_KEY', '')}",
        "Content-Type": "application/json"
    }

    system_prompt = f"""You're playing 20 questions to guess a {category}. Follow these rules:
1. Ask strategic, non-repeating yes/no questions that narrow down possibilities
2. Consider all previous answers carefully before asking next question
3. If you're very confident (80%+ sure), respond with "Final Guess: [your guess]"
4. For places: ask about continent, climate, famous landmarks, or population
5. For people: ask about profession, gender, alive/dead, nationality, or fame
6. For objects: ask about size, color, usage, material, or where it's found
7. Never repeat questions and always make progress toward guessing"""

    if is_final_guess:
        prompt = f"""Based on these answers about a {category}, provide ONLY your final guess with no extra text:
{conversation_history}"""
    else:
        prompt = "Ask your next strategic yes/no question that will best narrow down the possibilities."

    messages = [
        {"role": "system", "content": system_prompt},
        *conversation_history,
        {"role": "user", "content": prompt}
    ]

    data = {
        "model": "llama-3.3-70b-versatile",
        "messages": messages,
        "temperature": 0.7 if is_final_guess else 0.8,
        "max_tokens": 100
    }

    try:
        response = requests.post(api_url, headers=headers, json=data, timeout=30)
        response.raise_for_status()
        return response.json()["choices"][0]["message"]["content"]
    except Exception as e:
        st.error(f"Error calling Llama API: {str(e)}")
        return "Could not generate question"

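
# Send a free-form help question through the cached BlenderBot pipeline and
# return its latest reply. Requires the same older transformers release noted
# above, since the Conversation class was removed together with the pipeline.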
def ask_help_agent(query):
    from transformers import Conversation

    help_agent = get_help_agent()
    conversation = Conversation(query)
    result = help_agent(conversation)

    return result.generated_responses[-1]

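
# Streamlit entry point: draws the page and drives the game through three
# session states ("start" -> "gameplay" -> "result").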
def main():
    inject_custom_css()

    st.markdown('<div class="title">KASOTI</div>', unsafe_allow_html=True)
    st.markdown('<div class="subtitle">The Smart Guessing Game</div>', unsafe_allow_html=True)

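    # Initialise all per-session game state on the first run.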
    if 'game_state' not in st.session_state:
        st.session_state.game_state = "start"
        st.session_state.questions = []
        st.session_state.current_q = 0
        st.session_state.answers = []
        st.session_state.conversation_history = []
        st.session_state.category = None
        st.session_state.final_guess = None
        st.session_state.help_conversation = []

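    # --- Start screen: explain the rules and let the player pick a category ---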
    if st.session_state.game_state == "start":
        st.markdown("""
        <div class="question-box">
            <h3>Welcome to <span style='color:#6C63FF;'>KASOTI 🎯</span></h3>
            <p>Think of something and I'll try to guess it in 20 questions or less!</p>
            <p>Choose a category:</p>
            <ul>
                <li><strong>Person</strong> - celebrity, fictional character, historical figure</li>
                <li><strong>Place</strong> - city, country, landmark, geographical location</li>
                <li><strong>Object</strong> - everyday item, tool, vehicle, etc.</li>
            </ul>
            <p>Type your category below to begin:</p>
        </div>
        """, unsafe_allow_html=True)

        with st.form("start_form"):
            category_input = st.text_input("Enter category (person/place/object):").strip().lower()
            if st.form_submit_button("Start Game"):
                if not category_input:
                    st.error("Please enter a category!")
                elif category_input not in ["person", "place", "object"]:
                    st.error("Please enter either 'person', 'place', or 'object'!")
                else:
                    st.session_state.category = category_input
                    first_question = ask_llama([
                        {"role": "user", "content": "Ask your first strategic yes/no question."}
                    ], category_input)
                    st.session_state.questions = [first_question]
                    st.session_state.conversation_history = [
                        {"role": "assistant", "content": first_question}
                    ]
                    st.session_state.game_state = "gameplay"
                    st.rerun()

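    # --- Gameplay: show the current question and collect yes/no/both answers ---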
    elif st.session_state.game_state == "gameplay":
        current_question = st.session_state.questions[st.session_state.current_q]

        if "Final Guess:" in current_question:
            st.session_state.final_guess = current_question.split("Final Guess:")[1].strip()
            st.session_state.game_state = "result"
            st.rerun()

        st.markdown(f'<div class="question-box">Question {st.session_state.current_q + 1}/20:<br><br>'
                    f'<strong>{current_question}</strong></div>',
                    unsafe_allow_html=True)

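        # Answer form: record the reply, then ask the model for the next question (or final guess).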
        with st.form("answer_form"):
            answer_input = st.text_input("Your answer (yes/no/both):",
                                         key=f"answer_{st.session_state.current_q}").strip().lower()
            if st.form_submit_button("Submit"):
                if answer_input not in ["yes", "no", "both"]:
                    st.error("Please answer with 'yes', 'no', or 'both'!")
                else:
                    st.session_state.answers.append(answer_input)
                    st.session_state.conversation_history.append(
                        {"role": "user", "content": answer_input}
                    )

                    next_response = ask_llama(
                        st.session_state.conversation_history,
                        st.session_state.category
                    )

                    if "Final Guess:" in next_response:
                        st.session_state.final_guess = next_response.split("Final Guess:")[1].strip()
                        st.session_state.game_state = "result"
                    else:
                        st.session_state.questions.append(next_response)
                        st.session_state.conversation_history.append(
                            {"role": "assistant", "content": next_response}
                        )
                        st.session_state.current_q += 1

                        # Force a final guess once the 20-question limit shown in the UI is reached.
                        if st.session_state.current_q >= 20:
                            st.session_state.game_state = "result"

                    st.rerun()

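        # Optional helper: a local chatbot for players who want guidance while answering.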
        with st.expander("Need Help? Chat with AI Assistant"):
            help_query = st.text_input("Enter your help query:", key="help_query")
            if st.button("Send", key="send_help"):
                if help_query:
                    help_response = ask_help_agent(help_query)
                    st.session_state.help_conversation.append({"query": help_query, "response": help_response})
                else:
                    st.error("Please enter a query!")
            if st.session_state.help_conversation:
                for msg in st.session_state.help_conversation:
                    st.markdown(f"**You:** {msg['query']}")
                    st.markdown(f"**Help Assistant:** {msg['response']}")

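    # --- Result screen: reveal the final guess with confetti and offer a replay ---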
    elif st.session_state.game_state == "result":
        if st.session_state.final_guess is None:
            qa_history = "\n".join(
                [f"Q{i+1}: {q}\nA: {a}"
                 for i, (q, a) in enumerate(zip(st.session_state.questions, st.session_state.answers))]
            )

            st.session_state.final_guess = ask_llama(
                [{"role": "user", "content": qa_history}],
                st.session_state.category,
                is_final_guess=True
            )

        show_confetti()
        st.markdown('<div class="final-reveal">🎉 My guess is...</div>', unsafe_allow_html=True)
        time.sleep(1)
        st.markdown(f'<div class="final-reveal" style="font-size:3.5rem;color:#6C63FF;">{st.session_state.final_guess}</div>',
                    unsafe_allow_html=True)

        st.markdown(f"<p style='text-align:center'>Guessed in {len(st.session_state.questions)} questions</p>",
                    unsafe_allow_html=True)

        if st.button("Play Again", key="play_again"):
            st.session_state.clear()
            st.rerun()


if __name__ == "__main__":
    main()