# KASOTI / app.py
import streamlit as st
import time
import requests
from streamlit.components.v1 import html
import os
@st.cache_resource
def get_help_agent():
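    """Load the BlenderBot conversational pipeline once and cache it across Streamlit reruns."""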
    from transformers import pipeline
    return pipeline("conversational", model="facebook/blenderbot-400M-distill")
def inject_custom_css():
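    """Inject the app's custom CSS and the browser speech-recognition helper script."""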
    st.markdown("""
<style>
@import url('https://fonts.googleapis.com/css2?family=Poppins:wght@400;600;700&display=swap');
* { font-family: 'Poppins', sans-serif; }
.title { font-size: 3rem !important; font-weight: 700 !important; color: #6C63FF !important; text-align: center; margin-bottom: 0.5rem; }
.subtitle { font-size: 1.2rem !important; text-align: center; color: #666 !important; margin-bottom: 2rem; }
.question-box { background: #F8F9FA; border-radius: 15px; padding: 2rem; margin: 1.5rem 0; box-shadow: 0 4px 6px rgba(0,0,0,0.1); color: black !important; }
.answer-btn { border-radius: 12px !important; padding: 0.5rem 1.5rem !important; font-weight: 600 !important; margin: 0.5rem !important; }
.yes-btn { background: #6C63FF !important; color: white !important; }
.no-btn { background: #FF6B6B !important; color: white !important; }
.final-reveal { animation: fadeIn 2s; font-size: 2.5rem; color: #6C63FF; text-align: center; margin: 2rem 0; }
@keyframes fadeIn { from { opacity: 0; } to { opacity: 1; } }
.confetti { position: fixed; top: 0; left: 0; width: 100%; height: 100%; pointer-events: none; z-index: 1000; }
.confidence-meter { height: 10px; background: linear-gradient(90deg, #FF6B6B 0%, #6C63FF 100%); border-radius: 5px; margin: 10px 0; }
.mic-btn { margin-top: 29px; border: none; background: none; cursor: pointer; font-size: 1.5em; padding: 0; }
</style>
<script>
function startSpeechRecognition(inputId) {
const recognition = new (window.SpeechRecognition || window.webkitSpeechRecognition)();
recognition.lang = 'en-US';
recognition.interimResults = false;
recognition.maxAlternatives = 1;
recognition.onresult = function(event) {
const transcript = event.results[0][0].transcript.toLowerCase();
const inputElement = document.getElementById(inputId);
if (inputElement) {
inputElement.value = transcript;
const inputEvent = new Event('input', { bubbles: true });
inputElement.dispatchEvent(inputEvent);
}
};
recognition.onerror = function(event) {
console.error('Speech recognition error', event.error);
};
recognition.start();
}
</script>
""", unsafe_allow_html=True)
def show_confetti():
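    """Render a temporary full-screen canvas and fire a confetti burst via canvas-confetti."""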
    html("""
<canvas id="confetti-canvas" class="confetti"></canvas>
<script src="https://cdn.jsdelivr.net/npm/[email protected]/dist/confetti.browser.min.js"></script>
<script>
const canvas = document.getElementById('confetti-canvas');
const myConfetti = confetti.create(canvas, { resize: true });
myConfetti({ particleCount: 150, spread: 70, origin: { y: 0.6 } });
setTimeout(() => { canvas.remove(); }, 5000);
</script>
""")
def ask_llama(conversation_history, category, is_final_guess=False):
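    """Ask the Groq chat-completions API (Llama 3.3 70B) for the next strategic yes/no
    question, or for the final guess when is_final_guess is True."""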
    api_url = "https://api.groq.com/openai/v1/chat/completions"
    # Read the Groq API key from the environment (variable name assumed) instead of hard-coding the secret
    headers = {
        "Authorization": f"Bearer {os.environ.get('GROQ_API_KEY', '')}",
        "Content-Type": "application/json"
    }
    system_prompt = f"""You're playing 20 questions to guess a {category}. Follow these rules:
1. Ask strategic, non-repeating yes/no questions that narrow down the possibilities
2. Consider all previous answers carefully before asking the next question
3. If you're very confident (80%+ sure), respond with "Final Guess: [your guess]"
4. For places: ask about continent, climate, famous landmarks, country, city, or population
5. For people: ask about fictional or real, profession, gender, alive/dead, nationality, or fame
6. For objects: ask about size, color, usage, material, or where it's found
7. Never repeat questions and always make progress toward guessing"""
    if is_final_guess:
        prompt = f"""Based on these answers about a {category}, provide ONLY your final guess with no extra text:
{conversation_history}"""
    else:
        prompt = "Ask your next strategic yes/no question that will best narrow down the possibilities."
    messages = [
        {"role": "system", "content": system_prompt},
        *conversation_history,
        {"role": "user", "content": prompt}
    ]
    data = {
        "model": "llama-3.3-70b-versatile",
        "messages": messages,
        "temperature": 0.7 if is_final_guess else 0.8,
        "max_tokens": 100
    }
    try:
        # Timeout added so a hung request doesn't block the Streamlit app indefinitely
        response = requests.post(api_url, headers=headers, json=data, timeout=30)
        response.raise_for_status()
        return response.json()["choices"][0]["message"]["content"]
    except Exception as e:
        st.error(f"Error calling Llama API: {str(e)}")
        return "Could not generate question"
def ask_help_agent(query):
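    """Stream a reply to a help query from HuggingFaceH4/zephyr-7b-beta via the Hugging Face
    Inference API, replaying any stored help conversation from session state as context."""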
    try:
        from huggingface_hub import InferenceClient
        client = InferenceClient("HuggingFaceH4/zephyr-7b-beta", token=os.environ.get("HF_HUB_TOKEN"))
        system_message = "You are a friendly Chatbot."
        history = []
        if "help_conversation" in st.session_state:
            for msg in st.session_state.help_conversation:
                history.append((msg.get("query", ""), msg.get("response", "")))
        messages = [{"role": "system", "content": system_message}]
        for user_msg, bot_msg in history:
            if user_msg:
                messages.append({"role": "user", "content": user_msg})
            if bot_msg:
                messages.append({"role": "assistant", "content": bot_msg})
        messages.append({"role": "user", "content": query})
        response_text = ""
        for message in client.chat_completion(messages, max_tokens=150, stream=True, temperature=0.7, top_p=0.95):
            token = message.choices[0].delta.content
            if token:  # streamed deltas can be None or empty; skip them
                response_text += token
        return response_text
    except Exception as e:
        return f"Error in help agent: {str(e)}"
def main():
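    """Render the Streamlit UI: inject styling, initialise session state, and handle the start screen."""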
    inject_custom_css()
    st.markdown('<div class="title">KASOTI</div>', unsafe_allow_html=True)
    st.markdown('<div class="subtitle">The Smart Guessing Game</div>', unsafe_allow_html=True)
    if 'game_state' not in st.session_state:
        st.session_state.game_state = "start"
        st.session_state.questions = []
        st.session_state.current_q = 0
        st.session_state.answers = []
        st.session_state.conversation_history = []
        st.session_state.category = None
        st.session_state.final_guess = None
        st.session_state.help_conversation = []
    if st.session_state.game_state == "start":
        st.markdown("""
<div class="question-box">
<h3>Welcome to <span style='color:#6C63FF;'>KASOTI 🎯</span></h3>
<p>Think of something and I'll try to guess it in 20 questions or less!</p>
<p>Choose a category:</p>
<ul>
<li><strong>Person</strong> - celebrity, fictional character, historical figure</li>
<li><strong>Place</strong> - city, country, landmark, geographical location</li>
<li><strong>Object</strong> - everyday item, tool, vehicle, etc.</li>
</ul>
<p>Type or speak your category below to begin:</p>
</div>
""", unsafe_allow_html=True)
        with st.form("start_form"):
            col1, col2 = st.columns([4, 1])
            with col1:
                category_input = st.text_input("Enter category (person/place/object):", key="category_input").strip().lower()
            with col2:
                st.markdown("""
                <button type="button" onclick="startSpeechRecognition('text_input-category_input')" class="mic-btn">🎤</button>
                """, unsafe_allow_html=True)
            if st.form_submit_button("Start Game"):
                if not category_input:
                    st.error("Please enter a category!")
                elif category_input not in ["person", "place", "object"]:
                    st.error("Please enter either 'person', 'place', or 'object'!")
                else:
                    st.session_state.category = category_input
                    first_question = ask_llama([], category_input)
                    st.session_state.questions = [first_question]
                    st.session_state.conversation_history = [{"role": "assistant", "content": first_question}]
                    st.session_state.game_state = "playing"
if __name__ == "__main__":
    main()