iisadia commited on
Commit
e1bf4d4
·
verified ·
1 Parent(s): 03c49b9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +41 -347
app.py CHANGED
@@ -1,349 +1,43 @@
1
- import streamlit as st
2
- import time
3
- import requests
4
- from streamlit.components.v1 import html
5
-
6
- # Import transformers and cache the help agent for performance
7
- @st.cache_resource
8
- def get_help_agent():
9
- from transformers import pipeline
10
- # Using BlenderBot 400M Distill as the public conversational model
11
- return pipeline("conversational", model="facebook/blenderbot-400M-distill")
12
-
13
- # Custom CSS for professional look (fixed text color)
14
- def inject_custom_css():
15
- st.markdown("""
16
- <style>
17
- @import url('https://fonts.googleapis.com/css2?family=Poppins:wght@400;600;700&display=swap');
18
-
19
- * {
20
- font-family: 'Poppins', sans-serif;
21
- }
22
-
23
- .title {
24
- font-size: 3rem !important;
25
- font-weight: 700 !important;
26
- color: #6C63FF !important;
27
- text-align: center;
28
- margin-bottom: 0.5rem;
29
- }
30
-
31
- .subtitle {
32
- font-size: 1.2rem !important;
33
- text-align: center;
34
- color: #666 !important;
35
- margin-bottom: 2rem;
36
- }
37
-
38
- .question-box {
39
- background: #F8F9FA;
40
- border-radius: 15px;
41
- padding: 2rem;
42
- margin: 1.5rem 0;
43
- box-shadow: 0 4px 6px rgba(0,0,0,0.1);
44
- color: black !important;
45
- }
46
-
47
- .answer-btn {
48
- border-radius: 12px !important;
49
- padding: 0.5rem 1.5rem !important;
50
- font-weight: 600 !important;
51
- margin: 0.5rem !important;
52
- }
53
-
54
- .yes-btn {
55
- background: #6C63FF !important;
56
- color: white !important;
57
- }
58
-
59
- .no-btn {
60
- background: #FF6B6B !important;
61
- color: white !important;
62
- }
63
-
64
- .final-reveal {
65
- animation: fadeIn 2s;
66
- font-size: 2.5rem;
67
- color: #6C63FF;
68
- text-align: center;
69
- margin: 2rem 0;
70
- }
71
-
72
- @keyframes fadeIn {
73
- from { opacity: 0; }
74
- to { opacity: 1; }
75
- }
76
-
77
- .confetti {
78
- position: fixed;
79
- top: 0;
80
- left: 0;
81
- width: 100%;
82
- height: 100%;
83
- pointer-events: none;
84
- z-index: 1000;
85
- }
86
-
87
- .confidence-meter {
88
- height: 10px;
89
- background: linear-gradient(90deg, #FF6B6B 0%, #6C63FF 100%);
90
- border-radius: 5px;
91
- margin: 10px 0;
92
- }
93
- </style>
94
- """, unsafe_allow_html=True)
95
-
96
- # Confetti animation
97
- def show_confetti():
98
- html("""
99
- <canvas id="confetti-canvas" class="confetti"></canvas>
100
- <script src="https://cdn.jsdelivr.net/npm/[email protected]/dist/confetti.browser.min.js"></script>
101
- <script>
102
- const canvas = document.getElementById('confetti-canvas');
103
- const confetti = confetti.create(canvas, { resize: true });
104
- confetti({
105
- particleCount: 150,
106
- spread: 70,
107
- origin: { y: 0.6 }
108
- });
109
- setTimeout(() => { canvas.remove(); }, 5000);
110
- </script>
111
- """)
112
-
113
- # Enhanced AI question generation for guessing game using Llama model
114
- def ask_llama(conversation_history, category, is_final_guess=False):
115
- api_url = "https://api.groq.com/openai/v1/chat/completions"
116
- headers = {
117
- "Authorization": "Bearer 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",
118
- "Content-Type": "application/json"
119
- }
120
-
121
- system_prompt = f"""You're playing 20 questions to guess a {category}. Follow these rules:
122
- 1. Ask strategic, non-repeating yes/no questions that narrow down possibilities
123
- 2. Consider all previous answers carefully before asking next question
124
- 3. If you're very confident (80%+ sure), respond with "Final Guess: [your guess]"
125
- 4. For places: ask about continent, climate, famous landmarks, country, city or population
126
- 5. For people: ask about fictional or real, profession, gender, alive/dead, nationality, or fame
127
- 6. For objects: ask about size, color, usage, material, or where it's found
128
- 7. Never repeat questions and always make progress toward guessing"""
129
-
130
- if is_final_guess:
131
- prompt = f"""Based on these answers about a {category}, provide ONLY your final guess with no extra text:
132
- {conversation_history}"""
133
- else:
134
- prompt = "Ask your next strategic yes/no question that will best narrow down the possibilities."
135
-
136
- messages = [
137
- {"role": "system", "content": system_prompt},
138
- *conversation_history,
139
- {"role": "user", "content": prompt}
140
- ]
141
-
142
- data = {
143
- "model": "llama-3.3-70b-versatile",
144
- "messages": messages,
145
- "temperature": 0.7 if is_final_guess else 0.8,
146
- "max_tokens": 100
147
- }
148
-
149
- try:
150
- response = requests.post(api_url, headers=headers, json=data)
151
- response.raise_for_status()
152
- return response.json()["choices"][0]["message"]["content"]
153
- except Exception as e:
154
- st.error(f"Error calling Llama API: {str(e)}")
155
- return "Could not generate question"
156
-
157
- # New function for the help AI assistant using a Hugging Face chatbot model
158
- def ask_help_agent(query):
159
- # Use a try/except block to import Conversation from the correct module,
160
- # accommodating different versions of transformers
161
- try:
162
- from transformers import Conversation
163
- except ImportError:
164
- from transformers.pipelines.conversational import Conversation
165
- # Get the cached help agent (BlenderBot)
166
- help_agent = get_help_agent()
167
- conversation = Conversation(query)
168
- result = help_agent(conversation)
169
- # The generated response is stored in generated_responses list
170
- return result.generated_responses[-1]
171
-
172
- # Main game logic
173
- def main():
174
- inject_custom_css()
175
-
176
- st.markdown('<div class="title">KASOTI</div>', unsafe_allow_html=True)
177
- st.markdown('<div class="subtitle">The Smart Guessing Game</div>', unsafe_allow_html=True)
178
-
179
- if 'game_state' not in st.session_state:
180
- st.session_state.game_state = "start"
181
- st.session_state.questions = []
182
- st.session_state.current_q = 0
183
- st.session_state.answers = []
184
- st.session_state.conversation_history = []
185
- st.session_state.category = None
186
- st.session_state.final_guess = None
187
- st.session_state.help_conversation = [] # separate history for help agent
188
-
189
- # Start screen
190
- if st.session_state.game_state == "start":
191
- st.markdown("""
192
- <div class="question-box">
193
- <h3>Welcome to <span style='color:#6C63FF;'>KASOTI 🎯</span></h3>
194
- <p>Think of something and I'll try to guess it in 20 questions or less!</p>
195
- <p>Choose a category:</p>
196
- <ul>
197
- <li><strong>Person</strong> - celebrity, fictional character, historical figure</li>
198
- <li><strong>Place</strong> - city, country, landmark, geographical location</li>
199
- <li><strong>Object</strong> - everyday item, tool, vehicle, etc.</li>
200
- </ul>
201
- <p>Type your category below to begin:</p>
202
- </div>
203
- """, unsafe_allow_html=True)
204
-
205
- with st.form("start_form"):
206
- category_input = st.text_input("Enter category (person/place/object):").strip().lower()
207
- if st.form_submit_button("Start Game"):
208
- if not category_input:
209
- st.error("Please enter a category!")
210
- elif category_input not in ["person", "place", "object"]:
211
- st.error("Please enter either 'person', 'place', or 'object'!")
212
- else:
213
- st.session_state.category = category_input
214
- first_question = ask_llama([
215
- {"role": "user", "content": "Ask your first strategic yes/no question."}
216
- ], category_input)
217
- st.session_state.questions = [first_question]
218
- st.session_state.conversation_history = [
219
- {"role": "assistant", "content": first_question}
220
- ]
221
- st.session_state.game_state = "gameplay"
222
- st.rerun()
223
-
224
- # Gameplay screen
225
- elif st.session_state.game_state == "gameplay":
226
- current_question = st.session_state.questions[st.session_state.current_q]
227
-
228
- # Check if AI made a guess
229
- if "Final Guess:" in current_question:
230
- st.session_state.final_guess = current_question.split("Final Guess:")[1].strip()
231
- st.session_state.game_state = "confirm_guess"
232
- st.rerun()
233
-
234
- st.markdown(f'<div class="question-box">Question {st.session_state.current_q + 1}/20:<br><br>'
235
- f'<strong>{current_question}</strong></div>',
236
- unsafe_allow_html=True)
237
-
238
- with st.form("answer_form"):
239
- answer_input = st.text_input("Your answer (yes/no/both):",
240
- key=f"answer_{st.session_state.current_q}").strip().lower()
241
- if st.form_submit_button("Submit"):
242
- if answer_input not in ["yes", "no", "both"]:
243
- st.error("Please answer with 'yes', 'no', or 'both'!")
244
- else:
245
- st.session_state.answers.append(answer_input)
246
- st.session_state.conversation_history.append(
247
- {"role": "user", "content": answer_input}
248
- )
249
-
250
- # Generate next response
251
- next_response = ask_llama(
252
- st.session_state.conversation_history,
253
- st.session_state.category
254
- )
255
-
256
- # Check if AI made a guess
257
- if "Final Guess:" in next_response:
258
- st.session_state.final_guess = next_response.split("Final Guess:")[1].strip()
259
- st.session_state.game_state = "confirm_guess"
260
- else:
261
- st.session_state.questions.append(next_response)
262
- st.session_state.conversation_history.append(
263
- {"role": "assistant", "content": next_response}
264
- )
265
- st.session_state.current_q += 1
266
-
267
- # Stop after 20 questions max
268
- if st.session_state.current_q >= 20:
269
- st.session_state.game_state = "result"
270
-
271
- st.rerun()
272
-
273
- # Side Help Option: independent chat with an AI help assistant (Hugging Face model)
274
- with st.expander("Need Help? Chat with AI Assistant"):
275
- help_query = st.text_input("Enter your help query:", key="help_query")
276
- if st.button("Send", key="send_help"):
277
- if help_query:
278
- help_response = ask_help_agent(help_query)
279
- st.session_state.help_conversation.append({"query": help_query, "response": help_response})
280
- else:
281
- st.error("Please enter a query!")
282
- if st.session_state.help_conversation:
283
- for msg in st.session_state.help_conversation:
284
- st.markdown(f"**You:** {msg['query']}")
285
- st.markdown(f"**Help Assistant:** {msg['response']}")
286
-
287
- # Guess confirmation screen using text input response
288
- elif st.session_state.game_state == "confirm_guess":
289
- st.markdown(f'<div class="question-box">🤖 My Final Guess:<br><br>'
290
- f'<strong>Is it {st.session_state.final_guess}?</strong></div>',
291
- unsafe_allow_html=True)
292
-
293
- with st.form("confirm_form"):
294
- confirm_input = st.text_input("Type your answer (yes/no/both):", key="confirm_input").strip().lower()
295
- if st.form_submit_button("Submit"):
296
- if confirm_input not in ["yes", "no", "both"]:
297
- st.error("Please answer with 'yes', 'no', or 'both'!")
298
- else:
299
- if confirm_input == "yes":
300
- st.session_state.game_state = "result"
301
- st.rerun()
302
- st.stop() # Immediately halt further execution
303
- else:
304
- # Add negative response to history and continue gameplay
305
- st.session_state.conversation_history.append(
306
- {"role": "user", "content": "no"}
307
- )
308
- st.session_state.game_state = "gameplay"
309
- next_response = ask_llama(
310
- st.session_state.conversation_history,
311
- st.session_state.category
312
- )
313
- st.session_state.questions.append(next_response)
314
- st.session_state.conversation_history.append(
315
- {"role": "assistant", "content": next_response}
316
- )
317
- st.session_state.current_q += 1
318
- st.rerun()
319
-
320
- # Result screen
321
- elif st.session_state.game_state == "result":
322
- if not st.session_state.final_guess:
323
- # Generate final guess if not already made
324
- qa_history = "\n".join(
325
- [f"Q{i+1}: {q}\nA: {a}"
326
- for i, (q, a) in enumerate(zip(st.session_state.questions, st.session_state.answers))]
327
- )
328
-
329
- final_guess = ask_llama(
330
- [{"role": "user", "content": qa_history}],
331
- st.session_state.category,
332
- is_final_guess=True
333
- )
334
- st.session_state.final_guess = final_guess.split("Final Guess:")[-1].strip()
335
-
336
- show_confetti()
337
- st.markdown(f'<div class="final-reveal">🎉 It\'s...</div>', unsafe_allow_html=True)
338
- time.sleep(1)
339
- st.markdown(f'<div class="final-reveal" style="font-size:3.5rem;color:#6C63FF;">{st.session_state.final_guess}</div>',
340
- unsafe_allow_html=True)
341
- st.markdown(f"<p style='text-align:center'>Guessed in {len(st.session_state.questions)} questions</p>",
342
- unsafe_allow_html=True)
343
-
344
- if st.button("Play Again", key="play_again"):
345
- st.session_state.clear()
346
- st.rerun()
347
 
348
  if __name__ == "__main__":
349
- main()
 
1
from flask import Flask, request, render_template
from transformers import pipeline, AutoTokenizer
import torch

app = Flask(__name__)

# Instruction-tuned model backing the chatbot.  Loaded once at import time so
# every request reuses the same tokenizer and generation pipeline.
model_name = "mistralai/Mistral-7B-Instruct-v0.2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
chatbot = pipeline(
    "text-generation",
    model=model_name,
    tokenizer=tokenizer,
    torch_dtype=torch.float16,  # half precision to cut memory use
    device_map="auto",          # places the model on GPU when one is available
)
17
+
18
@app.route("/", methods=["GET", "POST"])
def home():
    """Serve the chat page.

    GET renders an empty ``index.html``.  POST reads the submitted
    ``user_input`` form field, generates a model reply, and re-renders the
    template with both the user's text and the bot's response.
    """
    if request.method == "POST":
        # .get() with a default avoids Flask aborting with 400 BadRequest
        # when the form field is missing (e.g. a hand-crafted POST).
        user_input = request.form.get("user_input", "")
        response = generate_response(user_input)
        return render_template("index.html", user_input=user_input, bot_response=response)
    return render_template("index.html")
25
+
26
def generate_response(prompt):
    """Generate a chat reply for *prompt* with the instruction-tuned model.

    The raw user text is wrapped in the model's chat template (with the
    generation prompt appended) before being passed to the pipeline.

    Returns only the newly generated text — the model's reply — not the
    echoed prompt.
    """
    messages = [{"role": "user", "content": prompt}]
    # Keep the templated string in its own variable instead of shadowing the
    # `prompt` parameter, which made the original offset-slicing fragile.
    chat_prompt = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )

    outputs = chatbot(
        chat_prompt,
        max_new_tokens=256,
        do_sample=True,
        temperature=0.7,
        top_k=50,
        top_p=0.95,
        # The documented way to get only the continuation; replaces the
        # error-prone `generated_text[len(prompt):]` character slicing.
        return_full_text=False,
    )
    return outputs[0]["generated_text"]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
if __name__ == "__main__":
    # NOTE(review): debug=True combined with host="0.0.0.0" exposes the
    # Werkzeug interactive debugger to the network — development use only.
    app.run(host="0.0.0.0", port=5000, debug=True)