sunbal7 committed on
Commit
af08924
·
verified ·
1 Parent(s): c2fb213

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +505 -43
app.py CHANGED
@@ -1,45 +1,507 @@
1
import streamlit as st
import sympy as sp
import chromadb
from transformers import pipeline
from langchain.chains import RetrievalQA
from langchain_community.vectorstores import FAISS
from langchain.embeddings import SentenceTransformerEmbeddings
from langchain.llms import OpenAI
import os

# Initialize ChromaDB
# NOTE(review): `chroma_client` is created but never used below — confirm
# whether ChromaDB is still needed or was superseded by the FAISS store.
chroma_client = chromadb.PersistentClient(path="./chroma_db")
embedding_model = SentenceTransformerEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")

# Check FAISS index existence; the app cannot retrieve context without it.
faiss_path = "faiss_index"

if not os.path.exists(faiss_path):
    st.error("⚠️ FAISS index not found. Generate it first using FAISS.from_texts().")
else:
    # Load FAISS Vector Store (deserialization flag required by recent langchain)
    vectorstore = FAISS.load_local(faiss_path, embedding_model, allow_dangerous_deserialization=True)
    retriever = vectorstore.as_retriever()
    qa_chain = RetrievalQA.from_chain_type(llm=OpenAI(), retriever=retriever)

    # Load NLP Model (small T5 variant keeps inference cheap)
    model = pipeline("text2text-generation", model="google/flan-t5-small")

    st.title("🤖 AI-Driven Mathematical Model Generator")
    st.write("Enter a problem statement in natural language to get a mathematical model.")

    user_input = st.text_area("✍️ Enter your problem:")

    if st.button("🚀 Generate Model"):
        retrieved_context = qa_chain.run(user_input)  # RAG retrieval
        response = model(f"Generate a mathematical model for: {user_input}\nContext: {retrieved_context}", max_length=200)

        # Try to parse the generated text as a symbolic expression.
        # Fix: a bare `except:` also swallowed KeyboardInterrupt/SystemExit;
        # catch only the parse failures sympify actually raises.
        try:
            equation = sp.sympify(response[0]['generated_text'])
        except (sp.SympifyError, TypeError):
            equation = response[0]['generated_text']  # If parsing fails, return text

        st.subheader("📌 Mathematical Model:")
        st.latex(sp.latex(equation))
        st.code(str(equation), language='python')
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import streamlit as st
from groq import Groq
import json
from transformers import pipeline  # For sentiment analysis
import graphviz  # For visualization
import time  # For simulating processing time
# NOTE(review): `time` appears unused in this file — confirm before removing.

# --- Configuration ---
# The Groq key must live in .streamlit/secrets.toml; fail fast with a visible
# error rather than letting later API calls raise confusingly mid-session.
try:
    GROQ_API_KEY = st.secrets["GROQ_API_KEY"]
except KeyError:
    st.error("GROQ_API_KEY not found in .streamlit/secrets.toml. Please add your Groq API key there.")
    st.stop()

# Single shared client; model id is centralized so it can be swapped in one place.
client = Groq(api_key=GROQ_API_KEY)
GROQ_MODEL = "llama3-8b-8192"  # or "llama3-70b-8192" for more power
17
+
18
# Initialize sentiment analysis pipeline (cached for performance)
@st.cache_resource
def load_sentiment_analyzer():
    """Load the sentiment pipeline once per process; return None on failure."""
    model_name = "distilbert-base-uncased-finetuned-sst-2-english"  # compact => fast inference
    try:
        analyzer = pipeline("sentiment-analysis", model=model_name)
    except Exception as exc:
        st.error(f"Could not load sentiment analysis model: {exc}. Please ensure you have 'torch' or 'tensorflow' installed.")
        return None
    return analyzer

sentiment_analyzer = load_sentiment_analyzer()
29
+
30
# --- Cognitive Bias Database (for display and LLM guidance) ---
# Single source of truth: the keys seed the LLM prompt's bias list and the
# "Library" selectbox; each value carries a one-line description for display.
_BIAS_DESCRIPTIONS = (
    ("Confirmation Bias", "Seeking information that confirms existing beliefs."),
    ("Anchoring Bias", "Over-relying on the first piece of information encountered."),
    ("Sunk Cost Fallacy", "Continuing an endeavor due to past investment, not future prospects."),
    ("Availability Heuristic", "Overestimating the likelihood of events based on vivid or easily recalled examples."),
    ("Bandwagon Effect", "Doing or believing things because many others do."),
    ("Self-Serving Bias", "Attributing successes to internal factors and failures to external ones."),
    ("Framing Effect", "Being influenced by how information is presented (e.g., gains vs. losses)."),
    ("Hindsight Bias", "Believing an event was predictable after it has occurred ('I knew it all along')."),
    ("Dunning-Kruger Effect", "Low-ability individuals overestimating their competence, and high-ability individuals underestimating theirs."),
    ("Halo Effect", "Positive impression in one area influencing overall positive judgment in other areas."),
)
COGNITIVE_BIASES_INFO = {name: {"short_desc": desc} for name, desc in _BIAS_DESCRIPTIONS}
63
+
64
# --- LLM Interaction Logic ---

# Few-shot examples for robust JSON output and quality insights
# Each entry pairs a user "confession" with the exact JSON payload the model
# must emit; they are replayed as user/assistant turns in the prompt. The
# final example demonstrates the empty-"biases" fallback with a "message" key.
FEW_SHOT_EXAMPLES = [
    {
        "user_input": "I've poured so much effort into this side project over the last year, I can't just drop it now, even if it feels like it's going nowhere and burning me out. All that work would just be wasted.",
        "ai_output": {
            "biases": [
                {
                    "name": "Sunk Cost Fallacy",
                    "explanation": "The tendency to continue investing resources (time, money, effort) into an endeavor because of past commitments, rather than basing the decision on future prospects or current utility.",
                    "why_apply": "Your reasoning 'I can't just drop it now, even if it feels like it's going nowhere' and 'All that work would just be wasted' directly reflects focusing on the past effort invested as the primary motivator for continuing, rather than assessing the project's current value or future potential.",
                    "reflection_questions": [
                        "If you were starting fresh today, with no prior investment, would you still choose to start this side project?",
                        "What would be the future costs and benefits of continuing versus stopping, independent of what you've already put in?"
                    ],
                    "alternative_perspective": "Instead of focusing on past effort, consider the present and future. What would bring you more value or joy going forward, regardless of what's already done?"
                }
            ]
        }
    },
    {
        "user_input": "I was researching alternative health remedies, and I only focused on websites and testimonials that supported the one I already believed in. I completely ignored any scientific studies that suggested it might not work.",
        "ai_output": {
            "biases": [
                {
                    "name": "Confirmation Bias",
                    "explanation": "The tendency to seek out, interpret, and favor information that confirms one's existing beliefs or hypotheses, while disregarding evidence that contradicts them.",
                    "why_apply": "You explicitly stated you 'only focused on websites and testimonials that supported the one I already believed in' and 'completely ignored any scientific studies that suggested it might not work,' which is a classic manifestation of selectively gathering confirming evidence.",
                    "reflection_questions": [
                        "What evidence might contradict your initial belief about this remedy, and have you truly explored it?",
                        "How might your understanding change if you actively sought out diverse perspectives and criticisms?"
                    ],
                    "alternative_perspective": "To gain a more complete understanding, actively look for information that challenges your initial viewpoint, not just what supports it."
                }
            ]
        }
    },
    {
        "user_input": "My team decided to go with the most popular idea because everyone in the brainstorming session seemed excited about it. I had some reservations, but I didn't want to go against the group consensus.",
        "ai_output": {
            "biases": [
                {
                    "name": "Bandwagon Effect",
                    "explanation": "The tendency to do or believe things because many other people do or believe the same, regardless of one's own independent evaluation or the underlying evidence.",
                    "why_apply": "You chose the idea because 'everyone in the brainstorming session seemed excited about it' and you 'didn't want to go against the group consensus,' indicating your decision was swayed by the popularity of the idea rather than your own reservations.",
                    "reflection_questions": [
                        "What were your specific reservations about the popular idea, and how would you articulate them now?",
                        "How can you ensure your voice is heard, even when it differs from the group's initial enthusiasm, to encourage more robust discussion?"
                    ],
                    "alternative_perspective": "Independent thought can strengthen collective decisions. Sometimes, the most popular idea isn't the best, and dissenting voices can lead to better outcomes."
                }
            ]
        }
    },
    {
        "user_input": "I just don't have any particular thoughts right now.",
        "ai_output": {
            "biases": [],
            "message": "No obvious cognitive biases detected in your input at this time. Keep reflecting!"
        }
    }
]
127
+
128
def get_llm_bias_analysis(user_text):
    """
    Query the Groq LLM for a cognitive-bias analysis of *user_text*.

    Few-shot user/assistant pairs are prepended so the model reliably emits
    the expected JSON schema. Returns the raw JSON string from the model,
    or None for blank input or an API failure.
    """
    if not user_text.strip():
        return None

    # Replay each example as a user turn followed by its ideal JSON answer.
    shots = []
    for shot in FEW_SHOT_EXAMPLES:
        shots.append({"role": "user", "content": shot["user_input"]})
        shots.append({"role": "assistant", "content": json.dumps(shot["ai_output"], indent=2)})

    system_prompt = f"""
    You are a helpful and non-judgmental AI assistant specializing in cognitive psychology.
    Your task is to analyze user-provided text describing a situation, decision, or thought process,
    and gently identify potential cognitive biases that might be influencing their thinking.

    Focus on the following common biases and their effects: {', '.join(COGNITIVE_BIASES_INFO.keys())}.

    For each identified bias:
    1. State the **Bias Name**.
    2. Provide a **brief explanation** of what the bias is.
    3. Explain **why you think it might be present** in the user's text.
    4. Offer **1-2 non-judgmental, insightful reflection questions** to prompt the user for deeper self-awareness.
    5. Provide a short, **alternative perspective** or re-framing of the situation *without* the identified bias.

    If you identify multiple biases, list them clearly. If no obvious biases are detected, state that clearly.

    You MUST format your response as a JSON object, where the top-level key is 'biases' and its value is a list of bias objects.
    Each bias object should have 'name', 'explanation', 'why_apply', 'reflection_questions' (a list of strings), and 'alternative_perspective' (a string).
    If no biases are detected, return `{{ "biases": [], "message": "No obvious cognitive biases detected at this time. Keep reflecting!" }}`

    Example JSON structure for a single bias:
    ```json
    {{
        "biases": [
            {{
                "name": "Confirmation Bias",
                "explanation": "Brief description...",
                "why_apply": "Reason based on text...",
                "reflection_questions": [
                    "Question 1?",
                    "Question 2?"
                ],
                "alternative_perspective": "A different way to think about this..."
            }}
        ]
    }}
    ```
    Ensure the JSON is perfectly valid and ready for `json.loads()`. Do NOT include any extra text outside the JSON.
    """

    conversation = (
        [{"role": "system", "content": system_prompt}]
        + shots
        + [{"role": "user", "content": user_text}]
    )

    try:
        completion = client.chat.completions.create(
            messages=conversation,
            model=GROQ_MODEL,
            temperature=0.7,
            max_tokens=2000,
            response_format={"type": "json_object"},
        )
        return completion.choices[0].message.content
    except Exception as exc:
        # Boundary handler: surface the failure in the UI instead of crashing.
        st.error(f"Error communicating with the LLM: {exc}")
        return None
199
+
200
def parse_llm_response(llm_response_text):
    """
    Parse the LLM's JSON response into a structured dictionary.

    Returns the parsed dict on success; otherwise a dict with an 'error' key
    describing what went wrong (missing response, invalid JSON, or a payload
    that does not match the expected {'biases': [...]} schema).
    """
    if not llm_response_text:
        return {"error": "No LLM response received."}

    try:
        data = json.loads(llm_response_text)
    except json.JSONDecodeError as e:
        st.error(f"Failed to parse JSON response from LLM: {e}")
        st.markdown(f"**Raw LLM Response (for debugging):**\n```json\n{llm_response_text}\n```")
        return {"error": "JSON parsing failed."}

    # The model may return any valid JSON (e.g. a bare list). Guard the shape
    # before calling .get() so a malformed payload yields a clean error dict
    # instead of an uncaught AttributeError.
    if not isinstance(data, dict) or not isinstance(data.get("biases"), list):
        return {"error": "Invalid JSON format: 'biases' key missing or not a list."}
    return data
217
+
218
# --- Cognitive Maze Visualization ---
def generate_cognitive_maze(user_input, biases_detected):
    """Build a left-to-right graphviz Digraph tracing thought -> biases -> clarity."""
    dot = graphviz.Digraph(comment='Cognitive Maze', graph_attr={'rankdir': 'LR', 'splines': 'ortho'})
    dot.attr('node', shape='box', style='filled')
    dot.attr('edge', color='gray')

    # Fixed entry nodes: the user's thought flows into the analysis step.
    dot.node('start', 'Your Initial Thought', fillcolor='#ADD8E6', fontcolor='black')
    dot.node('process', 'Analyzing Thoughts', fillcolor='#FAFAD2', fontcolor='black')
    dot.edge('start', 'process')

    bias_list = biases_detected.get("biases", []) if biases_detected else []
    has_biases = bool(bias_list) and not biases_detected.get("message")

    if has_biases:
        tail = 'process'
        # Chain each bias through its reflection and reframe nodes.
        for idx, bias_info in enumerate(bias_list):
            label = bias_info.get("name", "Unknown Bias")
            bias_id, reflect_id, alt_id = f"bias_{idx}", f"reflect_{idx}", f"alt_{idx}"

            dot.node(bias_id, f"Bias: {label}", shape='octagon', fillcolor='#FFDAB9', fontcolor='black')
            dot.edge(tail, bias_id, label='Potential Influence')

            dot.node(reflect_id, f"Reflect: {label}", shape='note', fillcolor='#D8BFD8', fontcolor='black')
            dot.edge(bias_id, reflect_id, label='Prompts for self-awareness')

            dot.node(alt_id, f"Reframe: {label}", shape='parallelogram', fillcolor='#B0E0E6', fontcolor='black')
            dot.edge(reflect_id, alt_id, label='Consider alternatives')

            tail = alt_id  # path continues from the reframe node

        dot.node('enlightened', 'Deeper Understanding', fillcolor='#90EE90', fontcolor='black')
        dot.edge(tail, 'enlightened', label='Towards clarity')
    else:
        # No biases detected (or error payload): a single clear exit.
        dot.node('clear_path', 'Clear Path Ahead', fillcolor='#90EE90', fontcolor='black')
        dot.edge('process', 'clear_path', label='No obvious biases identified')

    return dot
265
+
266
# --- Bias Buster Mini-Challenges ---
def bias_buster_sunk_cost():
    """Interactive drill: practice ignoring sunk costs when choosing between projects."""
    st.subheader("💡 Sunk Cost Fallacy Buster: The Project Dilemma")
    st.write("Imagine you've invested **$10,000** and **6 months** into a new app. It's not performing as expected, and market research suggests it might not be profitable. You now have two options:")
    st.markdown("- **Option A:** Invest another **$5,000** and **3 months** to try and fix it.")
    st.markdown("- **Option B:** Abandon the project now and use the **$5,000** and **3 months** to start a *new, promising* venture.")

    st.warning("Which option feels harder to choose, even if it might be better for the future?")

    picked = st.radio(
        "Which option would you choose *if you ignored your past investment*?",
        options=["Option A (Fix existing project)", "Option B (Start new venture)"],
        index=None,
        key="sunk_cost_choice",
    )

    if not picked:
        return  # nothing selected yet — no feedback to give

    # Option B is the forward-looking (rational) pick.
    if picked == "Option B (Start new venture)":
        st.success("Correct! Focusing on future potential rather than past sunk costs is key to rational decision-making.")
    else:
        st.error("This is a classic Sunk Cost trap! It's hard to let go of what you've invested, but a new opportunity might be more valuable.")
    st.info("Remember: Resources already spent are gone regardless of your future decision. Focus on future benefits!")
288
+
289
def bias_buster_confirmation():
    """Interactive drill: notice which headline you instinctively reach for first."""
    st.subheader("🔎 Confirmation Bias Buster: The News Feed Challenge")
    st.write("You believe that 'cold showers are the ultimate health hack.' You open your news feed.")
    st.write("Below are two headlines. Click the one you are *most likely* to click on first, based on your belief.")

    left, right = st.columns(2)
    with left:
        picked_challenging = st.button("Headline 1: 'Top Scientists Debunk Cold Shower Health Claims'", key="headline1")
    with right:
        picked_confirming = st.button("Headline 2: 'New Study Reveals Incredible Benefits of Daily Cold Plunges'", key="headline2")

    if picked_challenging:
        st.warning("You clicked on the headline that challenges your belief! That's great for avoiding confirmation bias.")
        st.info("Actively seeking out information that contradicts your beliefs helps you form a more balanced view.")
    elif picked_confirming:
        st.error("You clicked on the headline that confirms your belief. This is a common way confirmation bias operates!")
        st.info("Remember to challenge your own assumptions and seek out diverse perspectives, even if they're uncomfortable.")
306
+
307
# --- Streamlit UI ---
# set_page_config must be the first Streamlit call of the page run.
st.set_page_config(
    page_title="CogniQuest: Your Thought Mirror",
    page_icon="🧠",
    layout="centered"
)

st.title("🧠 CogniQuest: Your Thought Mirror")
st.markdown(
    """
    **Understand your thinking patterns.** Describe a situation, decision, or thought process you're reflecting on.
    CogniQuest will gently highlight potential cognitive biases that might be influencing your thinking,
    and prompt you to consider alternative perspectives.
    """
)

# Initialize session state for user input and reflections
# (each key is seeded only once so values survive Streamlit reruns)
if 'user_input_text' not in st.session_state:
    st.session_state.user_input_text = ""
if 'reflection_text' not in st.session_state:
    st.session_state.reflection_text = ""
if 'analysis_results' not in st.session_state:
    st.session_state.analysis_results = None
if 'raw_llm_output_debug' not in st.session_state:
    st.session_state.raw_llm_output_debug = None
if 'current_sentiment' not in st.session_state:
    st.session_state.current_sentiment = "Neutral"

st.markdown("---")


# --- Text Input Section ---
st.subheader("✍️ Type Your Thoughts")
340
+
341
# Callback to update sentiment when text area changes
def update_sentiment_callback():
    """Recompute the emotional-tone label from the text-area contents."""
    raw = st.session_state.temp_user_input_text if sentiment_analyzer else ""
    text = raw.strip() if raw else ""
    if not text:
        # No analyzer, empty widget, or whitespace-only input.
        st.session_state.current_sentiment = "Neutral"
        return

    result = sentiment_analyzer(text)
    if not result:
        st.session_state.current_sentiment = "Neutral"
        return

    label = result[0]['label']
    score = result[0]['score']
    if label == 'POSITIVE':
        st.session_state.current_sentiment = f"😊 Positive ({score:.2f})"
    elif label == 'NEGATIVE':
        st.session_state.current_sentiment = f"😠 Negative ({score:.2f})"
    else:
        st.session_state.current_sentiment = f"😐 Neutral ({score:.2f})"
362
+
363
# Main input widget; the temp key lets the on_change callback read the
# in-flight value from session state before this script line re-executes.
user_input_text_area = st.text_area(
    "Enter your thoughts here:",
    value=st.session_state.user_input_text,
    height=200,
    placeholder="Example: 'I've spent so much time building this project, I can't just abandon it now, even if it's clearly not working out. I just know it will pay off eventually.'",
    key="temp_user_input_text",  # Use a temp key for callback
    on_change=update_sentiment_callback  # Trigger sentiment analysis on change
)
# Update actual session state after the widget renders and on_change potentially fires
st.session_state.user_input_text = user_input_text_area

# Display real-time sentiment
if sentiment_analyzer:
    st.markdown(f"**Emotional Tone:** {st.session_state.current_sentiment}")
else:
    st.warning("Sentiment analysis model not loaded. Check 'transformers' and 'torch' installation.")
379
+
380
+
381
st.markdown("---")

# Main action: send the user's text to the LLM and stash both the raw and
# parsed results in session state so later sections can render them.
if st.button("Analyze My Thoughts", type="primary"):
    if st.session_state.user_input_text:
        # Clear previous results and reflections for new analysis
        st.session_state.reflection_text = ""
        st.session_state.analysis_results = None
        st.session_state.raw_llm_output_debug = None

        with st.spinner("Summoning psychological insights from the AI... (This may take a few seconds)"):
            llm_raw_response = get_llm_bias_analysis(st.session_state.user_input_text)
            st.session_state.raw_llm_output_debug = llm_raw_response  # Store raw for debug

            if llm_raw_response:
                parsed_data = parse_llm_response(llm_raw_response)
                st.session_state.analysis_results = parsed_data  # Store parsed data

                if "error" in parsed_data:
                    st.error(f"Failed to process AI insights: {parsed_data['error']}. Check raw output below.")
                else:
                    st.success("Analysis Complete!")
            else:
                st.error("Failed to get a response from the AI. Please check your API key and network connection.")
    else:
        st.warning("Please describe your thoughts in the text area above to get an analysis.")
406
+
407
# --- Display Analysis Results ---
# Three cases: biases found, explicit "no biases" message, or unexpected shape.
if st.session_state.analysis_results:
    if st.session_state.analysis_results.get("biases"):
        st.subheader("💡 Insights for Self-Reflection:")
        for bias_info in st.session_state.analysis_results["biases"]:
            st.markdown(f"### 🤔 Potential for **{bias_info.get('name', 'Unknown Bias')}**")
            st.info(f"**Explanation:** {bias_info.get('explanation', 'No explanation provided.')}")
            st.markdown(f"**Why it might apply:** {bias_info.get('why_apply', 'No specific reason provided.')}")
            st.write("**Reflect on these questions:**")
            for prompt in bias_info.get('reflection_questions', []):
                st.markdown(f"- {prompt}")
            if bias_info.get('alternative_perspective'):
                st.success(f"**✨ Alternative Perspective:** {bias_info['alternative_perspective']}")
            st.markdown("---")
        st.markdown("Consider these insights as gentle prompts for deeper self-awareness. No judgment, just growth.")
    elif st.session_state.analysis_results.get("message"):
        st.info(st.session_state.analysis_results["message"])
    else:
        st.warning("The AI analyzed your thoughts but didn't provide clear bias insights. Try rephrasing or providing more context.")
        # NOTE(review): debug dump appears nested under the fallback branch —
        # confirm it should not also render on the success paths.
        if st.session_state.raw_llm_output_debug:
            st.markdown(f"**Raw LLM Response (for debugging):**\n```json\n{st.session_state.raw_llm_output_debug}\n```")
428
+
429
# --- Cognitive Maze Visualization ---
st.markdown("---")
st.subheader("🗺️ Your Cognitive Maze")
st.markdown("Visualizing the journey of your thoughts and where potential biases might influence your path.")

# Only draw the graph when an analysis succeeded (no 'error' key in results).
if st.session_state.analysis_results and not st.session_state.analysis_results.get("error"):
    with st.expander("Click to view your Thought Journey"):
        graph = generate_cognitive_maze(st.session_state.user_input_text, st.session_state.analysis_results)
        st.graphviz_chart(graph)
else:
    st.info("Analyze your thoughts first to see your Cognitive Maze!")
440
+
441
+
442
# --- Bias Buster Mini-Challenges ---
st.markdown("---")
st.subheader("💪 Bias Busters: Sharpen Your Thinking!")
st.markdown("Engage in quick, interactive challenges to practice identifying and overcoming common biases.")

if st.session_state.analysis_results and st.session_state.analysis_results.get("biases"):
    # Dynamically offer challenges for detected biases
    found_challenge_biases = False
    for bias_info in st.session_state.analysis_results["biases"]:
        bias_name = bias_info.get("name")
        if bias_name == "Sunk Cost Fallacy":
            with st.expander(f"Try the '{bias_name}' Buster"):
                bias_buster_sunk_cost()
            found_challenge_biases = True
        elif bias_name == "Confirmation Bias":
            with st.expander(f"Try the '{bias_name}' Buster"):
                bias_buster_confirmation()
            found_challenge_biases = True
        # Add more `elif` conditions here for other bias busters if you create them
    if not found_challenge_biases:
        st.info("No specific challenges available for the detected biases yet. Try analyzing a thought that might reveal Sunk Cost Fallacy or Confirmation Bias!")
else:
    st.info("Analyze your thoughts first to unlock personalized Bias Busters!")
465
+
466
+
467
# --- My Reflections Input Field ---
st.markdown("---")
st.subheader("📝 My Reflections")
st.markdown("Use this space to write down your thoughts, insights, or how you might apply these reflections.")
# Free-form journal area; the value round-trips through session state so it
# survives reruns (and is cleared by the Analyze button handler).
st.session_state.reflection_text = st.text_area(
    "Write your reflections here:",
    value=st.session_state.reflection_text,
    height=150,
    placeholder="Example: 'The sunk cost fallacy really resonates. I need to evaluate this project based on its future, not just what I've already put in. I'll set a clear exit strategy next time.'"
)
477
+
478
+
479
st.markdown("---")

# --- Library of Cognitive Biases ---
st.subheader("📚 Library of Cognitive Biases")
st.markdown("Want to learn more? Explore common cognitive biases:")

# sorted() over a dict iterates its keys directly; list(...keys()) was redundant.
bias_options = ["Select a Bias to Learn More"] + sorted(COGNITIVE_BIASES_INFO)
selected_bias = st.selectbox(
    "",
    options=bias_options,
    key="bias_library_select"
)

# Sentinel first entry means "nothing chosen"; only real bias names render.
if selected_bias != "Select a Bias to Learn More":
    bias_info = COGNITIVE_BIASES_INFO[selected_bias]
    st.write(f"**{selected_bias}:** {bias_info['short_desc']}")
    st.write("These biases often lead to predictable patterns in thinking and decision-making by distorting how we perceive and interpret information.")
    st.markdown(f"For a deeper understanding and real-world examples, you can search online for '{selected_bias}'.")
497
+
498
# --- Important Disclaimers ---
# Always rendered at the bottom of every page run.
st.markdown("---")
st.warning(
    """
    **Important Disclaimer:**
    This tool is for **educational and self-reflection purposes only**. It is not a substitute for professional psychological or medical advice, diagnosis, or treatment.
    If you are experiencing mental health concerns, please consult with a qualified healthcare professional.
    AI analysis may not always be accurate or complete. Your privacy is important; no identifying information is stored by this application beyond your current session.
    """
)