Fix debrief message format and remove duplicate bubbles
app.py
CHANGED
@@ -318,15 +318,14 @@ Journaling Prompt – Offer one reflective or integrative question to deepen the
 Tone: Warm, precise, emotionally attuned. Do not overuse praise, avoid pathologizing, and refrain from offering generic feedback."""
 
 # Initialize debrief conversation with just the system message
-st.session_state.debrief_messages = [
-    {"role": "system", "content": debrief_system_message}
-]
+st.session_state.debrief_messages = []
 
 try:
-    # Get the initial response
+    # Get the initial response using the system message as a parameter
     response = client.messages.create(
         model="claude-3-opus-20240229",
-
+        system=debrief_system_message,
+        messages=[{"role": "user", "content": "Please help me process this conversation."}],
         max_tokens=1000
     )
     # Add the response to the messages
@@ -343,7 +342,7 @@ Tone: Warm, precise, emotionally attuned. Do not overuse praise, avoid pathologi
 st.markdown("## 🤝 Let's Process Together")
 
 # Display debrief conversation
-for message in st.session_state.debrief_messages
+for message in st.session_state.debrief_messages:
     with st.chat_message(message["role"]):
         st.markdown(message["content"])
 
@@ -359,7 +358,12 @@ Tone: Warm, precise, emotionally attuned. Do not overuse praise, avoid pathologi
 try:
     response = client.messages.create(
         model="claude-3-opus-20240229",
-
+        system=debrief_system_message,
+        messages=[
+            {"role": "user", "content": msg["content"]}
+            for msg in st.session_state.debrief_messages
+            if msg["role"] == "user"
+        ],
         max_tokens=1000
     )
     assistant_response = response.content[0].text