baxin committed
Commit 93aca94 · 1 Parent(s): 9cef91f

fix ui issue

Files changed (2):
  1. app.py +1 -1
  2. chat_column.py +11 -26
app.py CHANGED
@@ -126,4 +126,4 @@ with chat_col:
     render_chat_column(st, llm_client, model_option, max_tokens, BASE_PROMPT)
 
 # --- Footer ---
-st.markdown('<div style="text-align: center; margin-top: 2em; color: #888; font-size: 1.1em;">made with 💛 by baxin</div>', unsafe_allow_html=True)
+st.markdown('<div style="text-align: center; margin-top: 2em; color: #888; font-size: 1.1em;">made with 💙 by baxin</div>', unsafe_allow_html=True)
chat_column.py CHANGED
@@ -10,23 +10,24 @@ def render_chat_column(st, llm_client, model_option, max_tokens, BASE_PROMPT):
     st.header("💬 Chat with the AI to generate JSON for Veo3")
 
     # --- Display Chat History ---
-    # (This part remains the same)
+    # Display all existing messages first
     for message in st.session_state.messages:
         avatar = '🤖' if message["role"] == "assistant" else '🦔'
         with st.chat_message(message["role"], avatar=avatar):
             st.markdown(message["content"])
 
-    # --- Chat Input and LLM Call ---
+    # --- Chat Input at the bottom ---
     if prompt := st.chat_input("Enter topic to generate JSON for Veo3..."):
         if len(prompt.strip()) == 0:
             st.warning("Please enter a topic.", icon="⚠️")
         elif len(prompt) > 4000:  # Example length limit
             st.error("Input is too long (max 4000 chars).", icon="🚨")
         else:
-            # Add user message to history and display FIRST
-            # It's important to add the user message *before* sending it to the API
+            # Add user message to history
             st.session_state.messages.append(
                 {"role": "user", "content": prompt})
+
+            # Display user message
             with st.chat_message("user", avatar='🦔'):
                 st.markdown(prompt)
 
@@ -37,41 +38,36 @@ def render_chat_column(st, llm_client, model_option, max_tokens, BASE_PROMPT):
                     response_placeholder.markdown("Generating prompt... ▌")
                     full_response = ""
 
-                    # --- MODIFICATION START ---
                     # Construct messages for API including the conversation history
-
                     # 1. Start with the system prompt
                     messages_for_api = [
                         {"role": "system", "content": BASE_PROMPT}]
 
                     # 2. Add all messages from the session state (history)
-                    # This now includes the user message we just added above.
                     messages_for_api.extend(st.session_state.messages)
 
-                    # 3. Filter out any potential empty messages (just in case)
-                    # This step might be less critical now but is good practice.
+                    # 3. Filter out any potential empty messages
                     messages_for_api = [
                         m for m in messages_for_api if m.get("content")]
-                    # --- MODIFICATION END ---
 
                     stream_kwargs = {
                         "model": model_option,
-                        "messages": messages_for_api,  # <--- Now contains history!
+                        "messages": messages_for_api,
                         "max_tokens": max_tokens,
                         "stream": True,
                     }
-                    # Assuming llm_client is correctly initialized (OpenAI or Cerebras)
+
                     response_stream = llm_client.chat.completions.create(
                         **stream_kwargs)
 
-                    # --- (Rest of the streaming and response handling code remains the same) ---
+                    # Stream the response
                     for chunk in response_stream:
                         chunk_content = ""
                         try:
                             if chunk.choices and chunk.choices[0].delta:
                                 chunk_content = chunk.choices[0].delta.content or ""
                         except (AttributeError, IndexError):
-                            chunk_content = ""  # Handle potential errors gracefully
+                            chunk_content = ""
 
                         if chunk_content:
                             full_response += chunk_content
@@ -81,29 +77,18 @@ def render_chat_column(st, llm_client, model_option, max_tokens, BASE_PROMPT):
                     response_placeholder.markdown(full_response)
 
                     # Add assistant response to history
-                    # Check if the last message isn't already the assistant's response to avoid duplicates if rerun happens unexpectedly
                     if not st.session_state.messages or st.session_state.messages[-1]['role'] != 'assistant':
                         st.session_state.messages.append(
                             {"role": "assistant", "content": full_response})
                     elif st.session_state.messages[-1]['role'] == 'assistant':
-                        # If last message is assistant, update it (useful if streaming was interrupted/retried)
+                        # Update existing assistant message if needed
                         st.session_state.messages[-1]['content'] = full_response
 
-                    # No longer updating image prompt text area here (based on previous request)
-
-                    # Rerun might still cause subtle issues with message duplication if not handled carefully,
-                    # The check above helps mitigate this. Consider removing rerun if it causes problems.
-                    # st.rerun()  # Keeping rerun commented out for now based on potential issues
-
                 except Exception as e:
                     st.error(
                         f"Error during LLM response generation: {str(e)}", icon="🚨")
                     # Clean up potentially failed message
-                    # Ensure we only pop if the *last* message is the user's (meaning the assistant failed)
                     if st.session_state.messages and st.session_state.messages[-1]["role"] == "user":
-                        # Maybe add a placeholder error message for the assistant instead of popping user?
-                        # For now, let's not pop the user's message. The error message itself indicates failure.
                         pass
-                    # Or if the assistant message was partially added:
                     elif st.session_state.messages and st.session_state.messages[-1]["role"] == "assistant" and not full_response:
                         st.session_state.messages.pop()
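
Note: the request flow that survives this commit (system prompt first, then the full session history, filtered for empty entries, then a streamed chat.completions call) can be exercised outside Streamlit. The sketch below is illustrative and not part of the commit: it assumes an OpenAI-compatible client, and the model name, BASE_PROMPT text, and sample history are placeholders. Per the comment removed in the diff, llm_client may be an OpenAI or Cerebras client; both expose chat.completions.create.

# Minimal standalone sketch of chat_column.py's request flow (assumptions noted above).
from openai import OpenAI

BASE_PROMPT = "You turn topics into JSON prompts for Veo3."  # placeholder system prompt
history = [{"role": "user", "content": "a hedgehog surfing at sunset"}]  # stands in for st.session_state.messages

client = OpenAI()  # reads OPENAI_API_KEY from the environment

# 1. System prompt first, 2. full history appended, 3. empty messages filtered out.
messages_for_api = [{"role": "system", "content": BASE_PROMPT}]
messages_for_api.extend(history)
messages_for_api = [m for m in messages_for_api if m.get("content")]

# Stream the completion and accumulate chunks, mirroring the loop in the diff.
full_response = ""
stream = client.chat.completions.create(
    model="gpt-4o-mini",  # placeholder model name
    messages=messages_for_api,
    max_tokens=1024,
    stream=True,
)
for chunk in stream:
    try:
        if chunk.choices and chunk.choices[0].delta:
            full_response += chunk.choices[0].delta.content or ""
    except (AttributeError, IndexError):
        pass  # skip malformed chunks, as the app does

print(full_response)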