# chat_column.py
import streamlit as st

# Assuming BASE_PROMPT is imported or defined elsewhere if not passed explicitly
# from prompt import BASE_PROMPT  # Or pass it as an argument


def render_chat_column(st, llm_client, model_option, max_tokens, BASE_PROMPT):
    """Renders the chat history, input, and LLM prompt generation column."""
    st.header("💬 Chat with the AI to generate JSON for Veo3")

    # --- Display Chat History ---
    # Display all existing messages first
    for message in st.session_state.messages:
        avatar = '🤖' if message["role"] == "assistant" else '🦔'
        with st.chat_message(message["role"], avatar=avatar):
            st.markdown(message["content"])
    # --- Chat Input at the bottom ---
    # Use a simple approach - just place the input after all messages
    if prompt := st.chat_input("Enter topic to generate JSON for Veo3..."):
        if len(prompt.strip()) == 0:
            st.warning("Please enter a topic.", icon="⚠️")
        elif len(prompt) > 4000:  # Example length limit
            st.error("Input is too long (max 4000 chars).", icon="🚨")
        else:
            # Add user message to history
            st.session_state.messages.append(
                {"role": "user", "content": prompt})
            # Display user message
            with st.chat_message("user", avatar='🦔'):
                st.markdown(prompt)
            # Generate and display assistant response
            full_response = ""  # Defined before the try so the except block can reference it safely
            try:
                with st.chat_message("assistant", avatar="🤖"):
                    response_placeholder = st.empty()
                    response_placeholder.markdown("Generating prompt... ▌")

                    # Construct the message list for the API call:
                    # 1. Start with the system prompt
                    messages_for_api = [
                        {"role": "system", "content": BASE_PROMPT}]
                    # 2. Add all messages from the session state (history)
                    messages_for_api.extend(st.session_state.messages)
                    # 3. Filter out any potential empty messages
                    messages_for_api = [
                        m for m in messages_for_api if m.get("content")]

                    stream_kwargs = {
                        "model": model_option,
                        "messages": messages_for_api,
                        "max_tokens": max_tokens,
                        "stream": True,
                    }
                    response_stream = llm_client.chat.completions.create(
                        **stream_kwargs)
                    # Stream the response chunk by chunk
                    for chunk in response_stream:
                        chunk_content = ""
                        try:
                            if chunk.choices and chunk.choices[0].delta:
                                chunk_content = chunk.choices[0].delta.content or ""
                        except (AttributeError, IndexError):
                            chunk_content = ""
                        if chunk_content:
                            full_response += chunk_content
                            response_placeholder.markdown(full_response + "▌")

                    # Final response display (without the streaming cursor)
                    response_placeholder.markdown(full_response)
                # Add assistant response to history (update in place if the
                # last entry is already an assistant message)
                if not st.session_state.messages or st.session_state.messages[-1]['role'] != 'assistant':
                    st.session_state.messages.append(
                        {"role": "assistant", "content": full_response})
                else:
                    st.session_state.messages[-1]['content'] = full_response
            except Exception as e:
                st.error(
                    f"Error during LLM response generation: {str(e)}", icon="🚨")
                # Clean up a failed (empty) assistant message; the user's
                # message is kept so they can retry.
                if (st.session_state.messages
                        and st.session_state.messages[-1]["role"] == "assistant"
                        and not full_response):
                    st.session_state.messages.pop()
            else:
                # Rerun to ensure proper layout. This lives in the `else` branch
                # so the RerunException raised by st.rerun() is not swallowed by
                # the generic exception handler above.
                st.rerun()
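

# --- Hypothetical usage sketch (an assumption, not part of the original module) ---
# A minimal example of how render_chat_column might be wired up by a main app,
# assuming an OpenAI-compatible client, a placeholder BASE_PROMPT, and simple
# sidebar controls; the model names, token range, and secret key name below are
# illustrative guesses, not values from the original app.
# Run with: streamlit run chat_column.py
if __name__ == "__main__":
    from openai import OpenAI  # assumed OpenAI-compatible SDK

    BASE_PROMPT = "You generate structured JSON prompts for Veo3 video generation."  # placeholder

    # Initialise the chat history before rendering the chat column
    if "messages" not in st.session_state:
        st.session_state.messages = []

    llm_client = OpenAI(api_key=st.secrets.get("OPENAI_API_KEY", ""))
    model_option = st.sidebar.selectbox("Model", ["gpt-4o-mini", "gpt-4o"])
    max_tokens = st.sidebar.slider("Max tokens", 256, 4096, 1024)

    render_chat_column(st, llm_client, model_option, max_tokens, BASE_PROMPT)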