# chat_column.py
import streamlit as st
# Assuming BASE_PROMPT is imported or defined elsewhere if not passed explicitly
# from prompt import BASE_PROMPT # Or pass it as an argument
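# Note: this module expects the caller to have initialized
# st.session_state.messages (a list of {"role": ..., "content": ...} dicts)
# before rendering the column; see the usage sketch at the bottom of the file.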


def render_chat_column(st, llm_client, model_option, max_tokens, BASE_PROMPT):
    """Renders the chat history, input, and LLM prompt generation column."""
    st.header("💬 Chat with the AI to generate JSON for Veo3")

    # --- Display Chat History ---
    # Display all existing messages first
    for message in st.session_state.messages:
        avatar = '🤖' if message["role"] == "assistant" else '🦔'
        with st.chat_message(message["role"], avatar=avatar):
            st.markdown(message["content"])

    # --- Chat Input at the bottom ---
    # Use a simple approach - just place the input after all messages
    if prompt := st.chat_input("Enter topic to generate JSON for Veo3..."):
        if len(prompt.strip()) == 0:
            st.warning("Please enter a topic.", icon="⚠️")
        elif len(prompt) > 4000:  # Example length limit
            st.error("Input is too long (max 4000 chars).", icon="🚨")
        else:
            # Add user message to history
            st.session_state.messages.append(
                {"role": "user", "content": prompt})

            # Display user message
            with st.chat_message("user", avatar='🦔'):
                st.markdown(prompt)

            # Generate and display assistant response
            # Accumulator is defined before the try so the except handler
            # below can reference it safely even if streaming never starts.
            full_response = ""
            try:
                with st.chat_message("assistant", avatar="🤖"):
                    response_placeholder = st.empty()
                    response_placeholder.markdown("Generating prompt... ▌")

                    # Construct messages for API including the conversation history
                    # 1. Start with the system prompt
                    messages_for_api = [
                        {"role": "system", "content": BASE_PROMPT}]
                    # 2. Add all messages from the session state (history)
                    messages_for_api.extend(st.session_state.messages)
                    # 3. Filter out any potential empty messages
                    messages_for_api = [
                        m for m in messages_for_api if m.get("content")]
                    stream_kwargs = {
                        "model": model_option,
                        "messages": messages_for_api,
                        "max_tokens": max_tokens,
                        "stream": True,
                    }
                    response_stream = llm_client.chat.completions.create(
                        **stream_kwargs)

                    # Stream the response
                    for chunk in response_stream:
                        chunk_content = ""
                        try:
                            if chunk.choices and chunk.choices[0].delta:
                                chunk_content = chunk.choices[0].delta.content or ""
                        except (AttributeError, IndexError):
                            chunk_content = ""
                        if chunk_content:
                            full_response += chunk_content
                            response_placeholder.markdown(full_response + "▌")

                    # Final response display
                    response_placeholder.markdown(full_response)

                # Add assistant response to history
                if not st.session_state.messages or st.session_state.messages[-1]['role'] != 'assistant':
                    st.session_state.messages.append(
                        {"role": "assistant", "content": full_response})
                elif st.session_state.messages[-1]['role'] == 'assistant':
                    # Update existing assistant message if needed
                    st.session_state.messages[-1]['content'] = full_response

                # Rerun to ensure proper layout
                st.rerun()
            except Exception as e:
                st.error(
                    f"Error during LLM response generation: {str(e)}", icon="🚨")
                # Clean up a potentially failed message: keep the user's message
                # so they can retry, but drop an empty assistant message.
                if st.session_state.messages and st.session_state.messages[-1]["role"] == "user":
                    pass
                elif st.session_state.messages and st.session_state.messages[-1]["role"] == "assistant" and not full_response:
                    st.session_state.messages.pop()
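

# Usage sketch (not part of the original module): lets the column be tried
# standalone via `streamlit run chat_column.py`. The OpenAI client, model name,
# and fallback system prompt below are assumptions for illustration; the actual
# app presumably imports BASE_PROMPT from its own prompt module (see the
# comment at the top) and passes in its own configured client.
if __name__ == "__main__":
    from openai import OpenAI  # assumes an OpenAI-compatible provider is used

    if "messages" not in st.session_state:
        st.session_state.messages = []  # chat history this module expects

    demo_client = OpenAI()  # reads OPENAI_API_KEY from the environment
    demo_prompt = "You generate JSON prompts for Veo3 from a user's topic."  # placeholder
    render_chat_column(st, demo_client, model_option="gpt-4o-mini",  # hypothetical model
                       max_tokens=1024, BASE_PROMPT=demo_prompt)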