# app.py
import gradio as gr

from my_memory_logic import run_with_session_memory


def chat_interface_fn(message, history, session_id):
    """
    Multi-turn chat function for Gradio's ChatInterface.

    'session_id' selects which stored conversation memory is used, so the
    same ID keeps context across turns. ChatInterface tracks and renders
    the chat history itself, so this function only needs to produce the
    assistant's reply for the current message.
    """
    # Get the answer from the session-based memory pipeline
    answer = run_with_session_memory(message, session_id)
    return answer


my_chat_css = """
.gradio-container { margin: auto; }
.user .wrap { text-align: right !important; }
.assistant .wrap { text-align: left !important; }
"""

with gr.Blocks(css=my_chat_css) as demo:
    gr.Markdown("### DailyWellnessAI (User on right, Assistant on left)")

    session_id_box = gr.Textbox(label="Session ID", value="abc123", interactive=True)

    # Pass the Session ID textbox as an additional input so the value the
    # user has actually typed reaches chat_interface_fn on every turn.
    # (Reading session_id_box.value inside a lambda would only ever return
    # the initial default, never later edits.)
    chat_interface = gr.ChatInterface(
        fn=chat_interface_fn,
        additional_inputs=[session_id_box],
        title="DailyWellnessAI (Session-based Memory)",
        description="Ask your questions. The session_id determines your stored memory.",
    )

demo.launch()