Chris4K committed · verified
Commit c757f82 · 1 Parent(s): f21190a

Update app.py

Files changed (1): app.py (+125 −112)

app.py CHANGED
@@ -68,23 +68,26 @@ def load_initial_greeting(filepath="greeting_prompt.txt") -> str:
         logger.warning(f"Warning: Prompt file '{filepath}' not found.")
         return "Welcome to DIYO! I'm here to help you create amazing DIY projects. What would you like to build today?"
 
-async def chat_fn(user_input: str, history: dict, input_graph_state: dict, uuid: UUID, prompt: str, search_enabled: bool, download_website_text_enabled: bool):
+async def chat_fn(user_input: str, history: list, input_graph_state: dict, uuid: UUID, prompt: str, search_enabled: bool, download_website_text_enabled: bool):
     """
-    Chat function that handles both 'messages' and 'tuples' format for compatibility
+    Chat function that works with the tuples format for maximum compatibility
 
     Args:
         user_input (str): The user's input message
-        history (dict): The history of the conversation in gradio (format depends on chatbot type)
-        input_graph_state (dict): The current state of the graph. This includes tool call history
+        history (list): The history of the conversation in tuples format [(user_msg, bot_msg), ...]
+        input_graph_state (dict): The current state of the graph
         uuid (UUID): The unique identifier for the current conversation
         prompt (str): The system prompt
     Yields:
-        str: The output message
-        dict|Any: The final state of the graph
-        bool|Any: Whether to trigger follow up questions
+        list: Updated history in tuples format
+        dict: The final state of the graph
+        bool: Whether to trigger follow-up questions
     """
     try:
         logger.info(f"Processing user input: {user_input[:100]}...")
+        logger.info(f"History format: {type(history)}, length: {len(history) if history else 0}")
+        if history:
+            logger.info(f"Sample history entry: {history[0]}")
 
         # Initialize input_graph_state if None
         if input_graph_state is None:
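
For reference, the tuples-format history that the new signature expects is a list of (user_msg, bot_msg) pairs; a None user slot renders as a bot-only message, which is how the greeting entries later in this diff work. A minimal sketch (contents illustrative):

    history = [
        (None, "Welcome to DIYO! I'm here to help you create amazing DIY projects."),
        ("How do I build a bookshelf?", "Start by measuring the available space..."),
    ]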
@@ -97,8 +100,16 @@ async def chat_fn(user_input: str, history: dict, input_graph_state: dict, uuid:
         if prompt:
             input_graph_state["prompt"] = prompt
 
+        # Convert the tuples history to the internal messages format for graph processing
+        if not isinstance(history, list):
+            history = []
+
+        internal_messages = convert_from_tuples_format(history)
+        logger.info(f"Converted {len(history)} tuples to {len(internal_messages)} internal messages")
+
         if input_graph_state.get("awaiting_human_input"):
-            input_graph_state["messages"].append(
+            internal_messages.append(
                 ToolMessage(
                     tool_call_id=input_graph_state.pop("human_assistance_tool_id"),
                     content=user_input
@@ -107,12 +118,12 @@ async def chat_fn(user_input: str, history: dict, input_graph_state: dict, uuid:
             input_graph_state["awaiting_human_input"] = False
         else:
             # New user message
-            if "messages" not in input_graph_state:
-                input_graph_state["messages"] = []
-            input_graph_state["messages"].append(
+            internal_messages.append(
                 HumanMessage(user_input[:USER_INPUT_MAX_LENGTH])
             )
-        input_graph_state["messages"] = input_graph_state["messages"][-TRIM_MESSAGE_LENGTH:]
+
+        # Store internal messages in graph state
+        input_graph_state["messages"] = internal_messages[-TRIM_MESSAGE_LENGTH:]
 
         config = RunnableConfig(
             recursion_limit=20,
@@ -121,8 +132,12 @@ async def chat_fn(user_input: str, history: dict, input_graph_state: dict, uuid:
         )
 
         output: str = ""
-        final_state: dict | Any = {}
+        final_state: dict = {}
         waiting_output_seq: list[str] = []
+
+        # Add user message to history immediately
+        updated_history = history + [(user_input, "")]
+        logger.info(f"Updated history length: {len(updated_history)}")
 
         async for stream_mode, chunk in graph.astream(
             input_graph_state,
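
The updated_history bookkeeping introduced above follows the usual Gradio streaming pattern: append one (user_input, "") pair up front, then keep rewriting its bot slot on every yield. Stripped to its core (the chunks argument stands in for the graph's token stream and is purely illustrative):

    def stream_reply(history, user_input, chunks):
        # Append the user turn with an empty bot slot, then fill it incrementally.
        history = history + [(user_input, "")]
        output = ""
        for chunk in chunks:
            output += chunk
            history[-1] = (user_input, output)
            yield history

    for h in stream_reply([], "Hi", ["Hel", "lo!"]):
        print(h[-1])   # ('Hi', 'Hel'), then ('Hi', 'Hello!')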
@@ -140,12 +155,17 @@ async def chat_fn(user_input: str, history: dict, input_graph_state: dict, uuid:
                     if tool_name == "tavily_search_results_json":
                         query = msg_tool_call['args']['query']
                         waiting_output_seq.append(f"🔍 Searching for '{query}'...")
-                        yield "\n".join(waiting_output_seq), gr.skip(), gr.skip()
+                        # Update the last tuple with current status
+                        if updated_history:
+                            updated_history[-1] = (user_input, "\n".join(waiting_output_seq))
+                        yield updated_history, gr.skip(), gr.skip()
 
                     elif tool_name == "download_website_text":
                         url = msg_tool_call['args']['url']
                         waiting_output_seq.append(f"📥 Downloading text from '{url}'...")
-                        yield "\n".join(waiting_output_seq), gr.skip(), gr.skip()
+                        if updated_history:
+                            updated_history[-1] = (user_input, "\n".join(waiting_output_seq))
+                        yield updated_history, gr.skip(), gr.skip()
 
                     elif tool_name == "human_assistance":
                         query = msg_tool_call["args"]["query"]
@@ -155,13 +175,17 @@ async def chat_fn(user_input: str, history: dict, input_graph_state: dict, uuid:
                         final_state["awaiting_human_input"] = True
                         final_state["human_assistance_tool_id"] = msg_tool_call["id"]
 
-                        # Indicate that human input is needed
-                        yield "\n".join(waiting_output_seq), final_state, True
+                        # Update history and indicate that human input is needed
+                        if updated_history:
+                            updated_history[-1] = (user_input, "\n".join(waiting_output_seq))
+                        yield updated_history, final_state, True
                         return  # Pause execution, resume in next call
 
                     else:
                         waiting_output_seq.append(f"🔧 Running {tool_name}...")
-                        yield "\n".join(waiting_output_seq), gr.skip(), gr.skip()
+                        if updated_history:
+                            updated_history[-1] = (user_input, "\n".join(waiting_output_seq))
+                        yield updated_history, gr.skip(), gr.skip()
 
                 elif stream_mode == "messages":
                     msg, metadata = chunk
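
The human_assistance branch implements a pause/resume handshake: chat_fn records the pending tool-call id, yields with awaiting_human_input set, and returns; the next invocation answers the pending call instead of appending a HumanMessage. The same handshake in isolation (the import path is the standard langchain_core one; the id and content values are illustrative):

    from langchain_core.messages import ToolMessage

    state = {"awaiting_human_input": True, "human_assistance_tool_id": "call_123"}

    # Next user turn: answer the pending tool call rather than starting a new one.
    if state.get("awaiting_human_input"):
        reply = ToolMessage(
            tool_call_id=state.pop("human_assistance_tool_id"),
            content="Yes, use cedar for the planter box.",
        )
        state["awaiting_human_input"] = False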
@@ -180,20 +204,30 @@ async def chat_fn(user_input: str, history: dict, input_graph_state: dict, uuid:
 
                     if current_chunk_text:
                         output += current_chunk_text
-                        yield output, gr.skip(), gr.skip()
+                        # Update the last tuple with accumulated output
+                        if updated_history:
+                            updated_history[-1] = (user_input, output)
+                        yield updated_history, gr.skip(), gr.skip()
 
         # Final yield with complete response
-        yield output + " ", dict(final_state), True
+        if updated_history:
+            updated_history[-1] = (user_input, output + " ")
+        logger.info(f"Final response: {output[:100]}...")
+        yield updated_history, dict(final_state), True
 
     except Exception as e:
         logger.exception("Exception occurred in chat_fn")
-        user_error_message = "There was an error processing your request. Please try again."
-        yield user_error_message, gr.skip(), False
+        error_message = "There was an error processing your request. Please try again."
+        if not isinstance(history, list):
+            history = []
+        error_history = history + [(user_input, error_message)]
+        yield error_history, gr.skip(), False
 
 
 def convert_to_tuples_format(messages_list):
     """Convert messages format to tuples format for older Gradio versions"""
     if not isinstance(messages_list, list):
+        logger.warning(f"Expected list for messages conversion, got {type(messages_list)}")
         return []
 
     tuples = []
@@ -221,23 +255,29 @@ def convert_to_tuples_format(messages_list):
         if user_msg is not None:
             tuples.append((user_msg, ""))
 
+    logger.info(f"Converted {len(messages_list)} messages to {len(tuples)} tuples")
     return tuples
 
 
 def convert_from_tuples_format(tuples_list):
     """Convert tuples format to messages format"""
     if not isinstance(tuples_list, list):
+        logger.warning(f"Expected list for tuples conversion, got {type(tuples_list)}")
         return []
 
     messages = []
     for item in tuples_list:
         if isinstance(item, tuple) and len(item) == 2:
             user_msg, assistant_msg = item
-            if user_msg:
+            if user_msg and user_msg.strip():
                 messages.append({"role": "user", "content": user_msg})
-            if assistant_msg:
+            if assistant_msg and assistant_msg.strip():
                 messages.append({"role": "assistant", "content": assistant_msg})
+        elif isinstance(item, dict):
+            # Already in messages format
+            messages.append(item)
 
+    logger.info(f"Converted {len(tuples_list)} tuples to {len(messages)} messages")
     return messages
 
 def clear():
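
A quick behavior check for convert_from_tuples_format as defined above (values illustrative): None and whitespace-only slots are dropped, so greeting-style (None, text) entries survive only as assistant messages:

    history = [
        (None, "Welcome to DIYO!"),               # greeting: no user turn
        ("Hi", "Hello! What shall we build?"),
    ]
    messages = convert_from_tuples_format(history)
    # [{'role': 'assistant', 'content': 'Welcome to DIYO!'},
    #  {'role': 'user', 'content': 'Hi'},
    #  {'role': 'assistant', 'content': 'Hello! What shall we build?'}]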
@@ -248,25 +288,29 @@ class FollowupQuestions(BaseModel):
     """Model for langchain to use for structured output for followup questions"""
     questions: list[str]
 
-async def populate_followup_questions(end_of_chat_response: bool, messages: dict[str, str], uuid: UUID):
+async def populate_followup_questions(end_of_chat_response: bool, history: list, uuid: UUID):
     """
-    This function gets called a lot due to the asynchronous nature of streaming
-    Only populate followup questions if streaming has completed and the message is coming from the assistant
+    Generate followup questions based on chat history in tuples format
+
+    Args:
+        end_of_chat_response (bool): Whether the chat response has ended
+        history (list): Chat history in tuples format [(user, bot), ...]
+        uuid (UUID): Session UUID
     """
-    if not end_of_chat_response or not messages or len(messages) == 0:
+    if not end_of_chat_response or not history or len(history) == 0:
        return *[gr.skip() for _ in range(FOLLOWUP_QUESTION_NUMBER)], False
 
-    # Convert tuples format to messages format if needed
-    if isinstance(messages, list) and len(messages) > 0:
-        if isinstance(messages[0], tuple):
-            messages = convert_from_tuples_format(messages)
-
-    # Check if the last message is from assistant
-    if not messages or (isinstance(messages[-1], dict) and messages[-1].get("role") != "assistant"):
+    # Check if the last tuple has a bot response
+    if not history[-1][1]:  # No bot response in the last tuple
        return *[gr.skip() for _ in range(FOLLOWUP_QUESTION_NUMBER)], False
 
    try:
+        # Convert tuples format to messages format for LLM processing
+        messages = convert_from_tuples_format(history)
+
+        if not messages:
+            return *[gr.skip() for _ in range(FOLLOWUP_QUESTION_NUMBER)], False
+
        config = RunnableConfig(
            run_name="populate_followup_questions",
            configurable={"thread_id": str(uuid)}
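
The FollowupQuestions model above is meant for LangChain's structured-output support; the call site is outside this diff, but the standard pattern looks like the sketch below (the llm instance and the prompt wording are assumptions, not taken from the commit):

    structured_llm = llm.with_structured_output(FollowupQuestions)
    result = structured_llm.invoke(
        messages + [{"role": "user", "content": "Suggest three short follow-up questions."}]
    )
    result.questions   # -> list[str], e.g. one label per suggestion button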
@@ -292,26 +336,21 @@ async def populate_followup_questions(end_of_chat_response: bool, messages: dict
         logger.error(f"Error generating followup questions: {e}")
         return *[gr.Button(visible=False) for _ in range(FOLLOWUP_QUESTION_NUMBER)], False
 
-async def summarize_chat(end_of_chat_response: bool, messages: dict, sidebar_summaries: dict, uuid: UUID):
-    """Summarize chat for tab names"""
+async def summarize_chat(end_of_chat_response: bool, history: list, sidebar_summaries: dict, uuid: UUID):
+    """Summarize chat for tab names using tuples format"""
     should_return = (
         not end_of_chat_response or
-        not messages or
-        len(messages) == 0 or
+        not history or
+        len(history) == 0 or
+        not history[-1][1] or  # No bot response in last tuple
         isinstance(sidebar_summaries, type(lambda x: x)) or
         uuid in sidebar_summaries
     )
     if should_return:
         return gr.skip(), gr.skip()
 
-    # Convert tuples format to messages format if needed
-    if isinstance(messages, list) and len(messages) > 0:
-        if isinstance(messages[0], tuple):
-            messages = convert_from_tuples_format(messages)
-
-    # Check if the last message is from assistant
-    if not messages or (isinstance(messages[-1], dict) and messages[-1].get("role") != "assistant"):
-        return gr.skip(), gr.skip()
+    # Convert tuples format to messages format for processing
+    messages = convert_from_tuples_format(history)
 
     # Filter valid messages
     filtered_messages = []
@@ -346,58 +385,40 @@ async def summarize_chat(end_of_chat_response: bool, messages: dict, sidebar_sum
 
     return sidebar_summaries, False
 
-async def new_tab(uuid, gradio_graph, messages, tabs, prompt, sidebar_summaries):
+async def new_tab(uuid, gradio_graph, history, tabs, prompt, sidebar_summaries):
     """Create a new chat tab"""
     new_uuid = uuid4()
     new_graph = {}
 
     # Save current tab if it has content
-    if messages and len(messages) > 0:
+    if history and len(history) > 0:
         if uuid not in sidebar_summaries:
-            sidebar_summaries, _ = await summarize_chat(True, messages, sidebar_summaries, uuid)
+            sidebar_summaries, _ = await summarize_chat(True, history, sidebar_summaries, uuid)
         tabs[uuid] = {
             "graph": gradio_graph,
-            "messages": messages,
+            "messages": history,  # Store history as-is (tuples format)
             "prompt": prompt,
         }
 
     # Clear suggestion buttons
     suggestion_buttons = [gr.Button(visible=False) for _ in range(FOLLOWUP_QUESTION_NUMBER)]
 
-    # Load initial greeting for new chat
+    # Load initial greeting for new chat in tuples format
     greeting_text = load_initial_greeting()
-
-    # Determine format based on current chatbot configuration
-    # Check if we're using tuples format (older Gradio) or messages format
-    try:
-        # Try to detect the format from existing messages
-        uses_tuples_format = True
-        if messages and len(messages) > 0:
-            if isinstance(messages[0], dict) and "role" in messages[0]:
-                uses_tuples_format = False
-
-        if uses_tuples_format:
-            new_chat_messages_for_display = [(None, greeting_text)]
-        else:
-            new_chat_messages_for_display = [{"role": "assistant", "content": greeting_text}]
-
-    except Exception as e:
-        logger.warning(f"Error determining chat format: {e}")
-        # Default to tuples format for older Gradio
-        new_chat_messages_for_display = [(None, greeting_text)]
+    new_chat_history = [(None, greeting_text)]
 
     new_prompt = prompt if prompt else "You are a helpful DIY assistant."
 
-    return new_uuid, new_graph, new_chat_messages_for_display, tabs, new_prompt, sidebar_summaries, *suggestion_buttons
+    return new_uuid, new_graph, new_chat_history, tabs, new_prompt, sidebar_summaries, *suggestion_buttons
 
-def switch_tab(selected_uuid, tabs, gradio_graph, uuid, messages, prompt):
+def switch_tab(selected_uuid, tabs, gradio_graph, uuid, history, prompt):
     """Switch to a different chat tab"""
     try:
         # Save current state if there are messages
-        if messages and len(messages) > 0:
+        if history and len(history) > 0:
             tabs[uuid] = {
                 "graph": gradio_graph if gradio_graph else {},
-                "messages": messages,
+                "messages": history,  # Store history as-is (tuples format)
                 "prompt": prompt
             }
 
@@ -407,12 +428,12 @@ def switch_tab(selected_uuid, tabs, gradio_graph, uuid, messages, prompt):
 
         selected_tab_state = tabs[selected_uuid]
         selected_graph = selected_tab_state.get("graph", {})
-        selected_messages = selected_tab_state.get("messages", [])
+        selected_history = selected_tab_state.get("messages", [])  # This should be tuples format
         selected_prompt = selected_tab_state.get("prompt", "You are a helpful DIY assistant.")
 
         suggestion_buttons = [gr.Button(visible=False) for _ in range(FOLLOWUP_QUESTION_NUMBER)]
 
-        return selected_graph, selected_uuid, selected_messages, tabs, selected_prompt, *suggestion_buttons
+        return selected_graph, selected_uuid, selected_history, tabs, selected_prompt, *suggestion_buttons
 
     except Exception as e:
         logger.error(f"Error switching tabs: {e}")
@@ -420,11 +441,11 @@ def switch_tab(selected_uuid, tabs, gradio_graph, uuid, messages, prompt):
 
 def delete_tab(current_chat_uuid, selected_uuid, sidebar_summaries, tabs):
     """Delete a chat tab"""
-    output_messages = gr.skip()
+    output_history = gr.skip()
 
     # If deleting the current tab, clear the chatbot
     if current_chat_uuid == selected_uuid:
-        output_messages = []
+        output_history = []  # Empty tuples list
 
     # Remove from storage
     if selected_uuid in tabs:
@@ -432,7 +453,7 @@ def delete_tab(current_chat_uuid, selected_uuid, sidebar_summaries, tabs):
     if selected_uuid in sidebar_summaries:
         del sidebar_summaries[selected_uuid]
 
-    return sidebar_summaries, tabs, output_messages
+    return sidebar_summaries, tabs, output_history
 
 def submit_edit_tab(selected_uuid, sidebar_summaries, text):
     """Submit edited tab name"""
@@ -730,12 +751,17 @@ if __name__ == "__main__":
     # Check parameter availability without creating a test instance
     init_params = gr.Chatbot.__init__.__code__.co_varnames
 
-    # Always try to set type="messages" to avoid the deprecation warning
+    # For older Gradio versions, don't force the 'messages' type;
+    # let it default to the 'tuples' format to avoid compatibility issues
     if 'type' in init_params:
-        chatbot_kwargs["type"] = "messages"
-        logger.info("Using 'messages' type for chatbot")
+        # Try to set the type, but if it fails, let it default
+        try:
+            chatbot_kwargs["type"] = "tuples"  # Use tuples for maximum compatibility
+            logger.info("Using 'tuples' type for chatbot (compatibility mode)")
+        except Exception:
+            logger.warning("Could not set chatbot type, using default")
     else:
-        logger.warning("Chatbot 'type' parameter not supported, will use deprecated 'tuples' format")
+        logger.info("Chatbot 'type' parameter not supported, using default 'tuples' format")
 
     # Check if 'show_copy_button' parameter is supported
     if 'show_copy_button' in init_params:
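
The feature detection above keys off __code__.co_varnames, which lists a function's local variables as well as its parameters, so it can report false positives. inspect.signature is the stricter probe if that ever matters; a sketch, not part of the commit:

    import inspect
    import gradio as gr

    params = inspect.signature(gr.Chatbot.__init__).parameters
    chatbot_kwargs = {"type": "tuples"} if "type" in params else {}
    chatbot = gr.Chatbot(**chatbot_kwargs)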
@@ -937,10 +963,10 @@ if __name__ == "__main__":
     else:
         logger.warning("ChatInterface 'additional_outputs' not supported - some features may be limited")
 
-    # Check if 'type' parameter is supported
+    # Use tuples format to match the Chatbot for compatibility
     if 'type' in init_params:
-        chat_interface_kwargs["type"] = "messages"
-        logger.info("Added type='messages' to ChatInterface")
+        chat_interface_kwargs["type"] = "tuples"
+        logger.info("Added type='tuples' to ChatInterface (matching Chatbot format)")
 
     # Check if 'multimodal' parameter is supported
     if 'multimodal' in init_params:
@@ -981,20 +1007,13 @@ if __name__ == "__main__":
             if not message.strip():
                 return history, "", graph_state
 
-            # Add user message
+            # Add user message in tuples format
             if not isinstance(history, list):
                 history = []
 
-            # Use tuples format for older Gradio
-            history.append((message, "Processing..."))
-
-            # TODO: Integrate with your actual graph processing here
-            # For now, provide a simple response
+            # Create the response tuple
             response = f"Manual chat mode: {message} (ChatInterface not available in this Gradio version)"
-
-            # Update the last tuple with the response
-            if history:
-                history[-1] = (message, response)
+            history.append((message, response))
 
             return history, "", graph_state
         except Exception as e:
@@ -1045,7 +1064,7 @@ if __name__ == "__main__":
     def clear_current_chat():
         """Clear the current chat and reset state"""
         new_state, new_uuid = clear()
-        # Clear followup buttons
+        # Clear followup buttons and return an empty tuples list
        cleared_buttons = [gr.Button(visible=False) for _ in range(FOLLOWUP_QUESTION_NUMBER)]
        return [], new_state, new_uuid, *cleared_buttons
 
@@ -1164,17 +1183,11 @@ if __name__ == "__main__":
            if not isinstance(existing_chat_history, list):
                existing_chat_history = []
 
-            # Detect format and add greeting accordingly
-            if existing_chat_history and isinstance(existing_chat_history[0], tuple):
-                # Tuples format
-                greeting_entry = (None, greeting_message_text)
-            else:
-                # Messages format
-                greeting_entry = {"role": "assistant", "content": greeting_message_text}
-
+            # Always use tuples format for compatibility
+            greeting_entry = (None, greeting_message_text)
            updated_chat_history = [greeting_entry] + existing_chat_history
            updated_is_new_user_flag = False
-            logger.info("Greeting added for new user.")
+            logger.info("Greeting added for new user (tuples format).")
            return updated_chat_history, updated_is_new_user_flag
        else:
            logger.info("Not a new user or already greeted.")
@@ -1183,10 +1196,10 @@ if __name__ == "__main__":
        return existing_chat_history, False
 
    @demo.load(inputs=[chatbot_message_storage], outputs=[chatbot])
-    def load_messages(messages):
+    def load_messages(history):
        """Load stored messages into chatbot"""
-        if isinstance(messages, list):
-            return messages
+        if isinstance(history, list):
+            return history
        return []
 
    @demo.load(inputs=[current_prompt_state], outputs=[prompt_textbox])
@@ -1201,7 +1214,7 @@ if __name__ == "__main__":
        def load_initial_greeting():
            """Load initial greeting for users without BrowserState"""
            greeting_text = load_initial_greeting()
-            # Use tuples format for older Gradio versions without BrowserState
+            # Use tuples format for maximum compatibility
            return [(None, greeting_text)]
 
    # Launch the application
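
One caveat this hunk leaves in place: the callback is itself named load_initial_greeting, so it shadows the module-level helper of the same name, and the call inside it resolves to the callback itself rather than the file-reading helper, which would recurse. Renaming the callback avoids the shadowing; a sketch, with the decorator wiring assumed to match app.py:

    @demo.load(outputs=[chatbot])
    def load_greeting_fallback():
        """Load initial greeting for users without BrowserState"""
        greeting_text = load_initial_greeting()   # now the module-level helper
        return [(None, greeting_text)]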
 