Update services/analytics_handlers.py
services/analytics_handlers.py
CHANGED
@@ -224,13 +224,9 @@ class AnalyticsHandlers:
         clicked_plot_config = next((p for p in self.plot_configs if p["id"] == plot_id_clicked), None)
         if not clicked_plot_config:
             logging.error(f"Config not found for plot_id {plot_id_clicked}")
-            # Construct a list of gr.update() of the correct length
             num_outputs = len(self._get_action_panel_outputs_list())
             error_updates = [gr.update()] * num_outputs
-            #
-            # This part is tricky without knowing the exact order and meaning of each output.
-            # For simplicity, returning all gr.update() might be safer if an error occurs early.
-            # Or, more robustly, identify which states need to be passed through.
+            # Preserve existing state values if possible
             # Indices for states in action_panel_outputs_list:
             # active_panel_action_state is at index 11
             # current_chat_plot_id_st is at index 12
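
A bare gr.update() is a no-op update in Gradio (it leaves the corresponding output unchanged), so padding the whole output list with it and then overwriting only the state slots (as the next hunk does) lets the handler fail without clobbering component values. A minimal sketch of the pattern; the count and state values below are placeholders, not the real ones:

    import gradio as gr

    num_outputs = 15                       # placeholder; the real count comes from _get_action_panel_outputs_list()
    current_chat_plot_id = "plot_7"        # placeholder state values for this example
    current_chat_histories = {}
    current_explored_plot_id = None

    error_updates = [gr.update()] * num_outputs   # bare gr.update() leaves each output as-is
    error_updates[12] = current_chat_plot_id      # pass the gr.State values back through unchanged
    error_updates[13] = current_chat_histories
    error_updates[14] = current_explored_plot_id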
@@ -240,7 +236,8 @@ class AnalyticsHandlers:
             error_updates[12] = current_chat_plot_id
             error_updates[13] = current_chat_histories
             error_updates[14] = current_explored_plot_id
-            return tuple(error_updates)
+            yield tuple(error_updates) # Use yield instead of return <value>
+            return # Explicitly return to end the generator function
 
         clicked_plot_label = clicked_plot_config["label"]
         clicked_plot_section = clicked_plot_config["section"]
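
handle_panel_action awaits generate_llm_response and yields update tuples further down, so it is an async generator, and an async generator cannot return a value (a `return <value>` inside one is a SyntaxError). That is why the error branch now yields the padded tuple and then exits with a bare return. A standalone sketch of the same pattern; the handler name, outputs, and the sleep are placeholders, not the real AnalyticsHandlers code:

    import asyncio
    import gradio as gr

    async def handle_click(item_id, current_state):
        # Hypothetical two-output handler illustrating yield-then-bare-return.
        if item_id is None:
            yield gr.update(value="No item selected"), current_state  # emit the error state
            return  # bare return only: ends the async generator
        yield gr.update(value="Sto pensando..."), current_state  # intermediate update shown immediately
        await asyncio.sleep(0.1)  # stand-in for the real LLM call
        yield gr.update(value=f"Result for {item_id}"), current_state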
@@ -331,15 +328,11 @@ class AnalyticsHandlers:
 
         if not history: # First time opening insights for this plot (or after a refresh)
             prompt, sugg = get_initial_insight_prompt_and_suggestions(plot_id_clicked, clicked_plot_label, summary_for_plot)
-            # Gradio's chatbot expects a list of lists/tuples: [[user_msg, None], [None, assistant_msg]]
-            # Our generate_llm_response and history uses: [{"role": "user", "content": prompt}, {"role": "assistant", "content": resp}]
-            # We need to adapt. For now, let's assume generate_llm_response takes our format and returns a string.
-            # The history for Gradio Chatbot component needs to be [[user_msg, assistant_msg], ...]
-            # Let's build history for LLM first
             llm_history_for_generation = [{"role": "user", "content": prompt}]
 
             # Display "Thinking..." or similar
             chatbot_content_update = gr.update(value=[[prompt, "Sto pensando..."]])
+            # Yield intermediate update
             yield tuple(self._assemble_panel_action_updates(action_col_visible_update, insights_chatbot_visible_update, chatbot_content_update,
                                                             insights_chat_input_visible_update, insights_suggestions_row_visible_update,
                                                             s1_upd, s2_upd, s3_upd, formula_display_visible_update, formula_content_update,
@@ -351,9 +344,7 @@ class AnalyticsHandlers:
 
             resp_text = await generate_llm_response(prompt, plot_id_clicked, clicked_plot_label, llm_history_for_generation, summary_for_plot)
 
-            # Gradio chatbot history format
             new_gr_history_for_plot = [[prompt, resp_text]]
-            # Internal history format for re-sending to LLM
             new_internal_history_for_plot = [
                 {"role": "user", "content": prompt},
                 {"role": "assistant", "content": resp_text}
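
The hunk above keeps the same exchange in two shapes: new_internal_history_for_plot uses role/content dicts for re-sending to the LLM, while new_gr_history_for_plot uses the [user, assistant] pairs that the gr.Chatbot component renders. Side by side, with placeholder strings:

    # One user/assistant turn in both shapes used by this handler (strings are placeholders).
    internal_history = [
        {"role": "user", "content": "Describe the trend in this plot."},
        {"role": "assistant", "content": "Follower growth is accelerating."},
    ]
    gradio_history = [
        ["Describe the trend in this plot.", "Follower growth is accelerating."],
    ]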
@@ -362,18 +353,7 @@ class AnalyticsHandlers:
             chatbot_content_update = gr.update(value=new_gr_history_for_plot)
         else: # History exists, just display it
             _, sugg = get_initial_insight_prompt_and_suggestions(plot_id_clicked, clicked_plot_label, summary_for_plot) # Get fresh suggestions
-
-            gr_history_to_display = []
-            # Assuming history is [{"role":"user", "content":"..."}, {"role":"assistant", "content":"..."}]
-            # We need to pair them up. If an odd number, the last user message might not have a pair yet.
-            temp_hist = history[:] # Make a copy
-            while temp_hist:
-                user_turn = temp_hist.pop(0)
-                assistant_turn = None
-                if temp_hist and temp_hist[0]["role"] == "assistant":
-                    assistant_turn = temp_hist.pop(0)
-                gr_history_to_display.append([user_turn["content"], assistant_turn["content"] if assistant_turn else None])
-
+            gr_history_to_display = self._convert_internal_to_gradio_chat_history(history)
             chatbot_content_update = gr.update(value=gr_history_to_display)
 
             s1_upd = gr.update(value=sugg[0] if sugg and len(sugg) > 0 else "N/A")
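
The inline pairing loop deleted above is replaced by a call to self._convert_internal_to_gradio_chat_history(history). That helper is not shown in this diff; judging from the removed loop, it presumably does something close to the sketch below, written as a free function for illustration only:

    def convert_internal_to_gradio_chat_history(history):
        # Pair role/content dicts into the [[user, assistant], ...] shape gr.Chatbot expects.
        gradio_history = []
        temp_hist = history[:]  # work on a copy
        while temp_hist:
            user_turn = temp_hist.pop(0)
            assistant_turn = None
            if temp_hist and temp_hist[0]["role"] == "assistant":
                assistant_turn = temp_hist.pop(0)
            gradio_history.append(
                [user_turn["content"], assistant_turn["content"] if assistant_turn else None]
            )
        return gradio_history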
@@ -404,7 +384,7 @@ class AnalyticsHandlers:
                 generated_panel_vis_updates, generated_bomb_btn_updates,
                 generated_formula_btn_updates, generated_explore_btn_updates, section_title_vis_updates
         )
-        logging.debug(f"handle_panel_action
+        logging.debug(f"handle_panel_action yielding final updates. Count: {len(final_updates_tuple)}")
         yield final_updates_tuple
 
 
@@ -497,7 +477,7 @@ class AnalyticsHandlers:
         if thinking and gradio_history and gradio_history[-1][1] is None: # If last message was user and we are in 'thinking' mode
             gradio_history[-1][1] = "Sto pensando..." # Replace None with "Thinking..."
         elif thinking and not gradio_history: # Should not happen if user_message was added
-            pass
+            pass # Or log an error, but it implies user_message wasn't added to internal_history_for_plot before calling
 
 
         return gradio_history