GuglielmoTor committed · Commit 56bc649 · verified · 1 Parent(s): 20903a4

Update eb_agent_module.py

Files changed (1)
  1. eb_agent_module.py +143 -69
eb_agent_module.py CHANGED
@@ -103,10 +103,10 @@ GENERATION_CONFIG_PARAMS = {
  # Default safety settings list for Gemini
  # This is now a list of SafetySetting objects (or dicts if using dummy)
  try:
- DEFAULT_SAFETY_SETTINGS = [ # Renamed from DEFAULT_SAFETY_SETTINGS_LIST for consistency with app.py import
+ DEFAULT_SAFETY_SETTINGS = [
  genai_types.SafetySetting(
  category=genai_types.HarmCategory.HARM_CATEGORY_HATE_SPEECH,
- threshold=genai_types.HarmBlockThreshold.BLOCK_LOW_AND_ABOVE, # As per user example
+ threshold=genai_types.HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
  ),
  genai_types.SafetySetting(
  category=genai_types.HarmCategory.HARM_CATEGORY_HARASSMENT,
@@ -123,7 +123,6 @@ try:
  ]
  except AttributeError as e:
  logging.warning(f"Could not define DEFAULT_SAFETY_SETTINGS using real genai_types: {e}. Using placeholder list of dicts.")
- # Fallback to list of dicts if genai_types.SafetySetting or HarmCategory/HarmBlockThreshold are dummies that don't work as expected
  DEFAULT_SAFETY_SETTINGS = [
  {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_LOW_AND_ABOVE"},
  {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE"},
@@ -149,43 +148,75 @@ else:
  rag_documents_data = {
  'Title': ["Employer Branding Best Practices 2024", "Attracting Tech Talent"],
  'Text': ["Focus on authentic employee stories...", "Tech candidates value challenging projects..."]
- } # Truncated for brevity
+ }
  df_rag_documents = pd.DataFrame(rag_documents_data)
 
- # --- Schema Representation (truncated for brevity) ---
+ # --- Schema Representation ---
  def get_schema_representation(df_name: str, df: pd.DataFrame) -> str:
  if df.empty: return f"Schema for DataFrame '{df_name}': Empty.\n"
- return f"Schema for DataFrame 'df_{df_name}': {df.columns.tolist()[:3]}...\nSample:\n{df.head(1).to_string()}\n"
+ # Truncated for brevity in example, keep your full version
+ return f"Schema for DataFrame 'df_{df_name}': {df.columns.tolist()[:5]}...\nSample:\n{df.head(1).to_string()}\n"
  def get_all_schemas_representation(dataframes_dict: dict) -> str:
- return "".join(get_schema_representation(name, df) for name, df in dataframes_dict.items())
+ # Truncated for brevity in example, keep your full version
+ return "".join(get_schema_representation(name, df) for name, df in dataframes_dict.items() if isinstance(df, pd.DataFrame))
 
- # --- Advanced RAG System (truncated for brevity) ---
- class AdvancedRAGSystem:
+
+ # --- Advanced RAG System ---
+ class AdvancedRAGSystem: # Truncated for brevity, assume correct from previous versions
  def __init__(self, documents_df: pd.DataFrame, embedding_model_name: str):
  self.embedding_model_name = embedding_model_name
  self.documents_df = documents_df.copy()
- self.embeddings_generated = False # Simplified
+ self.embeddings_generated = False
  if GEMINI_API_KEY and hasattr(genai, 'embed_content') and not (hasattr(genai.embed_content, '__func__') and genai.embed_content.__func__.__qualname__.startswith('genai.embed_content')):
  try:
- self._precompute_embeddings() # Simplified
+ self._precompute_embeddings()
  self.embeddings_generated = True
+ logging.info("RAG embeddings precomputed.")
  except Exception as e: logging.error(f"RAG precomputation error: {e}")
- def _embed_fn(self, title: str, text: str) -> list[float]: # Simplified
+ else:
+ logging.warning("RAG embeddings not precomputed (API key or genai.embed_content issue).")
+
+ def _embed_fn(self, title: str, text: str) -> list[float]:
  if not self.embeddings_generated: return [0.0] * 768
- return genai.embed_content(model=self.embedding_model_name, content=text, task_type="retrieval_document", title=title)["embedding"]
- def _precompute_embeddings(self): # Simplified
+ try:
+ return genai.embed_content(model=self.embedding_model_name, content=text, task_type="retrieval_document", title=title)["embedding"]
+ except Exception as e:
+ logging.error(f"Error in _embed_fn for '{title}': {e}")
+ return [0.0] * 768
+ def _precompute_embeddings(self):
+ if 'Embeddings' not in self.documents_df.columns:
+ self.documents_df['Embeddings'] = pd.Series(dtype='object')
  self.documents_df['Embeddings'] = self.documents_df.apply(lambda row: self._embed_fn(row['Title'], row['Text']), axis=1)
- def retrieve_relevant_info(self, query_text: str, top_k: int = 1) -> str: # Simplified
- if not self.embeddings_generated: return "\n[RAG Context]\nEmbeddings not generated.\n"
+ def retrieve_relevant_info(self, query_text: str, top_k: int = 1) -> str:
+ if not self.embeddings_generated or self.documents_df['Embeddings'].isnull().all():
+ return "\n[RAG Context]\nEmbeddings not generated or all are null.\n"
  # Simplified retrieval logic for brevity
- return f"\n[RAG Context]\nRetrieved info for: {query_text} (Top {top_k})\n"
+ try:
+ query_embedding = np.array(genai.embed_content(model=self.embedding_model_name, content=query_text, task_type="retrieval_query")["embedding"])
+ # Filter out rows with invalid embeddings before stacking
+ valid_embeddings_df = self.documents_df.dropna(subset=['Embeddings'])
+ valid_embeddings_df = valid_embeddings_df[valid_embeddings_df['Embeddings'].apply(lambda x: isinstance(x, list) and len(x) > 0)]
+
+ if valid_embeddings_df.empty: return "\n[RAG Context]\nNo valid document embeddings for RAG.\n"
+
+ document_embeddings = np.stack(valid_embeddings_df['Embeddings'].apply(np.array).values)
+ if query_embedding.shape[0] != document_embeddings.shape[1]: return "\n[RAG Context]\nEmbedding dimension mismatch.\n"
+
+ dot_products = np.dot(document_embeddings, query_embedding)
+ idx = np.argsort(dot_products)[-min(top_k, len(valid_embeddings_df)):][::-1]
+
+ relevant_passages = "".join([f"\n[RAG Context from: '{valid_embeddings_df.iloc[i]['Title']}']\n{valid_embeddings_df.iloc[i]['Text']}\n" for i in idx])
+ return relevant_passages if relevant_passages else "\n[RAG Context]\nNo relevant passages found.\n"
+ except Exception as e:
+ logging.error(f"Error in RAG retrieve_relevant_info: {e}")
+ return f"\n[RAG Context]\nError during RAG retrieval: {e}\n"
 
 
  # --- PandasLLM Class (Gemini-Powered) ---
  class PandasLLM:
  def __init__(self, llm_model_name: str,
- generation_config_dict: dict, # Base config: temp, top_k, etc.
- safety_settings_list: list, # List of SafetySetting objects/dicts
+ generation_config_dict: dict,
+ safety_settings_list: list,
  data_privacy=True, force_sandbox=True):
  self.llm_model_name = llm_model_name
  self.generation_config_dict = generation_config_dict
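The new `retrieve_relevant_info` ranks documents by the dot product between the query embedding and each stored document embedding and keeps the top-k. That ranking step, stripped of the Gemini calls and DataFrame plumbing, reduces to the following (a self-contained sketch with toy vectors, not the module's real embeddings):

```python
import numpy as np

# Toy stand-ins for precomputed document embeddings and a query embedding.
document_embeddings = np.array([
    [0.10, 0.30, 0.90],  # doc 0
    [0.80, 0.10, 0.05],  # doc 1
    [0.20, 0.70, 0.40],  # doc 2
])
query_embedding = np.array([0.15, 0.25, 0.85])

top_k = 2
scores = document_embeddings @ query_embedding   # one dot product per document
top_idx = np.argsort(scores)[-top_k:][::-1]      # indices of the best-scoring docs, best first
print(top_idx, scores[top_idx])
```

The dot product matches cosine similarity only when the vectors are unit-normalized; if the embedding model does not guarantee that, normalizing both sides first makes the ranking scale-invariant.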
@@ -222,58 +253,37 @@ class PandasLLM:
  contents_for_api.append({"role": role, "parts": [{"text": entry.get("content", "")}]})
  contents_for_api.append({"role": "user", "parts": [{"text": prompt_text}]})
 
- # Prepare the full configuration object for the API call
  api_config_object = None
  try:
- # **self.generation_config_dict provides temperature, top_p, etc.
- # safety_settings takes the list of SafetySetting objects/dicts
  api_config_object = genai_types.GenerateContentConfig(
  **self.generation_config_dict,
  safety_settings=self.safety_settings_list
  )
- logging.debug(f"Constructed GenerateContentConfig object: {api_config_object}")
  except Exception as e_cfg:
- logging.error(f"Error creating GenerateContentConfig object: {e_cfg}. API call may fail or use defaults.")
- # Fallback: try to pass the raw dicts if GenerateContentConfig class itself fails (e.g. dummy issues)
- # This is less ideal as the API might strictly expect the object.
+ logging.error(f"Error creating GenerateContentConfig object: {e_cfg}.")
  api_config_object = {**self.generation_config_dict, "safety_settings": self.safety_settings_list}
 
-
- logging.info(f"\n--- Calling Gemini API via Client (model: {self.llm_model_name}) ---\n")
+ logging.info(f"\n--- Calling Gemini API via Client (model: {self.llm_model_name}) with config: {api_config_object} ---\n")
 
  try:
  model_id_for_api = self.llm_model_name
  if not model_id_for_api.startswith("models/"):
  model_id_for_api = f"models/{model_id_for_api}"
-
- response = await asyncio.to_thread(
- self.generative_model_service.generate_content,
- model=model_id_for_api,
- contents=contents_for_api,
- config=api_config_object # Use 'generation_config' as it's common, but user example used 'config'.
- # If 'client.models.generate_content' specifically needs 'config', change this.
- # For now, assuming 'generation_config' is more standard for the object.
- # UPDATE based on user's example: it should be 'config'
- # config=api_config_object
- )
- # Re-checking user's example: client.models.generate_content(..., config=types.GenerateContentConfig(...))
- # So, the parameter name should indeed be 'config'.
-
+
  response = await asyncio.to_thread(
  self.generative_model_service.generate_content,
  model=model_id_for_api,
  contents=contents_for_api,
- config=api_config_object # CORRECTED to 'config' based on user example
+ config=api_config_object
  )
 
-
  if hasattr(response, 'prompt_feedback') and response.prompt_feedback and response.prompt_feedback.block_reason:
  return f"# Error: Prompt blocked by API: {response.prompt_feedback.block_reason}."
 
  llm_output = ""
  if hasattr(response, 'text') and response.text:
  llm_output = response.text
- elif hasattr(response, 'candidates') and response.candidates: # Standard structure
+ elif hasattr(response, 'candidates') and response.candidates:
  candidate = response.candidates[0]
  if hasattr(candidate, 'content') and candidate.content and hasattr(candidate.content, 'parts') and candidate.content.parts:
  llm_output = "".join(part.text for part in candidate.content.parts if hasattr(part, 'text'))
@@ -281,23 +291,19 @@ class PandasLLM:
  return f"# Error: Empty response. Finish reason: {candidate.finish_reason}."
  else:
  return f"# Error: Unexpected API response structure: {str(response)[:200]}"
-
  return llm_output
-
  except Exception as e:
  logging.error(f"Error calling Gemini API via Client: {e}", exc_info=True)
  return f"# Error during API call: {type(e).__name__} - {str(e)[:100]}."
 
-
  async def query(self, prompt_with_query_and_context: str, dataframes_dict: dict, history: list = None) -> str:
  llm_response_text = await self._call_gemini_api_async(prompt_with_query_and_context, history)
  if self.force_sandbox:
- # ... (sandbox execution logic - truncated for brevity, assumed correct from previous versions)
  code_to_execute = ""
  if "```python" in llm_response_text:
  try:
  code_to_execute = llm_response_text.split("```python\n", 1)[1].split("\n```", 1)[0]
- except IndexError: # Try alternative split
+ except IndexError:
  try:
  code_to_execute = llm_response_text.split("```python", 1)[1].split("```", 1)[0]
  if code_to_execute.startswith("\n"): code_to_execute = code_to_execute[1:]
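The nested `split` calls above cover the two fence variants the LLM tends to emit (opening fence with and without a trailing newline). An equivalent way to express the extraction is a single regular expression (a sketch, not what the module ships):

```python
import re

FENCE = "`" * 3  # literal triple backtick, built here to keep this example block intact

def extract_python_block(llm_response_text: str) -> str:
    # DOTALL lets '.' span newlines; the non-greedy group stops at the first closing fence.
    pattern = FENCE + r"python\s*(.*?)" + FENCE
    match = re.search(pattern, llm_response_text, re.DOTALL)
    return match.group(1).strip() if match else ""

demo = "Here you go:\n" + FENCE + "python\nprint(1 + 1)\n" + FENCE
print(extract_python_block(demo))  # -> print(1 + 1)
```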
@@ -305,30 +311,46 @@ class PandasLLM:
  except IndexError: code_to_execute = ""
 
  if llm_response_text.startswith("# Error:") or not code_to_execute:
- return f"# LLM Error or No Code: {llm_response_text}"
+ # If LLM returns an error or no code, pass that through directly.
+ # The user will see the LLM's error message or its non-code response.
+ logging.warning(f"LLM response is an error or not code: {llm_response_text}")
+ return llm_response_text
 
  logging.info(f"\n--- Code to Execute: ---\n{code_to_execute}\n----------------------\n")
- # Sandbox execution (simplified for brevity)
  from io import StringIO
  import sys
  old_stdout = sys.stdout; sys.stdout = captured_output = StringIO()
- exec_globals = {'pd': pd, 'np': np} # Simplified builtins for brevity
- for name, df in dataframes_dict.items(): exec_globals[f"df_{name}"] = df
+ # Ensure dataframes_dict is correctly populated for exec_globals
+ exec_globals = {'pd': pd, 'np': np}
+ for name, df_instance in dataframes_dict.items():
+ if isinstance(df_instance, pd.DataFrame):
+ exec_globals[f"df_{name}"] = df_instance
+ else:
+ logging.warning(f"Item '{name}' in dataframes_dict is not a DataFrame. Skipping for exec_globals.")
+
  try:
  exec(code_to_execute, exec_globals, {})
  final_output_str = captured_output.getvalue()
- return final_output_str if final_output_str else "# Code executed, no print output."
+ # Check if the output is just whitespace or truly empty
+ if not final_output_str.strip(): # If only whitespace or empty
+ # This is where the "no print output" message originates.
+ # We can now add a more informative message if the code itself ran without error.
+ logging.info("Code executed successfully, but no explicit print() output was generated by the LLM's code.")
+ return "# Code executed successfully, but it did not produce any printed output. Please ensure the LLM's Python code includes print() statements for the desired results."
+ return final_output_str
  except Exception as e:
- return f"# Sandbox Execution Error: {e}\nCode:\n{code_to_execute}"
- finally: sys.stdout = old_stdout
+ logging.error(f"Sandbox Execution Error: {e}\nCode was:\n{code_to_execute}", exc_info=False)
+ return f"# Sandbox Execution Error: {type(e).__name__}: {e}\n# --- Code that caused error: ---\n{textwrap.indent(code_to_execute, '# ')}"
+ finally:
+ sys.stdout = old_stdout
  else:
  return llm_response_text
 
  # --- Employer Branding Agent ---
  class EmployerBrandingAgent:
  def __init__(self, llm_model_name: str,
- generation_config_dict: dict, # Base config (temp, top_k)
- safety_settings_list: list, # List of SafetySetting objects/dicts
+ generation_config_dict: dict,
+ safety_settings_list: list,
  all_dataframes: dict,
  rag_documents_df: pd.DataFrame,
  embedding_model_name: str,
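The sandbox in this hunk swaps `sys.stdout` by hand and restores it in the new `finally` block. The same capture can be written with `contextlib.redirect_stdout`, which restores the stream automatically; a sketch of the pattern (the demo code string and DataFrame are invented, not the module's real inputs):

```python
import contextlib
import io

import numpy as np
import pandas as pd

code_to_execute = "print(df_demo['x'].sum())"  # stand-in for LLM-generated code
exec_globals = {"pd": pd, "np": np, "df_demo": pd.DataFrame({"x": [1, 2, 3]})}

buffer = io.StringIO()
try:
    with contextlib.redirect_stdout(buffer):
        exec(code_to_execute, exec_globals, {})
    output = buffer.getvalue()
    print(output if output.strip() else "# Code executed, but printed nothing.")
except Exception as e:  # surface sandbox failures instead of crashing the caller
    print(f"# Sandbox Execution Error: {type(e).__name__}: {e}")
```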
@@ -337,7 +359,7 @@ class EmployerBrandingAgent:
  self.pandas_llm = PandasLLM(
  llm_model_name,
  generation_config_dict,
- safety_settings_list, # Pass the list here
+ safety_settings_list,
  data_privacy,
  force_sandbox
  )
@@ -348,24 +370,76 @@ class EmployerBrandingAgent:
  logging.info("EmployerBrandingAgent Initialized with updated safety settings handling.")
 
  def _build_prompt(self, user_query: str, role="Employer Branding Analyst", task_decomposition_hint=None, cot_hint=True) -> str:
- # ... (prompt building logic - truncated for brevity, assumed correct from previous versions)
- prompt = f"You are a helpful '{role}'...\n"
+ prompt = f"You are a helpful and expert '{role}'. Your primary goal is to assist with analyzing LinkedIn-related data using Pandas DataFrames.\n"
+ prompt += "You will be provided with schemas for available Pandas DataFrames and a user query.\n"
+
+ if self.pandas_llm.data_privacy:
+ prompt += "IMPORTANT: Be mindful of data privacy. Do not output raw Personally Identifiable Information (PII) like names or specific user details unless explicitly asked and absolutely necessary for the query. Summarize or aggregate data where possible.\n"
+
+ if self.pandas_llm.force_sandbox:
+ prompt += "Your main task is to GENERATE PYTHON CODE using the Pandas library to answer the user query based on the provided DataFrames. Output ONLY the Python code block.\n"
+ prompt += "The available DataFrames are already loaded and can be accessed by their dictionary keys prefixed with 'df_' (e.g., df_follower_stats, df_posts) within the execution environment.\n"
+ prompt += "Example of accessing a DataFrame: `df_follower_stats['country']`.\n"
+ prompt += "CRITICAL INSTRUCTION: Your Python code MUST include `print()` statements for ANY results, DataFrames, or values that should be displayed as the answer to the user's query. The output of these `print()` statements will be the final answer shown to the user.\n"
+ prompt += "If you define a function to perform the analysis, you MUST call this function with the appropriate DataFrame(s) and `print()` its returned value. Do not just define functions without executing them and printing their results.\n"
+ prompt += "If the query is simple and the result is a single value or a small piece of information, compute it and `print()` it directly.\n"
+ prompt += "For example, if asked for 'total followers', your code should end with something like `print(total_followers)` or `print(df_result.to_string())`.\n"
+
+ prompt += "If a column contains lists (e.g., 'skills' in a hypothetical 'df_employees'), you might need to use methods like `.explode()` or `.apply(pd.Series)` or `.apply(lambda x: ...)` for analysis.\n"
+ prompt += "If the query is ambiguous or requires clarification, ask for it instead of making assumptions. If the query cannot be answered with the given data, state that clearly in a comment within the code block (e.g. `# Cannot answer: data not available`).\n"
+ prompt += "If the query is not about data analysis or code generation (e.g. 'hello', 'how are you?'), respond politely and briefly in a comment, do not attempt to generate code (e.g. `# Hello there! How can I help you with data analysis today?`).\n"
+ prompt += "Structure your code clearly. Add comments (#) to explain each step of your logic.\n"
+ else:
+ prompt += "Your task is to analyze the data and provide a comprehensive textual answer to the user query. You can explain your reasoning step-by-step.\n"
+
+ prompt += "\n--- AVAILABLE DATA AND SCHEMAS ---\n"
  prompt += self.schemas_representation
- prompt += f"User Query: {user_query}\n"
- prompt += "Generate Python code using Pandas...\n"
+
+ rag_context = self.rag_system.retrieve_relevant_info(user_query)
+ if rag_context and "[RAG Context]" in rag_context and "No specific pre-defined context found" not in rag_context and "No highly relevant passages found" not in rag_context and "Embeddings not generated" not in rag_context:
+ prompt += f"\n--- ADDITIONAL CONTEXT (from internal knowledge base, consider this information) ---\n{rag_context}\n"
+
+ prompt += f"\n--- USER QUERY ---\n{user_query}\n"
+
+ if task_decomposition_hint:
+ prompt += f"\n--- GUIDANCE FOR ANALYSIS (Task Decomposition) ---\n{task_decomposition_hint}\n"
+
+ if cot_hint:
+ if self.pandas_llm.force_sandbox:
+ prompt += "\n--- INSTRUCTIONS FOR PYTHON CODE GENERATION (Chain of Thought & Output) ---\n"
+ prompt += "1. Understand the query: What specific information is requested?\n"
+ prompt += "2. Identify relevant DataFrame(s) and column(s) from the schemas provided.\n"
+ prompt += "3. Plan the steps: Outline the Pandas operations needed (filtering, grouping, aggregation, merging, etc.) as comments in your code.\n"
+ prompt += "4. Write the code: Implement the steps using Pandas. Remember to use `df_name_of_dataframe` (e.g. `df_follower_stats`).\n"
+ prompt += "5. CRITICAL - Ensure output: Call any functions you define and use `print()` for ALL results that should be displayed. For DataFrames, you can print the DataFrame directly (e.g., `print(my_result_df)`), or `print(df.to_string())` if it might be large. For single values, `print(my_value)`.\n"
+ prompt += "6. Review: Check for correctness, efficiency, and adherence to the prompt (especially the CRITICAL `print()` requirement for the final answer).\n"
+ prompt += "7. Generate ONLY the Python code block starting with ```python and ending with ```. No explanations outside the code block's comments.\n"
+ else:
+ prompt += "\n--- INSTRUCTIONS FOR RESPONSE (Chain of Thought) ---\n"
+ prompt += "Please provide a step-by-step explanation of your analysis before giving the final answer.\n"
+
  return prompt
 
  async def process_query(self, user_query: str, role="Employer Branding Analyst", task_decomposition_hint=None, cot_hint=True) -> str:
- # ... (process query logic - truncated for brevity, assumed correct from previous versions)
  self.chat_history.append({"role": "user", "content": user_query})
  full_prompt = self._build_prompt(user_query, role, task_decomposition_hint, cot_hint)
+
+ logging.info(f"Full prompt to LLM (last 300 chars of user query part for brevity in log): ... {full_prompt[-500:]}") # Log end of prompt
+
  response_text = await self.pandas_llm.query(full_prompt, self.all_dataframes, history=self.chat_history[:-1])
  self.chat_history.append({"role": "assistant", "content": response_text})
- # Limit history
- if len(self.chat_history) > 10: self.chat_history = self.chat_history[-10:]
+
+ MAX_HISTORY_TURNS = 5
+ if len(self.chat_history) > MAX_HISTORY_TURNS * 2:
+ self.chat_history = self.chat_history[-(MAX_HISTORY_TURNS * 2):]
+
  return response_text
 
- def update_dataframes(self, new_dataframes: dict): # Simplified
+ def update_dataframes(self, new_dataframes: dict):
  self.all_dataframes = new_dataframes
  self.schemas_representation = get_all_schemas_representation(self.all_dataframes)
- def clear_chat_history(self): self.chat_history = []
+ logging.info("EmployerBrandingAgent DataFrames updated.")
+ def clear_chat_history(self):
+ self.chat_history = []
+ logging.info("EmployerBrandingAgent chat history cleared.")
+
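End to end, the updated agent is driven asynchronously: construct it with the generation config, the safety settings list, and the DataFrames, then await `process_query`. A usage sketch against the signatures visible in this diff (run inside `eb_agent_module.py` or after importing it; model names, config values, and the sample DataFrame are placeholders):

```python
import asyncio

import pandas as pd

async def main():
    dataframes = {
        "follower_stats": pd.DataFrame({"country": ["DE", "IT"], "followers": [1200, 800]}),
    }
    agent = EmployerBrandingAgent(
        llm_model_name="gemini-2.0-flash",            # placeholder model id
        generation_config_dict={"temperature": 0.2},  # placeholder generation parameters
        safety_settings_list=DEFAULT_SAFETY_SETTINGS,
        all_dataframes=dataframes,
        rag_documents_df=df_rag_documents,
        embedding_model_name="text-embedding-004",    # placeholder embedding model
    )
    answer = await agent.process_query("How many followers do we have per country?")
    print(answer)
    agent.clear_chat_history()

asyncio.run(main())
```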
 
 
 
 
 