GuglielmoTor committed
Commit 5deb357 · verified · Parent(s): 5cec9b7

Update eb_agent_module.py

Files changed (1): eb_agent_module.py (+348 -164)

eb_agent_module.py CHANGED
@@ -26,12 +26,18 @@ class _DummyGenAIClientModels: # Represents the dummy model service client
         return DummyResponse()

     def embed_content(self, model=None, contents=None, config=None): # Added dummy embed_content
-        print(f"Dummy _DummyGenAI.Client.models.embed_content called for model: {model}, task_type (from config): {config.get('task_type') if isinstance(config, dict) else 'N/A'}")
+        task_type_from_config = "N/A"
+        if isinstance(config, dict) and config.get("task_type"):
+            task_type_from_config = config["task_type"]
+        elif hasattr(config, "task_type"): # Handle if config is an object with a task_type attribute
+            task_type_from_config = config.task_type
+
+        print(f"Dummy _DummyGenAI.Client.models.embed_content called for model: {model}, task_type (from config): {task_type_from_config}")
         return {"embedding": [0.2] * 768} # Different values for dummy distinction


 class _DummyGenAIClient: # Dummy Client
-    def __init__(self, client_options=None): # Added client_options for signature consistency
+    def __init__(self, client_options=None): # client_options is kept for dummy's internal logic if any
         self.client_options = client_options
         self.models = _DummyGenAIClientModels()
         api_key_present_in_options = client_options and client_options.get("api_key")
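
The reworked dummy accepts its config either as a plain dict or as an object carrying a `task_type` attribute. A minimal standalone sketch of that dict-or-attribute normalization (names here are illustrative, not part of the module):

```python
def read_task_type(config) -> str:
    """Return a task_type from either a dict-style or attribute-style config."""
    if isinstance(config, dict) and config.get("task_type"):
        return config["task_type"]
    if hasattr(config, "task_type") and config.task_type:
        return config.task_type
    return "N/A"

class _Cfg:
    def __init__(self, task_type):
        self.task_type = task_type

assert read_task_type({"task_type": "RETRIEVAL_QUERY"}) == "RETRIEVAL_QUERY"
assert read_task_type(_Cfg("RETRIEVAL_DOCUMENT")) == "RETRIEVAL_DOCUMENT"
assert read_task_type(None) == "N/A"
```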
 
@@ -71,12 +77,13 @@ class _ActualDummyGenAI: # type: ignore
         @staticmethod
         def GenerationConfig(**kwargs):
             print(f"Dummy _ActualDummyGenAI.types.GenerationConfig created with: {kwargs}")
-            return dict(kwargs)
+            return dict(kwargs) # Real lib returns an object, but dict is fine for dummy

         @staticmethod
         def SafetySetting(category, threshold):
             print(f"Dummy _ActualDummyGenAI.types.SafetySetting created: category={category}, threshold={threshold}")
-            return {"category": category, "threshold": threshold}
+            # Real lib returns an object, dict is fine for dummy, but ensure it matches expected structure if used
+            return {"category": category, "threshold": threshold}

         @staticmethod # Added dummy EmbedContentConfig
         def EmbedContentConfig(task_type=None, output_dimensionality=None, title=None):
 
@@ -84,8 +91,16 @@ class _ActualDummyGenAI: # type: ignore
             conf = {}
             if task_type: conf["task_type"] = task_type
             if output_dimensionality: conf["output_dimensionality"] = output_dimensionality
-            if title: conf["title"] = title # Though title is usually direct param for embed_content
-            return conf
+            if title: conf["title"] = title
+            # The real library returns a types.EmbedContentRequest, which has these as attributes.
+            # For the dummy, returning a dict that the dummy embed_content can understand is okay.
+            # Or, make it return a simple object:
+            class DummyEmbedConfig:
+                def __init__(self):
+                    self.task_type = task_type
+                    self.output_dimensionality = output_dimensionality
+                    self.title = title
+            return DummyEmbedConfig()


         class HarmCategory: HARM_CATEGORY_UNSPECIFIED = "HARM_CATEGORY_UNSPECIFIED"; HARM_CATEGORY_HARASSMENT = "HARM_CATEGORY_HARASSMENT"; HARM_CATEGORY_HATE_SPEECH = "HARM_CATEGORY_HATE_SPEECH"; HARM_CATEGORY_SEXUALLY_EXPLICIT = "HARM_CATEGORY_SEXUALLY_EXPLICIT"; HARM_CATEGORY_DANGEROUS_CONTENT = "HARM_CATEGORY_DANGEROUS_CONTENT"
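
The new `EmbedContentConfig` dummy returns an instance of a class defined inside the factory, so the instance picks up `task_type`, `output_dimensionality`, and `title` through closure rather than through constructor arguments. The same trick in isolation:

```python
def make_config(task_type=None, title=None):
    # The inner class closes over the factory's arguments, so instances
    # expose them as attributes without receiving them in __init__.
    class _Config:
        def __init__(self):
            self.task_type = task_type
            self.title = title
    return _Config()

cfg = make_config(task_type="RETRIEVAL_DOCUMENT")
print(cfg.task_type)  # RETRIEVAL_DOCUMENT
print(cfg.title)      # None
```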
 
@@ -101,25 +116,27 @@ _REAL_GENAI_LOADED = False
 genai_types = None

 try:
-    from google import genai
+    # Attempt to import the actual google.generativeai library
+    import google.generativeai as real_genai
+    genai = real_genai # Use the real library
     genai_types = genai.types
     _REAL_GENAI_LOADED = True
-    logging.info("Successfully imported 'google.genai' and accessed 'genai.types'.")
+    logging.info("Successfully imported 'google.generativeai' and accessed 'genai.types'.")
 except ImportError:
     genai = _ActualDummyGenAI()
     genai_types = genai.types
-    logging.warning("Google AI library ('google.genai') not found. Using dummy implementations for 'genai' and 'genai_types'.")
-except AttributeError: # If 'genai' imported but 'genai.types' is missing
+    logging.warning("Google AI library ('google.generativeai') not found. Using dummy implementations for 'genai' and 'genai_types'.")
+except AttributeError: # If 'genai' imported but 'genai.types' is missing (less likely with modern SDK)
     genai = _ActualDummyGenAI()
     genai_types = genai.types # Fallback to dummy types
     _REAL_GENAI_LOADED = False
-    logging.warning("'google.genai' imported, but 'genai.types' not found. Falling back to dummy implementations.")
+    logging.warning("'google.generativeai' imported, but 'genai.types' not found. Falling back to dummy implementations.")


 # --- Configuration ---
 GEMINI_API_KEY = os.getenv('GEMINI_API_KEY', "")
-LLM_MODEL_NAME = "gemini-2.0-flash"
-GEMINI_EMBEDDING_MODEL_NAME = "gemini-embedding-exp-03-07"
+LLM_MODEL_NAME = "gemini-1.5-flash" # Corrected to a generally available model like 1.5 flash
+GEMINI_EMBEDDING_MODEL_NAME = "text-embedding-004" # Corrected to a generally available embedding model

 GENERATION_CONFIG_PARAMS = {
     "temperature": 0.3, "top_p": 1.0, "top_k": 32, "max_output_tokens": 8192,
 
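The import block is a standard optional-dependency pattern: bind the real package when it imports, otherwise bind a stand-in that mirrors the attribute surface callers rely on. A self-contained sketch of the pattern (`some_optional_package` is a deliberately nonexistent name so the fallback path runs):

```python
import logging

class _FallbackBackend:
    """Stand-in exposing the minimal attribute surface callers rely on."""
    class types:  # mirrors the real module's `types` namespace
        pass

try:
    import some_optional_package as backend  # hypothetical module: ImportError is expected here
    _BACKEND_LOADED = True
except ImportError:
    backend = _FallbackBackend()
    _BACKEND_LOADED = False
    logging.warning("Optional dependency missing; using fallback backend.")

print(f"real backend loaded: {_BACKEND_LOADED}")  # False in this sketch
```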
@@ -129,11 +146,15 @@ try:
     DEFAULT_SAFETY_SETTINGS = [
         genai_types.SafetySetting(category=genai_types.HarmCategory.HARM_CATEGORY_HATE_SPEECH, threshold=genai_types.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE),
         genai_types.SafetySetting(category=genai_types.HarmCategory.HARM_CATEGORY_HARASSMENT, threshold=genai_types.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE),
-        # ... other settings
+        genai_types.SafetySetting(category=genai_types.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT, threshold=genai_types.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE),
+        genai_types.SafetySetting(category=genai_types.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT, threshold=genai_types.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE),
     ]
 except Exception as e_safety:
     logging.warning(f"Could not define DEFAULT_SAFETY_SETTINGS using 'genai_types' (real_loaded: {_REAL_GENAI_LOADED}): {e_safety}. Using placeholder list of dicts.")
-    DEFAULT_SAFETY_SETTINGS = [{"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_MEDIUM_AND_ABOVE"}] # Simplified
+    DEFAULT_SAFETY_SETTINGS = [ # Simplified for dummy if types fail
+        {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
+        {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"}
+    ]

 logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(module)s - %(filename)s:%(lineno)d - %(message)s')
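
When the typed `SafetySetting` constructor is unavailable, the fallback keeps the same two fields as plain dicts. A small helper sketch that builds such a list from category strings (assuming only the dict shape shown in the except-branch above):

```python
def build_safety_settings(categories, threshold="BLOCK_MEDIUM_AND_ABOVE"):
    # Plain-dict form mirroring the except-branch fallback above.
    return [{"category": c, "threshold": threshold} for c in categories]

print(build_safety_settings([
    "HARM_CATEGORY_HATE_SPEECH",
    "HARM_CATEGORY_HARASSMENT",
    "HARM_CATEGORY_SEXUALLY_EXPLICIT",
    "HARM_CATEGORY_DANGEROUS_CONTENT",
])[0])
```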
 
@@ -145,14 +166,14 @@ if _REAL_GENAI_LOADED:
         except Exception as e:
             logging.error(f"Failed to configure REAL Gemini API globally: {e}", exc_info=True)
     else:
-        logging.warning("REAL 'google.genai' loaded, but GEMINI_API_KEY not set. API calls might fail or use other auth.")
-elif not _REAL_GENAI_LOADED:
-    logging.info("Operating in DUMMY mode for 'google.genai'.")
-    if GEMINI_API_KEY: genai.configure(api_key=GEMINI_API_KEY)
+        logging.warning("REAL 'google.generativeai' loaded, but GEMINI_API_KEY not set. API calls might fail or use other auth.")
+elif not _REAL_GENAI_LOADED: # This means we are in dummy mode
+    logging.info("Operating in DUMMY mode for 'google.generativeai'.")
+    if GEMINI_API_KEY: genai.configure(api_key=GEMINI_API_KEY) # Call dummy configure


 # --- RAG Documents Definition (Example) ---
-rag_documents_data = { 'Title': ["EB Practices", "Tech Talent"], 'Text': ["Stories...", "Projects..."] }
+rag_documents_data = { 'Title': ["EB Practices", "Tech Talent"], 'Text': ["Stories about best practices...", "Projects showcasing tech talent..."] }
 df_rag_documents = pd.DataFrame(rag_documents_data)

 # --- Schema Representation ---
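
`get_all_schemas_representation` is called throughout the diff but its body lies outside the changed range. A hypothetical implementation consistent with how it is used here (one text block per DataFrame, the `df_<name>` naming convention, and the exact 'No DataFrames provided.' sentinel the prompt builder checks for):

```python
import pandas as pd

def get_all_schemas_representation(dataframes_dict: dict) -> str:
    # Hypothetical sketch: the real body is not shown in this commit.
    if not dataframes_dict:
        return "No DataFrames provided."
    parts = []
    for name, df in dataframes_dict.items():
        if isinstance(df, pd.DataFrame):
            cols = ", ".join(f"{c} ({df[c].dtype})" for c in df.columns)
            parts.append(f"DataFrame 'df_{name}': {len(df)} rows; columns: {cols}")
    return "\n".join(parts) if parts else "No DataFrames provided."

print(get_all_schemas_representation({"employees": pd.DataFrame({"Name": ["Alice"]})}))
```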
 
@@ -168,89 +189,146 @@ def get_all_schemas_representation(dataframes_dict: dict) -> str:
 # --- Advanced RAG System ---
 class AdvancedRAGSystem:
     def __init__(self, documents_df: pd.DataFrame, embedding_model_name: str):
-        self.embedding_model_name_for_api = embedding_model_name # Store raw name
+        self.embedding_model_name_for_api = embedding_model_name
         if not self.embedding_model_name_for_api.startswith("models/"):
             self.embedding_model_name_for_api = f"models/{self.embedding_model_name_for_api}"

         self.documents_df = documents_df.copy()
         self.embeddings_generated = False
-        self.embedding_service = None # Will hold client.models or its dummy equivalent
+        self.embedding_service_client_models = None # Will hold client.models or its dummy equivalent

+        # Determine if we can use the real client for RAG
+        # This depends on the library being loaded AND an API key being present
         self.real_client_available_for_rag = _REAL_GENAI_LOADED and bool(GEMINI_API_KEY)

         if self.real_client_available_for_rag:
             try:
-                # Pass client_options if API key is available, to help Client find it
-                client_opts = {"api_key": GEMINI_API_KEY} if GEMINI_API_KEY else None
-                rag_client = genai.Client(client_options=client_opts)
-                self.embedding_service = rag_client.models
-                logging.info(f"RAG: REAL embedding service (genai.Client.models) initialized for '{self.embedding_model_name_for_api}'.")
-                self._precompute_embeddings()
-                self.embeddings_generated = True
+                # REAL LIBRARY: genai.Client() does not take client_options.
+                # genai.configure() should have already set the API key.
+                rag_client = genai.Client() # NO client_options here for real client
+                self.embedding_service_client_models = rag_client.models # Access the 'models' service from the client instance
+                logging.info(f"RAG: REAL embedding service (genai.Client().models) initialized for '{self.embedding_model_name_for_api}'.")
+                self._precompute_embeddings() # Try to precompute
+                self.embeddings_generated = True # Mark as generated if precomputation started
             except Exception as e:
-                logging.error(f"RAG: Error initializing REAL embedding service: {e}", exc_info=True)
-                self.embedding_service = None
-        else:
-            logging.warning(f"RAG: Not using REAL embedding service. Real GenAI: {_REAL_GENAI_LOADED}, API Key: {bool(GEMINI_API_KEY)}.")
-            if not _REAL_GENAI_LOADED: # Full dummy mode
-                self.embedding_service = genai.Client().models # genai is _ActualDummyGenAI, gets dummy service
-                self._precompute_embeddings()
+                logging.error(f"RAG: Error initializing REAL embedding service (genai.Client().models): {e}", exc_info=True)
+                self.embedding_service_client_models = None
+                self.real_client_available_for_rag = False # Cannot use real client if init fails
+
+        # Fallback to dummy if real client is not available or failed
+        if not self.real_client_available_for_rag:
+            logging.warning(f"RAG: Not using REAL embedding service. Real GenAI Loaded: {_REAL_GENAI_LOADED}, API Key Set: {bool(GEMINI_API_KEY)}, Real Client Init Failed: {self.embedding_service_client_models is None and _REAL_GENAI_LOADED and bool(GEMINI_API_KEY)}.")
+            # Ensure we use the DUMMY genai instance if _REAL_GENAI_LOADED is false
+            dummy_genai_instance = _ActualDummyGenAI() if not _REAL_GENAI_LOADED else genai
+            self.embedding_service_client_models = dummy_genai_instance.Client().models # Gets dummy service
+            logging.info(f"RAG: Using DUMMY embedding service for '{self.embedding_model_name_for_api}'.")
+            self._precompute_embeddings() # Precompute with dummy
+

     def _embed_fn(self, contents_to_embed: str, task_type: str) -> list[float]:
-        if not self.embedding_service:
-            logging.error(f"RAG _embed_fn: Embedding service not available for model '{self.embedding_model_name_for_api}'.")
-            return [0.0] * 768
+        if not self.embedding_service_client_models:
+            logging.error(f"RAG _embed_fn: Embedding service (client.models) not available for model '{self.embedding_model_name_for_api}'.")
+            return [0.0] * 768 # Default embedding dimension
        try:
             if not contents_to_embed: return [0.0] * 768

-            # Use genai_types (which is real or dummy) to create EmbedContentConfig
-            embed_config = genai_types.EmbedContentConfig(task_type=task_type)
+            # For the REAL library, embed_content takes `content` (not contents) and `task_type` directly.
+            # The `config` parameter is for more advanced settings not used here.
+            # The DUMMY `embed_content` was adapted to take a `config` object/dict.

-            # Call embed_content on the service (real or dummy)
-            response = self.embedding_service.embed_content(
-                model=self.embedding_model_name_for_api,
-                contents=contents_to_embed,
-                config=embed_config
-            )
+            if _REAL_GENAI_LOADED and self.real_client_available_for_rag: # Check if we are using the real service
+                response = self.embedding_service_client_models.embed_content(
+                    model=self.embedding_model_name_for_api,
+                    content=contents_to_embed, # Real API uses 'content'
+                    task_type=task_type # Real API takes task_type directly
+                )
+            else: # DUMMY mode or real library failed, use dummy logic
+                # The dummy embed_content expects a config object/dict
+                embed_config_for_dummy = genai_types.EmbedContentConfig(task_type=task_type)
+                response = self.embedding_service_client_models.embed_content(
+                    model=self.embedding_model_name_for_api,
+                    contents=contents_to_embed, # Dummy API was expecting 'contents'
+                    config=embed_config_for_dummy # Dummy API was expecting 'config'
+                )
             return response["embedding"]
         except Exception as e:
-            logging.error(f"Error in _embed_fn for task '{task_type}' using model '{self.embedding_model_name_for_api}' (real_genai_loaded: {_REAL_GENAI_LOADED}): {e}", exc_info=True)
+            logging.error(f"Error in _embed_fn for task '{task_type}' using model '{self.embedding_model_name_for_api}' (real_genai_loaded: {_REAL_GENAI_LOADED}, real_client_for_rag: {self.real_client_available_for_rag}): {e}", exc_info=True)
             return [0.0] * 768

     def _precompute_embeddings(self):
         if 'Embeddings' not in self.documents_df.columns: self.documents_df['Embeddings'] = pd.Series(dtype='object')
-        mask = (self.documents_df['Text'].notna() & (self.documents_df['Text'] != '')) | (self.documents_df['Title'].notna() & (self.documents_df['Title'] != ''))
-        if not mask.any(): logging.warning("No content for RAG embeddings."); return
+        # Ensure text to embed is string, handle NaN gracefully
+        mask = (self.documents_df['Text'].astype(str).str.strip() != '') | \
+               (self.documents_df['Title'].astype(str).str.strip() != '')

+        if not mask.any():
+            logging.warning("No content with Text or Title found for RAG embeddings.")
+            return
+
+        logging.info(f"Attempting to precompute embeddings for {mask.sum()} documents.")
         for index, row in self.documents_df[mask].iterrows():
-            text_to_embed = row.get('Text', '') if row.get('Text', '') else row.get('Title', '')
-            self.documents_df.loc[index, 'Embeddings'] = self._embed_fn(text_to_embed, task_type="RETRIEVAL_DOCUMENT") # Corrected task type string
+            text_to_embed = str(row.get('Text', '')) if pd.notna(row.get('Text')) and str(row.get('Text','')).strip() else str(row.get('Title', ''))
+            if not text_to_embed.strip(): # Double check if after selection it's still empty
+                logging.debug(f"Skipping row {index} due to empty text_to_embed after selection.")
+                self.documents_df.loc[index, 'Embeddings'] = [0.0] * 768 # Store default for empty
+                continue
+
+            # Corrected task type string to match API expectations (e.g., TASK_TYPE_RETRIEVAL_DOCUMENT)
+            # For google-generativeai, it's often just "RETRIEVAL_DOCUMENT"
+            task_type_for_embedding = "RETRIEVAL_DOCUMENT"
+            if hasattr(genai_types, 'TaskType') and hasattr(genai_types.TaskType, 'RETRIEVAL_DOCUMENT'):
+                task_type_for_embedding = genai_types.TaskType.RETRIEVAL_DOCUMENT # Use enum if available
+
+            self.documents_df.loc[index, 'Embeddings'] = self._embed_fn(text_to_embed, task_type=task_type_for_embedding)

-        logging.info(f"Applied RAG embedding function to {mask.sum()} rows (embedding_service active: {self.embedding_service is not None}).")
+        self.embeddings_generated = True # Mark as generated after attempting
+        logging.info(f"Finished RAG embedding precomputation for {mask.sum()} rows (embedding_service_client_models active: {self.embedding_service_client_models is not None}).")


     def retrieve_relevant_info(self, query_text: str, top_k: int = 2) -> str:
-        if not self.real_client_available_for_rag or not self.embedding_service:
-            if not _REAL_GENAI_LOADED and self.embedding_service: # Full dummy mode
-                self._embed_fn(query_text, task_type="RETRIEVAL_QUERY") # Call for dummy log
-            logging.warning(f"Skipping real RAG retrieval. Real client available: {self.real_client_available_for_rag}, Embedding service OK: {self.embedding_service is not None}")
-            return "\n[RAG Context]\nReal RAG retrieval skipped.\n"
+        if not self.embeddings_generated: # Check if embeddings were generated
+            logging.warning("RAG: Embeddings not generated, attempting to generate them now for retrieval.")
+            self._precompute_embeddings() # Attempt to generate if not already done
+            if not self.embeddings_generated: # If still not generated (e.g. no docs)
+                return "\n[RAG Context]\nEmbeddings could not be generated. No documents to search.\n"
+
+        if not self.real_client_available_for_rag or not self.embedding_service_client_models:
+            # If in full dummy mode and service is available, log the call for testing purposes
+            if not _REAL_GENAI_LOADED and self.embedding_service_client_models:
+                task_type_for_query = "RETRIEVAL_QUERY"
+                if hasattr(genai_types, 'TaskType') and hasattr(genai_types.TaskType, 'RETRIEVAL_QUERY'):
+                    task_type_for_query = genai_types.TaskType.RETRIEVAL_QUERY
+                self._embed_fn(query_text, task_type=task_type_for_query) # Call for dummy log
+            logging.warning(f"Skipping real RAG retrieval. Real client available for RAG: {self.real_client_available_for_rag}, Embedding service OK: {self.embedding_service_client_models is not None}")
+            return "\n[RAG Context]\nReal RAG retrieval skipped (client/service issue or dummy mode).\n"

         try:
-            query_embedding = np.array(self._embed_fn(query_text, task_type="RETRIEVAL_QUERY")) # Corrected task type string
+            task_type_for_query = "RETRIEVAL_QUERY"
+            if hasattr(genai_types, 'TaskType') and hasattr(genai_types.TaskType, 'RETRIEVAL_QUERY'):
+                task_type_for_query = genai_types.TaskType.RETRIEVAL_QUERY
+            query_embedding_list = self._embed_fn(query_text, task_type=task_type_for_query)
+            query_embedding = np.array(query_embedding_list)

+            if not np.any(query_embedding): # Check if query embedding is all zeros (error)
+                logging.warning("RAG: Query embedding resulted in all zeros. Cannot retrieve.")
+                return "\n[RAG Context]\nFailed to generate a valid embedding for the query.\n"
+
             valid_df = self.documents_df.dropna(subset=['Embeddings'])
+            # Filter out embeddings that are not lists/arrays or are empty or all zeros
             valid_df = valid_df[valid_df['Embeddings'].apply(lambda x: isinstance(x, (list, np.ndarray)) and len(x) > 0 and np.any(x))]
-            if valid_df.empty: return "\n[RAG Context]\nNo valid document embeddings after filtering.\n"
+            if valid_df.empty: return "\n[RAG Context]\nNo valid document embeddings available after filtering.\n"

             doc_embeddings = np.stack(valid_df['Embeddings'].apply(np.array).values)
-            if query_embedding.shape[0] != doc_embeddings.shape[1]: return "\n[RAG Context]\nEmbedding dimension mismatch.\n"
+            if query_embedding.shape[0] != doc_embeddings.shape[1]:
+                logging.error(f"RAG: Embedding dimension mismatch. Query: {query_embedding.shape[0]}, Docs: {doc_embeddings.shape[1]}")
+                return "\n[RAG Context]\nEmbedding dimension mismatch between query and documents.\n"

             dot_products = np.dot(doc_embeddings, query_embedding)
             num_to_retrieve = min(top_k, len(valid_df))
             if num_to_retrieve == 0: return "\n[RAG Context]\nNo relevant passages found (num_to_retrieve is 0).\n"

-            idx = np.argsort(dot_products)[-num_to_retrieve:][::-1]
+            # Get indices of top_k largest dot products
+            idx = np.argsort(dot_products)[-num_to_retrieve:][::-1]
             passages = "".join([f"\n[RAG Context from: '{valid_df.iloc[i]['Title']}']\n{valid_df.iloc[i]['Text']}\n" for i in idx if i < len(valid_df)])
             return passages if passages else "\n[RAG Context]\nNo relevant passages found after search.\n"
         except Exception as e:
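
Retrieval ranks documents by a raw dot product against the query embedding and keeps the top k. The same math in isolation, with toy vectors standing in for real embeddings:

```python
import numpy as np

def top_k_by_dot_product(query_vec, doc_matrix, k=2):
    # doc_matrix: (n_docs, dim); query_vec: (dim,)
    scores = doc_matrix @ query_vec
    k = min(k, len(scores))
    # argsort is ascending, so take the last k and reverse for best-first order
    return np.argsort(scores)[-k:][::-1]

docs = np.array([[1.0, 0.0], [0.7, 0.7], [0.0, 1.0]])
query = np.array([1.0, 0.2])
print(top_k_by_dot_product(query, docs, k=2))  # [0 1]
```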
 
@@ -268,40 +346,45 @@ class PandasLLM:
         self.safety_settings_list = safety_settings_list
         self.data_privacy = data_privacy
         self.force_sandbox = force_sandbox
-        self.client = None
-        self.model_service = None
+        self.client_instance = None # Stores the genai.Client() instance
+        self.model_service_from_client = None # Stores client.models

-        if _REAL_GENAI_LOADED and GEMINI_API_KEY:
+        # Determine if we can use the real client for LLM
+        self.use_real_llm_service = _REAL_GENAI_LOADED and bool(GEMINI_API_KEY)
+
+        if self.use_real_llm_service:
             try:
-                # genai.configure should have been called. Try passing client_options as a fallback.
-                client_opts = {"api_key": GEMINI_API_KEY} if GEMINI_API_KEY else None
-                self.client = genai.Client(client_options=client_opts)
-                self.model_service = self.client.models
+                # REAL LIBRARY: genai.Client() does not take client_options.
+                # genai.configure() should have already set the API key.
+                self.client_instance = genai.Client() # NO client_options here
+                self.model_service_from_client = self.client_instance.models # Access 'models' service
                 logging.info(f"PandasLLM: Initialized with REAL genai.Client().models for '{self.llm_model_name}'.")
             except Exception as e:
-                logging.error(f"Failed to initialize REAL PandasLLM with genai.Client: {e}", exc_info=True)
-                self.client = None
-                self.model_service = None
-        else:
-            logging.warning(f"PandasLLM: Not using REAL genai.Client. RealGenAILoaded: {_REAL_GENAI_LOADED}, APIKeySet: {bool(GEMINI_API_KEY)}.")
-            if not _REAL_GENAI_LOADED:
-                self.client = genai.Client()
-                self.model_service = self.client.models
-                logging.info("PandasLLM: Initialized with DUMMY genai.Client().models (real library failed to load).")
+                logging.error(f"Failed to initialize REAL PandasLLM with genai.Client().models: {e}", exc_info=True)
+                self.client_instance = None
+                self.model_service_from_client = None
+                self.use_real_llm_service = False # Fallback if init fails
+
+        # Fallback to dummy if real client is not available or failed
+        if not self.use_real_llm_service:
+            logging.warning(f"PandasLLM: Not using REAL genai.Client. RealGenAILoaded: {_REAL_GENAI_LOADED}, APIKeySet: {bool(GEMINI_API_KEY)}, Real Client Init Failed: {self.model_service_from_client is None and _REAL_GENAI_LOADED and bool(GEMINI_API_KEY)}.")
+            # Ensure we use the DUMMY genai instance if _REAL_GENAI_LOADED is false
+            dummy_genai_instance = _ActualDummyGenAI() if not _REAL_GENAI_LOADED else genai
+            self.client_instance = dummy_genai_instance.Client()
+            self.model_service_from_client = self.client_instance.models
+            logging.info("PandasLLM: Initialized with DUMMY genai.Client().models.")


     async def _call_gemini_api_async(self, prompt_text: str, history: list = None) -> str:
-        use_real_service = _REAL_GENAI_LOADED and GEMINI_API_KEY and self.model_service is not None
+        # active_model_service will be self.model_service_from_client (either real or dummy)
+        active_model_service = self.model_service_from_client

-        active_model_service = self.model_service
-        if not use_real_service and not _REAL_GENAI_LOADED:
-            if active_model_service is None:
-                logging.debug("PandasLLM._call_gemini_api_async: active_model_service is None in dummy mode, using global dummy genai.Client().models.")
-                active_model_service = genai.Client().models
+        is_actually_using_real_service = self.use_real_llm_service and active_model_service is not None and hasattr(active_model_service, 'generate_content_async') and not isinstance(active_model_service, _DummyGenAIClientModels)

         if not active_model_service:
-            logging.error(f"PandasLLM: Model service not available (use_real_service: {use_real_service}, _REAL_GENAI_LOADED: {_REAL_GENAI_LOADED}, self.model_service is None: {self.model_service is None}). Cannot call API.")
-            return "# Error: Gemini model service not available for API call."
+            logging.error(f"PandasLLM: Model service (client.models) not available. Cannot call API.")
+            return "# Error: Gemini model service not available for API call."

         gemini_history = []
         if history:
403
  api_generation_config = None
404
  if self.generation_config_dict:
405
  try:
406
+ # genai_types will point to real or dummy types
407
  api_generation_config = genai_types.GenerationConfig(**self.generation_config_dict)
408
  except Exception as e_cfg:
409
+ logging.error(f"Error creating GenerationConfig (real_loaded: {_REAL_GENAI_LOADED}, using_real_service_now: {is_actually_using_real_service}): {e_cfg}. Using dict fallback.")
410
+ api_generation_config = self.generation_config_dict # Fallback for safety
411
 
412
+ logging.info(f"\n--- Calling Gemini API (model: {model_id_for_api}, RealModeActive: {is_actually_using_real_service}) ---\nConfig: {type(api_generation_config)}\nSafety: {bool(self.safety_settings_list)}\nContent (last part text): {contents_for_api[-1]['parts'][0]['text'][:100]}...\n")
413
 
414
  try:
415
  response = await active_model_service.generate_content_async(
 
@@ -335,55 +419,62 @@ class PandasLLM:
                 safety_settings=self.safety_settings_list
             )

+            # Handle blocked prompts (common with real API)
             if hasattr(response, 'prompt_feedback') and response.prompt_feedback and \
                hasattr(response.prompt_feedback, 'block_reason') and response.prompt_feedback.block_reason:
                 block_reason_val = response.prompt_feedback.block_reason
-                block_reason_str = str(block_reason_val.name if hasattr(block_reason_val, 'name') else block_reason_val)
+                # Convert enum to string if it's an enum
+                block_reason_str = str(block_reason_val.name if hasattr(block_reason_val, 'name') and not isinstance(block_reason_val, str) else block_reason_val)
                 logging.warning(f"Prompt blocked by API. Reason: {block_reason_str}.")
                 return f"# Error: Prompt blocked by API. Reason: {block_reason_str}."

             llm_output = ""
-            if hasattr(response, 'text') and isinstance(response.text, str):
+            # The real API response often has .text directly for simple cases, or via candidates
+            if hasattr(response, 'text') and isinstance(response.text, str) and response.text:
                 llm_output = response.text
             elif response.candidates:
                 candidate = response.candidates[0]
                 if candidate.content and candidate.content.parts:
                     llm_output = "".join(part.text for part in candidate.content.parts if hasattr(part, 'text'))

+                # Check finish reason if output is empty
                 if not llm_output and candidate.finish_reason:
                     finish_reason_val = candidate.finish_reason
+                    # Convert enum to string if it's an enum
                     finish_reason_str = str(finish_reason_val.name if hasattr(finish_reason_val, 'name') and not isinstance(finish_reason_val, str) else finish_reason_val)

-                    if finish_reason_str == "SAFETY":
+                    if finish_reason_str == "SAFETY" or (hasattr(genai_types, 'FinishReason') and finish_reason_val == genai_types.FinishReason.SAFETY):
                         safety_messages = []
                         if hasattr(candidate, 'safety_ratings') and candidate.safety_ratings:
                             for rating in candidate.safety_ratings:
                                 cat_name = rating.category.name if hasattr(rating.category, 'name') else str(rating.category)
                                 prob_name = rating.probability.name if hasattr(rating.probability, 'name') else str(rating.probability)
                                 safety_messages.append(f"Category: {cat_name}, Probability: {prob_name}")
                         logging.warning(f"Content generation stopped due to safety. Finish reason: {finish_reason_str}. Details: {'; '.join(safety_messages)}")
                         return f"# Error: Content generation stopped by API due to safety. Finish Reason: {finish_reason_str}. Details: {'; '.join(safety_messages)}"

                     logging.warning(f"Empty response from LLM. Finish reason: {finish_reason_str}.")
                     return f"# Error: LLM returned an empty response. Finish reason: {finish_reason_str}."
-            else:
-                logging.error(f"Unexpected API response structure: {str(response)[:500]}")
+            else: # If no .text and no candidates, or candidates structure is unexpected
+                logging.error(f"Unexpected API response structure (no .text or valid candidates): {str(response)[:500]}")
                 return f"# Error: Unexpected API response structure: {str(response)[:200]}"

             return llm_output

-        except (genai_types.BlockedPromptException if _REAL_GENAI_LOADED and hasattr(genai_types, 'BlockedPromptException') else Exception) as bpe:
-            if _REAL_GENAI_LOADED and type(bpe).__name__ == 'BlockedPromptException':
+        # Specific exceptions from the google.generativeai library
+        except (getattr(genai_types, 'BlockedPromptException', Exception)) as bpe: # Use getattr for safe access
+            if _REAL_GENAI_LOADED and type(bpe).__name__ == 'BlockedPromptException': # Check type name if real lib
                 logging.error(f"Prompt blocked (BlockedPromptException): {bpe}", exc_info=True)
                 return f"# Error: Prompt blocked. Details: {bpe}"
+            # If it's not the specific exception from the real library, or if in dummy mode, re-raise if it's a general Exception
             if not (_REAL_GENAI_LOADED and type(bpe).__name__ == 'BlockedPromptException'): raise
-        except (genai_types.StopCandidateException if _REAL_GENAI_LOADED and hasattr(genai_types, 'StopCandidateException') else Exception) as sce:
+        except (getattr(genai_types, 'StopCandidateException', Exception)) as sce:
             if _REAL_GENAI_LOADED and type(sce).__name__ == 'StopCandidateException':
-                logging.error(f"Candidate stopped (StopCandidateException): {sce}", exc_info=True)
-                return f"# Error: Content generation stopped. Details: {sce}"
+                logging.error(f"Candidate stopped (StopCandidateException): {sce}", exc_info=True)
+                return f"# Error: Content generation stopped. Details: {sce}"
             if not (_REAL_GENAI_LOADED and type(sce).__name__ == 'StopCandidateException'): raise
         except Exception as e:
-            logging.error(f"Error calling Gemini API (RealMode: {use_real_service}): {e}", exc_info=True)
+            logging.error(f"Error calling Gemini API (RealModeActive: {is_actually_using_real_service}): {e}", exc_info=True)
             return f"# Error during API call: {type(e).__name__} - {str(e)[:100]}."
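
The `getattr(genai_types, 'BlockedPromptException', Exception)` idiom resolves an exception class that may not exist on the dummy types namespace, then re-raises anything the broadened `except Exception` caught by accident. The same idiom in isolation:

```python
class _TypesWithError:
    class BlockedPromptException(Exception):
        pass

def call_with_optional_catch(types_ns, fn):
    try:
        return fn()
    except getattr(types_ns, "BlockedPromptException", Exception) as e:
        # Only swallow the specific exception; re-raise anything else
        if type(e).__name__ != "BlockedPromptException":
            raise
        return f"# blocked: {e}"

def raise_blocked():
    raise _TypesWithError.BlockedPromptException("bad prompt")

print(call_with_optional_catch(_TypesWithError, raise_blocked))  # "# blocked: bad prompt"
```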
 
@@ -392,97 +483,139 @@ class PandasLLM:

         if self.force_sandbox:
             code_to_execute = ""
+            # Improved code extraction to handle potential leading/trailing newlines or spaces
             if "```python" in llm_response_text:
                 try:
-                    code_block_match = llm_response_text.split("```python\n", 1)
-                    if len(code_block_match) > 1: code_to_execute = code_block_match[1].split("\n```", 1)[0]
-                    else:
-                        code_block_match = llm_response_text.split("```python", 1)
-                        if len(code_block_match) > 1:
-                            code_to_execute = code_block_match[1].split("```", 1)[0]
-                            if code_to_execute.startswith("\n"): code_to_execute = code_to_execute[1:]
-                except IndexError: code_to_execute = ""
+                    # Split by ```python, take the part after it
+                    parts = llm_response_text.split("```python", 1)
+                    if len(parts) > 1:
+                        # From the second part, split by ```, take the part before it
+                        code_block_parts = parts[1].split("```", 1)
+                        code_to_execute = code_block_parts[0].strip() # .strip() to remove leading/trailing whitespace/newlines
+                except IndexError:
+                    code_to_execute = "" # Should not happen with the check above but as a safeguard

             if llm_response_text.startswith("# Error:") or not code_to_execute.strip():
                 logging.warning(f"LLM response is an error, or no valid Python code block found for sandbox. Raw LLM response: {llm_response_text[:200]}")
+                # If no code was extracted, but the response wasn't an error, it might be a textual answer.
                 if not code_to_execute.strip() and not llm_response_text.startswith("# Error:"):
                     if "```" not in llm_response_text and len(llm_response_text.strip()) > 0:
                         logging.info(f"LLM produced text output instead of Python code in sandbox mode. Passing through: {llm_response_text[:200]}")
-                            return llm_response_text
+                return llm_response_text # Return original LLM response (error or non-code)

-            logging.info(f"\n--- Code to Execute: ---\n{code_to_execute}\n----------------------\n")
+            logging.info(f"\n--- Code to Execute (Sandbox): ---\n{code_to_execute}\n----------------------\n")
             from io import StringIO; import sys
             old_stdout, sys.stdout = sys.stdout, StringIO()
+            # Prepare a safe global scope for exec
             exec_globals = {'pd': pd, 'np': np}
-            if dataframes_dict:
+            if dataframes_dict: # Add DataFrames to the execution scope
                 for name, df_instance in dataframes_dict.items():
                     if isinstance(df_instance, pd.DataFrame): exec_globals[f"df_{name}"] = df_instance
             try:
-                exec(code_to_execute, exec_globals, {})
+                exec(code_to_execute, exec_globals, {}) # Execute in the prepared scope
                 final_output_str = sys.stdout.getvalue()
-                if not final_output_str.strip():
+                if not final_output_str.strip(): # If code ran but produced no stdout
+                    # Check if the code was just comments or whitespace
                     if not any(ln.strip() and not ln.strip().startswith("#") for ln in code_to_execute.splitlines()):
-                        return "# LLM generated only comments or empty code. No output by sandbox."
-                    return "# Code executed by sandbox, but no print() output. Ensure print() for results."
+                        return "# LLM generated only comments or empty code. No output from sandbox."
+                    return "# Code executed by sandbox, but no print() output. Ensure print() is used for results."
                 return final_output_str
             except Exception as e:
                 logging.error(f"Sandbox Execution Error: {e}\nCode:\n{code_to_execute}", exc_info=True)
                 return f"# Sandbox Exec Error: {type(e).__name__}: {e}\n# Code:\n{textwrap.indent(code_to_execute, '# ')}"
-            finally: sys.stdout = old_stdout
-        else: return llm_response_text
+            finally: sys.stdout = old_stdout # Restore stdout
+        else: # Not forcing sandbox, return raw LLM response
+            return llm_response_text

 # --- Employer Branding Agent ---
 class EmployerBrandingAgent:
     def __init__(self, llm_model_name: str,
-                 generation_config_dict: dict, # Changed from gc_dict
-                 safety_settings_list: list, # Changed from ss_list
-                 all_dataframes: dict, # Changed from all_dfs
-                 rag_documents_df: pd.DataFrame, # Changed from rag_df
-                 embedding_model_name: str, # Changed from emb_m_name
-                 data_privacy=True, # Changed from dp
-                 force_sandbox=True): # Changed from fs
+                 generation_config_dict: dict,
+                 safety_settings_list: list,
+                 all_dataframes: dict,
+                 rag_documents_df: pd.DataFrame,
+                 embedding_model_name: str,
+                 data_privacy=True,
+                 force_sandbox=True):

         self.pandas_llm = PandasLLM(llm_model_name, generation_config_dict, safety_settings_list, data_privacy, force_sandbox)
         self.rag_system = AdvancedRAGSystem(rag_documents_df, embedding_model_name)
         self.all_dataframes = all_dataframes if all_dataframes else {}
         self.schemas_representation = get_all_schemas_representation(self.all_dataframes)
         self.chat_history = []
-        logging.info(f"EmployerBrandingAgent Initialized (Real GenAI Loaded: {_REAL_GENAI_LOADED}).")
+        logging.info(f"EmployerBrandingAgent Initialized (Real GenAI Loaded: {_REAL_GENAI_LOADED}, RAG Real Client: {self.rag_system.real_client_available_for_rag}, LLM Real Service: {self.pandas_llm.use_real_llm_service}).")

     def _build_prompt(self, user_query: str, role="EB Analyst", task_hint=None, cot=True) -> str: # Simplified role for brevity in example
-        prompt = f"You are '{role}'. Goal: insights from DataFrames & RAG.\n"
-        if self.pandas_llm.data_privacy: prompt += "PRIVACY: Summarize/aggregate PII.\n"
+        prompt = f"You are '{role}'. Your primary goal is to provide insights from the provided DataFrames and RAG context.\n"
+        if self.pandas_llm.data_privacy: prompt += "IMPORTANT PRIVACY NOTE: If dealing with Personally Identifiable Information (PII), you must summarize or aggregate it. Do not output raw PII.\n"
+
         if self.pandas_llm.force_sandbox:
-            prompt += "TASK: PYTHON CODE. `print()` textual insights/answers. ```python ... ``` ONLY.\nAccess DFs as 'df_name'.\n"
-            prompt += "CRITICAL: `print()` insights, NOT raw DFs (unless asked). Synthesize RAG. Comment code. Handle issues (ambiguity, missing data) via `print()`.\n"
-        else: prompt += "TASK: TEXTUAL INSIGHTS. Explain step-by-step.\n"
-        prompt += f"--- DATA SCHEMAS ---\n{self.schemas_representation if self.schemas_representation.strip() != 'No DataFrames provided.' else 'No DFs loaded.'}\n"
+            prompt += "RESPONSE FORMAT: Generate Python code to analyze the data and `print()` the textual insights or answers. Your entire response MUST be a single Python code block enclosed in ```python ... ```. Do NOT provide any text outside this block.\n"
+            prompt += "CODE GUIDELINES: Access DataFrames using their assigned 'df_name' (e.g., df_sales, df_employees). Comment your code clearly. Handle potential issues like ambiguity or missing data by printing informative messages.\n"
+            prompt += "CRITICAL: Your Python code's `print()` statements should output the final synthesized insights, not just raw DataFrames (unless specifically asked to show a DataFrame sample).\n"
+        else:
+            prompt += "RESPONSE FORMAT: Provide comprehensive textual insights. Explain your reasoning and steps clearly.\n"
+
+        prompt += f"--- AVAILABLE DATA SCHEMAS ---\n{self.schemas_representation if self.schemas_representation.strip() and self.schemas_representation.strip() != 'No DataFrames provided.' else 'No DataFrames are currently loaded or available.'}\n"

+        # Retrieve RAG context. The RAG system itself handles logging if it's skipped.
         rag_context = self.rag_system.retrieve_relevant_info(user_query)
-        meaningful_rag_kws = ["Error", "No valid", "No relevant", "Cannot retrieve", "not available", "not generated", "Skipped"]
-        is_meaningful_rag = bool(rag_context.strip()) and not any(kw in rag_context for kw in meaningful_rag_kws)
-        prompt += f"--- RAG CONTEXT (Real RAG: {self.rag_system.real_client_available_for_rag}) ---\n{rag_context if is_meaningful_rag else f'No specific RAG context or RAG issue. Details: {rag_context[:70]}...'}\n"
+        # Check if RAG context is meaningful (not an error or "skipped" message)
+        non_meaningful_rag_keywords = ["[RAG Context]\nError", "[RAG Context]\nSkipped", "[RAG Context]\nNo valid", "[RAG Context]\nNo relevant", "could not be generated", "not available", "dimension mismatch"]
+        is_meaningful_rag = bool(rag_context.strip()) and not any(keyword in rag_context for keyword in non_meaningful_rag_keywords)
+
+        prompt += f"--- RAG (Retrieval Augmented Generation) CONTEXT (Real RAG active: {self.rag_system.real_client_available_for_rag and self.rag_system.embedding_service_client_models is not None}) ---\n"
+        if is_meaningful_rag:
+            prompt += f"{rag_context}\n"
+        else:
+            prompt += f"No specific relevant RAG context found for this query, or RAG system reported an issue. Details from RAG: {rag_context.strip()[:150]}...\n"
+
         prompt += f"--- USER QUERY ---\n{user_query}\n"
-        if task_hint: prompt += f"--- GUIDANCE ---\n{task_hint}\n"
-        if cot:
-            if self.pandas_llm.force_sandbox: prompt += "--- PYTHON THOUGHT PROCESS ---\n1.Goal? 2.Data? 3.Plan? 4.Code. 5.CRITICAL: `print()` insights. 6.Review. 7.```python ... ``` ONLY.\n"
-            else: prompt += "--- TEXT RESPONSE THOUGHT PROCESS ---\n1.Goal? 2.Data? 3.Insights (DFs+RAG). 4.Structure response.\n"
+        if task_hint: prompt += f"--- ADDITIONAL GUIDANCE ---\n{task_hint}\n"
+
+        if cot: # Chain of Thought guidance
+            if self.pandas_llm.force_sandbox:
+                prompt += "--- PYTHON CODE THOUGHT PROCESS (Follow these steps internally before writing code) ---\n"
+                prompt += "1. Understand the Goal: What specific question does the user query ask? What insight is needed?\n"
+                prompt += "2. Identify Data Sources: Which DataFrame(s) are relevant? Is the RAG context useful?\n"
+                prompt += "3. Formulate a Plan: Outline the steps to take. E.g., filter df_X, aggregate df_Y, combine with RAG info.\n"
+                prompt += "4. Write Python Code: Implement the plan. Use pandas for DataFrame operations. Access RAG context as text.\n"
+                prompt += "5. CRITICAL - Print Results: Ensure `print()` statements clearly output the synthesized answer/insights. Do not just print raw data.\n"
+                prompt += "6. Review and Refine: Check code for correctness, clarity, and efficiency. Ensure it directly addresses the query.\n"
+                prompt += "7. Final Output: Ensure the entire response is ONLY the Python code block: ```python ... ```.\n"
+            else:
+                prompt += "--- TEXT RESPONSE THOUGHT PROCESS (Follow these steps internally) ---\n"
+                prompt += "1. Understand the Goal: What is the user asking for?\n"
+                prompt += "2. Identify Data Sources: Which DataFrames and RAG context sections are relevant?\n"
+                prompt += "3. Synthesize Insights: Combine information from DataFrames and RAG to form a coherent answer.\n"
+                prompt += "4. Structure Response: Organize the answer logically and explain the findings clearly.\n"
         return prompt

     async def process_query(self, user_query: str, role="EB Analyst", task_hint=None, cot=True) -> str: # Simplified role
-        hist_for_llm = self.chat_history[:]
+        # Keep a temporary copy of history for this call, so current user_query isn't in it
+        hist_for_llm = self.chat_history[:]
+
+        # Add current user query to persistent history *before* calling LLM
         self.chat_history.append({"role": "user", "content": user_query})
+
         prompt = self._build_prompt(user_query, role, task_hint, cot)
-        logging.info(f"Prompt for query: {user_query[:70]}... (Real GenAI: {_REAL_GENAI_LOADED})")
+        logging.info(f"Agent processing query: '{user_query[:70]}...' (Real GenAI Loaded: {_REAL_GENAI_LOADED}, LLM Real Service: {self.pandas_llm.use_real_llm_service})")
+
         response = await self.pandas_llm.query(prompt, self.all_dataframes, history=hist_for_llm)
+
+        # Add assistant's response to persistent history
         self.chat_history.append({"role": "assistant", "content": response})
-        if len(self.chat_history) > 10: self.chat_history = self.chat_history[-10:]; logging.info("Chat history truncated.")
+
+        # Truncate history if it gets too long
+        if len(self.chat_history) > 10: # Keep last 5 pairs (user/assistant)
+            self.chat_history = self.chat_history[-10:]
+            logging.info("Chat history truncated to the last 10 entries.")
         return response

     def update_dataframes(self, new_dataframes: dict): # Changed from new_dfs
         self.all_dataframes = new_dataframes if new_dataframes else {}
         self.schemas_representation = get_all_schemas_representation(self.all_dataframes)
-        logging.info("Agent DFs updated.")
+        logging.info(f"Agent DataFrames updated. New schema: {self.schemas_representation[:200]}...")

     def clear_chat_history(self):
         self.chat_history = []
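
The sandbox path extracts the first ```python fence from the LLM reply and executes it while capturing stdout. A condensed sketch of both steps, using `contextlib.redirect_stdout` where the production code swaps `sys.stdout` by hand:

```python
import io
from contextlib import redirect_stdout

import pandas as pd

def extract_python_block(text: str) -> str:
    """Return the body of the first ```python fenced block, else ''."""
    if "```python" not in text:
        return ""
    after_fence = text.split("```python", 1)[1]
    return after_fence.split("```", 1)[0].strip()

def run_in_sandbox(code: str, dataframes: dict) -> str:
    exec_globals = {"pd": pd}
    for name, df in dataframes.items():
        exec_globals[f"df_{name}"] = df  # same df_<name> convention as the agent
    buf = io.StringIO()
    with redirect_stdout(buf):  # capture print() output from the executed code
        exec(code, exec_globals, {})
    return buf.getvalue()

llm_reply = "```python\nprint(df_employees['Department'].value_counts().to_dict())\n```"
df = pd.DataFrame({"Department": ["HR", "Tech", "Tech"]})
print(run_in_sandbox(extract_python_block(llm_reply), {"employees": df}))
```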
 
@@ -490,28 +623,79 @@ class EmployerBrandingAgent:

 # --- Example Usage (Conceptual) ---
 async def main_test():
-    logging.info(f"Test (Real GenAI: {_REAL_GENAI_LOADED}, API Key: {bool(GEMINI_API_KEY)})")
-    # Example: Provide all parameters to EmployerBrandingAgent constructor
+    # Configure logging for the test
+    logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(module)s - %(filename)s:%(lineno)d - %(message)s')
+
+    print(f"--- main_test() ---")
+    logging.info(f"Test starting with Real GenAI Loaded: {_REAL_GENAI_LOADED}, API Key Set: {bool(GEMINI_API_KEY)}")
+
+    # Example DataFrames for testing
+    sample_employee_data = {
+        'EmployeeID': [1, 2, 3, 4, 5],
+        'Name': ['Alice', 'Bob', 'Charlie', 'David', 'Eve'],
+        'Department': ['HR', 'Tech', 'Tech', 'Sales', 'HR'],
+        'Projects': [['Onboarding Revamp'], ['AI Chatbot', 'Platform Upgrade'], ['AI Chatbot'], ['Client Outreach'], ['Benefits Portal']]
+    }
+    df_employees = pd.DataFrame(sample_employee_data)
+
+    sample_rag_docs_data = {
+        'Title': ["Company Culture Handbook", "Tech Team Achievements Q1", "Remote Work Policy"],
+        'Text': [
+            "Our company values collaboration, innovation, and inclusivity. We encourage cross-departmental projects.",
+            "The tech team successfully launched the AI Chatbot project, significantly improving customer engagement. The Platform Upgrade is on track.",
+            "We offer flexible remote work options. All employees are expected to maintain high levels of communication."
+        ]
+    }
+    df_test_rag_documents = pd.DataFrame(sample_rag_docs_data)
+
     agent = EmployerBrandingAgent(
         llm_model_name=LLM_MODEL_NAME,
         generation_config_dict=GENERATION_CONFIG_PARAMS,
         safety_settings_list=DEFAULT_SAFETY_SETTINGS,
-        all_dataframes={}, # Empty dict for example
-        rag_documents_df=df_rag_documents,
+        all_dataframes={'employees': df_employees}, # Provide a sample DataFrame
+        rag_documents_df=df_test_rag_documents, # Provide sample RAG documents
         embedding_model_name=GEMINI_EMBEDDING_MODEL_NAME,
         data_privacy=True,
-        force_sandbox=True
+        force_sandbox=True # Set to True to test code generation, False for direct text
     )
-    for q in ["What are EB best practices?", "Hello Agent!"]:
-        logging.info(f"\nQuery: {q}")
+
+    queries = [
+        "What are the key aspects of our company culture according to the handbook?",
+        "Which employees are working on the 'AI Chatbot' project? Summarize the project's impact.",
+        "How many employees are in the Tech department?"
+    ]
+
+    for q_idx, q in enumerate(queries):
+        logging.info(f"\n--- Query {q_idx+1}: {q} ---")
         resp = await agent.process_query(q)
-        logging.info(f"Response: {resp}\n")
-        if _REAL_GENAI_LOADED and GEMINI_API_KEY: await asyncio.sleep(0.1)
+        logging.info(f"--- Agent Response for Query {q_idx+1} ---:\n{resp}\n---------------------\n")
+        # Add a small delay if using real API to avoid rate limits, though for flash models it's usually fine.
+        if _REAL_GENAI_LOADED and GEMINI_API_KEY: await asyncio.sleep(0.2)

 if __name__ == "__main__":
-    print(f"Script starting... Real GenAI: {_REAL_GENAI_LOADED}, API Key: {bool(GEMINI_API_KEY)}")
-    try: asyncio.run(main_test())
+    print(f"Script starting... Real GenAI: {_REAL_GENAI_LOADED}, API Key Set: {bool(GEMINI_API_KEY)}")
+    # Setup basic logging if not already configured by a higher-level application
+    if not logging.getLogger().hasHandlers():
+        logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(filename)s:%(lineno)d - %(message)s')
+
+    try:
+        asyncio.run(main_test())
     except RuntimeError as e:
-        if "asyncio.run() cannot be called" in str(e): print("Skip asyncio.run in existing loop.")
-        else: raise
-    except Exception as e_main: print(f"Test Error: {e_main}")
+        if "asyncio.run() cannot be called when another asyncio event loop is running" in str(e):
+            print("Skipping asyncio.run(main_test()) as an event loop is already running (e.g., in Jupyter).")
+            # If in Jupyter or similar, you might need to run like this:
+            # loop = asyncio.get_event_loop()
+            # if loop.is_running():
+            #     print("Event loop is running, creating task for main_test()")
+            #     asyncio.create_task(main_test())
+            # else:
+            #     print("Event loop not running, using asyncio.run()")
+            #     asyncio.run(main_test())
+        else:
+            print(f"Main Test Runtime Error: {e}")
+            raise
+    except Exception as e_main:
+        print(f"Main Test Exception: {e_main}")
+        logging.error("Exception in __main__ execution of main_test:", exc_info=True)
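
The `__main__` guard special-cases the "event loop already running" RuntimeError. One conventional way to support both plain scripts and notebook-style environments, sketched with standard asyncio primitives (Python 3.7+):

```python
import asyncio

async def main_coro():
    await asyncio.sleep(0)
    print("ran main coroutine")

def run_anywhere(coro):
    """Run a coroutine from a script, or schedule it if a loop is already running."""
    try:
        asyncio.get_running_loop()
    except RuntimeError:
        # No loop running (plain script): asyncio.run() is safe
        return asyncio.run(coro)
    # A loop is already running (e.g. Jupyter): schedule as a task instead
    return asyncio.ensure_future(coro)

run_anywhere(main_coro())
```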