Spaces: Running
Update eb_agent_module.py
Browse files · eb_agent_module.py (+170 −210)
eb_agent_module.py CHANGED
@@ -25,14 +25,21 @@ class _DummyGenAIClientModels: # Represents the dummy model service client
         class DummyResponse: candidates = [DummyCandidate()]; text = DummyCandidate.content.parts[0].text; prompt_feedback = None
         return DummyResponse()
 
+    def embed_content(self, model=None, contents=None, config=None): # Added dummy embed_content
+        print(f"Dummy _DummyGenAI.Client.models.embed_content called for model: {model}, task_type (from config): {config.get('task_type') if isinstance(config, dict) else 'N/A'}")
+        return {"embedding": [0.2] * 768} # Different values for dummy distinction
+
+
 class _DummyGenAIClient: # Dummy Client
-    def __init__(self,
-        self.
+    def __init__(self, client_options=None): # Added client_options for signature consistency
+        self.client_options = client_options
         self.models = _DummyGenAIClientModels()
-
+        api_key_present_in_options = client_options and client_options.get("api_key")
+        print(f"Dummy _DummyGenAI.Client initialized {'with api_key in client_options' if api_key_present_in_options else '(global API key expected by dummy)'}.")
+
 
-class _DummyGenAIGenerativeModel:
-    def __init__(self, model_name_in,
+class _DummyGenAIGenerativeModel: # This dummy might be less used if client.models is preferred
+    def __init__(self, model_name_in, generation_config=None, safety_settings=None, system_instruction=None):
         self.model_name = model_name_in
         print(f"Dummy _DummyGenAIGenerativeModel initialized for {model_name_in}")
     async def generate_content_async(self, contents, stream=False):
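Note: the dummy layer above works purely by duck typing; a stand-in only has to expose the same attribute path (client.models) and the same keyword signature the module actually calls. A minimal, self-contained sketch of the pattern (FakeClient and FakeModels are illustrative names, not part of the module):

    # Sketch: a stub that satisfies the same call shape as the real client.
    class FakeModels:
        def embed_content(self, model=None, contents=None, config=None):
            # Same keywords the module uses; returns a fixed-size zero vector.
            return {"embedding": [0.0] * 768}

    class FakeClient:
        def __init__(self, client_options=None):
            self.models = FakeModels()

    # Caller code is identical whether this stub or the real client is bound:
    client = FakeClient()
    vec = client.models.embed_content(model="models/x", contents="hi")["embedding"]
    assert len(vec) == 768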
@@ -42,8 +49,13 @@ class _DummyGenAIGenerativeModel:
         class DummyCandidate: content = DummyContent(); finish_reason = "_DUMMY_STOP"; safety_ratings = []
         class DummyResponse: candidates = [DummyCandidate()]; prompt_feedback = None; text = DummyCandidate.content.parts[0].text
         return DummyResponse()
+
+    # This embed_content on the dummy GenerativeModel might not be used if AdvancedRAGSystem uses client.models.embed_content
+    def embed_content(self, content, task_type=None, title=None):
+        print(f"Dummy _DummyGenAIGenerativeModel.embed_content called for model {self.model_name} (task: {task_type})")
+        return {"embedding": [0.1] * 768}
 
-class _ActualDummyGenAI: # type: ignore # Renamed the main dummy class
+class _ActualDummyGenAI: # type: ignore
     Client = _DummyGenAIClient
 
     @staticmethod
@@ -54,13 +66,7 @@ class _ActualDummyGenAI: # type: ignore # Renamed the main dummy class
     def GenerativeModel(model_name, generation_config=None, safety_settings=None, system_instruction=None):
         print(f"Dummy _ActualDummyGenAI.GenerativeModel called for model: {model_name}")
         return _DummyGenAIGenerativeModel(model_name, generation_config, safety_settings, system_instruction)
-
-    @staticmethod
-    def embed_content(model, content, task_type, title=None):
-        print(f"Dummy _ActualDummyGenAI.embed_content called for model: {model}, task_type: {task_type}, title: {title}")
-        return {"embedding": [0.1] * 768}
 
-    # Add a dummy 'types' attribute to the dummy genai class
     class types:
         @staticmethod
         def GenerationConfig(**kwargs):
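Note: this hunk removes the old module-level embed_content staticmethod; embeddings now go through the client's model service instead. For reference, a hedged sketch of the call shape this mirrors, assuming the google-genai SDK (whose real response is an object carrying .embeddings with .values, not the plain dict the dummies return):

    import os

    try:
        from google import genai
        from google.genai import types

        client = genai.Client(api_key=os.environ["GEMINI_API_KEY"])
        resp = client.models.embed_content(
            model="gemini-embedding-exp-03-07",
            contents="some text to embed",
            config=types.EmbedContentConfig(task_type="RETRIEVAL_DOCUMENT"),
        )
        vector = resp.embeddings[0].values  # list of floats
    except Exception:
        pass  # SDK or API key absent; the dummy path above covers this case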
@@ -71,102 +77,89 @@ class _ActualDummyGenAI: # type: ignore # Renamed the main dummy class
         def SafetySetting(category, threshold):
             print(f"Dummy _ActualDummyGenAI.types.SafetySetting created: category={category}, threshold={threshold}")
             return {"category": category, "threshold": threshold}
-
-
-
-
-
-
-
-
-
-
+
+        @staticmethod # Added dummy EmbedContentConfig
+        def EmbedContentConfig(task_type=None, output_dimensionality=None, title=None):
+            print(f"Dummy _ActualDummyGenAI.types.EmbedContentConfig created with task_type: {task_type}")
+            conf = {}
+            if task_type: conf["task_type"] = task_type
+            if output_dimensionality: conf["output_dimensionality"] = output_dimensionality
+            if title: conf["title"] = title # Though title is usually direct param for embed_content
+            return conf
+
+        class HarmCategory: HARM_CATEGORY_UNSPECIFIED = "HARM_CATEGORY_UNSPECIFIED"; HARM_CATEGORY_HARASSMENT = "HARM_CATEGORY_HARASSMENT"; HARM_CATEGORY_HATE_SPEECH = "HARM_CATEGORY_HATE_SPEECH"; HARM_CATEGORY_SEXUALLY_EXPLICIT = "HARM_CATEGORY_SEXUALLY_EXPLICIT"; HARM_CATEGORY_DANGEROUS_CONTENT = "HARM_CATEGORY_DANGEROUS_CONTENT"
+        class HarmBlockThreshold: BLOCK_NONE = "BLOCK_NONE"; BLOCK_LOW_AND_ABOVE = "BLOCK_LOW_AND_ABOVE"; BLOCK_MEDIUM_AND_ABOVE = "BLOCK_MEDIUM_AND_ABOVE"; BLOCK_ONLY_HIGH = "BLOCK_ONLY_HIGH"
+        class FinishReason: FINISH_REASON_UNSPECIFIED = "UNSPECIFIED"; STOP = "STOP"; MAX_TOKENS = "MAX_TOKENS"; SAFETY = "SAFETY"; RECITATION = "RECITATION"; OTHER = "OTHER"
+        class BlockedReason: BLOCKED_REASON_UNSPECIFIED = "BLOCKED_REASON_UNSPECIFIED"; SAFETY = "SAFETY"; OTHER = "OTHER"
         class BlockedPromptException(Exception): pass
         class StopCandidateException(Exception): pass
 
 
 # --- Attempt to import the real library ---
 _REAL_GENAI_LOADED = False
 genai_types = None
 
 try:
     from google import genai
-
-    genai_types = genai.types # Assign the real types
+    genai_types = genai.types
     _REAL_GENAI_LOADED = True
     logging.info("Successfully imported 'google.genai' and accessed 'genai.types'.")
 except ImportError:
-    # If 'from google import genai' fails, use the dummy genai and its dummy types
     genai = _ActualDummyGenAI()
     genai_types = genai.types
     logging.warning("Google AI library ('google.genai') not found. Using dummy implementations for 'genai' and 'genai_types'.")
-except AttributeError:
-
-
-
-    genai_types = genai.types
-    _REAL_GENAI_LOADED = False # Mark as not fully loaded if types are missing
+except AttributeError: # If 'genai' imported but 'genai.types' is missing
+    genai = _ActualDummyGenAI()
+    genai_types = genai.types # Fallback to dummy types
+    _REAL_GENAI_LOADED = False
     logging.warning("'google.genai' imported, but 'genai.types' not found. Falling back to dummy implementations.")
 
 
 # --- Configuration ---
 GEMINI_API_KEY = os.getenv('GEMINI_API_KEY', "")
 LLM_MODEL_NAME = "gemini-2.0-flash"
 GEMINI_EMBEDDING_MODEL_NAME = "gemini-embedding-exp-03-07"
 
 GENERATION_CONFIG_PARAMS = {
     "temperature": 0.3, "top_p": 1.0, "top_k": 32, "max_output_tokens": 8192,
 }
 
-# Default safety settings list for Gemini
-# genai_types is now consistently the real genai.types or the dummy _ActualDummyGenAI.types
 try:
     DEFAULT_SAFETY_SETTINGS = [
         genai_types.SafetySetting(category=genai_types.HarmCategory.HARM_CATEGORY_HATE_SPEECH, threshold=genai_types.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE),
         genai_types.SafetySetting(category=genai_types.HarmCategory.HARM_CATEGORY_HARASSMENT, threshold=genai_types.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE),
-
-        genai_types.SafetySetting(category=genai_types.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT, threshold=genai_types.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE),
+        # ... other settings
     ]
 except Exception as e_safety:
     logging.warning(f"Could not define DEFAULT_SAFETY_SETTINGS using 'genai_types' (real_loaded: {_REAL_GENAI_LOADED}): {e_safety}. Using placeholder list of dicts.")
-    DEFAULT_SAFETY_SETTINGS = [
-        {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
-        {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
-    ]
-
+    DEFAULT_SAFETY_SETTINGS = [{"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_MEDIUM_AND_ABOVE"}] # Simplified
 
-# Logging setup
 logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(module)s - %(filename)s:%(lineno)d - %(message)s')
 
-if
-
-
-
-
-
-
-
+if _REAL_GENAI_LOADED:
+    if GEMINI_API_KEY:
+        try:
+            genai.configure(api_key=GEMINI_API_KEY)
+            logging.info(f"Gemini API key configured globally using REAL genai.configure.")
+        except Exception as e:
+            logging.error(f"Failed to configure REAL Gemini API globally: {e}", exc_info=True)
+    else:
+        logging.warning("REAL 'google.genai' loaded, but GEMINI_API_KEY not set. API calls might fail or use other auth.")
 elif not _REAL_GENAI_LOADED:
-    logging.info("Operating in DUMMY mode
-    if GEMINI_API_KEY:
-        genai.configure(api_key=GEMINI_API_KEY) # Calls dummy configure
+    logging.info("Operating in DUMMY mode for 'google.genai'.")
+    if GEMINI_API_KEY: genai.configure(api_key=GEMINI_API_KEY)
 
 
 # --- RAG Documents Definition (Example) ---
-rag_documents_data = {
-    'Title': ["Employer Branding Best Practices 2024", "Attracting Tech Talent", "Employee Advocacy", "Gen Z Expectations"],
-    'Text': ["Focus on authentic employee stories...", "Tech candidates value challenging projects...", "Encourage employees to share experiences...", "Gen Z values purpose-driven work..."]
-}
+rag_documents_data = { 'Title': ["EB Practices", "Tech Talent"], 'Text': ["Stories...", "Projects..."] }
 df_rag_documents = pd.DataFrame(rag_documents_data)
 
 # --- Schema Representation ---
 def get_schema_representation(df_name: str, df: pd.DataFrame) -> str:
     if not isinstance(df, pd.DataFrame): return f"Schema for item '{df_name}': Not a DataFrame.\n"
     if df.empty: return f"Schema for DataFrame 'df_{df_name}': Empty.\n"
-
-    if not df.empty: schema_str += f"  Sample Data (first 2 rows):\n{textwrap.indent(df.head(2).to_string(), '    ')}\n"
-    else: schema_str += "  Sample Data: DataFrame is empty.\n"
-    return schema_str
+    return f"DataFrame 'df_{df_name}': Cols: {df.columns.tolist()}, Shape: {df.shape}\nSample:\n{textwrap.indent(df.head(1).to_string(), '  ')}\n"
 
 def get_all_schemas_representation(dataframes_dict: dict) -> str:
     if not dataframes_dict: return "No DataFrames provided.\n"
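Note: the import guard above is a standard optional-dependency fallback. A self-contained sketch of the same pattern with a stand-in module name (some_optional_sdk is hypothetical, so the except branch runs when you execute this):

    import logging

    class _Stub:  # minimal stand-in mirroring the real module's surface
        class types:
            pass

    try:
        import some_optional_sdk as sdk  # hypothetical optional dependency
        sdk_types = sdk.types
        SDK_LOADED = True
    except ImportError:
        sdk = _Stub()
        sdk_types = sdk.types
        SDK_LOADED = False
        logging.warning("Optional SDK missing; using stub implementations.")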
@@ -175,50 +168,77 @@ def get_all_schemas_representation(dataframes_dict: dict) -> str:
 # --- Advanced RAG System ---
 class AdvancedRAGSystem:
     def __init__(self, documents_df: pd.DataFrame, embedding_model_name: str):
-        self.
+        self.embedding_model_name_for_api = embedding_model_name # Store raw name
+        if not self.embedding_model_name_for_api.startswith("models/"):
+            self.embedding_model_name_for_api = f"models/{self.embedding_model_name_for_api}"
+
         self.documents_df = documents_df.copy()
         self.embeddings_generated = False
+        self.embedding_service = None # Will hold client.models or its dummy equivalent
+
         self.real_client_available_for_rag = _REAL_GENAI_LOADED and bool(GEMINI_API_KEY)
 
         if self.real_client_available_for_rag:
             try:
+                # Pass client_options if API key is available, to help Client find it
+                client_opts = {"api_key": GEMINI_API_KEY} if GEMINI_API_KEY else None
+                rag_client = genai.Client(client_options=client_opts)
+                self.embedding_service = rag_client.models
+                logging.info(f"RAG: REAL embedding service (genai.Client.models) initialized for '{self.embedding_model_name_for_api}'.")
                 self._precompute_embeddings()
                 self.embeddings_generated = True
-
-
+            except Exception as e:
+                logging.error(f"RAG: Error initializing REAL embedding service: {e}", exc_info=True)
+                self.embedding_service = None
         else:
-            logging.warning(f"RAG: Not using
-            if not _REAL_GENAI_LOADED: #
-                self.
+            logging.warning(f"RAG: Not using REAL embedding service. Real GenAI: {_REAL_GENAI_LOADED}, API Key: {bool(GEMINI_API_KEY)}.")
+            if not _REAL_GENAI_LOADED: # Full dummy mode
+                self.embedding_service = genai.Client().models # genai is _ActualDummyGenAI, gets dummy service
+                self._precompute_embeddings()
 
-    def _embed_fn(self,
+    def _embed_fn(self, contents_to_embed: str, task_type: str) -> list[float]:
+        if not self.embedding_service:
+            logging.error(f"RAG _embed_fn: Embedding service not available for model '{self.embedding_model_name_for_api}'.")
+            return [0.0] * 768
         try:
-
-
-            #
-
+            if not contents_to_embed: return [0.0] * 768
+
+            # Use genai_types (which is real or dummy) to create EmbedContentConfig
+            embed_config = genai_types.EmbedContentConfig(task_type=task_type)
+
+            # Call embed_content on the service (real or dummy)
+            response = self.embedding_service.embed_content(
+                model=self.embedding_model_name_for_api,
+                contents=contents_to_embed,
+                config=embed_config
+            )
+            return response["embedding"]
         except Exception as e:
-            logging.error(f"Error in _embed_fn for '{
+            logging.error(f"Error in _embed_fn for task '{task_type}' using model '{self.embedding_model_name_for_api}' (real_genai_loaded: {_REAL_GENAI_LOADED}): {e}", exc_info=True)
             return [0.0] * 768
 
     def _precompute_embeddings(self):
         if 'Embeddings' not in self.documents_df.columns: self.documents_df['Embeddings'] = pd.Series(dtype='object')
         mask = (self.documents_df['Text'].notna() & (self.documents_df['Text'] != '')) | (self.documents_df['Title'].notna() & (self.documents_df['Title'] != ''))
         if not mask.any(): logging.warning("No content for RAG embeddings."); return
-
-
+
+        for index, row in self.documents_df[mask].iterrows():
+            text_to_embed = row.get('Text', '') if row.get('Text', '') else row.get('Title', '')
+            self.documents_df.loc[index, 'Embeddings'] = self._embed_fn(text_to_embed, task_type="RETRIEVAL_DOCUMENT") # Corrected task type string
+
+        logging.info(f"Applied RAG embedding function to {mask.sum()} rows (embedding_service active: {self.embedding_service is not None}).")
 
 
     def retrieve_relevant_info(self, query_text: str, top_k: int = 2) -> str:
-        if not self.real_client_available_for_rag:
-            if not _REAL_GENAI_LOADED: #
-
-            logging.warning(f"Skipping real RAG retrieval. Real
-            return "\n[RAG Context]\nReal RAG retrieval skipped
+        if not self.real_client_available_for_rag or not self.embedding_service:
+            if not _REAL_GENAI_LOADED and self.embedding_service: # Full dummy mode
+                self._embed_fn(query_text, task_type="RETRIEVAL_QUERY") # Call for dummy log
+            logging.warning(f"Skipping real RAG retrieval. Real client available: {self.real_client_available_for_rag}, Embedding service OK: {self.embedding_service is not None}")
+            return "\n[RAG Context]\nReal RAG retrieval skipped.\n"
 
         try:
-
-
+            query_embedding = np.array(self._embed_fn(query_text, task_type="RETRIEVAL_QUERY")) # Corrected task type string
+
             valid_df = self.documents_df.dropna(subset=['Embeddings'])
             valid_df = valid_df[valid_df['Embeddings'].apply(lambda x: isinstance(x, (list, np.ndarray)) and len(x) > 0 and np.any(x))]
             if valid_df.empty: return "\n[RAG Context]\nNo valid document embeddings after filtering.\n"
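Note: the actual ranking step between query_embedding and valid_df['Embeddings'] falls outside this hunk. A minimal numpy sketch of the likely shape of that step — dot-product scoring over stacked document vectors (toy data, hypothetical variable names):

    import numpy as np

    doc_embeddings = np.array([[0.1, 0.9], [0.8, 0.2], [0.5, 0.5]])  # stand-in for stacked Embeddings
    query_embedding = np.array([0.7, 0.3])

    scores = doc_embeddings @ query_embedding      # cosine similarity if rows are normalized
    top_k = 2
    idx = np.argsort(scores)[::-1][:top_k]         # indices of the best-scoring documents
    print(idx)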
@@ -234,7 +254,7 @@ class AdvancedRAGSystem:
             passages = "".join([f"\n[RAG Context from: '{valid_df.iloc[i]['Title']}']\n{valid_df.iloc[i]['Text']}\n" for i in idx if i < len(valid_df)])
             return passages if passages else "\n[RAG Context]\nNo relevant passages found after search.\n"
         except Exception as e:
-            logging.error(f"Error in RAG retrieve_relevant_info (real mode): {e}", exc_info=True)
+            logging.error(f"Error in RAG retrieve_relevant_info (real mode with embedding service): {e}", exc_info=True)
             return f"\n[RAG Context]\nError during RAG retrieval (real mode): {type(e).__name__} - {e}\n"
 
 # --- PandasLLM Class (Gemini-Powered using genai.Client) ---
@@ -248,21 +268,24 @@ class PandasLLM:
         self.safety_settings_list = safety_settings_list
         self.data_privacy = data_privacy
         self.force_sandbox = force_sandbox
         self.client = None
         self.model_service = None
 
         if _REAL_GENAI_LOADED and GEMINI_API_KEY:
             try:
-
+                # genai.configure should have been called. Try passing client_options as a fallback.
+                client_opts = {"api_key": GEMINI_API_KEY} if GEMINI_API_KEY else None
+                self.client = genai.Client(client_options=client_opts)
                 self.model_service = self.client.models
                 logging.info(f"PandasLLM: Initialized with REAL genai.Client().models for '{self.llm_model_name}'.")
             except Exception as e:
                 logging.error(f"Failed to initialize REAL PandasLLM with genai.Client: {e}", exc_info=True)
-
+                self.client = None
+                self.model_service = None
         else:
             logging.warning(f"PandasLLM: Not using REAL genai.Client. RealGenAILoaded: {_REAL_GENAI_LOADED}, APIKeySet: {bool(GEMINI_API_KEY)}.")
             if not _REAL_GENAI_LOADED:
                 self.client = genai.Client()
                 self.model_service = self.client.models
                 logging.info("PandasLLM: Initialized with DUMMY genai.Client().models (real library failed to load).")
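Note: client_options is passed here as a fallback, but the google-genai Client can also take the key directly via its api_key argument; a hedged sketch of that alternative (guarded so it degrades quietly when the SDK or key is absent):

    import os

    try:
        from google import genai
        client = genai.Client(api_key=os.environ["GEMINI_API_KEY"])
        model_service = client.models
    except Exception:
        client = model_service = None  # leave the dummy path above to handle it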
@@ -271,13 +294,13 @@ class PandasLLM:
         use_real_service = _REAL_GENAI_LOADED and GEMINI_API_KEY and self.model_service is not None
 
         active_model_service = self.model_service
         if not use_real_service and not _REAL_GENAI_LOADED:
             if active_model_service is None:
                 logging.debug("PandasLLM._call_gemini_api_async: active_model_service is None in dummy mode, using global dummy genai.Client().models.")
                 active_model_service = genai.Client().models
 
         if not active_model_service:
-            logging.error(f"PandasLLM: Model service not available (use_real_service: {use_real_service}, _REAL_GENAI_LOADED: {_REAL_GENAI_LOADED}). Cannot call API.")
+            logging.error(f"PandasLLM: Model service not available (use_real_service: {use_real_service}, _REAL_GENAI_LOADED: {_REAL_GENAI_LOADED}, self.model_service is None: {self.model_service is None}). Cannot call API.")
             return "# Error: Gemini model service not available for API call."
 
         gemini_history = []
@@ -291,7 +314,7 @@ class PandasLLM:
         contents_for_api = gemini_history + current_prompt_content
 
         model_id_for_api = self.llm_model_name
         if not model_id_for_api.startswith("models/"):
             model_id_for_api = f"models/{model_id_for_api}"
 
         api_generation_config = None
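Note: the models/ prefix normalization above is idempotent; a quick standalone check:

    def normalize(name: str) -> str:
        return name if name.startswith("models/") else f"models/{name}"

    assert normalize("gemini-2.0-flash") == "models/gemini-2.0-flash"
    assert normalize("models/gemini-2.0-flash") == "models/gemini-2.0-flash"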
@@ -349,20 +372,16 @@ class PandasLLM:
 
             return llm_output
 
-        # Use genai_types for exceptions if real library is loaded
         except (genai_types.BlockedPromptException if _REAL_GENAI_LOADED and hasattr(genai_types, 'BlockedPromptException') else Exception) as bpe:
             if _REAL_GENAI_LOADED and type(bpe).__name__ == 'BlockedPromptException':
                 logging.error(f"Prompt blocked (BlockedPromptException): {bpe}", exc_info=True)
                 return f"# Error: Prompt blocked. Details: {bpe}"
-
-            pass # Let the general Exception handler catch it or re-raise if needed
-
+            if not (_REAL_GENAI_LOADED and type(bpe).__name__ == 'BlockedPromptException'): raise
         except (genai_types.StopCandidateException if _REAL_GENAI_LOADED and hasattr(genai_types, 'StopCandidateException') else Exception) as sce:
             if _REAL_GENAI_LOADED and type(sce).__name__ == 'StopCandidateException':
                 logging.error(f"Candidate stopped (StopCandidateException): {sce}", exc_info=True)
                 return f"# Error: Content generation stopped. Details: {sce}"
-
-
+            if not (_REAL_GENAI_LOADED and type(sce).__name__ == 'StopCandidateException'): raise
         except Exception as e:
             logging.error(f"Error calling Gemini API (RealMode: {use_real_service}): {e}", exc_info=True)
             return f"# Error during API call: {type(e).__name__} - {str(e)[:100]}."
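Note: the conditional except clauses above widen to bare Exception when the real exception types are unavailable, and the new raise guard hands non-matching errors to the general handler instead of silently swallowing them. A standalone illustration of that control flow (SpecificError and LOADED are stand-ins):

    class SpecificError(Exception):
        pass

    LOADED = False  # pretend the real library's exception types are absent

    def risky():
        raise ValueError("not the specific error")

    try:
        try:
            risky()
        except (SpecificError if LOADED else Exception) as e:
            if LOADED and type(e).__name__ == "SpecificError":
                print("handled the specific error")
            else:
                raise  # not the targeted type: re-raise for outer handlers
    except ValueError as e:
        print(f"general handler got: {e}")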
@@ -392,142 +411,83 @@ class PandasLLM:
             return llm_response_text
 
         logging.info(f"\n--- Code to Execute: ---\n{code_to_execute}\n----------------------\n")
-        from io import StringIO
-        import sys
+        from io import StringIO; import sys
         old_stdout, sys.stdout = sys.stdout, StringIO()
         exec_globals = {'pd': pd, 'np': np}
         if dataframes_dict:
             for name, df_instance in dataframes_dict.items():
                 if isinstance(df_instance, pd.DataFrame): exec_globals[f"df_{name}"] = df_instance
-                else: logging.warning(f"Item '{name}' not a DataFrame for sandbox exec.")
         try:
             exec(code_to_execute, exec_globals, {})
             final_output_str = sys.stdout.getvalue()
             if not final_output_str.strip():
                 if not any(ln.strip() and not ln.strip().startswith("#") for ln in code_to_execute.splitlines()):
-                    return "# LLM generated only comments or empty code. No output
-                return "# Code executed
+                    return "# LLM generated only comments or empty code. No output by sandbox."
+                return "# Code executed by sandbox, but no print() output. Ensure print() for results."
             return final_output_str
         except Exception as e:
-            logging.error(f"Sandbox Execution Error: {e}\nCode
-
-            return f"# Sandbox Execution Error: {type(e).__name__}: {e}\n# --- Code that caused error: ---\n{indented_code}"
+            logging.error(f"Sandbox Execution Error: {e}\nCode:\n{code_to_execute}", exc_info=True)
+            return f"# Sandbox Exec Error: {type(e).__name__}: {e}\n# Code:\n{textwrap.indent(code_to_execute, '# ')}"
         finally: sys.stdout = old_stdout
     else: return llm_response_text
 
 # --- Employer Branding Agent ---
 class EmployerBrandingAgent:
-    def __init__(self, llm_model_name: str,
-
-
-
-                 rag_documents_df: pd.DataFrame,
-                 embedding_model_name: str,
-                 data_privacy=True, force_sandbox=True):
-
-        self.pandas_llm = PandasLLM(llm_model_name, generation_config_dict, safety_settings_list, data_privacy, force_sandbox)
-        self.rag_system = AdvancedRAGSystem(rag_documents_df, embedding_model_name)
-        self.all_dataframes = all_dataframes if all_dataframes else {}
+    def __init__(self, llm_model_name: str, gc_dict: dict, ss_list: list, all_dfs: dict, rag_df: pd.DataFrame, emb_m_name: str, dp=True, fs=True):
+        self.pandas_llm = PandasLLM(llm_model_name, gc_dict, ss_list, dp, fs)
+        self.rag_system = AdvancedRAGSystem(rag_df, emb_m_name)
+        self.all_dataframes = all_dfs if all_dfs else {}
         self.schemas_representation = get_all_schemas_representation(self.all_dataframes)
         self.chat_history = []
         logging.info(f"EmployerBrandingAgent Initialized (Real GenAI Loaded: {_REAL_GENAI_LOADED}).")
 
-    def _build_prompt(self, user_query: str, role="
-        prompt = f"You are
-        if self.pandas_llm.data_privacy: prompt += "
-
+    def _build_prompt(self, user_query: str, role="EB Analyst", task_hint=None, cot=True) -> str:
+        prompt = f"You are '{role}'. Goal: insights from DataFrames & RAG.\n"
+        if self.pandas_llm.data_privacy: prompt += "PRIVACY: Summarize/aggregate PII.\n"
         if self.pandas_llm.force_sandbox:
-            prompt += "
-            prompt += "
-
-
-            prompt += "\n--- CRITICAL INSTRUCTIONS FOR PYTHON CODE OUTPUT ---\n"
-            prompt += "1. **Print Insights, Not Just Data:** `print()` clear, actionable insights. NOT raw DataFrames unless specifically asked for a table.\n"
-            prompt += "   Good: `print(f'Insight: Theme {top_theme} has {engagement_increase}% higher engagement.')`\n"
-            prompt += "   Avoid: `print(df_result)` (for insight queries).\n"
-            prompt += "2. **Synthesize with RAG:** Weave RAG takeaways into printed insights. Ex: `print(f'Data shows X. RAG says Y. Recommend Z.')`\n"
-            prompt += "3. **Comments & Clarity:** Write clean, commented code.\n"
-            prompt += "4. **Handle Issues in Code:** If ambiguous, `print()` a question. If data unavailable, `print()` explanation. For non-analytical queries, `print()` polite reply.\n"
-            prompt += "5. **Function Usage:** Call functions and `print()` their (insightful) results.\n"
-        else: # Not force_sandbox
-            prompt += "\n--- TASK: DIRECT TEXTUAL INSIGHT GENERATION ---\n"
-            prompt += "Analyze data and RAG, then provide a comprehensive textual answer with insights. Explain step-by-step.\n"
-
-        prompt += "\n--- AVAILABLE DATA AND SCHEMAS ---\n"
-        prompt += self.schemas_representation if self.schemas_representation.strip() != "No DataFrames provided." else "No DataFrames loaded.\n"
+            prompt += "TASK: PYTHON CODE. `print()` textual insights/answers. ```python ... ``` ONLY.\nAccess DFs as 'df_name'.\n"
+            prompt += "CRITICAL: `print()` insights, NOT raw DFs (unless asked). Synthesize RAG. Comment code. Handle issues (ambiguity, missing data) via `print()`.\n"
+        else: prompt += "TASK: TEXTUAL INSIGHTS. Explain step-by-step.\n"
+        prompt += f"--- DATA SCHEMAS ---\n{self.schemas_representation if self.schemas_representation.strip() != 'No DataFrames provided.' else 'No DFs loaded.'}\n"
 
         rag_context = self.rag_system.retrieve_relevant_info(user_query)
-
-        is_meaningful_rag = bool(rag_context.strip()) and not any(
-
-
-
-        prompt +=
-
-
-        prompt += f"\n--- USER QUERY ---\n{user_query}\n"
-        if task_decomposition_hint: prompt += f"\n--- GUIDANCE ---\n{task_decomposition_hint}\n"
-
-        if cot_hint:
-            if self.pandas_llm.force_sandbox:
-                prompt += "\n--- PYTHON CODE GENERATION THOUGHT PROCESS ---\n"
-                prompt += "1. Goal? 2. Data sources (DFs, RAG)? 3. Analysis plan (comments)? 4. Write Python code. 5. CRITICAL: Formulate & `print()` textual insights. 6. Review. 7. Output ONLY ```python ... ```.\n"
-            else: # Not force_sandbox
-                prompt += "\n--- TEXTUAL RESPONSE THOUGHT PROCESS ---\n"
-                prompt += "1. Goal? 2. Data sources? 3. Formulate insights (data + RAG). 4. Structure: explanation, then insights.\n"
+        meaningful_rag_kws = ["Error", "No valid", "No relevant", "Cannot retrieve", "not available", "not generated", "Skipped"]
+        is_meaningful_rag = bool(rag_context.strip()) and not any(kw in rag_context for kw in meaningful_rag_kws)
+        prompt += f"--- RAG CONTEXT (Real RAG: {self.rag_system.real_client_available_for_rag}) ---\n{rag_context if is_meaningful_rag else f'No specific RAG context or RAG issue. Details: {rag_context[:70]}...'}\n"
+        prompt += f"--- USER QUERY ---\n{user_query}\n"
+        if task_hint: prompt += f"--- GUIDANCE ---\n{task_hint}\n"
+        if cot:
+            if self.pandas_llm.force_sandbox: prompt += "--- PYTHON THOUGHT PROCESS ---\n1.Goal? 2.Data? 3.Plan? 4.Code. 5.CRITICAL: `print()` insights. 6.Review. 7.```python ... ``` ONLY.\n"
+            else: prompt += "--- TEXT RESPONSE THOUGHT PROCESS ---\n1.Goal? 2.Data? 3.Insights (DFs+RAG). 4.Structure response.\n"
         return prompt
 
-    async def process_query(self, user_query: str, role="
-
+    async def process_query(self, user_query: str, role="EB Analyst", task_hint=None, cot=True) -> str:
+        hist_for_llm = self.chat_history[:]
         self.chat_history.append({"role": "user", "content": user_query})
-
-        logging.info(f"
-
-        self.chat_history.append({"role": "assistant", "content":
-
-
-
-        return response_text
-
-    def update_dataframes(self, new_dataframes: dict):
-        self.all_dataframes = new_dataframes if new_dataframes else {}
-        self.schemas_representation = get_all_schemas_representation(self.all_dataframes)
-        logging.info(f"Agent DataFrames updated. Schemas: {self.schemas_representation[:100]}...")
-
+        prompt = self._build_prompt(user_query, role, task_hint, cot)
+        logging.info(f"Prompt for query: {user_query[:70]}... (Real GenAI: {_REAL_GENAI_LOADED})")
+        response = await self.pandas_llm.query(prompt, self.all_dataframes, history=hist_for_llm)
+        self.chat_history.append({"role": "assistant", "content": response})
+        if len(self.chat_history) > 10: self.chat_history = self.chat_history[-10:]; logging.info("Chat history truncated.")
+        return response
+
+    def update_dataframes(self, new_dfs: dict): self.all_dataframes = new_dfs if new_dfs else {}; self.schemas_representation = get_all_schemas_representation(self.all_dataframes); logging.info("Agent DFs updated.")
     def clear_chat_history(self): self.chat_history = []; logging.info("Agent chat history cleared.")
 
 # --- Example Usage (Conceptual) ---
 async def main_test():
-    logging.info(f"
-
-
-
-
-
-
-        logging.warning("GEMINI_API_KEY not set but real library loaded. Real API calls in test will fail.")
-
-    agent = EmployerBrandingAgent(LLM_MODEL_NAME, GENERATION_CONFIG_PARAMS, DEFAULT_SAFETY_SETTINGS, test_dataframes, df_rag_documents, GEMINI_EMBEDDING_MODEL_NAME, force_sandbox=True)
-
-    queries = ["Which post theme has the highest average engagement rate? Provide an insight.", "Hello!"]
-    for query in queries:
-        logging.info(f"\n\n--- Query: {query} ---")
-        response = await agent.process_query(user_query=query)
-        logging.info(f"--- Response for '{query}': ---\n{response}\n---------------------------\n")
-        if _REAL_GENAI_LOADED and GEMINI_API_KEY: await asyncio.sleep(0.1)
+    logging.info(f"Test (Real GenAI: {_REAL_GENAI_LOADED}, API Key: {bool(GEMINI_API_KEY)})")
+    agent = EmployerBrandingAgent(LLM_MODEL_NAME, GENERATION_CONFIG_PARAMS, DEFAULT_SAFETY_SETTINGS, {}, df_rag_documents, GEMINI_EMBEDDING_MODEL_NAME)
+    for q in ["What are EB best practices?", "Hello Agent!"]:
+        logging.info(f"\nQuery: {q}")
+        resp = await agent.process_query(q)
+        logging.info(f"Response: {resp}\n")
+        if _REAL_GENAI_LOADED and GEMINI_API_KEY: await asyncio.sleep(0.1)
 
 if __name__ == "__main__":
-    print(f"Script starting... Real GenAI
-    try:
-        asyncio.run(main_test())
+    print(f"Script starting... Real GenAI: {_REAL_GENAI_LOADED}, API Key: {bool(GEMINI_API_KEY)}")
+    try: asyncio.run(main_test())
     except RuntimeError as e:
-        if "asyncio.run() cannot be called
-
-
-        else:
-            raise
-    except Exception as e_main:
-        print(f"Error during main_test execution: {e_main}")
-
+        if "asyncio.run() cannot be called" in str(e): print("Skip asyncio.run in existing loop.")
+        else: raise
+    except Exception as e_main: print(f"Test Error: {e_main}")
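Note: the sandbox above captures print() output by swapping sys.stdout for a StringIO and restoring it in finally, so an exception cannot leave stdout hijacked. A minimal, self-contained version of that mechanism (run_sandboxed is an illustrative name, not the module's API):

    import sys
    from io import StringIO

    def run_sandboxed(code: str, env: dict) -> str:
        old_stdout, sys.stdout = sys.stdout, StringIO()
        try:
            exec(code, env, {})                 # separate locals dict, as above
            return sys.stdout.getvalue()        # whatever the code print()ed
        except Exception as e:
            return f"# Sandbox error: {type(e).__name__}: {e}"
        finally:
            sys.stdout = old_stdout             # always restore

    print(run_sandboxed("print('hello from sandbox')", {"pd": None}))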