import pandas as pd
import os
import re
import time
import asyncio
import logging
import numpy as np
import textwrap
from typing import Dict, List, Optional, Any
import pandasai as pai
from pandasai_litellm import LiteLLM

# Select the backend early, before matplotlib.pyplot is imported directly or by pandasai
import matplotlib
matplotlib.use('Agg') # Use a non-interactive backend for Matplotlib
import matplotlib.pyplot as plt

# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(module)s - %(message)s')

try:
    from google import genai
    from google.genai import types
    from google.genai import errors
    GENAI_AVAILABLE = True
    logging.info("Google Generative AI library imported successfully.")
except ImportError:
    logging.warning("Google Generative AI library not found. Please install it: pip install google-generativeai")
    GENAI_AVAILABLE = False
    
    # Dummy classes for graceful degradation
    class genai:
        Client = None

    class types:
        EmbedContentConfig = None
        GenerationConfig = None
        SafetySetting = None
        Candidate = type('Candidate', (), {'FinishReason': type('FinishReason', (), {'STOP': 'STOP'})})

        class HarmCategory:
            HARM_CATEGORY_UNSPECIFIED = "HARM_CATEGORY_UNSPECIFIED"
            HARM_CATEGORY_HATE_SPEECH = "HARM_CATEGORY_HATE_SPEECH"
            HARM_CATEGORY_HARASSMENT = "HARM_CATEGORY_HARASSMENT"
            HARM_CATEGORY_SEXUALLY_EXPLICIT = "HARM_CATEGORY_SEXUALLY_EXPLICIT"
            HARM_CATEGORY_DANGEROUS_CONTENT = "HARM_CATEGORY_DANGEROUS_CONTENT"
            
        class HarmBlockThreshold:
            BLOCK_NONE = "BLOCK_NONE"
            BLOCK_LOW_AND_ABOVE = "BLOCK_LOW_AND_ABOVE"
            BLOCK_MEDIUM_AND_ABOVE = "BLOCK_MEDIUM_AND_ABOVE"
            BLOCK_ONLY_HIGH = "BLOCK_ONLY_HIGH"

# --- Custom Exceptions ---
class ValidationError(Exception):
    """Custom validation error for agent inputs"""
    pass

class RateLimitError(Exception):
    """Placeholder for rate limit errors."""
    pass

class AgentNotReadyError(Exception):
    """Agent is not properly initialized"""
    pass

# --- Configuration Constants ---
GEMINI_API_KEY = os.getenv('GEMINI_API_KEY', "")
LLM_MODEL_NAME = "gemini-2.5-flash-preview-05-20"
GEMINI_EMBEDDING_MODEL_NAME = "gemini-embedding-exp-03-07"

GENERATION_CONFIG_PARAMS = {
    "temperature": 0.7,
    "top_p": 0.95,
    "top_k": 40,
    "max_output_tokens": 8192,
    "candidate_count": 1,
}

DEFAULT_SAFETY_SETTINGS = []

# --- Client Initialization ---
client = None
if GEMINI_API_KEY and GENAI_AVAILABLE:
    try:
        client = genai.Client(api_key=GEMINI_API_KEY)
        logging.info("Google GenAI client initialized successfully.")
    except Exception as e:
        logging.error(f"Failed to initialize Google GenAI client: {e}")
        client = None
else:
    if not GEMINI_API_KEY:
        logging.warning("GEMINI_API_KEY environment variable not set.")
    if not GENAI_AVAILABLE:
        logging.warning("Google GenAI library not available.")


# --- Utility function to get DataFrame schema representation ---
def get_df_schema_representation(df: pd.DataFrame, df_name: str) -> str:
    """Generates a string representation of a DataFrame's schema and a small sample."""
    if not isinstance(df, pd.DataFrame):
        return f"Item '{df_name}' is not a DataFrame.\n"
    if df.empty:
        return f"DataFrame '{df_name}': Empty\n"
    
    # Define system columns to exclude from schema representation
    system_columns = ['Created Date', 'Modified Date', '_id']
    
    # Filter out system columns for schema representation
    filtered_columns = [col for col in df.columns if col not in system_columns]
    
    schema_parts = [f"DataFrame '{df_name}':"]
    schema_parts.append(f"  Shape: {df.shape}")
    schema_parts.append("  Columns:")
    
    # Show only filtered columns in schema
    for col in filtered_columns:
        col_type = str(df[col].dtype)
        null_count = df[col].isnull().sum()
        unique_count = df[col].nunique()
        schema_parts.append(f"    - {col} (Type: {col_type}, Nulls: {null_count}/{len(df)}, Uniques: {unique_count})")
    
    # Add note if system columns were excluded
    excluded_columns = [col for col in df.columns if col in system_columns]
    if excluded_columns:
        schema_parts.append(f"  Note: System columns excluded from display: {', '.join(excluded_columns)}")
    
    if not df.empty and filtered_columns:
        schema_parts.append("  Sample Data (first 2 rows):")
        try:
            # Create sample with only filtered columns
            sample_df = df[filtered_columns].head(2)
            sample_df_str = sample_df.to_string(index=True, max_colwidth=50)
            indented_sample_df = "\n".join(["    " + line for line in sample_df_str.split('\n')])
            schema_parts.append(indented_sample_df)
        except Exception as e:
            schema_parts.append(f"    Could not generate sample data: {e}")
    elif not df.empty and not filtered_columns:
        schema_parts.append("  Sample Data: Only system columns present, no business data to display")
            
    return "\n".join(schema_parts) + "\n"

def get_all_schemas_representation(dataframes: Dict[str, pd.DataFrame]) -> str:
    """Generates a string representation of all DataFrame schemas."""
    if not dataframes:
        return "No DataFrames available to the agent."
    
    full_representation = ["=== Available DataFrame Schemas for Analysis ==="]
    for name, df_instance in dataframes.items():
        full_representation.append(get_df_schema_representation(df_instance, name))
    return "\n".join(full_representation)



class EmployerBrandingAgent:
    def __init__(self,
                 all_dataframes: Optional[Dict[str, pd.DataFrame]] = None,
                 llm_model_name: str = LLM_MODEL_NAME,
                 embedding_model_name: str = GEMINI_EMBEDDING_MODEL_NAME,
                 generation_config_dict: Optional[Dict] = None,
                 safety_settings_list: Optional[List] = None):
        
        self.all_dataframes = {k: v.copy() for k, v in (all_dataframes or {}).items()}
        
        self.llm_model_name = llm_model_name
        self.generation_config_dict = generation_config_dict or GENERATION_CONFIG_PARAMS
        self.safety_settings_list = safety_settings_list or DEFAULT_SAFETY_SETTINGS

        self.chat_history: List[Dict[str, str]] = []
        self.is_ready = False

        # Create charts directory
        self.charts_dir = "./charts"
        os.makedirs(self.charts_dir, exist_ok=True)        
        
        # Initialize PandasAI Agent
        self.pandas_agent = None
        self._initialize_pandas_agent()

    def _validate_and_log_data(self):
        """Validate data quality and log findings"""
        logging.info("=== DATA VALIDATION REPORT ===")
        
        for name, df in self.all_dataframes.items():
            logging.info(f"\nDataFrame: {name}")
            logging.info(f"Shape: {df.shape}")
            logging.info(f"Columns: {list(df.columns)}")
            
            # Check for date columns and their ranges
            date_cols = [col for col in df.columns if 'date' in col.lower()]
            for date_col in date_cols:
                if not df[date_col].empty:
                    try:
                        date_series = pd.to_datetime(df[date_col], errors='coerce')
                        valid_dates = date_series.dropna()
                        if not valid_dates.empty:
                            min_date = valid_dates.min()
                            max_date = valid_dates.max()
                            logging.info(f"  {date_col}: {min_date} to {max_date}")
                            
                            # Specifically check for 2025 data
                            dates_2025 = valid_dates[valid_dates.dt.year == 2025]
                            if not dates_2025.empty:
                                logging.info(f"  Found {len(dates_2025)} records in 2025")
                    except Exception as e:
                        logging.warning(f"  Could not parse dates in {date_col}: {e}")
            
            # Check follower data specifically
            if 'follower' in name.lower():
                if 'follower_count_type' in df.columns:
                    type_counts = df['follower_count_type'].value_counts()
                    logging.info(f"  Follower count types: {dict(type_counts)}")
                
                if 'follower_count' in df.columns:
                    follower_stats = df['follower_count'].describe()
                    logging.info(f"  Follower count stats: {follower_stats}")
        
    def _initialize_pandas_agent(self):
        """Initialize PandasAI with enhanced configuration for chart generation"""
        if not self.all_dataframes or not GEMINI_API_KEY:
            logging.warning("Cannot initialize PandasAI agent: missing dataframes or API key")
            return
            
        self._preprocess_dataframes_for_pandas_ai()
        self._validate_and_log_data()
    
        try:
            # Configure LiteLLM with Gemini
            llm = LiteLLM(
                model=f"gemini/{LLM_MODEL_NAME}",
                api_key=GEMINI_API_KEY
            )
            
            # Enhanced PandasAI configuration for better chart generation
            pai.config.set({
                "llm": llm,
                "temperature": 0.3,  # Lower temperature for more consistent results
                "verbose": True,
                "enable_cache": False,  # Disable cache to avoid stale results
                "save_charts": True,
                "save_charts_path": "./charts",
                "open_charts": False,
                "custom_whitelisted_dependencies": [
                    "matplotlib", "seaborn", "plotly", "pandas", "numpy"
                ],
                "max_retries": 3,  # Add retry logic
                "use_error_correction_framework": True  # Enable error correction
            })
            
            # Store dataframes for chat queries
            self.pandas_dfs = {}
            for name, df in self.all_dataframes.items():
                # Skip empty dataframes
                if df.empty:
                    continue
                    
                df_description = self._generate_dataframe_description(name, df)
                pandas_df = pai.DataFrame(df, description=df_description)
                self.pandas_dfs[name] = pandas_df
            
            self.pandas_agent = True
            logging.info(f"PandasAI initialized successfully with {len(self.pandas_dfs)} DataFrames")
            
        except Exception as e:
            logging.error(f"Failed to initialize PandasAI: {e}", exc_info=True)
            self.pandas_agent = None
            self.pandas_dfs = {}

    def _generate_dataframe_description(self, name: str, df: pd.DataFrame) -> str:
        """Enhanced dataframe description with better data context"""
        description_parts = [f"This is the '{name}' dataset containing {len(df)} records."]
        
        # Add specific context for follower data
        if name.lower() in ['follower_stats', 'followers']:
            description_parts.append("""
            CRITICAL DATA STRUCTURE INFO:
            - Records with follower_count_type='follower_gains_monthly' contain monthly new follower counts
            - Records with follower_count_type='follower_count_cumulative' contain total follower counts
            - The 'extracted_date' column contains properly parsed dates for time analysis
            - For monthly gains analysis, ALWAYS filter by follower_count_type='follower_gains_monthly'
            - For growth trends, use extracted_date for chronological ordering
            - The follower_count column contains the actual numeric values to analyze
            """)
            
            # Add date range info if available
            if 'extracted_date' in df.columns:
                try:
                    date_col = pd.to_datetime(df['extracted_date'], errors='coerce')
                    valid_dates = date_col.dropna()
                    if not valid_dates.empty:
                        min_date = valid_dates.min()
                        max_date = valid_dates.max()
                        description_parts.append(f"Date range: {min_date.strftime('%Y-%m-%d')} to {max_date.strftime('%Y-%m-%d')}")
                        
                        # Highlight 2025 data
                        dates_2025 = valid_dates[valid_dates.dt.year >= 2025]
                        if not dates_2025.empty:
                            description_parts.append(f"Contains {len(dates_2025)} records from 2025 onwards")
                except Exception as e:
                    logging.warning(f"Could not analyze date range: {e}")
        
        return " ".join(description_parts)

    async def initialize(self) -> bool:
        """Initializes asynchronous components of the agent"""
        try:
            if not client:  # the shared GenAI client must be available before the agent can run
                logging.error("Cannot initialize agent: GenAI client not available/configured.")
                return False

            # Verify PandasAI agent is ready
            pandas_ready = self.pandas_agent is not None
            if not pandas_ready:
                logging.warning("PandasAI agent not initialized, attempting re-initialization")
                self._initialize_pandas_agent()
                pandas_ready = self.pandas_agent is not None
                
            self.is_ready = True
            return self.is_ready
            
        except Exception as e:
            logging.error(f"Error during EnhancedEmployerBrandingAgent.initialize: {e}", exc_info=True)
            self.is_ready = False
            return False


    def _get_dataframes_summary(self) -> str:
        return get_all_schemas_representation(self.all_dataframes)

    def _preprocess_dataframes_for_pandas_ai(self):
        """Enhanced preprocessing to handle date casting issues and ensure chart generation"""
        if not self.all_dataframes:
            return

        dataframes_to_add = {} # To store newly created dataframes

        # Iterate over a copy of the items to avoid runtime errors if modifying the dict
        for name, df_original in list(self.all_dataframes.items()):
            df_copy = df_original.copy() # Work on a copy for this iteration step

            if name.lower() in ['follower_stats', 'followers']:
                # Handle category_name column that contains dates for follower_gains_monthly
                if 'category_name' in df_copy.columns and 'follower_count_type' in df_copy.columns:
                    def extract_date_from_category(row):
                        # Monthly-gain rows carry an ISO date in category_name
                        if row.get('follower_count_type') == 'follower_gains_monthly':
                            category_name = str(row.get('category_name', ''))
                            if re.match(r'^\d{4}-\d{2}-\d{2}$', category_name):
                                return category_name
                        return None
                    
                    df_copy['extracted_date'] = df_copy.apply(extract_date_from_category, axis=1)
                    df_copy['extracted_date'] = pd.to_datetime(df_copy['extracted_date'], errors='coerce')
                    
                    monthly_mask = df_copy['follower_count_type'] == 'follower_gains_monthly'
                    # Ensure extracted_date is not NaT before strftime
                    valid_dates_mask = monthly_mask & df_copy['extracted_date'].notna()

                    df_copy.loc[valid_dates_mask, 'date_for_analysis'] = df_copy.loc[valid_dates_mask, 'extracted_date']
                    df_copy.loc[valid_dates_mask, 'year_month'] = df_copy.loc[valid_dates_mask, 'extracted_date'].dt.strftime('%Y-%m')
                    df_copy.loc[valid_dates_mask, 'month_name'] = df_copy.loc[valid_dates_mask, 'extracted_date'].dt.strftime('%B %Y')
                
                if 'follower_count' in df_copy.columns:
                    df_copy['follower_count'] = pd.to_numeric(df_copy['follower_count'], errors='coerce')
                    # df_copy['follower_count'] = df_copy['follower_count'].fillna(0) # Moved to general fillna

                # Create separate monthly gains dataframe for easier analysis
                if 'follower_count_type' in df_copy.columns and 'extracted_date' in df_copy.columns:
                    monthly_gains_df = df_copy[df_copy['follower_count_type'] == 'follower_gains_monthly'].copy()
                    if not monthly_gains_df.empty:
                        monthly_gains_df = monthly_gains_df.dropna(subset=['extracted_date'])
                        if not monthly_gains_df.empty: # Check again after dropna
                            monthly_gains_df = monthly_gains_df.sort_values('extracted_date')
                            # Store in the temporary dictionary
                            dataframes_to_add[f'{name}_monthly_gains'] = monthly_gains_df 
                            logging.info(f"Created '{name}_monthly_gains' with {len(monthly_gains_df)} records.")
                
                # Update the main dataframe in self.all_dataframes with these specific changes
                self.all_dataframes[name] = df_copy.copy() # Save the processed df_copy
                logging.info(f"Preprocessed '{name}' dataframe for date handling.")
            
            # General preprocessing for the current dataframe (df_copy or df_original if not 'follower_stats')
            # Fetch the potentially modified df_copy if it was processed above, otherwise use original df for this iteration
            current_df_to_process = self.all_dataframes[name].copy()

            # Convert object columns that look numeric
            for col in current_df_to_process.columns:
                if current_df_to_process[col].dtype == 'object':
                    try:
                        # Attempt conversion if a good portion of non-null values match numeric pattern
                        if current_df_to_process[col].str.match(r'^-?\d+\.?\d*$').sum() > len(current_df_to_process[col].dropna()) * 0.5:
                             current_df_to_process[col] = pd.to_numeric(current_df_to_process[col], errors='coerce')
                             logging.info(f"Converted column '{col}' in '{name}' to numeric.")
                    except AttributeError: # Handles cases where .str accessor fails (e.g. column has mixed types like numbers and lists)
                        logging.debug(f"Could not apply .str accessor to column '{col}' in '{name}'. Skipping numeric conversion for it.")


            numeric_columns = current_df_to_process.select_dtypes(include=[np.number]).columns
            current_df_to_process[numeric_columns] = current_df_to_process[numeric_columns].fillna(0)
            
            text_columns = current_df_to_process.select_dtypes(include=['object']).columns
            current_df_to_process[text_columns] = current_df_to_process[text_columns].fillna('')
            
            # Update self.all_dataframes with the fully processed version for this key
            self.all_dataframes[name] = current_df_to_process

        # After the loop, add all newly created dataframes
        if dataframes_to_add:
            self.all_dataframes.update(dataframes_to_add)
            logging.info(f"Added new derived dataframes: {list(dataframes_to_add.keys())}")

    
    def _build_system_prompt(self) -> str:
        """Enhanced system prompt that works with PandasAI integration"""
        return textwrap.dedent("""
        You are a friendly and insightful Employer Branding Analyst AI, working as a dedicated partner for HR professionals to make LinkedIn data analysis accessible, actionable, and easy to understand.
        
        ## Your Enhanced Capabilities:
        You now have advanced data analysis capabilities through PandasAI integration, allowing you to:
        - Directly query and analyze DataFrames with natural language
        - Generate charts and visualizations automatically (ALWAYS create charts when data visualization would be helpful)
        - Perform complex statistical analysis on LinkedIn employer branding data
        - Handle multi-DataFrame queries and joins seamlessly
        
        ## Core Responsibilities:
        1. **Intelligent Data Analysis**: Use your PandasAI integration to answer data questions directly and accurately
        2. **Business Context Translation**: Convert technical analysis results into HR-friendly insights
        3. **Actionable Recommendations**: Provide specific, implementable strategies based on data findings
        4. **Educational Guidance**: Help users understand both the data insights and the LinkedIn analytics concepts

        ## CRITICAL COMMUNICATION RULES:
        - **NEVER show code, technical commands, or programming syntax**
        - **NEVER mention dataset names, column names, or technical data structure details**
        - **NEVER reference DataFrames, schemas, or database terminology**
        - **Always speak in business terms**: refer to "your LinkedIn data", "follower metrics", "engagement data", etc.
        - **Focus on insights, not methods**: explain what the data shows, not how it was analyzed
        
        ## Communication Style:
        - **Natural and Conversational**: Maintain a warm, supportive tone as a helpful colleague
        - **HR-Focused Language**: Avoid technical jargon; explain analytics terms in business context
        - **Context-Rich Responses**: Always explain what metrics mean for employer branding strategy
        - **Structured Insights**: Use clear formatting with headers, bullets, and logical flow
        
        ## Data Analysis Approach:
        When users ask data questions, you will:
        1. **Leverage PandasAI**: Use your integrated data analysis capabilities to query the data directly
        2. **Interpret Results**: Translate technical findings into business insights
        3. **Add Context**: Combine data results with your RAG knowledge base for comprehensive answers
        4. **Provide Recommendations**: Suggest specific actions based on the analysis
        
        ## Response Structure:
        1. **Executive Summary**: Key findings in business terms
        2. **Data Insights**: What the analysis reveals (charts/visualizations when helpful)
        3. **Business Impact**: What this means for employer branding strategy
        4. **Recommendations**: Specific, prioritized action items
        5. **Next Steps**: Follow-up suggestions or questions
        
        ## Key Behaviors:
        - **Data-Driven**: Always ground insights in actual data analysis when possible
        - **Visual When Helpful**: Suggest or create charts that make data more understandable
        - **Proactive**: Identify related insights the user might find valuable
        - **Honest About Limitations**: Clearly state when data doesn't support certain analyses

        ## Example Language Patterns:
        - Instead of "DataFrame shows" → "Your LinkedIn data reveals"
        - Instead of "follower_count column" → "follower growth metrics"
        - Instead of "engagement_rate variable" → "post engagement performance"
        - Instead of "dataset analysis" → "performance review"
        
        Your goal remains to be a trusted partner, but now with powerful data analysis capabilities that enable deeper, more accurate insights for data-driven employer branding decisions.
        """).strip()
    
    def _classify_query_type(self, query: str) -> str:
        """Classify whether query needs data analysis, general advice, or both"""
        data_keywords = [
            'show', 'analyze', 'chart', 'graph', 'data', 'numbers', 'count', 'total', 
            'average', 'trend', 'compare', 'statistics', 'performance', 'metrics',
            'followers', 'engagement', 'posts', 'growth', 'rate', 'percentage'
        ]
        
        advice_keywords = [
            'recommend', 'suggest', 'advice', 'strategy', 'improve', 'optimize',
            'best practice', 'should', 'how to', 'what to do', 'tips'
        ]
        
        query_lower = query.lower()
        has_data_request = any(keyword in query_lower for keyword in data_keywords)
        has_advice_request = any(keyword in query_lower for keyword in advice_keywords)
        
        if has_data_request and has_advice_request:
            return "hybrid"
        elif has_data_request:
            return "data"
        elif has_advice_request:
            return "advice"
        else:
            return "general"

    
    async def _generate_pandas_response(self, query: str) -> tuple[str, bool]:
        """Generate response using PandasAI with enhanced error handling and data validation"""
        if not self.pandas_agent or not hasattr(self, 'pandas_dfs'):
            return "Data analysis not available - PandasAI not initialized.", False
        
        try:
            logging.info(f"Processing data query with PandasAI: {query[:100]}...")
            
            # Clear any existing matplotlib figures (plt is imported at module level)
            plt.clf()
            plt.close('all')
            
            # Enhanced query preprocessing
            processed_query = self._enhance_query_for_pandas(query)
            logging.info(f"Enhanced query: {processed_query[:200]}...")
            
            # Execute the query with better error handling
            pandas_response = None
            try:
                if len(self.pandas_dfs) == 1:
                    df = list(self.pandas_dfs.values())[0]
                    logging.info(f"Using single DataFrame for query with shape: {df.df.shape}")
                    pandas_response = df.chat(processed_query)
                else:
                    dfs = list(self.pandas_dfs.values())
                    pandas_response = pai.chat(processed_query, *dfs)
            except Exception as pandas_error:
                logging.error(f"PandasAI execution error: {pandas_error}")
                
                # Try a simpler version of the query
                simple_query = self._simplify_query_for_retry(query)
                if simple_query != query:
                    logging.info(f"Retrying with simplified query: {simple_query}")
                    try:
                        if len(self.pandas_dfs) == 1:
                            df = list(self.pandas_dfs.values())[0]
                            pandas_response = df.chat(simple_query)
                        else:
                            dfs = list(self.pandas_dfs.values())
                            pandas_response = pai.chat(simple_query, *dfs)
                    except Exception as retry_error:
                        logging.error(f"Retry also failed: {retry_error}")
                        return f"Data analysis failed: {str(pandas_error)}", False
                else:
                    return f"Data analysis failed: {str(pandas_error)}", False
            
            # Enhanced response processing with better type handling
            response_text = ""
            chart_path = None
            
            # Handle different response types from PandasAI
            try:
                # Case 1: Direct string response (file path)
                if isinstance(pandas_response, str):
                    if pandas_response.endswith(('.png', '.jpg', '.jpeg', '.svg')):
                        chart_path = pandas_response
                        response_text = "Analysis completed with visualization"
                    else:
                        response_text = pandas_response
                
                # Case 2: Chart object response
                elif hasattr(pandas_response, 'value') and hasattr(pandas_response, '_get_image'):
                    # Handle a PandasAI Chart response object: read the chart path
                    # directly instead of calling show(), which raises an error here
                    try:
                        value = pandas_response.value
                        if isinstance(value, str) and value.endswith(('.png', '.jpg', '.jpeg', '.svg')):
                            chart_path = value
                            response_text = "Analysis completed with visualization"
                        elif isinstance(value, dict):
                            # Handle dict response from a Chart object
                            if 'path' in value:
                                chart_path = value['path']
                                response_text = "Analysis completed with visualization"
                            else:
                                response_text = "Chart generated but path not accessible"
                    except Exception as chart_error:
                        logging.warning(f"Error handling chart response: {chart_error}")
                        response_text = "Chart generated but encountered display issue"
                
                # Case 3: Response with plot_path attribute
                elif hasattr(pandas_response, 'plot_path') and pandas_response.plot_path:
                    chart_path = pandas_response.plot_path
                    response_text = getattr(pandas_response, 'text', "Analysis completed with visualization")
                
                # Case 4: Other response types
                else:
                    if pandas_response is not None:
                        response_text = str(pandas_response).strip()
                    
            except Exception as response_error:
                logging.warning(f"Error processing PandasAI response: {response_error}")
                response_text = "Analysis completed but encountered response processing issue"
            
            # Fallback: Check charts directory for new files if no chart path found
            if not chart_path and os.path.exists(self.charts_dir):
                chart_files = []
                for f in os.listdir(self.charts_dir):
                    if f.endswith(('.png', '.jpg', '.jpeg', '.svg')):
                        full_path = os.path.join(self.charts_dir, f)
                        chart_files.append((full_path, os.path.getmtime(full_path)))
                
                if chart_files:
                    # Sort by modification time (newest first)
                    chart_files.sort(key=lambda x: x[1], reverse=True)
                    latest_chart_path, latest_time = chart_files[0]
                    
                    # Treat the chart as current only if it was created in the last 60 seconds
                    if time.time() - latest_time < 60:
                        chart_path = latest_chart_path
                        logging.info(f"Found recent chart: {chart_path}")
            
            # Format final response
            if not response_text:
                response_text = "Analysis completed"
                
            chart_info = ""
            if chart_path and os.path.exists(chart_path):
                chart_info = f"\n\n📊 **Chart Generated**: {os.path.basename(chart_path)}\nChart saved at: {chart_path}"
                logging.info(f"Chart successfully generated: {chart_path}")
            
            final_response = response_text + chart_info
            return final_response, True
        
        except Exception as e:
            logging.error(f"Error in PandasAI processing: {e}", exc_info=True)
            
            # Enhanced error handling
            error_str = str(e).lower()
            if "matplotlib" in error_str and "none" in error_str:
                return "I encountered a data visualization error. This might be due to missing or null values in your data. Please try asking for the raw data first, or specify which specific columns you'd like to analyze.", False
            elif "strftime" in error_str:
                return "I encountered a date formatting issue. Please try asking for the data without specific date formatting, or ask me to show the raw data structure first.", False
            elif "ambiguous" in error_str:
                return "I encountered an ambiguous data type issue. Please try being more specific about which data you'd like to analyze (e.g., 'show monthly follower gains' vs 'show cumulative followers').", False
            elif "startswith" in error_str or "dict" in error_str:
                return "I encountered a response formatting issue. The analysis may have completed but I couldn't process the result properly. Please try rephrasing your query.", False
            else:
                return f"Error processing data query: {str(e)}", False


    def _enhance_query_for_pandas(self, query: str) -> str:
        """Enhance query with specific data context and instructions"""
        enhanced_parts = [query]
        
        # Add specific instructions for follower queries
        if 'follower' in query.lower() and ('gain' in query.lower() or 'growth' in query.lower()):
            enhanced_parts.append("""
            IMPORTANT INSTRUCTIONS:
            - Use only data where follower_count_type='follower_gains_monthly' for monthly gains analysis
            - Filter out any rows where extracted_date is null or NaT
            - Sort results by extracted_date in ascending order
            - For 2025 data, make sure to include all months from January 2025 onwards
            - Use extracted_date for time series and month_name for better chart labels
            - Sum the follower_count values to get total gains
            """)
        
        if 'plot' in query.lower() or 'chart' in query.lower():
            enhanced_parts.append("""
            CHART REQUIREMENTS:
            - Create a clear, well-labeled chart
            - Use appropriate chart type (line chart for time series, bar chart for comparisons)
            - Include proper axis labels and title
            - Format dates nicely on x-axis if applicable
            - Save the chart and return the path
            """)
        
        if '2025' in query:
            enhanced_parts.append("- Focus specifically on data from 2025 onwards")
        
        return " ".join(enhanced_parts)
    
    def _simplify_query_for_retry(self, query: str) -> str:
        """Create a simpler version of the query for retry attempts"""
        # Remove complex requirements and focus on core request
        simple_patterns = {
            r'plot.*followers.*per.*month': 'show follower gains by month',
            r'how many.*followers.*gain.*since.*2025': 'sum follower gains from 2025',
            r'chart.*growth': 'show follower growth over time',
        }
        
        query_lower = query.lower()
        for pattern, replacement in simple_patterns.items():
            if re.search(pattern, query_lower):
                return replacement
        
        return query
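
        # Illustrative retry mapping (hypothetical input): "plot monthly followers
        # per month in 2025" matches the first pattern and is retried as
        # "show follower gains by month"; queries matching no pattern pass through.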

    async def _generate_enhanced_response(self, query: str, pandas_result: str = "", query_type: str = "general") -> str:
        """Generate enhanced response combining PandasAI results with RAG context"""
        if not self.is_ready:
            return "Agent is not ready. Please initialize."
        if not client:
            return "Error: AI service is not available. Check API configuration."

        try:
            system_prompt = self._build_system_prompt()
            data_summary = self._get_dataframes_summary()

            # Build enhanced prompt based on query type and available results
            if query_type == "data" and pandas_result:
                enhanced_prompt = f"""
                {system_prompt}
                
                ## Data Analysis Context:
                {data_summary}
                
                ## PandasAI Analysis Result:
                {pandas_result}
                
                ## Additional Knowledge Context:
                
                ## User Query:
                {query}
                
                Please interpret the data analysis result above and provide business insights in a friendly, HR-focused manner. 
                Explain what the findings mean for employer branding strategy and provide actionable recommendations.
                """
            else:
                enhanced_prompt = f"""
                {system_prompt}
                
                ## Available Data Context:
                {data_summary}
                
                ## Knowledge Base Context:
                
                ## User Query:
                {query}
                
                Please provide helpful insights and recommendations for this employer branding query.
                """

            # Generate the final response through the shared genai.Client
            logging.debug("Using genai.Client for enhanced response generation")
            
            # Prepare config
            config_dict = dict(self.generation_config_dict) if self.generation_config_dict else {}
            
            if self.safety_settings_list:
                safety_settings = []
                for ss in self.safety_settings_list:
                    if isinstance(ss, dict):
                        if GENAI_AVAILABLE and hasattr(types, 'SafetySetting'):
                            safety_settings.append(types.SafetySetting(
                                category=ss.get('category'),
                                threshold=ss.get('threshold')
                            ))
                        else:
                            safety_settings.append(ss)
                    else:
                        safety_settings.append(ss)
                config_dict['safety_settings'] = safety_settings
            
            if GENAI_AVAILABLE and hasattr(types, 'GenerateContentConfig'):
                config = types.GenerateContentConfig(**config_dict)
            else:
                config = config_dict
            
            model_path = f"models/{self.llm_model_name}" if not self.llm_model_name.startswith("models/") else self.llm_model_name
            
            # client.models.generate_content is synchronous, so run it in a
            # worker thread to avoid blocking the event loop
            api_response = await asyncio.to_thread(
                client.models.generate_content,
                model=model_path,
                contents=enhanced_prompt,  # a single prompt string is sufficient here
                config=config
            )
            
            # Extract text from the first candidate, tolerating partial response shapes
            if hasattr(api_response, 'candidates') and api_response.candidates:
                candidate = api_response.candidates[0]
                if hasattr(candidate, 'content') and candidate.content:
                    if hasattr(candidate.content, 'parts') and candidate.content.parts:
                        response_text_parts = [part.text for part in candidate.content.parts if hasattr(part, 'text')]
                        response_text = "".join(response_text_parts).strip()
                    else:
                        response_text = str(candidate.content).strip()
                else:
                    response_text = ""
            else:
                response_text = ""
            
            if not response_text:
                # Handle blocked or empty responses
                if hasattr(api_response, 'prompt_feedback') and api_response.prompt_feedback:
                    if hasattr(api_response.prompt_feedback, 'block_reason') and api_response.prompt_feedback.block_reason:
                        logging.warning(f"Prompt blocked: {api_response.prompt_feedback.block_reason}")
                        return f"I'm sorry, your request was blocked. Please try rephrasing your query."
                return "I couldn't generate a response. Please try rephrasing your query."

            return response_text

        except Exception as e:
            error_message = str(e).lower()
            
            if any(keyword in error_message for keyword in ['blocked', 'safety', 'filter', 'prohibited']):
                logging.error(f"Blocked prompt: {e}")
                return "I'm sorry, your request was blocked by the safety filter. Please rephrase your query."
            else:
                logging.error(f"Error in _generate_enhanced_response: {e}", exc_info=True)
                return f"I encountered an error while processing your request: {str(e)}"

    def _validate_query(self, query: str) -> bool:
        """Validate user query input"""
        if not query or not isinstance(query, str) or len(query.strip()) < 3:
            logging.warning(f"Invalid query: too short or not a string. Query: '{query}'")
            return False
        if len(query) > 3000:
            logging.warning(f"Invalid query: too long. Length: {len(query)}")
            return False
        return True

    async def process_query(self, user_query: str) -> Dict[str, Optional[str]]:
        """
        Main method to process user queries.
        Returns a dictionary: {"text": llm_response_string, "image_path": path_to_chart_or_none}
        """
        if not self._validate_query(user_query):
            return {"text": "Please provide a valid query (3 to 3000 characters).", "image_path": None}
        
        if not self.is_ready:
            logging.warning("process_query called but agent is not ready. Attempting re-initialization.")
            init_success = await self.initialize()
            if not init_success:
                return {"text": "The agent is not properly initialized and could not be started. Please check configuration and logs.", "image_path": None}
        
        try:
            query_type = self._classify_query_type(user_query)
            logging.info(f"Query classified as: {query_type}")
            
            pandas_text_output: Optional[str] = None
            pandas_chart_path: Optional[str] = None
            pandas_success = False # Flag to track if PandasAI ran successfully
            
            # For data-related queries, try PandasAI first
            if query_type in ["data", "hybrid"] and self.pandas_agent:
                logging.info("Attempting PandasAI analysis...")
                pandas_text_output, pandas_success = await self._generate_pandas_response(user_query)
                
                if pandas_success:
                    logging.info(f"PandasAI analysis successful. Text: '{str(pandas_text_output)[:100]}...'")
                    # Check for chart generation in response
                    if "Chart Generated" in pandas_text_output:
                        # Extract chart path from response if present
                        lines = pandas_text_output.split('\n')
                        for line in lines:
                            if "Chart saved at:" in line:
                                pandas_chart_path = line.split("Chart saved at: ")[1].strip()
                                break
                else:
                    # pandas_text_output might contain the error message from PandasAI
                    logging.warning(f"PandasAI analysis failed or returned no specific result. Message from PandasAI: {pandas_text_output}")
            
            # Prepare the context from PandasAI for the LLM
            llm_context_from_pandas = ""
            if pandas_text_output: # This could be a success message or an error message from PandasAI
                llm_context_from_pandas += f"Data Analysis Tool Output: {pandas_text_output}\n"
                if pandas_chart_path and pandas_success: # Only mention chart path if PandasAI was successful
                    llm_context_from_pandas += f"[A chart has been generated by the data tool and saved at '{pandas_chart_path}'. You should refer to this chart in your explanation if it's relevant to the user's query.]\n"
            elif query_type in ["data", "hybrid"] and not self.pandas_agent:
                llm_context_from_pandas += "Note: The data analysis tool is currently unavailable.\n"

            # Always call the LLM to formulate the final response
            final_llm_response = await self._generate_enhanced_response(
                query=user_query,
                pandas_result=llm_context_from_pandas, # Pass the textual summary from PandasAI
                query_type=query_type
            )
            
            # Return the LLM's response and the chart path if PandasAI was successful and generated one.
            # If PandasAI failed, pandas_chart_path would be None.
            # The final_llm_response should ideally explain any failures if pandas_text_output contained an error.
            return {"text": final_llm_response, "image_path": pandas_chart_path if pandas_success else None}

        except Exception as e:
            logging.error(f"Critical error in process_query: {e}", exc_info=True)
            return {"text": f"I encountered a critical error while processing your request: {type(e).__name__}. Please check the logs.", "image_path": None}

    def update_dataframes(self, new_dataframes: Dict[str, pd.DataFrame]):
        """Updates the agent's DataFrames and reinitializes PandasAI agent"""
        self.all_dataframes = {k: v.copy() for k, v in new_dataframes.items()}
        logging.info(f"Agent DataFrames updated. Keys: {list(self.all_dataframes.keys())}")
        
        # Reinitialize PandasAI agent with new data
        self._initialize_pandas_agent()
    

    def clear_chat_history(self):
        """Clears the agent's internal chat history"""
        self.chat_history = []
        logging.info("EmployerBrandingAgent internal chat history cleared.")

    def get_status(self) -> Dict[str, Any]:
        """Returns comprehensive status information about the agent"""
        return {
            "is_ready": self.is_ready,
            "has_api_key": bool(GEMINI_API_KEY),
            "genai_available": GENAI_AVAILABLE,
            "client_type": "genai.Client" if client else "None",  # Fix: Remove reference to llm_model_instance
            "pandas_agent_ready": self.pandas_agent is not None,
            "num_dataframes": len(self.all_dataframes),
            "dataframe_keys": list(self.all_dataframes.keys()),
            "llm_model_name": self.llm_model_name,
            "chat_history_length": len(self.chat_history),
            "charts_save_path_pandasai": pai.config.save_charts_path if pai.config.llm else "PandasAI not configured"
        }

    def get_available_analyses(self) -> List[str]:
        """Returns list of suggested analyses based on available data"""
        if not self.all_dataframes:
            return ["No data available for analysis"]
        
        suggestions = []
        for df_name, df in self.all_dataframes.items():
            if 'follower' in df_name.lower():
                suggestions.extend([
                    f"Show follower growth trends from {df_name}",
                    f"Analyze follower demographics in {df_name}",
                    "Compare follower engagement rates"
                ])
            elif 'post' in df_name.lower():
                suggestions.extend([
                    f"Analyze post performance metrics from {df_name}",
                    "Show best performing content types",
                    "Compare engagement across post categories"
                ])
            elif 'mention' in df_name.lower():
                suggestions.extend([
                    f"Analyze brand mention sentiment from {df_name}",
                    "Show mention volume trends",
                    "Identify top mention sources"
                ])
        
        # Add general suggestions
        suggestions.extend([
            "What are the key employer branding trends?",
            "How can I improve our LinkedIn presence?",
            "What content strategy should we adopt?",
            "How do we measure employer branding success?"
        ])
        
        return suggestions[:10]  # Limit to top 10 suggestions

# --- Helper Functions for External Integration ---
def create_agent_instance(dataframes: Optional[Dict[str, pd.DataFrame]] = None) -> EmployerBrandingAgent:
    """Factory function to create a new agent instance"""
    logging.info("Creating new EnhancedEmployerBrandingAgent instance via helper function.")
    return EmployerBrandingAgent(all_dataframes=dataframes)

async def initialize_agent_async(agent: EmployerBrandingAgent) -> bool:
    """Async helper to initialize an agent instance"""
    logging.info("Initializing agent via async helper function.")
    return await agent.initialize()

def validate_dataframes(dataframes: Dict[str, pd.DataFrame]) -> Dict[str, List[str]]:
    """Validate dataframes for common issues and return validation report"""
    validation_report = {}
    
    for name, df in dataframes.items():
        issues = []
        
        if df.empty:
            issues.append("DataFrame is empty")
        
        # Check for required columns based on data type
        if 'follower' in name.lower():
            required_cols = ['date', 'follower_count']
            missing_cols = [col for col in required_cols if col not in df.columns]
            if missing_cols:
                issues.append(f"Missing expected columns for follower data: {missing_cols}")
        
        elif 'post' in name.lower():
            required_cols = ['date', 'engagement']
            missing_cols = [col for col in required_cols if col not in df.columns]
            if missing_cols:
                issues.append(f"Missing expected columns for post data: {missing_cols}")
        
        # Check for data quality issues
        if not df.empty:
            null_percentages = (df.isnull().sum() / len(df) * 100).round(2)
            high_null_cols = null_percentages[null_percentages > 50].index.tolist()
            if high_null_cols:
                issues.append(f"Columns with >50% null values: {high_null_cols}")
        
        validation_report[name] = issues
    
    return validation_report
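

if __name__ == "__main__":
    # Minimal smoke test with hypothetical data. It exercises the pure-Python
    # helpers; full agent construction is attempted but degrades gracefully
    # when GEMINI_API_KEY is not set (it also creates ./charts as a side effect).
    toy_frames = {
        "follower_stats": pd.DataFrame({
            "follower_count": [100, 40, 55],
            "follower_count_type": [
                "follower_count_cumulative",
                "follower_gains_monthly",
                "follower_gains_monthly",
            ],
            "category_name": ["all", "2025-01-01", "2025-02-01"],
        })
    }

    print(get_all_schemas_representation(toy_frames))
    print("Validation report:", validate_dataframes(toy_frames))

    agent = create_agent_instance(toy_frames)
    print(f"PandasAI ready: {agent.pandas_agent is not None}")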