GuglielmoTor committed on
Commit
ed0af7b
·
verified ·
1 Parent(s): ee8125f

Delete data_processing

Browse files
data_processing/__init__.py DELETED
@@ -1,44 +0,0 @@
# __init__.py — public interface of the data_processing package.

# Post summarization / classification helpers and their model constants.
from .posts_categorization import (
    summarize_post,
    classify_post,
    summarize_and_classify_post,
    batch_summarize_and_classify,
    SummaryOutput,
    ClassificationOutput,
    CLASSIFICATION_LABELS,
    PRIMARY_SUMMARIZER_MODEL,
    FALLBACK_SUMMARIZER_MODEL,
    CLASSIFICATION_MODEL,
)

# Date filtering and analytics-summary helpers.
from .analytics_data_processing import (
    filter_dataframe_by_date,
    prepare_filtered_analytics_data,
    generate_chatbot_data_summaries,
)

# Explicit export list for 'from data_processing import *'.
__all__ = [
    # From posts_categorization
    "summarize_post",
    "classify_post",
    "summarize_and_classify_post",
    "batch_summarize_and_classify",
    "SummaryOutput",
    "ClassificationOutput",
    "CLASSIFICATION_LABELS",
    "PRIMARY_SUMMARIZER_MODEL",
    "FALLBACK_SUMMARIZER_MODEL",
    "CLASSIFICATION_MODEL",

    # From analytics_data_processing
    "filter_dataframe_by_date",
    "prepare_filtered_analytics_data",
    "generate_chatbot_data_summaries",
]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
data_processing/analytics_data_processing.py DELETED
@@ -1,503 +0,0 @@
1
- #analytics_data_processing.py
2
- import pandas as pd
3
- from datetime import datetime, timedelta, time
4
- import logging
5
- import numpy as np
6
-
7
- # Configure logging for this module
8
- logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(module)s - %(message)s')
9
-
10
def filter_dataframe_by_date(df, date_column, start_date, end_date):
    """Return the rows of df whose date_column falls within [start_date, end_date].

    The date column is coerced to datetime, normalized to midnight, and — if
    timezone-aware — converted to its naive UTC equivalent so it can be compared
    against naive filter bounds. A missing/empty frame, an unknown column, or a
    failed conversion yields an empty DataFrame. A falsy start or end bound
    (None, empty string, NaT after coercion) simply means "no bound on that side".
    """
    if df is None or df.empty or not date_column:
        logging.warning(f"Filter by date: DataFrame is None, empty, or no date_column provided. DF: {df is not None}, empty: {df.empty if df is not None else 'N/A'}, date_column: {date_column}")
        return pd.DataFrame()
    if date_column not in df.columns:
        logging.warning(f"Filter by date: Date column '{date_column}' not found in DataFrame columns: {df.columns.tolist()}.")
        return pd.DataFrame()

    # Never mutate the caller's frame.
    working = df.copy()
    try:
        # Coerce to pandas datetimes when needed; unparseable values become NaT.
        if not pd.api.types.is_datetime64_any_dtype(working[date_column]):
            working[date_column] = pd.to_datetime(working[date_column], errors='coerce')

        # Discard rows whose date is missing or failed to parse.
        working = working.dropna(subset=[date_column])
        if working.empty:
            logging.info(f"Filter by date: DataFrame empty after to_datetime and dropna for column '{date_column}'.")
            return pd.DataFrame()

        # Truncate to midnight (keeps any timezone information intact).
        working[date_column] = working[date_column].dt.normalize()

        # Timezone-aware values are mapped to their naive UTC equivalent so
        # they can be compared with the naive filter bounds below.
        if getattr(working[date_column].dt, 'tz', None) is not None:
            logging.info(f"Column '{date_column}' is timezone-aware ({working[date_column].dt.tz}). Converting to naive (from UTC) for comparison.")
            working[date_column] = working[date_column].dt.tz_convert('UTC').dt.tz_localize(None)

    except Exception as e:
        logging.error(f"Error processing date column '{date_column}': {e}", exc_info=True)
        return pd.DataFrame()

    # Normalize the bounds to naive midnight Timestamps (NaT if unparseable,
    # which is falsy and therefore treated as "no bound").
    start_ts = pd.to_datetime(start_date, errors='coerce').normalize() if start_date else None
    end_ts = pd.to_datetime(end_date, errors='coerce').normalize() if end_date else None

    # Build one boolean mask from whichever bounds are present.
    mask = pd.Series(True, index=working.index)
    if start_ts:
        mask &= working[date_column] >= start_ts
    if end_ts:
        mask &= working[date_column] <= end_ts
    result = working[mask]

    if result.empty:
        logging.info(f"Filter by date: DataFrame became empty after applying date range to column '{date_column}'.")

    return result
64
-
65
-
66
# Columns normally supplied by post_stats_df; plots downstream expect them.
_EXPECTED_STAT_COLS = ['engagement', 'impressionCount', 'clickCount', 'likeCount', 'commentCount', 'shareCount']


def _ensure_stat_columns(df):
    """Add any missing expected post-stat columns to df as pd.NA (in place) and return it."""
    for col in _EXPECTED_STAT_COLS:
        if col not in df.columns:
            df[col] = pd.NA
    return df


def prepare_filtered_analytics_data(token_state_value, date_filter_option, custom_start_date, custom_end_date):
    """
    Retrieves data from token_state, determines date range, filters posts, mentions, and follower time-series data.
    Merges posts with post stats.

    Args:
        token_state_value: dict-like state holding the bubble_* DataFrames and config_* column names.
        date_filter_option: one of "Last 7 Days", "Last 30 Days", "Custom Range" (anything else = no start bound).
        custom_start_date / custom_end_date: bounds used only for "Custom Range" (coerced; invalid -> None / today).

    Returns:
        - filtered_merged_posts_df: Posts merged with stats, filtered by date.
        - filtered_mentions_df: Mentions filtered by date.
        - date_filtered_follower_stats_df: Follower stats filtered by date (for time-series plots).
        - raw_follower_stats_df: Unfiltered follower stats (for demographic plots).
        - start_dt_filter: Determined start date for filtering (None = unbounded).
        - end_dt_filter: Determined end date for filtering.
    """
    logging.info(f"Preparing filtered analytics data. Filter: {date_filter_option}, Custom Start: {custom_start_date}, Custom End: {custom_end_date}")

    # Copies so later column additions/filtering never mutate the shared state.
    posts_df = token_state_value.get("bubble_posts_df", pd.DataFrame()).copy()
    mentions_df = token_state_value.get("bubble_mentions_df", pd.DataFrame()).copy()
    follower_stats_df = token_state_value.get("bubble_follower_stats_df", pd.DataFrame()).copy()
    post_stats_df = token_state_value.get("bubble_post_stats_df", pd.DataFrame()).copy()

    date_column_posts = token_state_value.get("config_date_col_posts", "published_at")
    date_column_mentions = token_state_value.get("config_date_col_mentions", "date")
    # Assuming follower_stats_df has a 'date' column for time-series data.
    date_column_followers = token_state_value.get("config_date_col_followers", "date")

    # Determine date range for filtering (all bounds normalized to midnight today).
    current_datetime_obj = datetime.now()
    current_time_normalized = current_datetime_obj.replace(hour=0, minute=0, second=0, microsecond=0)

    end_dt_filter = current_time_normalized
    start_dt_filter = None

    if date_filter_option == "Last 7 Days":
        start_dt_filter = current_time_normalized - timedelta(days=6)
    elif date_filter_option == "Last 30 Days":
        start_dt_filter = current_time_normalized - timedelta(days=29)
    elif date_filter_option == "Custom Range":
        start_dt_filter_temp = pd.to_datetime(custom_start_date, errors='coerce')
        start_dt_filter = start_dt_filter_temp.replace(hour=0, minute=0, second=0, microsecond=0) if pd.notna(start_dt_filter_temp) else None

        end_dt_filter_temp = pd.to_datetime(custom_end_date, errors='coerce')
        end_dt_filter = end_dt_filter_temp.replace(hour=0, minute=0, second=0, microsecond=0) if pd.notna(end_dt_filter_temp) else current_time_normalized

    logging.info(f"Date range for filtering: Start: {start_dt_filter}, End: {end_dt_filter}")

    # Merge posts_df and post_stats_df (posts 'id' <-> stats 'post_id').
    merged_posts_df = pd.DataFrame()
    if not posts_df.empty and not post_stats_df.empty:
        if 'id' in posts_df.columns and 'post_id' in post_stats_df.columns:
            merged_posts_df = pd.merge(posts_df, post_stats_df, left_on='id', right_on='post_id', how='left')
            logging.info(f"Merged posts_df ({len(posts_df)} rows) and post_stats_df ({len(post_stats_df)} rows) into merged_posts_df ({len(merged_posts_df)} rows).")
        else:
            logging.warning("Cannot merge posts_df and post_stats_df due to missing 'id' or 'post_id' columns.")
            # Fix: like the empty-stats branch below, guarantee the expected
            # stat columns exist so stats-dependent consumers don't KeyError.
            merged_posts_df = _ensure_stat_columns(posts_df)
    elif not posts_df.empty:
        logging.warning("post_stats_df is empty. Proceeding with posts_df only for plots that don't require stats.")
        # For columns expected from post_stats_df, add them with NA if not present.
        merged_posts_df = _ensure_stat_columns(posts_df)

    # Filter DataFrames by date.
    filtered_merged_posts_data = pd.DataFrame()
    if not merged_posts_df.empty and date_column_posts in merged_posts_df.columns:
        filtered_merged_posts_data = filter_dataframe_by_date(merged_posts_df, date_column_posts, start_dt_filter, end_dt_filter)
    elif not merged_posts_df.empty:
        logging.warning(f"Date column '{date_column_posts}' not found in merged_posts_df. Returning unfiltered merged posts data.")
        filtered_merged_posts_data = merged_posts_df

    filtered_mentions_data = pd.DataFrame()
    if not mentions_df.empty and date_column_mentions in mentions_df.columns:
        filtered_mentions_data = filter_dataframe_by_date(mentions_df, date_column_mentions, start_dt_filter, end_dt_filter)
    elif not mentions_df.empty:
        logging.warning(f"Date column '{date_column_mentions}' not found in mentions_df. Returning unfiltered mentions data.")
        filtered_mentions_data = mentions_df

    date_filtered_follower_stats_df = pd.DataFrame()
    # Demographic plots use the raw (unfiltered) follower stats snapshot.
    raw_follower_stats_df = follower_stats_df.copy()

    if not follower_stats_df.empty and date_column_followers in follower_stats_df.columns:
        date_filtered_follower_stats_df = filter_dataframe_by_date(follower_stats_df, date_column_followers, start_dt_filter, end_dt_filter)
    elif not follower_stats_df.empty:
        logging.warning(f"Date column '{date_column_followers}' not found in follower_stats_df. Time-series follower plots might be empty or use unfiltered data.")
        # Fall back to the unfiltered data rather than an empty frame.
        date_filtered_follower_stats_df = follower_stats_df

    logging.info(f"Processed - Filtered Merged Posts: {len(filtered_merged_posts_data)} rows, Filtered Mentions: {len(filtered_mentions_data)} rows, Date-Filtered Follower Stats: {len(date_filtered_follower_stats_df)} rows.")

    return filtered_merged_posts_data, filtered_mentions_data, date_filtered_follower_stats_df, raw_follower_stats_df, start_dt_filter, end_dt_filter
159
-
160
- # --- Helper function to generate textual data summaries for chatbot ---
161
- def generate_chatbot_data_summaries(
162
- plot_configs_list,
163
- filtered_merged_posts_df,
164
- filtered_mentions_df,
165
- date_filtered_follower_stats_df, # Expected to contain 'follower_gains_monthly'
166
- raw_follower_stats_df, # Expected to contain other demographics like 'follower_geo', 'follower_industry'
167
- token_state_value
168
- ):
169
- """
170
- Generates textual summaries for each plot ID to be used by the chatbot,
171
- based on the corrected understanding of DataFrame structures and follower count columns.
172
- """
173
- data_summaries = {}
174
-
175
- # --- Date and Config Columns from token_state ---
176
- # For Posts
177
- date_col_posts = token_state_value.get("config_date_col_posts", "published_at")
178
- media_type_col_name = token_state_value.get("config_media_type_col", "media_type")
179
- eb_labels_col_name = token_state_value.get("config_eb_labels_col", "li_eb_label")
180
- # For Mentions
181
- date_col_mentions = token_state_value.get("config_date_col_mentions", "date")
182
- mentions_sentiment_col = "sentiment_label" # As per user's mention df structure
183
-
184
- # For Follower Stats - Actual column names provided by user
185
- follower_count_organic_col = "follower_count_organic"
186
- follower_count_paid_col = "follower_count_paid"
187
-
188
- # For Follower Stats (Demographics from raw_follower_stats_df)
189
- follower_demographics_type_col = "follower_count_type" # Column indicating 'follower_geo', 'follower_industry'
190
- follower_demographics_category_col = "category_name" # Column indicating 'USA', 'Technology'
191
-
192
- # For Follower Gains/Growth (from date_filtered_follower_stats_df)
193
- follower_gains_type_col = "follower_count_type" # Should be 'follower_gains_monthly'
194
- follower_gains_date_col = "category_name" # This is 'YYYY-MM-DD'
195
-
196
- # --- Helper: Safely convert to datetime ---
197
- def safe_to_datetime(series, errors='coerce'):
198
- return pd.to_datetime(series, errors=errors)
199
-
200
- # --- Prepare DataFrames (copy and convert dates) ---
201
- if filtered_merged_posts_df is not None and not filtered_merged_posts_df.empty:
202
- posts_df = filtered_merged_posts_df.copy()
203
- if date_col_posts in posts_df.columns:
204
- posts_df[date_col_posts] = safe_to_datetime(posts_df[date_col_posts])
205
- else:
206
- logging.warning(f"Date column '{date_col_posts}' not found in posts_df for chatbot summary.")
207
- else:
208
- posts_df = pd.DataFrame()
209
-
210
- if filtered_mentions_df is not None and not filtered_mentions_df.empty:
211
- mentions_df = filtered_mentions_df.copy()
212
- if date_col_mentions in mentions_df.columns:
213
- mentions_df[date_col_mentions] = safe_to_datetime(mentions_df[date_col_mentions])
214
- else:
215
- logging.warning(f"Date column '{date_col_mentions}' not found in mentions_df for chatbot summary.")
216
- else:
217
- mentions_df = pd.DataFrame()
218
-
219
- # For date_filtered_follower_stats_df (monthly gains)
220
- if date_filtered_follower_stats_df is not None and not date_filtered_follower_stats_df.empty:
221
- follower_monthly_df = date_filtered_follower_stats_df.copy()
222
- if follower_gains_type_col in follower_monthly_df.columns:
223
- follower_monthly_df = follower_monthly_df[follower_monthly_df[follower_gains_type_col] == 'follower_gains_monthly'].copy()
224
-
225
- if follower_gains_date_col in follower_monthly_df.columns:
226
- follower_monthly_df['datetime_obj'] = safe_to_datetime(follower_monthly_df[follower_gains_date_col])
227
- follower_monthly_df = follower_monthly_df.dropna(subset=['datetime_obj'])
228
-
229
- # Calculate total gains
230
- if follower_count_organic_col in follower_monthly_df.columns and follower_count_paid_col in follower_monthly_df.columns:
231
- follower_monthly_df[follower_count_organic_col] = pd.to_numeric(follower_monthly_df[follower_count_organic_col], errors='coerce').fillna(0)
232
- follower_monthly_df[follower_count_paid_col] = pd.to_numeric(follower_monthly_df[follower_count_paid_col], errors='coerce').fillna(0)
233
- follower_monthly_df['total_monthly_gains'] = follower_monthly_df[follower_count_organic_col] + follower_monthly_df[follower_count_paid_col]
234
- elif follower_count_organic_col in follower_monthly_df.columns: # Only organic exists
235
- follower_monthly_df[follower_count_organic_col] = pd.to_numeric(follower_monthly_df[follower_count_organic_col], errors='coerce').fillna(0)
236
- follower_monthly_df['total_monthly_gains'] = follower_monthly_df[follower_count_organic_col]
237
- elif follower_count_paid_col in follower_monthly_df.columns: # Only paid exists
238
- follower_monthly_df[follower_count_paid_col] = pd.to_numeric(follower_monthly_df[follower_count_paid_col], errors='coerce').fillna(0)
239
- follower_monthly_df['total_monthly_gains'] = follower_monthly_df[follower_count_paid_col]
240
- else:
241
- logging.warning(f"Neither '{follower_count_organic_col}' nor '{follower_count_paid_col}' found in follower_monthly_df for total gains calculation.")
242
- follower_monthly_df['total_monthly_gains'] = 0 # Avoid KeyError later
243
- else:
244
- logging.warning(f"Date column '{follower_gains_date_col}' (from category_name) not found in follower_monthly_df for chatbot summary.")
245
- if 'datetime_obj' not in follower_monthly_df.columns:
246
- follower_monthly_df['datetime_obj'] = pd.NaT
247
- if 'total_monthly_gains' not in follower_monthly_df.columns:
248
- follower_monthly_df['total_monthly_gains'] = 0
249
- else:
250
- follower_monthly_df = pd.DataFrame(columns=[follower_gains_date_col, 'total_monthly_gains', 'datetime_obj'])
251
-
252
-
253
- if raw_follower_stats_df is not None and not raw_follower_stats_df.empty:
254
- follower_demographics_df = raw_follower_stats_df.copy()
255
- # Calculate total followers for demographics
256
- if follower_count_organic_col in follower_demographics_df.columns and follower_count_paid_col in follower_demographics_df.columns:
257
- follower_demographics_df[follower_count_organic_col] = pd.to_numeric(follower_demographics_df[follower_count_organic_col], errors='coerce').fillna(0)
258
- follower_demographics_df[follower_count_paid_col] = pd.to_numeric(follower_demographics_df[follower_count_paid_col], errors='coerce').fillna(0)
259
- follower_demographics_df['total_follower_count'] = follower_demographics_df[follower_count_organic_col] + follower_demographics_df[follower_count_paid_col]
260
- elif follower_count_organic_col in follower_demographics_df.columns:
261
- follower_demographics_df[follower_count_organic_col] = pd.to_numeric(follower_demographics_df[follower_count_organic_col], errors='coerce').fillna(0)
262
- follower_demographics_df['total_follower_count'] = follower_demographics_df[follower_count_organic_col]
263
- elif follower_count_paid_col in follower_demographics_df.columns:
264
- follower_demographics_df[follower_count_paid_col] = pd.to_numeric(follower_demographics_df[follower_count_paid_col], errors='coerce').fillna(0)
265
- follower_demographics_df['total_follower_count'] = follower_demographics_df[follower_count_paid_col]
266
- else:
267
- logging.warning(f"Neither '{follower_count_organic_col}' nor '{follower_count_paid_col}' found in follower_demographics_df for total count calculation.")
268
- if 'total_follower_count' not in follower_demographics_df.columns:
269
- follower_demographics_df['total_follower_count'] = 0
270
- else:
271
- follower_demographics_df = pd.DataFrame()
272
-
273
-
274
- for plot_cfg in plot_configs_list:
275
- plot_id = plot_cfg["id"]
276
- plot_label = plot_cfg["label"]
277
- summary_text = f"No specific data summary available for '{plot_label}' for the selected period."
278
-
279
- try:
280
- # --- FOLLOWER STATS ---
281
- if plot_id == "followers_count": # Uses follower_monthly_df
282
- if not follower_monthly_df.empty and 'total_monthly_gains' in follower_monthly_df.columns and 'datetime_obj' in follower_monthly_df.columns and not follower_monthly_df['datetime_obj'].isnull().all():
283
- df_summary = follower_monthly_df[['datetime_obj', 'total_monthly_gains']].copy()
284
- df_summary['datetime_obj'] = df_summary['datetime_obj'].dt.strftime('%Y-%m-%d')
285
- df_summary.rename(columns={'datetime_obj': 'Date', 'total_monthly_gains': 'Total Monthly Gains'}, inplace=True)
286
- summary_text = f"Follower Count (Total Monthly Gains):\n{df_summary.sort_values(by='Date').tail(5).to_string(index=False)}"
287
- else:
288
- summary_text = f"Follower count data (total monthly gains) is unavailable or incomplete for '{plot_label}'."
289
-
290
- elif plot_id == "followers_growth_rate": # Uses follower_monthly_df
291
- if not follower_monthly_df.empty and 'total_monthly_gains' in follower_monthly_df.columns and 'datetime_obj' in follower_monthly_df.columns and not follower_monthly_df['datetime_obj'].isnull().all():
292
- df_calc = follower_monthly_df.sort_values(by='datetime_obj').copy()
293
- # Growth rate is calculated on the total monthly gains (which are changes, not cumulative counts)
294
- # To calculate growth rate of followers, we'd need cumulative follower count.
295
- # The plot logic also uses pct_change on the gains themselves.
296
- # If 'total_monthly_gains' represents the *change* in followers, then pct_change on this is rate of change of gains.
297
- # If it represents the *cumulative* followers at that point, then pct_change is follower growth rate.
298
- # Assuming 'total_monthly_gains' is the *change* for the month, like the plot logic.
299
- df_calc['total_monthly_gains'] = pd.to_numeric(df_calc['total_monthly_gains'], errors='coerce')
300
- if len(df_calc) >= 2:
301
- # Calculate cumulative sum to get follower count if 'total_monthly_gains' are indeed just gains
302
- # If your 'total_monthly_gains' already IS the total follower count at end of month, remove next line
303
- # For now, assuming it's GAINS, so we need cumulative for growth rate of total followers.
304
- # However, the original plot logic applies pct_change directly to 'follower_gains_monthly'.
305
- # Let's stick to pct_change on the gains/count column for consistency with plot.
306
-
307
- # If 'total_monthly_gains' is the actual follower count for that month:
308
- df_calc['growth_rate_monthly'] = df_calc['total_monthly_gains'].pct_change() * 100
309
- df_calc['growth_rate_monthly'] = df_calc['growth_rate_monthly'].round(2)
310
- df_calc.replace([np.inf, -np.inf], np.nan, inplace=True) # Handle division by zero if a gain was 0
311
-
312
- df_summary = df_calc[['datetime_obj', 'growth_rate_monthly']].dropna().copy()
313
- df_summary['datetime_obj'] = df_summary['datetime_obj'].dt.strftime('%Y-%m-%d')
314
- df_summary.rename(columns={'datetime_obj': 'Date', 'growth_rate_monthly': 'Growth Rate (%)'}, inplace=True)
315
- if not df_summary.empty:
316
- summary_text = f"Follower Growth Rate (Monthly % based on Total Follower Count/Gains):\n{df_summary.sort_values(by='Date').tail(5).to_string(index=False)}"
317
- else:
318
- summary_text = f"Not enough data points or valid transitions to calculate follower growth rate for '{plot_label}'."
319
- else:
320
- summary_text = f"Not enough data points (need at least 2) to calculate follower growth rate for '{plot_label}'."
321
- else:
322
- summary_text = f"Follower growth rate data (total monthly gains) is unavailable or incomplete for '{plot_label}'."
323
-
324
- elif plot_id in ["followers_by_location", "followers_by_role", "followers_by_industry", "followers_by_seniority"]:
325
- demographic_type_map = {
326
- "followers_by_location": "follower_geo",
327
- "followers_by_role": "follower_function",
328
- "followers_by_industry": "follower_industry",
329
- "followers_by_seniority": "follower_seniority"
330
- }
331
- current_demographic_type = demographic_type_map.get(plot_id)
332
- if not follower_demographics_df.empty and \
333
- follower_demographics_type_col in follower_demographics_df.columns and \
334
- follower_demographics_category_col in follower_demographics_df.columns and \
335
- 'total_follower_count' in follower_demographics_df.columns: # Check for the calculated total
336
-
337
- df_filtered_demographics = follower_demographics_df[
338
- follower_demographics_df[follower_demographics_type_col] == current_demographic_type
339
- ].copy()
340
-
341
- if not df_filtered_demographics.empty:
342
- df_summary = df_filtered_demographics.groupby(follower_demographics_category_col)['total_follower_count'].sum().reset_index()
343
- df_summary.rename(columns={follower_demographics_category_col: 'Category', 'total_follower_count': 'Total Follower Count'}, inplace=True)
344
- top_5 = df_summary.nlargest(5, 'Total Follower Count')
345
- summary_text = f"Top 5 {plot_label} (Total Followers):\n{top_5.to_string(index=False)}"
346
- else:
347
- summary_text = f"No data available for demographic type '{current_demographic_type}' in '{plot_label}'."
348
- else:
349
- summary_text = f"Follower demographic data columns (including total_follower_count) are missing or incomplete for '{plot_label}'."
350
-
351
- # --- POSTS STATS ---
352
- elif plot_id == "engagement_rate":
353
- if not posts_df.empty and 'engagement' in posts_df.columns and date_col_posts in posts_df.columns and not posts_df[date_col_posts].isnull().all():
354
- df_resampled = posts_df.set_index(date_col_posts)['engagement'].resample('W').mean().reset_index()
355
- df_resampled['engagement'] = pd.to_numeric(df_resampled['engagement'], errors='coerce').round(2)
356
- df_summary = df_resampled[[date_col_posts, 'engagement']].dropna().copy()
357
- df_summary[date_col_posts] = df_summary[date_col_posts].dt.strftime('%Y-%m-%d')
358
- summary_text = f"Engagement Rate Over Time (Weekly Avg %):\n{df_summary.sort_values(by=date_col_posts).tail(5).to_string(index=False)}"
359
- else:
360
- summary_text = f"Engagement rate data is unavailable for '{plot_label}'."
361
-
362
- elif plot_id == "reach_over_time":
363
- if not posts_df.empty and 'reach' in posts_df.columns and date_col_posts in posts_df.columns and not posts_df[date_col_posts].isnull().all():
364
- df_resampled = posts_df.set_index(date_col_posts)['reach'].resample('W').sum().reset_index()
365
- df_resampled['reach'] = pd.to_numeric(df_resampled['reach'], errors='coerce')
366
- df_summary = df_resampled[[date_col_posts, 'reach']].dropna().copy()
367
- df_summary[date_col_posts] = df_summary[date_col_posts].dt.strftime('%Y-%m-%d')
368
- summary_text = f"Reach Over Time (Weekly Sum):\n{df_summary.sort_values(by=date_col_posts).tail(5).to_string(index=False)}"
369
- else:
370
- summary_text = f"Reach data is unavailable for '{plot_label}'."
371
-
372
- elif plot_id == "impressions_over_time":
373
- if not posts_df.empty and 'impressionCount' in posts_df.columns and date_col_posts in posts_df.columns and not posts_df[date_col_posts].isnull().all():
374
- df_resampled = posts_df.set_index(date_col_posts)['impressionCount'].resample('W').sum().reset_index()
375
- df_resampled['impressionCount'] = pd.to_numeric(df_resampled['impressionCount'], errors='coerce')
376
- df_summary = df_resampled[[date_col_posts, 'impressionCount']].dropna().copy()
377
- df_summary[date_col_posts] = df_summary[date_col_posts].dt.strftime('%Y-%m-%d')
378
- df_summary.rename(columns={'impressionCount': 'Impressions'}, inplace=True)
379
- summary_text = f"Impressions Over Time (Weekly Sum):\n{df_summary.sort_values(by=date_col_posts).tail(5).to_string(index=False)}"
380
- else:
381
- summary_text = f"Impressions data is unavailable for '{plot_label}'."
382
-
383
- elif plot_id == "likes_over_time":
384
- if not posts_df.empty and 'likeCount' in posts_df.columns and date_col_posts in posts_df.columns and not posts_df[date_col_posts].isnull().all():
385
- df_resampled = posts_df.set_index(date_col_posts)['likeCount'].resample('W').sum().reset_index()
386
- df_resampled['likeCount'] = pd.to_numeric(df_resampled['likeCount'], errors='coerce')
387
- df_summary = df_resampled[[date_col_posts, 'likeCount']].dropna().copy()
388
- df_summary[date_col_posts] = df_summary[date_col_posts].dt.strftime('%Y-%m-%d')
389
- df_summary.rename(columns={'likeCount': 'Likes'}, inplace=True)
390
- summary_text = f"Likes Over Time (Weekly Sum):\n{df_summary.sort_values(by=date_col_posts).tail(5).to_string(index=False)}"
391
- else:
392
- summary_text = f"Likes data is unavailable for '{plot_label}'."
393
-
394
- elif plot_id == "clicks_over_time":
395
- if not posts_df.empty and 'clickCount' in posts_df.columns and date_col_posts in posts_df.columns and not posts_df[date_col_posts].isnull().all():
396
- df_resampled = posts_df.set_index(date_col_posts)['clickCount'].resample('W').sum().reset_index()
397
- df_resampled['clickCount'] = pd.to_numeric(df_resampled['clickCount'], errors='coerce')
398
- df_summary = df_resampled[[date_col_posts, 'clickCount']].dropna().copy()
399
- df_summary[date_col_posts] = df_summary[date_col_posts].dt.strftime('%Y-%m-%d')
400
- df_summary.rename(columns={'clickCount': 'Clicks'}, inplace=True)
401
- summary_text = f"Clicks Over Time (Weekly Sum):\n{df_summary.sort_values(by=date_col_posts).tail(5).to_string(index=False)}"
402
- else:
403
- summary_text = f"Clicks data is unavailable for '{plot_label}'."
404
-
405
- elif plot_id == "shares_over_time":
406
- if not posts_df.empty and 'shareCount' in posts_df.columns and date_col_posts in posts_df.columns and not posts_df[date_col_posts].isnull().all():
407
- df_resampled = posts_df.set_index(date_col_posts)['shareCount'].resample('W').sum().reset_index()
408
- df_resampled['shareCount'] = pd.to_numeric(df_resampled['shareCount'], errors='coerce')
409
- df_summary = df_resampled[[date_col_posts, 'shareCount']].dropna().copy()
410
- df_summary[date_col_posts] = df_summary[date_col_posts].dt.strftime('%Y-%m-%d')
411
- df_summary.rename(columns={'shareCount': 'Shares'}, inplace=True)
412
- summary_text = f"Shares Over Time (Weekly Sum):\n{df_summary.sort_values(by=date_col_posts).tail(5).to_string(index=False)}"
413
- elif 'shareCount' not in posts_df.columns and not posts_df.empty : # Check if posts_df is not empty before assuming column is the only issue
414
- summary_text = f"Shares data column ('shareCount') not found for '{plot_label}'."
415
- else:
416
- summary_text = f"Shares data is unavailable for '{plot_label}'."
417
-
418
- elif plot_id == "comments_over_time":
419
- if not posts_df.empty and 'commentCount' in posts_df.columns and date_col_posts in posts_df.columns and not posts_df[date_col_posts].isnull().all():
420
- df_resampled = posts_df.set_index(date_col_posts)['commentCount'].resample('W').sum().reset_index()
421
- df_resampled['commentCount'] = pd.to_numeric(df_resampled['commentCount'], errors='coerce')
422
- df_summary = df_resampled[[date_col_posts, 'commentCount']].dropna().copy()
423
- df_summary[date_col_posts] = df_summary[date_col_posts].dt.strftime('%Y-%m-%d')
424
- df_summary.rename(columns={'commentCount': 'Comments'}, inplace=True)
425
- summary_text = f"Comments Over Time (Weekly Sum):\n{df_summary.sort_values(by=date_col_posts).tail(5).to_string(index=False)}"
426
- else:
427
- summary_text = f"Comments data is unavailable for '{plot_label}'."
428
-
429
- elif plot_id == "comments_sentiment":
430
- comment_sentiment_col_posts = "sentiment"
431
- if not posts_df.empty and comment_sentiment_col_posts in posts_df.columns:
432
- sentiment_counts = posts_df[comment_sentiment_col_posts].value_counts().reset_index()
433
- sentiment_counts.columns = ['Sentiment', 'Count']
434
- summary_text = f"Comments Sentiment Breakdown (Posts Data):\n{sentiment_counts.to_string(index=False)}"
435
- else:
436
- summary_text = f"Comment sentiment data ('{comment_sentiment_col_posts}') is unavailable for '{plot_label}'."
437
-
438
- elif plot_id == "post_frequency_cs":
439
- if not posts_df.empty and date_col_posts in posts_df.columns and not posts_df[date_col_posts].isnull().all():
440
- post_counts_weekly = posts_df.set_index(date_col_posts).resample('W').size().reset_index(name='post_count')
441
- post_counts_weekly.rename(columns={date_col_posts: 'Week', 'post_count': 'Posts'}, inplace=True)
442
- post_counts_weekly['Week'] = post_counts_weekly['Week'].dt.strftime('%Y-%m-%d (Week of)')
443
- summary_text = f"Post Frequency (Weekly):\n{post_counts_weekly.sort_values(by='Week').tail(5).to_string(index=False)}"
444
- else:
445
- summary_text = f"Post frequency data is unavailable for '{plot_label}'."
446
-
447
- elif plot_id == "content_format_breakdown_cs":
448
- if not posts_df.empty and media_type_col_name in posts_df.columns:
449
- format_counts = posts_df[media_type_col_name].value_counts().reset_index()
450
- format_counts.columns = ['Format', 'Count']
451
- summary_text = f"Content Format Breakdown:\n{format_counts.nlargest(5, 'Count').to_string(index=False)}"
452
- else:
453
- summary_text = f"Content format data ('{media_type_col_name}') is unavailable for '{plot_label}'."
454
-
455
- elif plot_id == "content_topic_breakdown_cs":
456
- if not posts_df.empty and eb_labels_col_name in posts_df.columns:
457
- try:
458
- # Ensure the column is not all NaN before trying to check for lists or explode
459
- if posts_df[eb_labels_col_name].notna().any():
460
- if posts_df[eb_labels_col_name].apply(lambda x: isinstance(x, list)).any():
461
- topic_counts = posts_df.explode(eb_labels_col_name)[eb_labels_col_name].value_counts().reset_index()
462
- else:
463
- topic_counts = posts_df[eb_labels_col_name].value_counts().reset_index()
464
- topic_counts.columns = ['Topic', 'Count']
465
- summary_text = f"Content Topic Breakdown (Top 5):\n{topic_counts.nlargest(5, 'Count').to_string(index=False)}"
466
- else:
467
- summary_text = f"Content topic data ('{eb_labels_col_name}') contains no valid topics for '{plot_label}'."
468
- except Exception as e_topic:
469
- logging.warning(f"Could not process topic breakdown for '{eb_labels_col_name}': {e_topic}")
470
- summary_text = f"Content topic data ('{eb_labels_col_name}') could not be processed for '{plot_label}'."
471
- else:
472
- summary_text = f"Content topic data ('{eb_labels_col_name}') is unavailable for '{plot_label}'."
473
-
474
- # --- MENTIONS STATS ---
475
- elif plot_id == "mention_analysis_volume":
476
- if not mentions_df.empty and date_col_mentions in mentions_df.columns and not mentions_df[date_col_mentions].isnull().all():
477
- mentions_over_time = mentions_df.set_index(date_col_mentions).resample('W').size().reset_index(name='mention_count')
478
- mentions_over_time.rename(columns={date_col_mentions: 'Week', 'mention_count': 'Mentions'}, inplace=True)
479
- mentions_over_time['Week'] = mentions_over_time['Week'].dt.strftime('%Y-%m-%d (Week of)')
480
- if not mentions_over_time.empty:
481
- summary_text = f"Mentions Volume (Weekly):\n{mentions_over_time.sort_values(by='Week').tail(5).to_string(index=False)}"
482
- else:
483
- summary_text = f"No mention activity found for '{plot_label}' in the selected period."
484
- else:
485
- summary_text = f"Mentions volume data is unavailable for '{plot_label}'."
486
-
487
- elif plot_id == "mention_analysis_sentiment":
488
- if not mentions_df.empty and mentions_sentiment_col in mentions_df.columns:
489
- sentiment_counts = mentions_df[mentions_sentiment_col].value_counts().reset_index()
490
- sentiment_counts.columns = ['Sentiment', 'Count']
491
- summary_text = f"Mentions Sentiment Breakdown:\n{sentiment_counts.to_string(index=False)}"
492
- else:
493
- summary_text = f"Mention sentiment data ('{mentions_sentiment_col}') is unavailable for '{plot_label}'."
494
-
495
- data_summaries[plot_id] = summary_text
496
- except KeyError as e:
497
- logging.warning(f"KeyError generating summary for {plot_id} ('{plot_label}'): {e}. Using default summary.")
498
- data_summaries[plot_id] = f"Data summary generation error for '{plot_label}' (missing column: {e})."
499
- except Exception as e:
500
- logging.error(f"Error generating summary for {plot_id} ('{plot_label}'): {e}", exc_info=True)
501
- data_summaries[plot_id] = f"Error generating data summary for '{plot_label}'."
502
-
503
- return data_summaries
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
data_processing/posts_categorization.py DELETED
@@ -1,207 +0,0 @@
1
- import pandas as pd
2
- from groq import Groq, RateLimitError
3
- import instructor
4
- from pydantic import BaseModel
5
- import os
6
-
7
# Ensure GROQ_API_KEY is set in your environment variables.
# Fail fast at import time rather than on the first API call.
api_key = os.getenv('GROQ_API_KEY')

if not api_key:
    raise ValueError("GROQ_API_KEY environment variable not set.")

# Create single patched Groq client with instructor for structured output.
# Using Mode.JSON so chat completions are parsed/validated against the
# Pydantic response models below.
client = instructor.from_groq(Groq(api_key=api_key), mode=instructor.Mode.JSON)

# Pydantic model for summarization output
class SummaryOutput(BaseModel):
    # Short (5-10 word) summary string produced by the summarizer prompt.
    summary: str

# Pydantic model for classification output
class ClassificationOutput(BaseModel):
    # Category string; classify_post validates it against CLASSIFICATION_LABELS.
    category: str

# Define model names. Summarization uses a primary model with a fallback
# (see summarize_post); classification uses a single model.
PRIMARY_SUMMARIZER_MODEL = "deepseek-r1-distill-llama-70b"
FALLBACK_SUMMARIZER_MODEL = "llama-3.3-70b-versatile"
CLASSIFICATION_MODEL = "meta-llama/llama-4-maverick-17b-128e-instruct" # Or your preferred classification model

# Define the standard list of categories, including "None"
CLASSIFICATION_LABELS = [
    "Company Culture and Values",
    "Employee Stories and Spotlights",
    "Work-Life Balance, Flexibility, and Well-being",
    "Diversity, Equity, and Inclusion (DEI)",
    "Professional Development and Growth Opportunities",
    "Mission, Vision, and Social Responsibility",
    "None" # Represents no applicable category or cases where classification isn't possible
]
41
- def summarize_post(text: str) -> str | None:
42
- """
43
- Summarizes the given post text using a primary model with a fallback.
44
- Returns the summary string or None if summarization fails or input is invalid.
45
- """
46
- # Check for NaN, None, or empty/whitespace-only string
47
- if pd.isna(text) or text is None or not str(text).strip():
48
- print("Summarizer: Input text is empty or None. Returning None.")
49
- return None
50
-
51
- # Truncate text to a reasonable length to avoid token overflow and reduce costs
52
- processed_text = str(text)[:500]
53
-
54
- prompt = f"""
55
- Summarize the following LinkedIn post in 5 to 10 words.
56
- Only return the summary inside a JSON field called 'summary'.
57
-
58
- Post Text:
59
- \"\"\"{processed_text}\"\"\"
60
- """
61
-
62
- try:
63
- # Attempt with primary model
64
- print(f"Attempting summarization with primary model: {PRIMARY_SUMMARIZER_MODEL}")
65
- response = client.chat.completions.create(
66
- model=PRIMARY_SUMMARIZER_MODEL,
67
- response_model=SummaryOutput,
68
- messages=[
69
- {"role": "system", "content": "You are a precise summarizer. Only return a JSON object with a 'summary' string."},
70
- {"role": "user", "content": prompt}
71
- ],
72
- temperature=0.3
73
- )
74
- return response.summary
75
- except RateLimitError:
76
- print(f"Rate limit hit for primary summarizer model: {PRIMARY_SUMMARIZER_MODEL}. Trying fallback: {FALLBACK_SUMMARIZER_MODEL}")
77
- try:
78
- # Attempt with fallback model
79
- response = client.chat.completions.create(
80
- model=FALLBACK_SUMMARIZER_MODEL,
81
- response_model=SummaryOutput,
82
- messages=[
83
- {"role": "system", "content": "You are a precise summarizer. Only return a JSON object with a 'summary' string."},
84
- {"role": "user", "content": prompt}
85
- ],
86
- temperature=0.3
87
- )
88
- print(f"Summarization successful with fallback model: {FALLBACK_SUMMARIZER_MODEL}")
89
- return response.summary
90
- except RateLimitError as rle_fallback:
91
- print(f"Rate limit hit for fallback summarizer model ({FALLBACK_SUMMARIZER_MODEL}): {rle_fallback}. Summarization failed.")
92
- return None
93
- except Exception as e_fallback:
94
- print(f"Error during summarization with fallback model ({FALLBACK_SUMMARIZER_MODEL}): {e_fallback}")
95
- return None
96
- except Exception as e_primary:
97
- print(f"Error during summarization with primary model ({PRIMARY_SUMMARIZER_MODEL}): {e_primary}")
98
- # Consider if fallback should be attempted for other errors too, or just return None
99
- return None
100
-
101
- def classify_post(summary: str | None, labels: list[str]) -> str:
102
- """
103
- Classifies the post summary into one of the provided labels.
104
- Ensures the returned category is one of the labels, defaulting to "None".
105
- """
106
- # If the summary is None (e.g., from a failed summarization or empty input),
107
- # or if the summary is an empty string after stripping, classify as "None".
108
- if pd.isna(summary) or summary is None or not str(summary).strip():
109
- print("Classifier: Input summary is empty or None. Returning 'None' category.")
110
- return "None" # Return the string "None" to match the label
111
-
112
- # Join labels for the prompt to ensure the LLM knows the exact expected strings
113
- labels_string = "', '".join(labels)
114
-
115
- prompt = f"""
116
- Post Summary: "{summary}"
117
-
118
- Available Categories:
119
- '{labels_string}'
120
-
121
- Task: Choose the single most relevant category from the list above that applies to this summary.
122
- Return ONLY ONE category string in a structured JSON format under the field 'category'.
123
- The category MUST be one of the following: '{labels_string}'.
124
- If no specific category applies, or if you are unsure, return "None".
125
- """
126
- try:
127
- system_message = (
128
- f"You are a very strict classifier. Your ONLY job is to return a JSON object "
129
- f"with a 'category' field. The value of 'category' MUST be one of these "
130
- f"exact strings: '{labels_string}'."
131
- )
132
- result = client.chat.completions.create(
133
- model=CLASSIFICATION_MODEL,
134
- response_model=ClassificationOutput,
135
- messages=[
136
- {"role": "system", "content": system_message},
137
- {"role": "user", "content": prompt}
138
- ],
139
- temperature=0 # Temperature 0 for deterministic classification
140
- )
141
-
142
- returned_category = result.category
143
-
144
- # Validate the output against the provided labels
145
- if returned_category not in labels:
146
- print(f"Warning: Classifier returned '{returned_category}', which is not in the predefined labels. Forcing to 'None'. Summary: '{summary}'")
147
- return "None" # Force to "None" if the LLM returns an unexpected category
148
- return returned_category
149
- except Exception as e:
150
- print(f"Classification error: {e}. Summary: '{summary}'. Defaulting to 'None' category.")
151
- return "None" # Default to "None" on any exception during classification
152
-
153
def summarize_and_classify_post(text: str | None, labels: list[str]) -> dict:
    """
    Run the full pipeline (summarize, then classify) for a single post text.

    Returns a dict with keys "summary" (str or None) and "category"
    (a member of *labels*, or "None" when no usable summary was produced).
    """
    summary = summarize_post(text)  # May legitimately be None.

    # Without a usable summary there is nothing to classify; classify_post
    # itself guarantees an on-list result otherwise.
    has_summary = summary is not None and bool(summary.strip())
    category = classify_post(summary, labels) if has_summary else "None"

    return {"summary": summary, "category": category}
173
-
174
def batch_summarize_and_classify(posts_data: list[dict]) -> list[dict]:
    """
    Summarize and classify a batch of posts.

    Expects *posts_data* to be a list of dicts, each with at least 'id' and
    'text' keys; non-dict items are skipped with a warning. Returns one dict
    per processed post with keys 'id', 'summary', and 'category'.
    """
    outputs: list[dict] = []
    if not posts_data:
        print("Input 'posts_data' is empty. Returning empty results.")
        return outputs

    for index, item in enumerate(posts_data):
        if not isinstance(item, dict):
            print(f"Warning: Item at index {index} is not a dictionary. Skipping.")
            continue

        post_id = item.get("id")
        text = item.get("text")

        print(f"\nProcessing Post ID: {post_id if post_id else 'N/A (ID missing)'}, Text: '{str(text)[:50]}...'")

        # summarize_and_classify_post copes with None/empty text internally
        # and always yields a validated category ("None" when unclassifiable).
        outcome = summarize_and_classify_post(text, CLASSIFICATION_LABELS)

        # Carry the ID through so results can be mapped back to the input.
        outputs.append({
            "id": post_id,
            "summary": outcome["summary"],
            "category": outcome["category"],
        })
        print(f"Result for Post ID {post_id}: Summary='{outcome['summary']}', Category='{outcome['category']}'")

    return outputs