GuglielmoTor committed
Commit 7dc216d · verified · 1 Parent(s): 738320e

Update Linkedin_Data_API_Calls.py

Files changed (1)
  1. Linkedin_Data_API_Calls.py +435 -139
Linkedin_Data_API_Calls.py CHANGED
@@ -1,8 +1,10 @@
1
  import json
2
  import requests
3
  import html
 
4
  from datetime import datetime
5
  from collections import defaultdict
 
6
  from transformers import pipeline
7
 
8
  from sessions import create_session
@@ -14,19 +16,72 @@ import logging
14
  logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
15
 
16
  API_V2_BASE = 'https://api.linkedin.com/v2'
17
- API_REST_BASE = "https://api.linkedin.com/rest" # Corrected from API_REST_BASE to API_REST_BASE
18
 
19
- # Initialize sentiment pipeline (consider loading it once globally if this module is imported multiple times)
20
  sentiment_pipeline = pipeline("text-classification", model="tabularisai/multilingual-sentiment-analysis")
21
 
22
  def fetch_linkedin_posts_core(comm_client_id, community_token, org_urn, count):
23
  """
24
  Fetches raw posts, their basic statistics, and performs summarization/categorization.
25
- Does NOT fetch comments or analyze sentiment.
26
  """
27
  token_dict = community_token if isinstance(community_token, dict) else {'access_token': community_token, 'token_type': 'Bearer'}
28
  session = create_session(comm_client_id, token=token_dict)
29
- org_name = "GRLS" # Placeholder or fetch if necessary
 
 
 
30
 
31
  posts_url = f"{API_REST_BASE}/posts?author={org_urn}&q=author&count={count}&sortBy=LAST_MODIFIED"
32
  logging.info(f"Fetching posts from URL: {posts_url}")
@@ -37,19 +92,19 @@ def fetch_linkedin_posts_core(comm_client_id, community_token, org_urn, count):
37
  logging.info(f"Fetched {len(raw_posts_api)} raw posts from API.")
38
  except requests.exceptions.RequestException as e:
39
  status = getattr(e.response, 'status_code', 'N/A')
40
- logging.error(f"Failed to fetch posts (Status: {status}): {e}")
 
41
  raise ValueError(f"Failed to fetch posts (Status: {status})") from e
 
 
 
42
 
43
  if not raw_posts_api:
44
  logging.info("No raw posts found.")
45
- return [], {}, org_name
46
 
47
- # Filter for valid post types if necessary, e.g., shares or ugcPosts
48
- # post_urns_for_stats = [p["id"] for p in raw_posts_api if ":share:" in p["id"] or ":ugcPost:" in p["id"]]
49
  post_urns_for_stats = [p["id"] for p in raw_posts_api if p.get("id")]
50
 
51
-
52
- # Prepare texts for summarization/classification
53
  post_texts_for_nlp = []
54
  for p in raw_posts_api:
55
  text_content = p.get("commentary") or \
@@ -57,39 +112,57 @@ def fetch_linkedin_posts_core(comm_client_id, community_token, org_urn, count):
57
  "[No text content]"
58
  post_texts_for_nlp.append({"text": text_content, "id": p.get("id")})
59
 
60
- logging.info(f"Prepared {len(post_texts_for_nlp)} posts for NLP.")
61
- structured_results_list = batch_summarize_and_classify(post_texts_for_nlp)
62
- # Create a dictionary for easy lookup of structured results by post ID
63
- structured_results_map = {res["id"]: res for res in structured_results_list if "id" in res}
 
 
64
 
 
65
 
66
- # Fetch statistics
67
  stats_map = {}
68
  if post_urns_for_stats:
69
- for i in range(0, len(post_urns_for_stats), 20): # LinkedIn API often has batch limits
70
- batch_urns = post_urns_for_stats[i:i+20]
 
71
  params = {'q': 'organizationalEntity', 'organizationalEntity': org_urn}
72
- for idx, urn_str in enumerate(batch_urns):
73
- # Determine if it's a share or ugcPost based on URN structure (simplified)
74
- key_prefix = "shares" if ":share:" in urn_str else "ugcPosts"
75
- params[f"{key_prefix}[{idx}]"] = urn_str
76
 
 
 
 
77
  try:
78
- logging.info(f"Fetching stats for batch starting with URN: {batch_urns[0]}")
79
  stat_resp = session.get(f"{API_REST_BASE}/organizationalEntityShareStatistics", params=params)
80
  stat_resp.raise_for_status()
81
- for stat_element in stat_resp.json().get("elements", []):
82
- urn = stat_element.get("share") or stat_element.get("ugcPost")
83
- if urn:
84
- stats_map[urn] = stat_element.get("totalShareStatistics", {})
85
- logging.info(f"Successfully fetched stats for {len(batch_urns)} URNs. Current stats_map size: {len(stats_map)}")
86
  except requests.exceptions.RequestException as e:
87
- logging.warning(f"Failed to fetch stats for a batch: {e}. Response: {e.response.text if e.response else 'No response'}")
88
- # Continue to next batch, some stats might be missing
 
89
  except json.JSONDecodeError as e:
90
  logging.warning(f"Failed to decode JSON from stats response: {e}. Response: {stat_resp.text if stat_resp else 'No response text'}")
91
 
92
-
93
  processed_raw_posts = []
94
  for p in raw_posts_api:
95
  post_id = p.get("id")
@@ -101,7 +174,7 @@ def fetch_linkedin_posts_core(comm_client_id, community_token, org_urn, count):
101
  p.get("specificContent", {}).get("com.linkedin.ugc.ShareContent", {}).get("shareCommentaryV2", {}).get("text", "") or \
102
  "[No text content]"
103
 
104
- timestamp = p.get("publishedAt") or p.get("createdAt")
105
  published_at_iso = datetime.fromtimestamp(timestamp / 1000).isoformat() if timestamp else None
106
 
107
  structured_res = structured_results_map.get(post_id, {"summary": "N/A", "category": "N/A"})
@@ -113,68 +186,72 @@ def fetch_linkedin_posts_core(comm_client_id, community_token, org_urn, count):
113
  "category": structured_res["category"],
114
  "published_at_timestamp": timestamp,
115
  "published_at_iso": published_at_iso,
116
- # These are placeholders for actual fields from LinkedIn API response. Verify field names.
117
- "organization_urn": p.get("author", "urn:li:unknown"), # e.g., "urn:li:person:xxxx" or "urn:li:organization:xxxx"
118
- "is_ad": 'adContext' in p
119
- #"media_type": p.get("mediaCategory", "NONE") # e.g., ARTICLE, IMAGE, VIDEO, NONE
 
120
  })
121
  logging.info(f"Processed {len(processed_raw_posts)} posts with core data.")
122
- return processed_raw_posts, stats_map, org_name
123
 
124
 
125
- def fetch_comments(comm_client_id, token_dict, post_urns, stats_map):
126
  """
127
  Fetches comments for a list of post URNs.
128
  Uses stats_map to potentially skip posts with 0 comments.
129
  """
130
- from requests_oauthlib import OAuth2Session # Keep import here if OAuth2Session is specific to this
131
-
132
- linkedin_session = OAuth2Session(comm_client_id, token=token_dict)
133
- # LinkedIn API versions can change, ensure this is up-to-date.
134
- # Using a recent version like "202402" or as per current LinkedIn docs.
135
- # The user had "202502", which might be a future version. Using a slightly older one for safety.
136
- linkedin_session.headers.update({'LinkedIn-Version': "202502"})
137
 
138
  all_comments_by_post = {}
139
  logging.info(f"Fetching comments for {len(post_urns)} posts.")
140
 
141
  for post_urn in post_urns:
142
- # Optimization: if stats show 0 comments, skip API call for this post's comments
143
- if stats_map.get(post_urn, {}).get('commentCount', 0) == 0:
 
 
144
  logging.info(f"Skipping comment fetch for {post_urn} as commentCount is 0 in stats_map.")
145
  all_comments_by_post[post_urn] = []
146
  continue
147
 
148
  try:
149
- # According to LinkedIn docs, comments are often under /socialActions/{activityUrn}/comments
150
- # or /commentsV2?q=entity&entity={activityUrn}
151
- # The user's URL was /socialActions/{post_urn}/comments - this seems plausible for URNs like ugcPost URNs.
152
- url = f"{API_REST_BASE}/socialActions/{post_urn}/comments"
153
  logging.debug(f"Fetching comments from URL: {url} for post URN: {post_urn}")
154
  response = linkedin_session.get(url)
155
 
156
  if response.status_code == 200:
157
  elements = response.json().get('elements', [])
158
- comments_texts = [
159
- c.get('message', {}).get('text')
160
- for c in elements
161
- if c.get('message') and c.get('message', {}).get('text')
162
- ]
163
  all_comments_by_post[post_urn] = comments_texts
164
  logging.info(f"Fetched {len(comments_texts)} comments for {post_urn}.")
165
- elif response.status_code == 403: # Forbidden, often permissions or versioning
166
- logging.warning(f"Forbidden (403) to fetch comments for {post_urn}. URL: {url}. Response: {response.text}")
167
  all_comments_by_post[post_urn] = []
168
- elif response.status_code == 404: # Not found
169
  logging.warning(f"Comments not found (404) for {post_urn}. URL: {url}. Response: {response.text}")
170
  all_comments_by_post[post_urn] = []
171
  else:
172
- logging.error(f"Error fetching comments for {post_urn}. Status: {response.status_code}. Response: {response.text}")
173
  all_comments_by_post[post_urn] = []
174
  except requests.exceptions.RequestException as e:
175
  logging.error(f"RequestException fetching comments for {post_urn}: {e}")
176
  all_comments_by_post[post_urn] = []
177
- except Exception as e: # Catch any other unexpected errors
 
 
 
178
  logging.error(f"Unexpected error fetching comments for {post_urn}: {e}")
179
  all_comments_by_post[post_urn] = []
180
 
@@ -182,64 +259,68 @@ def fetch_comments(comm_client_id, token_dict, post_urns, stats_map):
182
 
183
  def analyze_sentiment(all_comments_data):
184
  """
185
- Analyzes sentiment for comments grouped by post_urn.
186
  all_comments_data is a dict: {post_urn: [comment_text_1, comment_text_2,...]}
187
- Returns a dict: {post_urn: {"sentiment": "DominantSentiment", "percentage": X.X}}
188
  """
189
  results_by_post = {}
190
- logging.info(f"Analyzing sentiment for comments from {len(all_comments_data)} posts.")
191
  for post_urn, comments_list in all_comments_data.items():
192
- sentiment_counts = defaultdict(int)
193
  total_valid_comments_for_post = 0
194
 
195
  if not comments_list:
196
- results_by_post[post_urn] = {"sentiment": "Neutral 😐", "percentage": 0.0, "details": sentiment_counts}
197
  continue
198
 
199
  for comment_text in comments_list:
200
- if not comment_text or not comment_text.strip(): # Skip empty comments
201
  continue
202
- try:
203
- # The pipeline expects a string or list of strings.
204
- # Ensure comment_text is a string.
205
- analysis_result = sentiment_pipeline(str(comment_text))
206
- label = analysis_result[0]['label'].upper()
207
-
208
- if label in ['POSITIVE', 'VERY POSITIVE']:
209
- sentiment_counts['Positive 👍'] += 1
210
- elif label in ['NEGATIVE', 'VERY NEGATIVE']:
211
- sentiment_counts['Negative 👎'] += 1
212
- elif label == 'NEUTRAL':
213
- sentiment_counts['Neutral 😐'] += 1
214
- else: # Other labels from the model
215
- sentiment_counts['Unknown'] += 1
216
- total_valid_comments_for_post += 1
217
- except Exception as e:
218
- logging.error(f"Sentiment analysis failed for comment under {post_urn}: '{comment_text[:50]}...'. Error: {e}")
219
- sentiment_counts['Error'] += 1
220
 
 
 
 
221
  if total_valid_comments_for_post > 0:
222
- dominant_sentiment = max(sentiment_counts, key=sentiment_counts.get, default='Neutral 😐')
223
- percentage = round((sentiment_counts[dominant_sentiment] / total_valid_comments_for_post) * 100, 1)
224
- else: # No valid comments to analyze
225
- dominant_sentiment = 'Neutral 😐'
226
- percentage = 0.0
227
- if sentiment_counts['Error'] > 0 : # If there were only errors
228
- dominant_sentiment = 'Error'
229
 
230
  results_by_post[post_urn] = {
231
- "sentiment": dominant_sentiment,
232
  "percentage": percentage,
233
- "details": dict(sentiment_counts) # Store counts for more detailed reporting if needed
234
  }
235
- logging.debug(f"Sentiment for {post_urn}: {results_by_post[post_urn]}")
236
-
237
  return results_by_post
238
 
239
 
240
  def compile_detailed_posts(processed_raw_posts, stats_map, sentiments_per_post):
241
  """
242
- Combines processed raw post data with their statistics and overall sentiment.
243
  """
244
  detailed_post_list = []
245
  logging.info(f"Compiling detailed data for {len(processed_raw_posts)} posts.")
@@ -248,23 +329,18 @@ def compile_detailed_posts(processed_raw_posts, stats_map, sentiments_per_post):
248
  stats = stats_map.get(post_id, {})
249
 
250
  likes = stats.get("likeCount", 0)
251
- # Use 'commentSummary' from stats for comment count if available, else 'commentCount'
252
- # LinkedIn sometimes has commentSummary.totalComments
253
- comments_stat_count = stats.get("commentSummary", {}).get("totalComments") if "commentSummary" in stats else stats.get("commentCount", 0)
254
 
255
  clicks = stats.get("clickCount", 0)
256
  shares = stats.get("shareCount", 0)
257
  impressions = stats.get("impressionCount", 0)
258
- unique_impressions = stats.get("uniqueImpressionsCount", 0) # Ensure this field is in API response
259
 
260
- # Calculate engagement: (likes + comments + clicks + shares) / impressions
261
- # Ensure impressions is not zero to avoid DivisionByZeroError
262
  engagement_numerator = likes + comments_stat_count + clicks + shares
263
- engagement_rate = (engagement_numerator / impressions * 100) if impressions else 0.0
264
 
265
- sentiment_info = sentiments_per_post.get(post_id, {"sentiment": "Neutral 😐", "percentage": 0.0})
266
 
267
- # Format text for display (escaped and truncated)
268
  display_text = html.escape(proc_post["raw_text"][:250]).replace("\n", "<br>") + \
269
  ("..." if len(proc_post["raw_text"]) > 250 else "")
270
 
@@ -274,25 +350,25 @@ def compile_detailed_posts(processed_raw_posts, stats_map, sentiments_per_post):
274
  detailed_post_list.append({
275
  "id": post_id,
276
  "when": when_formatted,
277
- "text_for_display": display_text, # Shortened, escaped text
278
- "raw_text": proc_post["raw_text"], # Full original text
279
  "likes": likes,
280
- "comments_stat_count": comments_stat_count, # Count from post statistics
281
  "clicks": clicks,
282
  "shares": shares,
283
  "impressions": impressions,
284
  "uniqueImpressionsCount": unique_impressions,
285
- "engagement": f"{engagement_rate:.2f}%", # Formatted string
286
- "engagement_raw": engagement_rate, # Raw float for potential calculations
287
  "sentiment": sentiment_info["sentiment"],
288
  "sentiment_percent": sentiment_info["percentage"],
289
- "sentiment_details": sentiment_info.get("details", {}), # Detailed counts
290
  "summary": proc_post["summary"],
291
  "category": proc_post["category"],
292
  "organization_urn": proc_post["organization_urn"],
293
- "is_ad": 'adContext' in proc_post,
294
- #"media_type": proc_post["media_type"],
295
- "published_at": proc_post["published_at_iso"] # ISO format datetime string
296
  })
297
  logging.info(f"Compiled {len(detailed_post_list)} detailed posts.")
298
  return detailed_post_list
@@ -306,48 +382,268 @@ def prepare_data_for_bubble(detailed_posts, all_actual_comments_data):
306
  """
307
  li_posts = []
308
  li_post_stats = []
309
- li_post_comments = [] # For individual comments
310
- logging.info("Preparing data for Bubble.")
311
- org_urn = detailed_posts[0]["organization_urn"]
312
  for post_data in detailed_posts:
313
- # Data for LI_post table in Bubble
314
  li_posts.append({
315
  "organization_urn": post_data["organization_urn"],
316
- "id": post_data["id"], # Post URN
317
- "is_ad": 'adContext' in post_data,
318
- #"media_type": post_data["media_type"],
319
- "published_at": post_data["published_at"], # ISO datetime string
320
- "sentiment": post_data["sentiment"], # Overall sentiment of the post based on its comments
321
- "text": post_data["raw_text"], # Storing the full raw text
322
- #"summary_text": post_data["summary"],
323
- "li_eb_label": post_data["category"]
324
- # Add any other fields from post_data needed for LI_post table
325
  })
326
 
327
- # Data for LI_post_stats table in Bubble
328
  li_post_stats.append({
329
  "clickCount": post_data["clicks"],
330
- "commentCount": post_data["comments_stat_count"], # From post's own stats
331
- "engagement": post_data["engagement"], # Formatted string e.g., "12.34%"
332
  "impressionCount": post_data["impressions"],
333
  "likeCount": post_data["likes"],
334
  "shareCount": post_data["shares"],
335
  "uniqueImpressionsCount": post_data["uniqueImpressionsCount"],
336
- "post_id": post_data["id"], # Foreign key to LI_post
337
- "organization_urn": org_urn
338
  })
339
 
340
- # Data for LI_post_comments table in Bubble (individual comments)
341
- # This iterates through the actual comments fetched, not just the count.
342
  for post_urn, comments_text_list in all_actual_comments_data.items():
343
  for single_comment_text in comments_text_list:
344
- if single_comment_text and single_comment_text.strip(): # Ensure comment text is not empty
345
  li_post_comments.append({
346
  "comment_text": single_comment_text,
347
- "post_id": post_urn, # Foreign key to LI_post
348
- "organization_urn": org_urn
349
- # Could add sentiment per comment here if analyzed at that granularity
350
  })
351
 
352
  logging.info(f"Prepared {len(li_posts)} posts, {len(li_post_stats)} stats entries, and {len(li_post_comments)} comments for Bubble.")
353
- return li_posts, li_post_stats, li_post_comments
1
  import json
2
  import requests
3
  import html
4
+ import time # Added for potential rate limiting if needed
5
  from datetime import datetime
6
  from collections import defaultdict
7
+ from urllib.parse import quote # Added for URL encoding
8
  from transformers import pipeline
9
 
10
  from sessions import create_session
 
16
  logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
17
 
18
  API_V2_BASE = 'https://api.linkedin.com/v2'
19
+ API_REST_BASE = "https://api.linkedin.com/rest"
20
 
21
+ # Initialize sentiment pipeline (loaded once globally)
22
  sentiment_pipeline = pipeline("text-classification", model="tabularisai/multilingual-sentiment-analysis")
23
 
24
+ # --- Utility Function ---
25
+ def extract_text_from_mention_commentary(commentary):
26
+ """
27
+ Extracts clean text from a commentary string, removing potential placeholders like {mention}.
28
+ """
29
+ import re
30
+ if not commentary:
31
+ return ""
32
+ return re.sub(r"{.*?}", "", commentary).strip()
33
+
34
+ # --- Core Sentiment Analysis Helper ---
35
+ def _get_sentiment_from_text(text_to_analyze):
36
+ """
37
+ Analyzes a single piece of text and returns its sentiment label and raw counts.
38
+ Returns a dict: {"label": "Sentiment Label", "counts": defaultdict(int)}
39
+ """
40
+ sentiment_counts = defaultdict(int)
41
+ dominant_sentiment_label = "Neutral 😐" # Default
42
+
43
+ if not text_to_analyze or not text_to_analyze.strip():
44
+ return {"label": dominant_sentiment_label, "counts": sentiment_counts}
45
+
46
+ try:
47
+ # Truncate to avoid issues with very long texts for the model
48
+ analysis_result = sentiment_pipeline(str(text_to_analyze)[:512])
49
+ label = analysis_result[0]['label'].upper()
50
+
51
+ if label in ['POSITIVE', 'VERY POSITIVE']:
52
+ dominant_sentiment_label = 'Positive 👍'
53
+ sentiment_counts['Positive 👍'] += 1
54
+ elif label in ['NEGATIVE', 'VERY NEGATIVE']:
55
+ dominant_sentiment_label = 'Negative 👎'
56
+ sentiment_counts['Negative 👎'] += 1
57
+ elif label == 'NEUTRAL':
58
+ dominant_sentiment_label = 'Neutral 😐' # Already default, but for clarity
59
+ sentiment_counts['Neutral 😐'] += 1
60
+ else:
61
+ dominant_sentiment_label = 'Unknown' # Catch any other labels from the model
62
+ sentiment_counts['Unknown'] += 1
63
+
64
+ except Exception as e:
65
+ # Log the error with more context if possible
66
+ logging.error(f"Sentiment analysis failed for text snippet '{str(text_to_analyze)[:50]}...'. Error: {e}")
67
+ sentiment_counts['Error'] += 1
68
+ dominant_sentiment_label = "Error" # Indicate error in sentiment
69
+
70
+ return {"label": dominant_sentiment_label, "counts": sentiment_counts}
71
+
72
+
73
+ # --- Post Retrieval Functions ---
74
  def fetch_linkedin_posts_core(comm_client_id, community_token, org_urn, count):
75
  """
76
  Fetches raw posts, their basic statistics, and performs summarization/categorization.
77
+ Does NOT fetch comments or analyze sentiment of comments here.
78
  """
79
  token_dict = community_token if isinstance(community_token, dict) else {'access_token': community_token, 'token_type': 'Bearer'}
80
  session = create_session(comm_client_id, token=token_dict)
81
+ session.headers.update({
82
+ "X-Restli-Protocol-Version": "2.0.0",
83
+ "LinkedIn-Version": "202402"
84
+ })
85
 
86
  posts_url = f"{API_REST_BASE}/posts?author={org_urn}&q=author&count={count}&sortBy=LAST_MODIFIED"
87
  logging.info(f"Fetching posts from URL: {posts_url}")
 
92
  logging.info(f"Fetched {len(raw_posts_api)} raw posts from API.")
93
  except requests.exceptions.RequestException as e:
94
  status = getattr(e.response, 'status_code', 'N/A')
95
+ text = getattr(e.response, 'text', 'No response text')
96
+ logging.error(f"Failed to fetch posts (Status: {status}): {e}. Response: {text}")
97
  raise ValueError(f"Failed to fetch posts (Status: {status})") from e
98
+ except json.JSONDecodeError as e:
99
+ logging.error(f"Failed to decode JSON from posts response: {e}. Response text: {resp.text if resp else 'No response object'}")
100
+ raise ValueError("Failed to decode JSON from posts response") from e
101
 
102
  if not raw_posts_api:
103
  logging.info("No raw posts found.")
104
+ return [], {}, "DefaultOrgName"
105
 
 
 
106
  post_urns_for_stats = [p["id"] for p in raw_posts_api if p.get("id")]
107
 
 
 
108
  post_texts_for_nlp = []
109
  for p in raw_posts_api:
110
  text_content = p.get("commentary") or \
 
112
  "[No text content]"
113
  post_texts_for_nlp.append({"text": text_content, "id": p.get("id")})
114
 
115
+ logging.info(f"Prepared {len(post_texts_for_nlp)} posts for NLP (summarization/classification).")
116
+ if 'batch_summarize_and_classify' in globals():
117
+ structured_results_list = batch_summarize_and_classify(post_texts_for_nlp)
118
+ else:
119
+ logging.warning("batch_summarize_and_classify not found, using fallback.")
120
+ structured_results_list = [{"id": p["id"], "summary": "N/A", "category": "N/A"} for p in post_texts_for_nlp]
121
 
122
+ structured_results_map = {res["id"]: res for res in structured_results_list if "id" in res}
123
 
 
124
  stats_map = {}
125
  if post_urns_for_stats:
126
+ batch_size_stats = 20
127
+ for i in range(0, len(post_urns_for_stats), batch_size_stats):
128
+ batch_urns = post_urns_for_stats[i:i+batch_size_stats]
129
  params = {'q': 'organizationalEntity', 'organizationalEntity': org_urn}
130
+ share_idx = 0
131
+ ugc_idx = 0
132
+ for urn_str in batch_urns:
133
+ if ":share:" in urn_str:
134
+ params[f"shares[{share_idx}]"] = urn_str
135
+ share_idx += 1
136
+ elif ":ugcPost:" in urn_str:
137
+ params[f"ugcPosts[{ugc_idx}]"] = urn_str
138
+ ugc_idx += 1
139
+ else:
140
+ logging.warning(f"URN {urn_str} is not a recognized share or ugcPost type for stats. Skipping.")
141
+ continue
142
 
143
+ if not share_idx and not ugc_idx:
144
+ continue
145
+
146
  try:
147
+ logging.info(f"Fetching stats for batch of {len(batch_urns)} URNs starting with URN: {batch_urns[0]}")
148
  stat_resp = session.get(f"{API_REST_BASE}/organizationalEntityShareStatistics", params=params)
149
  stat_resp.raise_for_status()
150
+ stats_data = stat_resp.json()
151
+ for urn_key, stat_element_values in stats_data.get("results", {}).items():
152
+ stats_map[urn_key] = stat_element_values.get("totalShareStatistics", {})
153
+
154
+ if stats_data.get("errors"):
155
+ for urn_errored, error_detail in stats_data.get("errors", {}).items():
156
+ logging.warning(f"Error fetching stats for URN {urn_errored}: {error_detail.get('message', 'Unknown error')}")
157
+
158
+ logging.info(f"Successfully processed stats response for {len(batch_urns)} URNs. Current stats_map size: {len(stats_map)}")
159
  except requests.exceptions.RequestException as e:
160
+ status_code = getattr(e.response, 'status_code', 'N/A')
161
+ response_text = getattr(e.response, 'text', 'No response text')
162
+ logging.warning(f"Failed to fetch stats for a batch (Status: {status_code}): {e}. Params: {params}. Response: {response_text}")
163
  except json.JSONDecodeError as e:
164
  logging.warning(f"Failed to decode JSON from stats response: {e}. Response: {stat_resp.text if stat_resp else 'No response text'}")
165
 
 
166
  processed_raw_posts = []
167
  for p in raw_posts_api:
168
  post_id = p.get("id")
 
174
  p.get("specificContent", {}).get("com.linkedin.ugc.ShareContent", {}).get("shareCommentaryV2", {}).get("text", "") or \
175
  "[No text content]"
176
 
177
+ timestamp = p.get("publishedAt") or p.get("createdAt") or p.get("firstPublishedAt")
178
  published_at_iso = datetime.fromtimestamp(timestamp / 1000).isoformat() if timestamp else None
179
 
180
  structured_res = structured_results_map.get(post_id, {"summary": "N/A", "category": "N/A"})
 
186
  "category": structured_res["category"],
187
  "published_at_timestamp": timestamp,
188
  "published_at_iso": published_at_iso,
189
+ "organization_urn": p.get("author", f"urn:li:organization:{org_urn.split(':')[-1]}"),
190
+ "is_ad": 'adContext' in p,
191
+ "media_category": p.get("content",{}).get("com.linkedin.voyager.feed.render.LinkedInVideoComponent",{}).get("mediaCategory") or \
192
+ p.get("content",{}).get("com.linkedin.voyager.feed.render.ImageComponent",{}).get("mediaCategory") or \
193
+ p.get("content",{}).get("com.linkedin.voyager.feed.render.ArticleComponent",{}).get("mediaCategory") or "NONE"
194
  })
195
  logging.info(f"Processed {len(processed_raw_posts)} posts with core data.")
196
+ return processed_raw_posts, stats_map, "DefaultOrgName"
197
 
198
 
199
+ def fetch_comments(comm_client_id, community_token, post_urns, stats_map):
200
  """
201
  Fetches comments for a list of post URNs.
202
  Uses stats_map to potentially skip posts with 0 comments.
203
  """
204
+ token_dict = community_token if isinstance(community_token, dict) else {'access_token': community_token, 'token_type': 'Bearer'}
205
+ linkedin_session = create_session(comm_client_id, token=token_dict)
206
+ linkedin_session.headers.update({
207
+ 'LinkedIn-Version': "202402",
208
+ "X-Restli-Protocol-Version": "2.0.0"
209
+ })
 
210
 
211
  all_comments_by_post = {}
212
  logging.info(f"Fetching comments for {len(post_urns)} posts.")
213
 
214
  for post_urn in post_urns:
215
+ post_stats = stats_map.get(post_urn, {})
216
+ comment_count_from_stats = post_stats.get("commentSummary", {}).get("totalComments", post_stats.get('commentCount', 0))
217
+
218
+ if comment_count_from_stats == 0:
219
  logging.info(f"Skipping comment fetch for {post_urn} as commentCount is 0 in stats_map.")
220
  all_comments_by_post[post_urn] = []
221
  continue
222
 
223
  try:
224
+ encoded_post_urn = quote(post_urn, safe='')
225
+ url = f"{API_REST_BASE}/comments?q=entity&entityUrn={encoded_post_urn}&sortOrder=CHRONOLOGICAL"
226
+
 
227
  logging.debug(f"Fetching comments from URL: {url} for post URN: {post_urn}")
228
  response = linkedin_session.get(url)
229
 
230
  if response.status_code == 200:
231
  elements = response.json().get('elements', [])
232
+ comments_texts = []
233
+ for c in elements:
234
+ comment_text = c.get('message', {}).get('text')
235
+ if comment_text:
236
+ comments_texts.append(comment_text)
237
  all_comments_by_post[post_urn] = comments_texts
238
  logging.info(f"Fetched {len(comments_texts)} comments for {post_urn}.")
239
+ elif response.status_code == 403:
240
+ logging.warning(f"Forbidden (403) to fetch comments for {post_urn}. URL: {url}. Response: {response.text}. Check permissions or API version.")
241
  all_comments_by_post[post_urn] = []
242
+ elif response.status_code == 404:
243
  logging.warning(f"Comments not found (404) for {post_urn}. URL: {url}. Response: {response.text}")
244
  all_comments_by_post[post_urn] = []
245
  else:
246
+ logging.error(f"Error fetching comments for {post_urn}. Status: {response.status_code}. URL: {url}. Response: {response.text}")
247
  all_comments_by_post[post_urn] = []
248
  except requests.exceptions.RequestException as e:
249
  logging.error(f"RequestException fetching comments for {post_urn}: {e}")
250
  all_comments_by_post[post_urn] = []
251
+ except json.JSONDecodeError as e:
252
+ logging.error(f"JSONDecodeError fetching comments for {post_urn}. Response: {response.text if 'response' in locals() else 'N/A'}. Error: {e}")
253
+ all_comments_by_post[post_urn] = []
254
+ except Exception as e:
255
  logging.error(f"Unexpected error fetching comments for {post_urn}: {e}")
256
  all_comments_by_post[post_urn] = []
257
 
 
259
 
260
  def analyze_sentiment(all_comments_data):
261
  """
262
+ Analyzes sentiment for comments grouped by post_urn using the helper function.
263
  all_comments_data is a dict: {post_urn: [comment_text_1, comment_text_2,...]}
264
+ Returns a dict: {post_urn: {"sentiment": "DominantOverallSentiment", "percentage": X.X, "details": {aggregated_counts}}}
265
  """
266
  results_by_post = {}
267
+ logging.info(f"Analyzing aggregated sentiment for comments from {len(all_comments_data)} posts.")
268
  for post_urn, comments_list in all_comments_data.items():
269
+ aggregated_sentiment_counts = defaultdict(int)
270
  total_valid_comments_for_post = 0
271
 
272
  if not comments_list:
273
+ results_by_post[post_urn] = {"sentiment": "Neutral 😐", "percentage": 0.0, "details": dict(aggregated_sentiment_counts)}
274
  continue
275
 
276
  for comment_text in comments_list:
277
+ if not comment_text or not comment_text.strip():
278
  continue
279
+
280
+ # Use the helper for individual comment sentiment
281
+ single_comment_sentiment = _get_sentiment_from_text(comment_text)
282
+
283
+ # Aggregate counts
284
+ for label, count in single_comment_sentiment["counts"].items():
285
+ aggregated_sentiment_counts[label] += count
286
+
287
+ if single_comment_sentiment["label"] != "Error": # Count valid analyses
288
+ total_valid_comments_for_post += 1
289
 
290
+ dominant_overall_sentiment = "Neutral 😐" # Default
291
+ percentage = 0.0
292
+
293
  if total_valid_comments_for_post > 0:
294
+ # Determine dominant sentiment from aggregated_sentiment_counts
295
+ # Exclude 'Error' from being a dominant sentiment unless it's the only category with counts
296
+ valid_sentiments = {k: v for k, v in aggregated_sentiment_counts.items() if k != 'Error' and v > 0}
297
+ if not valid_sentiments:
298
+ dominant_overall_sentiment = 'Error' if aggregated_sentiment_counts['Error'] > 0 else 'Neutral 😐'
299
+ else:
300
+ # Simple max count logic for dominance
301
+ dominant_overall_sentiment = max(valid_sentiments, key=valid_sentiments.get)
302
+
303
+ if dominant_overall_sentiment != 'Error':
304
+ percentage = round((aggregated_sentiment_counts[dominant_overall_sentiment] / total_valid_comments_for_post) * 100, 1)
305
+ else: # if dominant is 'Error' or only errors were found
306
+ percentage = 0.0
307
+ elif aggregated_sentiment_counts['Error'] > 0 : # No valid comments, but errors occurred
308
+ dominant_overall_sentiment = 'Error'
309
+
310
 
311
  results_by_post[post_urn] = {
312
+ "sentiment": dominant_overall_sentiment,
313
  "percentage": percentage,
314
+ "details": dict(aggregated_sentiment_counts) # Store aggregated counts
315
  }
316
+ logging.debug(f"Aggregated sentiment for post {post_urn}: {results_by_post[post_urn]}")
317
+
318
  return results_by_post
319
 
320
 
321
  def compile_detailed_posts(processed_raw_posts, stats_map, sentiments_per_post):
322
  """
323
+ Combines processed raw post data with their statistics and overall comment sentiment.
324
  """
325
  detailed_post_list = []
326
  logging.info(f"Compiling detailed data for {len(processed_raw_posts)} posts.")
 
329
  stats = stats_map.get(post_id, {})
330
 
331
  likes = stats.get("likeCount", 0)
332
+ comments_stat_count = stats.get("commentSummary", {}).get("totalComments", stats.get("commentCount", 0))
 
 
333
 
334
  clicks = stats.get("clickCount", 0)
335
  shares = stats.get("shareCount", 0)
336
  impressions = stats.get("impressionCount", 0)
337
+ unique_impressions = stats.get("uniqueImpressionsCount", stats.get("impressionCount", 0))
338
 
 
 
339
  engagement_numerator = likes + comments_stat_count + clicks + shares
340
+ engagement_rate = (engagement_numerator / impressions * 100) if impressions and impressions > 0 else 0.0
341
 
342
+ sentiment_info = sentiments_per_post.get(post_id, {"sentiment": "Neutral 😐", "percentage": 0.0, "details": {}})
343
 
 
344
  display_text = html.escape(proc_post["raw_text"][:250]).replace("\n", "<br>") + \
345
  ("..." if len(proc_post["raw_text"]) > 250 else "")
346
 
 
350
  detailed_post_list.append({
351
  "id": post_id,
352
  "when": when_formatted,
353
+ "text_for_display": display_text,
354
+ "raw_text": proc_post["raw_text"],
355
  "likes": likes,
356
+ "comments_stat_count": comments_stat_count,
357
  "clicks": clicks,
358
  "shares": shares,
359
  "impressions": impressions,
360
  "uniqueImpressionsCount": unique_impressions,
361
+ "engagement": f"{engagement_rate:.2f}%",
362
+ "engagement_raw": engagement_rate,
363
  "sentiment": sentiment_info["sentiment"],
364
  "sentiment_percent": sentiment_info["percentage"],
365
+ "sentiment_details": sentiment_info.get("details", {}),
366
  "summary": proc_post["summary"],
367
  "category": proc_post["category"],
368
  "organization_urn": proc_post["organization_urn"],
369
+ "is_ad": proc_post["is_ad"],
370
+ "media_category": proc_post.get("media_category", "NONE"),
371
+ "published_at": proc_post["published_at_iso"]
372
  })
373
  logging.info(f"Compiled {len(detailed_post_list)} detailed posts.")
374
  return detailed_post_list
 
382
  """
383
  li_posts = []
384
  li_post_stats = []
385
+ li_post_comments = []
386
+ logging.info("Preparing posts data for Bubble.")
387
+
388
+ if not detailed_posts:
389
+ logging.warning("No detailed posts to prepare for Bubble.")
390
+ return [], [], []
391
+
392
+ org_urn_default = detailed_posts[0]["organization_urn"] if detailed_posts else "urn:li:organization:UNKNOWN"
393
+
394
  for post_data in detailed_posts:
 
395
  li_posts.append({
396
  "organization_urn": post_data["organization_urn"],
397
+ "id": post_data["id"],
398
+ "is_ad": post_data["is_ad"],
399
+ "media_category": post_data.get("media_category", "NONE"),
400
+ "published_at": post_data["published_at"],
401
+ "sentiment": post_data["sentiment"],
402
+ "text": post_data["raw_text"],
403
+ "summary_text": post_data["summary"],
404
+ "li_eb_label": post_data["category"]
 
405
  })
406
 
 
407
  li_post_stats.append({
408
  "clickCount": post_data["clicks"],
409
+ "commentCount": post_data["comments_stat_count"],
410
+ "engagement": post_data["engagement_raw"],
411
  "impressionCount": post_data["impressions"],
412
  "likeCount": post_data["likes"],
413
  "shareCount": post_data["shares"],
414
  "uniqueImpressionsCount": post_data["uniqueImpressionsCount"],
415
+ "post_id": post_data["id"],
416
+ "organization_urn": post_data["organization_urn"]
417
  })
418
 
 
 
419
  for post_urn, comments_text_list in all_actual_comments_data.items():
420
+ current_post_org_urn = org_urn_default
421
+ for p in detailed_posts:
422
+ if p["id"] == post_urn:
423
+ current_post_org_urn = p["organization_urn"]
424
+ break
425
+
426
  for single_comment_text in comments_text_list:
427
+ if single_comment_text and single_comment_text.strip():
428
  li_post_comments.append({
429
  "comment_text": single_comment_text,
430
+ "post_id": post_urn,
431
+ "organization_urn": current_post_org_urn
 
432
  })
433
 
434
  logging.info(f"Prepared {len(li_posts)} posts, {len(li_post_stats)} stats entries, and {len(li_post_comments)} comments for Bubble.")
435
+ return li_posts, li_post_stats, li_post_comments
436
+
437
+ # --- Mentions Retrieval Functions ---
438
+
439
+ def fetch_linkedin_mentions_core(comm_client_id, community_token, org_urn, count=20):
440
+ """
441
+ Fetches raw mention notifications and the details of the posts where the organization was mentioned.
442
+ Returns a list of processed mention data (internal structure).
443
+ """
444
+ token_dict = community_token if isinstance(community_token, dict) else {'access_token': community_token, 'token_type': 'Bearer'}
445
+ session = create_session(comm_client_id, token=token_dict)
446
+ session.headers.update({
447
+ "X-Restli-Protocol-Version": "2.0.0",
448
+ "LinkedIn-Version": "202402"
449
+ })
450
+
451
+ encoded_org_urn = quote(org_urn, safe='')
452
+
453
+ notifications_url_base = (
454
+ f"{API_REST_BASE}/organizationalEntityNotifications"
455
+ f"?q=criteria"
456
+ f"&actions=List(SHARE_MENTION)"
457
+ f"&organizationalEntity={encoded_org_urn}"
458
+ f"&count={count}"
459
+ )
460
+
461
+ all_notifications = []
462
+ start_index = 0
463
+ processed_mentions_internal = []
464
+ page_count = 0
465
+ max_pages = 10
466
+
467
+ while page_count < max_pages:
468
+ current_url = f"{notifications_url_base}&start={start_index}"
469
+ logging.info(f"Fetching notifications page {page_count + 1} from URL: {current_url}")
470
+ try:
471
+ resp = session.get(current_url)
472
+ resp.raise_for_status()
473
+ data = resp.json()
474
+ elements = data.get("elements", [])
475
+
476
+ if not elements:
477
+ logging.info(f"No more notifications found on page {page_count + 1}. Total notifications fetched: {len(all_notifications)}.")
478
+ break
479
+
480
+ all_notifications.extend(elements)
481
+
482
+ paging = data.get("paging", {})
483
+ if 'start' not in paging or 'count' not in paging or len(elements) < paging.get('count', count):
484
+ logging.info(f"Last page of notifications fetched. Total notifications: {len(all_notifications)}.")
485
+ break
486
+
487
+ start_index = paging['start'] + paging['count']
488
+ page_count += 1
489
+
490
+ except requests.exceptions.RequestException as e:
491
+ status = getattr(e.response, 'status_code', 'N/A')
492
+ text = getattr(e.response, 'text', 'No response text')
493
+ logging.error(f"Failed to fetch notifications (Status: {status}): {e}. Response: {text}")
494
+ break
495
+ except json.JSONDecodeError as e:
496
+ logging.error(f"Failed to decode JSON from notifications response: {e}. Response: {resp.text if resp else 'No resp obj'}")
497
+ break
498
+ if page_count >= max_pages:
499
+ logging.info(f"Reached max_pages ({max_pages}) for fetching notifications.")
500
+ break
501
+
502
+ if not all_notifications:
503
+ logging.info("No mention notifications found after fetching.")
504
+ return []
505
+
506
+ mention_share_urns = list(set([
507
+ n.get("generatedActivity") for n in all_notifications
508
+ if n.get("action") == "SHARE_MENTION" and n.get("generatedActivity")
509
+ ]))
510
+
511
+ logging.info(f"Found {len(mention_share_urns)} unique share URNs from SHARE_MENTION notifications.")
512
+
513
+ for share_urn in mention_share_urns:
514
+ encoded_share_urn = quote(share_urn, safe='')
515
+ post_detail_url = f"{API_REST_BASE}/posts/{encoded_share_urn}"
516
+ logging.info(f"Fetching details for mentioned post: {post_detail_url}")
517
+ try:
518
+ post_resp = session.get(post_detail_url)
519
+ post_resp.raise_for_status()
520
+ post_data = post_resp.json()
521
+
522
+ commentary_raw = post_data.get("commentary")
523
+ if not commentary_raw and "specificContent" in post_data:
524
+ share_content = post_data.get("specificContent", {}).get("com.linkedin.ugc.ShareContent", {})
525
+ commentary_raw = share_content.get("shareCommentaryV2", {}).get("text", "")
526
+
527
+ if not commentary_raw:
528
+ logging.warning(f"No commentary found for share URN {share_urn}. Skipping.")
529
+ continue
530
+
531
+ mention_text_cleaned = extract_text_from_mention_commentary(commentary_raw)
532
+ timestamp = post_data.get("publishedAt") or post_data.get("createdAt") or post_data.get("firstPublishedAt")
533
+ published_at_iso = datetime.fromtimestamp(timestamp / 1000).isoformat() if timestamp else None
534
+ author_urn = post_data.get("author", "urn:li:unknown")
535
+
536
+ processed_mentions_internal.append({
537
+ "mention_id": f"mention_{share_urn}",
538
+ "share_urn": share_urn,
539
+ "mention_text_raw": commentary_raw,
540
+ "mention_text_cleaned": mention_text_cleaned,
541
+ "published_at_timestamp": timestamp,
542
+ "published_at_iso": published_at_iso,
543
+ "mentioned_by_author_urn": author_urn,
544
+ "organization_urn_mentioned": org_urn
545
+ })
546
+ except requests.exceptions.RequestException as e:
547
+ status = getattr(e.response, 'status_code', 'N/A')
548
+ text = getattr(e.response, 'text', 'No response text')
549
+ logging.warning(f"Failed to fetch post details for share URN {share_urn} (Status: {status}): {e}. Response: {text}")
550
+ except json.JSONDecodeError as e:
551
+ logging.warning(f"Failed to decode JSON for post details {share_urn}: {e}. Response: {post_resp.text if post_resp else 'No resp obj'}")
552
+
553
+ logging.info(f"Processed {len(processed_mentions_internal)} mentions with their post details.")
554
+ return processed_mentions_internal
555
+
556
+
557
+ def analyze_mentions_sentiment(processed_mentions_list):
558
+ """
559
+ Analyzes sentiment for the text of each processed mention using the helper function.
560
+ Input: list of processed_mention dicts (internal structure from fetch_linkedin_mentions_core).
561
+ Returns: a dict {mention_id: {"sentiment_label": "DominantSentiment", "percentage": 100.0, "details": {counts}}}
562
+ """
563
+ mention_sentiments_map = {}
564
+ logging.info(f"Analyzing individual sentiment for {len(processed_mentions_list)} mentions.")
565
+
566
+ for mention_data in processed_mentions_list:
567
+ mention_internal_id = mention_data["mention_id"] # Internal ID from fetch_linkedin_mentions_core
568
+ text_to_analyze = mention_data.get("mention_text_cleaned", "")
569
+
570
+ sentiment_result = _get_sentiment_from_text(text_to_analyze)
571
+
572
+ # For single text, percentage is 100% for the dominant label if not error
573
+ percentage = 0.0
574
+ if sentiment_result["label"] != "Error" and any(sentiment_result["counts"].values()):
575
+ percentage = 100.0
576
+
577
+ mention_sentiments_map[mention_internal_id] = {
578
+ "sentiment_label": sentiment_result["label"], # The dominant sentiment label
579
+ "percentage": percentage,
580
+ "details": dict(sentiment_result["counts"]) # Raw counts for this specific mention
581
+ }
582
+ logging.debug(f"Individual sentiment for mention {mention_internal_id}: {mention_sentiments_map[mention_internal_id]}")
583
+
584
+ return mention_sentiments_map
585
+
586
+
587
+ def compile_detailed_mentions(processed_mentions_list, mention_sentiments_map):
588
+ """
589
+ Combines processed mention data (internal structure) with their sentiment analysis
590
+ into the user-specified output format.
591
+ processed_mentions_list: list of dicts from fetch_linkedin_mentions_core
592
+ mention_sentiments_map: dict from analyze_mentions_sentiment, keyed by "mention_id" (internal)
593
+ and contains "sentiment_label".
594
+ """
595
+ detailed_mentions_output = []
596
+ logging.info(f"Compiling detailed data for {len(processed_mentions_list)} mentions into specified format.")
597
+
598
+ for mention_core_data in processed_mentions_list:
599
+ mention_internal_id = mention_core_data["mention_id"]
600
+ sentiment_info = mention_sentiments_map.get(mention_internal_id, {"sentiment_label": "Neutral 😐"})
601
+
602
+ date_formatted = "Unknown"
603
+ if mention_core_data["published_at_timestamp"]:
604
+ try:
605
+ date_formatted = datetime.fromtimestamp(mention_core_data["published_at_timestamp"] / 1000).strftime("%Y-%m-%d %H:%M")
606
+ except TypeError:
607
+ logging.warning(f"Could not format timestamp for mention_id {mention_internal_id}")
608
+
609
+ detailed_mentions_output.append({
610
+ "date": date_formatted, # User-specified field name
611
+ "id": mention_core_data["share_urn"], # User-specified field name (URN of the post with mention)
612
+ "mention_text": mention_core_data["mention_text_cleaned"], # User-specified field name
613
+ "organization_urn": mention_core_data["organization_urn_mentioned"], # User-specified field name
614
+ "sentiment_label": sentiment_info["sentiment_label"] # User-specified field name
615
+ })
616
+ logging.info(f"Compiled {len(detailed_mentions_output)} detailed mentions with specified fields.")
617
+ return detailed_mentions_output
618
+
619
+
620
+ def prepare_mentions_for_bubble(compiled_detailed_mentions_list):
621
+ """
622
+ Prepares mention data for uploading to a Bubble table.
623
+ The input `compiled_detailed_mentions_list` is already in the user-specified format:
624
+ [{"date": ..., "id": ..., "mention_text": ..., "organization_urn": ..., "sentiment_label": ...}, ...]
625
+ This function directly uses these fields as per user's selection for Bubble upload.
626
+ """
627
+ li_mentions_bubble = []
628
+ logging.info(f"Preparing {len(compiled_detailed_mentions_list)} compiled mentions for Bubble upload.")
629
+
630
+ if not compiled_detailed_mentions_list:
631
+ return []
632
+
633
+ for mention_data in compiled_detailed_mentions_list:
634
+ # The mention_data dictionary already has the keys:
635
+ # "date", "id", "mention_text", "organization_urn", "sentiment_label"
636
+ # These are used directly for the Bubble upload list.
637
+ li_mentions_bubble.append({
638
+ "date": mention_data["date"],
639
+ "id": mention_data["id"],
640
+ "mention_text": mention_data["mention_text"],
641
+ "organization_urn": mention_data["organization_urn"],
642
+ "sentiment_label": mention_data["sentiment_label"]
643
+ # If Bubble table has different field names, mapping would be done here.
644
+ # Example: "bubble_mention_date": mention_data["date"],
645
+ # For now, using direct mapping as per user's selected code for the append.
646
+ })
647
+
648
+ logging.info(f"Prepared {len(li_mentions_bubble)} mention entries for Bubble, using direct field names from compiled data.")
649
+ return li_mentions_bubble
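
For reference, a minimal sketch of how the updated post and mention pipelines might be chained end to end. This is an assumption-laden usage example, not part of the commit: it presumes the file is importable as a module, that `sessions.create_session` and `batch_summarize_and_classify` are available in the surrounding app, and the client id, token, and organization URN below are placeholders.

```python
# Hypothetical driver script; credential values are placeholders.
from Linkedin_Data_API_Calls import (
    fetch_linkedin_posts_core, fetch_comments, analyze_sentiment,
    compile_detailed_posts, prepare_data_for_bubble,
    fetch_linkedin_mentions_core, analyze_mentions_sentiment,
    compile_detailed_mentions, prepare_mentions_for_bubble,
)

CLIENT_ID = "community-client-id"         # placeholder
TOKEN = "oauth-access-token"              # placeholder; a token dict also works
ORG_URN = "urn:li:organization:123456"    # placeholder

# Posts pipeline: posts + stats -> comments -> comment sentiment -> compiled rows for Bubble.
posts, stats_map, org_name = fetch_linkedin_posts_core(CLIENT_ID, TOKEN, ORG_URN, count=20)
post_urns = [p["id"] for p in posts]
comments_by_post = fetch_comments(CLIENT_ID, TOKEN, post_urns, stats_map)
sentiments_per_post = analyze_sentiment(comments_by_post)
detailed_posts = compile_detailed_posts(posts, stats_map, sentiments_per_post)
li_posts, li_post_stats, li_post_comments = prepare_data_for_bubble(detailed_posts, comments_by_post)

# Mentions pipeline: mention notifications -> per-mention sentiment -> compiled rows for Bubble.
mentions = fetch_linkedin_mentions_core(CLIENT_ID, TOKEN, ORG_URN, count=20)
mention_sentiments = analyze_mentions_sentiment(mentions)
detailed_mentions = compile_detailed_mentions(mentions, mention_sentiments)
li_mentions = prepare_mentions_for_bubble(detailed_mentions)
```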