import logging
import re
import time
from datetime import datetime
from urllib.parse import quote

from matplotlib.figure import Figure
from transformers import pipeline

from sessions import create_session

logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')


# Load transformer-based sentiment model globally
sentiment_pipeline = pipeline("text-classification", model="tabularisai/multilingual-sentiment-analysis")

def extract_text_from_commentary(commentary):
    # Drop curly-brace annotation tokens (e.g. templated hashtags/mentions) and trim whitespace.
    return re.sub(r"{.*?}", "", commentary).strip()
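
# Illustrative example (the {hashtag|...} template below is an assumed LinkedIn annotation format;
# the regex simply removes any {...} token, whatever it contains):
#   extract_text_from_commentary("Big news {hashtag|#launch|} today")  ->  "Big news  today"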

def classify_sentiment(text):
    try:
        # Truncate to 512 characters as a rough guard against the model's input-length limit.
        result = sentiment_pipeline(text[:512])
        label = result[0]['label'].upper()
        if label in ['POSITIVE', 'VERY POSITIVE']:
            return 'Positive 👍'
        elif label in ['NEGATIVE', 'VERY NEGATIVE']:
            return 'Negative 👎'
        elif label == 'NEUTRAL':
            return 'Neutral 😐'
        else:
            return 'Unknown'
    except Exception as e:
        logging.error(f"Sentiment classification failed: {e}")
        return 'Error'
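
# Illustrative behavior (assuming the model's five-class label set; the score value is made up):
#   sentiment_pipeline("Great launch!")  ->  [{'label': 'Very Positive', 'score': 0.93}]
#   classify_sentiment("Great launch!")  ->  'Positive 👍'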

def generate_mentions_dashboard(comm_client_id, comm_token_dict):
    org_urn = "urn:li:organization:19010008"
    encoded_urn = quote(org_urn, safe='')

    session = create_session(comm_client_id, token=comm_token_dict)
    session.headers.update({
        "X-Restli-Protocol-Version": "2.0.0"
    })

    base_url = (
        "https://api.linkedin.com/rest/organizationalEntityNotifications"
        "?q=criteria"
        "&actions=List(COMMENT,SHARE_MENTION)"
        f"&organizationalEntity={encoded_urn}"
        "&count=20"
    )
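
    # Expected response shape (inferred from the fields this code reads below):
    #   {"elements": [{"action": "SHARE_MENTION", "generatedActivity": "urn:li:share:..."}, ...],
    #    "paging": {"start": 0, "count": 20}}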

    all_notifications = []
    start = 0
    while True:
        url = f"{base_url}&start={start}"
        resp = session.get(url)
        
        if resp.status_code != 200:
            logging.error(f"❌ Error fetching notifications: {resp.status_code} - {resp.text}")
            break
            
        data = resp.json()
        elements = data.get("elements", [])
        all_notifications.extend(elements)

        # A short page (fewer elements than the requested page size) marks the last page.
        if len(elements) < data.get("paging", {}).get("count", 0):
            break

        start += len(elements)
        time.sleep(0.5)  # brief pause between pages to stay under rate limits

    mention_shares = [e.get("generatedActivity") for e in all_notifications if e.get("action") == "SHARE_MENTION"]
    mention_data = []

    logging.info(f"Fetched {len(all_notifications)} total notifications.")



    for share_urn in mention_shares:
        if not share_urn:
            continue
        encoded_share_urn = quote(share_urn, safe='')
        share_url = f"https://api.linkedin.com/rest/posts/{encoded_share_urn}"
        response = session.get(share_url)

        if response.status_code != 200:
            logging.debug(f"Skipping share {share_urn}: HTTP {response.status_code}")
            continue

        post = response.json()
        commentary_raw = post.get("commentary", "")
        if not commentary_raw:
            continue

        commentary = extract_text_from_commentary(commentary_raw)
        sentiment_label = classify_sentiment(commentary)
        timestamp = post.get("createdAt", 0)  # epoch milliseconds
        dt = datetime.fromtimestamp(timestamp / 1000.0)

        mention_data.append({
            "date": dt,
            "text": commentary,
            "sentiment": sentiment_label
        })

    # --- HTML rendering ---
    html_parts = [
        "<h2 style='text-align:center;'>πŸ“£ Mentions Sentiment Dashboard</h2>"
    ]

    for mention in mention_data:
        short_text = (mention["text"][:200] + "…") if len(mention["text"]) > 200 else mention["text"]
        html_parts.append(f"""
            <div style='border:1px solid #ddd; border-radius:12px; padding:15px; margin:15px; box-shadow:2px 2px 8px rgba(0,0,0,0.05); background:#fafafa;'>
                <p><strong>πŸ“… Date:</strong> {mention["date"].strftime('%Y-%m-%d')}</p>
                <p style='color:#333;'>{short_text}</p>
                <p><strong>Sentiment:</strong> {mention["sentiment"]}</p>
            </div>
        """)

    html_content = "\n".join(html_parts)

    # --- Plotting ---
    fig = Figure(figsize=(12, 6))
    ax = fig.subplots()
    fig.subplots_adjust(bottom=0.2)

    if mention_data:
        # Sort by date
        mention_data.sort(key=lambda x: x["date"])

        date_labels = [m["date"].strftime('%Y-%m-%d') for m in mention_data]
        sentiment_scores = [1 if m["sentiment"] == "Positive πŸ‘" else
                            -1 if m["sentiment"] == "Negative πŸ‘Ž" else
                            0 for m in mention_data]

        ax.plot(date_labels, sentiment_scores, marker='o', linestyle='-', color='#0073b1')
        ax.set_title("πŸ“Š Mention Sentiment Over Time")
        ax.set_xlabel("Date")
        ax.set_ylabel("Sentiment Score (1=πŸ‘, 0=😐, -1=πŸ‘Ž)")
        ax.tick_params(axis='x', rotation=45)
        ax.grid(True, linestyle='--', alpha=0.6)
        ax.set_ylim([-1.2, 1.2])
    else:
        ax.text(0.5, 0.5, "No mention sentiment data available.", 
                ha='center', va='center', transform=ax.transAxes, fontsize=12, color='grey')
        ax.set_xticks([])
        ax.set_yticks([])
        ax.set_title("πŸ“Š Mention Sentiment Over Time")

    return html_content, fig, mention_data
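

# Minimal usage sketch (illustrative only): the credential values below are placeholders, and the
# exact token structure expected by sessions.create_session is an assumption, not a documented API.
if __name__ == "__main__":
    comm_client_id = "YOUR_CLIENT_ID"                          # placeholder
    comm_token_dict = {"access_token": "YOUR_ACCESS_TOKEN"}    # placeholder / assumed shape

    html, fig, mentions = generate_mentions_dashboard(comm_client_id, comm_token_dict)

    # Persist the rendered HTML and the sentiment-over-time chart to disk.
    with open("mentions_dashboard.html", "w", encoding="utf-8") as f:
        f.write(html)
    fig.savefig("mentions_sentiment.png", bbox_inches="tight")
    logging.info(f"Rendered dashboard with {len(mentions)} mentions.")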