import os
import json
import random
from datetime import datetime

import groq
import pandas as pd
from dotenv import load_dotenv
from PyPDF2 import PdfReader
from sklearn.decomposition import NMF
from sklearn.feature_extraction.text import TfidfVectorizer

class TweetDatasetProcessor:
    def __init__(self):
        load_dotenv()
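        # The API key is read from the `Groq_api` entry in the local .env file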
        self.groq_client = groq.Groq(api_key=os.getenv('Groq_api'))
        self.tweets = []
        self.personality_profile = {}

    def extract_text_from_pdf(self, pdf_path):
        """Extract text content from a PDF file, page by page"""
        reader = PdfReader(pdf_path)
        pages = []
        for page in reader.pages:
            # extract_text() can yield an empty result for image-only pages
            pages.append(page.extract_text() or "")
        # Join with newlines so lines at page boundaries are not merged
        return "\n".join(pages)

    def process_pdf_content(self, text):
        """Process PDF content and extract tweets with metadata"""
        for line in text.split('\n'):
            line = line.strip()
            if not line:
                continue
            timestamp = self._extract_timestamp(line)
            self.tweets.append({
                'content': line,
                'timestamp': timestamp if timestamp else datetime.now(),
                'mentions': self._extract_mentions(line),
                'hashtags': self._extract_hashtags(line),
            })

        df = pd.DataFrame(self.tweets)
        df.to_csv('processed_tweets.csv', index=False)
        return df

    def _extract_timestamp(self, text):
        """Extract timestamp if present in tweet"""
        return None  # Implement timestamp extraction logic if needed

    def _extract_mentions(self, text):
        """Extract mentioned users from tweet"""
        return [word for word in text.split() if word.startswith('@')]

    def _extract_hashtags(self, text):
        """Extract hashtags from tweet"""
        return [word for word in text.split() if word.startswith('#')]

    def analyze_personality(self):
        """Comprehensive personality analysis"""
        all_tweets = [tweet['content'] for tweet in self.tweets]
        analysis_prompt = f"""Perform a deep psychological analysis of the author based on these tweets. Analyze:
        core beliefs, emotional tendencies, and cognitive patterns.
        Tweets for analysis:
        {json.dumps(all_tweets[:30], indent=2)}
        """
        
        response = self.groq_client.chat.completions.create(
            messages=[
                {"role": "system", "content": "You are an expert psychologist."},
                {"role": "user", "content": analysis_prompt},
            ],
            model="mixtral-8x7b-32768",
            temperature=0.1,
        )
        self.personality_profile = response.choices[0].message.content
        return self.personality_profile

    def analyze_topics(self, n_topics=5, n_top_words=10):
        """Extract and identify different topics the author has tweeted about"""
        all_tweets = [tweet['content'] for tweet in self.tweets]
        vectorizer = TfidfVectorizer(stop_words='english')
        tfidf_matrix = vectorizer.fit_transform(all_tweets)
        nmf_model = NMF(n_components=n_topics, random_state=1)
        nmf_model.fit(tfidf_matrix)

        # For each topic, keep the n_top_words terms with the highest weights
        feature_names = vectorizer.get_feature_names_out()
        topics = []
        for topic in nmf_model.components_:
            top_indices = topic.argsort()[:-n_top_words - 1:-1]
            topics.append(" ".join(feature_names[i] for i in top_indices))
        return topics

    def generate_tweet(self, context=""):
        """Generate a new tweet based on personality profile and optional context"""
        additional_contexts = [
            "Comment on a recent technological advancement.",
            "Share a motivational thought.",
            "Discuss a current trending topic.",
            "Reflect on a past experience.",
            "Provide advice to followers."
        ]

        # Include historical topics in the context
        historical_topics = self.analyze_topics()
        additional_contexts.extend(historical_topics)

        # Randomly choose an additional context to diversify tweets
        random_context = random.choice(additional_contexts)

        generation_prompt = f"""Based on this personality profile:
        {self.personality_profile}
        Current context or topic (if any):
        {context}
        Additionally, consider this specific context:
        {random_context}
        Generate a tweet that this person would write right now."""
    
        response = self.groq_client.chat.completions.create(
            messages=[
                {"role": "system", "content": "You are an expert in replicating writing patterns."},
                {"role": "user", "content": generation_prompt},
            ],
            model="mixtral-8x7b-32768",
            temperature=0.8,
            max_tokens=150,
        )
        return response.choices[0].message.content
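

if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): assumes a
    # `Groq_api` key in .env and a tweet archive exported as a PDF with
    # one tweet per line; "tweets.pdf" is a hypothetical path.
    processor = TweetDatasetProcessor()
    raw_text = processor.extract_text_from_pdf("tweets.pdf")
    processor.process_pdf_content(raw_text)
    print(processor.analyze_personality())
    print(processor.analyze_topics())
    print(processor.generate_tweet(context="Share a thought on AI."))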