import streamlit as st
import tweepy as tw
import pandas as pd
from transformers import pipeline

# Twitter API credentials (hard-coded here; for a public deployment these should
# be moved to environment variables or Streamlit secrets).
consumer_key = 'OCgWzDW6PaBvBeVimmGBqdAg1'
consumer_secret = 'tBKnmyg5Jfsewkpmw74gxHZbbZkGIH6Ee4rsM0lD1vFL7SrEIM'
access_token = '1449663645412065281-LNjZoEO9lxdtxPcmLtM35BRdIKYHpk'
access_token_secret = 'FL3SGsUWSzPVFnG7bNMnyh4vYK8W1SlABBNtdF7Xcbh7a'

# Authenticate against the Twitter API.
auth = tw.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tw.API(auth, wait_on_rate_limit=True)

# Default Hugging Face sentiment-analysis pipeline.
classifier = pipeline('sentiment-analysis')

st.title('Sentiment Analysis')
st.markdown('Live Twitter sentiment analysis. Enter a Twitter account '
            '(e.g. @lexfridman, @hubermanlab, @StanfordMed, @grok_, @annakaharris, '
            '@drmichaellevin, @CDCgov) to see the sentiment of the latest N tweets '
            'mentioning it.')


def run():
    # Collect the search term and the number of tweets to analyse.
    with st.form(key='Enter name'):
        search_words = st.text_input('Enter the name for which you want to know the sentiment')
        number_of_tweets = st.number_input('Enter the number of latest tweets', 0, 50, 50)
        submit_button = st.form_submit_button(label='Submit')

    if submit_button:
        # Fetch the latest English-language tweets matching the query.
        tweets = tw.Cursor(api.search_tweets, q=search_words, lang='en').items(number_of_tweets)
        tweet_list = [tweet.text for tweet in tweets]

        # Classify each tweet and keep only the predicted label.
        predictions = classifier(tweet_list)
        labels = [p['label'] for p in predictions]

        # Show the tweets alongside their predicted sentiment.
        df = pd.DataFrame(
            list(zip(tweet_list, labels)),
            columns=['Latest ' + str(number_of_tweets) + ' Tweets on ' + search_words, 'sentiment'],
        )
        st.write(df)


if __name__ == '__main__':
    run()
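
# For reference, a sketch of the classifier output this app consumes: the default
# 'sentiment-analysis' pipeline returns one dict per input text, of which only the
# 'label' field is kept above (the exact model and score depend on the pipeline's
# current default).
#
#     >>> classifier(['Loved the latest episode!'])
#     [{'label': 'POSITIVE', 'score': 0.99...}]
#
# To try the app locally (assuming this file is saved as app.py):
#     streamlit run app.py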