File size: 5,913 Bytes
5b744f4
 
 
b64289a
b876064
b64289a
b876064
b64289a
5b744f4
b64289a
b876064
5b744f4
b64289a
 
 
5b744f4
b64289a
 
 
5b744f4
b64289a
 
5b744f4
b64289a
5b744f4
b64289a
 
 
 
 
 
 
 
 
b876064
b64289a
b876064
b64289a
b876064
 
b64289a
 
 
b876064
 
b64289a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1e96fb9
 
 
 
 
 
 
 
 
 
 
 
b64289a
 
 
 
 
 
1e96fb9
 
b64289a
 
1e96fb9
b64289a
 
 
 
 
 
1e96fb9
 
b64289a
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
import streamlit as st
import requests
from transformers import pipeline
import concurrent.futures
import os
import json
from dotenv import load_dotenv
from requests.exceptions import JSONDecodeError

# Load environment variables from a local .env file (provides HFSecret).
load_dotenv()

# Initialize Hugging Face API for Llama 3
# NOTE(review): this base URL ends in "/v1" (the OpenAI-compatible router
# path), but below it is combined as f"{HF_API_URL}/models/{...}"; the classic
# Inference API base is "https://api-inference.huggingface.co" with no "/v1".
# Confirm the resulting URL actually resolves.
HF_API_URL = "https://api-inference.huggingface.co/v1"
# API token read from the environment; None when HFSecret is unset.
HF_API_KEY = os.getenv('HFSecret')

# Initialize pipelines for Transformers
# NOTE: both pipelines download model weights on first use, so importing this
# module can be slow; in a Streamlit app consider wrapping creation in a
# cached factory (st.cache_resource) — TODO confirm desired caching behavior.
pipe_sent_transformers = pipeline('sentiment-analysis')
pipe_summ_transformers = pipeline("summarization", model="facebook/bart-large-cnn")

# Define the Llama 3 model ID
LLAMA_MODEL_ID = "meta-llama/Meta-Llama-3-8B-Instruct"

# Function to fetch text content from Transformers app
def fetch_text_content(selected_option):
    """Fetch the preset sample text for *selected_option* from GitHub.

    Parameters
    ----------
    selected_option : str
        One of the preset labels offered in the UI dropdown.

    Returns
    -------
    str
        The raw file contents, or "" when the option is unknown or the
        download fails (timeout, connection error, or non-2xx status).
    """
    options_urls = {
        'Appreciation Letter': "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Appreciation_Letter.txt",
        'Regret Letter': "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Regret_Letter.txt",
        'Kindness Tale': "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Kindness_Tale.txt",
        'Lost Melody Tale': "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Lost_Melody_Tale.txt",
        'Twitter Example 1': "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Twitter_Example_1.txt",
        'Twitter Example 2': "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Twitter_Example_2.txt"
    }
    url = options_urls.get(selected_option)
    if url is None:
        return ""
    try:
        # Bounded timeout so the Streamlit UI cannot hang indefinitely;
        # raise_for_status() prevents an HTTP error page (e.g. a 404 body)
        # from being silently treated as the sample text.
        response = requests.get(url, timeout=10)
        response.raise_for_status()
        return response.text
    except requests.RequestException:
        return ""

# Function to analyze sentiment using Llama
def analyze_with_llama(text):
    """POST *text* to the Hugging Face Inference API for the Llama 3 model.

    Parameters
    ----------
    text : str
        Raw input text sent as the model "inputs".

    Returns
    -------
    dict
        The decoded JSON response on success, or an {"error": ...} dict on
        any network, HTTP, or JSON-decoding failure.
    """
    headers = {"Authorization": f"Bearer {HF_API_KEY}"}
    data = {
        "inputs": text,
        "options": {
            "use_cache": False,
            # Block until the model is loaded instead of receiving a 503.
            "wait_for_model": True
        }
    }

    try:
        # Timeout keeps the caller responsive if the inference endpoint
        # stalls; without it requests.post can hang forever.
        response = requests.post(
            f"{HF_API_URL}/models/{LLAMA_MODEL_ID}",
            headers=headers,
            json=data,
            timeout=60,
        )
        response.raise_for_status()
        return response.json()  # Ensure valid JSON
    except (requests.RequestException, ValueError):
        # ValueError covers json.JSONDecodeError and the
        # requests.exceptions.JSONDecodeError raised by response.json()
        # (both subclass it), so any malformed body lands here too.
        return {"error": "Error occurred while processing Llama model response."}

# Function to run Transformer-based analysis
def transformer_analysis(text):
    """Analyze *text* with the local Hugging Face pipelines.

    Uses the module-level sentiment and summarization pipelines and returns
    a (sentiment_score, sentiment_label, summary) tuple.
    """
    # Each pipeline returns a single-element list of result dicts; keep
    # only the first (and only) entry.
    top_sentiment = pipe_sent_transformers(text)[0]
    top_summary = pipe_summ_transformers(text)[0]

    return (
        top_sentiment['score'],
        top_sentiment['label'],
        top_summary['summary_text'],
    )

# Function to run Llama-based analysis
def llama_analysis(text):
    """Analyze *text* via the remote Llama model.

    Parameters
    ----------
    text : str
        The text to analyze.

    Returns
    -------
    tuple
        (sentiment_score, sentiment_label, summary) on success, or the
        placeholder triple ("Error", "Error", "Error") on failure.
    """
    llama_response = analyze_with_llama(text)

    # Guard: the Inference API can return a JSON list (not a dict) for some
    # task types; treat anything that is not a dict — as well as an explicit
    # error payload — as a failed analysis so the .get() calls below cannot
    # raise AttributeError.
    if not isinstance(llama_response, dict) or "error" in llama_response:
        return "Error", "Error", "Error"

    # Extract sentiment and summary if valid JSON
    sentiment_label = llama_response.get('sentiment', 'UNKNOWN')
    sentiment_score = llama_response.get('sentiment_score', 0.0)
    summary = llama_response.get('summary', 'No summary available.')

    return sentiment_score, sentiment_label, summary

# Streamlit app layout with two columns
st.title("Parallel Sentiment Analysis with Transformers and Llama")

# Preset samples the user can pick from; 'None' leaves the text box empty.
options = [
    'None',
    'Appreciation Letter',
    'Regret Letter',
    'Kindness Tale',
    'Lost Melody Tale',
    'Twitter Example 1',
    'Twitter Example 2',
]
selected_option = st.selectbox("Select a preset option", options)

# Pre-fill the editable text area with the chosen preset ("" for 'None').
jd = fetch_text_content(selected_option)
text = st.text_area('Enter the text to analyze', jd)

if st.button("Start Analysis"):
    # Set up the two columns for parallel analysis
    col1, col2 = st.columns(2)

    with st.spinner("Running sentiment analysis..."):
        # Run both analyses concurrently: the local pipelines are CPU-bound
        # in native code and the Llama call is network-bound, so a thread
        # pool lets the two overlap.
        with concurrent.futures.ThreadPoolExecutor() as executor:
            future_transformer = executor.submit(transformer_analysis, text)
            future_llama = executor.submit(llama_analysis, text)

            # Retrieve results from both transformers and Llama
            sentiment_score_transformer, sentiment_label_transformer, summary_transformer = future_transformer.result()
            sentiment_score_llama, sentiment_label_llama, summary_llama = future_llama.result()

    def display_score(score):
        """Format *score* to two decimals, or return it unchanged.

        Catches TypeError in addition to ValueError so non-numeric
        placeholders ("Error" strings, None) are displayed verbatim
        instead of crashing the page — float(None) raises TypeError,
        which the previous version did not handle.
        """
        try:
            return f"{float(score):.2f}"
        except (ValueError, TypeError):
            return score

    def _render_results(column, engine, label, score, summary):
        """Render one engine's sentiment and summary panels into *column*."""
        with column:
            st.subheader(f"{engine} Analysis")
            with st.expander(f"Sentiment Analysis - {engine}"):
                sentiment_emoji = '😊' if label == 'POSITIVE' else '😞'
                st.write(f"Sentiment: {label} ({sentiment_emoji})")
                st.write(f"Score: {display_score(score)}")
            with st.expander(f"Summarization - {engine}"):
                st.write(summary)

    # Display both result sets side by side; the rendering logic is shared
    # so the two columns cannot drift apart.
    _render_results(col1, "Transformers", sentiment_label_transformer,
                    sentiment_score_transformer, summary_transformer)
    _render_results(col2, "Llama", sentiment_label_llama,
                    sentiment_score_llama, summary_llama)