# Hugging Face Space: parallel sentiment analysis with Transformers and Llama 3.
# (Page-scrape residue "Spaces: / Runtime error" removed so the file parses.)
import streamlit as st
import requests
from transformers import pipeline
import concurrent.futures
import os
import json
from dotenv import load_dotenv
from requests.exceptions import JSONDecodeError

# Load environment variables (expects HFSecret in .env or the process env)
load_dotenv()

# Hugging Face Inference API base URL.
# Fixed: the previous value ended in "/v1", but model endpoints live at
# https://api-inference.huggingface.co/models/<model-id>; callers append
# "/models/{id}", so the base must not contain a "/v1" segment.
HF_API_URL = "https://api-inference.huggingface.co"
HF_API_KEY = os.getenv('HFSecret')

# Local Transformers pipelines: default sentiment model + BART summarizer.
pipe_sent_transformers = pipeline('sentiment-analysis')
pipe_summ_transformers = pipeline("summarization", model="facebook/bart-large-cnn")

# Llama 3 model ID used via the Inference API
LLAMA_MODEL_ID = "meta-llama/Meta-Llama-3-8B-Instruct"
# Function to fetch text content from Transformers app | |
def fetch_text_content(selected_option):
    """Fetch the preset sample text for *selected_option* from GitHub.

    Returns the raw file contents as a string, or "" when the option is
    unknown or the download fails (network error, timeout, or HTTP error
    status — previously an error page body was returned as if it were the
    sample text, and a hung connection could block the app forever).
    """
    options_urls = {
        'Appreciation Letter': "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Appreciation_Letter.txt",
        'Regret Letter': "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Regret_Letter.txt",
        'Kindness Tale': "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Kindness_Tale.txt",
        'Lost Melody Tale': "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Lost_Melody_Tale.txt",
        'Twitter Example 1': "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Twitter_Example_1.txt",
        'Twitter Example 2': "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Twitter_Example_2.txt"
    }
    url = options_urls.get(selected_option)
    if url is None:
        return ""
    try:
        response = requests.get(url, timeout=10)
        response.raise_for_status()
        return response.text
    except requests.RequestException:
        return ""
# Function to analyze sentiment using Llama | |
def analyze_with_llama(text):
    """POST *text* to the Llama 3 model on the HF Inference API.

    Returns the decoded JSON response on success, or a dict with an
    "error" key on any network/HTTP/JSON-decoding failure.
    """
    headers = {"Authorization": f"Bearer {HF_API_KEY}"}
    data = {
        "inputs": text,
        "options": {
            "use_cache": False,
            "wait_for_model": True  # block until the model is loaded server-side
        }
    }
    try:
        # Timeout keeps the Streamlit worker thread from hanging forever
        # if the Inference API stalls.
        response = requests.post(
            f"{HF_API_URL}/models/{LLAMA_MODEL_ID}",
            headers=headers,
            json=data,
            timeout=60,
        )
        response.raise_for_status()
        return response.json()  # Ensure valid JSON
    except (requests.RequestException, json.JSONDecodeError):
        return {"error": "Error occurred while processing Llama model response."}
# Function to run Transformer-based analysis | |
def transformer_analysis(text):
    """Run the local Transformers pipelines over *text*.

    Returns a (sentiment_score, sentiment_label, summary) tuple built from
    the module-level sentiment-analysis and summarization pipelines.
    """
    sentiment = pipe_sent_transformers(text)[0]
    condensed = pipe_summ_transformers(text)[0]
    return sentiment['score'], sentiment['label'], condensed['summary_text']
# Function to run Llama-based analysis | |
def llama_analysis(text):
    """Run Llama-based sentiment/summary analysis over *text*.

    Returns (sentiment_score, sentiment_label, summary), or the literal
    string "Error" for all three slots when the API call failed or the
    response is not usable.
    """
    llama_response = analyze_with_llama(text)
    # The Inference API frequently returns a JSON *list* for generation
    # models; calling .get() on a list would raise AttributeError, so
    # treat any non-dict payload as a failure alongside explicit errors.
    if not isinstance(llama_response, dict) or "error" in llama_response:
        return "Error", "Error", "Error"
    sentiment_label = llama_response.get('sentiment', 'UNKNOWN')
    sentiment_score = llama_response.get('sentiment_score', 0.0)
    summary = llama_response.get('summary', 'No summary available.')
    return sentiment_score, sentiment_label, summary
# Streamlit app layout with two columns | |
# ----- Streamlit app layout: two columns of parallel analyses -----
st.title("Parallel Sentiment Analysis with Transformers and Llama")

# Preset texts the user can pick from (fetched from GitHub on selection).
options = ['None', 'Appreciation Letter', 'Regret Letter', 'Kindness Tale',
           'Lost Melody Tale', 'Twitter Example 1', 'Twitter Example 2']
selected_option = st.selectbox("Select a preset option", options)

# Pre-fill the text area with the selected preset (empty for 'None').
jd = fetch_text_content(selected_option)
text = st.text_area('Enter the text to analyze', jd)

if st.button("Start Analysis"):
    # Two columns so the Transformers and Llama results sit side by side.
    col1, col2 = st.columns(2)

    with st.spinner("Running sentiment analysis..."):
        with concurrent.futures.ThreadPoolExecutor() as executor:
            # Both analyses are I/O/model bound, so run them in parallel.
            future_transformer = executor.submit(transformer_analysis, text)
            future_llama = executor.submit(llama_analysis, text)
            # .result() blocks until each analysis finishes.
            sentiment_score_transformer, sentiment_label_transformer, summary_transformer = future_transformer.result()
            sentiment_score_llama, sentiment_label_llama, summary_llama = future_llama.result()

    def display_score(score):
        """Format *score* to two decimals, or pass non-numeric values through.

        The Llama path can yield the string "Error" (or None) instead of a
        number; float(None) raises TypeError, not ValueError, so both are
        caught here (the original caught only ValueError).
        """
        try:
            return f"{float(score):.2f}"
        except (TypeError, ValueError):
            # Not a number — show the value (e.g. an error message) verbatim.
            return score

    # Transformers-based results in the first column.
    with col1:
        st.subheader("Transformers Analysis")
        with st.expander("Sentiment Analysis - Transformers"):
            sentiment_emoji = 'π' if sentiment_label_transformer == 'POSITIVE' else 'π'
            st.write(f"Sentiment: {sentiment_label_transformer} ({sentiment_emoji})")
            st.write(f"Score: {display_score(sentiment_score_transformer)}")
        with st.expander("Summarization - Transformers"):
            st.write(summary_transformer)

    # Llama-based results in the second column.
    with col2:
        st.subheader("Llama Analysis")
        with st.expander("Sentiment Analysis - Llama"):
            sentiment_emoji = 'π' if sentiment_label_llama == 'POSITIVE' else 'π'
            st.write(f"Sentiment: {sentiment_label_llama} ({sentiment_emoji})")
            st.write(f"Score: {display_score(sentiment_score_llama)}")
        with st.expander("Summarization - Llama"):
            st.write(summary_llama)