"""
Visualization components for RoBERTa sentiment analysis
"""
import gradio as gr
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import numpy as np
import json
def _sentiment_color(label):
    """Map a sentiment label ('positive'/'negative'/other) to a CSS color name."""
    label = label.lower()
    if label == "positive":
        return "green"
    if label == "negative":
        return "red"
    return "gray"


def _extract_score_and_label(model_result):
    """
    Pull (score, capitalized label) out of one model's sentiment dict.

    A missing/None result falls back to a neutral zero score, matching the
    defensive handling of the original implementation.
    """
    if model_result is not None:
        score = model_result.get("sentiment_score", 0)
        label = model_result.get("label", "neutral").capitalize()
    else:
        score = 0
        label = "Neutral"
    return score, label


def _build_score_table(models, sa_data):
    """
    Build an HTML table comparing each model's sentiment score and label.

    NOTE(review): the original HTML markup was lost; this table is a
    reconstruction that renders the same data (model, score, colored label).
    """
    cell = "border: 1px solid #ddd; padding: 8px;"
    rows = []
    rows.append("<div style='margin: 10px 0;'>")
    rows.append("<h4>Sentiment Score Comparison</h4>")
    rows.append("<table style='border-collapse: collapse; width: 100%;'>")
    rows.append(
        f"<tr><th style='{cell}'>Model</th>"
        f"<th style='{cell}'>Sentiment Score</th>"
        f"<th style='{cell}'>Label</th></tr>"
    )
    for model_name in models:
        if model_name not in sa_data:
            continue
        score, label = _extract_score_and_label(sa_data.get(model_name))
        color = _sentiment_color(label)
        rows.append(
            f"<tr><td style='{cell}'>{model_name}</td>"
            f"<td style='{cell}'>{score:.2f}</td>"
            f"<td style='{cell} color: {color}; font-weight: bold;'>{label}</td></tr>"
        )
    rows.append("</table></div>")
    return "".join(rows)


def _build_gauge(model_scores):
    """
    Build an HTML gauge placing each model's score on a -2.0 .. +2.0 scale.

    Positions are mapped linearly to 0-100% and clamped; markers are colored
    dark green (> 0.5), dark red (< -0.5), or black (near neutral).
    """
    parts = []
    parts.append("<div style='margin: 20px 0;'>")
    parts.append("<h4>Sentiment Scale</h4>")
    parts.append(
        "<div style='display: flex; justify-content: space-between; font-size: 0.85em;'>"
        "<span>Very Negative (-2.0)</span>"
        "<span>Neutral (0.0)</span>"
        "<span>Very Positive (2.0)</span>"
        "</div>"
    )
    # Gauge background: a red-to-gray-to-green gradient bar.
    parts.append(
        "<div style='position: relative; height: 40px; border-radius: 5px; "
        "background: linear-gradient(to right, #ffcccc, #f0f0f0, #ccffcc);'>"
    )
    for model_name, score in model_scores:
        # Linear map from [-2.0, 2.0] to [0%, 100%], clamped.
        position = ((score + 2.0) / 4.0) * 100
        position = max(0, min(100, position))
        if score > 0.5:
            color = "#006400"  # Dark green
        elif score < -0.5:
            color = "#8B0000"  # Dark red
        else:
            color = "#000000"  # Black
        parts.append(
            f"<div style='position: absolute; left: {position}%; top: 0; "
            f"transform: translateX(-50%); text-align: center;'>"
            f"<div style='width: 3px; height: 40px; background-color: {color}; margin: 0 auto;'></div>"
            f"<div style='font-size: 0.8em; color: {color}; white-space: nowrap;'>"
            f"{model_name}: {score:.2f}</div>"
            f"</div>"
        )
    parts.append("</div></div>")
    return "".join(parts)


def _build_comparison_summary(comparison):
    """
    Build an HTML summary of the cross-model sentiment comparison.

    Uses 'difference_direction', 'significant_difference', and
    'sentiment_difference' keys when present.
    """
    parts = ["<div style='margin: 10px 0;'>", "<h4>Sentiment Comparison Summary</h4>"]
    if "difference_direction" in comparison:
        parts.append(f"<p>{comparison['difference_direction']}</p>")
    if "significant_difference" in comparison:
        # Red highlights a significant divergence; green a minor one.
        color = "red" if comparison["significant_difference"] else "green"
        significance = "Significant" if comparison["significant_difference"] else "Minor"
        parts.append(
            f"<p style='color: {color};'>{significance} difference in sentiment "
            f"(difference score: {comparison.get('sentiment_difference', 0):.2f})</p>"
        )
    parts.append("</div>")
    return "".join(parts)


def _build_sentence_breakdown(sentences):
    """
    Build an HTML list of sentences shaded by their sentiment.

    Sentences shorter than 3 words are skipped. Background opacity scales
    with |score| (capped at 1.0) so stronger sentiment reads as stronger color.
    """
    parts = ["<div style='margin: 10px 0;'>"]
    for sentence in sentences:
        score = sentence.get("score", 0)
        label = sentence.get("label", "neutral")
        text = sentence.get("text", "")
        # Skip very short sentences or empty text.
        if len(text.split()) < 3:
            continue
        if label == "positive":
            color = f"rgba(0, 128, 0, {min(1.0, abs(score) * 0.5)})"
            border = "rgba(0, 128, 0, 0.3)"
        elif label == "negative":
            color = f"rgba(255, 0, 0, {min(1.0, abs(score) * 0.5)})"
            border = "rgba(255, 0, 0, 0.3)"
        else:
            color = "rgba(128, 128, 128, 0.1)"
            border = "rgba(128, 128, 128, 0.3)"
        parts.append(
            f"<div style='background-color: {color}; border: 1px solid {border}; "
            f"border-radius: 4px; padding: 6px; margin: 4px 0;'>"
            f"<span>{text}</span> "
            f"<span style='font-size: 0.8em;'>{score:.2f} ({label.capitalize()})</span>"
            f"</div>"
        )
    parts.append("</div>")
    return "".join(parts)


def create_sentiment_visualization(analysis_results):
    """
    Create visualizations for RoBERTa sentiment analysis results.

    Args:
        analysis_results (dict): Analysis results containing an "analyses"
            mapping of prompt -> per-analysis dicts; the "roberta_sentiment"
            entry supplies "models", "sentiment_analysis", and "comparison".

    Returns:
        list: Gradio components (Markdown / HTML) visualizing the results.
    """
    print("Starting create_sentiment_visualization function")
    output_components = []

    # Bail out early when there is nothing to visualize.
    if not analysis_results or "analyses" not in analysis_results:
        print("No analysis results found.")
        return [gr.Markdown("No analysis results found.")]

    print(f"Number of prompts: {len(analysis_results['analyses'])}")

    for prompt, analyses in analysis_results["analyses"].items():
        output_components.append(gr.Markdown(
            f"## Analysis of Prompt: \"{prompt[:100]}{'...' if len(prompt) > 100 else ''}\""))

        if "roberta_sentiment" not in analyses:
            continue
        sentiment_results = analyses["roberta_sentiment"]

        # A reported error ends processing for this prompt.
        if "error" in sentiment_results:
            output_components.append(gr.Markdown(
                f"**Error in sentiment analysis:** {sentiment_results['error']}"))
            continue

        models = sentiment_results.get("models", [])
        if len(models) >= 2:
            output_components.append(gr.Markdown(
                f"### RoBERTa Sentiment Analysis: Comparing {models[0]} and {models[1]}"))

        sa_data = sentiment_results.get("sentiment_analysis", {})
        if sa_data and len(models) >= 2:
            # Score table comparing all models present in the data.
            output_components.append(gr.HTML(_build_score_table(models, sa_data)))

            # Gauge only when at least two models actually carry scores.
            model_scores = []
            for model_name in models:
                if model_name in sa_data:
                    model_result = sa_data.get(model_name)
                    if model_result is not None:
                        model_scores.append(
                            (model_name, model_result.get("sentiment_score", 0)))
            if len(model_scores) >= 2:
                output_components.append(gr.HTML(_build_gauge(model_scores)))

        if "comparison" in sentiment_results:
            output_components.append(gr.HTML(
                _build_comparison_summary(sentiment_results["comparison"])))

        # Sentence-level breakdown per model, when sentence scores exist.
        model_sentences = {}
        for model_name in models:
            if model_name in sa_data:
                model_result = sa_data.get(model_name)
                if model_result is not None and "sentence_scores" in model_result:
                    sentence_scores = model_result.get("sentence_scores")
                    if sentence_scores:
                        model_sentences[model_name] = sentence_scores
        if model_sentences and any(len(s) > 0 for s in model_sentences.values()):
            output_components.append(gr.Markdown("### Sentence-Level Sentiment Analysis"))
            for model_name, sentences in model_sentences.items():
                if sentences:
                    output_components.append(gr.Markdown(
                        f"#### {model_name} Response Breakdown"))
                    output_components.append(gr.HTML(
                        _build_sentence_breakdown(sentences)))

    # Only the (at most one) prompt header was added: nothing detailed to show.
    if len(output_components) <= 1:
        output_components.append(gr.Markdown(
            "No detailed sentiment analysis found in results."))
    return output_components
def process_and_visualize_sentiment_analysis(analysis_results):
    """
    Process the sentiment analysis results and create visualization components.

    Args:
        analysis_results (dict): The analysis results.

    Returns:
        list: Gradio components for visualization; on failure, components
        that surface the error message and traceback to the user.
    """
    try:
        print("Starting visualization of sentiment analysis results")
        return create_sentiment_visualization(analysis_results)
    except Exception as e:
        import html
        import traceback
        error_msg = f"Sentiment visualization error: {str(e)}\n{traceback.format_exc()}"
        print(error_msg)
        # Surface the traceback in the UI (previously it was printed but the
        # HTML component was left empty, hiding the failure from the user).
        return [
            gr.Markdown("**Error during sentiment visualization:**"),
            gr.HTML(
                f"<pre style='color: red; white-space: pre-wrap;'>"
                f"{html.escape(error_msg)}</pre>"
            ),
        ]