# processors/bias_processor.py
import gradio as gr
import logging
import traceback
import plotly.graph_objects as go
from plotly.subplots import make_subplots
# Set up logging
logger = logging.getLogger('gradio_app.processors.bias')
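# NOTE: this assumes the host application configures handlers for the
# 'gradio_app' logger hierarchy; otherwise these records propagate to the root logger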
def process_bias_detection(analysis_results, prompt, analyses):
"""
Process Bias Detection analysis and return UI updates
Args:
analysis_results (dict): Complete analysis results
prompt (str): The prompt being analyzed
analyses (dict): Analysis data for the prompt
Returns:
tuple: UI component updates
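    Expected shape of ``analyses["bias_detection"]`` (an illustrative sketch
    inferred from the lookups below; model names and numbers are placeholders,
    and keys not read by this function are omitted):
        {
            "models": ["Model 1", "Model 2"],
            "Model 1": {
                "sentiment": {"bias_direction": "positive", "bias_strength": 0.4},
                "partisan": {"leaning": "liberal", "lean_score": -0.3,
                             "liberal_terms": [...], "conservative_terms": [...]},
                "framing": {"dominant_frame": "economic",
                            "frame_counts": {"economic": 3, ...}},
            },
            "Model 2": {...},  # same structure as "Model 1"
            "comparative": {
                "sentiment": {"difference": 0.1},
                "partisan": {"difference": 0.2},
                "framing": {"different_frames": True},
                "overall": {"difference": 0.15,
                            "significant_bias_difference": False},
            },
        }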
"""
logger.info("Processing Bias Detection visualization")
models = analyses["bias_detection"].get("models", ["Model 1", "Model 2"])
logger.info(f"Bias models: {models}")
try:
# Get the bias detection results
bias_results = analyses["bias_detection"]
# Create markdown components for text description
results_markdown = f"""
## Bias Analysis Results
### Sentiment Analysis
- {models[0]}: {bias_results[models[0]]['sentiment']['bias_direction']} (strength: {bias_results[models[0]]['sentiment']['bias_strength']:.2f})
- {models[1]}: {bias_results[models[1]]['sentiment']['bias_direction']} (strength: {bias_results[models[1]]['sentiment']['bias_strength']:.2f})
- Difference: {bias_results['comparative']['sentiment']['difference']:.2f}
### Partisan Leaning
- {models[0]}: {bias_results[models[0]]['partisan']['leaning']} (score: {bias_results[models[0]]['partisan']['lean_score']:.2f})
- {models[1]}: {bias_results[models[1]]['partisan']['leaning']} (score: {bias_results[models[1]]['partisan']['lean_score']:.2f})
- Difference: {bias_results['comparative']['partisan']['difference']:.2f}
### Framing Analysis
- {models[0]} dominant frame: {bias_results[models[0]]['framing']['dominant_frame']}
- {models[1]} dominant frame: {bias_results[models[1]]['framing']['dominant_frame']}
- Different frames: {'Yes' if bias_results['comparative']['framing']['different_frames'] else 'No'}
### Liberal Terms Found
- {models[0]}: {', '.join(bias_results[models[0]]['partisan']['liberal_terms'][:10]) or 'none'}
- {models[1]}: {', '.join(bias_results[models[1]]['partisan']['liberal_terms'][:10]) or 'none'}
### Conservative Terms Found
- {models[0]}: {', '.join(bias_results[models[0]]['partisan']['conservative_terms'][:10]) or 'none'}
- {models[1]}: {', '.join(bias_results[models[1]]['partisan']['conservative_terms'][:10]) or 'none'}
### Overall Comparison
The overall bias difference is {bias_results['comparative']['overall']['difference']:.2f}, which is
{'significant' if bias_results['comparative']['overall']['significant_bias_difference'] else 'not significant'}.
"""
# Create visual components
# 1. Sentiment Analysis Visualization
fig1 = go.Figure()
        for model in models:
sentiment_strength = bias_results[model]['sentiment']['bias_strength']
sentiment_direction = bias_results[model]['sentiment']['bias_direction']
# Use color based on sentiment direction
color = 'green' if sentiment_direction == 'positive' else 'red' if sentiment_direction == 'negative' else 'gray'
fig1.add_trace(go.Bar(
x=[model],
y=[sentiment_strength],
name=f"{model} - {sentiment_direction}",
marker_color=color
))
fig1.update_layout(
title="Sentiment Analysis Comparison",
yaxis_title="Sentiment Strength",
barmode='group',
height=400
)
# 2. Partisan Leaning Visualization
fig2 = go.Figure()
        for model in models:
partisan_score = bias_results[model]['partisan']['lean_score']
partisan_leaning = bias_results[model]['partisan']['leaning']
# Use color based on partisan leaning
color = 'blue' if partisan_leaning == 'liberal' else 'red' if partisan_leaning == 'conservative' else 'gray'
fig2.add_trace(go.Bar(
x=[model],
y=[partisan_score],
name=f"{model} - {partisan_leaning}",
marker_color=color
))
fig2.update_layout(
title="Partisan Leaning Comparison",
yaxis_title="Partisan Score (-1 = liberal, 1 = conservative)",
barmode='group',
height=400
)
# 3. Framing Analysis Visualization
frames = ['economic', 'moral', 'security', 'social_welfare']
# Create subplots with 1 row and 2 columns for side-by-side comparison
fig3 = make_subplots(rows=1, cols=2, subplot_titles=models)
for i, model in enumerate(models):
            # Per-frame counts may live under 'frame_counts', under 'frames',
            # or as top-level keys of the framing dict; handle each shape in turn
            framing = bias_results[model]['framing']
            if 'frame_counts' in framing:
                frame_counts = framing['frame_counts']
            else:
                frame_counts = framing.get('frames', {})
                if not frame_counts:
                    # Fall back to reading each known frame directly off the
                    # framing dict, defaulting to 0 when a frame is absent
                    frame_counts = {frame: framing.get(frame, 0) for frame in frames}
# Add trace for each model
fig3.add_trace(
go.Bar(
x=list(frame_counts.keys()),
y=list(frame_counts.values()),
name=model
),
row=1, col=i + 1
)
fig3.update_layout(
title="Framing Analysis Comparison",
height=400
)
        # Return the markdown text and the three figures as individual
        # components rather than wrapping them in a single Column
return (
analysis_results, # analysis_results_state
False, # analysis_output visibility
True, # visualization_area_visible
gr.update(visible=True), # analysis_title
gr.update(visible=True, value=f"## Analysis of Prompt: \"{prompt[:100]}...\""), # prompt_title
gr.update(visible=True, value=f"### Comparing responses from {models[0]} and {models[1]}"),
# models_compared
gr.update(visible=True, value="#### Bias detection visualization is available below"), # model1_title
gr.update(visible=True,
value="The detailed bias analysis includes sentiment analysis, partisan term detection, and framing analysis."),
# model1_words
gr.update(visible=False), # model2_title
gr.update(visible=False), # model2_words
gr.update(visible=False), # similarity_metrics_title
gr.update(visible=False), # similarity_metrics
False, # status_message_visible
gr.update(visible=False), # status_message
            results_markdown,  # bias results markdown
            fig1,  # sentiment comparison figure
            fig2,  # partisan leaning figure
            fig3   # framing comparison figure
)
except Exception as e:
logger.error(f"Error generating bias visualization: {str(e)}\n{traceback.format_exc()}")
return (
analysis_results,
True, # Show raw JSON for debugging
False,
gr.update(visible=False),
gr.update(visible=False),
gr.update(visible=False),
gr.update(visible=False),
gr.update(visible=False),
gr.update(visible=False),
gr.update(visible=False),
gr.update(visible=False),
gr.update(visible=False),
True,
gr.update(visible=True, value=f"❌ **Error generating bias visualization:** {str(e)}"),
"", # Return empty strings for visualization components instead of None
None,
None,
None
)
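

# A minimal smoke test, assuming the illustrative schema sketched in the
# docstring above. The model names and scores below are synthetic and only
# exercise the happy path; this is a sanity check, not part of the app wiring.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    _models = ["Model 1", "Model 2"]

    def _fake_model_result(direction, strength, leaning, score):
        # Build one model's synthetic bias-analysis entry
        return {
            "sentiment": {"bias_direction": direction, "bias_strength": strength},
            "partisan": {"leaning": leaning, "lean_score": score,
                         "liberal_terms": ["equity"], "conservative_terms": ["tradition"]},
            "framing": {"dominant_frame": "economic",
                        "frame_counts": {"economic": 3, "moral": 1,
                                         "security": 0, "social_welfare": 2}},
        }

    _analyses = {"bias_detection": {
        "models": _models,
        _models[0]: _fake_model_result("positive", 0.4, "liberal", -0.3),
        _models[1]: _fake_model_result("negative", 0.2, "conservative", 0.1),
        "comparative": {
            "sentiment": {"difference": 0.2},
            "partisan": {"difference": 0.4},
            "framing": {"different_frames": False},
            "overall": {"difference": 0.3, "significant_bias_difference": True},
        },
    }}
    _outputs = process_bias_detection({}, "A test prompt about policy", _analyses)
    print(f"process_bias_detection returned {len(_outputs)} UI updates")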