import gradio as gr
import logging
import traceback
import plotly.graph_objects as go
from plotly.subplots import make_subplots

# Set up logging
logger = logging.getLogger('gradio_app.processors.bias')
def process_bias_detection(analysis_results, prompt, analyses):
    """
    Process Bias Detection analysis and return UI updates.

    Args:
        analysis_results (dict): Complete analysis results
        prompt (str): The prompt being analyzed
        analyses (dict): Analysis data for the prompt

    Returns:
        tuple: UI component updates
    """
logger.info("Processing Bias Detection visualization")
models = analyses["bias_detection"].get("models", ["Model 1", "Model 2"])
logger.info(f"Bias models: {models}")
try:
# Get the bias detection results
bias_results = analyses["bias_detection"]
        # Create markdown component for the text description
        results_markdown = f"""
## Bias Analysis Results

### Sentiment Analysis
- {models[0]}: {bias_results[models[0]]['sentiment']['bias_direction']} (strength: {bias_results[models[0]]['sentiment']['bias_strength']:.2f})
- {models[1]}: {bias_results[models[1]]['sentiment']['bias_direction']} (strength: {bias_results[models[1]]['sentiment']['bias_strength']:.2f})
- Difference: {bias_results['comparative']['sentiment']['difference']:.2f}

### Partisan Leaning
- {models[0]}: {bias_results[models[0]]['partisan']['leaning']} (score: {bias_results[models[0]]['partisan']['lean_score']:.2f})
- {models[1]}: {bias_results[models[1]]['partisan']['leaning']} (score: {bias_results[models[1]]['partisan']['lean_score']:.2f})
- Difference: {bias_results['comparative']['partisan']['difference']:.2f}

### Framing Analysis
- {models[0]} dominant frame: {bias_results[models[0]]['framing']['dominant_frame']}
- {models[1]} dominant frame: {bias_results[models[1]]['framing']['dominant_frame']}
- Different frames: {'Yes' if bias_results['comparative']['framing']['different_frames'] else 'No'}

### Liberal Terms Found
- {models[0]}: {', '.join(bias_results[models[0]]['partisan']['liberal_terms'][:10])}
- {models[1]}: {', '.join(bias_results[models[1]]['partisan']['liberal_terms'][:10])}

### Conservative Terms Found
- {models[0]}: {', '.join(bias_results[models[0]]['partisan']['conservative_terms'][:10])}
- {models[1]}: {', '.join(bias_results[models[1]]['partisan']['conservative_terms'][:10])}

### Overall Comparison
The overall bias difference is {bias_results['comparative']['overall']['difference']:.2f}, which is {'significant' if bias_results['comparative']['overall']['significant_bias_difference'] else 'not significant'}.
"""
        # Create visual components
        # 1. Sentiment analysis visualization
        fig1 = go.Figure()
        for model in models:
            sentiment_strength = bias_results[model]['sentiment']['bias_strength']
            sentiment_direction = bias_results[model]['sentiment']['bias_direction']
            # Color the bar by sentiment direction
            color = 'green' if sentiment_direction == 'positive' else 'red' if sentiment_direction == 'negative' else 'gray'
            fig1.add_trace(go.Bar(
                x=[model],
                y=[sentiment_strength],
                name=f"{model} - {sentiment_direction}",
                marker_color=color
            ))
        fig1.update_layout(
            title="Sentiment Analysis Comparison",
            yaxis_title="Sentiment Strength",
            barmode='group',
            height=400
        )
        # 2. Partisan leaning visualization
        fig2 = go.Figure()
        for model in models:
            partisan_score = bias_results[model]['partisan']['lean_score']
            partisan_leaning = bias_results[model]['partisan']['leaning']
            # Color the bar by partisan leaning
            color = 'blue' if partisan_leaning == 'liberal' else 'red' if partisan_leaning == 'conservative' else 'gray'
            fig2.add_trace(go.Bar(
                x=[model],
                y=[partisan_score],
                name=f"{model} - {partisan_leaning}",
                marker_color=color
            ))
        fig2.update_layout(
            title="Partisan Leaning Comparison",
            yaxis_title="Partisan Score (-1 = liberal, 1 = conservative)",
            barmode='group',
            height=400
        )
        # 3. Framing analysis visualization
        frames = ['economic', 'moral', 'security', 'social_welfare']

        # Side-by-side subplots, one panel per model
        fig3 = make_subplots(rows=1, cols=2, subplot_titles=models)
        for i, model in enumerate(models):
            # The framing payload is not always shaped the same way: prefer an
            # explicit 'frame_counts' dict, fall back to a 'frames' dict, and
            # finally fall back to per-frame keys (defaulting to 0)
            framing = bias_results[model]['framing']
            if 'frame_counts' in framing:
                frame_counts = framing['frame_counts']
            else:
                frame_counts = framing.get('frames', {})
                if not frame_counts:
                    frame_counts = {frame: framing.get(frame, 0) for frame in frames}

            # Add one bar trace per model, in its own subplot column
            fig3.add_trace(
                go.Bar(
                    x=list(frame_counts.keys()),
                    y=list(frame_counts.values()),
                    name=model
                ),
                row=1, col=i + 1
            )
        fig3.update_layout(
            title="Framing Analysis Comparison",
            height=400
        )
        # Return individual components (rather than a Column) so each value
        # maps to exactly one Gradio output
        return (
            analysis_results,  # analysis_results_state
            False,  # analysis_output visibility
            True,  # visualization_area_visible
            gr.update(visible=True),  # analysis_title
            gr.update(visible=True, value=f"## Analysis of Prompt: \"{prompt[:100]}...\""),  # prompt_title
            gr.update(visible=True, value=f"### Comparing responses from {models[0]} and {models[1]}"),  # models_compared
            gr.update(visible=True, value="#### Bias detection visualization is available below"),  # model1_title
            gr.update(visible=True,
                      value="The detailed bias analysis includes sentiment analysis, partisan term detection, and framing analysis."),  # model1_words
            gr.update(visible=False),  # model2_title
            gr.update(visible=False),  # model2_words
            gr.update(visible=False),  # similarity_metrics_title
            gr.update(visible=False),  # similarity_metrics
            False,  # status_message_visible
            gr.update(visible=False),  # status_message
            results_markdown,  # markdown description
            fig1,  # sentiment plot
            fig2,  # partisan plot
            fig3   # framing plot
        )
    except Exception as e:
        logger.error(f"Error generating bias visualization: {str(e)}\n{traceback.format_exc()}")
        return (
            analysis_results,
            True,  # show raw JSON for debugging
            False,
            gr.update(visible=False),
            gr.update(visible=False),
            gr.update(visible=False),
            gr.update(visible=False),
            gr.update(visible=False),
            gr.update(visible=False),
            gr.update(visible=False),
            gr.update(visible=False),
            gr.update(visible=False),
            True,
            gr.update(visible=True, value=f"❌ **Error generating bias visualization:** {str(e)}"),
            "",  # empty string for the markdown component instead of None
            None,
            None,
            None
        )
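
# ---------------------------------------------------------------------------
# Minimal smoke-test sketch (an assumption, not part of the original app
# wiring): builds a synthetic payload matching the shape documented at the top
# of the function and calls the processor directly, which is handy for
# checking the happy path without launching the full Gradio app. All values
# below are made up for illustration.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    _per_model = {
        "sentiment": {"bias_direction": "positive", "bias_strength": 0.42},
        "partisan": {
            "leaning": "liberal",
            "lean_score": -0.31,
            "liberal_terms": ["equity", "climate"],
            "conservative_terms": ["tradition"],
        },
        "framing": {
            "dominant_frame": "economic",
            "frame_counts": {"economic": 3, "moral": 1, "security": 0, "social_welfare": 2},
        },
    }
    _analyses = {
        "bias_detection": {
            "models": ["Model 1", "Model 2"],
            "Model 1": _per_model,
            "Model 2": _per_model,  # reused for brevity; a real run differs per model
            "comparative": {
                "sentiment": {"difference": 0.10},
                "partisan": {"difference": 0.20},
                "framing": {"different_frames": False},
                "overall": {"difference": 0.15, "significant_bias_difference": False},
            },
        }
    }
    outputs = process_bias_detection({}, "Example prompt", _analyses)
    print(outputs[14][:200])  # start of the generated markdown summary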