Ryan committed on
Commit
11b02dc
·
1 Parent(s): b3c9add
Files changed (4)
  1. .DS_Store +0 -0
  2. app.py +1 -1
  3. processors/bias_processor.py +6 -109
  4. visualization_handler.py +1 -0
.DS_Store CHANGED
Binary files a/.DS_Store and b/.DS_Store differ
 
app.py CHANGED
@@ -83,7 +83,7 @@ def create_app():
83
  # Connect the run button to the analysis function
84
  run_analysis_btn.click(
85
  fn=run_analysis,
86
- inputs=[dataset_state, analysis_options, ngram_n, topic_count],
87
  outputs=visualization_components
88
  )
89
 
 
83
  # Connect the run button to the analysis function
84
  run_analysis_btn.click(
85
  fn=run_analysis,
86
+ inputs=[dataset_state, analysis_options, ngram_n, ngram_top, topic_count], # Make sure ngram_top is included
87
  outputs=visualization_components
88
  )
89
 
processors/bias_processor.py CHANGED
@@ -33,7 +33,7 @@ def process_bias_detection(analysis_results, prompt, analyses):
33
  # Get the bias detection results
34
  bias_results = analyses["bias_detection"]
35
 
36
- # Create markdown components for text description
37
  results_markdown = f"""
38
  ## Bias Analysis Results
39
 
@@ -65,123 +65,23 @@ The overall bias difference is {bias_results['comparative']['overall']['differen
65
  {'significant' if bias_results['comparative']['overall']['significant_bias_difference'] else 'not significant'}.
66
  """
67
 
68
- # Create visual components
69
- # 1. Sentiment Analysis Visualization
70
- fig1 = go.Figure()
71
-
72
- for i, model in enumerate(models):
73
- sentiment_strength = bias_results[model]['sentiment']['bias_strength']
74
- sentiment_direction = bias_results[model]['sentiment']['bias_direction']
75
-
76
- # Use color based on sentiment direction
77
- color = 'green' if sentiment_direction == 'positive' else 'red' if sentiment_direction == 'negative' else 'gray'
78
-
79
- fig1.add_trace(go.Bar(
80
- x=[model],
81
- y=[sentiment_strength],
82
- name=f"{model} - {sentiment_direction}",
83
- marker_color=color
84
- ))
85
-
86
- fig1.update_layout(
87
- title="Sentiment Analysis Comparison",
88
- yaxis_title="Sentiment Strength",
89
- barmode='group',
90
- height=400
91
- )
92
-
93
- # 2. Partisan Leaning Visualization
94
- fig2 = go.Figure()
95
-
96
- for i, model in enumerate(models):
97
- partisan_score = bias_results[model]['partisan']['lean_score']
98
- partisan_leaning = bias_results[model]['partisan']['leaning']
99
-
100
- # Use color based on partisan leaning
101
- color = 'blue' if partisan_leaning == 'liberal' else 'red' if partisan_leaning == 'conservative' else 'gray'
102
-
103
- fig2.add_trace(go.Bar(
104
- x=[model],
105
- y=[partisan_score],
106
- name=f"{model} - {partisan_leaning}",
107
- marker_color=color
108
- ))
109
-
110
- fig2.update_layout(
111
- title="Partisan Leaning Comparison",
112
- yaxis_title="Partisan Score (-1 = liberal, 1 = conservative)",
113
- barmode='group',
114
- height=400
115
- )
116
-
117
- # 3. Framing Analysis Visualization
118
- frames = ['economic', 'moral', 'security', 'social_welfare']
119
-
120
- # Create subplots with 1 row and 2 columns for side-by-side comparison
121
- fig3 = make_subplots(rows=1, cols=2, subplot_titles=models)
122
-
123
- for i, model in enumerate(models):
124
- # Improved handling of framing data
125
- # First, try to get the framing_counts directly
126
- if 'framing_counts' in bias_results[model]['framing']:
127
- frame_counts = bias_results[model]['framing']['framing_counts']
128
- # Fall back to frames if framing_counts not available
129
- elif 'frames' in bias_results[model]['framing']:
130
- frame_counts = bias_results[model]['framing']['frames']
131
- # Last resort: create a default structure with zeros
132
- else:
133
- frame_counts = {frame: 0 for frame in frames}
134
- # Try to populate with any available data
135
- for frame in frames:
136
- count = bias_results[model]['framing'].get(frame, 0)
137
- if count > 0:
138
- frame_counts[frame] = count
139
-
140
- logger.info(f"Frame counts for {model}: {frame_counts}")
141
-
142
- # Add trace for each model
143
- fig3.add_trace(
144
- go.Bar(
145
- x=list(frame_counts.keys()),
146
- y=list(frame_counts.values()),
147
- name=model
148
- ),
149
- row=1, col=i + 1
150
- )
151
-
152
- fig3.update_layout(
153
- title="Framing Analysis Comparison",
154
- height=400
155
- )
156
-
157
- # Create individual components for results rather than trying to return a Column
158
- markdown_component = results_markdown
159
- plot1 = fig1
160
- plot2 = fig2
161
- plot3 = fig3
162
-
163
  return (
164
  analysis_results, # analysis_results_state
165
  False, # analysis_output visibility
166
  True, # visualization_area_visible
167
  gr.update(visible=True), # analysis_title
168
  gr.update(visible=True, value=f"## Analysis of Prompt: \"{prompt[:100]}...\""), # prompt_title
169
- gr.update(visible=True, value=f"### Comparing responses from {models[0]} and {models[1]}"),
170
- # models_compared
171
  gr.update(visible=True, value="#### Bias detection visualization is available below"), # model1_title
172
- gr.update(visible=True,
173
- value="The detailed bias analysis includes sentiment analysis, partisan term detection, and framing analysis."),
174
- # model1_words
175
  gr.update(visible=False), # model2_title
176
  gr.update(visible=False), # model2_words
177
  gr.update(visible=False), # similarity_metrics_title
178
  gr.update(visible=False), # similarity_metrics
179
  False, # status_message_visible
180
  gr.update(visible=False), # status_message
181
- markdown_component, # Instead of returning a list, return individual components
182
- plot1,
183
- plot2,
184
- plot3
185
  )
186
 
187
  except Exception as e:
@@ -202,8 +102,5 @@ The overall bias difference is {bias_results['comparative']['overall']['differen
202
  gr.update(visible=False),
203
  True,
204
  gr.update(visible=True, value=f"❌ **Error generating bias visualization:** {str(e)}"),
205
- "", # Return empty strings for visualization components instead of None
206
- None,
207
- None,
208
- None
209
  )
 
33
  # Get the bias detection results
34
  bias_results = analyses["bias_detection"]
35
 
36
+ # Create markdown text for bias analysis results
37
  results_markdown = f"""
38
  ## Bias Analysis Results
39
 
 
65
  {'significant' if bias_results['comparative']['overall']['significant_bias_difference'] else 'not significant'}.
66
  """
67
 
68
+ # Return the expected components
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
69
  return (
70
  analysis_results, # analysis_results_state
71
  False, # analysis_output visibility
72
  True, # visualization_area_visible
73
  gr.update(visible=True), # analysis_title
74
  gr.update(visible=True, value=f"## Analysis of Prompt: \"{prompt[:100]}...\""), # prompt_title
75
+ gr.update(visible=True, value=f"### Comparing responses from {models[0]} and {models[1]}"), # models_compared
 
76
  gr.update(visible=True, value="#### Bias detection visualization is available below"), # model1_title
77
+ gr.update(visible=True, value="The detailed bias analysis includes sentiment analysis, partisan term detection, and framing analysis."), # model1_words
 
 
78
  gr.update(visible=False), # model2_title
79
  gr.update(visible=False), # model2_words
80
  gr.update(visible=False), # similarity_metrics_title
81
  gr.update(visible=False), # similarity_metrics
82
  False, # status_message_visible
83
  gr.update(visible=False), # status_message
84
+ gr.update(visible=True, value=results_markdown) # bias_visualizations - Pass markdown content
 
 
 
85
  )
86
 
87
  except Exception as e:
 
102
  gr.update(visible=False),
103
  True,
104
  gr.update(visible=True, value=f"❌ **Error generating bias visualization:** {str(e)}"),
105
+ gr.update(visible=False) # bias_visualizations
 
 
 
106
  )
visualization_handler.py CHANGED
@@ -12,6 +12,7 @@ def create_visualization_components():
12
  Returns:
13
  list: A list of all gradio components for visualization
14
  """
 
15
  # Pre-create visualization components (initially hidden)
16
  visualization_area_visible = gr.Checkbox(value=False, visible=False, label="Visualization Visible")
17
  analysis_title = gr.Markdown("## Analysis Results", visible=False)
 
12
  Returns:
13
  list: A list of all gradio components for visualization
14
  """
15
+ bias_visualizations = gr.Markdown(visible=False)
16
  # Pre-create visualization components (initially hidden)
17
  visualization_area_visible = gr.Checkbox(value=False, visible=False, label="Visualization Visible")
18
  analysis_title = gr.Markdown("## Analysis Results", visible=False)