Ryan committed on
Commit
da60688
·
1 Parent(s): a109ed5
.idea/workspace.xml CHANGED
@@ -5,7 +5,10 @@
5
  </component>
6
  <component name="ChangeListManager">
7
  <list default="true" id="8e67814c-7f04-433c-ab7a-2b65a1106d4c" name="Changes" comment="">
 
 
8
  <change beforePath="$PROJECT_DIR$/processors/bias_processor.py" beforeDir="false" afterPath="$PROJECT_DIR$/processors/bias_processor.py" afterDir="false" />
 
9
  </list>
10
  <option name="SHOW_DIALOG" value="false" />
11
  <option name="HIGHLIGHT_CONFLICTS" value="true" />
@@ -62,7 +65,7 @@
62
  <option name="presentableId" value="Default" />
63
  <updated>1745170754325</updated>
64
  <workItem from="1745170755404" duration="245000" />
65
- <workItem from="1745172030020" duration="18429000" />
66
  </task>
67
  <servers />
68
  </component>
 
5
  </component>
6
  <component name="ChangeListManager">
7
  <list default="true" id="8e67814c-7f04-433c-ab7a-2b65a1106d4c" name="Changes" comment="">
8
+ <change beforePath="$PROJECT_DIR$/.idea/workspace.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/workspace.xml" afterDir="false" />
9
+ <change beforePath="$PROJECT_DIR$/app.py" beforeDir="false" afterPath="$PROJECT_DIR$/app.py" afterDir="false" />
10
  <change beforePath="$PROJECT_DIR$/processors/bias_processor.py" beforeDir="false" afterPath="$PROJECT_DIR$/processors/bias_processor.py" afterDir="false" />
11
+ <change beforePath="$PROJECT_DIR$/ui/analysis_screen.py" beforeDir="false" afterPath="$PROJECT_DIR$/ui/analysis_screen.py" afterDir="false" />
12
  </list>
13
  <option name="SHOW_DIALOG" value="false" />
14
  <option name="HIGHLIGHT_CONFLICTS" value="true" />
 
65
  <option name="presentableId" value="Default" />
66
  <updated>1745170754325</updated>
67
  <workItem from="1745170755404" duration="245000" />
68
+ <workItem from="1745172030020" duration="18901000" />
69
  </task>
70
  <servers />
71
  </component>
app.py CHANGED
@@ -75,8 +75,7 @@ def create_app():
75
  run_analysis_btn = analysis_components[2]
76
  analysis_output = analysis_components[3]
77
  ngram_n = analysis_components[4]
78
- ngram_top = analysis_components[5]
79
- topic_count = analysis_components[6]
80
 
81
  # Create visualization components
82
  visualization_components = create_visualization_components()
@@ -84,7 +83,7 @@ def create_app():
84
  # Connect the run button to the analysis function
85
  run_analysis_btn.click(
86
  fn=run_analysis,
87
- inputs=[dataset_state, analysis_options, ngram_n, ngram_top, topic_count],
88
  outputs=visualization_components
89
  )
90
 
 
75
  run_analysis_btn = analysis_components[2]
76
  analysis_output = analysis_components[3]
77
  ngram_n = analysis_components[4]
78
+ topic_count = analysis_components[5]
 
79
 
80
  # Create visualization components
81
  visualization_components = create_visualization_components()
 
83
  # Connect the run button to the analysis function
84
  run_analysis_btn.click(
85
  fn=run_analysis,
86
+ inputs=[dataset_state, analysis_options, ngram_n, topic_count],
87
  outputs=visualization_components
88
  )
89
 
processors/bias_processor.py CHANGED
@@ -1,6 +1,12 @@
1
  import gradio as gr
2
  import logging
3
  import traceback
 
 
 
 
 
 
4
 
5
  # Set up logging
6
  logger = logging.getLogger('gradio_app.processors.bias')
@@ -24,13 +30,10 @@ def process_bias_detection(analysis_results, prompt, analyses):
24
  logger.info(f"Bias models: {models}")
25
 
26
  try:
27
- # Create bias visualization components
28
- from visualization.bias_visualizer import create_bias_visualization
29
-
30
  # Get the bias detection results
31
  bias_results = analyses["bias_detection"]
32
 
33
- # Create markdown components directly rather than trying to return them
34
  results_markdown = f"""
35
  ## Bias Analysis Results
36
 
@@ -62,6 +65,87 @@ The overall bias difference is {bias_results['comparative']['overall']['differen
62
  {'significant' if bias_results['comparative']['overall']['significant_bias_difference'] else 'not significant'}.
63
  """
64
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
65
  return (
66
  analysis_results, # analysis_results_state
67
  False, # analysis_output visibility
@@ -80,9 +164,9 @@ The overall bias difference is {bias_results['comparative']['overall']['differen
80
  gr.update(visible=False), # similarity_metrics
81
  False, # status_message_visible
82
  gr.update(visible=False), # status_message
83
- gr.update(visible=True, value=results_markdown)
84
- # bias_visualizations - Update with text instead of components
85
  )
 
86
  except Exception as e:
87
  logger.error(f"Error generating bias visualization: {str(e)}\n{traceback.format_exc()}")
88
 
 
1
  import gradio as gr
2
  import logging
3
  import traceback
4
+ import matplotlib.pyplot as plt
5
+ import numpy as np
6
+ import plotly.graph_objects as go
7
+ from plotly.subplots import make_subplots
8
+ import io
9
+ import base64
10
 
11
  # Set up logging
12
  logger = logging.getLogger('gradio_app.processors.bias')
 
30
  logger.info(f"Bias models: {models}")
31
 
32
  try:
 
 
 
33
  # Get the bias detection results
34
  bias_results = analyses["bias_detection"]
35
 
36
+ # Create markdown components for text description
37
  results_markdown = f"""
38
  ## Bias Analysis Results
39
 
 
65
  {'significant' if bias_results['comparative']['overall']['significant_bias_difference'] else 'not significant'}.
66
  """
67
 
68
+ # Create visual components
69
+ # 1. Sentiment Analysis Visualization
70
+ fig1 = go.Figure()
71
+
72
+ for i, model in enumerate(models):
73
+ sentiment_strength = bias_results[model]['sentiment']['bias_strength']
74
+ sentiment_direction = bias_results[model]['sentiment']['bias_direction']
75
+
76
+ # Use color based on sentiment direction
77
+ color = 'green' if sentiment_direction == 'positive' else 'red' if sentiment_direction == 'negative' else 'gray'
78
+
79
+ fig1.add_trace(go.Bar(
80
+ x=[model],
81
+ y=[sentiment_strength],
82
+ name=f"{model} - {sentiment_direction}",
83
+ marker_color=color
84
+ ))
85
+
86
+ fig1.update_layout(
87
+ title="Sentiment Analysis Comparison",
88
+ yaxis_title="Sentiment Strength",
89
+ barmode='group',
90
+ height=400
91
+ )
92
+
93
+ # 2. Partisan Leaning Visualization
94
+ fig2 = go.Figure()
95
+
96
+ for i, model in enumerate(models):
97
+ partisan_score = bias_results[model]['partisan']['lean_score']
98
+ partisan_leaning = bias_results[model]['partisan']['leaning']
99
+
100
+ # Use color based on partisan leaning
101
+ color = 'blue' if partisan_leaning == 'liberal' else 'red' if partisan_leaning == 'conservative' else 'gray'
102
+
103
+ fig2.add_trace(go.Bar(
104
+ x=[model],
105
+ y=[partisan_score],
106
+ name=f"{model} - {partisan_leaning}",
107
+ marker_color=color
108
+ ))
109
+
110
+ fig2.update_layout(
111
+ title="Partisan Leaning Comparison",
112
+ yaxis_title="Partisan Score (-1 = liberal, 1 = conservative)",
113
+ barmode='group',
114
+ height=400
115
+ )
116
+
117
+ # 3. Framing Analysis Visualization
118
+ frames = ['economic', 'moral', 'security', 'social_welfare']
119
+
120
+ # Create subplots with 1 row and 2 columns for side-by-side comparison
121
+ fig3 = make_subplots(rows=1, cols=2, subplot_titles=models)
122
+
123
+ for i, model in enumerate(models):
124
+ frame_counts = bias_results[model]['framing']['frame_counts']
125
+
126
+ # Add trace for each model
127
+ fig3.add_trace(
128
+ go.Bar(
129
+ x=list(frame_counts.keys()),
130
+ y=list(frame_counts.values()),
131
+ name=model
132
+ ),
133
+ row=1, col=i + 1
134
+ )
135
+
136
+ fig3.update_layout(
137
+ title="Framing Analysis Comparison",
138
+ height=400
139
+ )
140
+
141
+ # Return all components together
142
+ visualization_components = [
143
+ results_markdown,
144
+ fig1,
145
+ fig2,
146
+ fig3
147
+ ]
148
+
149
  return (
150
  analysis_results, # analysis_results_state
151
  False, # analysis_output visibility
 
164
  gr.update(visible=False), # similarity_metrics
165
  False, # status_message_visible
166
  gr.update(visible=False), # status_message
167
+ visualization_components # visualization components
 
168
  )
169
+
170
  except Exception as e:
171
  logger.error(f"Error generating bias visualization: {str(e)}\n{traceback.format_exc()}")
172
 
ui/analysis_screen.py CHANGED
@@ -19,7 +19,7 @@ def create_analysis_screen():
19
  Create the analysis options screen
20
 
21
  Returns:
22
- tuple: (analysis_options, analysis_params, run_analysis_btn, analysis_output, ngram_n, ngram_top, topic_count)
23
  """
24
  with gr.Column() as analysis_screen:
25
  gr.Markdown("## Analysis Options")
@@ -46,11 +46,8 @@ def create_analysis_screen():
46
  label="N-gram Size",
47
  visible=False
48
  )
49
- ngram_top = gr.Slider(
50
- minimum=5, maximum=30, value=10, step=1,
51
- label="Top N-grams to Display",
52
- visible=False
53
- )
54
 
55
  # Create topic modeling parameter accessible at top level
56
  topic_count = gr.Slider(
@@ -66,10 +63,10 @@ def create_analysis_screen():
66
  gr.Markdown("### Topic Modeling Parameters")
67
  # We'll use the topic_count defined above
68
 
69
- # N-gram parameters group (using external ngram_n and ngram_top)
70
  with gr.Group(visible=False) as ngram_params:
71
  gr.Markdown("### N-gram Parameters")
72
- # We're already using ngram_n and ngram_top defined above
73
 
74
  # Bias detection parameters - simplified with no checkboxes
75
  with gr.Group(visible=False) as bias_params:
@@ -90,7 +87,6 @@ def create_analysis_screen():
90
  bias_params: gr.update(visible=selected == "Bias Detection"),
91
  classifier_params: gr.update(visible=selected == "Classifier"),
92
  ngram_n: gr.update(visible=selected == "N-gram Analysis"),
93
- ngram_top: gr.update(visible=selected == "N-gram Analysis"),
94
  topic_count: gr.update(visible=selected == "Topic Modeling"),
95
  }
96
 
@@ -104,7 +100,6 @@ def create_analysis_screen():
104
  bias_params,
105
  classifier_params,
106
  ngram_n,
107
- ngram_top,
108
  topic_count,
109
  ]
110
  )
@@ -115,8 +110,8 @@ def create_analysis_screen():
115
  # Analysis output area - hidden JSON component to store raw results
116
  analysis_output = gr.JSON(label="Analysis Results", visible=False)
117
 
118
- # Return the components needed by app.py, with bow_top_slider removed
119
- return analysis_options, analysis_params, run_analysis_btn, analysis_output, ngram_n, ngram_top, topic_count
120
 
121
  # Add the implementation of these helper functions
122
  def extract_important_words(text, top_n=20):
 
19
  Create the analysis options screen
20
 
21
  Returns:
22
+ tuple: (analysis_options, analysis_params, run_analysis_btn, analysis_output, ngram_n, topic_count)
23
  """
24
  with gr.Column() as analysis_screen:
25
  gr.Markdown("## Analysis Options")
 
46
  label="N-gram Size",
47
  visible=False
48
  )
49
+
50
+ # Removed ngram_top slider
 
 
 
51
 
52
  # Create topic modeling parameter accessible at top level
53
  topic_count = gr.Slider(
 
63
  gr.Markdown("### Topic Modeling Parameters")
64
  # We'll use the topic_count defined above
65
 
66
+ # N-gram parameters group (using external ngram_n, removed ngram_top)
67
  with gr.Group(visible=False) as ngram_params:
68
  gr.Markdown("### N-gram Parameters")
69
+ # We're already using ngram_n defined above
70
 
71
  # Bias detection parameters - simplified with no checkboxes
72
  with gr.Group(visible=False) as bias_params:
 
87
  bias_params: gr.update(visible=selected == "Bias Detection"),
88
  classifier_params: gr.update(visible=selected == "Classifier"),
89
  ngram_n: gr.update(visible=selected == "N-gram Analysis"),
 
90
  topic_count: gr.update(visible=selected == "Topic Modeling"),
91
  }
92
 
 
100
  bias_params,
101
  classifier_params,
102
  ngram_n,
 
103
  topic_count,
104
  ]
105
  )
 
110
  # Analysis output area - hidden JSON component to store raw results
111
  analysis_output = gr.JSON(label="Analysis Results", visible=False)
112
 
113
+ # Return the components needed by app.py, with ngram_top removed
114
+ return analysis_options, analysis_params, run_analysis_btn, analysis_output, ngram_n, topic_count
115
 
116
  # Add the implementation of these helper functions
117
  def extract_important_words(text, top_n=20):