import streamlit as st
import pandas as pd
from datasets import load_dataset, Dataset
from random import sample
from utils.metric import Regard
from utils.model import gpt2
import matplotlib.pyplot as plt
import os
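
# Streamlit demo page: pick a BOLD domain and two categories within it, sample
# an equal number of prompts from each, generate GPT-2 continuations, and
# compare their regard scores against the matching Wikipedia continuations.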
# Set up the Streamlit interface
st.title('Gender Bias Analysis in Text Generation')
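

# Simple password gate: the rest of the page only runs after the user submits
# the password stored in the PASSWORD environment variable.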
def check_password():
    def password_entered():
        if password_input == os.getenv('PASSWORD'):
            st.session_state['password_correct'] = True
        else:
            st.error("Incorrect Password, please try again.")

    password_input = st.text_input("Enter Password:", type="password")
    submit_button = st.button("Submit", on_click=password_entered)
    if submit_button and not st.session_state.get('password_correct', False):
        st.error("Please enter a valid password to access the demo.")


if not st.session_state.get('password_correct', False):
    check_password()
else:
    st.sidebar.success("Password Verified. Proceed with the demo.")

    if 'data_size' not in st.session_state:
        st.session_state['data_size'] = 10
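
    # Load the BOLD dataset once per session. Each raw row stores parallel
    # lists of prompts and matching Wikipedia sentences; the loop below
    # expands them into one (prompt, wikipedia) row per pair.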
    if 'bold' not in st.session_state:
        bold = pd.DataFrame({})
        bold_raw = pd.DataFrame(load_dataset("AlexaAI/bold", split="train"))
        for index, row in bold_raw.iterrows():
            bold_raw_prompts = list(row['prompts'])
            bold_raw_wikipedia = list(row['wikipedia'])
            bold_expansion = zip(bold_raw_prompts, bold_raw_wikipedia)
            for bold_prompt, bold_wikipedia in bold_expansion:
                bold = bold._append(
                    {'domain': row['domain'], 'name': row['name'], 'category': row['category'],
                     'prompts': bold_prompt, 'wikipedia': bold_wikipedia}, ignore_index=True)
        st.session_state['bold'] = Dataset.from_pandas(bold)

    if 'female_bold' not in st.session_state:
        st.session_state['female_bold'] = []
    if 'male_bold' not in st.session_state:
        st.session_state['male_bold'] = []
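
    # Let the user pick a BOLD domain and two categories within it to compare.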
    domain = st.selectbox(
        "Select your domain",
        pd.DataFrame(st.session_state['bold'])['domain'].unique())
    domain_limited = [p for p in st.session_state['bold'] if p['domain'] == domain]

    st.session_state['option_one'] = st.selectbox(
        "Select your first category",
        pd.DataFrame(domain_limited)['category'].unique())
    option_one_list = [p for p in st.session_state['bold'] if p['category'] == st.session_state['option_one']]
    o_one = st.session_state['option_one']

    st.session_state['option_two'] = st.selectbox(
        "Select your second category",
        pd.DataFrame(domain_limited)['category'].unique())
    option_two_list = [p for p in st.session_state['bold'] if p['category'] == st.session_state['option_two']]
    o_two = st.session_state['option_two']

    st.subheader('Step 1: Set Data Size')
    max_length = min(len(option_one_list), len(option_two_list), 50)
    data_size = st.slider('Select number of samples per category:', min_value=1, max_value=max_length,
                          value=min(st.session_state['data_size'], max_length))
    st.session_state['data_size'] = data_size
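
    # Note: the 'male_*' / 'female_*' names below are leftovers from an earlier
    # gender-only demo; 'male_*' holds samples for the first selected category
    # and 'female_*' for the second.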
    if st.button('Show Data'):
        st.session_state['male_bold'] = sample(option_one_list, data_size)
        st.session_state['female_bold'] = sample(option_two_list, data_size)
        st.write(f'Sampled {data_size} prompts for each of {o_one} and {o_two}.')
        st.write(f'**{o_one} Samples:**', pd.DataFrame(st.session_state['male_bold']))
        st.write(f'**{o_two} Samples:**', pd.DataFrame(st.session_state['female_bold']))
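
    # Step 2: generate GPT-2 continuations for both prompt sets. The prompt text
    # is stripped from both the model output and the Wikipedia sentence so that
    # only the continuations are compared downstream.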
    if st.session_state['female_bold'] and st.session_state['male_bold']:
        st.subheader('Step 2: Generate Text')

        if st.button('Generate Text'):
            GPT2 = gpt2()
            st.session_state['male_prompts'] = [p['prompts'] for p in st.session_state['male_bold']]
            st.session_state['female_prompts'] = [p['prompts'] for p in st.session_state['female_bold']]
            st.session_state['male_wiki_continuation'] = [p['wikipedia'].replace(p['prompts'], '') for p in
                                                          st.session_state['male_bold']]
            st.session_state['female_wiki_continuation'] = [p['wikipedia'].replace(p['prompts'], '') for p in
                                                            st.session_state['female_bold']]

            progress_bar = st.progress(0)
            st.write(f'Generating text for {o_one} prompts...')
            male_generation = GPT2.text_generation(st.session_state['male_prompts'], pad_token_id=50256,
                                                   max_length=50, do_sample=False, truncation=True)
            st.session_state['male_continuations'] = [gen[0]['generated_text'].replace(prompt, '') for gen, prompt in
                                                      zip(male_generation, st.session_state['male_prompts'])]
            progress_bar.progress(50)

            st.write(f'Generating text for {o_two} prompts...')
            female_generation = GPT2.text_generation(st.session_state['female_prompts'], pad_token_id=50256,
                                                     max_length=50, do_sample=False, truncation=True)
            st.session_state['female_continuations'] = [gen[0]['generated_text'].replace(prompt, '') for gen, prompt in
                                                        zip(female_generation, st.session_state['female_prompts'])]
            progress_bar.progress(100)

            st.write('Text generation completed.')
            # Drop stale regard results so Step 4 is recomputed on the new text.
            st.session_state.pop('rmr', None)
            st.session_state.pop('rfr', None)
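
        # Step 3: display the sampled prompts alongside the GPT-2 and Wikipedia
        # continuations once generation has run.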
        st.subheader('Step 3: Sample Generated Texts')
        if st.session_state.get('male_continuations') and st.session_state.get('female_continuations'):
            st.write(f"{o_one} Data Samples:")
            samples_df = pd.DataFrame({
                f'{o_one} Prompt': st.session_state['male_prompts'],
                f'{o_one} Continuation': st.session_state['male_continuations'],
                f'{o_one} Wiki Continuation': st.session_state['male_wiki_continuation'],
            })
            st.write(samples_df)

            st.write(f"{o_two} Data Samples:")
            samples_df = pd.DataFrame({
                f'{o_two} Prompt': st.session_state['female_prompts'],
                f'{o_two} Continuation': st.session_state['female_continuations'],
                f'{o_two} Wiki Continuation': st.session_state['female_wiki_continuation'],
            })
            st.write(samples_df)
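
            # Step 4: score both continuation sets with the Regard metric, using the
            # matching Wikipedia continuations as references.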
            if st.button('Evaluate'):
                st.subheader('Step 4: Regard Results')
                regard = Regard("inner_compare")
                st.write(f'Computing regard results to compare {o_one} and {o_two} continuations...')
                with st.spinner('Computing regard results...'):
                    regard_male_results = regard.compute(data=st.session_state['male_continuations'],
                                                         references=st.session_state['male_wiki_continuation'])
                    st.write(f'**{o_one} Regard Results:**')
                    st.json(regard_male_results)
                    st.session_state['rmr'] = regard_male_results

                    regard_female_results = regard.compute(data=st.session_state['female_continuations'],
                                                           references=st.session_state['female_wiki_continuation'])
                    st.write(f'**{o_two} Regard Results:**')
                    st.json(regard_female_results)
                    st.session_state['rfr'] = regard_female_results
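
            # Step 5: the plotting below treats 'no_ref_diff_mean' as the mean regard
            # distribution of the GPT-2 continuations and 'ref_diff_mean' as the
            # GPT-minus-Wikipedia difference, so the Wikipedia distribution is
            # recovered by subtraction.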
            if st.session_state.get('rmr') and st.session_state.get('rfr'):
                st.subheader('Step 5: Regard Results Plotting')
                if st.button('Plot'):
                    categories = ['GPT2', 'Wiki']
                    # Variable key: m = first category (o_one), f = second category (o_two);
                    # p/n/o = positive/negative/other regard share; *_gpt = GPT-2, *_wiki = Wikipedia.
                    mp_gpt = st.session_state['rmr']['no_ref_diff_mean']['positive']
                    mn_gpt = st.session_state['rmr']['no_ref_diff_mean']['negative']
                    mo_gpt = 1 - (mp_gpt + mn_gpt)
                    mp_wiki = mp_gpt - st.session_state['rmr']['ref_diff_mean']['positive']
                    mn_wiki = mn_gpt - st.session_state['rmr']['ref_diff_mean']['negative']
                    mo_wiki = 1 - (mn_wiki + mp_wiki)

                    fp_gpt = st.session_state['rfr']['no_ref_diff_mean']['positive']
                    fn_gpt = st.session_state['rfr']['no_ref_diff_mean']['negative']
                    fo_gpt = 1 - (fp_gpt + fn_gpt)
                    fp_wiki = fp_gpt - st.session_state['rfr']['ref_diff_mean']['positive']
                    fn_wiki = fn_gpt - st.session_state['rfr']['ref_diff_mean']['negative']
                    fo_wiki = 1 - (fn_wiki + fp_wiki)

                    positive_m = [mp_gpt, mp_wiki]
                    other_m = [mo_gpt, mo_wiki]
                    negative_m = [mn_gpt, mn_wiki]
                    positive_f = [fp_gpt, fp_wiki]
                    other_f = [fo_gpt, fo_wiki]
                    negative_f = [fn_gpt, fn_wiki]
                    # Plot a stacked regard distribution (negative / other / positive)
                    # for GPT-2 vs Wikipedia, one figure per category.
                    fig_a, ax_a = plt.subplots()
                    ax_a.bar(categories, negative_m, label='Negative', color='blue')
                    ax_a.bar(categories, other_m, bottom=negative_m, label='Other', color='orange')
                    ax_a.bar(categories, positive_m,
                             bottom=[negative_m[i] + other_m[i] for i in range(len(negative_m))],
                             label='Positive', color='green')
                    plt.xlabel('Categories')
                    plt.ylabel('Proportion')
                    plt.title(f'GPT vs Wiki on {o_one} regard')
                    plt.legend()
                    st.pyplot(fig_a)

                    fig_b, ax_b = plt.subplots()
                    ax_b.bar(categories, negative_f, label='Negative', color='blue')
                    ax_b.bar(categories, other_f, bottom=negative_f, label='Other', color='orange')
                    ax_b.bar(categories, positive_f,
                             bottom=[negative_f[i] + other_f[i] for i in range(len(negative_f))],
                             label='Positive', color='green')
                    plt.xlabel('Categories')
                    plt.ylabel('Proportion')
                    plt.title(f'GPT vs Wiki on {o_two} regard')
                    plt.legend()
                    st.pyplot(fig_b)
                    # Summary bars: net (positive - negative) regard per category, both as raw
                    # GPT-2 values and relative to the Wikipedia reference.
                    m_increase = mp_gpt - mn_gpt
                    m_relative_increase = mp_gpt - mp_wiki - (mn_gpt - mn_wiki)
                    f_increase = fp_gpt - fn_gpt
                    f_relative_increase = fp_gpt - fp_wiki - (fn_gpt - fn_wiki)
                    absolute_difference = [m_increase, f_increase]
                    relative_difference = [m_relative_increase, f_relative_increase]
                    new_categories = [f'{o_one}', f'{o_two}']

                    fig_c, ax_c = plt.subplots()
                    ax_c.bar(new_categories, absolute_difference, label='Positive - Negative', color='#40E0D0')
                    plt.xlabel('Categories')
                    plt.ylabel('Proportion')
                    plt.title(f'Difference of positive and negative: {o_one} vs {o_two}')
                    plt.legend()
                    st.pyplot(fig_c)

                    fig_d, ax_d = plt.subplots()
                    ax_d.bar(new_categories, relative_difference, label='Positive - Negative', color='#40E0D0')
                    plt.xlabel('Categories')
                    plt.ylabel('Proportion')
                    plt.title(f'Difference of positive and negative (relative to Wiki): {o_one} vs {o_two}')
                    plt.legend()
                    st.pyplot(fig_d)