import streamlit as st
import pandas as pd
from datasets import load_dataset
from random import sample
from utils.metric import Regard
from utils.model import gpt2
import os


st.title('Gender Bias Analysis in Text Generation')


def check_password():
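    # Simple access gate: the expected password is read from the PASSWORD environment
    # variable and a successful login is remembered in st.session_state.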
|
    def password_entered():
        # Compare the submitted value (stored under the widget's key) with the expected password.
        if st.session_state.get('password_input') == os.getenv('PASSWORD'):
            st.session_state['password_correct'] = True
        else:
            st.error("Incorrect password, please try again.")

    st.text_input("Enter Password:", type="password", key='password_input')
    st.button("Submit", on_click=password_entered)

    if st.session_state.get('password_correct', False):
        load_and_process_data()
    else:
        st.error("Please enter a valid password to access the demo.")


def load_and_process_data():
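    # Workflow: load the BOLD dataset, sample prompts for American actresses and actors,
    # generate continuations with GPT-2, then compare the two groups with the regard metric.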
|
    st.subheader('Loading and Processing Data')
    st.write('Loading the BOLD dataset...')
    bold = load_dataset("AlexaAI/bold", split="train")

    st.write('Sampling prompts for 10 American actresses and 10 American actors...')
    female_bold = sample([p for p in bold if p['category'] == 'American_actresses'], 10)
    male_bold = sample([p for p in bold if p['category'] == 'American_actors'], 10)

    # Keep only the first prompt for each sampled person.
    male_prompts = [p['prompts'][0] for p in male_bold]
    female_prompts = [p['prompts'][0] for p in female_bold]

    GPT2 = gpt2()
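    # Assumption: gpt2() from utils.model wraps a GPT-2 text-generation pipeline whose
    # text_generation() method returns one full generated string (prompt + continuation)
    # per input prompt; the prompts are stripped below so only the continuations are scored.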
|
|
|
    st.write('Generating text for male prompts...')
    male_generation = GPT2.text_generation(male_prompts, pad_token_id=50256, max_length=50, do_sample=False)
    male_continuations = [gen.replace(prompt, '') for gen, prompt in zip(male_generation, male_prompts)]

    st.write('Generating text for female prompts...')
    female_generation = GPT2.text_generation(female_prompts, pad_token_id=50256, max_length=50, do_sample=False)
    female_continuations = [gen.replace(prompt, '') for gen, prompt in zip(female_generation, female_prompts)]

    st.write('Generated {} male continuations'.format(len(male_continuations)))
    st.write('Generated {} female continuations'.format(len(female_continuations)))
|
    st.subheader('Sample Generated Texts')
    st.write('**Male Prompt:**', male_prompts[0])
    st.write('**Male Continuation:**', male_continuations[0])
    st.write('**Female Prompt:**', female_prompts[0])
    st.write('**Female Continuation:**', female_continuations[0])
|
    # Regard measures how positively or negatively the generated language portrays a group;
    # in "compare" mode the male continuations (data) are contrasted with the female
    # continuations (references).
    regard = Regard("compare")
    st.write('Computing regard results to compare male and female continuations...')
    regard_results = regard.compute(data=male_continuations, references=female_continuations)

    st.subheader('Regard Results')
    st.write('**Raw Regard Results:**')
    st.json(regard_results)

    st.write('Computing average regard results for comparative analysis...')
    regard_results_avg = regard.compute(data=male_continuations, references=female_continuations, aggregation='average')
    st.write('**Average Regard Results:**')
    st.json(regard_results_avg)
|
|
if __name__ == '__main__':
    check_password()
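# To run the demo locally (assuming this script is saved as app.py and the expected
# password has been set): PASSWORD=<your-password> streamlit run app.py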
|
|
|
|
|
|