import streamlit as st
import pandas as pd
from datasets import load_dataset
from random import sample
from utils.metric import Regard
from utils.model import gpt2
import os

# Set up the Streamlit interface
st.title('Gender Bias Analysis in Text Generation')
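
# Simple password gate: the rest of the demo only runs after the user submits
# the password stored in the PASSWORD environment variable.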
def check_password():
    def password_entered():
        # Callback for the Submit button: check the entered text against the
        # PASSWORD environment variable and record the result in session state.
        if password_input == os.getenv('PASSWORD'):
            st.session_state['password_correct'] = True
        else:
            st.error("Incorrect Password, please try again.")

    password_input = st.text_input("Enter Password:", type="password")
    submit_button = st.button("Submit", on_click=password_entered)

    if st.session_state.get('password_correct', False):
        load_and_process_data()
    else:
        st.error("Please enter a valid password to access the demo.")
def load_and_process_data():
    st.subheader('Loading and Processing Data')

    st.write('Loading the BOLD dataset...')
    bold = load_dataset("AlexaAI/bold", split="train")

    st.write('Sampling 10 prompts each for American actresses and actors...')
    female_bold = sample([p for p in bold if p['category'] == 'American_actresses'], 10)
    male_bold = sample([p for p in bold if p['category'] == 'American_actors'], 10)

    # Keep only the first prompt of each sampled BOLD entry.
    male_prompts = [p['prompts'][0] for p in male_bold]
    female_prompts = [p['prompts'][0] for p in female_bold]

    GPT2 = gpt2()
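
    # Note: gpt2() is this project's local wrapper (utils.model), so its exact
    # behaviour is defined there. A rough equivalent of the calls below using the
    # transformers pipeline API would be (an assumption, not part of this app):
    #   from transformers import pipeline
    #   generator = pipeline("text-generation", model="gpt2")
    #   outputs = generator(male_prompts, pad_token_id=50256, max_length=50, do_sample=False)
    #   texts = [out[0]["generated_text"] for out in outputs]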
    st.write('Generating text for male prompts...')
    male_generation = GPT2.text_generation(male_prompts, pad_token_id=50256, max_length=50, do_sample=False)
    # Strip the prompt from each generation so only the continuation is scored.
    male_continuations = [gen.replace(prompt, '') for gen, prompt in zip(male_generation, male_prompts)]

    st.write('Generating text for female prompts...')
    female_generation = GPT2.text_generation(female_prompts, pad_token_id=50256, max_length=50, do_sample=False)
    female_continuations = [gen.replace(prompt, '') for gen, prompt in zip(female_generation, female_prompts)]

    st.write('Generated {} male continuations'.format(len(male_continuations)))
    st.write('Generated {} female continuations'.format(len(female_continuations)))

    st.subheader('Sample Generated Texts')
    st.write('**Male Prompt:**', male_prompts[0])
    st.write('**Male Continuation:**', male_continuations[0])
    st.write('**Female Prompt:**', female_prompts[0])
    st.write('**Female Continuation:**', female_continuations[0])
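
    # Note: Regard("compare") is this project's local wrapper (utils.metric). An
    # equivalent comparison with the Hugging Face `evaluate` library would look
    # roughly like this (an assumption about the wrapper, not part of this app):
    #   import evaluate
    #   regard = evaluate.load("regard", "compare")
    #   regard.compute(data=male_continuations, references=female_continuations)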
    regard = Regard("compare")

    st.write('Computing regard results to compare male and female continuations...')
    regard_results = regard.compute(data=male_continuations, references=female_continuations)

    st.subheader('Regard Results')
    st.write('**Raw Regard Results:**')
    st.json(regard_results)

    st.write('Computing average regard results for comparative analysis...')
    regard_results_avg = regard.compute(data=male_continuations, references=female_continuations, aggregation='average')
    st.write('**Average Regard Results:**')
    st.json(regard_results_avg)
if __name__ == '__main__':
    check_password()
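
# To run the app locally (a sketch; assumes this file is saved as app.py and
# that the utils package providing Regard and gpt2 is importable):
#   PASSWORD=<your-password> streamlit run app.py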