import streamlit as st
import pandas as pd
from datasets import load_dataset
from random import sample
from utils.metric import Regard
from utils.model import gpt2
import os
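# `utils.model.gpt2` and `utils.metric.Regard` are project-local helpers not
# shown here. A minimal sketch of what the `gpt2` wrapper is assumed to look
# like, built on the Hugging Face `transformers` text-generation pipeline
# (the class and method names mirror how they are called below; the body is
# an assumption, not the actual implementation):
#
#     from transformers import pipeline
#
#     class gpt2:
#         def __init__(self):
#             self.generator = pipeline('text-generation', model='gpt2')
#
#         def text_generation(self, prompts, **kwargs):
#             # The pipeline returns a list of candidate lists; keep the
#             # first generated string for each prompt.
#             outputs = self.generator(prompts, **kwargs)
#             return [out[0]['generated_text'] for out in outputs]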

# Set up the Streamlit interface
st.title('Gender Bias Analysis in Text Generation')

def check_password():
    def password_entered():
        # Callbacks run at the start of the next rerun, so read the widget's
        # current value from session state instead of a stale closure variable.
        if st.session_state.get('password_input') == os.getenv('PASSWORD'):
            st.session_state['password_correct'] = True
        else:
            st.session_state['password_correct'] = False

    st.text_input("Enter Password:", type="password", key="password_input")
    submit_button = st.button("Submit", on_click=password_entered)

    if submit_button and not st.session_state.get('password_correct', False):
        st.error("Incorrect Password, please try again.")

if not st.session_state.get('password_correct', False):
    check_password()
else:
    st.sidebar.success("Password Verified. Proceed with the demo.")

    st.subheader('Loading and Processing Data')
    st.write('Loading the BOLD dataset...')
    bold = load_dataset("AlexaAI/bold", split="train")
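    # Each BOLD record includes a 'domain', a 'category' (e.g. 'American_actors'
    # or 'American_actresses' in the gender domain), a 'name', and a list of
    # Wikipedia-derived sentence 'prompts'.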

    # Allow user to set the sample size
    data_size = st.sidebar.slider('Select number of samples per category:', min_value=1, max_value=50, value=10)

    st.write(f'Sampling {data_size} female and male American actors...')
    female_bold = sample([p for p in bold if p['category'] == 'American_actresses'], data_size)
    male_bold = sample([p for p in bold if p['category'] == 'American_actors'], data_size)

    male_prompts = [p['prompts'][0] for p in male_bold]
    female_prompts = [p['prompts'][0] for p in female_bold]
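    # Keep only the first prompt per person, so each sampled actor contributes
    # exactly one generation.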

    GPT2 = gpt2()

    st.write('Generating text for male prompts...')
    male_generation = GPT2.text_generation(
        male_prompts, pad_token_id=50256, max_length=50, do_sample=False, truncation=True
    )
    # Remove the prompt prefix so only the model's continuation is analyzed.
    male_continuations = [gen.replace(prompt, '') for gen, prompt in zip(male_generation, male_prompts)]

    st.write('Generating text for female prompts...')

    female_generation = GPT2.text_generation(
        female_prompts, pad_token_id=50256, max_length=50, do_sample=False, truncation=True
    )
    female_continuations = [gen.replace(prompt, '') for gen, prompt in zip(female_generation, female_prompts)]

    st.write(f'Generated {len(male_continuations)} male continuations')
    st.write(f'Generated {len(female_continuations)} female continuations')

    st.subheader('Sample Generated Texts')
    st.write('**Male Prompt:**', male_prompts[0])
    st.write('**Male Continuation:**', male_continuations[0])
    st.write('**Female Prompt:**', female_prompts[0])
    st.write('**Female Continuation:**', female_continuations[0])

    regard = Regard("compare")
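    # Regard (Sheng et al., 2019) scores the social perception of text toward a
    # demographic as positive, negative, neutral, or other; the 'compare' mode
    # used here contrasts the male continuations ('data') with the female
    # continuations ('references').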
    st.write('Computing regard results to compare male and female continuations...')
    regard_results = regard.compute(data=male_continuations, references=female_continuations)
    st.subheader('Regard Results')
    st.write('**Raw Regard Results:**')
    st.json(regard_results)

    st.write('Computing average regard results for comparative analysis...')
    regard_results_avg = regard.compute(data=male_continuations, references=female_continuations, aggregation='average')
    st.write('**Average Regard Results:**')
    st.json(regard_results_avg)
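
    # A possible follow-up (sketch, assuming the averaged results follow the
    # `evaluate` regard measurement's shape, i.e. a dict mapping each regard
    # label to a score), which would also put the `pandas` import to use:
    #
    #     df = pd.DataFrame(regard_results_avg)
    #     st.bar_chart(df)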